Compare commits


No commits in common. "master" and "master" have entirely different histories.

48 changed files with 1133 additions and 3201 deletions


@@ -1,4 +1,4 @@
FROM golang:1.24-alpine AS basebuilder
FROM golang:1.22-alpine AS basebuilder
RUN apk add --update make bash ca-certificates
FROM basebuilder AS builder


@@ -10,7 +10,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
go_versions: [ '1.23', '1.24' ]
go_versions: [ '1.22', '1.23' ]
fail-fast: false
steps:
- uses: actions/checkout@v3


@@ -14,7 +14,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v3
with:
go-version: '1.24'
go-version: '1.23'
cache: true
- name: Install linters
@@ -28,7 +28,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
go_versions: [ '1.23', '1.24' ]
go_versions: [ '1.22', '1.23' ]
fail-fast: false
steps:
- uses: actions/checkout@v3
@@ -53,7 +53,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v3
with:
go-version: '1.24'
go-version: '1.23'
- name: Run integration tests
run: |-


@@ -16,8 +16,7 @@ jobs:
- name: Setup Go
uses: actions/setup-go@v3
with:
go-version: '1.23'
check-latest: true
go-version: '1.22.12'
- name: Install govulncheck
run: go install golang.org/x/vuln/cmd/govulncheck@latest


@@ -22,6 +22,9 @@ linters-settings:
# 'default' case is present, even if all enum members aren't listed in the
# switch
default-signifies-exhaustive: true
govet:
# report about shadowed variables
check-shadowing: false
custom:
truecloudlab-linters:
path: bin/external_linters.so


@@ -4,12 +4,9 @@ This document outlines major changes between releases.
## [Unreleased]
- Update Go to 1.23 (#228)
### Added
- Add handling quota limit reached error (#187)
- Add slash clipping for FileName attribute (#174)
- Add new format of tag names config
## [0.32.3] - 2025-02-05


@@ -2,9 +2,9 @@
REPO ?= $(shell go list -m)
VERSION ?= $(shell git describe --tags --match "v*" --dirty --always --abbrev=8 2>/dev/null || cat VERSION 2>/dev/null || echo "develop")
GO_VERSION ?= 1.23
LINT_VERSION ?= 1.64.8
TRUECLOUDLAB_LINT_VERSION ?= 0.0.10
GO_VERSION ?= 1.22
LINT_VERSION ?= 1.60.3
TRUECLOUDLAB_LINT_VERSION ?= 0.0.6
BUILD ?= $(shell date -u --iso=seconds)
HUB_IMAGE ?= git.frostfs.info/truecloudlab/frostfs-http-gw
@@ -30,10 +30,9 @@ PKG_VERSION ?= $(shell echo $(VERSION) | sed "s/^v//" | \
sed "s/-/~/")-${OS_RELEASE}
.PHONY: debpackage debclean
FUZZING_DIR = $(shell pwd)/tests/fuzzing/files
NGFUZZ_REPO = https://gitflic.ru/project/yadro/ngfuzz.git
FUZZ_NGFUZZ_DIR ?= ""
FUZZ_TIMEOUT ?= 30
FUZZ_FUNCTIONS ?= ""
FUZZ_FUNCTIONS ?= "all"
FUZZ_AUX ?= ""
# Make all binaries
@@ -100,22 +99,18 @@ check-ngfuzz:
exit 1; \
fi
.PHONY: install-ngfuzz
install-ngfuzz:
ifeq (,$(wildcard $(FUZZING_DIR)/ngfuzz))
@rm -rf $(FUZZING_DIR)/ngfuzz
@git clone $(NGFUZZ_REPO) $(FUZZING_DIR)/ngfuzz
@cd $(FUZZING_DIR)/ngfuzz && make
endif
.PHONY: install-fuzzing-deps
install-fuzzing-deps: check-clang check-ngfuzz
.PHONY: fuzz
fuzz: check-clang install-ngfuzz
fuzz: install-fuzzing-deps
@START_PATH=$$(pwd); \
ROOT_PATH=$$(realpath --relative-to=$(FUZZING_DIR)/ngfuzz $$START_PATH) ; \
cd $(FUZZING_DIR)/ngfuzz && \
./bin/ngfuzz clean && \
env CGO_ENABLED=1 ./bin/ngfuzz fuzz --funcs $(FUZZ_FUNCTIONS) --rootdir $$ROOT_PATH --timeout $(FUZZ_TIMEOUT) $(FUZZ_AUX) && \
./bin/ngfuzz coverage --rootdir $$ROOT_PATH
ROOT_PATH=$$(realpath --relative-to=$(FUZZ_NGFUZZ_DIR) $$START_PATH) ; \
cd $(FUZZ_NGFUZZ_DIR) && \
./ngfuzz -clean && \
./ngfuzz -fuzz $(FUZZ_FUNCTIONS) -rootdir $$ROOT_PATH -timeout $(FUZZ_TIMEOUT) $(FUZZ_AUX) && \
./ngfuzz -report
# Reformat code
fmt:
@@ -155,7 +150,7 @@ dirty-image:
@@make -C $(TMP_DIR)/linters lib CGO_ENABLED=1 OUT_DIR=$(OUTPUT_LINT_DIR)
@rm -rf $(TMP_DIR)/linters
@rmdir $(TMP_DIR) 2>/dev/null || true
@CGO_ENABLED=1 GOBIN=$(LINT_DIR) go install -trimpath github.com/golangci/golangci-lint/cmd/golangci-lint@v$(LINT_VERSION)
@CGO_ENABLED=1 GOBIN=$(LINT_DIR) go install github.com/golangci/golangci-lint/cmd/golangci-lint@v$(LINT_VERSION)
# Run linters
lint:


@@ -17,13 +17,10 @@ import (
"time"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/cache"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler/middleware"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
internalnet "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/net"
containerClient "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/service/contracts/container"
contractsUtil "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/service/contracts/util"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/service/frostfs"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/templates"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/metrics"
@@ -33,7 +30,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
v2container "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
treepool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
@@ -41,7 +37,6 @@ import (
"github.com/nspcc-dev/neo-go/cli/flags"
"github.com/nspcc-dev/neo-go/cli/input"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/rpcclient"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/wallet"
"github.com/panjf2000/ants/v2"
@@ -70,8 +65,6 @@ type (
settings *appSettings
loggerSettings *loggerSettings
bucketCache *cache.BucketCache
handle *handler.Handler
corsCnrID cid.ID
servers []Server
unbindServers []ServerInfo
@@ -103,29 +96,41 @@ type (
workerPoolSize int
logLevelConfig *logLevelConfig
mu sync.RWMutex
defaultTimestamp bool
archiveCompression bool
clientCut bool
returnIndexPage bool
indexPageTemplate string
bufferMaxSizeForPut uint64
namespaceHeader string
defaultNamespaces []string
cors *data.CORSRule
enableFilepathFallback bool
enableFilepathSlashFallback bool
mu sync.RWMutex
defaultTimestamp bool
archiveCompression bool
clientCut bool
returnIndexPage bool
indexPageTemplate string
bufferMaxSizeForPut uint64
namespaceHeader string
defaultNamespaces []string
corsAllowOrigin string
corsAllowMethods []string
corsAllowHeaders []string
corsExposeHeaders []string
corsAllowCredentials bool
corsMaxAge int
enableFilepathFallback bool
}
tagsConfig struct {
tagLogs sync.Map
defaultLvl zap.AtomicLevel
tagLogs sync.Map
}
logLevelConfig struct {
logLevel zap.AtomicLevel
tagsConfig *tagsConfig
}
CORS struct {
AllowOrigin string
AllowMethods []string
AllowHeaders []string
ExposeHeaders []string
AllowCredentials bool
MaxAge int
}
)
func newLogLevel(v *viper.Viper) zap.AtomicLevel {
@@ -139,34 +144,19 @@ func newLogLevel(v *viper.Viper) zap.AtomicLevel {
}
func newTagsConfig(v *viper.Viper, ll zapcore.Level) *tagsConfig {
t := tagsConfig{defaultLvl: zap.NewAtomicLevelAt(ll)}
var t tagsConfig
if err := t.update(v, ll); err != nil {
// panic here is analogue of the similar panic during common log level initialization.
panic(err.Error())
}
return &t
}
func newLogLevelConfig(lvl zap.AtomicLevel, tagsConfig *tagsConfig) *logLevelConfig {
cfg := &logLevelConfig{
return &logLevelConfig{
logLevel: lvl,
tagsConfig: tagsConfig,
}
cfg.setMinLogLevel()
return cfg
}
func (l *logLevelConfig) setMinLogLevel() {
l.tagsConfig.tagLogs.Range(func(_, value any) bool {
v := value.(zapcore.Level)
if v < l.logLevel.Level() {
l.logLevel.SetLevel(v)
}
return true
})
}
func (l *logLevelConfig) update(cfg *viper.Viper, log *zap.Logger) {
@@ -179,34 +169,34 @@ func (l *logLevelConfig) update(cfg *viper.Viper, log *zap.Logger) {
if err := l.tagsConfig.update(cfg, l.logLevel.Level()); err != nil {
log.Warn(logs.TagsLogConfigWontBeUpdated, zap.Error(err), logs.TagField(logs.TagApp))
}
l.setMinLogLevel()
}
func (t *tagsConfig) LevelEnabled(tag string, tgtLevel zapcore.Level) bool {
lvl, ok := t.tagLogs.Load(tag)
if !ok {
return t.defaultLvl.Enabled(tgtLevel)
return false
}
return lvl.(zapcore.Level).Enabled(tgtLevel)
}
func (t *tagsConfig) DefaultEnabled(lvl zapcore.Level) bool {
return t.defaultLvl.Enabled(lvl)
}
func (t *tagsConfig) update(cfg *viper.Viper, ll zapcore.Level) error {
tags, err := fetchLogTagsConfig(cfg, ll)
if err != nil {
return err
}
t.tagLogs.Range(func(key, _ any) bool {
t.tagLogs.Range(func(key, value any) bool {
k := key.(string)
v := value.(zapcore.Level)
if _, ok := tags[k]; !ok {
if lvl, ok := tags[k]; ok {
if lvl != v {
t.tagLogs.Store(key, lvl)
}
} else {
t.tagLogs.Delete(key)
delete(tags, k)
}
return true
})
@@ -214,7 +204,6 @@ func (t *tagsConfig) update(cfg *viper.Viper, ll zapcore.Level) error {
for k, v := range tags {
t.tagLogs.Store(k, v)
}
t.defaultLvl.SetLevel(ll)
return nil
}
@@ -262,7 +251,6 @@ func newApp(ctx context.Context, cfg *appCfg) App {
a.initResolver()
a.initMetrics()
a.initTracing(ctx)
a.initContainers(ctx)
return a
}
@@ -271,22 +259,6 @@ func (a *app) config() *viper.Viper {
return a.cfg.config()
}
func (a *app) initContainers(ctx context.Context) {
corsCnrID, err := a.fetchContainerID(ctx, cfgContainersCORS)
if err != nil {
a.log.Fatal(logs.CouldNotFetchCORSContainerInfo, zap.Error(err), logs.TagField(logs.TagApp))
}
a.corsCnrID = *corsCnrID
}
func (a *app) initRPCClient(ctx context.Context) *rpcclient.Client {
rpcCli, err := rpcclient.New(ctx, a.config().GetString(cfgRPCEndpoint), rpcclient.Options{})
if err != nil {
a.log.Fatal(logs.InitRPCClientFailed, zap.Error(err), logs.TagField(logs.TagApp))
}
return rpcCli
}
func (a *app) initAppSettings(lc *logLevelConfig) {
a.settings = &appSettings{
reconnectInterval: fetchReconnectInterval(a.config()),
@@ -306,9 +278,13 @@ func (s *appSettings) update(v *viper.Viper, l *zap.Logger) {
namespaceHeader := v.GetString(cfgResolveNamespaceHeader)
defaultNamespaces := fetchDefaultNamespaces(v)
indexPage, indexEnabled := fetchIndexPageTemplate(v, l)
cors := fetchCORSConfig(v)
corsAllowOrigin := v.GetString(cfgCORSAllowOrigin)
corsAllowMethods := v.GetStringSlice(cfgCORSAllowMethods)
corsAllowHeaders := v.GetStringSlice(cfgCORSAllowHeaders)
corsExposeHeaders := v.GetStringSlice(cfgCORSExposeHeaders)
corsAllowCredentials := v.GetBool(cfgCORSAllowCredentials)
corsMaxAge := fetchCORSMaxAge(v)
enableFilepathFallback := v.GetBool(cfgFeaturesEnableFilepathFallback)
enableFilepathSlashFallback := v.GetBool(cfgFeaturesEnableFilepathSlashFallback)
s.mu.Lock()
defer s.mu.Unlock()
@@ -322,9 +298,13 @@ func (s *appSettings) update(v *viper.Viper, l *zap.Logger) {
s.defaultNamespaces = defaultNamespaces
s.returnIndexPage = indexEnabled
s.indexPageTemplate = indexPage
s.cors = cors
s.corsAllowOrigin = corsAllowOrigin
s.corsAllowMethods = corsAllowMethods
s.corsAllowHeaders = corsAllowHeaders
s.corsExposeHeaders = corsExposeHeaders
s.corsAllowCredentials = corsAllowCredentials
s.corsMaxAge = corsMaxAge
s.enableFilepathFallback = enableFilepathFallback
s.enableFilepathSlashFallback = enableFilepathSlashFallback
}
func (s *loggerSettings) DroppedLogsInc() {
@@ -370,33 +350,26 @@ func (s *appSettings) IndexPageTemplate() string {
return s.indexPageTemplate
}
func (s *appSettings) CORS() *data.CORSRule {
func (s *appSettings) CORS() CORS {
s.mu.RLock()
defer s.mu.RUnlock()
if s.cors == nil {
return nil
}
allowMethods := make([]string, len(s.corsAllowMethods))
copy(allowMethods, s.corsAllowMethods)
allowMethods := make([]string, len(s.cors.AllowedMethods))
copy(allowMethods, s.cors.AllowedMethods)
allowHeaders := make([]string, len(s.corsAllowHeaders))
copy(allowHeaders, s.corsAllowHeaders)
allowHeaders := make([]string, len(s.cors.AllowedHeaders))
copy(allowHeaders, s.cors.AllowedHeaders)
exposeHeaders := make([]string, len(s.corsExposeHeaders))
copy(exposeHeaders, s.corsExposeHeaders)
exposeHeaders := make([]string, len(s.cors.ExposeHeaders))
copy(exposeHeaders, s.cors.ExposeHeaders)
allowOrigins := make([]string, len(s.cors.AllowedOrigins))
copy(allowOrigins, s.cors.AllowedOrigins)
return &data.CORSRule{
AllowedOrigins: allowOrigins,
AllowedMethods: allowMethods,
AllowedHeaders: allowHeaders,
ExposeHeaders: exposeHeaders,
AllowedCredentials: s.cors.AllowedCredentials,
MaxAgeSeconds: s.cors.MaxAgeSeconds,
return CORS{
AllowOrigin: s.corsAllowOrigin,
AllowMethods: allowMethods,
AllowHeaders: allowHeaders,
ExposeHeaders: exposeHeaders,
AllowCredentials: s.corsAllowCredentials,
MaxAge: s.corsMaxAge,
}
}
@@ -418,15 +391,15 @@ func (s *appSettings) NamespaceHeader() string {
return s.namespaceHeader
}
func (s *appSettings) FormContainerZone(ns string) string {
func (s *appSettings) FormContainerZone(ns string) (zone string, isDefault bool) {
s.mu.RLock()
namespaces := s.defaultNamespaces
s.mu.RUnlock()
if slices.Contains(namespaces, ns) {
return v2container.SysAttributeZoneDefault
return v2container.SysAttributeZoneDefault, true
}
return ns + ".ns"
return ns + ".ns", false
}
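
To illustrate the new two-value signature, a minimal, self-contained Go sketch; the standalone helper name and the default zone value "container" are assumptions for illustration, not taken from this diff.

```go
// Hypothetical sketch of the new (zone, isDefault) form of FormContainerZone.
// The literal "container" stands in for v2container.SysAttributeZoneDefault
// and is an assumption, not confirmed by this diff.
package main

import (
	"fmt"
	"slices"
)

func formContainerZone(ns string, defaultNamespaces []string) (zone string, isDefault bool) {
	if slices.Contains(defaultNamespaces, ns) {
		return "container", true // default zone
	}
	return ns + ".ns", false // namespaced zone
}

func main() {
	fmt.Println(formContainerZone("", []string{"", "root"}))        // container true
	fmt.Println(formContainerZone("tenant1", []string{"", "root"})) // tenant1.ns false
}
```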
func (s *appSettings) EnableFilepathFallback() bool {
@@ -435,12 +408,6 @@ func (s *appSettings) EnableFilepathFallback() bool {
return s.enableFilepathFallback
}
func (s *appSettings) EnableFilepathSlashFallback() bool {
s.mu.RLock()
defer s.mu.RUnlock()
return s.enableFilepathSlashFallback
}
func (a *app) initResolver() {
var err error
a.resolver, err = resolver.NewContainerResolver(a.getResolverConfig())
@@ -453,6 +420,7 @@ func (a *app) getResolverConfig() ([]string, *resolver.Config) {
resolveCfg := &resolver.Config{
FrostFS: frostfs.NewResolverFrostFS(a.pool),
RPCAddress: a.config().GetString(cfgRPCEndpoint),
Settings: a.settings,
}
order := a.config().GetStringSlice(cfgResolveOrder)
@@ -638,8 +606,10 @@ func (a *app) Serve() {
close(a.webDone)
}()
handle := handler.New(a.AppParams(), a.settings, tree.NewTree(frostfs.NewPoolWrapper(a.treePool)), workerPool)
// Configure router.
a.configureRouter(workerPool)
a.configureRouter(handle)
a.startServices()
a.initServers(a.ctx)
@@ -709,7 +679,7 @@ func (a *app) configReload(ctx context.Context) {
return
}
a.settings.logLevelConfig.update(a.cfg.config(), a.log)
a.settings.logLevelConfig.update(a.cfg.settings, a.log)
if err := a.settings.dialerSource.Update(fetchMultinetConfig(a.config(), a.log)); err != nil {
a.log.Warn(logs.MultinetConfigWontBeUpdated, zap.Error(err), logs.TagField(logs.TagApp))
@@ -760,48 +730,31 @@ func (a *app) stopServices() {
}
}
func (a *app) configureRouter(workerPool *ants.Pool) {
rpcCli := a.initRPCClient(a.ctx)
cnrContractName := a.config().GetString(cfgContractsContainerName)
rpcEndpoint := a.config().GetString(cfgRPCEndpoint)
cnrAddr, err := contractsUtil.ResolveContractHash(cnrContractName, rpcEndpoint)
if err != nil {
a.log.Fatal(logs.FailedToResolveContractHash, zap.Error(err), logs.TagField(logs.TagApp))
}
cnrClient, err := containerClient.New(containerClient.Config{
ContractHash: cnrAddr,
Key: a.key,
RPCClient: rpcCli,
})
if err != nil {
a.log.Fatal(logs.InitContainerContractFailed, zap.Error(err), logs.TagField(logs.TagApp))
}
a.handle = handler.New(a.AppParams(), a.settings, tree.NewTree(frostfs.NewPoolWrapper(a.treePool), a.log), cnrClient, workerPool)
func (a *app) configureRouter(h *handler.Handler) {
r := router.New()
r.RedirectTrailingSlash = true
r.NotFound = func(r *fasthttp.RequestCtx) {
handler.ResponseError(r, "Route Not found", fasthttp.StatusNotFound)
handler.ResponseError(r, "Not found", fasthttp.StatusNotFound)
}
r.MethodNotAllowed = func(r *fasthttp.RequestCtx) {
handler.ResponseError(r, "Method Not Allowed", fasthttp.StatusMethodNotAllowed)
}
r.POST("/upload/{cid}", a.addMiddlewares(a.handle.Upload))
r.OPTIONS("/upload/{cid}", a.addPreflight(a.handle.Preflight))
r.POST("/upload/{cid}", a.addMiddlewares(h.Upload))
r.OPTIONS("/upload/{cid}", a.addPreflight())
a.log.Info(logs.AddedPathUploadCid, logs.TagField(logs.TagApp))
r.GET("/get/{cid}/{oid:*}", a.addMiddlewares(a.handle.DownloadByAddressOrBucketName))
r.HEAD("/get/{cid}/{oid:*}", a.addMiddlewares(a.handle.HeadByAddressOrBucketName))
r.OPTIONS("/get/{cid}/{oid:*}", a.addPreflight(a.handle.Preflight))
r.GET("/get/{cid}/{oid:*}", a.addMiddlewares(h.DownloadByAddressOrBucketName))
r.HEAD("/get/{cid}/{oid:*}", a.addMiddlewares(h.HeadByAddressOrBucketName))
r.OPTIONS("/get/{cid}/{oid:*}", a.addPreflight())
a.log.Info(logs.AddedPathGetCidOid, logs.TagField(logs.TagApp))
r.GET("/get_by_attribute/{cid}/{attr_key}/{attr_val:*}", a.addMiddlewares(a.handle.DownloadByAttribute))
r.HEAD("/get_by_attribute/{cid}/{attr_key}/{attr_val:*}", a.addMiddlewares(a.handle.HeadByAttribute))
r.OPTIONS("/get_by_attribute/{cid}/{attr_key}/{attr_val:*}", a.addPreflight(a.handle.Preflight))
r.GET("/get_by_attribute/{cid}/{attr_key}/{attr_val:*}", a.addMiddlewares(h.DownloadByAttribute))
r.HEAD("/get_by_attribute/{cid}/{attr_key}/{attr_val:*}", a.addMiddlewares(h.HeadByAttribute))
r.OPTIONS("/get_by_attribute/{cid}/{attr_key}/{attr_val:*}", a.addPreflight())
a.log.Info(logs.AddedPathGetByAttributeCidAttrKeyAttrVal, logs.TagField(logs.TagApp))
r.GET("/zip/{cid}/{prefix:*}", a.addMiddlewares(a.handle.DownloadZip))
r.OPTIONS("/zip/{cid}/{prefix:*}", a.addPreflight(a.handle.Preflight))
r.GET("/tar/{cid}/{prefix:*}", a.addMiddlewares(a.handle.DownloadTar))
r.OPTIONS("/tar/{cid}/{prefix:*}", a.addPreflight(a.handle.Preflight))
r.GET("/zip/{cid}/{prefix:*}", a.addMiddlewares(h.DownloadZip))
r.OPTIONS("/zip/{cid}/{prefix:*}", a.addPreflight())
r.GET("/tar/{cid}/{prefix:*}", a.addMiddlewares(h.DownloadTar))
r.OPTIONS("/tar/{cid}/{prefix:*}", a.addPreflight())
a.log.Info(logs.AddedPathZipCidPrefix, logs.TagField(logs.TagApp))
a.webServer.Handler = r.Handler
@@ -824,14 +777,14 @@ func (a *app) addMiddlewares(h fasthttp.RequestHandler) fasthttp.RequestHandler
return h
}
func (a *app) addPreflight(h fasthttp.RequestHandler) fasthttp.RequestHandler {
func (a *app) addPreflight() fasthttp.RequestHandler {
list := []func(fasthttp.RequestHandler) fasthttp.RequestHandler{
a.tracer,
a.logger,
a.canonicalizer,
a.reqNamespace,
}
h := a.preflightHandler
for i := len(list) - 1; i >= 0; i-- {
h = list[i](h)
}
@@ -839,16 +792,46 @@ func (a *app) addPreflight(h fasthttp.RequestHandler) fasthttp.RequestHandler {
return h
}
func (a *app) preflightHandler(c *fasthttp.RequestCtx) {
cors := a.settings.CORS()
setCORSHeaders(c, cors)
}
func (a *app) cors(h fasthttp.RequestHandler) fasthttp.RequestHandler {
return func(c *fasthttp.RequestCtx) {
h(c)
code := c.Response.StatusCode()
if code >= fasthttp.StatusOK && code < fasthttp.StatusMultipleChoices {
a.handle.SetCORSHeaders(c)
cors := a.settings.CORS()
setCORSHeaders(c, cors)
}
}
}
func setCORSHeaders(c *fasthttp.RequestCtx, cors CORS) {
c.Response.Header.Set(fasthttp.HeaderAccessControlMaxAge, strconv.Itoa(cors.MaxAge))
if len(cors.AllowOrigin) != 0 {
c.Response.Header.Set(fasthttp.HeaderAccessControlAllowOrigin, cors.AllowOrigin)
}
if len(cors.AllowMethods) != 0 {
c.Response.Header.Set(fasthttp.HeaderAccessControlAllowMethods, strings.Join(cors.AllowMethods, ","))
}
if len(cors.AllowHeaders) != 0 {
c.Response.Header.Set(fasthttp.HeaderAccessControlAllowHeaders, strings.Join(cors.AllowHeaders, ","))
}
if len(cors.ExposeHeaders) != 0 {
c.Response.Header.Set(fasthttp.HeaderAccessControlExposeHeaders, strings.Join(cors.ExposeHeaders, ","))
}
if cors.AllowCredentials {
c.Response.Header.Set(fasthttp.HeaderAccessControlAllowCredentials, "true")
}
}
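
A minimal, runnable sketch of how the config-driven values become response headers, mirroring setCORSHeaders above; the sample values are assumptions borrowed from the example configuration.

```go
// Hedged sketch (not from the repository): apply config-driven CORS values to
// a fasthttp response the same way setCORSHeaders does.
package main

import (
	"fmt"
	"strconv"
	"strings"

	"github.com/valyala/fasthttp"
)

func main() {
	// Sample values; in the gateway they come from the cors.* config keys.
	allowOrigin := "*"
	allowMethods := []string{"GET", "HEAD"}
	maxAge := 600

	var ctx fasthttp.RequestCtx
	ctx.Response.Header.Set(fasthttp.HeaderAccessControlMaxAge, strconv.Itoa(maxAge))
	if allowOrigin != "" {
		ctx.Response.Header.Set(fasthttp.HeaderAccessControlAllowOrigin, allowOrigin)
	}
	if len(allowMethods) != 0 {
		ctx.Response.Header.Set(fasthttp.HeaderAccessControlAllowMethods, strings.Join(allowMethods, ","))
	}

	// Prints the Access-Control-* headers a preflight response would carry.
	fmt.Print(ctx.Response.Header.String())
}
```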
func (a *app) logger(h fasthttp.RequestHandler) fasthttp.RequestHandler {
return func(req *fasthttp.RequestCtx) {
requiredFields := []zap.Field{zap.Uint64("id", req.ID())}
@@ -947,13 +930,11 @@ func (a *app) reqNamespace(h fasthttp.RequestHandler) fasthttp.RequestHandler {
func (a *app) AppParams() *handler.AppParams {
return &handler.AppParams{
Logger: a.log,
FrostFS: frostfs.NewFrostFS(a.pool),
Owner: a.owner,
Resolver: a.resolver,
Cache: a.bucketCache,
CORSCnrID: a.corsCnrID,
CORSCache: cache.NewCORSCache(getCORSCacheOptions(a.config(), a.log)),
Logger: a.log,
FrostFS: frostfs.NewFrostFS(a.pool),
Owner: a.owner,
Resolver: a.resolver,
Cache: a.bucketCache,
}
}
@@ -1154,44 +1135,3 @@ func (a *app) tryReconnect(ctx context.Context, sr *fasthttp.Server) bool {
return len(a.unbindServers) == 0
}
func (a *app) fetchContainerID(ctx context.Context, cfgKey string) (id *cid.ID, err error) {
cnrID, err := a.resolveContainerID(ctx, cfgKey)
if err != nil {
return nil, err
}
err = checkContainerExists(ctx, *cnrID, a.pool)
if err != nil {
return nil, err
}
return cnrID, nil
}
func (a *app) resolveContainerID(ctx context.Context, cfgKey string) (*cid.ID, error) {
containerString := a.config().GetString(cfgKey)
id := new(cid.ID)
if err := id.DecodeString(containerString); err != nil {
i := strings.Index(containerString, ".")
if i < 0 {
return nil, fmt.Errorf("invalid container address: %s", containerString)
}
if id, err = a.resolver.Resolve(ctx, containerString[i+1:], containerString[:i]); err != nil {
return nil, fmt.Errorf("resolve container address %s: %w", containerString, err)
}
}
return id, nil
}
func checkContainerExists(ctx context.Context, id cid.ID, frostFSPool *pool.Pool) error {
prm := pool.PrmContainerGet{
ContainerID: id,
}
_, err := frostFSPool.GetContainer(ctx, prm)
return err
}


@@ -20,11 +20,9 @@ import (
containerv2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -45,10 +43,9 @@ type putResponse struct {
}
const (
testContainerName = "friendly"
testListenAddress = "localhost:8082"
testHost = "http://" + testListenAddress
testCORSContainerName = "cors"
testContainerName = "friendly"
testListenAddress = "localhost:8082"
testHost = "http://" + testListenAddress
)
func TestIntegration(t *testing.T) {
@@ -79,14 +76,10 @@ func TestIntegration(t *testing.T) {
registerUser(t, ctx, aioContainer, file.Name())
}
// Creating CORS container
clientPool := getPool(ctx, t, key)
_, err = createContainer(ctx, t, clientPool, ownerID, testCORSContainerName)
require.NoError(t, err, version)
// See the logs from the command execution.
server, cancel := runServer(file.Name())
CID, err := createContainer(ctx, t, clientPool, ownerID, testContainerName)
clientPool := getPool(ctx, t, key)
CID, err := createContainer(ctx, t, clientPool, ownerID)
require.NoError(t, err, version)
jsonToken, binaryToken := makeBearerTokens(t, key, ownerID, version)
@@ -101,7 +94,6 @@ func TestIntegration(t *testing.T) {
t.Run("get by attribute "+version, func(t *testing.T) { getByAttr(ctx, t, clientPool, ownerID, CID) })
t.Run("get zip "+version, func(t *testing.T) { getZip(ctx, t, clientPool, ownerID, CID) })
t.Run("test namespaces "+version, func(t *testing.T) { checkNamespaces(ctx, t, clientPool, ownerID, CID) })
t.Run("test status codes "+version, func(t *testing.T) { checkStatusCodes(ctx, t, clientPool, ownerID, version) })
cancel()
server.Wait()
@@ -118,8 +110,6 @@ func runServer(pathToWallet string) (App, context.CancelFunc) {
v.config().Set(cfgWalletPath, pathToWallet)
v.config().Set(cfgWalletPassphrase, "")
v.config().Set(cfgContainersCORS, testCORSContainerName+"."+containerv2.SysAttributeZoneDefault)
application := newApp(cancelCtx, v)
go application.Serve()
@@ -270,7 +260,7 @@ func putWithDuplicateKeys(t *testing.T, CID cid.ID) {
body, err := io.ReadAll(resp.Body)
require.NoError(t, err)
require.Contains(t, string(body), "key duplication error: "+attr+"\n")
require.Equal(t, "key duplication error: "+attr+"\n", string(body))
require.Equal(t, http.StatusBadRequest, resp.StatusCode)
}
@@ -439,80 +429,7 @@ func checkNamespaces(ctx context.Context, t *testing.T, clientPool *pool.Pool, o
resp, err = http.DefaultClient.Do(req)
require.NoError(t, err)
require.Equal(t, http.StatusNotFound, resp.StatusCode)
}
func checkStatusCodes(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, version string) {
cli := http.Client{Timeout: 30 * time.Second}
t.Run("container not found by name", func(t *testing.T) {
resp, err := cli.Get(testHost + "/get/unknown/object")
require.NoError(t, err)
require.Equal(t, http.StatusNotFound, resp.StatusCode)
requireBodyContains(t, resp, "container not found")
})
t.Run("container not found by cid", func(t *testing.T) {
cnrIDTest := cidtest.ID()
resp, err := cli.Get(testHost + "/get/" + cnrIDTest.EncodeToString() + "/object")
require.NoError(t, err)
requireBodyContains(t, resp, "container not found")
require.Equal(t, http.StatusNotFound, resp.StatusCode)
})
t.Run("object not found in storage", func(t *testing.T) {
resp, err := cli.Get(testHost + "/get_by_attribute/" + testContainerName + "/FilePath/object2")
require.NoError(t, err)
requireBodyContains(t, resp, "object not found")
require.Equal(t, http.StatusNotFound, resp.StatusCode)
})
t.Run("access denied", func(t *testing.T) {
basicACL := acl.Private
var recs []*eacl.Record
if version == "1.2.7" {
basicACL = acl.PublicRWExtended
rec := eacl.NewRecord()
rec.SetAction(eacl.ActionDeny)
rec.SetOperation(eacl.OperationGet)
recs = append(recs, rec)
}
cnrID, err := createContainerBase(ctx, t, clientPool, ownerID, basicACL, "")
require.NoError(t, err)
key, err := keys.NewPrivateKey()
require.NoError(t, err)
jsonToken, _ := makeBearerTokens(t, key, ownerID, version, recs...)
t.Run("get", func(t *testing.T) {
request, err := http.NewRequest(http.MethodGet, testHost+"/get/"+cnrID.EncodeToString()+"/object", nil)
require.NoError(t, err)
request.Header.Set("Authorization", "Bearer "+jsonToken)
resp, err := cli.Do(request)
require.NoError(t, err)
requireBodyContains(t, resp, "access denied")
require.Equal(t, http.StatusForbidden, resp.StatusCode)
})
t.Run("upload", func(t *testing.T) {
request, _, _ := makePutRequest(t, testHost+"/upload/"+cnrID.EncodeToString())
request.Header.Set("Authorization", "Bearer "+jsonToken)
resp, err := cli.Do(request)
require.NoError(t, err)
requireBodyContains(t, resp, "access denied")
require.Equal(t, http.StatusForbidden, resp.StatusCode)
})
})
}
func requireBodyContains(t *testing.T, resp *http.Response, msg string) {
data, err := io.ReadAll(resp.Body)
require.NoError(t, err)
defer resp.Body.Close()
require.Contains(t, strings.ToLower(string(data)), strings.ToLower(msg))
}
func createDockerContainer(ctx context.Context, t *testing.T, image string) testcontainers.Container {
@@ -560,11 +477,7 @@ func getPool(ctx context.Context, t *testing.T, key *keys.PrivateKey) *pool.Pool
return clientPool
}
func createContainer(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, name string) (cid.ID, error) {
return createContainerBase(ctx, t, clientPool, ownerID, acl.PublicRWExtended, name)
}
func createContainerBase(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, basicACL acl.Basic, name string) (cid.ID, error) {
func createContainer(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID) (cid.ID, error) {
var policy netmap.PlacementPolicy
err := policy.DecodeString("REP 1")
require.NoError(t, err)
@@ -572,28 +485,24 @@ func createContainerBase(ctx context.Context, t *testing.T, clientPool *pool.Poo
var cnr container.Container
cnr.Init()
cnr.SetPlacementPolicy(policy)
cnr.SetBasicACL(basicACL)
cnr.SetBasicACL(acl.PublicRWExtended)
cnr.SetOwner(ownerID)
container.SetCreationTime(&cnr, time.Now())
if name != "" {
var domain container.Domain
domain.SetName(name)
var domain container.Domain
domain.SetName(testContainerName)
cnr.SetAttribute(containerv2.SysAttributeName, domain.Name())
cnr.SetAttribute(containerv2.SysAttributeZone, domain.Zone())
}
cnr.SetAttribute(containerv2.SysAttributeName, domain.Name())
cnr.SetAttribute(containerv2.SysAttributeZone, domain.Zone())
prm := pool.PrmContainerPut{
ClientParams: client.PrmContainerPut{
Container: &cnr,
},
WaitParams: &pool.WaitParams{
Timeout: 15 * time.Second,
PollInterval: 3 * time.Second,
},
}
var waitPrm pool.WaitParams
waitPrm.SetTimeout(15 * time.Second)
waitPrm.SetPollInterval(3 * time.Second)
var prm pool.PrmContainerPut
prm.SetContainer(cnr)
prm.SetWaitParams(waitPrm)
CID, err := clientPool.PutContainer(ctx, prm)
if err != nil {
@@ -640,18 +549,13 @@ func registerUser(t *testing.T, ctx context.Context, aioContainer testcontainers
require.NoError(t, err)
}
func makeBearerTokens(t *testing.T, key *keys.PrivateKey, ownerID user.ID, version string, records ...*eacl.Record) (jsonTokenBase64, binaryTokenBase64 string) {
func makeBearerTokens(t *testing.T, key *keys.PrivateKey, ownerID user.ID, version string) (jsonTokenBase64, binaryTokenBase64 string) {
tkn := new(bearer.Token)
tkn.ForUser(ownerID)
tkn.SetExp(10000)
if version == "1.2.7" {
table := eacl.NewTable()
for i := range records {
table.AddRecord(records[i])
}
tkn.SetEACLTable(*table)
tkn.SetEACLTable(*eacl.NewTable())
} else {
tkn.SetImpersonate(true)
}


@@ -41,7 +41,6 @@ type zapCoreTagFilterWrapper struct {
type TagFilterSettings interface {
LevelEnabled(tag string, lvl zapcore.Level) bool
DefaultEnabled(lvl zapcore.Level) bool
}
func (c *zapCoreTagFilterWrapper) Enabled(level zapcore.Level) bool {
@@ -64,26 +63,24 @@ func (c *zapCoreTagFilterWrapper) Check(entry zapcore.Entry, checked *zapcore.Ch
}
func (c *zapCoreTagFilterWrapper) Write(entry zapcore.Entry, fields []zapcore.Field) error {
if c.shouldSkip(entry, fields, c.extra) {
if c.shouldSkip(entry, fields) || c.shouldSkip(entry, c.extra) {
return nil
}
return c.core.Write(entry, fields)
}
func (c *zapCoreTagFilterWrapper) shouldSkip(entry zapcore.Entry, fields []zap.Field, extra []zap.Field) bool {
func (c *zapCoreTagFilterWrapper) shouldSkip(entry zapcore.Entry, fields []zap.Field) bool {
for _, field := range fields {
if field.Key == logs.TagFieldName && field.Type == zapcore.StringType {
return !c.settings.LevelEnabled(field.String, entry.Level)
}
}
for _, field := range extra {
if field.Key == logs.TagFieldName && field.Type == zapcore.StringType {
return !c.settings.LevelEnabled(field.String, entry.Level)
if !c.settings.LevelEnabled(field.String, entry.Level) {
return true
}
break
}
}
return !c.settings.DefaultEnabled(entry.Level)
return false
}
func (c *zapCoreTagFilterWrapper) Sync() error {
@@ -130,13 +127,14 @@ func newLogEncoder() zapcore.Encoder {
//
// See also zapcore.Level, zap.NewProductionConfig, zap.AddStacktrace.
func newStdoutLogger(v *viper.Viper, lvl zap.AtomicLevel, loggerSettings LoggerAppSettings, tagSetting TagFilterSettings) *Logger {
stdout := zapcore.AddSync(os.Stdout)
stdout := zapcore.AddSync(os.Stderr)
consoleOutCore := zapcore.NewCore(newLogEncoder(), stdout, lvl)
consoleOutCore = applyZapCoreMiddlewares(consoleOutCore, v, loggerSettings, tagSetting)
return &Logger{
logger: zap.New(consoleOutCore, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel))),
lvl: lvl,
}
}
@@ -154,6 +152,7 @@ func newJournaldLogger(v *viper.Viper, lvl zap.AtomicLevel, loggerSettings Logge
return &Logger{
logger: zap.New(coreWithContext, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel))),
lvl: lvl,
}
}


@@ -74,6 +74,7 @@ func newServer(ctx context.Context, serverInfo ServerInfo) (*server, error) {
ln = tls.NewListener(ln, &tls.Config{
GetCertificate: tlsProvider.GetCertificate,
NextProtos: []string{"h2"}, // required to enable HTTP/2 requests in `http.Serve`
})
}


@@ -18,7 +18,7 @@ import (
"time"
"github.com/stretchr/testify/require"
"github.com/valyala/fasthttp"
"golang.org/x/net/http2"
)
const (
@@ -26,10 +26,14 @@ const (
expHeaderValue = "Bar"
)
func TestHTTP_TLS(t *testing.T) {
func TestHTTP2TLS(t *testing.T) {
ctx := context.Background()
certPath, keyPath := prepareTestCerts(t)
srv := &http.Server{
Handler: http.HandlerFunc(testHandler),
}
tlsListener, err := newServer(ctx, ServerInfo{
Address: ":0",
TLS: ServerTLSInfo{
@@ -43,34 +47,37 @@
addr := fmt.Sprintf("https://localhost:%d", port)
go func() {
_ = fasthttp.Serve(tlsListener.Listener(), testHandler)
_ = srv.Serve(tlsListener.Listener())
}()
// Server is running, now send HTTP/2 request
tlsClientConfig := &tls.Config{
InsecureSkipVerify: true,
}
cliHTTP := http.Client{Transport: &http.Transport{}}
cliHTTPS := http.Client{Transport: &http.Transport{TLSClientConfig: tlsClientConfig}}
cliHTTP1 := http.Client{Transport: &http.Transport{TLSClientConfig: tlsClientConfig}}
cliHTTP2 := http.Client{Transport: &http2.Transport{TLSClientConfig: tlsClientConfig}}
req, err := http.NewRequest("GET", addr, nil)
require.NoError(t, err)
req.Header[expHeaderKey] = []string{expHeaderValue}
resp, err := cliHTTPS.Do(req)
resp, err := cliHTTP1.Do(req)
require.NoError(t, err)
require.Equal(t, http.StatusOK, resp.StatusCode)
_, err = cliHTTP.Do(req)
require.ErrorContains(t, err, "failed to verify certificate")
resp, err = cliHTTP2.Do(req)
require.NoError(t, err)
require.Equal(t, http.StatusOK, resp.StatusCode)
}
func testHandler(ctx *fasthttp.RequestCtx) {
hdr := ctx.Request.Header.Peek(expHeaderKey)
if len(hdr) == 0 || string(hdr) != expHeaderValue {
ctx.Response.SetStatusCode(http.StatusBadRequest)
func testHandler(resp http.ResponseWriter, req *http.Request) {
hdr, ok := req.Header[expHeaderKey]
if !ok || len(hdr) != 1 || hdr[0] != expHeaderValue {
resp.WriteHeader(http.StatusBadRequest)
} else {
ctx.Response.SetStatusCode(http.StatusOK)
resp.WriteHeader(http.StatusOK)
}
}


@@ -16,13 +16,11 @@ import (
"time"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/cache"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
internalnet "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/net"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/service/frostfs"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/resolver"
grpctracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
qostagging "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
treepool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree"
"github.com/spf13/pflag"
@@ -62,8 +60,6 @@ const (
defaultMultinetFallbackDelay = 300 * time.Millisecond
defaultContainerContractName = "container.frostfs"
cfgServer = "server"
cfgTLSEnabled = "tls.enabled"
cfgTLSCertFile = "tls.cert_file"
@@ -115,7 +111,7 @@ const (
cfgLoggerTags = "logger.tags"
cfgLoggerTagsPrefixTmpl = cfgLoggerTags + ".%d."
cfgLoggerTagsNameTmpl = cfgLoggerTagsPrefixTmpl + "names"
cfgLoggerTagsNameTmpl = cfgLoggerTagsPrefixTmpl + "name"
cfgLoggerTagsLevelTmpl = cfgLoggerTagsPrefixTmpl + "level"
// Wallet.
@@ -158,21 +154,18 @@ const (
cfgBucketsCacheLifetime = "cache.buckets.lifetime"
cfgBucketsCacheSize = "cache.buckets.size"
cfgNetmapCacheLifetime = "cache.netmap.lifetime"
cfgCORSCacheLifetime = "cache.cors.lifetime"
cfgCORSCacheSize = "cache.cors.size"
// Bucket resolving options.
cfgResolveNamespaceHeader = "resolve_bucket.namespace_header"
cfgResolveDefaultNamespaces = "resolve_bucket.default_namespaces"
// CORS.
cfgCORS = "cors"
cfgCORSAllowOrigin = cfgCORS + ".allow_origin"
cfgCORSAllowMethods = cfgCORS + ".allow_methods"
cfgCORSAllowHeaders = cfgCORS + ".allow_headers"
cfgCORSExposeHeaders = cfgCORS + ".expose_headers"
cfgCORSAllowCredentials = cfgCORS + ".allow_credentials"
cfgCORSMaxAge = cfgCORS + ".max_age"
cfgCORSAllowOrigin = "cors.allow_origin"
cfgCORSAllowMethods = "cors.allow_methods"
cfgCORSAllowHeaders = "cors.allow_headers"
cfgCORSExposeHeaders = "cors.expose_headers"
cfgCORSAllowCredentials = "cors.allow_credentials"
cfgCORSMaxAge = "cors.max_age"
// Multinet.
cfgMultinetEnabled = "multinet.enabled"
@@ -182,12 +175,8 @@ const (
cfgMultinetSubnets = "multinet.subnets"
// Feature.
cfgFeaturesEnableFilepathFallback = "features.enable_filepath_fallback"
cfgFeaturesEnableFilepathSlashFallback = "features.enable_filepath_slash_fallback"
cfgFeaturesTreePoolNetmapSupport = "features.tree_pool_netmap_support"
// Containers.
cfgContainersCORS = "containers.cors"
cfgFeaturesEnableFilepathFallback = "features.enable_filepath_fallback"
cfgFeaturesTreePoolNetmapSupport = "features.tree_pool_netmap_support"
// Command line args.
cmdHelp = "help"
@@ -199,9 +188,6 @@ const (
cmdConfig = "config"
cmdConfigDir = "config-dir"
cmdListenAddress = "listen_address"
// Contracts.
cfgContractsContainerName = "contracts.container.name"
)
var ignore = map[string]struct{}{
@@ -214,6 +200,7 @@ var defaultTags = []string{logs.TagApp, logs.TagDatapath, logs.TagExternalStorag
type Logger struct {
logger *zap.Logger
lvl zap.AtomicLevel
}
type appCfg struct {
@@ -406,9 +393,6 @@ func setDefaults(v *viper.Viper, flags *pflag.FlagSet) {
// multinet
v.SetDefault(cfgMultinetFallbackDelay, defaultMultinetFallbackDelay)
// contracts
v.SetDefault(cfgContractsContainerName, defaultContainerContractName)
if resolveMethods, err := flags.GetStringSlice(cfgResolveOrder); err == nil {
v.SetDefault(cfgResolveOrder, resolveMethods)
}
@@ -524,8 +508,8 @@ func fetchLogTagsConfig(v *viper.Viper, defaultLvl zapcore.Level) (map[string]za
res := make(map[string]zapcore.Level)
for i := 0; ; i++ {
tagNames := v.GetString(fmt.Sprintf(cfgLoggerTagsNameTmpl, i))
if tagNames == "" {
name := v.GetString(fmt.Sprintf(cfgLoggerTagsNameTmpl, i))
if name == "" {
break
}
@@ -537,12 +521,7 @@ func fetchLogTagsConfig(v *viper.Viper, defaultLvl zapcore.Level) (map[string]za
}
}
for _, tagName := range strings.Split(tagNames, ",") {
tagName = strings.TrimSpace(tagName)
if len(tagName) != 0 {
res[tagName] = lvl
}
}
res[name] = lvl
}
if len(res) == 0 && !v.IsSet(cfgLoggerTags) {
@@ -691,8 +670,6 @@ func (a *app) initPools(ctx context.Context) {
grpc.WithUnaryInterceptor(grpctracing.NewUnaryClientInteceptor()),
grpc.WithStreamInterceptor(grpctracing.NewStreamClientInterceptor()),
grpc.WithContextDialer(a.settings.dialerSource.GrpcContextDialer()),
grpc.WithChainUnaryInterceptor(qostagging.NewUnaryClientInteceptor()),
grpc.WithChainStreamInterceptor(qostagging.NewStreamClientInterceptor()),
}
prm.SetGRPCDialOptions(interceptors...)
prmTree.SetGRPCDialOptions(interceptors...)
@@ -779,15 +756,6 @@ func getNetmapCacheOptions(v *viper.Viper, l *zap.Logger) *cache.NetmapCacheConf
return cacheCfg
}
func getCORSCacheOptions(v *viper.Viper, l *zap.Logger) *cache.Config {
cacheCfg := cache.DefaultCORSConfig(l)
cacheCfg.Lifetime = fetchCacheLifetime(v, l, cfgCORSCacheLifetime, cacheCfg.Lifetime)
cacheCfg.Size = fetchCacheSize(v, l, cfgCORSCacheSize, cacheCfg.Size)
return cacheCfg
}
func fetchCacheLifetime(v *viper.Viper, l *zap.Logger, cfgEntry string, defaultValue time.Duration) time.Duration {
if v.IsSet(cfgEntry) {
lifetime := v.GetDuration(cfgEntry)
@@ -883,18 +851,3 @@ func fetchArchiveCompression(v *viper.Viper) bool {
}
return v.GetBool(cfgArchiveCompression)
}
func fetchCORSConfig(v *viper.Viper) *data.CORSRule {
if !v.IsSet(cfgCORS) {
return nil
}
return &data.CORSRule{
AllowedOrigins: []string{v.GetString(cfgCORSAllowOrigin)},
AllowedMethods: v.GetStringSlice(cfgCORSAllowMethods),
AllowedHeaders: v.GetStringSlice(cfgCORSAllowHeaders),
ExposeHeaders: v.GetStringSlice(cfgCORSExposeHeaders),
AllowedCredentials: v.GetBool(cfgCORSAllowCredentials),
MaxAgeSeconds: fetchCORSMaxAge(v),
}
}


@@ -20,9 +20,8 @@ HTTP_GW_LOGGER_SAMPLING_ENABLED=false
HTTP_GW_LOGGER_SAMPLING_INITIAL=100
HTTP_GW_LOGGER_SAMPLING_THEREAFTER=100
HTTP_GW_LOGGER_SAMPLING_INTERVAL=1s
HTTP_GW_LOGGER_TAGS_0_NAMES=app,datapath
HTTP_GW_LOGGER_TAGS_0_LEVEL=level
HTTP_GW_LOGGER_TAGS_1_NAME=external_storage_tree
HTTP_GW_LOGGER_TAGS_0_NAME=app
HTTP_GW_LOGGER_TAGS_1_NAME=datapath
HTTP_GW_SERVER_0_ADDRESS=0.0.0.0:443
HTTP_GW_SERVER_0_TLS_ENABLED=false
@@ -130,9 +129,6 @@ HTTP_GW_CACHE_BUCKETS_LIFETIME=1m
HTTP_GW_CACHE_BUCKETS_SIZE=1000
# Cache which stores netmap
HTTP_GW_CACHE_NETMAP_LIFETIME=1m
# Cache which stores container CORS configurations
HTTP_GW_CACHE_CORS_LIFETIME=5m
HTTP_GW_CACHE_CORS_SIZE=1000
# Header to determine zone to resolve bucket name
HTTP_GW_RESOLVE_BUCKET_NAMESPACE_HEADER=X-Frostfs-Namespace
@@ -174,13 +170,5 @@ HTTP_GW_INDEX_PAGE_TEMPLATE_PATH=internal/handler/templates/index.gotmpl
# Enable using fallback path to search for a object by attribute
HTTP_GW_FEATURES_ENABLE_FILEPATH_FALLBACK=false
# See description in docs/gate-configuration.md
HTTP_GW_FEATURES_ENABLE_FILEPATH_SLASH_FALLBACK=false
# Enable using new version of tree pool, which uses netmap to select nodes, for requests to tree service
HTTP_GW_FEATURES_TREE_POOL_NETMAP_SUPPORT=true
# Containers properties
HTTP_GW_CONTAINERS_CORS=AZjLTXfK4vs4ovxMic2xEJKSymMNLqdwq9JT64ASFCRj
# Container contract hash (LE) or name in NNS.
HTTP_GW_CONTRACTS_CONTAINER_NAME=container.frostfs


@@ -30,7 +30,8 @@ logger:
thereafter: 100
interval: 1s
tags:
- names: app,datapath
- name: app
- name: datapath
level: debug
server:
@@ -155,10 +156,6 @@ cache:
# Cache which stores netmap
netmap:
lifetime: 1m
# Cache which stores container CORS configurations
cors:
lifetime: 5m
size: 1000
resolve_bucket:
namespace_header: X-Frostfs-Namespace
@@ -192,15 +189,5 @@ multinet:
features:
# Enable using fallback path to search for a object by attribute
enable_filepath_fallback: false
# See description in docs/gate-configuration.md
enable_filepath_slash_fallback: false
# Enable using new version of tree pool, which uses netmap to select nodes, for requests to tree service
tree_pool_netmap_support: true
containers:
cors: AZjLTXfK4vs4ovxMic2xEJKSymMNLqdwq9JT64ASFCRj
contracts:
container:
# Container contract hash (LE) or name in NNS.
name: container.frostfs


@@ -94,8 +94,6 @@ The `filename` field from the multipart form will be set as `FileName` attribute
|--------|----------------------------------------------|
| 200 | Object created successfully. |
| 400 | Some error occurred during object uploading. |
| 403 | Access denied. |
| 409 | Can not upload object due to quota reached. |
## Get object
@@ -143,7 +141,6 @@ Get an object (payload and attributes) by an address.
|--------|------------------------------------------------|
| 200 | Object got successfully. |
| 400 | Some error occurred during object downloading. |
| 403 | Access denied. |
| 404 | Container or object not found. |
###### Body
@@ -186,7 +183,6 @@ Get an object attributes by an address.
|--------|---------------------------------------------------|
| 200 | Object head successfully. |
| 400 | Some error occurred during object HEAD operation. |
| 403 | Access denied. |
| 404 | Container or object not found. |
## Search object
@@ -237,7 +233,6 @@ If more than one object is found, an arbitrary one will be returned.
|--------|------------------------------------------------|
| 200 | Object got successfully. |
| 400 | Some error occurred during object downloading. |
| 403 | Access denied. |
| 404 | Container or object not found. |
#### HEAD
@@ -274,7 +269,6 @@ If more than one object is found, an arbitrary one will be used to get attribute
|--------|---------------------------------------|
| 200 | Object head successfully. |
| 400 | Some error occurred during operation. |
| 403 | Access denied. |
| 404 | Container or object not found. |
## Download archive
@@ -310,16 +304,16 @@ Archive can be compressed (see http-gw [configuration](gate-configuration.md#arc
###### Headers
| Header | Description |
|-----------------------|---------------------------------------------------------------------------------------------|
| `Content-Disposition` | Indicate how to browsers should treat file (`attachment`). Set `filename` as `archive.zip`. |
| `Content-Type` | Indicate content type of object. Set to `application/zip` |
| Header | Description |
|-----------------------|-------------------------------------------------------------------------------------------------------------------|
| `Content-Disposition` | Indicate how to browsers should treat file (`attachment`). Set `filename` as `archive.zip`. |
| `Content-Type` | Indicate content type of object. Set to `application/zip` |
###### Status codes
| Status | Description |
|--------|------------------------------------------------|
| 200 | Object got successfully. |
| 400 | Some error occurred during object downloading. |
| 403 | Access denied. |
| 404 | Container or objects not found. |
| Status | Description |
|--------|-----------------------------------------------------|
| 200 | Object got successfully. |
| 400 | Some error occurred during object downloading. |
| 404 | Container or objects not found. |
| 500 | Some inner error (e.g. error on streaming objects). |


@@ -8,6 +8,7 @@ There are some custom types used for brevity:
* `duration` -- string consisting of a number and a suffix. Suffix examples include `s` (seconds), `m` (minutes), `ms` (
milliseconds).
# Reload on SIGHUP
Some config values can be reloaded on SIGHUP signal.
@@ -59,8 +60,6 @@ $ cat http.log
| `index_page` | [Index page configuration](#index_page-section) |
| `multinet` | [Multinet configuration](#multinet-section) |
| `features` | [Features configuration](#features-section) |
| `containers` | [Containers configuration](#containers-section) |
| `contracts` | [Contracts configuration](#contracts-section) |
# General section
@@ -163,6 +162,7 @@ server:
| `tls.cert_file` | `string` | yes | | Path to the TLS certificate. |
| `tls.key_file` | `string` | yes | | Path to the key. |
# `logger` section
```yaml
@@ -175,9 +175,10 @@ logger:
thereafter: 100
interval: 1s
tags:
- names: "app,datapath"
level: info
- names: "external_storage_tree"
- name: "app"
level: info
- name: "datapath"
- name: "external_storage_tree"
```
| Parameter | Type | SIGHUP reload | Default value | Description |
@@ -197,14 +198,14 @@ parameter. Available tags:
```yaml
tags:
- names: "app,datapath"
- name: "app"
level: info
```
| Parameter | Type | SIGHUP reload | Default value | Description |
|-----------|------------|---------------|---------------------------|-------------------------------------------------------------------------------------------------------|
| `names` | `[]string` | yes | | Tag names separated by `,`. Possible values see below in `Tag values` section. |
| `level` | `string` | yes | Value from `logger.level` | Logging level for specific tag. Possible values: `debug`, `info`, `warn`, `dpanic`, `panic`, `fatal`. |
| Parameter | Type | SIGHUP reload | Default value | Description |
|-----------------------|------------|---------------|---------------------------|-------------------------------------------------------------------------------------------------------|
| `name` | `string` | yes | | Tag name. Possible values see below in `Tag values` section. |
| `level` | `string` | yes | Value from `logger.level` | Logging level for specific tag. Possible values: `debug`, `info`, `warn`, `dpanic`, `panic`, `fatal`. |
### Tag values
@@ -234,6 +235,7 @@ web:
| `stream_request_body` | `bool` | `true` | Enables request body streaming, and calls the handler sooner when given body is larger than the current limit. |
| `max_request_body_size` | `int` | `4194304` | Maximum request body size. The server rejects requests with bodies exceeding this limit. |
# `upload-header` section
```yaml
@@ -269,6 +271,7 @@ archive:
|---------------|--------|---------------|---------------|------------------------------------------------------------------|
| `compression` | `bool` | yes | `false` | Enable archive compression when download files by common prefix. |
# `pprof` section
Contains configuration for the `pprof` profiler.
@@ -317,13 +320,14 @@ tracing:
```
| Parameter | Type | SIGHUP reload | Default value | Description |
|--------------|----------------------------------------|---------------|---------------|---------------------------------------------------------------------------------------------------------------------------------|
| ------------ | -------------------------------------- | ------------- | ------------- | ------------------------------------------------------------------------------------------------------------------------------- |
| `enabled` | `bool` | yes | `false` | Flag to enable the tracing. |
| `exporter` | `string` | yes | | Trace collector type (`stdout` or `otlp_grpc` are supported). |
| `endpoint` | `string` | yes | | Address of collector endpoint for OTLP exporters. |
| `trusted_ca` | `string` | yes | | Path to certificate of a certification authority in pem format, that issued the TLS certificate of the telemetry remote server. |
| `attributes` | [[]Attributes](#attributes-subsection) | yes | | An array of configurable attributes in key-value format. |
#### `attributes` subsection
```yaml
@@ -334,13 +338,12 @@ tracing:
value: value
```
| Parameter | Type | SIGHUP reload | Default value | Description |
|-----------|----------|---------------|---------------|------------------|
| `key` | `string` | yes | | Attribute key. |
| `value` | `string` | yes | | Attribute value. |
| Parameter | Type | SIGHUP reload | Default value | Description |
|-----------------------|----------|---------------|---------------|----------------------------------------------------------|
| `key` | `string` | yes | | Attribute key. |
| `value` | `string` | yes | | Attribute value. |
# `runtime` section
Contains runtime parameters.
```yaml
@@ -369,6 +372,7 @@ frostfs:
| `buffer_max_size_for_put` | `uint64` | yes | `1048576` | Sets max buffer size for read payload in put operations. |
| `tree_pool_max_attempts` | `uint32` | no | `0` | Sets max attempt to make successful tree request. Value 0 means the number of attempts equals to number of nodes in pool. |
### `cache` section
```yaml
@@ -378,16 +382,13 @@ cache:
size: 1000
netmap:
lifetime: 1m
cors:
lifetime: 5m
size: 1000
```
| Parameter | Type | Default value | Description |
|-----------|-----------------------------------|---------------------------------|---------------------------------------------------------------------------|
| `buckets` | [Cache config](#cache-subsection) | `lifetime: 60s`<br>`size: 1000` | Cache which contains mapping of bucket name to bucket info. |
| `netmap` | [Cache config](#cache-subsection) | `lifetime: 1m` | Cache which stores netmap. `netmap.size` isn't applicable for this cache. |
| `cors` | [Cache config](#cache-subsection) | `lifetime: 5m`<br>`size: 1000` | Cache which stores container CORS configurations. |
#### `cache` subsection
@@ -401,6 +402,7 @@ size: 1000
| `lifetime` | `duration` | depends on cache | Lifetime of entries in cache. |
| `size` | `int` | depends on cache | LRU cache size. |
# `resolve_bucket` section
Bucket name resolving parameters from and to container ID.
@@ -411,10 +413,10 @@ resolve_bucket:
default_namespaces: [ "", "root" ]
```
| Parameter | Type | SIGHUP reload | Default value | Description |
|----------------------|------------|---------------|-----------------------|--------------------------------------------------|
| `namespace_header` | `string` | yes | `X-Frostfs-Namespace` | Header to determine zone to resolve bucket name. |
| `default_namespaces` | `[]string` | yes | ["","root"] | Namespaces that should be handled as default. |
| Parameter | Type | SIGHUP reload | Default value | Description |
|----------------------|------------|---------------|-----------------------|--------------------------------------------------------------------------------------------------------------------------|
| `namespace_header` | `string` | yes | `X-Frostfs-Namespace` | Header to determine zone to resolve bucket name. |
| `default_namespaces` | `[]string` | yes | ["","root"] | Namespaces that should be handled as default. |
# `index_page` section
@@ -439,14 +441,14 @@ index_page:
# `cors` section
Parameters for CORS (used in OPTIONS requests and responses in all handlers).
If values are not set, settings from CORS container will be used.
If values are not set, headers will not be included to response.
```yaml
cors:
allow_origin: "*"
allow_methods: [ "GET", "HEAD" ]
allow_headers: [ "Authorization" ]
expose_headers: [ "*" ]
allow_methods: ["GET", "HEAD"]
allow_headers: ["Authorization"]
expose_headers: ["*"]
allow_credentials: false
max_age: 600
```
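
With settings like these, an OPTIONS preflight should come back with the corresponding Access-Control-* headers. A hedged client sketch follows; the gateway address and the container name in the path are assumptions for illustration.

```go
// Hedged sketch: send a CORS preflight to a locally running gateway and print
// the Access-Control-* headers it returns. Address and path are assumptions.
package main

import (
	"fmt"
	"log"
	"net/http"
)

func main() {
	req, err := http.NewRequest(http.MethodOptions, "http://localhost:8080/get/mycontainer/object", nil)
	if err != nil {
		log.Fatal(err)
	}
	req.Header.Set("Origin", "https://example.com")
	req.Header.Set("Access-Control-Request-Method", "GET")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	for _, h := range []string{
		"Access-Control-Allow-Origin",
		"Access-Control-Allow-Methods",
		"Access-Control-Allow-Headers",
		"Access-Control-Expose-Headers",
		"Access-Control-Allow-Credentials",
		"Access-Control-Max-Age",
	} {
		fmt.Printf("%s: %s\n", h, resp.Header.Get(h))
	}
}
```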
@@ -466,15 +468,15 @@ Configuration of multinet support.
```yaml
multinet:
enabled: false
balancer: roundrobin
restrict: false
fallback_delay: 300ms
subnets:
- mask: 1.2.3.4/24
source_ips:
- 1.2.3.4
- 1.2.3.5
enabled: false
balancer: roundrobin
restrict: false
fallback_delay: 300ms
subnets:
- mask: 1.2.3.4/24
source_ips:
- 1.2.3.4
- 1.2.3.5
```
| Parameter | Type | SIGHUP reload | Default value | Description |
@@ -506,37 +508,10 @@ Contains parameters for enabling features.
```yaml
features:
enable_filepath_fallback: true
enable_filepath_slash_fallback: false
tree_pool_netmap_support: true
```
| Parameter | Type | SIGHUP reload | Default value | Description |
|-------------------------------------------|--------|---------------|---------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `features.enable_filepath_fallback` | `bool` | yes | `false` | Enable using fallback path to search for a object by `FileName` attribute if object with `FilePath` attribute wasn't found. |
| `features.enable_filepath_slash_fallback` | `bool` | yes | `false` | Enable using fallback path to search for a object by `FilePath`/`FileName` with/without (depends on provided value in `FilePath`/`FileName`) leading slash if object with provided `FilePath`/`FileName` wasn't found. This fallback goes before `enable_filepath_fallback`. |
| `features.tree_pool_netmap_support` | `bool` | no | `false` | Enable using new version of tree pool, which uses netmap to select nodes, for requests to tree service. |
# `containers` section
Section for well-known containers to store data and settings.
```yaml
containers:
cors: AZjLTXfK4vs4ovxMic2xEJKSymMNLqdwq9JT64ASFCRj
```
| Parameter | Type | SIGHUP reload | Default value | Description |
|-----------|----------|---------------|---------------|-----------------------------------------|
| `cors` | `string` | no | | Container name for CORS configurations. |
# `contracts` section
```yaml
contracts:
container:
name: container.frostfs
```
| Parameter | Type | SIGHUP reload | Default value | Description |
|------------------|----------|---------------|---------------------|----------------------------------------------|
| `container.name` | `string` | no | `container.frostfs` | Container contract hash (LE) or name in NNS. |
| Parameter | Type | SIGHUP reload | Default value | Description |
|-------------------------------------|--------|---------------|---------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `features.enable_filepath_fallback` | `bool` | yes | `false` | Enable using fallback path to search for a object by attribute. If the value of the `FilePath` attribute in the request contains no `/` symbols or single leading `/` symbol and the object was not found, then an attempt is made to search for the object by the attribute `FileName`. |
| `features.tree_pool_netmap_support` | `bool` | no | `false` | Enable using new version of tree pool, which uses netmap to select nodes, for requests to tree service. |

go.mod

@@ -1,12 +1,10 @@
module git.frostfs.info/TrueCloudLab/frostfs-http-gw
go 1.23
go 1.22
require (
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241125133852-37bd75821121
git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250128150313-cfbca7fa1dfe
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250317082814-87bb55f992dc
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250130095343-593dd77d841a
git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972
git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02
github.com/bluele/gcache v0.0.2
@@ -28,12 +26,14 @@ require (
go.opentelemetry.io/otel/trace v1.31.0
go.uber.org/zap v1.27.0
golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842
golang.org/x/net v0.30.0
golang.org/x/sys v0.28.0
google.golang.org/grpc v1.69.2
)
require (
dario.cat/mergo v1.0.0 // indirect
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e // indirect
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 // indirect
git.frostfs.info/TrueCloudLab/hrw v1.2.1 // indirect
git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0 // indirect
@ -125,7 +125,6 @@ require (
go.opentelemetry.io/proto/otlp v1.3.1 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/crypto v0.31.0 // indirect
golang.org/x/net v0.30.0 // indirect
golang.org/x/sync v0.10.0 // indirect
golang.org/x/term v0.27.0 // indirect
golang.org/x/text v0.21.0 // indirect

go.sum
View file

@ -42,12 +42,10 @@ git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e/go.mod h1:F/fe1OoIDKr5Bz99q4sriuHDuf3aZefZy9ZsCqEtgxc=
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk=
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0/go.mod h1:RUIKZATQLJ+TaYQa60X2fTDwfuhMfm8Ar60bQ5fr+vU=
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250130095343-593dd77d841a h1:Ud+3zz4WP9HPxEQxDPJZPpiPdm30nDNSKucsWP9L54M=
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250130095343-593dd77d841a/go.mod h1:aQpPWfG8oyfJ2X+FenPTJpSRWZjwcP5/RAtkW+/VEX8=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241125133852-37bd75821121 h1:/Z8DfbLZXp7exUQWUKoG/9tbFdI9d5lV1qSReaYoG8I=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241125133852-37bd75821121/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g=
git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250128150313-cfbca7fa1dfe h1:81gDNdWNLP24oMQukRiCE9R1wGSh0l0dRq3F1W+Oesc=
git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250128150313-cfbca7fa1dfe/go.mod h1:PCijYq4oa8vKtIEcUX6jRiszI6XAW+nBwU+T1kB4d1U=
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250317082814-87bb55f992dc h1:fS6Yp4GvI+C22UrWz9oqJXwvQw5Q6SmADIY4H9eIQsc=
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250317082814-87bb55f992dc/go.mod h1:aQpPWfG8oyfJ2X+FenPTJpSRWZjwcP5/RAtkW+/VEX8=
git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc=
git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM=
git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 h1:/960fWeyn2AFHwQUwDsWB3sbP6lTEnFnMzLMM6tx6N8=

View file

@ -1,62 +0,0 @@
package cache
import (
"fmt"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"github.com/bluele/gcache"
"go.uber.org/zap"
)
// CORSCache contains cache with CORS objects.
type CORSCache struct {
cache gcache.Cache
logger *zap.Logger
}
const (
// DefaultCORSCacheSize is a default maximum number of entries in cache.
DefaultCORSCacheSize = 1e3
// DefaultCORSCacheLifetime is a default lifetime of entries in cache.
DefaultCORSCacheLifetime = 5 * time.Minute
)
// DefaultCORSConfig returns new default cache expiration values.
func DefaultCORSConfig(logger *zap.Logger) *Config {
return &Config{
Size: DefaultCORSCacheSize,
Lifetime: DefaultCORSCacheLifetime,
Logger: logger,
}
}
// NewCORSCache creates an object of CORSCache.
func NewCORSCache(config *Config) *CORSCache {
gc := gcache.New(config.Size).LRU().Expiration(config.Lifetime).Build()
return &CORSCache{cache: gc, logger: config.Logger}
}
// Get returns a cached object.
func (o *CORSCache) Get(cnrID cid.ID) *data.CORSConfiguration {
entry, err := o.cache.Get(cnrID)
if err != nil {
return nil
}
result, ok := entry.(*data.CORSConfiguration)
if !ok {
o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
zap.String("expected", fmt.Sprintf("%T", result)), logs.TagField(logs.TagDatapath))
return nil
}
return result
}
// Put puts an object to cache.
func (o *CORSCache) Put(cnrID cid.ID, cors *data.CORSConfiguration) error {
return o.cache.Set(cnrID, cors)
}

View file

@ -1,18 +0,0 @@
package data
type (
// CORSConfiguration stores CORS configuration of a request.
CORSConfiguration struct {
CORSRules []CORSRule `xml:"CORSRule" json:"CORSRules"`
}
// CORSRule stores rules for CORS configuration.
CORSRule struct {
AllowedHeaders []string `xml:"AllowedHeader" json:"AllowedHeaders"`
AllowedMethods []string `xml:"AllowedMethod" json:"AllowedMethods"`
AllowedOrigins []string `xml:"AllowedOrigin" json:"AllowedOrigins"`
ExposeHeaders []string `xml:"ExposeHeader" json:"ExposeHeaders"`
MaxAgeSeconds int `xml:"MaxAgeSeconds,omitempty" json:"MaxAgeSeconds,omitempty"`
AllowedCredentials bool `xml:"AllowedCredentials,omitempty" json:"AllowedCredentials,omitempty"`
}
)

View file

@ -12,6 +12,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@ -130,15 +131,11 @@ func parentDir(prefix string) string {
return prefix[index:]
}
func getParent(encPrefix string) string {
func trimPrefix(encPrefix string) string {
prefix, err := url.PathUnescape(encPrefix)
if err != nil {
return ""
}
if prefix != "" && prefix[len(prefix)-1] == '/' {
prefix = prefix[:len(prefix)-1]
}
slashIndex := strings.LastIndex(prefix, "/")
if slashIndex == -1 {
return ""
@ -164,15 +161,10 @@ func urlencode(path string) string {
type GetObjectsResponse struct {
objects []ResponseObject
hasErrors bool
isNative bool
}
func (h *Handler) getDirObjectsS3(ctx context.Context, bucketInfo *data.BucketInfo, prefix string) (*GetObjectsResponse, error) {
if prefix != "" && prefix[len(prefix)-1] == '/' {
prefix = prefix[:len(prefix)-1]
}
nodes, err := h.tree.GetSubTreeByPrefix(ctx, bucketInfo, prefix, true)
nodes, _, err := h.tree.GetSubTreeByPrefix(ctx, bucketInfo, prefix, true)
if err != nil {
return nil, err
}
@ -193,7 +185,7 @@ func (h *Handler) getDirObjectsS3(ctx context.Context, bucketInfo *data.BucketIn
if obj.IsDeleteMarker {
continue
}
obj.FilePath = prefix + "/" + obj.FileName
obj.FilePath = prefix + obj.FileName
obj.GetURL = "/get/" + bucketInfo.Name + urlencode(obj.FilePath)
result.objects = append(result.objects, obj)
}
@ -202,9 +194,9 @@ func (h *Handler) getDirObjectsS3(ctx context.Context, bucketInfo *data.BucketIn
}
func (h *Handler) getDirObjectsNative(ctx context.Context, bucketInfo *data.BucketInfo, prefix string) (*GetObjectsResponse, error) {
basePath := prefix
if basePath != "" && basePath[len(basePath)-1] != '/' {
basePath += "/"
var basePath string
if ind := strings.LastIndex(prefix, "/"); ind != -1 {
basePath = prefix[:ind+1]
}
filters := object.NewSearchFilters()
@ -231,11 +223,10 @@ func (h *Handler) getDirObjectsNative(ctx context.Context, bucketInfo *data.Buck
return nil, err
}
log := h.reqLogger(ctx)
log := utils.GetReqLogOrDefault(ctx, h.log)
dirs := make(map[string]struct{})
result := &GetObjectsResponse{
objects: make([]ResponseObject, 0, 100),
isNative: true,
objects: make([]ResponseObject, 0, 100),
}
for objExt := range resp {
if objExt.Error != nil {
@ -267,7 +258,7 @@ func (h *Handler) headDirObjects(ctx context.Context, cnrID cid.ID, objectIDs Re
go func() {
defer close(res)
log := h.reqLogger(ctx).With(
log := utils.GetReqLogOrDefault(ctx, h.log).With(
zap.String("cid", cnrID.EncodeToString()),
zap.String("path", basePath),
)
@ -282,7 +273,7 @@ func (h *Handler) headDirObjects(ctx context.Context, cnrID cid.ID, objectIDs Re
})
if err != nil {
wg.Done()
log.Warn(logs.FailedToSubmitTaskToPool, zap.Error(err), logs.TagField(logs.TagDatapath))
log.Warn(logs.FailedToSumbitTaskToPool, zap.Error(err), logs.TagField(logs.TagDatapath))
}
select {
case <-ctx.Done():
@ -331,16 +322,30 @@ func (h *Handler) headDirObject(ctx context.Context, cnrID cid.ID, objID oid.ID,
}
type browseParams struct {
bucketInfo *data.BucketInfo
prefix string
objects *GetObjectsResponse
bucketInfo *data.BucketInfo
prefix string
isNative bool
listObjects func(ctx context.Context, bucketName *data.BucketInfo, prefix string) (*GetObjectsResponse, error)
}
func (h *Handler) browseObjects(ctx context.Context, req *fasthttp.RequestCtx, p browseParams) {
func (h *Handler) browseObjects(c *fasthttp.RequestCtx, p browseParams) {
const S3Protocol = "s3"
const FrostfsProtocol = "frostfs"
objects := p.objects.objects
ctx := utils.GetContextFromRequest(c)
reqLog := utils.GetReqLogOrDefault(ctx, h.log)
log := reqLog.With(
zap.String("bucket", p.bucketInfo.Name),
zap.String("container", p.bucketInfo.CID.EncodeToString()),
zap.String("prefix", p.prefix),
)
resp, err := p.listObjects(ctx, p.bucketInfo, p.prefix)
if err != nil {
logAndSendBucketError(c, log, err)
return
}
objects := resp.objects
sort.Slice(objects, func(i, j int) bool {
if objects[i].IsDir == objects[j].IsDir {
return objects[i].FileName < objects[j].FileName
@ -350,33 +355,28 @@ func (h *Handler) browseObjects(ctx context.Context, req *fasthttp.RequestCtx, p
tmpl, err := template.New("index").Funcs(template.FuncMap{
"formatSize": formatSize,
"getParent": getParent,
"trimPrefix": trimPrefix,
"urlencode": urlencode,
"parentDir": parentDir,
}).Parse(h.config.IndexPageTemplate())
if err != nil {
h.logAndSendError(ctx, req, logs.FailedToParseTemplate, err)
logAndSendBucketError(c, log, err)
return
}
bucketName := p.bucketInfo.Name
protocol := S3Protocol
if p.objects.isNative {
if p.isNative {
bucketName = p.bucketInfo.CID.EncodeToString()
protocol = FrostfsProtocol
}
prefix := p.prefix
if prefix != "" && prefix[len(prefix)-1] != '/' {
prefix += "/"
}
if err = tmpl.Execute(req, &BrowsePageData{
if err = tmpl.Execute(c, &BrowsePageData{
Container: bucketName,
Prefix: prefix,
Prefix: p.prefix,
Objects: objects,
Protocol: protocol,
HasErrors: p.objects.hasErrors,
HasErrors: resp.hasErrors,
}); err != nil {
h.logAndSendError(ctx, req, logs.FailedToExecuteTemplate, err)
logAndSendBucketError(c, log, err)
return
}
}

View file

@ -1,42 +0,0 @@
package handler
import (
"context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"go.uber.org/zap"
)
func (h *Handler) containerInfo(ctx context.Context, cnrID cid.ID) (*data.BucketInfo, error) {
info := &data.BucketInfo{
CID: cnrID,
Name: cnrID.EncodeToString(),
}
res, err := h.cnrContract.GetContainerByID(cnrID)
if err != nil {
return nil, fmt.Errorf("get frostfs container: %w", err)
}
cnr := *res
if domain := container.ReadDomain(cnr); domain.Name() != "" {
info.Name = domain.Name()
info.Zone = domain.Zone()
}
info.HomomorphicHashDisabled = container.IsHomomorphicHashingDisabled(cnr)
info.PlacementPolicy = cnr.PlacementPolicy()
if err = h.cache.Put(info); err != nil {
h.reqLogger(ctx).Warn(logs.CouldntPutBucketIntoCache,
zap.String("bucket name", info.Name),
zap.Stringer("cid", info.CID),
zap.Error(err),
logs.TagField(logs.TagDatapath))
}
return info, nil
}

View file

@ -1,345 +0,0 @@
package handler
import (
"context"
"encoding/xml"
"errors"
"fmt"
"regexp"
"slices"
"sort"
"strconv"
"strings"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
qostagging "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/valyala/fasthttp"
"go.uber.org/zap"
)
const (
internalIOTag = "internal"
corsFilePathTemplate = "/%s.cors"
wildcard = "*"
)
var errNoCORS = errors.New("no CORS objects found")
func (h *Handler) Preflight(req *fasthttp.RequestCtx) {
ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(req), "handler.Preflight")
defer span.End()
ctx = qostagging.ContextWithIOTag(ctx, internalIOTag)
cidParam, _ := req.UserValue("cid").(string)
reqLog := h.reqLogger(ctx)
log := reqLog.With(zap.String("cid", cidParam))
origin := req.Request.Header.Peek(fasthttp.HeaderOrigin)
if len(origin) == 0 {
log.Error(logs.EmptyOriginRequestHeader, logs.TagField(logs.TagDatapath))
ResponseError(req, "Origin request header needed", fasthttp.StatusBadRequest)
return
}
method := req.Request.Header.Peek(fasthttp.HeaderAccessControlRequestMethod)
if len(method) == 0 {
log.Error(logs.EmptyAccessControlRequestMethodHeader, logs.TagField(logs.TagDatapath))
ResponseError(req, "Access-Control-Request-Method request header needed", fasthttp.StatusBadRequest)
return
}
corsRule := h.config.CORS()
if corsRule != nil {
setCORSHeadersFromRule(req, corsRule)
return
}
corsConfig, err := h.getCORSConfig(ctx, log, cidParam)
if err != nil {
log.Error(logs.CouldNotGetCORSConfiguration, zap.Error(err), logs.TagField(logs.TagDatapath))
status := fasthttp.StatusInternalServerError
if errors.Is(err, errNoCORS) {
status = fasthttp.StatusNotFound
}
ResponseError(req, "could not get CORS configuration: "+err.Error(), status)
return
}
var headers []string
requestHeaders := req.Request.Header.Peek(fasthttp.HeaderAccessControlRequestHeaders)
if len(requestHeaders) > 0 {
headers = strings.Split(string(requestHeaders), ", ")
}
for _, rule := range corsConfig.CORSRules {
for _, o := range rule.AllowedOrigins {
if o == string(origin) || o == wildcard || (strings.Contains(o, "*") && match(o, string(origin))) {
for _, m := range rule.AllowedMethods {
if m == string(method) {
if !checkSubslice(rule.AllowedHeaders, headers) {
continue
}
req.Response.Header.Set(fasthttp.HeaderAccessControlAllowOrigin, string(origin))
req.Response.Header.Set(fasthttp.HeaderAccessControlAllowMethods, strings.Join(rule.AllowedMethods, ", "))
if headers != nil {
req.Response.Header.Set(fasthttp.HeaderAccessControlAllowHeaders, string(requestHeaders))
}
if rule.ExposeHeaders != nil {
req.Response.Header.Set(fasthttp.HeaderAccessControlExposeHeaders, strings.Join(rule.ExposeHeaders, ", "))
}
if rule.MaxAgeSeconds > 0 || rule.MaxAgeSeconds == -1 {
req.Response.Header.Set(fasthttp.HeaderAccessControlMaxAge, strconv.Itoa(rule.MaxAgeSeconds))
}
if o != wildcard {
req.Response.Header.Set(fasthttp.HeaderAccessControlAllowCredentials, "true")
}
return
}
}
}
}
}
log.Error(logs.CORSRuleWasNotMatched, logs.TagField(logs.TagDatapath))
ResponseError(req, "Forbidden", fasthttp.StatusForbidden)
}
func (h *Handler) SetCORSHeaders(req *fasthttp.RequestCtx) {
ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(req), "handler.SetCORSHeaders")
defer span.End()
origin := req.Request.Header.Peek(fasthttp.HeaderOrigin)
if len(origin) == 0 {
return
}
method := req.Request.Header.Peek(fasthttp.HeaderAccessControlRequestMethod)
if len(method) == 0 {
method = req.Method()
}
ctx = qostagging.ContextWithIOTag(ctx, internalIOTag)
cidParam, _ := req.UserValue("cid").(string)
reqLog := h.reqLogger(ctx)
log := reqLog.With(zap.String("cid", cidParam))
corsRule := h.config.CORS()
if corsRule != nil {
setCORSHeadersFromRule(req, corsRule)
return
}
corsConfig, err := h.getCORSConfig(ctx, log, cidParam)
if err != nil {
log.Error(logs.CouldNotGetCORSConfiguration, zap.Error(err), logs.TagField(logs.TagDatapath))
return
}
var withCredentials bool
if tkn, err := tokens.LoadBearerToken(ctx); err == nil && tkn != nil {
withCredentials = true
}
for _, rule := range corsConfig.CORSRules {
for _, o := range rule.AllowedOrigins {
if o == string(origin) || (strings.Contains(o, "*") && len(o) > 1 && match(o, string(origin))) {
for _, m := range rule.AllowedMethods {
if m == string(method) {
req.Response.Header.Set(fasthttp.HeaderAccessControlAllowOrigin, string(origin))
req.Response.Header.Set(fasthttp.HeaderAccessControlAllowMethods, strings.Join(rule.AllowedMethods, ", "))
req.Response.Header.Set(fasthttp.HeaderAccessControlAllowCredentials, "true")
req.Response.Header.Set(fasthttp.HeaderVary, fasthttp.HeaderOrigin)
return
}
}
}
if o == wildcard {
for _, m := range rule.AllowedMethods {
if m == string(method) {
if withCredentials {
req.Response.Header.Set(fasthttp.HeaderAccessControlAllowOrigin, string(origin))
req.Response.Header.Set(fasthttp.HeaderAccessControlAllowCredentials, "true")
req.Response.Header.Set(fasthttp.HeaderVary, fasthttp.HeaderOrigin)
} else {
req.Response.Header.Set(fasthttp.HeaderAccessControlAllowOrigin, o)
}
req.Response.Header.Set(fasthttp.HeaderAccessControlAllowMethods, strings.Join(rule.AllowedMethods, ", "))
return
}
}
}
}
}
}
func (h *Handler) getCORSConfig(ctx context.Context, log *zap.Logger, cidStr string) (*data.CORSConfiguration, error) {
cnrID, err := h.resolveContainer(ctx, cidStr)
if err != nil {
return nil, fmt.Errorf("resolve container '%s': %w", cidStr, err)
}
if cors := h.corsCache.Get(*cnrID); cors != nil {
return cors, nil
}
objID, err := h.getLastCORSObject(ctx, *cnrID)
if err != nil {
return nil, fmt.Errorf("get last cors object: %w", err)
}
var addr oid.Address
addr.SetContainer(h.corsCnrID)
addr.SetObject(objID)
corsObj, err := h.frostfs.GetObject(ctx, PrmObjectGet{
Address: addr,
})
if err != nil {
return nil, fmt.Errorf("get cors object '%s': %w", addr.EncodeToString(), err)
}
corsConfig := &data.CORSConfiguration{}
if err = xml.NewDecoder(corsObj.Payload).Decode(corsConfig); err != nil {
return nil, fmt.Errorf("decode cors object: %w", err)
}
if err = h.corsCache.Put(*cnrID, corsConfig); err != nil {
log.Warn(logs.CouldntCacheCors, zap.Error(err), logs.TagField(logs.TagDatapath))
}
return corsConfig, nil
}
func (h *Handler) getLastCORSObject(ctx context.Context, cnrID cid.ID) (oid.ID, error) {
filters := object.NewSearchFilters()
filters.AddRootFilter()
filters.AddFilter(object.AttributeFilePath, fmt.Sprintf(corsFilePathTemplate, cnrID), object.MatchStringEqual)
res, err := h.frostfs.SearchObjects(ctx, PrmObjectSearch{
Container: h.corsCnrID,
Filters: filters,
})
if err != nil {
return oid.ID{}, fmt.Errorf("search cors versions: %w", err)
}
defer res.Close()
var (
addr oid.Address
obj *object.Object
headErr error
objs = make([]*object.Object, 0)
)
addr.SetContainer(h.corsCnrID)
err = res.Iterate(func(id oid.ID) bool {
addr.SetObject(id)
obj, headErr = h.frostfs.HeadObject(ctx, PrmObjectHead{
Address: addr,
})
if headErr != nil {
headErr = fmt.Errorf("head cors object '%s': %w", addr.EncodeToString(), headErr)
return true
}
objs = append(objs, obj)
return false
})
if err != nil {
return oid.ID{}, fmt.Errorf("iterate cors objects: %w", err)
}
if headErr != nil {
return oid.ID{}, headErr
}
if len(objs) == 0 {
return oid.ID{}, errNoCORS
}
sort.Slice(objs, func(i, j int) bool {
versionID1, _ := objs[i].ID()
versionID2, _ := objs[j].ID()
timestamp1 := utils.GetAttributeValue(objs[i].Attributes(), object.AttributeTimestamp)
timestamp2 := utils.GetAttributeValue(objs[j].Attributes(), object.AttributeTimestamp)
if objs[i].CreationEpoch() != objs[j].CreationEpoch() {
return objs[i].CreationEpoch() < objs[j].CreationEpoch()
}
if len(timestamp1) > 0 && len(timestamp2) > 0 && timestamp1 != timestamp2 {
unixTime1, err := strconv.ParseInt(timestamp1, 10, 64)
if err != nil {
return versionID1.EncodeToString() < versionID2.EncodeToString()
}
unixTime2, err := strconv.ParseInt(timestamp2, 10, 64)
if err != nil {
return versionID1.EncodeToString() < versionID2.EncodeToString()
}
return unixTime1 < unixTime2
}
return versionID1.EncodeToString() < versionID2.EncodeToString()
})
objID, _ := objs[len(objs)-1].ID()
return objID, nil
}
func setCORSHeadersFromRule(c *fasthttp.RequestCtx, cors *data.CORSRule) {
c.Response.Header.Set(fasthttp.HeaderAccessControlMaxAge, strconv.Itoa(cors.MaxAgeSeconds))
if len(cors.AllowedOrigins) != 0 {
c.Response.Header.Set(fasthttp.HeaderAccessControlAllowOrigin, cors.AllowedOrigins[0])
}
if len(cors.AllowedMethods) != 0 {
c.Response.Header.Set(fasthttp.HeaderAccessControlAllowMethods, strings.Join(cors.AllowedMethods, ", "))
}
if len(cors.AllowedHeaders) != 0 {
c.Response.Header.Set(fasthttp.HeaderAccessControlAllowHeaders, strings.Join(cors.AllowedHeaders, ", "))
}
if len(cors.ExposeHeaders) != 0 {
c.Response.Header.Set(fasthttp.HeaderAccessControlExposeHeaders, strings.Join(cors.ExposeHeaders, ", "))
}
if cors.AllowedCredentials {
c.Response.Header.Set(fasthttp.HeaderAccessControlAllowCredentials, "true")
}
}
func checkSubslice(slice []string, subSlice []string) bool {
if slices.Contains(slice, wildcard) {
return true
}
for _, r := range subSlice {
if !sliceContains(slice, r) {
return false
}
}
return true
}
func sliceContains(slice []string, str string) bool {
for _, s := range slice {
if s == str || (strings.Contains(s, "*") && match(s, str)) {
return true
}
}
return false
}
func match(tmpl, str string) bool {
regexpStr := "^" + regexp.QuoteMeta(tmpl) + "$"
regexpStr = regexpStr[:strings.Index(regexpStr, "*")-1] + "." + regexpStr[strings.Index(regexpStr, "*"):]
reg := regexp.MustCompile(regexpStr)
return reg.Match([]byte(str))
}

View file

@ -1,930 +0,0 @@
package handler
import (
"encoding/base64"
"encoding/xml"
"fmt"
"net/http"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
"github.com/stretchr/testify/require"
"github.com/valyala/fasthttp"
)
func TestPreflight(t *testing.T) {
hc := prepareHandlerContext(t)
bktName := "bucket-preflight"
cnrID, cnr, err := hc.prepareContainer(bktName, acl.Private)
require.NoError(t, err)
hc.frostfs.SetContainer(cnrID, cnr)
var epoch uint64
t.Run("CORS object", func(t *testing.T) {
for _, tc := range []struct {
name string
corsConfig *data.CORSConfiguration
requestHeaders map[string]string
expectedHeaders map[string]string
status int
}{
{
name: "no CORS configuration",
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "",
fasthttp.HeaderAccessControlAllowMethods: "",
fasthttp.HeaderAccessControlAllowHeaders: "",
fasthttp.HeaderAccessControlExposeHeaders: "",
fasthttp.HeaderAccessControlMaxAge: "",
fasthttp.HeaderAccessControlAllowCredentials: "",
},
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "http://example.com",
fasthttp.HeaderAccessControlRequestMethod: "HEAD",
},
status: fasthttp.StatusNotFound,
},
{
name: "specific allowed origin",
corsConfig: &data.CORSConfiguration{
CORSRules: []data.CORSRule{
{
AllowedOrigins: []string{"http://example.com"},
AllowedMethods: []string{"GET", "HEAD"},
AllowedHeaders: []string{"Content-Type"},
ExposeHeaders: []string{"x-amz-*", "X-Amz-*"},
MaxAgeSeconds: 900,
},
},
},
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "http://example.com",
fasthttp.HeaderAccessControlRequestMethod: "HEAD",
fasthttp.HeaderAccessControlRequestHeaders: "Content-Type",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "http://example.com",
fasthttp.HeaderAccessControlAllowMethods: "GET, HEAD",
fasthttp.HeaderAccessControlAllowHeaders: "Content-Type",
fasthttp.HeaderAccessControlExposeHeaders: "x-amz-*, X-Amz-*",
fasthttp.HeaderAccessControlMaxAge: "900",
fasthttp.HeaderAccessControlAllowCredentials: "true",
},
status: fasthttp.StatusOK,
},
{
name: "wildcard allowed origin",
corsConfig: &data.CORSConfiguration{
CORSRules: []data.CORSRule{
{
AllowedOrigins: []string{"*"},
AllowedMethods: []string{"GET", "HEAD"},
AllowedHeaders: []string{"Content-Type"},
ExposeHeaders: []string{"x-amz-*", "X-Amz-*"},
MaxAgeSeconds: 900,
},
},
},
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "http://example.com",
fasthttp.HeaderAccessControlRequestMethod: "HEAD",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "http://example.com",
fasthttp.HeaderAccessControlAllowMethods: "GET, HEAD",
fasthttp.HeaderAccessControlAllowHeaders: "",
fasthttp.HeaderAccessControlExposeHeaders: "x-amz-*, X-Amz-*",
fasthttp.HeaderAccessControlMaxAge: "900",
fasthttp.HeaderAccessControlAllowCredentials: "",
},
status: fasthttp.StatusOK,
},
{
name: "not allowed header",
corsConfig: &data.CORSConfiguration{
CORSRules: []data.CORSRule{
{
AllowedOrigins: []string{"*"},
AllowedMethods: []string{"GET", "HEAD"},
AllowedHeaders: []string{"Content-Type"},
},
},
},
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "http://example.com",
fasthttp.HeaderAccessControlRequestMethod: "GET",
fasthttp.HeaderAccessControlRequestHeaders: "Authorization",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "",
fasthttp.HeaderAccessControlAllowMethods: "",
fasthttp.HeaderAccessControlAllowHeaders: "",
fasthttp.HeaderAccessControlExposeHeaders: "",
fasthttp.HeaderAccessControlMaxAge: "",
fasthttp.HeaderAccessControlAllowCredentials: "",
},
status: fasthttp.StatusForbidden,
},
{
name: "empty Origin header",
corsConfig: &data.CORSConfiguration{
CORSRules: []data.CORSRule{
{
AllowedOrigins: []string{"*"},
AllowedMethods: []string{"GET", "HEAD"},
},
},
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "",
fasthttp.HeaderAccessControlAllowMethods: "",
fasthttp.HeaderAccessControlAllowHeaders: "",
fasthttp.HeaderAccessControlExposeHeaders: "",
fasthttp.HeaderAccessControlMaxAge: "",
fasthttp.HeaderAccessControlAllowCredentials: "",
},
status: fasthttp.StatusBadRequest,
},
{
name: "empty Access-Control-Request-Method header",
corsConfig: &data.CORSConfiguration{
CORSRules: []data.CORSRule{
{
AllowedOrigins: []string{"*"},
AllowedMethods: []string{"GET", "HEAD"},
},
},
},
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "http://example.com",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "",
fasthttp.HeaderAccessControlAllowMethods: "",
fasthttp.HeaderAccessControlAllowHeaders: "",
fasthttp.HeaderAccessControlExposeHeaders: "",
fasthttp.HeaderAccessControlMaxAge: "",
fasthttp.HeaderAccessControlAllowCredentials: "",
},
status: fasthttp.StatusBadRequest,
},
} {
t.Run(tc.name, func(t *testing.T) {
if tc.corsConfig != nil {
epoch++
setCORSObject(t, hc, cnrID, tc.corsConfig, epoch)
}
r := prepareCORSRequest(t, bktName, tc.requestHeaders)
hc.Handler().Preflight(r)
require.Equal(t, tc.status, r.Response.StatusCode())
for k, v := range tc.expectedHeaders {
require.Equal(t, v, string(r.Response.Header.Peek(k)))
}
})
}
})
t.Run("CORS config", func(t *testing.T) {
hc.cfg.cors = &data.CORSRule{
AllowedOrigins: []string{"*"},
AllowedMethods: []string{"GET", "HEAD"},
AllowedHeaders: []string{"Content-Type", "Content-Encoding"},
ExposeHeaders: []string{"x-amz-*", "X-Amz-*"},
MaxAgeSeconds: 900,
AllowedCredentials: true,
}
r := prepareCORSRequest(t, bktName, map[string]string{
fasthttp.HeaderOrigin: "http://example.com",
fasthttp.HeaderAccessControlRequestMethod: "GET",
})
hc.Handler().Preflight(r)
require.Equal(t, fasthttp.StatusOK, r.Response.StatusCode())
require.Equal(t, "900", string(r.Response.Header.Peek(fasthttp.HeaderAccessControlMaxAge)))
require.Equal(t, "*", string(r.Response.Header.Peek(fasthttp.HeaderAccessControlAllowOrigin)))
require.Equal(t, "GET, HEAD", string(r.Response.Header.Peek(fasthttp.HeaderAccessControlAllowMethods)))
require.Equal(t, "Content-Type, Content-Encoding", string(r.Response.Header.Peek(fasthttp.HeaderAccessControlAllowHeaders)))
require.Equal(t, "x-amz-*, X-Amz-*", string(r.Response.Header.Peek(fasthttp.HeaderAccessControlExposeHeaders)))
require.Equal(t, "true", string(r.Response.Header.Peek(fasthttp.HeaderAccessControlAllowCredentials)))
})
}
func TestSetCORSHeaders(t *testing.T) {
hc := prepareHandlerContext(t)
bktName := "bucket-set-cors-headers"
cnrID, cnr, err := hc.prepareContainer(bktName, acl.Private)
require.NoError(t, err)
hc.frostfs.SetContainer(cnrID, cnr)
var epoch uint64
t.Run("CORS object", func(t *testing.T) {
for _, tc := range []struct {
name string
corsConfig *data.CORSConfiguration
requestHeaders map[string]string
expectedHeaders map[string]string
}{
{
name: "empty Origin header",
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "",
fasthttp.HeaderAccessControlAllowMethods: "",
fasthttp.HeaderVary: "",
fasthttp.HeaderAccessControlAllowCredentials: "",
},
},
{
name: "no CORS configuration",
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "",
fasthttp.HeaderAccessControlAllowMethods: "",
fasthttp.HeaderVary: "",
fasthttp.HeaderAccessControlAllowCredentials: "",
},
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "http://example.com",
},
},
{
name: "specific allowed origin",
corsConfig: &data.CORSConfiguration{
CORSRules: []data.CORSRule{
{
AllowedOrigins: []string{"http://example.com"},
AllowedMethods: []string{"GET", "HEAD"},
},
},
},
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "http://example.com",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "http://example.com",
fasthttp.HeaderAccessControlAllowMethods: "GET, HEAD",
fasthttp.HeaderVary: fasthttp.HeaderOrigin,
fasthttp.HeaderAccessControlAllowCredentials: "true",
},
},
{
name: "wildcard allowed origin, with credentials",
corsConfig: &data.CORSConfiguration{
CORSRules: []data.CORSRule{
{
AllowedOrigins: []string{"*"},
AllowedMethods: []string{"GET", "HEAD"},
},
},
},
requestHeaders: func() map[string]string {
tkn := new(bearer.Token)
err = tkn.Sign(hc.key.PrivateKey)
require.NoError(t, err)
t64 := base64.StdEncoding.EncodeToString(tkn.Marshal())
require.NotEmpty(t, t64)
return map[string]string{
fasthttp.HeaderOrigin: "http://example.com",
fasthttp.HeaderAuthorization: "Bearer " + t64,
}
}(),
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "http://example.com",
fasthttp.HeaderAccessControlAllowMethods: "GET, HEAD",
fasthttp.HeaderVary: fasthttp.HeaderOrigin,
fasthttp.HeaderAccessControlAllowCredentials: "true",
},
},
{
name: "wildcard allowed origin, without credentials",
corsConfig: &data.CORSConfiguration{
CORSRules: []data.CORSRule{
{
AllowedOrigins: []string{"*"},
AllowedMethods: []string{"GET", "HEAD"},
},
},
},
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "http://example.com",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "*",
fasthttp.HeaderAccessControlAllowMethods: "GET, HEAD",
fasthttp.HeaderVary: "",
fasthttp.HeaderAccessControlAllowCredentials: "",
},
},
} {
t.Run(tc.name, func(t *testing.T) {
epoch++
setCORSObject(t, hc, cnrID, tc.corsConfig, epoch)
r := prepareCORSRequest(t, bktName, tc.requestHeaders)
hc.Handler().SetCORSHeaders(r)
require.Equal(t, fasthttp.StatusOK, r.Response.StatusCode())
for k, v := range tc.expectedHeaders {
require.Equal(t, v, string(r.Response.Header.Peek(k)))
}
})
}
})
t.Run("CORS config", func(t *testing.T) {
hc.cfg.cors = &data.CORSRule{
AllowedOrigins: []string{"*"},
AllowedMethods: []string{"GET", "HEAD"},
AllowedHeaders: []string{"Content-Type", "Content-Encoding"},
ExposeHeaders: []string{"x-amz-*", "X-Amz-*"},
MaxAgeSeconds: 900,
AllowedCredentials: true,
}
r := prepareCORSRequest(t, bktName, map[string]string{fasthttp.HeaderOrigin: "http://example.com"})
hc.Handler().SetCORSHeaders(r)
require.Equal(t, "900", string(r.Response.Header.Peek(fasthttp.HeaderAccessControlMaxAge)))
require.Equal(t, "*", string(r.Response.Header.Peek(fasthttp.HeaderAccessControlAllowOrigin)))
require.Equal(t, "GET, HEAD", string(r.Response.Header.Peek(fasthttp.HeaderAccessControlAllowMethods)))
require.Equal(t, "Content-Type, Content-Encoding", string(r.Response.Header.Peek(fasthttp.HeaderAccessControlAllowHeaders)))
require.Equal(t, "x-amz-*, X-Amz-*", string(r.Response.Header.Peek(fasthttp.HeaderAccessControlExposeHeaders)))
require.Equal(t, "true", string(r.Response.Header.Peek(fasthttp.HeaderAccessControlAllowCredentials)))
})
}
func TestCheckSubslice(t *testing.T) {
for _, tc := range []struct {
name string
allowed []string
actual []string
expected bool
}{
{
name: "empty allowed slice",
allowed: []string{},
actual: []string{"str1", "str2", "str3"},
expected: false,
},
{
name: "empty actual slice",
allowed: []string{"str1", "str2", "str3"},
actual: []string{},
expected: true,
},
{
name: "allowed wildcard",
allowed: []string{"str", "*"},
actual: []string{"str1", "str2", "str3"},
expected: true,
},
{
name: "similar allowed and actual",
allowed: []string{"str1", "str2", "str3"},
actual: []string{"str1", "str2", "str3"},
expected: true,
},
{
name: "allowed actual",
allowed: []string{"str", "str1", "str2", "str4"},
actual: []string{"str1", "str2"},
expected: true,
},
{
name: "not allowed actual",
allowed: []string{"str", "str1", "str2", "str4"},
actual: []string{"str1", "str5"},
expected: false,
},
{
name: "wildcard in allowed",
allowed: []string{"str*"},
actual: []string{"str", "str5"},
expected: true,
},
} {
t.Run(tc.name, func(t *testing.T) {
require.Equal(t, tc.expected, checkSubslice(tc.allowed, tc.actual))
})
}
}
func TestAllowedOriginWildcards(t *testing.T) {
hc := prepareHandlerContext(t)
bktName := "bucket-allowed-origin-wildcards"
cnrID, cnr, err := hc.prepareContainer(bktName, acl.Private)
require.NoError(t, err)
hc.frostfs.SetContainer(cnrID, cnr)
cfg := &data.CORSConfiguration{
CORSRules: []data.CORSRule{
{
AllowedOrigins: []string{"*suffix.example"},
AllowedMethods: []string{"GET"},
},
{
AllowedOrigins: []string{"https://*example"},
AllowedMethods: []string{"GET"},
},
{
AllowedOrigins: []string{"prefix.example*"},
AllowedMethods: []string{"GET"},
},
},
}
setCORSObject(t, hc, cnrID, cfg, 1)
for _, tc := range []struct {
name string
handler func(*fasthttp.RequestCtx)
requestHeaders map[string]string
expectedHeaders map[string]string
expectedStatus int
}{
{
name: "set cors headers, empty request cors headers",
handler: hc.Handler().SetCORSHeaders,
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "",
fasthttp.HeaderAccessControlAllowMethods: "",
},
},
{
name: "set cors headers, invalid origin",
handler: hc.Handler().SetCORSHeaders,
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "https://origin.com",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "",
fasthttp.HeaderAccessControlAllowMethods: "",
},
},
{
name: "set cors headers, first rule, no symbols in place of wildcard",
handler: hc.Handler().SetCORSHeaders,
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "suffix.example",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "suffix.example",
fasthttp.HeaderAccessControlAllowMethods: "GET",
},
},
{
name: "set cors headers, first rule, valid origin",
handler: hc.Handler().SetCORSHeaders,
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "http://suffix.example",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "http://suffix.example",
fasthttp.HeaderAccessControlAllowMethods: "GET",
},
},
{
name: "set cors headers, first rule, invalid origin",
handler: hc.Handler().SetCORSHeaders,
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "http://suffix-example",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "",
fasthttp.HeaderAccessControlAllowMethods: "",
},
},
{
name: "set cors headers, second rule, no symbols in place of wildcard",
handler: hc.Handler().SetCORSHeaders,
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "https://example",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "https://example",
fasthttp.HeaderAccessControlAllowMethods: "GET",
},
},
{
name: "set cors headers, second rule, valid origin",
handler: hc.Handler().SetCORSHeaders,
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "https://www.example",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "https://www.example",
fasthttp.HeaderAccessControlAllowMethods: "GET",
},
},
{
name: "set cors headers, second rule, invalid origin",
handler: hc.Handler().SetCORSHeaders,
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "https://www.example.com",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "",
fasthttp.HeaderAccessControlAllowMethods: "",
},
},
{
name: "set cors headers, third rule, no symbols in place of wildcard",
handler: hc.Handler().SetCORSHeaders,
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "prefix.example",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "prefix.example",
fasthttp.HeaderAccessControlAllowMethods: "GET",
},
},
{
name: "set cors headers, third rule, valid origin",
handler: hc.Handler().SetCORSHeaders,
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "prefix.example.com",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "prefix.example.com",
fasthttp.HeaderAccessControlAllowMethods: "GET",
},
},
{
name: "set cors headers, third rule, invalid origin",
handler: hc.Handler().SetCORSHeaders,
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "www.prefix.example",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "",
fasthttp.HeaderAccessControlAllowMethods: "",
},
},
{
name: "set cors headers, third rule, invalid request method in header",
handler: hc.Handler().SetCORSHeaders,
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "prefix.example.com",
fasthttp.HeaderAccessControlRequestMethod: "PUT",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "",
fasthttp.HeaderAccessControlAllowMethods: "",
},
},
{
name: "set cors headers, third rule, valid request method in header",
handler: hc.Handler().SetCORSHeaders,
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "prefix.example.com",
fasthttp.HeaderAccessControlRequestMethod: "GET",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "prefix.example.com",
fasthttp.HeaderAccessControlAllowMethods: "GET",
},
},
{
name: "preflight, empty request cors headers",
handler: hc.Handler().Preflight,
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "",
fasthttp.HeaderAccessControlAllowMethods: "",
},
expectedStatus: http.StatusBadRequest,
},
{
name: "preflight, invalid origin",
handler: hc.Handler().Preflight,
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "https://origin.com",
fasthttp.HeaderAccessControlRequestMethod: "GET",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "",
fasthttp.HeaderAccessControlAllowMethods: "",
},
expectedStatus: http.StatusForbidden,
},
{
name: "preflight, first rule, no symbols in place of wildcard",
handler: hc.Handler().Preflight,
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "suffix.example",
fasthttp.HeaderAccessControlRequestMethod: "GET",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "suffix.example",
fasthttp.HeaderAccessControlAllowMethods: "GET",
},
},
{
name: "prelight, first rule, valid origin",
handler: hc.Handler().Preflight,
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "http://suffix.example",
fasthttp.HeaderAccessControlRequestMethod: "GET",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "http://suffix.example",
fasthttp.HeaderAccessControlAllowMethods: "GET",
},
},
{
name: "preflight, first rule, invalid origin",
handler: hc.Handler().Preflight,
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "http://suffix-example",
fasthttp.HeaderAccessControlRequestMethod: "GET",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "",
fasthttp.HeaderAccessControlAllowMethods: "",
},
expectedStatus: http.StatusForbidden,
},
{
name: "preflight, second rule, no symbols in place of wildcard",
handler: hc.Handler().Preflight,
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "https://example",
fasthttp.HeaderAccessControlRequestMethod: "GET",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "https://example",
fasthttp.HeaderAccessControlAllowMethods: "GET",
},
},
{
name: "preflight, second rule, valid origin",
handler: hc.Handler().Preflight,
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "https://www.example",
fasthttp.HeaderAccessControlRequestMethod: "GET",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "https://www.example",
fasthttp.HeaderAccessControlAllowMethods: "GET",
},
},
{
name: "preflight, second rule, invalid origin",
handler: hc.Handler().Preflight,
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "https://www.example.com",
fasthttp.HeaderAccessControlRequestMethod: "GET",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "",
fasthttp.HeaderAccessControlAllowMethods: "",
},
expectedStatus: http.StatusForbidden,
},
{
name: "preflight, third rule, no symbols in place of wildcard",
handler: hc.Handler().Preflight,
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "prefix.example",
fasthttp.HeaderAccessControlRequestMethod: "GET",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "prefix.example",
fasthttp.HeaderAccessControlAllowMethods: "GET",
},
},
{
name: "preflight, third rule, valid origin",
handler: hc.Handler().Preflight,
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "prefix.example.com",
fasthttp.HeaderAccessControlRequestMethod: "GET",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "prefix.example.com",
fasthttp.HeaderAccessControlAllowMethods: "GET",
},
},
{
name: "preflight, third rule, invalid origin",
handler: hc.Handler().Preflight,
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "www.prefix.example",
fasthttp.HeaderAccessControlRequestMethod: "GET",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "",
fasthttp.HeaderAccessControlAllowMethods: "",
},
expectedStatus: http.StatusForbidden,
},
{
name: "preflight, third rule, invalid request method in header",
handler: hc.Handler().Preflight,
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "prefix.example.com",
fasthttp.HeaderAccessControlRequestMethod: "PUT",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "",
fasthttp.HeaderAccessControlAllowMethods: "",
},
expectedStatus: http.StatusForbidden,
},
} {
t.Run(tc.name, func(t *testing.T) {
r := prepareCORSRequest(t, bktName, tc.requestHeaders)
tc.handler(r)
expectedStatus := fasthttp.StatusOK
if tc.expectedStatus != 0 {
expectedStatus = tc.expectedStatus
}
require.Equal(t, expectedStatus, r.Response.StatusCode())
for k, v := range tc.expectedHeaders {
require.Equal(t, v, string(r.Response.Header.Peek(k)))
}
})
}
}
func TestAllowedHeaderWildcards(t *testing.T) {
hc := prepareHandlerContext(t)
bktName := "bucket-allowed-header-wildcards"
cnrID, cnr, err := hc.prepareContainer(bktName, acl.Private)
require.NoError(t, err)
hc.frostfs.SetContainer(cnrID, cnr)
cfg := &data.CORSConfiguration{
CORSRules: []data.CORSRule{
{
AllowedOrigins: []string{"https://www.example.com"},
AllowedMethods: []string{"HEAD"},
AllowedHeaders: []string{"*-suffix"},
},
{
AllowedOrigins: []string{"https://www.example.com"},
AllowedMethods: []string{"HEAD"},
AllowedHeaders: []string{"start-*-end"},
},
{
AllowedOrigins: []string{"https://www.example.com"},
AllowedMethods: []string{"HEAD"},
AllowedHeaders: []string{"X-Amz-*"},
},
},
}
setCORSObject(t, hc, cnrID, cfg, 1)
for _, tc := range []struct {
name string
requestHeaders map[string]string
expectedHeaders map[string]string
expectedStatus int
}{
{
name: "first rule, valid headers",
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "https://www.example.com",
fasthttp.HeaderAccessControlRequestMethod: "HEAD",
fasthttp.HeaderAccessControlRequestHeaders: "header-suffix, -suffix",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "https://www.example.com",
fasthttp.HeaderAccessControlAllowMethods: "HEAD",
fasthttp.HeaderAccessControlAllowHeaders: "header-suffix, -suffix",
},
},
{
name: "first rule, invalid headers",
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "https://www.example.com",
fasthttp.HeaderAccessControlRequestMethod: "HEAD",
fasthttp.HeaderAccessControlRequestHeaders: "header-suffix-*",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "",
fasthttp.HeaderAccessControlAllowMethods: "",
fasthttp.HeaderAccessControlAllowHeaders: "",
},
expectedStatus: http.StatusForbidden,
},
{
name: "second rule, valid headers",
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "https://www.example.com",
fasthttp.HeaderAccessControlRequestMethod: "HEAD",
fasthttp.HeaderAccessControlRequestHeaders: "start--end, start-header-end",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "https://www.example.com",
fasthttp.HeaderAccessControlAllowMethods: "HEAD",
fasthttp.HeaderAccessControlAllowHeaders: "start--end, start-header-end",
},
},
{
name: "second rule, invalid header ending",
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "https://www.example.com",
fasthttp.HeaderAccessControlRequestMethod: "HEAD",
fasthttp.HeaderAccessControlRequestHeaders: "start-header-end-*",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "",
fasthttp.HeaderAccessControlAllowMethods: "",
fasthttp.HeaderAccessControlAllowHeaders: "",
},
expectedStatus: http.StatusForbidden,
},
{
name: "second rule, invalid header beginning",
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "https://www.example.com",
fasthttp.HeaderAccessControlRequestMethod: "HEAD",
fasthttp.HeaderAccessControlRequestHeaders: "*-start-header-end",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "",
fasthttp.HeaderAccessControlAllowMethods: "",
fasthttp.HeaderAccessControlAllowHeaders: "",
},
expectedStatus: http.StatusForbidden,
},
{
name: "third rule, valid headers",
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "https://www.example.com",
fasthttp.HeaderAccessControlRequestMethod: "HEAD",
fasthttp.HeaderAccessControlRequestHeaders: "X-Amz-Date, X-Amz-Content-Sha256",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "https://www.example.com",
fasthttp.HeaderAccessControlAllowMethods: "HEAD",
fasthttp.HeaderAccessControlAllowHeaders: "X-Amz-Date, X-Amz-Content-Sha256",
},
},
{
name: "third rule, invalid headers",
requestHeaders: map[string]string{
fasthttp.HeaderOrigin: "https://www.example.com",
fasthttp.HeaderAccessControlRequestMethod: "HEAD",
fasthttp.HeaderAccessControlRequestHeaders: "Authorization",
},
expectedHeaders: map[string]string{
fasthttp.HeaderAccessControlAllowOrigin: "",
fasthttp.HeaderAccessControlAllowMethods: "",
fasthttp.HeaderAccessControlAllowHeaders: "",
},
expectedStatus: http.StatusForbidden,
},
} {
t.Run(tc.name, func(t *testing.T) {
r := prepareCORSRequest(t, bktName, tc.requestHeaders)
hc.Handler().Preflight(r)
expectedStatus := http.StatusOK
if tc.expectedStatus != 0 {
expectedStatus = tc.expectedStatus
}
require.Equal(t, expectedStatus, r.Response.StatusCode())
for k, v := range tc.expectedHeaders {
require.Equal(t, v, string(r.Response.Header.Peek(k)))
}
})
}
}
func setCORSObject(t *testing.T, hc *handlerContext, cnrID cid.ID, corsConfig *data.CORSConfiguration, epoch uint64) {
payload, err := xml.Marshal(corsConfig)
require.NoError(t, err)
a := object.NewAttribute()
a.SetKey(object.AttributeFilePath)
a.SetValue(fmt.Sprintf(corsFilePathTemplate, cnrID))
objID := oidtest.ID()
obj := object.New()
obj.SetAttributes(*a)
obj.SetOwnerID(hc.owner)
obj.SetPayload(payload)
obj.SetPayloadSize(uint64(len(payload)))
obj.SetContainerID(hc.corsCnr)
obj.SetID(objID)
obj.SetCreationEpoch(epoch)
var addr oid.Address
addr.SetObject(objID)
addr.SetContainer(hc.corsCnr)
hc.frostfs.SetObject(addr, obj)
}

View file

@ -10,12 +10,11 @@ import (
"fmt"
"io"
"net/url"
"strings"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/layer"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
@ -26,199 +25,57 @@ import (
)
// DownloadByAddressOrBucketName handles download requests using simple cid/oid or bucketname/key format.
func (h *Handler) DownloadByAddressOrBucketName(req *fasthttp.RequestCtx) {
ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(req), "handler.DownloadByAddressOrBucketName")
func (h *Handler) DownloadByAddressOrBucketName(c *fasthttp.RequestCtx) {
ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(c), "handler.DownloadByAddressOrBucketName")
defer span.End()
utils.SetContextToRequest(ctx, c)
cidParam := req.UserValue("cid").(string)
oidParam := req.UserValue("oid").(string)
cidParam := c.UserValue("cid").(string)
oidParam := c.UserValue("oid").(string)
downloadParam := c.QueryArgs().GetBool("download")
ctx = utils.SetReqLog(ctx, h.reqLogger(ctx).With(
log := utils.GetReqLogOrDefault(ctx, h.log).With(
zap.String("cid", cidParam),
zap.String("oid", oidParam),
))
)
path, err := url.QueryUnescape(oidParam)
bktInfo, err := h.getBucketInfo(ctx, cidParam, log)
if err != nil {
h.logAndSendError(ctx, req, logs.FailedToUnescapePath, err)
return
}
bktInfo, err := h.getBucketInfo(ctx, cidParam)
if err != nil {
h.logAndSendError(ctx, req, logs.FailedToGetBucketInfo, err)
logAndSendBucketError(c, log, err)
return
}
checkS3Err := h.tree.CheckSettingsNodeExists(ctx, bktInfo)
if checkS3Err != nil && !errors.Is(checkS3Err, tree.ErrNodeNotFound) {
h.logAndSendError(ctx, req, logs.FailedToCheckIfSettingsNodeExist, checkS3Err)
if checkS3Err != nil && !errors.Is(checkS3Err, layer.ErrNodeNotFound) {
log.Error(logs.FailedToCheckIfSettingsNodeExist, zap.String("cid", bktInfo.CID.String()),
zap.Error(checkS3Err), logs.TagField(logs.TagExternalStorageTree))
logAndSendBucketError(c, log, checkS3Err)
return
}
prm := MiddlewareParam{
Context: ctx,
Request: req,
BktInfo: bktInfo,
Path: path,
}
req := newRequest(c, log)
indexPageEnabled := h.config.IndexPageEnabled()
if checkS3Err == nil {
run(prm, h.errorMiddleware(logs.ObjectNotFound, ErrObjectNotFound),
Middleware{Func: h.byS3PathMiddleware(h.receiveFile, noopFormer), Enabled: true},
Middleware{Func: h.byS3PathMiddleware(h.receiveFile, indexFormer), Enabled: indexPageEnabled},
Middleware{Func: h.browseIndexMiddleware(h.getDirObjectsS3), Enabled: indexPageEnabled},
)
var objID oid.ID
if checkS3Err == nil && shouldDownload(oidParam, downloadParam) {
h.byS3Path(ctx, req, bktInfo.CID, oidParam, h.receiveFile)
} else if err = objID.DecodeString(oidParam); err == nil {
h.byNativeAddress(ctx, req, bktInfo.CID, objID, h.receiveFile)
} else {
slashFallbackEnabled := h.config.EnableFilepathSlashFallback()
fileNameFallbackEnabled := h.config.EnableFilepathFallback()
run(prm, h.errorMiddleware(logs.ObjectNotFound, ErrObjectNotFound),
Middleware{Func: h.byAddressMiddleware(h.receiveFile), Enabled: true},
Middleware{Func: h.byAttributeSearchMiddleware(h.receiveFile, object.AttributeFilePath, noopFormer), Enabled: true},
Middleware{Func: h.byAttributeSearchMiddleware(h.receiveFile, object.AttributeFilePath, reverseLeadingSlash), Enabled: slashFallbackEnabled},
Middleware{Func: h.byAttributeSearchMiddleware(h.receiveFile, object.AttributeFileName, noopFormer), Enabled: fileNameFallbackEnabled},
Middleware{Func: h.byAttributeSearchMiddleware(h.receiveFile, object.AttributeFileName, reverseLeadingSlash), Enabled: fileNameFallbackEnabled && slashFallbackEnabled},
Middleware{Func: h.byAttributeSearchMiddleware(h.receiveFile, object.AttributeFilePath, indexFormer), Enabled: indexPageEnabled},
Middleware{Func: h.byAttributeSearchMiddleware(h.receiveFile, object.AttributeFileName, indexFormer), Enabled: fileNameFallbackEnabled && indexPageEnabled},
Middleware{Func: h.browseIndexMiddleware(h.getDirObjectsNative), Enabled: indexPageEnabled},
)
h.browseIndex(c, checkS3Err != nil)
}
}
type ObjectHandlerFunc func(context.Context, *fasthttp.RequestCtx, oid.Address)
type MiddlewareFunc func(param MiddlewareParam) bool
type MiddlewareParam struct {
Context context.Context
Request *fasthttp.RequestCtx
BktInfo *data.BucketInfo
Path string
}
type Middleware struct {
Func MiddlewareFunc
Enabled bool
}
func run(prm MiddlewareParam, defaultMiddleware MiddlewareFunc, middlewares ...Middleware) {
for _, m := range middlewares {
if m.Enabled && !m.Func(prm) {
return
}
}
defaultMiddleware(prm)
}
func indexFormer(path string) string {
indexPath := path
if indexPath != "" && !strings.HasSuffix(indexPath, "/") {
indexPath += "/"
}
return indexPath + "index.html"
}
func reverseLeadingSlash(path string) string {
if path == "" || path == "/" {
return path
}
if path[0] == '/' {
return path[1:]
}
return "/" + path
}
func noopFormer(path string) string {
return path
}
func (h *Handler) byS3PathMiddleware(handler func(context.Context, *fasthttp.RequestCtx, oid.Address), pathFormer func(string) string) MiddlewareFunc {
return func(prm MiddlewareParam) bool {
ctx, span := tracing.StartSpanFromContext(prm.Context, "handler.byS3Path")
defer span.End()
path := pathFormer(prm.Path)
foundOID, err := h.tree.GetLatestVersion(ctx, &prm.BktInfo.CID, path)
if err == nil {
if foundOID.IsDeleteMarker {
h.logAndSendError(ctx, prm.Request, logs.IndexWasDeleted, ErrObjectNotFound)
return false
}
addr := newAddress(prm.BktInfo.CID, foundOID.OID)
handler(ctx, prm.Request, addr)
return false
}
if !errors.Is(err, tree.ErrNodeNotFound) {
h.logAndSendError(ctx, prm.Request, logs.FailedToGetLatestVersionOfIndexObject, err, zap.String("path", path))
return false
}
return true
}
}
func (h *Handler) byAttributeSearchMiddleware(handler ObjectHandlerFunc, attr string, pathFormer func(string) string) MiddlewareFunc {
return func(prm MiddlewareParam) bool {
ctx, span := tracing.StartSpanFromContext(prm.Context, "handler.byAttributeSearch")
defer span.End()
path := pathFormer(prm.Path)
res, err := h.search(ctx, prm.BktInfo.CID, attr, path, object.MatchStringEqual)
if err != nil {
h.logAndSendError(ctx, prm.Request, logs.FailedToFindObjectByAttribute, err)
return false
}
defer res.Close()
buf := make([]oid.ID, 1)
n, err := res.Read(buf)
if err == nil && n > 0 {
addr := newAddress(prm.BktInfo.CID, buf[0])
handler(ctx, prm.Request, addr)
return false
}
if !errors.Is(err, io.EOF) {
h.logAndSendError(ctx, prm.Request, logs.FailedToFindObjectByAttribute, err)
return false
}
return true
}
}
func (h *Handler) byAddressMiddleware(handler ObjectHandlerFunc) MiddlewareFunc {
return func(prm MiddlewareParam) bool {
ctx, span := tracing.StartSpanFromContext(prm.Context, "handler.byAddress")
defer span.End()
var objID oid.ID
if objID.DecodeString(prm.Path) == nil {
handler(ctx, prm.Request, newAddress(prm.BktInfo.CID, objID))
return false
}
return true
}
func shouldDownload(oidParam string, downloadParam bool) bool {
return !isDir(oidParam) || downloadParam
}
// DownloadByAttribute handles attribute-based download requests.
func (h *Handler) DownloadByAttribute(req *fasthttp.RequestCtx) {
ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(req), "handler.DownloadByAttribute")
func (h *Handler) DownloadByAttribute(c *fasthttp.RequestCtx) {
ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(c), "handler.DownloadByAttribute")
defer span.End()
utils.SetContextToRequest(ctx, c)
h.byAttribute(ctx, req, h.receiveFile)
h.byAttribute(c, h.receiveFile)
}
func (h *Handler) search(ctx context.Context, cnrID cid.ID, key, val string, op object.SearchMatchType) (ResObjectSearch, error) {
@ -238,33 +95,31 @@ func (h *Handler) search(ctx context.Context, cnrID cid.ID, key, val string, op
}
// DownloadZip handles zip by prefix requests.
func (h *Handler) DownloadZip(req *fasthttp.RequestCtx) {
ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(req), "handler.DownloadZip")
func (h *Handler) DownloadZip(c *fasthttp.RequestCtx) {
ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(c), "handler.DownloadZip")
defer span.End()
utils.SetContextToRequest(ctx, c)
scid, _ := req.UserValue("cid").(string)
prefix, _ := req.UserValue("prefix").(string)
scid, _ := c.UserValue("cid").(string)
ctx = utils.SetReqLog(ctx, h.reqLogger(ctx).With(zap.String("cid", scid), zap.String("prefix", prefix)))
bktInfo, err := h.getBucketInfo(ctx, scid)
log := utils.GetReqLogOrDefault(ctx, h.log)
bktInfo, err := h.getBucketInfo(ctx, scid, log)
if err != nil {
h.logAndSendError(ctx, req, logs.FailedToGetBucketInfo, err)
logAndSendBucketError(c, log, err)
return
}
resSearch, err := h.searchObjectsByPrefix(ctx, bktInfo.CID, prefix)
resSearch, err := h.searchObjectsByPrefix(c, log, bktInfo.CID)
if err != nil {
return
}
req.Response.Header.Set(fasthttp.HeaderContentType, "application/zip")
req.Response.Header.Set(fasthttp.HeaderContentDisposition, "attachment; filename=\"archive.zip\"")
c.Response.Header.Set(fasthttp.HeaderContentType, "application/zip")
c.Response.Header.Set(fasthttp.HeaderContentDisposition, "attachment; filename=\"archive.zip\"")
req.SetBodyStreamWriter(h.getZipResponseWriter(ctx, resSearch, bktInfo))
c.SetBodyStreamWriter(h.getZipResponseWriter(ctx, log, resSearch, bktInfo))
}
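// getZipResponseWriter returns a body stream writer that iterates over the search results and streams each object into a zip archive.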
func (h *Handler) getZipResponseWriter(ctx context.Context, resSearch ResObjectSearch, bktInfo *data.BucketInfo) func(w *bufio.Writer) {
func (h *Handler) getZipResponseWriter(ctx context.Context, log *zap.Logger, resSearch ResObjectSearch, bktInfo *data.BucketInfo) func(w *bufio.Writer) {
return func(w *bufio.Writer) {
defer resSearch.Close()
@ -272,20 +127,20 @@ func (h *Handler) getZipResponseWriter(ctx context.Context, resSearch ResObjectS
zipWriter := zip.NewWriter(w)
var objectsWritten int
errIter := resSearch.Iterate(h.putObjectToArchive(ctx, bktInfo.CID, buf,
errIter := resSearch.Iterate(h.putObjectToArchive(ctx, log, bktInfo.CID, buf,
func(obj *object.Object) (io.Writer, error) {
objectsWritten++
return h.createZipFile(zipWriter, obj)
}),
)
if errIter != nil {
h.reqLogger(ctx).Error(logs.IteratingOverSelectedObjectsFailed, zap.Error(errIter), logs.TagField(logs.TagDatapath))
log.Error(logs.IteratingOverSelectedObjectsFailed, zap.Error(errIter), logs.TagField(logs.TagDatapath))
return
} else if objectsWritten == 0 {
h.reqLogger(ctx).Warn(logs.ObjectsNotFound, logs.TagField(logs.TagDatapath))
log.Warn(logs.ObjectsNotFound, logs.TagField(logs.TagDatapath))
}
if err := zipWriter.Close(); err != nil {
h.reqLogger(ctx).Error(logs.CloseZipWriter, zap.Error(err), logs.TagField(logs.TagDatapath))
log.Error(logs.CloseZipWriter, zap.Error(err), logs.TagField(logs.TagDatapath))
}
}
}
@ -309,33 +164,31 @@ func (h *Handler) createZipFile(zw *zip.Writer, obj *object.Object) (io.Writer,
}
// DownloadTar forms a tar.gz archive from objects selected by prefix.
func (h *Handler) DownloadTar(req *fasthttp.RequestCtx) {
ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(req), "handler.DownloadTar")
func (h *Handler) DownloadTar(c *fasthttp.RequestCtx) {
ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(c), "handler.DownloadTar")
defer span.End()
utils.SetContextToRequest(ctx, c)
scid, _ := req.UserValue("cid").(string)
prefix, _ := req.UserValue("prefix").(string)
scid, _ := c.UserValue("cid").(string)
ctx = utils.SetReqLog(ctx, h.reqLogger(ctx).With(zap.String("cid", scid), zap.String("prefix", prefix)))
bktInfo, err := h.getBucketInfo(ctx, scid)
log := utils.GetReqLogOrDefault(ctx, h.log)
bktInfo, err := h.getBucketInfo(ctx, scid, log)
if err != nil {
h.logAndSendError(ctx, req, logs.FailedToGetBucketInfo, err)
logAndSendBucketError(c, log, err)
return
}
resSearch, err := h.searchObjectsByPrefix(ctx, bktInfo.CID, prefix)
resSearch, err := h.searchObjectsByPrefix(c, log, bktInfo.CID)
if err != nil {
return
}
req.Response.Header.Set(fasthttp.HeaderContentType, "application/gzip")
req.Response.Header.Set(fasthttp.HeaderContentDisposition, "attachment; filename=\"archive.tar.gz\"")
c.Response.Header.Set(fasthttp.HeaderContentType, "application/gzip")
c.Response.Header.Set(fasthttp.HeaderContentDisposition, "attachment; filename=\"archive.tar.gz\"")
req.SetBodyStreamWriter(h.getTarResponseWriter(ctx, resSearch, bktInfo))
c.SetBodyStreamWriter(h.getTarResponseWriter(ctx, log, resSearch, bktInfo))
}
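// getTarResponseWriter returns a body stream writer that wraps the response in gzip and streams each found object into a tar archive.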
func (h *Handler) getTarResponseWriter(ctx context.Context, resSearch ResObjectSearch, bktInfo *data.BucketInfo) func(w *bufio.Writer) {
func (h *Handler) getTarResponseWriter(ctx context.Context, log *zap.Logger, resSearch ResObjectSearch, bktInfo *data.BucketInfo) func(w *bufio.Writer) {
return func(w *bufio.Writer) {
defer resSearch.Close()
@ -350,26 +203,26 @@ func (h *Handler) getTarResponseWriter(ctx context.Context, resSearch ResObjectS
defer func() {
if err := tarWriter.Close(); err != nil {
h.reqLogger(ctx).Error(logs.CloseTarWriter, zap.Error(err), logs.TagField(logs.TagDatapath))
log.Error(logs.CloseTarWriter, zap.Error(err), logs.TagField(logs.TagDatapath))
}
if err := gzipWriter.Close(); err != nil {
h.reqLogger(ctx).Error(logs.CloseGzipWriter, zap.Error(err), logs.TagField(logs.TagDatapath))
log.Error(logs.CloseGzipWriter, zap.Error(err), logs.TagField(logs.TagDatapath))
}
}()
var objectsWritten int
buf := make([]byte, 3<<20) // the same as for upload
errIter := resSearch.Iterate(h.putObjectToArchive(ctx, bktInfo.CID, buf,
errIter := resSearch.Iterate(h.putObjectToArchive(ctx, log, bktInfo.CID, buf,
func(obj *object.Object) (io.Writer, error) {
objectsWritten++
return h.createTarFile(tarWriter, obj)
}),
)
if errIter != nil {
h.reqLogger(ctx).Error(logs.IteratingOverSelectedObjectsFailed, zap.Error(errIter), logs.TagField(logs.TagDatapath))
log.Error(logs.IteratingOverSelectedObjectsFailed, zap.Error(errIter), logs.TagField(logs.TagDatapath))
} else if objectsWritten == 0 {
h.reqLogger(ctx).Warn(logs.ObjectsNotFound, logs.TagField(logs.TagDatapath))
log.Warn(logs.ObjectsNotFound, logs.TagField(logs.TagDatapath))
}
}
}
@ -387,9 +240,9 @@ func (h *Handler) createTarFile(tw *tar.Writer, obj *object.Object) (io.Writer,
})
}
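// putObjectToArchive returns an iteration callback that fetches an object by ID, writes its archive header via createArchiveHeader and copies the payload into the archive.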
func (h *Handler) putObjectToArchive(ctx context.Context, cnrID cid.ID, buf []byte, createArchiveHeader func(obj *object.Object) (io.Writer, error)) func(id oid.ID) bool {
func (h *Handler) putObjectToArchive(ctx context.Context, log *zap.Logger, cnrID cid.ID, buf []byte, createArchiveHeader func(obj *object.Object) (io.Writer, error)) func(id oid.ID) bool {
return func(id oid.ID) bool {
logger := h.reqLogger(ctx).With(zap.String("oid", id.EncodeToString()))
log = log.With(zap.String("oid", id.EncodeToString()))
prm := PrmObjectGet{
PrmAuth: PrmAuth{
@ -400,18 +253,18 @@ func (h *Handler) putObjectToArchive(ctx context.Context, cnrID cid.ID, buf []by
resGet, err := h.frostfs.GetObject(ctx, prm)
if err != nil {
logger.Error(logs.FailedToGetObject, zap.Error(err), logs.TagField(logs.TagExternalStorage))
log.Error(logs.FailedToGetObject, zap.Error(err), logs.TagField(logs.TagExternalStorage))
return false
}
fileWriter, err := createArchiveHeader(&resGet.Header)
if err != nil {
logger.Error(logs.FailedToAddObjectToArchive, zap.Error(err), logs.TagField(logs.TagDatapath))
log.Error(logs.FailedToAddObjectToArchive, zap.Error(err), logs.TagField(logs.TagDatapath))
return false
}
if err = writeToArchive(resGet, fileWriter, buf); err != nil {
logger.Error(logs.FailedToAddObjectToArchive, zap.Error(err), logs.TagField(logs.TagDatapath))
log.Error(logs.FailedToAddObjectToArchive, zap.Error(err), logs.TagField(logs.TagDatapath))
return false
}
@ -419,17 +272,28 @@ func (h *Handler) putObjectToArchive(ctx context.Context, cnrID cid.ID, buf []by
}
}
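// searchObjectsByPrefix unescapes the prefix taken from the request and searches the container for objects whose FilePath attribute matches it as a common prefix.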
func (h *Handler) searchObjectsByPrefix(ctx context.Context, cnrID cid.ID, prefix string) (ResObjectSearch, error) {
func (h *Handler) searchObjectsByPrefix(c *fasthttp.RequestCtx, log *zap.Logger, cnrID cid.ID) (ResObjectSearch, error) {
scid, _ := c.UserValue("cid").(string)
prefix, _ := c.UserValue("prefix").(string)
ctx := utils.GetContextFromRequest(c)
prefix, err := url.QueryUnescape(prefix)
if err != nil {
return nil, fmt.Errorf("unescape prefix: %w", err)
log.Error(logs.FailedToUnescapeQuery, zap.String("cid", scid), zap.String("prefix", prefix),
zap.Error(err), logs.TagField(logs.TagDatapath))
ResponseError(c, "could not unescape prefix: "+err.Error(), fasthttp.StatusBadRequest)
return nil, err
}
log = log.With(zap.String("cid", scid), zap.String("prefix", prefix))
resSearch, err := h.search(ctx, cnrID, object.AttributeFilePath, prefix, object.MatchCommonPrefix)
if err != nil {
return nil, fmt.Errorf("search objects by prefix: %w", err)
log.Error(logs.CouldNotSearchForObjects, zap.Error(err), logs.TagField(logs.TagExternalStorage))
ResponseError(c, "could not search for objects: "+err.Error(), fasthttp.StatusBadRequest)
return nil, err
}
return resSearch, nil
}

View file

@ -52,10 +52,6 @@ func (t *TestFrostFS) SetContainer(cnrID cid.ID, cnr *container.Container) {
t.containers[cnrID.EncodeToString()] = cnr
}
func (t *TestFrostFS) SetObject(addr oid.Address, obj *object.Object) {
t.objects[addr.EncodeToString()] = obj
}
// AllowUserOperation grants access to object operations.
// Empty userID and objID mean any user and any object respectively.
func (t *TestFrostFS) AllowUserOperation(cnrID cid.ID, userID user.ID, op acl.Op, objID oid.ID) {
@ -233,16 +229,6 @@ func (t *TestFrostFS) SearchObjects(_ context.Context, prm PrmObjectSearch) (Res
return &resObjectSearchMock{res: res}, nil
}
func (t *TestFrostFS) GetContainerByID(cid cid.ID) (*container.Container, error) {
for k, v := range t.containers {
if k == cid.EncodeToString() {
return v, nil
}
}
return nil, fmt.Errorf("container does not exist %s", cid)
}
func (t *TestFrostFS) InitMultiObjectReader(context.Context, PrmInitMultiObjectReader) (io.Reader, error) {
return nil, nil
}

View file

@ -11,11 +11,12 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/cache"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler/middleware"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/layer"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@ -35,9 +36,6 @@ type Config interface {
BufferMaxSizeForPut() uint64
NamespaceHeader() string
EnableFilepathFallback() bool
EnableFilepathSlashFallback() bool
FormContainerZone(string) string
CORS() *data.CORSRule
}
// PrmContainer groups parameters of FrostFS.Container operation.
@ -144,10 +142,6 @@ var (
ErrGatewayTimeout = errors.New("gateway timeout")
// ErrQuotaLimitReached is returned from FrostFS when the quota limit is exceeded.
ErrQuotaLimitReached = errors.New("quota limit reached")
// ErrContainerNotFound is returned from FrostFS when the container is not found.
ErrContainerNotFound = errors.New("container not found")
// ErrObjectNotFound is returned from FrostFS when the object is not found.
ErrObjectNotFound = errors.New("object not found")
)
// FrostFS represents virtual connection to FrostFS network.
@ -164,12 +158,7 @@ type FrostFS interface {
}
type ContainerResolver interface {
Resolve(ctx context.Context, zone, name string) (*cid.ID, error)
}
type ContainerContract interface {
// GetContainerByID reads a container from contract by ID.
GetContainerByID(cid.ID) (*container.Container, error)
Resolve(ctx context.Context, name string) (*cid.ID, error)
}
type Handler struct {
@ -178,25 +167,20 @@ type Handler struct {
ownerID *user.ID
config Config
containerResolver ContainerResolver
cnrContract ContainerContract
tree *tree.Tree
tree layer.TreeService
cache *cache.BucketCache
workerPool *ants.Pool
corsCnrID cid.ID
corsCache *cache.CORSCache
}
type AppParams struct {
Logger *zap.Logger
FrostFS FrostFS
Owner *user.ID
Resolver ContainerResolver
Cache *cache.BucketCache
CORSCnrID cid.ID
CORSCache *cache.CORSCache
Logger *zap.Logger
FrostFS FrostFS
Owner *user.ID
Resolver ContainerResolver
Cache *cache.BucketCache
}
func New(params *AppParams, config Config, tree *tree.Tree, rpcCli ContainerContract, workerPool *ants.Pool) *Handler {
func New(params *AppParams, config Config, tree layer.TreeService, workerPool *ants.Pool) *Handler {
return &Handler{
log: params.Logger,
frostfs: params.FrostFS,
@ -206,45 +190,89 @@ func New(params *AppParams, config Config, tree *tree.Tree, rpcCli ContainerCont
tree: tree,
cache: params.Cache,
workerPool: workerPool,
corsCnrID: params.CORSCnrID,
corsCache: params.CORSCache,
cnrContract: rpcCli,
}
}
// byNativeAddress is a wrapper for handler functions (e.g. request.headObject, request.receiveFile) that
// builds the object address from the container and object IDs and passes the request on to the handler.
func (h *Handler) byNativeAddress(ctx context.Context, req request, cnrID cid.ID, objID oid.ID, handler func(context.Context, request, oid.Address)) {
ctx, span := tracing.StartSpanFromContext(ctx, "handler.byNativeAddress")
defer span.End()
addr := newAddress(cnrID, objID)
handler(ctx, req, addr)
}
// byS3Path is a wrapper for handler functions (e.g. request.headObject, request.receiveFile) that
// resolves the object address from an S3-like path <bucket name>/<object key>.
func (h *Handler) byS3Path(ctx context.Context, req request, cnrID cid.ID, path string, handler func(context.Context, request, oid.Address)) {
ctx, span := tracing.StartSpanFromContext(ctx, "handler.byS3Path")
defer span.End()
c, log := req.RequestCtx, req.log
foundOID, err := h.tree.GetLatestVersion(ctx, &cnrID, path)
if err != nil {
log.Error(logs.FailedToGetLatestVersionOfObject, zap.Error(err), zap.String("cid", cnrID.String()),
zap.String("path", path), logs.TagField(logs.TagExternalStorageTree))
logAndSendBucketError(c, log, err)
return
}
if foundOID.IsDeleteMarker {
log.Error(logs.ObjectWasDeleted, logs.TagField(logs.TagExternalStorageTree))
ResponseError(c, "object deleted", fasthttp.StatusNotFound)
return
}
addr := newAddress(cnrID, foundOID.OID)
handler(ctx, newRequest(c, log), addr)
}
// byAttribute is a wrapper similar to byNativeAddress that resolves the object address by an attribute key/value search.
func (h *Handler) byAttribute(ctx context.Context, req *fasthttp.RequestCtx, handler func(context.Context, *fasthttp.RequestCtx, oid.Address)) {
cidParam, _ := req.UserValue("cid").(string)
key, _ := req.UserValue("attr_key").(string)
val, _ := req.UserValue("attr_val").(string)
func (h *Handler) byAttribute(c *fasthttp.RequestCtx, handler func(context.Context, request, oid.Address)) {
cidParam, _ := c.UserValue("cid").(string)
key, _ := c.UserValue("attr_key").(string)
val, _ := c.UserValue("attr_val").(string)
ctx := utils.GetContextFromRequest(c)
log := utils.GetReqLogOrDefault(ctx, h.log)
key, err := url.QueryUnescape(key)
if err != nil {
h.logAndSendError(ctx, req, logs.FailedToUnescapeQuery, err, zap.String("cid", cidParam), zap.String("attr_key", key))
log.Error(logs.FailedToUnescapeQuery, zap.String("cid", cidParam), zap.String("attr_key", key),
zap.Error(err), logs.TagField(logs.TagDatapath))
ResponseError(c, "could not unescape attr_key: "+err.Error(), fasthttp.StatusBadRequest)
return
}
val, err = url.QueryUnescape(val)
if err != nil {
h.logAndSendError(ctx, req, logs.FailedToUnescapeQuery, err, zap.String("cid", cidParam), zap.String("attr_val", key))
log.Error(logs.FailedToUnescapeQuery, zap.String("cid", cidParam), zap.String("attr_val", val),
zap.Error(err), logs.TagField(logs.TagDatapath))
ResponseError(c, "could not unescape attr_val: "+err.Error(), fasthttp.StatusBadRequest)
return
}
ctx = utils.SetReqLog(ctx, h.reqLogger(ctx).With(zap.String("cid", cidParam),
zap.String("attr_key", key), zap.String("attr_val", val)))
if key == attrFileName {
val = prepareFileName(val)
}
bktInfo, err := h.getBucketInfo(ctx, cidParam)
log = log.With(zap.String("cid", cidParam), zap.String("attr_key", key), zap.String("attr_val", val))
bktInfo, err := h.getBucketInfo(ctx, cidParam, log)
if err != nil {
h.logAndSendError(ctx, req, logs.FailedToGetBucketInfo, err)
logAndSendBucketError(c, log, err)
return
}
objID, err := h.findObjectByAttribute(ctx, bktInfo.CID, key, val)
objID, err := h.findObjectByAttribute(ctx, log, bktInfo.CID, key, val)
if err != nil {
if errors.Is(err, io.EOF) {
err = fmt.Errorf("%w: %s", ErrObjectNotFound, err.Error())
ResponseError(c, err.Error(), fasthttp.StatusNotFound)
return
}
h.logAndSendError(ctx, req, logs.FailedToFindObjectByAttribute, err)
ResponseError(c, err.Error(), fasthttp.StatusBadRequest)
return
}
@ -252,13 +280,14 @@ func (h *Handler) byAttribute(ctx context.Context, req *fasthttp.RequestCtx, han
addr.SetContainer(bktInfo.CID)
addr.SetObject(objID)
handler(ctx, req, addr)
handler(ctx, newRequest(c, log), addr)
}
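// findObjectByAttribute searches the container for an object with the given attribute value and returns the first match; an error wrapping io.EOF means no object was found.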
func (h *Handler) findObjectByAttribute(ctx context.Context, cnrID cid.ID, attrKey, attrVal string) (oid.ID, error) {
func (h *Handler) findObjectByAttribute(ctx context.Context, log *zap.Logger, cnrID cid.ID, attrKey, attrVal string) (oid.ID, error) {
res, err := h.search(ctx, cnrID, attrKey, attrVal, object.MatchStringEqual)
if err != nil {
return oid.ID{}, fmt.Errorf("search objects: %w", err)
log.Error(logs.CouldNotSearchForObjects, zap.Error(err), logs.TagField(logs.TagExternalStorage))
return oid.ID{}, fmt.Errorf("could not search for objects: %w", err)
}
defer res.Close()
@ -267,11 +296,14 @@ func (h *Handler) findObjectByAttribute(ctx context.Context, cnrID cid.ID, attrK
n, err := res.Read(buf)
if n == 0 {
switch {
case errors.Is(err, io.EOF) && h.needSearchByFileName(attrKey, attrVal):
log.Debug(logs.ObjectNotFoundByFilePathTrySearchByFileName, logs.TagField(logs.TagExternalStorage))
return h.findObjectByAttribute(ctx, log, cnrID, attrFileName, prepareFileName(attrVal))
case errors.Is(err, io.EOF):
h.reqLogger(ctx).Error(logs.ObjectNotFound, zap.Error(err), logs.TagField(logs.TagExternalStorage))
log.Error(logs.ObjectNotFound, zap.Error(err), logs.TagField(logs.TagExternalStorage))
return oid.ID{}, fmt.Errorf("object not found: %w", err)
default:
h.reqLogger(ctx).Error(logs.ReadObjectListFailed, zap.Error(err), logs.TagField(logs.TagExternalStorage))
log.Error(logs.ReadObjectListFailed, zap.Error(err), logs.TagField(logs.TagExternalStorage))
return oid.ID{}, fmt.Errorf("read object list failed: %w", err)
}
}
@ -279,28 +311,37 @@ func (h *Handler) findObjectByAttribute(ctx context.Context, cnrID cid.ID, attrK
return buf[0], nil
}
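// needSearchByFileName reports whether a fallback search by FileName should be attempted: the original lookup must be by FilePath with the fallback enabled, and the value must be a bare name or a name with a single leading slash.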
func (h *Handler) needSearchByFileName(key, val string) bool {
if key != attrFilePath || !h.config.EnableFilepathFallback() {
return false
}
return strings.HasPrefix(val, "/") && strings.Count(val, "/") == 1 || !strings.Contains(val, "/")
}
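// prepareFileName strips a single leading slash from the value before the FileName search.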
func prepareFileName(fileName string) string {
if strings.HasPrefix(fileName, "/") {
return fileName[1:]
}
return fileName
}
// resolveContainer decodes the container ID; if it is not a valid container ID,
// it tries to resolve the name using the provided resolver.
func (h *Handler) resolveContainer(ctx context.Context, containerID string) (*cid.ID, error) {
cnrID := new(cid.ID)
err := cnrID.DecodeString(containerID)
if err != nil {
var namespace string
namespace, err = middleware.GetNamespace(ctx)
if err != nil {
return nil, err
}
zone := h.config.FormContainerZone(namespace)
cnrID, err = h.containerResolver.Resolve(ctx, zone, containerID)
cnrID, err = h.containerResolver.Resolve(ctx, containerID)
if err != nil && strings.Contains(err.Error(), "not found") {
err = fmt.Errorf("%w: %s", ErrContainerNotFound, err.Error())
err = fmt.Errorf("%w: %s", new(apistatus.ContainerNotFound), err.Error())
}
}
return cnrID, err
}
func (h *Handler) getBucketInfo(ctx context.Context, containerName string) (*data.BucketInfo, error) {
func (h *Handler) getBucketInfo(ctx context.Context, containerName string, log *zap.Logger) (*data.BucketInfo, error) {
ns, err := middleware.GetNamespace(ctx)
if err != nil {
return nil, err
@ -312,37 +353,91 @@ func (h *Handler) getBucketInfo(ctx context.Context, containerName string) (*dat
cnrID, err := h.resolveContainer(ctx, containerName)
if err != nil {
return nil, fmt.Errorf("resolve container: %w", err)
log.Error(logs.CouldNotResolveContainerID, zap.Error(err), zap.String("cnrName", containerName),
logs.TagField(logs.TagDatapath))
return nil, err
}
return h.containerInfo(ctx, *cnrID)
}
type ListFunc func(ctx context.Context, bucketInfo *data.BucketInfo, prefix string) (*GetObjectsResponse, error)
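// browseIndexMiddleware lists objects for the requested prefix using fn and renders the browse page; it always terminates the middleware chain.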
func (h *Handler) browseIndexMiddleware(fn ListFunc) MiddlewareFunc {
return func(prm MiddlewareParam) bool {
ctx, span := tracing.StartSpanFromContext(prm.Context, "handler.browseIndex")
defer span.End()
ctx = utils.SetReqLog(ctx, h.reqLogger(ctx).With(
zap.String("bucket", prm.BktInfo.Name),
zap.String("container", prm.BktInfo.CID.EncodeToString()),
zap.String("prefix", prm.Path),
))
objects, err := fn(ctx, prm.BktInfo, prm.Path)
if err != nil {
h.logAndSendError(ctx, prm.Request, logs.FailedToListObjects, err)
return false
}
h.browseObjects(ctx, prm.Request, browseParams{
bucketInfo: prm.BktInfo,
prefix: prm.Path,
objects: objects,
})
return false
bktInfo, err := h.readContainer(ctx, *cnrID)
if err != nil {
log.Error(logs.CouldNotGetContainerInfo, zap.Error(err), zap.String("cnrName", containerName),
zap.String("cnrName", cnrID.String()),
logs.TagField(logs.TagExternalStorage))
return nil, err
}
if err = h.cache.Put(bktInfo); err != nil {
log.Warn(logs.CouldntPutBucketIntoCache,
zap.String("bucket name", bktInfo.Name),
zap.Stringer("bucket cid", bktInfo.CID),
zap.Error(err),
logs.TagField(logs.TagDatapath))
}
return bktInfo, nil
}
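// readContainer fetches the container from FrostFS and fills BucketInfo with its domain name and zone, homomorphic hashing flag and placement policy.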
func (h *Handler) readContainer(ctx context.Context, cnrID cid.ID) (*data.BucketInfo, error) {
prm := PrmContainer{ContainerID: cnrID}
res, err := h.frostfs.Container(ctx, prm)
if err != nil {
return nil, fmt.Errorf("get frostfs container '%s': %w", cnrID.String(), err)
}
bktInfo := &data.BucketInfo{
CID: cnrID,
Name: cnrID.EncodeToString(),
}
if domain := container.ReadDomain(*res); domain.Name() != "" {
bktInfo.Name = domain.Name()
bktInfo.Zone = domain.Zone()
}
bktInfo.HomomorphicHashDisabled = container.IsHomomorphicHashingDisabled(*res)
bktInfo.PlacementPolicy = res.PlacementPolicy()
return bktInfo, err
}
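// browseIndex renders the index page for the requested prefix, choosing the S3 (tree) listing or the native listing depending on isNativeList.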
func (h *Handler) browseIndex(c *fasthttp.RequestCtx, isNativeList bool) {
ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(c), "handler.browseIndex")
defer span.End()
utils.SetContextToRequest(ctx, c)
if !h.config.IndexPageEnabled() {
c.SetStatusCode(fasthttp.StatusNotFound)
return
}
cidURLParam := c.UserValue("cid").(string)
oidURLParam := c.UserValue("oid").(string)
reqLog := utils.GetReqLogOrDefault(ctx, h.log)
log := reqLog.With(zap.String("cid", cidURLParam), zap.String("oid", oidURLParam))
unescapedKey, err := url.QueryUnescape(oidURLParam)
if err != nil {
logAndSendBucketError(c, log, err)
return
}
bktInfo, err := h.getBucketInfo(ctx, cidURLParam, log)
if err != nil {
logAndSendBucketError(c, log, err)
return
}
listFunc := h.getDirObjectsS3
if isNativeList {
// tree probe failed, falling back to the native listing
listFunc = h.getDirObjectsNative
}
h.browseObjects(c, browseParams{
bucketInfo: bktInfo,
prefix: unescapedKey,
listObjects: listFunc,
isNative: isNativeList,
})
}

View file

@ -21,7 +21,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
go_fuzz_utils "github.com/trailofbits/go-fuzz-utils"
"github.com/valyala/fasthttp"
"go.uber.org/zap"
)
const (
@ -126,7 +125,7 @@ func maybeFillRandom(tp *go_fuzz_utils.TypeProvider, initValue string) (string,
}
func upload(tp *go_fuzz_utils.TypeProvider) (context.Context, *handlerContext, cid.ID, *fasthttp.RequestCtx, string, string, string, error) {
hc, err := prepareHandlerContextBase(zap.NewExample())
hc, err := prepareHandlerContext()
if err != nil {
return nil, nil, cid.ID{}, nil, "", "", "", err
}

View file

@ -14,12 +14,9 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/cache"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler/middleware"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/templates"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/layer"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/resolver"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
v2container "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
@ -27,21 +24,42 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/panjf2000/ants/v2"
"github.com/stretchr/testify/require"
"github.com/valyala/fasthttp"
"go.uber.org/zap"
"go.uber.org/zap/zaptest"
)
type treeServiceMock struct {
system map[string]map[string]*data.BaseNodeVersion
}
func newTreeService() *treeServiceMock {
return &treeServiceMock{
system: make(map[string]map[string]*data.BaseNodeVersion),
}
}
func (t *treeServiceMock) CheckSettingsNodeExists(context.Context, *data.BucketInfo) error {
_, ok := t.system["bucket-settings"]
if !ok {
return layer.ErrNodeNotFound
}
return nil
}
func (t *treeServiceMock) GetSubTreeByPrefix(context.Context, *data.BucketInfo, string, bool) ([]data.NodeInfo, string, error) {
return nil, "", nil
}
func (t *treeServiceMock) GetLatestVersion(context.Context, *cid.ID, string) (*data.NodeVersion, error) {
return nil, nil
}
type configMock struct {
additionalFilenameSearch bool
additionalSlashSearch bool
indexEnabled bool
cors *data.CORSRule
additionalSearch bool
}
func (c *configMock) DefaultTimestamp() bool {
@ -53,11 +71,11 @@ func (c *configMock) ArchiveCompression() bool {
}
func (c *configMock) IndexPageEnabled() bool {
return c.indexEnabled
return false
}
func (c *configMock) IndexPageTemplate() string {
return templates.DefaultIndexTemplate
return ""
}
func (c *configMock) IndexPageNativeTemplate() string {
@ -77,29 +95,16 @@ func (c *configMock) NamespaceHeader() string {
}
func (c *configMock) EnableFilepathFallback() bool {
return c.additionalFilenameSearch
}
func (c *configMock) EnableFilepathSlashFallback() bool {
return c.additionalSlashSearch
}
func (c *configMock) FormContainerZone(string) string {
return v2container.SysAttributeZoneDefault
}
func (c *configMock) CORS() *data.CORSRule {
return c.cors
return c.additionalSearch
}
type handlerContext struct {
key *keys.PrivateKey
owner user.ID
corsCnr cid.ID
key *keys.PrivateKey
owner user.ID
h *Handler
frostfs *TestFrostFS
tree *treeServiceClientMock
tree *treeServiceMock
cfg *configMock
}
@ -107,13 +112,12 @@ func (hc *handlerContext) Handler() *Handler {
return hc.h
}
func prepareHandlerContext(t *testing.T) *handlerContext {
hc, err := prepareHandlerContextBase(zaptest.NewLogger(t))
require.NoError(t, err)
return hc
}
func prepareHandlerContext() (*handlerContext, error) {
logger, err := zap.NewDevelopment()
if err != nil {
return nil, err
}
func prepareHandlerContextBase(logger *zap.Logger) (*handlerContext, error) {
key, err := keys.NewPrivateKey()
if err != nil {
return nil, err
@ -125,12 +129,10 @@ func prepareHandlerContextBase(logger *zap.Logger) (*handlerContext, error) {
testFrostFS := NewTestFrostFS(key)
testResolver := &resolver.Resolver{Name: "test_resolver"}
testResolver.SetResolveFunc(func(_ context.Context, _, name string) (*cid.ID, error) {
testResolver.SetResolveFunc(func(_ context.Context, name string) (*cid.ID, error) {
return testFrostFS.ContainerID(name)
})
cnrID := createCORSContainer(owner, testFrostFS)
params := &AppParams{
Logger: logger,
FrostFS: testFrostFS,
@ -141,27 +143,20 @@ func prepareHandlerContextBase(logger *zap.Logger) (*handlerContext, error) {
Lifetime: 1,
Logger: logger,
}, false),
CORSCnrID: cnrID,
CORSCache: cache.NewCORSCache(&cache.Config{
Size: 1,
Lifetime: 1,
Logger: logger,
}),
}
treeMock := newTreeServiceClientMock()
treeMock := newTreeService()
cfgMock := &configMock{}
workerPool, err := ants.NewPool(1)
if err != nil {
return nil, err
}
handler := New(params, cfgMock, tree.NewTree(treeMock, logger), testFrostFS, workerPool)
handler := New(params, cfgMock, treeMock, workerPool)
return &handlerContext{
key: key,
owner: owner,
corsCnr: cnrID,
h: handler,
frostfs: testFrostFS,
tree: treeMock,
@ -169,20 +164,6 @@ func prepareHandlerContextBase(logger *zap.Logger) (*handlerContext, error) {
}, nil
}
func createCORSContainer(owner user.ID, frostfs *TestFrostFS) cid.ID {
var cnr container.Container
cnr.Init()
cnr.SetOwner(owner)
cnrID := cidtest.ID()
frostfs.SetContainer(cnrID, &cnr)
frostfs.AllowUserOperation(cnrID, owner, acl.OpObjectSearch, oid.ID{})
frostfs.AllowUserOperation(cnrID, owner, acl.OpObjectHead, oid.ID{})
frostfs.AllowUserOperation(cnrID, owner, acl.OpObjectGet, oid.ID{})
return cnrID
}
func (hc *handlerContext) prepareContainer(name string, basicACL acl.Basic) (cid.ID, *container.Container, error) {
var pp netmap.PlacementPolicy
err := pp.DecodeString("REP 1")
@ -215,7 +196,8 @@ func (hc *handlerContext) prepareContainer(name string, basicACL acl.Basic) (cid
}
func TestBasic(t *testing.T) {
hc := prepareHandlerContext(t)
hc, err := prepareHandlerContext()
require.NoError(t, err)
bktName := "bucket"
cnrID, cnr, err := hc.prepareContainer(bktName, acl.PublicRWExtended)
@ -236,25 +218,14 @@ func TestBasic(t *testing.T) {
err = json.Unmarshal(r.Response.Body(), &putRes)
require.NoError(t, err)
hc.cfg.additionalFilenameSearch = true
obj := hc.frostfs.objects[putRes.ContainerID+"/"+putRes.ObjectID]
fileName := prepareObjectAttributes(object.AttributeFileName, objFileName)
filePath := prepareObjectAttributes(object.AttributeFilePath, objFilePath)
obj.SetAttributes(append(obj.Attributes(), fileName)...)
obj.SetAttributes(append(obj.Attributes(), filePath)...)
attr := prepareObjectAttributes(object.AttributeFilePath, objFileName)
obj.SetAttributes(append(obj.Attributes(), attr)...)
t.Run("get", func(t *testing.T) {
r = prepareGetRequest(ctx, cnrID.EncodeToString(), putRes.ObjectID)
hc.Handler().DownloadByAddressOrBucketName(r)
require.Equal(t, content, string(r.Response.Body()))
r = prepareGetRequest(ctx, cnrID.EncodeToString(), objFilePath)
hc.Handler().DownloadByAddressOrBucketName(r)
require.Equal(t, content, string(r.Response.Body()))
r = prepareGetRequest(ctx, cnrID.EncodeToString(), objFileName)
hc.Handler().DownloadByAddressOrBucketName(r)
require.Equal(t, content, string(r.Response.Body()))
})
t.Run("head", func(t *testing.T) {
@ -262,16 +233,6 @@ func TestBasic(t *testing.T) {
hc.Handler().HeadByAddressOrBucketName(r)
require.Equal(t, putRes.ObjectID, string(r.Response.Header.Peek(hdrObjectID)))
require.Equal(t, putRes.ContainerID, string(r.Response.Header.Peek(hdrContainerID)))
r = prepareGetRequest(ctx, cnrID.EncodeToString(), objFilePath)
hc.Handler().HeadByAddressOrBucketName(r)
require.Equal(t, putRes.ObjectID, string(r.Response.Header.Peek(hdrObjectID)))
require.Equal(t, putRes.ContainerID, string(r.Response.Header.Peek(hdrContainerID)))
r = prepareGetRequest(ctx, cnrID.EncodeToString(), objFileName)
hc.Handler().HeadByAddressOrBucketName(r)
require.Equal(t, putRes.ObjectID, string(r.Response.Header.Peek(hdrObjectID)))
require.Equal(t, putRes.ContainerID, string(r.Response.Header.Peek(hdrContainerID)))
})
t.Run("get by attribute", func(t *testing.T) {
@ -279,13 +240,9 @@ func TestBasic(t *testing.T) {
hc.Handler().DownloadByAttribute(r)
require.Equal(t, content, string(r.Response.Body()))
r = prepareGetByAttributeRequest(ctx, bktName, attrFileName, objFilePath)
r = prepareGetByAttributeRequest(ctx, bktName, attrFileName, "/"+objFileName)
hc.Handler().DownloadByAttribute(r)
require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode())
r = prepareGetByAttributeRequest(ctx, bktName, attrFilePath, objFileName)
hc.Handler().DownloadByAttribute(r)
require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode())
require.Equal(t, content, string(r.Response.Body()))
})
t.Run("head by attribute", func(t *testing.T) {
@ -294,13 +251,10 @@ func TestBasic(t *testing.T) {
require.Equal(t, putRes.ObjectID, string(r.Response.Header.Peek(hdrObjectID)))
require.Equal(t, putRes.ContainerID, string(r.Response.Header.Peek(hdrContainerID)))
r = prepareGetByAttributeRequest(ctx, bktName, attrFileName, objFilePath)
r = prepareGetByAttributeRequest(ctx, bktName, attrFileName, "/"+objFileName)
hc.Handler().HeadByAttribute(r)
require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode())
r = prepareGetByAttributeRequest(ctx, bktName, attrFilePath, objFileName)
hc.Handler().HeadByAttribute(r)
require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode())
require.Equal(t, putRes.ObjectID, string(r.Response.Header.Peek(hdrObjectID)))
require.Equal(t, putRes.ContainerID, string(r.Response.Header.Peek(hdrContainerID)))
})
t.Run("zip", func(t *testing.T) {
@ -311,7 +265,7 @@ func TestBasic(t *testing.T) {
zipReader, err := zip.NewReader(readerAt, int64(len(r.Response.Body())))
require.NoError(t, err)
require.Len(t, zipReader.File, 1)
require.Equal(t, objFilePath, zipReader.File[0].Name)
require.Equal(t, objFileName, zipReader.File[0].Name)
f, err := zipReader.File[0].Open()
require.NoError(t, err)
defer func() {
@ -324,281 +278,176 @@ func TestBasic(t *testing.T) {
})
}
func prepareHandlerAndBucket(t *testing.T) (*handlerContext, cid.ID) {
hc := prepareHandlerContext(t)
func TestFindObjectByAttribute(t *testing.T) {
hc, err := prepareHandlerContext()
require.NoError(t, err)
hc.cfg.additionalSearch = true
bktName := "bucket"
cnrID, cnr, err := hc.prepareContainer(bktName, acl.PublicRWExtended)
require.NoError(t, err)
hc.frostfs.SetContainer(cnrID, cnr)
return hc, cnrID
ctx := context.Background()
ctx = middleware.SetNamespace(ctx, "")
content := "hello"
r, err := prepareUploadRequest(ctx, cnrID.EncodeToString(), content)
require.NoError(t, err)
hc.Handler().Upload(r)
require.Equal(t, r.Response.StatusCode(), http.StatusOK)
var putRes putResponse
err = json.Unmarshal(r.Response.Body(), &putRes)
require.NoError(t, err)
testAttrVal1 := "/folder/cat.jpg"
testAttrVal2 := "cat.jpg"
testAttrVal3 := "test-attr-val3"
for _, tc := range []struct {
name string
firstAttr object.Attribute
secondAttr object.Attribute
reqAttrKey string
reqAttrValue string
err string
additionalSearch bool
}{
{
name: "success search by FileName",
firstAttr: prepareObjectAttributes(attrFilePath, testAttrVal1),
secondAttr: prepareObjectAttributes(attrFileName, testAttrVal2),
reqAttrKey: attrFileName,
reqAttrValue: testAttrVal2,
additionalSearch: false,
},
{
name: "failed search by FileName",
firstAttr: prepareObjectAttributes(attrFilePath, testAttrVal1),
secondAttr: prepareObjectAttributes(attrFileName, testAttrVal2),
reqAttrKey: attrFileName,
reqAttrValue: testAttrVal3,
err: "not found",
additionalSearch: false,
},
{
name: "success search by FilePath (with additional search)",
firstAttr: prepareObjectAttributes(attrFilePath, testAttrVal1),
secondAttr: prepareObjectAttributes(attrFileName, testAttrVal2),
reqAttrKey: attrFilePath,
reqAttrValue: testAttrVal2,
additionalSearch: true,
},
{
name: "failed by FilePath (with additional search)",
firstAttr: prepareObjectAttributes(attrFilePath, testAttrVal1),
secondAttr: prepareObjectAttributes(attrFileName, testAttrVal2),
reqAttrKey: attrFilePath,
reqAttrValue: testAttrVal3,
err: "not found",
additionalSearch: true,
},
{
name: "success search by FilePath with leading slash (with additional search)",
firstAttr: prepareObjectAttributes(attrFilePath, testAttrVal1),
secondAttr: prepareObjectAttributes(attrFileName, testAttrVal2),
reqAttrKey: attrFilePath,
reqAttrValue: "/cat.jpg",
additionalSearch: true,
},
} {
t.Run(tc.name, func(t *testing.T) {
obj := hc.frostfs.objects[putRes.ContainerID+"/"+putRes.ObjectID]
obj.SetAttributes(tc.firstAttr, tc.secondAttr)
hc.cfg.additionalSearch = tc.additionalSearch
objID, err := hc.Handler().findObjectByAttribute(ctx, hc.Handler().log, cnrID, tc.reqAttrKey, tc.reqAttrValue)
if tc.err != "" {
require.Error(t, err)
require.Contains(t, err.Error(), tc.err)
return
}
require.NoError(t, err)
require.Equal(t, putRes.ObjectID, objID.EncodeToString())
})
}
}
func TestGetObjectWithFallback(t *testing.T) {
ctx := middleware.SetNamespace(context.Background(), "")
func TestNeedSearchByFileName(t *testing.T) {
hc, err := prepareHandlerContext()
require.NoError(t, err)
t.Run("by oid", func(t *testing.T) {
hc, cnrID := prepareHandlerAndBucket(t)
for _, tc := range []struct {
name string
attrKey string
attrVal string
additionalSearch bool
expected bool
}{
{
name: "need search - not contains slash",
attrKey: attrFilePath,
attrVal: "cat.png",
additionalSearch: true,
expected: true,
},
{
name: "need search - single lead slash",
attrKey: attrFilePath,
attrVal: "/cat.png",
additionalSearch: true,
expected: true,
},
{
name: "don't need search - single slash but not lead",
attrKey: attrFilePath,
attrVal: "cats/cat.png",
additionalSearch: true,
expected: false,
},
{
name: "don't need search - more one slash",
attrKey: attrFilePath,
attrVal: "/cats/cat.png",
additionalSearch: true,
expected: false,
},
{
name: "don't need search - incorrect attribute key",
attrKey: attrFileName,
attrVal: "cat.png",
additionalSearch: true,
expected: false,
},
{
name: "don't need search - additional search disabled",
attrKey: attrFilePath,
attrVal: "cat.png",
additionalSearch: false,
expected: false,
},
} {
t.Run(tc.name, func(t *testing.T) {
hc.cfg.additionalSearch = tc.additionalSearch
obj1ID := oidtest.ID()
obj1 := object.New()
obj1.SetID(obj1ID)
obj1.SetPayload([]byte("obj1"))
hc.frostfs.objects[cnrID.String()+"/"+obj1ID.String()] = obj1
r := prepareGetRequest(ctx, cnrID.EncodeToString(), obj1ID.String())
hc.Handler().DownloadByAddressOrBucketName(r)
require.Equal(t, string(obj1.Payload()), string(r.Response.Body()))
})
t.Run("by filepath as it is", func(t *testing.T) {
hc, cnrID := prepareHandlerAndBucket(t)
obj1ID := oidtest.ID()
obj1 := object.New()
obj1.SetID(obj1ID)
obj1.SetPayload([]byte("obj1"))
obj1.SetAttributes(prepareObjectAttributes(object.AttributeFilePath, "filepath/obj1"))
hc.frostfs.objects[cnrID.String()+"/"+obj1ID.String()] = obj1
obj2ID := oidtest.ID()
obj2 := object.New()
obj2.SetID(obj2ID)
obj2.SetPayload([]byte("obj2"))
obj2.SetAttributes(prepareObjectAttributes(object.AttributeFilePath, "/filepath/obj2"))
hc.frostfs.objects[cnrID.String()+"/"+obj2ID.String()] = obj2
r := prepareGetRequest(ctx, cnrID.EncodeToString(), "filepath/obj1")
hc.Handler().DownloadByAddressOrBucketName(r)
require.Equal(t, string(obj1.Payload()), string(r.Response.Body()))
r = prepareGetRequest(ctx, cnrID.EncodeToString(), "/filepath/obj2")
hc.Handler().DownloadByAddressOrBucketName(r)
require.Equal(t, string(obj2.Payload()), string(r.Response.Body()))
})
t.Run("by filepath slash fallback", func(t *testing.T) {
hc, cnrID := prepareHandlerAndBucket(t)
obj1ID := oidtest.ID()
obj1 := object.New()
obj1.SetID(obj1ID)
obj1.SetPayload([]byte("obj1"))
obj1.SetAttributes(prepareObjectAttributes(object.AttributeFilePath, "filepath/obj1"))
hc.frostfs.objects[cnrID.String()+"/"+obj1ID.String()] = obj1
r := prepareGetRequest(ctx, cnrID.EncodeToString(), "/filepath/obj1")
hc.Handler().DownloadByAddressOrBucketName(r)
require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode())
hc.cfg.additionalSlashSearch = true
r = prepareGetRequest(ctx, cnrID.EncodeToString(), "/filepath/obj1")
hc.Handler().DownloadByAddressOrBucketName(r)
require.Equal(t, string(obj1.Payload()), string(r.Response.Body()))
})
t.Run("by filename fallback", func(t *testing.T) {
hc, cnrID := prepareHandlerAndBucket(t)
obj1ID := oidtest.ID()
obj1 := object.New()
obj1.SetID(obj1ID)
obj1.SetPayload([]byte("obj1"))
obj1.SetAttributes(prepareObjectAttributes(object.AttributeFileName, "filename/obj1"))
hc.frostfs.objects[cnrID.String()+"/"+obj1ID.String()] = obj1
r := prepareGetRequest(ctx, cnrID.EncodeToString(), "filename/obj1")
hc.Handler().DownloadByAddressOrBucketName(r)
require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode())
hc.cfg.additionalFilenameSearch = true
r = prepareGetRequest(ctx, cnrID.EncodeToString(), "filename/obj1")
hc.Handler().DownloadByAddressOrBucketName(r)
require.Equal(t, string(obj1.Payload()), string(r.Response.Body()))
})
t.Run("by filename and slash fallback", func(t *testing.T) {
hc, cnrID := prepareHandlerAndBucket(t)
obj1ID := oidtest.ID()
obj1 := object.New()
obj1.SetID(obj1ID)
obj1.SetPayload([]byte("obj1"))
obj1.SetAttributes(prepareObjectAttributes(object.AttributeFileName, "filename/obj1"))
hc.frostfs.objects[cnrID.String()+"/"+obj1ID.String()] = obj1
r := prepareGetRequest(ctx, cnrID.EncodeToString(), "/filename/obj1")
hc.Handler().DownloadByAddressOrBucketName(r)
require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode())
hc.cfg.additionalFilenameSearch = true
r = prepareGetRequest(ctx, cnrID.EncodeToString(), "/filename/obj1")
hc.Handler().DownloadByAddressOrBucketName(r)
require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode())
hc.cfg.additionalSlashSearch = true
r = prepareGetRequest(ctx, cnrID.EncodeToString(), "/filename/obj1")
hc.Handler().DownloadByAddressOrBucketName(r)
require.Equal(t, string(obj1.Payload()), string(r.Response.Body()))
})
t.Run("index fallback", func(t *testing.T) {
hc, cnrID := prepareHandlerAndBucket(t)
obj1ID := oidtest.ID()
obj1 := object.New()
obj1.SetID(obj1ID)
obj1.SetPayload([]byte("obj1"))
obj1.SetAttributes(prepareObjectAttributes(object.AttributeFilePath, "filepath/index.html"))
hc.frostfs.objects[cnrID.String()+"/"+obj1ID.String()] = obj1
r := prepareGetRequest(ctx, cnrID.EncodeToString(), "filepath/")
hc.Handler().DownloadByAddressOrBucketName(r)
require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode())
r = prepareGetRequest(ctx, cnrID.EncodeToString(), "filepath")
hc.Handler().DownloadByAddressOrBucketName(r)
require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode())
hc.cfg.indexEnabled = true
r = prepareGetRequest(ctx, cnrID.EncodeToString(), "filepath")
hc.Handler().DownloadByAddressOrBucketName(r)
require.Equal(t, string(obj1.Payload()), string(r.Response.Body()))
r = prepareGetRequest(ctx, cnrID.EncodeToString(), "filepath/")
hc.Handler().DownloadByAddressOrBucketName(r)
require.Equal(t, string(obj1.Payload()), string(r.Response.Body()))
})
t.Run("index filename fallback", func(t *testing.T) {
hc, cnrID := prepareHandlerAndBucket(t)
obj1ID := oidtest.ID()
obj1 := object.New()
obj1.SetID(obj1ID)
obj1.SetPayload([]byte("obj1"))
obj1.SetAttributes(prepareObjectAttributes(object.AttributeFileName, "filename/index.html"))
hc.frostfs.objects[cnrID.String()+"/"+obj1ID.String()] = obj1
r := prepareGetRequest(ctx, cnrID.EncodeToString(), "filename/")
hc.Handler().DownloadByAddressOrBucketName(r)
require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode())
r = prepareGetRequest(ctx, cnrID.EncodeToString(), "filename")
hc.Handler().DownloadByAddressOrBucketName(r)
require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode())
hc.cfg.indexEnabled = true
hc.cfg.additionalFilenameSearch = true
r = prepareGetRequest(ctx, cnrID.EncodeToString(), "filename")
hc.Handler().DownloadByAddressOrBucketName(r)
require.Equal(t, string(obj1.Payload()), string(r.Response.Body()))
r = prepareGetRequest(ctx, cnrID.EncodeToString(), "filename/")
hc.Handler().DownloadByAddressOrBucketName(r)
require.Equal(t, string(obj1.Payload()), string(r.Response.Body()))
})
res := hc.h.needSearchByFileName(tc.attrKey, tc.attrVal)
require.Equal(t, tc.expected, res)
})
}
}
func TestIndex(t *testing.T) {
ctx := middleware.SetNamespace(context.Background(), "")
func TestPrepareFileName(t *testing.T) {
fileName := "/cat.jpg"
expected := "cat.jpg"
actual := prepareFileName(fileName)
require.Equal(t, expected, actual)
t.Run("s3", func(t *testing.T) {
hc, cnrID := prepareHandlerAndBucket(t)
obj1ID := oidtest.ID()
obj1 := object.New()
obj1.SetID(obj1ID)
obj1.SetPayload([]byte("obj1"))
obj1.SetAttributes(prepareObjectAttributes(object.AttributeFilePath, "prefix/obj1"))
hc.frostfs.objects[cnrID.String()+"/"+obj1ID.String()] = obj1
hc.tree.containers[cnrID.String()] = containerInfo{
trees: map[string]map[string]nodeResponse{
"system": {"bucket-settings": nodeResponse{nodeID: 1}},
"version": {
"": nodeResponse{}, //root
"prefix": nodeResponse{
nodeID: 1,
meta: []nodeMeta{{key: tree.FileNameKey, value: []byte("prefix")}}},
"obj1": nodeResponse{
parentID: 1,
nodeID: 2,
meta: []nodeMeta{
{key: tree.FileNameKey, value: []byte("obj1")},
{key: "OID", value: []byte(obj1ID.String())},
},
},
},
},
}
r := prepareGetRequest(ctx, cnrID.EncodeToString(), "prefix/")
hc.Handler().DownloadByAddressOrBucketName(r)
require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode())
r = prepareGetRequest(ctx, cnrID.EncodeToString(), "prefix")
hc.Handler().DownloadByAddressOrBucketName(r)
require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode())
hc.cfg.indexEnabled = true
r = prepareGetRequest(ctx, cnrID.EncodeToString(), "prefix")
hc.Handler().DownloadByAddressOrBucketName(r)
require.Contains(t, string(r.Response.Body()), "Index of s3://bucket/prefix")
require.Contains(t, string(r.Response.Body()), obj1ID.String())
r = prepareGetRequest(ctx, cnrID.EncodeToString(), "prefix/")
hc.Handler().DownloadByAddressOrBucketName(r)
require.Contains(t, string(r.Response.Body()), "Index of s3://bucket/prefix")
require.Contains(t, string(r.Response.Body()), obj1ID.String())
r = prepareGetRequest(ctx, "bucket", "dummy")
hc.Handler().DownloadByAddressOrBucketName(r)
require.Contains(t, string(r.Response.Body()), "Index of s3://bucket/dummy")
})
t.Run("native", func(t *testing.T) {
hc, cnrID := prepareHandlerAndBucket(t)
obj1ID := oidtest.ID()
obj1 := object.New()
obj1.SetID(obj1ID)
obj1.SetPayload([]byte("obj1"))
obj1.SetAttributes(prepareObjectAttributes(object.AttributeFilePath, "prefix/obj1"))
hc.frostfs.objects[cnrID.String()+"/"+obj1ID.String()] = obj1
r := prepareGetRequest(ctx, cnrID.EncodeToString(), "prefix/")
hc.Handler().DownloadByAddressOrBucketName(r)
require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode())
r = prepareGetRequest(ctx, cnrID.EncodeToString(), "prefix")
hc.Handler().DownloadByAddressOrBucketName(r)
require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode())
hc.cfg.indexEnabled = true
r = prepareGetRequest(ctx, cnrID.EncodeToString(), "prefix")
hc.Handler().DownloadByAddressOrBucketName(r)
require.Contains(t, string(r.Response.Body()), "Index of frostfs://"+cnrID.String()+"/prefix")
require.Contains(t, string(r.Response.Body()), obj1ID.String())
r = prepareGetRequest(ctx, cnrID.EncodeToString(), "prefix/")
hc.Handler().DownloadByAddressOrBucketName(r)
require.Contains(t, string(r.Response.Body()), "Index of frostfs://"+cnrID.String()+"/prefix")
require.Contains(t, string(r.Response.Body()), obj1ID.String())
r = prepareGetRequest(ctx, cnrID.EncodeToString(), "dummy")
hc.Handler().DownloadByAddressOrBucketName(r)
require.Contains(t, string(r.Response.Body()), "Index of frostfs://"+cnrID.String()+"/dummy")
})
fileName = "cat.jpg"
actual = prepareFileName(fileName)
require.Equal(t, expected, actual)
}
func prepareUploadRequest(ctx context.Context, bucket, content string) (*fasthttp.RequestCtx, error) {
@ -616,25 +465,6 @@ func prepareGetRequest(ctx context.Context, bucket, objID string) *fasthttp.Requ
return r
}
func prepareCORSRequest(t *testing.T, bucket string, headers map[string]string) *fasthttp.RequestCtx {
ctx := context.Background()
ctx = middleware.SetNamespace(ctx, "")
r := new(fasthttp.RequestCtx)
r.SetUserValue("cid", bucket)
for k, v := range headers {
r.Request.Header.Set(k, v)
}
ctx, err := tokens.StoreBearerTokenAppCtx(ctx, r)
require.NoError(t, err)
utils.SetContextToRequest(ctx, r)
return r
}
func prepareGetByAttributeRequest(ctx context.Context, bucket, attrKey, attrVal string) *fasthttp.RequestCtx {
r := new(fasthttp.RequestCtx)
utils.SetContextToRequest(ctx, r)
@ -663,7 +493,6 @@ const (
keyAttr = "User-Attribute"
valAttr = "user value"
objFileName = "newFile.txt"
objFilePath = "/newFile.txt"
)
func fillMultipartBody(r *fasthttp.RequestCtx, content string) error {

View file

@ -5,12 +5,11 @@ import (
"errors"
"io"
"net/http"
"net/url"
"strconv"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/layer"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@ -28,7 +27,7 @@ const (
hdrContainerID = "X-Container-Id"
)
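// headObject fetches the object header from FrostFS, maps its attributes and IDs to response headers and detects the content type from a payload range.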
func (h *Handler) headObject(ctx context.Context, req *fasthttp.RequestCtx, objectAddress oid.Address) {
func (h *Handler) headObject(ctx context.Context, req request, objectAddress oid.Address) {
var start = time.Now()
btoken := bearerToken(ctx)
@ -42,7 +41,7 @@ func (h *Handler) headObject(ctx context.Context, req *fasthttp.RequestCtx, obje
obj, err := h.frostfs.HeadObject(ctx, prm)
if err != nil {
h.logAndSendError(ctx, req, logs.FailedToHeadObject, err, zap.Stringer("elapsed", time.Since(start)))
req.handleFrostFSErr(err, start)
return
}
@ -66,7 +65,7 @@ func (h *Handler) headObject(ctx context.Context, req *fasthttp.RequestCtx, obje
case object.AttributeTimestamp:
value, err := strconv.ParseInt(val, 10, 64)
if err != nil {
h.reqLogger(ctx).Info(logs.CouldntParseCreationDate,
req.log.Info(logs.CouldntParseCreationDate,
zap.String("key", key),
zap.String("val", val),
zap.Error(err),
@ -101,7 +100,7 @@ func (h *Handler) headObject(ctx context.Context, req *fasthttp.RequestCtx, obje
return h.frostfs.RangeObject(ctx, prmRange)
}, filename)
if err != nil && err != io.EOF {
h.logAndSendError(ctx, req, logs.FailedToDetectContentTypeFromPayload, err, zap.Stringer("elapsed", time.Since(start)))
req.handleFrostFSErr(err, start)
return
}
}
@ -117,77 +116,48 @@ func idsToResponse(resp *fasthttp.Response, obj *object.Object) {
}
// HeadByAddressOrBucketName handles head requests using simple cid/oid or bucketname/key format.
func (h *Handler) HeadByAddressOrBucketName(req *fasthttp.RequestCtx) {
ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(req), "handler.HeadByAddressOrBucketName")
func (h *Handler) HeadByAddressOrBucketName(c *fasthttp.RequestCtx) {
ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(c), "handler.HeadByAddressOrBucketName")
defer span.End()
cidParam, _ := req.UserValue("cid").(string)
oidParam, _ := req.UserValue("oid").(string)
cidParam, _ := c.UserValue("cid").(string)
oidParam, _ := c.UserValue("oid").(string)
ctx = utils.SetReqLog(ctx, h.reqLogger(ctx).With(
log := utils.GetReqLogOrDefault(ctx, h.log).With(
zap.String("cid", cidParam),
zap.String("oid", oidParam),
))
)
path, err := url.QueryUnescape(oidParam)
bktInfo, err := h.getBucketInfo(ctx, cidParam, log)
if err != nil {
h.logAndSendError(ctx, req, logs.FailedToUnescapePath, err)
logAndSendBucketError(c, log, err)
return
}
bktInfo, err := h.getBucketInfo(ctx, cidParam)
if err != nil {
h.logAndSendError(ctx, req, logs.FailedToGetBucketInfo, err)
return
}
checkS3Err := h.tree.CheckSettingsNodeExists(ctx, bktInfo)
if checkS3Err != nil && !errors.Is(checkS3Err, tree.ErrNodeNotFound) {
h.logAndSendError(ctx, req, logs.FailedToCheckIfSettingsNodeExist, checkS3Err)
if checkS3Err != nil && !errors.Is(checkS3Err, layer.ErrNodeNotFound) {
log.Error(logs.FailedToCheckIfSettingsNodeExist, zap.String("cid", bktInfo.CID.String()),
zap.Error(checkS3Err), logs.TagField(logs.TagExternalStorageTree))
logAndSendBucketError(c, log, checkS3Err)
return
}
prm := MiddlewareParam{
Context: ctx,
Request: req,
BktInfo: bktInfo,
Path: path,
}
indexPageEnabled := h.config.IndexPageEnabled()
req := newRequest(c, log)
var objID oid.ID
if checkS3Err == nil {
run(prm, h.errorMiddleware(logs.ObjectNotFound, tree.ErrNodeNotFound),
Middleware{Func: h.byS3PathMiddleware(h.headObject, noopFormer), Enabled: true},
Middleware{Func: h.byS3PathMiddleware(h.headObject, indexFormer), Enabled: indexPageEnabled},
)
h.byS3Path(ctx, req, bktInfo.CID, oidParam, h.headObject)
} else if err = objID.DecodeString(oidParam); err == nil {
h.byNativeAddress(ctx, req, bktInfo.CID, objID, h.headObject)
} else {
slashFallbackEnabled := h.config.EnableFilepathSlashFallback()
fileNameFallbackEnabled := h.config.EnableFilepathFallback()
run(prm, h.errorMiddleware(logs.ObjectNotFound, ErrObjectNotFound),
Middleware{Func: h.byAddressMiddleware(h.headObject), Enabled: true},
Middleware{Func: h.byAttributeSearchMiddleware(h.headObject, object.AttributeFilePath, noopFormer), Enabled: true},
Middleware{Func: h.byAttributeSearchMiddleware(h.headObject, object.AttributeFilePath, reverseLeadingSlash), Enabled: slashFallbackEnabled},
Middleware{Func: h.byAttributeSearchMiddleware(h.headObject, object.AttributeFileName, noopFormer), Enabled: fileNameFallbackEnabled},
Middleware{Func: h.byAttributeSearchMiddleware(h.headObject, object.AttributeFileName, reverseLeadingSlash), Enabled: fileNameFallbackEnabled && slashFallbackEnabled},
Middleware{Func: h.byAttributeSearchMiddleware(h.headObject, object.AttributeFilePath, indexFormer), Enabled: indexPageEnabled},
Middleware{Func: h.byAttributeSearchMiddleware(h.headObject, object.AttributeFileName, indexFormer), Enabled: fileNameFallbackEnabled && indexPageEnabled},
)
logAndSendBucketError(c, log, checkS3Err)
}
}
// HeadByAttribute handles attribute-based head requests.
func (h *Handler) HeadByAttribute(req *fasthttp.RequestCtx) {
ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(req), "handler.HeadByAttribute")
func (h *Handler) HeadByAttribute(c *fasthttp.RequestCtx) {
ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(c), "handler.HeadByAttribute")
defer span.End()
utils.SetContextToRequest(ctx, c)
h.byAttribute(ctx, req, h.headObject)
}
func (h *Handler) errorMiddleware(msg string, err error) MiddlewareFunc {
return func(prm MiddlewareParam) bool {
h.logAndSendError(prm.Context, prm.Request, msg, err)
return false
}
h.byAttribute(c, h.headObject)
}

View file

@ -1,7 +1,6 @@
package handler
import (
"context"
"errors"
"io"
"strconv"
@ -54,7 +53,7 @@ func fetchMultipartFile(l *zap.Logger, r io.Reader, boundary string) (MultipartF
}
// getPayload returns the initial payload if the object is not multipart; otherwise it composes a new reader over the parts data.
func (h *Handler) getPayload(ctx context.Context, p getMultiobjectBodyParams) (io.ReadCloser, uint64, error) {
func (h *Handler) getPayload(p getMultiobjectBodyParams) (io.ReadCloser, uint64, error) {
cid, ok := p.obj.Header.ContainerID()
if !ok {
return nil, 0, errors.New("no container id set")
@ -67,6 +66,7 @@ func (h *Handler) getPayload(ctx context.Context, p getMultiobjectBodyParams) (i
if err != nil {
return nil, 0, err
}
ctx := p.req.RequestCtx
params := PrmInitMultiObjectReader{
Addr: newAddress(cid, oid),
Bearer: bearerToken(ctx),

View file

@ -60,7 +60,12 @@ func BenchmarkAll(b *testing.B) {
func defaultMultipart(filename string) error {
r, bound := multipartFile(filename)
file, err := fetchMultipartFileDefault(zap.NewNop(), r, bound)
logger, err := zap.NewProduction()
if err != nil {
return err
}
file, err := fetchMultipartFileDefault(logger, r, bound)
if err != nil {
return err
}
@ -82,7 +87,12 @@ func TestName(t *testing.T) {
func customMultipart(filename string) error {
r, bound := multipartFile(filename)
file, err := fetchMultipartFile(zap.NewNop(), r, bound)
logger, err := zap.NewProduction()
if err != nil {
return err
}
file, err := fetchMultipartFile(logger, r, bound)
if err != nil {
return err
}

View file

@ -63,10 +63,11 @@ func readContentType(maxSize uint64, rInit func(uint64) (io.Reader, error), file
type getMultiobjectBodyParams struct {
obj *Object
req request
strSize string
}
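// receiveFile downloads the object from FrostFS, copies its attributes into response headers (IDs, timestamp, content disposition, content type) and streams the payload to the client.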
func (h *Handler) receiveFile(ctx context.Context, req *fasthttp.RequestCtx, objAddress oid.Address) {
func (h *Handler) receiveFile(ctx context.Context, req request, objAddress oid.Address) {
var (
shouldDownload = req.QueryArgs().GetBool("download")
start = time.Now()
@ -84,12 +85,12 @@ func (h *Handler) receiveFile(ctx context.Context, req *fasthttp.RequestCtx, obj
rObj, err := h.frostfs.GetObject(ctx, prm)
if err != nil {
h.logAndSendError(ctx, req, logs.FailedToGetObject, err, zap.Stringer("elapsed", time.Since(start)))
req.handleFrostFSErr(err, start)
return
}
// we can't close reader in this function, so how to do it?
setIDs(req, rObj.Header)
req.setIDs(rObj.Header)
payload := rObj.Payload
payloadSize := rObj.Header.PayloadSize()
for _, attr := range rObj.Header.Attributes() {
@ -106,8 +107,8 @@ func (h *Handler) receiveFile(ctx context.Context, req *fasthttp.RequestCtx, obj
case object.AttributeFileName:
filename = val
case object.AttributeTimestamp:
if err = setTimestamp(req, val); err != nil {
h.reqLogger(ctx).Error(logs.CouldntParseCreationDate,
if err = req.setTimestamp(val); err != nil {
req.log.Error(logs.CouldntParseCreationDate,
zap.String("val", val),
zap.Error(err),
logs.TagField(logs.TagDatapath))
@ -117,12 +118,13 @@ func (h *Handler) receiveFile(ctx context.Context, req *fasthttp.RequestCtx, obj
case object.AttributeFilePath:
filepath = val
case attributeMultipartObjectSize:
payload, payloadSize, err = h.getPayload(ctx, getMultiobjectBodyParams{
payload, payloadSize, err = h.getPayload(getMultiobjectBodyParams{
obj: rObj,
req: req,
strSize: val,
})
if err != nil {
h.logAndSendError(ctx, req, logs.FailedToGetObjectPayload, err, zap.Stringer("elapsed", time.Since(start)))
req.handleFrostFSErr(err, start)
return
}
}
@ -131,7 +133,7 @@ func (h *Handler) receiveFile(ctx context.Context, req *fasthttp.RequestCtx, obj
filename = filepath
}
setDisposition(req, shouldDownload, filename)
req.setDisposition(shouldDownload, filename)
req.Response.Header.Set(fasthttp.HeaderContentLength, strconv.FormatUint(payloadSize, 10))
@ -143,7 +145,8 @@ func (h *Handler) receiveFile(ctx context.Context, req *fasthttp.RequestCtx, obj
return payload, nil
}, filename)
if err != nil && err != io.EOF {
h.logAndSendError(ctx, req, logs.FailedToDetectContentTypeFromPayload, err, zap.Stringer("elapsed", time.Since(start)))
req.log.Error(logs.CouldNotDetectContentTypeFromPayload, zap.Error(err), logs.TagField(logs.TagDatapath))
ResponseError(req.RequestCtx, "could not detect Content-Type from payload: "+err.Error(), fasthttp.StatusBadRequest)
return
}
@ -162,7 +165,7 @@ func (h *Handler) receiveFile(ctx context.Context, req *fasthttp.RequestCtx, obj
req.Response.SetBodyStream(payload, int(payloadSize))
}
func setIDs(r *fasthttp.RequestCtx, obj object.Object) {
func (r *request) setIDs(obj object.Object) {
objID, _ := obj.ID()
cnrID, _ := obj.ContainerID()
r.Response.Header.Set(hdrObjectID, objID.String())
@ -170,7 +173,7 @@ func setIDs(r *fasthttp.RequestCtx, obj object.Object) {
r.Response.Header.Set(hdrContainerID, cnrID.String())
}
func setDisposition(r *fasthttp.RequestCtx, shouldDownload bool, filename string) {
func (r *request) setDisposition(shouldDownload bool, filename string) {
const (
inlineDisposition = "inline"
attachmentDisposition = "attachment"
@ -184,7 +187,7 @@ func setDisposition(r *fasthttp.RequestCtx, shouldDownload bool, filename string
r.Response.Header.Set(fasthttp.HeaderContentDisposition, dis+"; filename="+path.Base(filename))
}
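The hunk above keeps the disposition rule intact while moving it onto the request wrapper: the download query flag presumably switches from inline to attachment (the constants above suggest as much), and only the base name of the file goes into the header. A standalone sketch of that rule in Go; the helper name is illustrative, only the header format mirrors the diff, and it needs the standard "path" package.

// contentDisposition restates setDisposition's choice in isolation.
// Illustrative helper, not the gateway's own function.
func contentDisposition(shouldDownload bool, filename string) string {
	dis := "inline" // default: let the browser render the object
	if shouldDownload {
		dis = "attachment" // "?download=true" asks for a file download
	}
	return dis + "; filename=" + path.Base(filename)
}

For example, contentDisposition(true, "reports/2024.pdf") yields "attachment; filename=2024.pdf".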
func setTimestamp(r *fasthttp.RequestCtx, timestamp string) error {
func (r *request) setTimestamp(timestamp string) error {
value, err := strconv.ParseInt(timestamp, 10, 64)
if err != nil {
return err

View file

@ -1,141 +0,0 @@
package handler
import (
"context"
"errors"
"strings"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree"
)
type nodeMeta struct {
key string
value []byte
}
func (m nodeMeta) GetKey() string {
return m.key
}
func (m nodeMeta) GetValue() []byte {
return m.value
}
type nodeResponse struct {
meta []nodeMeta
nodeID uint64
parentID uint64
timestamp uint64
}
func (n nodeResponse) GetNodeID() []uint64 {
return []uint64{n.nodeID}
}
func (n nodeResponse) GetParentID() []uint64 {
return []uint64{n.parentID}
}
func (n nodeResponse) GetTimestamp() []uint64 {
return []uint64{n.timestamp}
}
func (n nodeResponse) GetMeta() []tree.Meta {
res := make([]tree.Meta, len(n.meta))
for i, value := range n.meta {
res[i] = value
}
return res
}
type containerInfo struct {
trees map[string]map[string]nodeResponse
}
type treeServiceClientMock struct {
containers map[string]containerInfo
}
func newTreeServiceClientMock() *treeServiceClientMock {
return &treeServiceClientMock{
containers: make(map[string]containerInfo),
}
}
func (t *treeServiceClientMock) GetNodes(_ context.Context, p *tree.GetNodesParams) ([]tree.NodeResponse, error) {
cnr, ok := t.containers[p.CnrID.EncodeToString()]
if !ok {
return nil, tree.ErrNodeNotFound
}
tr, ok := cnr.trees[p.TreeID]
if !ok {
return nil, tree.ErrNodeNotFound
}
node, ok := tr[strings.Join(p.Path, "/")]
if !ok {
return nil, tree.ErrNodeNotFound
}
return []tree.NodeResponse{node}, nil
}
func (t *treeServiceClientMock) GetSubTree(_ context.Context, bktInfo *data.BucketInfo, treeID string, rootID []uint64, depth uint32, _ bool) ([]tree.NodeResponse, error) {
cnr, ok := t.containers[bktInfo.CID.EncodeToString()]
if !ok {
return nil, tree.ErrNodeNotFound
}
tr, ok := cnr.trees[treeID]
if !ok {
return nil, tree.ErrNodeNotFound
}
if len(rootID) != 1 {
return nil, errors.New("invalid rootID")
}
var root *nodeResponse
for _, v := range tr {
if v.nodeID == rootID[0] {
root = &v
break
}
}
if root == nil {
return nil, tree.ErrNodeNotFound
}
var res []nodeResponse
if depth == 0 {
for _, v := range tr {
res = append(res, v)
}
} else {
res = append(res, *root)
depthIndex := 0
for i := uint32(0); i < depth-1; i++ {
childrenCount := 0
for _, v := range tr {
for j := range res[depthIndex:] {
if v.parentID == res[j].nodeID {
res = append(res, v)
childrenCount++
break
}
}
}
depthIndex = len(res) - childrenCount
}
}
res2 := make([]tree.NodeResponse, len(res))
for i := range res {
res2[i] = res[i]
}
return res2, nil
}

View file

@ -50,41 +50,44 @@ func (pr *putResponse) encode(w io.Writer) error {
}
// Upload handles multipart upload request.
func (h *Handler) Upload(req *fasthttp.RequestCtx) {
ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(req), "handler.Upload")
func (h *Handler) Upload(c *fasthttp.RequestCtx) {
ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(c), "handler.Upload")
defer span.End()
utils.SetContextToRequest(ctx, c)
var file MultipartFile
scid, _ := req.UserValue("cid").(string)
bodyStream := req.RequestBodyStream()
scid, _ := c.UserValue("cid").(string)
bodyStream := c.RequestBodyStream()
drainBuf := make([]byte, drainBufSize)
log := h.reqLogger(ctx)
ctx = utils.SetReqLog(ctx, log.With(zap.String("cid", scid)))
reqLog := utils.GetReqLogOrDefault(ctx, h.log)
log := reqLog.With(zap.String("cid", scid))
bktInfo, err := h.getBucketInfo(ctx, scid)
bktInfo, err := h.getBucketInfo(ctx, scid, log)
if err != nil {
h.logAndSendError(ctx, req, logs.FailedToGetBucketInfo, err)
logAndSendBucketError(c, log, err)
return
}
boundary := string(req.Request.Header.MultipartFormBoundary())
boundary := string(c.Request.Header.MultipartFormBoundary())
if file, err = fetchMultipartFile(log, bodyStream, boundary); err != nil {
h.logAndSendError(ctx, req, logs.CouldNotReceiveMultipartForm, err)
log.Error(logs.CouldNotReceiveMultipartForm, zap.Error(err), logs.TagField(logs.TagDatapath))
ResponseError(c, "could not receive multipart/form: "+err.Error(), fasthttp.StatusBadRequest)
return
}
filtered, err := filterHeaders(log, &req.Request.Header)
filtered, err := filterHeaders(log, &c.Request.Header)
if err != nil {
h.logAndSendError(ctx, req, logs.FailedToFilterHeaders, err)
log.Error(logs.FailedToFilterHeaders, zap.Error(err), logs.TagField(logs.TagDatapath))
ResponseError(c, err.Error(), fasthttp.StatusBadRequest)
return
}
if req.Request.Header.Peek(explodeArchiveHeader) != nil {
h.explodeArchive(ctx, req, bktInfo, file, filtered)
if c.Request.Header.Peek(explodeArchiveHeader) != nil {
h.explodeArchive(request{c, log}, bktInfo, file, filtered)
} else {
h.uploadSingleObject(ctx, req, bktInfo, file, filtered)
h.uploadSingleObject(request{c, log}, bktInfo, file, filtered)
}
// Multipart is multipart and thus can contain more than one part which
@ -101,39 +104,46 @@ func (h *Handler) Upload(req *fasthttp.RequestCtx) {
}
}
func (h *Handler) uploadSingleObject(ctx context.Context, req *fasthttp.RequestCtx, bkt *data.BucketInfo, file MultipartFile, filtered map[string]string) {
ctx, span := tracing.StartSpanFromContext(ctx, "handler.uploadSingleObject")
func (h *Handler) uploadSingleObject(req request, bkt *data.BucketInfo, file MultipartFile, filtered map[string]string) {
c, log := req.RequestCtx, req.log
ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(c), "handler.uploadSingleObject")
defer span.End()
utils.SetContextToRequest(ctx, c)
setIfNotExist(filtered, object.AttributeFileName, file.FileName())
attributes, err := h.extractAttributes(ctx, req, filtered)
attributes, err := h.extractAttributes(c, log, filtered)
if err != nil {
h.logAndSendError(ctx, req, logs.FailedToGetAttributes, err)
log.Error(logs.FailedToGetAttributes, zap.Error(err), logs.TagField(logs.TagDatapath))
ResponseError(c, "could not extract attributes: "+err.Error(), fasthttp.StatusBadRequest)
return
}
idObj, err := h.uploadObject(ctx, bkt, attributes, file)
idObj, err := h.uploadObject(c, bkt, attributes, file)
if err != nil {
h.logAndSendError(ctx, req, logs.FailedToUploadObject, err)
h.handlePutFrostFSErr(c, err, log)
return
}
h.reqLogger(ctx).Debug(logs.ObjectUploaded,
log.Debug(logs.ObjectUploaded,
zap.String("oid", idObj.EncodeToString()),
zap.String("FileName", file.FileName()),
logs.TagField(logs.TagExternalStorage),
)
addr := newAddress(bkt.CID, idObj)
req.Response.Header.SetContentType(jsonHeader)
c.Response.Header.SetContentType(jsonHeader)
// Try to return the response, otherwise, if something went wrong, throw an error.
if err = newPutResponse(addr).encode(req); err != nil {
h.logAndSendError(ctx, req, logs.CouldNotEncodeResponse, err)
if err = newPutResponse(addr).encode(c); err != nil {
log.Error(logs.CouldNotEncodeResponse, zap.Error(err), logs.TagField(logs.TagDatapath))
ResponseError(c, "could not encode response", fasthttp.StatusBadRequest)
return
}
}
func (h *Handler) uploadObject(ctx context.Context, bkt *data.BucketInfo, attrs []object.Attribute, file io.Reader) (oid.ID, error) {
func (h *Handler) uploadObject(c *fasthttp.RequestCtx, bkt *data.BucketInfo, attrs []object.Attribute, file io.Reader) (oid.ID, error) {
ctx := utils.GetContextFromRequest(c)
obj := object.New()
obj.SetContainerID(bkt.CID)
obj.SetOwnerID(*h.ownerID)
@ -158,18 +168,19 @@ func (h *Handler) uploadObject(ctx context.Context, bkt *data.BucketInfo, attrs
return idObj, nil
}
func (h *Handler) extractAttributes(ctx context.Context, req *fasthttp.RequestCtx, filtered map[string]string) ([]object.Attribute, error) {
func (h *Handler) extractAttributes(c *fasthttp.RequestCtx, log *zap.Logger, filtered map[string]string) ([]object.Attribute, error) {
ctx := utils.GetContextFromRequest(c)
now := time.Now()
if rawHeader := req.Request.Header.Peek(fasthttp.HeaderDate); rawHeader != nil {
if rawHeader := c.Request.Header.Peek(fasthttp.HeaderDate); rawHeader != nil {
if parsed, err := time.Parse(http.TimeFormat, string(rawHeader)); err != nil {
h.reqLogger(ctx).Warn(logs.CouldNotParseClientTime, zap.String("Date header", string(rawHeader)), zap.Error(err),
log.Warn(logs.CouldNotParseClientTime, zap.String("Date header", string(rawHeader)), zap.Error(err),
logs.TagField(logs.TagDatapath))
} else {
now = parsed
}
}
if err := utils.PrepareExpirationHeader(ctx, h.frostfs, filtered, now); err != nil {
h.reqLogger(ctx).Error(logs.CouldNotPrepareExpirationHeader, zap.Error(err), logs.TagField(logs.TagDatapath))
log.Error(logs.CouldNotPrepareExpirationHeader, zap.Error(err), logs.TagField(logs.TagDatapath))
return nil, err
}
attributes := make([]object.Attribute, 0, len(filtered))
@ -196,33 +207,38 @@ func newAttribute(key string, val string) object.Attribute {
// explodeArchive reads files from the archive and creates an object for each of them.
// Sets FilePath attribute with name from tar.Header.
func (h *Handler) explodeArchive(ctx context.Context, req *fasthttp.RequestCtx, bkt *data.BucketInfo, file io.ReadCloser, filtered map[string]string) {
ctx, span := tracing.StartSpanFromContext(ctx, "handler.explodeArchive")
func (h *Handler) explodeArchive(req request, bkt *data.BucketInfo, file io.ReadCloser, filtered map[string]string) {
c, log := req.RequestCtx, req.log
ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(c), "handler.explodeArchive")
defer span.End()
utils.SetContextToRequest(ctx, c)
// remove user attributes which vary for each file in archive
// to guarantee that they won't appear twice
delete(filtered, object.AttributeFileName)
delete(filtered, object.AttributeFilePath)
commonAttributes, err := h.extractAttributes(ctx, req, filtered)
commonAttributes, err := h.extractAttributes(c, log, filtered)
if err != nil {
h.logAndSendError(ctx, req, logs.FailedToGetAttributes, err)
log.Error(logs.FailedToGetAttributes, zap.Error(err), logs.TagField(logs.TagDatapath))
ResponseError(c, "could not extract attributes: "+err.Error(), fasthttp.StatusBadRequest)
return
}
attributes := commonAttributes
reader := file
if bytes.EqualFold(req.Request.Header.Peek(fasthttp.HeaderContentEncoding), []byte("gzip")) {
h.reqLogger(ctx).Debug(logs.GzipReaderSelected, logs.TagField(logs.TagDatapath))
if bytes.EqualFold(c.Request.Header.Peek(fasthttp.HeaderContentEncoding), []byte("gzip")) {
log.Debug(logs.GzipReaderSelected, logs.TagField(logs.TagDatapath))
gzipReader, err := gzip.NewReader(file)
if err != nil {
h.logAndSendError(ctx, req, logs.FailedToCreateGzipReader, err)
log.Error(logs.FailedToCreateGzipReader, zap.Error(err), logs.TagField(logs.TagDatapath))
ResponseError(c, "could read gzip file: "+err.Error(), fasthttp.StatusBadRequest)
return
}
defer func() {
if err := gzipReader.Close(); err != nil {
h.reqLogger(ctx).Warn(logs.FailedToCloseReader, zap.Error(err), logs.TagField(logs.TagDatapath))
log.Warn(logs.FailedToCloseReader, zap.Error(err), logs.TagField(logs.TagDatapath))
}
}()
reader = gzipReader
@ -234,7 +250,8 @@ func (h *Handler) explodeArchive(ctx context.Context, req *fasthttp.RequestCtx,
if errors.Is(err, io.EOF) {
break
} else if err != nil {
h.logAndSendError(ctx, req, logs.FailedToReadFileFromTar, err)
log.Error(logs.FailedToReadFileFromTar, zap.Error(err), logs.TagField(logs.TagDatapath))
ResponseError(c, "could not get next entry: "+err.Error(), fasthttp.StatusBadRequest)
return
}
@ -248,13 +265,13 @@ func (h *Handler) explodeArchive(ctx context.Context, req *fasthttp.RequestCtx,
attributes = append(attributes, newAttribute(object.AttributeFilePath, obj.Name))
attributes = append(attributes, newAttribute(object.AttributeFileName, fileName))
idObj, err := h.uploadObject(ctx, bkt, attributes, tarReader)
idObj, err := h.uploadObject(c, bkt, attributes, tarReader)
if err != nil {
h.logAndSendError(ctx, req, logs.FailedToUploadObject, err)
h.handlePutFrostFSErr(c, err, log)
return
}
h.reqLogger(ctx).Debug(logs.ObjectUploaded,
log.Debug(logs.ObjectUploaded,
zap.String("oid", idObj.EncodeToString()),
zap.String("FileName", fileName),
logs.TagField(logs.TagExternalStorage),
@ -262,6 +279,14 @@ func (h *Handler) explodeArchive(ctx context.Context, req *fasthttp.RequestCtx,
}
}
func (h *Handler) handlePutFrostFSErr(r *fasthttp.RequestCtx, err error, log *zap.Logger) {
statusCode, msg, additionalFields := formErrorResponse("could not store file in frostfs", err)
logFields := append([]zap.Field{zap.Error(err)}, additionalFields...)
log.Error(logs.CouldNotStoreFileInFrostfs, append(logFields, logs.TagField(logs.TagExternalStorage))...)
ResponseError(r, msg, statusCode)
}
func (h *Handler) fetchBearerToken(ctx context.Context) *bearer.Token {
if tkn, err := tokens.LoadBearerToken(ctx); err == nil && tkn != nil {
return tkn

View file

@ -5,12 +5,13 @@ import (
"errors"
"fmt"
"strings"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
sdkstatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@ -18,6 +19,30 @@ import (
"go.uber.org/zap"
)
type request struct {
*fasthttp.RequestCtx
log *zap.Logger
}
func newRequest(ctx *fasthttp.RequestCtx, log *zap.Logger) request {
return request{
RequestCtx: ctx,
log: log,
}
}
func (r *request) handleFrostFSErr(err error, start time.Time) {
logFields := []zap.Field{
zap.Stringer("elapsed", time.Since(start)),
zap.Error(err),
}
statusCode, msg, additionalFields := formErrorResponse("could not receive object", err)
logFields = append(logFields, additionalFields...)
r.log.Error(logs.CouldNotReceiveObject, append(logFields, logs.TagField(logs.TagExternalStorage))...)
ResponseError(r.RequestCtx, msg, statusCode)
}
func bearerToken(ctx context.Context) *bearer.Token {
if tkn, err := tokens.LoadBearerToken(ctx); err == nil {
return tkn
@ -59,16 +84,14 @@ func isValidValue(s string) bool {
return true
}
func (h *Handler) reqLogger(ctx context.Context) *zap.Logger {
return utils.GetReqLogOrDefault(ctx, h.log)
}
func logAndSendBucketError(c *fasthttp.RequestCtx, log *zap.Logger, err error) {
log.Error(logs.CouldNotGetBucket, zap.Error(err), logs.TagField(logs.TagDatapath))
func (h *Handler) logAndSendError(ctx context.Context, c *fasthttp.RequestCtx, msg string, err error, additional ...zap.Field) {
utils.GetReqLogOrDefault(ctx, h.log).Error(msg,
append([]zap.Field{zap.Error(err), logs.TagField(logs.TagDatapath)}, additional...)...)
msg, code := formErrorResponse(err)
ResponseError(c, msg, code)
if client.IsErrContainerNotFound(err) {
ResponseError(c, "Not Found", fasthttp.StatusNotFound)
return
}
ResponseError(c, "could not get bucket: "+err.Error(), fasthttp.StatusBadRequest)
}
func newAddress(cnr cid.ID, obj oid.ID) oid.Address {
@ -89,23 +112,31 @@ func ResponseError(r *fasthttp.RequestCtx, msg string, code int) {
r.Error(msg+"\n", code)
}
func formErrorResponse(err error) (string, int) {
func formErrorResponse(message string, err error) (int, string, []zap.Field) {
var (
msg string
statusCode int
logFields []zap.Field
)
st := new(sdkstatus.ObjectAccessDenied)
switch {
case errors.Is(err, ErrAccessDenied):
return fmt.Sprintf("Storage Access Denied:\n%v", err), fasthttp.StatusForbidden
case errors.Is(err, tree.ErrNodeAccessDenied):
return fmt.Sprintf("Tree Access Denied:\n%v", err), fasthttp.StatusForbidden
case errors.As(err, &st):
statusCode = fasthttp.StatusForbidden
reason := st.Reason()
msg = fmt.Sprintf("%s: %v: %s", message, err, reason)
logFields = append(logFields, zap.String("error_detail", reason))
case errors.Is(err, ErrQuotaLimitReached):
return fmt.Sprintf("Quota Reached:\n%v", err), fasthttp.StatusConflict
case errors.Is(err, ErrContainerNotFound):
return fmt.Sprintf("Container Not Found:\n%v", err), fasthttp.StatusNotFound
case errors.Is(err, ErrObjectNotFound):
return fmt.Sprintf("Object Not Found:\n%v", err), fasthttp.StatusNotFound
case errors.Is(err, tree.ErrNodeNotFound):
return fmt.Sprintf("Tree Node Not Found:\n%v", err), fasthttp.StatusNotFound
case errors.Is(err, ErrGatewayTimeout):
return fmt.Sprintf("Gateway Timeout:\n%v", err), fasthttp.StatusGatewayTimeout
statusCode = fasthttp.StatusConflict
msg = fmt.Sprintf("%s: %v", message, err)
case client.IsErrObjectNotFound(err) || client.IsErrContainerNotFound(err):
statusCode = fasthttp.StatusNotFound
msg = "Not Found"
default:
return fmt.Sprintf("Bad Request:\n%v", err), fasthttp.StatusBadRequest
statusCode = fasthttp.StatusBadRequest
msg = fmt.Sprintf("%s: %v", message, err)
}
return statusCode, msg, logFields
}
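Taken together, the request wrapper and formErrorResponse centralize the mapping from storage errors to HTTP responses: object access denied becomes 403, a reached quota 409, a missing object or container 404, and anything else 400, with extra detail carried in log fields. A minimal call-site sketch in Go, assuming it lives in the handler package with its usual imports (context, time, fasthttp, zap); the method name and prm value are hypothetical, while newRequest and handleFrostFSErr are the functions shown above.

// Hypothetical handler: wrap the fasthttp context with a request-scoped logger
// once, then let handleFrostFSErr turn a storage error into a status plus log fields.
func (h *Handler) getExample(ctx context.Context, c *fasthttp.RequestCtx, log *zap.Logger, prm PrmObjectGet) {
	r := newRequest(c, log)
	start := time.Now()
	obj, err := h.frostfs.GetObject(ctx, prm)
	if err != nil {
		// access denied -> 403, quota reached -> 409, not found -> 404, otherwise 400
		r.handleFrostFSErr(err, start)
		return
	}
	_ = obj // stream the payload into the response, as receiveFile does
}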

View file

@ -0,0 +1,24 @@
package layer
import (
"context"
"errors"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
)
// TreeService provides an interface for interacting with the tree service using s3 data models.
type TreeService interface {
GetLatestVersion(ctx context.Context, cnrID *cid.ID, objectName string) (*data.NodeVersion, error)
GetSubTreeByPrefix(ctx context.Context, bktInfo *data.BucketInfo, prefix string, latestOnly bool) ([]data.NodeInfo, string, error)
CheckSettingsNodeExists(ctx context.Context, bktInfo *data.BucketInfo) error
}
var (
// ErrNodeNotFound is returned from Tree service in case of not found error.
ErrNodeNotFound = errors.New("not found")
// ErrNodeAccessDenied is returned from Tree service in case of access denied error.
ErrNodeAccessDenied = errors.New("access denied")
)

View file

@ -72,71 +72,60 @@ const (
TagsLogConfigWontBeUpdated = "tags log config won't be updated"
FailedToReadIndexPageTemplate = "failed to read index page template"
SetCustomIndexPageTemplate = "set custom index page template"
CouldNotFetchCORSContainerInfo = "couldn't fetch CORS container info"
InitRPCClientFailed = "init rpc client failed"
InitContainerContractFailed = "init container contract failed"
FailedToResolveContractHash = "failed to resolve contract hash"
)
// Log messages with the "datapath" tag.
const (
CouldntParseCreationDate = "couldn't parse creation date"
FailedToDetectContentTypeFromPayload = "failed to detect Content-Type from payload"
FailedToAddObjectToArchive = "failed to add object to archive"
CloseZipWriter = "close zip writer"
IgnorePartEmptyFormName = "ignore part, empty form name"
IgnorePartEmptyFilename = "ignore part, empty filename"
CouldNotParseClientTime = "could not parse client time"
CouldNotPrepareExpirationHeader = "could not prepare expiration header"
CouldNotEncodeResponse = "could not encode response"
AddAttributeToResultObject = "add attribute to result object"
Request = "request"
CouldNotFetchAndStoreBearerToken = "could not fetch and store bearer token"
CouldntPutBucketIntoCache = "couldn't put bucket info into cache"
FailedToIterateOverResponse = "failed to iterate over search response"
InvalidCacheEntryType = "invalid cache entry type"
FailedToUnescapeQuery = "failed to unescape query"
CouldntCacheNetmap = "couldn't cache netmap"
FailedToCloseReader = "failed to close reader"
FailedToFilterHeaders = "failed to filter headers"
FailedToReadFileFromTar = "failed to read file from tar"
FailedToGetAttributes = "failed to get attributes"
CloseGzipWriter = "close gzip writer"
CloseTarWriter = "close tar writer"
FailedToCreateGzipReader = "failed to create gzip reader"
GzipReaderSelected = "gzip reader selected"
CouldNotReceiveMultipartForm = "could not receive multipart/form"
ObjectsNotFound = "objects not found"
IteratingOverSelectedObjectsFailed = "iterating over selected objects failed"
FailedToGetBucketInfo = "could not get bucket info"
FailedToSubmitTaskToPool = "failed to submit task to pool"
IndexWasDeleted = "index was deleted"
FailedToGetLatestVersionOfIndexObject = "failed to get latest version of index object"
FailedToCheckIfSettingsNodeExist = "failed to check if settings node exists"
FailedToListObjects = "failed to list objects"
FailedToParseTemplate = "failed to parse template"
FailedToExecuteTemplate = "failed to execute template"
FailedToUploadObject = "failed to upload object"
FailedToHeadObject = "failed to head object"
FailedToGetObject = "failed to get object"
FailedToGetObjectPayload = "failed to get object payload"
FailedToFindObjectByAttribute = "failed to find object by attribute"
FailedToUnescapePath = "failed to unescape path"
CouldNotGetCORSConfiguration = "could not get cors configuration"
EmptyOriginRequestHeader = "empty Origin request header"
EmptyAccessControlRequestMethodHeader = "empty Access-Control-Request-Method request header"
CORSRuleWasNotMatched = "cors rule was not matched"
CouldntCacheCors = "couldn't cache cors"
CouldntParseCreationDate = "couldn't parse creation date"
CouldNotDetectContentTypeFromPayload = "could not detect Content-Type from payload"
FailedToAddObjectToArchive = "failed to add object to archive"
CloseZipWriter = "close zip writer"
IgnorePartEmptyFormName = "ignore part, empty form name"
IgnorePartEmptyFilename = "ignore part, empty filename"
CouldNotParseClientTime = "could not parse client time"
CouldNotPrepareExpirationHeader = "could not prepare expiration header"
CouldNotEncodeResponse = "could not encode response"
AddAttributeToResultObject = "add attribute to result object"
Request = "request"
CouldNotFetchAndStoreBearerToken = "could not fetch and store bearer token"
CouldntPutBucketIntoCache = "couldn't put bucket info into cache"
FailedToIterateOverResponse = "failed to iterate over search response"
InvalidCacheEntryType = "invalid cache entry type"
FailedToUnescapeQuery = "failed to unescape query"
CouldntCacheNetmap = "couldn't cache netmap"
FailedToCloseReader = "failed to close reader"
FailedToFilterHeaders = "failed to filter headers"
FailedToReadFileFromTar = "failed to read file from tar"
FailedToGetAttributes = "failed to get attributes"
CloseGzipWriter = "close gzip writer"
CloseTarWriter = "close tar writer"
FailedToCreateGzipReader = "failed to create gzip reader"
GzipReaderSelected = "gzip reader selected"
CouldNotReceiveMultipartForm = "could not receive multipart/form"
ObjectsNotFound = "objects not found"
IteratingOverSelectedObjectsFailed = "iterating over selected objects failed"
CouldNotGetBucket = "could not get bucket"
CouldNotResolveContainerID = "could not resolve container id"
FailedToSumbitTaskToPool = "failed to submit task to pool"
)
// Log messages with the "external_storage" tag.
const (
ObjectNotFound = "object not found"
ReadObjectListFailed = "read object list failed"
ObjectUploaded = "object uploaded"
CouldNotReceiveObject = "could not receive object"
CouldNotSearchForObjects = "could not search for objects"
ObjectNotFound = "object not found"
ReadObjectListFailed = "read object list failed"
CouldNotStoreFileInFrostfs = "could not store file in frostfs"
FailedToHeadObject = "failed to head object"
ObjectNotFoundByFilePathTrySearchByFileName = "object not found by filePath attribute, try search by fileName"
FailedToGetObject = "failed to get object"
ObjectUploaded = "object uploaded"
CouldNotGetContainerInfo = "could not get container info"
)
// Log messages with the "external_storage_tree" tag.
const (
FoundSeveralSystemTreeNodes = "found several system tree nodes"
ObjectWasDeleted = "object was deleted"
FailedToGetLatestVersionOfObject = "failed to get latest version of object"
FailedToCheckIfSettingsNodeExist = "Failed to check if settings node exists"
)

View file

@ -1,73 +0,0 @@
package container
import (
"fmt"
"strings"
containercontract "git.frostfs.info/TrueCloudLab/frostfs-contract/container"
containerclient "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/container"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/rpcclient"
"github.com/nspcc-dev/neo-go/pkg/rpcclient/actor"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/wallet"
)
type Client struct {
contract *containerclient.Contract
}
type Config struct {
ContractHash util.Uint160
Key *keys.PrivateKey
RPCClient *rpcclient.Client
}
func New(cfg Config) (*Client, error) {
var err error
key := cfg.Key
if key == nil {
if key, err = keys.NewPrivateKey(); err != nil {
return nil, fmt.Errorf("generate anon private key for container contract: %w", err)
}
}
acc := wallet.NewAccountFromPrivateKey(key)
act, err := actor.NewSimple(cfg.RPCClient, acc)
if err != nil {
return nil, fmt.Errorf("create new actor: %w", err)
}
return &Client{
contract: containerclient.New(act, cfg.ContractHash),
}, nil
}
func (c *Client) GetContainerByID(cnrID cid.ID) (*container.Container, error) {
items, err := c.contract.Get(cnrID[:])
if err != nil {
if strings.Contains(err.Error(), containercontract.NotFoundError) {
return nil, fmt.Errorf("%w: %s", handler.ErrContainerNotFound, err)
}
return nil, err
}
if len(items) != 4 {
return nil, fmt.Errorf("unexpected container stack item count: %d", len(items))
}
cnrBytes, err := items[0].TryBytes()
if err != nil {
return nil, fmt.Errorf("could not get byte array of container: %w", err)
}
var cnr container.Container
if err = cnr.Unmarshal(cnrBytes); err != nil {
return nil, fmt.Errorf("can't unmarshal container: %w", err)
}
return &cnr, nil
}

View file

@ -1,34 +0,0 @@
package util
import (
"fmt"
"strings"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ns"
"github.com/nspcc-dev/neo-go/pkg/util"
)
// ResolveContractHash determines the contract hash by resolving an NNS name.
func ResolveContractHash(contractHash, rpcAddress string) (util.Uint160, error) {
if hash, err := util.Uint160DecodeStringLE(contractHash); err == nil {
return hash, nil
}
splitName := strings.Split(contractHash, ".")
if len(splitName) != 2 {
return util.Uint160{}, fmt.Errorf("invalid contract name: '%s'", contractHash)
}
var domain container.Domain
domain.SetName(splitName[0])
domain.SetZone(splitName[1])
var nns ns.NNS
if err := nns.Dial(rpcAddress); err != nil {
return util.Uint160{}, fmt.Errorf("dial nns %s: %w", rpcAddress, err)
}
defer nns.Close()
return nns.ResolveContractHash(domain)
}

View file

@ -10,7 +10,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
@ -46,7 +45,7 @@ func (x *FrostFS) Container(ctx context.Context, containerPrm handler.PrmContain
res, err := x.pool.GetContainer(ctx, prm)
if err != nil {
return nil, handleStorageError("read container via connection pool", err)
return nil, handleObjectError("read container via connection pool", err)
}
return &res, nil
@ -70,7 +69,7 @@ func (x *FrostFS) CreateObject(ctx context.Context, prm handler.PrmObjectCreate)
idObj, err := x.pool.PutObject(ctx, prmPut)
if err != nil {
return oid.ID{}, handleStorageError("save object via connection pool", err)
return oid.ID{}, handleObjectError("save object via connection pool", err)
}
return idObj.ObjectID, nil
}
@ -86,7 +85,7 @@ func (x payloadReader) Read(p []byte) (int, error) {
if err != nil && errors.Is(err, io.EOF) {
return n, err
}
return n, handleStorageError("read payload", err)
return n, handleObjectError("read payload", err)
}
// HeadObject implements frostfs.FrostFS interface method.
@ -103,7 +102,7 @@ func (x *FrostFS) HeadObject(ctx context.Context, prm handler.PrmObjectHead) (*o
res, err := x.pool.HeadObject(ctx, prmHead)
if err != nil {
return nil, handleStorageError("read object header via connection pool", err)
return nil, handleObjectError("read object header via connection pool", err)
}
return &res, nil
@ -123,7 +122,7 @@ func (x *FrostFS) GetObject(ctx context.Context, prm handler.PrmObjectGet) (*han
res, err := x.pool.GetObject(ctx, prmGet)
if err != nil {
return nil, handleStorageError("init full object reading via connection pool", err)
return nil, handleObjectError("init full object reading via connection pool", err)
}
return &handler.Object{
@ -148,7 +147,7 @@ func (x *FrostFS) RangeObject(ctx context.Context, prm handler.PrmObjectRange) (
res, err := x.pool.ObjectRange(ctx, prmRange)
if err != nil {
return nil, handleStorageError("init payload range reading via connection pool", err)
return nil, handleObjectError("init payload range reading via connection pool", err)
}
return payloadReader{&res}, nil
@ -169,7 +168,7 @@ func (x *FrostFS) SearchObjects(ctx context.Context, prm handler.PrmObjectSearch
res, err := x.pool.SearchObjects(ctx, prmSearch)
if err != nil {
return nil, handleStorageError("init object search via connection pool", err)
return nil, handleObjectError("init object search via connection pool", err)
}
return &res, nil
@ -203,7 +202,7 @@ func (x *FrostFS) NetmapSnapshot(ctx context.Context) (netmap.NetMap, error) {
netmapSnapshot, err := x.pool.NetMapSnapshot(ctx)
if err != nil {
return netmapSnapshot, handleStorageError("get netmap via connection pool", err)
return netmapSnapshot, handleObjectError("get netmap via connection pool", err)
}
return netmapSnapshot, nil
@ -227,7 +226,7 @@ func (x *ResolverFrostFS) SystemDNS(ctx context.Context) (string, error) {
networkInfo, err := x.pool.NetworkInfo(ctx)
if err != nil {
return "", handleStorageError("read network info via client", err)
return "", handleObjectError("read network info via client", err)
}
domain := networkInfo.RawNetworkParameter("SystemDNS")
@ -238,7 +237,7 @@ func (x *ResolverFrostFS) SystemDNS(ctx context.Context) (string, error) {
return string(domain), nil
}
func handleStorageError(msg string, err error) error {
func handleObjectError(msg string, err error) error {
if err == nil {
return nil
}
@ -251,14 +250,6 @@ func handleStorageError(msg string, err error) error {
return fmt.Errorf("%s: %w: %s", msg, handler.ErrAccessDenied, reason)
}
if client.IsErrContainerNotFound(err) {
return fmt.Errorf("%s: %w: %s", msg, handler.ErrContainerNotFound, err.Error())
}
if client.IsErrObjectNotFound(err) {
return fmt.Errorf("%s: %w: %s", msg, handler.ErrObjectNotFound, err.Error())
}
if IsTimeoutError(err) {
return fmt.Errorf("%s: %w: %s", msg, handler.ErrGatewayTimeout, err.Error())
}

View file

@ -18,7 +18,7 @@ func TestHandleObjectError(t *testing.T) {
msg := "some msg"
t.Run("nil error", func(t *testing.T) {
err := handleStorageError(msg, nil)
err := handleObjectError(msg, nil)
require.Nil(t, err)
})
@ -27,7 +27,7 @@ func TestHandleObjectError(t *testing.T) {
inputErr := new(apistatus.ObjectAccessDenied)
inputErr.WriteReason(reason)
err := handleStorageError(msg, inputErr)
err := handleObjectError(msg, inputErr)
require.ErrorIs(t, err, handler.ErrAccessDenied)
require.Contains(t, err.Error(), reason)
require.Contains(t, err.Error(), msg)
@ -38,7 +38,7 @@ func TestHandleObjectError(t *testing.T) {
inputErr := new(apistatus.ObjectAccessDenied)
inputErr.WriteReason(reason)
err := handleStorageError(msg, inputErr)
err := handleObjectError(msg, inputErr)
require.ErrorIs(t, err, handler.ErrQuotaLimitReached)
require.Contains(t, err.Error(), reason)
require.Contains(t, err.Error(), msg)
@ -47,7 +47,7 @@ func TestHandleObjectError(t *testing.T) {
t.Run("simple timeout", func(t *testing.T) {
inputErr := errors.New("timeout")
err := handleStorageError(msg, inputErr)
err := handleObjectError(msg, inputErr)
require.ErrorIs(t, err, handler.ErrGatewayTimeout)
require.Contains(t, err.Error(), inputErr.Error())
require.Contains(t, err.Error(), msg)
@ -58,7 +58,7 @@ func TestHandleObjectError(t *testing.T) {
defer cancel()
<-ctx.Done()
err := handleStorageError(msg, ctx.Err())
err := handleObjectError(msg, ctx.Err())
require.ErrorIs(t, err, handler.ErrGatewayTimeout)
require.Contains(t, err.Error(), ctx.Err().Error())
require.Contains(t, err.Error(), msg)
@ -67,7 +67,7 @@ func TestHandleObjectError(t *testing.T) {
t.Run("grpc deadline exceeded", func(t *testing.T) {
inputErr := fmt.Errorf("wrap grpc error: %w", status.Error(codes.DeadlineExceeded, "error"))
err := handleStorageError(msg, inputErr)
err := handleObjectError(msg, inputErr)
require.ErrorIs(t, err, handler.ErrGatewayTimeout)
require.Contains(t, err.Error(), inputErr.Error())
require.Contains(t, err.Error(), msg)
@ -76,7 +76,7 @@ func TestHandleObjectError(t *testing.T) {
t.Run("unknown error", func(t *testing.T) {
inputErr := errors.New("unknown error")
err := handleStorageError(msg, inputErr)
err := handleObjectError(msg, inputErr)
require.ErrorIs(t, err, inputErr)
require.Contains(t, err.Error(), msg)
})

View file

@ -63,7 +63,7 @@ func (w *PoolWrapper) GetNodes(ctx context.Context, prm *tree.GetNodesParams) ([
nodes, err := w.p.GetNodes(ctx, poolPrm)
if err != nil {
return nil, handleTreeError(err)
return nil, handleError(err)
}
res := make([]tree.NodeResponse, len(nodes))
@ -82,7 +82,7 @@ func getBearer(ctx context.Context) []byte {
return token.Marshal()
}
func handleTreeError(err error) error {
func handleError(err error) error {
if err == nil {
return nil
}
@ -122,7 +122,7 @@ func (w *PoolWrapper) GetSubTree(ctx context.Context, bktInfo *data.BucketInfo,
subTreeReader, err := w.p.GetSubTree(ctx, poolPrm)
if err != nil {
return nil, handleTreeError(err)
return nil, handleError(err)
}
var subtree []tree.NodeResponse
@ -133,7 +133,7 @@ func (w *PoolWrapper) GetSubTree(ctx context.Context, bktInfo *data.BucketInfo,
node, err = subTreeReader.Next()
}
if err != io.EOF {
return nil, handleTreeError(err)
return nil, handleError(err)
}
return subtree, nil

View file

@ -1,9 +1,11 @@
{{$container := .Container}}
{{ $prefix := trimPrefix .Prefix }}
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8"/>
<title>Index of {{.Protocol}}://{{$container}}/{{.Prefix}}</title>
<title>Index of {{.Protocol}}://{{$container}}
/{{if $prefix}}/{{$prefix}}/{{end}}</title>
<style>
.alert {
width: 80%;
@ -38,7 +40,7 @@
</style>
</head>
<body>
<h1>Index of {{.Protocol}}://{{$container}}/{{.Prefix}}</h1>
<h1>Index of {{.Protocol}}://{{$container}}/{{if $prefix}}{{$prefix}}/{{end}}</h1>
{{ if .HasErrors }}
<div class="alert">
Errors occurred while processing the request. Perhaps some objects are missing
@ -55,11 +57,11 @@
</tr>
</thead>
<tbody>
{{ $parentPrefix := getParent .Prefix }}
{{if $parentPrefix }}
{{ $trimmedPrefix := trimPrefix $prefix }}
{{if $trimmedPrefix }}
<tr>
<td>
⮐<a href="/get/{{$container}}{{ urlencode $parentPrefix }}/">..</a>
⮐<a href="/get/{{$container}}{{ urlencode $trimmedPrefix }}/">..</a>
</td>
<td></td>
<td></td>

View file

@ -6,7 +6,7 @@ import (
"fmt"
"sync"
v2container "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler/middleware"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ns"
@ -29,9 +29,14 @@ type FrostFS interface {
SystemDNS(context.Context) (string, error)
}
type Settings interface {
FormContainerZone(ns string) (zone string, isDefault bool)
}
type Config struct {
FrostFS FrostFS
RPCAddress string
Settings Settings
}
type ContainerResolver struct {
@ -41,15 +46,15 @@ type ContainerResolver struct {
type Resolver struct {
Name string
resolve func(context.Context, string, string) (*cid.ID, error)
resolve func(context.Context, string) (*cid.ID, error)
}
func (r *Resolver) SetResolveFunc(fn func(context.Context, string, string) (*cid.ID, error)) {
func (r *Resolver) SetResolveFunc(fn func(context.Context, string) (*cid.ID, error)) {
r.resolve = fn
}
func (r *Resolver) Resolve(ctx context.Context, zone, name string) (*cid.ID, error) {
return r.resolve(ctx, zone, name)
func (r *Resolver) Resolve(ctx context.Context, name string) (*cid.ID, error) {
return r.resolve(ctx, name)
}
func NewContainerResolver(resolverNames []string, cfg *Config) (*ContainerResolver, error) {
@ -76,13 +81,13 @@ func createResolvers(resolverNames []string, cfg *Config) ([]*Resolver, error) {
return resolvers, nil
}
func (r *ContainerResolver) Resolve(ctx context.Context, cnrZone, cnrName string) (*cid.ID, error) {
func (r *ContainerResolver) Resolve(ctx context.Context, cnrName string) (*cid.ID, error) {
r.mu.RLock()
defer r.mu.RUnlock()
var err error
for _, resolver := range r.resolvers {
cnrID, resolverErr := resolver.Resolve(ctx, cnrZone, cnrName)
cnrID, resolverErr := resolver.Resolve(ctx, cnrName)
if resolverErr != nil {
resolverErr = fmt.Errorf("%s: %w", resolver.Name, resolverErr)
if err == nil {
@ -136,25 +141,34 @@ func (r *ContainerResolver) equals(resolverNames []string) bool {
func newResolver(name string, cfg *Config) (*Resolver, error) {
switch name {
case DNSResolver:
return NewDNSResolver(cfg.FrostFS)
return NewDNSResolver(cfg.FrostFS, cfg.Settings)
case NNSResolver:
return NewNNSResolver(cfg.RPCAddress)
return NewNNSResolver(cfg.RPCAddress, cfg.Settings)
default:
return nil, fmt.Errorf("unknown resolver: %s", name)
}
}
func NewDNSResolver(frostFS FrostFS) (*Resolver, error) {
func NewDNSResolver(frostFS FrostFS, settings Settings) (*Resolver, error) {
if frostFS == nil {
return nil, fmt.Errorf("pool must not be nil for DNS resolver")
}
if settings == nil {
return nil, fmt.Errorf("resolver settings must not be nil for DNS resolver")
}
var dns ns.DNS
resolveFunc := func(ctx context.Context, zone, name string) (*cid.ID, error) {
resolveFunc := func(ctx context.Context, name string) (*cid.ID, error) {
var err error
if zone == v2container.SysAttributeZoneDefault {
namespace, err := middleware.GetNamespace(ctx)
if err != nil {
return nil, err
}
zone, isDefault := settings.FormContainerZone(namespace)
if isDefault {
zone, err = frostFS.SystemDNS(ctx)
if err != nil {
return nil, fmt.Errorf("read system DNS parameter of the FrostFS: %w", err)
@ -176,10 +190,13 @@ func NewDNSResolver(frostFS FrostFS) (*Resolver, error) {
}, nil
}
func NewNNSResolver(rpcAddress string) (*Resolver, error) {
func NewNNSResolver(rpcAddress string, settings Settings) (*Resolver, error) {
if rpcAddress == "" {
return nil, fmt.Errorf("rpc address must not be empty for NNS resolver")
}
if settings == nil {
return nil, fmt.Errorf("resolver settings must not be nil for NNS resolver")
}
var nns ns.NNS
@ -187,9 +204,16 @@ func NewNNSResolver(rpcAddress string) (*Resolver, error) {
return nil, fmt.Errorf("could not dial nns: %w", err)
}
resolveFunc := func(_ context.Context, zone, name string) (*cid.ID, error) {
resolveFunc := func(ctx context.Context, name string) (*cid.ID, error) {
var d container.Domain
d.SetName(name)
namespace, err := middleware.GetNamespace(ctx)
if err != nil {
return nil, err
}
zone, _ := settings.FormContainerZone(namespace)
d.SetZone(zone)
cnrID, err := nns.ResolveContainerDomain(d)
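For orientation, this is how the resolver pieces above fit together: construction takes a Config whose Settings supplies the container zone for a namespace, and Resolve now receives only the container name, with the namespace expected to travel in the request context. A minimal wiring sketch in Go, assuming it sits in the resolver package with its existing imports; the staticSettings stub and resolveExample helper are hypothetical, while NewContainerResolver, Config, DNSResolver, NNSResolver and Resolve are the names from this diff.

// staticSettings is a stand-in for the gateway's real Settings implementation:
// it reports the default "container" zone for the empty namespace and a
// namespaced zone otherwise (the exact rule is an assumption).
type staticSettings struct{}

func (staticSettings) FormContainerZone(ns string) (string, bool) {
	if ns == "" {
		return "container", true
	}
	return ns + ".container", false
}

func resolveExample(ctx context.Context, frostFS FrostFS, rpcAddress, name string) (*cid.ID, error) {
	resolver, err := NewContainerResolver([]string{DNSResolver, NNSResolver}, &Config{
		FrostFS:    frostFS,
		RPCAddress: rpcAddress,
		Settings:   staticSettings{},
	})
	if err != nil {
		return nil, err
	}
	// Resolve tries each configured resolver in order; the context is expected
	// to carry the request namespace (see middleware.GetNamespace above).
	return resolver.Resolve(ctx, name)
}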

View file

@ -7,18 +7,15 @@ import (
"strings"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/layer"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.uber.org/zap"
)
type (
Tree struct {
service ServiceClient
log *zap.Logger
}
// ServiceClient is a client to interact with tree service.
@ -51,10 +48,10 @@ type (
var (
// ErrNodeNotFound is returned from ServiceClient in case of not found error.
ErrNodeNotFound = errors.New("not found")
ErrNodeNotFound = layer.ErrNodeNotFound
// ErrNodeAccessDenied is returned from ServiceClient service in case of access denied error.
ErrNodeAccessDenied = errors.New("access denied")
ErrNodeAccessDenied = layer.ErrNodeAccessDenied
)
const (
@ -76,8 +73,8 @@ const (
)
// NewTree creates instance of Tree using provided address and create grpc connection.
func NewTree(service ServiceClient, log *zap.Logger) *Tree {
return &Tree{service: service, log: log}
func NewTree(service ServiceClient) *Tree {
return &Tree{service: service}
}
type Meta interface {
@ -258,10 +255,7 @@ func (c *Tree) getSystemNode(ctx context.Context, bktInfo *data.BucketInfo, name
nodes = filterMultipartNodes(nodes)
if len(nodes) == 0 {
return nil, ErrNodeNotFound
}
if len(nodes) != 1 {
c.reqLogger(ctx).Warn(logs.FoundSeveralSystemTreeNodes, zap.String("name", name), logs.TagField(logs.TagExternalStorageTree))
return nil, layer.ErrNodeNotFound
}
return newMultiNode(nodes)
@ -302,7 +296,7 @@ func getLatestVersionNode(nodes []NodeResponse) (NodeResponse, error) {
}
if targetIndexNode == -1 {
return nil, fmt.Errorf("latest version: %w", ErrNodeNotFound)
return nil, layer.ErrNodeNotFound
}
return nodes[targetIndexNode], nil
@ -323,23 +317,20 @@ func pathFromName(objectName string) []string {
return strings.Split(objectName, separator)
}
func (c *Tree) GetSubTreeByPrefix(ctx context.Context, bktInfo *data.BucketInfo, prefix string, latestOnly bool) ([]data.NodeInfo, error) {
func (c *Tree) GetSubTreeByPrefix(ctx context.Context, bktInfo *data.BucketInfo, prefix string, latestOnly bool) ([]data.NodeInfo, string, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "tree.GetSubTreeByPrefix")
defer span.End()
rootID, err := c.getPrefixNodeID(ctx, bktInfo, versionTree, strings.Split(prefix, separator))
rootID, tailPrefix, err := c.determinePrefixNode(ctx, bktInfo, versionTree, prefix)
if err != nil {
if errors.Is(err, ErrNodeNotFound) {
return nil, nil
}
return nil, err
return nil, "", err
}
subTree, err := c.service.GetSubTree(ctx, bktInfo, versionTree, rootID, 2, false)
if err != nil {
if errors.Is(err, ErrNodeNotFound) {
return nil, nil
return nil, "", nil
}
return nil, err
return nil, "", err
}
nodesMap := make(map[string][]NodeResponse, len(subTree))
@ -349,6 +340,10 @@ func (c *Tree) GetSubTreeByPrefix(ctx context.Context, bktInfo *data.BucketInfo,
}
fileName := GetFilename(node)
if !strings.HasPrefix(fileName, tailPrefix) {
continue
}
nodes := nodesMap[fileName]
// Add all nodes if flag latestOnly is false.
@ -372,7 +367,7 @@ func (c *Tree) GetSubTreeByPrefix(ctx context.Context, bktInfo *data.BucketInfo,
result = append(result, nodeResponseToNodeInfo(nodes)...)
}
return result, nil
return result, strings.TrimSuffix(prefix, tailPrefix), nil
}
func nodeResponseToNodeInfo(nodes []NodeResponse) []data.NodeInfo {
@ -384,6 +379,22 @@ func nodeResponseToNodeInfo(nodes []NodeResponse) []data.NodeInfo {
return nodesInfo
}
func (c *Tree) determinePrefixNode(ctx context.Context, bktInfo *data.BucketInfo, treeID, prefix string) ([]uint64, string, error) {
rootID := []uint64{0}
path := strings.Split(prefix, separator)
tailPrefix := path[len(path)-1]
if len(path) > 1 {
var err error
rootID, err = c.getPrefixNodeID(ctx, bktInfo, treeID, path[:len(path)-1])
if err != nil {
return nil, "", err
}
}
return rootID, tailPrefix, nil
}
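A concrete illustration of the split performed above, with a made-up prefix: the last path element becomes the tail used for name filtering, and only the leading elements are resolved to a tree node. The splitPrefix helper below is illustrative and only restates determinePrefixNode's logic; it needs the standard "strings" package.

// splitPrefix mirrors determinePrefixNode's split, shown standalone for clarity.
func splitPrefix(prefix string) (parentPath []string, tailPrefix string) {
	path := strings.Split(prefix, "/") // "docs/2024/rep" -> ["docs" "2024" "rep"]
	return path[:len(path)-1], path[len(path)-1]
}

For the prefix "docs/2024/rep" this yields parentPath ["docs", "2024"], which getPrefixNodeID resolves to a node, and tailPrefix "rep", which GetSubTreeByPrefix then matches against each child's FileName and trims from the prefix it reports back to the caller.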
func (c *Tree) getPrefixNodeID(ctx context.Context, bktInfo *data.BucketInfo, treeID string, prefixPath []string) ([]uint64, error) {
p := &GetNodesParams{
CnrID: bktInfo.CID,
@ -406,16 +417,12 @@ func (c *Tree) getPrefixNodeID(ctx context.Context, bktInfo *data.BucketInfo, tr
}
if len(intermediateNodes) == 0 {
return nil, ErrNodeNotFound
return nil, layer.ErrNodeNotFound
}
return intermediateNodes, nil
}
func (c *Tree) reqLogger(ctx context.Context) *zap.Logger {
return utils.GetReqLogOrDefault(ctx, c.log)
}
func GetFilename(node NodeResponse) string {
for _, kv := range node.GetMeta() {
if kv.GetKey() == FileNameKey {

View file

@ -11,8 +11,6 @@ import (
"time"
"unicode"
"unicode/utf8"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
)
type EpochDurations struct {
@ -258,12 +256,3 @@ func (t systemTransformer) updateExpirationHeader(headers map[string]string, dur
headers[t.expirationEpochAttr()] = strconv.FormatUint(expirationEpoch, 10)
}
func GetAttributeValue(attrs []object.Attribute, key string) string {
for _, attr := range attrs {
if attr.Key() == key {
return attr.Value()
}
}
return ""
}