diff --git a/.docker/Dockerfile b/.docker/Dockerfile index f45c864..8d6f806 100644 --- a/.docker/Dockerfile +++ b/.docker/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.22-alpine AS basebuilder +FROM golang:1.24-alpine AS basebuilder RUN apk add --update make bash ca-certificates FROM basebuilder AS builder diff --git a/.forgejo/workflows/builds.yml b/.forgejo/workflows/builds.yml index 7c2bb04..ebb6bcc 100644 --- a/.forgejo/workflows/builds.yml +++ b/.forgejo/workflows/builds.yml @@ -10,7 +10,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - go_versions: [ '1.22', '1.23' ] + go_versions: [ '1.23', '1.24' ] fail-fast: false steps: - uses: actions/checkout@v3 diff --git a/.forgejo/workflows/tests.yml b/.forgejo/workflows/tests.yml index d4182ed..8fb4c10 100644 --- a/.forgejo/workflows/tests.yml +++ b/.forgejo/workflows/tests.yml @@ -14,7 +14,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v3 with: - go-version: '1.23' + go-version: '1.24' cache: true - name: Install linters @@ -28,7 +28,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - go_versions: [ '1.22', '1.23' ] + go_versions: [ '1.23', '1.24' ] fail-fast: false steps: - uses: actions/checkout@v3 @@ -53,7 +53,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v3 with: - go-version: '1.23' + go-version: '1.24' - name: Run integration tests run: |- diff --git a/.forgejo/workflows/vulncheck.yml b/.forgejo/workflows/vulncheck.yml index 5fb9dc5..a58d2df 100644 --- a/.forgejo/workflows/vulncheck.yml +++ b/.forgejo/workflows/vulncheck.yml @@ -16,7 +16,7 @@ jobs: - name: Setup Go uses: actions/setup-go@v3 with: - go-version: '1.22' + go-version: '1.23' check-latest: true - name: Install govulncheck diff --git a/.golangci.yml b/.golangci.yml index d9f93eb..2c754ac 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -22,9 +22,6 @@ linters-settings: # 'default' case is present, even if all enum members aren't listed in the # switch default-signifies-exhaustive: true - govet: - # report about shadowed variables - check-shadowing: false custom: truecloudlab-linters: path: bin/external_linters.so diff --git a/CHANGELOG.md b/CHANGELOG.md index 2025b6d..4465d2f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,9 +4,12 @@ This document outlines major changes between releases. 
## [Unreleased] +- Update Go to 1.23 (#228) + ### Added - Add handling quota limit reached error (#187) - Add slash clipping for FileName attribute (#174) +- Add new format of tag names config ## [0.32.3] - 2025-02-05 diff --git a/Makefile b/Makefile index 5b9e5bf..11084f0 100755 --- a/Makefile +++ b/Makefile @@ -2,9 +2,9 @@ REPO ?= $(shell go list -m) VERSION ?= $(shell git describe --tags --match "v*" --dirty --always --abbrev=8 2>/dev/null || cat VERSION 2>/dev/null || echo "develop") -GO_VERSION ?= 1.22 -LINT_VERSION ?= 1.60.3 -TRUECLOUDLAB_LINT_VERSION ?= 0.0.6 +GO_VERSION ?= 1.23 +LINT_VERSION ?= 1.64.8 +TRUECLOUDLAB_LINT_VERSION ?= 0.0.10 BUILD ?= $(shell date -u --iso=seconds) HUB_IMAGE ?= git.frostfs.info/truecloudlab/frostfs-http-gw @@ -30,9 +30,10 @@ PKG_VERSION ?= $(shell echo $(VERSION) | sed "s/^v//" | \ sed "s/-/~/")-${OS_RELEASE} .PHONY: debpackage debclean -FUZZ_NGFUZZ_DIR ?= "" +FUZZING_DIR = $(shell pwd)/tests/fuzzing/files +NGFUZZ_REPO = https://gitflic.ru/project/yadro/ngfuzz.git FUZZ_TIMEOUT ?= 30 -FUZZ_FUNCTIONS ?= "all" +FUZZ_FUNCTIONS ?= "" FUZZ_AUX ?= "" # Make all binaries @@ -99,18 +100,22 @@ check-ngfuzz: exit 1; \ fi -.PHONY: install-fuzzing-deps -install-fuzzing-deps: check-clang check-ngfuzz +.PHONY: install-ngfuzz +install-ngfuzz: +ifeq (,$(wildcard $(FUZZING_DIR)/ngfuzz)) + @rm -rf $(FUZZING_DIR)/ngfuzz + @git clone $(NGFUZZ_REPO) $(FUZZING_DIR)/ngfuzz + @cd $(FUZZING_DIR)/ngfuzz && make +endif .PHONY: fuzz -fuzz: install-fuzzing-deps +fuzz: check-clang install-ngfuzz @START_PATH=$$(pwd); \ - ROOT_PATH=$$(realpath --relative-to=$(FUZZ_NGFUZZ_DIR) $$START_PATH) ; \ - cd $(FUZZ_NGFUZZ_DIR) && \ - ./ngfuzz -clean && \ - ./ngfuzz -fuzz $(FUZZ_FUNCTIONS) -rootdir $$ROOT_PATH -timeout $(FUZZ_TIMEOUT) $(FUZZ_AUX) && \ - ./ngfuzz -report - + ROOT_PATH=$$(realpath --relative-to=$(FUZZING_DIR)/ngfuzz $$START_PATH) ; \ + cd $(FUZZING_DIR)/ngfuzz && \ + ./bin/ngfuzz clean && \ + env CGO_ENABLED=1 ./bin/ngfuzz fuzz --funcs $(FUZZ_FUNCTIONS) --rootdir $$ROOT_PATH --timeout $(FUZZ_TIMEOUT) $(FUZZ_AUX) && \ + ./bin/ngfuzz coverage --rootdir $$ROOT_PATH # Reformat code fmt: @@ -150,7 +155,7 @@ dirty-image: @@make -C $(TMP_DIR)/linters lib CGO_ENABLED=1 OUT_DIR=$(OUTPUT_LINT_DIR) @rm -rf $(TMP_DIR)/linters @rmdir $(TMP_DIR) 2>/dev/null || true - @CGO_ENABLED=1 GOBIN=$(LINT_DIR) go install github.com/golangci/golangci-lint/cmd/golangci-lint@v$(LINT_VERSION) + @CGO_ENABLED=1 GOBIN=$(LINT_DIR) go install -trimpath github.com/golangci/golangci-lint/cmd/golangci-lint@v$(LINT_VERSION) # Run linters lint: diff --git a/cmd/http-gw/app.go b/cmd/http-gw/app.go index de186fb..4a83caf 100644 --- a/cmd/http-gw/app.go +++ b/cmd/http-gw/app.go @@ -22,6 +22,8 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler/middleware" "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs" internalnet "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/net" + containerClient "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/service/contracts/container" + contractsUtil "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/service/contracts/util" "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/service/frostfs" "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/templates" "git.frostfs.info/TrueCloudLab/frostfs-http-gw/metrics" @@ -39,6 +41,7 @@ import ( "github.com/nspcc-dev/neo-go/cli/flags" "github.com/nspcc-dev/neo-go/cli/input" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" + "github.com/nspcc-dev/neo-go/pkg/rpcclient" 
"github.com/nspcc-dev/neo-go/pkg/util" "github.com/nspcc-dev/neo-go/pkg/wallet" "github.com/panjf2000/ants/v2" @@ -100,21 +103,23 @@ type ( workerPoolSize int logLevelConfig *logLevelConfig - mu sync.RWMutex - defaultTimestamp bool - archiveCompression bool - clientCut bool - returnIndexPage bool - indexPageTemplate string - bufferMaxSizeForPut uint64 - namespaceHeader string - defaultNamespaces []string - cors *data.CORSRule - enableFilepathFallback bool + mu sync.RWMutex + defaultTimestamp bool + archiveCompression bool + clientCut bool + returnIndexPage bool + indexPageTemplate string + bufferMaxSizeForPut uint64 + namespaceHeader string + defaultNamespaces []string + cors *data.CORSRule + enableFilepathFallback bool + enableFilepathSlashFallback bool } tagsConfig struct { - tagLogs sync.Map + tagLogs sync.Map + defaultLvl zap.AtomicLevel } logLevelConfig struct { @@ -134,19 +139,34 @@ func newLogLevel(v *viper.Viper) zap.AtomicLevel { } func newTagsConfig(v *viper.Viper, ll zapcore.Level) *tagsConfig { - var t tagsConfig + t := tagsConfig{defaultLvl: zap.NewAtomicLevelAt(ll)} if err := t.update(v, ll); err != nil { // panic here is analogue of the similar panic during common log level initialization. panic(err.Error()) } + return &t } func newLogLevelConfig(lvl zap.AtomicLevel, tagsConfig *tagsConfig) *logLevelConfig { - return &logLevelConfig{ + cfg := &logLevelConfig{ logLevel: lvl, tagsConfig: tagsConfig, } + + cfg.setMinLogLevel() + + return cfg +} + +func (l *logLevelConfig) setMinLogLevel() { + l.tagsConfig.tagLogs.Range(func(_, value any) bool { + v := value.(zapcore.Level) + if v < l.logLevel.Level() { + l.logLevel.SetLevel(v) + } + return true + }) } func (l *logLevelConfig) update(cfg *viper.Viper, log *zap.Logger) { @@ -159,34 +179,34 @@ func (l *logLevelConfig) update(cfg *viper.Viper, log *zap.Logger) { if err := l.tagsConfig.update(cfg, l.logLevel.Level()); err != nil { log.Warn(logs.TagsLogConfigWontBeUpdated, zap.Error(err), logs.TagField(logs.TagApp)) } + + l.setMinLogLevel() } func (t *tagsConfig) LevelEnabled(tag string, tgtLevel zapcore.Level) bool { lvl, ok := t.tagLogs.Load(tag) if !ok { - return false + return t.defaultLvl.Enabled(tgtLevel) } return lvl.(zapcore.Level).Enabled(tgtLevel) } +func (t *tagsConfig) DefaultEnabled(lvl zapcore.Level) bool { + return t.defaultLvl.Enabled(lvl) +} + func (t *tagsConfig) update(cfg *viper.Viper, ll zapcore.Level) error { tags, err := fetchLogTagsConfig(cfg, ll) if err != nil { return err } - t.tagLogs.Range(func(key, value any) bool { + t.tagLogs.Range(func(key, _ any) bool { k := key.(string) - v := value.(zapcore.Level) - if lvl, ok := tags[k]; ok { - if lvl != v { - t.tagLogs.Store(key, lvl) - } - } else { + if _, ok := tags[k]; !ok { t.tagLogs.Delete(key) - delete(tags, k) } return true }) @@ -194,6 +214,7 @@ func (t *tagsConfig) update(cfg *viper.Viper, ll zapcore.Level) error { for k, v := range tags { t.tagLogs.Store(k, v) } + t.defaultLvl.SetLevel(ll) return nil } @@ -258,6 +279,14 @@ func (a *app) initContainers(ctx context.Context) { a.corsCnrID = *corsCnrID } +func (a *app) initRPCClient(ctx context.Context) *rpcclient.Client { + rpcCli, err := rpcclient.New(ctx, a.config().GetString(cfgRPCEndpoint), rpcclient.Options{}) + if err != nil { + a.log.Fatal(logs.InitRPCClientFailed, zap.Error(err), logs.TagField(logs.TagApp)) + } + return rpcCli +} + func (a *app) initAppSettings(lc *logLevelConfig) { a.settings = &appSettings{ reconnectInterval: fetchReconnectInterval(a.config()), @@ -279,6 +308,7 @@ func (s 
*appSettings) update(v *viper.Viper, l *zap.Logger) { indexPage, indexEnabled := fetchIndexPageTemplate(v, l) cors := fetchCORSConfig(v) enableFilepathFallback := v.GetBool(cfgFeaturesEnableFilepathFallback) + enableFilepathSlashFallback := v.GetBool(cfgFeaturesEnableFilepathSlashFallback) s.mu.Lock() defer s.mu.Unlock() @@ -294,6 +324,7 @@ func (s *appSettings) update(v *viper.Viper, l *zap.Logger) { s.indexPageTemplate = indexPage s.cors = cors s.enableFilepathFallback = enableFilepathFallback + s.enableFilepathSlashFallback = enableFilepathSlashFallback } func (s *loggerSettings) DroppedLogsInc() { @@ -404,6 +435,12 @@ func (s *appSettings) EnableFilepathFallback() bool { return s.enableFilepathFallback } +func (s *appSettings) EnableFilepathSlashFallback() bool { + s.mu.RLock() + defer s.mu.RUnlock() + return s.enableFilepathSlashFallback +} + func (a *app) initResolver() { var err error a.resolver, err = resolver.NewContainerResolver(a.getResolverConfig()) @@ -672,7 +709,7 @@ func (a *app) configReload(ctx context.Context) { return } - a.settings.logLevelConfig.update(a.cfg.settings, a.log) + a.settings.logLevelConfig.update(a.cfg.config(), a.log) if err := a.settings.dialerSource.Update(fetchMultinetConfig(a.config(), a.log)); err != nil { a.log.Warn(logs.MultinetConfigWontBeUpdated, zap.Error(err), logs.TagField(logs.TagApp)) @@ -724,7 +761,22 @@ func (a *app) stopServices() { } func (a *app) configureRouter(workerPool *ants.Pool) { - a.handle = handler.New(a.AppParams(), a.settings, tree.NewTree(frostfs.NewPoolWrapper(a.treePool), a.log), workerPool) + rpcCli := a.initRPCClient(a.ctx) + cnrContractName := a.config().GetString(cfgContractsContainerName) + rpcEndpoint := a.config().GetString(cfgRPCEndpoint) + cnrAddr, err := contractsUtil.ResolveContractHash(cnrContractName, rpcEndpoint) + if err != nil { + a.log.Fatal(logs.FailedToResolveContractHash, zap.Error(err), logs.TagField(logs.TagApp)) + } + cnrClient, err := containerClient.New(containerClient.Config{ + ContractHash: cnrAddr, + Key: a.key, + RPCClient: rpcCli, + }) + if err != nil { + a.log.Fatal(logs.InitContainerContractFailed, zap.Error(err), logs.TagField(logs.TagApp)) + } + a.handle = handler.New(a.AppParams(), a.settings, tree.NewTree(frostfs.NewPoolWrapper(a.treePool), a.log), cnrClient, workerPool) r := router.New() r.RedirectTrailingSlash = true diff --git a/cmd/http-gw/logger.go b/cmd/http-gw/logger.go index 91105f7..196cff3 100644 --- a/cmd/http-gw/logger.go +++ b/cmd/http-gw/logger.go @@ -41,6 +41,7 @@ type zapCoreTagFilterWrapper struct { type TagFilterSettings interface { LevelEnabled(tag string, lvl zapcore.Level) bool + DefaultEnabled(lvl zapcore.Level) bool } func (c *zapCoreTagFilterWrapper) Enabled(level zapcore.Level) bool { @@ -63,24 +64,26 @@ func (c *zapCoreTagFilterWrapper) Check(entry zapcore.Entry, checked *zapcore.Ch } func (c *zapCoreTagFilterWrapper) Write(entry zapcore.Entry, fields []zapcore.Field) error { - if c.shouldSkip(entry, fields) || c.shouldSkip(entry, c.extra) { + if c.shouldSkip(entry, fields, c.extra) { return nil } return c.core.Write(entry, fields) } -func (c *zapCoreTagFilterWrapper) shouldSkip(entry zapcore.Entry, fields []zap.Field) bool { +func (c *zapCoreTagFilterWrapper) shouldSkip(entry zapcore.Entry, fields []zap.Field, extra []zap.Field) bool { for _, field := range fields { if field.Key == logs.TagFieldName && field.Type == zapcore.StringType { - if !c.settings.LevelEnabled(field.String, entry.Level) { - return true - } - break + return 
!c.settings.LevelEnabled(field.String, entry.Level) + } + } + for _, field := range extra { + if field.Key == logs.TagFieldName && field.Type == zapcore.StringType { + return !c.settings.LevelEnabled(field.String, entry.Level) } } - return false + return !c.settings.DefaultEnabled(entry.Level) } func (c *zapCoreTagFilterWrapper) Sync() error { @@ -127,14 +130,13 @@ func newLogEncoder() zapcore.Encoder { // // See also zapcore.Level, zap.NewProductionConfig, zap.AddStacktrace. func newStdoutLogger(v *viper.Viper, lvl zap.AtomicLevel, loggerSettings LoggerAppSettings, tagSetting TagFilterSettings) *Logger { - stdout := zapcore.AddSync(os.Stderr) + stdout := zapcore.AddSync(os.Stdout) consoleOutCore := zapcore.NewCore(newLogEncoder(), stdout, lvl) consoleOutCore = applyZapCoreMiddlewares(consoleOutCore, v, loggerSettings, tagSetting) return &Logger{ logger: zap.New(consoleOutCore, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel))), - lvl: lvl, } } @@ -152,7 +154,6 @@ func newJournaldLogger(v *viper.Viper, lvl zap.AtomicLevel, loggerSettings Logge return &Logger{ logger: zap.New(coreWithContext, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel))), - lvl: lvl, } } diff --git a/cmd/http-gw/settings.go b/cmd/http-gw/settings.go index 132c627..4071969 100644 --- a/cmd/http-gw/settings.go +++ b/cmd/http-gw/settings.go @@ -62,6 +62,8 @@ const ( defaultMultinetFallbackDelay = 300 * time.Millisecond + defaultContainerContractName = "container.frostfs" + cfgServer = "server" cfgTLSEnabled = "tls.enabled" cfgTLSCertFile = "tls.cert_file" @@ -113,7 +115,7 @@ const ( cfgLoggerTags = "logger.tags" cfgLoggerTagsPrefixTmpl = cfgLoggerTags + ".%d." - cfgLoggerTagsNameTmpl = cfgLoggerTagsPrefixTmpl + "name" + cfgLoggerTagsNameTmpl = cfgLoggerTagsPrefixTmpl + "names" cfgLoggerTagsLevelTmpl = cfgLoggerTagsPrefixTmpl + "level" // Wallet. @@ -180,8 +182,9 @@ const ( cfgMultinetSubnets = "multinet.subnets" // Feature. - cfgFeaturesEnableFilepathFallback = "features.enable_filepath_fallback" - cfgFeaturesTreePoolNetmapSupport = "features.tree_pool_netmap_support" + cfgFeaturesEnableFilepathFallback = "features.enable_filepath_fallback" + cfgFeaturesEnableFilepathSlashFallback = "features.enable_filepath_slash_fallback" + cfgFeaturesTreePoolNetmapSupport = "features.tree_pool_netmap_support" // Containers. cfgContainersCORS = "containers.cors" @@ -196,6 +199,9 @@ const ( cmdConfig = "config" cmdConfigDir = "config-dir" cmdListenAddress = "listen_address" + + // Contracts. 
+ cfgContractsContainerName = "contracts.container.name" ) var ignore = map[string]struct{}{ @@ -208,7 +214,6 @@ var defaultTags = []string{logs.TagApp, logs.TagDatapath, logs.TagExternalStorag type Logger struct { logger *zap.Logger - lvl zap.AtomicLevel } type appCfg struct { @@ -401,6 +406,9 @@ func setDefaults(v *viper.Viper, flags *pflag.FlagSet) { // multinet v.SetDefault(cfgMultinetFallbackDelay, defaultMultinetFallbackDelay) + // contracts + v.SetDefault(cfgContractsContainerName, defaultContainerContractName) + if resolveMethods, err := flags.GetStringSlice(cfgResolveOrder); err == nil { v.SetDefault(cfgResolveOrder, resolveMethods) } @@ -516,8 +524,8 @@ func fetchLogTagsConfig(v *viper.Viper, defaultLvl zapcore.Level) (map[string]za res := make(map[string]zapcore.Level) for i := 0; ; i++ { - name := v.GetString(fmt.Sprintf(cfgLoggerTagsNameTmpl, i)) - if name == "" { + tagNames := v.GetString(fmt.Sprintf(cfgLoggerTagsNameTmpl, i)) + if tagNames == "" { break } @@ -529,7 +537,12 @@ func fetchLogTagsConfig(v *viper.Viper, defaultLvl zapcore.Level) (map[string]za } } - res[name] = lvl + for _, tagName := range strings.Split(tagNames, ",") { + tagName = strings.TrimSpace(tagName) + if len(tagName) != 0 { + res[tagName] = lvl + } + } } if len(res) == 0 && !v.IsSet(cfgLoggerTags) { diff --git a/config/config.env b/config/config.env index 0ff2dec..ff880d5 100644 --- a/config/config.env +++ b/config/config.env @@ -20,8 +20,9 @@ HTTP_GW_LOGGER_SAMPLING_ENABLED=false HTTP_GW_LOGGER_SAMPLING_INITIAL=100 HTTP_GW_LOGGER_SAMPLING_THEREAFTER=100 HTTP_GW_LOGGER_SAMPLING_INTERVAL=1s -HTTP_GW_LOGGER_TAGS_0_NAME=app -HTTP_GW_LOGGER_TAGS_1_NAME=datapath +HTTP_GW_LOGGER_TAGS_0_NAMES=app,datapath +HTTP_GW_LOGGER_TAGS_0_LEVEL=debug +HTTP_GW_LOGGER_TAGS_1_NAMES=external_storage_tree HTTP_GW_SERVER_0_ADDRESS=0.0.0.0:443 HTTP_GW_SERVER_0_TLS_ENABLED=false @@ -173,8 +174,13 @@ HTTP_GW_INDEX_PAGE_TEMPLATE_PATH=internal/handler/templates/index.gotmpl # Enable using fallback path to search for a object by attribute HTTP_GW_FEATURES_ENABLE_FILEPATH_FALLBACK=false +# See description in docs/gate-configuration.md +HTTP_GW_FEATURES_ENABLE_FILEPATH_SLASH_FALLBACK=false # Enable using new version of tree pool, which uses netmap to select nodes, for requests to tree service HTTP_GW_FEATURES_TREE_POOL_NETMAP_SUPPORT=true # Containers properties HTTP_GW_CONTAINERS_CORS=AZjLTXfK4vs4ovxMic2xEJKSymMNLqdwq9JT64ASFCRj + +# Container contract hash (LE) or name in NNS. +HTTP_GW_CONTRACTS_CONTAINER_NAME=container.frostfs diff --git a/config/config.yaml b/config/config.yaml index 05bba2e..9b4b3c9 100644 --- a/config/config.yaml +++ b/config/config.yaml @@ -30,8 +30,7 @@ logger: thereafter: 100 interval: 1s tags: - - name: app - - name: datapath + - names: app,datapath level: debug server: @@ -193,8 +192,15 @@ multinet: features: # Enable using fallback path to search for a object by attribute enable_filepath_fallback: false + # See description in docs/gate-configuration.md + enable_filepath_slash_fallback: false # Enable using new version of tree pool, which uses netmap to select nodes, for requests to tree service tree_pool_netmap_support: true containers: cors: AZjLTXfK4vs4ovxMic2xEJKSymMNLqdwq9JT64ASFCRj + +contracts: + container: + # Container contract hash (LE) or name in NNS.
+ name: container.frostfs diff --git a/docs/gate-configuration.md b/docs/gate-configuration.md index 628d3c7..7f3c4ef 100644 --- a/docs/gate-configuration.md +++ b/docs/gate-configuration.md @@ -8,7 +8,6 @@ There are some custom types used for brevity: * `duration` -- string consisting of a number and a suffix. Suffix examples include `s` (seconds), `m` (minutes), `ms` ( milliseconds). - # Reload on SIGHUP Some config values can be reloaded on SIGHUP signal. @@ -61,6 +60,7 @@ $ cat http.log | `multinet` | [Multinet configuration](#multinet-section) | | `features` | [Features configuration](#features-section) | | `containers` | [Containers configuration](#containers-section) | +| `contracts` | [Contracts configuration](#contracts-section) | # General section @@ -163,7 +163,6 @@ server: | `tls.cert_file` | `string` | yes | | Path to the TLS certificate. | | `tls.key_file` | `string` | yes | | Path to the key. | - # `logger` section ```yaml @@ -176,10 +175,9 @@ logger: thereafter: 100 interval: 1s tags: - - name: "app" - level: info - - name: "datapath" - - name: "external_storage_tree" + - names: "app,datapath" + level: info + - names: "external_storage_tree" ``` | Parameter | Type | SIGHUP reload | Default value | Description | @@ -199,14 +197,14 @@ parameter. Available tags: ```yaml tags: - - name: "app" + - names: "app,datapath" level: info ``` -| Parameter | Type | SIGHUP reload | Default value | Description | -|-----------------------|------------|---------------|---------------------------|-------------------------------------------------------------------------------------------------------| -| `name` | `string` | yes | | Tag name. Possible values see below in `Tag values` section. | -| `level` | `string` | yes | Value from `logger.level` | Logging level for specific tag. Possible values: `debug`, `info`, `warn`, `dpanic`, `panic`, `fatal`. | +| Parameter | Type | SIGHUP reload | Default value | Description | +|-----------|------------|---------------|---------------------------|-------------------------------------------------------------------------------------------------------| +| `names` | `[]string` | yes | | Tag names separated by `,`. Possible values see below in `Tag values` section. | +| `level` | `string` | yes | Value from `logger.level` | Logging level for specific tag. Possible values: `debug`, `info`, `warn`, `dpanic`, `panic`, `fatal`. | ### Tag values @@ -236,7 +234,6 @@ web: | `stream_request_body` | `bool` | `true` | Enables request body streaming, and calls the handler sooner when given body is larger than the current limit. | | `max_request_body_size` | `int` | `4194304` | Maximum request body size. The server rejects requests with bodies exceeding this limit. | - # `upload-header` section ```yaml @@ -272,7 +269,6 @@ archive: |---------------|--------|---------------|---------------|------------------------------------------------------------------| | `compression` | `bool` | yes | `false` | Enable archive compression when download files by common prefix. | - # `pprof` section Contains configuration for the `pprof` profiler. 
@@ -321,14 +317,13 @@ tracing: ``` | Parameter | Type | SIGHUP reload | Default value | Description | -| ------------ | -------------------------------------- | ------------- | ------------- | ------------------------------------------------------------------------------------------------------------------------------- | +|--------------|----------------------------------------|---------------|---------------|---------------------------------------------------------------------------------------------------------------------------------| | `enabled` | `bool` | yes | `false` | Flag to enable the tracing. | | `exporter` | `string` | yes | | Trace collector type (`stdout` or `otlp_grpc` are supported). | | `endpoint` | `string` | yes | | Address of collector endpoint for OTLP exporters. | | `trusted_ca` | `string` | yes | | Path to certificate of a certification authority in pem format, that issued the TLS certificate of the telemetry remote server. | | `attributes` | [[]Attributes](#attributes-subsection) | yes | | An array of configurable attributes in key-value format. | - #### `attributes` subsection ```yaml @@ -339,12 +334,13 @@ tracing: value: value ``` -| Parameter | Type | SIGHUP reload | Default value | Description | -|-----------------------|----------|---------------|---------------|----------------------------------------------------------| -| `key` | `string` | yes | | Attribute key. | -| `value` | `string` | yes | | Attribute value. | +| Parameter | Type | SIGHUP reload | Default value | Description | +|-----------|----------|---------------|---------------|------------------| +| `key` | `string` | yes | | Attribute key. | +| `value` | `string` | yes | | Attribute value. | # `runtime` section + Contains runtime parameters. ```yaml @@ -373,7 +369,6 @@ frostfs: | `buffer_max_size_for_put` | `uint64` | yes | `1048576` | Sets max buffer size for read payload in put operations. | | `tree_pool_max_attempts` | `uint32` | no | `0` | Sets max attempt to make successful tree request. Value 0 means the number of attempts equals to number of nodes in pool. | - ### `cache` section ```yaml @@ -394,7 +389,6 @@ cache: | `netmap` | [Cache config](#cache-subsection) | `lifetime: 1m` | Cache which stores netmap. `netmap.size` isn't applicable for this cache. | | `cors` | [Cache config](#cache-subsection) | `lifetime: 5m`
`size: 1000` | Cache which stores container CORS configurations. | - #### `cache` subsection ```yaml @@ -407,7 +401,6 @@ size: 1000 | `lifetime` | `duration` | depends on cache | Lifetime of entries in cache. | | `size` | `int` | depends on cache | LRU cache size. | - # `resolve_bucket` section Bucket name resolving parameters from and to container ID. @@ -418,10 +411,10 @@ resolve_bucket: default_namespaces: [ "", "root" ] ``` -| Parameter | Type | SIGHUP reload | Default value | Description | -|----------------------|------------|---------------|-----------------------|--------------------------------------------------------------------------------------------------------------------------| -| `namespace_header` | `string` | yes | `X-Frostfs-Namespace` | Header to determine zone to resolve bucket name. | -| `default_namespaces` | `[]string` | yes | ["","root"] | Namespaces that should be handled as default. | +| Parameter | Type | SIGHUP reload | Default value | Description | +|----------------------|------------|---------------|-----------------------|--------------------------------------------------| +| `namespace_header` | `string` | yes | `X-Frostfs-Namespace` | Header to determine zone to resolve bucket name. | +| `default_namespaces` | `[]string` | yes | ["","root"] | Namespaces that should be handled as default. | # `index_page` section @@ -451,9 +444,9 @@ If values are not set, settings from CORS container will be used. ```yaml cors: allow_origin: "*" - allow_methods: ["GET", "HEAD"] - allow_headers: ["Authorization"] - expose_headers: ["*"] + allow_methods: [ "GET", "HEAD" ] + allow_headers: [ "Authorization" ] + expose_headers: [ "*" ] allow_credentials: false max_age: 600 ``` @@ -473,15 +466,15 @@ Configuration of multinet support. ```yaml multinet: - enabled: false - balancer: roundrobin - restrict: false - fallback_delay: 300ms - subnets: - - mask: 1.2.3.4/24 - source_ips: - - 1.2.3.4 - - 1.2.3.5 + enabled: false + balancer: roundrobin + restrict: false + fallback_delay: 300ms + subnets: + - mask: 1.2.3.4/24 + source_ips: + - 1.2.3.4 + - 1.2.3.5 ``` | Parameter | Type | SIGHUP reload | Default value | Description | @@ -513,13 +506,15 @@ Contains parameters for enabling features. ```yaml features: enable_filepath_fallback: true + enable_filepath_slash_fallback: false tree_pool_netmap_support: true ``` -| Parameter | Type | SIGHUP reload | Default value | Description | -|-------------------------------------|--------|---------------|---------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `features.enable_filepath_fallback` | `bool` | yes | `false` | Enable using fallback path to search for a object by attribute. If the value of the `FilePath` attribute in the request contains no `/` symbols or single leading `/` symbol and the object was not found, then an attempt is made to search for the object by the attribute `FileName`. | -| `features.tree_pool_netmap_support` | `bool` | no | `false` | Enable using new version of tree pool, which uses netmap to select nodes, for requests to tree service. 
| +| Parameter | Type | SIGHUP reload | Default value | Description | +|-------------------------------------------|--------|---------------|---------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `features.enable_filepath_fallback` | `bool` | yes | `false` | Enable using a fallback path to search for an object by the `FileName` attribute if an object with the `FilePath` attribute wasn't found. | +| `features.enable_filepath_slash_fallback` | `bool` | yes | `false` | Enable using a fallback path to search for an object by `FilePath`/`FileName` with or without a leading slash (the opposite of the provided value) if an object with the provided `FilePath`/`FileName` wasn't found. This fallback is applied before `enable_filepath_fallback`. | +| `features.tree_pool_netmap_support` | `bool` | no | `false` | Enable using new version of tree pool, which uses netmap to select nodes, for requests to tree service. | # `containers` section @@ -530,6 +525,18 @@ containers: cors: AZjLTXfK4vs4ovxMic2xEJKSymMNLqdwq9JT64ASFCRj ``` -| Parameter | Type | SIGHUP reload | Default value | Description | -|-------------|----------|---------------|---------------|-----------------------------------------| -| `cors` | `string` | no | | Container name for CORS configurations. | +| Parameter | Type | SIGHUP reload | Default value | Description | +|-----------|----------|---------------|---------------|-----------------------------------------| +| `cors` | `string` | no | | Container name for CORS configurations. | + +# `contracts` section + +```yaml +contracts: + container: + name: container.frostfs +``` + +| Parameter | Type | SIGHUP reload | Default value | Description | +|------------------|----------|---------------|---------------------|----------------------------------------------| +| `container.name` | `string` | no | `container.frostfs` | Container contract hash (LE) or name in NNS.
| diff --git a/go.mod b/go.mod index 31cf242..6082ef6 100644 --- a/go.mod +++ b/go.mod @@ -1,8 +1,9 @@ module git.frostfs.info/TrueCloudLab/frostfs-http-gw -go 1.22 +go 1.23 require ( + git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241125133852-37bd75821121 git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250128150313-cfbca7fa1dfe git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250317082814-87bb55f992dc @@ -33,7 +34,6 @@ require ( require ( dario.cat/mergo v1.0.0 // indirect - git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e // indirect git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 // indirect git.frostfs.info/TrueCloudLab/hrw v1.2.1 // indirect git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0 // indirect diff --git a/internal/handler/browse.go b/internal/handler/browse.go index ebe9004..d9e6625 100644 --- a/internal/handler/browse.go +++ b/internal/handler/browse.go @@ -12,7 +12,6 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data" "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" @@ -131,11 +130,15 @@ func parentDir(prefix string) string { return prefix[index:] } -func trimPrefix(encPrefix string) string { +func getParent(encPrefix string) string { prefix, err := url.PathUnescape(encPrefix) if err != nil { return "" } + if prefix != "" && prefix[len(prefix)-1] == '/' { + prefix = prefix[:len(prefix)-1] + } + slashIndex := strings.LastIndex(prefix, "/") if slashIndex == -1 { return "" @@ -161,10 +164,15 @@ func urlencode(path string) string { type GetObjectsResponse struct { objects []ResponseObject hasErrors bool + isNative bool } func (h *Handler) getDirObjectsS3(ctx context.Context, bucketInfo *data.BucketInfo, prefix string) (*GetObjectsResponse, error) { - nodes, _, err := h.tree.GetSubTreeByPrefix(ctx, bucketInfo, prefix, true) + if prefix != "" && prefix[len(prefix)-1] == '/' { + prefix = prefix[:len(prefix)-1] + } + + nodes, err := h.tree.GetSubTreeByPrefix(ctx, bucketInfo, prefix, true) if err != nil { return nil, err } @@ -185,7 +193,7 @@ func (h *Handler) getDirObjectsS3(ctx context.Context, bucketInfo *data.BucketIn if obj.IsDeleteMarker { continue } - obj.FilePath = prefix + obj.FileName + obj.FilePath = prefix + "/" + obj.FileName obj.GetURL = "/get/" + bucketInfo.Name + urlencode(obj.FilePath) result.objects = append(result.objects, obj) } @@ -194,9 +202,9 @@ func (h *Handler) getDirObjectsS3(ctx context.Context, bucketInfo *data.BucketIn } func (h *Handler) getDirObjectsNative(ctx context.Context, bucketInfo *data.BucketInfo, prefix string) (*GetObjectsResponse, error) { - var basePath string - if ind := strings.LastIndex(prefix, "/"); ind != -1 { - basePath = prefix[:ind+1] + basePath := prefix + if basePath != "" && basePath[len(basePath)-1] != '/' { + basePath += "/" } filters := object.NewSearchFilters() @@ -226,7 +234,8 @@ func (h *Handler) getDirObjectsNative(ctx context.Context, bucketInfo *data.Buck log := h.reqLogger(ctx) dirs := make(map[string]struct{}) result := &GetObjectsResponse{ - objects: make([]ResponseObject, 0, 100), + objects: make([]ResponseObject, 0, 100), + isNative: true, } for objExt := range resp { if objExt.Error != nil { @@ -322,28 
+331,16 @@ func (h *Handler) headDirObject(ctx context.Context, cnrID cid.ID, objID oid.ID, } type browseParams struct { - bucketInfo *data.BucketInfo - prefix string - isNative bool - listObjects func(ctx context.Context, bucketName *data.BucketInfo, prefix string) (*GetObjectsResponse, error) + bucketInfo *data.BucketInfo + prefix string + objects *GetObjectsResponse } func (h *Handler) browseObjects(ctx context.Context, req *fasthttp.RequestCtx, p browseParams) { const S3Protocol = "s3" const FrostfsProtocol = "frostfs" - ctx = utils.SetReqLog(ctx, h.reqLogger(ctx).With( - zap.String("bucket", p.bucketInfo.Name), - zap.String("container", p.bucketInfo.CID.EncodeToString()), - zap.String("prefix", p.prefix), - )) - resp, err := p.listObjects(ctx, p.bucketInfo, p.prefix) - if err != nil { - h.logAndSendError(ctx, req, logs.FailedToListObjects, err) - return - } - - objects := resp.objects + objects := p.objects.objects sort.Slice(objects, func(i, j int) bool { if objects[i].IsDir == objects[j].IsDir { return objects[i].FileName < objects[j].FileName @@ -353,7 +350,7 @@ func (h *Handler) browseObjects(ctx context.Context, req *fasthttp.RequestCtx, p tmpl, err := template.New("index").Funcs(template.FuncMap{ "formatSize": formatSize, - "trimPrefix": trimPrefix, + "getParent": getParent, "urlencode": urlencode, "parentDir": parentDir, }).Parse(h.config.IndexPageTemplate()) @@ -363,16 +360,21 @@ func (h *Handler) browseObjects(ctx context.Context, req *fasthttp.RequestCtx, p } bucketName := p.bucketInfo.Name protocol := S3Protocol - if p.isNative { + if p.objects.isNative { bucketName = p.bucketInfo.CID.EncodeToString() protocol = FrostfsProtocol } + prefix := p.prefix + if prefix != "" && prefix[len(prefix)-1] != '/' { + prefix += "/" + } + if err = tmpl.Execute(req, &BrowsePageData{ Container: bucketName, - Prefix: p.prefix, + Prefix: prefix, Objects: objects, Protocol: protocol, - HasErrors: resp.hasErrors, + HasErrors: p.objects.hasErrors, }); err != nil { h.logAndSendError(ctx, req, logs.FailedToExecuteTemplate, err) return diff --git a/internal/handler/container.go b/internal/handler/container.go new file mode 100644 index 0000000..3c7bec8 --- /dev/null +++ b/internal/handler/container.go @@ -0,0 +1,42 @@ +package handler + +import ( + "context" + "fmt" + + "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data" + "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" + cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" + "go.uber.org/zap" +) + +func (h *Handler) containerInfo(ctx context.Context, cnrID cid.ID) (*data.BucketInfo, error) { + info := &data.BucketInfo{ + CID: cnrID, + Name: cnrID.EncodeToString(), + } + res, err := h.cnrContract.GetContainerByID(cnrID) + if err != nil { + return nil, fmt.Errorf("get frostfs container: %w", err) + } + + cnr := *res + + if domain := container.ReadDomain(cnr); domain.Name() != "" { + info.Name = domain.Name() + info.Zone = domain.Zone() + } + info.HomomorphicHashDisabled = container.IsHomomorphicHashingDisabled(cnr) + info.PlacementPolicy = cnr.PlacementPolicy() + + if err = h.cache.Put(info); err != nil { + h.reqLogger(ctx).Warn(logs.CouldntPutBucketIntoCache, + zap.String("bucket name", info.Name), + zap.Stringer("cid", info.CID), + zap.Error(err), + logs.TagField(logs.TagDatapath)) + } + + return info, nil +} diff --git a/internal/handler/cors.go b/internal/handler/cors.go index d77ae02..7e8db93 100644 --- a/internal/handler/cors.go +++ 
b/internal/handler/cors.go @@ -5,6 +5,8 @@ import ( "encoding/xml" "errors" "fmt" + "regexp" + "slices" "sort" "strconv" "strings" @@ -78,7 +80,7 @@ func (h *Handler) Preflight(req *fasthttp.RequestCtx) { for _, rule := range corsConfig.CORSRules { for _, o := range rule.AllowedOrigins { - if o == string(origin) || o == wildcard { + if o == string(origin) || o == wildcard || (strings.Contains(o, "*") && match(o, string(origin))) { for _, m := range rule.AllowedMethods { if m == string(method) { if !checkSubslice(rule.AllowedHeaders, headers) { @@ -117,6 +119,11 @@ func (h *Handler) SetCORSHeaders(req *fasthttp.RequestCtx) { return } + method := req.Request.Header.Peek(fasthttp.HeaderAccessControlRequestMethod) + if len(method) == 0 { + method = req.Method() + } + ctx = qostagging.ContextWithIOTag(ctx, internalIOTag) cidParam, _ := req.UserValue("cid").(string) reqLog := h.reqLogger(ctx) @@ -141,9 +148,9 @@ func (h *Handler) SetCORSHeaders(req *fasthttp.RequestCtx) { for _, rule := range corsConfig.CORSRules { for _, o := range rule.AllowedOrigins { - if o == string(origin) { + if o == string(origin) || (strings.Contains(o, "*") && len(o) > 1 && match(o, string(origin))) { for _, m := range rule.AllowedMethods { - if m == string(req.Method()) { + if m == string(method) { req.Response.Header.Set(fasthttp.HeaderAccessControlAllowOrigin, string(origin)) req.Response.Header.Set(fasthttp.HeaderAccessControlAllowMethods, strings.Join(rule.AllowedMethods, ", ")) req.Response.Header.Set(fasthttp.HeaderAccessControlAllowCredentials, "true") @@ -154,7 +161,7 @@ func (h *Handler) SetCORSHeaders(req *fasthttp.RequestCtx) { } if o == wildcard { for _, m := range rule.AllowedMethods { - if m == string(req.Method()) { + if m == string(method) { if withCredentials { req.Response.Header.Set(fasthttp.HeaderAccessControlAllowOrigin, string(origin)) req.Response.Header.Set(fasthttp.HeaderAccessControlAllowCredentials, "true") @@ -190,9 +197,6 @@ func (h *Handler) getCORSConfig(ctx context.Context, log *zap.Logger, cidStr str addr.SetContainer(h.corsCnrID) addr.SetObject(objID) corsObj, err := h.frostfs.GetObject(ctx, PrmObjectGet{ - PrmAuth: PrmAuth{ - BearerToken: bearerToken(ctx), - }, Address: addr, }) if err != nil { @@ -216,11 +220,7 @@ func (h *Handler) getLastCORSObject(ctx context.Context, cnrID cid.ID) (oid.ID, filters.AddRootFilter() filters.AddFilter(object.AttributeFilePath, fmt.Sprintf(corsFilePathTemplate, cnrID), object.MatchStringEqual) - prmAuth := PrmAuth{ - BearerToken: bearerToken(ctx), - } res, err := h.frostfs.SearchObjects(ctx, PrmObjectSearch{ - PrmAuth: prmAuth, Container: h.corsCnrID, Filters: filters, }) @@ -239,7 +239,6 @@ func (h *Handler) getLastCORSObject(ctx context.Context, cnrID cid.ID) (oid.ID, err = res.Iterate(func(id oid.ID) bool { addr.SetObject(id) obj, headErr = h.frostfs.HeadObject(ctx, PrmObjectHead{ - PrmAuth: prmAuth, Address: addr, }) if headErr != nil { @@ -318,12 +317,9 @@ func setCORSHeadersFromRule(c *fasthttp.RequestCtx, cors *data.CORSRule) { } func checkSubslice(slice []string, subSlice []string) bool { - if sliceContains(slice, wildcard) { + if slices.Contains(slice, wildcard) { return true } - if len(subSlice) > len(slice) { - return false - } for _, r := range subSlice { if !sliceContains(slice, r) { return false @@ -334,9 +330,16 @@ func checkSubslice(slice []string, subSlice []string) bool { func sliceContains(slice []string, str string) bool { for _, s := range slice { - if s == str { + if s == str || (strings.Contains(s, "*") && match(s, str)) { 
return true } } return false } + +func match(tmpl, str string) bool { + regexpStr := "^" + regexp.QuoteMeta(tmpl) + "$" + regexpStr = regexpStr[:strings.Index(regexpStr, "*")-1] + "." + regexpStr[strings.Index(regexpStr, "*"):] + reg := regexp.MustCompile(regexpStr) + return reg.Match([]byte(str)) +} diff --git a/internal/handler/cors_test.go b/internal/handler/cors_test.go index 7cd7b0d..1ac07d7 100644 --- a/internal/handler/cors_test.go +++ b/internal/handler/cors_test.go @@ -4,6 +4,7 @@ import ( "encoding/base64" "encoding/xml" "fmt" + "net/http" "testing" "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data" @@ -407,6 +408,12 @@ func TestCheckSubslice(t *testing.T) { actual: []string{"str1", "str5"}, expected: false, }, + { + name: "wildcard in allowed", + allowed: []string{"str*"}, + actual: []string{"str", "str5"}, + expected: true, + }, } { t.Run(tc.name, func(t *testing.T) { require.Equal(t, tc.expected, checkSubslice(tc.allowed, tc.actual)) @@ -414,6 +421,489 @@ func TestCheckSubslice(t *testing.T) { } } +func TestAllowedOriginWildcards(t *testing.T) { + hc := prepareHandlerContext(t) + bktName := "bucket-allowed-origin-wildcards" + cnrID, cnr, err := hc.prepareContainer(bktName, acl.Private) + require.NoError(t, err) + hc.frostfs.SetContainer(cnrID, cnr) + + cfg := &data.CORSConfiguration{ + CORSRules: []data.CORSRule{ + { + AllowedOrigins: []string{"*suffix.example"}, + AllowedMethods: []string{"GET"}, + }, + { + AllowedOrigins: []string{"https://*example"}, + AllowedMethods: []string{"GET"}, + }, + { + AllowedOrigins: []string{"prefix.example*"}, + AllowedMethods: []string{"GET"}, + }, + }, + } + setCORSObject(t, hc, cnrID, cfg, 1) + + for _, tc := range []struct { + name string + handler func(*fasthttp.RequestCtx) + requestHeaders map[string]string + expectedHeaders map[string]string + expectedStatus int + }{ + { + name: "set cors headers, empty request cors headers", + handler: hc.Handler().SetCORSHeaders, + expectedHeaders: map[string]string{ + fasthttp.HeaderAccessControlAllowOrigin: "", + fasthttp.HeaderAccessControlAllowMethods: "", + }, + }, + { + name: "set cors headers, invalid origin", + handler: hc.Handler().SetCORSHeaders, + requestHeaders: map[string]string{ + fasthttp.HeaderOrigin: "https://origin.com", + }, + expectedHeaders: map[string]string{ + fasthttp.HeaderAccessControlAllowOrigin: "", + fasthttp.HeaderAccessControlAllowMethods: "", + }, + }, + { + name: "set cors headers, first rule, no symbols in place of wildcard", + handler: hc.Handler().SetCORSHeaders, + requestHeaders: map[string]string{ + fasthttp.HeaderOrigin: "suffix.example", + }, + expectedHeaders: map[string]string{ + fasthttp.HeaderAccessControlAllowOrigin: "suffix.example", + fasthttp.HeaderAccessControlAllowMethods: "GET", + }, + }, + { + name: "set cors headers, first rule, valid origin", + handler: hc.Handler().SetCORSHeaders, + requestHeaders: map[string]string{ + fasthttp.HeaderOrigin: "http://suffix.example", + }, + expectedHeaders: map[string]string{ + fasthttp.HeaderAccessControlAllowOrigin: "http://suffix.example", + fasthttp.HeaderAccessControlAllowMethods: "GET", + }, + }, + { + name: "set cors headers, first rule, invalid origin", + handler: hc.Handler().SetCORSHeaders, + requestHeaders: map[string]string{ + fasthttp.HeaderOrigin: "http://suffix-example", + }, + expectedHeaders: map[string]string{ + fasthttp.HeaderAccessControlAllowOrigin: "", + fasthttp.HeaderAccessControlAllowMethods: "", + }, + }, + { + name: "set cors headers, second rule, no symbols in place of 
wildcard", + handler: hc.Handler().SetCORSHeaders, + requestHeaders: map[string]string{ + fasthttp.HeaderOrigin: "https://example", + }, + expectedHeaders: map[string]string{ + fasthttp.HeaderAccessControlAllowOrigin: "https://example", + fasthttp.HeaderAccessControlAllowMethods: "GET", + }, + }, + { + name: "set cors headers, second rule, valid origin", + handler: hc.Handler().SetCORSHeaders, + requestHeaders: map[string]string{ + fasthttp.HeaderOrigin: "https://www.example", + }, + expectedHeaders: map[string]string{ + fasthttp.HeaderAccessControlAllowOrigin: "https://www.example", + fasthttp.HeaderAccessControlAllowMethods: "GET", + }, + }, + { + name: "set cors headers, second rule, invalid origin", + handler: hc.Handler().SetCORSHeaders, + requestHeaders: map[string]string{ + fasthttp.HeaderOrigin: "https://www.example.com", + }, + expectedHeaders: map[string]string{ + fasthttp.HeaderAccessControlAllowOrigin: "", + fasthttp.HeaderAccessControlAllowMethods: "", + }, + }, + { + name: "set cors headers, third rule, no symbols in place of wildcard", + handler: hc.Handler().SetCORSHeaders, + requestHeaders: map[string]string{ + fasthttp.HeaderOrigin: "prefix.example", + }, + expectedHeaders: map[string]string{ + fasthttp.HeaderAccessControlAllowOrigin: "prefix.example", + fasthttp.HeaderAccessControlAllowMethods: "GET", + }, + }, + { + name: "set cors headers, third rule, valid origin", + handler: hc.Handler().SetCORSHeaders, + requestHeaders: map[string]string{ + fasthttp.HeaderOrigin: "prefix.example.com", + }, + expectedHeaders: map[string]string{ + fasthttp.HeaderAccessControlAllowOrigin: "prefix.example.com", + fasthttp.HeaderAccessControlAllowMethods: "GET", + }, + }, + { + name: "set cors headers, third rule, invalid origin", + handler: hc.Handler().SetCORSHeaders, + requestHeaders: map[string]string{ + fasthttp.HeaderOrigin: "www.prefix.example", + }, + expectedHeaders: map[string]string{ + fasthttp.HeaderAccessControlAllowOrigin: "", + fasthttp.HeaderAccessControlAllowMethods: "", + }, + }, + { + name: "set cors headers, third rule, invalid request method in header", + handler: hc.Handler().SetCORSHeaders, + requestHeaders: map[string]string{ + fasthttp.HeaderOrigin: "prefix.example.com", + fasthttp.HeaderAccessControlRequestMethod: "PUT", + }, + expectedHeaders: map[string]string{ + fasthttp.HeaderAccessControlAllowOrigin: "", + fasthttp.HeaderAccessControlAllowMethods: "", + }, + }, + { + name: "set cors headers, third rule, valid request method in header", + handler: hc.Handler().SetCORSHeaders, + requestHeaders: map[string]string{ + fasthttp.HeaderOrigin: "prefix.example.com", + fasthttp.HeaderAccessControlRequestMethod: "GET", + }, + expectedHeaders: map[string]string{ + fasthttp.HeaderAccessControlAllowOrigin: "prefix.example.com", + fasthttp.HeaderAccessControlAllowMethods: "GET", + }, + }, + { + name: "preflight, empty request cors headers", + handler: hc.Handler().Preflight, + expectedHeaders: map[string]string{ + fasthttp.HeaderAccessControlAllowOrigin: "", + fasthttp.HeaderAccessControlAllowMethods: "", + }, + expectedStatus: http.StatusBadRequest, + }, + { + name: "preflight, invalid origin", + handler: hc.Handler().Preflight, + requestHeaders: map[string]string{ + fasthttp.HeaderOrigin: "https://origin.com", + fasthttp.HeaderAccessControlRequestMethod: "GET", + }, + expectedHeaders: map[string]string{ + fasthttp.HeaderAccessControlAllowOrigin: "", + fasthttp.HeaderAccessControlAllowMethods: "", + }, + expectedStatus: http.StatusForbidden, + }, + { + name: "preflight, 
first rule, no symbols in place of wildcard", + handler: hc.Handler().Preflight, + requestHeaders: map[string]string{ + fasthttp.HeaderOrigin: "suffix.example", + fasthttp.HeaderAccessControlRequestMethod: "GET", + }, + expectedHeaders: map[string]string{ + fasthttp.HeaderAccessControlAllowOrigin: "suffix.example", + fasthttp.HeaderAccessControlAllowMethods: "GET", + }, + }, + { + name: "prelight, first rule, valid origin", + handler: hc.Handler().Preflight, + requestHeaders: map[string]string{ + fasthttp.HeaderOrigin: "http://suffix.example", + fasthttp.HeaderAccessControlRequestMethod: "GET", + }, + expectedHeaders: map[string]string{ + fasthttp.HeaderAccessControlAllowOrigin: "http://suffix.example", + fasthttp.HeaderAccessControlAllowMethods: "GET", + }, + }, + { + name: "preflight, first rule, invalid origin", + handler: hc.Handler().Preflight, + requestHeaders: map[string]string{ + fasthttp.HeaderOrigin: "http://suffix-example", + fasthttp.HeaderAccessControlRequestMethod: "GET", + }, + expectedHeaders: map[string]string{ + fasthttp.HeaderAccessControlAllowOrigin: "", + fasthttp.HeaderAccessControlAllowMethods: "", + }, + expectedStatus: http.StatusForbidden, + }, + { + name: "preflight, second rule, no symbols in place of wildcard", + handler: hc.Handler().Preflight, + requestHeaders: map[string]string{ + fasthttp.HeaderOrigin: "https://example", + fasthttp.HeaderAccessControlRequestMethod: "GET", + }, + expectedHeaders: map[string]string{ + fasthttp.HeaderAccessControlAllowOrigin: "https://example", + fasthttp.HeaderAccessControlAllowMethods: "GET", + }, + }, + { + name: "preflight, second rule, valid origin", + handler: hc.Handler().Preflight, + requestHeaders: map[string]string{ + fasthttp.HeaderOrigin: "https://www.example", + fasthttp.HeaderAccessControlRequestMethod: "GET", + }, + expectedHeaders: map[string]string{ + fasthttp.HeaderAccessControlAllowOrigin: "https://www.example", + fasthttp.HeaderAccessControlAllowMethods: "GET", + }, + }, + { + name: "preflight, second rule, invalid origin", + handler: hc.Handler().Preflight, + requestHeaders: map[string]string{ + fasthttp.HeaderOrigin: "https://www.example.com", + fasthttp.HeaderAccessControlRequestMethod: "GET", + }, + expectedHeaders: map[string]string{ + fasthttp.HeaderAccessControlAllowOrigin: "", + fasthttp.HeaderAccessControlAllowMethods: "", + }, + expectedStatus: http.StatusForbidden, + }, + { + name: "preflight, third rule, no symbols in place of wildcard", + handler: hc.Handler().Preflight, + requestHeaders: map[string]string{ + fasthttp.HeaderOrigin: "prefix.example", + fasthttp.HeaderAccessControlRequestMethod: "GET", + }, + expectedHeaders: map[string]string{ + fasthttp.HeaderAccessControlAllowOrigin: "prefix.example", + fasthttp.HeaderAccessControlAllowMethods: "GET", + }, + }, + { + name: "preflight, third rule, valid origin", + handler: hc.Handler().Preflight, + requestHeaders: map[string]string{ + fasthttp.HeaderOrigin: "prefix.example.com", + fasthttp.HeaderAccessControlRequestMethod: "GET", + }, + expectedHeaders: map[string]string{ + fasthttp.HeaderAccessControlAllowOrigin: "prefix.example.com", + fasthttp.HeaderAccessControlAllowMethods: "GET", + }, + }, + { + name: "preflight, third rule, invalid origin", + handler: hc.Handler().Preflight, + requestHeaders: map[string]string{ + fasthttp.HeaderOrigin: "www.prefix.example", + fasthttp.HeaderAccessControlRequestMethod: "GET", + }, + expectedHeaders: map[string]string{ + fasthttp.HeaderAccessControlAllowOrigin: "", + 
fasthttp.HeaderAccessControlAllowMethods: "", + }, + expectedStatus: http.StatusForbidden, + }, + { + name: "preflight, third rule, invalid request method in header", + handler: hc.Handler().Preflight, + requestHeaders: map[string]string{ + fasthttp.HeaderOrigin: "prefix.example.com", + fasthttp.HeaderAccessControlRequestMethod: "PUT", + }, + expectedHeaders: map[string]string{ + fasthttp.HeaderAccessControlAllowOrigin: "", + fasthttp.HeaderAccessControlAllowMethods: "", + }, + expectedStatus: http.StatusForbidden, + }, + } { + t.Run(tc.name, func(t *testing.T) { + r := prepareCORSRequest(t, bktName, tc.requestHeaders) + tc.handler(r) + + expectedStatus := fasthttp.StatusOK + if tc.expectedStatus != 0 { + expectedStatus = tc.expectedStatus + } + require.Equal(t, expectedStatus, r.Response.StatusCode()) + for k, v := range tc.expectedHeaders { + require.Equal(t, v, string(r.Response.Header.Peek(k))) + } + }) + } +} + +func TestAllowedHeaderWildcards(t *testing.T) { + hc := prepareHandlerContext(t) + bktName := "bucket-allowed-header-wildcards" + cnrID, cnr, err := hc.prepareContainer(bktName, acl.Private) + require.NoError(t, err) + hc.frostfs.SetContainer(cnrID, cnr) + + cfg := &data.CORSConfiguration{ + CORSRules: []data.CORSRule{ + { + AllowedOrigins: []string{"https://www.example.com"}, + AllowedMethods: []string{"HEAD"}, + AllowedHeaders: []string{"*-suffix"}, + }, + { + AllowedOrigins: []string{"https://www.example.com"}, + AllowedMethods: []string{"HEAD"}, + AllowedHeaders: []string{"start-*-end"}, + }, + { + AllowedOrigins: []string{"https://www.example.com"}, + AllowedMethods: []string{"HEAD"}, + AllowedHeaders: []string{"X-Amz-*"}, + }, + }, + } + setCORSObject(t, hc, cnrID, cfg, 1) + + for _, tc := range []struct { + name string + requestHeaders map[string]string + expectedHeaders map[string]string + expectedStatus int + }{ + { + name: "first rule, valid headers", + requestHeaders: map[string]string{ + fasthttp.HeaderOrigin: "https://www.example.com", + fasthttp.HeaderAccessControlRequestMethod: "HEAD", + fasthttp.HeaderAccessControlRequestHeaders: "header-suffix, -suffix", + }, + expectedHeaders: map[string]string{ + fasthttp.HeaderAccessControlAllowOrigin: "https://www.example.com", + fasthttp.HeaderAccessControlAllowMethods: "HEAD", + fasthttp.HeaderAccessControlAllowHeaders: "header-suffix, -suffix", + }, + }, + { + name: "first rule, invalid headers", + requestHeaders: map[string]string{ + fasthttp.HeaderOrigin: "https://www.example.com", + fasthttp.HeaderAccessControlRequestMethod: "HEAD", + fasthttp.HeaderAccessControlRequestHeaders: "header-suffix-*", + }, + expectedHeaders: map[string]string{ + fasthttp.HeaderAccessControlAllowOrigin: "", + fasthttp.HeaderAccessControlAllowMethods: "", + fasthttp.HeaderAccessControlAllowHeaders: "", + }, + expectedStatus: http.StatusForbidden, + }, + { + name: "second rule, valid headers", + requestHeaders: map[string]string{ + fasthttp.HeaderOrigin: "https://www.example.com", + fasthttp.HeaderAccessControlRequestMethod: "HEAD", + fasthttp.HeaderAccessControlRequestHeaders: "start--end, start-header-end", + }, + expectedHeaders: map[string]string{ + fasthttp.HeaderAccessControlAllowOrigin: "https://www.example.com", + fasthttp.HeaderAccessControlAllowMethods: "HEAD", + fasthttp.HeaderAccessControlAllowHeaders: "start--end, start-header-end", + }, + }, + { + name: "second rule, invalid header ending", + requestHeaders: map[string]string{ + fasthttp.HeaderOrigin: "https://www.example.com", + fasthttp.HeaderAccessControlRequestMethod: 
"HEAD", + fasthttp.HeaderAccessControlRequestHeaders: "start-header-end-*", + }, + expectedHeaders: map[string]string{ + fasthttp.HeaderAccessControlAllowOrigin: "", + fasthttp.HeaderAccessControlAllowMethods: "", + fasthttp.HeaderAccessControlAllowHeaders: "", + }, + expectedStatus: http.StatusForbidden, + }, + { + name: "second rule, invalid header beginning", + requestHeaders: map[string]string{ + fasthttp.HeaderOrigin: "https://www.example.com", + fasthttp.HeaderAccessControlRequestMethod: "HEAD", + fasthttp.HeaderAccessControlRequestHeaders: "*-start-header-end", + }, + expectedHeaders: map[string]string{ + fasthttp.HeaderAccessControlAllowOrigin: "", + fasthttp.HeaderAccessControlAllowMethods: "", + fasthttp.HeaderAccessControlAllowHeaders: "", + }, + expectedStatus: http.StatusForbidden, + }, + { + name: "third rule, valid headers", + requestHeaders: map[string]string{ + fasthttp.HeaderOrigin: "https://www.example.com", + fasthttp.HeaderAccessControlRequestMethod: "HEAD", + fasthttp.HeaderAccessControlRequestHeaders: "X-Amz-Date, X-Amz-Content-Sha256", + }, + expectedHeaders: map[string]string{ + fasthttp.HeaderAccessControlAllowOrigin: "https://www.example.com", + fasthttp.HeaderAccessControlAllowMethods: "HEAD", + fasthttp.HeaderAccessControlAllowHeaders: "X-Amz-Date, X-Amz-Content-Sha256", + }, + }, + { + name: "third rule, invalid headers", + requestHeaders: map[string]string{ + fasthttp.HeaderOrigin: "https://www.example.com", + fasthttp.HeaderAccessControlRequestMethod: "HEAD", + fasthttp.HeaderAccessControlRequestHeaders: "Authorization", + }, + expectedHeaders: map[string]string{ + fasthttp.HeaderAccessControlAllowOrigin: "", + fasthttp.HeaderAccessControlAllowMethods: "", + fasthttp.HeaderAccessControlAllowHeaders: "", + }, + expectedStatus: http.StatusForbidden, + }, + } { + t.Run(tc.name, func(t *testing.T) { + r := prepareCORSRequest(t, bktName, tc.requestHeaders) + hc.Handler().Preflight(r) + + expectedStatus := http.StatusOK + if tc.expectedStatus != 0 { + expectedStatus = tc.expectedStatus + } + require.Equal(t, expectedStatus, r.Response.StatusCode()) + for k, v := range tc.expectedHeaders { + require.Equal(t, v, string(r.Response.Header.Peek(k))) + } + }) + } +} + func setCORSObject(t *testing.T, hc *handlerContext, cnrID cid.ID, corsConfig *data.CORSConfiguration, epoch uint64) { payload, err := xml.Marshal(corsConfig) require.NoError(t, err) diff --git a/internal/handler/download.go b/internal/handler/download.go index 114bf34..15fb886 100644 --- a/internal/handler/download.go +++ b/internal/handler/download.go @@ -10,11 +10,12 @@ import ( "fmt" "io" "net/url" + "strings" "time" "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data" - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/layer" "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs" + "git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree" "git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" @@ -31,13 +32,18 @@ func (h *Handler) DownloadByAddressOrBucketName(req *fasthttp.RequestCtx) { cidParam := req.UserValue("cid").(string) oidParam := req.UserValue("oid").(string) - downloadParam := req.QueryArgs().GetBool("download") ctx = utils.SetReqLog(ctx, h.reqLogger(ctx).With( zap.String("cid", cidParam), zap.String("oid", oidParam), )) + path, err := url.QueryUnescape(oidParam) + if err != nil { + h.logAndSendError(ctx, req, logs.FailedToUnescapePath, err) + return 
+ } + bktInfo, err := h.getBucketInfo(ctx, cidParam) if err != nil { h.logAndSendError(ctx, req, logs.FailedToGetBucketInfo, err) @@ -45,23 +51,166 @@ func (h *Handler) DownloadByAddressOrBucketName(req *fasthttp.RequestCtx) { } checkS3Err := h.tree.CheckSettingsNodeExists(ctx, bktInfo) - if checkS3Err != nil && !errors.Is(checkS3Err, layer.ErrNodeNotFound) { + if checkS3Err != nil && !errors.Is(checkS3Err, tree.ErrNodeNotFound) { h.logAndSendError(ctx, req, logs.FailedToCheckIfSettingsNodeExist, checkS3Err) return } - var objID oid.ID - if checkS3Err == nil && shouldDownload(oidParam, downloadParam) { - h.byS3Path(ctx, req, bktInfo.CID, oidParam, h.receiveFile) - } else if err = objID.DecodeString(oidParam); err == nil { - h.byNativeAddress(ctx, req, bktInfo.CID, objID, h.receiveFile) + prm := MiddlewareParam{ + Context: ctx, + Request: req, + BktInfo: bktInfo, + Path: path, + } + + indexPageEnabled := h.config.IndexPageEnabled() + + if checkS3Err == nil { + run(prm, h.errorMiddleware(logs.ObjectNotFound, ErrObjectNotFound), + Middleware{Func: h.byS3PathMiddleware(h.receiveFile, noopFormer), Enabled: true}, + Middleware{Func: h.byS3PathMiddleware(h.receiveFile, indexFormer), Enabled: indexPageEnabled}, + Middleware{Func: h.browseIndexMiddleware(h.getDirObjectsS3), Enabled: indexPageEnabled}, + ) } else { - h.browseIndex(ctx, req, cidParam, oidParam, checkS3Err != nil) + slashFallbackEnabled := h.config.EnableFilepathSlashFallback() + fileNameFallbackEnabled := h.config.EnableFilepathFallback() + + run(prm, h.errorMiddleware(logs.ObjectNotFound, ErrObjectNotFound), + Middleware{Func: h.byAddressMiddleware(h.receiveFile), Enabled: true}, + Middleware{Func: h.byAttributeSearchMiddleware(h.receiveFile, object.AttributeFilePath, noopFormer), Enabled: true}, + Middleware{Func: h.byAttributeSearchMiddleware(h.receiveFile, object.AttributeFilePath, reverseLeadingSlash), Enabled: slashFallbackEnabled}, + Middleware{Func: h.byAttributeSearchMiddleware(h.receiveFile, object.AttributeFileName, noopFormer), Enabled: fileNameFallbackEnabled}, + Middleware{Func: h.byAttributeSearchMiddleware(h.receiveFile, object.AttributeFileName, reverseLeadingSlash), Enabled: fileNameFallbackEnabled && slashFallbackEnabled}, + Middleware{Func: h.byAttributeSearchMiddleware(h.receiveFile, object.AttributeFilePath, indexFormer), Enabled: indexPageEnabled}, + Middleware{Func: h.byAttributeSearchMiddleware(h.receiveFile, object.AttributeFileName, indexFormer), Enabled: fileNameFallbackEnabled && indexPageEnabled}, + Middleware{Func: h.browseIndexMiddleware(h.getDirObjectsNative), Enabled: indexPageEnabled}, + ) } } -func shouldDownload(oidParam string, downloadParam bool) bool { - return !isDir(oidParam) || downloadParam +type ObjectHandlerFunc func(context.Context, *fasthttp.RequestCtx, oid.Address) + +type MiddlewareFunc func(param MiddlewareParam) bool + +type MiddlewareParam struct { + Context context.Context + Request *fasthttp.RequestCtx + BktInfo *data.BucketInfo + Path string +} + +type Middleware struct { + Func MiddlewareFunc + Enabled bool +} + +func run(prm MiddlewareParam, defaultMiddleware MiddlewareFunc, middlewares ...Middleware) { + for _, m := range middlewares { + if m.Enabled && !m.Func(prm) { + return + } + } + + defaultMiddleware(prm) +} + +func indexFormer(path string) string { + indexPath := path + if indexPath != "" && !strings.HasSuffix(indexPath, "/") { + indexPath += "/" + } + + return indexPath + "index.html" +} + +func reverseLeadingSlash(path string) string { + if path == "" || path == "/" 
{ + return path + } + + if path[0] == '/' { + return path[1:] + } + + return "/" + path +} + +func noopFormer(path string) string { + return path +} + +func (h *Handler) byS3PathMiddleware(handler func(context.Context, *fasthttp.RequestCtx, oid.Address), pathFormer func(string) string) MiddlewareFunc { + return func(prm MiddlewareParam) bool { + ctx, span := tracing.StartSpanFromContext(prm.Context, "handler.byS3Path") + defer span.End() + + path := pathFormer(prm.Path) + + foundOID, err := h.tree.GetLatestVersion(ctx, &prm.BktInfo.CID, path) + if err == nil { + if foundOID.IsDeleteMarker { + h.logAndSendError(ctx, prm.Request, logs.IndexWasDeleted, ErrObjectNotFound) + return false + } + + addr := newAddress(prm.BktInfo.CID, foundOID.OID) + handler(ctx, prm.Request, addr) + return false + } + + if !errors.Is(err, tree.ErrNodeNotFound) { + h.logAndSendError(ctx, prm.Request, logs.FailedToGetLatestVersionOfIndexObject, err, zap.String("path", path)) + return false + } + + return true + } +} + +func (h *Handler) byAttributeSearchMiddleware(handler ObjectHandlerFunc, attr string, pathFormer func(string) string) MiddlewareFunc { + return func(prm MiddlewareParam) bool { + ctx, span := tracing.StartSpanFromContext(prm.Context, "handler.byAttributeSearch") + defer span.End() + + path := pathFormer(prm.Path) + + res, err := h.search(ctx, prm.BktInfo.CID, attr, path, object.MatchStringEqual) + if err != nil { + h.logAndSendError(ctx, prm.Request, logs.FailedToFindObjectByAttribute, err) + return false + } + defer res.Close() + + buf := make([]oid.ID, 1) + n, err := res.Read(buf) + if err == nil && n > 0 { + addr := newAddress(prm.BktInfo.CID, buf[0]) + handler(ctx, prm.Request, addr) + return false + } + + if !errors.Is(err, io.EOF) { + h.logAndSendError(ctx, prm.Request, logs.FailedToFindObjectByAttribute, err) + return false + } + + return true + } +} + +func (h *Handler) byAddressMiddleware(handler ObjectHandlerFunc) MiddlewareFunc { + return func(prm MiddlewareParam) bool { + ctx, span := tracing.StartSpanFromContext(prm.Context, "handler.byAddress") + defer span.End() + + var objID oid.ID + if objID.DecodeString(prm.Path) == nil { + handler(ctx, prm.Request, newAddress(prm.BktInfo.CID, objID)) + return false + } + + return true + } } // DownloadByAttribute handles attribute-based download requests. 
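A minimal standalone Go sketch (not part of the patch itself) of the resolution-chain convention introduced above in download.go: each MiddlewareFunc returns false once it has handled the request, which stops the chain, and true to hand control to the next enabled entry, while the path formers (noopFormer, reverseLeadingSlash, indexFormer) reshape the lookup key per attempt. The types and the in-memory store below are simplified stand-ins for illustration, not the gateway's real API.

package main

import (
	"fmt"
	"strings"
)

// Simplified stand-ins for the Middleware types added in download.go.
type middlewareFunc func(path string) bool

type middleware struct {
	fn      middlewareFunc
	enabled bool
}

// run tries each enabled middleware in order; the first one that handles the
// request returns false and stops the chain, otherwise the default runs.
func run(path string, def middlewareFunc, mws ...middleware) {
	for _, m := range mws {
		if m.enabled && !m.fn(path) {
			return
		}
	}
	def(path)
}

// Path formers mirroring the patch: identity, leading-slash inversion, index.html suffix.
func noopFormer(p string) string { return p }

func reverseLeadingSlash(p string) string {
	if p == "" || p == "/" {
		return p
	}
	if p[0] == '/' {
		return p[1:]
	}
	return "/" + p
}

func indexFormer(p string) string {
	if p != "" && !strings.HasSuffix(p, "/") {
		p += "/"
	}
	return p + "index.html"
}

func main() {
	// Toy store keyed by a FilePath-like attribute; the real handler queries FrostFS.
	store := map[string]bool{"filepath/obj1": true, "docs/index.html": true}

	lookup := func(former func(string) string) middlewareFunc {
		return func(path string) bool {
			if key := former(path); store[key] {
				fmt.Println("served", key)
				return false // handled, stop the chain
			}
			return true // not found, try the next middleware
		}
	}
	notFound := func(string) bool { fmt.Println("404"); return false }

	// "/filepath/obj1" only resolves after the slash-fallback former strips the slash.
	run("/filepath/obj1", notFound,
		middleware{fn: lookup(noopFormer), enabled: true},
		middleware{fn: lookup(reverseLeadingSlash), enabled: true},
		middleware{fn: lookup(indexFormer), enabled: true},
	)
	// "docs" resolves via the index former to "docs/index.html".
	run("docs", notFound,
		middleware{fn: lookup(noopFormer), enabled: true},
		middleware{fn: lookup(indexFormer), enabled: true},
	)
}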
diff --git a/internal/handler/frostfs_mock.go b/internal/handler/frostfs_mock.go index 7d72ad9..540697f 100644 --- a/internal/handler/frostfs_mock.go +++ b/internal/handler/frostfs_mock.go @@ -233,6 +233,16 @@ func (t *TestFrostFS) SearchObjects(_ context.Context, prm PrmObjectSearch) (Res return &resObjectSearchMock{res: res}, nil } +func (t *TestFrostFS) GetContainerByID(cid cid.ID) (*container.Container, error) { + for k, v := range t.containers { + if k == cid.EncodeToString() { + return v, nil + } + } + + return nil, fmt.Errorf("container does not exist %s", cid) +} + func (t *TestFrostFS) InitMultiObjectReader(context.Context, PrmInitMultiObjectReader) (io.Reader, error) { return nil, nil } diff --git a/internal/handler/handler.go b/internal/handler/handler.go index a982bc2..2efd71d 100644 --- a/internal/handler/handler.go +++ b/internal/handler/handler.go @@ -11,8 +11,8 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/cache" "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data" "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler/middleware" - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/layer" "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs" + "git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree" "git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" @@ -35,6 +35,7 @@ type Config interface { BufferMaxSizeForPut() uint64 NamespaceHeader() string EnableFilepathFallback() bool + EnableFilepathSlashFallback() bool FormContainerZone(string) string CORS() *data.CORSRule } @@ -166,13 +167,19 @@ type ContainerResolver interface { Resolve(ctx context.Context, zone, name string) (*cid.ID, error) } +type ContainerContract interface { + // GetContainerByID reads a container from contract by ID. + GetContainerByID(cid.ID) (*container.Container, error) +} + type Handler struct { log *zap.Logger frostfs FrostFS ownerID *user.ID config Config containerResolver ContainerResolver - tree layer.TreeService + cnrContract ContainerContract + tree *tree.Tree cache *cache.BucketCache workerPool *ants.Pool corsCnrID cid.ID @@ -189,7 +196,7 @@ type AppParams struct { CORSCache *cache.CORSCache } -func New(params *AppParams, config Config, tree layer.TreeService, workerPool *ants.Pool) *Handler { +func New(params *AppParams, config Config, tree *tree.Tree, rpcCli ContainerContract, workerPool *ants.Pool) *Handler { return &Handler{ log: params.Logger, frostfs: params.FrostFS, @@ -201,39 +208,10 @@ func New(params *AppParams, config Config, tree layer.TreeService, workerPool *a workerPool: workerPool, corsCnrID: params.CORSCnrID, corsCache: params.CORSCache, + cnrContract: rpcCli, } } -// byNativeAddress is a wrapper for function (e.g. request.headObject, request.receiveFile) that -// prepares request and object address to it. -func (h *Handler) byNativeAddress(ctx context.Context, req *fasthttp.RequestCtx, cnrID cid.ID, objID oid.ID, handler func(context.Context, *fasthttp.RequestCtx, oid.Address)) { - ctx, span := tracing.StartSpanFromContext(ctx, "handler.byNativeAddress") - defer span.End() - - addr := newAddress(cnrID, objID) - handler(ctx, req, addr) -} - -// byS3Path is a wrapper for function (e.g. request.headObject, request.receiveFile) that -// resolves object address from S3-like path /. 
-func (h *Handler) byS3Path(ctx context.Context, req *fasthttp.RequestCtx, cnrID cid.ID, path string, handler func(context.Context, *fasthttp.RequestCtx, oid.Address)) { - ctx, span := tracing.StartSpanFromContext(ctx, "handler.byS3Path") - defer span.End() - - foundOID, err := h.tree.GetLatestVersion(ctx, &cnrID, path) - if err != nil { - h.logAndSendError(ctx, req, logs.FailedToGetLatestVersionOfObject, err, zap.String("path", path)) - return - } - if foundOID.IsDeleteMarker { - h.logAndSendError(ctx, req, logs.ObjectWasDeleted, ErrObjectNotFound) - return - } - - addr := newAddress(cnrID, foundOID.OID) - handler(ctx, req, addr) -} - // byAttribute is a wrapper similar to byNativeAddress. func (h *Handler) byAttribute(ctx context.Context, req *fasthttp.RequestCtx, handler func(context.Context, *fasthttp.RequestCtx, oid.Address)) { cidParam, _ := req.UserValue("cid").(string) @@ -252,8 +230,6 @@ func (h *Handler) byAttribute(ctx context.Context, req *fasthttp.RequestCtx, han return } - val = prepareAtribute(key, val) - ctx = utils.SetReqLog(ctx, h.reqLogger(ctx).With(zap.String("cid", cidParam), zap.String("attr_key", key), zap.String("attr_val", val))) @@ -291,10 +267,6 @@ func (h *Handler) findObjectByAttribute(ctx context.Context, cnrID cid.ID, attrK n, err := res.Read(buf) if n == 0 { switch { - case errors.Is(err, io.EOF) && h.needSearchByFileName(attrKey, attrVal): - h.reqLogger(ctx).Debug(logs.ObjectNotFoundByFilePathTrySearchByFileName, logs.TagField(logs.TagExternalStorage)) - attrVal = prepareAtribute(attrFileName, attrVal) - return h.findObjectByAttribute(ctx, cnrID, attrFileName, attrVal) case errors.Is(err, io.EOF): h.reqLogger(ctx).Error(logs.ObjectNotFound, zap.Error(err), logs.TagField(logs.TagExternalStorage)) return oid.ID{}, fmt.Errorf("object not found: %w", err) @@ -307,42 +279,6 @@ func (h *Handler) findObjectByAttribute(ctx context.Context, cnrID cid.ID, attrK return buf[0], nil } -func (h *Handler) needSearchByFileName(key, val string) bool { - if key != attrFilePath || !h.config.EnableFilepathFallback() { - return false - } - - return strings.HasPrefix(val, "/") && strings.Count(val, "/") == 1 || !strings.Contains(val, "/") -} - -func prepareAtribute(attrKey, attrVal string) string { - if attrKey == attrFileName { - return prepareFileName(attrVal) - } - - if attrKey == attrFilePath { - return prepareFilePath(attrVal) - } - - return attrVal -} - -func prepareFileName(fileName string) string { - if strings.HasPrefix(fileName, "/") { - return fileName[1:] - } - - return fileName -} - -func prepareFilePath(filePath string) string { - if !strings.HasPrefix(filePath, "/") { - return "/" + filePath - } - - return filePath -} - // resolveContainer decode container id, if it's not a valid container id // then trey to resolve name using provided resolver. 
func (h *Handler) resolveContainer(ctx context.Context, containerID string) (*cid.ID, error) { @@ -379,76 +315,34 @@ func (h *Handler) getBucketInfo(ctx context.Context, containerName string) (*dat return nil, fmt.Errorf("resolve container: %w", err) } - bktInfo, err := h.readContainer(ctx, *cnrID) - if err != nil { - return nil, fmt.Errorf("read container: %w", err) - } - - if err = h.cache.Put(bktInfo); err != nil { - h.reqLogger(ctx).Warn(logs.CouldntPutBucketIntoCache, - zap.String("bucket name", bktInfo.Name), - zap.Stringer("bucket cid", bktInfo.CID), - zap.Error(err), - logs.TagField(logs.TagDatapath)) - } - - return bktInfo, nil + return h.containerInfo(ctx, *cnrID) } -func (h *Handler) readContainer(ctx context.Context, cnrID cid.ID) (*data.BucketInfo, error) { - prm := PrmContainer{ContainerID: cnrID} - res, err := h.frostfs.Container(ctx, prm) - if err != nil { - return nil, fmt.Errorf("get frostfs container '%s': %w", cnrID.String(), err) +type ListFunc func(ctx context.Context, bucketInfo *data.BucketInfo, prefix string) (*GetObjectsResponse, error) + +func (h *Handler) browseIndexMiddleware(fn ListFunc) MiddlewareFunc { + return func(prm MiddlewareParam) bool { + ctx, span := tracing.StartSpanFromContext(prm.Context, "handler.browseIndex") + defer span.End() + + ctx = utils.SetReqLog(ctx, h.reqLogger(ctx).With( + zap.String("bucket", prm.BktInfo.Name), + zap.String("container", prm.BktInfo.CID.EncodeToString()), + zap.String("prefix", prm.Path), + )) + + objects, err := fn(ctx, prm.BktInfo, prm.Path) + if err != nil { + h.logAndSendError(ctx, prm.Request, logs.FailedToListObjects, err) + return false + } + + h.browseObjects(ctx, prm.Request, browseParams{ + bucketInfo: prm.BktInfo, + prefix: prm.Path, + objects: objects, + }) + + return false } - - bktInfo := &data.BucketInfo{ - CID: cnrID, - Name: cnrID.EncodeToString(), - } - - if domain := container.ReadDomain(*res); domain.Name() != "" { - bktInfo.Name = domain.Name() - bktInfo.Zone = domain.Zone() - } - - bktInfo.HomomorphicHashDisabled = container.IsHomomorphicHashingDisabled(*res) - bktInfo.PlacementPolicy = res.PlacementPolicy() - - return bktInfo, err -} - -func (h *Handler) browseIndex(ctx context.Context, req *fasthttp.RequestCtx, cidParam, oidParam string, isNativeList bool) { - ctx, span := tracing.StartSpanFromContext(ctx, "handler.browseIndex") - defer span.End() - - if !h.config.IndexPageEnabled() { - req.SetStatusCode(fasthttp.StatusNotFound) - return - } - - unescapedKey, err := url.QueryUnescape(oidParam) - if err != nil { - h.logAndSendError(ctx, req, logs.FailedToUnescapeOIDParam, err) - return - } - - bktInfo, err := h.getBucketInfo(ctx, cidParam) - if err != nil { - h.logAndSendError(ctx, req, logs.FailedToGetBucketInfo, err) - return - } - - listFunc := h.getDirObjectsS3 - if isNativeList { - // tree probe failed, trying to use native - listFunc = h.getDirObjectsNative - } - - h.browseObjects(ctx, req, browseParams{ - bucketInfo: bktInfo, - prefix: unescapedKey, - listObjects: listFunc, - isNative: isNativeList, - }) } diff --git a/internal/handler/handler_test.go b/internal/handler/handler_test.go index 93cb1d9..6c715fe 100644 --- a/internal/handler/handler_test.go +++ b/internal/handler/handler_test.go @@ -14,9 +14,10 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/cache" "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data" "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler/middleware" - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/layer" + 
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/templates" "git.frostfs.info/TrueCloudLab/frostfs-http-gw/resolver" "git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens" + "git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree" "git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils" v2container "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" @@ -26,6 +27,7 @@ import ( "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" "github.com/panjf2000/ants/v2" @@ -35,35 +37,11 @@ import ( "go.uber.org/zap/zaptest" ) -type treeServiceMock struct { - system map[string]map[string]*data.BaseNodeVersion -} - -func newTreeService() *treeServiceMock { - return &treeServiceMock{ - system: make(map[string]map[string]*data.BaseNodeVersion), - } -} - -func (t *treeServiceMock) CheckSettingsNodeExists(context.Context, *data.BucketInfo) error { - _, ok := t.system["bucket-settings"] - if !ok { - return layer.ErrNodeNotFound - } - return nil -} - -func (t *treeServiceMock) GetSubTreeByPrefix(context.Context, *data.BucketInfo, string, bool) ([]data.NodeInfo, string, error) { - return nil, "", nil -} - -func (t *treeServiceMock) GetLatestVersion(context.Context, *cid.ID, string) (*data.NodeVersion, error) { - return nil, nil -} - type configMock struct { - additionalSearch bool - cors *data.CORSRule + additionalFilenameSearch bool + additionalSlashSearch bool + indexEnabled bool + cors *data.CORSRule } func (c *configMock) DefaultTimestamp() bool { @@ -75,11 +53,11 @@ func (c *configMock) ArchiveCompression() bool { } func (c *configMock) IndexPageEnabled() bool { - return false + return c.indexEnabled } func (c *configMock) IndexPageTemplate() string { - return "" + return templates.DefaultIndexTemplate } func (c *configMock) IndexPageNativeTemplate() string { @@ -99,7 +77,11 @@ func (c *configMock) NamespaceHeader() string { } func (c *configMock) EnableFilepathFallback() bool { - return c.additionalSearch + return c.additionalFilenameSearch +} + +func (c *configMock) EnableFilepathSlashFallback() bool { + return c.additionalSlashSearch } func (c *configMock) FormContainerZone(string) string { @@ -117,7 +99,7 @@ type handlerContext struct { h *Handler frostfs *TestFrostFS - tree *treeServiceMock + tree *treeServiceClientMock cfg *configMock } @@ -167,14 +149,14 @@ func prepareHandlerContextBase(logger *zap.Logger) (*handlerContext, error) { }), } - treeMock := newTreeService() + treeMock := newTreeServiceClientMock() cfgMock := &configMock{} workerPool, err := ants.NewPool(1) if err != nil { return nil, err } - handler := New(params, cfgMock, treeMock, workerPool) + handler := New(params, cfgMock, tree.NewTree(treeMock, logger), testFrostFS, workerPool) return &handlerContext{ key: key, @@ -254,6 +236,7 @@ func TestBasic(t *testing.T) { err = json.Unmarshal(r.Response.Body(), &putRes) require.NoError(t, err) + hc.cfg.additionalFilenameSearch = true obj := hc.frostfs.objects[putRes.ContainerID+"/"+putRes.ObjectID] fileName := prepareObjectAttributes(object.AttributeFileName, objFileName) filePath := prepareObjectAttributes(object.AttributeFilePath, objFilePath) @@ -264,6 +247,14 @@ func TestBasic(t *testing.T) { r = prepareGetRequest(ctx, 
cnrID.EncodeToString(), putRes.ObjectID) hc.Handler().DownloadByAddressOrBucketName(r) require.Equal(t, content, string(r.Response.Body())) + + r = prepareGetRequest(ctx, cnrID.EncodeToString(), objFilePath) + hc.Handler().DownloadByAddressOrBucketName(r) + require.Equal(t, content, string(r.Response.Body())) + + r = prepareGetRequest(ctx, cnrID.EncodeToString(), objFileName) + hc.Handler().DownloadByAddressOrBucketName(r) + require.Equal(t, content, string(r.Response.Body())) }) t.Run("head", func(t *testing.T) { @@ -271,6 +262,16 @@ func TestBasic(t *testing.T) { hc.Handler().HeadByAddressOrBucketName(r) require.Equal(t, putRes.ObjectID, string(r.Response.Header.Peek(hdrObjectID))) require.Equal(t, putRes.ContainerID, string(r.Response.Header.Peek(hdrContainerID))) + + r = prepareGetRequest(ctx, cnrID.EncodeToString(), objFilePath) + hc.Handler().HeadByAddressOrBucketName(r) + require.Equal(t, putRes.ObjectID, string(r.Response.Header.Peek(hdrObjectID))) + require.Equal(t, putRes.ContainerID, string(r.Response.Header.Peek(hdrContainerID))) + + r = prepareGetRequest(ctx, cnrID.EncodeToString(), objFileName) + hc.Handler().HeadByAddressOrBucketName(r) + require.Equal(t, putRes.ObjectID, string(r.Response.Header.Peek(hdrObjectID))) + require.Equal(t, putRes.ContainerID, string(r.Response.Header.Peek(hdrContainerID))) }) t.Run("get by attribute", func(t *testing.T) { @@ -280,11 +281,11 @@ func TestBasic(t *testing.T) { r = prepareGetByAttributeRequest(ctx, bktName, attrFileName, objFilePath) hc.Handler().DownloadByAttribute(r) - require.Equal(t, content, string(r.Response.Body())) + require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode()) r = prepareGetByAttributeRequest(ctx, bktName, attrFilePath, objFileName) hc.Handler().DownloadByAttribute(r) - require.Equal(t, content, string(r.Response.Body())) + require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode()) }) t.Run("head by attribute", func(t *testing.T) { @@ -295,13 +296,11 @@ func TestBasic(t *testing.T) { r = prepareGetByAttributeRequest(ctx, bktName, attrFileName, objFilePath) hc.Handler().HeadByAttribute(r) - require.Equal(t, putRes.ObjectID, string(r.Response.Header.Peek(hdrObjectID))) - require.Equal(t, putRes.ContainerID, string(r.Response.Header.Peek(hdrContainerID))) + require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode()) r = prepareGetByAttributeRequest(ctx, bktName, attrFilePath, objFileName) hc.Handler().HeadByAttribute(r) - require.Equal(t, putRes.ObjectID, string(r.Response.Header.Peek(hdrObjectID))) - require.Equal(t, putRes.ContainerID, string(r.Response.Header.Peek(hdrContainerID))) + require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode()) }) t.Run("zip", func(t *testing.T) { @@ -325,185 +324,281 @@ func TestBasic(t *testing.T) { }) } -func TestFindObjectByAttribute(t *testing.T) { +func prepareHandlerAndBucket(t *testing.T) (*handlerContext, cid.ID) { hc := prepareHandlerContext(t) - hc.cfg.additionalSearch = true bktName := "bucket" cnrID, cnr, err := hc.prepareContainer(bktName, acl.PublicRWExtended) require.NoError(t, err) hc.frostfs.SetContainer(cnrID, cnr) - ctx := context.Background() - ctx = middleware.SetNamespace(ctx, "") - - content := "hello" - r, err := prepareUploadRequest(ctx, cnrID.EncodeToString(), content) - require.NoError(t, err) - - hc.Handler().Upload(r) - require.Equal(t, r.Response.StatusCode(), http.StatusOK) - - var putRes putResponse - err = json.Unmarshal(r.Response.Body(), &putRes) - require.NoError(t, err) - - testAttrVal1 := "/folder/cat.jpg" - 
testAttrVal2 := "cat.jpg" - testAttrVal3 := "test-attr-val3" - - for _, tc := range []struct { - name string - firstAttr object.Attribute - secondAttr object.Attribute - reqAttrKey string - reqAttrValue string - err string - additionalSearch bool - }{ - { - name: "success search by FileName", - firstAttr: prepareObjectAttributes(attrFilePath, testAttrVal1), - secondAttr: prepareObjectAttributes(attrFileName, testAttrVal2), - reqAttrKey: attrFileName, - reqAttrValue: testAttrVal2, - additionalSearch: false, - }, - { - name: "failed search by FileName", - firstAttr: prepareObjectAttributes(attrFilePath, testAttrVal1), - secondAttr: prepareObjectAttributes(attrFileName, testAttrVal2), - reqAttrKey: attrFileName, - reqAttrValue: testAttrVal3, - err: "not found", - additionalSearch: false, - }, - { - name: "success search by FilePath (with additional search)", - firstAttr: prepareObjectAttributes(attrFilePath, testAttrVal1), - secondAttr: prepareObjectAttributes(attrFileName, testAttrVal2), - reqAttrKey: attrFilePath, - reqAttrValue: testAttrVal2, - additionalSearch: true, - }, - { - name: "failed by FilePath (with additional search)", - firstAttr: prepareObjectAttributes(attrFilePath, testAttrVal1), - secondAttr: prepareObjectAttributes(attrFileName, testAttrVal2), - reqAttrKey: attrFilePath, - reqAttrValue: testAttrVal3, - err: "not found", - additionalSearch: true, - }, - { - name: "success search by FilePath with leading slash (with additional search)", - firstAttr: prepareObjectAttributes(attrFilePath, testAttrVal1), - secondAttr: prepareObjectAttributes(attrFileName, testAttrVal2), - reqAttrKey: attrFilePath, - reqAttrValue: "/cat.jpg", - additionalSearch: true, - }, - } { - t.Run(tc.name, func(t *testing.T) { - obj := hc.frostfs.objects[putRes.ContainerID+"/"+putRes.ObjectID] - obj.SetAttributes(tc.firstAttr, tc.secondAttr) - hc.cfg.additionalSearch = tc.additionalSearch - - objID, err := hc.Handler().findObjectByAttribute(ctx, cnrID, tc.reqAttrKey, tc.reqAttrValue) - if tc.err != "" { - require.Error(t, err) - require.Contains(t, err.Error(), tc.err) - return - } - - require.NoError(t, err) - require.Equal(t, putRes.ObjectID, objID.EncodeToString()) - }) - } + return hc, cnrID } -func TestNeedSearchByFileName(t *testing.T) { - hc := prepareHandlerContext(t) +func TestGetObjectWithFallback(t *testing.T) { + ctx := middleware.SetNamespace(context.Background(), "") - for _, tc := range []struct { - name string - attrKey string - attrVal string - additionalSearch bool - expected bool - }{ - { - name: "need search - not contains slash", - attrKey: attrFilePath, - attrVal: "cat.png", - additionalSearch: true, - expected: true, - }, - { - name: "need search - single lead slash", - attrKey: attrFilePath, - attrVal: "/cat.png", - additionalSearch: true, - expected: true, - }, - { - name: "don't need search - single slash but not lead", - attrKey: attrFilePath, - attrVal: "cats/cat.png", - additionalSearch: true, - expected: false, - }, - { - name: "don't need search - more one slash", - attrKey: attrFilePath, - attrVal: "/cats/cat.png", - additionalSearch: true, - expected: false, - }, - { - name: "don't need search - incorrect attribute key", - attrKey: attrFileName, - attrVal: "cat.png", - additionalSearch: true, - expected: false, - }, - { - name: "don't need search - additional search disabled", - attrKey: attrFilePath, - attrVal: "cat.png", - additionalSearch: false, - expected: false, - }, - } { - t.Run(tc.name, func(t *testing.T) { - hc.cfg.additionalSearch = tc.additionalSearch + 
t.Run("by oid", func(t *testing.T) { + hc, cnrID := prepareHandlerAndBucket(t) - res := hc.h.needSearchByFileName(tc.attrKey, tc.attrVal) - require.Equal(t, tc.expected, res) - }) - } + obj1ID := oidtest.ID() + obj1 := object.New() + obj1.SetID(obj1ID) + obj1.SetPayload([]byte("obj1")) + hc.frostfs.objects[cnrID.String()+"/"+obj1ID.String()] = obj1 + + r := prepareGetRequest(ctx, cnrID.EncodeToString(), obj1ID.String()) + hc.Handler().DownloadByAddressOrBucketName(r) + require.Equal(t, string(obj1.Payload()), string(r.Response.Body())) + }) + + t.Run("by filepath as it is", func(t *testing.T) { + hc, cnrID := prepareHandlerAndBucket(t) + + obj1ID := oidtest.ID() + obj1 := object.New() + obj1.SetID(obj1ID) + obj1.SetPayload([]byte("obj1")) + obj1.SetAttributes(prepareObjectAttributes(object.AttributeFilePath, "filepath/obj1")) + hc.frostfs.objects[cnrID.String()+"/"+obj1ID.String()] = obj1 + + obj2ID := oidtest.ID() + obj2 := object.New() + obj2.SetID(obj2ID) + obj2.SetPayload([]byte("obj2")) + obj2.SetAttributes(prepareObjectAttributes(object.AttributeFilePath, "/filepath/obj2")) + hc.frostfs.objects[cnrID.String()+"/"+obj2ID.String()] = obj2 + + r := prepareGetRequest(ctx, cnrID.EncodeToString(), "filepath/obj1") + hc.Handler().DownloadByAddressOrBucketName(r) + require.Equal(t, string(obj1.Payload()), string(r.Response.Body())) + + r = prepareGetRequest(ctx, cnrID.EncodeToString(), "/filepath/obj2") + hc.Handler().DownloadByAddressOrBucketName(r) + require.Equal(t, string(obj2.Payload()), string(r.Response.Body())) + }) + + t.Run("by filepath slash fallback", func(t *testing.T) { + hc, cnrID := prepareHandlerAndBucket(t) + + obj1ID := oidtest.ID() + obj1 := object.New() + obj1.SetID(obj1ID) + obj1.SetPayload([]byte("obj1")) + obj1.SetAttributes(prepareObjectAttributes(object.AttributeFilePath, "filepath/obj1")) + hc.frostfs.objects[cnrID.String()+"/"+obj1ID.String()] = obj1 + + r := prepareGetRequest(ctx, cnrID.EncodeToString(), "/filepath/obj1") + hc.Handler().DownloadByAddressOrBucketName(r) + require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode()) + + hc.cfg.additionalSlashSearch = true + + r = prepareGetRequest(ctx, cnrID.EncodeToString(), "/filepath/obj1") + hc.Handler().DownloadByAddressOrBucketName(r) + require.Equal(t, string(obj1.Payload()), string(r.Response.Body())) + }) + + t.Run("by filename fallback", func(t *testing.T) { + hc, cnrID := prepareHandlerAndBucket(t) + + obj1ID := oidtest.ID() + obj1 := object.New() + obj1.SetID(obj1ID) + obj1.SetPayload([]byte("obj1")) + obj1.SetAttributes(prepareObjectAttributes(object.AttributeFileName, "filename/obj1")) + hc.frostfs.objects[cnrID.String()+"/"+obj1ID.String()] = obj1 + + r := prepareGetRequest(ctx, cnrID.EncodeToString(), "filename/obj1") + hc.Handler().DownloadByAddressOrBucketName(r) + require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode()) + + hc.cfg.additionalFilenameSearch = true + + r = prepareGetRequest(ctx, cnrID.EncodeToString(), "filename/obj1") + hc.Handler().DownloadByAddressOrBucketName(r) + require.Equal(t, string(obj1.Payload()), string(r.Response.Body())) + }) + + t.Run("by filename and slash fallback", func(t *testing.T) { + hc, cnrID := prepareHandlerAndBucket(t) + + obj1ID := oidtest.ID() + obj1 := object.New() + obj1.SetID(obj1ID) + obj1.SetPayload([]byte("obj1")) + obj1.SetAttributes(prepareObjectAttributes(object.AttributeFileName, "filename/obj1")) + hc.frostfs.objects[cnrID.String()+"/"+obj1ID.String()] = obj1 + + r := prepareGetRequest(ctx, cnrID.EncodeToString(), 
"/filename/obj1") + hc.Handler().DownloadByAddressOrBucketName(r) + require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode()) + + hc.cfg.additionalFilenameSearch = true + + r = prepareGetRequest(ctx, cnrID.EncodeToString(), "/filename/obj1") + hc.Handler().DownloadByAddressOrBucketName(r) + require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode()) + + hc.cfg.additionalSlashSearch = true + + r = prepareGetRequest(ctx, cnrID.EncodeToString(), "/filename/obj1") + hc.Handler().DownloadByAddressOrBucketName(r) + require.Equal(t, string(obj1.Payload()), string(r.Response.Body())) + }) + + t.Run("index fallback", func(t *testing.T) { + hc, cnrID := prepareHandlerAndBucket(t) + + obj1ID := oidtest.ID() + obj1 := object.New() + obj1.SetID(obj1ID) + obj1.SetPayload([]byte("obj1")) + obj1.SetAttributes(prepareObjectAttributes(object.AttributeFilePath, "filepath/index.html")) + hc.frostfs.objects[cnrID.String()+"/"+obj1ID.String()] = obj1 + + r := prepareGetRequest(ctx, cnrID.EncodeToString(), "filepath/") + hc.Handler().DownloadByAddressOrBucketName(r) + require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode()) + + r = prepareGetRequest(ctx, cnrID.EncodeToString(), "filepath") + hc.Handler().DownloadByAddressOrBucketName(r) + require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode()) + + hc.cfg.indexEnabled = true + + r = prepareGetRequest(ctx, cnrID.EncodeToString(), "filepath") + hc.Handler().DownloadByAddressOrBucketName(r) + require.Equal(t, string(obj1.Payload()), string(r.Response.Body())) + + r = prepareGetRequest(ctx, cnrID.EncodeToString(), "filepath/") + hc.Handler().DownloadByAddressOrBucketName(r) + require.Equal(t, string(obj1.Payload()), string(r.Response.Body())) + }) + + t.Run("index filename fallback", func(t *testing.T) { + hc, cnrID := prepareHandlerAndBucket(t) + + obj1ID := oidtest.ID() + obj1 := object.New() + obj1.SetID(obj1ID) + obj1.SetPayload([]byte("obj1")) + obj1.SetAttributes(prepareObjectAttributes(object.AttributeFileName, "filename/index.html")) + hc.frostfs.objects[cnrID.String()+"/"+obj1ID.String()] = obj1 + + r := prepareGetRequest(ctx, cnrID.EncodeToString(), "filename/") + hc.Handler().DownloadByAddressOrBucketName(r) + require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode()) + + r = prepareGetRequest(ctx, cnrID.EncodeToString(), "filename") + hc.Handler().DownloadByAddressOrBucketName(r) + require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode()) + + hc.cfg.indexEnabled = true + hc.cfg.additionalFilenameSearch = true + + r = prepareGetRequest(ctx, cnrID.EncodeToString(), "filename") + hc.Handler().DownloadByAddressOrBucketName(r) + require.Equal(t, string(obj1.Payload()), string(r.Response.Body())) + + r = prepareGetRequest(ctx, cnrID.EncodeToString(), "filename/") + hc.Handler().DownloadByAddressOrBucketName(r) + require.Equal(t, string(obj1.Payload()), string(r.Response.Body())) + }) } -func TestPrepareFileName(t *testing.T) { - fileName := "/cat.jpg" - expected := "cat.jpg" - actual := prepareFileName(fileName) - require.Equal(t, expected, actual) +func TestIndex(t *testing.T) { + ctx := middleware.SetNamespace(context.Background(), "") - fileName = "cat.jpg" - actual = prepareFileName(fileName) - require.Equal(t, expected, actual) -} + t.Run("s3", func(t *testing.T) { + hc, cnrID := prepareHandlerAndBucket(t) -func TestPrepareFilePath(t *testing.T) { - filePath := "cat.jpg" - expected := "/cat.jpg" - actual := prepareFilePath(filePath) - require.Equal(t, expected, actual) + obj1ID := oidtest.ID() + obj1 := 
object.New() + obj1.SetID(obj1ID) + obj1.SetPayload([]byte("obj1")) + obj1.SetAttributes(prepareObjectAttributes(object.AttributeFilePath, "prefix/obj1")) + hc.frostfs.objects[cnrID.String()+"/"+obj1ID.String()] = obj1 - filePath = "/cat.jpg" - actual = prepareFilePath(filePath) - require.Equal(t, expected, actual) + hc.tree.containers[cnrID.String()] = containerInfo{ + trees: map[string]map[string]nodeResponse{ + "system": {"bucket-settings": nodeResponse{nodeID: 1}}, + "version": { + "": nodeResponse{}, //root + "prefix": nodeResponse{ + nodeID: 1, + meta: []nodeMeta{{key: tree.FileNameKey, value: []byte("prefix")}}}, + "obj1": nodeResponse{ + parentID: 1, + nodeID: 2, + meta: []nodeMeta{ + {key: tree.FileNameKey, value: []byte("obj1")}, + {key: "OID", value: []byte(obj1ID.String())}, + }, + }, + }, + }, + } + + r := prepareGetRequest(ctx, cnrID.EncodeToString(), "prefix/") + hc.Handler().DownloadByAddressOrBucketName(r) + require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode()) + + r = prepareGetRequest(ctx, cnrID.EncodeToString(), "prefix") + hc.Handler().DownloadByAddressOrBucketName(r) + require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode()) + + hc.cfg.indexEnabled = true + + r = prepareGetRequest(ctx, cnrID.EncodeToString(), "prefix") + hc.Handler().DownloadByAddressOrBucketName(r) + require.Contains(t, string(r.Response.Body()), "Index of s3://bucket/prefix") + require.Contains(t, string(r.Response.Body()), obj1ID.String()) + + r = prepareGetRequest(ctx, cnrID.EncodeToString(), "prefix/") + hc.Handler().DownloadByAddressOrBucketName(r) + require.Contains(t, string(r.Response.Body()), "Index of s3://bucket/prefix") + require.Contains(t, string(r.Response.Body()), obj1ID.String()) + + r = prepareGetRequest(ctx, "bucket", "dummy") + hc.Handler().DownloadByAddressOrBucketName(r) + require.Contains(t, string(r.Response.Body()), "Index of s3://bucket/dummy") + }) + + t.Run("native", func(t *testing.T) { + hc, cnrID := prepareHandlerAndBucket(t) + + obj1ID := oidtest.ID() + obj1 := object.New() + obj1.SetID(obj1ID) + obj1.SetPayload([]byte("obj1")) + obj1.SetAttributes(prepareObjectAttributes(object.AttributeFilePath, "prefix/obj1")) + hc.frostfs.objects[cnrID.String()+"/"+obj1ID.String()] = obj1 + + r := prepareGetRequest(ctx, cnrID.EncodeToString(), "prefix/") + hc.Handler().DownloadByAddressOrBucketName(r) + require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode()) + + r = prepareGetRequest(ctx, cnrID.EncodeToString(), "prefix") + hc.Handler().DownloadByAddressOrBucketName(r) + require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode()) + + hc.cfg.indexEnabled = true + + r = prepareGetRequest(ctx, cnrID.EncodeToString(), "prefix") + hc.Handler().DownloadByAddressOrBucketName(r) + require.Contains(t, string(r.Response.Body()), "Index of frostfs://"+cnrID.String()+"/prefix") + require.Contains(t, string(r.Response.Body()), obj1ID.String()) + + r = prepareGetRequest(ctx, cnrID.EncodeToString(), "prefix/") + hc.Handler().DownloadByAddressOrBucketName(r) + require.Contains(t, string(r.Response.Body()), "Index of frostfs://"+cnrID.String()+"/prefix") + require.Contains(t, string(r.Response.Body()), obj1ID.String()) + + r = prepareGetRequest(ctx, cnrID.EncodeToString(), "dummy") + hc.Handler().DownloadByAddressOrBucketName(r) + require.Contains(t, string(r.Response.Body()), "Index of frostfs://"+cnrID.String()+"/dummy") + }) } func prepareUploadRequest(ctx context.Context, bucket, content string) (*fasthttp.RequestCtx, error) { diff --git 
a/internal/handler/head.go b/internal/handler/head.go index 11d45fc..508dc37 100644 --- a/internal/handler/head.go +++ b/internal/handler/head.go @@ -5,11 +5,12 @@ import ( "errors" "io" "net/http" + "net/url" "strconv" "time" - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/layer" "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs" + "git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree" "git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" @@ -128,6 +129,12 @@ func (h *Handler) HeadByAddressOrBucketName(req *fasthttp.RequestCtx) { zap.String("oid", oidParam), )) + path, err := url.QueryUnescape(oidParam) + if err != nil { + h.logAndSendError(ctx, req, logs.FailedToUnescapePath, err) + return + } + bktInfo, err := h.getBucketInfo(ctx, cidParam) if err != nil { h.logAndSendError(ctx, req, logs.FailedToGetBucketInfo, err) @@ -135,18 +142,38 @@ func (h *Handler) HeadByAddressOrBucketName(req *fasthttp.RequestCtx) { } checkS3Err := h.tree.CheckSettingsNodeExists(ctx, bktInfo) - if checkS3Err != nil && !errors.Is(checkS3Err, layer.ErrNodeNotFound) { + if checkS3Err != nil && !errors.Is(checkS3Err, tree.ErrNodeNotFound) { h.logAndSendError(ctx, req, logs.FailedToCheckIfSettingsNodeExist, checkS3Err) return } - var objID oid.ID + prm := MiddlewareParam{ + Context: ctx, + Request: req, + BktInfo: bktInfo, + Path: path, + } + + indexPageEnabled := h.config.IndexPageEnabled() + if checkS3Err == nil { - h.byS3Path(ctx, req, bktInfo.CID, oidParam, h.headObject) - } else if err = objID.DecodeString(oidParam); err == nil { - h.byNativeAddress(ctx, req, bktInfo.CID, objID, h.headObject) + run(prm, h.errorMiddleware(logs.ObjectNotFound, tree.ErrNodeNotFound), + Middleware{Func: h.byS3PathMiddleware(h.headObject, noopFormer), Enabled: true}, + Middleware{Func: h.byS3PathMiddleware(h.headObject, indexFormer), Enabled: indexPageEnabled}, + ) } else { - h.logAndSendError(ctx, req, logs.InvalidOIDParam, err) + slashFallbackEnabled := h.config.EnableFilepathSlashFallback() + fileNameFallbackEnabled := h.config.EnableFilepathFallback() + + run(prm, h.errorMiddleware(logs.ObjectNotFound, ErrObjectNotFound), + Middleware{Func: h.byAddressMiddleware(h.headObject), Enabled: true}, + Middleware{Func: h.byAttributeSearchMiddleware(h.headObject, object.AttributeFilePath, noopFormer), Enabled: true}, + Middleware{Func: h.byAttributeSearchMiddleware(h.headObject, object.AttributeFilePath, reverseLeadingSlash), Enabled: slashFallbackEnabled}, + Middleware{Func: h.byAttributeSearchMiddleware(h.headObject, object.AttributeFileName, noopFormer), Enabled: fileNameFallbackEnabled}, + Middleware{Func: h.byAttributeSearchMiddleware(h.headObject, object.AttributeFileName, reverseLeadingSlash), Enabled: fileNameFallbackEnabled && slashFallbackEnabled}, + Middleware{Func: h.byAttributeSearchMiddleware(h.headObject, object.AttributeFilePath, indexFormer), Enabled: indexPageEnabled}, + Middleware{Func: h.byAttributeSearchMiddleware(h.headObject, object.AttributeFileName, indexFormer), Enabled: fileNameFallbackEnabled && indexPageEnabled}, + ) } } @@ -157,3 +184,10 @@ func (h *Handler) HeadByAttribute(req *fasthttp.RequestCtx) { h.byAttribute(ctx, req, h.headObject) } + +func (h *Handler) errorMiddleware(msg string, err error) MiddlewareFunc { + return func(prm MiddlewareParam) bool { + h.logAndSendError(prm.Context, prm.Request, msg, err) + return false + } +} diff --git 
a/internal/handler/tree_service_client_mock_test.go b/internal/handler/tree_service_client_mock_test.go new file mode 100644 index 0000000..f3af52a --- /dev/null +++ b/internal/handler/tree_service_client_mock_test.go @@ -0,0 +1,141 @@ +package handler + +import ( + "context" + "errors" + "strings" + + "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data" + "git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree" +) + +type nodeMeta struct { + key string + value []byte +} + +func (m nodeMeta) GetKey() string { + return m.key +} + +func (m nodeMeta) GetValue() []byte { + return m.value +} + +type nodeResponse struct { + meta []nodeMeta + nodeID uint64 + parentID uint64 + timestamp uint64 +} + +func (n nodeResponse) GetNodeID() []uint64 { + return []uint64{n.nodeID} +} + +func (n nodeResponse) GetParentID() []uint64 { + return []uint64{n.parentID} +} + +func (n nodeResponse) GetTimestamp() []uint64 { + return []uint64{n.timestamp} +} + +func (n nodeResponse) GetMeta() []tree.Meta { + res := make([]tree.Meta, len(n.meta)) + for i, value := range n.meta { + res[i] = value + } + return res +} + +type containerInfo struct { + trees map[string]map[string]nodeResponse +} + +type treeServiceClientMock struct { + containers map[string]containerInfo +} + +func newTreeServiceClientMock() *treeServiceClientMock { + return &treeServiceClientMock{ + containers: make(map[string]containerInfo), + } +} + +func (t *treeServiceClientMock) GetNodes(_ context.Context, p *tree.GetNodesParams) ([]tree.NodeResponse, error) { + cnr, ok := t.containers[p.CnrID.EncodeToString()] + if !ok { + return nil, tree.ErrNodeNotFound + } + + tr, ok := cnr.trees[p.TreeID] + if !ok { + return nil, tree.ErrNodeNotFound + } + + node, ok := tr[strings.Join(p.Path, "/")] + if !ok { + return nil, tree.ErrNodeNotFound + } + + return []tree.NodeResponse{node}, nil +} + +func (t *treeServiceClientMock) GetSubTree(_ context.Context, bktInfo *data.BucketInfo, treeID string, rootID []uint64, depth uint32, _ bool) ([]tree.NodeResponse, error) { + cnr, ok := t.containers[bktInfo.CID.EncodeToString()] + if !ok { + return nil, tree.ErrNodeNotFound + } + + tr, ok := cnr.trees[treeID] + if !ok { + return nil, tree.ErrNodeNotFound + } + + if len(rootID) != 1 { + return nil, errors.New("invalid rootID") + } + + var root *nodeResponse + for _, v := range tr { + if v.nodeID == rootID[0] { + root = &v + break + } + } + + if root == nil { + return nil, tree.ErrNodeNotFound + } + + var res []nodeResponse + if depth == 0 { + for _, v := range tr { + res = append(res, v) + } + } else { + res = append(res, *root) + depthIndex := 0 + for i := uint32(0); i < depth-1; i++ { + childrenCount := 0 + for _, v := range tr { + for j := range res[depthIndex:] { + if v.parentID == res[j].nodeID { + res = append(res, v) + childrenCount++ + break + } + } + } + depthIndex = len(res) - childrenCount + } + } + + res2 := make([]tree.NodeResponse, len(res)) + for i := range res { + res2[i] = res[i] + } + + return res2, nil +} diff --git a/internal/handler/utils.go b/internal/handler/utils.go index 8cb070d..c17b878 100644 --- a/internal/handler/utils.go +++ b/internal/handler/utils.go @@ -6,9 +6,9 @@ import ( "fmt" "strings" - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/layer" "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens" + "git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree" "git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" cid 
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" @@ -93,7 +93,7 @@ func formErrorResponse(err error) (string, int) { switch { case errors.Is(err, ErrAccessDenied): return fmt.Sprintf("Storage Access Denied:\n%v", err), fasthttp.StatusForbidden - case errors.Is(err, layer.ErrNodeAccessDenied): + case errors.Is(err, tree.ErrNodeAccessDenied): return fmt.Sprintf("Tree Access Denied:\n%v", err), fasthttp.StatusForbidden case errors.Is(err, ErrQuotaLimitReached): return fmt.Sprintf("Quota Reached:\n%v", err), fasthttp.StatusConflict @@ -101,7 +101,7 @@ func formErrorResponse(err error) (string, int) { return fmt.Sprintf("Container Not Found:\n%v", err), fasthttp.StatusNotFound case errors.Is(err, ErrObjectNotFound): return fmt.Sprintf("Object Not Found:\n%v", err), fasthttp.StatusNotFound - case errors.Is(err, layer.ErrNodeNotFound): + case errors.Is(err, tree.ErrNodeNotFound): return fmt.Sprintf("Tree Node Not Found:\n%v", err), fasthttp.StatusNotFound case errors.Is(err, ErrGatewayTimeout): return fmt.Sprintf("Gateway Timeout:\n%v", err), fasthttp.StatusGatewayTimeout diff --git a/internal/layer/tree_service.go b/internal/layer/tree_service.go deleted file mode 100644 index ff80543..0000000 --- a/internal/layer/tree_service.go +++ /dev/null @@ -1,24 +0,0 @@ -package layer - -import ( - "context" - "errors" - - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" -) - -// TreeService provide interface to interact with tree service using s3 data models. -type TreeService interface { - GetLatestVersion(ctx context.Context, cnrID *cid.ID, objectName string) (*data.NodeVersion, error) - GetSubTreeByPrefix(ctx context.Context, bktInfo *data.BucketInfo, prefix string, latestOnly bool) ([]data.NodeInfo, string, error) - CheckSettingsNodeExists(ctx context.Context, bktInfo *data.BucketInfo) error -} - -var ( - // ErrNodeNotFound is returned from Tree service in case of not found error. - ErrNodeNotFound = errors.New("not found") - - // ErrNodeAccessDenied is returned from Tree service in case of access denied error. - ErrNodeAccessDenied = errors.New("access denied") -) diff --git a/internal/logs/logs.go b/internal/logs/logs.go index 3e9b931..86921dd 100644 --- a/internal/logs/logs.go +++ b/internal/logs/logs.go @@ -73,6 +73,9 @@ const ( FailedToReadIndexPageTemplate = "failed to read index page template" SetCustomIndexPageTemplate = "set custom index page template" CouldNotFetchCORSContainerInfo = "couldn't fetch CORS container info" + InitRPCClientFailed = "init rpc client faileds" + InitContainerContractFailed = "init container contract failed" + FailedToResolveContractHash = "failed to resolve contract hash" ) // Log messages with the "datapath" tag. 
@@ -107,8 +110,8 @@ const ( IteratingOverSelectedObjectsFailed = "iterating over selected objects failed" FailedToGetBucketInfo = "could not get bucket info" FailedToSubmitTaskToPool = "failed to submit task to pool" - ObjectWasDeleted = "object was deleted" - FailedToGetLatestVersionOfObject = "failed to get latest version of object" + IndexWasDeleted = "index was deleted" + FailedToGetLatestVersionOfIndexObject = "failed to get latest version of index object" FailedToCheckIfSettingsNodeExist = "failed to check if settings node exists" FailedToListObjects = "failed to list objects" FailedToParseTemplate = "failed to parse template" @@ -118,8 +121,7 @@ const ( FailedToGetObject = "failed to get object" FailedToGetObjectPayload = "failed to get object payload" FailedToFindObjectByAttribute = "failed to get find object by attribute" - FailedToUnescapeOIDParam = "failed to unescape oid param" - InvalidOIDParam = "invalid oid param" + FailedToUnescapePath = "failed to unescape path" CouldNotGetCORSConfiguration = "could not get cors configuration" EmptyOriginRequestHeader = "empty Origin request header" EmptyAccessControlRequestMethodHeader = "empty Access-Control-Request-Method request header" @@ -129,10 +131,9 @@ const ( // Log messages with the "external_storage" tag. const ( - ObjectNotFound = "object not found" - ReadObjectListFailed = "read object list failed" - ObjectNotFoundByFilePathTrySearchByFileName = "object not found by filePath attribute, try search by fileName" - ObjectUploaded = "object uploaded" + ObjectNotFound = "object not found" + ReadObjectListFailed = "read object list failed" + ObjectUploaded = "object uploaded" ) // Log messages with the "external_storage_tree" tag. diff --git a/internal/service/contracts/container/client.go b/internal/service/contracts/container/client.go new file mode 100644 index 0000000..09455be --- /dev/null +++ b/internal/service/contracts/container/client.go @@ -0,0 +1,73 @@ +package container + +import ( + "fmt" + "strings" + + containercontract "git.frostfs.info/TrueCloudLab/frostfs-contract/container" + containerclient "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/container" + "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" + cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" + "github.com/nspcc-dev/neo-go/pkg/crypto/keys" + "github.com/nspcc-dev/neo-go/pkg/rpcclient" + "github.com/nspcc-dev/neo-go/pkg/rpcclient/actor" + "github.com/nspcc-dev/neo-go/pkg/util" + "github.com/nspcc-dev/neo-go/pkg/wallet" +) + +type Client struct { + contract *containerclient.Contract +} + +type Config struct { + ContractHash util.Uint160 + Key *keys.PrivateKey + RPCClient *rpcclient.Client +} + +func New(cfg Config) (*Client, error) { + var err error + key := cfg.Key + if key == nil { + if key, err = keys.NewPrivateKey(); err != nil { + return nil, fmt.Errorf("generate anon private key for container contract: %w", err) + } + } + acc := wallet.NewAccountFromPrivateKey(key) + + act, err := actor.NewSimple(cfg.RPCClient, acc) + if err != nil { + return nil, fmt.Errorf("create new actor: %w", err) + } + + return &Client{ + contract: containerclient.New(act, cfg.ContractHash), + }, nil +} + +func (c *Client) GetContainerByID(cnrID cid.ID) (*container.Container, error) { + items, err := c.contract.Get(cnrID[:]) + if err != nil { + if strings.Contains(err.Error(), containercontract.NotFoundError) { + return nil, fmt.Errorf("%w: %s", handler.ErrContainerNotFound, err) + 
} + return nil, err + } + + if len(items) != 4 { + return nil, fmt.Errorf("unexpected container stack item count: %d", len(items)) + } + + cnrBytes, err := items[0].TryBytes() + if err != nil { + return nil, fmt.Errorf("could not get byte array of container: %w", err) + } + + var cnr container.Container + if err = cnr.Unmarshal(cnrBytes); err != nil { + return nil, fmt.Errorf("can't unmarshal container: %w", err) + } + + return &cnr, nil +} diff --git a/internal/service/contracts/util/util.go b/internal/service/contracts/util/util.go new file mode 100644 index 0000000..444504b --- /dev/null +++ b/internal/service/contracts/util/util.go @@ -0,0 +1,34 @@ +package util + +import ( + "fmt" + "strings" + + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ns" + "github.com/nspcc-dev/neo-go/pkg/util" +) + +// ResolveContractHash determines the contract hash by resolving an NNS name. +func ResolveContractHash(contractHash, rpcAddress string) (util.Uint160, error) { + if hash, err := util.Uint160DecodeStringLE(contractHash); err == nil { + return hash, nil + } + + splitName := strings.Split(contractHash, ".") + if len(splitName) != 2 { + return util.Uint160{}, fmt.Errorf("invalid contract name: '%s'", contractHash) + } + + var domain container.Domain + domain.SetName(splitName[0]) + domain.SetZone(splitName[1]) + + var nns ns.NNS + if err := nns.Dial(rpcAddress); err != nil { + return util.Uint160{}, fmt.Errorf("dial nns %s: %w", rpcAddress, err) + } + defer nns.Close() + + return nns.ResolveContractHash(domain) +} diff --git a/internal/templates/index.gotmpl b/internal/templates/index.gotmpl index b14cc06..4c03404 100644 --- a/internal/templates/index.gotmpl +++ b/internal/templates/index.gotmpl @@ -1,11 +1,9 @@ {{$container := .Container}} -{{ $prefix := trimPrefix .Prefix }} - Index of {{.Protocol}}://{{$container}} - /{{if $prefix}}/{{$prefix}}/{{end}} + Index of {{.Protocol}}://{{$container}}/{{.Prefix}} -

Index of {{.Protocol}}://{{$container}}/{{if $prefix}}{{$prefix}}/{{end}}

+

Index of {{.Protocol}}://{{$container}}/{{.Prefix}}

{{ if .HasErrors }}
Errors occurred while processing the request. Perhaps some objects are missing @@ -57,11 +55,11 @@ - {{ $trimmedPrefix := trimPrefix $prefix }} - {{if $trimmedPrefix }} + {{ $parentPrefix := getParent .Prefix }} + {{if $parentPrefix }} - ⮐.. + ⮐.. diff --git a/tree/tree.go b/tree/tree.go index 2ee9356..d99e24b 100644 --- a/tree/tree.go +++ b/tree/tree.go @@ -7,7 +7,6 @@ import ( "strings" "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data" - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/layer" "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils" "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" @@ -52,10 +51,10 @@ type ( var ( // ErrNodeNotFound is returned from ServiceClient in case of not found error. - ErrNodeNotFound = layer.ErrNodeNotFound + ErrNodeNotFound = errors.New("not found") // ErrNodeAccessDenied is returned from ServiceClient service in case of access denied error. - ErrNodeAccessDenied = layer.ErrNodeAccessDenied + ErrNodeAccessDenied = errors.New("access denied") ) const ( @@ -259,7 +258,7 @@ func (c *Tree) getSystemNode(ctx context.Context, bktInfo *data.BucketInfo, name nodes = filterMultipartNodes(nodes) if len(nodes) == 0 { - return nil, layer.ErrNodeNotFound + return nil, ErrNodeNotFound } if len(nodes) != 1 { c.reqLogger(ctx).Warn(logs.FoundSeveralSystemTreeNodes, zap.String("name", name), logs.TagField(logs.TagExternalStorageTree)) @@ -303,7 +302,7 @@ func getLatestVersionNode(nodes []NodeResponse) (NodeResponse, error) { } if targetIndexNode == -1 { - return nil, fmt.Errorf("latest version: %w", layer.ErrNodeNotFound) + return nil, fmt.Errorf("latest version: %w", ErrNodeNotFound) } return nodes[targetIndexNode], nil @@ -324,20 +323,23 @@ func pathFromName(objectName string) []string { return strings.Split(objectName, separator) } -func (c *Tree) GetSubTreeByPrefix(ctx context.Context, bktInfo *data.BucketInfo, prefix string, latestOnly bool) ([]data.NodeInfo, string, error) { +func (c *Tree) GetSubTreeByPrefix(ctx context.Context, bktInfo *data.BucketInfo, prefix string, latestOnly bool) ([]data.NodeInfo, error) { ctx, span := tracing.StartSpanFromContext(ctx, "tree.GetSubTreeByPrefix") defer span.End() - rootID, tailPrefix, err := c.determinePrefixNode(ctx, bktInfo, versionTree, prefix) + rootID, err := c.getPrefixNodeID(ctx, bktInfo, versionTree, strings.Split(prefix, separator)) if err != nil { - return nil, "", err + if errors.Is(err, ErrNodeNotFound) { + return nil, nil + } + return nil, err } subTree, err := c.service.GetSubTree(ctx, bktInfo, versionTree, rootID, 2, false) if err != nil { if errors.Is(err, ErrNodeNotFound) { - return nil, "", nil + return nil, nil } - return nil, "", err + return nil, err } nodesMap := make(map[string][]NodeResponse, len(subTree)) @@ -347,10 +349,6 @@ func (c *Tree) GetSubTreeByPrefix(ctx context.Context, bktInfo *data.BucketInfo, } fileName := GetFilename(node) - if !strings.HasPrefix(fileName, tailPrefix) { - continue - } - nodes := nodesMap[fileName] // Add all nodes if flag latestOnly is false. @@ -374,7 +372,7 @@ func (c *Tree) GetSubTreeByPrefix(ctx context.Context, bktInfo *data.BucketInfo, result = append(result, nodeResponseToNodeInfo(nodes)...) 
} - return result, strings.TrimSuffix(prefix, tailPrefix), nil + return result, nil } func nodeResponseToNodeInfo(nodes []NodeResponse) []data.NodeInfo { @@ -386,22 +384,6 @@ func nodeResponseToNodeInfo(nodes []NodeResponse) []data.NodeInfo { return nodesInfo } -func (c *Tree) determinePrefixNode(ctx context.Context, bktInfo *data.BucketInfo, treeID, prefix string) ([]uint64, string, error) { - rootID := []uint64{0} - path := strings.Split(prefix, separator) - tailPrefix := path[len(path)-1] - - if len(path) > 1 { - var err error - rootID, err = c.getPrefixNodeID(ctx, bktInfo, treeID, path[:len(path)-1]) - if err != nil { - return nil, "", err - } - } - - return rootID, tailPrefix, nil -} - func (c *Tree) getPrefixNodeID(ctx context.Context, bktInfo *data.BucketInfo, treeID string, prefixPath []string) ([]uint64, error) { p := &GetNodesParams{ CnrID: bktInfo.CID, @@ -424,7 +406,7 @@ func (c *Tree) getPrefixNodeID(ctx context.Context, bktInfo *data.BucketInfo, tr } if len(intermediateNodes) == 0 { - return nil, layer.ErrNodeNotFound + return nil, ErrNodeNotFound } return intermediateNodes, nil
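A short illustrative sketch (not part of the patch) of the error-sentinel convention after the internal/layer package removal: tree.ErrNodeNotFound and tree.ErrNodeAccessDenied are now declared directly in the tree package, and handler code matches them with errors.Is even when they are wrapped. The statusFromTreeErr helper below is hypothetical; the two tree cases mirror the switch in internal/handler/utils.go, while the default is arbitrary for the sketch.

package main

import (
	"errors"
	"fmt"
	"net/http"
)

// Sentinels as now declared in tree/tree.go (previously aliased from internal/layer).
var (
	errNodeNotFound     = errors.New("not found")
	errNodeAccessDenied = errors.New("access denied")
)

// statusFromTreeErr is a hypothetical helper mapping tree sentinels to HTTP statuses.
func statusFromTreeErr(err error) int {
	switch {
	case errors.Is(err, errNodeAccessDenied):
		return http.StatusForbidden
	case errors.Is(err, errNodeNotFound):
		return http.StatusNotFound
	default:
		return http.StatusInternalServerError // sketch default, not taken from the patch
	}
}

func main() {
	// Wrapped the same way as in getLatestVersionNode; errors.Is matches through %w.
	err := fmt.Errorf("latest version: %w", errNodeNotFound)
	fmt.Println(statusFromTreeErr(err))                 // 404
	fmt.Println(statusFromTreeErr(errNodeAccessDenied)) // 403
}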