Compare commits

master..v0.31.0

No commits in common. "master" and "v0.31.0" have entirely different histories.

54 changed files with 1938 additions and 2962 deletions

View file

@@ -1,27 +0,0 @@
on:
pull_request:
push:
workflow_dispatch:
jobs:
image:
name: OCI image
runs-on: docker
container: git.frostfs.info/truecloudlab/env:oci-image-builder-bookworm
steps:
- name: Clone git repo
uses: actions/checkout@v3
- name: Build OCI image
run: make image
- name: Push image to OCI registry
run: |
echo "$REGISTRY_PASSWORD" \
| docker login --username truecloudlab --password-stdin git.frostfs.info
make image-push
if: >-
startsWith(github.ref, 'refs/tags/v') &&
(github.event_name == 'workflow_dispatch' || github.event_name == 'push')
env:
REGISTRY_PASSWORD: ${{secrets.FORGEJO_OCI_REGISTRY_PUSH_TOKEN}}

View file

@@ -43,19 +43,3 @@ jobs:
- name: Run tests
run: make test
integration:
name: Integration tests
runs-on: oci-runner
steps:
- uses: actions/checkout@v3
- name: Set up Go
uses: actions/setup-go@v3
with:
go-version: '1.23'
- name: Run integration tests
run: |-
podman-service.sh
make integration-test

View file

@@ -16,7 +16,7 @@ jobs:
- name: Setup Go
uses: actions/setup-go@v3
with:
go-version: '1.22.12'
go-version: '1.22'
- name: Install govulncheck
run: go install golang.org/x/vuln/cmd/govulncheck@latest

View file

@@ -4,38 +4,6 @@ This document outlines major changes between releases.
## [Unreleased]
### Added
- Add handling quota limit reached error (#187)
- Add slash clipping for FileName attribute (#174)
## [0.32.3] - 2025-02-05
### Added
- Add slash clipping for FileName attribute (#174)
## [0.32.2] - 2025-02-03
### Fixed
- Possible memory leak in gRPC client (#202)
## [0.32.1] - 2025-01-27
### Fixed
- SIGHUP panic (#198)
## [0.32.0] - Khumbu - 2024-12-20
### Fixed
- Getting S3 object with FrostFS Object ID-like key (#166)
- Ignore delete marked objects in versioned bucket in index page (#181)
### Added
- Metric of dropped logs by log sampler (#150)
- Fallback FileName attribute search during FilePath attribute search (#174)
### Changed
- Updated tree service pool without api-go dependency (#178)
## [0.31.0] - Rongbuk - 2024-11-20
### Fixed
@@ -202,8 +170,4 @@ To see CHANGELOG for older versions, refer to https://github.com/nspcc-dev/neofs
[0.30.2]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.30.1...v0.30.2
[0.30.3]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.30.2...v0.30.3
[0.31.0]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.30.3...v0.31.0
[0.32.0]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.31.0...v0.32.0
[0.32.1]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.32.0...v0.32.1
[0.32.2]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.32.1...v0.32.2
[0.32.3]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.32.2...v0.32.3
[Unreleased]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.32.3...master
[Unreleased]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.31.0...master

View file

@@ -1,3 +1 @@
.* @TrueCloudLab/storage-services-developers @TrueCloudLab/storage-services-committers
.forgejo/.* @potyarkin
Makefile @potyarkin
.* @alexvanin @dkirillov

View file

@@ -7,7 +7,7 @@ LINT_VERSION ?= 1.60.3
TRUECLOUDLAB_LINT_VERSION ?= 0.0.6
BUILD ?= $(shell date -u --iso=seconds)
HUB_IMAGE ?= git.frostfs.info/truecloudlab/frostfs-http-gw
HUB_IMAGE ?= truecloudlab/frostfs-http-gw
HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')"
METRICS_DUMP_OUT ?= ./metrics-dump.json

README.md
View file

@@ -38,7 +38,7 @@ version Show current version
```
Or you can also use a [Docker
image](https://git.frostfs.info/TrueCloudLab/-/packages/container/frostfs-http-gw) provided for the released
image](https://hub.docker.com/r/truecloudlab/frostfs-http-gw) provided for the released
(and occasionally unreleased) versions of the gateway (`:latest` points to the
latest stable release).
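
For example, pulling the latest stable image (a sketch; use the registry path that matches your gateway version, as the two alternatives above show):

```shell
$ docker pull truecloudlab/frostfs-http-gw:latest
# or, for versions published to the Forgejo registry:
$ docker pull git.frostfs.info/truecloudlab/frostfs-http-gw:latest
```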
@@ -217,8 +217,41 @@ Also, in case of downloading, you need to have a file inside a container.
### NNS
In all download/upload routes, you can use the container name instead of its ID (`$CID`).
Read more about it in [docs/nns.md](./docs/nns.md).
Steps to start using name resolving:
1. Enable NNS resolving in the config (`rpc_endpoint` must be a valid Neo RPC node; see [configs](./config) for other examples):
```yaml
rpc_endpoint: http://morph-chain.frostfs.devenv:30333
resolve_order:
- nns
```
2. Make sure your container is registered in the NNS contract. If you use [frostfs-dev-env](https://git.frostfs.info/TrueCloudLab/frostfs-dev-env),
you can check whether your container (e.g. one named `container-name`) is registered in NNS:
```shell
$ curl -s --data '{"id":1,"jsonrpc":"2.0","method":"getcontractstate","params":[1]}' \
http://morph-chain.frostfs.devenv:30333 | jq -r '.result.hash'
0x8e6c3cd4b976b28e84a3788f6ea9e2676c15d667
$ docker exec -it morph_chain neo-go \
contract testinvokefunction \
-r http://morph-chain.frostfs.devenv:30333 0x8e6c3cd4b976b28e84a3788f6ea9e2676c15d667 \
resolve string:container-name.container int:16 \
| jq -r '.stack[0].value | if type=="array" then .[0].value else . end' \
| base64 -d && echo
7f3vvkw4iTiS5ZZbu5BQXEmJtETWbi3uUjLNaSs29xrL
```
3. Use the container name instead of its `$CID`. For example:
```shell
$ curl http://localhost:8082/get_by_attribute/container-name/FileName/object-name
```
#### Create a container
@@ -429,7 +462,109 @@ object ID, like this:
#### Authentication
Read more about request authentication in [docs/authentication.md](./docs/authentication.md).
You can always upload files to public containers (open for anyone to put
objects into), but for restricted containers you need to explicitly allow PUT
operations for a request signed with your HTTP Gateway keys.
If you don't want to manage the gateway's secret keys and adjust policies whenever
the gateway configuration changes (new gate, key rotation, etc.), or you plan to use
public services, there is an option to let your application backend (or you)
issue Bearer Tokens and pass them from the client via the gateway down to the FrostFS
level to grant access.
A FrostFS Bearer Token is essentially a policy signed by the container owner (refer to the FrostFS
documentation for more details). There are two options for passing one to the gateway, both shown
in the sketch after this list:
* "Authorization" header with "Bearer" type and base64-encoded token in
credentials field
* "Bearer" cookie with base64-encoded token contents
For example, suppose you have a mobile application frontend with a backend part storing
data in FrostFS. When a user authorizes in the mobile app, the backend issues a FrostFS
Bearer token and provides it to the frontend. Then, the mobile app may generate
some data and upload it via any available FrostFS HTTP Gateway by adding
the corresponding header to the upload request. Accessing policy-protected data
works the same way.
##### Example
In order to generate a bearer token, you need a wallet (which will be used to sign the token).
1. Suppose you have a container with a private policy for your wallet key:
```
$ frostfs-cli container create -r <endpoint> --wallet <wallet> --policy <policy> --basic-acl 0 --await
CID: 9dfzyvq82JnFqp5svxcREf2iy6XNuifYcJPusEDnGK9Z
$ frostfs-cli ape-manager add -r <endpoint> --wallet <wallet> \
--target-type container --target-name 9dfzyvq82JnFqp5svxcREf2iy6XNuifYcJPusEDnGK9Z \
--rule "allow Object.* RequestCondition:"\$Actor:publicKey"=03b09baabff3f6107c7e9acb8721a6fc5618d45b50247a314d82e548702cce8cd5 *" \
--chain-id <chainID>
```
2. Form a Bearer token (here 10000 is the expiration epoch) that allows the HTTP Gateway
request to be impersonated as a wallet-signed request, and save it to **bearer.json**:
```
{
"body": {
"allowImpersonate": true,
"lifetime": {
"exp": "10000",
"nbf": "0",
"iat": "0"
}
},
"signature": null
}
```
3. Sign it with the wallet:
```
$ frostfs-cli util sign bearer-token --from bearer.json --to signed.json -w <wallet>
```
4. Encode it to base64 for use in the header:
```
$ base64 -w 0 signed.json
# output: Ck4KKgoECAIQBhIiCiCZGdlbN7DPGPMg9rsWqV+p2XdMzUqknRiexewSFp8kmBIbChk17MUri6OJ0X5ftsHzy7NERDNFB4C92PcaGgMIkE4SZgohAxpsb7vfAso1F0X6hrm6WpRS14WsT3/Ct1SMoqRsT89KEkEEGxKi8GjKSf52YqhppgaOTQHbUsL3jn7SHLqS3ndAQ7NtAATnmRHleZw2V2xRRSRBQdjDC05KK83LhdSax72Fsw==
```
After that, the Bearer token can be used:
```
$ curl -F 'file=@cat.jpeg;filename=cat.jpeg' -H "Authorization: Bearer Ck4KKgoECAIQBhIiCiCZGdlbN7DPGPMg9rsWqV+p2XdMzUqknRiexewSFp8kmBIbChk17MUri6OJ0X5ftsHzy7NERDNFB4C92PcaGgMIkE4SZgohAxpsb7vfAso1F0X6hrm6WpRS14WsT3/Ct1SMoqRsT89KEkEEGxKi8GjKSf52YqhppgaOTQHbUsL3jn7SHLqS3ndAQ7NtAATnmRHleZw2V2xRRSRBQdjDC05KK83LhdSax72Fsw==" \
http://localhost:8082/upload/BJeErH9MWmf52VsR1mLWKkgF3pRm3FkubYxM7TZkBP4K
# output:
# {
# "object_id": "DhfES9nVrFksxGDD2jQLunGADfrXExxNwqXbDafyBn9X",
# "container_id": "BJeErH9MWmf52VsR1mLWKkgF3pRm3FkubYxM7TZkBP4K"
# }
```
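
The IDs from the response can then be used to read the object back through the gateway (a sketch reusing the same token):

```shell
$ curl -H "Authorization: Bearer <base64 token from above>" \
    http://localhost:8082/get/BJeErH9MWmf52VsR1mLWKkgF3pRm3FkubYxM7TZkBP4K/DhfES9nVrFksxGDD2jQLunGADfrXExxNwqXbDafyBn9X
```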
##### Note: Bearer Token owner
You can specify the exact key (the gateway wallet address) that is allowed to use the Bearer Token.
To do this, encode the wallet address in base64 format:
```
$ echo 'NhVtreTTCoqsMQV5Wp55fqnriiUCpEaKm3' | base58 --decode | base64
# output: NezFK4ujidF+X7bB88uzREQzRQeAvdj3Gg==
```
Then specify this value in the Bearer Token JSON:
```
{
"body": {
"ownerID": {
"value": "NezFK4ujidF+X7bB88uzREQzRQeAvdj3Gg=="
},
...
```
##### Note: Policy override
Instead of impersonation, you can define a set of policies that will be applied
to the request sender. This allows restricting access to specific operations and
specific objects without giving full impersonation control to the token user.
### Metrics and Pprof

View file

@@ -1 +1 @@
v0.32.3
v0.31.0

View file

@@ -16,6 +16,7 @@ import (
"syscall"
"time"
v2container "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/cache"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler/middleware"
@@ -25,11 +26,11 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/templates"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/metrics"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/resolver"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/response"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
v2container "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
treepool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
@@ -44,44 +45,40 @@ import (
"github.com/valyala/fasthttp"
"go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
"golang.org/x/exp/slices"
)
type (
app struct {
ctx context.Context
log *zap.Logger
pool *pool.Pool
treePool *treepool.Pool
key *keys.PrivateKey
owner *user.ID
cfg *appCfg
webServer *fasthttp.Server
webDone chan struct{}
resolver *resolver.ContainerResolver
metrics *gateMetrics
services []*metrics.Service
settings *appSettings
loggerSettings *loggerSettings
bucketCache *cache.BucketCache
ctx context.Context
log *zap.Logger
logLevel zap.AtomicLevel
pool *pool.Pool
treePool *treepool.Pool
key *keys.PrivateKey
owner *user.ID
cfg *viper.Viper
webServer *fasthttp.Server
webDone chan struct{}
resolver *resolver.ContainerResolver
metrics *gateMetrics
services []*metrics.Service
settings *appSettings
servers []Server
unbindServers []ServerInfo
mu sync.RWMutex
}
loggerSettings struct {
mu sync.RWMutex
appMetrics *metrics.GateMetrics
}
// App is an interface for the main gateway function.
App interface {
Wait()
Serve()
}
// Option is an application option.
Option func(a *app)
gateMetrics struct {
logger *zap.Logger
provider *metrics.GateMetrics
@@ -94,33 +91,22 @@ type (
reconnectInterval time.Duration
dialerSource *internalnet.DialerSource
workerPoolSize int
logLevelConfig *logLevelConfig
mu sync.RWMutex
defaultTimestamp bool
archiveCompression bool
clientCut bool
returnIndexPage bool
indexPageTemplate string
bufferMaxSizeForPut uint64
namespaceHeader string
defaultNamespaces []string
corsAllowOrigin string
corsAllowMethods []string
corsAllowHeaders []string
corsExposeHeaders []string
corsAllowCredentials bool
corsMaxAge int
enableFilepathFallback bool
}
tagsConfig struct {
tagLogs sync.Map
}
logLevelConfig struct {
logLevel zap.AtomicLevel
tagsConfig *tagsConfig
mu sync.RWMutex
defaultTimestamp bool
zipCompression bool
clientCut bool
returnIndexPage bool
indexPageTemplate string
bufferMaxSizeForPut uint64
namespaceHeader string
defaultNamespaces []string
corsAllowOrigin string
corsAllowMethods []string
corsAllowHeaders []string
corsExposeHeaders []string
corsAllowCredentials bool
corsMaxAge int
}
CORS struct {
@@ -133,114 +119,55 @@ type (
}
)
func newLogLevel(v *viper.Viper) zap.AtomicLevel {
ll, err := getLogLevel(v)
if err != nil {
panic(err.Error())
}
atomicLogLevel := zap.NewAtomicLevel()
atomicLogLevel.SetLevel(ll)
return atomicLogLevel
}
func newTagsConfig(v *viper.Viper, ll zapcore.Level) *tagsConfig {
var t tagsConfig
if err := t.update(v, ll); err != nil {
// panic here is analogue of the similar panic during common log level initialization.
panic(err.Error())
}
return &t
}
func newLogLevelConfig(lvl zap.AtomicLevel, tagsConfig *tagsConfig) *logLevelConfig {
return &logLevelConfig{
logLevel: lvl,
tagsConfig: tagsConfig,
}
}
func (l *logLevelConfig) update(cfg *viper.Viper, log *zap.Logger) {
if lvl, err := getLogLevel(cfg); err != nil {
log.Warn(logs.LogLevelWontBeUpdated, zap.Error(err), logs.TagField(logs.TagApp))
} else {
l.logLevel.SetLevel(lvl)
}
if err := l.tagsConfig.update(cfg, l.logLevel.Level()); err != nil {
log.Warn(logs.TagsLogConfigWontBeUpdated, zap.Error(err), logs.TagField(logs.TagApp))
}
}
func (t *tagsConfig) LevelEnabled(tag string, tgtLevel zapcore.Level) bool {
lvl, ok := t.tagLogs.Load(tag)
if !ok {
return false
}
return lvl.(zapcore.Level).Enabled(tgtLevel)
}
func (t *tagsConfig) update(cfg *viper.Viper, ll zapcore.Level) error {
tags, err := fetchLogTagsConfig(cfg, ll)
if err != nil {
return err
}
t.tagLogs.Range(func(key, value any) bool {
k := key.(string)
v := value.(zapcore.Level)
if lvl, ok := tags[k]; ok {
if lvl != v {
t.tagLogs.Store(key, lvl)
}
} else {
t.tagLogs.Delete(key)
delete(tags, k)
// WithLogger returns Option to set a specific logger.
func WithLogger(l *zap.Logger, lvl zap.AtomicLevel) Option {
return func(a *app) {
if l == nil {
return
}
return true
})
for k, v := range tags {
t.tagLogs.Store(k, v)
a.log = l
a.logLevel = lvl
}
return nil
}
func newApp(ctx context.Context, cfg *appCfg) App {
logSettings := &loggerSettings{}
logLevel := newLogLevel(cfg.config())
tagConfig := newTagsConfig(cfg.config(), logLevel.Level())
logConfig := newLogLevelConfig(logLevel, tagConfig)
log := pickLogger(cfg.config(), logConfig.logLevel, logSettings, tagConfig)
// WithConfig returns Option to use specific Viper configuration.
func WithConfig(c *viper.Viper) Option {
return func(a *app) {
if c == nil {
return
}
a.cfg = c
}
}
func newApp(ctx context.Context, opt ...Option) App {
a := &app{
ctx: ctx,
log: log.logger,
cfg: cfg,
loggerSettings: logSettings,
webServer: new(fasthttp.Server),
webDone: make(chan struct{}),
bucketCache: cache.NewBucketCache(getBucketCacheOptions(cfg.config(), log.logger), cfg.config().GetBool(cfgFeaturesTreePoolNetmapSupport)),
ctx: ctx,
log: zap.L(),
cfg: viper.GetViper(),
webServer: new(fasthttp.Server),
webDone: make(chan struct{}),
}
for i := range opt {
opt[i](a)
}
a.initAppSettings(logConfig)
a.initAppSettings()
// -- setup FastHTTP server --
a.webServer.Name = "frost-http-gw"
a.webServer.ReadBufferSize = a.config().GetInt(cfgWebReadBufferSize)
a.webServer.WriteBufferSize = a.config().GetInt(cfgWebWriteBufferSize)
a.webServer.ReadTimeout = a.config().GetDuration(cfgWebReadTimeout)
a.webServer.WriteTimeout = a.config().GetDuration(cfgWebWriteTimeout)
a.webServer.ReadBufferSize = a.cfg.GetInt(cfgWebReadBufferSize)
a.webServer.WriteBufferSize = a.cfg.GetInt(cfgWebWriteBufferSize)
a.webServer.ReadTimeout = a.cfg.GetDuration(cfgWebReadTimeout)
a.webServer.WriteTimeout = a.cfg.GetDuration(cfgWebWriteTimeout)
a.webServer.DisableHeaderNamesNormalizing = true
a.webServer.NoDefaultServerHeader = true
a.webServer.NoDefaultContentType = true
a.webServer.MaxRequestBodySize = a.config().GetInt(cfgWebMaxRequestBodySize)
a.webServer.MaxRequestBodySize = a.cfg.GetInt(cfgWebMaxRequestBodySize)
a.webServer.DisablePreParseMultipartForm = true
a.webServer.StreamRequestBody = a.config().GetBool(cfgWebStreamRequestBody)
a.webServer.StreamRequestBody = a.cfg.GetBool(cfgWebStreamRequestBody)
// -- -- -- -- -- -- -- -- -- -- -- -- -- --
a.initPools(ctx)
a.pool, a.treePool, a.key = getPools(ctx, a.log, a.cfg, a.settings.dialerSource)
var owner user.ID
user.IDFromKey(&owner, a.key.PrivateKey.PublicKey)
@@ -255,23 +182,18 @@ func newApp(ctx context.Context, cfg *appCfg) App {
return a
}
func (a *app) config() *viper.Viper {
return a.cfg.config()
}
func (a *app) initAppSettings(lc *logLevelConfig) {
func (a *app) initAppSettings() {
a.settings = &appSettings{
reconnectInterval: fetchReconnectInterval(a.config()),
dialerSource: getDialerSource(a.log, a.config()),
workerPoolSize: a.config().GetInt(cfgWorkerPoolSize),
logLevelConfig: lc,
reconnectInterval: fetchReconnectInterval(a.cfg),
dialerSource: getDialerSource(a.log, a.cfg),
workerPoolSize: a.cfg.GetInt(cfgWorkerPoolSize),
}
a.settings.update(a.config(), a.log)
a.settings.update(a.cfg, a.log)
}
func (s *appSettings) update(v *viper.Viper, l *zap.Logger) {
defaultTimestamp := v.GetBool(cfgUploaderHeaderEnableDefaultTimestamp)
archiveCompression := fetchArchiveCompression(v)
zipCompression := v.GetBool(cfgZipCompression)
returnIndexPage := v.GetBool(cfgIndexPageEnabled)
clientCut := v.GetBool(cfgClientCut)
bufferMaxSizeForPut := v.GetUint64(cfgBufferMaxSizeForPut)
@@ -284,13 +206,12 @@ func (s *appSettings) update(v *viper.Viper, l *zap.Logger) {
corsExposeHeaders := v.GetStringSlice(cfgCORSExposeHeaders)
corsAllowCredentials := v.GetBool(cfgCORSAllowCredentials)
corsMaxAge := fetchCORSMaxAge(v)
enableFilepathFallback := v.GetBool(cfgFeaturesEnableFilepathFallback)
s.mu.Lock()
defer s.mu.Unlock()
s.defaultTimestamp = defaultTimestamp
s.archiveCompression = archiveCompression
s.zipCompression = zipCompression
s.returnIndexPage = returnIndexPage
s.clientCut = clientCut
s.bufferMaxSizeForPut = bufferMaxSizeForPut
@@ -304,23 +225,6 @@ func (s *appSettings) update(v *viper.Viper, l *zap.Logger) {
s.corsExposeHeaders = corsExposeHeaders
s.corsAllowCredentials = corsAllowCredentials
s.corsMaxAge = corsMaxAge
s.enableFilepathFallback = enableFilepathFallback
}
func (s *loggerSettings) DroppedLogsInc() {
s.mu.RLock()
defer s.mu.RUnlock()
if s.appMetrics != nil {
s.appMetrics.DroppedLogsInc()
}
}
func (s *loggerSettings) setMetrics(appMetrics *metrics.GateMetrics) {
s.mu.Lock()
defer s.mu.Unlock()
s.appMetrics = appMetrics
}
func (s *appSettings) DefaultTimestamp() bool {
@@ -329,10 +233,10 @@ func (s *appSettings) DefaultTimestamp() bool {
return s.defaultTimestamp
}
func (s *appSettings) ArchiveCompression() bool {
func (s *appSettings) ZipCompression() bool {
s.mu.RLock()
defer s.mu.RUnlock()
return s.archiveCompression
return s.zipCompression
}
func (s *appSettings) IndexPageEnabled() bool {
@@ -402,36 +306,29 @@ func (s *appSettings) FormContainerZone(ns string) (zone string, isDefault bool)
return ns + ".ns", false
}
func (s *appSettings) EnableFilepathFallback() bool {
s.mu.RLock()
defer s.mu.RUnlock()
return s.enableFilepathFallback
}
func (a *app) initResolver() {
var err error
a.resolver, err = resolver.NewContainerResolver(a.getResolverConfig())
if err != nil {
a.log.Fatal(logs.FailedToCreateResolver, zap.Error(err), logs.TagField(logs.TagApp))
a.log.Fatal(logs.FailedToCreateResolver, zap.Error(err))
}
}
func (a *app) getResolverConfig() ([]string, *resolver.Config) {
resolveCfg := &resolver.Config{
FrostFS: frostfs.NewResolverFrostFS(a.pool),
RPCAddress: a.config().GetString(cfgRPCEndpoint),
RPCAddress: a.cfg.GetString(cfgRPCEndpoint),
Settings: a.settings,
}
order := a.config().GetStringSlice(cfgResolveOrder)
order := a.cfg.GetStringSlice(cfgResolveOrder)
if resolveCfg.RPCAddress == "" {
order = remove(order, resolver.NNSResolver)
a.log.Warn(logs.ResolverNNSWontBeUsedSinceRPCEndpointIsntProvided, logs.TagField(logs.TagApp))
a.log.Warn(logs.ResolverNNSWontBeUsedSinceRPCEndpointIsntProvided)
}
if len(order) == 0 {
a.log.Info(logs.ContainerResolverWillBeDisabledBecauseOfResolversResolverOrderIsEmpty,
logs.TagField(logs.TagApp))
a.log.Info(logs.ContainerResolverWillBeDisabledBecauseOfResolversResolverOrderIsEmpty)
}
return order, resolveCfg
@@ -439,14 +336,13 @@ func (a *app) getResolverConfig() ([]string, *resolver.Config) {
func (a *app) initMetrics() {
gateMetricsProvider := metrics.NewGateMetrics(a.pool)
a.metrics = newGateMetrics(a.log, gateMetricsProvider, a.config().GetBool(cfgPrometheusEnabled))
a.metrics = newGateMetrics(a.log, gateMetricsProvider, a.cfg.GetBool(cfgPrometheusEnabled))
a.metrics.SetHealth(metrics.HealthStatusStarting)
a.loggerSettings.setMetrics(a.metrics.provider)
}
func newGateMetrics(logger *zap.Logger, provider *metrics.GateMetrics, enabled bool) *gateMetrics {
if !enabled {
logger.Warn(logs.MetricsAreDisabled, logs.TagField(logs.TagApp))
logger.Warn(logs.MetricsAreDisabled)
}
return &gateMetrics{
logger: logger,
@@ -464,7 +360,7 @@ func (m *gateMetrics) isEnabled() bool {
func (m *gateMetrics) SetEnabled(enabled bool) {
if !enabled {
m.logger.Warn(logs.MetricsAreDisabled, logs.TagField(logs.TagApp))
m.logger.Warn(logs.MetricsAreDisabled)
}
m.mu.Lock()
@@ -527,7 +423,7 @@ func getFrostFSKey(cfg *viper.Viper, log *zap.Logger) (*keys.PrivateKey, error)
walletPath := cfg.GetString(cfgWalletPath)
if len(walletPath) == 0 {
log.Info(logs.NoWalletPathSpecifiedCreatingEphemeralKeyAutomaticallyForThisRun, logs.TagField(logs.TagApp))
log.Info(logs.NoWalletPathSpecifiedCreatingEphemeralKeyAutomaticallyForThisRun)
key, err := keys.NewPrivateKey()
if err != nil {
return nil, err
@@ -584,10 +480,7 @@ func getKeyFromWallet(w *wallet.Wallet, addrStr string, password *string) (*keys
}
func (a *app) Wait() {
a.log.Info(logs.StartingApplication,
zap.String("app_name", "frostfs-http-gw"),
zap.String("version", Version),
logs.TagField(logs.TagApp))
a.log.Info(logs.StartingApplication, zap.String("app_name", "frostfs-http-gw"), zap.String("version", Version))
a.metrics.SetVersion(Version)
a.setHealthStatus()
@@ -606,10 +499,10 @@ func (a *app) Serve() {
close(a.webDone)
}()
handle := handler.New(a.AppParams(), a.settings, tree.NewTree(frostfs.NewPoolWrapper(a.treePool)), workerPool)
handler := handler.New(a.AppParams(), a.settings, tree.NewTree(frostfs.NewPoolWrapper(a.treePool)), workerPool)
// Configure router.
a.configureRouter(handle)
a.configureRouter(handler)
a.startServices()
a.initServers(a.ctx)
@@ -618,10 +511,10 @@ func (a *app) Serve() {
for i := range servs {
go func(i int) {
a.log.Info(logs.StartingServer, zap.String("address", servs[i].Address()), logs.TagField(logs.TagApp))
a.log.Info(logs.StartingServer, zap.String("address", servs[i].Address()))
if err := a.webServer.Serve(servs[i].Listener()); err != nil && err != http.ErrServerClosed {
a.metrics.MarkUnhealthy(servs[i].Address())
a.log.Fatal(logs.ListenAndServe, zap.Error(err), logs.TagField(logs.TagApp))
a.log.Fatal(logs.ListenAndServe, zap.Error(err))
}
}(i)
}
@@ -643,7 +536,7 @@ LOOP:
}
}
a.log.Info(logs.ShuttingDownWebServer, zap.Error(a.webServer.Shutdown()), logs.TagField(logs.TagApp))
a.log.Info(logs.ShuttingDownWebServer, zap.Error(a.webServer.Shutdown()))
a.metrics.Shutdown()
a.stopServices()
@@ -653,7 +546,7 @@ LOOP:
func (a *app) initWorkerPool() *ants.Pool {
workerPool, err := ants.NewPool(a.settings.workerPoolSize)
if err != nil {
a.log.Fatal(logs.FailedToCreateWorkerPool, zap.Error(err), logs.TagField(logs.TagApp))
a.log.Fatal(logs.FailedToCreateWorkerPool, zap.Error(err))
}
return workerPool
}
@@ -664,33 +557,37 @@ func (a *app) shutdownTracing() {
defer cancel()
if err := tracing.Shutdown(shdnCtx); err != nil {
a.log.Warn(logs.FailedToShutdownTracing, zap.Error(err), logs.TagField(logs.TagApp))
a.log.Warn(logs.FailedToShutdownTracing, zap.Error(err))
}
}
func (a *app) configReload(ctx context.Context) {
a.log.Info(logs.SIGHUPConfigReloadStarted, logs.TagField(logs.TagApp))
if !a.config().IsSet(cmdConfig) && !a.config().IsSet(cmdConfigDir) {
a.log.Warn(logs.FailedToReloadConfigBecauseItsMissed, logs.TagField(logs.TagApp))
a.log.Info(logs.SIGHUPConfigReloadStarted)
if !a.cfg.IsSet(cmdConfig) && !a.cfg.IsSet(cmdConfigDir) {
a.log.Warn(logs.FailedToReloadConfigBecauseItsMissed)
return
}
if err := a.cfg.reload(); err != nil {
a.log.Warn(logs.FailedToReloadConfig, zap.Error(err), logs.TagField(logs.TagApp))
if err := readInConfig(a.cfg); err != nil {
a.log.Warn(logs.FailedToReloadConfig, zap.Error(err))
return
}
a.settings.logLevelConfig.update(a.cfg.settings, a.log)
if lvl, err := getLogLevel(a.cfg); err != nil {
a.log.Warn(logs.LogLevelWontBeUpdated, zap.Error(err))
} else {
a.logLevel.SetLevel(lvl)
}
if err := a.settings.dialerSource.Update(fetchMultinetConfig(a.config(), a.log)); err != nil {
a.log.Warn(logs.MultinetConfigWontBeUpdated, zap.Error(err), logs.TagField(logs.TagApp))
if err := a.settings.dialerSource.Update(fetchMultinetConfig(a.cfg, a.log)); err != nil {
a.log.Warn(logs.MultinetConfigWontBeUpdated, zap.Error(err))
}
if err := a.resolver.UpdateResolvers(a.getResolverConfig()); err != nil {
a.log.Warn(logs.FailedToUpdateResolvers, zap.Error(err), logs.TagField(logs.TagApp))
a.log.Warn(logs.FailedToUpdateResolvers, zap.Error(err))
}
if err := a.updateServers(); err != nil {
a.log.Warn(logs.FailedToReloadServerParameters, zap.Error(err), logs.TagField(logs.TagApp))
a.log.Warn(logs.FailedToReloadServerParameters, zap.Error(err))
}
a.setRuntimeParameters()
@@ -698,24 +595,22 @@ func (a *app) configReload(ctx context.Context) {
a.stopServices()
a.startServices()
a.settings.update(a.config(), a.log)
a.settings.update(a.cfg, a.log)
a.metrics.SetEnabled(a.config().GetBool(cfgPrometheusEnabled))
a.metrics.SetEnabled(a.cfg.GetBool(cfgPrometheusEnabled))
a.initTracing(ctx)
a.setHealthStatus()
a.log.Info(logs.SIGHUPConfigReloadCompleted, logs.TagField(logs.TagApp))
a.log.Info(logs.SIGHUPConfigReloadCompleted)
}
func (a *app) startServices() {
a.services = a.services[:0]
pprofConfig := metrics.Config{Enabled: a.config().GetBool(cfgPprofEnabled), Address: a.config().GetString(cfgPprofAddress)}
pprofConfig := metrics.Config{Enabled: a.cfg.GetBool(cfgPprofEnabled), Address: a.cfg.GetString(cfgPprofAddress)}
pprofService := metrics.NewPprofService(a.log, pprofConfig)
a.services = append(a.services, pprofService)
go pprofService.Start()
prometheusConfig := metrics.Config{Enabled: a.config().GetBool(cfgPrometheusEnabled), Address: a.config().GetString(cfgPrometheusAddress)}
prometheusConfig := metrics.Config{Enabled: a.cfg.GetBool(cfgPrometheusEnabled), Address: a.cfg.GetString(cfgPrometheusAddress)}
prometheusService := metrics.NewPrometheusService(a.log, prometheusConfig)
a.services = append(a.services, prometheusService)
go prometheusService.Start()
@@ -730,32 +625,30 @@ func (a *app) stopServices() {
}
}
func (a *app) configureRouter(h *handler.Handler) {
func (a *app) configureRouter(handler *handler.Handler) {
r := router.New()
r.RedirectTrailingSlash = true
r.NotFound = func(r *fasthttp.RequestCtx) {
handler.ResponseError(r, "Not found", fasthttp.StatusNotFound)
response.Error(r, "Not found", fasthttp.StatusNotFound)
}
r.MethodNotAllowed = func(r *fasthttp.RequestCtx) {
handler.ResponseError(r, "Method Not Allowed", fasthttp.StatusMethodNotAllowed)
response.Error(r, "Method Not Allowed", fasthttp.StatusMethodNotAllowed)
}
r.POST("/upload/{cid}", a.addMiddlewares(h.Upload))
r.POST("/upload/{cid}", a.addMiddlewares(handler.Upload))
r.OPTIONS("/upload/{cid}", a.addPreflight())
a.log.Info(logs.AddedPathUploadCid, logs.TagField(logs.TagApp))
r.GET("/get/{cid}/{oid:*}", a.addMiddlewares(h.DownloadByAddressOrBucketName))
r.HEAD("/get/{cid}/{oid:*}", a.addMiddlewares(h.HeadByAddressOrBucketName))
a.log.Info(logs.AddedPathUploadCid)
r.GET("/get/{cid}/{oid:*}", a.addMiddlewares(handler.DownloadByAddressOrBucketName))
r.HEAD("/get/{cid}/{oid:*}", a.addMiddlewares(handler.HeadByAddressOrBucketName))
r.OPTIONS("/get/{cid}/{oid:*}", a.addPreflight())
a.log.Info(logs.AddedPathGetCidOid, logs.TagField(logs.TagApp))
r.GET("/get_by_attribute/{cid}/{attr_key}/{attr_val:*}", a.addMiddlewares(h.DownloadByAttribute))
r.HEAD("/get_by_attribute/{cid}/{attr_key}/{attr_val:*}", a.addMiddlewares(h.HeadByAttribute))
a.log.Info(logs.AddedPathGetCidOid)
r.GET("/get_by_attribute/{cid}/{attr_key}/{attr_val:*}", a.addMiddlewares(handler.DownloadByAttribute))
r.HEAD("/get_by_attribute/{cid}/{attr_key}/{attr_val:*}", a.addMiddlewares(handler.HeadByAttribute))
r.OPTIONS("/get_by_attribute/{cid}/{attr_key}/{attr_val:*}", a.addPreflight())
a.log.Info(logs.AddedPathGetByAttributeCidAttrKeyAttrVal, logs.TagField(logs.TagApp))
r.GET("/zip/{cid}/{prefix:*}", a.addMiddlewares(h.DownloadZip))
a.log.Info(logs.AddedPathGetByAttributeCidAttrKeyAttrVal)
r.GET("/zip/{cid}/{prefix:*}", a.addMiddlewares(handler.DownloadZipped))
r.OPTIONS("/zip/{cid}/{prefix:*}", a.addPreflight())
r.GET("/tar/{cid}/{prefix:*}", a.addMiddlewares(h.DownloadTar))
r.OPTIONS("/tar/{cid}/{prefix:*}", a.addPreflight())
a.log.Info(logs.AddedPathZipCidPrefix, logs.TagField(logs.TagApp))
a.log.Info(logs.AddedPathZipCidPrefix)
a.webServer.Handler = r.Handler
}
@@ -844,11 +737,14 @@ func (a *app) logger(h fasthttp.RequestHandler) fasthttp.RequestHandler {
reqCtx = utils.SetReqLog(reqCtx, log)
utils.SetContextToRequest(reqCtx, req)
log.Info(logs.Request, zap.String("remote", req.RemoteAddr().String()),
fields := []zap.Field{
zap.String("remote", req.RemoteAddr().String()),
zap.ByteString("method", req.Method()),
zap.ByteString("path", req.Path()),
zap.ByteString("query", req.QueryArgs().QueryString()),
logs.TagField(logs.TagDatapath))
}
log.Info(logs.Request, fields...)
h(req)
}
}
@@ -892,8 +788,8 @@ func (a *app) tokenizer(h fasthttp.RequestHandler) fasthttp.RequestHandler {
if err != nil {
log := utils.GetReqLogOrDefault(reqCtx, a.log)
log.Error(logs.CouldNotFetchAndStoreBearerToken, zap.Error(err), logs.TagField(logs.TagDatapath))
handler.ResponseError(req, "could not fetch and store bearer token: "+err.Error(), fasthttp.StatusBadRequest)
log.Error(logs.CouldNotFetchAndStoreBearerToken, zap.Error(err))
response.Error(req, "could not fetch and store bearer token: "+err.Error(), fasthttp.StatusBadRequest)
return
}
utils.SetContextToRequest(appCtx, req)
@@ -934,12 +830,12 @@ func (a *app) AppParams() *handler.AppParams {
FrostFS: frostfs.NewFrostFS(a.pool),
Owner: a.owner,
Resolver: a.resolver,
Cache: a.bucketCache,
Cache: cache.NewBucketCache(getCacheOptions(a.cfg, a.log)),
}
}
func (a *app) initServers(ctx context.Context) {
serversInfo := fetchServers(a.config(), a.log)
serversInfo := fetchServers(a.cfg, a.log)
a.servers = make([]Server, 0, len(serversInfo))
for _, serverInfo := range serversInfo {
@@ -951,22 +847,22 @@ func (a *app) initServers(ctx context.Context) {
if err != nil {
a.unbindServers = append(a.unbindServers, serverInfo)
a.metrics.MarkUnhealthy(serverInfo.Address)
a.log.Warn(logs.FailedToAddServer, append(fields, zap.Error(err), logs.TagField(logs.TagApp))...)
a.log.Warn(logs.FailedToAddServer, append(fields, zap.Error(err))...)
continue
}
a.metrics.MarkHealthy(serverInfo.Address)
a.servers = append(a.servers, srv)
a.log.Info(logs.AddServer, append(fields, logs.TagField(logs.TagApp))...)
a.log.Info(logs.AddServer, fields...)
}
if len(a.servers) == 0 {
a.log.Fatal(logs.NoHealthyServers, logs.TagField(logs.TagApp))
a.log.Fatal(logs.NoHealthyServers)
}
}
func (a *app) updateServers() error {
serversInfo := fetchServers(a.config(), a.log)
serversInfo := fetchServers(a.cfg, a.log)
a.mu.Lock()
defer a.mu.Unlock()
@@ -979,8 +875,8 @@ func (a *app) updateServers() error {
if err := ser.UpdateCert(serverInfo.TLS.CertFile, serverInfo.TLS.KeyFile); err != nil {
return fmt.Errorf("failed to update tls certs: %w", err)
}
found = true
}
found = true
} else if unbind := a.updateUnbindServerInfo(serverInfo); unbind {
found = true
}
@@ -1024,60 +920,58 @@ func (a *app) initTracing(ctx context.Context) {
instanceID = a.servers[0].Address()
}
cfg := tracing.Config{
Enabled: a.config().GetBool(cfgTracingEnabled),
Exporter: tracing.Exporter(a.config().GetString(cfgTracingExporter)),
Endpoint: a.config().GetString(cfgTracingEndpoint),
Enabled: a.cfg.GetBool(cfgTracingEnabled),
Exporter: tracing.Exporter(a.cfg.GetString(cfgTracingExporter)),
Endpoint: a.cfg.GetString(cfgTracingEndpoint),
Service: "frostfs-http-gw",
InstanceID: instanceID,
Version: Version,
}
if trustedCa := a.config().GetString(cfgTracingTrustedCa); trustedCa != "" {
if trustedCa := a.cfg.GetString(cfgTracingTrustedCa); trustedCa != "" {
caBytes, err := os.ReadFile(trustedCa)
if err != nil {
a.log.Warn(logs.FailedToInitializeTracing, zap.Error(err), logs.TagField(logs.TagApp))
a.log.Warn(logs.FailedToInitializeTracing, zap.Error(err))
return
}
certPool := x509.NewCertPool()
ok := certPool.AppendCertsFromPEM(caBytes)
if !ok {
a.log.Warn(logs.FailedToInitializeTracing, zap.String("error", "can't fill cert pool by ca cert"),
logs.TagField(logs.TagApp))
a.log.Warn(logs.FailedToInitializeTracing, zap.String("error", "can't fill cert pool by ca cert"))
return
}
cfg.ServerCaCertPool = certPool
}
attributes, err := fetchTracingAttributes(a.config())
attributes, err := fetchTracingAttributes(a.cfg)
if err != nil {
a.log.Warn(logs.FailedToInitializeTracing, zap.Error(err), logs.TagField(logs.TagApp))
a.log.Warn(logs.FailedToInitializeTracing, zap.Error(err))
return
}
cfg.Attributes = attributes
updated, err := tracing.Setup(ctx, cfg)
if err != nil {
a.log.Warn(logs.FailedToInitializeTracing, zap.Error(err), logs.TagField(logs.TagApp))
a.log.Warn(logs.FailedToInitializeTracing, zap.Error(err))
}
if updated {
a.log.Info(logs.TracingConfigUpdated, logs.TagField(logs.TagApp))
a.log.Info(logs.TracingConfigUpdated)
}
}
func (a *app) setRuntimeParameters() {
if len(os.Getenv("GOMEMLIMIT")) != 0 {
// default limit < yaml limit < app env limit < GOMEMLIMIT
a.log.Warn(logs.RuntimeSoftMemoryDefinedWithGOMEMLIMIT, logs.TagField(logs.TagApp))
a.log.Warn(logs.RuntimeSoftMemoryDefinedWithGOMEMLIMIT)
return
}
softMemoryLimit := fetchSoftMemoryLimit(a.config())
softMemoryLimit := fetchSoftMemoryLimit(a.cfg)
previous := debug.SetMemoryLimit(softMemoryLimit)
if softMemoryLimit != previous {
a.log.Info(logs.RuntimeSoftMemoryLimitUpdated,
zap.Int64("new_value", softMemoryLimit),
zap.Int64("old_value", previous),
logs.TagField(logs.TagApp))
zap.Int64("old_value", previous))
}
}
@@ -1103,32 +997,34 @@ func (a *app) tryReconnect(ctx context.Context, sr *fasthttp.Server) bool {
a.mu.Lock()
defer a.mu.Unlock()
a.log.Info(logs.ServerReconnecting, logs.TagField(logs.TagApp))
a.log.Info(logs.ServerReconnecting)
var failedServers []ServerInfo
for _, serverInfo := range a.unbindServers {
fields := []zap.Field{
zap.String("address", serverInfo.Address), zap.Bool("tls enabled", serverInfo.TLS.Enabled),
zap.String("tls cert", serverInfo.TLS.CertFile), zap.String("tls key", serverInfo.TLS.KeyFile),
}
srv, err := newServer(ctx, serverInfo)
if err != nil {
a.log.Warn(logs.ServerReconnectFailed, zap.Error(err), logs.TagField(logs.TagApp))
a.log.Warn(logs.ServerReconnectFailed, zap.Error(err))
failedServers = append(failedServers, serverInfo)
a.metrics.MarkUnhealthy(serverInfo.Address)
continue
}
go func() {
a.log.Info(logs.StartingServer, zap.String("address", srv.Address()), logs.TagField(logs.TagApp))
a.log.Info(logs.StartingServer, zap.String("address", srv.Address()))
a.metrics.MarkHealthy(serverInfo.Address)
if err = sr.Serve(srv.Listener()); err != nil && !errors.Is(err, http.ErrServerClosed) {
a.log.Warn(logs.ListenAndServe, zap.Error(err), logs.TagField(logs.TagApp))
a.log.Warn(logs.ListenAndServe, zap.Error(err))
a.metrics.MarkUnhealthy(serverInfo.Address)
}
}()
a.servers = append(a.servers, srv)
a.log.Info(logs.ServerReconnectedSuccessfully,
zap.String("address", serverInfo.Address), zap.Bool("tls enabled", serverInfo.TLS.Enabled),
zap.String("tls cert", serverInfo.TLS.CertFile), zap.String("tls key", serverInfo.TLS.KeyFile),
logs.TagField(logs.TagApp))
a.log.Info(logs.ServerReconnectedSuccessfully, fields...)
}
a.unbindServers = failedServers

View file

@@ -14,11 +14,10 @@ import (
"net/http"
"os"
"sort"
"strings"
"testing"
"time"
containerv2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
containerv2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
@@ -29,12 +28,13 @@ import (
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
docker "github.com/docker/docker/api/types/container"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/wallet"
"github.com/spf13/viper"
"github.com/stretchr/testify/require"
"github.com/testcontainers/testcontainers-go"
"github.com/testcontainers/testcontainers-go/wait"
"go.uber.org/zap/zapcore"
)
type putResponse struct {
@@ -50,12 +50,11 @@ const (
func TestIntegration(t *testing.T) {
rootCtx := context.Background()
aioImage := "git.frostfs.info/truecloudlab/frostfs-aio:"
aioImage := "truecloudlab/frostfs-aio:"
versions := []string{
"1.2.7",
"1.3.0",
"1.5.0",
"1.6.5",
}
key, err := keys.NewPrivateKeyFromHex("1dd37fba80fec4e6a6f13fd708d8dcb3b29def768017052f6c930fa1c5d90bbb")
require.NoError(t, err)
@@ -72,28 +71,21 @@ func TestIntegration(t *testing.T) {
ctx, cancel2 := context.WithCancel(rootCtx)
aioContainer := createDockerContainer(ctx, t, aioImage+version)
if strings.HasPrefix(version, "1.6") {
registerUser(t, ctx, aioContainer, file.Name())
}
// See the logs from the command execution.
server, cancel := runServer(file.Name())
clientPool := getPool(ctx, t, key)
CID, err := createContainer(ctx, t, clientPool, ownerID)
CID, err := createContainer(ctx, t, clientPool, ownerID, version)
require.NoError(t, err, version)
jsonToken, binaryToken := makeBearerTokens(t, key, ownerID, version)
token := makeBearerToken(t, key, ownerID, version)
t.Run("simple put "+version, func(t *testing.T) { simplePut(ctx, t, clientPool, CID) })
t.Run("put with json bearer token in header"+version, func(t *testing.T) { putWithBearerTokenInHeader(ctx, t, clientPool, CID, jsonToken) })
t.Run("put with json bearer token in cookie"+version, func(t *testing.T) { putWithBearerTokenInCookie(ctx, t, clientPool, CID, jsonToken) })
t.Run("put with binary bearer token in header"+version, func(t *testing.T) { putWithBearerTokenInHeader(ctx, t, clientPool, CID, binaryToken) })
t.Run("put with binary bearer token in cookie"+version, func(t *testing.T) { putWithBearerTokenInCookie(ctx, t, clientPool, CID, binaryToken) })
t.Run("simple put "+version, func(t *testing.T) { simplePut(ctx, t, clientPool, CID, version) })
t.Run("put with bearer token in header"+version, func(t *testing.T) { putWithBearerTokenInHeader(ctx, t, clientPool, CID, token) })
t.Run("put with bearer token in cookie"+version, func(t *testing.T) { putWithBearerTokenInCookie(ctx, t, clientPool, CID, token) })
t.Run("put with duplicate keys "+version, func(t *testing.T) { putWithDuplicateKeys(t, CID) })
t.Run("simple get "+version, func(t *testing.T) { simpleGet(ctx, t, clientPool, ownerID, CID) })
t.Run("get by attribute "+version, func(t *testing.T) { getByAttr(ctx, t, clientPool, ownerID, CID) })
t.Run("get zip "+version, func(t *testing.T) { getZip(ctx, t, clientPool, ownerID, CID) })
t.Run("test namespaces "+version, func(t *testing.T) { checkNamespaces(ctx, t, clientPool, ownerID, CID) })
t.Run("simple get "+version, func(t *testing.T) { simpleGet(ctx, t, clientPool, ownerID, CID, version) })
t.Run("get by attribute "+version, func(t *testing.T) { getByAttr(ctx, t, clientPool, ownerID, CID, version) })
t.Run("get zip "+version, func(t *testing.T) { getZip(ctx, t, clientPool, ownerID, CID, version) })
t.Run("test namespaces "+version, func(t *testing.T) { checkNamespaces(ctx, t, clientPool, ownerID, CID, version) })
cancel()
server.Wait()
@@ -107,16 +99,17 @@ func runServer(pathToWallet string) (App, context.CancelFunc) {
cancelCtx, cancel := context.WithCancel(context.Background())
v := getDefaultConfig()
v.config().Set(cfgWalletPath, pathToWallet)
v.config().Set(cfgWalletPassphrase, "")
v.Set(cfgWalletPath, pathToWallet)
v.Set(cfgWalletPassphrase, "")
application := newApp(cancelCtx, v)
l, lvl := newStdoutLogger(v, zapcore.DebugLevel)
application := newApp(cancelCtx, WithConfig(v), WithLogger(l, lvl))
go application.Serve()
return application, cancel
}
func simplePut(ctx context.Context, t *testing.T, p *pool.Pool, CID cid.ID) {
func simplePut(ctx context.Context, t *testing.T, p *pool.Pool, CID cid.ID, version string) {
url := testHost + "/upload/" + CID.String()
makePutRequestAndCheck(ctx, t, p, CID, url)
@@ -264,7 +257,7 @@ func putWithDuplicateKeys(t *testing.T, CID cid.ID) {
require.Equal(t, http.StatusBadRequest, resp.StatusCode)
}
func simpleGet(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, CID cid.ID) {
func simpleGet(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, CID cid.ID, version string) {
content := "content of file"
attributes := map[string]string{
"some-attr": "some-get-value",
@@ -311,7 +304,7 @@ func checkGetByAttrResponse(t *testing.T, resp *http.Response, content string, a
}
}
func getByAttr(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, CID cid.ID) {
func getByAttr(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, CID cid.ID, version string) {
keyAttr, valAttr := "some-attr", "some-get-by-attr-value"
content := "content of file"
attributes := map[string]string{keyAttr: valAttr}
@@ -333,7 +326,7 @@ func getByAttr(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID
checkGetByAttrResponse(t, resp, content, expectedAttr)
}
func getZip(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, CID cid.ID) {
func getZip(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, CID cid.ID, version string) {
names := []string{"zipfolder/dir/name1.txt", "zipfolder/name2.txt"}
contents := []string{"content of file1", "content of file2"}
attributes1 := map[string]string{object.AttributeFilePath: names[0]}
@@ -398,7 +391,7 @@ func checkZip(t *testing.T, data []byte, length int64, names, contents []string)
}
}
func checkNamespaces(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, CID cid.ID) {
func checkNamespaces(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, CID cid.ID, version string) {
content := "content of file"
attributes := map[string]string{
"some-attr": "some-get-value",
@@ -434,13 +427,11 @@ func checkNamespaces(ctx context.Context, t *testing.T, clientPool *pool.Pool, o
func createDockerContainer(ctx context.Context, t *testing.T, image string) testcontainers.Container {
req := testcontainers.ContainerRequest{
Image: image,
WaitingFor: wait.NewLogStrategy("aio container started").WithStartupTimeout(2 * time.Minute),
Name: "aio",
Hostname: "aio",
HostConfigModifier: func(hc *docker.HostConfig) {
hc.NetworkMode = "host"
},
Image: image,
WaitingFor: wait.NewLogStrategy("aio container started").WithStartupTimeout(30 * time.Second),
Name: "aio",
Hostname: "aio",
NetworkMode: "host",
}
aioC, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{
ContainerRequest: req,
@@ -451,14 +442,14 @@ func createDockerContainer(ctx context.Context, t *testing.T, image string) test
return aioC
}
func getDefaultConfig() *appCfg {
func getDefaultConfig() *viper.Viper {
v := settings()
v.config().SetDefault(cfgPeers+".0.address", "localhost:8080")
v.config().SetDefault(cfgPeers+".0.weight", 1)
v.config().SetDefault(cfgPeers+".0.priority", 1)
v.SetDefault(cfgPeers+".0.address", "localhost:8080")
v.SetDefault(cfgPeers+".0.weight", 1)
v.SetDefault(cfgPeers+".0.priority", 1)
v.config().SetDefault(cfgRPCEndpoint, "http://localhost:30333")
v.config().SetDefault("server.0.address", testListenAddress)
v.SetDefault(cfgRPCEndpoint, "http://localhost:30333")
v.SetDefault("server.0.address", testListenAddress)
return v
}
@@ -477,7 +468,7 @@ func getPool(ctx context.Context, t *testing.T, key *keys.PrivateKey) *pool.Pool
return clientPool
}
func createContainer(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID) (cid.ID, error) {
func createContainer(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, version string) (cid.ID, error) {
var policy netmap.PlacementPolicy
err := policy.DecodeString("REP 1")
require.NoError(t, err)
@@ -537,19 +528,7 @@ func putObject(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID
return id.ObjectID
}
func registerUser(t *testing.T, ctx context.Context, aioContainer testcontainers.Container, pathToWallet string) {
err := aioContainer.CopyFileToContainer(ctx, pathToWallet, "/usr/wallet.json", 644)
require.NoError(t, err)
_, _, err = aioContainer.Exec(ctx, []string{
"/usr/bin/frostfs-s3-authmate", "register-user",
"--wallet", "/usr/wallet.json",
"--rpc-endpoint", "http://localhost:30333",
"--contract-wallet", "/config/s3-gw-wallet.json"})
require.NoError(t, err)
}
func makeBearerTokens(t *testing.T, key *keys.PrivateKey, ownerID user.ID, version string) (jsonTokenBase64, binaryTokenBase64 string) {
func makeBearerToken(t *testing.T, key *keys.PrivateKey, ownerID user.ID, version string) string {
tkn := new(bearer.Token)
tkn.ForUser(ownerID)
tkn.SetExp(10000)
@@ -563,16 +542,10 @@ func makeBearerTokens(t *testing.T, key *keys.PrivateKey, ownerID user.ID, versi
err := tkn.Sign(key.PrivateKey)
require.NoError(t, err)
jsonToken, err := tkn.MarshalJSON()
require.NoError(t, err)
t64 := base64.StdEncoding.EncodeToString(tkn.Marshal())
require.NotEmpty(t, t64)
jsonTokenBase64 = base64.StdEncoding.EncodeToString(jsonToken)
binaryTokenBase64 = base64.StdEncoding.EncodeToString(tkn.Marshal())
require.NotEmpty(t, jsonTokenBase64)
require.NotEmpty(t, binaryTokenBase64)
return
return t64
}
func makeTempWallet(t *testing.T, key *keys.PrivateKey, path string) {

View file

@@ -1,174 +0,0 @@
package main
import (
"fmt"
"os"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
"git.frostfs.info/TrueCloudLab/zapjournald"
"github.com/spf13/viper"
"github.com/ssgreg/journald"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
func getLogLevel(v *viper.Viper) (zapcore.Level, error) {
var lvl zapcore.Level
lvlStr := v.GetString(cfgLoggerLevel)
err := lvl.UnmarshalText([]byte(lvlStr))
if err != nil {
return lvl, fmt.Errorf("incorrect logger level configuration %s (%v), "+
"value should be one of %v", lvlStr, err, [...]zapcore.Level{
zapcore.DebugLevel,
zapcore.InfoLevel,
zapcore.WarnLevel,
zapcore.ErrorLevel,
zapcore.DPanicLevel,
zapcore.PanicLevel,
zapcore.FatalLevel,
})
}
return lvl, nil
}
var _ zapcore.Core = (*zapCoreTagFilterWrapper)(nil)
type zapCoreTagFilterWrapper struct {
core zapcore.Core
settings TagFilterSettings
extra []zap.Field
}
type TagFilterSettings interface {
LevelEnabled(tag string, lvl zapcore.Level) bool
}
func (c *zapCoreTagFilterWrapper) Enabled(level zapcore.Level) bool {
return c.core.Enabled(level)
}
func (c *zapCoreTagFilterWrapper) With(fields []zapcore.Field) zapcore.Core {
return &zapCoreTagFilterWrapper{
core: c.core.With(fields),
settings: c.settings,
extra: append(c.extra, fields...),
}
}
func (c *zapCoreTagFilterWrapper) Check(entry zapcore.Entry, checked *zapcore.CheckedEntry) *zapcore.CheckedEntry {
if c.core.Enabled(entry.Level) {
return checked.AddCore(entry, c)
}
return checked
}
func (c *zapCoreTagFilterWrapper) Write(entry zapcore.Entry, fields []zapcore.Field) error {
if c.shouldSkip(entry, fields) || c.shouldSkip(entry, c.extra) {
return nil
}
return c.core.Write(entry, fields)
}
func (c *zapCoreTagFilterWrapper) shouldSkip(entry zapcore.Entry, fields []zap.Field) bool {
for _, field := range fields {
if field.Key == logs.TagFieldName && field.Type == zapcore.StringType {
if !c.settings.LevelEnabled(field.String, entry.Level) {
return true
}
break
}
}
return false
}
func (c *zapCoreTagFilterWrapper) Sync() error {
return c.core.Sync()
}
func applyZapCoreMiddlewares(core zapcore.Core, v *viper.Viper, loggerSettings LoggerAppSettings, tagSetting TagFilterSettings) zapcore.Core {
core = &zapCoreTagFilterWrapper{
core: core,
settings: tagSetting,
}
if v.GetBool(cfgLoggerSamplingEnabled) {
core = zapcore.NewSamplerWithOptions(core,
v.GetDuration(cfgLoggerSamplingInterval),
v.GetInt(cfgLoggerSamplingInitial),
v.GetInt(cfgLoggerSamplingThereafter),
zapcore.SamplerHook(func(_ zapcore.Entry, dec zapcore.SamplingDecision) {
if dec&zapcore.LogDropped > 0 {
loggerSettings.DroppedLogsInc()
}
}))
}
return core
}
func newLogEncoder() zapcore.Encoder {
c := zap.NewProductionEncoderConfig()
c.EncodeTime = zapcore.ISO8601TimeEncoder
return zapcore.NewConsoleEncoder(c)
}
// newStdoutLogger constructs a zap.Logger instance for current application.
// Panics on failure.
//
// Logger is built from zap's production logging configuration with:
// - parameterized level (debug by default)
// - console encoding
// - ISO8601 time encoding
//
// Logger records a stack trace for all messages at or above fatal level.
//
// See also zapcore.Level, zap.NewProductionConfig, zap.AddStacktrace.
func newStdoutLogger(v *viper.Viper, lvl zap.AtomicLevel, loggerSettings LoggerAppSettings, tagSetting TagFilterSettings) *Logger {
stdout := zapcore.AddSync(os.Stderr)
consoleOutCore := zapcore.NewCore(newLogEncoder(), stdout, lvl)
consoleOutCore = applyZapCoreMiddlewares(consoleOutCore, v, loggerSettings, tagSetting)
return &Logger{
logger: zap.New(consoleOutCore, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel))),
lvl: lvl,
}
}
func newJournaldLogger(v *viper.Viper, lvl zap.AtomicLevel, loggerSettings LoggerAppSettings, tagSetting TagFilterSettings) *Logger {
encoder := zapjournald.NewPartialEncoder(newLogEncoder(), zapjournald.SyslogFields)
core := zapjournald.NewCore(lvl, encoder, &journald.Journal{}, zapjournald.SyslogFields)
coreWithContext := core.With([]zapcore.Field{
zapjournald.SyslogFacility(zapjournald.LogDaemon),
zapjournald.SyslogIdentifier(),
zapjournald.SyslogPid(),
})
coreWithContext = applyZapCoreMiddlewares(coreWithContext, v, loggerSettings, tagSetting)
return &Logger{
logger: zap.New(coreWithContext, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel))),
lvl: lvl,
}
}
type LoggerAppSettings interface {
DroppedLogsInc()
}
func pickLogger(v *viper.Viper, lvl zap.AtomicLevel, loggerSettings LoggerAppSettings, tagSettings TagFilterSettings) *Logger {
dest := v.GetString(cfgLoggerDestination)
switch dest {
case destinationStdout:
return newStdoutLogger(v, lvl, loggerSettings, tagSettings)
case destinationJournald:
return newJournaldLogger(v, lvl, loggerSettings, tagSettings)
default:
panic(fmt.Sprintf("wrong destination for logger: %s", dest))
}
}

View file

@@ -8,9 +8,10 @@ import (
func main() {
globalContext, _ := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
cfg := settings()
v := settings()
logger, atomicLevel := pickLogger(v)
application := newApp(globalContext, cfg)
application := newApp(globalContext, WithLogger(logger, atomicLevel), WithConfig(v))
go application.Serve()
application.Wait()
}

View file

@@ -12,19 +12,20 @@ import (
"sort"
"strconv"
"strings"
"sync"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/cache"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
internalnet "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/net"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/service/frostfs"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/resolver"
grpctracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
treepool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree"
"git.frostfs.info/TrueCloudLab/zapjournald"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/spf13/pflag"
"github.com/spf13/viper"
"github.com/ssgreg/journald"
"github.com/valyala/fasthttp"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
@@ -109,11 +110,6 @@ const (
cfgLoggerSamplingThereafter = "logger.sampling.thereafter"
cfgLoggerSamplingInterval = "logger.sampling.interval"
cfgLoggerTags = "logger.tags"
cfgLoggerTagsPrefixTmpl = cfgLoggerTags + ".%d."
cfgLoggerTagsNameTmpl = cfgLoggerTagsPrefixTmpl + "name"
cfgLoggerTagsLevelTmpl = cfgLoggerTagsPrefixTmpl + "level"
// Wallet.
cfgWalletPassphrase = "wallet.passphrase"
cfgWalletPath = "wallet.path"
@@ -132,13 +128,8 @@ const (
cfgResolveOrder = "resolve_order"
// Zip compression.
//
// Deprecated: Use cfgArchiveCompression instead.
cfgZipCompression = "zip.compression"
// Archive compression.
cfgArchiveCompression = "archive.compression"
// Runtime.
cfgSoftMemoryLimit = "runtime.soft_memory_limit"
@@ -153,7 +144,6 @@ const (
// Caching.
cfgBucketsCacheLifetime = "cache.buckets.lifetime"
cfgBucketsCacheSize = "cache.buckets.size"
cfgNetmapCacheLifetime = "cache.netmap.lifetime"
// Bucket resolving options.
cfgResolveNamespaceHeader = "resolve_bucket.namespace_header"
@@ -174,10 +164,6 @@ const (
cfgMultinetFallbackDelay = "multinet.fallback_delay"
cfgMultinetSubnets = "multinet.subnets"
// Feature.
cfgFeaturesEnableFilepathFallback = "features.enable_filepath_fallback"
cfgFeaturesTreePoolNetmapSupport = "features.tree_pool_netmap_support"
// Command line args.
cmdHelp = "help"
cmdVersion = "version"
@@ -196,79 +182,14 @@ var ignore = map[string]struct{}{
cmdVersion: {},
}
var defaultTags = []string{logs.TagApp, logs.TagDatapath, logs.TagExternalStorage, logs.TagExternalStorageTree}
type Logger struct {
logger *zap.Logger
lvl zap.AtomicLevel
}
type appCfg struct {
flags *pflag.FlagSet
mu sync.RWMutex
settings *viper.Viper
}
func (a *appCfg) reload() error {
old := a.config()
v, err := newViper(a.flags)
if err != nil {
return err
}
if old.IsSet(cmdConfig) {
v.Set(cmdConfig, old.Get(cmdConfig))
}
if old.IsSet(cmdConfigDir) {
v.Set(cmdConfigDir, old.Get(cmdConfigDir))
}
if err = readInConfig(v); err != nil {
return err
}
a.setConfig(v)
return nil
}
func (a *appCfg) config() *viper.Viper {
a.mu.RLock()
defer a.mu.RUnlock()
return a.settings
}
func (a *appCfg) setConfig(v *viper.Viper) {
a.mu.Lock()
a.settings = v
a.mu.Unlock()
}
func newViper(flags *pflag.FlagSet) (*viper.Viper, error) {
func settings() *viper.Viper {
v := viper.New()
v.AutomaticEnv()
v.SetEnvPrefix(Prefix)
v.AllowEmptyEnv(true)
v.SetConfigType("yaml")
v.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
if err := bindFlags(v, flags); err != nil {
return nil, err
}
setDefaults(v, flags)
if v.IsSet(cfgServer+".0."+cfgTLSKeyFile) && v.IsSet(cfgServer+".0."+cfgTLSCertFile) {
v.Set(cfgServer+".0."+cfgTLSEnabled, true)
}
return v, nil
}
func settings() *appCfg {
// flags setup:
flags := pflag.NewFlagSet("commandline", pflag.ExitOnError)
flags.SetOutput(os.Stdout)
@@ -292,17 +213,92 @@ func settings() *appCfg {
flags.String(cmdListenAddress, "0.0.0.0:8080", "addresses to listen")
flags.String(cfgTLSCertFile, "", "TLS certificate path")
flags.String(cfgTLSKeyFile, "", "TLS key path")
flags.StringArrayP(cfgPeers, "p", nil, "FrostFS nodes")
peers := flags.StringArrayP(cfgPeers, "p", nil, "FrostFS nodes")
flags.StringSlice(cfgResolveOrder, []string{resolver.NNSResolver, resolver.DNSResolver}, "set container name resolve order")
resolveMethods := flags.StringSlice(cfgResolveOrder, []string{resolver.NNSResolver, resolver.DNSResolver}, "set container name resolve order")
// set defaults:
// logger:
v.SetDefault(cfgLoggerLevel, "debug")
v.SetDefault(cfgLoggerDestination, "stdout")
v.SetDefault(cfgLoggerSamplingEnabled, false)
v.SetDefault(cfgLoggerSamplingThereafter, 100)
v.SetDefault(cfgLoggerSamplingInitial, 100)
v.SetDefault(cfgLoggerSamplingInterval, defaultLoggerSamplerInterval)
// pool:
v.SetDefault(cfgPoolErrorThreshold, defaultPoolErrorThreshold)
// frostfs:
v.SetDefault(cfgBufferMaxSizeForPut, defaultBufferMaxSizeForPut)
// web-server:
v.SetDefault(cfgWebReadBufferSize, 4096)
v.SetDefault(cfgWebWriteBufferSize, 4096)
v.SetDefault(cfgWebReadTimeout, time.Minute*10)
v.SetDefault(cfgWebWriteTimeout, time.Minute*5)
v.SetDefault(cfgWebStreamRequestBody, true)
v.SetDefault(cfgWebMaxRequestBodySize, fasthttp.DefaultMaxRequestBodySize)
v.SetDefault(cfgWorkerPoolSize, 1000)
// upload header
v.SetDefault(cfgUploaderHeaderEnableDefaultTimestamp, false)
// zip:
v.SetDefault(cfgZipCompression, false)
// metrics
v.SetDefault(cfgPprofAddress, "localhost:8083")
v.SetDefault(cfgPrometheusAddress, "localhost:8084")
// resolve bucket
v.SetDefault(cfgResolveNamespaceHeader, defaultNamespaceHeader)
v.SetDefault(cfgResolveDefaultNamespaces, []string{"", "root"})
// multinet
v.SetDefault(cfgMultinetFallbackDelay, defaultMultinetFallbackDelay)
// Binding flags
if err := v.BindPFlag(cfgPprofEnabled, flags.Lookup(cmdPprof)); err != nil {
panic(err)
}
if err := v.BindPFlag(cfgPrometheusEnabled, flags.Lookup(cmdMetrics)); err != nil {
panic(err)
}
if err := v.BindPFlag(cfgWalletPath, flags.Lookup(cmdWallet)); err != nil {
panic(err)
}
if err := v.BindPFlag(cfgWalletAddress, flags.Lookup(cmdAddress)); err != nil {
panic(err)
}
if err := v.BindPFlags(flags); err != nil {
panic(err)
}
if err := v.BindPFlag(cfgServer+".0.address", flags.Lookup(cmdListenAddress)); err != nil {
panic(err)
}
if err := v.BindPFlag(cfgServer+".0."+cfgTLSKeyFile, flags.Lookup(cfgTLSKeyFile)); err != nil {
panic(err)
}
if err := v.BindPFlag(cfgServer+".0."+cfgTLSCertFile, flags.Lookup(cfgTLSCertFile)); err != nil {
panic(err)
}
if err := flags.Parse(os.Args); err != nil {
panic(err)
}
v, err := newViper(flags)
if err != nil {
panic(fmt.Errorf("bind flags: %w", err))
if v.IsSet(cfgServer+".0."+cfgTLSKeyFile) && v.IsSet(cfgServer+".0."+cfgTLSCertFile) {
v.Set(cfgServer+".0."+cfgTLSEnabled, true)
}
if resolveMethods != nil {
v.SetDefault(cfgResolveOrder, *resolveMethods)
}
switch {
@ -347,97 +343,15 @@ func settings() *appCfg {
panic(err)
}
return &appCfg{
flags: flags,
settings: v,
}
}
func setDefaults(v *viper.Viper, flags *pflag.FlagSet) {
// set defaults:
// logger:
v.SetDefault(cfgLoggerLevel, "debug")
v.SetDefault(cfgLoggerDestination, "stdout")
v.SetDefault(cfgLoggerSamplingEnabled, false)
v.SetDefault(cfgLoggerSamplingThereafter, 100)
v.SetDefault(cfgLoggerSamplingInitial, 100)
v.SetDefault(cfgLoggerSamplingInterval, defaultLoggerSamplerInterval)
// pool:
v.SetDefault(cfgPoolErrorThreshold, defaultPoolErrorThreshold)
// frostfs:
v.SetDefault(cfgBufferMaxSizeForPut, defaultBufferMaxSizeForPut)
// web-server:
v.SetDefault(cfgWebReadBufferSize, 4096)
v.SetDefault(cfgWebWriteBufferSize, 4096)
v.SetDefault(cfgWebReadTimeout, time.Minute*10)
v.SetDefault(cfgWebWriteTimeout, time.Minute*5)
v.SetDefault(cfgWebStreamRequestBody, true)
v.SetDefault(cfgWebMaxRequestBodySize, fasthttp.DefaultMaxRequestBodySize)
v.SetDefault(cfgWorkerPoolSize, 1000)
// upload header
v.SetDefault(cfgUploaderHeaderEnableDefaultTimestamp, false)
// metrics
v.SetDefault(cfgPprofAddress, "localhost:8083")
v.SetDefault(cfgPrometheusAddress, "localhost:8084")
// resolve bucket
v.SetDefault(cfgResolveNamespaceHeader, defaultNamespaceHeader)
v.SetDefault(cfgResolveDefaultNamespaces, []string{"", "root"})
// multinet
v.SetDefault(cfgMultinetFallbackDelay, defaultMultinetFallbackDelay)
if resolveMethods, err := flags.GetStringSlice(cfgResolveOrder); err == nil {
v.SetDefault(cfgResolveOrder, resolveMethods)
}
if peers, err := flags.GetStringArray(cfgPeers); err == nil {
for i := range peers {
v.SetDefault(cfgPeers+"."+strconv.Itoa(i)+".address", peers[i])
if peers != nil && len(*peers) > 0 {
for i := range *peers {
v.SetDefault(cfgPeers+"."+strconv.Itoa(i)+".address", (*peers)[i])
v.SetDefault(cfgPeers+"."+strconv.Itoa(i)+".weight", 1)
v.SetDefault(cfgPeers+"."+strconv.Itoa(i)+".priority", 1)
}
}
}
func bindFlags(v *viper.Viper, flags *pflag.FlagSet) error {
// Binding flags
if err := v.BindPFlag(cfgPprofEnabled, flags.Lookup(cmdPprof)); err != nil {
return err
}
if err := v.BindPFlag(cfgPrometheusEnabled, flags.Lookup(cmdMetrics)); err != nil {
return err
}
if err := v.BindPFlag(cfgWalletPath, flags.Lookup(cmdWallet)); err != nil {
return err
}
if err := v.BindPFlag(cfgWalletAddress, flags.Lookup(cmdAddress)); err != nil {
return err
}
if err := v.BindPFlags(flags); err != nil {
return err
}
if err := v.BindPFlag(cfgServer+".0.address", flags.Lookup(cmdListenAddress)); err != nil {
return err
}
if err := v.BindPFlag(cfgServer+".0."+cfgTLSKeyFile, flags.Lookup(cfgTLSKeyFile)); err != nil {
return err
}
if err := v.BindPFlag(cfgServer+".0."+cfgTLSCertFile, flags.Lookup(cfgTLSCertFile)); err != nil {
return err
}
return nil
return v
}
func readInConfig(v *viper.Viper) error {
@ -504,33 +418,107 @@ func mergeConfig(v *viper.Viper, fileName string) error {
return v.MergeConfig(cfgFile)
}
func fetchLogTagsConfig(v *viper.Viper, defaultLvl zapcore.Level) (map[string]zapcore.Level, error) {
res := make(map[string]zapcore.Level)
for i := 0; ; i++ {
name := v.GetString(fmt.Sprintf(cfgLoggerTagsNameTmpl, i))
if name == "" {
break
}
lvl := defaultLvl
level := v.GetString(fmt.Sprintf(cfgLoggerTagsLevelTmpl, i))
if level != "" {
if err := lvl.Set(level); err != nil {
return nil, fmt.Errorf("failed to parse log tags config, unknown level: '%s'", level)
}
}
res[name] = lvl
func pickLogger(v *viper.Viper) (*zap.Logger, zap.AtomicLevel) {
lvl, err := getLogLevel(v)
if err != nil {
panic(err)
}
if len(res) == 0 && !v.IsSet(cfgLoggerTags) {
for _, tag := range defaultTags {
res[tag] = defaultLvl
}
dest := v.GetString(cfgLoggerDestination)
switch dest {
case destinationStdout:
return newStdoutLogger(v, lvl)
case destinationJournald:
return newJournaldLogger(v, lvl)
default:
panic(fmt.Sprintf("wrong destination for logger: %s", dest))
}
}
// newStdoutLogger constructs a zap.Logger instance for current application.
// Panics on failure.
//
// Logger is built from zap's production logging configuration with:
// - parameterized level (debug by default)
// - console encoding
// - ISO8601 time encoding
//
// Logger records a stack trace for all messages at or above fatal level.
//
// See also zapcore.Level, zap.NewProductionConfig, zap.AddStacktrace.
func newStdoutLogger(v *viper.Viper, lvl zapcore.Level) (*zap.Logger, zap.AtomicLevel) {
stdout := zapcore.AddSync(os.Stderr)
level := zap.NewAtomicLevelAt(lvl)
consoleOutCore := zapcore.NewCore(newLogEncoder(), stdout, level)
consoleOutCore = samplingEnabling(v, consoleOutCore)
l := zap.New(consoleOutCore, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)))
return l, level
}
func newJournaldLogger(v *viper.Viper, lvl zapcore.Level) (*zap.Logger, zap.AtomicLevel) {
level := zap.NewAtomicLevelAt(lvl)
encoder := zapjournald.NewPartialEncoder(newLogEncoder(), zapjournald.SyslogFields)
core := zapjournald.NewCore(level, encoder, &journald.Journal{}, zapjournald.SyslogFields)
coreWithContext := core.With([]zapcore.Field{
zapjournald.SyslogFacility(zapjournald.LogDaemon),
zapjournald.SyslogIdentifier(),
zapjournald.SyslogPid(),
})
coreWithContext = samplingEnabling(v, coreWithContext)
l := zap.New(coreWithContext, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)))
return l, level
}
func newLogEncoder() zapcore.Encoder {
c := zap.NewProductionEncoderConfig()
c.EncodeTime = zapcore.ISO8601TimeEncoder
return zapcore.NewConsoleEncoder(c)
}
func samplingEnabling(v *viper.Viper, core zapcore.Core) zapcore.Core {
// Zap samples by logging the first cfgLoggerSamplingInitial entries with a given level
// and message within the specified time interval.
// With this config, the first cfgLoggerSamplingInitial log entries with the same level and message
// are recorded per cfgLoggerSamplingInterval; after that, only every cfgLoggerSamplingThereafter-th
// matching entry within the interval is logged and the rest are dropped.
if v.GetBool(cfgLoggerSamplingEnabled) {
core = zapcore.NewSamplerWithOptions(
core,
v.GetDuration(cfgLoggerSamplingInterval),
v.GetInt(cfgLoggerSamplingInitial),
v.GetInt(cfgLoggerSamplingThereafter),
)
}
return res, nil
return core
}
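To see the sampler's effect in isolation, here is a standalone sketch (not application code) with deliberately small numbers:
```go
// With first=1 and thereafter=3, within each one-second window the first
// occurrence of an identical entry is logged and then only every 3rd one.
base := zapcore.NewCore(
	zapcore.NewConsoleEncoder(zap.NewProductionEncoderConfig()),
	zapcore.AddSync(os.Stdout),
	zap.DebugLevel,
)
sampled := zapcore.NewSamplerWithOptions(base, time.Second, 1, 3)
logger := zap.New(sampled)
for i := 0; i < 10; i++ {
	logger.Info("same message") // entries 1, 4, 7 and 10 survive
}
```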
func getLogLevel(v *viper.Viper) (zapcore.Level, error) {
var lvl zapcore.Level
lvlStr := v.GetString(cfgLoggerLevel)
err := lvl.UnmarshalText([]byte(lvlStr))
if err != nil {
return lvl, fmt.Errorf("incorrect logger level configuration %s (%v), "+
"value should be one of %v", lvlStr, err, [...]zapcore.Level{
zapcore.DebugLevel,
zapcore.InfoLevel,
zapcore.WarnLevel,
zapcore.ErrorLevel,
zapcore.DPanicLevel,
zapcore.PanicLevel,
zapcore.FatalLevel,
})
}
return lvl, nil
}
func fetchReconnectInterval(cfg *viper.Viper) time.Duration {
@ -546,19 +534,20 @@ func fetchIndexPageTemplate(v *viper.Viper, l *zap.Logger) (string, bool) {
if !v.GetBool(cfgIndexPageEnabled) {
return "", false
}
reader, err := os.Open(v.GetString(cfgIndexPageTemplatePath))
if err != nil {
l.Warn(logs.FailedToReadIndexPageTemplate, zap.Error(err), logs.TagField(logs.TagApp))
l.Warn(logs.FailedToReadIndexPageTemplate, zap.Error(err))
return "", true
}
tmpl, err := io.ReadAll(reader)
if err != nil {
l.Warn(logs.FailedToReadIndexPageTemplate, zap.Error(err), logs.TagField(logs.TagApp))
l.Warn(logs.FailedToReadIndexPageTemplate, zap.Error(err))
return "", true
}
l.Info(logs.SetCustomIndexPageTemplate, logs.TagField(logs.TagApp))
l.Info(logs.SetCustomIndexPageTemplate)
return string(tmpl), true
}
@ -599,7 +588,7 @@ func fetchServers(v *viper.Viper, log *zap.Logger) []ServerInfo {
}
if _, ok := seen[serverInfo.Address]; ok {
log.Warn(logs.WarnDuplicateAddress, zap.String("address", serverInfo.Address), logs.TagField(logs.TagApp))
log.Warn(logs.WarnDuplicateAddress, zap.String("address", serverInfo.Address))
continue
}
seen[serverInfo.Address] = struct{}{}
@ -609,10 +598,10 @@ func fetchServers(v *viper.Viper, log *zap.Logger) []ServerInfo {
return servers
}
func (a *app) initPools(ctx context.Context) {
key, err := getFrostFSKey(a.config(), a.log)
func getPools(ctx context.Context, logger *zap.Logger, cfg *viper.Viper, dialSource *internalnet.DialerSource) (*pool.Pool, *treepool.Pool, *keys.PrivateKey) {
key, err := getFrostFSKey(cfg, logger)
if err != nil {
a.log.Fatal(logs.CouldNotLoadFrostFSPrivateKey, zap.Error(err), logs.TagField(logs.TagApp))
logger.Fatal(logs.CouldNotLoadFrostFSPrivateKey, zap.Error(err))
}
var prm pool.InitParameters
@ -620,84 +609,77 @@ func (a *app) initPools(ctx context.Context) {
prm.SetKey(&key.PrivateKey)
prmTree.SetKey(key)
a.log.Info(logs.UsingCredentials, zap.String("FrostFS", hex.EncodeToString(key.PublicKey().Bytes())),
logs.TagField(logs.TagApp))
logger.Info(logs.UsingCredentials, zap.String("FrostFS", hex.EncodeToString(key.PublicKey().Bytes())))
for _, peer := range fetchPeers(a.log, a.config()) {
for _, peer := range fetchPeers(logger, cfg) {
prm.AddNode(peer)
prmTree.AddNode(peer)
}
connTimeout := a.config().GetDuration(cfgConTimeout)
connTimeout := cfg.GetDuration(cfgConTimeout)
if connTimeout <= 0 {
connTimeout = defaultConnectTimeout
}
prm.SetNodeDialTimeout(connTimeout)
prmTree.SetNodeDialTimeout(connTimeout)
streamTimeout := a.config().GetDuration(cfgStreamTimeout)
streamTimeout := cfg.GetDuration(cfgStreamTimeout)
if streamTimeout <= 0 {
streamTimeout = defaultStreamTimeout
}
prm.SetNodeStreamTimeout(streamTimeout)
prmTree.SetNodeStreamTimeout(streamTimeout)
healthCheckTimeout := a.config().GetDuration(cfgReqTimeout)
healthCheckTimeout := cfg.GetDuration(cfgReqTimeout)
if healthCheckTimeout <= 0 {
healthCheckTimeout = defaultRequestTimeout
}
prm.SetHealthcheckTimeout(healthCheckTimeout)
prmTree.SetHealthcheckTimeout(healthCheckTimeout)
rebalanceInterval := a.config().GetDuration(cfgRebalance)
rebalanceInterval := cfg.GetDuration(cfgRebalance)
if rebalanceInterval <= 0 {
rebalanceInterval = defaultRebalanceTimer
}
prm.SetClientRebalanceInterval(rebalanceInterval)
prmTree.SetClientRebalanceInterval(rebalanceInterval)
errorThreshold := a.config().GetUint32(cfgPoolErrorThreshold)
errorThreshold := cfg.GetUint32(cfgPoolErrorThreshold)
if errorThreshold <= 0 {
errorThreshold = defaultPoolErrorThreshold
}
prm.SetErrorThreshold(errorThreshold)
prm.SetLogger(a.log.With(logs.TagField(logs.TagDatapath)))
prmTree.SetLogger(a.log.With(logs.TagField(logs.TagDatapath)))
prm.SetLogger(logger)
prmTree.SetLogger(logger)
prmTree.SetMaxRequestAttempts(a.config().GetInt(cfgTreePoolMaxAttempts))
prmTree.SetMaxRequestAttempts(cfg.GetInt(cfgTreePoolMaxAttempts))
interceptors := []grpc.DialOption{
grpc.WithUnaryInterceptor(grpctracing.NewUnaryClientInteceptor()),
grpc.WithStreamInterceptor(grpctracing.NewStreamClientInterceptor()),
grpc.WithContextDialer(a.settings.dialerSource.GrpcContextDialer()),
grpc.WithContextDialer(dialSource.GrpcContextDialer()),
}
prm.SetGRPCDialOptions(interceptors...)
prmTree.SetGRPCDialOptions(interceptors...)
p, err := pool.NewPool(prm)
if err != nil {
a.log.Fatal(logs.FailedToCreateConnectionPool, zap.Error(err), logs.TagField(logs.TagApp))
logger.Fatal(logs.FailedToCreateConnectionPool, zap.Error(err))
}
if err = p.Dial(ctx); err != nil {
a.log.Fatal(logs.FailedToDialConnectionPool, zap.Error(err), logs.TagField(logs.TagApp))
}
if a.config().GetBool(cfgFeaturesTreePoolNetmapSupport) {
prmTree.SetNetMapInfoSource(frostfs.NewSource(frostfs.NewFrostFS(p), cache.NewNetmapCache(getNetmapCacheOptions(a.config(), a.log)), a.bucketCache, a.log))
logger.Fatal(logs.FailedToDialConnectionPool, zap.Error(err))
}
treePool, err := treepool.NewPool(prmTree)
if err != nil {
a.log.Fatal(logs.FailedToCreateTreePool, zap.Error(err), logs.TagField(logs.TagApp))
logger.Fatal(logs.FailedToCreateTreePool, zap.Error(err))
}
if err = treePool.Dial(ctx); err != nil {
a.log.Fatal(logs.FailedToDialTreePool, zap.Error(err), logs.TagField(logs.TagApp))
logger.Fatal(logs.FailedToDialTreePool, zap.Error(err))
}
a.pool = p
a.treePool = treePool
a.key = key
return p, treePool, key
}
func fetchPeers(l *zap.Logger, v *viper.Viper) []pool.NodeParam {
@ -723,8 +705,7 @@ func fetchPeers(l *zap.Logger, v *viper.Viper) []pool.NodeParam {
l.Info(logs.AddedStoragePeer,
zap.Int("priority", priority),
zap.String("address", address),
zap.Float64("weight", weight),
logs.TagField(logs.TagApp))
zap.Float64("weight", weight))
}
return nodes
@ -739,7 +720,7 @@ func fetchSoftMemoryLimit(cfg *viper.Viper) int64 {
return int64(softMemoryLimit)
}
func getBucketCacheOptions(v *viper.Viper, l *zap.Logger) *cache.Config {
func getCacheOptions(v *viper.Viper, l *zap.Logger) *cache.Config {
cacheCfg := cache.DefaultBucketConfig(l)
cacheCfg.Lifetime = fetchCacheLifetime(v, l, cfgBucketsCacheLifetime, cacheCfg.Lifetime)
@ -748,14 +729,6 @@ func getBucketCacheOptions(v *viper.Viper, l *zap.Logger) *cache.Config {
return cacheCfg
}
func getNetmapCacheOptions(v *viper.Viper, l *zap.Logger) *cache.NetmapCacheConfig {
cacheCfg := cache.DefaultNetmapConfig(l)
cacheCfg.Lifetime = fetchCacheLifetime(v, l, cfgNetmapCacheLifetime, cacheCfg.Lifetime)
return cacheCfg
}
func fetchCacheLifetime(v *viper.Viper, l *zap.Logger, cfgEntry string, defaultValue time.Duration) time.Duration {
if v.IsSet(cfgEntry) {
lifetime := v.GetDuration(cfgEntry)
@ -763,8 +736,7 @@ func fetchCacheLifetime(v *viper.Viper, l *zap.Logger, cfgEntry string, defaultV
l.Error(logs.InvalidLifetimeUsingDefaultValue,
zap.String("parameter", cfgEntry),
zap.Duration("value in config", lifetime),
zap.Duration("default", defaultValue),
logs.TagField(logs.TagApp))
zap.Duration("default", defaultValue))
} else {
return lifetime
}
@ -780,8 +752,7 @@ func fetchCacheSize(v *viper.Viper, l *zap.Logger, cfgEntry string, defaultValue
l.Error(logs.InvalidCacheSizeUsingDefaultValue,
zap.String("parameter", cfgEntry),
zap.Int("value in config", size),
zap.Int("default", defaultValue),
logs.TagField(logs.TagApp))
zap.Int("default", defaultValue))
} else {
return size
}
@ -793,7 +764,7 @@ func fetchCacheSize(v *viper.Viper, l *zap.Logger, cfgEntry string, defaultValue
func getDialerSource(logger *zap.Logger, cfg *viper.Viper) *internalnet.DialerSource {
source, err := internalnet.NewDialerSource(fetchMultinetConfig(cfg, logger))
if err != nil {
logger.Fatal(logs.FailedToLoadMultinetConfig, zap.Error(err), logs.TagField(logs.TagApp))
logger.Fatal(logs.FailedToLoadMultinetConfig, zap.Error(err))
}
return source
}
@ -844,10 +815,3 @@ func fetchTracingAttributes(v *viper.Viper) (map[string]string, error) {
return attributes, nil
}
func fetchArchiveCompression(v *viper.Viper) bool {
if v.IsSet(cfgZipCompression) {
return v.GetBool(cfgZipCompression)
}
return v.GetBool(cfgArchiveCompression)
}
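The function above implements a deprecation fallback: the legacy `zip.compression` key, when explicitly set, takes precedence over `archive.compression`. A short illustration of the resulting precedence (hypothetical, using viper directly):
```go
v := viper.New()
v.Set(cfgArchiveCompression, true)
fmt.Println(fetchArchiveCompression(v)) // true: archive.compression applies

v.Set(cfgZipCompression, false) // deprecated key explicitly set
fmt.Println(fetchArchiveCompression(v)) // false: zip.compression wins
```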


@ -1,60 +0,0 @@
package main
import (
"os"
"testing"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/resolver"
"github.com/stretchr/testify/require"
)
func TestConfigReload(t *testing.T) {
f, err := os.CreateTemp("", "conf")
require.NoError(t, err)
defer func() {
require.NoError(t, os.Remove(f.Name()))
}()
confData := `
pprof:
enabled: true
resolve_bucket:
default_namespaces: [""]
resolve_order:
- nns
`
_, err = f.WriteString(confData)
require.NoError(t, err)
require.NoError(t, f.Close())
cfg := settings()
require.NoError(t, cfg.flags.Parse([]string{"--config", f.Name(), "--connect_timeout", "15s"}))
require.NoError(t, cfg.reload())
require.True(t, cfg.config().GetBool(cfgPprofEnabled))
require.Equal(t, []string{""}, cfg.config().GetStringSlice(cfgResolveDefaultNamespaces))
require.Equal(t, []string{resolver.NNSResolver}, cfg.config().GetStringSlice(cfgResolveOrder))
require.Equal(t, 15*time.Second, cfg.config().GetDuration(cfgConTimeout))
require.NoError(t, os.Truncate(f.Name(), 0))
require.NoError(t, cfg.reload())
require.False(t, cfg.config().GetBool(cfgPprofEnabled))
require.Equal(t, []string{"", "root"}, cfg.config().GetStringSlice(cfgResolveDefaultNamespaces))
require.Equal(t, []string{resolver.NNSResolver, resolver.DNSResolver}, cfg.config().GetStringSlice(cfgResolveOrder))
require.Equal(t, 15*time.Second, cfg.config().GetDuration(cfgConTimeout))
}
func TestSetTLSEnabled(t *testing.T) {
cfg := settings()
require.NoError(t, cfg.flags.Parse([]string{"--" + cfgTLSCertFile, "tls.crt", "--" + cfgTLSKeyFile, "tls.key"}))
require.NoError(t, cfg.reload())
require.True(t, cfg.config().GetBool(cfgServer+".0."+cfgTLSEnabled))
}


@ -20,8 +20,6 @@ HTTP_GW_LOGGER_SAMPLING_ENABLED=false
HTTP_GW_LOGGER_SAMPLING_INITIAL=100
HTTP_GW_LOGGER_SAMPLING_THEREAFTER=100
HTTP_GW_LOGGER_SAMPLING_INTERVAL=1s
HTTP_GW_LOGGER_TAGS_0_NAME=app
HTTP_GW_LOGGER_TAGS_1_NAME=datapath
HTTP_GW_SERVER_0_ADDRESS=0.0.0.0:443
HTTP_GW_SERVER_0_TLS_ENABLED=false
@ -99,13 +97,9 @@ HTTP_GW_REBALANCE_TIMER=30s
# The number of errors on connection after which node is considered as unhealthy
HTTP_GW_POOL_ERROR_THRESHOLD=100
# Enable archive compression to download files by common prefix.
# DEPRECATED: Use HTTP_GW_ARCHIVE_COMPRESSION instead.
# Enable zip compression to download files by common prefix.
HTTP_GW_ZIP_COMPRESSION=false
# Enable archive compression to download files by common prefix.
HTTP_GW_ARCHIVE_COMPRESSION=false
HTTP_GW_TRACING_ENABLED=true
HTTP_GW_TRACING_ENDPOINT="localhost:4317"
HTTP_GW_TRACING_EXPORTER="otlp_grpc"
@ -127,8 +121,6 @@ HTTP_GW_FROSTFS_BUFFER_MAX_SIZE_FOR_PUT=1048576
# Cache which contains mapping of bucket name to bucket info
HTTP_GW_CACHE_BUCKETS_LIFETIME=1m
HTTP_GW_CACHE_BUCKETS_SIZE=1000
# Cache which stores netmap
HTTP_GW_CACHE_NETMAP_LIFETIME=1m
# Header to determine zone to resolve bucket name
HTTP_GW_RESOLVE_BUCKET_NAMESPACE_HEADER=X-Frostfs-Namespace
@ -166,9 +158,4 @@ HTTP_GW_WORKER_POOL_SIZE=1000
# Enable index page support
HTTP_GW_INDEX_PAGE_ENABLED=false
# Index page template path
HTTP_GW_INDEX_PAGE_TEMPLATE_PATH=internal/handler/templates/index.gotmpl
# Enable using a fallback path to search for an object by attribute
HTTP_GW_FEATURES_ENABLE_FILEPATH_FALLBACK=false
# Enable the new version of the tree pool, which uses the netmap to select nodes for tree service requests
HTTP_GW_FEATURES_TREE_POOL_NETMAP_SUPPORT=true
HTTP_GW_INDEX_PAGE_TEMPLATE_PATH=internal/handler/templates/index.gotmpl


@ -29,10 +29,6 @@ logger:
initial: 100
thereafter: 100
interval: 1s
tags:
- name: app
- name: datapath
level: debug
server:
- address: 0.0.0.0:8080
@ -120,19 +116,13 @@ pool_error_threshold: 100 # The number of errors on connection after which node
# Number of workers in handler's worker pool
worker_pool_size: 1000
# Enables index page to see objects list for specified container and prefix
# Enable index page to see objects list for specified container and prefix
index_page:
enabled: false
template_path: internal/handler/templates/index.gotmpl
# Deprecated: Use archive.compression instead
zip:
# Enables zip compression to download files by common prefix.
compression: false
archive:
# Enables archive compression to download files by common prefix.
compression: false
compression: false # Enable zip compression to download files by common prefix.
runtime:
soft_memory_limit: 1gb
@ -153,9 +143,6 @@ cache:
buckets:
lifetime: 1m
size: 1000
# Cache which stores netmap
netmap:
lifetime: 1m
resolve_bucket:
namespace_header: X-Frostfs-Namespace
@ -185,9 +172,3 @@ multinet:
source_ips:
- 1.2.3.4
- 1.2.3.5
features:
# Enable using a fallback path to search for an object by attribute
enable_filepath_fallback: false
# Enable the new version of the tree pool, which uses the netmap to select nodes for tree service requests
tree_pool_netmap_support: true


@ -1,14 +1,14 @@
# HTTP Gateway Specification
| Route | Description |
|-------------------------------------------------|--------------------------------------------------|
| `/upload/{cid}` | [Put object](#put-object) |
| `/get/{cid}/{oid}` | [Get object](#get-object) |
| `/get_by_attribute/{cid}/{attr_key}/{attr_val}` | [Search object](#search-object) |
| `/zip/{cid}/{prefix}`, `/tar/{cid}/{prefix}` | [Download objects in archive](#download-archive) |
| Route | Description |
|-------------------------------------------------|----------------------------------------------|
| `/upload/{cid}` | [Put object](#put-object) |
| `/get/{cid}/{oid}` | [Get object](#get-object) |
| `/get_by_attribute/{cid}/{attr_key}/{attr_val}` | [Search object](#search-object) |
| `/zip/{cid}/{prefix}` | [Download objects in archive](#download-zip) |
**Note:** `cid` parameter can be base58 encoded container ID or container name
(the name must be registered in NNS, see appropriate section in [nns.md](./nns.md)).
(the name must be registered in NNS, see appropriate section in [README](../README.md#nns)).
Route parameters can be:
@ -18,7 +18,7 @@ Route parameters can be:
### Bearer token
All routes can accept [bearer token](./authentication.md) from:
All routes can accept [bearer token](../README.md#authentication) from:
* `Authorization` header with `Bearer` type and base64-encoded token in
credentials field
@ -56,14 +56,12 @@ Upload file as object with attributes to FrostFS.
###### Headers
| Header | Description |
|------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| Common headers | See [bearer token](#bearer-token). |
| `X-Attribute-System-*` | Used to set system FrostFS object attributes <br/> (e.g. use "X-Attribute-System-Expiration-Epoch" to set `__SYSTEM__EXPIRATION_EPOCH` attribute). |
| `X-Attribute-*` | Used to set regular object attributes <br/> (e.g. use "X-Attribute-My-Tag" to set `My-Tag` attribute). |
| `X-Explode-Archive` | If set, the gateway reads files from the uploaded `tar` archive and creates an object for each file in it. The uploaded `tar` may be Gzip-compressed by setting the `Content-Encoding` header. Sets the `FilePath` attribute to the path relative to the archive root and `FileName` to the last path element of the `FilePath`. |
| `Content-Encoding` | If set and value is `gzip`, gate will handle uploading file as a `Gzip` compressed `tar` file. |
| `Date` | This header is used to calculate the right `__SYSTEM__EXPIRATION` attribute for object. If the header is missing, the current server time is used. |
| Header | Description |
|------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------|
| Common headers | See [bearer token](#bearer-token). |
| `X-Attribute-System-*` | Used to set system FrostFS object attributes <br/> (e.g. use "X-Attribute-System-Expiration-Epoch" to set `__SYSTEM__EXPIRATION_EPOCH` attribute). |
| `X-Attribute-*` | Used to set regular object attributes <br/> (e.g. use "X-Attribute-My-Tag" to set `My-Tag` attribute). |
| `Date` | This header is used to calculate the right `__SYSTEM__EXPIRATION` attribute for object. If the header is missing, the current server time is used. |
There are some reserved headers type of `X-Attribute-FROSTFS-*` (headers are arranged in descending order of priority):
@ -271,9 +269,9 @@ If more than one object is found, an arbitrary one will be used to get attribute
| 400 | Some error occurred during operation. |
| 404 | Container or object not found. |
## Download archive
## Download zip
Route: `/zip/{cid}/{prefix}`, `/tar/{cid}/{prefix}`
Route: `/zip/{cid}/{prefix}`
| Route parameter | Type | Description |
|-----------------|-----------|---------------------------------------------------------|
@ -284,13 +282,12 @@ Route: `/zip/{cid}/{prefix}`, `/tar/{cid}/{prefix}`
#### GET
Find objects by prefix for `FilePath` attributes. Return found objects in a zip or tar archive.
Find objects by prefix for `FilePath` attributes. Return found objects in a zip archive.
File names in the archive are set from the `FilePath` attribute of the objects.
File timestamps are set to the time when the download started.
You can download all files in a container that have the `FilePath` attribute via the `/zip/{cid}/` or
`/tar/{cid}/` route.
You can download all files in a container that have the `FilePath` attribute via the `/zip/{cid}/` route.
The archive can be compressed (see http-gw [configuration](gate-configuration.md#archive-section)).
The archive can be compressed (see http-gw [configuration](gate-configuration.md#zip-section)).
##### Request


@ -1,108 +0,0 @@
# Request authentication
HTTP Gateway does not authorize requests. The gateway converts an HTTP request to a
FrostFS request and signs it with its own private key.
You can always upload files to public containers (open for anyone to put
objects into), but for restricted containers you need to explicitly allow PUT
operations for a request signed with your HTTP Gateway keys.
If you don't want to manage the gateway's secret keys and adjust policies whenever
the gateway configuration changes (new gate, key rotation, etc.), or you plan to use
public services, you can let your application backend (or you) issue Bearer Tokens
and pass them from the client via the gate down to the FrostFS level to grant access.
A FrostFS Bearer Token is essentially a policy signed by the container owner (refer to the FrostFS
documentation for more details). There are two ways to pass it to the gateway:
* "Authorization" header with "Bearer" type and base64-encoded token in
credentials field
* "Bearer" cookie with base64-encoded token contents
For example, you have a mobile application frontend with a backend part storing
data in FrostFS. When a user authorizes in the mobile app, the backend issues a FrostFS
Bearer token and provides it to the frontend. Then, the mobile app may generate
some data and upload it via any available FrostFS HTTP Gateway by adding
the corresponding header to the upload request. Accessing policy protected data
works the same way.
##### Example
In order to generate a bearer token, you need to have a wallet (which will be used to sign the token)
1. Suppose you have a container with a private policy for a wallet key
```
$ frostfs-cli container create -r <endpoint> --wallet <wallet> --policy <policy> --basic-acl 0 --await
CID: 9dfzyvq82JnFqp5svxcREf2iy6XNuifYcJPusEDnGK9Z
$ frostfs-cli ape-manager add -r <endpoint> --wallet <wallet> \
--target-type container --target-name 9dfzyvq82JnFqp5svxcREf2iy6XNuifYcJPusEDnGK9Z \
--rule "allow Object.* RequestCondition:"\$Actor:publicKey"=03b09baabff3f6107c7e9acb8721a6fc5618d45b50247a314d82e548702cce8cd5 *" \
--chain-id <chainID>
```
2. Form a Bearer token (10000 is the lifetime expiration, in epochs) that lets the
HTTP Gateway request impersonate a wallet-signed request, and save it to **bearer.json**:
```
{
"body": {
"allowImpersonate": true,
"lifetime": {
"exp": "10000",
"nbf": "0",
"iat": "0"
}
},
"signature": null
}
```
3. Sign it with the wallet:
```
$ frostfs-cli util sign bearer-token --from bearer.json --to signed.json -w <wallet>
```
4. Encode to base64 to use in header:
```
$ base64 -w 0 signed.json
# output: Ck4KKgoECAIQBhIiCiCZGdlbN7DPGPMg9rsWqV+p2XdMzUqknRiexewSFp8kmBIbChk17MUri6OJ0X5ftsHzy7NERDNFB4C92PcaGgMIkE4SZgohAxpsb7vfAso1F0X6hrm6WpRS14WsT3/Ct1SMoqRsT89KEkEEGxKi8GjKSf52YqhppgaOTQHbUsL3jn7SHLqS3ndAQ7NtAATnmRHleZw2V2xRRSRBQdjDC05KK83LhdSax72Fsw==
```
After that, the Bearer token can be used:
```
$ curl -F 'file=@cat.jpeg;filename=cat.jpeg' -H "Authorization: Bearer Ck4KKgoECAIQBhIiCiCZGdlbN7DPGPMg9rsWqV+p2XdMzUqknRiexewSFp8kmBIbChk17MUri6OJ0X5ftsHzy7NERDNFB4C92PcaGgMIkE4SZgohAxpsb7vfAso1F0X6hrm6WpRS14WsT3/Ct1SMoqRsT89KEkEEGxKi8GjKSf52YqhppgaOTQHbUsL3jn7SHLqS3ndAQ7NtAATnmRHleZw2V2xRRSRBQdjDC05KK83LhdSax72Fsw==" \
http://localhost:8082/upload/BJeErH9MWmf52VsR1mLWKkgF3pRm3FkubYxM7TZkBP4K
# output:
# {
# "object_id": "DhfES9nVrFksxGDD2jQLunGADfrXExxNwqXbDafyBn9X",
# "container_id": "BJeErH9MWmf52VsR1mLWKkgF3pRm3FkubYxM7TZkBP4K"
# }
```
##### Note: Bearer Token owner
You can specify the exact key that is allowed to use the Bearer Token (the gateway wallet address).
To do this, encode the wallet address in base64 format
```
$ echo 'NhVtreTTCoqsMQV5Wp55fqnriiUCpEaKm3' | base58 --decode | base64
# output: NezFK4ujidF+X7bB88uzREQzRQeAvdj3Gg==
```
Then specify this value in the Bearer Token JSON
```
{
"body": {
"ownerID": {
"value": "NezFK4ujidF+X7bB88uzREQzRQeAvdj3Gg=="
},
...
```
##### Note: Policy override
Instead of impersonation, you can define the set of policies that will be applied
to the request sender. This allows you to restrict access to specific operations and
specific objects without giving full impersonation control to the token user.


@ -59,7 +59,7 @@ $ cat http.log
| `resolve_bucket` | [Bucket name resolving configuration](#resolve_bucket-section) |
| `index_page` | [Index page configuration](#index_page-section) |
| `multinet` | [Multinet configuration](#multinet-section) |
| `features` | [Features configuration](#features-section) |
# General section
@ -174,11 +174,6 @@ logger:
initial: 100
thereafter: 100
interval: 1s
tags:
- name: "app"
level: info
- name: "datapath"
- name: "external_storage_tree"
```
| Parameter | Type | SIGHUP reload | Default value | Description |
@ -189,30 +184,6 @@ logger:
| `sampling.initial` | `int` | no | '100' | Sampling count of first log entries. |
| `sampling.thereafter` | `int` | no | '100' | Sampling count of entries after an `interval`. |
| `sampling.interval` | `duration` | no | '1s' | Sampling interval of messaging similar entries. |
| `sampling.tags` | `[]Tag` | yes | | Tagged log entries that should be additionally logged (available tags see in the next section). |
## Tags
Additional log entries that may hurt performance can be enabled with the `logger.tags`
parameter. Available tags:
```yaml
tags:
- name: "app"
level: info
```
| Parameter | Type | SIGHUP reload | Default value | Description |
|-----------------------|------------|---------------|---------------------------|-------------------------------------------------------------------------------------------------------|
| `name` | `string` | yes | | Tag name. Possible values see below in `Tag values` section. |
| `level` | `string` | yes | Value from `logger.level` | Logging level for specific tag. Possible values: `debug`, `info`, `warn`, `dpanic`, `panic`, `fatal`. |
### Tag values
* `app` - common application logs (enabled by default).
* `datapath` - main logic of application (enabled by default).
* `external_storage` - external interaction with storage node (enabled by default).
* `external_storage_tree` - external interaction with tree service in storage node (enabled by default).
# `web` section
@ -247,9 +218,8 @@ upload_header:
|-------------------------|--------|---------------|---------------|-------------------------------------------------------------|
| `use_default_timestamp` | `bool` | yes | `false` | Create timestamp for object if it isn't provided by header. |
# `zip` section
> **_DEPRECATED:_** Use archive section instead
# `zip` section
```yaml
zip:
@ -260,17 +230,6 @@ zip:
|---------------|--------|---------------|---------------|--------------------------------------------------------------|
| `compression` | `bool` | yes | `false` | Enable zip compression when download files by common prefix. |
# `archive` section
```yaml
archive:
compression: false
```
| Parameter | Type | SIGHUP reload | Default value | Description |
|---------------|--------|---------------|---------------|------------------------------------------------------------------|
| `compression` | `bool` | yes | `false` | Enable archive compression when download files by common prefix. |
# `pprof` section
@ -380,14 +339,12 @@ cache:
buckets:
lifetime: 1m
size: 1000
netmap:
lifetime: 1m
```
| Parameter | Type | Default value | Description |
|-----------|-----------------------------------|---------------------------------|---------------------------------------------------------------------------|
| `buckets` | [Cache config](#cache-subsection) | `lifetime: 60s`<br>`size: 1000` | Cache which contains mapping of bucket name to bucket info. |
| `netmap` | [Cache config](#cache-subsection) | `lifetime: 1m` | Cache which stores netmap. `netmap.size` isn't applicable for this cache. |
| Parameter | Type | Default value | Description |
|-----------------|-----------------------------------|-----------------------------------|----------------------------------------------------------------------------------------|
| `buckets` | [Cache config](#cache-subsection) | `lifetime: 60s`<br>`size: 1000` | Cache which contains mapping of bucket name to bucket info. |
#### `cache` subsection
@ -500,18 +457,3 @@ multinet:
|--------------|------------|---------------|---------------|----------------------------------------------------------------------|
| `mask` | `string` | yes | | Destination subnet. |
| `source_ips` | `[]string` | yes | | Array of source IP addresses to use when dialing destination subnet. |
# `features` section
Contains parameters for enabling features.
```yaml
features:
enable_filepath_fallback: true
tree_pool_netmap_support: true
```
| Parameter | Type | SIGHUP reload | Default value | Description |
|-------------------------------------|--------|---------------|---------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `features.enable_filepath_fallback` | `bool` | yes | `false` | Enable using a fallback path to search for an object by attribute. If the value of the `FilePath` attribute in the request contains no `/` symbols or a single leading `/` symbol and the object was not found, an attempt is made to search for the object by the `FileName` attribute. |
| `features.tree_pool_netmap_support` | `bool` | no | `false` | Enable the new version of the tree pool, which uses the netmap to select nodes for tree service requests. |


@ -1,36 +0,0 @@
# Nicename Resolving with NNS
Steps to start using name resolving:
1. Enable NNS resolving in config (`rpc_endpoint` must be a valid neo rpc node, see [configs](./config) for other examples):
```yaml
rpc_endpoint: http://morph-chain.frostfs.devenv:30333
resolve_order:
- nns
```
2. Make sure your container is registered in the NNS contract. If you use [frostfs-dev-env](https://git.frostfs.info/TrueCloudLab/frostfs-dev-env),
you can check whether your container (e.g. named `container-name`) is registered in NNS:
```shell
$ curl -s --data '{"id":1,"jsonrpc":"2.0","method":"getcontractstate","params":[1]}' \
http://morph-chain.frostfs.devenv:30333 | jq -r '.result.hash'
0x8e6c3cd4b976b28e84a3788f6ea9e2676c15d667
$ docker exec -it morph_chain neo-go \
contract testinvokefunction \
-r http://morph-chain.frostfs.devenv:30333 0x8e6c3cd4b976b28e84a3788f6ea9e2676c15d667 \
resolve string:container-name.container int:16 \
| jq -r '.stack[0].value | if type=="array" then .[0].value else . end' \
| base64 -d && echo
7f3vvkw4iTiS5ZZbu5BQXEmJtETWbi3uUjLNaSs29xrL
```
3. Use container name instead of its `$CID`. For example:
```shell
$ curl http://localhost:8082/get_by_attribute/container-name/FileName/object-name
```

go.mod

@ -3,13 +3,13 @@ module git.frostfs.info/TrueCloudLab/frostfs-http-gw
go 1.22
require (
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241125133852-37bd75821121
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250130095343-593dd77d841a
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20241011114054-f0fc40e116d1
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241112082307-f17779933e88
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241022124111-5361f0ecebd3
git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972
git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02
github.com/bluele/gcache v0.0.2
github.com/docker/docker v27.1.1+incompatible
github.com/docker/go-units v0.5.0
github.com/docker/go-units v0.4.0
github.com/fasthttp/router v1.4.1
github.com/nspcc-dev/neo-go v0.106.2
github.com/panjf2000/ants/v2 v2.5.0
@ -19,120 +19,102 @@ require (
github.com/spf13/viper v1.15.0
github.com/ssgreg/journald v1.0.0
github.com/stretchr/testify v1.9.0
github.com/testcontainers/testcontainers-go v0.35.0
github.com/testcontainers/testcontainers-go v0.13.0
github.com/trailofbits/go-fuzz-utils v0.0.0-20230413173806-58c38daa3cb4
github.com/valyala/fasthttp v1.34.0
go.opentelemetry.io/otel v1.31.0
go.opentelemetry.io/otel/trace v1.31.0
go.opentelemetry.io/otel v1.28.0
go.opentelemetry.io/otel/trace v1.28.0
go.uber.org/zap v1.27.0
golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842
golang.org/x/net v0.30.0
golang.org/x/sys v0.28.0
google.golang.org/grpc v1.69.2
golang.org/x/net v0.26.0
golang.org/x/sys v0.22.0
google.golang.org/grpc v1.66.2
)
require (
dario.cat/mergo v1.0.0 // indirect
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e // indirect
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 // indirect
git.frostfs.info/TrueCloudLab/hrw v1.2.1 // indirect
git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0 // indirect
git.frostfs.info/TrueCloudLab/tzhash v1.8.0 // indirect
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/Microsoft/go-winio v0.5.2 // indirect
github.com/Microsoft/hcsshim v0.9.2 // indirect
github.com/VictoriaMetrics/easyproto v0.1.4 // indirect
github.com/andybalholm/brotli v1.0.4 // indirect
github.com/antlr4-go/antlr/v4 v4.13.1 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/containerd/containerd v1.7.18 // indirect
github.com/containerd/log v0.1.0 // indirect
github.com/containerd/platforms v0.2.1 // indirect
github.com/cpuguy83/dockercfg v0.3.2 // indirect
github.com/containerd/cgroups v1.0.3 // indirect
github.com/containerd/containerd v1.6.2 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
github.com/distribution/reference v0.6.0 // indirect
github.com/docker/go-connections v0.5.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/docker/distribution v2.8.1+incompatible // indirect
github.com/docker/docker v20.10.14+incompatible // indirect
github.com/docker/go-connections v0.4.0 // indirect
github.com/fsnotify/fsnotify v1.6.0 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/gorilla/mux v1.8.0 // indirect
github.com/gorilla/websocket v1.5.1 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/ipfs/go-cid v0.0.7 // indirect
github.com/klauspost/compress v1.17.4 // indirect
github.com/klauspost/cpuid/v2 v2.2.6 // indirect
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/klauspost/compress v1.16.4 // indirect
github.com/magiconair/properties v1.8.7 // indirect
github.com/minio/sha256-simd v1.0.1 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/moby/docker-image-spec v1.3.1 // indirect
github.com/moby/patternmatcher v0.6.0 // indirect
github.com/moby/sys/sequential v0.5.0 // indirect
github.com/moby/sys/user v0.1.0 // indirect
github.com/moby/term v0.5.0 // indirect
github.com/moby/sys/mount v0.3.2 // indirect
github.com/moby/sys/mountinfo v0.6.1 // indirect
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
github.com/morikuni/aec v1.0.0 // indirect
github.com/mr-tron/base58 v1.2.0 // indirect
github.com/multiformats/go-base32 v0.1.0 // indirect
github.com/multiformats/go-base36 v0.2.0 // indirect
github.com/multiformats/go-multiaddr v0.14.0 // indirect
github.com/multiformats/go-multibase v0.2.0 // indirect
github.com/multiformats/go-multihash v0.2.3 // indirect
github.com/multiformats/go-varint v0.0.7 // indirect
github.com/nspcc-dev/go-ordered-json v0.0.0-20240301084351-0246b013f8b2 // indirect
github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20240521091047-78685785716d // indirect
github.com/nspcc-dev/rfc6979 v0.2.1 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.1.0 // indirect
github.com/opencontainers/image-spec v1.0.2 // indirect
github.com/opencontainers/runc v1.1.1 // indirect
github.com/pelletier/go-toml/v2 v2.0.6 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
github.com/prometheus/common v0.48.0 // indirect
github.com/prometheus/procfs v0.12.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/savsgio/gotils v0.0.0-20210617111740-97865ed5a873 // indirect
github.com/shirou/gopsutil/v3 v3.23.12 // indirect
github.com/shoenig/go-m1cpu v0.1.6 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/spaolacci/murmur3 v1.1.0 // indirect
github.com/sirupsen/logrus v1.8.1 // indirect
github.com/spf13/afero v1.9.3 // indirect
github.com/spf13/cast v1.5.0 // indirect
github.com/spf13/jwalterweatherman v1.1.0 // indirect
github.com/subosito/gotenv v1.4.2 // indirect
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
github.com/tklauser/go-sysconf v0.3.12 // indirect
github.com/tklauser/numcpus v0.6.1 // indirect
github.com/twmb/murmur3 v1.1.8 // indirect
github.com/urfave/cli v1.22.12 // indirect
github.com/urfave/cli v1.22.5 // indirect
github.com/valyala/bytebufferpool v1.0.0 // indirect
github.com/yusufpapurcu/wmi v1.2.3 // indirect
go.etcd.io/bbolt v1.3.9 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 // indirect
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0 // indirect
go.opentelemetry.io/otel/metric v1.31.0 // indirect
go.opentelemetry.io/otel/sdk v1.31.0 // indirect
go.opentelemetry.io/otel/metric v1.28.0 // indirect
go.opentelemetry.io/otel/sdk v1.28.0 // indirect
go.opentelemetry.io/proto/otlp v1.3.1 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/crypto v0.31.0 // indirect
golang.org/x/sync v0.10.0 // indirect
golang.org/x/term v0.27.0 // indirect
golang.org/x/text v0.21.0 // indirect
golang.org/x/crypto v0.24.0 // indirect
golang.org/x/sync v0.7.0 // indirect
golang.org/x/term v0.21.0 // indirect
golang.org/x/text v0.16.0 // indirect
golang.org/x/time v0.3.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 // indirect
google.golang.org/protobuf v1.36.1 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 // indirect
google.golang.org/protobuf v1.34.2 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
lukechampine.com/blake3 v1.2.1 // indirect
)

go.sum

File diff suppressed because it is too large.


@ -4,15 +4,13 @@ import (
"context"
"errors"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/api"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
)
// TreeService provide interface to interact with tree service using s3 data models.
type TreeService interface {
GetLatestVersion(ctx context.Context, cnrID *cid.ID, objectName string) (*data.NodeVersion, error)
GetSubTreeByPrefix(ctx context.Context, bktInfo *data.BucketInfo, prefix string, latestOnly bool) ([]data.NodeInfo, string, error)
CheckSettingsNodeExists(ctx context.Context, bktInfo *data.BucketInfo) error
GetLatestVersion(ctx context.Context, cnrID *cid.ID, objectName string) (*api.NodeVersion, error)
}
var (


@ -1,4 +1,4 @@
package data
package api
import (
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@ -7,21 +7,12 @@ import (
// NodeVersion represent node from tree service.
type NodeVersion struct {
BaseNodeVersion
DeleteMarker bool
IsPrefixNode bool
}
// BaseNodeVersion is minimal node info from tree service.
// Basically used for "system" object.
type BaseNodeVersion struct {
ID uint64
OID oid.ID
IsDeleteMarker bool
}
type NodeInfo struct {
Meta []NodeMeta
}
type NodeMeta interface {
GetKey() string
GetValue() []byte
OID oid.ID
}


@ -6,16 +6,14 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"github.com/bluele/gcache"
"go.uber.org/zap"
)
// BucketCache contains cache with objects and the lifetime of cache entries.
type BucketCache struct {
cache gcache.Cache
cidCache gcache.Cache
logger *zap.Logger
cache gcache.Cache
logger *zap.Logger
}
// Config stores expiration params for cache.
@ -42,45 +40,14 @@ func DefaultBucketConfig(logger *zap.Logger) *Config {
}
// NewBucketCache creates an object of BucketCache.
func NewBucketCache(config *Config, cidCache bool) *BucketCache {
cache := &BucketCache{
cache: gcache.New(config.Size).LRU().Expiration(config.Lifetime).Build(),
logger: config.Logger,
}
if cidCache {
cache.cidCache = gcache.New(config.Size).LRU().Expiration(config.Lifetime).Build()
}
return cache
func NewBucketCache(config *Config) *BucketCache {
gc := gcache.New(config.Size).LRU().Expiration(config.Lifetime).Build()
return &BucketCache{cache: gc, logger: config.Logger}
}
// Get returns a cached object.
func (o *BucketCache) Get(ns, bktName string) *data.BucketInfo {
return o.get(formKey(ns, bktName))
}
func (o *BucketCache) GetByCID(cnrID cid.ID) *data.BucketInfo {
if o.cidCache == nil {
return nil
}
entry, err := o.cidCache.Get(cnrID)
if err != nil {
return nil
}
key, ok := entry.(string)
if !ok {
o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
zap.String("expected", fmt.Sprintf("%T", key)), logs.TagField(logs.TagDatapath))
return nil
}
return o.get(key)
}
func (o *BucketCache) get(key string) *data.BucketInfo {
entry, err := o.cache.Get(key)
entry, err := o.cache.Get(formKey(ns, bktName))
if err != nil {
return nil
}
@ -88,7 +55,7 @@ func (o *BucketCache) get(key string) *data.BucketInfo {
result, ok := entry.(*data.BucketInfo)
if !ok {
o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
zap.String("expected", fmt.Sprintf("%T", result)), logs.TagField(logs.TagDatapath))
zap.String("expected", fmt.Sprintf("%T", result)))
return nil
}
@ -97,12 +64,6 @@ func (o *BucketCache) get(key string) *data.BucketInfo {
// Put puts an object to cache.
func (o *BucketCache) Put(bkt *data.BucketInfo) error {
if o.cidCache != nil {
if err := o.cidCache.Set(bkt.CID, formKey(bkt.Zone, bkt.Name)); err != nil {
return err
}
}
return o.cache.Set(formKey(bkt.Zone, bkt.Name), bkt)
}
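On the master side, `NewBucketCache` can additionally maintain a CID→name index so entries are retrievable by container ID as well. A hypothetical usage sketch:
```go
bc := cache.NewBucketCache(cache.DefaultBucketConfig(zap.NewNop()), true) // true enables the CID index

bkt := &data.BucketInfo{Name: "photos", Zone: "root"}
_ = bc.Put(bkt) // fills both the (zone, name) cache and the CID index

fromName := bc.Get("root", "photos") // lookup by namespace and bucket name
fromCID := bc.GetByCID(bkt.CID)      // lookup by container ID via the index
_, _ = fromName, fromCID
```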


@ -1,65 +0,0 @@
package cache
import (
"fmt"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"github.com/bluele/gcache"
"go.uber.org/zap"
)
type (
// NetmapCache provides cache for netmap.
NetmapCache struct {
cache gcache.Cache
logger *zap.Logger
}
// NetmapCacheConfig stores expiration params for cache.
NetmapCacheConfig struct {
Lifetime time.Duration
Logger *zap.Logger
}
)
const (
DefaultNetmapCacheLifetime = time.Minute
netmapCacheSize = 1
netmapKey = "netmap"
)
// DefaultNetmapConfig returns new default cache expiration values.
func DefaultNetmapConfig(logger *zap.Logger) *NetmapCacheConfig {
return &NetmapCacheConfig{
Lifetime: DefaultNetmapCacheLifetime,
Logger: logger,
}
}
// NewNetmapCache creates an object of NetmapCache.
func NewNetmapCache(config *NetmapCacheConfig) *NetmapCache {
gc := gcache.New(netmapCacheSize).LRU().Expiration(config.Lifetime).Build()
return &NetmapCache{cache: gc, logger: config.Logger}
}
func (c *NetmapCache) Get() *netmap.NetMap {
entry, err := c.cache.Get(netmapKey)
if err != nil {
return nil
}
result, ok := entry.(netmap.NetMap)
if !ok {
c.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
zap.String("expected", fmt.Sprintf("%T", result)), logs.TagField(logs.TagDatapath))
return nil
}
return &result
}
func (c *NetmapCache) Put(nm netmap.NetMap) error {
return c.cache.Set(netmapKey, nm)
}
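Since the cache holds a single entry under a fixed key, it effectively acts as a TTL-bounded holder for the most recent network map. A hypothetical usage sketch:
```go
nc := cache.NewNetmapCache(cache.DefaultNetmapConfig(zap.NewNop()))

var nm netmap.NetMap // obtained from a storage node elsewhere
_ = nc.Put(nm)

if cached := nc.Get(); cached != nil {
	_ = cached // still within Lifetime; reuse instead of re-fetching
}
```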


@ -2,7 +2,6 @@ package data
import (
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
)
type BucketInfo struct {
@ -10,5 +9,4 @@ type BucketInfo struct {
Zone string // container zone from system attribute
CID cid.ID
HomomorphicHashDisabled bool
PlacementPolicy netmap.PlacementPolicy
}


@ -22,13 +22,11 @@ import (
)
const (
dateFormat = "02-01-2006 15:04"
attrOID = "OID"
attrCreated = "Created"
attrFileName = "FileName"
attrFilePath = "FilePath"
attrSize = "Size"
attrDeleteMarker = "IsDeleteMarker"
dateFormat = "02-01-2006 15:04"
attrOID = "OID"
attrCreated = "Created"
attrFileName = "FileName"
attrSize = "Size"
)
type (
@ -40,25 +38,23 @@ type (
Objects []ResponseObject
}
ResponseObject struct {
OID string
Created string
FileName string
FilePath string
Size string
IsDir bool
GetURL string
IsDeleteMarker bool
OID string
Created string
FileName string
FilePath string
Size string
IsDir bool
GetURL string
}
)
func newListObjectsResponseS3(attrs map[string]string) ResponseObject {
return ResponseObject{
Created: formatTimestamp(attrs[attrCreated]),
OID: attrs[attrOID],
FileName: attrs[attrFileName],
Size: attrs[attrSize],
IsDir: attrs[attrOID] == "",
IsDeleteMarker: attrs[attrDeleteMarker] == "true",
Created: formatTimestamp(attrs[attrCreated]),
OID: attrs[attrOID],
FileName: attrs[attrFileName],
Size: attrs[attrSize],
IsDir: attrs[attrOID] == "",
}
}
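`newListObjectsResponseS3` derives all display fields from the attribute map: an empty `OID` marks a directory entry, and the delete-marker flag is parsed from a string attribute. A hypothetical illustration:
```go
attrs := map[string]string{
	attrOID:          "", // empty OID => directory entry
	attrFileName:     "photos",
	attrDeleteMarker: "false",
}
obj := newListObjectsResponseS3(attrs)
fmt.Println(obj.IsDir)          // true
fmt.Println(obj.IsDeleteMarker) // false
```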
@ -173,7 +169,7 @@ func (h *Handler) getDirObjectsS3(ctx context.Context, bucketInfo *data.BucketIn
objects: make([]ResponseObject, 0, len(nodes)),
}
for _, node := range nodes {
meta := node.Meta
meta := node.GetMeta()
if meta == nil {
continue
}
@ -182,9 +178,6 @@ func (h *Handler) getDirObjectsS3(ctx context.Context, bucketInfo *data.BucketIn
attrs[m.GetKey()] = string(m.GetValue())
}
obj := newListObjectsResponseS3(attrs)
if obj.IsDeleteMarker {
continue
}
obj.FilePath = prefix + obj.FileName
obj.GetURL = "/get/" + bucketInfo.Name + urlencode(obj.FilePath)
result.objects = append(result.objects, obj)
@ -230,7 +223,7 @@ func (h *Handler) getDirObjectsNative(ctx context.Context, bucketInfo *data.Buck
}
for objExt := range resp {
if objExt.Error != nil {
log.Error(logs.FailedToHeadObject, zap.Error(objExt.Error), logs.TagField(logs.TagExternalStorage))
log.Error(logs.FailedToHeadObject, zap.Error(objExt.Error))
result.hasErrors = true
continue
}
@ -273,7 +266,7 @@ func (h *Handler) headDirObjects(ctx context.Context, cnrID cid.ID, objectIDs Re
})
if err != nil {
wg.Done()
log.Warn(logs.FailedToSumbitTaskToPool, zap.Error(err), logs.TagField(logs.TagDatapath))
log.Warn(logs.FailedToSumbitTaskToPool, zap.Error(err))
}
select {
case <-ctx.Done():
@ -283,7 +276,7 @@ func (h *Handler) headDirObjects(ctx context.Context, cnrID cid.ID, objectIDs Re
}
})
if err != nil {
log.Error(logs.FailedToIterateOverResponse, zap.Error(err), logs.TagField(logs.TagDatapath))
log.Error(logs.FailedToIterateOverResponse, zap.Error(err))
}
wg.Wait()
}()


@ -1,22 +1,19 @@
package handler
import (
"archive/tar"
"archive/zip"
"bufio"
"compress/gzip"
"context"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/layer"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/response"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@ -26,55 +23,28 @@ import (
// DownloadByAddressOrBucketName handles download requests using simple cid/oid or bucketname/key format.
func (h *Handler) DownloadByAddressOrBucketName(c *fasthttp.RequestCtx) {
ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(c), "handler.DownloadByAddressOrBucketName")
defer span.End()
utils.SetContextToRequest(ctx, c)
oidURLParam := c.UserValue("oid").(string)
downloadQueryParam := c.QueryArgs().GetBool("download")
cidParam := c.UserValue("cid").(string)
oidParam := c.UserValue("oid").(string)
downloadParam := c.QueryArgs().GetBool("download")
log := utils.GetReqLogOrDefault(ctx, h.log).With(
zap.String("cid", cidParam),
zap.String("oid", oidParam),
)
bktInfo, err := h.getBucketInfo(ctx, cidParam, log)
if err != nil {
logAndSendBucketError(c, log, err)
return
}
checkS3Err := h.tree.CheckSettingsNodeExists(ctx, bktInfo)
if checkS3Err != nil && !errors.Is(checkS3Err, layer.ErrNodeNotFound) {
log.Error(logs.FailedToCheckIfSettingsNodeExist, zap.String("cid", bktInfo.CID.String()),
zap.Error(checkS3Err), logs.TagField(logs.TagExternalStorageTree))
logAndSendBucketError(c, log, checkS3Err)
return
}
req := newRequest(c, log)
var objID oid.ID
if checkS3Err == nil && shouldDownload(oidParam, downloadParam) {
h.byS3Path(ctx, req, bktInfo.CID, oidParam, h.receiveFile)
} else if err = objID.DecodeString(oidParam); err == nil {
h.byNativeAddress(ctx, req, bktInfo.CID, objID, h.receiveFile)
} else {
h.browseIndex(c, checkS3Err != nil)
switch {
case isObjectID(oidURLParam):
h.byNativeAddress(c, h.receiveFile)
case !isContainerRoot(oidURLParam) && (downloadQueryParam || !isDir(oidURLParam)):
h.byS3Path(c, h.receiveFile)
default:
h.browseIndex(c)
}
}
func shouldDownload(oidParam string, downloadParam bool) bool {
return !isDir(oidParam) || downloadParam
func (h *Handler) newRequest(ctx *fasthttp.RequestCtx, log *zap.Logger) *request {
return &request{
RequestCtx: ctx,
log: log,
}
}
// DownloadByAttribute handles attribute-based download requests.
func (h *Handler) DownloadByAttribute(c *fasthttp.RequestCtx) {
ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(c), "handler.DownloadByAttribute")
defer span.End()
utils.SetContextToRequest(ctx, c)
h.byAttribute(c, h.receiveFile)
}
@ -94,64 +64,13 @@ func (h *Handler) search(ctx context.Context, cnrID cid.ID, key, val string, op
return h.frostfs.SearchObjects(ctx, prm)
}
// DownloadZip handles zip by prefix requests.
func (h *Handler) DownloadZip(c *fasthttp.RequestCtx) {
ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(c), "handler.DownloadZip")
defer span.End()
utils.SetContextToRequest(ctx, c)
scid, _ := c.UserValue("cid").(string)
log := utils.GetReqLogOrDefault(ctx, h.log)
bktInfo, err := h.getBucketInfo(ctx, scid, log)
if err != nil {
logAndSendBucketError(c, log, err)
return
}
resSearch, err := h.searchObjectsByPrefix(c, log, bktInfo.CID)
if err != nil {
return
}
c.Response.Header.Set(fasthttp.HeaderContentType, "application/zip")
c.Response.Header.Set(fasthttp.HeaderContentDisposition, "attachment; filename=\"archive.zip\"")
c.SetBodyStreamWriter(h.getZipResponseWriter(ctx, log, resSearch, bktInfo))
}
func (h *Handler) getZipResponseWriter(ctx context.Context, log *zap.Logger, resSearch ResObjectSearch, bktInfo *data.BucketInfo) func(w *bufio.Writer) {
return func(w *bufio.Writer) {
defer resSearch.Close()
buf := make([]byte, 3<<20)
zipWriter := zip.NewWriter(w)
var objectsWritten int
errIter := resSearch.Iterate(h.putObjectToArchive(ctx, log, bktInfo.CID, buf,
func(obj *object.Object) (io.Writer, error) {
objectsWritten++
return h.createZipFile(zipWriter, obj)
}),
)
if errIter != nil {
log.Error(logs.IteratingOverSelectedObjectsFailed, zap.Error(errIter), logs.TagField(logs.TagDatapath))
return
} else if objectsWritten == 0 {
log.Warn(logs.ObjectsNotFound, logs.TagField(logs.TagDatapath))
}
if err := zipWriter.Close(); err != nil {
log.Error(logs.CloseZipWriter, zap.Error(err), logs.TagField(logs.TagDatapath))
}
}
}
func (h *Handler) createZipFile(zw *zip.Writer, obj *object.Object) (io.Writer, error) {
func (h *Handler) addObjectToZip(zw *zip.Writer, obj *object.Object) (io.Writer, error) {
method := zip.Store
if h.config.ArchiveCompression() {
if h.config.ZipCompression() {
method = zip.Deflate
}
filePath := getFilePath(obj)
filePath := getZipFilePath(obj)
if len(filePath) == 0 || filePath[len(filePath)-1] == '/' {
return nil, fmt.Errorf("invalid filepath '%s'", filePath)
}
@ -163,143 +82,99 @@ func (h *Handler) createZipFile(zw *zip.Writer, obj *object.Object) (io.Writer,
})
}
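zip.Store above streams entries through unchanged, while zip.Deflate trades gateway CPU for a smaller archive; the flag comes from config (ArchiveCompression on master, ZipCompression in v0.31.0). A minimal isolated sketch of the same decision, with hypothetical names:

import (
	"archive/zip"
	"io"
)

// newArchiveEntry creates a zip entry for name, storing or deflating the
// payload depending on the compress flag.
func newArchiveEntry(zw *zip.Writer, name string, compress bool) (io.Writer, error) {
	method := zip.Store // cheapest; sensible for already-compressed payloads
	if compress {
		method = zip.Deflate
	}
	return zw.CreateHeader(&zip.FileHeader{Name: name, Method: method})
}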
// DownloadTar forms tar.gz from objects by prefix.
func (h *Handler) DownloadTar(c *fasthttp.RequestCtx) {
ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(c), "handler.DownloadTar")
defer span.End()
utils.SetContextToRequest(ctx, c)
// DownloadZipped handles zip by prefix requests.
func (h *Handler) DownloadZipped(c *fasthttp.RequestCtx) {
scid, _ := c.UserValue("cid").(string)
prefix, _ := c.UserValue("prefix").(string)
ctx := utils.GetContextFromRequest(c)
log := utils.GetReqLogOrDefault(ctx, h.log)
prefix, err := url.QueryUnescape(prefix)
if err != nil {
log.Error(logs.FailedToUnescapeQuery, zap.String("cid", scid), zap.String("prefix", prefix), zap.Error(err))
response.Error(c, "could not unescape prefix: "+err.Error(), fasthttp.StatusBadRequest)
return
}
log = log.With(zap.String("cid", scid), zap.String("prefix", prefix))
bktInfo, err := h.getBucketInfo(ctx, scid, log)
if err != nil {
logAndSendBucketError(c, log, err)
return
}
resSearch, err := h.searchObjectsByPrefix(c, log, bktInfo.CID)
resSearch, err := h.search(ctx, bktInfo.CID, object.AttributeFilePath, prefix, object.MatchCommonPrefix)
if err != nil {
log.Error(logs.CouldNotSearchForObjects, zap.Error(err))
response.Error(c, "could not search for objects: "+err.Error(), fasthttp.StatusBadRequest)
return
}
c.Response.Header.Set(fasthttp.HeaderContentType, "application/gzip")
c.Response.Header.Set(fasthttp.HeaderContentDisposition, "attachment; filename=\"archive.tar.gz\"")
c.Response.Header.Set(fasthttp.HeaderContentType, "application/zip")
c.Response.Header.Set(fasthttp.HeaderContentDisposition, "attachment; filename=\"archive.zip\"")
c.Response.SetStatusCode(http.StatusOK)
c.SetBodyStreamWriter(h.getTarResponseWriter(ctx, log, resSearch, bktInfo))
}
func (h *Handler) getTarResponseWriter(ctx context.Context, log *zap.Logger, resSearch ResObjectSearch, bktInfo *data.BucketInfo) func(w *bufio.Writer) {
return func(w *bufio.Writer) {
c.SetBodyStreamWriter(func(w *bufio.Writer) {
defer resSearch.Close()
compressionLevel := gzip.NoCompression
if h.config.ArchiveCompression() {
compressionLevel = gzip.DefaultCompression
}
zipWriter := zip.NewWriter(w)
// the error can be ignored: it is non-nil only if the compressionLevel argument is invalid
gzipWriter, _ := gzip.NewWriterLevel(w, compressionLevel)
tarWriter := tar.NewWriter(gzipWriter)
var bufZip []byte
var addr oid.Address
defer func() {
if err := tarWriter.Close(); err != nil {
log.Error(logs.CloseTarWriter, zap.Error(err), logs.TagField(logs.TagDatapath))
empty := true
called := false
btoken := bearerToken(ctx)
addr.SetContainer(bktInfo.CID)
errIter := resSearch.Iterate(func(id oid.ID) bool {
called = true
if empty {
bufZip = make([]byte, 3<<20) // the same as for upload
}
if err := gzipWriter.Close(); err != nil {
log.Error(logs.CloseGzipWriter, zap.Error(err), logs.TagField(logs.TagDatapath))
empty = false
addr.SetObject(id)
if err = h.zipObject(ctx, zipWriter, addr, btoken, bufZip); err != nil {
log.Error(logs.FailedToAddObjectToArchive, zap.String("oid", id.EncodeToString()), zap.Error(err))
}
}()
var objectsWritten int
buf := make([]byte, 3<<20) // the same as for upload
errIter := resSearch.Iterate(h.putObjectToArchive(ctx, log, bktInfo.CID, buf,
func(obj *object.Object) (io.Writer, error) {
objectsWritten++
return h.createTarFile(tarWriter, obj)
}),
)
return false
})
if errIter != nil {
log.Error(logs.IteratingOverSelectedObjectsFailed, zap.Error(errIter), logs.TagField(logs.TagDatapath))
} else if objectsWritten == 0 {
log.Warn(logs.ObjectsNotFound, logs.TagField(logs.TagDatapath))
log.Error(logs.IteratingOverSelectedObjectsFailed, zap.Error(errIter))
} else if !called {
log.Error(logs.ObjectsNotFound)
}
}
}
func (h *Handler) createTarFile(tw *tar.Writer, obj *object.Object) (io.Writer, error) {
filePath := getFilePath(obj)
if len(filePath) == 0 || filePath[len(filePath)-1] == '/' {
return nil, fmt.Errorf("invalid filepath '%s'", filePath)
}
return tw, tw.WriteHeader(&tar.Header{
Name: filePath,
Mode: 0655,
Size: int64(obj.PayloadSize()),
if err = zipWriter.Close(); err != nil {
log.Error(logs.CloseZipWriter, zap.Error(err))
}
})
}
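Because the archive is produced through SetBodyStreamWriter, the response is streamed with no Content-Length, so clients should simply read to EOF. A minimal client sketch; the gateway address and container ID are placeholders:

package main

import (
	"io"
	"net/http"
	"os"
)

func main() {
	// hypothetical endpoint: /zip/{cid}/{prefix} on a local gateway
	resp, err := http.Get("http://localhost:8080/zip/<cid>/photos")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	out, err := os.Create("archive.zip")
	if err != nil {
		panic(err)
	}
	defer out.Close()

	// stream the body to disk; the server writes the zip incrementally
	if _, err := io.Copy(out, resp.Body); err != nil {
		panic(err)
	}
}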
func (h *Handler) putObjectToArchive(ctx context.Context, log *zap.Logger, cnrID cid.ID, buf []byte, createArchiveHeader func(obj *object.Object) (io.Writer, error)) func(id oid.ID) bool {
return func(id oid.ID) bool {
log = log.With(zap.String("oid", id.EncodeToString()))
prm := PrmObjectGet{
PrmAuth: PrmAuth{
BearerToken: bearerToken(ctx),
},
Address: newAddress(cnrID, id),
}
resGet, err := h.frostfs.GetObject(ctx, prm)
if err != nil {
log.Error(logs.FailedToGetObject, zap.Error(err), logs.TagField(logs.TagExternalStorage))
return false
}
fileWriter, err := createArchiveHeader(&resGet.Header)
if err != nil {
log.Error(logs.FailedToAddObjectToArchive, zap.Error(err), logs.TagField(logs.TagDatapath))
return false
}
if err = writeToArchive(resGet, fileWriter, buf); err != nil {
log.Error(logs.FailedToAddObjectToArchive, zap.Error(err), logs.TagField(logs.TagDatapath))
return false
}
return false
func (h *Handler) zipObject(ctx context.Context, zipWriter *zip.Writer, addr oid.Address, btoken *bearer.Token, bufZip []byte) error {
prm := PrmObjectGet{
PrmAuth: PrmAuth{
BearerToken: btoken,
},
Address: addr,
}
}
func (h *Handler) searchObjectsByPrefix(c *fasthttp.RequestCtx, log *zap.Logger, cnrID cid.ID) (ResObjectSearch, error) {
scid, _ := c.UserValue("cid").(string)
prefix, _ := c.UserValue("prefix").(string)
ctx := utils.GetContextFromRequest(c)
prefix, err := url.QueryUnescape(prefix)
resGet, err := h.frostfs.GetObject(ctx, prm)
if err != nil {
log.Error(logs.FailedToUnescapeQuery, zap.String("cid", scid), zap.String("prefix", prefix),
zap.Error(err), logs.TagField(logs.TagDatapath))
ResponseError(c, "could not unescape prefix: "+err.Error(), fasthttp.StatusBadRequest)
return nil, err
return fmt.Errorf("get FrostFS object: %v", err)
}
log = log.With(zap.String("cid", scid), zap.String("prefix", prefix))
resSearch, err := h.search(ctx, cnrID, object.AttributeFilePath, prefix, object.MatchCommonPrefix)
objWriter, err := h.addObjectToZip(zipWriter, &resGet.Header)
if err != nil {
log.Error(logs.CouldNotSearchForObjects, zap.Error(err), logs.TagField(logs.TagExternalStorage))
ResponseError(c, "could not search for objects: "+err.Error(), fasthttp.StatusBadRequest)
return nil, err
return fmt.Errorf("zip create header: %v", err)
}
return resSearch, nil
}
func writeToArchive(resGet *Object, objWriter io.Writer, buf []byte) error {
var err error
if _, err = io.CopyBuffer(objWriter, resGet.Payload, buf); err != nil {
if _, err = io.CopyBuffer(objWriter, resGet.Payload, bufZip); err != nil {
return fmt.Errorf("copy object payload to zip file: %v", err)
}
@ -307,10 +182,14 @@ func writeToArchive(resGet *Object, objWriter io.Writer, buf []byte) error {
return fmt.Errorf("object body close error: %w", err)
}
if err = zipWriter.Flush(); err != nil {
return fmt.Errorf("flush zip writer: %v", err)
}
return nil
}
func getFilePath(obj *object.Object) string {
func getZipFilePath(obj *object.Object) string {
for _, attr := range obj.Attributes() {
if attr.Key() == object.AttributeFilePath {
return attr.Value()


@ -50,8 +50,7 @@ func filterHeaders(l *zap.Logger, header *fasthttp.RequestHeader) (map[string]st
l.Debug(logs.AddAttributeToResultObject,
zap.String("key", k),
zap.String("val", v),
logs.TagField(logs.TagDatapath))
zap.String("val", v))
})
return result, err


@ -11,10 +11,10 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/cache"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler/middleware"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/layer"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/response"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
@ -29,13 +29,12 @@ import (
type Config interface {
DefaultTimestamp() bool
ArchiveCompression() bool
ZipCompression() bool
ClientCut() bool
IndexPageEnabled() bool
IndexPageTemplate() string
BufferMaxSizeForPut() uint64
NamespaceHeader() string
EnableFilepathFallback() bool
}
// PrmContainer groups parameters of FrostFS.Container operation.
@ -140,8 +139,6 @@ var (
ErrAccessDenied = errors.New("access denied")
// ErrGatewayTimeout is returned from FrostFS in case of timeout, deadline exceeded etc.
ErrGatewayTimeout = errors.New("gateway timeout")
// ErrQuotaLimitReached is returned from FrostFS in case of quota exceeded.
ErrQuotaLimitReached = errors.New("quota limit reached")
)
// FrostFS represents virtual connection to FrostFS network.
@ -167,7 +164,7 @@ type Handler struct {
ownerID *user.ID
config Config
containerResolver ContainerResolver
tree layer.TreeService
tree *tree.Tree
cache *cache.BucketCache
workerPool *ants.Pool
}
@ -180,7 +177,7 @@ type AppParams struct {
Cache *cache.BucketCache
}
func New(params *AppParams, config Config, tree layer.TreeService, workerPool *ants.Pool) *Handler {
func New(params *AppParams, config Config, tree *tree.Tree, workerPool *ants.Pool) *Handler {
return &Handler{
log: params.Logger,
frostfs: params.FrostFS,
@ -195,42 +192,77 @@ func New(params *AppParams, config Config, tree layer.TreeService, workerPool *a
// byNativeAddress is a wrapper for function (e.g. request.headObject, request.receiveFile) that
// prepares request and object address to it.
func (h *Handler) byNativeAddress(ctx context.Context, req request, cnrID cid.ID, objID oid.ID, handler func(context.Context, request, oid.Address)) {
ctx, span := tracing.StartSpanFromContext(ctx, "handler.byNativeAddress")
defer span.End()
func (h *Handler) byNativeAddress(c *fasthttp.RequestCtx, f func(context.Context, request, oid.Address)) {
idCnr, _ := c.UserValue("cid").(string)
idObj, _ := url.PathUnescape(c.UserValue("oid").(string))
addr := newAddress(cnrID, objID)
handler(ctx, req, addr)
ctx := utils.GetContextFromRequest(c)
reqLog := utils.GetReqLogOrDefault(ctx, h.log)
log := reqLog.With(zap.String("cid", idCnr), zap.String("oid", idObj))
bktInfo, err := h.getBucketInfo(ctx, idCnr, log)
if err != nil {
logAndSendBucketError(c, log, err)
return
}
objID := new(oid.ID)
if err = objID.DecodeString(idObj); err != nil {
log.Error(logs.WrongObjectID, zap.Error(err))
response.Error(c, "wrong object id", fasthttp.StatusBadRequest)
return
}
addr := newAddress(bktInfo.CID, *objID)
f(ctx, *h.newRequest(c, log), addr)
}
// byS3Path is a wrapper for function (e.g. request.headObject, request.receiveFile) that
// resolves object address from S3-like path <bucket name>/<object key>.
func (h *Handler) byS3Path(ctx context.Context, req request, cnrID cid.ID, path string, handler func(context.Context, request, oid.Address)) {
ctx, span := tracing.StartSpanFromContext(ctx, "handler.byS3Path")
defer span.End()
func (h *Handler) byS3Path(c *fasthttp.RequestCtx, f func(context.Context, request, oid.Address)) {
bucketname := c.UserValue("cid").(string)
key := c.UserValue("oid").(string)
c, log := req.RequestCtx, req.log
ctx := utils.GetContextFromRequest(c)
reqLog := utils.GetReqLogOrDefault(ctx, h.log)
log := reqLog.With(zap.String("bucketname", bucketname), zap.String("key", key))
foundOID, err := h.tree.GetLatestVersion(ctx, &cnrID, path)
unescapedKey, err := url.QueryUnescape(key)
if err != nil {
log.Error(logs.FailedToGetLatestVersionOfObject, zap.Error(err), zap.String("cid", cnrID.String()),
zap.String("path", path), logs.TagField(logs.TagExternalStorageTree))
logAndSendBucketError(c, log, err)
return
}
if foundOID.IsDeleteMarker {
log.Error(logs.ObjectWasDeleted, logs.TagField(logs.TagExternalStorageTree))
ResponseError(c, "object deleted", fasthttp.StatusNotFound)
bktInfo, err := h.getBucketInfo(ctx, bucketname, log)
if err != nil {
logAndSendBucketError(c, log, err)
return
}
addr := newAddress(cnrID, foundOID.OID)
handler(ctx, newRequest(c, log), addr)
foundOid, err := h.tree.GetLatestVersion(ctx, &bktInfo.CID, unescapedKey)
if err != nil {
if errors.Is(err, tree.ErrNodeAccessDenied) {
response.Error(c, "Access Denied", fasthttp.StatusForbidden)
} else {
response.Error(c, "object wasn't found", fasthttp.StatusNotFound)
log.Error(logs.GetLatestObjectVersion, zap.Error(err))
}
return
}
if foundOid.DeleteMarker {
log.Error(logs.ObjectWasDeleted)
response.Error(c, "object deleted", fasthttp.StatusNotFound)
return
}
addr := newAddress(bktInfo.CID, foundOid.OID)
f(ctx, *h.newRequest(c, log), addr)
}
// byAttribute is a wrapper similar to byNativeAddress.
func (h *Handler) byAttribute(c *fasthttp.RequestCtx, handler func(context.Context, request, oid.Address)) {
cidParam, _ := c.UserValue("cid").(string)
func (h *Handler) byAttribute(c *fasthttp.RequestCtx, f func(context.Context, request, oid.Address)) {
scid, _ := c.UserValue("cid").(string)
key, _ := c.UserValue("attr_key").(string)
val, _ := c.UserValue("attr_val").(string)
@ -239,92 +271,55 @@ func (h *Handler) byAttribute(c *fasthttp.RequestCtx, handler func(context.Conte
key, err := url.QueryUnescape(key)
if err != nil {
log.Error(logs.FailedToUnescapeQuery, zap.String("cid", cidParam), zap.String("attr_key", key),
zap.Error(err), logs.TagField(logs.TagDatapath))
ResponseError(c, "could not unescape attr_key: "+err.Error(), fasthttp.StatusBadRequest)
log.Error(logs.FailedToUnescapeQuery, zap.String("cid", scid), zap.String("attr_key", key), zap.Error(err))
response.Error(c, "could not unescape attr_key: "+err.Error(), fasthttp.StatusBadRequest)
return
}
val, err = url.QueryUnescape(val)
if err != nil {
log.Error(logs.FailedToUnescapeQuery, zap.String("cid", cidParam), zap.String("attr_val", val),
zap.Error(err), logs.TagField(logs.TagDatapath))
ResponseError(c, "could not unescape attr_val: "+err.Error(), fasthttp.StatusBadRequest)
log.Error(logs.FailedToUnescapeQuery, zap.String("cid", scid), zap.String("attr_val", val), zap.Error(err))
response.Error(c, "could not unescape attr_val: "+err.Error(), fasthttp.StatusBadRequest)
return
}
if key == attrFileName {
val = prepareFileName(val)
}
log = log.With(zap.String("cid", scid), zap.String("attr_key", key), zap.String("attr_val", val))
log = log.With(zap.String("cid", cidParam), zap.String("attr_key", key), zap.String("attr_val", val))
bktInfo, err := h.getBucketInfo(ctx, cidParam, log)
bktInfo, err := h.getBucketInfo(ctx, scid, log)
if err != nil {
logAndSendBucketError(c, log, err)
return
}
objID, err := h.findObjectByAttribute(ctx, log, bktInfo.CID, key, val)
res, err := h.search(ctx, bktInfo.CID, key, val, object.MatchStringEqual)
if err != nil {
if errors.Is(err, io.EOF) {
ResponseError(c, err.Error(), fasthttp.StatusNotFound)
return
}
ResponseError(c, err.Error(), fasthttp.StatusBadRequest)
log.Error(logs.CouldNotSearchForObjects, zap.Error(err))
response.Error(c, "could not search for objects: "+err.Error(), fasthttp.StatusBadRequest)
return
}
var addr oid.Address
addr.SetContainer(bktInfo.CID)
addr.SetObject(objID)
handler(ctx, newRequest(c, log), addr)
}
func (h *Handler) findObjectByAttribute(ctx context.Context, log *zap.Logger, cnrID cid.ID, attrKey, attrVal string) (oid.ID, error) {
res, err := h.search(ctx, cnrID, attrKey, attrVal, object.MatchStringEqual)
if err != nil {
log.Error(logs.CouldNotSearchForObjects, zap.Error(err), logs.TagField(logs.TagExternalStorage))
return oid.ID{}, fmt.Errorf("could not search for objects: %w", err)
}
defer res.Close()
buf := make([]oid.ID, 1)
n, err := res.Read(buf)
if n == 0 {
switch {
case errors.Is(err, io.EOF) && h.needSearchByFileName(attrKey, attrVal):
log.Debug(logs.ObjectNotFoundByFilePathTrySearchByFileName, logs.TagField(logs.TagExternalStorage))
return h.findObjectByAttribute(ctx, log, cnrID, attrFileName, prepareFileName(attrVal))
case errors.Is(err, io.EOF):
log.Error(logs.ObjectNotFound, zap.Error(err), logs.TagField(logs.TagExternalStorage))
return oid.ID{}, fmt.Errorf("object not found: %w", err)
default:
log.Error(logs.ReadObjectListFailed, zap.Error(err), logs.TagField(logs.TagExternalStorage))
return oid.ID{}, fmt.Errorf("read object list failed: %w", err)
if errors.Is(err, io.EOF) {
log.Error(logs.ObjectNotFound, zap.Error(err))
response.Error(c, "object not found", fasthttp.StatusNotFound)
return
}
log.Error(logs.ReadObjectListFailed, zap.Error(err))
response.Error(c, "read object list failed: "+err.Error(), fasthttp.StatusBadRequest)
return
}
return buf[0], nil
}
var addrObj oid.Address
addrObj.SetContainer(bktInfo.CID)
addrObj.SetObject(buf[0])
func (h *Handler) needSearchByFileName(key, val string) bool {
if key != attrFilePath || !h.config.EnableFilepathFallback() {
return false
}
return strings.HasPrefix(val, "/") && strings.Count(val, "/") == 1 || !strings.Contains(val, "/")
}
func prepareFileName(fileName string) string {
if strings.HasPrefix(fileName, "/") {
return fileName[1:]
}
return fileName
f(ctx, *h.newRequest(c, log), addrObj)
}
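Note the precedence in needSearchByFileName: && binds tighter than ||, so the FileName fallback fires only for values with no slash at all or exactly one leading slash. A few worked cases, mirroring the tests further down:

// assuming EnableFilepathFallback() returns true:
h.needSearchByFileName(attrFilePath, "cat.png")       // true:  no slash
h.needSearchByFileName(attrFilePath, "/cat.png")      // true:  single leading slash
h.needSearchByFileName(attrFilePath, "cats/cat.png")  // false: slash, but not leading
h.needSearchByFileName(attrFilePath, "/cats/cat.png") // false: more than one slash
h.needSearchByFileName(attrFileName, "cat.png")       // false: wrong attribute key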
// resolveContainer decode container id, if it's not a valid container id
@ -353,16 +348,11 @@ func (h *Handler) getBucketInfo(ctx context.Context, containerName string, log *
cnrID, err := h.resolveContainer(ctx, containerName)
if err != nil {
log.Error(logs.CouldNotResolveContainerID, zap.Error(err), zap.String("cnrName", containerName),
logs.TagField(logs.TagDatapath))
return nil, err
}
bktInfo, err := h.readContainer(ctx, *cnrID)
if err != nil {
log.Error(logs.CouldNotGetContainerInfo, zap.Error(err), zap.String("cnrName", containerName),
zap.String("cnrName", cnrID.String()),
logs.TagField(logs.TagExternalStorage))
return nil, err
}
@ -370,8 +360,7 @@ func (h *Handler) getBucketInfo(ctx context.Context, containerName string, log *
log.Warn(logs.CouldntPutBucketIntoCache,
zap.String("bucket name", bktInfo.Name),
zap.Stringer("bucket cid", bktInfo.CID),
zap.Error(err),
logs.TagField(logs.TagDatapath))
zap.Error(err))
}
return bktInfo, nil
@ -395,16 +384,11 @@ func (h *Handler) readContainer(ctx context.Context, cnrID cid.ID) (*data.Bucket
}
bktInfo.HomomorphicHashDisabled = container.IsHomomorphicHashingDisabled(*res)
bktInfo.PlacementPolicy = res.PlacementPolicy()
return bktInfo, err
}
func (h *Handler) browseIndex(c *fasthttp.RequestCtx, isNativeList bool) {
ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(c), "handler.browseIndex")
defer span.End()
utils.SetContextToRequest(ctx, c)
func (h *Handler) browseIndex(c *fasthttp.RequestCtx) {
if !h.config.IndexPageEnabled() {
c.SetStatusCode(fasthttp.StatusNotFound)
return
@ -413,6 +397,7 @@ func (h *Handler) browseIndex(c *fasthttp.RequestCtx, isNativeList bool) {
cidURLParam := c.UserValue("cid").(string)
oidURLParam := c.UserValue("oid").(string)
ctx := utils.GetContextFromRequest(c)
reqLog := utils.GetReqLogOrDefault(ctx, h.log)
log := reqLog.With(zap.String("cid", cidURLParam), zap.String("oid", oidURLParam))
@ -429,9 +414,18 @@ func (h *Handler) browseIndex(c *fasthttp.RequestCtx, isNativeList bool) {
}
listFunc := h.getDirObjectsS3
if isNativeList {
// tree probe failed, trying to use native
listFunc = h.getDirObjectsNative
isNativeList := false
err = h.tree.CheckSettingsNodeExist(ctx, bktInfo)
if err != nil {
if errors.Is(err, tree.ErrNodeNotFound) {
// tree probe failed, try to use native
listFunc = h.getDirObjectsNative
isNativeList = true
} else {
logAndSendBucketError(c, log, err)
return
}
}
h.browseObjects(c, browseParams{


@ -517,7 +517,7 @@ func DoFuzzDownloadZipped(input []byte) int {
r.SetUserValue("cid", cid)
r.SetUserValue("prefix", prefix)
hc.Handler().DownloadZip(r)
hc.Handler().DownloadZipped(r)
return fuzzSuccessExitCode
}


@ -14,8 +14,8 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/cache"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler/middleware"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/layer"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/resolver"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
@ -32,41 +32,25 @@ import (
"go.uber.org/zap"
)
type treeServiceMock struct {
system map[string]map[string]*data.BaseNodeVersion
type treeClientMock struct {
}
func newTreeService() *treeServiceMock {
return &treeServiceMock{
system: make(map[string]map[string]*data.BaseNodeVersion),
}
func (t *treeClientMock) GetNodes(context.Context, *tree.GetNodesParams) ([]tree.NodeResponse, error) {
return nil, nil
}
func (t *treeServiceMock) CheckSettingsNodeExists(context.Context, *data.BucketInfo) error {
_, ok := t.system["bucket-settings"]
if !ok {
return layer.ErrNodeNotFound
}
return nil
}
func (t *treeServiceMock) GetSubTreeByPrefix(context.Context, *data.BucketInfo, string, bool) ([]data.NodeInfo, string, error) {
return nil, "", nil
}
func (t *treeServiceMock) GetLatestVersion(context.Context, *cid.ID, string) (*data.NodeVersion, error) {
func (t *treeClientMock) GetSubTree(context.Context, *data.BucketInfo, string, []uint64, uint32, bool) ([]tree.NodeResponse, error) {
return nil, nil
}
type configMock struct {
additionalSearch bool
}
func (c *configMock) DefaultTimestamp() bool {
return false
}
func (c *configMock) ArchiveCompression() bool {
func (c *configMock) ZipCompression() bool {
return false
}
@ -94,17 +78,13 @@ func (c *configMock) NamespaceHeader() string {
return ""
}
func (c *configMock) EnableFilepathFallback() bool {
return c.additionalSearch
}
type handlerContext struct {
key *keys.PrivateKey
owner user.ID
h *Handler
frostfs *TestFrostFS
tree *treeServiceMock
tree *treeClientMock
cfg *configMock
}
@ -142,17 +122,17 @@ func prepareHandlerContext() (*handlerContext, error) {
Size: 1,
Lifetime: 1,
Logger: logger,
}, false),
}),
}
treeMock := newTreeService()
treeMock := &treeClientMock{}
cfgMock := &configMock{}
workerPool, err := ants.NewPool(1)
workerPool, err := ants.NewPool(1000)
if err != nil {
return nil, err
}
handler := New(params, cfgMock, treeMock, workerPool)
handler := New(params, cfgMock, tree.NewTree(treeMock), workerPool)
return &handlerContext{
key: key,
@ -219,8 +199,10 @@ func TestBasic(t *testing.T) {
require.NoError(t, err)
obj := hc.frostfs.objects[putRes.ContainerID+"/"+putRes.ObjectID]
attr := prepareObjectAttributes(object.AttributeFilePath, objFileName)
obj.SetAttributes(append(obj.Attributes(), attr)...)
attr := object.NewAttribute()
attr.SetKey(object.AttributeFilePath)
attr.SetValue(objFileName)
obj.SetAttributes(append(obj.Attributes(), *attr)...)
t.Run("get", func(t *testing.T) {
r = prepareGetRequest(ctx, cnrID.EncodeToString(), putRes.ObjectID)
@ -239,10 +221,6 @@ func TestBasic(t *testing.T) {
r = prepareGetByAttributeRequest(ctx, bktName, keyAttr, valAttr)
hc.Handler().DownloadByAttribute(r)
require.Equal(t, content, string(r.Response.Body()))
r = prepareGetByAttributeRequest(ctx, bktName, attrFileName, "/"+objFileName)
hc.Handler().DownloadByAttribute(r)
require.Equal(t, content, string(r.Response.Body()))
})
t.Run("head by attribute", func(t *testing.T) {
@ -250,16 +228,11 @@ func TestBasic(t *testing.T) {
hc.Handler().HeadByAttribute(r)
require.Equal(t, putRes.ObjectID, string(r.Response.Header.Peek(hdrObjectID)))
require.Equal(t, putRes.ContainerID, string(r.Response.Header.Peek(hdrContainerID)))
r = prepareGetByAttributeRequest(ctx, bktName, attrFileName, "/"+objFileName)
hc.Handler().HeadByAttribute(r)
require.Equal(t, putRes.ObjectID, string(r.Response.Header.Peek(hdrObjectID)))
require.Equal(t, putRes.ContainerID, string(r.Response.Header.Peek(hdrContainerID)))
})
t.Run("zip", func(t *testing.T) {
r = prepareGetZipped(ctx, bktName, "")
hc.Handler().DownloadZip(r)
hc.Handler().DownloadZipped(r)
readerAt := bytes.NewReader(r.Response.Body())
zipReader, err := zip.NewReader(readerAt, int64(len(r.Response.Body())))
@ -278,178 +251,6 @@ func TestBasic(t *testing.T) {
})
}
func TestFindObjectByAttribute(t *testing.T) {
hc, err := prepareHandlerContext()
require.NoError(t, err)
hc.cfg.additionalSearch = true
bktName := "bucket"
cnrID, cnr, err := hc.prepareContainer(bktName, acl.PublicRWExtended)
require.NoError(t, err)
hc.frostfs.SetContainer(cnrID, cnr)
ctx := context.Background()
ctx = middleware.SetNamespace(ctx, "")
content := "hello"
r, err := prepareUploadRequest(ctx, cnrID.EncodeToString(), content)
require.NoError(t, err)
hc.Handler().Upload(r)
require.Equal(t, r.Response.StatusCode(), http.StatusOK)
var putRes putResponse
err = json.Unmarshal(r.Response.Body(), &putRes)
require.NoError(t, err)
testAttrVal1 := "/folder/cat.jpg"
testAttrVal2 := "cat.jpg"
testAttrVal3 := "test-attr-val3"
for _, tc := range []struct {
name string
firstAttr object.Attribute
secondAttr object.Attribute
reqAttrKey string
reqAttrValue string
err string
additionalSearch bool
}{
{
name: "success search by FileName",
firstAttr: prepareObjectAttributes(attrFilePath, testAttrVal1),
secondAttr: prepareObjectAttributes(attrFileName, testAttrVal2),
reqAttrKey: attrFileName,
reqAttrValue: testAttrVal2,
additionalSearch: false,
},
{
name: "failed search by FileName",
firstAttr: prepareObjectAttributes(attrFilePath, testAttrVal1),
secondAttr: prepareObjectAttributes(attrFileName, testAttrVal2),
reqAttrKey: attrFileName,
reqAttrValue: testAttrVal3,
err: "not found",
additionalSearch: false,
},
{
name: "success search by FilePath (with additional search)",
firstAttr: prepareObjectAttributes(attrFilePath, testAttrVal1),
secondAttr: prepareObjectAttributes(attrFileName, testAttrVal2),
reqAttrKey: attrFilePath,
reqAttrValue: testAttrVal2,
additionalSearch: true,
},
{
name: "failed by FilePath (with additional search)",
firstAttr: prepareObjectAttributes(attrFilePath, testAttrVal1),
secondAttr: prepareObjectAttributes(attrFileName, testAttrVal2),
reqAttrKey: attrFilePath,
reqAttrValue: testAttrVal3,
err: "not found",
additionalSearch: true,
},
{
name: "success search by FilePath with leading slash (with additional search)",
firstAttr: prepareObjectAttributes(attrFilePath, testAttrVal1),
secondAttr: prepareObjectAttributes(attrFileName, testAttrVal2),
reqAttrKey: attrFilePath,
reqAttrValue: "/cat.jpg",
additionalSearch: true,
},
} {
t.Run(tc.name, func(t *testing.T) {
obj := hc.frostfs.objects[putRes.ContainerID+"/"+putRes.ObjectID]
obj.SetAttributes(tc.firstAttr, tc.secondAttr)
hc.cfg.additionalSearch = tc.additionalSearch
objID, err := hc.Handler().findObjectByAttribute(ctx, hc.Handler().log, cnrID, tc.reqAttrKey, tc.reqAttrValue)
if tc.err != "" {
require.Error(t, err)
require.Contains(t, err.Error(), tc.err)
return
}
require.NoError(t, err)
require.Equal(t, putRes.ObjectID, objID.EncodeToString())
})
}
}
func TestNeedSearchByFileName(t *testing.T) {
hc, err := prepareHandlerContext()
require.NoError(t, err)
for _, tc := range []struct {
name string
attrKey string
attrVal string
additionalSearch bool
expected bool
}{
{
name: "need search - not contains slash",
attrKey: attrFilePath,
attrVal: "cat.png",
additionalSearch: true,
expected: true,
},
{
name: "need search - single lead slash",
attrKey: attrFilePath,
attrVal: "/cat.png",
additionalSearch: true,
expected: true,
},
{
name: "don't need search - single slash but not lead",
attrKey: attrFilePath,
attrVal: "cats/cat.png",
additionalSearch: true,
expected: false,
},
{
name: "don't need search - more one slash",
attrKey: attrFilePath,
attrVal: "/cats/cat.png",
additionalSearch: true,
expected: false,
},
{
name: "don't need search - incorrect attribute key",
attrKey: attrFileName,
attrVal: "cat.png",
additionalSearch: true,
expected: false,
},
{
name: "don't need search - additional search disabled",
attrKey: attrFilePath,
attrVal: "cat.png",
additionalSearch: false,
expected: false,
},
} {
t.Run(tc.name, func(t *testing.T) {
hc.cfg.additionalSearch = tc.additionalSearch
res := hc.h.needSearchByFileName(tc.attrKey, tc.attrVal)
require.Equal(t, tc.expected, res)
})
}
}
func TestPrepareFileName(t *testing.T) {
fileName := "/cat.jpg"
expected := "cat.jpg"
actual := prepareFileName(fileName)
require.Equal(t, expected, actual)
fileName = "cat.jpg"
actual = prepareFileName(fileName)
require.Equal(t, expected, actual)
}
func prepareUploadRequest(ctx context.Context, bucket, content string) (*fasthttp.RequestCtx, error) {
r := new(fasthttp.RequestCtx)
utils.SetContextToRequest(ctx, r)
@ -482,13 +283,6 @@ func prepareGetZipped(ctx context.Context, bucket, prefix string) *fasthttp.Requ
return r
}
func prepareObjectAttributes(attrKey, attrValue string) object.Attribute {
attr := object.NewAttribute()
attr.SetKey(attrKey)
attr.SetValue(attrValue)
return *attr
}
const (
keyAttr = "User-Attribute"
valAttr = "user value"


@ -2,16 +2,13 @@ package handler
import (
"context"
"errors"
"io"
"net/http"
"strconv"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/layer"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/valyala/fasthttp"
@ -46,11 +43,7 @@ func (h *Handler) headObject(ctx context.Context, req request, objectAddress oid
}
req.Response.Header.Set(fasthttp.HeaderContentLength, strconv.FormatUint(obj.PayloadSize(), 10))
var (
contentType string
filename string
filepath string
)
var contentType string
for _, attr := range obj.Attributes() {
key := attr.Key()
val := attr.Value()
@ -68,22 +61,14 @@ func (h *Handler) headObject(ctx context.Context, req request, objectAddress oid
req.log.Info(logs.CouldntParseCreationDate,
zap.String("key", key),
zap.String("val", val),
zap.Error(err),
logs.TagField(logs.TagDatapath))
zap.Error(err))
continue
}
req.Response.Header.Set(fasthttp.HeaderLastModified, time.Unix(value, 0).UTC().Format(http.TimeFormat))
case object.AttributeContentType:
contentType = val
case object.AttributeFilePath:
filepath = val
case object.AttributeFileName:
filename = val
}
}
if filename == "" {
filename = filepath
}
idsToResponse(&req.Response, obj)
@ -98,7 +83,7 @@ func (h *Handler) headObject(ctx context.Context, req request, objectAddress oid
}
return h.frostfs.RangeObject(ctx, prmRange)
}, filename)
})
if err != nil && err != io.EOF {
req.handleFrostFSErr(err, start)
return
@ -117,47 +102,18 @@ func idsToResponse(resp *fasthttp.Response, obj *object.Object) {
// HeadByAddressOrBucketName handles head requests using simple cid/oid or bucketname/key format.
func (h *Handler) HeadByAddressOrBucketName(c *fasthttp.RequestCtx) {
ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(c), "handler.HeadByAddressOrBucketName")
defer span.End()
test, _ := c.UserValue("oid").(string)
var id oid.ID
cidParam, _ := c.UserValue("cid").(string)
oidParam, _ := c.UserValue("oid").(string)
log := utils.GetReqLogOrDefault(ctx, h.log).With(
zap.String("cid", cidParam),
zap.String("oid", oidParam),
)
bktInfo, err := h.getBucketInfo(ctx, cidParam, log)
err := id.DecodeString(test)
if err != nil {
logAndSendBucketError(c, log, err)
return
}
checkS3Err := h.tree.CheckSettingsNodeExists(ctx, bktInfo)
if checkS3Err != nil && !errors.Is(checkS3Err, layer.ErrNodeNotFound) {
log.Error(logs.FailedToCheckIfSettingsNodeExist, zap.String("cid", bktInfo.CID.String()),
zap.Error(checkS3Err), logs.TagField(logs.TagExternalStorageTree))
logAndSendBucketError(c, log, checkS3Err)
return
}
req := newRequest(c, log)
var objID oid.ID
if checkS3Err == nil {
h.byS3Path(ctx, req, bktInfo.CID, oidParam, h.headObject)
} else if err = objID.DecodeString(oidParam); err == nil {
h.byNativeAddress(ctx, req, bktInfo.CID, objID, h.headObject)
h.byS3Path(c, h.headObject)
} else {
logAndSendBucketError(c, log, checkS3Err)
h.byNativeAddress(c, h.headObject)
}
}
// HeadByAttribute handles attribute-based head requests.
func (h *Handler) HeadByAttribute(c *fasthttp.RequestCtx) {
ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(c), "handler.HeadByAttribute")
defer span.End()
utils.SetContextToRequest(ctx, c)
h.byAttribute(c, h.headObject)
}


@ -33,7 +33,7 @@ func fetchMultipartFile(l *zap.Logger, r io.Reader, boundary string) (MultipartF
name := part.FormName()
if name == "" {
l.Debug(logs.IgnorePartEmptyFormName, logs.TagField(logs.TagDatapath))
l.Debug(logs.IgnorePartEmptyFormName)
continue
}
@ -41,10 +41,8 @@ func fetchMultipartFile(l *zap.Logger, r io.Reader, boundary string) (MultipartF
// ignore multipart/form-data values
if filename == "" {
l.Debug(logs.IgnorePartEmptyFilename, zap.String("form", name), logs.TagField(logs.TagDatapath))
if err = part.Close(); err != nil {
l.Warn(logs.FailedToCloseReader, zap.Error(err), logs.TagField(logs.TagDatapath))
}
l.Debug(logs.IgnorePartEmptyFilename, zap.String("form", name))
continue
}


@ -112,7 +112,7 @@ func fetchMultipartFileDefault(l *zap.Logger, r io.Reader, boundary string) (Mul
name := part.FormName()
if name == "" {
l.Debug(logs.IgnorePartEmptyFormName, logs.TagField(logs.TagDatapath))
l.Debug(logs.IgnorePartEmptyFormName)
continue
}
@ -120,7 +120,8 @@ func fetchMultipartFileDefault(l *zap.Logger, r io.Reader, boundary string) (Mul
// ignore multipart/form-data values
if filename == "" {
l.Debug(logs.IgnorePartEmptyFilename, zap.String("form", name), logs.TagField(logs.TagDatapath))
l.Debug(logs.IgnorePartEmptyFilename, zap.String("form", name))
continue
}


@ -4,14 +4,13 @@ import (
"bytes"
"context"
"io"
"mime"
"net/http"
"path"
"strconv"
"strings"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/response"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@ -26,7 +25,7 @@ type readCloser struct {
// initializes io.Reader with the limited size and detects Content-Type from it.
// Returns r's error directly. Also returns the processed data.
func readContentType(maxSize uint64, rInit func(uint64) (io.Reader, error), filename string) (string, []byte, error) {
func readContentType(maxSize uint64, rInit func(uint64) (io.Reader, error)) (string, []byte, error) {
if maxSize > sizeToDetectType {
maxSize = sizeToDetectType
}
@ -45,20 +44,7 @@ func readContentType(maxSize uint64, rInit func(uint64) (io.Reader, error), file
buf = buf[:n]
contentType := http.DetectContentType(buf)
// Since the detector detects the "text/plain" content type for various types of text files,
// including CSS, JavaScript, and CSV files,
// we'll determine the final content type based on the file's extension.
if strings.HasPrefix(contentType, "text/plain") {
ext := path.Ext(filename)
// If the file doesn't have a file extension, we'll keep the content type as is.
if len(ext) > 0 {
contentType = mime.TypeByExtension(ext)
}
}
return contentType, buf, err // to not lose io.EOF
return http.DetectContentType(buf), buf, err // to not lose io.EOF
}
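The extension fallback exists because http.DetectContentType only sniffs the first 512 bytes and reports plain CSS or JavaScript as text/plain. A small standalone illustration using only the standard library:

package main

import (
	"fmt"
	"mime"
	"net/http"
	"path"
)

func main() {
	body := []byte("body { color: red }")

	sniffed := http.DetectContentType(body)
	fmt.Println(sniffed) // text/plain; charset=utf-8

	// refine by extension, as readContentType does for text/plain
	if ext := path.Ext("style.css"); ext != "" {
		fmt.Println(mime.TypeByExtension(ext)) // text/css; charset=utf-8
	}
}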
type getMultiobjectBodyParams struct {
@ -110,8 +96,7 @@ func (h *Handler) receiveFile(ctx context.Context, req request, objAddress oid.A
if err = req.setTimestamp(val); err != nil {
req.log.Error(logs.CouldntParseCreationDate,
zap.String("val", val),
zap.Error(err),
logs.TagField(logs.TagDatapath))
zap.Error(err))
}
case object.AttributeContentType:
contentType = val
@ -143,10 +128,10 @@ func (h *Handler) receiveFile(ctx context.Context, req request, objAddress oid.A
contentType, payloadHead, err = readContentType(payloadSize, func(uint64) (io.Reader, error) {
return payload, nil
}, filename)
})
if err != nil && err != io.EOF {
req.log.Error(logs.CouldNotDetectContentTypeFromPayload, zap.Error(err), logs.TagField(logs.TagDatapath))
ResponseError(req.RequestCtx, "could not detect Content-Type from payload: "+err.Error(), fasthttp.StatusBadRequest)
req.log.Error(logs.CouldNotDetectContentTypeFromPayload, zap.Error(err))
response.Error(req.RequestCtx, "could not detect Content-Type from payload: "+err.Error(), fasthttp.StatusBadRequest)
return
}


@ -10,80 +10,39 @@ import (
"github.com/stretchr/testify/require"
)
const (
txtContentType = "text/plain; charset=utf-8"
cssContentType = "text/css; charset=utf-8"
htmlContentType = "text/html; charset=utf-8"
javascriptContentType = "text/javascript; charset=utf-8"
htmlBody = "<!DOCTYPE html><html ><head><meta charset=\"utf-8\"><title>Test Html</title>"
)
func TestDetector(t *testing.T) {
txtContentType := "text/plain; charset=utf-8"
sb := strings.Builder{}
for i := 0; i < 10; i++ {
sb.WriteString("Some txt content. Content-Type must be detected properly by detector.")
}
for _, tc := range []struct {
Name string
ExpectedContentType string
Content string
FileName string
Name string
ContentType string
Expected string
}{
{
Name: "less than 512b",
ExpectedContentType: txtContentType,
Content: sb.String()[:256],
FileName: "test.txt",
Name: "less than 512b",
ContentType: txtContentType,
Expected: sb.String()[:256],
},
{
Name: "more than 512b",
ExpectedContentType: txtContentType,
Content: sb.String(),
FileName: "test.txt",
},
{
Name: "css content type",
ExpectedContentType: cssContentType,
Content: sb.String(),
FileName: "test.css",
},
{
Name: "javascript content type",
ExpectedContentType: javascriptContentType,
Content: sb.String(),
FileName: "test.js",
},
{
Name: "html content type by file content",
ExpectedContentType: htmlContentType,
Content: htmlBody,
FileName: "test.detect-by-content",
},
{
Name: "html content type by file extension",
ExpectedContentType: htmlContentType,
Content: sb.String(),
FileName: "test.html",
},
{
Name: "empty file extension",
ExpectedContentType: txtContentType,
Content: sb.String(),
FileName: "test",
Name: "more than 512b",
ContentType: txtContentType,
Expected: sb.String(),
},
} {
t.Run(tc.Name, func(t *testing.T) {
contentType, data, err := readContentType(uint64(len(tc.Content)),
contentType, data, err := readContentType(uint64(len(tc.Expected)),
func(uint64) (io.Reader, error) {
return strings.NewReader(tc.Content), nil
}, tc.FileName,
return strings.NewReader(tc.Expected), nil
},
)
require.NoError(t, err)
require.Equal(t, tc.ExpectedContentType, contentType)
require.True(t, strings.HasPrefix(tc.Content, string(data)))
require.Equal(t, tc.ContentType, contentType)
require.True(t, strings.HasPrefix(tc.Expected, string(data)))
})
}
}


@ -1,23 +1,17 @@
package handler
import (
"archive/tar"
"bytes"
"compress/gzip"
"context"
"encoding/json"
"errors"
"io"
"net/http"
"path/filepath"
"strconv"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/response"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@ -26,9 +20,8 @@ import (
)
const (
jsonHeader = "application/json; charset=UTF-8"
drainBufSize = 4096
explodeArchiveHeader = "X-Explode-Archive"
jsonHeader = "application/json; charset=UTF-8"
drainBufSize = 4096
)
type putResponse struct {
@ -51,16 +44,17 @@ func (pr *putResponse) encode(w io.Writer) error {
// Upload handles multipart upload request.
func (h *Handler) Upload(c *fasthttp.RequestCtx) {
ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(c), "handler.Upload")
defer span.End()
utils.SetContextToRequest(ctx, c)
var file MultipartFile
var (
file MultipartFile
idObj oid.ID
addr oid.Address
)
scid, _ := c.UserValue("cid").(string)
bodyStream := c.RequestBodyStream()
drainBuf := make([]byte, drainBufSize)
ctx := utils.GetContextFromRequest(c)
reqLog := utils.GetReqLogOrDefault(ctx, h.log)
log := reqLog.With(zap.String("cid", scid))
@ -70,84 +64,76 @@ func (h *Handler) Upload(c *fasthttp.RequestCtx) {
return
}
defer func() {
// If the temporary reader can be closed, close it.
if file == nil {
return
}
err := file.Close()
log.Debug(
logs.CloseTemporaryMultipartFormFile,
zap.Stringer("address", addr),
zap.String("filename", file.FileName()),
zap.Error(err),
)
}()
boundary := string(c.Request.Header.MultipartFormBoundary())
if file, err = fetchMultipartFile(log, bodyStream, boundary); err != nil {
log.Error(logs.CouldNotReceiveMultipartForm, zap.Error(err), logs.TagField(logs.TagDatapath))
ResponseError(c, "could not receive multipart/form: "+err.Error(), fasthttp.StatusBadRequest)
log.Error(logs.CouldNotReceiveMultipartForm, zap.Error(err))
response.Error(c, "could not receive multipart/form: "+err.Error(), fasthttp.StatusBadRequest)
return
}
filtered, err := filterHeaders(log, &c.Request.Header)
if err != nil {
log.Error(logs.FailedToFilterHeaders, zap.Error(err), logs.TagField(logs.TagDatapath))
ResponseError(c, err.Error(), fasthttp.StatusBadRequest)
log.Error(logs.CouldNotProcessHeaders, zap.Error(err))
response.Error(c, err.Error(), fasthttp.StatusBadRequest)
return
}
if c.Request.Header.Peek(explodeArchiveHeader) != nil {
h.explodeArchive(request{c, log}, bktInfo, file, filtered)
} else {
h.uploadSingleObject(request{c, log}, bktInfo, file, filtered)
}
// Multipart is multipart and thus can contain more than one part which
// we ignore at the moment. Also, when dealing with chunked encoding
// the last zero-length chunk might be left unread (because multipart
// reader only cares about its boundary and doesn't look further) and
// it will be (erroneously) interpreted as the start of the next
// pipelined header. Thus, we need to drain the body buffer.
for {
_, err = bodyStream.Read(drainBuf)
if err == io.EOF || errors.Is(err, io.ErrUnexpectedEOF) {
break
now := time.Now()
if rawHeader := c.Request.Header.Peek(fasthttp.HeaderDate); rawHeader != nil {
if parsed, err := time.Parse(http.TimeFormat, string(rawHeader)); err != nil {
log.Warn(logs.CouldNotParseClientTime, zap.String("Date header", string(rawHeader)), zap.Error(err))
} else {
now = parsed
}
}
}
func (h *Handler) uploadSingleObject(req request, bkt *data.BucketInfo, file MultipartFile, filtered map[string]string) {
c, log := req.RequestCtx, req.log
ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(c), "handler.uploadSingleObject")
defer span.End()
utils.SetContextToRequest(ctx, c)
setIfNotExist(filtered, object.AttributeFileName, file.FileName())
attributes, err := h.extractAttributes(c, log, filtered)
if err != nil {
log.Error(logs.FailedToGetAttributes, zap.Error(err), logs.TagField(logs.TagDatapath))
ResponseError(c, "could not extract attributes: "+err.Error(), fasthttp.StatusBadRequest)
if err = utils.PrepareExpirationHeader(c, h.frostfs, filtered, now); err != nil {
log.Error(logs.CouldNotPrepareExpirationHeader, zap.Error(err))
response.Error(c, "could not prepare expiration header: "+err.Error(), fasthttp.StatusBadRequest)
return
}
idObj, err := h.uploadObject(c, bkt, attributes, file)
if err != nil {
h.handlePutFrostFSErr(c, err, log)
return
attributes := make([]object.Attribute, 0, len(filtered))
// prepares attributes from filtered headers
for key, val := range filtered {
attribute := object.NewAttribute()
attribute.SetKey(key)
attribute.SetValue(val)
attributes = append(attributes, *attribute)
}
log.Debug(logs.ObjectUploaded,
zap.String("oid", idObj.EncodeToString()),
zap.String("FileName", file.FileName()),
logs.TagField(logs.TagExternalStorage),
)
addr := newAddress(bkt.CID, idObj)
c.Response.Header.SetContentType(jsonHeader)
// Try to return the response, otherwise, if something went wrong, throw an error.
if err = newPutResponse(addr).encode(c); err != nil {
log.Error(logs.CouldNotEncodeResponse, zap.Error(err), logs.TagField(logs.TagDatapath))
ResponseError(c, "could not encode response", fasthttp.StatusBadRequest)
return
// sets FileName attribute if it wasn't set from header
if _, ok := filtered[object.AttributeFileName]; !ok {
filename := object.NewAttribute()
filename.SetKey(object.AttributeFileName)
filename.SetValue(file.FileName())
attributes = append(attributes, *filename)
}
// sets Timestamp attribute if it wasn't set from header and enabled by settings
if _, ok := filtered[object.AttributeTimestamp]; !ok && h.config.DefaultTimestamp() {
timestamp := object.NewAttribute()
timestamp.SetKey(object.AttributeTimestamp)
timestamp.SetValue(strconv.FormatInt(time.Now().Unix(), 10))
attributes = append(attributes, *timestamp)
}
}
func (h *Handler) uploadObject(c *fasthttp.RequestCtx, bkt *data.BucketInfo, attrs []object.Attribute, file io.Reader) (oid.ID, error) {
ctx := utils.GetContextFromRequest(c)
obj := object.New()
obj.SetContainerID(bkt.CID)
obj.SetContainerID(bktInfo.CID)
obj.SetOwnerID(*h.ownerID)
obj.SetAttributes(attrs...)
obj.SetAttributes(attributes...)
prm := PrmObjectCreate{
PrmAuth: PrmAuth{
@ -156,135 +142,48 @@ func (h *Handler) uploadObject(c *fasthttp.RequestCtx, bkt *data.BucketInfo, att
Object: obj,
Payload: file,
ClientCut: h.config.ClientCut(),
WithoutHomomorphicHash: bkt.HomomorphicHashDisabled,
WithoutHomomorphicHash: bktInfo.HomomorphicHashDisabled,
BufferMaxSize: h.config.BufferMaxSizeForPut(),
}
idObj, err := h.frostfs.CreateObject(ctx, prm)
if err != nil {
return oid.ID{}, err
}
return idObj, nil
}
func (h *Handler) extractAttributes(c *fasthttp.RequestCtx, log *zap.Logger, filtered map[string]string) ([]object.Attribute, error) {
ctx := utils.GetContextFromRequest(c)
now := time.Now()
if rawHeader := c.Request.Header.Peek(fasthttp.HeaderDate); rawHeader != nil {
if parsed, err := time.Parse(http.TimeFormat, string(rawHeader)); err != nil {
log.Warn(logs.CouldNotParseClientTime, zap.String("Date header", string(rawHeader)), zap.Error(err),
logs.TagField(logs.TagDatapath))
} else {
now = parsed
}
}
if err := utils.PrepareExpirationHeader(ctx, h.frostfs, filtered, now); err != nil {
log.Error(logs.CouldNotPrepareExpirationHeader, zap.Error(err), logs.TagField(logs.TagDatapath))
return nil, err
}
attributes := make([]object.Attribute, 0, len(filtered))
// prepares attributes from filtered headers
for key, val := range filtered {
attribute := newAttribute(key, val)
attributes = append(attributes, attribute)
}
// sets Timestamp attribute if it wasn't set from header and enabled by settings
if _, ok := filtered[object.AttributeTimestamp]; !ok && h.config.DefaultTimestamp() {
timestamp := newAttribute(object.AttributeTimestamp, strconv.FormatInt(time.Now().Unix(), 10))
attributes = append(attributes, timestamp)
}
return attributes, nil
}
func newAttribute(key string, val string) object.Attribute {
attr := object.NewAttribute()
attr.SetKey(key)
attr.SetValue(val)
return *attr
}
// explodeArchive reads files from the archive and creates an object for each of them.
// Sets FilePath attribute with name from tar.Header.
func (h *Handler) explodeArchive(req request, bkt *data.BucketInfo, file io.ReadCloser, filtered map[string]string) {
c, log := req.RequestCtx, req.log
ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(c), "handler.explodeArchive")
defer span.End()
utils.SetContextToRequest(ctx, c)
// remove user attributes which vary for each file in archive
// to guarantee that they won't appear twice
delete(filtered, object.AttributeFileName)
delete(filtered, object.AttributeFilePath)
commonAttributes, err := h.extractAttributes(c, log, filtered)
if err != nil {
log.Error(logs.FailedToGetAttributes, zap.Error(err), logs.TagField(logs.TagDatapath))
ResponseError(c, "could not extract attributes: "+err.Error(), fasthttp.StatusBadRequest)
if idObj, err = h.frostfs.CreateObject(ctx, prm); err != nil {
h.handlePutFrostFSErr(c, err, log)
return
}
attributes := commonAttributes
reader := file
if bytes.EqualFold(c.Request.Header.Peek(fasthttp.HeaderContentEncoding), []byte("gzip")) {
log.Debug(logs.GzipReaderSelected, logs.TagField(logs.TagDatapath))
gzipReader, err := gzip.NewReader(file)
if err != nil {
log.Error(logs.FailedToCreateGzipReader, zap.Error(err), logs.TagField(logs.TagDatapath))
ResponseError(c, "could read gzip file: "+err.Error(), fasthttp.StatusBadRequest)
return
}
defer func() {
if err := gzipReader.Close(); err != nil {
log.Warn(logs.FailedToCloseReader, zap.Error(err), logs.TagField(logs.TagDatapath))
}
}()
reader = gzipReader
addr.SetObject(idObj)
addr.SetContainer(bktInfo.CID)
// Try to encode and return the response; report an error if encoding fails.
if err = newPutResponse(addr).encode(c); err != nil {
log.Error(logs.CouldNotEncodeResponse, zap.Error(err))
response.Error(c, "could not encode response", fasthttp.StatusBadRequest)
return
}
tarReader := tar.NewReader(reader)
// Multipart is multipart and thus can contain more than one part which
// we ignore at the moment. Also, when dealing with chunked encoding
// the last zero-length chunk might be left unread (because multipart
// reader only cares about its boundary and doesn't look further) and
// it will be (erroneously) interpreted as the start of the next
// pipelined header. Thus, we need to drain the body buffer.
for {
obj, err := tarReader.Next()
if errors.Is(err, io.EOF) {
_, err = bodyStream.Read(drainBuf)
if err == io.EOF || err == io.ErrUnexpectedEOF {
break
} else if err != nil {
log.Error(logs.FailedToReadFileFromTar, zap.Error(err), logs.TagField(logs.TagDatapath))
ResponseError(c, "could not get next entry: "+err.Error(), fasthttp.StatusBadRequest)
return
}
if isDir(obj.Name) {
continue
}
// set varying attributes
attributes = attributes[:len(commonAttributes)]
fileName := filepath.Base(obj.Name)
attributes = append(attributes, newAttribute(object.AttributeFilePath, obj.Name))
attributes = append(attributes, newAttribute(object.AttributeFileName, fileName))
idObj, err := h.uploadObject(c, bkt, attributes, tarReader)
if err != nil {
h.handlePutFrostFSErr(c, err, log)
return
}
log.Debug(logs.ObjectUploaded,
zap.String("oid", idObj.EncodeToString()),
zap.String("FileName", fileName),
logs.TagField(logs.TagExternalStorage),
)
}
// Report status code and content type.
c.Response.SetStatusCode(fasthttp.StatusOK)
c.Response.Header.SetContentType(jsonHeader)
}
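On the master side, the X-Explode-Archive header switches the upload path to explodeArchive: the multipart part is treated as a tar stream (gzipped if Content-Encoding: gzip is set) and each file entry becomes its own object with FilePath and FileName attributes. A minimal client sketch of that flow; the gateway address and container ID are placeholders:

package main

import (
	"archive/tar"
	"bytes"
	"mime/multipart"
	"net/http"
)

func main() {
	// build a tiny tar archive with a single file entry
	var tarBuf bytes.Buffer
	tw := tar.NewWriter(&tarBuf)
	payload := []byte("hello")
	_ = tw.WriteHeader(&tar.Header{Name: "docs/hello.txt", Mode: 0644, Size: int64(len(payload))})
	_, _ = tw.Write(payload)
	_ = tw.Close()

	// the gateway reads the archive from a multipart form part
	var body bytes.Buffer
	mw := multipart.NewWriter(&body)
	part, _ := mw.CreateFormFile("file", "batch.tar")
	_, _ = part.Write(tarBuf.Bytes())
	_ = mw.Close()

	req, _ := http.NewRequest(http.MethodPost, "http://localhost:8080/upload/<cid>", &body)
	req.Header.Set("Content-Type", mw.FormDataContentType())
	req.Header.Set("X-Explode-Archive", "true") // any value triggers per-entry objects

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
}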
func (h *Handler) handlePutFrostFSErr(r *fasthttp.RequestCtx, err error, log *zap.Logger) {
statusCode, msg, additionalFields := formErrorResponse("could not store file in frostfs", err)
statusCode, msg, additionalFields := response.FormErrorResponse("could not store file in frostfs", err)
logFields := append([]zap.Field{zap.Error(err)}, additionalFields...)
log.Error(logs.CouldNotStoreFileInFrostfs, append(logFields, logs.TagField(logs.TagExternalStorage))...)
ResponseError(r, msg, statusCode)
log.Error(logs.CouldNotStoreFileInFrostfs, logFields...)
response.Error(r, msg, statusCode)
}
func (h *Handler) fetchBearerToken(ctx context.Context) *bearer.Token {


@ -2,16 +2,14 @@ package handler
import (
"context"
"errors"
"fmt"
"strings"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/response"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
sdkstatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@ -24,23 +22,16 @@ type request struct {
log *zap.Logger
}
func newRequest(ctx *fasthttp.RequestCtx, log *zap.Logger) request {
return request{
RequestCtx: ctx,
log: log,
}
}
func (r *request) handleFrostFSErr(err error, start time.Time) {
logFields := []zap.Field{
zap.Stringer("elapsed", time.Since(start)),
zap.Error(err),
}
statusCode, msg, additionalFields := formErrorResponse("could not receive object", err)
statusCode, msg, additionalFields := response.FormErrorResponse("could not receive object", err)
logFields = append(logFields, additionalFields...)
r.log.Error(logs.CouldNotReceiveObject, append(logFields, logs.TagField(logs.TagExternalStorage))...)
ResponseError(r.RequestCtx, msg, statusCode)
r.log.Error(logs.CouldNotReceiveObject, logFields...)
response.Error(r.RequestCtx, msg, statusCode)
}
func bearerToken(ctx context.Context) *bearer.Token {
@ -51,7 +42,16 @@ func bearerToken(ctx context.Context) *bearer.Token {
}
func isDir(name string) bool {
return name == "" || strings.HasSuffix(name, "/")
return strings.HasSuffix(name, "/")
}
func isObjectID(s string) bool {
var objID oid.ID
return objID.DecodeString(s) == nil
}
func isContainerRoot(key string) bool {
return key == ""
}
func loadAttributes(attrs []object.Attribute) map[string]string {
@ -85,13 +85,13 @@ func isValidValue(s string) bool {
}
func logAndSendBucketError(c *fasthttp.RequestCtx, log *zap.Logger, err error) {
log.Error(logs.CouldNotGetBucket, zap.Error(err), logs.TagField(logs.TagDatapath))
log.Error(logs.CouldntGetBucket, zap.Error(err))
if client.IsErrContainerNotFound(err) {
ResponseError(c, "Not Found", fasthttp.StatusNotFound)
response.Error(c, "Not Found", fasthttp.StatusNotFound)
return
}
ResponseError(c, "could not get bucket: "+err.Error(), fasthttp.StatusBadRequest)
response.Error(c, "could not get bucket: "+err.Error(), fasthttp.StatusBadRequest)
}
func newAddress(cnr cid.ID, obj oid.ID) oid.Address {
@ -100,43 +100,3 @@ func newAddress(cnr cid.ID, obj oid.ID) oid.Address {
addr.SetObject(obj)
return addr
}
// setIfNotExist sets the key-value pair in the map if the key is not already present.
func setIfNotExist(m map[string]string, key, value string) {
if _, ok := m[key]; !ok {
m[key] = value
}
}
func ResponseError(r *fasthttp.RequestCtx, msg string, code int) {
r.Error(msg+"\n", code)
}
func formErrorResponse(message string, err error) (int, string, []zap.Field) {
var (
msg string
statusCode int
logFields []zap.Field
)
st := new(sdkstatus.ObjectAccessDenied)
switch {
case errors.As(err, &st):
statusCode = fasthttp.StatusForbidden
reason := st.Reason()
msg = fmt.Sprintf("%s: %v: %s", message, err, reason)
logFields = append(logFields, zap.String("error_detail", reason))
case errors.Is(err, ErrQuotaLimitReached):
statusCode = fasthttp.StatusConflict
msg = fmt.Sprintf("%s: %v", message, err)
case client.IsErrObjectNotFound(err) || client.IsErrContainerNotFound(err):
statusCode = fasthttp.StatusNotFound
msg = "Not Found"
default:
statusCode = fasthttp.StatusBadRequest
msg = fmt.Sprintf("%s: %v", message, err)
}
return statusCode, msg, logFields
}
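formErrorResponse maps SDK errors onto HTTP statuses: access denied becomes 403 with the reason attached as a log field, a wrapped ErrQuotaLimitReached becomes 409 Conflict, a missing object or container becomes a bare 404, and everything else falls through to 400. A hypothetical test sketch of that mapping (not part of this diff):

func TestFormErrorResponseMapping(t *testing.T) {
	denied := new(sdkstatus.ObjectAccessDenied)
	denied.WriteReason("explicitly denied")
	code, _, _ := formErrorResponse("get", denied)
	require.Equal(t, fasthttp.StatusForbidden, code)

	code, _, _ = formErrorResponse("get", fmt.Errorf("wrap: %w", ErrQuotaLimitReached))
	require.Equal(t, fasthttp.StatusConflict, code)

	code, msg, _ := formErrorResponse("get", new(sdkstatus.ObjectNotFound))
	require.Equal(t, fasthttp.StatusNotFound, code)
	require.Equal(t, "Not Found", msg)
}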

View file

@@ -1,131 +1,90 @@
package logs
import "go.uber.org/zap"
const (
TagFieldName = "tag"
TagApp = "app"
TagDatapath = "datapath"
TagExternalStorage = "external_storage"
TagExternalStorageTree = "external_storage_tree"
)
func TagField(tag string) zap.Field {
return zap.String(TagFieldName, tag)
}
// Log messages with the "app" tag.
const (
ServiceIsRunning = "service is running"
ServiceCouldntStartOnConfiguredPort = "service couldn't start on configured port"
ServiceHasntStartedSinceItsDisabled = "service hasn't started since it's disabled"
ShuttingDownService = "shutting down service"
CantShutDownService = "can't shut down service"
CantGracefullyShutDownService = "can't gracefully shut down service, force stop"
FailedToCreateResolver = "failed to create resolver"
FailedToCreateWorkerPool = "failed to create worker pool"
StartingApplication = "starting application"
StartingServer = "starting server"
ListenAndServe = "listen and serve"
ShuttingDownWebServer = "shutting down web server"
FailedToShutdownTracing = "failed to shutdown tracing"
AddedPathUploadCid = "added path /upload/{cid}"
AddedPathGetCidOid = "added path /get/{cid}/{oid}"
AddedPathGetByAttributeCidAttrKeyAttrVal = "added path /get_by_attribute/{cid}/{attr_key}/{attr_val:*}"
AddedPathZipCidPrefix = "added path /zip/{cid}/{prefix}"
FailedToAddServer = "failed to add server"
AddServer = "add server"
NoHealthyServers = "no healthy servers"
FailedToInitializeTracing = "failed to initialize tracing"
RuntimeSoftMemoryDefinedWithGOMEMLIMIT = "soft runtime memory defined with GOMEMLIMIT environment variable, config value skipped"
RuntimeSoftMemoryLimitUpdated = "soft runtime memory limit value updated"
CouldNotLoadFrostFSPrivateKey = "could not load FrostFS private key"
UsingCredentials = "using credentials"
FailedToCreateConnectionPool = "failed to create connection pool"
FailedToDialConnectionPool = "failed to dial connection pool"
FailedToCreateTreePool = "failed to create tree pool"
FailedToDialTreePool = "failed to dial tree pool"
CouldntParseCreationDate = "couldn't parse creation date" // Info in ../../downloader/*
CouldNotDetectContentTypeFromPayload = "could not detect Content-Type from payload" // Error in ../../downloader/download.go
CouldNotReceiveObject = "could not receive object" // Error in ../../downloader/download.go
WrongObjectID = "wrong object id" // Error in ../../downloader/download.go
GetLatestObjectVersion = "get latest object version" // Error in ../../downloader/download.go
ObjectWasDeleted = "object was deleted" // Error in ../../downloader/download.go
CouldNotSearchForObjects = "could not search for objects" // Error in ../../downloader/download.go
ObjectNotFound = "object not found" // Error in ../../downloader/download.go
ReadObjectListFailed = "read object list failed" // Error in ../../downloader/download.go
FailedToAddObjectToArchive = "failed to add object to archive" // Error in ../../downloader/download.go
IteratingOverSelectedObjectsFailed = "iterating over selected objects failed" // Error in ../../downloader/download.go
ObjectsNotFound = "objects not found" // Error in ../../downloader/download.go
CloseZipWriter = "close zip writer" // Error in ../../downloader/download.go
ServiceIsRunning = "service is running" // Info in ../../metrics/service.go
ServiceCouldntStartOnConfiguredPort = "service couldn't start on configured port" // Warn in ../../metrics/service.go
ServiceHasntStartedSinceItsDisabled = "service hasn't started since it's disabled" // Info in ../../metrics/service.go
ShuttingDownService = "shutting down service" // Info in ../../metrics/service.go
CantShutDownService = "can't shut down service" // Panic in ../../metrics/service.go
CantGracefullyShutDownService = "can't gracefully shut down service, force stop" // Error in ../../metrics/service.go
IgnorePartEmptyFormName = "ignore part, empty form name" // Debug in ../../uploader/upload.go
IgnorePartEmptyFilename = "ignore part, empty filename" // Debug in ../../uploader/upload.go
CloseTemporaryMultipartFormFile = "close temporary multipart/form file" // Debug in ../../uploader/upload.go
CouldNotReceiveMultipartForm = "could not receive multipart/form" // Error in ../../uploader/upload.go
CouldNotProcessHeaders = "could not process headers" // Error in ../../uploader/upload.go
CouldNotParseClientTime = "could not parse client time" // Warn in ../../uploader/upload.go
CouldNotPrepareExpirationHeader = "could not prepare expiration header" // Error in ../../uploader/upload.go
CouldNotEncodeResponse = "could not encode response" // Error in ../../uploader/upload.go
CouldNotStoreFileInFrostfs = "could not store file in frostfs" // Error in ../../uploader/upload.go
AddAttributeToResultObject = "add attribute to result object" // Debug in ../../uploader/filter.go
FailedToCreateResolver = "failed to create resolver" // Fatal in ../../app.go
FailedToCreateWorkerPool = "failed to create worker pool" // Fatal in ../../app.go
FailedToReadIndexPageTemplate = "failed to read index page template" // Error in ../../app.go
SetCustomIndexPageTemplate = "set custom index page template" // Info in ../../app.go
ContainerResolverWillBeDisabledBecauseOfResolversResolverOrderIsEmpty = "container resolver will be disabled because of resolvers 'resolver_order' is empty" // Info in ../../app.go
MetricsAreDisabled = "metrics are disabled" // Warn in ../../app.go
NoWalletPathSpecifiedCreatingEphemeralKeyAutomaticallyForThisRun = "no wallet path specified, creating ephemeral key automatically for this run" // Info in ../../app.go
StartingApplication = "starting application" // Info in ../../app.go
StartingServer = "starting server" // Info in ../../app.go
ListenAndServe = "listen and serve" // Fatal in ../../app.go
ShuttingDownWebServer = "shutting down web server" // Info in ../../app.go
FailedToShutdownTracing = "failed to shutdown tracing" // Warn in ../../app.go
SIGHUPConfigReloadStarted = "SIGHUP config reload started" // Info in ../../app.go
FailedToReloadConfigBecauseItsMissed = "failed to reload config because it's missed" // Warn in ../../app.go
FailedToReloadConfig = "failed to reload config" // Warn in ../../app.go
LogLevelWontBeUpdated = "log level won't be updated" // Warn in ../../app.go
FailedToUpdateResolvers = "failed to update resolvers" // Warn in ../../app.go
FailedToReloadServerParameters = "failed to reload server parameters" // Warn in ../../app.go
SIGHUPConfigReloadCompleted = "SIGHUP config reload completed" // Info in ../../app.go
AddedPathUploadCid = "added path /upload/{cid}" // Info in ../../app.go
AddedPathGetCidOid = "added path /get/{cid}/{oid}" // Info in ../../app.go
AddedPathGetByAttributeCidAttrKeyAttrVal = "added path /get_by_attribute/{cid}/{attr_key}/{attr_val:*}" // Info in ../../app.go
AddedPathZipCidPrefix = "added path /zip/{cid}/{prefix}" // Info in ../../app.go
Request = "request" // Info in ../../app.go
CouldNotFetchAndStoreBearerToken = "could not fetch and store bearer token" // Error in ../../app.go
FailedToAddServer = "failed to add server" // Warn in ../../app.go
AddServer = "add server" // Info in ../../app.go
NoHealthyServers = "no healthy servers" // Fatal in ../../app.go
FailedToInitializeTracing = "failed to initialize tracing" // Warn in ../../app.go
TracingConfigUpdated = "tracing config updated" // Info in ../../app.go
ResolverNNSWontBeUsedSinceRPCEndpointIsntProvided = "resolver nns won't be used since rpc_endpoint isn't provided" // Warn in ../../app.go
RuntimeSoftMemoryDefinedWithGOMEMLIMIT = "soft runtime memory defined with GOMEMLIMIT environment variable, config value skipped" // Warn in ../../app.go
RuntimeSoftMemoryLimitUpdated = "soft runtime memory limit value updated" // Info in ../../app.go
CouldNotLoadFrostFSPrivateKey = "could not load FrostFS private key" // Fatal in ../../settings.go
UsingCredentials = "using credentials" // Info in ../../settings.go
FailedToCreateConnectionPool = "failed to create connection pool" // Fatal in ../../settings.go
FailedToDialConnectionPool = "failed to dial connection pool" // Fatal in ../../settings.go
FailedToCreateTreePool = "failed to create tree pool" // Fatal in ../../settings.go
FailedToDialTreePool = "failed to dial tree pool" // Fatal in ../../settings.go
AddedStoragePeer = "added storage peer" // Info in ../../settings.go
CouldntGetBucket = "could not get bucket" // Error in ../handler/utils.go
CouldntPutBucketIntoCache = "couldn't put bucket info into cache" // Warn in ../handler/handler.go
FailedToSumbitTaskToPool = "failed to submit task to pool" // Error in ../handler/browse.go
FailedToHeadObject = "failed to head object" // Error in ../handler/browse.go
FailedToIterateOverResponse = "failed to iterate over search response" // Error in ../handler/browse.go
InvalidCacheEntryType = "invalid cache entry type" // Warn in ../cache/buckets.go
InvalidLifetimeUsingDefaultValue = "invalid lifetime, using default value (in seconds)" // Error in ../../cmd/http-gw/settings.go
InvalidCacheSizeUsingDefaultValue = "invalid cache size, using default value" // Error in ../../cmd/http-gw/settings.go
FailedToUnescapeQuery = "failed to unescape query"
ServerReconnecting = "reconnecting server..."
ServerReconnectedSuccessfully = "server reconnected successfully"
ServerReconnectFailed = "failed to reconnect server"
WarnDuplicateAddress = "duplicate address"
MultinetDialSuccess = "multinet dial successful"
MultinetDialFail = "multinet dial failed"
ContainerResolverWillBeDisabledBecauseOfResolversResolverOrderIsEmpty = "container resolver will be disabled because of resolvers 'resolver_order' is empty"
MetricsAreDisabled = "metrics are disabled"
NoWalletPathSpecifiedCreatingEphemeralKeyAutomaticallyForThisRun = "no wallet path specified, creating ephemeral key automatically for this run"
SIGHUPConfigReloadStarted = "SIGHUP config reload started"
FailedToReloadConfigBecauseItsMissed = "failed to reload config because it's missed"
FailedToReloadConfig = "failed to reload config"
FailedToUpdateResolvers = "failed to update resolvers"
FailedToReloadServerParameters = "failed to reload server parameters"
SIGHUPConfigReloadCompleted = "SIGHUP config reload completed"
TracingConfigUpdated = "tracing config updated"
ResolverNNSWontBeUsedSinceRPCEndpointIsntProvided = "resolver nns won't be used since rpc_endpoint isn't provided"
AddedStoragePeer = "added storage peer"
InvalidLifetimeUsingDefaultValue = "invalid lifetime, using default value (in seconds)"
InvalidCacheSizeUsingDefaultValue = "invalid cache size, using default value"
WarnDuplicateAddress = "duplicate address"
FailedToLoadMultinetConfig = "failed to load multinet config"
MultinetConfigWontBeUpdated = "multinet config won't be updated"
LogLevelWontBeUpdated = "log level won't be updated"
TagsLogConfigWontBeUpdated = "tags log config won't be updated"
FailedToReadIndexPageTemplate = "failed to read index page template"
SetCustomIndexPageTemplate = "set custom index page template"
)
// Log messages with the "datapath" tag.
const (
CouldntParseCreationDate = "couldn't parse creation date"
CouldNotDetectContentTypeFromPayload = "could not detect Content-Type from payload"
FailedToAddObjectToArchive = "failed to add object to archive"
CloseZipWriter = "close zip writer"
IgnorePartEmptyFormName = "ignore part, empty form name"
IgnorePartEmptyFilename = "ignore part, empty filename"
CouldNotParseClientTime = "could not parse client time"
CouldNotPrepareExpirationHeader = "could not prepare expiration header"
CouldNotEncodeResponse = "could not encode response"
AddAttributeToResultObject = "add attribute to result object"
Request = "request"
CouldNotFetchAndStoreBearerToken = "could not fetch and store bearer token"
CouldntPutBucketIntoCache = "couldn't put bucket info into cache"
FailedToIterateOverResponse = "failed to iterate over search response"
InvalidCacheEntryType = "invalid cache entry type"
FailedToUnescapeQuery = "failed to unescape query"
CouldntCacheNetmap = "couldn't cache netmap"
FailedToCloseReader = "failed to close reader"
FailedToFilterHeaders = "failed to filter headers"
FailedToReadFileFromTar = "failed to read file from tar"
FailedToGetAttributes = "failed to get attributes"
CloseGzipWriter = "close gzip writer"
CloseTarWriter = "close tar writer"
FailedToCreateGzipReader = "failed to create gzip reader"
GzipReaderSelected = "gzip reader selected"
CouldNotReceiveMultipartForm = "could not receive multipart/form"
ObjectsNotFound = "objects not found"
IteratingOverSelectedObjectsFailed = "iterating over selected objects failed"
CouldNotGetBucket = "could not get bucket"
CouldNotResolveContainerID = "could not resolve container id"
FailedToSumbitTaskToPool = "failed to submit task to pool"
)
// Log messages with the "external_storage" tag.
const (
CouldNotReceiveObject = "could not receive object"
CouldNotSearchForObjects = "could not search for objects"
ObjectNotFound = "object not found"
ReadObjectListFailed = "read object list failed"
CouldNotStoreFileInFrostfs = "could not store file in frostfs"
FailedToHeadObject = "failed to head object"
ObjectNotFoundByFilePathTrySearchByFileName = "object not found by filePath attribute, try search by fileName"
FailedToGetObject = "failed to get object"
ObjectUploaded = "object uploaded"
CouldNotGetContainerInfo = "could not get container info"
)
// Log messages with the "external_storage_tree" tag.
const (
ObjectWasDeleted = "object was deleted"
FailedToGetLatestVersionOfObject = "failed to get latest version of object"
FailedToCheckIfSettingsNodeExist = "Failed to check if settings node exists"
)
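Grouping messages by tag lets operators filter gateway logs per subsystem. A minimal sketch of how a tagged entry is emitted and roughly what it serializes to:

log.Error(logs.CouldNotReceiveObject,
	zap.Error(err),
	logs.TagField(logs.TagExternalStorage), // adds "tag": "external_storage"
)
// JSON output (abridged): {"level":"error","msg":"could not receive object","tag":"external_storage"}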

View file

@@ -17,11 +17,9 @@ func (l LogEventHandler) DialPerformed(sourceIP net.Addr, _, address string, err
sourceIPString = sourceIP.Network() + "://" + sourceIP.String()
}
if err == nil {
l.logger.Debug(logs.MultinetDialSuccess, zap.String("source", sourceIPString),
zap.String("destination", address), logs.TagField(logs.TagApp))
l.logger.Debug(logs.MultinetDialSuccess, zap.String("source", sourceIPString), zap.String("destination", address))
} else {
l.logger.Debug(logs.MultinetDialFail, zap.String("source", sourceIPString),
zap.String("destination", address), logs.TagField(logs.TagApp))
l.logger.Debug(logs.MultinetDialFail, zap.String("source", sourceIPString), zap.String("destination", address), zap.Error(err))
}
}

View file

@@ -9,10 +9,8 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
@@ -36,9 +34,6 @@ func NewFrostFS(p *pool.Pool) *FrostFS {
// Container implements frostfs.FrostFS interface method.
func (x *FrostFS) Container(ctx context.Context, containerPrm handler.PrmContainer) (*container.Container, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "frostfs.Container")
defer span.End()
prm := pool.PrmContainerGet{
ContainerID: containerPrm.ContainerID,
}
@@ -53,9 +48,6 @@ func (x *FrostFS) Container(ctx context.Context, containerPrm handler.PrmContain
// CreateObject implements frostfs.FrostFS interface method.
func (x *FrostFS) CreateObject(ctx context.Context, prm handler.PrmObjectCreate) (oid.ID, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "frostfs.CreateObject")
defer span.End()
var prmPut pool.PrmObjectPut
prmPut.SetHeader(*prm.Object)
prmPut.SetPayload(prm.Payload)
@@ -90,9 +82,6 @@ func (x payloadReader) Read(p []byte) (int, error) {
// HeadObject implements frostfs.FrostFS interface method.
func (x *FrostFS) HeadObject(ctx context.Context, prm handler.PrmObjectHead) (*object.Object, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "frostfs.HeadObject")
defer span.End()
var prmHead pool.PrmObjectHead
prmHead.SetAddress(prm.Address)
@@ -110,9 +99,6 @@ func (x *FrostFS) HeadObject(ctx context.Context, prm handler.PrmObjectHead) (*o
// GetObject implements frostfs.FrostFS interface method.
func (x *FrostFS) GetObject(ctx context.Context, prm handler.PrmObjectGet) (*handler.Object, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "frostfs.GetObject")
defer span.End()
var prmGet pool.PrmObjectGet
prmGet.SetAddress(prm.Address)
@@ -133,9 +119,6 @@ func (x *FrostFS) GetObject(ctx context.Context, prm handler.PrmObjectGet) (*han
// RangeObject implements frostfs.FrostFS interface method.
func (x *FrostFS) RangeObject(ctx context.Context, prm handler.PrmObjectRange) (io.ReadCloser, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "frostfs.RangeObject")
defer span.End()
var prmRange pool.PrmObjectRange
prmRange.SetAddress(prm.Address)
prmRange.SetOffset(prm.PayloadRange[0])
@@ -155,9 +138,6 @@ func (x *FrostFS) RangeObject(ctx context.Context, prm handler.PrmObjectRange) (
// SearchObjects implements frostfs.FrostFS interface method.
func (x *FrostFS) SearchObjects(ctx context.Context, prm handler.PrmObjectSearch) (handler.ResObjectSearch, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "frostfs.SearchObjects")
defer span.End()
var prmSearch pool.PrmObjectSearch
prmSearch.SetContainerID(prm.Container)
prmSearch.SetFilters(prm.Filters)
@@ -176,9 +156,6 @@ func (x *FrostFS) SearchObjects(ctx context.Context, prm handler.PrmObjectSearch
// GetEpochDurations implements frostfs.FrostFS interface method.
func (x *FrostFS) GetEpochDurations(ctx context.Context) (*utils.EpochDurations, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "frostfs.GetEpochDurations")
defer span.End()
networkInfo, err := x.pool.NetworkInfo(ctx)
if err != nil {
return nil, err
@@ -196,18 +173,6 @@ func (x *FrostFS) GetEpochDurations(ctx context.Context) (*utils.EpochDurations,
return res, nil
}
func (x *FrostFS) NetmapSnapshot(ctx context.Context) (netmap.NetMap, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "frostfs.NetmapSnapshot")
defer span.End()
netmapSnapshot, err := x.pool.NetMapSnapshot(ctx)
if err != nil {
return netmapSnapshot, handleObjectError("get netmap via connection pool", err)
}
return netmapSnapshot, nil
}
// ResolverFrostFS represents virtual connection to the FrostFS network.
// It implements resolver.FrostFS.
type ResolverFrostFS struct {
@@ -221,9 +186,6 @@ func NewResolverFrostFS(p *pool.Pool) *ResolverFrostFS {
// SystemDNS implements resolver.FrostFS interface method.
func (x *ResolverFrostFS) SystemDNS(ctx context.Context) (string, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "frostfs.SystemDNS")
defer span.End()
networkInfo, err := x.pool.NetworkInfo(ctx)
if err != nil {
return "", handleObjectError("read network info via client", err)
@@ -243,10 +205,6 @@ func handleObjectError(msg string, err error) error {
}
if reason, ok := IsErrObjectAccessDenied(err); ok {
if strings.Contains(reason, "limit reached") {
return fmt.Errorf("%s: %w: %s", msg, handler.ErrQuotaLimitReached, reason)
}
return fmt.Errorf("%s: %w: %s", msg, handler.ErrAccessDenied, reason)
}
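With the branch above, a storage-side rejection whose access-denied reason contains "limit reached" surfaces as handler.ErrQuotaLimitReached instead of a generic access-denied error, so callers can distinguish the two. A sketch of the assumed flow:

// err is an apistatus.ObjectAccessDenied whose reason mentions "limit reached"
wrapped := handleObjectError("put object via connection pool", err)
if errors.Is(wrapped, handler.ErrQuotaLimitReached) {
	// the handler package maps this to HTTP 409 Conflict in formErrorResponse
}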

View file

@@ -1,83 +0,0 @@
package frostfs
import (
"context"
"errors"
"fmt"
"testing"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
"github.com/stretchr/testify/require"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
func TestHandleObjectError(t *testing.T) {
msg := "some msg"
t.Run("nil error", func(t *testing.T) {
err := handleObjectError(msg, nil)
require.Nil(t, err)
})
t.Run("simple access denied", func(t *testing.T) {
reason := "some reason"
inputErr := new(apistatus.ObjectAccessDenied)
inputErr.WriteReason(reason)
err := handleObjectError(msg, inputErr)
require.ErrorIs(t, err, handler.ErrAccessDenied)
require.Contains(t, err.Error(), reason)
require.Contains(t, err.Error(), msg)
})
t.Run("access denied - quota reached", func(t *testing.T) {
reason := "Quota limit reached"
inputErr := new(apistatus.ObjectAccessDenied)
inputErr.WriteReason(reason)
err := handleObjectError(msg, inputErr)
require.ErrorIs(t, err, handler.ErrQuotaLimitReached)
require.Contains(t, err.Error(), reason)
require.Contains(t, err.Error(), msg)
})
t.Run("simple timeout", func(t *testing.T) {
inputErr := errors.New("timeout")
err := handleObjectError(msg, inputErr)
require.ErrorIs(t, err, handler.ErrGatewayTimeout)
require.Contains(t, err.Error(), inputErr.Error())
require.Contains(t, err.Error(), msg)
})
t.Run("deadline exceeded", func(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
defer cancel()
<-ctx.Done()
err := handleObjectError(msg, ctx.Err())
require.ErrorIs(t, err, handler.ErrGatewayTimeout)
require.Contains(t, err.Error(), ctx.Err().Error())
require.Contains(t, err.Error(), msg)
})
t.Run("grpc deadline exceeded", func(t *testing.T) {
inputErr := fmt.Errorf("wrap grpc error: %w", status.Error(codes.DeadlineExceeded, "error"))
err := handleObjectError(msg, inputErr)
require.ErrorIs(t, err, handler.ErrGatewayTimeout)
require.Contains(t, err.Error(), inputErr.Error())
require.Contains(t, err.Error(), msg)
})
t.Run("unknown error", func(t *testing.T) {
inputErr := errors.New("unknown error")
err := handleObjectError(msg, inputErr)
require.ErrorIs(t, err, inputErr)
require.Contains(t, err.Error(), msg)
})
}

View file

@@ -9,7 +9,6 @@ import (
"time"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)
@@ -75,9 +74,6 @@ var (
)
func (x *FrostFS) InitMultiObjectReader(ctx context.Context, p handler.PrmInitMultiObjectReader) (io.Reader, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "frostfs.InitMultiObjectReader")
defer span.End()
combinedObj, err := x.GetObject(ctx, handler.PrmObjectGet{
PrmAuth: handler.PrmAuth{BearerToken: p.Bearer},
Address: p.Addr,
@@ -219,9 +215,6 @@ func (x *MultiObjectReader) Read(p []byte) (n int, err error) {
// InitFrostFSObjectPayloadReader initializes payload reader of the FrostFS object.
// Zero range corresponds to full payload (panics if only offset is set).
func (x *FrostFS) InitFrostFSObjectPayloadReader(ctx context.Context, p GetFrostFSParams) (io.ReadCloser, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "frostfs.InitFrostFSObjectPayloadReader")
defer span.End()
var prmAuth handler.PrmAuth
if p.Off+p.Ln != 0 {

View file

@@ -9,21 +9,20 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
apitree "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/tree"
treepool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree"
grpcService "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree/service"
)
type GetNodeByPathResponseInfoWrapper struct {
response *apitree.GetNodeByPathResponseInfo
response *grpcService.GetNodeByPathResponse_Info
}
func (n GetNodeByPathResponseInfoWrapper) GetNodeID() []uint64 {
return []uint64{n.response.GetNodeID()}
return []uint64{n.response.GetNodeId()}
}
func (n GetNodeByPathResponseInfoWrapper) GetParentID() []uint64 {
return []uint64{n.response.GetParentID()}
return []uint64{n.response.GetParentId()}
}
func (n GetNodeByPathResponseInfoWrapper) GetTimestamp() []uint64 {
@@ -31,8 +30,8 @@ func (n GetNodeByPathResponseInfoWrapper) GetTimestamp() []uint64 {
}
func (n GetNodeByPathResponseInfoWrapper) GetMeta() []tree.Meta {
res := make([]tree.Meta, len(n.response.GetMeta()))
for i, value := range n.response.GetMeta() {
res := make([]tree.Meta, len(n.response.Meta))
for i, value := range n.response.Meta {
res[i] = value
}
return res
@@ -47,9 +46,6 @@ func NewPoolWrapper(p *treepool.Pool) *PoolWrapper {
}
func (w *PoolWrapper) GetNodes(ctx context.Context, prm *tree.GetNodesParams) ([]tree.NodeResponse, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "frostfs.GetNodes")
defer span.End()
poolPrm := treepool.GetNodesParams{
CID: prm.CnrID,
TreeID: prm.TreeID,
@@ -97,9 +93,6 @@ func handleError(err error) error {
}
func (w *PoolWrapper) GetSubTree(ctx context.Context, bktInfo *data.BucketInfo, treeID string, rootID []uint64, depth uint32, sort bool) ([]tree.NodeResponse, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "frostfs.GetSubTree")
defer span.End()
order := treepool.NoneOrder
if sort {
order = treepool.AscendingOrder
@@ -140,15 +133,15 @@ func (w *PoolWrapper) GetSubTree(ctx context.Context, bktInfo *data.BucketInfo,
}
type GetSubTreeResponseBodyWrapper struct {
response *apitree.GetSubTreeResponseBody
response *grpcService.GetSubTreeResponse_Body
}
func (n GetSubTreeResponseBodyWrapper) GetNodeID() []uint64 {
return n.response.GetNodeID()
return n.response.GetNodeId()
}
func (n GetSubTreeResponseBodyWrapper) GetParentID() []uint64 {
resp := n.response.GetParentID()
resp := n.response.GetParentId()
if resp == nil {
// storage sends nil, which should be interpreted as []uint64{0}
// due to protobuf compatibility; see the 'GetSubTree' function
@@ -162,8 +155,8 @@ func (n GetSubTreeResponseBodyWrapper) GetTimestamp() []uint64 {
}
func (n GetSubTreeResponseBodyWrapper) GetMeta() []tree.Meta {
res := make([]tree.Meta, len(n.response.GetMeta()))
for i, value := range n.response.GetMeta() {
res := make([]tree.Meta, len(n.response.Meta))
for i, value := range n.response.Meta {
res[i] = value
}
return res

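Both wrapper types adapt a generated response struct to the tree package's Meta slice. A minimal sketch of how a consumer might flatten that meta (metaToMap is a hypothetical helper; the key/value accessors are assumed):

func metaToMap(node tree.NodeResponse) map[string]string {
	res := make(map[string]string, len(node.GetMeta()))
	for _, m := range node.GetMeta() {
		res[m.GetKey()] = string(m.GetValue())
	}
	return res
}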
View file

@@ -1,69 +0,0 @@
package frostfs
import (
"context"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/cache"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"go.uber.org/zap"
)
type Source struct {
frostFS *FrostFS
netmapCache *cache.NetmapCache
bucketCache *cache.BucketCache
log *zap.Logger
}
func NewSource(frostFS *FrostFS, netmapCache *cache.NetmapCache, bucketCache *cache.BucketCache, log *zap.Logger) *Source {
return &Source{
frostFS: frostFS,
netmapCache: netmapCache,
bucketCache: bucketCache,
log: log,
}
}
func (s *Source) NetMapSnapshot(ctx context.Context) (netmap.NetMap, error) {
cachedNetmap := s.netmapCache.Get()
if cachedNetmap != nil {
return *cachedNetmap, nil
}
netmapSnapshot, err := s.frostFS.NetmapSnapshot(ctx)
if err != nil {
return netmap.NetMap{}, fmt.Errorf("get netmap: %w", err)
}
if err = s.netmapCache.Put(netmapSnapshot); err != nil {
s.log.Warn(logs.CouldntCacheNetmap, zap.Error(err), logs.TagField(logs.TagDatapath))
}
return netmapSnapshot, nil
}
func (s *Source) PlacementPolicy(ctx context.Context, cnrID cid.ID) (netmap.PlacementPolicy, error) {
info := s.bucketCache.GetByCID(cnrID)
if info != nil {
return info.PlacementPolicy, nil
}
prm := handler.PrmContainer{
ContainerID: cnrID,
}
res, err := s.frostFS.Container(ctx, prm)
if err != nil {
return netmap.PlacementPolicy{}, fmt.Errorf("get container: %w", err)
}
// We don't put the container back into the cache to keep the cache
// coherent with the requests made by users. FrostFS Source
// is used by the SDK tree pool and should not fill the cache
// with possibly irrelevant container values.
return res.PlacementPolicy(), nil
}
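Source bundles cached netmap and placement-policy lookups for use by the SDK tree pool. A sketch of the interface shape it satisfies (the name and exact shape here are assumptions, not the SDK's declaration):

type NetMapInfoSource interface {
	NetMapSnapshot(ctx context.Context) (netmap.NetMap, error)
	PlacementPolicy(ctx context.Context, cnrID cid.ID) (netmap.PlacementPolicy, error)
}

var _ NetMapInfoSource = (*Source)(nil) // compile-time conformance check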

View file

@@ -76,15 +76,6 @@ var appMetricsDesc = map[string]map[string]Description{
VariableLabels: []string{"endpoint"},
},
},
statisticSubsystem: {
droppedLogs: Description{
Type: dto.MetricType_COUNTER,
Namespace: namespace,
Subsystem: statisticSubsystem,
Name: droppedLogs,
Help: "Dropped logs (by sampling) count",
},
},
}
type Description struct {
@@ -157,12 +148,3 @@ func mustNewGaugeVec(description Description) *prometheus.GaugeVec {
description.VariableLabels,
)
}
func mustNewCounter(description Description) prometheus.Counter {
if description.Type != dto.MetricType_COUNTER {
panic("invalid metric type")
}
return prometheus.NewCounter(
prometheus.CounterOpts(newOpts(description)),
)
}

View file

@@ -10,17 +10,15 @@ import (
)
const (
namespace = "frostfs_http_gw"
stateSubsystem = "state"
poolSubsystem = "pool"
serverSubsystem = "server"
statisticSubsystem = "statistic"
namespace = "frostfs_http_gw"
stateSubsystem = "state"
poolSubsystem = "pool"
serverSubsystem = "server"
)
const (
healthMetric = "health"
versionInfoMetric = "version_info"
droppedLogs = "dropped_logs"
)
const (
@@ -32,19 +30,21 @@ const (
)
const (
methodGetBalance = "get_balance"
methodPutContainer = "put_container"
methodGetContainer = "get_container"
methodListContainer = "list_container"
methodDeleteContainer = "delete_container"
methodEndpointInfo = "endpoint_info"
methodNetworkInfo = "network_info"
methodPutObject = "put_object"
methodDeleteObject = "delete_object"
methodGetObject = "get_object"
methodHeadObject = "head_object"
methodRangeObject = "range_object"
methodCreateSession = "create_session"
methodGetBalance = "get_balance"
methodPutContainer = "put_container"
methodGetContainer = "get_container"
methodListContainer = "list_container"
methodDeleteContainer = "delete_container"
methodGetContainerEacl = "get_container_eacl"
methodSetContainerEacl = "set_container_eacl"
methodEndpointInfo = "endpoint_info"
methodNetworkInfo = "network_info"
methodPutObject = "put_object"
methodDeleteObject = "delete_object"
methodGetObject = "get_object"
methodHeadObject = "head_object"
methodRangeObject = "range_object"
methodCreateSession = "create_session"
)
// HealthStatus of the gate application.
@@ -69,7 +69,6 @@ type GateMetrics struct {
stateMetrics
poolMetricsCollector
serverMetrics
statisticMetrics
}
type stateMetrics struct {
@@ -77,10 +76,6 @@ type stateMetrics struct {
versionInfo *prometheus.GaugeVec
}
type statisticMetrics struct {
droppedLogs prometheus.Counter
}
type poolMetricsCollector struct {
scraper StatisticScraper
overallErrors prometheus.Gauge
@@ -101,14 +96,10 @@ func NewGateMetrics(p StatisticScraper) *GateMetrics {
serverMetric := newServerMetrics()
serverMetric.register()
statsMetric := newStatisticMetrics()
statsMetric.register()
return &GateMetrics{
stateMetrics: *stateMetric,
poolMetricsCollector: *poolMetric,
serverMetrics: *serverMetric,
statisticMetrics: *statsMetric,
}
}
@@ -116,7 +107,6 @@ func (g *GateMetrics) Unregister() {
g.stateMetrics.unregister()
prometheus.Unregister(&g.poolMetricsCollector)
g.serverMetrics.unregister()
g.statisticMetrics.unregister()
}
func newStateMetrics() *stateMetrics {
@@ -126,20 +116,6 @@ func newStateMetrics() *stateMetrics {
}
}
func newStatisticMetrics() *statisticMetrics {
return &statisticMetrics{
droppedLogs: mustNewCounter(appMetricsDesc[statisticSubsystem][droppedLogs]),
}
}
func (s *statisticMetrics) register() {
prometheus.MustRegister(s.droppedLogs)
}
func (s *statisticMetrics) unregister() {
prometheus.Unregister(s.droppedLogs)
}
func (m stateMetrics) register() {
prometheus.MustRegister(m.healthCheck)
prometheus.MustRegister(m.versionInfo)
@@ -158,13 +134,6 @@ func (m stateMetrics) SetVersion(ver string) {
m.versionInfo.WithLabelValues(ver).Set(1)
}
func (s *statisticMetrics) DroppedLogsInc() {
if s == nil {
return
}
s.droppedLogs.Inc()
}
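DroppedLogsInc is the hook the log sampler calls for every entry it discards; the nil-receiver guard keeps it safe before metrics are wired up. A sketch of the assumed wiring into zap's sampler:

core = zapcore.NewSamplerWithOptions(core, time.Second, 100, 100,
	zapcore.SamplerHook(func(_ zapcore.Entry, dec zapcore.SamplingDecision) {
		if dec&zapcore.LogDropped > 0 {
			gateMetrics.DroppedLogsInc() // increments frostfs_http_gw_statistic_dropped_logs
		}
	}))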
func newPoolMetricsCollector(p StatisticScraper) *poolMetricsCollector {
return &poolMetricsCollector{
scraper: p,

View file

@@ -25,24 +25,24 @@ type Config struct {
// Start runs http service with the exposed endpoint on the configured port.
func (ms *Service) Start() {
if ms.enabled {
ms.log.Info(logs.ServiceIsRunning, zap.String("endpoint", ms.Addr), logs.TagField(logs.TagApp))
ms.log.Info(logs.ServiceIsRunning, zap.String("endpoint", ms.Addr))
err := ms.ListenAndServe()
if err != nil && err != http.ErrServerClosed {
ms.log.Warn(logs.ServiceCouldntStartOnConfiguredPort, logs.TagField(logs.TagApp))
ms.log.Warn(logs.ServiceCouldntStartOnConfiguredPort)
}
} else {
ms.log.Info(logs.ServiceHasntStartedSinceItsDisabled, logs.TagField(logs.TagApp))
ms.log.Info(logs.ServiceHasntStartedSinceItsDisabled)
}
}
// ShutDown stops the service.
func (ms *Service) ShutDown(ctx context.Context) {
ms.log.Info(logs.ShuttingDownService, zap.String("endpoint", ms.Addr), logs.TagField(logs.TagApp))
ms.log.Info(logs.ShuttingDownService, zap.String("endpoint", ms.Addr))
err := ms.Shutdown(ctx)
if err != nil {
ms.log.Error(logs.CantGracefullyShutDownService, zap.Error(err), logs.TagField(logs.TagApp))
ms.log.Error(logs.CantGracefullyShutDownService, zap.Error(err))
if err = ms.Close(); err != nil {
ms.log.Panic(logs.CantShutDownService, zap.Error(err), logs.TagField(logs.TagApp))
ms.log.Panic(logs.CantShutDownService, zap.Error(err))
}
}
}
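ShutDown tries a graceful stop first and falls back to a hard Close, panicking only if even that fails. A minimal usage sketch with a deadline (the five-second budget is an arbitrary example):

ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
metricsService.ShutDown(ctx)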

response/utils.go (new file, +41 lines)
View file

@@ -0,0 +1,41 @@
package response
import (
"errors"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
sdkstatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
"github.com/valyala/fasthttp"
"go.uber.org/zap"
)
func Error(r *fasthttp.RequestCtx, msg string, code int) {
r.Error(msg+"\n", code)
}
func FormErrorResponse(message string, err error) (int, string, []zap.Field) {
var (
msg string
statusCode int
logFields []zap.Field
)
st := new(sdkstatus.ObjectAccessDenied)
switch {
case errors.As(err, &st):
statusCode = fasthttp.StatusForbidden
reason := st.Reason()
msg = fmt.Sprintf("%s: %v: %s", message, err, reason)
logFields = append(logFields, zap.String("error_detail", reason))
case client.IsErrObjectNotFound(err) || client.IsErrContainerNotFound(err):
statusCode = fasthttp.StatusNotFound
msg = "Not Found"
default:
statusCode = fasthttp.StatusBadRequest
msg = fmt.Sprintf("%s: %v", message, err)
}
return statusCode, msg, logFields
}

View file

@@ -82,22 +82,14 @@ func fetchBearerToken(ctx *fasthttp.RequestCtx) (*bearer.Token, error) {
tkn = new(bearer.Token)
)
for _, parse := range []fromHandler{BearerTokenFromHeader, BearerTokenFromCookie} {
buf = parse(&ctx.Request.Header)
if buf == nil {
if buf = parse(&ctx.Request.Header); buf == nil {
continue
}
data, err := base64.StdEncoding.DecodeString(string(buf))
if err != nil {
} else if data, err := base64.StdEncoding.DecodeString(string(buf)); err != nil {
lastErr = fmt.Errorf("can't base64-decode bearer token: %w", err)
continue
}
if err = tkn.Unmarshal(data); err != nil {
if err = tkn.UnmarshalJSON(data); err != nil {
lastErr = fmt.Errorf("can't unmarshal bearer token: %w", err)
continue
}
} else if err = tkn.Unmarshal(data); err != nil {
lastErr = fmt.Errorf("can't unmarshal bearer token: %w", err)
continue
}
return tkn, nil

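On the master side the gateway base64-decodes the value and first tries the binary token encoding, then falls back to JSON, so a client may send either form in the header or the cookie; v0.31.0 accepts only the binary form. A sketch of producing both values (the header and cookie names are illustrative):

binaryB64 := base64.StdEncoding.EncodeToString(tkn.Marshal())
jsonBytes, _ := tkn.MarshalJSON()
jsonB64 := base64.StdEncoding.EncodeToString(jsonBytes)
// e.g. "Authorization: Bearer <binaryB64>" or "Cookie: Bearer=<jsonB64>"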
View file

@@ -98,14 +98,8 @@ func TestFetchBearerToken(t *testing.T) {
tkn := new(bearer.Token)
tkn.ForUser(uid)
jsonToken, err := tkn.MarshalJSON()
require.NoError(t, err)
jsonTokenBase64 := base64.StdEncoding.EncodeToString(jsonToken)
binaryTokenBase64 := base64.StdEncoding.EncodeToString(tkn.Marshal())
require.NotEmpty(t, jsonTokenBase64)
require.NotEmpty(t, binaryTokenBase64)
t64 := base64.StdEncoding.EncodeToString(tkn.Marshal())
require.NotEmpty(t, t64)
cases := []struct {
name string
@@ -149,47 +143,25 @@
error: "can't unmarshal bearer token",
},
{
name: "bad header, but good cookie with binary token",
name: "bad header, but good cookie",
header: "dGVzdAo=",
cookie: binaryTokenBase64,
cookie: t64,
expect: tkn,
},
{
name: "bad cookie, but good header with binary token",
header: binaryTokenBase64,
name: "bad cookie, but good header",
header: t64,
cookie: "dGVzdAo=",
expect: tkn,
},
{
name: "bad header, but good cookie with json token",
header: "dGVzdAo=",
cookie: jsonTokenBase64,
name: "ok for header",
header: t64,
expect: tkn,
},
{
name: "bad cookie, but good header with json token",
header: jsonTokenBase64,
cookie: "dGVzdAo=",
expect: tkn,
},
{
name: "ok for header with binary token",
header: binaryTokenBase64,
expect: tkn,
},
{
name: "ok for cookie with binary token",
cookie: binaryTokenBase64,
expect: tkn,
},
{
name: "ok for header with json token",
header: jsonTokenBase64,
expect: tkn,
},
{
name: "ok for cookie with json token",
cookie: jsonTokenBase64,
name: "ok for cookie",
cookie: t64,
expect: tkn,
},
}

View file

@@ -6,9 +6,9 @@ import (
"fmt"
"strings"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/api"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/api/layer"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/layer"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)
@@ -118,7 +118,7 @@ func (n *treeNode) FileName() (string, bool) {
return value, ok
}
func newNodeVersion(node NodeResponse) (*data.NodeVersion, error) {
func newNodeVersion(node NodeResponse) (*api.NodeVersion, error) {
tNode, err := newTreeNode(node)
if err != nil {
return nil, fmt.Errorf("invalid tree node: %w", err)
@@ -127,30 +127,20 @@ func newNodeVersion(node NodeResponse) (*data.NodeVersion, error) {
return newNodeVersionFromTreeNode(tNode), nil
}
func newNodeVersionFromTreeNode(treeNode *treeNode) *data.NodeVersion {
func newNodeVersionFromTreeNode(treeNode *treeNode) *api.NodeVersion {
_, isDeleteMarker := treeNode.Get(isDeleteMarkerKV)
version := &data.NodeVersion{
BaseNodeVersion: data.BaseNodeVersion{
OID: treeNode.ObjID,
IsDeleteMarker: isDeleteMarker,
size, _ := treeNode.Get(sizeKV)
version := &api.NodeVersion{
BaseNodeVersion: api.BaseNodeVersion{
OID: treeNode.ObjID,
},
DeleteMarker: isDeleteMarker,
IsPrefixNode: size == "",
}
return version
}
func newNodeInfo(node NodeResponse) data.NodeInfo {
nodeMeta := node.GetMeta()
nodeInfo := data.NodeInfo{
Meta: make([]data.NodeMeta, 0, len(nodeMeta)),
}
for _, meta := range nodeMeta {
nodeInfo.Meta = append(nodeInfo.Meta, meta)
}
return nodeInfo
}
func newMultiNode(nodes []NodeResponse) (*multiSystemNode, error) {
var (
err error
@@ -190,10 +180,7 @@ func (m *multiSystemNode) Old() []*treeNode {
return m.nodes[1:]
}
func (c *Tree) GetLatestVersion(ctx context.Context, cnrID *cid.ID, objectName string) (*data.NodeVersion, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "tree.GetLatestVersion")
defer span.End()
func (c *Tree) GetLatestVersion(ctx context.Context, cnrID *cid.ID, objectName string) (*api.NodeVersion, error) {
nodes, err := c.GetVersions(ctx, cnrID, objectName)
if err != nil {
return nil, err
@@ -208,9 +195,6 @@ func (c *Tree) GetLatestVersion(ctx context.Context, cnrID *cid.ID, objectName s
}
func (c *Tree) GetVersions(ctx context.Context, cnrID *cid.ID, objectName string) ([]NodeResponse, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "tree.GetVersions")
defer span.End()
meta := []string{oidKV, isDeleteMarkerKV, sizeKV}
path := pathFromName(objectName)
@@ -226,10 +210,7 @@ func (c *Tree) GetVersions(ctx context.Context, cnrID *cid.ID, objectName string
return c.service.GetNodes(ctx, p)
}
func (c *Tree) CheckSettingsNodeExists(ctx context.Context, bktInfo *data.BucketInfo) error {
ctx, span := tracing.StartSpanFromContext(ctx, "tree.CheckSettingsNodeExists")
defer span.End()
func (c *Tree) CheckSettingsNodeExist(ctx context.Context, bktInfo *data.BucketInfo) error {
_, err := c.getSystemNode(ctx, bktInfo, settingsFileName)
if err != nil {
return err
@@ -255,7 +236,7 @@ func (c *Tree) getSystemNode(ctx context.Context, bktInfo *data.BucketInfo, name
nodes = filterMultipartNodes(nodes)
if len(nodes) == 0 {
return nil, layer.ErrNodeNotFound
return nil, ErrNodeNotFound
}
return newMultiNode(nodes)
@@ -317,17 +298,14 @@ func pathFromName(objectName string) []string {
return strings.Split(objectName, separator)
}
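pathFromName simply splits the object name on the tree separator to obtain the node path. For example, assuming the separator is "/":

path := pathFromName("docs/2024/report.txt")
// path == []string{"docs", "2024", "report.txt"}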
func (c *Tree) GetSubTreeByPrefix(ctx context.Context, bktInfo *data.BucketInfo, prefix string, latestOnly bool) ([]data.NodeInfo, string, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "tree.GetSubTreeByPrefix")
defer span.End()
func (c *Tree) GetSubTreeByPrefix(ctx context.Context, bktInfo *data.BucketInfo, prefix string, latestOnly bool) ([]NodeResponse, string, error) {
rootID, tailPrefix, err := c.determinePrefixNode(ctx, bktInfo, versionTree, prefix)
if err != nil {
return nil, "", err
}
subTree, err := c.service.GetSubTree(ctx, bktInfo, versionTree, rootID, 2, false)
if err != nil {
if errors.Is(err, ErrNodeNotFound) {
if errors.Is(err, layer.ErrNodeNotFound) {
return nil, "", nil
}
return nil, "", err
@@ -362,23 +340,14 @@ func (c *Tree) GetSubTreeByPrefix(ctx context.Context, bktInfo *data.BucketInfo,
nodesMap[fileName] = nodes
}
result := make([]data.NodeInfo, 0, len(subTree))
result := make([]NodeResponse, 0, len(subTree))
for _, nodes := range nodesMap {
result = append(result, nodeResponseToNodeInfo(nodes)...)
result = append(result, nodes...)
}
return result, strings.TrimSuffix(prefix, tailPrefix), nil
}
func nodeResponseToNodeInfo(nodes []NodeResponse) []data.NodeInfo {
nodesInfo := make([]data.NodeInfo, 0, len(nodes))
for _, node := range nodes {
nodesInfo = append(nodesInfo, newNodeInfo(node))
}
return nodesInfo
}
func (c *Tree) determinePrefixNode(ctx context.Context, bktInfo *data.BucketInfo, treeID, prefix string) ([]uint64, string, error) {
rootID := []uint64{0}
path := strings.Split(prefix, separator)