Compare commits: master...feature/in (4 commits)

| Author | SHA1       | Date |
|--------|------------|------|
|        | a4991bda4e |      |
|        | e579549b41 |      |
|        | 0b9b23e67c |      |
|        | 9cb9d14146 |      |

16 changed files with 785 additions and 477 deletions
@@ -100,17 +100,18 @@ type (
 		workerPoolSize int
 		logLevelConfig *logLevelConfig

 		mu sync.RWMutex
 		defaultTimestamp bool
 		archiveCompression bool
 		clientCut bool
 		returnIndexPage bool
 		indexPageTemplate string
 		bufferMaxSizeForPut uint64
 		namespaceHeader string
 		defaultNamespaces []string
 		cors *data.CORSRule
 		enableFilepathFallback bool
+		enableFilepathSlashFallback bool
 	}

 	tagsConfig struct {

@@ -296,6 +297,7 @@ func (s *appSettings) update(v *viper.Viper, l *zap.Logger) {
 	indexPage, indexEnabled := fetchIndexPageTemplate(v, l)
 	cors := fetchCORSConfig(v)
 	enableFilepathFallback := v.GetBool(cfgFeaturesEnableFilepathFallback)
+	enableFilepathSlashFallback := v.GetBool(cfgFeaturesEnableFilepathSlashFallback)

 	s.mu.Lock()
 	defer s.mu.Unlock()

@@ -311,6 +313,7 @@ func (s *appSettings) update(v *viper.Viper, l *zap.Logger) {
 	s.indexPageTemplate = indexPage
 	s.cors = cors
 	s.enableFilepathFallback = enableFilepathFallback
+	s.enableFilepathSlashFallback = enableFilepathSlashFallback
 }

 func (s *loggerSettings) DroppedLogsInc() {

@@ -421,6 +424,12 @@ func (s *appSettings) EnableFilepathFallback() bool {
 	return s.enableFilepathFallback
 }

+func (s *appSettings) EnableFilepathSlashFallback() bool {
+	s.mu.RLock()
+	defer s.mu.RUnlock()
+	return s.enableFilepathSlashFallback
+}
+
 func (a *app) initResolver() {
 	var err error
 	a.resolver, err = resolver.NewContainerResolver(a.getResolverConfig())
@@ -180,8 +180,9 @@ const (
 	cfgMultinetSubnets = "multinet.subnets"

 	// Feature.
 	cfgFeaturesEnableFilepathFallback = "features.enable_filepath_fallback"
-	cfgFeaturesTreePoolNetmapSupport = "features.tree_pool_netmap_support"
+	cfgFeaturesEnableFilepathSlashFallback = "features.enable_filepath_slash_fallback"
+	cfgFeaturesTreePoolNetmapSupport = "features.tree_pool_netmap_support"

 	// Containers.
 	cfgContainersCORS = "containers.cors"
@@ -174,6 +174,8 @@ HTTP_GW_INDEX_PAGE_TEMPLATE_PATH=internal/handler/templates/index.gotmpl

 # Enable using fallback path to search for a object by attribute
 HTTP_GW_FEATURES_ENABLE_FILEPATH_FALLBACK=false
+# See description in docs/gate-configuration.md
+HTTP_GW_FEATURES_ENABLE_FILEPATH_SLASH_FALLBACK=false
 # Enable using new version of tree pool, which uses netmap to select nodes, for requests to tree service
 HTTP_GW_FEATURES_TREE_POOL_NETMAP_SUPPORT=true

@@ -192,6 +192,8 @@ multinet:
 features:
   # Enable using fallback path to search for a object by attribute
   enable_filepath_fallback: false
+  # See description in docs/gate-configuration.md
+  enable_filepath_slash_fallback: false
   # Enable using new version of tree pool, which uses netmap to select nodes, for requests to tree service
   tree_pool_netmap_support: true

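Taken together, the configuration hunks above add one new feature flag, `features.enable_filepath_slash_fallback`, wired the same way as the existing `enable_filepath_fallback`: a constant for the key, a Viper read in `update`, and an `RWMutex`-guarded getter so the value can be swapped on SIGHUP reload. The following is a condensed, self-contained sketch of that flow, not the gateway's actual settings file; the key name is copied from the hunks, everything else is illustrative.

```go
package main

import (
	"fmt"
	"sync"

	"github.com/spf13/viper"
)

const cfgFeaturesEnableFilepathSlashFallback = "features.enable_filepath_slash_fallback"

type appSettings struct {
	mu                          sync.RWMutex
	enableFilepathSlashFallback bool
}

// update re-reads the flag from config; called on startup and on SIGHUP.
func (s *appSettings) update(v *viper.Viper) {
	val := v.GetBool(cfgFeaturesEnableFilepathSlashFallback)

	s.mu.Lock()
	defer s.mu.Unlock()
	s.enableFilepathSlashFallback = val
}

// EnableFilepathSlashFallback is the getter handlers consult per request.
func (s *appSettings) EnableFilepathSlashFallback() bool {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return s.enableFilepathSlashFallback
}

func main() {
	v := viper.New()
	v.Set(cfgFeaturesEnableFilepathSlashFallback, true)

	s := &appSettings{}
	s.update(v)
	fmt.Println(s.EnableFilepathSlashFallback()) // true
}
```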
@@ -8,7 +8,6 @@ There are some custom types used for brevity:
 * `duration` -- string consisting of a number and a suffix. Suffix examples include `s` (seconds), `m` (minutes), `ms` (
   milliseconds).

-
 # Reload on SIGHUP

 Some config values can be reloaded on SIGHUP signal.

@@ -163,7 +162,6 @@ server:
 | `tls.cert_file` | `string` | yes | | Path to the TLS certificate. |
 | `tls.key_file` | `string` | yes | | Path to the key. |

-
 # `logger` section

 ```yaml

@@ -235,7 +233,6 @@ web:
 | `stream_request_body` | `bool` | `true` | Enables request body streaming, and calls the handler sooner when given body is larger than the current limit. |
 | `max_request_body_size` | `int` | `4194304` | Maximum request body size. The server rejects requests with bodies exceeding this limit. |

-
 # `upload-header` section

 ```yaml

@@ -271,7 +268,6 @@ archive:
 |---------------|--------|---------------|---------------|------------------------------------------------------------------|
 | `compression` | `bool` | yes | `false` | Enable archive compression when download files by common prefix. |

-
 # `pprof` section

 Contains configuration for the `pprof` profiler.

@@ -320,14 +316,13 @@ tracing:
 ```

 | Parameter | Type | SIGHUP reload | Default value | Description |
-| ------------ | -------------------------------------- | ------------- | ------------- | ------------------------------------------------------------------------------ |
+|--------------|----------------------------------------|---------------|---------------|----------------------------------------------------------------------------------|
 | `enabled` | `bool` | yes | `false` | Flag to enable the tracing. |
 | `exporter` | `string` | yes | | Trace collector type (`stdout` or `otlp_grpc` are supported). |
 | `endpoint` | `string` | yes | | Address of collector endpoint for OTLP exporters. |
 | `trusted_ca` | `string` | yes | | Path to certificate of a certification authority in pem format, that issued the TLS certificate of the telemetry remote server. |
 | `attributes` | [[]Attributes](#attributes-subsection) | yes | | An array of configurable attributes in key-value format. |

-
 #### `attributes` subsection

 ```yaml

@@ -338,12 +333,13 @@ tracing:
   value: value
 ```
+
 | Parameter | Type | SIGHUP reload | Default value | Description |
-|-----------------------|----------|---------------|---------------|----------------------------------------------------------|
+|-----------|----------|---------------|---------------|------------------|
 | `key` | `string` | yes | | Attribute key. |
 | `value` | `string` | yes | | Attribute value. |

 # `runtime` section

 Contains runtime parameters.

 ```yaml

@@ -372,7 +368,6 @@ frostfs:
 | `buffer_max_size_for_put` | `uint64` | yes | `1048576` | Sets max buffer size for read payload in put operations. |
 | `tree_pool_max_attempts` | `uint32` | no | `0` | Sets max attempt to make successful tree request. Value 0 means the number of attempts equals to number of nodes in pool. |

-
 ### `cache` section

 ```yaml

@@ -393,7 +388,6 @@ cache:
 | `netmap` | [Cache config](#cache-subsection) | `lifetime: 1m` | Cache which stores netmap. `netmap.size` isn't applicable for this cache. |
 | `cors` | [Cache config](#cache-subsection) | `lifetime: 5m`<br>`size: 1000` | Cache which stores container CORS configurations. |

-
 #### `cache` subsection

 ```yaml

@@ -406,7 +400,6 @@ size: 1000
 | `lifetime` | `duration` | depends on cache | Lifetime of entries in cache. |
 | `size` | `int` | depends on cache | LRU cache size. |

-
 # `resolve_bucket` section

 Bucket name resolving parameters from and to container ID.

@@ -417,10 +410,10 @@ resolve_bucket:
   default_namespaces: [ "", "root" ]
 ```

 | Parameter | Type | SIGHUP reload | Default value | Description |
-|----------------------|------------|---------------|-----------------------|--------------------------------------------------------------------------------------------------------------------------|
+|----------------------|------------|---------------|-----------------------|--------------------------------------------------|
 | `namespace_header` | `string` | yes | `X-Frostfs-Namespace` | Header to determine zone to resolve bucket name. |
 | `default_namespaces` | `[]string` | yes | ["","root"] | Namespaces that should be handled as default. |

 # `index_page` section

@@ -450,9 +443,9 @@ If values are not set, settings from CORS container will be used.
 ```yaml
 cors:
   allow_origin: "*"
-  allow_methods: ["GET", "HEAD"]
-  allow_headers: ["Authorization"]
-  expose_headers: ["*"]
+  allow_methods: [ "GET", "HEAD" ]
+  allow_headers: [ "Authorization" ]
+  expose_headers: [ "*" ]
   allow_credentials: false
   max_age: 600
 ```

@@ -472,15 +465,15 @@ Configuration of multinet support.

 ```yaml
 multinet:
   enabled: false
   balancer: roundrobin
   restrict: false
   fallback_delay: 300ms
   subnets:
     - mask: 1.2.3.4/24
       source_ips:
         - 1.2.3.4
         - 1.2.3.5
 ```

 | Parameter | Type | SIGHUP reload | Default value | Description |

@@ -512,13 +505,15 @@ Contains parameters for enabling features.
 ```yaml
 features:
   enable_filepath_fallback: true
+  enable_filepath_slash_fallback: false
   tree_pool_netmap_support: true
 ```

 | Parameter | Type | SIGHUP reload | Default value | Description |
-|-------------------------------------|--------|---------------|---------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| `features.enable_filepath_fallback` | `bool` | yes | `false` | Enable using fallback path to search for a object by attribute. If the value of the `FilePath` attribute in the request contains no `/` symbols or single leading `/` symbol and the object was not found, then an attempt is made to search for the object by the attribute `FileName`. |
-| `features.tree_pool_netmap_support` | `bool` | no | `false` | Enable using new version of tree pool, which uses netmap to select nodes, for requests to tree service. |
+|-------------------------------------------|--------|---------------|---------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `features.enable_filepath_fallback` | `bool` | yes | `false` | Enable using fallback path to search for a object by `FileName` attribute if object with `FilePath` attribute wasn't found. |
+| `features.enable_filepath_slash_fallback` | `bool` | yes | `false` | Enable using fallback path to search for a object by `FilePath`/`FileName` with/without (depends on provided value in `FilePath`/`FileName`) if object with provided `FilePath`/`FileName` wasn't found. This fallback goes `before enable_filepath_fallback`. |
+| `features.tree_pool_netmap_support` | `bool` | no | `false` | Enable using new version of tree pool, which uses netmap to select nodes, for requests to tree service. |

 # `containers` section

@@ -529,6 +524,6 @@ containers:
   cors: AZjLTXfK4vs4ovxMic2xEJKSymMNLqdwq9JT64ASFCRj
 ```

 | Parameter | Type | SIGHUP reload | Default value | Description |
-|-------------|----------|---------------|---------------|-----------------------------------------|
+|-----------|----------|---------------|---------------|-----------------------------------------|
 | `cors` | `string` | no | | Container name for CORS configurations. |
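The two documented flags interact: `enable_filepath_slash_fallback` retries the same attribute with the leading slash toggled, and it is tried before the `FileName` fallback controlled by `enable_filepath_fallback`. A purely illustrative model of the resulting search order follows; the helper names and types are invented for the example and are not gateway code.

```go
package main

import (
	"fmt"
	"strings"
)

// attempt is one attribute lookup the gateway could try, in order.
type attempt struct {
	Attribute string // "FilePath" or "FileName"
	Value     string
}

// lookupOrder sketches the order described in the features table: FilePath as
// given; with slash fallback, FilePath with the leading slash toggled; with
// filename fallback, FileName (and, if both flags are set, FileName toggled).
func lookupOrder(path string, slashFallback, filenameFallback bool) []attempt {
	toggled := toggleLeadingSlash(path)

	attempts := []attempt{{"FilePath", path}}
	if slashFallback {
		attempts = append(attempts, attempt{"FilePath", toggled})
	}
	if filenameFallback {
		attempts = append(attempts, attempt{"FileName", path})
		if slashFallback {
			attempts = append(attempts, attempt{"FileName", toggled})
		}
	}
	return attempts
}

func toggleLeadingSlash(p string) string {
	if p == "" || p == "/" {
		return p
	}
	if strings.HasPrefix(p, "/") {
		return p[1:]
	}
	return "/" + p
}

func main() {
	for _, a := range lookupOrder("cat.jpg", true, true) {
		fmt.Printf("%s=%q\n", a.Attribute, a.Value)
	}
	// Output:
	// FilePath="cat.jpg"
	// FilePath="/cat.jpg"
	// FileName="cat.jpg"
	// FileName="/cat.jpg"
}
```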
@@ -12,7 +12,6 @@ import (

 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
-	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
 	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
 	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"

@@ -131,11 +130,15 @@ func parentDir(prefix string) string {
 	return prefix[index:]
 }

-func trimPrefix(encPrefix string) string {
+func getParent(encPrefix string) string {
 	prefix, err := url.PathUnescape(encPrefix)
 	if err != nil {
 		return ""
 	}
+	if prefix != "" && prefix[len(prefix)-1] == '/' {
+		prefix = prefix[:len(prefix)-1]
+	}
+
 	slashIndex := strings.LastIndex(prefix, "/")
 	if slashIndex == -1 {
 		return ""

@@ -161,10 +164,15 @@ func urlencode(path string) string {
 type GetObjectsResponse struct {
 	objects []ResponseObject
 	hasErrors bool
+	isNative bool
 }

 func (h *Handler) getDirObjectsS3(ctx context.Context, bucketInfo *data.BucketInfo, prefix string) (*GetObjectsResponse, error) {
-	nodes, _, err := h.tree.GetSubTreeByPrefix(ctx, bucketInfo, prefix, true)
+	if prefix != "" && prefix[len(prefix)-1] == '/' {
+		prefix = prefix[:len(prefix)-1]
+	}
+
+	nodes, err := h.tree.GetSubTreeByPrefix(ctx, bucketInfo, prefix, true)
 	if err != nil {
 		return nil, err
 	}

@@ -185,7 +193,7 @@ func (h *Handler) getDirObjectsS3(ctx context.Context, bucketInfo *data.BucketInfo, prefix string) (*GetObjectsResponse, error) {
 		if obj.IsDeleteMarker {
 			continue
 		}
-		obj.FilePath = prefix + obj.FileName
+		obj.FilePath = prefix + "/" + obj.FileName
 		obj.GetURL = "/get/" + bucketInfo.Name + urlencode(obj.FilePath)
 		result.objects = append(result.objects, obj)
 	}

@@ -194,9 +202,9 @@ func (h *Handler) getDirObjectsS3(ctx context.Context, bucketInfo *data.BucketInfo, prefix string) (*GetObjectsResponse, error) {
 }

 func (h *Handler) getDirObjectsNative(ctx context.Context, bucketInfo *data.BucketInfo, prefix string) (*GetObjectsResponse, error) {
-	var basePath string
-	if ind := strings.LastIndex(prefix, "/"); ind != -1 {
-		basePath = prefix[:ind+1]
+	basePath := prefix
+	if basePath != "" && basePath[len(basePath)-1] != '/' {
+		basePath += "/"
 	}

 	filters := object.NewSearchFilters()

@@ -226,7 +234,8 @@ func (h *Handler) getDirObjectsNative(ctx context.Context, bucketInfo *data.BucketInfo, prefix string) (*GetObjectsResponse, error) {
 	log := h.reqLogger(ctx)
 	dirs := make(map[string]struct{})
 	result := &GetObjectsResponse{
 		objects: make([]ResponseObject, 0, 100),
+		isNative: true,
 	}
 	for objExt := range resp {
 		if objExt.Error != nil {

@@ -322,28 +331,16 @@ func (h *Handler) headDirObject(ctx context.Context, cnrID cid.ID, objID oid.ID,
 }

 type browseParams struct {
 	bucketInfo *data.BucketInfo
 	prefix string
-	isNative bool
-	listObjects func(ctx context.Context, bucketName *data.BucketInfo, prefix string) (*GetObjectsResponse, error)
+	objects *GetObjectsResponse
 }

 func (h *Handler) browseObjects(ctx context.Context, req *fasthttp.RequestCtx, p browseParams) {
 	const S3Protocol = "s3"
 	const FrostfsProtocol = "frostfs"

-	ctx = utils.SetReqLog(ctx, h.reqLogger(ctx).With(
-		zap.String("bucket", p.bucketInfo.Name),
-		zap.String("container", p.bucketInfo.CID.EncodeToString()),
-		zap.String("prefix", p.prefix),
-	))
-	resp, err := p.listObjects(ctx, p.bucketInfo, p.prefix)
-	if err != nil {
-		h.logAndSendError(ctx, req, logs.FailedToListObjects, err)
-		return
-	}
-
-	objects := resp.objects
+	objects := p.objects.objects
 	sort.Slice(objects, func(i, j int) bool {
 		if objects[i].IsDir == objects[j].IsDir {
 			return objects[i].FileName < objects[j].FileName

@@ -353,7 +350,7 @@ func (h *Handler) browseObjects(ctx context.Context, req *fasthttp.RequestCtx, p browseParams) {

 	tmpl, err := template.New("index").Funcs(template.FuncMap{
 		"formatSize": formatSize,
-		"trimPrefix": trimPrefix,
+		"getParent": getParent,
 		"urlencode": urlencode,
 		"parentDir": parentDir,
 	}).Parse(h.config.IndexPageTemplate())

@@ -363,16 +360,21 @@ func (h *Handler) browseObjects(ctx context.Context, req *fasthttp.RequestCtx, p browseParams) {
 	}
 	bucketName := p.bucketInfo.Name
 	protocol := S3Protocol
-	if p.isNative {
+	if p.objects.isNative {
 		bucketName = p.bucketInfo.CID.EncodeToString()
 		protocol = FrostfsProtocol
 	}
+	prefix := p.prefix
+	if prefix != "" && prefix[len(prefix)-1] != '/' {
+		prefix += "/"
+	}
+
 	if err = tmpl.Execute(req, &BrowsePageData{
 		Container: bucketName,
-		Prefix: p.prefix,
+		Prefix: prefix,
 		Objects: objects,
 		Protocol: protocol,
-		HasErrors: resp.hasErrors,
+		HasErrors: p.objects.hasErrors,
 	}); err != nil {
 		h.logAndSendError(ctx, req, logs.FailedToExecuteTemplate, err)
 		return
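The index listing now normalizes prefixes at both boundaries: the S3 tree lookup strips a trailing slash and re-adds it when composing `obj.FilePath`, while the native listing guarantees `basePath` ends with one. A minimal standalone illustration of those two rules, with helper names invented for the example:

```go
package main

import "fmt"

// trimTrailingSlash mirrors the check added before the tree lookup.
func trimTrailingSlash(prefix string) string {
	if prefix != "" && prefix[len(prefix)-1] == '/' {
		return prefix[:len(prefix)-1]
	}
	return prefix
}

// ensureTrailingSlash mirrors the new basePath handling in the native listing.
func ensureTrailingSlash(basePath string) string {
	if basePath != "" && basePath[len(basePath)-1] != '/' {
		return basePath + "/"
	}
	return basePath
}

func main() {
	prefix := trimTrailingSlash("folder/sub/") // "folder/sub"
	fmt.Println(prefix + "/" + "cat.jpg")      // "folder/sub/cat.jpg", as in obj.FilePath

	fmt.Println(ensureTrailingSlash("folder/sub")) // "folder/sub/"
	fmt.Println(ensureTrailingSlash(""))           // "" (empty prefix stays empty)
}
```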
@@ -10,11 +10,12 @@ import (
 	"fmt"
 	"io"
 	"net/url"
+	"strings"
 	"time"

 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
-	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/layer"
 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
+	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree"
 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
 	"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
 	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"

@@ -31,13 +32,18 @@ func (h *Handler) DownloadByAddressOrBucketName(req *fasthttp.RequestCtx) {

 	cidParam := req.UserValue("cid").(string)
 	oidParam := req.UserValue("oid").(string)
-	downloadParam := req.QueryArgs().GetBool("download")

 	ctx = utils.SetReqLog(ctx, h.reqLogger(ctx).With(
 		zap.String("cid", cidParam),
 		zap.String("oid", oidParam),
 	))

+	path, err := url.QueryUnescape(oidParam)
+	if err != nil {
+		h.logAndSendError(ctx, req, logs.FailedToUnescapePath, err)
+		return
+	}
+
 	bktInfo, err := h.getBucketInfo(ctx, cidParam)
 	if err != nil {
 		h.logAndSendError(ctx, req, logs.FailedToGetBucketInfo, err)
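The handler now percent-decodes the `oid` route value before treating it as an object path; malformed escapes are logged and the request is answered with an error instead of being passed through. For reference, a tiny illustration of the standard-library behaviour this relies on:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	path, err := url.QueryUnescape("folder%2Fcat.jpg")
	fmt.Println(path, err) // folder/cat.jpg <nil>

	_, err = url.QueryUnescape("%zz") // malformed escape -> non-nil error
	fmt.Println(err != nil)           // true
}
```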
@@ -45,23 +51,164 @@ func (h *Handler) DownloadByAddressOrBucketName(req *fasthttp.RequestCtx) {
 	}

 	checkS3Err := h.tree.CheckSettingsNodeExists(ctx, bktInfo)
-	if checkS3Err != nil && !errors.Is(checkS3Err, layer.ErrNodeNotFound) {
+	if checkS3Err != nil && !errors.Is(checkS3Err, tree.ErrNodeNotFound) {
 		h.logAndSendError(ctx, req, logs.FailedToCheckIfSettingsNodeExist, checkS3Err)
 		return
 	}

-	var objID oid.ID
-	if checkS3Err == nil && shouldDownload(oidParam, downloadParam) {
-		h.byS3Path(ctx, req, bktInfo.CID, oidParam, h.receiveFile)
-	} else if err = objID.DecodeString(oidParam); err == nil {
-		h.byNativeAddress(ctx, req, bktInfo.CID, objID, h.receiveFile)
+	prm := MiddlewareParam{
+		Context: ctx,
+		Request: req,
+		BktInfo: bktInfo,
+		Path: path,
+	}
+
+	indexPageEnabled := h.config.IndexPageEnabled()
+
+	if checkS3Err == nil {
+		run(prm, h.errorMiddleware(logs.ObjectNotFound, ErrObjectNotFound),
+			Middleware{Func: h.byS3PathMiddleware(h.receiveFile, noopFormer), Enabled: true},
+			Middleware{Func: h.byS3PathMiddleware(h.receiveFile, indexFormer), Enabled: indexPageEnabled},
+			Middleware{Func: h.browseIndexMiddleware(h.getDirObjectsS3), Enabled: indexPageEnabled},
+		)
 	} else {
-		h.browseIndex(ctx, req, cidParam, oidParam, checkS3Err != nil)
+		slashFallbackEnabled := h.config.EnableFilepathSlashFallback()
+		fileNameFallbackEnabled := h.config.EnableFilepathFallback()
+
+		run(prm, h.errorMiddleware(logs.ObjectNotFound, ErrObjectNotFound),
+			Middleware{Func: h.byAddressMiddleware(h.receiveFile), Enabled: true},
+			Middleware{Func: h.byAttributeSearchMiddleware(h.receiveFile, object.AttributeFilePath, noopFormer), Enabled: true},
+			Middleware{Func: h.byAttributeSearchMiddleware(h.receiveFile, object.AttributeFilePath, reverseLeadingSlash), Enabled: slashFallbackEnabled},
+			Middleware{Func: h.byAttributeSearchMiddleware(h.receiveFile, object.AttributeFileName, noopFormer), Enabled: fileNameFallbackEnabled},
+			Middleware{Func: h.byAttributeSearchMiddleware(h.receiveFile, object.AttributeFileName, reverseLeadingSlash), Enabled: fileNameFallbackEnabled && slashFallbackEnabled},
+			Middleware{Func: h.byAttributeSearchMiddleware(h.receiveFile, object.AttributeFilePath, indexFormer), Enabled: indexPageEnabled},
+			Middleware{Func: h.byAttributeSearchMiddleware(h.receiveFile, object.AttributeFileName, indexFormer), Enabled: fileNameFallbackEnabled && indexPageEnabled},
+			Middleware{Func: h.browseIndexMiddleware(h.getDirObjectsNative), Enabled: indexPageEnabled},
+		)
 	}
 }

-func shouldDownload(oidParam string, downloadParam bool) bool {
-	return !isDir(oidParam) || downloadParam
+type MiddlewareFunc func(param MiddlewareParam) bool
+
+type MiddlewareParam struct {
+	Context context.Context
+	Request *fasthttp.RequestCtx
+	BktInfo *data.BucketInfo
+	Path string
+}
+
+type Middleware struct {
+	Func MiddlewareFunc
+	Enabled bool
+}
+
+func run(prm MiddlewareParam, defaultMiddleware MiddlewareFunc, middlewares ...Middleware) {
+	for _, m := range middlewares {
+		if m.Enabled && !m.Func(prm) {
+			return
+		}
+	}
+
+	defaultMiddleware(prm)
+}
+
+func indexFormer(path string) string {
+	indexPath := path
+	if indexPath != "" && !strings.HasSuffix(indexPath, "/") {
+		indexPath += "/"
+	}
+
+	return indexPath + "index.html"
+}
+
+func reverseLeadingSlash(path string) string {
+	if path == "" || path == "/" {
+		return path
+	}
+
+	if path[0] == '/' {
+		return path[1:]
+	}
+
+	return "/" + path
+}
+
+func noopFormer(path string) string {
+	return path
+}
+
+func (h *Handler) byS3PathMiddleware(handler func(context.Context, *fasthttp.RequestCtx, oid.Address), pathFormer func(string) string) MiddlewareFunc {
+	return func(prm MiddlewareParam) bool {
+		ctx, span := tracing.StartSpanFromContext(prm.Context, "handler.byS3Path")
+		defer span.End()
+
+		path := pathFormer(prm.Path)
+
+		foundOID, err := h.tree.GetLatestVersion(ctx, &prm.BktInfo.CID, path)
+		if err == nil {
+			if foundOID.IsDeleteMarker {
+				h.logAndSendError(ctx, prm.Request, logs.IndexWasDeleted, ErrObjectNotFound)
+				return false
+			}
+
+			addr := newAddress(prm.BktInfo.CID, foundOID.OID)
+			handler(ctx, prm.Request, addr)
+			return false
+		}
+
+		if !errors.Is(err, tree.ErrNodeNotFound) {
+			h.logAndSendError(ctx, prm.Request, logs.FailedToGetLatestVersionOfIndexObject, err, zap.String("path", path))
+			return false
+		}
+
+		return true
+	}
+}
+
+func (h *Handler) byAttributeSearchMiddleware(handler func(context.Context, *fasthttp.RequestCtx, oid.Address), attr string, pathFormer func(string) string) MiddlewareFunc {
+	return func(prm MiddlewareParam) bool {
+		ctx, span := tracing.StartSpanFromContext(prm.Context, "handler.byAttributeSearch")
+		defer span.End()
+
+		path := pathFormer(prm.Path)
+
+		res, err := h.search(ctx, prm.BktInfo.CID, attr, path, object.MatchStringEqual)
+		if err != nil {
+			h.logAndSendError(ctx, prm.Request, logs.FailedToFindObjectByAttribute, err)
+			return false
+		}
+		defer res.Close()
+
+		buf := make([]oid.ID, 1)
+		n, err := res.Read(buf)
+		if err == nil && n > 0 {
+			addr := newAddress(prm.BktInfo.CID, buf[0])
+			handler(ctx, prm.Request, addr)
+			return false
+		}
+
+		if !errors.Is(err, io.EOF) {
+			h.logAndSendError(ctx, prm.Request, logs.FailedToFindObjectByAttribute, err)
+			return false
+		}
+
+		return true
+	}
+}
+
+func (h *Handler) byAddressMiddleware(handler func(context.Context, *fasthttp.RequestCtx, oid.Address)) MiddlewareFunc {
+	return func(prm MiddlewareParam) bool {
+		ctx, span := tracing.StartSpanFromContext(prm.Context, "handler.byAddress")
+		defer span.End()
+
+		var objID oid.ID
+		if objID.DecodeString(prm.Path) == nil {
+			handler(ctx, prm.Request, newAddress(prm.BktInfo.CID, objID))
+			return false
+		}
+
+		return true
+	}
 }

 // DownloadByAttribute handles attribute-based download requests.
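The rewritten handler expresses the lookup strategy as an ordered chain: each `Middleware` either handles the request (returns `false`) or defers to the next one (returns `true`), and the default error middleware only runs when everything else has passed. Below is a toy, self-contained version of that control flow, printing instead of serving FrostFS objects; it is a sketch of the pattern, not the gateway's code.

```go
package main

import "fmt"

type MiddlewareParam struct{ Path string }

type MiddlewareFunc func(prm MiddlewareParam) bool

type Middleware struct {
	Func    MiddlewareFunc
	Enabled bool
}

// run tries each enabled middleware in order; a middleware returning false has
// produced a response, returning true means "not mine, try the next one".
func run(prm MiddlewareParam, defaultMiddleware MiddlewareFunc, middlewares ...Middleware) {
	for _, m := range middlewares {
		if m.Enabled && !m.Func(prm) {
			return
		}
	}
	defaultMiddleware(prm)
}

func main() {
	byAddress := func(prm MiddlewareParam) bool {
		fmt.Println("byAddress: not an OID, falling through")
		return true
	}
	byFilePath := func(prm MiddlewareParam) bool {
		fmt.Println("byFilePath: served", prm.Path)
		return false // handled, stop the chain
	}
	notFound := func(prm MiddlewareParam) bool {
		fmt.Println("404 for", prm.Path)
		return false
	}

	run(MiddlewareParam{Path: "folder/cat.jpg"}, notFound,
		Middleware{Func: byAddress, Enabled: true},
		Middleware{Func: byFilePath, Enabled: true},
	)
	// Output:
	// byAddress: not an OID, falling through
	// byFilePath: served folder/cat.jpg
}
```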
@@ -11,8 +11,8 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/cache"
 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler/middleware"
-	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/layer"
 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
+	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree"
 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
 	"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"

@@ -35,6 +35,7 @@ type Config interface {
 	BufferMaxSizeForPut() uint64
 	NamespaceHeader() string
 	EnableFilepathFallback() bool
+	EnableFilepathSlashFallback() bool
 	FormContainerZone(string) string
 	CORS() *data.CORSRule
 }

@@ -172,7 +173,7 @@ type Handler struct {
 	ownerID *user.ID
 	config Config
 	containerResolver ContainerResolver
-	tree layer.TreeService
+	tree *tree.Tree
 	cache *cache.BucketCache
 	workerPool *ants.Pool
 	corsCnrID cid.ID

@@ -189,7 +190,7 @@ type AppParams struct {
 	CORSCache *cache.CORSCache
 }

-func New(params *AppParams, config Config, tree layer.TreeService, workerPool *ants.Pool) *Handler {
+func New(params *AppParams, config Config, tree *tree.Tree, workerPool *ants.Pool) *Handler {
 	return &Handler{
 		log: params.Logger,
 		frostfs: params.FrostFS,
@@ -204,36 +205,6 @@ func New(params *AppParams, config Config, tree layer.TreeService, workerPool *ants.Pool) *Handler {
 	}
 }

-// byNativeAddress is a wrapper for function (e.g. request.headObject, request.receiveFile) that
-// prepares request and object address to it.
-func (h *Handler) byNativeAddress(ctx context.Context, req *fasthttp.RequestCtx, cnrID cid.ID, objID oid.ID, handler func(context.Context, *fasthttp.RequestCtx, oid.Address)) {
-	ctx, span := tracing.StartSpanFromContext(ctx, "handler.byNativeAddress")
-	defer span.End()
-
-	addr := newAddress(cnrID, objID)
-	handler(ctx, req, addr)
-}
-
-// byS3Path is a wrapper for function (e.g. request.headObject, request.receiveFile) that
-// resolves object address from S3-like path <bucket name>/<object key>.
-func (h *Handler) byS3Path(ctx context.Context, req *fasthttp.RequestCtx, cnrID cid.ID, path string, handler func(context.Context, *fasthttp.RequestCtx, oid.Address)) {
-	ctx, span := tracing.StartSpanFromContext(ctx, "handler.byS3Path")
-	defer span.End()
-
-	foundOID, err := h.tree.GetLatestVersion(ctx, &cnrID, path)
-	if err != nil {
-		h.logAndSendError(ctx, req, logs.FailedToGetLatestVersionOfObject, err, zap.String("path", path))
-		return
-	}
-	if foundOID.IsDeleteMarker {
-		h.logAndSendError(ctx, req, logs.ObjectWasDeleted, ErrObjectNotFound)
-		return
-	}
-
-	addr := newAddress(cnrID, foundOID.OID)
-	handler(ctx, req, addr)
-}
-
 // byAttribute is a wrapper similar to byNativeAddress.
 func (h *Handler) byAttribute(ctx context.Context, req *fasthttp.RequestCtx, handler func(context.Context, *fasthttp.RequestCtx, oid.Address)) {
 	cidParam, _ := req.UserValue("cid").(string)

@@ -252,8 +223,6 @@ func (h *Handler) byAttribute(ctx context.Context, req *fasthttp.RequestCtx, handler func(context.Context, *fasthttp.RequestCtx, oid.Address)) {
 		return
 	}

-	val = prepareAtribute(key, val)
-
 	ctx = utils.SetReqLog(ctx, h.reqLogger(ctx).With(zap.String("cid", cidParam),
 		zap.String("attr_key", key), zap.String("attr_val", val)))

@@ -291,10 +260,6 @@ func (h *Handler) findObjectByAttribute(ctx context.Context, cnrID cid.ID, attrK
 	n, err := res.Read(buf)
 	if n == 0 {
 		switch {
-		case errors.Is(err, io.EOF) && h.needSearchByFileName(attrKey, attrVal):
-			h.reqLogger(ctx).Debug(logs.ObjectNotFoundByFilePathTrySearchByFileName, logs.TagField(logs.TagExternalStorage))
-			attrVal = prepareAtribute(attrFileName, attrVal)
-			return h.findObjectByAttribute(ctx, cnrID, attrFileName, attrVal)
 		case errors.Is(err, io.EOF):
 			h.reqLogger(ctx).Error(logs.ObjectNotFound, zap.Error(err), logs.TagField(logs.TagExternalStorage))
 			return oid.ID{}, fmt.Errorf("object not found: %w", err)

@@ -307,42 +272,6 @@ func (h *Handler) findObjectByAttribute(ctx context.Context, cnrID cid.ID, attrK
 	return buf[0], nil
 }

-func (h *Handler) needSearchByFileName(key, val string) bool {
-	if key != attrFilePath || !h.config.EnableFilepathFallback() {
-		return false
-	}
-
-	return strings.HasPrefix(val, "/") && strings.Count(val, "/") == 1 || !strings.Contains(val, "/")
-}
-
-func prepareAtribute(attrKey, attrVal string) string {
-	if attrKey == attrFileName {
-		return prepareFileName(attrVal)
-	}
-
-	if attrKey == attrFilePath {
-		return prepareFilePath(attrVal)
-	}
-
-	return attrVal
-}
-
-func prepareFileName(fileName string) string {
-	if strings.HasPrefix(fileName, "/") {
-		return fileName[1:]
-	}
-
-	return fileName
-}
-
-func prepareFilePath(filePath string) string {
-	if !strings.HasPrefix(filePath, "/") {
-		return "/" + filePath
-	}
-
-	return filePath
-}
-
 // resolveContainer decode container id, if it's not a valid container id
 // then trey to resolve name using provided resolver.
 func (h *Handler) resolveContainer(ctx context.Context, containerID string) (*cid.ID, error) {
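With the middleware chain in place, the per-attribute normalization helpers removed above become redundant: instead of rewriting the value once per attribute, the chain simply retries with the leading slash toggled. A side-by-side illustration of the old and new behaviour, copied/adapted from the hunks so it runs on its own:

```go
package main

import (
	"fmt"
	"strings"
)

// Old behaviour: normalize the value once, per attribute.
func prepareFilePath(filePath string) string {
	if !strings.HasPrefix(filePath, "/") {
		return "/" + filePath
	}
	return filePath
}

func prepareFileName(fileName string) string {
	return strings.TrimPrefix(fileName, "/")
}

// New behaviour: a path former that toggles the slash, used for the retry.
func reverseLeadingSlash(path string) string {
	if path == "" || path == "/" {
		return path
	}
	if path[0] == '/' {
		return path[1:]
	}
	return "/" + path
}

func main() {
	fmt.Println(prepareFilePath("cat.jpg"))      // "/cat.jpg"  (old: always add the slash)
	fmt.Println(prepareFileName("/cat.jpg"))     // "cat.jpg"   (old: always strip it)
	fmt.Println(reverseLeadingSlash("cat.jpg"))  // "/cat.jpg"  (new: toggle for the fallback attempt)
	fmt.Println(reverseLeadingSlash("/cat.jpg")) // "cat.jpg"
}
```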
@@ -418,37 +347,31 @@ func (h *Handler) readContainer(ctx context.Context, cnrID cid.ID) (*data.Bucket
 	return bktInfo, err
 }

-func (h *Handler) browseIndex(ctx context.Context, req *fasthttp.RequestCtx, cidParam, oidParam string, isNativeList bool) {
-	ctx, span := tracing.StartSpanFromContext(ctx, "handler.browseIndex")
-	defer span.End()
-
-	if !h.config.IndexPageEnabled() {
-		req.SetStatusCode(fasthttp.StatusNotFound)
-		return
+type ListFunc func(ctx context.Context, bucketInfo *data.BucketInfo, prefix string) (*GetObjectsResponse, error)
+
+func (h *Handler) browseIndexMiddleware(fn ListFunc) MiddlewareFunc {
+	return func(prm MiddlewareParam) bool {
+		ctx, span := tracing.StartSpanFromContext(prm.Context, "handler.browseIndex")
+		defer span.End()
+
+		ctx = utils.SetReqLog(ctx, h.reqLogger(ctx).With(
+			zap.String("bucket", prm.BktInfo.Name),
+			zap.String("container", prm.BktInfo.CID.EncodeToString()),
+			zap.String("prefix", prm.Path),
+		))
+
+		objects, err := fn(ctx, prm.BktInfo, prm.Path)
+		if err != nil {
+			h.logAndSendError(ctx, prm.Request, logs.FailedToListObjects, err)
+			return false
+		}
+
+		h.browseObjects(ctx, prm.Request, browseParams{
+			bucketInfo: prm.BktInfo,
+			prefix: prm.Path,
+			objects: objects,
+		})
+
+		return false
 	}

-	unescapedKey, err := url.QueryUnescape(oidParam)
-	if err != nil {
-		h.logAndSendError(ctx, req, logs.FailedToUnescapeOIDParam, err)
-		return
-	}
-
-	bktInfo, err := h.getBucketInfo(ctx, cidParam)
-	if err != nil {
-		h.logAndSendError(ctx, req, logs.FailedToGetBucketInfo, err)
-		return
-	}
-
-	listFunc := h.getDirObjectsS3
-	if isNativeList {
-		// tree probe failed, trying to use native
-		listFunc = h.getDirObjectsNative
-	}
-
-	h.browseObjects(ctx, req, browseParams{
-		bucketInfo: bktInfo,
-		prefix: unescapedKey,
-		listObjects: listFunc,
-		isNative: isNativeList,
-	})
 }
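The new `browseIndexMiddleware` is a small adapter: a listing function is captured in a closure that conforms to the middleware signature, so the S3 and native listings plug into the same chain. A stripped-down, illustrative version of that adapter pattern with invented types:

```go
package main

import (
	"context"
	"fmt"
)

type MiddlewareParam struct {
	Context context.Context
	Prefix  string
}

type MiddlewareFunc func(prm MiddlewareParam) bool

type ListFunc func(ctx context.Context, prefix string) ([]string, error)

// browseIndexMiddleware wraps any ListFunc into a middleware step.
func browseIndexMiddleware(fn ListFunc) MiddlewareFunc {
	return func(prm MiddlewareParam) bool {
		objects, err := fn(prm.Context, prm.Prefix)
		if err != nil {
			fmt.Println("listing failed:", err)
			return false
		}
		fmt.Println("rendering index page for", prm.Prefix, "with", len(objects), "entries")
		return false // the index page is always a terminal response
	}
}

func main() {
	listS3 := func(ctx context.Context, prefix string) ([]string, error) {
		return []string{"cat.jpg", "dog.jpg"}, nil
	}

	m := browseIndexMiddleware(listS3)
	m(MiddlewareParam{Context: context.Background(), Prefix: "folder/"})
	// Output: rendering index page for folder/ with 2 entries
}
```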
@@ -14,9 +14,10 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/cache"
 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler/middleware"
-	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/layer"
+	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/templates"
 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/resolver"
 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens"
+	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree"
 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
 	v2container "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"

@@ -26,6 +27,7 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
 	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+	oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
 	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
 	"github.com/panjf2000/ants/v2"

@@ -35,35 +37,11 @@ import (
 	"go.uber.org/zap/zaptest"
 )

-type treeServiceMock struct {
-	system map[string]map[string]*data.BaseNodeVersion
-}
-
-func newTreeService() *treeServiceMock {
-	return &treeServiceMock{
-		system: make(map[string]map[string]*data.BaseNodeVersion),
-	}
-}
-
-func (t *treeServiceMock) CheckSettingsNodeExists(context.Context, *data.BucketInfo) error {
-	_, ok := t.system["bucket-settings"]
-	if !ok {
-		return layer.ErrNodeNotFound
-	}
-	return nil
-}
-
-func (t *treeServiceMock) GetSubTreeByPrefix(context.Context, *data.BucketInfo, string, bool) ([]data.NodeInfo, string, error) {
-	return nil, "", nil
-}
-
-func (t *treeServiceMock) GetLatestVersion(context.Context, *cid.ID, string) (*data.NodeVersion, error) {
-	return nil, nil
-}
-
 type configMock struct {
-	additionalSearch bool
-	cors *data.CORSRule
+	additionalFilenameSearch bool
+	additionalSlashSearch bool
+	indexEnabled bool
+	cors *data.CORSRule
 }

 func (c *configMock) DefaultTimestamp() bool {

@@ -75,11 +53,11 @@ func (c *configMock) ArchiveCompression() bool {
 }

 func (c *configMock) IndexPageEnabled() bool {
-	return false
+	return c.indexEnabled
 }

 func (c *configMock) IndexPageTemplate() string {
-	return ""
+	return templates.DefaultIndexTemplate
 }

 func (c *configMock) IndexPageNativeTemplate() string {

@@ -99,7 +77,11 @@ func (c *configMock) NamespaceHeader() string {
 }

 func (c *configMock) EnableFilepathFallback() bool {
-	return c.additionalSearch
+	return c.additionalFilenameSearch
+}
+
+func (c *configMock) EnableFilepathSlashFallback() bool {
+	return c.additionalSlashSearch
 }

 func (c *configMock) FormContainerZone(string) string {

@@ -117,7 +99,7 @@ type handlerContext struct {

 	h *Handler
 	frostfs *TestFrostFS
-	tree *treeServiceMock
+	tree *treeServiceClientMock
 	cfg *configMock
 }

@@ -167,14 +149,14 @@ func prepareHandlerContextBase(logger *zap.Logger) (*handlerContext, error) {
 		}),
 	}

-	treeMock := newTreeService()
+	treeMock := newTreeServiceClientMock()
 	cfgMock := &configMock{}

 	workerPool, err := ants.NewPool(1)
 	if err != nil {
 		return nil, err
 	}
-	handler := New(params, cfgMock, treeMock, workerPool)
+	handler := New(params, cfgMock, tree.NewTree(treeMock, logger), workerPool)

 	return &handlerContext{
 		key: key,
@@ -254,6 +236,7 @@ func TestBasic(t *testing.T) {
 		err = json.Unmarshal(r.Response.Body(), &putRes)
 		require.NoError(t, err)

+		hc.cfg.additionalFilenameSearch = true
 		obj := hc.frostfs.objects[putRes.ContainerID+"/"+putRes.ObjectID]
 		fileName := prepareObjectAttributes(object.AttributeFileName, objFileName)
 		filePath := prepareObjectAttributes(object.AttributeFilePath, objFilePath)

@@ -264,6 +247,14 @@ func TestBasic(t *testing.T) {
 		r = prepareGetRequest(ctx, cnrID.EncodeToString(), putRes.ObjectID)
 		hc.Handler().DownloadByAddressOrBucketName(r)
 		require.Equal(t, content, string(r.Response.Body()))
+
+		r = prepareGetRequest(ctx, cnrID.EncodeToString(), objFilePath)
+		hc.Handler().DownloadByAddressOrBucketName(r)
+		require.Equal(t, content, string(r.Response.Body()))
+
+		r = prepareGetRequest(ctx, cnrID.EncodeToString(), objFileName)
+		hc.Handler().DownloadByAddressOrBucketName(r)
+		require.Equal(t, content, string(r.Response.Body()))
 	})

 	t.Run("head", func(t *testing.T) {

@@ -271,6 +262,16 @@ func TestBasic(t *testing.T) {
 		hc.Handler().HeadByAddressOrBucketName(r)
 		require.Equal(t, putRes.ObjectID, string(r.Response.Header.Peek(hdrObjectID)))
 		require.Equal(t, putRes.ContainerID, string(r.Response.Header.Peek(hdrContainerID)))
+
+		r = prepareGetRequest(ctx, cnrID.EncodeToString(), objFilePath)
+		hc.Handler().HeadByAddressOrBucketName(r)
+		require.Equal(t, putRes.ObjectID, string(r.Response.Header.Peek(hdrObjectID)))
+		require.Equal(t, putRes.ContainerID, string(r.Response.Header.Peek(hdrContainerID)))
+
+		r = prepareGetRequest(ctx, cnrID.EncodeToString(), objFileName)
+		hc.Handler().HeadByAddressOrBucketName(r)
+		require.Equal(t, putRes.ObjectID, string(r.Response.Header.Peek(hdrObjectID)))
+		require.Equal(t, putRes.ContainerID, string(r.Response.Header.Peek(hdrContainerID)))
 	})

 	t.Run("get by attribute", func(t *testing.T) {

@@ -280,11 +281,11 @@ func TestBasic(t *testing.T) {

 		r = prepareGetByAttributeRequest(ctx, bktName, attrFileName, objFilePath)
 		hc.Handler().DownloadByAttribute(r)
-		require.Equal(t, content, string(r.Response.Body()))
+		require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode())

 		r = prepareGetByAttributeRequest(ctx, bktName, attrFilePath, objFileName)
 		hc.Handler().DownloadByAttribute(r)
-		require.Equal(t, content, string(r.Response.Body()))
+		require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode())
 	})

 	t.Run("head by attribute", func(t *testing.T) {

@@ -295,13 +296,11 @@ func TestBasic(t *testing.T) {

 		r = prepareGetByAttributeRequest(ctx, bktName, attrFileName, objFilePath)
 		hc.Handler().HeadByAttribute(r)
-		require.Equal(t, putRes.ObjectID, string(r.Response.Header.Peek(hdrObjectID)))
-		require.Equal(t, putRes.ContainerID, string(r.Response.Header.Peek(hdrContainerID)))
+		require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode())

 		r = prepareGetByAttributeRequest(ctx, bktName, attrFilePath, objFileName)
 		hc.Handler().HeadByAttribute(r)
-		require.Equal(t, putRes.ObjectID, string(r.Response.Header.Peek(hdrObjectID)))
-		require.Equal(t, putRes.ContainerID, string(r.Response.Header.Peek(hdrContainerID)))
+		require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode())
 	})

 	t.Run("zip", func(t *testing.T) {
@@ -325,185 +324,281 @@ func TestBasic(t *testing.T) {
 	})
 }

-func TestFindObjectByAttribute(t *testing.T) {
+func prepareHandlerAndBucket(t *testing.T) (*handlerContext, cid.ID) {
 	hc := prepareHandlerContext(t)
-	hc.cfg.additionalSearch = true

 	bktName := "bucket"
 	cnrID, cnr, err := hc.prepareContainer(bktName, acl.PublicRWExtended)
 	require.NoError(t, err)
 	hc.frostfs.SetContainer(cnrID, cnr)

-	ctx := context.Background()
+	return hc, cnrID
-	ctx = middleware.SetNamespace(ctx, "")
-
-	content := "hello"
-	r, err := prepareUploadRequest(ctx, cnrID.EncodeToString(), content)
-	require.NoError(t, err)
-
-	hc.Handler().Upload(r)
-	require.Equal(t, r.Response.StatusCode(), http.StatusOK)
-
-	var putRes putResponse
-	err = json.Unmarshal(r.Response.Body(), &putRes)
-	require.NoError(t, err)
-
-	testAttrVal1 := "/folder/cat.jpg"
-	testAttrVal2 := "cat.jpg"
-	testAttrVal3 := "test-attr-val3"
-
-	for _, tc := range []struct {
-		name string
-		firstAttr object.Attribute
-		secondAttr object.Attribute
-		reqAttrKey string
-		reqAttrValue string
-		err string
-		additionalSearch bool
-	}{
-		{
-			name: "success search by FileName",
-			firstAttr: prepareObjectAttributes(attrFilePath, testAttrVal1),
-			secondAttr: prepareObjectAttributes(attrFileName, testAttrVal2),
-			reqAttrKey: attrFileName,
-			reqAttrValue: testAttrVal2,
-			additionalSearch: false,
-		},
-		{
-			name: "failed search by FileName",
-			firstAttr: prepareObjectAttributes(attrFilePath, testAttrVal1),
-			secondAttr: prepareObjectAttributes(attrFileName, testAttrVal2),
-			reqAttrKey: attrFileName,
-			reqAttrValue: testAttrVal3,
-			err: "not found",
-			additionalSearch: false,
-		},
-		{
-			name: "success search by FilePath (with additional search)",
-			firstAttr: prepareObjectAttributes(attrFilePath, testAttrVal1),
-			secondAttr: prepareObjectAttributes(attrFileName, testAttrVal2),
-			reqAttrKey: attrFilePath,
-			reqAttrValue: testAttrVal2,
-			additionalSearch: true,
-		},
-		{
-			name: "failed by FilePath (with additional search)",
-			firstAttr: prepareObjectAttributes(attrFilePath, testAttrVal1),
-			secondAttr: prepareObjectAttributes(attrFileName, testAttrVal2),
-			reqAttrKey: attrFilePath,
-			reqAttrValue: testAttrVal3,
-			err: "not found",
-			additionalSearch: true,
-		},
-		{
-			name: "success search by FilePath with leading slash (with additional search)",
-			firstAttr: prepareObjectAttributes(attrFilePath, testAttrVal1),
-			secondAttr: prepareObjectAttributes(attrFileName, testAttrVal2),
-			reqAttrKey: attrFilePath,
-			reqAttrValue: "/cat.jpg",
-			additionalSearch: true,
-		},
-	} {
-		t.Run(tc.name, func(t *testing.T) {
-			obj := hc.frostfs.objects[putRes.ContainerID+"/"+putRes.ObjectID]
|
|
||||||
obj.SetAttributes(tc.firstAttr, tc.secondAttr)
|
|
||||||
hc.cfg.additionalSearch = tc.additionalSearch
|
|
||||||
|
|
||||||
objID, err := hc.Handler().findObjectByAttribute(ctx, cnrID, tc.reqAttrKey, tc.reqAttrValue)
|
|
||||||
if tc.err != "" {
|
|
||||||
require.Error(t, err)
|
|
||||||
require.Contains(t, err.Error(), tc.err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Equal(t, putRes.ObjectID, objID.EncodeToString())
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestNeedSearchByFileName(t *testing.T) {
|
func TestGetObjectWithFallback(t *testing.T) {
|
||||||
hc := prepareHandlerContext(t)
|
ctx := middleware.SetNamespace(context.Background(), "")
|
||||||
|
|
||||||
for _, tc := range []struct {
|
t.Run("by oid", func(t *testing.T) {
|
||||||
name string
|
hc, cnrID := prepareHandlerAndBucket(t)
|
||||||
attrKey string
|
|
||||||
attrVal string
|
|
||||||
additionalSearch bool
|
|
||||||
expected bool
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
name: "need search - not contains slash",
|
|
||||||
attrKey: attrFilePath,
|
|
||||||
attrVal: "cat.png",
|
|
||||||
additionalSearch: true,
|
|
||||||
expected: true,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "need search - single lead slash",
|
|
||||||
attrKey: attrFilePath,
|
|
||||||
attrVal: "/cat.png",
|
|
||||||
additionalSearch: true,
|
|
||||||
expected: true,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "don't need search - single slash but not lead",
|
|
||||||
attrKey: attrFilePath,
|
|
||||||
attrVal: "cats/cat.png",
|
|
||||||
additionalSearch: true,
|
|
||||||
expected: false,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "don't need search - more one slash",
|
|
||||||
attrKey: attrFilePath,
|
|
||||||
attrVal: "/cats/cat.png",
|
|
||||||
additionalSearch: true,
|
|
||||||
expected: false,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "don't need search - incorrect attribute key",
|
|
||||||
attrKey: attrFileName,
|
|
||||||
attrVal: "cat.png",
|
|
||||||
additionalSearch: true,
|
|
||||||
expected: false,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "don't need search - additional search disabled",
|
|
||||||
attrKey: attrFilePath,
|
|
||||||
attrVal: "cat.png",
|
|
||||||
additionalSearch: false,
|
|
||||||
expected: false,
|
|
||||||
},
|
|
||||||
} {
|
|
||||||
t.Run(tc.name, func(t *testing.T) {
|
|
||||||
hc.cfg.additionalSearch = tc.additionalSearch
|
|
||||||
|
|
||||||
res := hc.h.needSearchByFileName(tc.attrKey, tc.attrVal)
|
obj1ID := oidtest.ID()
|
||||||
require.Equal(t, tc.expected, res)
|
obj1 := object.New()
|
||||||
})
|
obj1.SetID(obj1ID)
|
||||||
}
|
obj1.SetPayload([]byte("obj1"))
|
||||||
|
hc.frostfs.objects[cnrID.String()+"/"+obj1ID.String()] = obj1
|
||||||
|
|
||||||
|
r := prepareGetRequest(ctx, cnrID.EncodeToString(), obj1ID.String())
|
||||||
|
hc.Handler().DownloadByAddressOrBucketName(r)
|
||||||
|
require.Equal(t, string(obj1.Payload()), string(r.Response.Body()))
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("by filepath as it is", func(t *testing.T) {
|
||||||
|
hc, cnrID := prepareHandlerAndBucket(t)
|
||||||
|
|
||||||
|
obj1ID := oidtest.ID()
|
||||||
|
obj1 := object.New()
|
||||||
|
obj1.SetID(obj1ID)
|
||||||
|
obj1.SetPayload([]byte("obj1"))
|
||||||
|
obj1.SetAttributes(prepareObjectAttributes(object.AttributeFilePath, "filepath/obj1"))
|
||||||
|
hc.frostfs.objects[cnrID.String()+"/"+obj1ID.String()] = obj1
|
||||||
|
|
||||||
|
obj2ID := oidtest.ID()
|
||||||
|
obj2 := object.New()
|
||||||
|
obj2.SetID(obj2ID)
|
||||||
|
obj2.SetPayload([]byte("obj2"))
|
||||||
|
obj2.SetAttributes(prepareObjectAttributes(object.AttributeFilePath, "/filepath/obj2"))
|
||||||
|
hc.frostfs.objects[cnrID.String()+"/"+obj2ID.String()] = obj2
|
||||||
|
|
||||||
|
r := prepareGetRequest(ctx, cnrID.EncodeToString(), "filepath/obj1")
|
||||||
|
hc.Handler().DownloadByAddressOrBucketName(r)
|
||||||
|
require.Equal(t, string(obj1.Payload()), string(r.Response.Body()))
|
||||||
|
|
||||||
|
r = prepareGetRequest(ctx, cnrID.EncodeToString(), "/filepath/obj2")
|
||||||
|
hc.Handler().DownloadByAddressOrBucketName(r)
|
||||||
|
require.Equal(t, string(obj2.Payload()), string(r.Response.Body()))
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("by filepath slash fallback", func(t *testing.T) {
|
||||||
|
hc, cnrID := prepareHandlerAndBucket(t)
|
||||||
|
|
||||||
|
obj1ID := oidtest.ID()
|
||||||
|
obj1 := object.New()
|
||||||
|
obj1.SetID(obj1ID)
|
||||||
|
obj1.SetPayload([]byte("obj1"))
|
||||||
|
obj1.SetAttributes(prepareObjectAttributes(object.AttributeFilePath, "filepath/obj1"))
|
||||||
|
hc.frostfs.objects[cnrID.String()+"/"+obj1ID.String()] = obj1
|
||||||
|
|
||||||
|
r := prepareGetRequest(ctx, cnrID.EncodeToString(), "/filepath/obj1")
|
||||||
|
hc.Handler().DownloadByAddressOrBucketName(r)
|
||||||
|
require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode())
|
||||||
|
|
||||||
|
hc.cfg.additionalSlashSearch = true
|
||||||
|
|
||||||
|
r = prepareGetRequest(ctx, cnrID.EncodeToString(), "/filepath/obj1")
|
||||||
|
hc.Handler().DownloadByAddressOrBucketName(r)
|
||||||
|
require.Equal(t, string(obj1.Payload()), string(r.Response.Body()))
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("by filename fallback", func(t *testing.T) {
|
||||||
|
hc, cnrID := prepareHandlerAndBucket(t)
|
||||||
|
|
||||||
|
obj1ID := oidtest.ID()
|
||||||
|
obj1 := object.New()
|
||||||
|
obj1.SetID(obj1ID)
|
||||||
|
obj1.SetPayload([]byte("obj1"))
|
||||||
|
obj1.SetAttributes(prepareObjectAttributes(object.AttributeFileName, "filename/obj1"))
|
||||||
|
hc.frostfs.objects[cnrID.String()+"/"+obj1ID.String()] = obj1
|
||||||
|
|
||||||
|
r := prepareGetRequest(ctx, cnrID.EncodeToString(), "filename/obj1")
|
||||||
|
hc.Handler().DownloadByAddressOrBucketName(r)
|
||||||
|
require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode())
|
||||||
|
|
||||||
|
hc.cfg.additionalFilenameSearch = true
|
||||||
|
|
||||||
|
r = prepareGetRequest(ctx, cnrID.EncodeToString(), "filename/obj1")
|
||||||
|
hc.Handler().DownloadByAddressOrBucketName(r)
|
||||||
|
require.Equal(t, string(obj1.Payload()), string(r.Response.Body()))
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("by filename and slash fallback", func(t *testing.T) {
|
||||||
|
hc, cnrID := prepareHandlerAndBucket(t)
|
||||||
|
|
||||||
|
obj1ID := oidtest.ID()
|
||||||
|
obj1 := object.New()
|
||||||
|
obj1.SetID(obj1ID)
|
||||||
|
obj1.SetPayload([]byte("obj1"))
|
||||||
|
obj1.SetAttributes(prepareObjectAttributes(object.AttributeFileName, "filename/obj1"))
|
||||||
|
hc.frostfs.objects[cnrID.String()+"/"+obj1ID.String()] = obj1
|
||||||
|
|
||||||
|
r := prepareGetRequest(ctx, cnrID.EncodeToString(), "/filename/obj1")
|
||||||
|
hc.Handler().DownloadByAddressOrBucketName(r)
|
||||||
|
require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode())
|
||||||
|
|
||||||
|
hc.cfg.additionalFilenameSearch = true
|
||||||
|
|
||||||
|
r = prepareGetRequest(ctx, cnrID.EncodeToString(), "/filename/obj1")
|
||||||
|
hc.Handler().DownloadByAddressOrBucketName(r)
|
||||||
|
require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode())
|
||||||
|
|
||||||
|
hc.cfg.additionalSlashSearch = true
|
||||||
|
|
||||||
|
r = prepareGetRequest(ctx, cnrID.EncodeToString(), "/filename/obj1")
|
||||||
|
hc.Handler().DownloadByAddressOrBucketName(r)
|
||||||
|
require.Equal(t, string(obj1.Payload()), string(r.Response.Body()))
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("index fallback", func(t *testing.T) {
|
||||||
|
hc, cnrID := prepareHandlerAndBucket(t)
|
||||||
|
|
||||||
|
obj1ID := oidtest.ID()
|
||||||
|
obj1 := object.New()
|
||||||
|
obj1.SetID(obj1ID)
|
||||||
|
obj1.SetPayload([]byte("obj1"))
|
||||||
|
obj1.SetAttributes(prepareObjectAttributes(object.AttributeFilePath, "filepath/index.html"))
|
||||||
|
hc.frostfs.objects[cnrID.String()+"/"+obj1ID.String()] = obj1
|
||||||
|
|
||||||
|
r := prepareGetRequest(ctx, cnrID.EncodeToString(), "filepath/")
|
||||||
|
hc.Handler().DownloadByAddressOrBucketName(r)
|
||||||
|
require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode())
|
||||||
|
|
||||||
|
r = prepareGetRequest(ctx, cnrID.EncodeToString(), "filepath")
|
||||||
|
hc.Handler().DownloadByAddressOrBucketName(r)
|
||||||
|
require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode())
|
||||||
|
|
||||||
|
hc.cfg.indexEnabled = true
|
||||||
|
|
||||||
|
r = prepareGetRequest(ctx, cnrID.EncodeToString(), "filepath")
|
||||||
|
hc.Handler().DownloadByAddressOrBucketName(r)
|
||||||
|
require.Equal(t, string(obj1.Payload()), string(r.Response.Body()))
|
||||||
|
|
||||||
|
r = prepareGetRequest(ctx, cnrID.EncodeToString(), "filepath/")
|
||||||
|
hc.Handler().DownloadByAddressOrBucketName(r)
|
||||||
|
require.Equal(t, string(obj1.Payload()), string(r.Response.Body()))
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("index filename fallback", func(t *testing.T) {
|
||||||
|
hc, cnrID := prepareHandlerAndBucket(t)
|
||||||
|
|
||||||
|
obj1ID := oidtest.ID()
|
||||||
|
obj1 := object.New()
|
||||||
|
obj1.SetID(obj1ID)
|
||||||
|
obj1.SetPayload([]byte("obj1"))
|
||||||
|
obj1.SetAttributes(prepareObjectAttributes(object.AttributeFileName, "filename/index.html"))
|
||||||
|
hc.frostfs.objects[cnrID.String()+"/"+obj1ID.String()] = obj1
|
||||||
|
|
||||||
|
r := prepareGetRequest(ctx, cnrID.EncodeToString(), "filename/")
|
||||||
|
hc.Handler().DownloadByAddressOrBucketName(r)
|
||||||
|
require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode())
|
||||||
|
|
||||||
|
r = prepareGetRequest(ctx, cnrID.EncodeToString(), "filename")
|
||||||
|
hc.Handler().DownloadByAddressOrBucketName(r)
|
||||||
|
require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode())
|
||||||
|
|
||||||
|
hc.cfg.indexEnabled = true
|
||||||
|
hc.cfg.additionalFilenameSearch = true
|
||||||
|
|
||||||
|
r = prepareGetRequest(ctx, cnrID.EncodeToString(), "filename")
|
||||||
|
hc.Handler().DownloadByAddressOrBucketName(r)
|
||||||
|
require.Equal(t, string(obj1.Payload()), string(r.Response.Body()))
|
||||||
|
|
||||||
|
r = prepareGetRequest(ctx, cnrID.EncodeToString(), "filename/")
|
||||||
|
hc.Handler().DownloadByAddressOrBucketName(r)
|
||||||
|
require.Equal(t, string(obj1.Payload()), string(r.Response.Body()))
|
||||||
|
})
|
||||||
}
|
}
|
||||||
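Note on the fallback subtests above: they exercise three lookup strategies for a GET by name — the FilePath attribute exactly as given, the same path with the leading slash toggled (only when the slash fallback is enabled), and the FileName attribute (only when the filename fallback is enabled). The handler hunks later in this diff wire the slash case through a path former named reverseLeadingSlash whose body is not part of this change set; the snippet below is only a sketch of the behaviour the tests imply, not the gateway's actual helper.

package main

import (
	"fmt"
	"strings"
)

// Sketch only: retry the attribute lookup with the leading "/" toggled, which
// is what the "by filepath slash fallback" test above relies on.
func reverseLeadingSlash(path string) string {
	if path == "" || path == "/" {
		return path
	}
	if strings.HasPrefix(path, "/") {
		return strings.TrimPrefix(path, "/")
	}
	return "/" + path
}

func main() {
	fmt.Println(reverseLeadingSlash("/filepath/obj1")) // filepath/obj1
	fmt.Println(reverseLeadingSlash("filepath/obj1"))  // /filepath/obj1
}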
|
|
||||||
func TestPrepareFileName(t *testing.T) {
|
func TestIndex(t *testing.T) {
|
||||||
fileName := "/cat.jpg"
|
ctx := middleware.SetNamespace(context.Background(), "")
|
||||||
expected := "cat.jpg"
|
|
||||||
actual := prepareFileName(fileName)
|
|
||||||
require.Equal(t, expected, actual)
|
|
||||||
|
|
||||||
fileName = "cat.jpg"
|
t.Run("s3", func(t *testing.T) {
|
||||||
actual = prepareFileName(fileName)
|
hc, cnrID := prepareHandlerAndBucket(t)
|
||||||
require.Equal(t, expected, actual)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestPrepareFilePath(t *testing.T) {
|
obj1ID := oidtest.ID()
|
||||||
filePath := "cat.jpg"
|
obj1 := object.New()
|
||||||
expected := "/cat.jpg"
|
obj1.SetID(obj1ID)
|
||||||
actual := prepareFilePath(filePath)
|
obj1.SetPayload([]byte("obj1"))
|
||||||
require.Equal(t, expected, actual)
|
obj1.SetAttributes(prepareObjectAttributes(object.AttributeFilePath, "prefix/obj1"))
|
||||||
|
hc.frostfs.objects[cnrID.String()+"/"+obj1ID.String()] = obj1
|
||||||
|
|
||||||
filePath = "/cat.jpg"
|
hc.tree.containers[cnrID.String()] = containerInfo{
|
||||||
actual = prepareFilePath(filePath)
|
trees: map[string]map[string]nodeResponse{
|
||||||
require.Equal(t, expected, actual)
|
"system": {"bucket-settings": nodeResponse{nodeID: 1}},
|
||||||
|
"version": {
|
||||||
|
"": nodeResponse{}, //root
|
||||||
|
"prefix": nodeResponse{
|
||||||
|
nodeID: 1,
|
||||||
|
meta: []nodeMeta{{key: tree.FileNameKey, value: []byte("prefix")}}},
|
||||||
|
"obj1": nodeResponse{
|
||||||
|
parentID: 1,
|
||||||
|
nodeID: 2,
|
||||||
|
meta: []nodeMeta{
|
||||||
|
{key: tree.FileNameKey, value: []byte("obj1")},
|
||||||
|
{key: "OID", value: []byte(obj1ID.String())},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
r := prepareGetRequest(ctx, cnrID.EncodeToString(), "prefix/")
|
||||||
|
hc.Handler().DownloadByAddressOrBucketName(r)
|
||||||
|
require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode())
|
||||||
|
|
||||||
|
r = prepareGetRequest(ctx, cnrID.EncodeToString(), "prefix")
|
||||||
|
hc.Handler().DownloadByAddressOrBucketName(r)
|
||||||
|
require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode())
|
||||||
|
|
||||||
|
hc.cfg.indexEnabled = true
|
||||||
|
|
||||||
|
r = prepareGetRequest(ctx, cnrID.EncodeToString(), "prefix")
|
||||||
|
hc.Handler().DownloadByAddressOrBucketName(r)
|
||||||
|
require.Contains(t, string(r.Response.Body()), "Index of s3://bucket/prefix")
|
||||||
|
require.Contains(t, string(r.Response.Body()), obj1ID.String())
|
||||||
|
|
||||||
|
r = prepareGetRequest(ctx, cnrID.EncodeToString(), "prefix/")
|
||||||
|
hc.Handler().DownloadByAddressOrBucketName(r)
|
||||||
|
require.Contains(t, string(r.Response.Body()), "Index of s3://bucket/prefix")
|
||||||
|
require.Contains(t, string(r.Response.Body()), obj1ID.String())
|
||||||
|
|
||||||
|
r = prepareGetRequest(ctx, "bucket", "dummy")
|
||||||
|
hc.Handler().DownloadByAddressOrBucketName(r)
|
||||||
|
require.Contains(t, string(r.Response.Body()), "Index of s3://bucket/dummy")
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("native", func(t *testing.T) {
|
||||||
|
hc, cnrID := prepareHandlerAndBucket(t)
|
||||||
|
|
||||||
|
obj1ID := oidtest.ID()
|
||||||
|
obj1 := object.New()
|
||||||
|
obj1.SetID(obj1ID)
|
||||||
|
obj1.SetPayload([]byte("obj1"))
|
||||||
|
obj1.SetAttributes(prepareObjectAttributes(object.AttributeFilePath, "prefix/obj1"))
|
||||||
|
hc.frostfs.objects[cnrID.String()+"/"+obj1ID.String()] = obj1
|
||||||
|
|
||||||
|
r := prepareGetRequest(ctx, cnrID.EncodeToString(), "prefix/")
|
||||||
|
hc.Handler().DownloadByAddressOrBucketName(r)
|
||||||
|
require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode())
|
||||||
|
|
||||||
|
r = prepareGetRequest(ctx, cnrID.EncodeToString(), "prefix")
|
||||||
|
hc.Handler().DownloadByAddressOrBucketName(r)
|
||||||
|
require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode())
|
||||||
|
|
||||||
|
hc.cfg.indexEnabled = true
|
||||||
|
|
||||||
|
r = prepareGetRequest(ctx, cnrID.EncodeToString(), "prefix")
|
||||||
|
hc.Handler().DownloadByAddressOrBucketName(r)
|
||||||
|
require.Contains(t, string(r.Response.Body()), "Index of frostfs://"+cnrID.String()+"/prefix")
|
||||||
|
require.Contains(t, string(r.Response.Body()), obj1ID.String())
|
||||||
|
|
||||||
|
r = prepareGetRequest(ctx, cnrID.EncodeToString(), "prefix/")
|
||||||
|
hc.Handler().DownloadByAddressOrBucketName(r)
|
||||||
|
require.Contains(t, string(r.Response.Body()), "Index of frostfs://"+cnrID.String()+"/prefix")
|
||||||
|
require.Contains(t, string(r.Response.Body()), obj1ID.String())
|
||||||
|
|
||||||
|
r = prepareGetRequest(ctx, cnrID.EncodeToString(), "dummy")
|
||||||
|
hc.Handler().DownloadByAddressOrBucketName(r)
|
||||||
|
require.Contains(t, string(r.Response.Body()), "Index of frostfs://"+cnrID.String()+"/dummy")
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func prepareUploadRequest(ctx context.Context, bucket, content string) (*fasthttp.RequestCtx, error) {
|
func prepareUploadRequest(ctx context.Context, bucket, content string) (*fasthttp.RequestCtx, error) {
|
||||||
|
|
|
@ -5,11 +5,12 @@ import (
|
||||||
"errors"
|
"errors"
|
||||||
"io"
|
"io"
|
||||||
"net/http"
|
"net/http"
|
||||||
|
"net/url"
|
||||||
"strconv"
|
"strconv"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/layer"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
|
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
||||||
|
@ -128,6 +129,12 @@ func (h *Handler) HeadByAddressOrBucketName(req *fasthttp.RequestCtx) {
|
||||||
zap.String("oid", oidParam),
|
zap.String("oid", oidParam),
|
||||||
))
|
))
|
||||||
|
|
||||||
|
path, err := url.QueryUnescape(oidParam)
|
||||||
|
if err != nil {
|
||||||
|
h.logAndSendError(ctx, req, logs.FailedToUnescapePath, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
bktInfo, err := h.getBucketInfo(ctx, cidParam)
|
bktInfo, err := h.getBucketInfo(ctx, cidParam)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(ctx, req, logs.FailedToGetBucketInfo, err)
|
h.logAndSendError(ctx, req, logs.FailedToGetBucketInfo, err)
|
||||||
|
@ -135,18 +142,38 @@ func (h *Handler) HeadByAddressOrBucketName(req *fasthttp.RequestCtx) {
|
||||||
}
|
}
|
||||||
|
|
||||||
checkS3Err := h.tree.CheckSettingsNodeExists(ctx, bktInfo)
|
checkS3Err := h.tree.CheckSettingsNodeExists(ctx, bktInfo)
|
||||||
if checkS3Err != nil && !errors.Is(checkS3Err, layer.ErrNodeNotFound) {
|
if checkS3Err != nil && !errors.Is(checkS3Err, tree.ErrNodeNotFound) {
|
||||||
h.logAndSendError(ctx, req, logs.FailedToCheckIfSettingsNodeExist, checkS3Err)
|
h.logAndSendError(ctx, req, logs.FailedToCheckIfSettingsNodeExist, checkS3Err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
var objID oid.ID
|
prm := MiddlewareParam{
|
||||||
|
Context: ctx,
|
||||||
|
Request: req,
|
||||||
|
BktInfo: bktInfo,
|
||||||
|
Path: path,
|
||||||
|
}
|
||||||
|
|
||||||
|
indexPageEnabled := h.config.IndexPageEnabled()
|
||||||
|
|
||||||
if checkS3Err == nil {
|
if checkS3Err == nil {
|
||||||
h.byS3Path(ctx, req, bktInfo.CID, oidParam, h.headObject)
|
run(prm, h.errorMiddleware(logs.ObjectNotFound, tree.ErrNodeNotFound),
|
||||||
} else if err = objID.DecodeString(oidParam); err == nil {
|
Middleware{Func: h.byS3PathMiddleware(h.headObject, noopFormer), Enabled: true},
|
||||||
h.byNativeAddress(ctx, req, bktInfo.CID, objID, h.headObject)
|
Middleware{Func: h.byS3PathMiddleware(h.headObject, indexFormer), Enabled: indexPageEnabled},
|
||||||
|
)
|
||||||
} else {
|
} else {
|
||||||
h.logAndSendError(ctx, req, logs.InvalidOIDParam, err)
|
slashFallbackEnabled := h.config.EnableFilepathSlashFallback()
|
||||||
|
fileNameFallbackEnabled := h.config.EnableFilepathFallback()
|
||||||
|
|
||||||
|
run(prm, h.errorMiddleware(logs.ObjectNotFound, ErrObjectNotFound),
|
||||||
|
Middleware{Func: h.byAddressMiddleware(h.headObject), Enabled: true},
|
||||||
|
Middleware{Func: h.byAttributeSearchMiddleware(h.headObject, object.AttributeFilePath, noopFormer), Enabled: true},
|
||||||
|
Middleware{Func: h.byAttributeSearchMiddleware(h.headObject, object.AttributeFilePath, reverseLeadingSlash), Enabled: slashFallbackEnabled},
|
||||||
|
Middleware{Func: h.byAttributeSearchMiddleware(h.headObject, object.AttributeFileName, noopFormer), Enabled: fileNameFallbackEnabled},
|
||||||
|
Middleware{Func: h.byAttributeSearchMiddleware(h.headObject, object.AttributeFileName, reverseLeadingSlash), Enabled: fileNameFallbackEnabled && slashFallbackEnabled},
|
||||||
|
Middleware{Func: h.byAttributeSearchMiddleware(h.headObject, object.AttributeFilePath, indexFormer), Enabled: indexPageEnabled},
|
||||||
|
Middleware{Func: h.byAttributeSearchMiddleware(h.headObject, object.AttributeFileName, indexFormer), Enabled: fileNameFallbackEnabled && indexPageEnabled},
|
||||||
|
)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
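The hunk above replaces the hand-rolled if/else dispatch in HeadByAddressOrBucketName with a chain of Middleware entries passed to run together with an error responder. The definitions of run, Middleware and MiddlewareParam live elsewhere in this change set, so the sketch below only illustrates the chained-fallback idea suggested by the call sites; in particular the bool contract (true = try the next strategy) is an assumption, not the gateway's documented behaviour.

package main

import "fmt"

// Illustrative mirror of the names used above (MiddlewareParam, MiddlewareFunc,
// Middleware, run); the real definitions are outside this hunk.
type MiddlewareParam struct {
	Path string
}

type MiddlewareFunc func(prm MiddlewareParam) bool

type Middleware struct {
	Func    MiddlewareFunc
	Enabled bool
}

// run tries each enabled strategy in order; the fallback fires only when none
// of them resolves the request.
func run(prm MiddlewareParam, fallback MiddlewareFunc, middlewares ...Middleware) {
	for _, m := range middlewares {
		if !m.Enabled {
			continue // disabled fallbacks are skipped entirely
		}
		if !m.Func(prm) {
			return // this strategy handled the request (or already sent an error)
		}
	}
	fallback(prm) // nothing matched: report "object not found"
}

func main() {
	byPath := func(prm MiddlewareParam) bool {
		fmt.Println("lookup by path:", prm.Path)
		return true // pretend the object was not found, fall through
	}
	notFound := func(prm MiddlewareParam) bool {
		fmt.Println("404 for", prm.Path)
		return false
	}
	run(MiddlewareParam{Path: "/filepath/obj1"}, notFound,
		Middleware{Func: byPath, Enabled: true},
	)
}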
|
@ -157,3 +184,10 @@ func (h *Handler) HeadByAttribute(req *fasthttp.RequestCtx) {
|
||||||
|
|
||||||
h.byAttribute(ctx, req, h.headObject)
|
h.byAttribute(ctx, req, h.headObject)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (h *Handler) errorMiddleware(msg string, err error) MiddlewareFunc {
|
||||||
|
return func(prm MiddlewareParam) bool {
|
||||||
|
h.logAndSendError(prm.Context, prm.Request, msg, err)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
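One detail from the same file worth calling out: the handler now unescapes the whole request path with url.QueryUnescape before running the lookup chain, instead of treating the parameter as a raw OID (the log constant rename from FailedToUnescapeOIDParam to FailedToUnescapePath later in this diff matches that). A small stand-alone illustration of what the standard library call does; the input is made up.

package main

import (
	"fmt"
	"net/url"
)

// url.QueryUnescape decodes percent-escapes such as %2F back to "/" and also
// turns "+" into a space, so encoded object paths arrive decoded in the chain.
func main() {
	p, err := url.QueryUnescape("filepath%2Fobj%201")
	fmt.Println(p, err) // filepath/obj 1 <nil>
}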
141
internal/handler/tree_service_client_mock_test.go
Normal file
|
@ -0,0 +1,141 @@
|
||||||
|
package handler
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree"
|
||||||
|
)
|
||||||
|
|
||||||
|
type nodeMeta struct {
|
||||||
|
key string
|
||||||
|
value []byte
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m nodeMeta) GetKey() string {
|
||||||
|
return m.key
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m nodeMeta) GetValue() []byte {
|
||||||
|
return m.value
|
||||||
|
}
|
||||||
|
|
||||||
|
type nodeResponse struct {
|
||||||
|
meta []nodeMeta
|
||||||
|
nodeID uint64
|
||||||
|
parentID uint64
|
||||||
|
timestamp uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n nodeResponse) GetNodeID() []uint64 {
|
||||||
|
return []uint64{n.nodeID}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n nodeResponse) GetParentID() []uint64 {
|
||||||
|
return []uint64{n.parentID}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n nodeResponse) GetTimestamp() []uint64 {
|
||||||
|
return []uint64{n.timestamp}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n nodeResponse) GetMeta() []tree.Meta {
|
||||||
|
res := make([]tree.Meta, len(n.meta))
|
||||||
|
for i, value := range n.meta {
|
||||||
|
res[i] = value
|
||||||
|
}
|
||||||
|
return res
|
||||||
|
}
|
||||||
|
|
||||||
|
type containerInfo struct {
|
||||||
|
trees map[string]map[string]nodeResponse
|
||||||
|
}
|
||||||
|
|
||||||
|
type treeServiceClientMock struct {
|
||||||
|
containers map[string]containerInfo
|
||||||
|
}
|
||||||
|
|
||||||
|
func newTreeServiceClientMock() *treeServiceClientMock {
|
||||||
|
return &treeServiceClientMock{
|
||||||
|
containers: make(map[string]containerInfo),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *treeServiceClientMock) GetNodes(_ context.Context, p *tree.GetNodesParams) ([]tree.NodeResponse, error) {
|
||||||
|
cnr, ok := t.containers[p.CnrID.EncodeToString()]
|
||||||
|
if !ok {
|
||||||
|
return nil, tree.ErrNodeNotFound
|
||||||
|
}
|
||||||
|
|
||||||
|
tr, ok := cnr.trees[p.TreeID]
|
||||||
|
if !ok {
|
||||||
|
return nil, tree.ErrNodeNotFound
|
||||||
|
}
|
||||||
|
|
||||||
|
node, ok := tr[strings.Join(p.Path, "/")]
|
||||||
|
if !ok {
|
||||||
|
return nil, tree.ErrNodeNotFound
|
||||||
|
}
|
||||||
|
|
||||||
|
return []tree.NodeResponse{node}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *treeServiceClientMock) GetSubTree(_ context.Context, bktInfo *data.BucketInfo, treeID string, rootID []uint64, depth uint32, _ bool) ([]tree.NodeResponse, error) {
|
||||||
|
cnr, ok := t.containers[bktInfo.CID.EncodeToString()]
|
||||||
|
if !ok {
|
||||||
|
return nil, tree.ErrNodeNotFound
|
||||||
|
}
|
||||||
|
|
||||||
|
tr, ok := cnr.trees[treeID]
|
||||||
|
if !ok {
|
||||||
|
return nil, tree.ErrNodeNotFound
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(rootID) != 1 {
|
||||||
|
return nil, errors.New("invalid rootID")
|
||||||
|
}
|
||||||
|
|
||||||
|
var root *nodeResponse
|
||||||
|
for _, v := range tr {
|
||||||
|
if v.nodeID == rootID[0] {
|
||||||
|
root = &v
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if root == nil {
|
||||||
|
return nil, tree.ErrNodeNotFound
|
||||||
|
}
|
||||||
|
|
||||||
|
var res []nodeResponse
|
||||||
|
if depth == 0 {
|
||||||
|
for _, v := range tr {
|
||||||
|
res = append(res, v)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
res = append(res, *root)
|
||||||
|
depthIndex := 0
|
||||||
|
for i := uint32(0); i < depth-1; i++ {
|
||||||
|
childrenCount := 0
|
||||||
|
for _, v := range tr {
|
||||||
|
for j := range res[depthIndex:] {
|
||||||
|
if v.parentID == res[j].nodeID {
|
||||||
|
res = append(res, v)
|
||||||
|
childrenCount++
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
depthIndex = len(res) - childrenCount
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
res2 := make([]tree.NodeResponse, len(res))
|
||||||
|
for i := range res {
|
||||||
|
res2[i] = res[i]
|
||||||
|
}
|
||||||
|
|
||||||
|
return res2, nil
|
||||||
|
}
|
|
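The mock above serves both GetNodes and GetSubTree from a plain nested map, so tests seed it directly, as TestIndex does earlier in this change. A hypothetical helper (not part of the diff, but living in the same handler test package) that builds the same shape:

package handler

import "git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree"

// seedTreeMock is illustrative only: one "system" tree holding the
// bucket-settings node and one "version" tree with a root node and a single
// object node keyed by its file name, mirroring the TestIndex setup.
func seedTreeMock(cnrID, objName, objID string) *treeServiceClientMock {
	m := newTreeServiceClientMock()
	m.containers[cnrID] = containerInfo{
		trees: map[string]map[string]nodeResponse{
			"system": {"bucket-settings": {nodeID: 1}},
			"version": {
				"": {}, // root
				objName: {
					nodeID: 2,
					meta: []nodeMeta{
						{key: tree.FileNameKey, value: []byte(objName)},
						{key: "OID", value: []byte(objID)},
					},
				},
			},
		},
	}
	return m
}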
@ -6,9 +6,9 @@ import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/layer"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens"
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
|
||||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||||
|
@ -93,7 +93,7 @@ func formErrorResponse(err error) (string, int) {
|
||||||
switch {
|
switch {
|
||||||
case errors.Is(err, ErrAccessDenied):
|
case errors.Is(err, ErrAccessDenied):
|
||||||
return fmt.Sprintf("Storage Access Denied:\n%v", err), fasthttp.StatusForbidden
|
return fmt.Sprintf("Storage Access Denied:\n%v", err), fasthttp.StatusForbidden
|
||||||
case errors.Is(err, layer.ErrNodeAccessDenied):
|
case errors.Is(err, tree.ErrNodeAccessDenied):
|
||||||
return fmt.Sprintf("Tree Access Denied:\n%v", err), fasthttp.StatusForbidden
|
return fmt.Sprintf("Tree Access Denied:\n%v", err), fasthttp.StatusForbidden
|
||||||
case errors.Is(err, ErrQuotaLimitReached):
|
case errors.Is(err, ErrQuotaLimitReached):
|
||||||
return fmt.Sprintf("Quota Reached:\n%v", err), fasthttp.StatusConflict
|
return fmt.Sprintf("Quota Reached:\n%v", err), fasthttp.StatusConflict
|
||||||
|
@ -101,7 +101,7 @@ func formErrorResponse(err error) (string, int) {
|
||||||
return fmt.Sprintf("Container Not Found:\n%v", err), fasthttp.StatusNotFound
|
return fmt.Sprintf("Container Not Found:\n%v", err), fasthttp.StatusNotFound
|
||||||
case errors.Is(err, ErrObjectNotFound):
|
case errors.Is(err, ErrObjectNotFound):
|
||||||
return fmt.Sprintf("Object Not Found:\n%v", err), fasthttp.StatusNotFound
|
return fmt.Sprintf("Object Not Found:\n%v", err), fasthttp.StatusNotFound
|
||||||
case errors.Is(err, layer.ErrNodeNotFound):
|
case errors.Is(err, tree.ErrNodeNotFound):
|
||||||
return fmt.Sprintf("Tree Node Not Found:\n%v", err), fasthttp.StatusNotFound
|
return fmt.Sprintf("Tree Node Not Found:\n%v", err), fasthttp.StatusNotFound
|
||||||
case errors.Is(err, ErrGatewayTimeout):
|
case errors.Is(err, ErrGatewayTimeout):
|
||||||
return fmt.Sprintf("Gateway Timeout:\n%v", err), fasthttp.StatusGatewayTimeout
|
return fmt.Sprintf("Gateway Timeout:\n%v", err), fasthttp.StatusGatewayTimeout
|
||||||
|
|
|
@ -1,24 +0,0 @@
|
||||||
package layer
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"errors"
|
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
|
|
||||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
|
||||||
)
|
|
||||||
|
|
||||||
// TreeService provide interface to interact with tree service using s3 data models.
|
|
||||||
type TreeService interface {
|
|
||||||
GetLatestVersion(ctx context.Context, cnrID *cid.ID, objectName string) (*data.NodeVersion, error)
|
|
||||||
GetSubTreeByPrefix(ctx context.Context, bktInfo *data.BucketInfo, prefix string, latestOnly bool) ([]data.NodeInfo, string, error)
|
|
||||||
CheckSettingsNodeExists(ctx context.Context, bktInfo *data.BucketInfo) error
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
// ErrNodeNotFound is returned from Tree service in case of not found error.
|
|
||||||
ErrNodeNotFound = errors.New("not found")
|
|
||||||
|
|
||||||
// ErrNodeAccessDenied is returned from Tree service in case of access denied error.
|
|
||||||
ErrNodeAccessDenied = errors.New("access denied")
|
|
||||||
)
|
|
|
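With the internal/layer file above removed, the tree sentinel errors are declared in the tree package itself (see the tree/tree.go hunk below), and callers such as formErrorResponse switch their errors.Is checks accordingly. A minimal sketch of the migrated check, using only identifiers that appear in this diff:

package main

import (
	"errors"
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree"
)

// classify mirrors the mapping done by formErrorResponse: tree-service
// sentinels are matched with errors.Is even after being wrapped by callers.
func classify(err error) string {
	switch {
	case errors.Is(err, tree.ErrNodeNotFound):
		return "not found"
	case errors.Is(err, tree.ErrNodeAccessDenied):
		return "access denied"
	default:
		return "other"
	}
}

func main() {
	fmt.Println(classify(fmt.Errorf("latest version: %w", tree.ErrNodeNotFound))) // not found
}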
@ -108,7 +108,9 @@ const (
|
||||||
FailedToGetBucketInfo = "could not get bucket info"
|
FailedToGetBucketInfo = "could not get bucket info"
|
||||||
FailedToSubmitTaskToPool = "failed to submit task to pool"
|
FailedToSubmitTaskToPool = "failed to submit task to pool"
|
||||||
ObjectWasDeleted = "object was deleted"
|
ObjectWasDeleted = "object was deleted"
|
||||||
|
IndexWasDeleted = "index was deleted"
|
||||||
FailedToGetLatestVersionOfObject = "failed to get latest version of object"
|
FailedToGetLatestVersionOfObject = "failed to get latest version of object"
|
||||||
|
FailedToGetLatestVersionOfIndexObject = "failed to get latest version of index object"
|
||||||
FailedToCheckIfSettingsNodeExist = "failed to check if settings node exists"
|
FailedToCheckIfSettingsNodeExist = "failed to check if settings node exists"
|
||||||
FailedToListObjects = "failed to list objects"
|
FailedToListObjects = "failed to list objects"
|
||||||
FailedToParseTemplate = "failed to parse template"
|
FailedToParseTemplate = "failed to parse template"
|
||||||
|
@ -118,7 +120,7 @@ const (
|
||||||
FailedToGetObject = "failed to get object"
|
FailedToGetObject = "failed to get object"
|
||||||
FailedToGetObjectPayload = "failed to get object payload"
|
FailedToGetObjectPayload = "failed to get object payload"
|
||||||
FailedToFindObjectByAttribute = "failed to get find object by attribute"
|
FailedToFindObjectByAttribute = "failed to get find object by attribute"
|
||||||
FailedToUnescapeOIDParam = "failed to unescape oid param"
|
FailedToUnescapePath = "failed to unescape path"
|
||||||
InvalidOIDParam = "invalid oid param"
|
InvalidOIDParam = "invalid oid param"
|
||||||
CouldNotGetCORSConfiguration = "could not get cors configuration"
|
CouldNotGetCORSConfiguration = "could not get cors configuration"
|
||||||
EmptyOriginRequestHeader = "empty Origin request header"
|
EmptyOriginRequestHeader = "empty Origin request header"
|
||||||
|
@ -129,10 +131,9 @@ const (
|
||||||
|
|
||||||
// Log messages with the "external_storage" tag.
|
// Log messages with the "external_storage" tag.
|
||||||
const (
|
const (
|
||||||
ObjectNotFound = "object not found"
|
ObjectNotFound = "object not found"
|
||||||
ReadObjectListFailed = "read object list failed"
|
ReadObjectListFailed = "read object list failed"
|
||||||
ObjectNotFoundByFilePathTrySearchByFileName = "object not found by filePath attribute, try search by fileName"
|
ObjectUploaded = "object uploaded"
|
||||||
ObjectUploaded = "object uploaded"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// Log messages with the "external_storage_tree" tag.
|
// Log messages with the "external_storage_tree" tag.
|
||||||
|
|
|
@ -1,11 +1,9 @@
|
||||||
{{$container := .Container}}
|
{{$container := .Container}}
|
||||||
{{ $prefix := trimPrefix .Prefix }}
|
|
||||||
<!DOCTYPE html>
|
<!DOCTYPE html>
|
||||||
<html lang="en">
|
<html lang="en">
|
||||||
<head>
|
<head>
|
||||||
<meta charset="UTF-8"/>
|
<meta charset="UTF-8"/>
|
||||||
<title>Index of {{.Protocol}}://{{$container}}
|
<title>Index of {{.Protocol}}://{{$container}}/{{.Prefix}}</title>
|
||||||
/{{if $prefix}}/{{$prefix}}/{{end}}</title>
|
|
||||||
<style>
|
<style>
|
||||||
.alert {
|
.alert {
|
||||||
width: 80%;
|
width: 80%;
|
||||||
|
@ -40,7 +38,7 @@
|
||||||
</style>
|
</style>
|
||||||
</head>
|
</head>
|
||||||
<body>
|
<body>
|
||||||
<h1>Index of {{.Protocol}}://{{$container}}/{{if $prefix}}{{$prefix}}/{{end}}</h1>
|
<h1>Index of {{.Protocol}}://{{$container}}/{{.Prefix}}</h1>
|
||||||
{{ if .HasErrors }}
|
{{ if .HasErrors }}
|
||||||
<div class="alert">
|
<div class="alert">
|
||||||
Errors occurred while processing the request. Perhaps some objects are missing
|
Errors occurred while processing the request. Perhaps some objects are missing
|
||||||
|
@ -57,11 +55,11 @@
|
||||||
</tr>
|
</tr>
|
||||||
</thead>
|
</thead>
|
||||||
<tbody>
|
<tbody>
|
||||||
{{ $trimmedPrefix := trimPrefix $prefix }}
|
{{ $parentPrefix := getParent .Prefix }}
|
||||||
{{if $trimmedPrefix }}
|
{{if $parentPrefix }}
|
||||||
<tr>
|
<tr>
|
||||||
<td>
|
<td>
|
||||||
⮐<a href="/get/{{$container}}{{ urlencode $trimmedPrefix }}/">..</a>
|
⮐<a href="/get/{{$container}}{{ urlencode $parentPrefix }}/">..</a>
|
||||||
</td>
|
</td>
|
||||||
<td></td>
|
<td></td>
|
||||||
<td></td>
|
<td></td>
|
||||||
|
|
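The template above now derives the ".." link from getParent .Prefix instead of trimming the tail prefix itself. The helper's implementation is not included in this diff, so the function below is only a guess at its contract (return the parent prefix, or "" at the top level so the row is skipped); leading-slash and URL-encoding details are not determined by this hunk.

package main

import (
	"fmt"
	"path"
	"strings"
)

// Guess at getParent's behaviour, inferred from how the template uses it.
func getParent(prefix string) string {
	prefix = strings.Trim(prefix, "/")
	if prefix == "" {
		return ""
	}
	if parent := path.Dir(prefix); parent != "." {
		return parent
	}
	return ""
}

func main() {
	fmt.Println(getParent("prefix/sub/")) // prefix
	fmt.Println(getParent("prefix"))      // ""
}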
46
tree/tree.go
|
@ -7,7 +7,6 @@ import (
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/layer"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
|
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
|
||||||
|
@ -52,10 +51,10 @@ type (
|
||||||
|
|
||||||
var (
|
var (
|
||||||
// ErrNodeNotFound is returned from ServiceClient in case of not found error.
|
// ErrNodeNotFound is returned from ServiceClient in case of not found error.
|
||||||
ErrNodeNotFound = layer.ErrNodeNotFound
|
ErrNodeNotFound = errors.New("not found")
|
||||||
|
|
||||||
// ErrNodeAccessDenied is returned from ServiceClient service in case of access denied error.
|
// ErrNodeAccessDenied is returned from ServiceClient service in case of access denied error.
|
||||||
ErrNodeAccessDenied = layer.ErrNodeAccessDenied
|
ErrNodeAccessDenied = errors.New("access denied")
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
|
@ -259,7 +258,7 @@ func (c *Tree) getSystemNode(ctx context.Context, bktInfo *data.BucketInfo, name
|
||||||
nodes = filterMultipartNodes(nodes)
|
nodes = filterMultipartNodes(nodes)
|
||||||
|
|
||||||
if len(nodes) == 0 {
|
if len(nodes) == 0 {
|
||||||
return nil, layer.ErrNodeNotFound
|
return nil, ErrNodeNotFound
|
||||||
}
|
}
|
||||||
if len(nodes) != 1 {
|
if len(nodes) != 1 {
|
||||||
c.reqLogger(ctx).Warn(logs.FoundSeveralSystemTreeNodes, zap.String("name", name), logs.TagField(logs.TagExternalStorageTree))
|
c.reqLogger(ctx).Warn(logs.FoundSeveralSystemTreeNodes, zap.String("name", name), logs.TagField(logs.TagExternalStorageTree))
|
||||||
|
@ -303,7 +302,7 @@ func getLatestVersionNode(nodes []NodeResponse) (NodeResponse, error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
if targetIndexNode == -1 {
|
if targetIndexNode == -1 {
|
||||||
return nil, fmt.Errorf("latest version: %w", layer.ErrNodeNotFound)
|
return nil, fmt.Errorf("latest version: %w", ErrNodeNotFound)
|
||||||
}
|
}
|
||||||
|
|
||||||
return nodes[targetIndexNode], nil
|
return nodes[targetIndexNode], nil
|
||||||
|
@ -324,20 +323,23 @@ func pathFromName(objectName string) []string {
|
||||||
return strings.Split(objectName, separator)
|
return strings.Split(objectName, separator)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Tree) GetSubTreeByPrefix(ctx context.Context, bktInfo *data.BucketInfo, prefix string, latestOnly bool) ([]data.NodeInfo, string, error) {
|
func (c *Tree) GetSubTreeByPrefix(ctx context.Context, bktInfo *data.BucketInfo, prefix string, latestOnly bool) ([]data.NodeInfo, error) {
|
||||||
ctx, span := tracing.StartSpanFromContext(ctx, "tree.GetSubTreeByPrefix")
|
ctx, span := tracing.StartSpanFromContext(ctx, "tree.GetSubTreeByPrefix")
|
||||||
defer span.End()
|
defer span.End()
|
||||||
|
|
||||||
rootID, tailPrefix, err := c.determinePrefixNode(ctx, bktInfo, versionTree, prefix)
|
rootID, err := c.getPrefixNodeID(ctx, bktInfo, versionTree, strings.Split(prefix, separator))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, "", err
|
if errors.Is(err, ErrNodeNotFound) {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
return nil, err
|
||||||
}
|
}
|
||||||
subTree, err := c.service.GetSubTree(ctx, bktInfo, versionTree, rootID, 2, false)
|
subTree, err := c.service.GetSubTree(ctx, bktInfo, versionTree, rootID, 2, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if errors.Is(err, ErrNodeNotFound) {
|
if errors.Is(err, ErrNodeNotFound) {
|
||||||
return nil, "", nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
return nil, "", err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
nodesMap := make(map[string][]NodeResponse, len(subTree))
|
nodesMap := make(map[string][]NodeResponse, len(subTree))
|
||||||
|
@ -347,10 +349,6 @@ func (c *Tree) GetSubTreeByPrefix(ctx context.Context, bktInfo *data.BucketInfo,
|
||||||
}
|
}
|
||||||
|
|
||||||
fileName := GetFilename(node)
|
fileName := GetFilename(node)
|
||||||
if !strings.HasPrefix(fileName, tailPrefix) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
nodes := nodesMap[fileName]
|
nodes := nodesMap[fileName]
|
||||||
|
|
||||||
// Add all nodes if flag latestOnly is false.
|
// Add all nodes if flag latestOnly is false.
|
||||||
|
@ -374,7 +372,7 @@ func (c *Tree) GetSubTreeByPrefix(ctx context.Context, bktInfo *data.BucketInfo,
|
||||||
result = append(result, nodeResponseToNodeInfo(nodes)...)
|
result = append(result, nodeResponseToNodeInfo(nodes)...)
|
||||||
}
|
}
|
||||||
|
|
||||||
return result, strings.TrimSuffix(prefix, tailPrefix), nil
|
return result, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
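For call sites, the visible effect of the GetSubTreeByPrefix change above is one fewer return value: the trimmed-prefix string is gone, and a missing prefix node now yields nil, nil rather than an error. An illustrative caller (not taken from the diff, and assuming it sits inside the gateway module so the internal data package is importable):

package example

import (
	"context"

	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree"
)

// listLatestUnderPrefix shows the new two-value contract of GetSubTreeByPrefix;
// everything around the call is illustrative.
func listLatestUnderPrefix(ctx context.Context, c *tree.Tree, bkt *data.BucketInfo, prefix string) ([]data.NodeInfo, error) {
	nodes, err := c.GetSubTreeByPrefix(ctx, bkt, prefix, true)
	if err != nil {
		return nil, err
	}
	// nodes is nil (not an error) when no node matches the prefix.
	return nodes, nil
}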
func nodeResponseToNodeInfo(nodes []NodeResponse) []data.NodeInfo {
|
func nodeResponseToNodeInfo(nodes []NodeResponse) []data.NodeInfo {
|
||||||
|
@ -386,22 +384,6 @@ func nodeResponseToNodeInfo(nodes []NodeResponse) []data.NodeInfo {
|
||||||
return nodesInfo
|
return nodesInfo
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Tree) determinePrefixNode(ctx context.Context, bktInfo *data.BucketInfo, treeID, prefix string) ([]uint64, string, error) {
|
|
||||||
rootID := []uint64{0}
|
|
||||||
path := strings.Split(prefix, separator)
|
|
||||||
tailPrefix := path[len(path)-1]
|
|
||||||
|
|
||||||
if len(path) > 1 {
|
|
||||||
var err error
|
|
||||||
rootID, err = c.getPrefixNodeID(ctx, bktInfo, treeID, path[:len(path)-1])
|
|
||||||
if err != nil {
|
|
||||||
return nil, "", err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return rootID, tailPrefix, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Tree) getPrefixNodeID(ctx context.Context, bktInfo *data.BucketInfo, treeID string, prefixPath []string) ([]uint64, error) {
|
func (c *Tree) getPrefixNodeID(ctx context.Context, bktInfo *data.BucketInfo, treeID string, prefixPath []string) ([]uint64, error) {
|
||||||
p := &GetNodesParams{
|
p := &GetNodesParams{
|
||||||
CnrID: bktInfo.CID,
|
CnrID: bktInfo.CID,
|
||||||
|
@ -424,7 +406,7 @@ func (c *Tree) getPrefixNodeID(ctx context.Context, bktInfo *data.BucketInfo, tr
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(intermediateNodes) == 0 {
|
if len(intermediateNodes) == 0 {
|
||||||
return nil, layer.ErrNodeNotFound
|
return nil, ErrNodeNotFound
|
||||||
}
|
}
|
||||||
|
|
||||||
return intermediateNodes, nil
|
return intermediateNodes, nil
|
||||||
|
|