[#137] Add index page support

Signed-off-by: Nikita Zinkevich <n.zinkevich@yadro.com>

Parent: 843708a558
Commit: 242bf356d3

15 changed files with 593 additions and 86 deletions
@@ -90,6 +90,8 @@ type (
 		defaultTimestamp      bool
 		zipCompression        bool
 		clientCut             bool
+		returnIndexPage       bool
+		indexPageTemplatePath string
 		bufferMaxSizeForPut   uint64
 		namespaceHeader       string
 		defaultNamespaces     []string

@@ -176,12 +178,36 @@ func (s *appSettings) ZipCompression() bool {
 	return s.zipCompression
 }

+func (s *appSettings) IndexPageEnabled() bool {
+	s.mu.RLock()
+	defer s.mu.RUnlock()
+	return s.returnIndexPage
+}
+
+func (s *appSettings) IndexPageTemplatePath() string {
+	s.mu.RLock()
+	defer s.mu.RUnlock()
+	return s.indexPageTemplatePath
+}
+
 func (s *appSettings) setZipCompression(val bool) {
 	s.mu.Lock()
 	s.zipCompression = val
 	s.mu.Unlock()
 }

+func (s *appSettings) setReturnIndexPage(val bool) {
+	s.mu.Lock()
+	s.returnIndexPage = val
+	s.mu.Unlock()
+}
+
+func (s *appSettings) setIndexTemplatePath(val string) {
+	s.mu.Lock()
+	s.indexPageTemplatePath = val
+	s.mu.Unlock()
+}
+
 func (s *appSettings) ClientCut() bool {
 	s.mu.RLock()
 	defer s.mu.RUnlock()

@@ -498,6 +524,8 @@ func (a *app) configReload(ctx context.Context) {
 func (a *app) updateSettings() {
 	a.settings.setDefaultTimestamp(a.cfg.GetBool(cfgUploaderHeaderEnableDefaultTimestamp))
 	a.settings.setZipCompression(a.cfg.GetBool(cfgZipCompression))
+	a.settings.setReturnIndexPage(a.cfg.GetBool(cfgIndexPageEnabled))
+	a.settings.setIndexTemplatePath(a.cfg.GetString(cfgIndexPageTemplatePath))
 	a.settings.setClientCut(a.cfg.GetBool(cfgClientCut))
 	a.settings.setBufferMaxSizeForPut(a.cfg.GetUint64(cfgBufferMaxSizeForPut))
 	a.settings.setNamespaceHeader(a.cfg.GetString(cfgResolveNamespaceHeader))
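The new accessors follow the same reload-safe pattern as the existing `appSettings` fields: getters take a read lock, setters take the write lock, so a SIGHUP-triggered `updateSettings()` cannot race request handlers. A minimal standalone sketch of that pattern (not the gateway's actual type, just the idea):

```go
package main

import (
	"fmt"
	"sync"
)

// settings mirrors the appSettings convention above: every value that can be
// changed on config reload sits behind a single RWMutex.
type settings struct {
	mu              sync.RWMutex
	returnIndexPage bool
}

// IndexPageEnabled is the read path used by request handlers.
func (s *settings) IndexPageEnabled() bool {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return s.returnIndexPage
}

// setReturnIndexPage is the write path used only during config reload.
func (s *settings) setReturnIndexPage(val bool) {
	s.mu.Lock()
	s.returnIndexPage = val
	s.mu.Unlock()
}

func main() {
	s := &settings{}
	s.setReturnIndexPage(true)
	fmt.Println(s.IndexPageEnabled()) // true
}
```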
@@ -60,6 +60,9 @@ const (
 	cfgReconnectInterval = "reconnect_interval"

+	cfgIndexPageEnabled      = "index_page.enabled"
+	cfgIndexPageTemplatePath = "index_page.template_path"
+
 	// Web.
 	cfgWebReadBufferSize  = "web.read_buffer_size"
 	cfgWebWriteBufferSize = "web.write_buffer_size"

@@ -191,6 +194,9 @@ func settings() *viper.Viper {
 	// pool:
 	v.SetDefault(cfgPoolErrorThreshold, defaultPoolErrorThreshold)

+	v.SetDefault(cfgIndexPageEnabled, false)
+	v.SetDefault(cfgIndexPageTemplatePath, "")
+
 	// frostfs:
 	v.SetDefault(cfgBufferMaxSizeForPut, defaultBufferMaxSizeForPut)
@@ -101,6 +101,11 @@ request_timeout: 5s # Timeout to check node health during rebalance.
 rebalance_timer: 30s # Interval to check nodes health.
 pool_error_threshold: 100 # The number of errors on connection after which node is considered as unhealthy.

+# Enable index page to see objects list for specified container and prefix
+index_page:
+  enabled: false
+  template_path: /templates/index.gotmpl
+
 zip:
   compression: false # Enable zip compression to download files by common prefix.

docs/api.md (13 changed lines)
@@ -95,12 +95,12 @@ The `filename` field from the multipart form will be set as `FileName` attribute
 ## Get object

-Route: `/get/{cid}/{oid}?[download=true]`
+Route: `/get/{cid}/{oid}?[download=false]`

 | Route parameter | Type   | Description |
 |-----------------|--------|--------------------------------------------------------------------------------------------------------------------------------------------------------|
-| `cid`           | Single | Base58 encoded container ID or container name from NNS. |
-| `oid`           | Single | Base58 encoded object ID. |
+| `cid`           | Single | Base58 encoded `container ID` or `container name` from NNS or `bucket name`. |
+| `oid`           | Single | Base58 encoded `object ID`. Also could be `S3 object name` if `cid` is specified as bucket name. |
 | `download`      | Query  | Set the `Content-Disposition` header as `attachment` in response.<br/> This make the browser to download object as file instead of showing it on the page. |

 ### Methods

@@ -141,6 +141,13 @@ Get an object (payload and attributes) by an address.
 | 400 | Some error occurred during object downloading. |
 | 404 | Container or object not found.                 |

+###### Body
+
+Returns object data. If request performed from browser, either displays raw data or downloads it as
+attachment if `download` query parameter is set to `true`.
+If `index_page.enabled` is set to `true`, returns HTML with index-page if no object with specified
+S3-name was found.
+
 #### HEAD

 Get an object attributes by an address.
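To illustrate the behavior documented above: with `index_page.enabled: true`, a GET for a key that names a prefix rather than an object returns the rendered HTML listing instead of a 404. A small sketch (gateway address, bucket name and prefix are hypothetical):

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// "photos/" names a prefix, not an object, so the gateway falls back to
	// the index page when index_page.enabled is true.
	resp, err := http.Get("http://localhost:8080/get/mybucket/photos/")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(resp.Status)                // expected to be 200 with an HTML body
	fmt.Println(len(body), "bytes of HTML") // the rendered object listing
}
```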
@@ -57,6 +57,7 @@ $ cat http.log
 | `frostfs`        | [Frostfs configuration](#frostfs-section)                       |
 | `cache`          | [Cache configuration](#cache-section)                           |
 | `resolve_bucket` | [Bucket name resolving configuration](#resolve_bucket-section)  |
+| `index_page`     | [Index page configuration](#index_page-section)                 |

 # General section

@@ -76,7 +77,7 @@ reconnect_interval: 1m
 ```

 | Parameter               | Type       | SIGHUP reload | Default value | Description                                                                          |
-|------------------------|------------|---------------|----------------|------------------------------------------------------------------------------------|
+|------------------------|------------|---------------|---------------|-------------------------------------------------------------------------------------------------|
 | `rpc_endpoint`          | `string`   | yes           |               | The address of the RPC host to which the gateway connects to resolve bucket names.  |
 | `resolve_order`         | `[]string` | yes           | `[nns, dns]`  | Order of bucket name resolvers to use.                                               |
 | `connect_timeout`       | `duration` |               | `10s`         | Timeout to connect to a node.                                                        |

@@ -336,3 +337,18 @@ resolve_bucket:
 |----------------------|------------|---------------|-----------------------|----------------------------------------------------|
 | `namespace_header`   | `string`   | yes           | `X-Frostfs-Namespace` | Header to determine zone to resolve bucket name.    |
 | `default_namespaces` | `[]string` | yes           | ["","root"]           | Namespaces that should be handled as default.       |
+
+# `index_page` section
+
+Parameters for index HTML-page output with S3-bucket or S3-subdir content for `Get object` request
+
+```yaml
+index_page:
+  enabled: false
+  template_path: ""
+```
+
+| Parameter       | Type     | SIGHUP reload | Default value | Description                                                                      |
+|-----------------|----------|---------------|---------------|----------------------------------------------------------------------------------|
+| `enabled`       | `bool`   | yes           | `false`       | Flag to enable index_page return if no object with specified S3-name was found.  |
+| `template_path` | `string` | yes           | `""`          | Path to .gotmpl file with html template for index_page.                           |
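For context, the parameters documented above map directly onto the viper keys added in this commit (`index_page.enabled`, `index_page.template_path`). A standalone sketch of reading such values with viper, which the gateway's `settings()` also uses; the config fragment is hypothetical and the gateway itself wires the values through `updateSettings()`:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/spf13/viper"
)

func main() {
	v := viper.New()
	v.SetConfigType("yaml")

	// Defaults mirror the ones registered in settings().
	v.SetDefault("index_page.enabled", false)
	v.SetDefault("index_page.template_path", "")

	// Hypothetical config fragment.
	cfg := []byte("index_page:\n  enabled: true\n  template_path: /templates/index.gotmpl\n")
	if err := v.ReadConfig(bytes.NewReader(cfg)); err != nil {
		panic(err)
	}

	fmt.Println(v.GetBool("index_page.enabled"))         // true
	fmt.Println(v.GetString("index_page.template_path")) // /templates/index.gotmpl
}
```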
@@ -8,6 +8,7 @@ import (
 type NodeVersion struct {
 	BaseNodeVersion
 	DeleteMarker bool
+	IsPrefixNode bool
 }

 // BaseNodeVersion is minimal node info from tree service.
@@ -4,7 +4,9 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"io"

+	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens"
 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree"
 	treepool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree"

@@ -15,16 +17,16 @@ type GetNodeByPathResponseInfoWrapper struct {
 	response *grpcService.GetNodeByPathResponse_Info
 }

-func (n GetNodeByPathResponseInfoWrapper) GetNodeID() uint64 {
-	return n.response.GetNodeId()
+func (n GetNodeByPathResponseInfoWrapper) GetNodeID() []uint64 {
+	return []uint64{n.response.GetNodeId()}
 }

-func (n GetNodeByPathResponseInfoWrapper) GetParentID() uint64 {
-	return n.response.GetParentId()
+func (n GetNodeByPathResponseInfoWrapper) GetParentID() []uint64 {
+	return []uint64{n.response.GetParentId()}
 }

-func (n GetNodeByPathResponseInfoWrapper) GetTimestamp() uint64 {
-	return n.response.GetTimestamp()
+func (n GetNodeByPathResponseInfoWrapper) GetTimestamp() []uint64 {
+	return []uint64{n.response.GetTimestamp()}
 }

 func (n GetNodeByPathResponseInfoWrapper) GetMeta() []tree.Meta {

@@ -89,3 +91,73 @@ func handleError(err error) error {
 	return err
 }
+
+func (w *PoolWrapper) GetSubTree(ctx context.Context, bktInfo *data.BucketInfo, treeID string, rootID []uint64, depth uint32, sort bool) ([]tree.NodeResponse, error) {
+	order := treepool.NoneOrder
+	if sort {
+		order = treepool.AscendingOrder
+	}
+	poolPrm := treepool.GetSubTreeParams{
+		CID:         bktInfo.CID,
+		TreeID:      treeID,
+		RootID:      rootID,
+		Depth:       depth,
+		BearerToken: getBearer(ctx),
+		Order:       order,
+	}
+	if len(rootID) == 1 && rootID[0] == 0 {
+		// storage node interprets 'nil' value as []uint64{0}
+		// gate wants to send 'nil' value instead of []uint64{0}, because
+		// it provides compatibility with previous tree service api where
+		// single uint64(0) value is dropped from signature
+		poolPrm.RootID = nil
+	}
+
+	subTreeReader, err := w.p.GetSubTree(ctx, poolPrm)
+	if err != nil {
+		return nil, handleError(err)
+	}
+
+	var subtree []tree.NodeResponse
+
+	node, err := subTreeReader.Next()
+	for err == nil {
+		subtree = append(subtree, GetSubTreeResponseBodyWrapper{node})
+		node, err = subTreeReader.Next()
+	}
+	if err != nil && err != io.EOF {
+		return nil, handleError(err)
+	}
+
+	return subtree, nil
+}
+
+type GetSubTreeResponseBodyWrapper struct {
+	response *grpcService.GetSubTreeResponse_Body
+}
+
+func (n GetSubTreeResponseBodyWrapper) GetNodeID() []uint64 {
+	return n.response.GetNodeId()
+}
+
+func (n GetSubTreeResponseBodyWrapper) GetParentID() []uint64 {
+	resp := n.response.GetParentId()
+	if resp == nil {
+		// storage sends nil that should be interpreted as []uint64{0}
+		// due to protobuf compatibility, see 'GetSubTree' function
+		return []uint64{0}
+	}
+	return resp
+}
+
+func (n GetSubTreeResponseBodyWrapper) GetTimestamp() []uint64 {
+	return n.response.GetTimestamp()
+}
+
+func (n GetSubTreeResponseBodyWrapper) GetMeta() []tree.Meta {
+	res := make([]tree.Meta, len(n.response.Meta))
+	for i, value := range n.response.Meta {
+		res[i] = value
+	}
+	return res
+}

internal/handler/browse.go (new file, 99 lines)

package handler

import (
	"html/template"
	"path"
	"strconv"
	"strings"
	"time"

	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
	"github.com/valyala/fasthttp"
	"go.uber.org/zap"
	"golang.org/x/exp/slices"
)

const (
	dateFormat = "02-01-2006 15:04"

	attrOID, attrCreated, attrFileName, attrSize = "OID", "Created", "FileName", "Size"
)

type (
	BrowsePageData struct {
		BucketName,
		Prefix string
		Objects []ResponseObject
	}
	ResponseObject struct {
		OID      string
		Created  string
		FileName string
		Size     string
	}
)

func parseTimestamp(tstamp string) (time.Time, error) {
	millis, err := strconv.ParseInt(tstamp, 10, 64)
	if err != nil {
		return time.Time{}, err
	}

	return time.UnixMilli(millis), nil
}

func NewResponseObject(nodes map[string]string) ResponseObject {
	return ResponseObject{
		OID:      nodes[attrOID],
		Created:  nodes[attrCreated],
		FileName: nodes[attrFileName],
		Size:     nodes[attrSize],
	}
}

func formatTimestamp(strdate string) string {
	date, err := parseTimestamp(strdate)
	if err != nil || date.IsZero() {
		return ""
	}

	return date.Format(dateFormat)
}

func (h *Handler) browseObjects(c *fasthttp.RequestCtx, bucketName, prefix string) {
	var log = h.log.With(zap.String("bucket", bucketName))
	ctx := utils.GetContextFromRequest(c)
	nodes, err := h.listObjects(ctx, bucketName, prefix)
	if err != nil {
		logAndSendBucketError(c, log, err)
		return
	}

	respObjects := make([]ResponseObject, len(nodes))

	for i, node := range nodes {
		respObjects[i] = NewResponseObject(node)
	}

	slices.SortFunc(respObjects, func(a, b ResponseObject) int {
		if a.FileName < b.FileName {
			return -1
		}
		return 1
	})

	templatePath := h.config.IndexPageTemplatePath()
	tmpl, err := template.New(path.Base(templatePath)).Funcs(template.FuncMap{
		"formatTimestamp": formatTimestamp,
	}).ParseFiles(templatePath)
	if err != nil {
		logAndSendBucketError(c, log, err)
		return
	}
	if err = tmpl.Execute(c, &BrowsePageData{
		BucketName: bucketName,
		Prefix:     strings.TrimRight(prefix, "/"),
		Objects:    respObjects,
	}); err != nil {
		logAndSendBucketError(c, log, err)
		return
	}
}
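A note on the `Created` column: browse.go treats the attribute value as epoch milliseconds in string form (see `parseTimestamp`) and renders it with `dateFormat`. A tiny sketch of that conversion, using a made-up value and UTC so the output is deterministic:

```go
package main

import (
	"fmt"
	"strconv"
	"time"
)

func main() {
	created := "1700000000000" // hypothetical "Created" attribute value (epoch milliseconds)

	millis, err := strconv.ParseInt(created, 10, 64)
	if err != nil {
		panic(err)
	}

	// dateFormat from browse.go is "02-01-2006 15:04".
	fmt.Println(time.UnixMilli(millis).UTC().Format("02-01-2006 15:04")) // 14-11-2023 22:13
}
```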
@@ -30,6 +30,8 @@ type Config interface {
 	DefaultTimestamp() bool
 	ZipCompression() bool
 	ClientCut() bool
+	IndexPageEnabled() bool
+	IndexPageTemplatePath() string
 	BufferMaxSizeForPut() uint64
 	NamespaceHeader() string
 }

@@ -208,41 +210,47 @@ func (h *Handler) byAddress(c *fasthttp.RequestCtx, f func(context.Context, requ
 // byObjectName is a wrapper for function (e.g. request.headObject, request.receiveFile) that
 // prepares request and object address to it.
-func (h *Handler) byObjectName(req *fasthttp.RequestCtx, f func(context.Context, request, oid.Address)) {
+func (h *Handler) byObjectName(c *fasthttp.RequestCtx, f func(context.Context, request, oid.Address)) {
 	var (
-		bucketname = req.UserValue("cid").(string)
-		key        = req.UserValue("oid").(string)
+		bucketname = c.UserValue("cid").(string)
+		key        = c.UserValue("oid").(string)
 		log        = h.log.With(zap.String("bucketname", bucketname), zap.String("key", key))
 	)

 	unescapedKey, err := url.QueryUnescape(key)
 	if err != nil {
-		logAndSendBucketError(req, log, err)
+		logAndSendBucketError(c, log, err)
 		return
 	}

-	ctx := utils.GetContextFromRequest(req)
+	ctx := utils.GetContextFromRequest(c)

 	bktInfo, err := h.getBucketInfo(ctx, bucketname, log)
 	if err != nil {
-		logAndSendBucketError(req, log, err)
+		logAndSendBucketError(c, log, err)
 		return
 	}

+	needIndexPage := h.config.IndexPageEnabled()
 	foundOid, err := h.tree.GetLatestVersion(ctx, &bktInfo.CID, unescapedKey)
 	if err != nil {
 		if errors.Is(err, tree.ErrNodeAccessDenied) {
-			response.Error(req, "Access Denied", fasthttp.StatusForbidden)
-			return
-		}
-		log.Error(logs.GetLatestObjectVersion, zap.Error(err))
-		response.Error(req, "object wasn't found", fasthttp.StatusNotFound)
+			response.Error(c, "Access Denied", fasthttp.StatusForbidden)
+		} else if needIndexPage && string(c.Method()) != fasthttp.MethodHead {
+			h.browseObjects(c, bucketname, key)
+		} else {
+			log.Error(logs.GetLatestObjectVersion, zap.Error(err))
+			response.Error(c, "object wasn't found", fasthttp.StatusNotFound)
+		}
 		return
 	}
+	if foundOid.IsPrefixNode && needIndexPage {
+		h.browseObjects(c, bucketname, key)
+		return
+	}
 	if foundOid.DeleteMarker {
 		log.Error(logs.ObjectWasDeleted)
-		response.Error(req, "object deleted", fasthttp.StatusNotFound)
+		response.Error(c, "object deleted", fasthttp.StatusNotFound)
 		return
 	}

@@ -250,7 +258,7 @@ func (h *Handler) byObjectName(req *fasthttp.RequestCtx, f func(context.Context,
 	addr.SetContainer(bktInfo.CID)
 	addr.SetObject(foundOid.OID)

-	f(ctx, *h.newRequest(req, log), addr)
+	f(ctx, *h.newRequest(c, log), addr)
 }

 // byAttribute is a wrapper similar to byAddress.

@@ -379,3 +387,32 @@ func (h *Handler) readContainer(ctx context.Context, cnrID cid.ID) (*data.Bucket
 	return bktInfo, err
 }
+
+func (h *Handler) listObjects(ctx context.Context, bucketName, prefix string) ([]map[string]string, error) {
+	var (
+		log = h.log.With(zap.String("bucket", bucketName))
+	)
+	bucketInfo, err := h.getBucketInfo(ctx, bucketName, log)
+	if err != nil {
+		return nil, err
+	}
+	nodes, _, err := h.tree.GetSubTreeByPrefix(ctx, bucketInfo, prefix, true)
+	if err != nil {
+		return nil, err
+	}
+
+	var objects = make([]map[string]string, 0, len(nodes))
+	for _, node := range nodes {
+		meta := node.GetMeta()
+		if meta == nil {
+			continue
+		}
+		var obj = make(map[string]string, len(meta))
+		for _, m := range meta {
+			obj[m.GetKey()] = string(m.GetValue())
+		}
+		objects = append(objects, obj)
+	}
+
+	return objects, nil
+}
@@ -12,6 +12,7 @@ import (
 	"time"

 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/cache"
+	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler/middleware"
 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/resolver"
 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree"

@@ -37,6 +38,10 @@ func (t *treeClientMock) GetNodes(context.Context, *tree.GetNodesParams) ([]tree
 	return nil, nil
 }

+func (t *treeClientMock) GetSubTree(context.Context, *data.BucketInfo, string, []uint64, uint32, bool) ([]tree.NodeResponse, error) {
+	return nil, nil
+}
+
 type configMock struct {
 }

@@ -48,6 +53,14 @@ func (c *configMock) ZipCompression() bool {
 	return false
 }

+func (c *configMock) IndexPageEnabled() bool {
+	return false
+}
+
+func (c *configMock) IndexPageTemplatePath() string {
+	return ""
+}
+
 func (c *configMock) ClientCut() bool {
 	return false
 }
@@ -43,22 +43,22 @@ func (pr *putResponse) encode(w io.Writer) error {
 }

 // Upload handles multipart upload request.
-func (h *Handler) Upload(req *fasthttp.RequestCtx) {
+func (h *Handler) Upload(c *fasthttp.RequestCtx) {
 	var (
 		file       MultipartFile
 		idObj      oid.ID
 		addr       oid.Address
-		scid, _    = req.UserValue("cid").(string)
+		scid, _    = c.UserValue("cid").(string)
 		log        = h.log.With(zap.String("cid", scid))
-		bodyStream = req.RequestBodyStream()
+		bodyStream = c.RequestBodyStream()
 		drainBuf   = make([]byte, drainBufSize)
 	)

-	ctx := utils.GetContextFromRequest(req)
+	ctx := utils.GetContextFromRequest(c)

 	bktInfo, err := h.getBucketInfo(ctx, scid, log)
 	if err != nil {
-		logAndSendBucketError(req, log, err)
+		logAndSendBucketError(c, log, err)
 		return
 	}

@@ -75,21 +75,21 @@ func (h *Handler) Upload(req *fasthttp.RequestCtx) {
 			zap.Error(err),
 		)
 	}()
-	boundary := string(req.Request.Header.MultipartFormBoundary())
+	boundary := string(c.Request.Header.MultipartFormBoundary())
 	if file, err = fetchMultipartFile(h.log, bodyStream, boundary); err != nil {
 		log.Error(logs.CouldNotReceiveMultipartForm, zap.Error(err))
-		response.Error(req, "could not receive multipart/form: "+err.Error(), fasthttp.StatusBadRequest)
+		response.Error(c, "could not receive multipart/form: "+err.Error(), fasthttp.StatusBadRequest)
 		return
 	}
-	filtered, err := filterHeaders(h.log, &req.Request.Header)
+	filtered, err := filterHeaders(h.log, &c.Request.Header)
 	if err != nil {
 		log.Error(logs.CouldNotProcessHeaders, zap.Error(err))
-		response.Error(req, err.Error(), fasthttp.StatusBadRequest)
+		response.Error(c, err.Error(), fasthttp.StatusBadRequest)
 		return
 	}

 	now := time.Now()
-	if rawHeader := req.Request.Header.Peek(fasthttp.HeaderDate); rawHeader != nil {
+	if rawHeader := c.Request.Header.Peek(fasthttp.HeaderDate); rawHeader != nil {
 		if parsed, err := time.Parse(http.TimeFormat, string(rawHeader)); err != nil {
 			log.Warn(logs.CouldNotParseClientTime, zap.String("Date header", string(rawHeader)), zap.Error(err))
 		} else {

@@ -97,9 +97,9 @@ func (h *Handler) Upload(req *fasthttp.RequestCtx) {
 		}
 	}

-	if err = utils.PrepareExpirationHeader(req, h.frostfs, filtered, now); err != nil {
+	if err = utils.PrepareExpirationHeader(c, h.frostfs, filtered, now); err != nil {
 		log.Error(logs.CouldNotPrepareExpirationHeader, zap.Error(err))
-		response.Error(req, "could not prepare expiration header: "+err.Error(), fasthttp.StatusBadRequest)
+		response.Error(c, "could not prepare expiration header: "+err.Error(), fasthttp.StatusBadRequest)
 		return
 	}

@@ -143,7 +143,7 @@ func (h *Handler) Upload(req *fasthttp.RequestCtx) {
 	}

 	if idObj, err = h.frostfs.CreateObject(ctx, prm); err != nil {
-		h.handlePutFrostFSErr(req, err)
+		h.handlePutFrostFSErr(c, err)
 		return
 	}

@@ -151,9 +151,9 @@ func (h *Handler) Upload(req *fasthttp.RequestCtx) {
 	addr.SetContainer(bktInfo.CID)

 	// Try to return the response, otherwise, if something went wrong, throw an error.
-	if err = newPutResponse(addr).encode(req); err != nil {
+	if err = newPutResponse(addr).encode(c); err != nil {
 		log.Error(logs.CouldNotEncodeResponse, zap.Error(err))
-		response.Error(req, "could not encode response", fasthttp.StatusBadRequest)
+		response.Error(c, "could not encode response", fasthttp.StatusBadRequest)

 		return
 	}

@@ -170,8 +170,8 @@ func (h *Handler) Upload(req *fasthttp.RequestCtx) {
 		}
 	}
 	// Report status code and content type.
-	req.Response.SetStatusCode(fasthttp.StatusOK)
-	req.Response.Header.SetContentType(jsonHeader)
+	c.Response.SetStatusCode(fasthttp.StatusOK)
+	c.Response.Header.SetContentType(jsonHeader)
 }

 func (h *Handler) handlePutFrostFSErr(r *fasthttp.RequestCtx, err error) {

templates/index.gotmpl (new file, 58 lines)

{{$bucketName := .BucketName}}
{{$prefix := .Prefix }}
<!DOCTYPE html>
<html lang="en">
<head>
    <title>Index of {{$bucketName}}/{{$prefix}}</title>
    <style>
        table {
            width: 100%;
            border-collapse: collapse;
        }
        table, th, td {
            border: 1px solid black;
        }
        th, td {
            padding: 10px;
            text-align: left;
        }
        th {
            background-color: #f2f2f2;
        }
    </style>
</head>
<body>
<h1>Index of {{$bucketName}}/{{$prefix}}</h1>
<table>
    <thead>
    <tr>
        <th>Filename</th>
        <th>Size (bytes)</th>
        <th>Created</th>
    </tr>
    </thead>
    <tbody>
    {{if $prefix}}
    <tr>
        <td><a href="../">../</a></td>
        <td></td>
        <td></td>
    </tr>
    {{end}}
    {{range .Objects}}
    {{if .FileName}}
    <tr>
        <td>
            <a href="/get/{{ $bucketName }}/{{if $prefix}}{{$prefix}}/{{end}}{{.FileName}}{{if not .Size}}/{{end}}?download=true">
                {{.FileName}}{{if not .Size}}/{{end}}
            </a>
        </td>
        <td>{{.Size}}</td>
        <td>{{ formatTimestamp .Created }}</td>
    </tr>
    {{end}}
    {{end}}
    </tbody>
</table>
</body>
</html>
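A standalone sketch of how this template is rendered end to end. The struct definitions mirror `BrowsePageData`/`ResponseObject` from internal/handler/browse.go, the FuncMap registers the same `formatTimestamp` helper, and the data values are hypothetical; it assumes the repository checkout is the working directory so `templates/index.gotmpl` can be parsed from disk:

```go
package main

import (
	"html/template"
	"os"
	"path"
	"strconv"
	"time"
)

// Local copies of the shapes the handler passes into the template.
type ResponseObject struct{ OID, Created, FileName, Size string }

type BrowsePageData struct {
	BucketName, Prefix string
	Objects            []ResponseObject
}

func main() {
	// Same conversion the handler registers: epoch milliseconds -> "02-01-2006 15:04".
	formatTimestamp := func(ts string) string {
		millis, err := strconv.ParseInt(ts, 10, 64)
		if err != nil {
			return ""
		}
		return time.UnixMilli(millis).UTC().Format("02-01-2006 15:04")
	}

	templatePath := "templates/index.gotmpl"
	tmpl := template.Must(template.New(path.Base(templatePath)).
		Funcs(template.FuncMap{"formatTimestamp": formatTimestamp}).
		ParseFiles(templatePath))

	data := BrowsePageData{
		BucketName: "mybucket", // hypothetical bucket, prefix and objects
		Prefix:     "photos",
		Objects: []ResponseObject{
			{FileName: "cat.jpg", Size: "1024", Created: "1700000000000"},
			{FileName: "2024"}, // empty Size: rendered as a sub-prefix link
		},
	}
	if err := tmpl.Execute(os.Stdout, &data); err != nil {
		panic(err)
	}
}
```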
@@ -52,8 +52,8 @@ func BearerTokenFromCookie(h *fasthttp.RequestHeader) []byte {

 // StoreBearerTokenAppCtx extracts a bearer token from the header or cookie and stores
 // it in the application context.
-func StoreBearerTokenAppCtx(ctx context.Context, req *fasthttp.RequestCtx) (context.Context, error) {
-	tkn, err := fetchBearerToken(req)
+func StoreBearerTokenAppCtx(ctx context.Context, c *fasthttp.RequestCtx) (context.Context, error) {
+	tkn, err := fetchBearerToken(c)
 	if err != nil {
 		return nil, err
 	}

tree/tree.go (209 changed lines)

@@ -2,11 +2,13 @@ package tree

 import (
 	"context"
+	"errors"
 	"fmt"
 	"strings"

 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/api"
 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/api/layer"
+	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
 	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
 	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
 )

@@ -20,6 +22,7 @@ type (
 	// Each method must return ErrNodeNotFound or ErrNodeAccessDenied if relevant.
 	ServiceClient interface {
 		GetNodes(ctx context.Context, p *GetNodesParams) ([]NodeResponse, error)
+		GetSubTree(ctx context.Context, bktInfo *data.BucketInfo, treeID string, rootID []uint64, depth uint32, sort bool) ([]NodeResponse, error)
 	}

 	treeNode struct {

@@ -29,6 +32,7 @@ type (

 	GetNodesParams struct {
 		CnrID      cid.ID
+		BktInfo    *data.BucketInfo
 		TreeID     string
 		Path       []string
 		Meta       []string

@@ -54,6 +58,7 @@ const (

 	// keys for delete marker nodes.
 	isDeleteMarkerKV = "IsDeleteMarker"
+	sizeKV           = "Size"

 	// versionTree -- ID of a tree with object versions.
 	versionTree = "version"

@@ -73,26 +78,27 @@ type Meta interface {

 type NodeResponse interface {
 	GetMeta() []Meta
-	GetTimestamp() uint64
+	GetTimestamp() []uint64
+	GetNodeID() []uint64
 }

 func newTreeNode(nodeInfo NodeResponse) (*treeNode, error) {
-	treeNode := &treeNode{
+	tNode := &treeNode{
 		Meta: make(map[string]string, len(nodeInfo.GetMeta())),
 	}

 	for _, kv := range nodeInfo.GetMeta() {
 		switch kv.GetKey() {
 		case oidKV:
-			if err := treeNode.ObjID.DecodeString(string(kv.GetValue())); err != nil {
+			if err := tNode.ObjID.DecodeString(string(kv.GetValue())); err != nil {
 				return nil, err
 			}
 		default:
-			treeNode.Meta[kv.GetKey()] = string(kv.GetValue())
+			tNode.Meta[kv.GetKey()] = string(kv.GetValue())
 		}
 	}

-	return treeNode, nil
+	return tNode, nil
 }

 func (n *treeNode) Get(key string) (string, bool) {

@@ -106,29 +112,44 @@ func (n *treeNode) FileName() (string, bool) {
 }

 func newNodeVersion(node NodeResponse) (*api.NodeVersion, error) {
-	treeNode, err := newTreeNode(node)
+	tNode, err := newTreeNode(node)
 	if err != nil {
 		return nil, fmt.Errorf("invalid tree node: %w", err)
 	}

-	return newNodeVersionFromTreeNode(treeNode), nil
+	return newNodeVersionFromTreeNode(tNode), nil
 }

 func newNodeVersionFromTreeNode(treeNode *treeNode) *api.NodeVersion {
 	_, isDeleteMarker := treeNode.Get(isDeleteMarkerKV)
+	size, _ := treeNode.Get(sizeKV)
 	version := &api.NodeVersion{
 		BaseNodeVersion: api.BaseNodeVersion{
 			OID: treeNode.ObjID,
 		},
 		DeleteMarker: isDeleteMarker,
+		IsPrefixNode: size == "",
 	}

 	return version
 }

 func (c *Tree) GetLatestVersion(ctx context.Context, cnrID *cid.ID, objectName string) (*api.NodeVersion, error) {
-	meta := []string{oidKV, isDeleteMarkerKV}
+	nodes, err := c.GetVersions(ctx, cnrID, objectName)
+	if err != nil {
+		return nil, err
+	}
+
+	latestNode, err := getLatestVersionNode(nodes)
+	if err != nil {
+		return nil, err
+	}
+
+	return newNodeVersion(latestNode)
+}
+
+func (c *Tree) GetVersions(ctx context.Context, cnrID *cid.ID, objectName string) ([]NodeResponse, error) {
+	meta := []string{oidKV, isDeleteMarkerKV, sizeKV}
 	path := pathFromName(objectName)

 	p := &GetNodesParams{

@@ -139,30 +160,24 @@ func (c *Tree) GetLatestVersion(ctx context.Context, cnrID *cid.ID, objectName s
 		LatestOnly: false,
 		AllAttrs:   false,
 	}
-	nodes, err := c.service.GetNodes(ctx, p)
-	if err != nil {
-		return nil, err
-	}
-
-	latestNode, err := getLatestNode(nodes)
-	if err != nil {
-		return nil, err
-	}
-
-	return newNodeVersion(latestNode)
+
+	return c.service.GetNodes(ctx, p)
 }

-func getLatestNode(nodes []NodeResponse) (NodeResponse, error) {
+func getLatestVersionNode(nodes []NodeResponse) (NodeResponse, error) {
 	var (
 		maxCreationTime uint64
 		targetIndexNode = -1
 	)

 	for i, node := range nodes {
-		currentCreationTime := node.GetTimestamp()
-		if checkExistOID(node.GetMeta()) && currentCreationTime > maxCreationTime {
-			maxCreationTime = currentCreationTime
+		if !checkExistOID(node.GetMeta()) {
+			continue
+		}
+
+		if currentCreationTime := getMaxTimestamp(node); currentCreationTime > maxCreationTime {
 			targetIndexNode = i
+			maxCreationTime = currentCreationTime
 		}
 	}

@@ -187,3 +202,149 @@ func checkExistOID(meta []Meta) bool {
 func pathFromName(objectName string) []string {
 	return strings.Split(objectName, separator)
 }
+
+func (c *Tree) GetSubTreeByPrefix(ctx context.Context, bktInfo *data.BucketInfo, prefix string, latestOnly bool) ([]NodeResponse, string, error) {
+	rootID, tailPrefix, err := c.determinePrefixNode(ctx, bktInfo, versionTree, prefix)
+	if err != nil {
+		if errors.Is(err, layer.ErrNodeNotFound) {
+			return nil, "", nil
+		}
+		return nil, "", err
+	}
+
+	subTree, err := c.service.GetSubTree(ctx, bktInfo, versionTree, rootID, 2, false)
+	if err != nil {
+		if errors.Is(err, layer.ErrNodeNotFound) {
+			return nil, "", nil
+		}
+		return nil, "", err
+	}
+
+	nodesMap := make(map[string][]NodeResponse, len(subTree))
+	for _, node := range subTree {
+		if MultiID(rootID).Equal(node.GetNodeID()) {
+			continue
+		}
+
+		fileName := getFilename(node)
+		if !strings.HasPrefix(fileName, tailPrefix) {
+			continue
+		}
+
+		nodes := nodesMap[fileName]
+
+		// Add all nodes if flag latestOnly is false.
+		// Add all intermediate nodes
+		// and only latest leaf (object) nodes. To do this store and replace last leaf (object) node in nodes[0]
+		if len(nodes) == 0 {
+			nodes = []NodeResponse{node}
+		} else if !latestOnly || isIntermediate(node) {
+			nodes = append(nodes, node)
+		} else if isIntermediate(nodes[0]) {
+			nodes = append([]NodeResponse{node}, nodes...)
+		} else if getMaxTimestamp(node) > getMaxTimestamp(nodes[0]) {
+			nodes[0] = node
+		}
+
+		nodesMap[fileName] = nodes
+	}
+
+	result := make([]NodeResponse, 0, len(subTree))
+	for _, nodes := range nodesMap {
+		result = append(result, nodes...)
+	}
+
+	return result, strings.TrimSuffix(prefix, tailPrefix), nil
+}
+
+func (c *Tree) determinePrefixNode(ctx context.Context, bktInfo *data.BucketInfo, treeID, prefix string) ([]uint64, string, error) {
+	rootID := []uint64{0}
+	path := strings.Split(prefix, separator)
+	tailPrefix := path[len(path)-1]
+
+	if len(path) > 1 {
+		var err error
+		rootID, err = c.getPrefixNodeID(ctx, bktInfo, treeID, path[:len(path)-1])
+		if err != nil {
+			return nil, "", err
+		}
+	}
+
+	return rootID, tailPrefix, nil
+}
+
+func (c *Tree) getPrefixNodeID(ctx context.Context, bktInfo *data.BucketInfo, treeID string, prefixPath []string) ([]uint64, error) {
+	p := &GetNodesParams{
+		CnrID:      bktInfo.CID,
+		BktInfo:    bktInfo,
+		TreeID:     treeID,
+		Path:       prefixPath,
+		LatestOnly: false,
+		AllAttrs:   true,
+	}
+	nodes, err := c.service.GetNodes(ctx, p)
+	if err != nil {
+		return nil, err
+	}
+
+	var intermediateNodes []uint64
+	for _, node := range nodes {
+		if isIntermediate(node) {
+			intermediateNodes = append(intermediateNodes, node.GetNodeID()...)
+		}
+	}
+
+	if len(intermediateNodes) == 0 {
+		return nil, layer.ErrNodeNotFound
+	}
+
+	return intermediateNodes, nil
+}
+
+func getFilename(node NodeResponse) string {
+	for _, kv := range node.GetMeta() {
+		if kv.GetKey() == FileNameKey {
+			return string(kv.GetValue())
+		}
+	}
+
+	return ""
+}
+
+func isIntermediate(node NodeResponse) bool {
+	if len(node.GetMeta()) != 1 {
+		return false
+	}
+
+	return node.GetMeta()[0].GetKey() == FileNameKey
+}
+
+func getMaxTimestamp(node NodeResponse) uint64 {
+	var maxTimestamp uint64
+
+	for _, timestamp := range node.GetTimestamp() {
+		if timestamp > maxTimestamp {
+			maxTimestamp = timestamp
+		}
+	}
+
+	return maxTimestamp
+}
+
+type MultiID []uint64
+
+func (m MultiID) Equal(id MultiID) bool {
+	seen := make(map[uint64]struct{}, len(m))
+
+	for i := range m {
+		seen[m[i]] = struct{}{}
+	}
+
+	for i := range id {
+		if _, ok := seen[id[i]]; !ok {
+			return false
+		}
+	}
+
+	return true
+}
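A worked example of how `GetSubTreeByPrefix`/`determinePrefixNode` split a request prefix (assuming the usual `/` separator; the value is made up): everything before the last separator resolves the parent node in the version tree, and the tail is matched against child file names with `strings.HasPrefix`. The second return value is the prefix with the tail stripped (currently ignored by `listObjects`):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	prefix := "photos/2024/ca" // hypothetical request prefix

	path := strings.Split(prefix, "/")
	tailPrefix := path[len(path)-1]  // "ca": matched against file names via strings.HasPrefix
	parentPath := path[:len(path)-1] // ["photos" "2024"]: resolved to a rootID via GetNodes

	fmt.Println(parentPath, tailPrefix)
	fmt.Println(strings.TrimSuffix(prefix, tailPrefix)) // "photos/2024/"
}
```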
@@ -24,8 +24,8 @@ type nodeResponse struct {
 	timestamp uint64
 }

-func (n nodeResponse) GetTimestamp() uint64 {
-	return n.timestamp
+func (n nodeResponse) GetTimestamp() []uint64 {
+	return []uint64{n.timestamp}
 }

 func (n nodeResponse) GetMeta() []Meta {

@@ -36,6 +36,10 @@ func (n nodeResponse) GetMeta() []Meta {
 	return res
 }

+func (n nodeResponse) GetNodeID() []uint64 {
+	return nil
+}
+
 func TestGetLatestNode(t *testing.T) {
 	for _, tc := range []struct {
 		name string

@@ -130,7 +134,7 @@ func TestGetLatestNode(t *testing.T) {
 		},
 	} {
 		t.Run(tc.name, func(t *testing.T) {
-			actualNode, err := getLatestNode(tc.nodes)
+			actualNode, err := getLatestVersionNode(tc.nodes)
 			if tc.error {
 				require.Error(t, err)
 				return