diff --git a/cmd/http-gw/app.go b/cmd/http-gw/app.go
index 561598f..b9b8867 100644
--- a/cmd/http-gw/app.go
+++ b/cmd/http-gw/app.go
@@ -86,13 +86,15 @@ type (
appSettings struct {
reconnectInterval time.Duration
- mu sync.RWMutex
- defaultTimestamp bool
- zipCompression bool
- clientCut bool
- bufferMaxSizeForPut uint64
- namespaceHeader string
- defaultNamespaces []string
+ mu sync.RWMutex
+ defaultTimestamp bool
+ zipCompression bool
+ clientCut bool
+ returnIndexPage bool
+ indexPageTemplatePath string
+ bufferMaxSizeForPut uint64
+ namespaceHeader string
+ defaultNamespaces []string
}
)
@@ -176,12 +178,36 @@ func (s *appSettings) ZipCompression() bool {
return s.zipCompression
}
+func (s *appSettings) IndexPageEnabled() bool {
+ s.mu.RLock()
+ defer s.mu.RUnlock()
+ return s.returnIndexPage
+}
+
+func (s *appSettings) IndexPageTemplatePath() string {
+ s.mu.RLock()
+ defer s.mu.RUnlock()
+ return s.indexPageTemplatePath
+}
+
func (s *appSettings) setZipCompression(val bool) {
s.mu.Lock()
s.zipCompression = val
s.mu.Unlock()
}
+func (s *appSettings) setReturnIndexPage(val bool) {
+ s.mu.Lock()
+ s.returnIndexPage = val
+ s.mu.Unlock()
+}
+
+func (s *appSettings) setIndexTemplatePath(val string) {
+ s.mu.Lock()
+ s.indexPageTemplatePath = val
+ s.mu.Unlock()
+}
+
func (s *appSettings) ClientCut() bool {
s.mu.RLock()
defer s.mu.RUnlock()
@@ -498,6 +524,8 @@ func (a *app) configReload(ctx context.Context) {
func (a *app) updateSettings() {
a.settings.setDefaultTimestamp(a.cfg.GetBool(cfgUploaderHeaderEnableDefaultTimestamp))
a.settings.setZipCompression(a.cfg.GetBool(cfgZipCompression))
+ a.settings.setReturnIndexPage(a.cfg.GetBool(cfgIndexPageEnabled))
+ a.settings.setIndexTemplatePath(a.cfg.GetString(cfgIndexPageTemplatePath))
a.settings.setClientCut(a.cfg.GetBool(cfgClientCut))
a.settings.setBufferMaxSizeForPut(a.cfg.GetUint64(cfgBufferMaxSizeForPut))
a.settings.setNamespaceHeader(a.cfg.GetString(cfgResolveNamespaceHeader))
diff --git a/cmd/http-gw/settings.go b/cmd/http-gw/settings.go
index 0d97dcb..782f633 100644
--- a/cmd/http-gw/settings.go
+++ b/cmd/http-gw/settings.go
@@ -60,6 +60,9 @@ const (
cfgReconnectInterval = "reconnect_interval"
+ cfgIndexPageEnabled = "index_page.enabled"
+ cfgIndexPageTemplatePath = "index_page.template_path"
+
// Web.
cfgWebReadBufferSize = "web.read_buffer_size"
cfgWebWriteBufferSize = "web.write_buffer_size"
@@ -191,6 +194,9 @@ func settings() *viper.Viper {
// pool:
v.SetDefault(cfgPoolErrorThreshold, defaultPoolErrorThreshold)
+ v.SetDefault(cfgIndexPageEnabled, false)
+ v.SetDefault(cfgIndexPageTemplatePath, "")
+
// frostfs:
v.SetDefault(cfgBufferMaxSizeForPut, defaultBufferMaxSizeForPut)
diff --git a/config/config.yaml b/config/config.yaml
index 7f8077b..5a6e2fd 100644
--- a/config/config.yaml
+++ b/config/config.yaml
@@ -101,6 +101,11 @@ request_timeout: 5s # Timeout to check node health during rebalance.
rebalance_timer: 30s # Interval to check nodes health.
pool_error_threshold: 100 # The number of errors on connection after which node is considered as unhealthy.
+# Enable the index page to see the list of objects for the specified container and prefix.
+index_page:
+ enabled: false
+ template_path: /templates/index.gotmpl
+
zip:
compression: false # Enable zip compression to download files by common prefix.
@@ -126,4 +131,4 @@ cache:
resolve_bucket:
namespace_header: X-Frostfs-Namespace
- default_namespaces: [ "", "root" ]
\ No newline at end of file
+ default_namespaces: [ "", "root" ]
diff --git a/docs/api.md b/docs/api.md
index 78df766..f7eb3a4 100644
--- a/docs/api.md
+++ b/docs/api.md
@@ -95,12 +95,12 @@ The `filename` field from the multipart form will be set as `FileName` attribute
## Get object
-Route: `/get/{cid}/{oid}?[download=true]`
+Route: `/get/{cid}/{oid}?[download=false]`
| Route parameter | Type | Description |
|-----------------|--------|------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| `cid` | Single | Base58 encoded container ID or container name from NNS. |
-| `oid` | Single | Base58 encoded object ID. |
+| `cid` | Single | Base58 encoded `container ID` or `container name` from NNS or `bucket name`. |
+| `oid` | Single | Base58 encoded `object ID`. It can also be an `S3 object name` if `cid` is specified as a bucket name. |
| `download` | Query | Set the `Content-Disposition` header as `attachment` in the response. This makes the browser download the object as a file instead of displaying it on the page. |
### Methods
@@ -141,6 +141,13 @@ Get an object (payload and attributes) by an address.
| 400 | Some error occurred during object downloading. |
| 404 | Container or object not found. |
+###### Body
+
+Returns the object data. If the request comes from a browser, the object is either displayed inline or
+downloaded as an attachment when the `download` query parameter is set to `true`.
+If `index_page.enabled` is set to `true`, an HTML index page is returned when no object with the
+specified S3 name is found.
+
#### HEAD
Get an object attributes by an address.
diff --git a/docs/gate-configuration.md b/docs/gate-configuration.md
index 8e3daad..aaaaa33 100644
--- a/docs/gate-configuration.md
+++ b/docs/gate-configuration.md
@@ -57,6 +57,7 @@ $ cat http.log
| `frostfs` | [Frostfs configuration](#frostfs-section) |
| `cache` | [Cache configuration](#cache-section) |
| `resolve_bucket` | [Bucket name resolving configuration](#resolve_bucket-section) |
+| `index_page` | [Index page configuration](#index_page-section) |
# General section
@@ -75,16 +76,16 @@ pool_error_threshold: 100
reconnect_interval: 1m
```
-| Parameter | Type | SIGHUP reload | Default value | Description |
-|------------------------|------------|---------------|----------------|------------------------------------------------------------------------------------|
-| `rpc_endpoint` | `string` | yes | | The address of the RPC host to which the gateway connects to resolve bucket names. |
-| `resolve_order` | `[]string` | yes | `[nns, dns]` | Order of bucket name resolvers to use. |
-| `connect_timeout` | `duration` | | `10s` | Timeout to connect to a node. |
-| `stream_timeout` | `duration` | | `10s` | Timeout for individual operations in streaming RPC. |
-| `request_timeout` | `duration` | | `15s` | Timeout to check node health during rebalance. |
-| `rebalance_timer` | `duration` | | `60s` | Interval to check node health. |
-| `pool_error_threshold` | `uint32` | | `100` | The number of errors on connection after which node is considered as unhealthy. |
-| `reconnect_interval` | `duration` | no | `1m` | Listeners reconnection interval. |
+| Parameter | Type | SIGHUP reload | Default value | Description |
+|------------------------|------------|---------------|---------------|-------------------------------------------------------------------------------------------------|
+| `rpc_endpoint` | `string` | yes | | The address of the RPC host to which the gateway connects to resolve bucket names. |
+| `resolve_order` | `[]string` | yes | `[nns, dns]` | Order of bucket name resolvers to use. |
+| `connect_timeout` | `duration` | | `10s` | Timeout to connect to a node. |
+| `stream_timeout` | `duration` | | `10s` | Timeout for individual operations in streaming RPC. |
+| `request_timeout` | `duration` | | `15s` | Timeout to check node health during rebalance. |
+| `rebalance_timer` | `duration` | | `60s` | Interval to check node health. |
+| `pool_error_threshold` | `uint32` | | `100` | The number of errors on connection after which node is considered as unhealthy. |
+| `reconnect_interval` | `duration` | no | `1m` | Listeners reconnection interval. |
# `wallet` section
@@ -335,4 +336,19 @@ resolve_bucket:
| Parameter | Type | SIGHUP reload | Default value | Description |
|----------------------|------------|---------------|-----------------------|--------------------------------------------------------------------------------------------------------------------------|
| `namespace_header` | `string` | yes | `X-Frostfs-Namespace` | Header to determine zone to resolve bucket name. |
-| `default_namespaces` | `[]string` | yes | ["","root"] | Namespaces that should be handled as default. |
\ No newline at end of file
+| `default_namespaces` | `[]string` | yes | ["","root"] | Namespaces that should be handled as default. |
+
+# `index_page` section
+
+Parameters for rendering an HTML index page with the contents of an S3 bucket or subdirectory for a `Get object` request.
+
+```yaml
+index_page:
+ enabled: false
+ template_path: ""
+```
+
+| Parameter | Type | SIGHUP reload | Default value | Description |
+|-----------------|----------|---------------|---------------|---------------------------------------------------------------------------------|
+| `enabled`       | `bool`   | yes           | `false`       | Flag to enable returning the index page when no object with the specified S3 name is found. |
+| `template_path` | `string` | yes           | `""`          | Path to the `.gotmpl` file with the HTML template for the index page.                       |
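+
+The template is executed with the bucket name, the requested prefix and the list of objects
+(`OID`, `FileName`, `Size`, `Created` fields); the `formatSize` and `formatTimestamp` helpers
+are registered for it (see `internal/handler/browse.go`). A minimal sketch of a custom template,
+assuming it is saved at the configured `template_path`, could look like this (the repository
+also ships a ready-to-use `templates/index.gotmpl`):
+
+```gotmpl
+<h1>Index of {{ .BucketName }}/{{ .Prefix }}</h1>
+<table>
+  <tr><th>Filename</th><th>Size</th><th>Created</th></tr>
+  {{ range .Objects }}
+  <tr><td>{{ .FileName }}</td><td>{{ formatSize .Size }}</td><td>{{ formatTimestamp .Created }}</td></tr>
+  {{ end }}
+</table>
+```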
diff --git a/internal/api/tree.go b/internal/api/tree.go
index 4d16cc7..5b1d608 100644
--- a/internal/api/tree.go
+++ b/internal/api/tree.go
@@ -8,6 +8,7 @@ import (
type NodeVersion struct {
BaseNodeVersion
DeleteMarker bool
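+ // IsPrefixNode is true when the node corresponds to an intermediate (prefix) node rather than an object version.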
+ IsPrefixNode bool
}
// BaseNodeVersion is minimal node info from tree service.
diff --git a/internal/frostfs/services/pool_wrapper.go b/internal/frostfs/services/pool_wrapper.go
index f7b0a26..eb35d6c 100644
--- a/internal/frostfs/services/pool_wrapper.go
+++ b/internal/frostfs/services/pool_wrapper.go
@@ -4,7 +4,9 @@ import (
"context"
"errors"
"fmt"
+ "io"
+ "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree"
treepool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree"
@@ -15,16 +17,16 @@ type GetNodeByPathResponseInfoWrapper struct {
response *grpcService.GetNodeByPathResponse_Info
}
-func (n GetNodeByPathResponseInfoWrapper) GetNodeID() uint64 {
- return n.response.GetNodeId()
+func (n GetNodeByPathResponseInfoWrapper) GetNodeID() []uint64 {
+ return []uint64{n.response.GetNodeId()}
}
-func (n GetNodeByPathResponseInfoWrapper) GetParentID() uint64 {
- return n.response.GetParentId()
+func (n GetNodeByPathResponseInfoWrapper) GetParentID() []uint64 {
+ return []uint64{n.response.GetParentId()}
}
-func (n GetNodeByPathResponseInfoWrapper) GetTimestamp() uint64 {
- return n.response.GetTimestamp()
+func (n GetNodeByPathResponseInfoWrapper) GetTimestamp() []uint64 {
+ return []uint64{n.response.GetTimestamp()}
}
func (n GetNodeByPathResponseInfoWrapper) GetMeta() []tree.Meta {
@@ -89,3 +91,73 @@ func handleError(err error) error {
return err
}
+
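+// GetSubTree fetches a subtree of treeID for the given bucket starting from rootID up to the
+// requested depth. When sort is set, the tree service is asked to return nodes in ascending order.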
+func (w *PoolWrapper) GetSubTree(ctx context.Context, bktInfo *data.BucketInfo, treeID string, rootID []uint64, depth uint32, sort bool) ([]tree.NodeResponse, error) {
+ order := treepool.NoneOrder
+ if sort {
+ order = treepool.AscendingOrder
+ }
+ poolPrm := treepool.GetSubTreeParams{
+ CID: bktInfo.CID,
+ TreeID: treeID,
+ RootID: rootID,
+ Depth: depth,
+ BearerToken: getBearer(ctx),
+ Order: order,
+ }
+ if len(rootID) == 1 && rootID[0] == 0 {
+ // the storage node interprets a 'nil' value as []uint64{0};
+ // the gateway sends 'nil' instead of []uint64{0} to stay compatible
+ // with the previous tree service API, where a single uint64(0) value
+ // was dropped from the signature
+ poolPrm.RootID = nil
+ }
+
+ subTreeReader, err := w.p.GetSubTree(ctx, poolPrm)
+ if err != nil {
+ return nil, handleError(err)
+ }
+
+ var subtree []tree.NodeResponse
+
+ node, err := subTreeReader.Next()
+ for err == nil {
+ subtree = append(subtree, GetSubTreeResponseBodyWrapper{node})
+ node, err = subTreeReader.Next()
+ }
+ if err != nil && err != io.EOF {
+ return nil, handleError(err)
+ }
+
+ return subtree, nil
+}
+
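+// GetSubTreeResponseBodyWrapper adapts *grpcService.GetSubTreeResponse_Body to the tree.NodeResponse interface.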
+type GetSubTreeResponseBodyWrapper struct {
+ response *grpcService.GetSubTreeResponse_Body
+}
+
+func (n GetSubTreeResponseBodyWrapper) GetNodeID() []uint64 {
+ return n.response.GetNodeId()
+}
+
+func (n GetSubTreeResponseBodyWrapper) GetParentID() []uint64 {
+ resp := n.response.GetParentId()
+ if resp == nil {
+ // storage sends nil that should be interpreted as []uint64{0}
+ // due to protobuf compatibility, see 'GetSubTree' function
+ return []uint64{0}
+ }
+ return resp
+}
+
+func (n GetSubTreeResponseBodyWrapper) GetTimestamp() []uint64 {
+ return n.response.GetTimestamp()
+}
+
+func (n GetSubTreeResponseBodyWrapper) GetMeta() []tree.Meta {
+ res := make([]tree.Meta, len(n.response.Meta))
+ for i, value := range n.response.Meta {
+ res[i] = value
+ }
+ return res
+}
diff --git a/internal/handler/browse.go b/internal/handler/browse.go
new file mode 100644
index 0000000..a8789f2
--- /dev/null
+++ b/internal/handler/browse.go
@@ -0,0 +1,118 @@
+package handler
+
+import (
+ "html/template"
+ "path"
+ "strconv"
+ "time"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
+ "github.com/docker/go-units"
+ "github.com/valyala/fasthttp"
+ "go.uber.org/zap"
+ "golang.org/x/exp/slices"
+)
+
+const (
+ dateFormat = "02-01-2006 15:04"
+ attrOID, attrCreated, attrFileName, attrSize = "OID", "Created", "FileName", "Size"
+)
+
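+// BrowsePageData is the data passed to the index page template;
+// ResponseObject describes a single object listed on the page.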
+type (
+ BrowsePageData struct {
+ BucketName,
+ Prefix string
+ Objects []ResponseObject
+ }
+ ResponseObject struct {
+ OID string
+ Created string
+ FileName string
+ Size string
+ }
+)
+
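+// parseTimestamp parses a decimal Unix timestamp given in milliseconds.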
+func parseTimestamp(tstamp string) (time.Time, error) {
+ millis, err := strconv.ParseInt(tstamp, 10, 64)
+ if err != nil {
+ return time.Time{}, err
+ }
+
+ return time.UnixMilli(millis), nil
+}
+
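+// NewResponseObject builds a ResponseObject from a node attribute map.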
+func NewResponseObject(nodes map[string]string) ResponseObject {
+ return ResponseObject{
+ OID: nodes[attrOID],
+ Created: nodes[attrCreated],
+ FileName: nodes[attrFileName],
+ Size: nodes[attrSize],
+ }
+}
+
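+// formatTimestamp renders a Created attribute value using dateFormat; it returns an empty string if the value cannot be parsed.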
+func formatTimestamp(strdate string) string {
+ date, err := parseTimestamp(strdate)
+ if err != nil || date.IsZero() {
+ return ""
+ }
+
+ return date.Format(dateFormat)
+}
+
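+// formatSize converts a Size attribute value in bytes to a human-readable string.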
+func formatSize(strsize string) string {
+ size, err := strconv.ParseFloat(strsize, 64)
+ if err != nil {
+ return ""
+ }
+ return units.HumanSize(size)
+}
+
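+// browseObjects lists the objects of the bucket under the given prefix and renders them with the configured index page template.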
+func (h *Handler) browseObjects(c *fasthttp.RequestCtx, bucketName, prefix string) {
+ var log = h.log.With(zap.String("bucket", bucketName))
+ ctx := utils.GetContextFromRequest(c)
+ nodes, err := h.listObjects(ctx, bucketName, prefix)
+ if err != nil {
+ logAndSendBucketError(c, log, err)
+ return
+ }
+
+ respObjects := make([]ResponseObject, len(nodes))
+ for i, node := range nodes {
+ respObjects[i] = NewResponseObject(node)
+ }
+
+ slices.SortFunc(respObjects, func(a, b ResponseObject) int {
+ aIsDir := a.Size == ""
+ bIsDir := b.Size == ""
+
+ // prefix objects go first
+ if aIsDir && !bIsDir {
+ return -1
+ } else if !aIsDir && bIsDir {
+ return 1
+ }
+
+ if a.FileName < b.FileName {
+ return -1
+ }
+ return 1
+ })
+
+ templatePath := h.config.IndexPageTemplatePath()
+ tmpl, err := template.New(path.Base(templatePath)).Funcs(template.FuncMap{
+ "formatTimestamp": formatTimestamp,
+ "formatSize": formatSize,
+ }).ParseFiles(templatePath)
+ if err != nil {
+ logAndSendBucketError(c, log, err)
+ return
+ }
+ if err = tmpl.Execute(c, &BrowsePageData{
+ BucketName: bucketName,
+ Prefix: prefix,
+ Objects: respObjects,
+ }); err != nil {
+ logAndSendBucketError(c, log, err)
+ return
+ }
+}
diff --git a/internal/handler/handler.go b/internal/handler/handler.go
index 4de9d9a..bf37951 100644
--- a/internal/handler/handler.go
+++ b/internal/handler/handler.go
@@ -30,6 +30,8 @@ type Config interface {
DefaultTimestamp() bool
ZipCompression() bool
ClientCut() bool
+ IndexPageEnabled() bool
+ IndexPageTemplatePath() string
BufferMaxSizeForPut() uint64
NamespaceHeader() string
}
@@ -208,41 +210,47 @@ func (h *Handler) byAddress(c *fasthttp.RequestCtx, f func(context.Context, requ
// byObjectName is a wrapper for function (e.g. request.headObject, request.receiveFile) that
// prepares request and object address to it.
-func (h *Handler) byObjectName(req *fasthttp.RequestCtx, f func(context.Context, request, oid.Address)) {
+func (h *Handler) byObjectName(c *fasthttp.RequestCtx, f func(context.Context, request, oid.Address)) {
var (
- bucketname = req.UserValue("cid").(string)
- key = req.UserValue("oid").(string)
+ bucketname = c.UserValue("cid").(string)
+ key = c.UserValue("oid").(string)
log = h.log.With(zap.String("bucketname", bucketname), zap.String("key", key))
)
unescapedKey, err := url.QueryUnescape(key)
if err != nil {
- logAndSendBucketError(req, log, err)
+ logAndSendBucketError(c, log, err)
return
}
- ctx := utils.GetContextFromRequest(req)
+ ctx := utils.GetContextFromRequest(c)
bktInfo, err := h.getBucketInfo(ctx, bucketname, log)
if err != nil {
- logAndSendBucketError(req, log, err)
+ logAndSendBucketError(c, log, err)
return
}
+ needIndexPage := h.config.IndexPageEnabled()
foundOid, err := h.tree.GetLatestVersion(ctx, &bktInfo.CID, unescapedKey)
if err != nil {
if errors.Is(err, tree.ErrNodeAccessDenied) {
- response.Error(req, "Access Denied", fasthttp.StatusForbidden)
- return
+ response.Error(c, "Access Denied", fasthttp.StatusForbidden)
+ } else if needIndexPage && string(c.Method()) != fasthttp.MethodHead {
+ h.browseObjects(c, bucketname, key)
+ } else {
+ log.Error(logs.GetLatestObjectVersion, zap.Error(err))
+ response.Error(c, "object wasn't found", fasthttp.StatusNotFound)
}
- log.Error(logs.GetLatestObjectVersion, zap.Error(err))
-
- response.Error(req, "object wasn't found", fasthttp.StatusNotFound)
+ return
+ }
+ if foundOid.IsPrefixNode && needIndexPage {
+ h.browseObjects(c, bucketname, key)
return
}
if foundOid.DeleteMarker {
log.Error(logs.ObjectWasDeleted)
- response.Error(req, "object deleted", fasthttp.StatusNotFound)
+ response.Error(c, "object deleted", fasthttp.StatusNotFound)
return
}
@@ -250,7 +258,7 @@ func (h *Handler) byObjectName(req *fasthttp.RequestCtx, f func(context.Context,
addr.SetContainer(bktInfo.CID)
addr.SetObject(foundOid.OID)
- f(ctx, *h.newRequest(req, log), addr)
+ f(ctx, *h.newRequest(c, log), addr)
}
// byAttribute is a wrapper similar to byAddress.
@@ -379,3 +387,32 @@ func (h *Handler) readContainer(ctx context.Context, cnrID cid.ID) (*data.Bucket
return bktInfo, err
}
+
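+// listObjects returns the attribute maps of the tree nodes found under the given prefix in the bucket.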
+func (h *Handler) listObjects(ctx context.Context, bucketName, prefix string) ([]map[string]string, error) {
+ var (
+ log = h.log.With(zap.String("bucket", bucketName))
+ )
+ bucketInfo, err := h.getBucketInfo(ctx, bucketName, log)
+ if err != nil {
+ return nil, err
+ }
+ nodes, _, err := h.tree.GetSubTreeByPrefix(ctx, bucketInfo, prefix, true)
+ if err != nil {
+ return nil, err
+ }
+
+ var objects = make([]map[string]string, 0, len(nodes))
+ for _, node := range nodes {
+ meta := node.GetMeta()
+ if meta == nil {
+ continue
+ }
+ var obj = make(map[string]string, len(meta))
+ for _, m := range meta {
+ obj[m.GetKey()] = string(m.GetValue())
+ }
+ objects = append(objects, obj)
+ }
+
+ return objects, nil
+}
diff --git a/internal/handler/handler_test.go b/internal/handler/handler_test.go
index ed67f88..03e0ff7 100644
--- a/internal/handler/handler_test.go
+++ b/internal/handler/handler_test.go
@@ -12,6 +12,7 @@ import (
"time"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/cache"
+ "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler/middleware"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/resolver"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree"
@@ -37,6 +38,10 @@ func (t *treeClientMock) GetNodes(context.Context, *tree.GetNodesParams) ([]tree
return nil, nil
}
+func (t *treeClientMock) GetSubTree(context.Context, *data.BucketInfo, string, []uint64, uint32, bool) ([]tree.NodeResponse, error) {
+ return nil, nil
+}
+
type configMock struct {
}
@@ -48,6 +53,14 @@ func (c *configMock) ZipCompression() bool {
return false
}
+func (c *configMock) IndexPageEnabled() bool {
+ return false
+}
+
+func (c *configMock) IndexPageTemplatePath() string {
+ return ""
+}
+
func (c *configMock) ClientCut() bool {
return false
}
diff --git a/internal/handler/upload.go b/internal/handler/upload.go
index cea2250..6c0e117 100644
--- a/internal/handler/upload.go
+++ b/internal/handler/upload.go
@@ -43,22 +43,22 @@ func (pr *putResponse) encode(w io.Writer) error {
}
// Upload handles multipart upload request.
-func (h *Handler) Upload(req *fasthttp.RequestCtx) {
+func (h *Handler) Upload(c *fasthttp.RequestCtx) {
var (
file MultipartFile
idObj oid.ID
addr oid.Address
- scid, _ = req.UserValue("cid").(string)
+ scid, _ = c.UserValue("cid").(string)
log = h.log.With(zap.String("cid", scid))
- bodyStream = req.RequestBodyStream()
+ bodyStream = c.RequestBodyStream()
drainBuf = make([]byte, drainBufSize)
)
- ctx := utils.GetContextFromRequest(req)
+ ctx := utils.GetContextFromRequest(c)
bktInfo, err := h.getBucketInfo(ctx, scid, log)
if err != nil {
- logAndSendBucketError(req, log, err)
+ logAndSendBucketError(c, log, err)
return
}
@@ -75,21 +75,21 @@ func (h *Handler) Upload(req *fasthttp.RequestCtx) {
zap.Error(err),
)
}()
- boundary := string(req.Request.Header.MultipartFormBoundary())
+ boundary := string(c.Request.Header.MultipartFormBoundary())
if file, err = fetchMultipartFile(h.log, bodyStream, boundary); err != nil {
log.Error(logs.CouldNotReceiveMultipartForm, zap.Error(err))
- response.Error(req, "could not receive multipart/form: "+err.Error(), fasthttp.StatusBadRequest)
+ response.Error(c, "could not receive multipart/form: "+err.Error(), fasthttp.StatusBadRequest)
return
}
- filtered, err := filterHeaders(h.log, &req.Request.Header)
+ filtered, err := filterHeaders(h.log, &c.Request.Header)
if err != nil {
log.Error(logs.CouldNotProcessHeaders, zap.Error(err))
- response.Error(req, err.Error(), fasthttp.StatusBadRequest)
+ response.Error(c, err.Error(), fasthttp.StatusBadRequest)
return
}
now := time.Now()
- if rawHeader := req.Request.Header.Peek(fasthttp.HeaderDate); rawHeader != nil {
+ if rawHeader := c.Request.Header.Peek(fasthttp.HeaderDate); rawHeader != nil {
if parsed, err := time.Parse(http.TimeFormat, string(rawHeader)); err != nil {
log.Warn(logs.CouldNotParseClientTime, zap.String("Date header", string(rawHeader)), zap.Error(err))
} else {
@@ -97,9 +97,9 @@ func (h *Handler) Upload(req *fasthttp.RequestCtx) {
}
}
- if err = utils.PrepareExpirationHeader(req, h.frostfs, filtered, now); err != nil {
+ if err = utils.PrepareExpirationHeader(c, h.frostfs, filtered, now); err != nil {
log.Error(logs.CouldNotPrepareExpirationHeader, zap.Error(err))
- response.Error(req, "could not prepare expiration header: "+err.Error(), fasthttp.StatusBadRequest)
+ response.Error(c, "could not prepare expiration header: "+err.Error(), fasthttp.StatusBadRequest)
return
}
@@ -143,7 +143,7 @@ func (h *Handler) Upload(req *fasthttp.RequestCtx) {
}
if idObj, err = h.frostfs.CreateObject(ctx, prm); err != nil {
- h.handlePutFrostFSErr(req, err)
+ h.handlePutFrostFSErr(c, err)
return
}
@@ -151,9 +151,9 @@ func (h *Handler) Upload(req *fasthttp.RequestCtx) {
addr.SetContainer(bktInfo.CID)
// Try to return the response, otherwise, if something went wrong, throw an error.
- if err = newPutResponse(addr).encode(req); err != nil {
+ if err = newPutResponse(addr).encode(c); err != nil {
log.Error(logs.CouldNotEncodeResponse, zap.Error(err))
- response.Error(req, "could not encode response", fasthttp.StatusBadRequest)
+ response.Error(c, "could not encode response", fasthttp.StatusBadRequest)
return
}
@@ -170,8 +170,8 @@ func (h *Handler) Upload(req *fasthttp.RequestCtx) {
}
}
// Report status code and content type.
- req.Response.SetStatusCode(fasthttp.StatusOK)
- req.Response.Header.SetContentType(jsonHeader)
+ c.Response.SetStatusCode(fasthttp.StatusOK)
+ c.Response.Header.SetContentType(jsonHeader)
}
func (h *Handler) handlePutFrostFSErr(r *fasthttp.RequestCtx, err error) {
diff --git a/templates/index.gotmpl b/templates/index.gotmpl
new file mode 100644
index 0000000..2958ab2
--- /dev/null
+++ b/templates/index.gotmpl
@@ -0,0 +1,70 @@
+{{$bucketName := .BucketName}}
+{{ $prefix := .Prefix }}
+<!DOCTYPE html>
+<html lang="en">
+<head>
+    <meta charset="UTF-8"/>
+    <title>Index of {{ $bucketName }}/{{ $prefix }}</title>
+</head>
+<body>
+<h1>Index of {{ $bucketName }}/{{ $prefix }}</h1>
+<table>
+    <tr>
+        <th>Filename</th>
+        <th>Size</th>
+        <th>Created</th>
+        <th>Download</th>
+    </tr>
+    <tr>
+        <td><a href="../">../</a></td>
+        <td></td>
+        <td></td>
+        <td></td>
+    </tr>
+    {{ range .Objects }}
+    <tr>
+        <td>
+            <a href="/get/{{ $bucketName }}/{{ $prefix }}{{.FileName}}">
+                {{.FileName}}{{if not .Size}}/{{end}}
+            </a>
+        </td>
+        <td>{{ formatSize .Size }}</td>
+        <td>{{ formatTimestamp .Created }}</td>
+        <td>
+            {{ if .Size }}
+            <a href="/get/{{ $bucketName }}/{{ $prefix }}{{.FileName}}?download=true">
+                Link
+            </a>
+            {{ end }}
+        </td>
+    </tr>
+    {{ end }}
+</table>
+</body>
+</html>