package downloader

import (
	"archive/zip"
	"bufio"
	"bytes"
	"context"
	"errors"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"path"
	"strconv"
	"strings"
	"time"

	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/resolver"
	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/response"
	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens"
	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree"
	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
	"github.com/valyala/fasthttp"
	"go.uber.org/atomic"
	"go.uber.org/zap"
)
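
// request is a wrapper around *fasthttp.RequestCtx that carries the logger bound to the current request.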
type request struct {
	*fasthttp.RequestCtx
	log *zap.Logger
}
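
// isValidToken checks that the string can be used as an HTTP header name:
// printable ASCII with no separator characters.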
func isValidToken(s string) bool {
	for _, c := range s {
		if c <= ' ' || c > 127 {
			return false
		}
		if strings.ContainsRune("()<>@,;:\\\"/[]?={}", c) {
			return false
		}
	}
	return true
}
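
// isValidValue checks that the string can be used as an HTTP header value:
// printable ASCII without double quotes.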
func isValidValue(s string) bool {
	for _, c := range s {
		// Technically the HTTP specification allows more characters,
		// but we don't want to escape things.
		if c < ' ' || c > 127 || c == '"' {
			return false
		}
	}
	return true
}
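
// readCloser couples an io.Reader with the io.Closer of the underlying payload stream.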
type readCloser struct {
	io.Reader
	io.Closer
}

// readContentType initializes an io.Reader limited to maxSize bytes and detects the Content-Type
// from the data it reads. It returns the reader's error directly, along with the data already read.
func readContentType(maxSize uint64, rInit func(uint64) (io.Reader, error)) (string, []byte, error) {
	if maxSize > sizeToDetectType {
		maxSize = sizeToDetectType
	}

	buf := make([]byte, maxSize) // maybe sync-pool the slice?

	r, err := rInit(maxSize)
	if err != nil {
		return "", nil, err
	}

	n, err := r.Read(buf)
	if err != nil && err != io.EOF {
		return "", nil, err
	}

	buf = buf[:n]

	return http.DetectContentType(buf), buf, err // to not lose io.EOF
}
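
// receiveFile fetches the object at the given address, maps its attributes to response headers,
// detects the Content-Type if the corresponding attribute is missing, and streams the payload
// as the response body.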
func receiveFile(ctx context.Context, req request, clnt *pool.Pool, objectAddress oid.Address) {
	var (
		err      error
		dis      = "inline"
		start    = time.Now()
		filename string
	)

	var prm pool.PrmObjectGet
	prm.SetAddress(objectAddress)
	if btoken := bearerToken(ctx); btoken != nil {
		prm.UseBearer(*btoken)
	}

	rObj, err := clnt.GetObject(ctx, prm)
	if err != nil {
		req.handleFrostFSErr(err, start)
		return
	}

	// we can't close reader in this function, so how to do it?

	if req.Request.URI().QueryArgs().GetBool("download") {
		dis = "attachment"
	}

	payloadSize := rObj.Header.PayloadSize()

	req.Response.Header.Set(fasthttp.HeaderContentLength, strconv.FormatUint(payloadSize, 10))
	var contentType string
	for _, attr := range rObj.Header.Attributes() {
		key := attr.Key()
		val := attr.Value()
		if !isValidToken(key) || !isValidValue(val) {
			continue
		}

		key = utils.BackwardTransformIfSystem(key)

		req.Response.Header.Set(utils.UserAttributeHeaderPrefix+key, val)
		switch key {
		case object.AttributeFileName:
			filename = val
		case object.AttributeTimestamp:
			value, err := strconv.ParseInt(val, 10, 64)
			if err != nil {
				req.log.Info("couldn't parse creation date",
					zap.String("key", key),
					zap.String("val", val),
					zap.Error(err))
				continue
			}
			req.Response.Header.Set(fasthttp.HeaderLastModified,
				time.Unix(value, 0).UTC().Format(http.TimeFormat))
		case object.AttributeContentType:
			contentType = val
		}
	}

	idsToResponse(&req.Response, &rObj.Header)

	if len(contentType) == 0 {
		// determine the Content-Type from the payload head
		var payloadHead []byte

		contentType, payloadHead, err = readContentType(payloadSize, func(uint64) (io.Reader, error) {
			return rObj.Payload, nil
		})
		if err != nil && err != io.EOF {
			req.log.Error("could not detect Content-Type from payload", zap.Error(err))
			response.Error(req.RequestCtx, "could not detect Content-Type from payload: "+err.Error(), fasthttp.StatusBadRequest)
			return
		}

		// reset payload reader since a part of the data has been read
		var headReader io.Reader = bytes.NewReader(payloadHead)

		if err != io.EOF { // otherwise, we've already read full payload
			headReader = io.MultiReader(headReader, rObj.Payload)
		}

		// note: we could do with io.Reader, but SetBodyStream below closes body stream
		// if it implements io.Closer and that's useful for us.
		rObj.Payload = readCloser{headReader, rObj.Payload}
	}
	req.SetContentType(contentType)

	req.Response.Header.Set(fasthttp.HeaderContentDisposition, dis+"; filename="+path.Base(filename))

	req.Response.SetBodyStream(rObj.Payload, int(payloadSize))
}
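
// bearerToken returns the bearer token stored in the request context, or nil if there is none.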
func bearerToken(ctx context.Context) *bearer.Token {
	if tkn, err := tokens.LoadBearerToken(ctx); err == nil {
		return tkn
	}
	return nil
}
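
// handleFrostFSErr logs the storage error and writes the matching HTTP error response.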
func (r *request) handleFrostFSErr(err error, start time.Time) {
	logFields := []zap.Field{
		zap.Stringer("elapsed", time.Since(start)),
		zap.Error(err),
	}
	statusCode, msg, additionalFields := response.FormErrorResponse("could not receive object", err)
	logFields = append(logFields, additionalFields...)

	r.log.Error("could not receive object", logFields...)
	response.Error(r.RequestCtx, msg, statusCode)
}

// Downloader is a download request handler.
type Downloader struct {
	log               *zap.Logger
	pool              *pool.Pool
	containerResolver *resolver.ContainerResolver
	settings          *Settings
	tree              *tree.Tree
}

// Settings stores reloading parameters, so it has to provide atomic getters and setters.
type Settings struct {
	zipCompression atomic.Bool
}

func (s *Settings) ZipCompression() bool {
	return s.zipCompression.Load()
}

func (s *Settings) SetZipCompression(val bool) {
	s.zipCompression.Store(val)
}

// New creates an instance of Downloader using specified options.
func New(params *utils.AppParams, settings *Settings, tree *tree.Tree) *Downloader {
	return &Downloader{
		log:               params.Logger,
		pool:              params.Pool,
		settings:          settings,
		containerResolver: params.Resolver,
		tree:              tree,
	}
}
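
// newRequest couples a fasthttp request context with the request-scoped logger.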
func (d *Downloader) newRequest(ctx *fasthttp.RequestCtx, log *zap.Logger) *request {
	return &request{
		RequestCtx: ctx,
		log:        log,
	}
}

// DownloadByAddressOrBucketName handles download requests using simple cid/oid or bucketname/key format.
func (d *Downloader) DownloadByAddressOrBucketName(c *fasthttp.RequestCtx) {
	test, _ := c.UserValue("oid").(string)
	var id oid.ID
	err := id.DecodeString(test)
	if err != nil {
		d.byBucketname(c, receiveFile)
	} else {
		d.byAddress(c, receiveFile)
	}
}

// byAddress is a wrapper for handler functions (e.g. receiveFile) that
// prepares the request and the object address before calling the handler.
func (d *Downloader) byAddress(c *fasthttp.RequestCtx, f func(context.Context, request, *pool.Pool, oid.Address)) {
	var (
		idCnr, _ = c.UserValue("cid").(string)
		idObj, _ = c.UserValue("oid").(string)
		log      = d.log.With(zap.String("cid", idCnr), zap.String("oid", idObj))
	)

	ctx := utils.GetContextFromRequest(c)

	cnrID, err := utils.GetContainerID(ctx, idCnr, d.containerResolver)
	if err != nil {
		log.Error("wrong container id", zap.Error(err))
		response.Error(c, "wrong container id", fasthttp.StatusBadRequest)
		return
	}

	objID := new(oid.ID)
	if err = objID.DecodeString(idObj); err != nil {
		log.Error("wrong object id", zap.Error(err))
		response.Error(c, "wrong object id", fasthttp.StatusBadRequest)
		return
	}

	var addr oid.Address
	addr.SetContainer(*cnrID)
	addr.SetObject(*objID)

	f(ctx, *d.newRequest(c, log), d.pool, addr)
}

// byBucketname is a wrapper for handler functions (e.g. receiveFile) that
// resolves the bucket name and object key and prepares the request and
// the object address before calling the handler.
func (d *Downloader) byBucketname(req *fasthttp.RequestCtx, f func(context.Context, request, *pool.Pool, oid.Address)) {
	var (
		bucketname = req.UserValue("cid").(string)
		key        = req.UserValue("oid").(string)
		log        = d.log.With(zap.String("bucketname", bucketname), zap.String("key", key))
	)

	ctx := utils.GetContextFromRequest(req)

	cnrID, err := utils.GetContainerID(ctx, bucketname, d.containerResolver)
	if err != nil {
		log.Error("wrong container id", zap.Error(err))
		response.Error(req, "wrong container id", fasthttp.StatusBadRequest)
		return
	}

	foundOid, err := d.tree.GetLatestVersion(ctx, cnrID, key)
	if err != nil {
		log.Error("object wasn't found", zap.Error(err))
		response.Error(req, "object wasn't found", fasthttp.StatusNotFound)
		return
	}
	if foundOid.DeleteMarker {
		log.Error("object was deleted")
		response.Error(req, "object deleted", fasthttp.StatusNotFound)
		return
	}

	var addr oid.Address
	addr.SetContainer(*cnrID)
	addr.SetObject(foundOid.OID)

	f(ctx, *d.newRequest(req, log), d.pool, addr)
}

// DownloadByAttribute handles attribute-based download requests.
func (d *Downloader) DownloadByAttribute(c *fasthttp.RequestCtx) {
	d.byAttribute(c, receiveFile)
}

// byAttribute is a wrapper similar to byAddress.
func (d *Downloader) byAttribute(c *fasthttp.RequestCtx, f func(context.Context, request, *pool.Pool, oid.Address)) {
	var (
		scid, _ = c.UserValue("cid").(string)
		key, _  = url.QueryUnescape(c.UserValue("attr_key").(string))
		val, _  = url.QueryUnescape(c.UserValue("attr_val").(string))
		log     = d.log.With(zap.String("cid", scid), zap.String("attr_key", key), zap.String("attr_val", val))
	)

	ctx := utils.GetContextFromRequest(c)

	containerID, err := utils.GetContainerID(ctx, scid, d.containerResolver)
	if err != nil {
		log.Error("wrong container id", zap.Error(err))
		response.Error(c, "wrong container id", fasthttp.StatusBadRequest)
		return
	}

	res, err := d.search(ctx, containerID, key, val, object.MatchStringEqual)
	if err != nil {
		log.Error("could not search for objects", zap.Error(err))
		response.Error(c, "could not search for objects: "+err.Error(), fasthttp.StatusBadRequest)
		return
	}

	defer res.Close()

	buf := make([]oid.ID, 1)

	n, err := res.Read(buf)
	if n == 0 {
		if errors.Is(err, io.EOF) {
			log.Error("object not found", zap.Error(err))
			response.Error(c, "object not found", fasthttp.StatusNotFound)
			return
		}

		log.Error("read object list failed", zap.Error(err))
		response.Error(c, "read object list failed: "+err.Error(), fasthttp.StatusBadRequest)
		return
	}

	var addrObj oid.Address
	addrObj.SetContainer(*containerID)
	addrObj.SetObject(buf[0])

	f(ctx, *d.newRequest(c, log), d.pool, addrObj)
}
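
// search selects root objects in the container that match the given attribute filter,
// attaching the bearer token from the context when present.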
func (d *Downloader) search(ctx context.Context, cid *cid.ID, key, val string, op object.SearchMatchType) (pool.ResObjectSearch, error) {
	filters := object.NewSearchFilters()
	filters.AddRootFilter()
	filters.AddFilter(key, val, op)

	var prm pool.PrmObjectSearch
	prm.SetContainerID(*cid)
	prm.SetFilters(filters)
	if btoken := bearerToken(ctx); btoken != nil {
		prm.UseBearer(*btoken)
	}

	return d.pool.SearchObjects(ctx, prm)
}
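
// getContainer fetches the container by ID; it is used to check that the container exists.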
func (d *Downloader) getContainer(ctx context.Context, cnrID cid.ID) (container.Container, error) {
	var prm pool.PrmContainerGet
	prm.SetContainerID(cnrID)

	return d.pool.GetContainer(ctx, prm)
}
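
// addObjectToZip creates a zip entry for the object, using its FilePath attribute as the entry name.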
func (d *Downloader) addObjectToZip(zw *zip.Writer, obj *object.Object) (io.Writer, error) {
	method := zip.Store
	if d.settings.ZipCompression() {
		method = zip.Deflate
	}

	filePath := getZipFilePath(obj)
	if len(filePath) == 0 || filePath[len(filePath)-1] == '/' {
		return nil, fmt.Errorf("invalid filepath '%s'", filePath)
	}

	return zw.CreateHeader(&zip.FileHeader{
		Name:     filePath,
		Method:   method,
		Modified: time.Now(),
	})
}

// DownloadZipped handles zip by prefix requests.
func (d *Downloader) DownloadZipped(c *fasthttp.RequestCtx) {
	scid, _ := c.UserValue("cid").(string)
	prefix, _ := url.QueryUnescape(c.UserValue("prefix").(string))
	log := d.log.With(zap.String("cid", scid), zap.String("prefix", prefix))

	ctx := utils.GetContextFromRequest(c)

	containerID, err := utils.GetContainerID(ctx, scid, d.containerResolver)
	if err != nil {
		log.Error("wrong container id", zap.Error(err))
		response.Error(c, "wrong container id", fasthttp.StatusBadRequest)
		return
	}

	// Check that the container exists here to be able to return a 404 error;
	// otherwise we would get this error only at the object iteration step
	// and the client would get 200 OK.
	if _, err = d.getContainer(ctx, *containerID); err != nil {
		log.Error("could not check container existence", zap.Error(err))
		if client.IsErrContainerNotFound(err) {
			response.Error(c, "Not Found", fasthttp.StatusNotFound)
			return
		}
		response.Error(c, "could not check container existence: "+err.Error(), fasthttp.StatusBadRequest)
		return
	}

	resSearch, err := d.search(ctx, containerID, object.AttributeFilePath, prefix, object.MatchCommonPrefix)
	if err != nil {
		log.Error("could not search for objects", zap.Error(err))
		response.Error(c, "could not search for objects: "+err.Error(), fasthttp.StatusBadRequest)
		return
	}

	c.Response.Header.Set(fasthttp.HeaderContentType, "application/zip")
	c.Response.Header.Set(fasthttp.HeaderContentDisposition, "attachment; filename=\"archive.zip\"")
	c.Response.SetStatusCode(http.StatusOK)

	c.SetBodyStreamWriter(func(w *bufio.Writer) {
		defer resSearch.Close()

		zipWriter := zip.NewWriter(w)

		var bufZip []byte
		var addr oid.Address

		empty := true
		called := false
		btoken := bearerToken(ctx)
		addr.SetContainer(*containerID)

		errIter := resSearch.Iterate(func(id oid.ID) bool {
			called = true

			if empty {
				bufZip = make([]byte, 3<<20) // the same as for upload
			}
			empty = false

			addr.SetObject(id)
			if err = d.zipObject(ctx, zipWriter, addr, btoken, bufZip); err != nil {
				log.Error("failed to add object to archive", zap.String("oid", id.EncodeToString()), zap.Error(err))
			}

			return false
		})
		if errIter != nil {
			log.Error("iterating over selected objects failed", zap.Error(errIter))
		} else if !called {
			log.Error("objects not found")
		}

		if err = zipWriter.Close(); err != nil {
			log.Error("close zip writer", zap.Error(err))
		}
	})
}
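
// zipObject downloads a single object and copies its payload into the zip archive.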
func (d *Downloader) zipObject(ctx context.Context, zipWriter *zip.Writer, addr oid.Address, btoken *bearer.Token, bufZip []byte) error {
	var prm pool.PrmObjectGet
	prm.SetAddress(addr)
	if btoken != nil {
		prm.UseBearer(*btoken)
	}

	resGet, err := d.pool.GetObject(ctx, prm)
	if err != nil {
		return fmt.Errorf("get FrostFS object: %v", err)
	}

	objWriter, err := d.addObjectToZip(zipWriter, &resGet.Header)
	if err != nil {
		return fmt.Errorf("zip create header: %v", err)
	}

	if _, err = io.CopyBuffer(objWriter, resGet.Payload, bufZip); err != nil {
		return fmt.Errorf("copy object payload to zip file: %v", err)
	}

	if err = resGet.Payload.Close(); err != nil {
		return fmt.Errorf("object body close error: %w", err)
	}

	if err = zipWriter.Flush(); err != nil {
		return fmt.Errorf("flush zip writer: %v", err)
	}

	return nil
}
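
// getZipFilePath returns the object's FilePath attribute value, or an empty string if the attribute is not set.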
func getZipFilePath(obj *object.Object) string {
	for _, attr := range obj.Attributes() {
		if attr.Key() == object.AttributeFilePath {
			return attr.Value()
		}
	}

	return ""
}