package downloader

import (
	"archive/zip"
	"context"
	"errors"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"path"
	"strconv"
	"strings"
	"time"

	"github.com/nspcc-dev/neofs-http-gw/response"
	"github.com/nspcc-dev/neofs-http-gw/tokens"
	"github.com/nspcc-dev/neofs-http-gw/utils"
	"github.com/nspcc-dev/neofs-sdk-go/client"
	cid "github.com/nspcc-dev/neofs-sdk-go/container/id"
	"github.com/nspcc-dev/neofs-sdk-go/object"
	"github.com/nspcc-dev/neofs-sdk-go/pool"
	"github.com/valyala/fasthttp"
	"go.uber.org/zap"
)

type (
	// detector sniffs the Content-Type of a payload stream: it reads the first
	// contentTypeDetectSize bytes into data and keeps the rest of the stream
	// available through the embedded io.Reader.
	detector struct {
		io.Reader
		err         error
		contentType string
		done        chan struct{}
		data        []byte
	}

	// request wraps a fasthttp request context together with a scoped logger.
	request struct {
		*fasthttp.RequestCtx
		log *zap.Logger
	}

	objectIDs []*object.ID

	// errReader replays a byte slice and returns the stored error (instead of
	// io.EOF) once the slice is exhausted.
	errReader struct {
		data   []byte
		err    error
		offset int
	}
)

var errObjectNotFound = errors.New("object not found")

func newReader(data []byte, err error) *errReader {
	return &errReader{data: data, err: err}
}

func (r *errReader) Read(b []byte) (int, error) {
	if r.offset >= len(r.data) {
		return 0, io.EOF
	}
	n := copy(b, r.data[r.offset:])
	r.offset += n
	if r.offset >= len(r.data) {
		return n, r.err
	}
	return n, nil
}

const contentTypeDetectSize = 512

func newDetector() *detector {
	return &detector{done: make(chan struct{}), data: make([]byte, contentTypeDetectSize)}
}

func (d *detector) Wait() {
	<-d.done
}

func (d *detector) SetReader(reader io.Reader) {
	d.Reader = reader
}

func (d *detector) Detect() {
	// Close done on every path so that Wait can never block forever.
	defer close(d.done)

	n, err := d.Reader.Read(d.data)
	if err != nil && err != io.EOF {
		d.err = err
		return
	}
	d.data = d.data[:n]
	d.contentType = http.DetectContentType(d.data)
}

func (d *detector) MultiReader() io.Reader {
	return io.MultiReader(newReader(d.data, d.err), d.Reader)
}
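
// Illustrative sketch (not part of the gateway flow): sniffing the content
// type of an arbitrary stream while keeping the full payload readable
// afterwards. bytes.NewReader and the payload variable are assumptions made
// for this example only.
//
//	d := newDetector()
//	d.SetReader(bytes.NewReader(payload))
//	d.Detect()              // fills d.contentType from the first 512 bytes
//	body := d.MultiReader() // replays the sniffed bytes, then the rest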

func isValidToken(s string) bool {
	for _, c := range s {
		if c <= ' ' || c > 127 {
			return false
		}
		if strings.ContainsRune("()<>@,;:\\\"/[]?={}", c) {
			return false
		}
	}
	return true
}

func isValidValue(s string) bool {
	for _, c := range s {
		// The HTTP specification technically allows more characters, but we don't want to deal with escaping.
		if c < ' ' || c > 127 || c == '"' {
			return false
		}
	}
	return true
}
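
// Note: attributes that fail these checks are silently skipped when object
// attributes are copied into response headers (see receiveFile); a value
// containing, for example, a double quote or a non-ASCII rune is dropped
// rather than escaped.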

func (r request) receiveFile(clnt pool.Object, objectAddress *object.Address) {
	var (
		err      error
		dis      = "inline"
		start    = time.Now()
		filename string
		obj      *object.Object
	)
	if err = tokens.StoreBearerToken(r.RequestCtx); err != nil {
		r.log.Error("could not fetch and store bearer token", zap.Error(err))
		response.Error(r.RequestCtx, "could not fetch and store bearer token", fasthttp.StatusBadRequest)
		return
	}
	readDetector := newDetector()
	options := new(client.GetObjectParams).
		WithAddress(objectAddress).
		WithPayloadReaderHandler(func(reader io.Reader) {
			readDetector.SetReader(reader)
			readDetector.Detect()
		})

	obj, err = clnt.GetObject(r.RequestCtx, options, bearerOpts(r.RequestCtx))
	if err != nil {
		r.handleNeoFSErr(err, start)
		return
	}
	if r.Request.URI().QueryArgs().GetBool("download") {
		dis = "attachment"
	}
	r.Response.SetBodyStream(readDetector.MultiReader(), int(obj.PayloadSize()))
	r.Response.Header.Set(fasthttp.HeaderContentLength, strconv.FormatUint(obj.PayloadSize(), 10))

	// Copy object attributes into response headers, translating system
	// attributes and skipping keys/values that are not valid in HTTP headers.
	var contentType string
	for _, attr := range obj.Attributes() {
		key := attr.Key()
		val := attr.Value()
		if !isValidToken(key) || !isValidValue(val) {
			continue
		}
		if strings.HasPrefix(key, utils.SystemAttributePrefix) {
			key = systemBackwardTranslator(key)
		}
		r.Response.Header.Set(utils.UserAttributeHeaderPrefix+key, val)
		switch key {
		case object.AttributeFileName:
			filename = val
		case object.AttributeTimestamp:
			value, err := strconv.ParseInt(val, 10, 64)
			if err != nil {
				r.log.Info("couldn't parse creation date",
					zap.String("key", key),
					zap.String("val", val),
					zap.Error(err))
				continue
			}
			r.Response.Header.Set(fasthttp.HeaderLastModified,
				time.Unix(value, 0).UTC().Format(http.TimeFormat))
		case object.AttributeContentType:
			contentType = val
		}
	}
	r.Response.Header.Set(hdrObjectID, obj.ID().String())
	r.Response.Header.Set(hdrOwnerID, obj.OwnerID().String())
	r.Response.Header.Set(hdrContainerID, obj.ContainerID().String())

	// Fall back to the sniffed content type when the object carries no Content-Type attribute.
	if len(contentType) == 0 {
		if readDetector.err != nil {
			r.log.Error("could not read object", zap.Error(readDetector.err))
			response.Error(r.RequestCtx, "could not read object", fasthttp.StatusBadRequest)
			return
		}
		readDetector.Wait()
		contentType = readDetector.contentType
	}
	r.SetContentType(contentType)

	r.Response.Header.Set(fasthttp.HeaderContentDisposition, dis+"; filename="+path.Base(filename))
}

// systemBackwardTranslator is used to convert headers looking like '__NEOFS__ATTR_NAME' to 'Neofs-Attr-Name'.
func systemBackwardTranslator(key string) string {
	// trim specified prefix '__NEOFS__'
	key = strings.TrimPrefix(key, utils.SystemAttributePrefix)

	var res strings.Builder
	res.WriteString("Neofs-")

	strs := strings.Split(key, "_")
	for i, s := range strs {
		s = strings.Title(strings.ToLower(s))
		res.WriteString(s)
		if i != len(strs)-1 {
			res.WriteString("-")
		}
	}

	return res.String()
}
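
// Worked example: for an attribute such as "__NEOFS__EXPIRATION_EPOCH" the
// prefix is trimmed, "EXPIRATION_EPOCH" is split on '_', each part is
// title-cased and the parts are re-joined with '-', yielding
// "Neofs-Expiration-Epoch".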

func bearerOpts(ctx context.Context) pool.CallOption {
	if tkn, err := tokens.LoadBearerToken(ctx); err == nil {
		return pool.WithBearer(tkn)
	}
	return pool.WithBearer(nil)
}

func (r *request) handleNeoFSErr(err error, start time.Time) {
	r.log.Error(
		"could not receive object",
		zap.Stringer("elapsed", time.Since(start)),
		zap.Error(err),
	)
	var (
		msg   = fmt.Sprintf("could not receive object: %v", err)
		code  = fasthttp.StatusBadRequest
		cause = err
	)
	// Unwrap down to the root cause before classifying the error.
	for unwrap := errors.Unwrap(err); unwrap != nil; unwrap = errors.Unwrap(cause) {
		cause = unwrap
	}

	if strings.Contains(cause.Error(), "not found") ||
		strings.Contains(cause.Error(), "can't fetch container info") {
		code = fasthttp.StatusNotFound
		msg = errObjectNotFound.Error()
	}

	response.Error(r.RequestCtx, msg, code)
}

func (o objectIDs) Slice() []string {
	res := make([]string, 0, len(o))
	for _, oid := range o {
		res = append(res, oid.String())
	}
	return res
}

// Downloader is a download request handler.
type Downloader struct {
	log      *zap.Logger
	pool     pool.Pool
	settings Settings
}

// Settings stores the Downloader options.
type Settings struct {
	ZipCompression bool
}

// New creates an instance of Downloader using specified options.
func New(log *zap.Logger, settings Settings, conns pool.Pool) (*Downloader, error) {
	return &Downloader{log: log, pool: conns, settings: settings}, nil
}
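
// Construction sketch (illustrative; the logger and connection pool are
// normally built elsewhere in the gateway, so both are assumptions here):
//
//	dl, err := New(logger, Settings{ZipCompression: true}, conns)
//	if err != nil {
//		// handle error
//	}
//	// dl.DownloadByAddress, dl.DownloadByAttribute and dl.DownloadZipped can
//	// now be registered as fasthttp request handlers.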

func (d *Downloader) newRequest(ctx *fasthttp.RequestCtx, log *zap.Logger) *request {
	return &request{
		RequestCtx: ctx,
		log:        log,
	}
}

// DownloadByAddress handles download requests using simple cid/oid format.
func (d *Downloader) DownloadByAddress(c *fasthttp.RequestCtx) {
	d.byAddress(c, request.receiveFile)
}
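
// Routing sketch (the router choice and exact path patterns are assumptions;
// the gateway's main package does the real wiring). Whatever router is used
// has to populate the "cid"/"oid" (and, below, "attr_key"/"attr_val"/"prefix")
// user values read via c.UserValue:
//
//	r := router.New() // github.com/fasthttp/router
//	r.GET("/get/{cid}/{oid}", dl.DownloadByAddress)
//	r.GET("/get_by_attribute/{cid}/{attr_key}/{attr_val}", dl.DownloadByAttribute)
//	r.GET("/zip/{cid}/{prefix}", dl.DownloadZipped)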

// byAddress is a wrapper for handler functions (e.g. request.headObject, request.receiveFile)
// that prepares the request and the object address for them.
func (d *Downloader) byAddress(c *fasthttp.RequestCtx, f func(request, pool.Object, *object.Address)) {
	var (
		address = object.NewAddress()
		cid, _  = c.UserValue("cid").(string)
		oid, _  = c.UserValue("oid").(string)
		val     = strings.Join([]string{cid, oid}, "/")
		log     = d.log.With(zap.String("cid", cid), zap.String("oid", oid))
	)
	if err := address.Parse(val); err != nil {
		log.Error("wrong object address", zap.Error(err))
		response.Error(c, "wrong object address", fasthttp.StatusBadRequest)
		return
	}

	f(*d.newRequest(c, log), d.pool, address)
}

// DownloadByAttribute handles attribute-based download requests.
func (d *Downloader) DownloadByAttribute(c *fasthttp.RequestCtx) {
	d.byAttribute(c, request.receiveFile)
}

// byAttribute is a wrapper similar to byAddress.
func (d *Downloader) byAttribute(c *fasthttp.RequestCtx, f func(request, pool.Object, *object.Address)) {
	var (
		httpStatus = fasthttp.StatusBadRequest
		scid, _    = c.UserValue("cid").(string)
		key, _     = url.QueryUnescape(c.UserValue("attr_key").(string))
		val, _     = url.QueryUnescape(c.UserValue("attr_val").(string))
		log        = d.log.With(zap.String("cid", scid), zap.String("attr_key", key), zap.String("attr_val", val))
	)
	containerID := cid.New()
	if err := containerID.Parse(scid); err != nil {
		log.Error("wrong container id", zap.Error(err))
		response.Error(c, "wrong container id", httpStatus)
		return
	}

	address, err := d.searchObject(c, log, containerID, key, val)
	if err != nil {
		log.Error("couldn't search object", zap.Error(err))
		if errors.Is(err, errObjectNotFound) {
			httpStatus = fasthttp.StatusNotFound
		}
		response.Error(c, "couldn't search object", httpStatus)
		return
	}

	f(*d.newRequest(c, log), d.pool, address)
}

func (d *Downloader) searchObject(c *fasthttp.RequestCtx, log *zap.Logger, cid *cid.ID, key, val string) (*object.Address, error) {
	ids, err := d.searchByAttr(c, cid, key, val)
	if err != nil {
		return nil, err
	}
	if len(ids) > 1 {
		log.Debug("found multiple objects",
			zap.Strings("object_ids", objectIDs(ids).Slice()),
			zap.Stringer("show_object_id", ids[0]))
	}

	return formAddress(cid, ids[0]), nil
}

func formAddress(cid *cid.ID, oid *object.ID) *object.Address {
	address := object.NewAddress()
	address.SetContainerID(cid)
	address.SetObjectID(oid)
	return address
}

func (d *Downloader) search(c *fasthttp.RequestCtx, cid *cid.ID, key, val string, op object.SearchMatchType) ([]*object.ID, error) {
	options := object.NewSearchFilters()
	options.AddRootFilter()
	options.AddFilter(key, val, op)

	sops := new(client.SearchObjectParams).WithContainerID(cid).WithSearchFilters(options)
	ids, err := d.pool.SearchObject(c, sops)
	if err != nil {
		return nil, err
	}
	if len(ids) == 0 {
		return nil, errObjectNotFound
	}
	return ids, nil
}

func (d *Downloader) searchByPrefix(c *fasthttp.RequestCtx, cid *cid.ID, val string) ([]*object.ID, error) {
	return d.search(c, cid, object.AttributeFileName, val, object.MatchCommonPrefix)
}

func (d *Downloader) searchByAttr(c *fasthttp.RequestCtx, cid *cid.ID, key, val string) ([]*object.ID, error) {
	return d.search(c, cid, key, val, object.MatchStringEqual)
}

// DownloadZipped handles zip by prefix requests.
func (d *Downloader) DownloadZipped(c *fasthttp.RequestCtx) {
	status := fasthttp.StatusBadRequest
	scid, _ := c.UserValue("cid").(string)
	prefix, _ := url.QueryUnescape(c.UserValue("prefix").(string))
	log := d.log.With(zap.String("cid", scid), zap.String("prefix", prefix))

	containerID := cid.New()
	if err := containerID.Parse(scid); err != nil {
		log.Error("wrong container id", zap.Error(err))
		response.Error(c, "wrong container id", status)
		return
	}

	if err := tokens.StoreBearerToken(c); err != nil {
		log.Error("could not fetch and store bearer token", zap.Error(err))
		response.Error(c, "could not fetch and store bearer token", fasthttp.StatusBadRequest)
		return
	}

	ids, err := d.searchByPrefix(c, containerID, prefix)
	if err != nil {
		log.Error("couldn't find objects", zap.Error(err))
		if errors.Is(err, errObjectNotFound) {
			status = fasthttp.StatusNotFound
		}
		response.Error(c, "couldn't find objects", status)
		return
	}

	c.Response.Header.Set(fasthttp.HeaderContentType, "application/zip")
	c.Response.Header.Set(fasthttp.HeaderContentDisposition, "attachment; filename=\"archive.zip\"")
	c.Response.SetStatusCode(fasthttp.StatusOK)

	if err = d.streamFiles(c, containerID, ids); err != nil {
		log.Error("couldn't stream files", zap.Error(err))
		response.Error(c, "couldn't stream", fasthttp.StatusInternalServerError)
		return
	}
}
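
// Usage sketch (the /zip/{cid}/{prefix} route and the listen address are
// assumptions taken from the routing sketch above):
//
//	curl -OJ "http://localhost:8082/zip/<cid>/<prefix>"
//
// which downloads archive.zip containing every object in the container whose
// FileName attribute starts with <prefix>.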

func (d *Downloader) streamFiles(c *fasthttp.RequestCtx, cid *cid.ID, ids []*object.ID) error {
	zipWriter := zip.NewWriter(c)
	compression := zip.Store
	if d.settings.ZipCompression {
		compression = zip.Deflate
	}

	for _, id := range ids {
		var r io.Reader
		// The cancelable context is used purely as a "payload reader is ready"
		// signal: initReader is called from the payload reader handler.
		readerInitCtx, initReader := context.WithCancel(c)
		options := new(client.GetObjectParams).
			WithAddress(formAddress(cid, id)).
			WithPayloadReaderHandler(func(reader io.Reader) {
				r = reader
				initReader()
			})

		obj, err := d.pool.GetObject(c, options, bearerOpts(c))
		if err != nil {
			return err
		}

		header := &zip.FileHeader{
			Name:     getFilename(obj),
			Method:   compression,
			Modified: time.Now(),
		}
		entryWriter, err := zipWriter.CreateHeader(header)
		if err != nil {
			return err
		}

		<-readerInitCtx.Done()
		_, err = io.Copy(entryWriter, r)
		if err != nil {
			return err
		}

		if err = zipWriter.Flush(); err != nil {
			return err
		}
	}

	return zipWriter.Close()
}
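
// Design note: with the zero-value Settings (ZipCompression == false) entries
// are written with zip.Store, so payloads are streamed into the archive
// without recompression; enabling ZipCompression switches entries to
// zip.Deflate.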

func getFilename(obj *object.Object) string {
	for _, attr := range obj.Attributes() {
		if attr.Key() == object.AttributeFileName {
			return attr.Value()
		}
	}

	return ""
}