package handler

import (
	"bytes"
	"context"
	"io"
	"net/http"
	"path"
	"strconv"
	"time"

	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/response"
	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
	"github.com/valyala/fasthttp"
	"go.uber.org/zap"
)
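
// readCloser pairs an io.Reader with a separate io.Closer so that a payload
// whose head has been re-wrapped in another reader still closes the original
// object stream (SetBodyStream closes the body if it implements io.Closer).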
type readCloser struct {
	io.Reader
	io.Closer
}

// readContentType initializes an io.Reader limited to maxSize bytes (capped at
// sizeToDetectType) and detects the Content-Type from the data read from it.
// Returns r's error directly. Also returns the processed data.
func readContentType(maxSize uint64, rInit func(uint64) (io.Reader, error)) (string, []byte, error) {
	if maxSize > sizeToDetectType {
		maxSize = sizeToDetectType
	}

	buf := make([]byte, maxSize) // maybe sync-pool the slice?

	r, err := rInit(maxSize)
	if err != nil {
		return "", nil, err
	}

	n, err := r.Read(buf)
	if err != nil && err != io.EOF {
		return "", nil, err
	}

	buf = buf[:n]

	return http.DetectContentType(buf), buf, err // to not lose io.EOF
}
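
// getMultiobjectBodyParams groups the arguments for Handler.getPayload: the
// object that carries the multipart size attribute, the original request and
// the attribute's string value.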
type getMultiobjectBodyParams struct {
	obj     *Object
	req     request
	strSize string
}
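
// receiveFile fetches the object at objAddress, mirrors its attributes into
// response headers and streams its payload to the client, detecting the
// Content-Type from the payload head when the attribute is absent.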
func (h *Handler) receiveFile(ctx context.Context, req request, objAddress oid.Address) {
	var (
		shouldDownload = req.QueryArgs().GetBool("download")
		start          = time.Now()
		filename       string
		filepath       string
		contentType    string
	)

	prm := PrmObjectGet{
		PrmAuth: PrmAuth{
			BearerToken: bearerToken(ctx),
		},
		Address: objAddress,
	}

	rObj, err := h.frostfs.GetObject(ctx, prm)
	if err != nil {
		req.handleFrostFSErr(err, start)
		return
	}

	// we can't close reader in this function, so how to do it?

	req.setIDs(rObj.Header)
	payload := rObj.Payload
	payloadSize := rObj.Header.PayloadSize()
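	// Mirror object attributes as user response headers and pick out the
	// attributes that drive the HTTP response (filename, timestamp,
	// content type, file path, multipart size).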
	for _, attr := range rObj.Header.Attributes() {
		key := attr.Key()
		val := attr.Value()
		if !isValidToken(key) || !isValidValue(val) {
			continue
		}

		key = utils.BackwardTransformIfSystem(key)

		req.Response.Header.Set(utils.UserAttributeHeaderPrefix+key, val)
		switch key {
		case object.AttributeFileName:
			filename = val
		case object.AttributeTimestamp:
			if err = req.setTimestamp(val); err != nil {
				req.log.Error(logs.CouldntParseCreationDate,
					zap.String("val", val),
					zap.Error(err))
			}
		case object.AttributeContentType:
			contentType = val
		case object.AttributeFilePath:
			filepath = val
		case attributeMultipartObjectSize:
			payload, payloadSize, err = h.getPayload(getMultiobjectBodyParams{
				obj:     rObj,
				req:     req,
				strSize: val,
			})
			if err != nil {
				req.handleFrostFSErr(err, start)
				return
			}
		}
	}
	if filename == "" {
		filename = filepath
	}

	req.setDisposition(shouldDownload, filename)

	req.Response.Header.Set(fasthttp.HeaderContentLength, strconv.FormatUint(payloadSize, 10))

	if len(contentType) == 0 {
		// determine the Content-Type from the payload head
		var payloadHead []byte

		contentType, payloadHead, err = readContentType(payloadSize, func(uint64) (io.Reader, error) {
			return payload, nil
		})
		if err != nil && err != io.EOF {
			req.log.Error(logs.CouldNotDetectContentTypeFromPayload, zap.Error(err))
			response.Error(req.RequestCtx, "could not detect Content-Type from payload: "+err.Error(), fasthttp.StatusBadRequest)
			return
		}

		// reset payload reader since a part of the data has been read
		var headReader io.Reader = bytes.NewReader(payloadHead)

		if err != io.EOF { // otherwise, we've already read full payload
			headReader = io.MultiReader(headReader, payload)
		}

		// note: we could do with io.Reader, but SetBodyStream below closes body stream
		// if it implements io.Closer and that's useful for us.
		payload = readCloser{headReader, payload}
	}
	req.SetContentType(contentType)
	req.Response.SetBodyStream(payload, int(payloadSize))
}
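
// setIDs writes the object ID, owner ID and container ID of obj into the
// corresponding response headers.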
func (r *request) setIDs(obj object.Object) {
	objID, _ := obj.ID()
	cnrID, _ := obj.ContainerID()
	r.Response.Header.Set(hdrObjectID, objID.String())
	r.Response.Header.Set(hdrOwnerID, obj.OwnerID().String())
	r.Response.Header.Set(hdrContainerID, cnrID.String())
}
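
// setDisposition sets the Content-Disposition header: "attachment" when the
// client requested a download, "inline" otherwise.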
func (r *request) setDisposition(shouldDownload bool, filename string) {
	const (
		inlineDisposition     = "inline"
		attachmentDisposition = "attachment"
	)

	dis := inlineDisposition
	if shouldDownload {
		dis = attachmentDisposition
	}

	r.Response.Header.Set(fasthttp.HeaderContentDisposition, dis+"; filename="+path.Base(filename))
}
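
// setTimestamp parses a Unix timestamp (seconds) and sets it as the
// Last-Modified response header.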
func (r *request) setTimestamp(timestamp string) error {
	value, err := strconv.ParseInt(timestamp, 10, 64)
	if err != nil {
		return err
	}
	r.Response.Header.Set(fasthttp.HeaderLastModified,
		time.Unix(value, 0).UTC().Format(http.TimeFormat))

	return nil
}