[#73] Uploader, downloader structures refactoring
All checks were successful
/ Vulncheck (pull_request) Successful in 1m32s
/ Lint (pull_request) Successful in 2m28s
/ Tests (1.20) (pull_request) Successful in 1m52s
/ Tests (1.21) (pull_request) Successful in 1m21s
/ DCO (pull_request) Successful in 3m25s
/ Builds (1.20) (pull_request) Successful in 5m38s
/ Builds (1.21) (pull_request) Successful in 1m23s
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
This commit is contained in:
parent add07a21ed
commit d219943542
27 changed files with 672 additions and 664 deletions
internal/handler/reader.go (new file, 141 lines)
@@ -0,0 +1,141 @@
package handler

import (
	"bytes"
	"context"
	"io"
	"net/http"
	"path"
	"strconv"
	"time"

	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/response"
	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
	"github.com/valyala/fasthttp"
	"go.uber.org/zap"
)

type readCloser struct {
	io.Reader
	io.Closer
}

// initializes io.Reader with the limited size and detects Content-Type from it.
// Returns r's error directly. Also returns the processed data.
func readContentType(maxSize uint64, rInit func(uint64) (io.Reader, error)) (string, []byte, error) {
	if maxSize > sizeToDetectType {
		maxSize = sizeToDetectType
	}

	buf := make([]byte, maxSize) // maybe sync-pool the slice?

	r, err := rInit(maxSize)
	if err != nil {
		return "", nil, err
	}

	n, err := r.Read(buf)
	if err != nil && err != io.EOF {
		return "", nil, err
	}

	buf = buf[:n]

	return http.DetectContentType(buf), buf, err // to not lose io.EOF
}

func (h *Handler) receiveFile(ctx context.Context, req request, objectAddress oid.Address) {
	var (
		err      error
		dis      = "inline"
		start    = time.Now()
		filename string
	)

	var prm pool.PrmObjectGet
	prm.SetAddress(objectAddress)
	if btoken := bearerToken(ctx); btoken != nil {
		prm.UseBearer(*btoken)
	}

	rObj, err := h.pool.GetObject(ctx, prm)
	if err != nil {
		req.handleFrostFSErr(err, start)
		return
	}

	// we can't close reader in this function, so how to do it?

	if req.Request.URI().QueryArgs().GetBool("download") {
		dis = "attachment"
	}

	payloadSize := rObj.Header.PayloadSize()

	req.Response.Header.Set(fasthttp.HeaderContentLength, strconv.FormatUint(payloadSize, 10))
	var contentType string
	for _, attr := range rObj.Header.Attributes() {
		key := attr.Key()
		val := attr.Value()
		if !isValidToken(key) || !isValidValue(val) {
			continue
		}

		key = utils.BackwardTransformIfSystem(key)

		req.Response.Header.Set(utils.UserAttributeHeaderPrefix+key, val)
		switch key {
		case object.AttributeFileName:
			filename = val
		case object.AttributeTimestamp:
			value, err := strconv.ParseInt(val, 10, 64)
			if err != nil {
				req.log.Info(logs.CouldntParseCreationDate,
					zap.String("key", key),
					zap.String("val", val),
					zap.Error(err))
				continue
			}
			req.Response.Header.Set(fasthttp.HeaderLastModified,
				time.Unix(value, 0).UTC().Format(http.TimeFormat))
		case object.AttributeContentType:
			contentType = val
		}
	}

	idsToResponse(&req.Response, &rObj.Header)

	if len(contentType) == 0 {
		// determine the Content-Type from the payload head
		var payloadHead []byte

		contentType, payloadHead, err = readContentType(payloadSize, func(uint64) (io.Reader, error) {
			return rObj.Payload, nil
		})
		if err != nil && err != io.EOF {
			req.log.Error(logs.CouldNotDetectContentTypeFromPayload, zap.Error(err))
			response.Error(req.RequestCtx, "could not detect Content-Type from payload: "+err.Error(), fasthttp.StatusBadRequest)
			return
		}

		// reset payload reader since a part of the data has been read
		var headReader io.Reader = bytes.NewReader(payloadHead)

		if err != io.EOF { // otherwise, we've already read full payload
			headReader = io.MultiReader(headReader, rObj.Payload)
		}

		// note: we could do with io.Reader, but SetBodyStream below closes body stream
		// if it implements io.Closer and that's useful for us.
		rObj.Payload = readCloser{headReader, rObj.Payload}
	}
	req.SetContentType(contentType)

	req.Response.Header.Set(fasthttp.HeaderContentDisposition, dis+"; filename="+path.Base(filename))

	req.Response.SetBodyStream(rObj.Payload, int(payloadSize))
}
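The notable pattern in the new reader.go is how receiveFile sniffs the Content-Type from the first bytes of the payload without losing them: the consumed head is re-attached in front of the unread remainder with io.MultiReader, and the readCloser wrapper keeps the original Closer so fasthttp's SetBodyStream can still release the source. A minimal standalone sketch of that pattern follows; the readCloser type is copied from the diff, while the in-memory payload and the 512-byte sniff limit are illustrative assumptions, not part of the PR:

	package main

	import (
		"bytes"
		"fmt"
		"io"
		"net/http"
		"strings"
	)

	// readCloser mirrors the helper from the diff: it streams from Reader but
	// closes the original source.
	type readCloser struct {
		io.Reader
		io.Closer
	}

	func main() {
		// payload stands in for rObj.Payload (an io.ReadCloser from the pool).
		payload := io.NopCloser(strings.NewReader("<html><body>hello</body></html>"))

		// Sniff the Content-Type from at most 512 bytes (all that
		// http.DetectContentType inspects).
		head := make([]byte, 512)
		n, err := payload.Read(head)
		if err != nil && err != io.EOF {
			panic(err)
		}
		head = head[:n]
		fmt.Println(http.DetectContentType(head)) // text/html; charset=utf-8

		// Stitch the consumed head back in front of the unread remainder,
		// keeping the original Closer, as receiveFile does before handing
		// the stream to SetBodyStream.
		var body io.ReadCloser = readCloser{io.MultiReader(bytes.NewReader(head), payload), payload}
		rest, _ := io.ReadAll(body)
		_ = body.Close()
		fmt.Println(len(rest)) // the full payload is preserved
	}

This is only a sketch of the re-stitching idea; in the PR itself the sniff size is capped by the package-level sizeToDetectType constant and the io.EOF result is propagated so the full-payload case skips the MultiReader entirely.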