package uploader

import (
	"context"
	"encoding/json"
	"io"
	"net/http"
	"strconv"
	"time"

	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/resolver"
	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/response"
	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens"
	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
	"github.com/valyala/fasthttp"
	"go.uber.org/atomic"
	"go.uber.org/zap"
)

const (
	jsonHeader   = "application/json; charset=UTF-8"
	drainBufSize = 4096
)

// Uploader is an upload request handler.
type Uploader struct {
	log               *zap.Logger
	pool              *pool.Pool
	ownerID           *user.ID
	settings          *Settings
	containerResolver *resolver.ContainerResolver
}

// Settings stores reloading parameters, so it has to provide atomic getters and setters.
type Settings struct {
	defaultTimestamp atomic.Bool
}

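// DefaultTimestamp reports whether a default Timestamp attribute should be added to uploaded objects.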
func (s *Settings) DefaultTimestamp() bool {
	return s.defaultTimestamp.Load()
}

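// SetDefaultTimestamp atomically updates the default Timestamp setting.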
func (s *Settings) SetDefaultTimestamp(val bool) {
	s.defaultTimestamp.Store(val)
}

// New creates a new Uploader using specified logger, connection pool and
// other options.
func New(params *utils.AppParams, settings *Settings) *Uploader {
	return &Uploader{
		log:               params.Logger,
		pool:              params.Pool,
		ownerID:           params.Owner,
		settings:          settings,
		containerResolver: params.Resolver,
	}
}

// Upload handles multipart upload request.
func (u *Uploader) Upload(req *fasthttp.RequestCtx) {
	var (
		file       MultipartFile
		idObj      oid.ID
		addr       oid.Address
		scid, _    = req.UserValue("cid").(string)
		log        = u.log.With(zap.String("cid", scid))
		bodyStream = req.RequestBodyStream()
		drainBuf   = make([]byte, drainBufSize)
	)

	ctx := utils.GetContextFromRequest(req)

	idCnr, err := utils.GetContainerID(ctx, scid, u.containerResolver)
	if err != nil {
		log.Error(logs.WrongContainerID, zap.Error(err))
		response.Error(req, "wrong container id", fasthttp.StatusBadRequest)
		return
	}

	defer func() {
		// If the temporary reader can be closed - let's close it.
		if file == nil {
			return
		}
		err := file.Close()
		log.Debug(
			logs.CloseTemporaryMultipartFormFile,
			zap.Stringer("address", addr),
			zap.String("filename", file.FileName()),
			zap.Error(err),
		)
	}()
	boundary := string(req.Request.Header.MultipartFormBoundary())
	if file, err = fetchMultipartFile(u.log, bodyStream, boundary); err != nil {
		log.Error(logs.CouldNotReceiveMultipartForm, zap.Error(err))
		response.Error(req, "could not receive multipart/form: "+err.Error(), fasthttp.StatusBadRequest)
		return
	}
	filtered, err := filterHeaders(u.log, &req.Request.Header)
	if err != nil {
		log.Error(logs.CouldNotProcessHeaders, zap.Error(err))
		response.Error(req, err.Error(), fasthttp.StatusBadRequest)
		return
	}

	now := time.Now()
	if rawHeader := req.Request.Header.Peek(fasthttp.HeaderDate); rawHeader != nil {
		if parsed, err := time.Parse(http.TimeFormat, string(rawHeader)); err != nil {
			log.Warn(logs.CouldNotParseClientTime, zap.String("Date header", string(rawHeader)), zap.Error(err))
		} else {
			now = parsed
		}
	}

	if err = utils.PrepareExpirationHeader(req, u.pool, filtered, now); err != nil {
		log.Error(logs.CouldNotPrepareExpirationHeader, zap.Error(err))
		response.Error(req, "could not prepare expiration header: "+err.Error(), fasthttp.StatusBadRequest)
		return
	}

	attributes := make([]object.Attribute, 0, len(filtered))
	// prepares attributes from filtered headers
	for key, val := range filtered {
		attribute := object.NewAttribute()
		attribute.SetKey(key)
		attribute.SetValue(val)
		attributes = append(attributes, *attribute)
	}
	// sets FileName attribute if it wasn't set from header
	if _, ok := filtered[object.AttributeFileName]; !ok {
		filename := object.NewAttribute()
		filename.SetKey(object.AttributeFileName)
		filename.SetValue(file.FileName())
		attributes = append(attributes, *filename)
	}
	// sets Timestamp attribute if it wasn't set from header and enabled by settings
	if _, ok := filtered[object.AttributeTimestamp]; !ok && u.settings.DefaultTimestamp() {
		timestamp := object.NewAttribute()
		timestamp.SetKey(object.AttributeTimestamp)
		timestamp.SetValue(strconv.FormatInt(time.Now().Unix(), 10))
		attributes = append(attributes, *timestamp)
	}

	obj := object.New()
	obj.SetContainerID(*idCnr)
	obj.SetOwnerID(u.ownerID)
	obj.SetAttributes(attributes...)

	var prm pool.PrmObjectPut
	prm.SetHeader(*obj)
	prm.SetPayload(file)

	bt := u.fetchBearerToken(ctx)
	if bt != nil {
		prm.UseBearer(*bt)
	}

	if idObj, err = u.pool.PutObject(ctx, prm); err != nil {
		u.handlePutFrostFSErr(req, err)
		return
	}

	addr.SetObject(idObj)
	addr.SetContainer(*idCnr)

	// Try to return the response, otherwise, if something went wrong, throw an error.
	if err = newPutResponse(addr).encode(req); err != nil {
		log.Error(logs.CouldNotEncodeResponse, zap.Error(err))
		response.Error(req, "could not encode response", fasthttp.StatusBadRequest)

		return
	}
	// Multipart is multipart and thus can contain more than one part which
	// we ignore at the moment. Also, when dealing with chunked encoding
	// the last zero-length chunk might be left unread (because multipart
	// reader only cares about its boundary and doesn't look further) and
	// it will be (erroneously) interpreted as the start of the next
	// pipelined header. Thus we need to drain the body buffer.
	for {
		_, err = bodyStream.Read(drainBuf)
		if err == io.EOF || err == io.ErrUnexpectedEOF {
			break
		}
	}
	// Report status code and content type.
	req.Response.SetStatusCode(fasthttp.StatusOK)
	req.Response.Header.SetContentType(jsonHeader)
}

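// handlePutFrostFSErr maps a FrostFS put error to an HTTP status code, logs it and writes the error response.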
func (u *Uploader) handlePutFrostFSErr(r *fasthttp.RequestCtx, err error) {
	statusCode, msg, additionalFields := response.FormErrorResponse("could not store file in frostfs", err)
	logFields := append([]zap.Field{zap.Error(err)}, additionalFields...)

	u.log.Error(logs.CouldNotStoreFileInFrostfs, logFields...)
	response.Error(r, msg, statusCode)
}

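// fetchBearerToken returns the bearer token stored in the request context, if any.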
func (u *Uploader) fetchBearerToken(ctx context.Context) *bearer.Token {
	if tkn, err := tokens.LoadBearerToken(ctx); err == nil && tkn != nil {
		return tkn
	}
	return nil
}

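// putResponse is the JSON body returned to the client after a successful upload.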
type putResponse struct {
	ObjectID    string `json:"object_id"`
	ContainerID string `json:"container_id"`
}

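// newPutResponse builds a putResponse from the address of the stored object.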
func newPutResponse(addr oid.Address) *putResponse {
	return &putResponse{
		ObjectID:    addr.Object().EncodeToString(),
		ContainerID: addr.Container().EncodeToString(),
	}
}

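// encode writes the response as tab-indented JSON to w.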
func (pr *putResponse) encode(w io.Writer) error {
	enc := json.NewEncoder(w)
	enc.SetIndent("", "\t")
	return enc.Encode(pr)
}