package handler

import (
	"fmt"
	"net/http"
	"net/url"
	"strconv"
	"strings"
	"time"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
	"go.uber.org/zap"
)

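// conditionalArgs holds the parsed conditional request headers
// (If-Match, If-None-Match, If-Modified-Since, If-Unmodified-Since)
// used to evaluate GetObject preconditions.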
type conditionalArgs struct {
	IfModifiedSince   *time.Time
	IfUnmodifiedSince *time.Time
	IfMatch           string
	IfNoneMatch       string
}

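// fetchRangeHeader parses the Range request header against the object's full
// size and returns the requested byte range, or nil when the header is absent.
// Only a single "bytes=" range is supported. For example, for a sufficiently
// large object, "bytes=0-499" yields Start=0, End=499, "bytes=9500-" reads from
// offset 9500 to the end, and the suffix form "bytes=-500" selects the last
// 500 bytes. Malformed or unsatisfiable ranges result in an error.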
func fetchRangeHeader(headers http.Header, fullSize uint64) (*layer.RangeParams, error) {
	const prefix = "bytes="
	rangeHeader := headers.Get("Range")
	if len(rangeHeader) == 0 {
		return nil, nil
	}
	if fullSize == 0 {
		return nil, errors.GetAPIError(errors.ErrInvalidRange)
	}
	if !strings.HasPrefix(rangeHeader, prefix) {
		return nil, fmt.Errorf("unknown unit in range header")
	}
	arr := strings.Split(strings.TrimPrefix(rangeHeader, prefix), "-")
	if len(arr) != 2 || (len(arr[0]) == 0 && len(arr[1]) == 0) {
		return nil, fmt.Errorf("unknown byte-range-set")
	}

	var end, start uint64
	var err0, err1 error
	base, bitSize := 10, 64

	if len(arr[0]) == 0 {
		end, err1 = strconv.ParseUint(arr[1], base, bitSize)
		start = fullSize - end
		end = fullSize - 1
	} else if len(arr[1]) == 0 {
		start, err0 = strconv.ParseUint(arr[0], base, bitSize)
		end = fullSize - 1
	} else {
		start, err0 = strconv.ParseUint(arr[0], base, bitSize)
		end, err1 = strconv.ParseUint(arr[1], base, bitSize)
		if end > fullSize-1 {
			end = fullSize - 1
		}
	}

	if err0 != nil || err1 != nil || start > end || start > fullSize {
		return nil, errors.GetAPIError(errors.ErrInvalidRange)
	}
	return &layer.RangeParams{Start: start, End: end}, nil
}

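// overrideResponseHeaders replaces response headers with the values of query
// parameters recognized in api.ResponseModifiers (the S3 response-* overrides
// such as response-content-type, assuming the standard set).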
func overrideResponseHeaders(h http.Header, query url.Values) {
	for key, value := range query {
		if hdr, ok := api.ResponseModifiers[strings.ToLower(key)]; ok {
			h[hdr] = value
		}
	}
}

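// addSSECHeaders echoes the SSE-C algorithm and key MD5 headers from the
// request back into the response for objects encrypted with customer-provided
// keys.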
func addSSECHeaders(responseHeader http.Header, requestHeader http.Header) {
	responseHeader.Set(api.AmzServerSideEncryptionCustomerAlgorithm, requestHeader.Get(api.AmzServerSideEncryptionCustomerAlgorithm))
	responseHeader.Set(api.AmzServerSideEncryptionCustomerKeyMD5, requestHeader.Get(api.AmzServerSideEncryptionCustomerKeyMD5))
}

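// writeNotModifiedHeaders fills the headers returned alongside a 304 Not
// Modified response: ETag, Last-Modified, tagging count, version ID (for
// versioned buckets), Cache-Control and the object's user metadata.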
func writeNotModifiedHeaders(h http.Header, extendedInfo *data.ExtendedObjectInfo, tagSetLength int, isBucketUnversioned, md5Enabled bool) {
	h.Set(api.ETag, data.Quote(extendedInfo.ObjectInfo.ETag(md5Enabled)))
	h.Set(api.LastModified, extendedInfo.ObjectInfo.Created.UTC().Format(http.TimeFormat))
	h.Set(api.AmzTaggingCount, strconv.Itoa(tagSetLength))

	if !isBucketUnversioned {
		h.Set(api.AmzVersionID, extendedInfo.Version())
	}

	if cacheControl := extendedInfo.ObjectInfo.Headers[api.CacheControl]; cacheControl != "" {
		h.Set(api.CacheControl, cacheControl)
	}

	for key, val := range extendedInfo.ObjectInfo.Headers {
		if layer.IsSystemHeader(key) {
			continue
		}
		h[api.MetadataPrefix+key] = []string{val}
	}
}

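// writeHeaders fills the standard GetObject response headers: Content-Type,
// Content-Length (decrypted, multipart or plain size), ETag, Last-Modified,
// storage class, tagging count, version ID, cache/content headers and user
// metadata under api.MetadataPrefix.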
func writeHeaders(h http.Header, requestHeader http.Header, extendedInfo *data.ExtendedObjectInfo, tagSetLength int,
	isBucketUnversioned, md5Enabled bool) {
	info := extendedInfo.ObjectInfo
	if len(info.ContentType) > 0 && h.Get(api.ContentType) == "" {
		h.Set(api.ContentType, info.ContentType)
	}
	h.Set(api.LastModified, info.Created.UTC().Format(http.TimeFormat))

	if len(info.Headers[layer.AttributeEncryptionAlgorithm]) > 0 {
		h.Set(api.ContentLength, info.Headers[layer.AttributeDecryptedSize])
		addSSECHeaders(h, requestHeader)
	} else if len(info.Headers[layer.MultipartObjectSize]) > 0 {
		h.Set(api.ContentLength, info.Headers[layer.MultipartObjectSize])
	} else {
		h.Set(api.ContentLength, strconv.FormatUint(info.Size, 10))
	}

	h.Set(api.ETag, data.Quote(info.ETag(md5Enabled)))

	h.Set(api.AmzTaggingCount, strconv.Itoa(tagSetLength))
	h.Set(api.AmzStorageClass, api.DefaultStorageClass)

	if !isBucketUnversioned {
		h.Set(api.AmzVersionID, extendedInfo.Version())
	}

	if cacheControl := info.Headers[api.CacheControl]; cacheControl != "" {
		h.Set(api.CacheControl, cacheControl)
	}
	if expires := info.Headers[api.Expires]; expires != "" {
		h.Set(api.Expires, expires)
	}
	if encodings := info.Headers[api.ContentEncoding]; encodings != "" {
		h.Set(api.ContentEncoding, encodings)
	}
	if contentLanguage := info.Headers[api.ContentLanguage]; contentLanguage != "" {
		h.Set(api.ContentLanguage, contentLanguage)
	}

	for key, val := range info.Headers {
		if layer.IsSystemHeader(key) {
			continue
		}
		h[api.MetadataPrefix+key] = []string{val}
	}
}

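// GetObjectHandler handles the S3 GetObject request: it resolves the bucket
// and object version, evaluates conditional and Range headers, validates
// SSE-C parameters against the stored object, sets the response headers and
// streams the object payload to the client.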
func (h *handler) GetObjectHandler(w http.ResponseWriter, r *http.Request) {
	var (
		params *layer.RangeParams

		ctx     = r.Context()
		reqInfo = middleware.GetReqInfo(ctx)
	)

	conditional := parseConditionalHeaders(r.Header, h.reqLogger(ctx))

	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
	if err != nil {
		h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
		return
	}

	p := &layer.HeadObjectParams{
		BktInfo:   bktInfo,
		Object:    reqInfo.ObjectName,
		VersionID: reqInfo.URL.Query().Get(api.QueryVersionID),
	}

	extendedInfo, err := h.obj.GetExtendedObjectInfo(ctx, p)
	if err != nil {
		h.logAndSendError(ctx, w, "could not find object", reqInfo, err)
		return
	}
	info := extendedInfo.ObjectInfo

	bktSettings, err := h.obj.GetBucketSettings(ctx, bktInfo)
	if err != nil {
		h.logAndSendError(ctx, w, "could not get bucket settings", reqInfo, err)
		return
	}

	t := &data.ObjectVersion{
		BktInfo:    bktInfo,
		ObjectName: info.Name,
		VersionID:  info.VersionID(),
	}

	tagSet, lockInfo, err := h.obj.GetObjectTaggingAndLock(ctx, t, extendedInfo.NodeVersion)
	if err != nil && !errors.IsS3Error(err, errors.ErrNoSuchKey) {
		h.logAndSendError(ctx, w, "could not get object meta data", reqInfo, err)
		return
	}

	if err = checkPreconditions(info, conditional, h.cfg.MD5Enabled()); err != nil {
		if errors.IsS3Error(err, errors.ErrNotModified) {
			writeNotModifiedHeaders(w.Header(), extendedInfo, len(tagSet), bktSettings.Unversioned(), h.cfg.MD5Enabled())
		}
		h.logAndSendError(ctx, w, "precondition failed", reqInfo, err)
		return
	}

	encryptionParams, err := formEncryptionParams(r)
	if err != nil {
		h.logAndSendError(ctx, w, "invalid sse headers", reqInfo, err)
		return
	}

	if err = encryptionParams.MatchObjectEncryption(layer.FormEncryptionInfo(info.Headers)); err != nil {
		h.logAndSendError(ctx, w, "encryption doesn't match object", reqInfo, errors.GetAPIError(errors.ErrBadRequest), zap.Error(err))
		return
	}

	fullSize, err := layer.GetObjectSize(info)
	if err != nil {
		h.logAndSendError(ctx, w, "invalid size header", reqInfo, errors.GetAPIError(errors.ErrBadRequest))
		return
	}

	if params, err = fetchRangeHeader(r.Header, fullSize); err != nil {
		h.logAndSendError(ctx, w, "could not parse range header", reqInfo, err)
		return
	}

	if layer.IsAuthenticatedRequest(ctx) {
		overrideResponseHeaders(w.Header(), reqInfo.URL.Query())
	}

	if err = h.setLockingHeaders(bktInfo, lockInfo, w.Header()); err != nil {
		h.logAndSendError(ctx, w, "could not get locking info", reqInfo, err)
		return
	}

	getPayloadParams := &layer.GetObjectParams{
		ObjectInfo: info,
		Versioned:  p.Versioned(),
		Range:      params,
		BucketInfo: bktInfo,
		Encryption: encryptionParams,
	}

	objPayload, err := h.obj.GetObject(ctx, getPayloadParams)
	if err != nil {
		h.logAndSendError(ctx, w, "could not get object payload", reqInfo, err)
		return
	}

	writeHeaders(w.Header(), r.Header, extendedInfo, len(tagSet), bktSettings.Unversioned(), h.cfg.MD5Enabled())
	if params != nil {
		writeRangeHeaders(w, params, fullSize)
	} else {
		w.WriteHeader(http.StatusOK)
	}

	if err = objPayload.StreamTo(w); err != nil {
		h.logAndSendError(ctx, w, "could not stream object payload", reqInfo, err)
		return
	}
}

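// checkPreconditions evaluates the If-Match, If-None-Match, If-Modified-Since
// and If-Unmodified-Since arguments against the object's ETag and creation
// time, returning a wrapped ErrNotModified or ErrPreconditionFailed when a
// precondition is not met. A failed If-Unmodified-Since is ignored when
// If-Match is present, matching the behaviour documented for S3 GetObject.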
func checkPreconditions(info *data.ObjectInfo, args *conditionalArgs, md5Enabled bool) error {
	etag := info.ETag(md5Enabled)
	if len(args.IfMatch) > 0 && args.IfMatch != etag {
		return fmt.Errorf("%w: etag mismatched: '%s', '%s'", errors.GetAPIError(errors.ErrPreconditionFailed), args.IfMatch, etag)
	}
	if len(args.IfNoneMatch) > 0 && args.IfNoneMatch == etag {
		return fmt.Errorf("%w: etag matched: '%s', '%s'", errors.GetAPIError(errors.ErrNotModified), args.IfNoneMatch, etag)
	}
	if args.IfModifiedSince != nil && info.Created.Before(*args.IfModifiedSince) {
		return fmt.Errorf("%w: not modified since '%s', last modified '%s'", errors.GetAPIError(errors.ErrNotModified),
			args.IfModifiedSince.Format(time.RFC3339), info.Created.Format(time.RFC3339))
	}
	if args.IfUnmodifiedSince != nil && info.Created.After(*args.IfUnmodifiedSince) {
		// https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html#API_GetObject_RequestSyntax
		// If both of the If-Match and If-Unmodified-Since headers are present in the request as follows:
		// If-Match condition evaluates to true, and;
		// If-Unmodified-Since condition evaluates to false;
		// then, S3 returns 200 OK and the data requested.
		if len(args.IfMatch) == 0 {
			return fmt.Errorf("%w: modified since '%s', last modified '%s'", errors.GetAPIError(errors.ErrPreconditionFailed),
				args.IfUnmodifiedSince.Format(time.RFC3339), info.Created.Format(time.RFC3339))
		}
	}

	return nil
}

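// parseConditionalHeaders extracts the conditional request headers into
// conditionalArgs, unquoting ETags and parsing HTTP dates; dates that fail to
// parse are logged and left unset.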
func parseConditionalHeaders(headers http.Header, log *zap.Logger) *conditionalArgs {
	args := &conditionalArgs{
		IfMatch:     data.UnQuote(headers.Get(api.IfMatch)),
		IfNoneMatch: data.UnQuote(headers.Get(api.IfNoneMatch)),
	}

	if httpTime, err := parseHTTPTime(headers.Get(api.IfModifiedSince)); err == nil {
		args.IfModifiedSince = httpTime
	} else {
		log.Warn(logs.FailedToParseHTTPTime, zap.String(api.IfModifiedSince, headers.Get(api.IfModifiedSince)), zap.Error(err))
	}
	if httpTime, err := parseHTTPTime(headers.Get(api.IfUnmodifiedSince)); err == nil {
		args.IfUnmodifiedSince = httpTime
	} else {
		log.Warn(logs.FailedToParseHTTPTime, zap.String(api.IfUnmodifiedSince, headers.Get(api.IfUnmodifiedSince)), zap.Error(err))
	}

	return args
}

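// parseHTTPTime parses a timestamp in http.TimeFormat (RFC 7231 IMF-fixdate),
// returning nil without error for an empty string.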
func parseHTTPTime(data string) (*time.Time, error) {
	if len(data) == 0 {
		return nil, nil
	}

	result, err := time.Parse(http.TimeFormat, data)
	if err != nil {
		return nil, fmt.Errorf("couldn't parse http time %s: %w", data, err)
	}
	return &result, nil
}

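// writeRangeHeaders sets Accept-Ranges, Content-Range and Content-Length for
// the requested range and writes the 206 Partial Content status.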
func writeRangeHeaders(w http.ResponseWriter, params *layer.RangeParams, size uint64) {
	w.Header().Set(api.AcceptRanges, "bytes")
	w.Header().Set(api.ContentRange, fmt.Sprintf("bytes %d-%d/%d", params.Start, params.End, size))
	w.Header().Set(api.ContentLength, strconv.FormatUint(params.End-params.Start+1, 10))
	w.WriteHeader(http.StatusPartialContent)
}