Merge pull request #117 from KirillovDenis/feature/93-support_object_etag

[#93] Object ETag support
Angira Kekteeva 2021-06-30 21:32:33 +03:00 committed by GitHub
commit 7ebcd5af8f
7 changed files with 62 additions and 92 deletions
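The diff below wires the NeoFS payload checksum into the S3 ETag field: PutObject and CopyObject responses report it, GET and HEAD return it as a header, and both list-objects encoders fill it per entry. A rough client-side sketch of the effect, using aws-sdk-go against a placeholder endpoint with placeholder credentials, bucket and key (none of these values come from the diff):

package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Placeholder gateway address and credentials.
	sess := session.Must(session.NewSession(&aws.Config{
		Endpoint:         aws.String("http://localhost:8080"),
		Region:           aws.String("us-east-1"),
		Credentials:      credentials.NewStaticCredentials("ACCESS_KEY", "SECRET_KEY", ""),
		S3ForcePathStyle: aws.Bool(true),
	}))
	client := s3.New(sess)

	// PUT: the response now carries the checksum as ETag.
	put, err := client.PutObject(&s3.PutObjectInput{
		Bucket: aws.String("my-bucket"),
		Key:    aws.String("my-object"),
		Body:   bytes.NewReader([]byte("hello")),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("put etag:", aws.StringValue(put.ETag))

	// COPY: CopyObjectResult in the response now includes ETag as well.
	cp, err := client.CopyObject(&s3.CopyObjectInput{
		Bucket:     aws.String("my-bucket"),
		Key:        aws.String("my-object-copy"),
		CopySource: aws.String("my-bucket/my-object"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("copy etag:", aws.StringValue(cp.CopyObjectResult.ETag))

	// HEAD: the same checksum is reported for an existing object.
	head, err := client.HeadObject(&s3.HeadObjectInput{
		Bucket: aws.String("my-bucket"),
		Key:    aws.String("my-object"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("head etag:", aws.StringValue(head.ETag))

	// LIST: each listed entry now has its ETag filled in.
	list, err := client.ListObjectsV2(&s3.ListObjectsV2Input{Bucket: aws.String("my-bucket")})
	if err != nil {
		log.Fatal(err)
	}
	for _, obj := range list.Contents {
		fmt.Println("list entry:", aws.StringValue(obj.Key), aws.StringValue(obj.ETag))
	}
}

Note that the value is the NeoFS payload checksum stored with the object, not necessarily the MD5 digest some S3 clients assume an ETag to be.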

View File

@@ -81,7 +81,7 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
}, r.URL)
return
} else if err = api.EncodeToResponse(w, &CopyObjectResponse{LastModified: inf.Created.Format(time.RFC3339)}); err != nil {
} else if err = api.EncodeToResponse(w, &CopyObjectResponse{LastModified: inf.Created.Format(time.RFC3339), ETag: inf.HashSum}); err != nil {
h.log.Error("something went wrong",
zap.String("request_id", rid),
zap.String("dst_bucket_name", bkt),

View File

@@ -1,13 +1,10 @@
package handler
import (
"context"
"fmt"
"io"
"net/http"
"strconv"
"strings"
"sync"
"github.com/gorilla/mux"
"github.com/nspcc-dev/neofs-s3-gw/api"
@@ -15,61 +12,6 @@ import (
"go.uber.org/zap"
)
type (
detector struct {
io.Writer
sync.Once
contentType string
}
)
func newDetector(w io.Writer) *detector {
return &detector{Writer: w}
}
func (d *detector) Write(data []byte) (int, error) {
d.Once.Do(func() {
if rw, ok := d.Writer.(http.ResponseWriter); ok {
rw.WriteHeader(http.StatusOK)
}
d.contentType = http.DetectContentType(data)
})
return d.Writer.Write(data)
}
func (h *handler) contentTypeFetcher(ctx context.Context, w io.Writer, info *layer.ObjectInfo) (string, error) {
return h.contentTypeFetcherWithRange(ctx, w, info, nil)
}
func (h *handler) contentTypeFetcherWithRange(ctx context.Context, w io.Writer, info *layer.ObjectInfo, rangeParams *layer.RangeParams) (string, error) {
if info.IsDir() {
if rangeParams != nil {
return "", fmt.Errorf("it is forbidden to request for a range in the directory")
}
return info.ContentType, nil
}
writer := newDetector(w)
params := &layer.GetObjectParams{
Bucket: info.Bucket,
Object: info.Name,
Writer: writer,
Range: rangeParams,
}
// params.Length = inf.Size
if err := h.obj.GetObject(ctx, params); err != nil {
return "", err
}
return writer.contentType, nil
}
func fetchRangeHeader(headers http.Header, fullSize uint64) (*layer.RangeParams, error) {
const prefix = "bytes="
rangeHeader := headers.Get("Range")
@@ -107,9 +49,12 @@ func fetchRangeHeader(headers http.Header, fullSize uint64) (*layer.RangeParams,
}
func writeHeaders(h http.Header, info *layer.ObjectInfo) {
h.Set("Content-Type", info.ContentType)
h.Set("Last-Modified", info.Created.Format(http.TimeFormat))
h.Set("Content-Length", strconv.FormatInt(info.Size, 10))
if len(info.ContentType) > 0 {
h.Set(api.ContentType, info.ContentType)
}
h.Set(api.LastModified, info.Created.Format(http.TimeFormat))
h.Set(api.ContentLength, strconv.FormatInt(info.Size, 10))
h.Set(api.ETag, info.HashSum)
for key, val := range info.Headers {
h.Set("X-"+key, val)
@@ -136,15 +81,20 @@ func (h *handler) GetObjectHandler(w http.ResponseWriter, r *http.Request) {
writeError(w, r, h.log, "could not parse range header", rid, bkt, obj, err)
return
}
if inf.ContentType, err = h.contentTypeFetcherWithRange(r.Context(), w, inf, params); err != nil {
writeError(w, r, h.log, "could not get object", rid, bkt, obj, err)
return
}
writeHeaders(w.Header(), inf)
if params != nil {
writeRangeHeaders(w, params, inf.Size)
}
getParams := &layer.GetObjectParams{
Bucket: inf.Bucket,
Object: inf.Name,
Writer: w,
Range: params,
}
if err = h.obj.GetObject(r.Context(), getParams); err != nil {
writeError(w, r, h.log, "could not get object", rid, bkt, obj, err)
}
}
func writeRangeHeaders(w http.ResponseWriter, params *layer.RangeParams, size int64) {
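With the streaming content-type detector removed, GetObjectHandler now writes the payload straight to the ResponseWriter, and writeHeaders takes everything it needs, including the new ETag, from the stored object info. A minimal sketch of what writeHeaders produces, written as it might look in a test inside this package (the test name and sample values are invented):

package handler

import (
	"net/http"
	"testing"
	"time"

	"github.com/nspcc-dev/neofs-s3-gw/api"
	"github.com/nspcc-dev/neofs-s3-gw/api/layer"
)

// Hypothetical test: writeHeaders propagates the stored checksum as the ETag header.
func TestWriteHeadersETagSketch(t *testing.T) {
	inf := &layer.ObjectInfo{
		ContentType: "text/plain",
		Created:     time.Now(),
		Size:        42,
		HashSum:     "0123abcd", // sample checksum string
	}
	hdr := http.Header{}
	writeHeaders(hdr, inf)

	if got := hdr.Get(api.ETag); got != inf.HashSum {
		t.Errorf("ETag header = %q, want %q", got, inf.HashSum)
	}
	if got := hdr.Get(api.ContentLength); got != "42" {
		t.Errorf("Content-Length header = %q, want %q", got, "42")
	}
}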

View File

@@ -1,6 +1,7 @@
package handler
import (
"bytes"
"context"
"net/http"
@@ -12,10 +13,18 @@ import (
"google.golang.org/grpc/status"
)
type devNull int
const sizeToDetectType = 512
func (d devNull) Write(p []byte) (n int, err error) {
return len(p), nil
func getRangeToDetectContentType(maxSize int64) *layer.RangeParams {
end := uint64(maxSize)
if sizeToDetectType < end {
end = sizeToDetectType
}
return &layer.RangeParams{
Start: 0,
End: end - 1,
}
}
func (h *handler) checkIsFolder(ctx context.Context, bucket, object string) *layer.ObjectInfo {
@@ -73,7 +82,15 @@ func (h *handler) HeadObjectHandler(w http.ResponseWriter, r *http.Request) {
}, r.URL)
return
} else if inf.ContentType, err = h.contentTypeFetcher(r.Context(), devNull(0), inf); err != nil {
}
buffer := bytes.NewBuffer(make([]byte, 0, sizeToDetectType))
getParams := &layer.GetObjectParams{
Bucket: inf.Bucket,
Object: inf.Name,
Writer: buffer,
Range: getRangeToDetectContentType(inf.Size),
}
if err = h.obj.GetObject(r.Context(), getParams); err != nil {
h.log.Error("could not get object",
zap.String("request_id", rid),
zap.String("bucket_name", bkt),
@@ -89,7 +106,7 @@ func (h *handler) HeadObjectHandler(w http.ResponseWriter, r *http.Request) {
return
}
inf.ContentType = http.DetectContentType(buffer.Bytes())
writeHeaders(w.Header(), inf)
w.WriteHeader(http.StatusOK)
}
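HeadObjectHandler now fetches at most the first 512 bytes through a range request and runs http.DetectContentType over that buffer, instead of streaming the whole payload into a discard writer. A small sketch of the ranges the helper yields, again as a hypothetical in-package test:

package handler

import "testing"

// Hypothetical test: the sniffing range never exceeds the 512 bytes
// that http.DetectContentType actually inspects.
func TestRangeToDetectContentTypeSketch(t *testing.T) {
	// Small object: the whole payload is requested, bytes 0-99.
	if r := getRangeToDetectContentType(100); r.Start != 0 || r.End != 99 {
		t.Errorf("unexpected range for small object: %+v", r)
	}
	// Large object: only the first 512 bytes are requested, bytes 0-511.
	if r := getRangeToDetectContentType(1 << 20); r.Start != 0 || r.End != 511 {
		t.Errorf("unexpected range for large object: %+v", r)
	}
}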

View File

@@ -196,7 +196,7 @@ func encodeV1(arg *listObjectsArgs, list *layer.ListObjectsInfo) *ListObjectsRes
DisplayName: obj.Owner.String(),
},
// ETag: "",
ETag: obj.HashSum,
// StorageClass: "",
})
}
@@ -259,7 +259,7 @@ func encodeV2(arg *listObjectsArgs, list *layer.ListObjectsInfo) *ListObjectsV2R
DisplayName: obj.Owner.String(),
},
// ETag: "",
ETag: obj.HashSum,
// StorageClass: "",
})
}

View File

@@ -24,11 +24,12 @@ const (
func (h *handler) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
var (
err error
req = mux.Vars(r)
bkt = req["bucket"]
obj = req["object"]
rid = api.GetRequestID(r.Context())
err error
info *layer.ObjectInfo
req = mux.Vars(r)
bkt = req["bucket"]
obj = req["object"]
rid = api.GetRequestID(r.Context())
)
if _, err := h.obj.GetBucketInfo(r.Context(), bkt); err != nil {
@@ -53,7 +54,7 @@ func (h *handler) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
Size: r.ContentLength,
}
if _, err = h.obj.PutObject(r.Context(), params); err != nil {
if info, err = h.obj.PutObject(r.Context(), params); err != nil {
h.log.Error("could not upload object",
zap.String("request_id", rid),
zap.String("bucket_name", bkt),
@@ -69,6 +70,7 @@ func (h *handler) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
return
}
w.Header().Set(api.ETag, info.HashSum)
api.WriteSuccessResponseHeadersOnly(w)
}
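PutObjectHandler now keeps the *ObjectInfo returned by the layer so it can set the ETag header before api.WriteSuccessResponseHeadersOnly writes the status; with net/http, headers added after the status line has gone out are not sent. A generic illustration of that ordering rule with net/http/httptest (not gateway code):

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

func main() {
	rec := httptest.NewRecorder()
	var w http.ResponseWriter = rec

	w.Header().Set("ETag", "checksum-goes-here") // set before the status: reaches the client
	w.WriteHeader(http.StatusOK)
	w.Header().Set("X-Too-Late", "ignored") // set after the status: never sent

	resp := rec.Result()
	fmt.Println(resp.Header.Get("ETag"))       // "checksum-goes-here"
	fmt.Println(resp.Header.Get("X-Too-Late")) // "" — dropped
}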

View File

@@ -142,6 +142,14 @@ func (n *layer) objectPut(ctx context.Context, p *PutObjectParams) (*ObjectInfo,
return nil, err
}
addr := object.NewAddress()
addr.SetObjectID(oid)
addr.SetContainerID(bkt.CID)
meta, err := n.objectHead(ctx, addr)
if err != nil {
return nil, err
}
return &ObjectInfo{
id: oid,
@@ -152,6 +160,7 @@ func (n *layer) objectPut(ctx context.Context, p *PutObjectParams) (*ObjectInfo,
Created: time.Now(),
Headers: p.Header,
ContentType: r.contentType,
HashSum: meta.PayloadChecksum().String(),
}, nil
}

View File

@@ -1,7 +1,6 @@
package layer
import (
"net/http"
"os"
"strconv"
"strings"
@@ -22,6 +21,7 @@ type (
Size int64
ContentType string
Created time.Time
HashSum string
Owner *owner.ID
Headers map[string]string
}
@@ -102,10 +102,10 @@ func objectInfoFromMeta(bkt *BucketInfo, meta *object.Object, prefix, delimiter
filename = prefix + tail[:index+1]
userHeaders = nil
} else {
size, mimeType = getSizeAndMimeType(meta, mimeType)
size = int64(meta.PayloadSize())
}
} else {
size, mimeType = getSizeAndMimeType(meta, mimeType)
size = int64(meta.PayloadSize())
}
return &ObjectInfo{
@@ -119,18 +119,10 @@ func objectInfoFromMeta(bkt *BucketInfo, meta *object.Object, prefix, delimiter
Headers: userHeaders,
Owner: meta.OwnerID(),
Size: size,
HashSum: meta.PayloadChecksum().String(),
}
}
func getSizeAndMimeType(meta *object.Object, contentType string) (size int64, mimeType string) {
size = int64(meta.PayloadSize())
mimeType = contentType
if len(mimeType) == 0 {
mimeType = http.DetectContentType(meta.Payload())
}
return
}
func filenameFromObject(o *object.Object) string {
var name = o.ID().String()
for _, attr := range o.Attributes() {