forked from TrueCloudLab/frostfs-s3-gw
[#432] Add get-object-attributes
Signed-off-by: Angira Kekteeva <kira@nspcc.ru>
This commit is contained in:
parent b778c2e072
commit 7d69f9f74b
5 changed files with 254 additions and 1 deletion
@@ -42,6 +42,7 @@ const (
 	ErrInvalidMaxUploads
 	ErrInvalidMaxParts
 	ErrInvalidPartNumberMarker
+	ErrInvalidAttributeName
 	ErrInvalidPartNumber
 	ErrInvalidRequestBody
 	ErrInvalidCopySource
@@ -335,6 +336,12 @@ var errorCodes = errorCodeMap{
 		Description:    "Argument partNumberMarker must be an integer.",
 		HTTPStatusCode: http.StatusBadRequest,
 	},
+	ErrInvalidAttributeName: {
+		ErrCode:        ErrInvalidAttributeName,
+		Code:           "InvalidArgument",
+		Description:    "Invalid attribute name specified",
+		HTTPStatusCode: http.StatusBadRequest,
+	},
 	ErrInvalidPolicyDocument: {
 		ErrCode: ErrInvalidPolicyDocument,
 		Code:    "InvalidPolicyDocument",
api/handler/attributes.go (new file, 233 lines)
@@ -0,0 +1,233 @@
package handler

import (
	"net/http"
	"strconv"
	"strings"

	"github.com/nspcc-dev/neofs-s3-gw/api"
	"github.com/nspcc-dev/neofs-s3-gw/api/data"
	"github.com/nspcc-dev/neofs-s3-gw/api/errors"
	"github.com/nspcc-dev/neofs-s3-gw/api/layer"
)

type (
	GetObjectAttributesResponse struct {
		ETag         string       `xml:"ETag,omitempty"`
		ObjectSize   int64        `xml:"ObjectSize,omitempty"`
		StorageClass string       `xml:"StorageClass,omitempty"`
		ObjectParts  *ObjectParts `xml:"ObjectParts,omitempty"`
	}

	ObjectParts struct {
		IsTruncated          bool   `xml:"IsTruncated,omitempty"`
		MaxParts             int    `xml:"MaxParts,omitempty"`
		NextPartNumberMarker int    `xml:"NextPartNumberMarker,omitempty"`
		PartNumberMarker     int    `xml:"PartNumberMarker,omitempty"`
		Parts                []Part `xml:"Part,omitempty"`
		PartsCount           int    `xml:"PartsCount,omitempty"`
	}

	Part struct {
		PartNumber int `xml:"PartNumber,omitempty"`
		Size       int `xml:"Size,omitempty"`
	}

	GetObjectAttributesArgs struct {
		MaxParts         int
		PartNumberMarker int
		Attributes       []string
		VersionID        string
	}
)

const (
	partNumberMarkerDefault = -1

	eTag         = "ETag"
	checksum     = "Checksum"
	objectParts  = "ObjectParts"
	storageClass = "StorageClass"
	objectSize   = "ObjectSize"
)

var validAttributes = map[string]struct{}{
	eTag:         {},
	checksum:     {},
	objectParts:  {},
	storageClass: {},
	objectSize:   {},
}

func (h *handler) GetObjectAttributesHandler(w http.ResponseWriter, r *http.Request) {
	var (
		err  error
		info *data.ObjectInfo

		reqInfo = api.GetReqInfo(r.Context())
	)

	params, err := parseGetObjectAttributeArgs(r)
	if err != nil {
		h.logAndSendError(w, "invalid request", reqInfo, err)
		return
	}

	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
	if err != nil {
		h.logAndSendError(w, "could not get bucket info", reqInfo, err)
		return
	}

	p := &layer.HeadObjectParams{
		BktInfo:   bktInfo,
		Object:    reqInfo.ObjectName,
		VersionID: params.VersionID,
	}

	if info, err = h.obj.GetObjectInfo(r.Context(), p); err != nil {
		h.logAndSendError(w, "could not fetch object info", reqInfo, err)
		return
	}

	response, err := encodeToObjectAttributesResponse(info, params)
	if err != nil {
		h.logAndSendError(w, "couldn't encode object info to response", reqInfo, err)
		return
	}

	writeAttributesHeaders(w.Header(), info, params)
	if err = api.EncodeToResponse(w, response); err != nil {
		h.logAndSendError(w, "something went wrong", reqInfo, err)
	}
}

func writeAttributesHeaders(h http.Header, info *data.ObjectInfo, params *GetObjectAttributesArgs) {
	h.Set(api.LastModified, info.Created.UTC().Format(http.TimeFormat))
	if len(params.VersionID) != 0 {
		h.Set(api.AmzVersionID, info.Version())
	}

	if _, ok := info.Headers[layer.VersionsDeleteMarkAttr]; ok {
		h.Set(api.AmzDeleteMarker, strconv.FormatBool(true))
	}

	// x-amz-request-charged
}

func parseGetObjectAttributeArgs(r *http.Request) (*GetObjectAttributesArgs, error) {
	var (
		err error

		res           = &GetObjectAttributesArgs{}
		attributesVal = r.Header.Get("X-Amz-Object-Attributes")
		maxPartsVal   = r.Header.Get("X-Amz-Max-Parts")
		markerVal     = r.Header.Get("X-Amz-Part-Number-Marker")
		queryValues   = r.URL.Query()
	)

	if attributesVal == "" {
		return nil, errors.GetAPIError(errors.ErrInvalidAttributeName)
	}

	attributes := strings.Split(attributesVal, ",")
	for _, a := range attributes {
		if _, ok := validAttributes[a]; !ok {
			return nil, errors.GetAPIError(errors.ErrInvalidAttributeName)
		}
		res.Attributes = append(res.Attributes, a)
	}

	if maxPartsVal == "" {
		res.MaxParts = layer.MaxSizePartsList
	} else if res.MaxParts, err = strconv.Atoi(maxPartsVal); err != nil || res.MaxParts < 0 {
		return nil, errors.GetAPIError(errors.ErrInvalidMaxKeys)
	}

	if markerVal == "" {
		res.PartNumberMarker = partNumberMarkerDefault
	} else if res.PartNumberMarker, err = strconv.Atoi(markerVal); err != nil || res.PartNumberMarker < 0 {
		return nil, errors.GetAPIError(errors.ErrInvalidPartNumberMarker)
	}

	res.VersionID = queryValues.Get(api.QueryVersionID)

	return res, nil
}

func encodeToObjectAttributesResponse(info *data.ObjectInfo, p *GetObjectAttributesArgs) (*GetObjectAttributesResponse, error) {
	resp := &GetObjectAttributesResponse{}

	for _, attr := range p.Attributes {
		switch attr {
		case eTag:
			resp.ETag = info.HashSum
		case storageClass:
			resp.StorageClass = "STANDARD"
		case objectSize:
			resp.ObjectSize = info.Size
		case objectParts:
			parts, err := formUploadAttributes(info, p.MaxParts, p.PartNumberMarker)
			if err != nil {
				return nil, err
			}
			if parts != nil {
				resp.ObjectParts = parts
			}
		}
	}

	return resp, nil
}

func formUploadAttributes(info *data.ObjectInfo, maxParts, marker int) (*ObjectParts, error) {
	res := ObjectParts{}

	if _, ok := info.Headers[layer.UploadIDAttributeName]; !ok {
		return nil, nil
	}

	parts := make([]Part, 0)
	val, ok := info.Headers[layer.UploadCompletedParts]
	if ok {
		pairs := strings.Split(val, ",")
		for _, p := range pairs {
			// nums[0] -- part number, nums[1] -- part size
			nums := strings.Split(p, "=")
			if len(nums) != 2 {
				return nil, nil
			}
			num, err := strconv.Atoi(nums[0])
			if err != nil {
				return nil, err
			}
			size, err := strconv.Atoi(nums[1])
			if err != nil {
				return nil, err
			}
			parts = append(parts, Part{PartNumber: num, Size: size})
		}
	}

	res.PartsCount = len(parts)

	if marker != partNumberMarkerDefault {
		res.PartNumberMarker = marker
		for i, n := range parts {
			if n.PartNumber == marker {
				parts = parts[i:]
				break
			}
		}
	}
	res.MaxParts = maxParts
	if len(parts) > maxParts {
		res.IsTruncated = true
		res.NextPartNumberMarker = parts[maxParts].PartNumber
		parts = parts[:maxParts]
	}

	res.Parts = parts

	return &res, nil
}
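A minimal sketch, not part of this commit, of how the new parser treats the request headers. The test name and values are hypothetical; it assumes it lives in the same handler package:

package handler

import (
	"net/http"
	"net/http/httptest"
	"testing"
)

func TestParseGetObjectAttributeArgsSketch(t *testing.T) {
	// "attributes" in the query string is what the router matches on; the
	// attribute list itself travels in the X-Amz-Object-Attributes header.
	r := httptest.NewRequest(http.MethodGet, "/bucket/object?attributes", nil)
	r.Header.Set("X-Amz-Object-Attributes", "ETag,ObjectSize")
	r.Header.Set("X-Amz-Max-Parts", "100")

	args, err := parseGetObjectAttributeArgs(r)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if len(args.Attributes) != 2 || args.MaxParts != 100 {
		t.Fatalf("unexpected args: %+v", args)
	}
	// No X-Amz-Part-Number-Marker header means the default marker (-1) is used.
	if args.PartNumberMarker != partNumberMarkerDefault {
		t.Fatalf("unexpected part number marker: %d", args.PartNumberMarker)
	}
}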
@@ -22,6 +22,7 @@ const (
 	UploadIDAttributeName = "S3-Upload-Id"
 	UploadPartNumberAttributeName = "S3-Upload-Part-Number"
 	UploadKeyAttributeName = "S3-Upload-Key"
+	UploadCompletedParts = "S3-Completed-Parts"
 	UploadPartKeyPrefix = ".upload-"

 	MaxSizeUploadsList = 1000
@@ -222,7 +223,10 @@ func (x *multiObjectReader) Read(p []byte) (n int, err error) {
 }

 func (n *layer) CompleteMultipartUpload(ctx context.Context, p *CompleteMultipartParams) (*data.ObjectInfo, error) {
-	var obj *data.ObjectInfo
+	var (
+		obj            *data.ObjectInfo
+		partsAttrValue string
+	)

 	for i := 1; i < len(p.Parts); i++ {
 		if p.Parts[i].PartNumber <= p.Parts[i-1].PartNumber {
@@ -277,6 +281,7 @@ func (n *layer) CompleteMultipartUpload(ctx context.Context, p *CompleteMultipar
 			return nil, errors.GetAPIError(errors.ErrEntityTooSmall)
 		}
 		parts = append(parts, info)
+		partsAttrValue += strconv.Itoa(part.PartNumber) + "=" + strconv.FormatInt(info.Size, 10) + ","
 	}

 	initMetadata := objects[0].Headers
@@ -293,6 +298,8 @@ func (n *layer) CompleteMultipartUpload(ctx context.Context, p *CompleteMultipar
 	delete(initMetadata, objectSystemAttributeName)
 	delete(initMetadata, versionsUnversionedAttr)
+
+	initMetadata[UploadCompletedParts] = partsAttrValue[:len(partsAttrValue)-1]

 	r := &multiObjectReader{
 		ctx:   ctx,
 		layer: n,
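The value stored under the S3-Completed-Parts attribute here is the same comma-separated "partNumber=size" list that formUploadAttributes in attributes.go splits back into Part entries. A small illustrative sketch, not part of the commit, with made-up part numbers and sizes:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	// Mirrors how CompleteMultipartUpload accumulates the attribute value:
	// one "number=size" pair per part, comma-separated, trailing comma trimmed.
	parts := []struct {
		Number int
		Size   int64
	}{{1, 5242880}, {2, 5242880}, {3, 1048576}}

	var partsAttrValue string
	for _, p := range parts {
		partsAttrValue += strconv.Itoa(p.Number) + "=" + strconv.FormatInt(p.Size, 10) + ","
	}
	fmt.Println(partsAttrValue[:len(partsAttrValue)-1]) // 1=5242880,2=5242880,3=1048576
}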
@@ -26,6 +26,7 @@ type (
 		GetObjectRetentionHandler(http.ResponseWriter, *http.Request)
 		GetObjectLegalHoldHandler(http.ResponseWriter, *http.Request)
 		GetObjectHandler(http.ResponseWriter, *http.Request)
+		GetObjectAttributesHandler(http.ResponseWriter, *http.Request)
 		CopyObjectHandler(http.ResponseWriter, *http.Request)
 		PutObjectRetentionHandler(http.ResponseWriter, *http.Request)
 		PutObjectLegalHoldHandler(http.ResponseWriter, *http.Request)
@@ -276,6 +277,10 @@ func Attach(r *mux.Router, domains []string, m MaxClients, h Handler, center aut
 	bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
 		m.Handle(metrics.APIStats("getobjectlegalhold", h.GetObjectLegalHoldHandler))).Queries("legal-hold", "").
 		Name("GetObjectLegalHold")
+	// GetObjectAttributes
+	bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
+		m.Handle(metrics.APIStats("getobjectattributes", h.GetObjectAttributesHandler))).Queries("attributes", "").
+		Name("GetObjectAttributes")
 	// GetObject
 	bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
 		m.Handle(metrics.APIStats("getobject", h.GetObjectHandler))).
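The new route matches a GET on the object path carrying an "attributes" query parameter. A rough illustration of the request shape it serves, not part of the commit; the gateway address, bucket, and object names are placeholders, and request signing, which a real S3 client would add, is omitted:

package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Hypothetical gateway address and bucket/object names.
	req, err := http.NewRequest(http.MethodGet, "http://localhost:8080/my-bucket/my-object?attributes", nil)
	if err != nil {
		panic(err)
	}
	// Attributes to return: a comma-separated subset of
	// ETag, Checksum, ObjectParts, StorageClass, ObjectSize.
	req.Header.Set("X-Amz-Object-Attributes", "ETag,ObjectSize,ObjectParts")
	req.Header.Set("X-Amz-Max-Parts", "10")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status)
	fmt.Println(string(body)) // XML-encoded GetObjectAttributesResponse
}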
@@ -26,6 +26,7 @@ Reference:
 | 🟢 | PutObject | Content-MD5 header deprecated |
 | 🔵 | SelectObjectContent | Need to have some Lambda to execute SQL |
 | 🔵 | WriteGetObjectResponse | Waiting for Lambda to be developed |
+| 🟢 | GetObjectAttributes | |

 ## ACL
