[#487] Support Range header in object PUT

Open
mbiryukova wants to merge 2 commits from mbiryukova/frostfs-s3-gw:feature/put_range into master
9 changed files with 653 additions and 38 deletions
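
For context, this change lets a client update a byte range of an existing object with an ordinary PUT. A minimal client-side sketch of how the new header might be used (the endpoint, bucket, and object names are placeholders, and a real request to the gateway would also need AWS SigV4 signing):

```go
package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	// Overwrite bytes 7..9 of an existing object with "new";
	// for this form Content-Length must equal the range length.
	// Other forms: "bytes=-1-" appends the body to the object,
	// "bytes=7-" replaces everything from offset 7 onward.
	req, err := http.NewRequest(http.MethodPut,
		"http://localhost:8080/bucket/object", bytes.NewReader([]byte("new")))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Range", "bytes=7-9")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // expect "200 OK" on success
}
```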

View file

@@ -7,6 +7,7 @@ import (
"encoding/xml"
"net/http"
"net/http/httptest"
"strconv"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
@@ -28,7 +29,7 @@ func TestPutObjectACLErrorAPE(t *testing.T) {
info := createBucket(hc, bktName)
putObjectWithHeadersAssertS3Error(hc, bktName, objName, map[string]string{api.AmzACL: basicACLPublic}, s3errors.ErrAccessControlListNotSupported)
putObjectWithHeadersAssertS3Error(hc, bktName, objName, "", map[string]string{api.AmzACL: basicACLPublic}, s3errors.ErrAccessControlListNotSupported)
putObjectWithHeaders(hc, bktName, objName, map[string]string{api.AmzACL: basicACLPrivate}) // only `private` canned acl is allowed, that is actually ignored
putObjectWithHeaders(hc, bktName, objName, nil)
@@ -396,8 +397,14 @@ func putObjectWithHeaders(hc *handlerContext, bktName, objName string, headers m
return w.Header()
}
func putObjectWithHeadersAssertS3Error(hc *handlerContext, bktName, objName string, headers map[string]string, code s3errors.ErrorCode) {
w := putObjectWithHeadersBase(hc, bktName, objName, headers, nil, nil)
func putObjectContentWithHeaders(hc *handlerContext, bktName, objName, content string, headers map[string]string) http.Header {
w := putObjectWithHeadersBase(hc, bktName, objName, headers, nil, []byte(content))
assertStatus(hc.t, w, http.StatusOK)
return w.Header()
}
func putObjectWithHeadersAssertS3Error(hc *handlerContext, bktName, objName, content string, headers map[string]string, code s3errors.ErrorCode) {
w := putObjectWithHeadersBase(hc, bktName, objName, headers, nil, []byte(content))
assertS3Error(hc.t, w, s3errors.GetAPIError(code))
}
@@ -408,6 +415,7 @@ func putObjectWithHeadersBase(hc *handlerContext, bktName, objName string, heade
for k, v := range headers {
r.Header.Set(k, v)
}
r.Header.Set(api.ContentLength, strconv.Itoa(len(data)))
ctx := middleware.SetBox(r.Context(), &middleware.Box{AccessBox: box})
r = r.WithContext(ctx)

View file

@@ -25,7 +25,7 @@ type conditionalArgs struct {
func fetchRangeHeader(headers http.Header, fullSize uint64) (*layer.RangeParams, error) {
const prefix = "bytes="
rangeHeader := headers.Get("Range")
rangeHeader := headers.Get(api.Range)
if len(rangeHeader) == 0 {
return nil, nil
}

View file

@@ -190,6 +190,11 @@ func (h *handler) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
reqInfo = middleware.GetReqInfo(ctx)
)
if rangeStr := r.Header.Get(api.Range); rangeStr != "" {
h.putObjectWithRange(w, r)
return
}
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil {
h.logAndSendError(w, "could not get bucket objInfo", reqInfo, err)
@@ -310,6 +315,171 @@ func (h *handler) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
}
}
func (h *handler) putObjectWithRange(w http.ResponseWriter, r *http.Request) {
var (
err error
ctx = r.Context()
reqInfo = middleware.GetReqInfo(ctx)
rangeStr = r.Header.Get(api.Range)
)
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil {
h.logAndSendError(w, "could not get bucket objInfo", reqInfo, err)
return
}
settings, err := h.obj.GetBucketSettings(ctx, bktInfo)
if err != nil {
h.logAndSendError(w, "could not get bucket settings", reqInfo, err)
return
}
body, err := h.getBodyReader(r)
if err != nil {
h.logAndSendError(w, "failed to get body reader", reqInfo, err)
return
}
srcObjPrm := &layer.HeadObjectParams{
Object: reqInfo.ObjectName,
BktInfo: bktInfo,
VersionID: reqInfo.URL.Query().Get(api.QueryVersionID),
}
extendedSrcObjInfo, err := h.obj.GetExtendedObjectInfo(ctx, srcObjPrm)
if err != nil {
h.logAndSendError(w, "could not find object", reqInfo, err)
return
}
srcSize, err := layer.GetObjectSize(extendedSrcObjInfo.ObjectInfo)
if err != nil {
h.logAndSendError(w, "failed to get source object size", reqInfo, err)
return
}
var contentLen uint64
if r.ContentLength > 0 {
contentLen = uint64(r.ContentLength)
}
byteRange, overwrite, err := parsePutRange(rangeStr, srcSize, contentLen)
if err != nil {
h.logAndSendError(w, "could not parse byte range", reqInfo, errors.GetAPIError(errors.ErrInvalidRange), zap.Error(err))
return
}
if maxPatchSize < byteRange.End-byteRange.Start+1 {
h.logAndSendError(w, "byte range length is longer than allowed", reqInfo, errors.GetAPIError(errors.ErrInvalidRange), zap.Error(err))
return
}
if !overwrite && contentLen != byteRange.End-byteRange.Start+1 {
h.logAndSendError(w, "content-length must be equal to byte range length", reqInfo, errors.GetAPIError(errors.ErrInvalidRangeLength), zap.Error(err))
return
}
if byteRange.Start > srcSize {
h.logAndSendError(w, "start byte is greater than object size", reqInfo, errors.GetAPIError(errors.ErrRangeOutOfBounds))
return
}
params := &layer.PatchObjectParams{
Object: extendedSrcObjInfo,
BktInfo: bktInfo,
NewBytes: body,
Range: byteRange,
VersioningEnabled: settings.VersioningEnabled(),
Overwrite: overwrite,
}
params.CopiesNumbers, err = h.pickCopiesNumbers(nil, reqInfo.Namespace, bktInfo.LocationConstraint)
if err != nil {
h.logAndSendError(w, "invalid copies number", reqInfo, err)
return
}
extendedObjInfo, err := h.obj.PatchObject(ctx, params)
Review

I would try to introduce a new method `h.patchObject` that contains the common logic for `h.putObjectWithRange` and `h.PatchObjectHandler`.
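
A minimal sketch of what such a shared helper might look like, assembled from the response-writing tail that both handlers would otherwise duplicate (the name and signature are hypothetical):

```go
// patchObject applies the patch and writes the common success/error
// response: it maps lock violations to AccessDenied and emits the
// version ID and ETag headers.
func (h *handler) patchObject(ctx context.Context, w http.ResponseWriter, reqInfo *middleware.ReqInfo,
	params *layer.PatchObjectParams, versioningEnabled bool) {
	extendedObjInfo, err := h.obj.PatchObject(ctx, params)
	if err != nil {
		if isErrObjectLocked(err) {
			h.logAndSendError(w, "object is locked", reqInfo, errors.GetAPIError(errors.ErrAccessDenied))
		} else {
			h.logAndSendError(w, "could not patch object", reqInfo, err)
		}
		return
	}
	if versioningEnabled {
		w.Header().Set(api.AmzVersionID, extendedObjInfo.ObjectInfo.VersionID())
	}
	w.Header().Set(api.ETag, data.Quote(extendedObjInfo.ObjectInfo.ETag(h.cfg.MD5Enabled())))
	if err = middleware.WriteSuccessResponseHeadersOnly(w); err != nil {
		h.logAndSendError(w, "write response", reqInfo, err)
	}
}
```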
if err != nil {
if isErrObjectLocked(err) {
h.logAndSendError(w, "object is locked", reqInfo, errors.GetAPIError(errors.ErrAccessDenied))
} else {
h.logAndSendError(w, "could not patch object", reqInfo, err)
}
return
}
if settings.VersioningEnabled() {
w.Header().Set(api.AmzVersionID, extendedObjInfo.ObjectInfo.VersionID())
}
w.Header().Set(api.ETag, data.Quote(extendedObjInfo.ObjectInfo.ETag(h.cfg.MD5Enabled())))
if err = middleware.WriteSuccessResponseHeadersOnly(w); err != nil {
h.logAndSendError(w, "write response", reqInfo, err)
}
}
func parsePutRange(rangeStr string, objSize, contentLen uint64) (*layer.RangeParams, bool, error) {
const prefix = "bytes="
var overwrite bool
if rangeStr == "" {
return nil, overwrite, fmt.Errorf("empty range")
}
if !strings.HasPrefix(rangeStr, prefix) {
return nil, overwrite, fmt.Errorf("unknown unit in range header")
}
rangeStr = strings.TrimPrefix(rangeStr, prefix)
i := strings.LastIndex(rangeStr, "-")
if i < 0 {
return nil, overwrite, fmt.Errorf("invalid range: %s", rangeStr)
}
startStr, endStr := rangeStr[:i], rangeStr[i+1:]
start, err := strconv.ParseInt(startStr, 10, 64)
if err != nil {
return nil, overwrite, fmt.Errorf("invalid start byte: %s", startStr)
}
if start == -1 && len(endStr) == 0 {
return &layer.RangeParams{
Start: objSize,
End: objSize + contentLen - 1,
Review

Why do we use `contentLen` to find out the end of the range to patch?
}, overwrite, nil
}
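
Regarding the `contentLen` question above, a plausible reading of this branch: `bytes=-1-` is the append form, so the patched region starts at the current object size and spans exactly the request body. With objSize = 10 and contentLen = 10 this yields Start = 10, End = 19, which is the first case in TestParsePutRange below; the body length is the only information that tells the parser where the appended range ends.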
if start < 0 {
return nil, overwrite, fmt.Errorf("invalid range: %s", rangeStr)
}
end := uint64(start) + contentLen - 1
if contentLen == 0 {
end = objSize - 1
}
if len(endStr) > 0 {
end, err = strconv.ParseUint(endStr, 10, 64)
if err != nil {
return nil, overwrite, fmt.Errorf("invalid end byte: %s", endStr)
}
} else {
overwrite = true
}
if uint64(start) > end {
return nil, overwrite, fmt.Errorf("start byte is greater than end byte")
}
return &layer.RangeParams{
Start: uint64(start),
End: end,
}, overwrite, nil
}
func (h *handler) getBodyReader(r *http.Request) (io.ReadCloser, error) {
if !api.IsSignedStreamingV4(r) {
return r.Body, nil

View file

@@ -4,9 +4,11 @@ import (
"bytes"
"context"
"crypto/md5"
"crypto/rand"
"encoding/base64"
"encoding/hex"
"encoding/json"
"fmt"
"io"
"mime/multipart"
"net/http"
@@ -34,6 +36,430 @@ const (
awsChunkedRequestExampleContentLength = 66824
)
func TestPutObjectWithRange(t *testing.T) {
hc := prepareHandlerContext(t)
bktName, objName, partSize := "bucket-put-range", "object-put-range", 5*1024*1024
putObjectWithHeadersAssertS3Error(hc, bktName, objName, "content", map[string]string{api.Range: "bytes=-1-"}, s3errors.ErrNoSuchBucket)
createBucket(hc, bktName)
putObjectWithHeadersAssertS3Error(hc, bktName, objName, "content", map[string]string{api.Range: "bytes=-1-"}, s3errors.ErrNoSuchKey)
for _, tt := range []struct {
name string
createObj func()
rng string
rngContent []byte
expected func([]byte, []byte) []byte
code s3errors.ErrorCode
}{
{
name: "append empty regular object",
createObj: func() {
putObjectContent(hc, bktName, objName, "")
},
rng: "bytes=-1-",
rngContent: []byte("content"),
expected: func(_, rngContent []byte) []byte {
return rngContent
},
},
{
name: "append regular object",
createObj: func() {
putObjectContent(hc, bktName, objName, "object")
},
rng: "bytes=-1-",
rngContent: []byte("content"),
expected: func(objContent, rngContent []byte) []byte {
return bytes.Join([][]byte{objContent, rngContent}, []byte{})
},
},
{
name: "append empty multipart object",
createObj: func() {
multipartInfo := createMultipartUpload(hc, bktName, objName, map[string]string{})
etag1, _ := uploadPart(hc, bktName, objName, multipartInfo.UploadID, 1, 0)
completeMultipartUpload(hc, bktName, objName, multipartInfo.UploadID, []string{etag1})
},
rng: "bytes=-1-",
rngContent: []byte("content"),
expected: func(_, rngContent []byte) []byte {
return rngContent
},
},
{
name: "append multipart object",
createObj: func() {
multipartInfo := createMultipartUpload(hc, bktName, objName, map[string]string{})
etag1, _ := uploadPart(hc, bktName, objName, multipartInfo.UploadID, 1, partSize)
etag2, _ := uploadPart(hc, bktName, objName, multipartInfo.UploadID, 2, partSize)
etag3, _ := uploadPart(hc, bktName, objName, multipartInfo.UploadID, 3, partSize)
completeMultipartUpload(hc, bktName, objName, multipartInfo.UploadID, []string{etag1, etag2, etag3})
},
rng: "bytes=-1-",
rngContent: []byte("content"),
expected: func(objContent, rngContent []byte) []byte {
return bytes.Join([][]byte{objContent, rngContent}, []byte{})
},
},
{
name: "update regular object",
createObj: func() {
putObjectContent(hc, bktName, objName, "object old content")
},
rng: "bytes=7-9",
rngContent: []byte("new"),
expected: func(objContent, rngContent []byte) []byte {
return bytes.Join([][]byte{objContent[:7], rngContent, objContent[10:]}, []byte{})
},
},
{
name: "update multipart object",
createObj: func() {
multipartInfo := createMultipartUpload(hc, bktName, objName, map[string]string{})
etag1, _ := uploadPart(hc, bktName, objName, multipartInfo.UploadID, 1, partSize)
etag2, _ := uploadPart(hc, bktName, objName, multipartInfo.UploadID, 2, partSize)
etag3, _ := uploadPart(hc, bktName, objName, multipartInfo.UploadID, 3, partSize)
completeMultipartUpload(hc, bktName, objName, multipartInfo.UploadID, []string{etag1, etag2, etag3})
},
rng: "bytes=" + strconv.Itoa(partSize/2) + "-" + strconv.Itoa(partSize*5/2-1),
rngContent: func() []byte {
rangeContent := make([]byte, partSize*2)
_, err := rand.Read(rangeContent)
require.NoError(t, err)
return rangeContent
}(),
expected: func(objContent, rangeContent []byte) []byte {
return bytes.Join([][]byte{objContent[:partSize/2], rangeContent, objContent[partSize*5/2:]}, []byte{})
},
},
{
name: "overwrite regular object, increase size",
createObj: func() {
putObjectContent(hc, bktName, objName, "object old")
},
rng: "bytes=7-",
rngContent: []byte("new content"),
expected: func(objContent, rangeContent []byte) []byte {
return bytes.Join([][]byte{objContent[:7], rangeContent}, []byte{})
},
},
{
name: "overwrite regular object, decrease size",
createObj: func() {
putObjectContent(hc, bktName, objName, "object old content")
},
rng: "bytes=7-",
rngContent: []byte("new"),
expected: func(objContent, rangeContent []byte) []byte {
return bytes.Join([][]byte{objContent[:7], rangeContent}, []byte{})
},
},
{
name: "overwrite multipart object, increase size",
createObj: func() {
multipartInfo := createMultipartUpload(hc, bktName, objName, map[string]string{})
etag1, _ := uploadPart(hc, bktName, objName, multipartInfo.UploadID, 1, partSize)
etag2, _ := uploadPart(hc, bktName, objName, multipartInfo.UploadID, 2, partSize)
etag3, _ := uploadPart(hc, bktName, objName, multipartInfo.UploadID, 3, partSize)
completeMultipartUpload(hc, bktName, objName, multipartInfo.UploadID, []string{etag1, etag2, etag3})
},
rng: "bytes=" + strconv.Itoa(partSize*3/2) + "-",
rngContent: func() []byte {
rangeContent := make([]byte, partSize*2)
_, err := rand.Read(rangeContent)
require.NoError(t, err)
return rangeContent
}(),
expected: func(objContent, rangeContent []byte) []byte {
return bytes.Join([][]byte{objContent[:partSize*3/2], rangeContent}, []byte{})
},
},
{
name: "overwrite multipart object, reduce number of parts",
createObj: func() {
multipartInfo := createMultipartUpload(hc, bktName, objName, map[string]string{})
etag1, _ := uploadPart(hc, bktName, objName, multipartInfo.UploadID, 1, partSize)
etag2, _ := uploadPart(hc, bktName, objName, multipartInfo.UploadID, 2, partSize)
etag3, _ := uploadPart(hc, bktName, objName, multipartInfo.UploadID, 3, partSize)
completeMultipartUpload(hc, bktName, objName, multipartInfo.UploadID, []string{etag1, etag2, etag3})
},
rng: "bytes=" + strconv.Itoa(partSize/2-1) + "-",
rngContent: func() []byte {
rangeContent := make([]byte, partSize)
_, err := rand.Read(rangeContent)
require.NoError(t, err)
return rangeContent
}(),
expected: func(objContent, rangeContent []byte) []byte {
return bytes.Join([][]byte{objContent[:partSize/2-1], rangeContent}, []byte{})
},
},
{
name: "overwrite multipart object, decrease size of last part",
createObj: func() {
multipartInfo := createMultipartUpload(hc, bktName, objName, map[string]string{})
etag1, _ := uploadPart(hc, bktName, objName, multipartInfo.UploadID, 1, partSize)
etag2, _ := uploadPart(hc, bktName, objName, multipartInfo.UploadID, 2, partSize)
etag3, _ := uploadPart(hc, bktName, objName, multipartInfo.UploadID, 3, partSize)
completeMultipartUpload(hc, bktName, objName, multipartInfo.UploadID, []string{etag1, etag2, etag3})
},
rng: "bytes=" + strconv.Itoa(partSize*3/2) + "-",
rngContent: func() []byte {
rangeContent := make([]byte, partSize)
_, err := rand.Read(rangeContent)
require.NoError(t, err)
return rangeContent
}(),
expected: func(objContent, rangeContent []byte) []byte {
return bytes.Join([][]byte{objContent[:partSize*3/2], rangeContent}, []byte{})
},
},
{
name: "overwrite last part, increase size",
createObj: func() {
multipartInfo := createMultipartUpload(hc, bktName, objName, map[string]string{})
etag1, _ := uploadPart(hc, bktName, objName, multipartInfo.UploadID, 1, partSize)
etag2, _ := uploadPart(hc, bktName, objName, multipartInfo.UploadID, 2, partSize)
etag3, _ := uploadPart(hc, bktName, objName, multipartInfo.UploadID, 3, partSize)
completeMultipartUpload(hc, bktName, objName, multipartInfo.UploadID, []string{etag1, etag2, etag3})
},
rng: "bytes=" + strconv.Itoa(partSize*5/2) + "-",
rngContent: func() []byte {
rangeContent := make([]byte, partSize)
_, err := rand.Read(rangeContent)
require.NoError(t, err)
return rangeContent
}(),
expected: func(objContent, rangeContent []byte) []byte {
return bytes.Join([][]byte{objContent[:partSize*5/2], rangeContent}, []byte{})
},
},
{
name: "overwrite last part, decrease size",
createObj: func() {
multipartInfo := createMultipartUpload(hc, bktName, objName, map[string]string{})
etag1, _ := uploadPart(hc, bktName, objName, multipartInfo.UploadID, 1, partSize)
etag2, _ := uploadPart(hc, bktName, objName, multipartInfo.UploadID, 2, partSize)
etag3, _ := uploadPart(hc, bktName, objName, multipartInfo.UploadID, 3, partSize)
completeMultipartUpload(hc, bktName, objName, multipartInfo.UploadID, []string{etag1, etag2, etag3})
},
rng: "bytes=" + strconv.Itoa(partSize*9/4) + "-",
rngContent: func() []byte {
rangeContent := make([]byte, partSize/2)
_, err := rand.Read(rangeContent)
require.NoError(t, err)
return rangeContent
}(),
expected: func(objContent, rangeContent []byte) []byte {
return bytes.Join([][]byte{objContent[:partSize*9/4], rangeContent}, []byte{})
},
},
{
name: "regular object, empty range content",
createObj: func() {
putObjectContent(hc, bktName, objName, "object old")
},
rng: "bytes=6-",
rngContent: []byte{},
expected: func(objContent, _ []byte) []byte {
return objContent[:6]
},
},
{
name: "multipart object, empty range content, decrease size of last part",
createObj: func() {
multipartInfo := createMultipartUpload(hc, bktName, objName, map[string]string{})
etag1, _ := uploadPart(hc, bktName, objName, multipartInfo.UploadID, 1, partSize)
etag2, _ := uploadPart(hc, bktName, objName, multipartInfo.UploadID, 2, partSize)
etag3, _ := uploadPart(hc, bktName, objName, multipartInfo.UploadID, 3, partSize)
completeMultipartUpload(hc, bktName, objName, multipartInfo.UploadID, []string{etag1, etag2, etag3})
},
rng: "bytes=" + strconv.Itoa(partSize*9/4) + "-",
rngContent: []byte{},
expected: func(objContent, _ []byte) []byte {
return bytes.Join([][]byte{objContent[:partSize*9/4]}, []byte{})
},
},
{
name: "multipart object, empty range content, decrease number of parts",
createObj: func() {
multipartInfo := createMultipartUpload(hc, bktName, objName, map[string]string{})
etag1, _ := uploadPart(hc, bktName, objName, multipartInfo.UploadID, 1, partSize)
etag2, _ := uploadPart(hc, bktName, objName, multipartInfo.UploadID, 2, partSize)
etag3, _ := uploadPart(hc, bktName, objName, multipartInfo.UploadID, 3, partSize)
completeMultipartUpload(hc, bktName, objName, multipartInfo.UploadID, []string{etag1, etag2, etag3})
},
rng: "bytes=" + strconv.Itoa(partSize*5/4) + "-",
rngContent: func() []byte {
return []byte{}
}(),
expected: func(objContent, _ []byte) []byte {
return bytes.Join([][]byte{objContent[:partSize*5/4]}, []byte{})
},
},
{
name: "invalid range length",
createObj: func() {
putObjectContent(hc, bktName, objName, "object")
},
rng: "bytes=4-7",
rngContent: []byte("content"),
code: s3errors.ErrInvalidRangeLength,
},
{
name: "invalid start byte",
createObj: func() {
putObjectContent(hc, bktName, objName, "object")
},
rng: "bytes=7-",
rngContent: []byte("content"),
code: s3errors.ErrRangeOutOfBounds,
},
{
name: "invalid range",
createObj: func() {
putObjectContent(hc, bktName, objName, "object")
},
rng: "bytes=12-6",
rngContent: []byte("content"),
code: s3errors.ErrInvalidRange,
},
{
name: "encrypted object",
createObj: func() {
putEncryptedObject(t, hc, bktName, objName, "object")
},
rng: "bytes=-1-",
rngContent: []byte("content"),
code: s3errors.ErrInternalError,
},
{
name: "range is too long",
createObj: func() {
putObjectContent(hc, bktName, objName, "object")
},
rng: "bytes=0-5368709120",
rngContent: []byte("content"),
code: s3errors.ErrInvalidRange,
},
} {
t.Run(tt.name, func(t *testing.T) {
tt.createObj()
if tt.code == 0 {
objContent, _ := getObject(hc, bktName, objName)
putObjectContentWithHeaders(hc, bktName, objName, string(tt.rngContent), map[string]string{api.Range: tt.rng})
patchedObj, _ := getObject(hc, bktName, objName)
equalDataSlices(t, tt.expected(objContent, tt.rngContent), patchedObj)
} else {
putObjectWithHeadersAssertS3Error(hc, bktName, objName, string(tt.rngContent), map[string]string{api.Range: tt.rng}, tt.code)
}
})
}
}
func TestParsePutRange(t *testing.T) {
for _, tt := range []struct {
rng string
objSize uint64
contentLen uint64
expected *layer.RangeParams
overwrite bool
err bool
}{
{
rng: "bytes=-1-",
objSize: 10,
contentLen: 10,
expected: &layer.RangeParams{
Start: 10,
End: 19,
},
},
{
rng: "bytes=4-7",
expected: &layer.RangeParams{
Start: 4,
End: 7,
},
},
{
rng: "bytes=4-",
contentLen: 7,
expected: &layer.RangeParams{
Start: 4,
End: 10,
},
overwrite: true,
},
{
rng: "bytes=7-",
objSize: 10,
contentLen: 0,
expected: &layer.RangeParams{
Start: 7,
End: 9,
},
overwrite: true,
},
{
rng: "",
err: true,
},
{
rng: "4-7",
err: true,
},
{
rng: "bytes=7-4",
err: true,
},
{
rng: "bytes=-10-",
err: true,
},
{
rng: "bytes=-1-10",
err: true,
},
{
rng: "bytes=1--10",
err: true,
},
{
rng: "bytes=10",
err: true,
},
{
rng: "bytes=10-a",
err: true,
},
{
rng: "bytes=a-10",
err: true,
},
{
rng: "bytes=10-",
objSize: 10,
err: true,
},
} {
t.Run(fmt.Sprintf("case: %s", tt.rng), func(t *testing.T) {
rng, overwrite, err := parsePutRange(tt.rng, tt.objSize, tt.contentLen)
if tt.err {
require.Error(t, err)
} else {
require.NoError(t, err)
require.Equal(t, tt.expected.Start, rng.Start)
require.Equal(t, tt.expected.End, rng.End)
require.Equal(t, tt.overwrite, overwrite)
}
})
}
}
func TestCheckBucketName(t *testing.T) {
for _, tc := range []struct {
name string

View file

@@ -40,6 +40,7 @@ const (
IfUnmodifiedSince = "If-Unmodified-Since"
IfMatch = "If-Match"
IfNoneMatch = "If-None-Match"
Range = "Range"
AmzContentSha256 = "X-Amz-Content-Sha256"
AmzCopyIfModifiedSince = "X-Amz-Copy-Source-If-Modified-Since"

View file

@@ -21,6 +21,7 @@ type PatchObjectParams struct {
Range *RangeParams
VersioningEnabled bool
CopiesNumbers []uint32
Overwrite bool
Review

Why do we need this field? It seems we could just provide a correct `Range` value.
}
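
A possible answer to the question above, judging from the surrounding code: a plain range patch never shortens an object, so `Range` alone cannot express the tail form `bytes=start-` when the body is shorter than the replaced tail; the old bytes past the new end would survive. `Overwrite` carries that truncation intent, which is why `prmPatch.Length` is set to `p.Object.ObjectInfo.Size - p.Range.Start` just below.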
func (n *Layer) PatchObject(ctx context.Context, p *PatchObjectParams) (*data.ExtendedObjectInfo, error) {
@@ -42,6 +43,10 @@ func (n *Layer) PatchObject(ctx context.Context, p *PatchObjectParams) (*data.Ex
}
n.prepareAuthParameters(ctx, &prmPatch.PrmAuth, p.BktInfo.Owner)
if p.Overwrite {
prmPatch.Length = p.Object.ObjectInfo.Size - p.Range.Start
}
createdObj, err := n.patchObject(ctx, prmPatch)
if err != nil {
return nil, fmt.Errorf("patch object: %w", err)
@@ -115,9 +120,13 @@ func (n *Layer) patchMultipartObject(ctx context.Context, p *PatchObjectParams)
}
n.prepareAuthParameters(ctx, &prmPatch.PrmAuth, p.BktInfo.Owner)
off, ln := p.Range.Start, p.Range.End-p.Range.Start+1
var multipartObjectSize uint64
for i, part := range parts {
var (
multipartObjectSize uint64
i int
part *data.PartInfo
off, ln = p.Range.Start, p.Range.End - p.Range.Start + 1
)
for i, part = range parts {
if off > part.Size || (off == part.Size && i != len(parts)-1) || ln == 0 {
multipartObjectSize += part.Size
if ln != 0 {
@@ -133,21 +142,27 @@ func (n *Layer) patchMultipartObject(ctx context.Context, p *PatchObjectParams)
}
parts[i].OID = createdObj.ID
parts[i].Size = createdObj.Size
parts[i].MD5 = ""
parts[i].ETag = hex.EncodeToString(createdObj.HashSum)
multipartObjectSize += createdObj.Size
if createdObj.Size < parts[i].Size {
parts[i].Size = createdObj.Size
break
}
parts[i].Size = createdObj.Size
}
return n.updateCombinedObject(ctx, parts, multipartObjectSize, p)
return n.updateCombinedObject(ctx, parts[:i+1], multipartObjectSize, p)
}
// Returns patched part info, updated offset and length.
func (n *Layer) patchPart(ctx context.Context, part *data.PartInfo, p *PatchObjectParams, prmPatch *PrmObjectPatch, off, ln uint64, lastPart bool) (*data.CreatedObjectInfo, uint64, uint64, error) {
if off == 0 && ln >= part.Size {
if off == 0 && (ln >= part.Size || p.Overwrite) {
curLen := part.Size
if lastPart {
if lastPart || (p.Overwrite && ln > part.Size) {
curLen = ln
}
prm := PrmObjectCreate{
@@ -162,13 +177,12 @@ func (n *Layer) patchPart(ctx context.Context, part *data.PartInfo, p *PatchObje
return nil, 0, 0, fmt.Errorf("put new part object '%s': %w", part.OID.EncodeToString(), err)
}
ln -= curLen
ln -= min(curLen, createdObj.Size)
return createdObj, off, ln, err
}
curLen := ln
if off+curLen > part.Size && !lastPart {
if (off+curLen > part.Size && !lastPart) || (p.Overwrite && off+curLen < part.Size) {
curLen = part.Size - off
}
prmPatch.Object = part.OID
@@ -183,7 +197,7 @@ func (n *Layer) patchPart(ctx context.Context, part *data.PartInfo, p *PatchObje
return nil, 0, 0, fmt.Errorf("patch part object '%s': %w", part.OID.EncodeToString(), err)
}
ln -= curLen
ln -= min(curLen, createdObj.Size-off)
off = 0
return createdObj, off, ln, nil
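
To make the off/ln bookkeeping concrete, mirroring the "update multipart object" test case above (numbers are illustrative): with three 5 MiB parts and a 10 MiB body patching the range [2.5 MiB, 12.5 MiB), part 1 is patched from off = 2.5 MiB with curLen clamped to 2.5 MiB at the part boundary, after which ln drops to 7.5 MiB and off resets to 0; part 2 is replaced whole, leaving ln = 2.5 MiB; and part 3 is patched over its first 2.5 MiB, bringing ln to 0.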

go.mod (4 changed lines)
View file

@@ -3,10 +3,10 @@ module git.frostfs.info/TrueCloudLab/frostfs-s3-gw
go 1.22
require (
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240819074700-a43110e36326
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240902111049-c11f50efeccb
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20230531082742-c97d21411eb6
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240822080251-28f140bf06c1
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240912121252-d342c0bc1675
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240822104152-a3bc3099bd5b
git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02
github.com/aws/aws-sdk-go v1.44.6

go.sum (8 changed lines)
View file

@@ -36,16 +36,16 @@ cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RX
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240819074700-a43110e36326 h1:TkH+NSsY4C/Z8MocIJyMcqLm5vEhZcSowOldJyilKKA=
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240819074700-a43110e36326/go.mod h1:zZnHiRv9m5+ESYLhBXY9Jds9A/YIDEUGiuyPUS09HwM=
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240902111049-c11f50efeccb h1:p9ByDsw+H6p6LyYSx8LKFtAG/oPKQpDVMNfjPqdevTw=
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240902111049-c11f50efeccb/go.mod h1:BDnEpkKMykCS8u1nLzR6SgNzCv6885RWlo5TnravQuI=
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e h1:kcBqZBiFIUBATUqEuvVigtkJJWQ2Gug/eYXn967o3M4=
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e/go.mod h1:F/fe1OoIDKr5Bz99q4sriuHDuf3aZefZy9ZsCqEtgxc=
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk=
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0/go.mod h1:RUIKZATQLJ+TaYQa60X2fTDwfuhMfm8Ar60bQ5fr+vU=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20230531082742-c97d21411eb6 h1:aGQ6QaAnTerQ5Dq5b2/f9DUQtSqPkZZ/bkMx/HKuLCo=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20230531082742-c97d21411eb6/go.mod h1:W8Nn08/l6aQ7UlIbpF7FsQou7TVpcRD1ZT1KG4TrFhE=
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240822080251-28f140bf06c1 h1:+Z55WxE1ad/LBzRX1dqgaWlXAQ/NDjUsBlwEIZ4rn6k=
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240822080251-28f140bf06c1/go.mod h1:Pl77loECndbgIC0Kljj1MFmGJKQ9gotaFINyveW1T8I=
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240912121252-d342c0bc1675 h1:NuxHi5QpEbLgu5R3TWbQEr+fTsAkHk5u0yJKmiRcd2o=
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240912121252-d342c0bc1675/go.mod h1:I4GzeEnftO++N2JHQn9QR88eV0rjQkGqlUCNMnCz1CY=
git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc=
git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM=
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240822104152-a3bc3099bd5b h1:M50kdfrf/h8c3cz0bJ2AEUcbXvAlPFVC1Wp1WkfZ/8E=

View file

@@ -19,21 +19,19 @@
)
const (
methodGetBalance = "get_balance"
methodPutContainer = "put_container"
methodGetContainer = "get_container"
methodListContainer = "list_container"
methodDeleteContainer = "delete_container"
methodGetContainerEacl = "get_container_eacl"
methodSetContainerEacl = "set_container_eacl"
methodEndpointInfo = "endpoint_info"
methodNetworkInfo = "network_info"
methodPutObject = "put_object"
methodDeleteObject = "delete_object"
methodGetObject = "get_object"
methodHeadObject = "head_object"
methodRangeObject = "range_object"
methodCreateSession = "create_session"
methodGetBalance = "get_balance"
methodPutContainer = "put_container"
methodGetContainer = "get_container"
methodListContainer = "list_container"
methodDeleteContainer = "delete_container"
methodEndpointInfo = "endpoint_info"
methodNetworkInfo = "network_info"
methodPutObject = "put_object"
methodDeleteObject = "delete_object"
methodGetObject = "get_object"
methodHeadObject = "head_object"
methodRangeObject = "range_object"
methodCreateSession = "create_session"
)
type poolMetricsCollector struct {
@@ -107,8 +105,6 @@ func (m *poolMetricsCollector) updateRequestsDuration(node pool.NodeStatistic) {
m.requestDuration.WithLabelValues(node.Address(), methodGetContainer).Set(float64(node.AverageGetContainer().Milliseconds()))
m.requestDuration.WithLabelValues(node.Address(), methodListContainer).Set(float64(node.AverageListContainer().Milliseconds()))
m.requestDuration.WithLabelValues(node.Address(), methodDeleteContainer).Set(float64(node.AverageDeleteContainer().Milliseconds()))
m.requestDuration.WithLabelValues(node.Address(), methodGetContainerEacl).Set(float64(node.AverageGetContainerEACL().Milliseconds()))
m.requestDuration.WithLabelValues(node.Address(), methodSetContainerEacl).Set(float64(node.AverageSetContainerEACL().Milliseconds()))
m.requestDuration.WithLabelValues(node.Address(), methodEndpointInfo).Set(float64(node.AverageEndpointInfo().Milliseconds()))
m.requestDuration.WithLabelValues(node.Address(), methodNetworkInfo).Set(float64(node.AverageNetworkInfo().Milliseconds()))
m.requestDuration.WithLabelValues(node.Address(), methodPutObject).Set(float64(node.AveragePutObject().Milliseconds()))