diff --git a/api/errors/errors.go b/api/errors/errors.go index 47822e1..77c3f23 100644 --- a/api/errors/errors.go +++ b/api/errors/errors.go @@ -180,6 +180,7 @@ const ( ErrGatewayTimeout ErrOperationMaxedOut ErrInvalidRequest + ErrInvalidRequestLargeCopy ErrInvalidStorageClass ErrMalformedJSON @@ -1161,6 +1162,12 @@ var errorCodes = errorCodeMap{ Description: "Invalid Request", HTTPStatusCode: http.StatusBadRequest, }, + ErrInvalidRequestLargeCopy: { + ErrCode: ErrInvalidRequestLargeCopy, + Code: "InvalidRequest", + Description: "CopyObject request made on objects larger than 5GB in size.", + HTTPStatusCode: http.StatusBadRequest, + }, ErrIncorrectContinuationToken: { ErrCode: ErrIncorrectContinuationToken, Code: "InvalidArgument", diff --git a/api/handler/copy.go b/api/handler/copy.go index 21e277b..ba7c746 100644 --- a/api/handler/copy.go +++ b/api/handler/copy.go @@ -106,6 +106,25 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) { } srcObjInfo := extendedSrcObjInfo.ObjectInfo + encryptionParams, err := formEncryptionParams(r) + if err != nil { + h.logAndSendError(w, "invalid sse headers", reqInfo, err) + return + } + + if err = encryptionParams.MatchObjectEncryption(layer.FormEncryptionInfo(srcObjInfo.Headers)); err != nil { + h.logAndSendError(w, "encryption doesn't match object", reqInfo, errors.GetAPIError(errors.ErrBadRequest), zap.Error(err)) + return + } + + if srcSize, err := getObjectSize(extendedSrcObjInfo, encryptionParams); err != nil { + h.logAndSendError(w, "failed to get source object size", reqInfo, err) + return + } else if srcSize > layer.UploadMaxSize { //https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html + h.logAndSendError(w, "too big object to copy with single copy operation, use multipart upload copy instead", reqInfo, errors.GetAPIError(errors.ErrInvalidRequestLargeCopy)) + return + } + args, err := parseCopyObjectArgs(r.Header) if err != nil { h.logAndSendError(w, "could not parse request 
params", reqInfo, err) @@ -144,17 +163,6 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) { } } - encryptionParams, err := formEncryptionParams(r) - if err != nil { - h.logAndSendError(w, "invalid sse headers", reqInfo, err) - return - } - - if err = encryptionParams.MatchObjectEncryption(layer.FormEncryptionInfo(srcObjInfo.Headers)); err != nil { - h.logAndSendError(w, "encryption doesn't match object", reqInfo, errors.GetAPIError(errors.ErrBadRequest), zap.Error(err)) - return - } - if err = checkPreconditions(srcObjInfo, args.Conditional); err != nil { h.logAndSendError(w, "precondition failed", reqInfo, errors.GetAPIError(errors.ErrPreconditionFailed)) return @@ -164,7 +172,8 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) { if len(srcObjInfo.ContentType) > 0 { srcObjInfo.Headers[api.ContentType] = srcObjInfo.ContentType } - metadata = srcObjInfo.Headers + metadata = makeCopyMap(srcObjInfo.Headers) + delete(metadata, layer.MultipartObjectSize) // object payload will be real one rather than list of compound parts } else if contentType := r.Header.Get(api.ContentType); len(contentType) > 0 { metadata[api.ContentType] = contentType } @@ -257,6 +266,14 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) { } } +func makeCopyMap(headers map[string]string) map[string]string { + res := make(map[string]string, len(headers)) + for key, val := range headers { + res[key] = val + } + return res +} + func isCopyingToItselfForbidden(reqInfo *middleware.ReqInfo, srcBucket string, srcObject string, settings *data.BucketSettings, args *copyObjectArgs) bool { if reqInfo.BucketName != srcBucket || reqInfo.ObjectName != srcObject { return false diff --git a/api/handler/copy_test.go b/api/handler/copy_test.go index 5db629d..9a97a55 100644 --- a/api/handler/copy_test.go +++ b/api/handler/copy_test.go @@ -7,6 +7,7 @@ import ( "testing" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api" + 
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer" "github.com/stretchr/testify/require" ) @@ -29,14 +30,14 @@ func TestCopyWithTaggingDirective(t *testing.T) { copyMeta := CopyMeta{ Tags: map[string]string{"key2": "val"}, } - copyObject(t, tc, bktName, objName, objToCopy, copyMeta, http.StatusOK) + copyObject(tc, bktName, objName, objToCopy, copyMeta, http.StatusOK) tagging := getObjectTagging(t, tc, bktName, objToCopy, emptyVersion) require.Len(t, tagging.TagSet, 1) require.Equal(t, "key", tagging.TagSet[0].Key) require.Equal(t, "val", tagging.TagSet[0].Value) copyMeta.TaggingDirective = replaceDirective - copyObject(t, tc, bktName, objName, objToCopy2, copyMeta, http.StatusOK) + copyObject(tc, bktName, objName, objToCopy2, copyMeta, http.StatusOK) tagging = getObjectTagging(t, tc, bktName, objToCopy2, emptyVersion) require.Len(t, tagging.TagSet, 1) require.Equal(t, "key2", tagging.TagSet[0].Key) @@ -51,20 +52,54 @@ func TestCopyToItself(t *testing.T) { copyMeta := CopyMeta{MetadataDirective: replaceDirective} - copyObject(t, tc, bktName, objName, objName, CopyMeta{}, http.StatusBadRequest) - copyObject(t, tc, bktName, objName, objName, copyMeta, http.StatusOK) + copyObject(tc, bktName, objName, objName, CopyMeta{}, http.StatusBadRequest) + copyObject(tc, bktName, objName, objName, copyMeta, http.StatusOK) putBucketVersioning(t, tc, bktName, true) - copyObject(t, tc, bktName, objName, objName, CopyMeta{}, http.StatusOK) - copyObject(t, tc, bktName, objName, objName, copyMeta, http.StatusOK) + copyObject(tc, bktName, objName, objName, CopyMeta{}, http.StatusOK) + copyObject(tc, bktName, objName, objName, copyMeta, http.StatusOK) putBucketVersioning(t, tc, bktName, false) - copyObject(t, tc, bktName, objName, objName, CopyMeta{}, http.StatusOK) - copyObject(t, tc, bktName, objName, objName, copyMeta, http.StatusOK) + copyObject(tc, bktName, objName, objName, CopyMeta{}, http.StatusOK) + copyObject(tc, bktName, objName, objName, copyMeta, http.StatusOK) } -func 
copyObject(t *testing.T, tc *handlerContext, bktName, fromObject, toObject string, copyMeta CopyMeta, statusCode int) { - w, r := prepareTestRequest(tc, bktName, toObject, nil) +func TestCopyMultipart(t *testing.T) { + hc := prepareHandlerContext(t) + + bktName, objName := "bucket-for-copy", "object-for-copy" + createTestBucket(hc, bktName) + + partSize := layer.UploadMinSize + objLen := 6 * partSize + headers := map[string]string{} + + data := multipartUpload(hc, bktName, objName, headers, objLen, partSize) + require.Equal(t, objLen, len(data)) + + objToCopy := "copy-target" + var copyMeta CopyMeta + copyObject(hc, bktName, objName, objToCopy, copyMeta, http.StatusOK) + + copiedData, _ := getObject(hc, bktName, objToCopy) + equalDataSlices(t, data, copiedData) + + result := getObjectAttributes(hc, bktName, objToCopy, objectParts) + require.NotNil(t, result.ObjectParts) + + objToCopy2 := "copy-target2" + copyMeta.MetadataDirective = replaceDirective + copyObject(hc, bktName, objName, objToCopy2, copyMeta, http.StatusOK) + + result = getObjectAttributes(hc, bktName, objToCopy2, objectParts) + require.Nil(t, result.ObjectParts) + + copiedData, _ = getObject(hc, bktName, objToCopy2) + equalDataSlices(t, data, copiedData) +} + +func copyObject(hc *handlerContext, bktName, fromObject, toObject string, copyMeta CopyMeta, statusCode int) { + w, r := prepareTestRequest(hc, bktName, toObject, nil) r.Header.Set(api.AmzCopySource, bktName+"/"+fromObject) r.Header.Set(api.AmzMetadataDirective, copyMeta.MetadataDirective) @@ -79,8 +114,8 @@ func copyObject(t *testing.T, tc *handlerContext, bktName, fromObject, toObject } r.Header.Set(api.AmzTagging, tagsQuery.Encode()) - tc.Handler().CopyObjectHandler(w, r) - assertStatus(t, w, statusCode) + hc.Handler().CopyObjectHandler(w, r) + assertStatus(hc.t, w, statusCode) } func putObjectTagging(t *testing.T, tc *handlerContext, bktName, objName string, tags map[string]string) { diff --git a/api/handler/get_test.go 
b/api/handler/get_test.go index 996dc2e..9ee7df3 100644 --- a/api/handler/get_test.go +++ b/api/handler/get_test.go @@ -217,11 +217,11 @@ func getObjectRange(t *testing.T, tc *handlerContext, bktName, objName string, s } func getObjectAssertS3Error(hc *handlerContext, bktName, objName, version string, code apiErrors.ErrorCode) { - w := getObjectBase(hc, bktName, objName, version) + w := getObjectBaseResponse(hc, bktName, objName, version) assertS3Error(hc.t, w, apiErrors.GetAPIError(code)) } -func getObjectBase(hc *handlerContext, bktName, objName, version string) *httptest.ResponseRecorder { +func getObjectBaseResponse(hc *handlerContext, bktName, objName, version string) *httptest.ResponseRecorder { query := make(url.Values) query.Add(api.QueryVersionID, version) diff --git a/api/layer/multipart_upload.go b/api/layer/multipart_upload.go index 81e1f72..5f81ae6 100644 --- a/api/layer/multipart_upload.go +++ b/api/layer/multipart_upload.go @@ -26,7 +26,10 @@ const ( UploadIDAttributeName = "S3-Upload-Id" UploadPartNumberAttributeName = "S3-Upload-Part-Number" UploadCompletedParts = "S3-Completed-Parts" - MultipartObjectSize = "S3-Multipart-Object-Size" + + // MultipartObjectSize contains the real object size if object is combined (payload contains list of parts). + // This header is used to determine if object is combined. 
+ MultipartObjectSize = "S3-Multipart-Object-Size" metaPrefix = "meta-" aclPrefix = "acl-" @@ -35,8 +38,8 @@ const ( MaxSizePartsList = 1000 UploadMinPartNumber = 1 UploadMaxPartNumber = 10000 - uploadMinSize = 5 * 1048576 // 5MB - uploadMaxSize = 5 * 1073741824 // 5GB + UploadMinSize = 5 * 1024 * 1024 // 5MB + UploadMaxSize = 1024 * UploadMinSize // 5GB ) type ( @@ -184,8 +187,8 @@ func (n *layer) UploadPart(ctx context.Context, p *UploadPartParams) (string, er return "", err } - if p.Size > uploadMaxSize { - return "", fmt.Errorf("%w: %d/%d", s3errors.GetAPIError(s3errors.ErrEntityTooLarge), p.Size, uploadMaxSize) + if p.Size > UploadMaxSize { + return "", fmt.Errorf("%w: %d/%d", s3errors.GetAPIError(s3errors.ErrEntityTooLarge), p.Size, UploadMaxSize) } objInfo, err := n.uploadPart(ctx, multipartInfo, p) @@ -292,8 +295,8 @@ func (n *layer) UploadPartCopy(ctx context.Context, p *UploadCopyParams) (*data. return nil, fmt.Errorf("%w: %d-%d/%d", s3errors.GetAPIError(s3errors.ErrInvalidCopyPartRangeSource), p.Range.Start, p.Range.End, p.SrcObjInfo.Size) } } - if size > uploadMaxSize { - return nil, fmt.Errorf("%w: %d/%d", s3errors.GetAPIError(s3errors.ErrEntityTooLarge), size, uploadMaxSize) + if size > UploadMaxSize { + return nil, fmt.Errorf("%w: %d/%d", s3errors.GetAPIError(s3errors.ErrEntityTooLarge), size, UploadMaxSize) } objPayload, err := n.GetObject(ctx, &GetObjectParams{ @@ -346,8 +349,8 @@ func (n *layer) CompleteMultipartUpload(ctx context.Context, p *CompleteMultipar delete(partsInfo, part.PartNumber) // for the last part we have no minimum size limit - if i != len(p.Parts)-1 && partInfo.Size < uploadMinSize { - return nil, nil, fmt.Errorf("%w: %d/%d", s3errors.GetAPIError(s3errors.ErrEntityTooSmall), partInfo.Size, uploadMinSize) + if i != len(p.Parts)-1 && partInfo.Size < UploadMinSize { + return nil, nil, fmt.Errorf("%w: %d/%d", s3errors.GetAPIError(s3errors.ErrEntityTooSmall), partInfo.Size, UploadMinSize) } parts = append(parts, partInfo) 
multipartObjetSize += partInfo.Size // even if encryption is enabled size is actual (decrypted)