forked from TrueCloudLab/frostfs-s3-gw
[#486] Fix PUT object with negative Content-Length
(cherry picked from commit f187141ae5)
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
parent c2e313d899
commit a845e7f798
8 changed files with 71 additions and 12 deletions
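
The essence of the change: PutObjectParams.Size becomes *uint64, so an unknown payload size (the case a negative Content-Length degrades to) reaches the layer as nil instead of a bogus unsigned value; handlers set the pointer only when they actually know the size, and the tests add a small generic ptr helper to build such pointers. A minimal, self-contained sketch of that pattern follows — PutObjectParams here is a stripped-down stand-in for the gateway's real struct, and everything around it is assumed:

package main

import "fmt"

// PutObjectParams mirrors the layer parameters from this diff: Size is a
// pointer so that "size unknown" (nil) is distinct from "size is zero".
type PutObjectParams struct {
	Object string
	Size   *uint64
}

// ptr is the generic helper this commit adds to the tests.
func ptr[T any](t T) *T {
	return &t
}

func main() {
	// Content-Length is known: set the pointer, as the handlers now do
	// with the guard "if size > 0 { params.Size = &size }".
	known := PutObjectParams{Object: "obj-1", Size: ptr(uint64(1024))}

	// Content-Length is negative/unknown (e.g. a chunked upload): leave
	// Size nil and let the layer measure the payload while reading it.
	unknown := PutObjectParams{Object: "obj-2"}

	for _, p := range []PutObjectParams{known, unknown} {
		if p.Size != nil {
			fmt.Printf("%s: payload size %d\n", p.Object, *p.Size)
		} else {
			fmt.Printf("%s: payload size unknown\n", p.Object)
		}
	}
}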
@@ -416,7 +416,7 @@ func createTestObject(hc *handlerContext, bktInfo *data.BucketInfo, objName stri
 	extObjInfo, err := hc.Layer().PutObject(hc.Context(), &layer.PutObjectParams{
 		BktInfo: bktInfo,
 		Object: objName,
-		Size: uint64(len(content)),
+		Size: ptr(uint64(len(content))),
 		Reader: bytes.NewReader(content),
 		Header: header,
 		Encryption: encryption,
@@ -502,3 +502,7 @@ func readResponse(t *testing.T, w *httptest.ResponseRecorder, status int, model
 		require.NoError(t, err)
 	}
 }
+
+func ptr[T any](t T) *T {
+	return &t
+}
@@ -2,6 +2,7 @@ package handler

 import (
 	"crypto/md5"
+	"crypto/rand"
 	"crypto/tls"
 	"encoding/base64"
 	"encoding/hex"
@@ -435,6 +436,37 @@ func TestUploadPartCheckContentSHA256(t *testing.T) {
 	}
 }

+func TestUploadPartWithNegativeContentLength(t *testing.T) {
+	hc := prepareHandlerContext(t)
+
+	bktName, objName := "bucket-to-upload-part", "object-multipart"
+	createTestBucket(hc, bktName)
+	partSize := 5 * 1024 * 1024
+
+	multipartUpload := createMultipartUpload(hc, bktName, objName, map[string]string{})
+
+	partBody := make([]byte, partSize)
+	_, err := rand.Read(partBody)
+	require.NoError(hc.t, err)
+
+	query := make(url.Values)
+	query.Set(uploadIDQuery, multipartUpload.UploadID)
+	query.Set(partNumberQuery, "1")
+
+	w, r := prepareTestRequestWithQuery(hc, bktName, objName, query, partBody)
+	r.ContentLength = -1
+	hc.Handler().UploadPartHandler(w, r)
+	assertStatus(hc.t, w, http.StatusOK)
+
+	completeMultipartUpload(hc, bktName, objName, multipartUpload.UploadID, []string{w.Header().Get(api.ETag)})
+	res, _ := getObject(hc, bktName, objName)
+	equalDataSlices(t, partBody, res)
+
+	resp := getObjectAttributes(hc, bktName, objName, objectParts)
+	require.Len(t, resp.ObjectParts.Parts, 1)
+	require.Equal(t, partSize, resp.ObjectParts.Parts[0].Size)
+}
+
 func uploadPartCopy(hc *handlerContext, bktName, objName, uploadID string, num int, srcObj string, start, end int) *UploadPartCopyResponse {
 	return uploadPartCopyBase(hc, bktName, objName, false, uploadID, num, srcObj, start, end)
 }
@@ -247,13 +247,16 @@ func (h *handler) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
 		BktInfo: bktInfo,
 		Object: reqInfo.ObjectName,
 		Reader: body,
-		Size: size,
 		Header: metadata,
 		Encryption: encryptionParams,
 		ContentMD5: r.Header.Get(api.ContentMD5),
 		ContentSHA256Hash: r.Header.Get(api.AmzContentSha256),
 	}

+	if size > 0 {
+		params.Size = &size
+	}
+
 	params.CopiesNumbers, err = h.pickCopiesNumbers(metadata, reqInfo.Namespace, bktInfo.LocationConstraint)
 	if err != nil {
 		h.logAndSendError(w, "invalid copies number", reqInfo, err)
@@ -490,7 +493,7 @@ func (h *handler) PostObject(w http.ResponseWriter, r *http.Request) {
 		BktInfo: bktInfo,
 		Object: reqInfo.ObjectName,
 		Reader: contentReader,
-		Size: size,
+		Size: &size,
 		Header: metadata,
 	}

@@ -163,6 +163,10 @@ func TestPutObjectWithNegativeContentLength(t *testing.T) {
 	tc.Handler().HeadObjectHandler(w, r)
 	assertStatus(t, w, http.StatusOK)
 	require.Equal(t, strconv.Itoa(len(content)), w.Header().Get(api.ContentLength))
+
+	result := listVersions(t, tc, bktName)
+	require.Len(t, result.Version, 1)
+	require.EqualValues(t, len(content), result.Version[0].Size)
 }

 func TestPutObjectWithStreamBodyError(t *testing.T) {
@@ -97,7 +97,7 @@ type (
 	PutObjectParams struct {
 		BktInfo *data.BucketInfo
 		Object string
-		Size uint64
+		Size *uint64
 		Reader io.Reader
 		Header map[string]string
 		Lock *data.ObjectLock
@@ -512,7 +512,7 @@ func (n *Layer) CopyObject(ctx context.Context, p *CopyObjectParams) (*data.Exte
 	return n.PutObject(ctx, &PutObjectParams{
 		BktInfo: p.DstBktInfo,
 		Object: p.DstObject,
-		Size: p.DstSize,
+		Size: &p.DstSize,
 		Reader: objPayload,
 		Header: p.Header,
 		Encryption: p.DstEncryption,
@@ -453,7 +453,7 @@ func (n *Layer) CompleteMultipartUpload(ctx context.Context, p *CompleteMultipar
 		Object: p.Info.Key,
 		Reader: bytes.NewReader(partsData),
 		Header: initMetadata,
-		Size: multipartObjetSize,
+		Size: &multipartObjetSize,
 		Encryption: p.Info.Encryption,
 		CopiesNumbers: multipartInfo.CopiesNumbers,
 		CompleteMD5Hash: hex.EncodeToString(md5Hash.Sum(nil)) + "-" + strconv.Itoa(len(p.Parts)),
@@ -222,16 +222,20 @@ func (n *Layer) PutObject(ctx context.Context, p *PutObjectParams) (*data.Extend

 	r := p.Reader
 	if p.Encryption.Enabled() {
-		p.Header[AttributeDecryptedSize] = strconv.FormatUint(p.Size, 10)
+		var size uint64
+		if p.Size != nil {
+			size = *p.Size
+		}
+		p.Header[AttributeDecryptedSize] = strconv.FormatUint(size, 10)
 		if err = addEncryptionHeaders(p.Header, p.Encryption); err != nil {
 			return nil, fmt.Errorf("add encryption header: %w", err)
 		}

 		var encSize uint64
-		if r, encSize, err = encryptionReader(p.Reader, p.Size, p.Encryption.Key()); err != nil {
+		if r, encSize, err = encryptionReader(p.Reader, size, p.Encryption.Key()); err != nil {
 			return nil, fmt.Errorf("create encrypter: %w", err)
 		}
-		p.Size = encSize
+		p.Size = &encSize
 	}

 	if r != nil {
@@ -250,12 +254,14 @@ func (n *Layer) PutObject(ctx context.Context, p *PutObjectParams) (*data.Extend

 	prm := PrmObjectCreate{
 		Container: p.BktInfo.CID,
-		PayloadSize: p.Size,
 		Filepath: p.Object,
 		Payload: r,
 		CreationTime: TimeNow(ctx),
 		CopiesNumber: p.CopiesNumbers,
 	}
+	if p.Size != nil {
+		prm.PayloadSize = *p.Size
+	}

 	prm.Attributes = make([][2]string, 0, len(p.Header))

@@ -303,19 +309,25 @@ func (n *Layer) PutObject(ctx context.Context, p *PutObjectParams) (*data.Extend
 			OID: id,
 			ETag: hex.EncodeToString(hash),
 			FilePath: p.Object,
-			Size: p.Size,
 			Created: &now,
 			Owner: &n.gateOwner,
 		},
 		IsUnversioned: !bktSettings.VersioningEnabled(),
 		IsCombined: p.Header[MultipartObjectSize] != "",
 	}

 	if len(p.CompleteMD5Hash) > 0 {
 		newVersion.MD5 = p.CompleteMD5Hash
 	} else {
 		newVersion.MD5 = hex.EncodeToString(md5Hash)
 	}
+
+	if p.Size != nil {
+		newVersion.Size = *p.Size
+	} else {
+		newVersion.Size = size
+	}
+
 	if newVersion.ID, err = n.treeService.AddVersion(ctx, p.BktInfo, newVersion); err != nil {
 		return nil, fmt.Errorf("couldn't add new verion to tree service: %w", err)
 	}
@@ -23,7 +23,7 @@ func (tc *testContext) putObject(content []byte) *data.ObjectInfo {
 	extObjInfo, err := tc.layer.PutObject(tc.ctx, &PutObjectParams{
 		BktInfo: tc.bktInfo,
 		Object: tc.obj,
-		Size: uint64(len(content)),
+		Size: ptr(uint64(len(content))),
 		Reader: bytes.NewReader(content),
 		Header: make(map[string]string),
 	})
@@ -444,3 +444,7 @@ func TestFilterVersionsByMarker(t *testing.T) {
 		})
 	}
 }
+
+func ptr[T any](t T) *T {
+	return &t
+}
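
A note on the new tests, which force r.ContentLength = -1: that is how Go's net/http represents an unknown body length, so a size taken straight from the request cannot be trusted in that case. The sketch below illustrates the handler-side decision with a hypothetical optionalSize helper; the real PutObjectHandler derives size elsewhere and only applies the "if size > 0" guard shown in the diff:

package handler

import "net/http"

// optionalSize is a hypothetical helper: it returns a pointer only when the
// request declares a usable payload size; nil tells the layer to measure the
// body while reading it.
func optionalSize(r *http.Request) *uint64 {
	// net/http sets ContentLength to -1 when the length is unknown
	// (for example chunked transfer encoding, or the -1 the tests in
	// this commit set directly on the request).
	if r.ContentLength > 0 {
		size := uint64(r.ContentLength)
		return &size
	}
	return nil
}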