package handler

import (
	"bytes"
	"crypto/md5"
	"crypto/rand"
	"crypto/sha256"
	"encoding/hex"
	"encoding/xml"
	"fmt"
	"net/http"
	"net/http/httptest"
	"net/url"
	"strconv"
	"strings"
	"testing"
	"time"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
	apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
	"github.com/stretchr/testify/require"
)
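// TestPatch exercises PatchObjectHandler on a regular object: a successful
// range patch with satisfied If-Match/If-Unmodified-Since preconditions,
// an ignored malformed If-Unmodified-Since value, and the error codes
// returned for invalid Content-Range values and failed preconditions.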
func TestPatch(t *testing.T) {
	tc := prepareHandlerContext(t)
	tc.config.md5Enabled = true

	bktName, objName := "bucket-for-patch", "object-for-patch"
	createTestBucket(tc, bktName)

	content := []byte("old object content")
	md5Hash := md5.New()
	md5Hash.Write(content)
	etag := data.Quote(hex.EncodeToString(md5Hash.Sum(nil)))

	w, r := prepareTestPayloadRequest(tc, bktName, objName, bytes.NewReader(content))
	created := time.Now()
	tc.Handler().PutObjectHandler(w, r)
	require.Equal(t, etag, w.Header().Get(api.ETag))

	patchPayload := []byte("new")
	sha256Hash := sha256.New()
	sha256Hash.Write(patchPayload)
	sha256Hash.Write(content[len(patchPayload):])
	hash := hex.EncodeToString(sha256Hash.Sum(nil))

	for _, tt := range []struct {
		name    string
		rng     string
		headers map[string]string
		code    apierr.ErrorCode
	}{
		{
			name: "success",
			rng:  "bytes 0-2/*",
			headers: map[string]string{
				api.IfUnmodifiedSince: created.Format(http.TimeFormat),
				api.IfMatch:           etag,
			},
		},
		{
			name: "If-Unmodified-Since invalid format, header is ignored",
			rng:  "bytes 0-2/*",
			headers: map[string]string{
				api.IfUnmodifiedSince: created.Add(-24 * time.Hour).Format(time.RFC3339),
			},
		},
		{
			name: "invalid range syntax",
			rng:  "bytes 0-2",
			code: apierr.ErrInvalidRange,
		},
		{
			name: "invalid range length",
			rng:  "bytes 0-5/*",
			code: apierr.ErrInvalidRangeLength,
		},
		{
			name: "invalid range start",
			rng:  "bytes 20-22/*",
			code: apierr.ErrRangeOutOfBounds,
		},
		{
			name: "range is too long",
			rng:  "bytes 0-5368709120/*",
			code: apierr.ErrInvalidRange,
		},
		{
			name: "If-Unmodified-Since precondition is not satisfied",
			rng:  "bytes 0-2/*",
			headers: map[string]string{
				api.IfUnmodifiedSince: created.Add(-24 * time.Hour).Format(http.TimeFormat),
			},
			code: apierr.ErrPreconditionFailed,
		},
		{
			name: "If-Match precondition is not satisfied",
			rng:  "bytes 0-2/*",
			headers: map[string]string{
				api.IfMatch: "etag",
			},
			code: apierr.ErrPreconditionFailed,
		},
	} {
		t.Run(tt.name, func(t *testing.T) {
			if tt.code == 0 {
				res := patchObject(t, tc, bktName, objName, tt.rng, patchPayload, tt.headers)
				require.Equal(t, data.Quote(hash), res.Object.ETag)
			} else {
				patchObjectErr(tc, bktName, objName, tt.rng, patchPayload, tt.headers, tt.code)
			}
		})
	}
}
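// TestPatchMultipartObject patches completed multipart uploads at various
// offsets: the beginning and middle of a part, across part boundaries,
// over all parts, and past the end of the object (append). Each case checks
// the resulting payload, Content-Length, and the "-N" multipart ETag suffix.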
func TestPatchMultipartObject(t *testing.T) {
	tc := prepareHandlerContextWithMinCache(t)
	tc.config.md5Enabled = true

	bktName, objName, partSize := "bucket-for-multipart-patch", "object-for-multipart-patch", 5*1024*1024
	createTestBucket(tc, bktName)

	t.Run("patch beginning of the first part", func(t *testing.T) {
		multipartInfo := createMultipartUpload(tc, bktName, objName, map[string]string{})
		etag1, data1 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 1, partSize)
		etag2, data2 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 2, partSize)
		etag3, data3 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 3, partSize)
		completeMultipartUpload(tc, bktName, objName, multipartInfo.UploadID, []string{etag1, etag2, etag3})

		patchSize := partSize / 2
		patchBody := make([]byte, patchSize)
		_, err := rand.Read(patchBody)
		require.NoError(t, err)

		patchObject(t, tc, bktName, objName, "bytes 0-"+strconv.Itoa(patchSize-1)+"/*", patchBody, nil)
		object, header := getObject(tc, bktName, objName)
		contentLen, err := strconv.Atoi(header.Get(api.ContentLength))
		require.NoError(t, err)
		equalDataSlices(t, bytes.Join([][]byte{patchBody, data1[patchSize:], data2, data3}, []byte("")), object)
		require.Equal(t, partSize*3, contentLen)
		require.True(t, strings.HasSuffix(data.UnQuote(header.Get(api.ETag)), "-3"))
	})

	t.Run("patch middle of the first part", func(t *testing.T) {
		multipartInfo := createMultipartUpload(tc, bktName, objName, map[string]string{})
		etag1, data1 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 1, partSize)
		etag2, data2 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 2, partSize)
		etag3, data3 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 3, partSize)
		completeMultipartUpload(tc, bktName, objName, multipartInfo.UploadID, []string{etag1, etag2, etag3})

		patchSize := partSize / 2
		patchBody := make([]byte, patchSize)
		_, err := rand.Read(patchBody)
		require.NoError(t, err)

		patchObject(t, tc, bktName, objName, "bytes "+strconv.Itoa(partSize/4)+"-"+strconv.Itoa(partSize*3/4-1)+"/*", patchBody, nil)
		object, header := getObject(tc, bktName, objName)
		contentLen, err := strconv.Atoi(header.Get(api.ContentLength))
		require.NoError(t, err)
		equalDataSlices(t, bytes.Join([][]byte{data1[:partSize/4], patchBody, data1[partSize*3/4:], data2, data3}, []byte("")), object)
		require.Equal(t, partSize*3, contentLen)
		require.True(t, strings.HasSuffix(data.UnQuote(header.Get(api.ETag)), "-3"))
	})

	t.Run("patch first and second parts", func(t *testing.T) {
		multipartInfo := createMultipartUpload(tc, bktName, objName, map[string]string{})
		etag1, data1 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 1, partSize)
		etag2, data2 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 2, partSize)
		etag3, data3 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 3, partSize)
		completeMultipartUpload(tc, bktName, objName, multipartInfo.UploadID, []string{etag1, etag2, etag3})

		patchSize := partSize / 2
		patchBody := make([]byte, patchSize)
		_, err := rand.Read(patchBody)
		require.NoError(t, err)

		patchObject(t, tc, bktName, objName, "bytes "+strconv.Itoa(partSize*3/4)+"-"+strconv.Itoa(partSize*5/4-1)+"/*", patchBody, nil)
		object, header := getObject(tc, bktName, objName)
		contentLen, err := strconv.Atoi(header.Get(api.ContentLength))
		require.NoError(t, err)
		equalDataSlices(t, bytes.Join([][]byte{data1[:partSize*3/4], patchBody, data2[partSize/4:], data3}, []byte("")), object)
		require.Equal(t, partSize*3, contentLen)
		require.True(t, strings.HasSuffix(data.UnQuote(header.Get(api.ETag)), "-3"))
	})

	t.Run("patch all parts", func(t *testing.T) {
		multipartInfo := createMultipartUpload(tc, bktName, objName, map[string]string{})
		etag1, data1 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 1, partSize)
		etag2, _ := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 2, partSize)
		etag3, data3 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 3, partSize)
		completeMultipartUpload(tc, bktName, objName, multipartInfo.UploadID, []string{etag1, etag2, etag3})

		patchSize := partSize * 2
		patchBody := make([]byte, patchSize)
		_, err := rand.Read(patchBody)
		require.NoError(t, err)

		patchObject(t, tc, bktName, objName, "bytes "+strconv.Itoa(partSize/2-1)+"-"+strconv.Itoa(partSize/2+patchSize-2)+"/*", patchBody, nil)
		object, header := getObject(tc, bktName, objName)
		contentLen, err := strconv.Atoi(header.Get(api.ContentLength))
		require.NoError(t, err)
		equalDataSlices(t, bytes.Join([][]byte{data1[:partSize/2-1], patchBody, data3[partSize/2-1:]}, []byte("")), object)
		require.Equal(t, partSize*3, contentLen)
		require.True(t, strings.HasSuffix(data.UnQuote(header.Get(api.ETag)), "-3"))
	})

	t.Run("patch all parts and append bytes", func(t *testing.T) {
		multipartInfo := createMultipartUpload(tc, bktName, objName, map[string]string{})
		etag1, data1 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 1, partSize)
		etag2, _ := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 2, partSize)
		etag3, _ := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 3, partSize)
		completeMultipartUpload(tc, bktName, objName, multipartInfo.UploadID, []string{etag1, etag2, etag3})

		patchSize := partSize * 3
		patchBody := make([]byte, patchSize)
		_, err := rand.Read(patchBody)
		require.NoError(t, err)

		patchObject(t, tc, bktName, objName, "bytes "+strconv.Itoa(partSize/2)+"-"+strconv.Itoa(partSize/2+patchSize-1)+"/*", patchBody, nil)
		object, header := getObject(tc, bktName, objName)
		contentLen, err := strconv.Atoi(header.Get(api.ContentLength))
		require.NoError(t, err)
		equalDataSlices(t, bytes.Join([][]byte{data1[:partSize/2], patchBody}, []byte("")), object)
		require.Equal(t, partSize*7/2, contentLen)
		require.True(t, strings.HasSuffix(data.UnQuote(header.Get(api.ETag)), "-3"))
	})

	t.Run("patch second part", func(t *testing.T) {
		multipartInfo := createMultipartUpload(tc, bktName, objName, map[string]string{})
		etag1, data1 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 1, partSize)
		etag2, _ := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 2, partSize)
		etag3, data3 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 3, partSize)
		completeMultipartUpload(tc, bktName, objName, multipartInfo.UploadID, []string{etag1, etag2, etag3})

		patchBody := make([]byte, partSize)
		_, err := rand.Read(patchBody)
		require.NoError(t, err)

		patchObject(t, tc, bktName, objName, "bytes "+strconv.Itoa(partSize)+"-"+strconv.Itoa(partSize*2-1)+"/*", patchBody, nil)
		object, header := getObject(tc, bktName, objName)
		contentLen, err := strconv.Atoi(header.Get(api.ContentLength))
		require.NoError(t, err)
		equalDataSlices(t, bytes.Join([][]byte{data1, patchBody, data3}, []byte("")), object)
		require.Equal(t, partSize*3, contentLen)
		require.True(t, strings.HasSuffix(data.UnQuote(header.Get(api.ETag)), "-3"))
	})

	t.Run("patch last part, equal size", func(t *testing.T) {
		multipartInfo := createMultipartUpload(tc, bktName, objName, map[string]string{})
		etag1, data1 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 1, partSize)
		etag2, data2 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 2, partSize)
		etag3, _ := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 3, partSize)
		completeMultipartUpload(tc, bktName, objName, multipartInfo.UploadID, []string{etag1, etag2, etag3})

		patchBody := make([]byte, partSize)
		_, err := rand.Read(patchBody)
		require.NoError(t, err)

		patchObject(t, tc, bktName, objName, "bytes "+strconv.Itoa(partSize*2)+"-"+strconv.Itoa(partSize*3-1)+"/*", patchBody, nil)
		object, header := getObject(tc, bktName, objName)
		contentLen, err := strconv.Atoi(header.Get(api.ContentLength))
		require.NoError(t, err)
		equalDataSlices(t, bytes.Join([][]byte{data1, data2, patchBody}, []byte("")), object)
		require.Equal(t, partSize*3, contentLen)
		require.True(t, strings.HasSuffix(data.UnQuote(header.Get(api.ETag)), "-3"))
	})

	t.Run("patch last part, increase size", func(t *testing.T) {
		multipartInfo := createMultipartUpload(tc, bktName, objName, map[string]string{})
		etag1, data1 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 1, partSize)
		etag2, data2 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 2, partSize)
		etag3, _ := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 3, partSize)
		completeMultipartUpload(tc, bktName, objName, multipartInfo.UploadID, []string{etag1, etag2, etag3})

		patchBody := make([]byte, partSize+1)
		_, err := rand.Read(patchBody)
		require.NoError(t, err)

		patchObject(t, tc, bktName, objName, "bytes "+strconv.Itoa(partSize*2)+"-"+strconv.Itoa(partSize*3)+"/*", patchBody, nil)
		object, header := getObject(tc, bktName, objName)
		contentLen, err := strconv.Atoi(header.Get(api.ContentLength))
		require.NoError(t, err)
		equalDataSlices(t, bytes.Join([][]byte{data1, data2, patchBody}, []byte("")), object)
		require.Equal(t, partSize*3+1, contentLen)
		require.True(t, strings.HasSuffix(data.UnQuote(header.Get(api.ETag)), "-3"))
	})

	t.Run("patch last part with offset and append bytes", func(t *testing.T) {
		multipartInfo := createMultipartUpload(tc, bktName, objName, map[string]string{})
		etag1, data1 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 1, partSize)
		etag2, data2 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 2, partSize)
		etag3, data3 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 3, partSize)
		completeMultipartUpload(tc, bktName, objName, multipartInfo.UploadID, []string{etag1, etag2, etag3})

		patchBody := make([]byte, partSize)
		_, err := rand.Read(patchBody)
		require.NoError(t, err)

		patchObject(t, tc, bktName, objName, "bytes "+strconv.Itoa(partSize*2+3)+"-"+strconv.Itoa(partSize*3+2)+"/*", patchBody, nil)
		object, header := getObject(tc, bktName, objName)
		contentLen, err := strconv.Atoi(header.Get(api.ContentLength))
		require.NoError(t, err)
		equalDataSlices(t, bytes.Join([][]byte{data1, data2, data3[:3], patchBody}, []byte("")), object)
		require.Equal(t, partSize*3+3, contentLen)
		require.True(t, strings.HasSuffix(data.UnQuote(header.Get(api.ETag)), "-3"))
	})

	t.Run("append bytes", func(t *testing.T) {
		multipartInfo := createMultipartUpload(tc, bktName, objName, map[string]string{})
		etag1, data1 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 1, partSize)
		etag2, data2 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 2, partSize)
		etag3, data3 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 3, partSize)
		completeMultipartUpload(tc, bktName, objName, multipartInfo.UploadID, []string{etag1, etag2, etag3})

		patchBody := make([]byte, partSize)
		_, err := rand.Read(patchBody)
		require.NoError(t, err)

		patchObject(t, tc, bktName, objName, "bytes "+strconv.Itoa(partSize*3)+"-"+strconv.Itoa(partSize*4-1)+"/*", patchBody, nil)
		object, header := getObject(tc, bktName, objName)
		contentLen, err := strconv.Atoi(header.Get(api.ContentLength))
		require.NoError(t, err)
		equalDataSlices(t, bytes.Join([][]byte{data1, data2, data3, patchBody}, []byte("")), object)
		require.Equal(t, partSize*4, contentLen)
		require.True(t, strings.HasSuffix(data.UnQuote(header.Get(api.ETag)), "-3"))
	})

	t.Run("patch empty multipart", func(t *testing.T) {
		multipartInfo := createMultipartUpload(tc, bktName, objName, map[string]string{})
		etag, _ := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 1, 0)
		completeMultipartUpload(tc, bktName, objName, multipartInfo.UploadID, []string{etag})

		patchBody := make([]byte, partSize)
		_, err := rand.Read(patchBody)
		require.NoError(t, err)

		patchObject(t, tc, bktName, objName, "bytes 0-"+strconv.Itoa(partSize-1)+"/*", patchBody, nil)
		object, header := getObject(tc, bktName, objName)
		contentLen, err := strconv.Atoi(header.Get(api.ContentLength))
		require.NoError(t, err)
		equalDataSlices(t, patchBody, object)
		require.Equal(t, partSize, contentLen)
		require.True(t, strings.HasSuffix(data.UnQuote(header.Get(api.ETag)), "-1"))
	})
}
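// TestPatchWithVersion patches a specific non-latest version of an object in
// a versioned bucket and verifies that the patch produces a new latest
// version while both earlier versions keep their original content.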
func TestPatchWithVersion(t *testing.T) {
	hc := prepareHandlerContextWithMinCache(t)
	bktName, objName := "bucket", "obj"
	createVersionedBucket(hc, bktName)
	objHeader := putObjectContent(hc, bktName, objName, "content")

	putObjectContent(hc, bktName, objName, "some content")

	patchObjectVersion(t, hc, bktName, objName, objHeader.Get(api.AmzVersionID), "bytes 7-14/*", []byte(" updated"))

	res := listObjectsVersions(hc, bktName, "", "", "", "", 3)
	require.False(t, res.IsTruncated)
	require.Len(t, res.Version, 3)

	for _, version := range res.Version {
		content := getObjectVersion(hc, bktName, objName, version.VersionID)
		if version.IsLatest {
			require.Equal(t, []byte("content updated"), content)
			continue
		}
		if version.VersionID == objHeader.Get(api.AmzVersionID) {
			require.Equal(t, []byte("content"), content)
			continue
		}
		require.Equal(t, []byte("some content"), content)
	}
}
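// TestPatchEncryptedObject verifies that patching an object uploaded with
// encryption headers fails; the handler currently reports this as an
// internal error.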
func TestPatchEncryptedObject(t *testing.T) {
	tc := prepareHandlerContext(t)
	bktName, objName := "bucket-for-patch-encrypted", "object-for-patch-encrypted"
	createTestBucket(tc, bktName)

	w, r := prepareTestPayloadRequest(tc, bktName, objName, strings.NewReader("object content"))
	setEncryptHeaders(r)
	tc.Handler().PutObjectHandler(w, r)
	assertStatus(t, w, http.StatusOK)

	patchObjectErr(tc, bktName, objName, "bytes 2-4/*", []byte("new"), nil, apierr.ErrInternalError)
}
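// TestPatchMissingHeaders checks that a PATCH request without Content-Range,
// or with Content-Range but without Content-Length, is rejected with the
// corresponding S3 error.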
func TestPatchMissingHeaders(t *testing.T) {
	tc := prepareHandlerContext(t)
	bktName, objName := "bucket-for-patch-missing-headers", "object-for-patch-missing-headers"
	createTestBucket(tc, bktName)

	w, r := prepareTestPayloadRequest(tc, bktName, objName, strings.NewReader("object content"))
	setEncryptHeaders(r)
	tc.Handler().PutObjectHandler(w, r)
	assertStatus(t, w, http.StatusOK)

	w = httptest.NewRecorder()
	r = httptest.NewRequest(http.MethodPatch, defaultURL, strings.NewReader("new"))
	tc.Handler().PatchObjectHandler(w, r)
	assertS3Error(t, w, apierr.GetAPIError(apierr.ErrMissingContentRange))

	w = httptest.NewRecorder()
	r = httptest.NewRequest(http.MethodPatch, defaultURL, strings.NewReader("new"))
	r.Header.Set(api.ContentRange, "bytes 0-2/*")
	tc.Handler().PatchObjectHandler(w, r)
	assertS3Error(t, w, apierr.GetAPIError(apierr.ErrMissingContentLength))
}
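// TestPatchInvalidBucketName ensures a patch against a syntactically invalid
// bucket name fails with ErrInvalidBucketName.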
func TestPatchInvalidBucketName(t *testing.T) {
	tc := prepareHandlerContext(t)
	bktName, objName := "bucket", "object"
	createTestBucket(tc, bktName)

	patchObjectErr(tc, "bkt_name", objName, "bytes 2-4/*", []byte("new"), nil, apierr.ErrInvalidBucketName)
}
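// TestParsePatchByteRange covers parsePatchByteRange: explicit and open-ended
// "bytes start-end/*" ranges (the latter resolved against the object size)
// and malformed inputs that must produce an error.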
func TestParsePatchByteRange(t *testing.T) {
	for _, tt := range []struct {
		rng      string
		size     uint64
		expected *layer.RangeParams
		err      bool
	}{
		{
			rng: "bytes 2-7/*",
			expected: &layer.RangeParams{
				Start: 2,
				End:   7,
			},
		},
		{
			rng: "bytes 2-7/3",
			expected: &layer.RangeParams{
				Start: 2,
				End:   7,
			},
		},
		{
			rng:  "bytes 2-/*",
			size: 9,
			expected: &layer.RangeParams{
				Start: 2,
				End:   8,
			},
		},
		{
			rng:  "bytes 2-/3",
			size: 9,
			expected: &layer.RangeParams{
				Start: 2,
				End:   8,
			},
		},
		{
			rng: "",
			err: true,
		},
		{
			rng: "2-7/*",
			err: true,
		},
		{
			rng: "bytes 7-2/*",
			err: true,
		},
		{
			rng: "bytes 2-7",
			err: true,
		},
		{
			rng: "bytes 2/*",
			err: true,
		},
		{
			rng: "bytes a-7/*",
			err: true,
		},
		{
			rng: "bytes 2-a/*",
			err: true,
		},
	} {
		t.Run(fmt.Sprintf("case: %s", tt.rng), func(t *testing.T) {
			rng, err := parsePatchByteRange(tt.rng, tt.size)
			if tt.err {
				require.Error(t, err)
			} else {
				require.NoError(t, err)
				require.Equal(t, tt.expected.Start, rng.Start)
				require.Equal(t, tt.expected.End, rng.End)
			}
		})
	}
}
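// patchObject performs a PATCH request that is expected to succeed and
// decodes the XML response into a PatchObjectResult.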
func patchObject(t *testing.T, tc *handlerContext, bktName, objName, rng string, payload []byte, headers map[string]string) *PatchObjectResult {
	w := patchObjectBase(tc, bktName, objName, "", rng, payload, headers)
	assertStatus(t, w, http.StatusOK)

	result := &PatchObjectResult{}
	err := xml.NewDecoder(w.Result().Body).Decode(result)
	require.NoError(t, err)
	return result
}
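// patchObjectVersion is like patchObject but targets a specific object
// version via the api.QueryVersionID query parameter.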
func patchObjectVersion(t *testing.T, tc *handlerContext, bktName, objName, version, rng string, payload []byte) *PatchObjectResult {
	w := patchObjectBase(tc, bktName, objName, version, rng, payload, nil)
	assertStatus(t, w, http.StatusOK)

	result := &PatchObjectResult{}
	err := xml.NewDecoder(w.Result().Body).Decode(result)
	require.NoError(t, err)
	return result
}
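// patchObjectErr performs a PATCH request that is expected to fail and
// asserts the returned S3 error code.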
func patchObjectErr(tc *handlerContext, bktName, objName, rng string, payload []byte, headers map[string]string, code apierr.ErrorCode) {
	w := patchObjectBase(tc, bktName, objName, "", rng, payload, headers)
	assertS3Error(tc.t, w, apierr.GetAPIError(code))
}
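// patchObjectBase builds a PATCH request with the given range, payload, and
// extra headers, invokes PatchObjectHandler, and returns the response recorder.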
func patchObjectBase(tc *handlerContext, bktName, objName, version, rng string, payload []byte, headers map[string]string) *httptest.ResponseRecorder {
	query := make(url.Values)
	if len(version) > 0 {
		query.Add(api.QueryVersionID, version)
	}

	w, r := prepareTestRequestWithQuery(tc, bktName, objName, query, payload)
	r.Header.Set(api.ContentRange, rng)
	r.Header.Set(api.ContentLength, strconv.Itoa(len(payload)))
	for k, v := range headers {
		r.Header.Set(k, v)
	}

	tc.Handler().PatchObjectHandler(w, r)
	return w
}