package handler

import (
	"bytes"
	"encoding/xml"
	"net/http"
	"net/url"
	"testing"
	"time"

	s3Errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
	"github.com/stretchr/testify/require"
)
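
// TestPeriodicWriter checks that periodicXMLWriter writes the XML header followed by
// one whitespace per period, reports via stop() whether anything was written, and
// writes nothing further after stop() is called.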
func TestPeriodicWriter(t *testing.T) {
	const dur = 100 * time.Millisecond
	const whitespaces = 8
	expected := []byte(xml.Header)
	for i := 0; i < whitespaces; i++ {
		expected = append(expected, []byte(" ")...)
	}

	t.Run("writes data", func(t *testing.T) {
		buf := bytes.NewBuffer(nil)
		stop := periodicXMLWriter(buf, dur)

		// Sleep for `whitespaces` periods plus half a period to guarantee
		// at least that many writes end up in the buffer.
		time.Sleep(whitespaces*dur + dur/2)
		require.True(t, stop())
		require.Equal(t, expected, buf.Bytes())

		t.Run("no additional data after stop", func(t *testing.T) {
			time.Sleep(2 * dur)
			require.Equal(t, expected, buf.Bytes())
		})
	})

	t.Run("does not write data", func(t *testing.T) {
		buf := bytes.NewBuffer(nil)
		stop := periodicXMLWriter(buf, dur)
		time.Sleep(dur / 2)
		require.False(t, stop())
		require.Empty(t, buf.Bytes())

		t.Run("disabled", func(t *testing.T) {
			stop = periodicXMLWriter(buf, 0)
			require.False(t, stop())
			require.Empty(t, buf.Bytes())
		})
	})
}
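
// TestMultipartUploadInvalidPart checks that completing a multipart upload fails with
// ErrEntityTooSmall when the uploaded parts are smaller than the minimum part size.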
func TestMultipartUploadInvalidPart(t *testing.T) {
	hc := prepareHandlerContext(t)

	bktName, objName := "bucket-to-upload-part", "object-multipart"
	createTestBucket(hc, bktName)
	partSize := 8 // less than min part size

	multipartUpload := createMultipartUpload(hc, bktName, objName, map[string]string{})
	etag1, _ := uploadPart(hc, bktName, objName, multipartUpload.UploadID, 1, partSize)
	etag2, _ := uploadPart(hc, bktName, objName, multipartUpload.UploadID, 2, partSize)
	w := completeMultipartUploadBase(hc, bktName, objName, multipartUpload.UploadID, []string{etag1, etag2})
	assertS3Error(hc.t, w, s3Errors.GetAPIError(s3Errors.ErrEntityTooSmall))
}
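
// TestMultipartReUploadPart checks that re-uploading parts under the same part numbers
// replaces the previous versions: the first attempt with an undersized non-last part
// fails, the re-uploaded parts are listed exactly once (both in the ListParts response
// and in the tree service), and the completed object matches the re-uploaded data.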
func TestMultipartReUploadPart(t *testing.T) {
	hc := prepareHandlerContext(t)

	bktName, objName := "bucket-to-upload-part", "object-multipart"
	bktInfo := createTestBucket(hc, bktName)
	partSizeLast := 8 // less than min part size
	partSizeFirst := 5 * 1024 * 1024

	uploadInfo := createMultipartUpload(hc, bktName, objName, map[string]string{})
	etag1, _ := uploadPart(hc, bktName, objName, uploadInfo.UploadID, 1, partSizeLast)
	etag2, _ := uploadPart(hc, bktName, objName, uploadInfo.UploadID, 2, partSizeFirst)

	list := listParts(hc, bktName, objName, uploadInfo.UploadID)
	require.Len(t, list.Parts, 2)
	require.Equal(t, etag1, list.Parts[0].ETag)
	require.Equal(t, etag2, list.Parts[1].ETag)

	// The first (non-last) part is smaller than the minimum part size, so completion must fail.
	w := completeMultipartUploadBase(hc, bktName, objName, uploadInfo.UploadID, []string{etag1, etag2})
	assertS3Error(hc.t, w, s3Errors.GetAPIError(s3Errors.ErrEntityTooSmall))

	// Re-upload both parts with valid sizes under the same part numbers.
	etag1, data1 := uploadPart(hc, bktName, objName, uploadInfo.UploadID, 1, partSizeFirst)
	etag2, data2 := uploadPart(hc, bktName, objName, uploadInfo.UploadID, 2, partSizeLast)

	list = listParts(hc, bktName, objName, uploadInfo.UploadID)
	require.Len(t, list.Parts, 2)
	require.Equal(t, etag1, list.Parts[0].ETag)
	require.Equal(t, etag2, list.Parts[1].ETag)

	// The tree service should not keep stale entries for re-uploaded parts.
	innerUploadInfo, err := hc.tree.GetMultipartUpload(hc.context, bktInfo, objName, uploadInfo.UploadID)
	require.NoError(t, err)
	treeParts, err := hc.tree.GetParts(hc.Context(), bktInfo, innerUploadInfo.ID)
	require.NoError(t, err)
	require.Len(t, treeParts, len(list.Parts))

	w = completeMultipartUploadBase(hc, bktName, objName, uploadInfo.UploadID, []string{etag1, etag2})
	assertStatus(hc.t, w, http.StatusOK)

	data, _ := getObject(hc, bktName, objName)
	equalDataSlices(t, append(data1, data2...), data)
}
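
// listParts lists the parts of an unencrypted multipart upload.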
func listParts(hc *handlerContext, bktName, objName string, uploadID string) *ListPartsResponse {
	return listPartsBase(hc, bktName, objName, false, uploadID)
}
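
// listPartsBase invokes the ListParts handler for the given upload, optionally setting
// encryption headers, and decodes the successful response.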
func listPartsBase(hc *handlerContext, bktName, objName string, encrypted bool, uploadID string) *ListPartsResponse {
	query := make(url.Values)
	query.Set(uploadIDQuery, uploadID)

	w, r := prepareTestRequestWithQuery(hc, bktName, objName, query, nil)
	if encrypted {
		setEncryptHeaders(r)
	}

	hc.Handler().ListPartsHandler(w, r)
	listPartsResponse := &ListPartsResponse{}
	readResponse(hc.t, w, http.StatusOK, listPartsResponse)

	return listPartsResponse
}