package handler

import (
	"crypto/md5"
	"crypto/tls"
	"encoding/base64"
	"encoding/xml"
	"net/http"
	"net/url"
	"strconv"
	"testing"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/encryption"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
	"github.com/stretchr/testify/require"
)
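
// CopyMeta describes the optional parts of a CopyObject test request:
// the tagging and metadata directives, their values, and any extra headers to set.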
type CopyMeta struct {
	TaggingDirective  string
	Tags              map[string]string
	MetadataDirective string
	Metadata          map[string]string
	Headers           map[string]string
}
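
// TestCopyWithTaggingDirective checks that CopyObject keeps the source object's
// tags by default and takes the tags from the request when the tagging
// directive is set to replaceDirective.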
func TestCopyWithTaggingDirective(t *testing.T) {
	tc := prepareHandlerContext(t)

	bktName, objName := "bucket-for-copy", "object-from-copy"
	objToCopy, objToCopy2 := "object-to-copy", "object-to-copy-2"
	createBucketAndObject(tc, bktName, objName)

	putObjectTagging(t, tc, bktName, objName, map[string]string{"key": "val"})

	copyMeta := CopyMeta{
		Tags: map[string]string{"key2": "val"},
	}
	copyObject(tc, bktName, objName, objToCopy, copyMeta, http.StatusOK)
	tagging := getObjectTagging(t, tc, bktName, objToCopy, emptyVersion)
	require.Len(t, tagging.TagSet, 1)
	require.Equal(t, "key", tagging.TagSet[0].Key)
	require.Equal(t, "val", tagging.TagSet[0].Value)

	copyMeta.TaggingDirective = replaceDirective
	copyObject(tc, bktName, objName, objToCopy2, copyMeta, http.StatusOK)
	tagging = getObjectTagging(t, tc, bktName, objToCopy2, emptyVersion)
	require.Len(t, tagging.TagSet, 1)
	require.Equal(t, "key2", tagging.TagSet[0].Key)
	require.Equal(t, "val", tagging.TagSet[0].Value)
}
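
// TestCopyToItself checks copying an object onto its own key: without a
// metadata directive it must fail on an unversioned bucket and succeed
// otherwise, including after versioning is enabled and then suspended.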
func TestCopyToItself(t *testing.T) {
	tc := prepareHandlerContext(t)

	bktName, objName := "bucket-for-copy", "object-for-copy"
	createBucketAndObject(tc, bktName, objName)

	copyMeta := CopyMeta{MetadataDirective: replaceDirective}

	copyObject(tc, bktName, objName, objName, CopyMeta{}, http.StatusBadRequest)
	copyObject(tc, bktName, objName, objName, copyMeta, http.StatusOK)

	putBucketVersioning(t, tc, bktName, true)
	copyObject(tc, bktName, objName, objName, CopyMeta{}, http.StatusOK)
	copyObject(tc, bktName, objName, objName, copyMeta, http.StatusOK)

	putBucketVersioning(t, tc, bktName, false)
	copyObject(tc, bktName, objName, objName, CopyMeta{}, http.StatusOK)
	copyObject(tc, bktName, objName, objName, copyMeta, http.StatusOK)
}
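
// TestCopyMultipart checks that copying a multipart-uploaded object preserves
// its content, and that the copy keeps the source part structure unless the
// metadata directive is replaceDirective.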
func TestCopyMultipart(t *testing.T) {
	hc := prepareHandlerContext(t)

	bktName, objName := "bucket-for-copy", "object-for-copy"
	createTestBucket(hc, bktName)

	partSize := layer.UploadMinSize
	objLen := 6 * partSize
	headers := map[string]string{}

	data := multipartUpload(hc, bktName, objName, headers, objLen, partSize)
	require.Equal(t, objLen, len(data))

	objToCopy := "copy-target"
	var copyMeta CopyMeta
	copyObject(hc, bktName, objName, objToCopy, copyMeta, http.StatusOK)

	copiedData, _ := getObject(hc, bktName, objToCopy)
	equalDataSlices(t, data, copiedData)

	result := getObjectAttributes(hc, bktName, objToCopy, objectParts)
	require.NotNil(t, result.ObjectParts)

	objToCopy2 := "copy-target2"
	copyMeta.MetadataDirective = replaceDirective
	copyObject(hc, bktName, objName, objToCopy2, copyMeta, http.StatusOK)

	result = getObjectAttributes(hc, bktName, objToCopy2, objectParts)
	require.Nil(t, result.ObjectParts)

	copiedData, _ = getObject(hc, bktName, objToCopy2)
	equalDataSlices(t, data, copiedData)
}
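
// TestCopyEncryptedToUnencrypted checks copying an SSE-C encrypted object
// without target encryption headers: missing or mismatched copy-source SSE-C
// headers are rejected, and a correct source key yields an unencrypted copy
// whose size equals the decrypted source size.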
func TestCopyEncryptedToUnencrypted(t *testing.T) {
	tc := prepareHandlerContext(t)

	bktName, srcObjName := "bucket-for-copy", "object-for-copy"
	key1 := []byte("firstencriptionkeyofsourceobject")
	key1Md5 := md5.Sum(key1)
	key2 := []byte("anotherencriptionkeysourceobject")
	key2Md5 := md5.Sum(key2)
	bktInfo := createTestBucket(tc, bktName)

	srcEnc, err := encryption.NewParams(key1)
	require.NoError(t, err)
	srcObjInfo := createTestObject(tc, bktInfo, srcObjName, *srcEnc)
	require.True(t, containEncryptionMetadataHeaders(srcObjInfo.Headers))

	dstObjName := "copy-object"

	// empty copy-source-sse headers
	w, r := prepareTestRequest(tc, bktName, dstObjName, nil)
	r.TLS = &tls.ConnectionState{}
	r.Header.Set(api.AmzCopySource, bktName+"/"+srcObjName)
	tc.Handler().CopyObjectHandler(w, r)

	assertStatus(t, w, http.StatusBadRequest)
	assertS3Error(t, w, errors.GetAPIError(errors.ErrSSEEncryptedObject))

	// empty copy-source-sse-custom-key
	w, r = prepareTestRequest(tc, bktName, dstObjName, nil)
	r.TLS = &tls.ConnectionState{}
	r.Header.Set(api.AmzCopySource, bktName+"/"+srcObjName)
	r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerAlgorithm, layer.AESEncryptionAlgorithm)
	tc.Handler().CopyObjectHandler(w, r)

	assertStatus(t, w, http.StatusBadRequest)
	assertS3Error(t, w, errors.GetAPIError(errors.ErrMissingSSECustomerKey))

	// empty copy-source-sse-custom-algorithm
	w, r = prepareTestRequest(tc, bktName, dstObjName, nil)
	r.TLS = &tls.ConnectionState{}
	r.Header.Set(api.AmzCopySource, bktName+"/"+srcObjName)
	r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerKey, base64.StdEncoding.EncodeToString(key1))
	tc.Handler().CopyObjectHandler(w, r)

	assertStatus(t, w, http.StatusBadRequest)
	assertS3Error(t, w, errors.GetAPIError(errors.ErrMissingSSECustomerAlgorithm))

	// invalid copy-source-sse-custom-key
	w, r = prepareTestRequest(tc, bktName, dstObjName, nil)
	r.TLS = &tls.ConnectionState{}
	r.Header.Set(api.AmzCopySource, bktName+"/"+srcObjName)
	r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerAlgorithm, layer.AESEncryptionAlgorithm)
	r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerKey, base64.StdEncoding.EncodeToString(key2))
	r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerKeyMD5, base64.StdEncoding.EncodeToString(key2Md5[:]))
	tc.Handler().CopyObjectHandler(w, r)

	assertStatus(t, w, http.StatusBadRequest)
	assertS3Error(t, w, errors.GetAPIError(errors.ErrInvalidSSECustomerParameters))

	// success copy
	w, r = prepareTestRequest(tc, bktName, dstObjName, nil)
	r.TLS = &tls.ConnectionState{}
	r.Header.Set(api.AmzCopySource, bktName+"/"+srcObjName)
	r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerAlgorithm, layer.AESEncryptionAlgorithm)
	r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerKey, base64.StdEncoding.EncodeToString(key1))
	r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerKeyMD5, base64.StdEncoding.EncodeToString(key1Md5[:]))
	tc.Handler().CopyObjectHandler(w, r)

	assertStatus(t, w, http.StatusOK)
	dstObjInfo, err := tc.Layer().GetObjectInfo(tc.Context(), &layer.HeadObjectParams{BktInfo: bktInfo, Object: dstObjName})
	require.NoError(t, err)
	require.Equal(t, srcObjInfo.Headers[layer.AttributeDecryptedSize], strconv.Itoa(int(dstObjInfo.Size)))
	require.False(t, containEncryptionMetadataHeaders(dstObjInfo.Headers))
}
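
// TestCopyUnencryptedToEncrypted checks copying a plain object with SSE-C
// target headers: copy-source SSE-C headers are rejected for an unencrypted
// source, and a valid request produces an encrypted copy.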
func TestCopyUnencryptedToEncrypted(t *testing.T) {
	tc := prepareHandlerContext(t)

	bktName, srcObjName := "bucket-for-copy", "object-for-copy"
	key := []byte("firstencriptionkeyofsourceobject")
	keyMd5 := md5.Sum(key)
	bktInfo := createTestBucket(tc, bktName)

	srcObjInfo := createTestObject(tc, bktInfo, srcObjName, encryption.Params{})
	require.False(t, containEncryptionMetadataHeaders(srcObjInfo.Headers))

	dstObjName := "copy-object"

	// invalid copy-source-sse headers
	w, r := prepareTestRequest(tc, bktName, dstObjName, nil)
	r.TLS = &tls.ConnectionState{}
	r.Header.Set(api.AmzCopySource, bktName+"/"+srcObjName)
	r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerAlgorithm, layer.AESEncryptionAlgorithm)
	r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerKey, base64.StdEncoding.EncodeToString(key))
	r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerKeyMD5, base64.StdEncoding.EncodeToString(keyMd5[:]))
	tc.Handler().CopyObjectHandler(w, r)

	assertStatus(t, w, http.StatusBadRequest)
	assertS3Error(t, w, errors.GetAPIError(errors.ErrInvalidEncryptionParameters))

	// success copy
	w, r = prepareTestRequest(tc, bktName, dstObjName, nil)
	r.TLS = &tls.ConnectionState{}
	r.Header.Set(api.AmzCopySource, bktName+"/"+srcObjName)
	r.Header.Set(api.AmzServerSideEncryptionCustomerAlgorithm, layer.AESEncryptionAlgorithm)
	r.Header.Set(api.AmzServerSideEncryptionCustomerKey, base64.StdEncoding.EncodeToString(key))
	r.Header.Set(api.AmzServerSideEncryptionCustomerKeyMD5, base64.StdEncoding.EncodeToString(keyMd5[:]))
	tc.Handler().CopyObjectHandler(w, r)

	assertStatus(t, w, http.StatusOK)
	dstObjInfo, err := tc.Layer().GetObjectInfo(tc.Context(), &layer.HeadObjectParams{BktInfo: bktInfo, Object: dstObjName})
	require.NoError(t, err)
	require.True(t, containEncryptionMetadataHeaders(dstObjInfo.Headers))
	require.Equal(t, strconv.Itoa(int(srcObjInfo.Size)), dstObjInfo.Headers[layer.AttributeDecryptedSize])
}
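
// TestCopyEncryptedToEncryptedWithAnotherKey checks re-encrypting an SSE-C
// object under a different customer key during CopyObject: the copy stays
// encrypted and keeps the source decrypted size.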
func TestCopyEncryptedToEncryptedWithAnotherKey(t *testing.T) {
	tc := prepareHandlerContext(t)

	bktName, srcObjName := "bucket-for-copy", "object-for-copy"
	key1 := []byte("firstencriptionkeyofsourceobject")
	key1Md5 := md5.Sum(key1)
	key2 := []byte("anotherencriptionkeysourceobject")
	key2Md5 := md5.Sum(key2)
	bktInfo := createTestBucket(tc, bktName)

	srcEnc, err := encryption.NewParams(key1)
	require.NoError(t, err)
	srcObjInfo := createTestObject(tc, bktInfo, srcObjName, *srcEnc)
	require.True(t, containEncryptionMetadataHeaders(srcObjInfo.Headers))

	dstObjName := "copy-object"

	w, r := prepareTestRequest(tc, bktName, dstObjName, nil)
	r.TLS = &tls.ConnectionState{}
	r.Header.Set(api.AmzCopySource, bktName+"/"+srcObjName)
	r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerAlgorithm, layer.AESEncryptionAlgorithm)
	r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerKey, base64.StdEncoding.EncodeToString(key1))
	r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerKeyMD5, base64.StdEncoding.EncodeToString(key1Md5[:]))
	r.Header.Set(api.AmzServerSideEncryptionCustomerAlgorithm, layer.AESEncryptionAlgorithm)
	r.Header.Set(api.AmzServerSideEncryptionCustomerKey, base64.StdEncoding.EncodeToString(key2))
	r.Header.Set(api.AmzServerSideEncryptionCustomerKeyMD5, base64.StdEncoding.EncodeToString(key2Md5[:]))
	tc.Handler().CopyObjectHandler(w, r)

	assertStatus(t, w, http.StatusOK)
	dstObjInfo, err := tc.Layer().GetObjectInfo(tc.Context(), &layer.HeadObjectParams{BktInfo: bktInfo, Object: dstObjName})
	require.NoError(t, err)
	require.True(t, containEncryptionMetadataHeaders(dstObjInfo.Headers))
	require.Equal(t, srcObjInfo.Headers[layer.AttributeDecryptedSize], dstObjInfo.Headers[layer.AttributeDecryptedSize])
}
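
// containEncryptionMetadataHeaders reports whether any of the object's headers
// is one of the encryption metadata attributes known to the layer.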
func containEncryptionMetadataHeaders(headers map[string]string) bool {
	for k := range headers {
		if _, ok := layer.EncryptionMetadata[k]; ok {
			return true
		}
	}
	return false
}
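
// copyObject sends a CopyObject request built from copyMeta and asserts the
// expected response status code.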
func copyObject(hc *handlerContext, bktName, fromObject, toObject string, copyMeta CopyMeta, statusCode int) {
	w, r := prepareTestRequest(hc, bktName, toObject, nil)
	r.Header.Set(api.AmzCopySource, bktName+"/"+fromObject)

	r.Header.Set(api.AmzMetadataDirective, copyMeta.MetadataDirective)
	for key, val := range copyMeta.Metadata {
		r.Header.Set(api.MetadataPrefix+key, val)
	}

	r.Header.Set(api.AmzTaggingDirective, copyMeta.TaggingDirective)
	tagsQuery := make(url.Values)
	for key, val := range copyMeta.Tags {
		tagsQuery.Set(key, val)
	}
	r.Header.Set(api.AmzTagging, tagsQuery.Encode())

	for key, val := range copyMeta.Headers {
		r.Header.Set(key, val)
	}

	hc.Handler().CopyObjectHandler(w, r)
	assertStatus(hc.t, w, statusCode)
}
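
// putObjectTagging puts the given tag set on an object and expects success.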
func putObjectTagging(t *testing.T, tc *handlerContext, bktName, objName string, tags map[string]string) {
	body := &data.Tagging{
		TagSet: make([]data.Tag, 0, len(tags)),
	}

	for key, val := range tags {
		body.TagSet = append(body.TagSet, data.Tag{
			Key:   key,
			Value: val,
		})
	}

	w, r := prepareTestRequest(tc, bktName, objName, body)
	middleware.GetReqInfo(r.Context()).Tagging = body
	tc.Handler().PutObjectTaggingHandler(w, r)
	assertStatus(t, w, http.StatusOK)
}
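
// getObjectTagging reads back the tag set of the given object version.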
func getObjectTagging(t *testing.T, tc *handlerContext, bktName, objName, version string) *data.Tagging {
	query := make(url.Values)
	query.Add(api.QueryVersionID, version)

	w, r := prepareTestFullRequest(tc, bktName, objName, query, nil)
	tc.Handler().GetObjectTaggingHandler(w, r)
	assertStatus(t, w, http.StatusOK)

	tagging := &data.Tagging{}
	err := xml.NewDecoder(w.Result().Body).Decode(tagging)
	require.NoError(t, err)
	return tagging
}
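
// TestSourceCopyRegexp checks how path2BucketObject splits a copy-source path
// into bucket and object names, and which paths it rejects.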
func TestSourceCopyRegexp(t *testing.T) {
	for _, tc := range []struct {
		path    string
		err     bool
		bktName string
		objName string
	}{
		{
			path:    "/bucket/object",
			err:     false,
			bktName: "bucket",
			objName: "object",
		},
		{
			path:    "bucket/object",
			err:     false,
			bktName: "bucket",
			objName: "object",
		},
		{
			path:    "sub-bucket/object",
			err:     false,
			bktName: "sub-bucket",
			objName: "object",
		},
		{
			path:    "bucket.domain/object",
			err:     false,
			bktName: "bucket.domain",
			objName: "object",
		},
		{
			path:    "bucket/object/deep",
			err:     false,
			bktName: "bucket",
			objName: "object/deep",
		},
		{
			path: "bucket",
			err:  true,
		},
		{
			path: "/bucket",
			err:  true,
		},
		{
			path: "invalid+bucket/object",
			err:  true,
		},
		{
			path: "invaliDBucket/object",
			err:  true,
		},
		{
			path: "i/object",
			err:  true,
		},
	} {
		t.Run("", func(t *testing.T) {
			bktName, objName, err := path2BucketObject(tc.path)
			if tc.err {
				require.Error(t, err)
				return
			}

			require.NoError(t, err)
			require.Equal(t, tc.bktName, bktName)
			require.Equal(t, tc.objName, objName)
		})
	}
}