forked from TrueCloudLab/frostfs-s3-gw

Compare commits: 10 commits, 979d85b046 ... bd4645c84c

Commits (SHA1): bd4645c84c, eff0de43d5, fb00dff83b, d8f126b339, 7ab902d8d2, 0792fcf456, c46ffa8146, 3260308cc0, d6e6a13576, 17d40245de
31 changed files with 481 additions and 89 deletions
@@ -1,3 +1,3 @@
 .git
 .cache
-.github
+.forgejo

(image file — Before: 5.5 KiB | After: 5.5 KiB)
@@ -1,4 +1,8 @@
-on: [pull_request]
+on:
+  pull_request:
+  push:
+    branches:
+      - master
 
 jobs:
   builds:
@@ -1,4 +1,8 @@
-on: [pull_request]
+on:
+  pull_request:
+  push:
+    branches:
+      - master
 
 jobs:
   dco:
@@ -1,4 +1,8 @@
-on: [pull_request]
+on:
+  pull_request:
+  push:
+    branches:
+      - master
 
 jobs:
   lint:
@@ -1,4 +1,8 @@
-on: [pull_request]
+on:
+  pull_request:
+  push:
+    branches:
+      - master
 
 jobs:
   vulncheck:
.github/CODEOWNERS (vendored, 1 line deleted)
@@ -1 +0,0 @@
-* @alexvanin @dkirillov

CODEOWNERS (new file, 1 line added)
@@ -0,0 +1 @@
+.* @alexvanin @dkirillov
@@ -1,5 +1,5 @@
 <p align="center">
-<img src="./.github/logo.svg" width="500px" alt="FrostFS logo">
+<img src="./.forgejo/logo.svg" width="500px" alt="FrostFS logo">
 </p>
 <p align="center">
 <a href="https://frostfs.info">FrostFS</a> is a decentralized distributed object storage integrated with the <a href="https://neo.org">NEO Blockchain</a>.
@@ -20,6 +20,7 @@ type (
 		Filter                      *LifecycleRuleFilter         `xml:"Filter,omitempty"`
 		ID                          string                       `xml:"ID,omitempty"`
 		NonCurrentVersionExpiration *NonCurrentVersionExpiration `xml:"NoncurrentVersionExpiration,omitempty"`
+		Prefix                      string                       `xml:"Prefix,omitempty"`
 	}

 	AbortIncompleteMultipartUpload struct {
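The new top-level `Prefix` field matches the legacy S3 lifecycle schema, where the prefix sits directly on the rule rather than inside `<Filter>`. As a minimal, hypothetical sketch (a two-field stand-in, not the gateway's full type) of how Go's encoding/xml renders such a tagged field:

```go
package main

import (
	"encoding/xml"
	"fmt"
)

// Trimmed stand-in for the rule type above, keeping only the tags
// needed to show where the top-level Prefix lands in the XML.
type LifecycleRule struct {
	ID     string `xml:"ID,omitempty"`
	Prefix string `xml:"Prefix,omitempty"`
}

func main() {
	out, _ := xml.MarshalIndent(LifecycleRule{ID: "rule-1", Prefix: "logs/"}, "", "  ")
	fmt.Println(string(out))
	// <LifecycleRule>
	//   <ID>rule-1</ID>
	//   <Prefix>logs/</Prefix>
	// </LifecycleRule>
}
```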
@@ -191,6 +191,7 @@ func TestDeleteBucketWithPolicy(t *testing.T) {
 	require.Len(t, hc.h.ape.(*apeMock).policyMap, 1)
 	require.Len(t, hc.h.ape.(*apeMock).chainMap[engine.ContainerTarget(bi.CID.EncodeToString())], 4)

+	hc.owner = bi.Owner
 	deleteBucket(t, hc, bktName, http.StatusNoContent)

 	require.Empty(t, hc.h.ape.(*apeMock).policyMap)
@@ -245,6 +245,11 @@ func (h *handler) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) {
 		return
 	}

+	if err = checkOwner(bktInfo, reqInfo.User); err != nil {
+		h.logAndSendError(ctx, w, "request owner id does not match bucket owner id", reqInfo, err)
+		return
+	}
+
 	var sessionToken *session.Container

 	boxData, err := middleware.GetBoxData(ctx)
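For context, a minimal sketch of the owner check this hunk relies on. The real `checkOwner` lives elsewhere in the handler package and may differ; the assumption here is that it compares the request user against the bucket owner, skips the check for anonymous requests, and returns an access-denied API error (rendered as HTTP 403, which is what `TestDeleteBucketByNotOwner` below expects):

```go
// Hypothetical sketch, not the gateway's actual implementation.
func checkOwner(bktInfo *data.BucketInfo, owner string) error {
	if owner == "" {
		return nil // unsigned/anonymous request: nothing to compare against
	}
	if bktInfo.Owner.EncodeToString() != owner {
		return apierr.GetAPIError(apierr.ErrAccessDenied) // 403 Forbidden
	}
	return nil
}
```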
@@ -37,6 +37,7 @@ func TestDeleteBucketOnAlreadyRemovedError(t *testing.T) {

 	deleteObjects(t, hc, bktName, [][2]string{{objName, emptyVersion}})

+	hc.owner = bktInfo.Owner
 	deleteBucket(t, hc, bktName, http.StatusNoContent)
 }
@@ -53,11 +54,12 @@ func TestDeleteBucket(t *testing.T) {
 	tc := prepareHandlerContext(t)

 	bktName, objName := "bucket-for-removal", "object-to-delete"
-	_, objInfo := createVersionedBucketAndObject(t, tc, bktName, objName)
+	bktInfo, objInfo := createVersionedBucketAndObject(t, tc, bktName, objName)

 	deleteMarkerVersion, isDeleteMarker := deleteObject(t, tc, bktName, objName, emptyVersion)
 	require.True(t, isDeleteMarker)

+	tc.owner = bktInfo.Owner
 	deleteBucket(t, tc, bktName, http.StatusConflict)
 	deleteObject(t, tc, bktName, objName, objInfo.VersionID())
 	deleteBucket(t, tc, bktName, http.StatusConflict)
@@ -82,6 +84,7 @@ func TestDeleteBucketOnNotFoundError(t *testing.T) {

 	deleteObjects(t, hc, bktName, [][2]string{{objName, emptyVersion}})

+	hc.owner = bktInfo.Owner
 	deleteBucket(t, hc, bktName, http.StatusNoContent)
 }
@@ -99,6 +102,7 @@ func TestForceDeleteBucket(t *testing.T) {
 	addr.SetContainer(bktInfo.CID)
 	addr.SetObject(nodeVersion.OID)

+	hc.owner = bktInfo.Owner
 	deleteBucketForce(t, hc, bktName, http.StatusConflict, "false")
 	deleteBucketForce(t, hc, bktName, http.StatusNoContent, "true")
 }
@@ -457,6 +461,17 @@ func TestDeleteObjectCheckMarkerReturn(t *testing.T) {
 	require.Equal(t, deleteMarkerVersion, deleteMarkerVersion2)
 }

+func TestDeleteBucketByNotOwner(t *testing.T) {
+	hc := prepareHandlerContext(t)
+
+	bktName := "bucket-name"
+	bktInfo := createTestBucket(hc, bktName)
+	deleteBucket(t, hc, bktName, http.StatusForbidden)
+
+	hc.owner = bktInfo.Owner
+	deleteBucket(t, hc, bktName, http.StatusNoContent)
+}
+
 func createBucketAndObject(tc *handlerContext, bktName, objName string) (*data.BucketInfo, *data.ObjectInfo) {
 	bktInfo := createTestBucket(tc, bktName)
@@ -563,6 +578,18 @@ func checkFound(t *testing.T, hc *handlerContext, bktName, objName, version stri
 	assertStatus(t, w, http.StatusOK)
 }

+func headObjectWithHeaders(hc *handlerContext, bktName, objName, version string, headers map[string]string) *httptest.ResponseRecorder {
+	query := make(url.Values)
+	query.Add(api.QueryVersionID, version)
+
+	w, r := prepareTestFullRequest(hc, bktName, objName, query, nil)
+	for k, v := range headers {
+		r.Header.Set(k, v)
+	}
+	hc.Handler().HeadObjectHandler(w, r)
+	return w
+}
+
 func headObjectBase(hc *handlerContext, bktName, objName, version string) *httptest.ResponseRecorder {
 	query := make(url.Values)
 	query.Add(api.QueryVersionID, version)
@@ -48,6 +48,25 @@ func TestSimpleGetEncrypted(t *testing.T) {
 	require.Equal(t, content, string(response))
 }

+func TestMD5HeaderBadOrEmpty(t *testing.T) {
+	tc := prepareHandlerContext(t)
+
+	bktName, objName := "bucket-for-sse-c", "object-to-encrypt"
+	createTestBucket(tc, bktName)
+	content := "content"
+
+	headers := map[string]string{
+		api.ContentMD5: "",
+	}
+	putEncryptedObjectWithHeadersErr(t, tc, bktName, objName, content, headers, errors.ErrInvalidDigest)
+
+	headers = map[string]string{
+		api.ContentMD5: "YWJjMTIzIT8kKiYoKSctPUB+",
+	}
+
+	putEncryptedObjectWithHeadersErr(t, tc, bktName, objName, content, headers, errors.ErrBadDigest)
+}
+
 func TestGetEncryptedRange(t *testing.T) {
 	tc := prepareHandlerContext(t)
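The two cases above separate a malformed digest from a mismatched one. As a reminder of what a valid header looks like: `Content-MD5` carries the base64 (not hex) encoding of the 16-byte MD5 of the payload. A self-contained sketch:

```go
package main

import (
	"crypto/md5"
	"encoding/base64"
	"fmt"
)

func main() {
	// A valid Content-MD5 value for the body "content".
	sum := md5.Sum([]byte("content"))
	fmt.Println(base64.StdEncoding.EncodeToString(sum[:]))
	// An empty or undecodable value fails early as InvalidDigest; a value
	// that decodes but does not match the payload fails as BadDigest.
}
```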
@@ -360,6 +379,15 @@ func putEncryptedObject(t *testing.T, tc *handlerContext, bktName, objName, cont
 	assertStatus(t, w, http.StatusOK)
 }

+func putEncryptedObjectWithHeadersErr(t *testing.T, tc *handlerContext, bktName, objName, content string, headers map[string]string, code errors.ErrorCode) {
+	body := bytes.NewReader([]byte(content))
+	w, r := prepareTestPayloadRequest(tc, bktName, objName, body)
+	setHeaders(r, headers)
+
+	tc.Handler().PutObjectHandler(w, r)
+	assertS3Error(t, w, errors.GetAPIError(code))
+}
+
 func getEncryptedObject(hc *handlerContext, bktName, objName string) ([]byte, http.Header) {
 	w, r := prepareTestRequest(hc, bktName, objName, nil)
 	setEncryptHeaders(r)
@@ -371,6 +399,15 @@ func getObject(hc *handlerContext, bktName, objName string) ([]byte, http.Header
 	return getObjectBase(hc, w, r)
 }

+func getObjectWithHeaders(hc *handlerContext, bktName, objName string, headers map[string]string) *httptest.ResponseRecorder {
+	w, r := prepareTestRequest(hc, bktName, objName, nil)
+	for k, v := range headers {
+		r.Header.Set(k, v)
+	}
+	hc.Handler().GetObjectHandler(w, r)
+	return w
+}
+
 func getObjectBase(hc *handlerContext, w *httptest.ResponseRecorder, r *http.Request) ([]byte, http.Header) {
 	hc.Handler().GetObjectHandler(w, r)
 	assertStatus(hc.t, w, http.StatusOK)
@@ -78,6 +78,27 @@ func addSSECHeaders(responseHeader http.Header, requestHeader http.Header) {
 	responseHeader.Set(api.AmzServerSideEncryptionCustomerKeyMD5, requestHeader.Get(api.AmzServerSideEncryptionCustomerKeyMD5))
 }

+func writeNotModifiedHeaders(h http.Header, extendedInfo *data.ExtendedObjectInfo, tagSetLength int, isBucketUnversioned, md5Enabled bool) {
+	h.Set(api.ETag, data.Quote(extendedInfo.ObjectInfo.ETag(md5Enabled)))
+	h.Set(api.LastModified, extendedInfo.ObjectInfo.Created.UTC().Format(http.TimeFormat))
+	h.Set(api.AmzTaggingCount, strconv.Itoa(tagSetLength))
+
+	if !isBucketUnversioned {
+		h.Set(api.AmzVersionID, extendedInfo.Version())
+	}
+
+	if cacheControl := extendedInfo.ObjectInfo.Headers[api.CacheControl]; cacheControl != "" {
+		h.Set(api.CacheControl, cacheControl)
+	}
+
+	for key, val := range extendedInfo.ObjectInfo.Headers {
+		if layer.IsSystemHeader(key) {
+			continue
+		}
+		h[api.MetadataPrefix+key] = []string{val}
+	}
+}
+
 func writeHeaders(h http.Header, requestHeader http.Header, extendedInfo *data.ExtendedObjectInfo, tagSetLength int,
 	isBucketUnversioned, md5Enabled bool) {
 	info := extendedInfo.ObjectInfo
@@ -158,7 +179,28 @@ func (h *handler) GetObjectHandler(w http.ResponseWriter, r *http.Request) {
 	}
 	info := extendedInfo.ObjectInfo

+	bktSettings, err := h.obj.GetBucketSettings(ctx, bktInfo)
+	if err != nil {
+		h.logAndSendError(ctx, w, "could not get bucket settings", reqInfo, err)
+		return
+	}
+
+	t := &data.ObjectVersion{
+		BktInfo:    bktInfo,
+		ObjectName: info.Name,
+		VersionID:  info.VersionID(),
+	}
+
+	tagSet, lockInfo, err := h.obj.GetObjectTaggingAndLock(ctx, t, extendedInfo.NodeVersion)
+	if err != nil && !errors.IsS3Error(err, errors.ErrNoSuchKey) {
+		h.logAndSendError(ctx, w, "could not get object meta data", reqInfo, err)
+		return
+	}
+
 	if err = checkPreconditions(info, conditional, h.cfg.MD5Enabled()); err != nil {
+		if errors.IsS3Error(err, errors.ErrNotModified) {
+			writeNotModifiedHeaders(w.Header(), extendedInfo, len(tagSet), bktSettings.Unversioned(), h.cfg.MD5Enabled())
+		}
 		h.logAndSendError(ctx, w, "precondition failed", reqInfo, err)
 		return
 	}
@@ -185,18 +227,6 @@ func (h *handler) GetObjectHandler(w http.ResponseWriter, r *http.Request) {
 		return
 	}

-	t := &data.ObjectVersion{
-		BktInfo:    bktInfo,
-		ObjectName: info.Name,
-		VersionID:  info.VersionID(),
-	}
-
-	tagSet, lockInfo, err := h.obj.GetObjectTaggingAndLock(ctx, t, extendedInfo.NodeVersion)
-	if err != nil && !errors.IsS3Error(err, errors.ErrNoSuchKey) {
-		h.logAndSendError(ctx, w, "could not get object meta data", reqInfo, err)
-		return
-	}
-
 	if layer.IsAuthenticatedRequest(ctx) {
 		overrideResponseHeaders(w.Header(), reqInfo.URL.Query())
 	}
@@ -206,12 +236,6 @@ func (h *handler) GetObjectHandler(w http.ResponseWriter, r *http.Request) {
 		return
 	}

-	bktSettings, err := h.obj.GetBucketSettings(ctx, bktInfo)
-	if err != nil {
-		h.logAndSendError(ctx, w, "could not get bucket settings", reqInfo, err)
-		return
-	}
-
 	getPayloadParams := &layer.GetObjectParams{
 		ObjectInfo: info,
 		Versioned:  p.Versioned(),
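The net effect of this reordering: tags and bucket settings are now fetched before the precondition check, so a 304 Not Modified response can carry the same metadata headers a 200 would (per RFC 9110, a 304 should include headers such as ETag and Cache-Control). A hypothetical client-side sketch against a local gateway, assuming anonymous access to `bucket/obj` at `localhost:8084`:

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	const url = "http://localhost:8084/bucket/obj" // hypothetical endpoint

	// First GET: remember the ETag the gateway returned.
	resp, err := http.Get(url)
	if err != nil {
		panic(err)
	}
	resp.Body.Close()
	etag := resp.Header.Get("ETag")

	// Conditional GET with the same ETag.
	req, _ := http.NewRequest(http.MethodGet, url, nil)
	req.Header.Set("If-None-Match", etag)
	resp, err = http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	if resp.StatusCode == http.StatusNotModified {
		// The body is empty, but the metadata headers are still populated.
		fmt.Println(resp.Header.Get("ETag"), resp.Header.Get("X-Amz-Version-Id"))
	}
}
```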
@@ -210,6 +210,27 @@ func TestGetObjectEnabledMD5(t *testing.T) {
 	require.Equal(t, data.Quote(objInfo.MD5Sum), headers.Get(api.ETag))
 }

+func TestGetObjectNotModifiedHeaders(t *testing.T) {
+	hc := prepareHandlerContextWithMinCache(t)
+	bktName, objName, metadataHeader := "bucket", "obj", api.MetadataPrefix+"header"
+	createVersionedBucket(hc, bktName)
+	header := putObjectWithHeaders(hc, bktName, objName, map[string]string{api.CacheControl: "value", metadataHeader: "value"})
+	etag, versionID := header.Get(api.ETag), header.Get(api.AmzVersionID)
+	require.NotEmpty(t, etag)
+	require.NotEmpty(t, versionID)
+
+	putObjectTagging(t, hc, bktName, objName, map[string]string{"key": "value"})
+
+	w := getObjectWithHeaders(hc, bktName, objName, map[string]string{api.IfNoneMatch: etag})
+	require.Equal(t, http.StatusNotModified, w.Code)
+	require.Equal(t, "1", w.Header().Get(api.AmzTaggingCount))
+	require.Equal(t, etag, w.Header().Get(api.ETag))
+	require.NotEmpty(t, w.Header().Get(api.LastModified))
+	require.Equal(t, versionID, w.Header().Get(api.AmzVersionID))
+	require.Equal(t, "value", w.Header().Get(api.CacheControl))
+	require.Equal(t, []string{"value"}, w.Header()[metadataHeader])
+}
+
 func putObjectContent(hc *handlerContext, bktName, objName, content string) http.Header {
 	body := bytes.NewReader([]byte(content))
 	w, r := prepareTestPayloadRequest(hc, bktName, objName, body)
@@ -462,6 +462,7 @@ func prepareTestRequestWithQuery(hc *handlerContext, bktName, objName string, qu
 	r.URL.RawQuery = query.Encode()

 	reqInfo := middleware.NewReqInfo(w, r, middleware.ObjectRequest{Bucket: bktName, Object: objName}, "")
+	reqInfo.User = hc.owner.String()
 	r = r.WithContext(middleware.SetReqInfo(hc.Context(), reqInfo))

 	return w, r
@@ -66,8 +66,9 @@ func (h *handler) HeadObjectHandler(w http.ResponseWriter, r *http.Request) {
 		return
 	}

-	if err = checkPreconditions(info, conditional, h.cfg.MD5Enabled()); err != nil {
-		h.logAndSendError(ctx, w, "precondition failed", reqInfo, err)
+	bktSettings, err := h.obj.GetBucketSettings(ctx, bktInfo)
+	if err != nil {
+		h.logAndSendError(ctx, w, "could not get bucket settings", reqInfo, err)
 		return
 	}
@@ -83,6 +84,14 @@ func (h *handler) HeadObjectHandler(w http.ResponseWriter, r *http.Request) {
 		return
 	}

+	if err = checkPreconditions(info, conditional, h.cfg.MD5Enabled()); err != nil {
+		if errors.IsS3Error(err, errors.ErrNotModified) {
+			writeNotModifiedHeaders(w.Header(), extendedInfo, len(tagSet), bktSettings.Unversioned(), h.cfg.MD5Enabled())
+		}
+		h.logAndSendError(ctx, w, "precondition failed", reqInfo, err)
+		return
+	}
+
 	if len(info.ContentType) == 0 {
 		if info.ContentType = layer.MimeByFilePath(info.Name); len(info.ContentType) == 0 {
 			getParams := &layer.GetObjectParams{
@@ -113,12 +122,6 @@ func (h *handler) HeadObjectHandler(w http.ResponseWriter, r *http.Request) {
 		return
 	}

-	bktSettings, err := h.obj.GetBucketSettings(ctx, bktInfo)
-	if err != nil {
-		h.logAndSendError(ctx, w, "could not get bucket settings", reqInfo, err)
-		return
-	}
-
 	writeHeaders(w.Header(), r.Header, extendedInfo, len(tagSet), bktSettings.Unversioned(), h.cfg.MD5Enabled())
 	w.WriteHeader(http.StatusOK)
 }
@@ -99,6 +99,27 @@ func TestHeadObject(t *testing.T) {
 	headObjectAssertS3Error(hc, bktName, objName, emptyVersion, apierr.ErrNoSuchKey)
 }

+func TestHeadObjectNotModifiedHeaders(t *testing.T) {
+	hc := prepareHandlerContextWithMinCache(t)
+	bktName, objName, metadataHeader := "bucket", "obj", api.MetadataPrefix+"header"
+	createVersionedBucket(hc, bktName)
+	header := putObjectWithHeaders(hc, bktName, objName, map[string]string{api.CacheControl: "value", metadataHeader: "value"})
+	etag, versionID := header.Get(api.ETag), header.Get(api.AmzVersionID)
+	require.NotEmpty(t, etag)
+	require.NotEmpty(t, versionID)
+
+	putObjectTagging(t, hc, bktName, objName, map[string]string{"key": "value"})
+
+	w := headObjectWithHeaders(hc, bktName, objName, emptyVersion, map[string]string{api.IfNoneMatch: etag})
+	require.Equal(t, http.StatusNotModified, w.Code)
+	require.Equal(t, "1", w.Header().Get(api.AmzTaggingCount))
+	require.Equal(t, etag, w.Header().Get(api.ETag))
+	require.NotEmpty(t, w.Header().Get(api.LastModified))
+	require.Equal(t, versionID, w.Header().Get(api.AmzVersionID))
+	require.Equal(t, "value", w.Header().Get(api.CacheControl))
+	require.Equal(t, []string{"value"}, w.Header()[metadataHeader])
+}
+
 func TestIsAvailableToResolve(t *testing.T) {
 	list := []string{"container", "s3"}
@@ -17,6 +17,7 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/util"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
+	"github.com/google/uuid"
 )

 const (
@@ -97,7 +98,7 @@ func (h *handler) PutBucketLifecycleHandler(w http.ResponseWriter, r *http.Reque
 	}

 	if err = checkLifecycleConfiguration(ctx, cfg, &networkInfo); err != nil {
-		h.logAndSendError(ctx, w, "invalid lifecycle configuration", reqInfo, fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrMalformedXML), err.Error()))
+		h.logAndSendError(ctx, w, "invalid lifecycle configuration", reqInfo, err)
 		return
 	}
@@ -140,58 +141,67 @@ func checkLifecycleConfiguration(ctx context.Context, cfg *data.LifecycleConfigu
 	now := layer.TimeNow(ctx)

 	if len(cfg.Rules) > maxRules {
-		return fmt.Errorf("number of rules cannot be greater than %d", maxRules)
+		return fmt.Errorf("%w: number of rules cannot be greater than %d", apierr.GetAPIError(apierr.ErrInvalidRequest), maxRules)
 	}

 	ids := make(map[string]struct{}, len(cfg.Rules))
 	for i, rule := range cfg.Rules {
-		if _, ok := ids[rule.ID]; ok && rule.ID != "" {
-			return fmt.Errorf("duplicate 'ID': %s", rule.ID)
+		if rule.ID == "" {
+			id, err := uuid.NewRandom()
+			if err != nil {
+				return fmt.Errorf("generate uuid: %w", err)
+			}
+			cfg.Rules[i].ID = id.String()
+			rule.ID = id.String()
+		}
+
+		if _, ok := ids[rule.ID]; ok {
+			return fmt.Errorf("%w: duplicate 'ID': %s", apierr.GetAPIError(apierr.ErrInvalidArgument), rule.ID)
 		}
 		ids[rule.ID] = struct{}{}

 		if len(rule.ID) > maxRuleIDLen {
-			return fmt.Errorf("'ID' value cannot be longer than %d characters", maxRuleIDLen)
+			return fmt.Errorf("%w: 'ID' value cannot be longer than %d characters", apierr.GetAPIError(apierr.ErrInvalidArgument), maxRuleIDLen)
 		}

 		if rule.Status != data.LifecycleStatusEnabled && rule.Status != data.LifecycleStatusDisabled {
-			return fmt.Errorf("invalid lifecycle status: %s", rule.Status)
+			return fmt.Errorf("%w: invalid lifecycle status: %s", apierr.GetAPIError(apierr.ErrMalformedXML), rule.Status)
 		}

 		if rule.AbortIncompleteMultipartUpload == nil && rule.Expiration == nil && rule.NonCurrentVersionExpiration == nil {
-			return fmt.Errorf("at least one action needs to be specified in a rule")
+			return fmt.Errorf("%w: at least one action needs to be specified in a rule", apierr.GetAPIError(apierr.ErrInvalidRequest))
 		}

 		if rule.AbortIncompleteMultipartUpload != nil {
 			if rule.AbortIncompleteMultipartUpload.DaysAfterInitiation != nil &&
 				*rule.AbortIncompleteMultipartUpload.DaysAfterInitiation <= 0 {
-				return fmt.Errorf("days after initiation must be a positive integer: %d", *rule.AbortIncompleteMultipartUpload.DaysAfterInitiation)
+				return fmt.Errorf("%w: days after initiation must be a positive integer", apierr.GetAPIError(apierr.ErrInvalidArgument))
 			}

 			if rule.Filter != nil && (rule.Filter.Tag != nil || (rule.Filter.And != nil && len(rule.Filter.And.Tags) > 0)) {
-				return fmt.Errorf("abort incomplete multipart upload cannot be specified with tags")
+				return fmt.Errorf("%w: abort incomplete multipart upload cannot be specified with tags", apierr.GetAPIError(apierr.ErrInvalidRequest))
 			}
 		}

 		if rule.Expiration != nil {
 			if rule.Expiration.ExpiredObjectDeleteMarker != nil {
 				if rule.Expiration.Days != nil || rule.Expiration.Date != "" {
-					return fmt.Errorf("expired object delete marker cannot be specified with days or date")
+					return fmt.Errorf("%w: expired object delete marker cannot be specified with days or date", apierr.GetAPIError(apierr.ErrMalformedXML))
 				}

 				if rule.Filter != nil && (rule.Filter.Tag != nil || (rule.Filter.And != nil && len(rule.Filter.And.Tags) > 0)) {
-					return fmt.Errorf("expired object delete marker cannot be specified with tags")
+					return fmt.Errorf("%w: expired object delete marker cannot be specified with tags", apierr.GetAPIError(apierr.ErrInvalidRequest))
 				}
 			}

 			if rule.Expiration.Days != nil && *rule.Expiration.Days <= 0 {
-				return fmt.Errorf("expiration days must be a positive integer: %d", *rule.Expiration.Days)
+				return fmt.Errorf("%w: expiration days must be a positive integer", apierr.GetAPIError(apierr.ErrInvalidArgument))
 			}

 			if rule.Expiration.Date != "" {
 				parsedTime, err := time.Parse("2006-01-02T15:04:05Z", rule.Expiration.Date)
 				if err != nil {
-					return fmt.Errorf("invalid value of expiration date: %s", rule.Expiration.Date)
+					return fmt.Errorf("%w: invalid value of expiration date: %s", apierr.GetAPIError(apierr.ErrInvalidArgument), rule.Expiration.Date)
 				}

 				epoch, err := util.TimeToEpoch(ni, now, parsedTime)
@@ -204,20 +214,29 @@ func checkLifecycleConfiguration(ctx context.Context, cfg *data.LifecycleConfigu
 		}

 		if rule.NonCurrentVersionExpiration != nil {
+			if rule.NonCurrentVersionExpiration.NewerNonCurrentVersions != nil && rule.NonCurrentVersionExpiration.NonCurrentDays == nil {
+				return fmt.Errorf("%w: newer noncurrent versions cannot be specified without noncurrent days", apierr.GetAPIError(apierr.ErrMalformedXML))
+			}
+
 			if rule.NonCurrentVersionExpiration.NewerNonCurrentVersions != nil &&
 				(*rule.NonCurrentVersionExpiration.NewerNonCurrentVersions > maxNewerNoncurrentVersions ||
 					*rule.NonCurrentVersionExpiration.NewerNonCurrentVersions <= 0) {
-				return fmt.Errorf("invalid value of newer noncurrent versions: %d", *rule.NonCurrentVersionExpiration.NewerNonCurrentVersions)
+				return fmt.Errorf("%w: newer noncurrent versions must be a positive integer up to %d", apierr.GetAPIError(apierr.ErrInvalidArgument),
+					maxNewerNoncurrentVersions)
 			}

 			if rule.NonCurrentVersionExpiration.NonCurrentDays != nil && *rule.NonCurrentVersionExpiration.NonCurrentDays <= 0 {
-				return fmt.Errorf("invalid value of noncurrent days: %d", *rule.NonCurrentVersionExpiration.NonCurrentDays)
+				return fmt.Errorf("%w: noncurrent days must be a positive integer", apierr.GetAPIError(apierr.ErrInvalidArgument))
 			}
 		}

 		if err := checkLifecycleRuleFilter(rule.Filter); err != nil {
 			return err
 		}
+
+		if rule.Filter != nil && rule.Filter.Prefix != "" && rule.Prefix != "" {
+			return fmt.Errorf("%w: rule cannot have two prefixes", apierr.GetAPIError(apierr.ErrMalformedXML))
+		}
 	}

 	return nil
@@ -239,9 +258,14 @@ func checkLifecycleRuleFilter(filter *data.LifecycleRuleFilter) error {
 			}
 		}

-		if filter.And.ObjectSizeGreaterThan != nil && filter.And.ObjectSizeLessThan != nil &&
-			*filter.And.ObjectSizeLessThan <= *filter.And.ObjectSizeGreaterThan {
-			return fmt.Errorf("the maximum object size must be larger than the minimum object size")
+		if filter.And.ObjectSizeLessThan != nil {
+			if *filter.And.ObjectSizeLessThan == 0 {
+				return fmt.Errorf("%w: the maximum object size must be more than 0", apierr.GetAPIError(apierr.ErrInvalidRequest))
+			}
+
+			if filter.And.ObjectSizeGreaterThan != nil && *filter.And.ObjectSizeLessThan <= *filter.And.ObjectSizeGreaterThan {
+				return fmt.Errorf("%w: the maximum object size must be larger than the minimum object size", apierr.GetAPIError(apierr.ErrInvalidRequest))
+			}
 		}
 	}
@@ -250,6 +274,9 @@ func checkLifecycleRuleFilter(filter *data.LifecycleRuleFilter) error {
 	}

 	if filter.ObjectSizeLessThan != nil {
+		if *filter.ObjectSizeLessThan == 0 {
+			return fmt.Errorf("%w: the maximum object size must be more than 0", apierr.GetAPIError(apierr.ErrInvalidRequest))
+		}
 		fields++
 	}
@@ -266,7 +293,7 @@ func checkLifecycleRuleFilter(filter *data.LifecycleRuleFilter) error {
 	}

 	if fields > 1 {
-		return fmt.Errorf("filter cannot have more than one field")
+		return fmt.Errorf("%w: filter cannot have more than one field", apierr.GetAPIError(apierr.ErrMalformedXML))
 	}

 	return nil
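A note on the pattern used throughout this hunk: each validation message now wraps a concrete API error with `%w`, so `checkLifecycleConfiguration` can return a single value that both reads as a human message and unwraps to the right S3 error code. A self-contained sketch of the mechanism, with a hypothetical `apiError` type standing in for the gateway's `apierr` package:

```go
package main

import (
	"errors"
	"fmt"
)

// Hypothetical stand-in for the gateway's apierr error type.
type apiError struct{ code string }

func (e apiError) Error() string { return e.code }

var errInvalidArgument = apiError{code: "InvalidArgument"}

func validateNonCurrentDays(days int) error {
	if days <= 0 {
		// Wrap with %w so callers can recover the code via errors.As/Is.
		return fmt.Errorf("%w: noncurrent days must be a positive integer", errInvalidArgument)
	}
	return nil
}

func main() {
	err := validateNonCurrentDays(0)
	fmt.Println(err) // InvalidArgument: noncurrent days must be a positive integer

	var apiErr apiError
	if errors.As(err, &apiErr) {
		fmt.Println("S3 error code:", apiErr.code) // InvalidArgument
	}
}
```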
@@ -29,17 +29,14 @@ func TestPutBucketLifecycleConfiguration(t *testing.T) {
 	for _, tc := range []struct {
 		name      string
 		body      *data.LifecycleConfiguration
-		error     bool
+		errorCode apierr.ErrorCode
 	}{
 		{
 			name: "correct configuration",
 			body: &data.LifecycleConfiguration{
-				XMLName: xml.Name{
-					Space: `http://s3.amazonaws.com/doc/2006-03-01/`,
-					Local: "LifecycleConfiguration",
-				},
 				Rules: []data.LifecycleRule{
 					{
+						ID:     "rule-1",
 						Status: data.LifecycleStatusEnabled,
 						Expiration: &data.LifecycleExpiration{
 							Days: ptr(21),
@@ -54,6 +51,7 @@ func TestPutBucketLifecycleConfiguration(t *testing.T) {
 					},
 				},
 				{
+					ID:     "rule-2",
 					Status: data.LifecycleStatusEnabled,
 					AbortIncompleteMultipartUpload: &data.AbortIncompleteMultipartUpload{
 						DaysAfterInitiation: ptr(14),
@@ -83,7 +81,7 @@ func TestPutBucketLifecycleConfiguration(t *testing.T) {
 				}
 				return lifecycle
 			}(),
-			error: true,
+			errorCode: apierr.ErrInvalidRequest,
 		},
 		{
 			name: "duplicate rule ID",
@@ -105,7 +103,7 @@ func TestPutBucketLifecycleConfiguration(t *testing.T) {
 					},
 				},
 			},
-			error: true,
+			errorCode: apierr.ErrInvalidArgument,
 		},
 		{
 			name: "too long rule ID",
@@ -121,7 +119,7 @@ func TestPutBucketLifecycleConfiguration(t *testing.T) {
 					},
 				}
 			}(),
-			error: true,
+			errorCode: apierr.ErrInvalidArgument,
 		},
 		{
 			name: "invalid status",
@@ -132,7 +130,7 @@ func TestPutBucketLifecycleConfiguration(t *testing.T) {
 					},
 				},
 			},
-			error: true,
+			errorCode: apierr.ErrMalformedXML,
 		},
 		{
 			name: "no actions",
@@ -146,7 +144,7 @@ func TestPutBucketLifecycleConfiguration(t *testing.T) {
 					},
 				},
 			},
-			error: true,
+			errorCode: apierr.ErrInvalidRequest,
 		},
 		{
 			name: "invalid days after initiation",
@@ -160,7 +158,7 @@ func TestPutBucketLifecycleConfiguration(t *testing.T) {
 					},
 				},
 			},
-			error: true,
+			errorCode: apierr.ErrInvalidArgument,
 		},
 		{
 			name: "invalid expired object delete marker declaration",
@@ -175,7 +173,7 @@ func TestPutBucketLifecycleConfiguration(t *testing.T) {
 					},
 				},
 			},
-			error: true,
+			errorCode: apierr.ErrMalformedXML,
 		},
 		{
 			name: "invalid expiration days",
@@ -189,7 +187,7 @@ func TestPutBucketLifecycleConfiguration(t *testing.T) {
 					},
 				},
 			},
-			error: true,
+			errorCode: apierr.ErrInvalidArgument,
 		},
 		{
 			name: "invalid expiration date",
@@ -203,7 +201,7 @@ func TestPutBucketLifecycleConfiguration(t *testing.T) {
 					},
 				},
 			},
-			error: true,
+			errorCode: apierr.ErrInvalidArgument,
 		},
 		{
 			name: "newer noncurrent versions is too small",
@@ -212,12 +210,13 @@ func TestPutBucketLifecycleConfiguration(t *testing.T) {
 				{
 					Status: data.LifecycleStatusEnabled,
 					NonCurrentVersionExpiration: &data.NonCurrentVersionExpiration{
+						NonCurrentDays:          ptr(1),
 						NewerNonCurrentVersions: ptr(0),
 					},
 				},
 			},
-			error: true,
+			errorCode: apierr.ErrInvalidArgument,
 		},
 		{
 			name: "newer noncurrent versions is too large",
@@ -226,12 +225,13 @@ func TestPutBucketLifecycleConfiguration(t *testing.T) {
 				{
 					Status: data.LifecycleStatusEnabled,
 					NonCurrentVersionExpiration: &data.NonCurrentVersionExpiration{
+						NonCurrentDays:          ptr(1),
 						NewerNonCurrentVersions: ptr(101),
 					},
 				},
 			},
-			error: true,
+			errorCode: apierr.ErrInvalidArgument,
 		},
 		{
 			name: "invalid noncurrent days",
@@ -245,7 +245,7 @@ func TestPutBucketLifecycleConfiguration(t *testing.T) {
 					},
 				},
 			},
-			error: true,
+			errorCode: apierr.ErrInvalidArgument,
 		},
 		{
 			name: "more than one filter field",
@@ -263,7 +263,7 @@ func TestPutBucketLifecycleConfiguration(t *testing.T) {
 					},
 				},
 			},
-			error: true,
+			errorCode: apierr.ErrMalformedXML,
 		},
 		{
 			name: "invalid tag in filter",
@@ -280,7 +280,7 @@ func TestPutBucketLifecycleConfiguration(t *testing.T) {
 					},
 				},
 			},
-			error: true,
+			errorCode: apierr.ErrInvalidTagKey,
 		},
 		{
 			name: "abort incomplete multipart upload with tag",
@@ -297,7 +297,7 @@ func TestPutBucketLifecycleConfiguration(t *testing.T) {
 					},
 				},
 			},
-			error: true,
+			errorCode: apierr.ErrInvalidRequest,
 		},
 		{
 			name: "expired object delete marker with tag",
@@ -316,7 +316,7 @@ func TestPutBucketLifecycleConfiguration(t *testing.T) {
 					},
 				},
 			},
-			error: true,
+			errorCode: apierr.ErrInvalidRequest,
 		},
 		{
 			name: "invalid size range",
@@ -336,19 +336,88 @@ func TestPutBucketLifecycleConfiguration(t *testing.T) {
 					},
 				},
 			},
-			error: true,
+			errorCode: apierr.ErrInvalidRequest,
+		},
+		{
+			name: "two prefixes",
+			body: &data.LifecycleConfiguration{
+				Rules: []data.LifecycleRule{
+					{
+						Status: data.LifecycleStatusEnabled,
+						Expiration: &data.LifecycleExpiration{
+							Days: ptr(21),
+						},
+						Filter: &data.LifecycleRuleFilter{
+							Prefix: "prefix-1/",
+						},
+						Prefix: "prefix-2/",
+					},
+				},
+			},
+			errorCode: apierr.ErrMalformedXML,
+		},
+		{
+			name: "newer noncurrent versions without noncurrent days",
+			body: &data.LifecycleConfiguration{
+				Rules: []data.LifecycleRule{
+					{
+						Status: data.LifecycleStatusEnabled,
+						NonCurrentVersionExpiration: &data.NonCurrentVersionExpiration{
+							NewerNonCurrentVersions: ptr(10),
+						},
+					},
+				},
+			},
+			errorCode: apierr.ErrMalformedXML,
+		},
+		{
+			name: "invalid maximum object size in filter",
+			body: &data.LifecycleConfiguration{
+				Rules: []data.LifecycleRule{
+					{
+						Status: data.LifecycleStatusEnabled,
+						Expiration: &data.LifecycleExpiration{
+							Days: ptr(21),
+						},
+						Filter: &data.LifecycleRuleFilter{
+							ObjectSizeLessThan: ptr(uint64(0)),
+						},
+					},
+				},
+			},
+			errorCode: apierr.ErrInvalidRequest,
+		},
+		{
+			name: "invalid maximum object size in filter and",
+			body: &data.LifecycleConfiguration{
+				Rules: []data.LifecycleRule{
+					{
+						Status: data.LifecycleStatusEnabled,
+						Expiration: &data.LifecycleExpiration{
+							Days: ptr(21),
+						},
+						Filter: &data.LifecycleRuleFilter{
+							And: &data.LifecycleRuleAndOperator{
+								Prefix:             "prefix/",
+								ObjectSizeLessThan: ptr(uint64(0)),
+							},
+						},
+					},
+				},
+			},
+			errorCode: apierr.ErrInvalidRequest,
 		},
 	} {
 		t.Run(tc.name, func(t *testing.T) {
-			if tc.error {
-				putBucketLifecycleConfigurationErr(hc, bktName, tc.body, apierr.GetAPIError(apierr.ErrMalformedXML))
+			if tc.errorCode > 0 {
+				putBucketLifecycleConfigurationErr(hc, bktName, tc.body, apierr.GetAPIError(tc.errorCode))
 				return
 			}

 			putBucketLifecycleConfiguration(hc, bktName, tc.body)

 			cfg := getBucketLifecycleConfiguration(hc, bktName)
-			require.Equal(t, *tc.body, *cfg)
+			require.Equal(t, tc.body.Rules, cfg.Rules)

 			deleteBucketLifecycleConfiguration(hc, bktName)
 			getBucketLifecycleConfigurationErr(hc, bktName, apierr.GetAPIError(apierr.ErrNoSuchLifecycleConfiguration))
@@ -356,6 +425,36 @@ func TestPutBucketLifecycleConfiguration(t *testing.T) {
 	}
 }

+func TestPutBucketLifecycleIDGeneration(t *testing.T) {
+	hc := prepareHandlerContext(t)
+
+	bktName := "bucket-lifecycle-id"
+	createBucket(hc, bktName)
+
+	lifecycle := &data.LifecycleConfiguration{
+		Rules: []data.LifecycleRule{
+			{
+				Status: data.LifecycleStatusEnabled,
+				Expiration: &data.LifecycleExpiration{
+					Days: ptr(21),
+				},
+			},
+			{
+				Status: data.LifecycleStatusEnabled,
+				AbortIncompleteMultipartUpload: &data.AbortIncompleteMultipartUpload{
+					DaysAfterInitiation: ptr(14),
+				},
+			},
+		},
+	}
+
+	putBucketLifecycleConfiguration(hc, bktName, lifecycle)
+	cfg := getBucketLifecycleConfiguration(hc, bktName)
+	require.Len(t, cfg.Rules, 2)
+	require.NotEmpty(t, cfg.Rules[0].ID)
+	require.NotEmpty(t, cfg.Rules[1].ID)
+}
+
 func TestPutBucketLifecycleInvalidMD5(t *testing.T) {
 	hc := prepareHandlerContext(t)
@@ -681,6 +681,80 @@ func TestS3BucketListDelimiterNotSkipSpecial(t *testing.T) {
 	}
 }

+func TestS3BucketListMarkerUnreadable(t *testing.T) {
+	hc := prepareHandlerContext(t)
+
+	bktName := "bucket-for-listing"
+	bktInfo := createTestBucket(hc, bktName)
+
+	objects := []string{"bar", "baz", "foo", "quxx"}
+	for _, objName := range objects {
+		createTestObject(hc, bktInfo, objName, encryption.Params{})
+	}
+
+	list := listObjectsV1(hc, bktName, "", "", "\x0a", -1)
+
+	require.Equal(t, "\x0a", list.Marker)
+	require.False(t, list.IsTruncated)
+
+	require.Len(t, list.Contents, len(objects))
+	for i := 0; i < len(list.Contents); i++ {
+		require.Equal(t, objects[i], list.Contents[i].Key)
+	}
+}
+
+func TestS3BucketListMarkerNotInList(t *testing.T) {
+	hc := prepareHandlerContext(t)
+
+	bktName := "bucket-for-listing"
+	bktInfo := createTestBucket(hc, bktName)
+
+	objects := []string{"bar", "baz", "foo", "quxx"}
+	for _, objName := range objects {
+		createTestObject(hc, bktInfo, objName, encryption.Params{})
+	}
+
+	list := listObjectsV1(hc, bktName, "", "", "blah", -1)
+
+	require.Equal(t, "blah", list.Marker)
+
+	expected := []string{"foo", "quxx"}
+	require.Len(t, list.Contents, len(expected))
+	for i := 0; i < len(list.Contents); i++ {
+		require.Equal(t, expected[i], list.Contents[i].Key)
+	}
+}
+
+func TestListTruncatedCacheHit(t *testing.T) {
+	hc := prepareHandlerContext(t)
+
+	bktName := "bucket-for-listing"
+	bktInfo := createTestBucket(hc, bktName)
+
+	objects := []string{"bar", "baz", "foo", "quxx"}
+	for _, objName := range objects {
+		createTestObject(hc, bktInfo, objName, encryption.Params{})
+	}
+
+	list := listObjectsV1(hc, bktName, "", "", "", 2)
+	require.True(t, list.IsTruncated)
+
+	require.Len(t, list.Contents, 2)
+	for i := 0; i < len(list.Contents); i++ {
+		require.Equal(t, objects[i], list.Contents[i].Key)
+	}
+
+	cacheKey := cache.CreateListSessionCacheKey(bktInfo.CID, "", list.NextMarker)
+	list = listObjectsV1(hc, bktName, "", "", list.NextMarker, 2)
+	require.Nil(t, hc.cache.GetListSession(hc.owner, cacheKey))
+	require.False(t, list.IsTruncated)
+
+	require.Len(t, list.Contents, 2)
+	for i := 0; i < len(list.Contents); i++ {
+		require.Equal(t, objects[i+2], list.Contents[i].Key)
+	}
+}
+
 func TestMintVersioningListObjectVersionsVersionIDContinuation(t *testing.T) {
 	hc := prepareHandlerContext(t)
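The three tests above pin down ListObjectsV1 marker semantics: listing resumes strictly after the marker in lexicographic key order, whether the marker is an unreadable byte, a key that does not exist in the bucket, or the NextMarker of a truncated page. A self-contained sketch of that rule (hypothetical helper, not gateway code):

```go
package main

import "fmt"

// afterMarker models the V1 marker rule: keys come back in sorted order,
// strictly greater than the marker, whether or not the marker names an
// existing key.
func afterMarker(sortedKeys []string, marker string) []string {
	out := make([]string, 0, len(sortedKeys))
	for _, k := range sortedKeys { // keys assumed already sorted
		if k > marker {
			out = append(out, k)
		}
	}
	return out
}

func main() {
	keys := []string{"bar", "baz", "foo", "quxx"}
	fmt.Println(afterMarker(keys, "blah")) // [foo quxx]
	fmt.Println(afterMarker(keys, "\x0a")) // [bar baz foo quxx]
}
```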
@@ -251,7 +251,7 @@ func (h *handler) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
 		Reader:            body,
 		Header:            metadata,
 		Encryption:        encryptionParams,
-		ContentMD5:        r.Header.Get(api.ContentMD5),
+		ContentMD5:        getMD5Header(r),
 		ContentSHA256Hash: r.Header.Get(api.AmzContentSha256),
 	}
@@ -1038,3 +1038,13 @@ func (h *handler) parseLocationConstraint(r *http.Request) (*createBucketParams,
 	}
 	return params, nil
 }
+
+func getMD5Header(r *http.Request) *string {
+	var md5Hdr *string
+	if len(r.Header.Values(api.ContentMD5)) != 0 {
+		hdr := r.Header.Get(api.ContentMD5)
+		md5Hdr = &hdr
+	}
+
+	return md5Hdr
+}
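Switching `ContentMD5` to a `*string` lets the layer distinguish three cases that a plain string collapses into two: header absent (no digest check), header present but empty (rejected as `InvalidDigest`), and header present with a value (decoded and compared, mismatch rejected as `BadDigest`). A self-contained sketch of the same helper using only net/http, with the header name inlined instead of `api.ContentMD5`:

```go
package main

import (
	"fmt"
	"net/http"
)

// Same shape as getMD5Header above: nil means the header was absent;
// a pointer to "" means it was sent but empty.
func getMD5Header(r *http.Request) *string {
	if len(r.Header.Values("Content-MD5")) != 0 {
		hdr := r.Header.Get("Content-MD5")
		return &hdr
	}
	return nil
}

func main() {
	r, _ := http.NewRequest(http.MethodPut, "http://localhost:8084/bucket/obj", nil)
	fmt.Println(getMD5Header(r) == nil) // true: no digest check requested

	r.Header.Set("Content-MD5", "")
	fmt.Printf("%q\n", *getMD5Header(r)) // "": rejected as InvalidDigest
}
```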
@@ -284,6 +284,12 @@ func TestPutObjectWithInvalidContentMD5(t *testing.T) {
 	w, r := prepareTestPayloadRequest(tc, bktName, objName, bytes.NewReader(content))
 	r.Header.Set(api.ContentMD5, base64.StdEncoding.EncodeToString([]byte("invalid")))
 	tc.Handler().PutObjectHandler(w, r)
+	assertS3Error(t, w, apierr.GetAPIError(apierr.ErrBadDigest))
+
+	content = []byte("content")
+	w, r = prepareTestPayloadRequest(tc, bktName, objName, bytes.NewReader(content))
+	r.Header.Set(api.ContentMD5, base64.StdEncoding.EncodeToString([]byte("")))
+	tc.Handler().PutObjectHandler(w, r)
 	assertS3Error(t, w, apierr.GetAPIError(apierr.ErrInvalidDigest))

 	checkNotFound(t, tc, bktName, objName, emptyVersion)
@@ -498,9 +504,6 @@ func getEmptyChunkedRequest(ctx context.Context, t *testing.T, bktName, objName
 	AWSAccessKeyID := "48c1K4PLVb7SvmV3PjDKEuXaMh8yZMXZ8Wx9msrkKcYw06dZeaxeiPe8vyFm2WsoeVaNt7UWEjNsVkagDs8oX4XXh"
 	AWSSecretAccessKey := "09260955b4eb0279dc017ba20a1ddac909cbd226c86cbb2d868e55534c8e64b0"

-	//awsCreds := credentials.NewStaticCredentials(AWSAccessKeyID, AWSSecretAccessKey, "")
-	//signer := v4.NewSigner(awsCreds)
-
 	reqBody := bytes.NewBufferString("0;chunk-signature=311a7142c8f3a07972c3aca65c36484b513a8fee48ab7178c7225388f2ae9894\r\n\r\n")

 	req, err := http.NewRequest("PUT", "http://localhost:8084/"+bktName+"/"+objName, reqBody)
@@ -111,7 +111,7 @@ type (
 		Encryption        encryption.Params
 		CopiesNumbers     []uint32
 		CompleteMD5Hash   string
-		ContentMD5        string
+		ContentMD5        *string
 		ContentSHA256Hash string
 	}
@@ -585,7 +585,7 @@ func shouldSkip(node *data.ExtendedNodeVersion, p commonVersionsListingParams, e
 		return true
 	}

-	if p.Bookmark != "" {
+	if p.Bookmark != "" && p.Bookmark != p.Marker {
 		if _, ok := existed[continuationToken]; !ok {
 			if p.Bookmark != node.NodeVersion.OID.EncodeToString() {
 				return true
@@ -286,8 +286,11 @@ func (n *Layer) PutObject(ctx context.Context, p *PutObjectParams) (*data.Extend
 		return nil, err
 	}

-	if !p.Encryption.Enabled() && len(p.ContentMD5) > 0 {
-		headerMd5Hash, err := base64.StdEncoding.DecodeString(p.ContentMD5)
+	if !p.Encryption.Enabled() && p.ContentMD5 != nil {
+		if len(*p.ContentMD5) == 0 {
+			return nil, apierr.GetAPIError(apierr.ErrInvalidDigest)
+		}
+		headerMd5Hash, err := base64.StdEncoding.DecodeString(*p.ContentMD5)
 		if err != nil {
 			return nil, apierr.GetAPIError(apierr.ErrInvalidDigest)
 		}
@@ -296,7 +299,7 @@ func (n *Layer) PutObject(ctx context.Context, p *PutObjectParams) (*data.Extend
 		if err != nil {
 			n.reqLogger(ctx).Debug(logs.FailedToDeleteObject, zap.Stringer("cid", p.BktInfo.CID), zap.Stringer("oid", createdObj.ID))
 		}
-		return nil, apierr.GetAPIError(apierr.ErrInvalidDigest)
+		return nil, apierr.GetAPIError(apierr.ErrBadDigest)
 	}
 }
@@ -349,6 +349,25 @@ $ frostfs-s3-authmate generate-presigned-url --endpoint http://localhost:8084 \
 }
 ```
+
+### Upload file with presigned URL
+
+1. Generate presigned URL to upload object `obj` to bucket `presigned`
+
+```shell
+$ frostfs-s3-authmate generate-presigned-url --endpoint http://localhost:8084 \
+  --method put --bucket presigned --object obj --lifetime 30s
+
+{
+  "URL": "http://localhost:8084/presigned/obj?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=CYfbvKwSC9VNvttj5snyEZ5Ttr2VaBabpw7mRuEzNXyw09ewUERj6MGDKfyckfg5VZ39GfXbwLwz62UPVeRxhJDet%2F20241029%2Fdefault%2Fs3%2Faws4_request&X-Amz-Date=20241029T145726Z&X-Amz-Expires=30&X-Amz-SignedHeaders=host&X-Amz-Signature=2bb13b3e6448968219ad95147debe49e37bce5ce3ed1344c4015f43cb444a956"
+}
+```
+
+2. Upload file using `curl`
+
+```shell
+curl --upload-file /path/to/file 'http://localhost:8084/presigned/obj?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=CYfbvKwSC9VNvttj5snyEZ5Ttr2VaBabpw7mRuEzNXyw09ewUERj6MGDKfyckfg5VZ39GfXbwLwz62UPVeRxhJDet%2F20241029%2Fdefault%2Fs3%2Faws4_request&X-Amz-Date=20241029T145726Z&X-Amz-Expires=30&X-Amz-SignedHeaders=host&X-Amz-Signature=2bb13b3e6448968219ad95147debe49e37bce5ce3ed1344c4015f43cb444a956'
+```
+
 ### AWS CLI

 You can also can get the presigned URL (only for GET) using aws cli v2: