[#595] Add SSE with customer key

Signed-off-by: Denis Kirillov <denis@nspcc.ru>

Parent: c0de8f41fc
Commit: 87c05eb514

22 changed files with 1248 additions and 70 deletions
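The commit teaches the gateway to handle SSE-C: the client supplies an AES-256 key with every request and the gateway encrypts and decrypts object payloads with it. As a minimal client-side sketch (not part of the commit; the gateway URL is a placeholder, while the header names and the 32-byte test key match the constants added below), the three validated request headers can be formed like this:

// Sketch only: derive the SSE-C headers that formEncryptionParams (put handler below) expects.
// The raw key must be exactly 32 bytes; both the key and its MD5 digest are sent base64-encoded.
package main

import (
    "crypto/md5"
    "encoding/base64"
    "fmt"
    "net/http"
)

func setSSECHeaders(r *http.Request, rawKey []byte) {
    sum := md5.Sum(rawKey)
    r.Header.Set("x-amz-server-side-encryption-customer-algorithm", "AES256")
    r.Header.Set("x-amz-server-side-encryption-customer-key", base64.StdEncoding.EncodeToString(rawKey))
    r.Header.Set("x-amz-server-side-encryption-customer-key-MD5", base64.StdEncoding.EncodeToString(sum[:]))
}

func main() {
    // Hypothetical gateway address, for illustration only.
    r, _ := http.NewRequest(http.MethodPut, "http://localhost:8080/bucket/object", nil)
    setSSECHeaders(r, []byte("1234567890qwertyuiopasdfghjklzxc")) // 32-byte test key used in the tests below
    fmt.Println(r.Header)
}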
@@ -36,6 +36,7 @@ type (
        CID            cid.ID
        IsDir          bool
        IsDeleteMarker bool
        EncryptionInfo EncryptionInfo

        Bucket string
        Name   string

@@ -47,6 +48,14 @@ type (
        Headers map[string]string
    }

    // EncryptionInfo store parsed object encryption headers.
    EncryptionInfo struct {
        Enabled   bool
        Algorithm string
        HMACKey   string
        HMACSalt  string
    }

    // NotificationInfo store info to send s3 notification.
    NotificationInfo struct {
        Name string

@@ -113,6 +122,11 @@ func (o *ObjectInfo) Address() oid.Address {
    return addr
}

// IsEncrypted returns true if object is encrypted.
func (o ObjectInfo) IsEncrypted() bool {
    return o.EncryptionInfo.Enabled
}

func (b BucketSettings) Unversioned() bool {
    return b.Versioning == VersioningUnversioned
}
@@ -141,6 +141,7 @@ const (
    ErrSSEEncryptedObject
    ErrInvalidEncryptionParameters
    ErrInvalidSSECustomerAlgorithm
    ErrInvalidEncryptionAlgorithm
    ErrInvalidSSECustomerKey
    ErrMissingSSECustomerKey
    ErrMissingSSECustomerKeyMD5

@@ -1011,6 +1012,12 @@ var errorCodes = errorCodeMap{
        Description:    "Requests specifying Server Side Encryption with Customer provided keys must provide a valid encryption algorithm.",
        HTTPStatusCode: http.StatusBadRequest,
    },
    ErrInvalidEncryptionAlgorithm: {
        ErrCode:        ErrInvalidEncryptionAlgorithm,
        Code:           "InvalidArgument",
        Description:    "The encryption request that you specified is not valid. The valid value is AES256.",
        HTTPStatusCode: http.StatusBadRequest,
    },
    ErrInvalidSSECustomerKey: {
        ErrCode: ErrInvalidSSECustomerKey,
        Code:    "InvalidArgument",
@@ -10,6 +10,7 @@ import (
    "github.com/nspcc-dev/neofs-s3-gw/api/data"
    "github.com/nspcc-dev/neofs-s3-gw/api/errors"
    "github.com/nspcc-dev/neofs-s3-gw/api/layer"
    "go.uber.org/zap"
)

type (

@@ -93,6 +94,17 @@ func (h *handler) GetObjectAttributesHandler(w http.ResponseWriter, r *http.Requ
    }
    info := extendedInfo.ObjectInfo

    encryption, err := formEncryptionParams(r.Header)
    if err != nil {
        h.logAndSendError(w, "invalid sse headers", reqInfo, err)
        return
    }

    if err = encryption.MatchObjectEncryption(info.EncryptionInfo); err != nil {
        h.logAndSendError(w, "encryption doesn't match object", reqInfo, errors.GetAPIError(errors.ErrBadRequest), zap.Error(err))
        return
    }

    if err = checkPreconditions(info, params.Conditional); err != nil {
        h.logAndSendError(w, "precondition failed", reqInfo, err)
        return
@@ -96,6 +96,17 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
        return
    }

    encryption, err := formEncryptionParams(r.Header)
    if err != nil {
        h.logAndSendError(w, "invalid sse headers", reqInfo, err)
        return
    }

    if err = encryption.MatchObjectEncryption(objInfo.EncryptionInfo); err != nil {
        h.logAndSendError(w, "encryption doesn't match object", reqInfo, errors.GetAPIError(errors.ErrBadRequest), zap.Error(err))
        return
    }

    if err = checkPreconditions(objInfo, args.Conditional); err != nil {
        h.logAndSendError(w, "precondition failed", reqInfo, errors.GetAPIError(errors.ErrPreconditionFailed))
        return

@@ -117,6 +128,7 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
        DstObject:  reqInfo.ObjectName,
        SrcSize:    objInfo.Size,
        Header:     metadata,
        Encryption: encryption,
    }

    settings, err := h.obj.GetBucketSettings(r.Context(), dstBktInfo)

@@ -173,6 +185,10 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
    if err = h.sendNotifications(r.Context(), s); err != nil {
        h.log.Error("couldn't send notification: %w", zap.Error(err))
    }

    if encryption.Enabled() {
        addSSECHeaders(w.Header(), r.Header)
    }
}

func parseCopyObjectArgs(headers http.Header) (*copyObjectArgs, error) {
@@ -244,9 +244,7 @@ func TestDeleteObjectCheckMarkerReturn(t *testing.T) {
}

func createBucketAndObject(t *testing.T, tc *handlerContext, bktName, objName string) (*data.BucketInfo, *data.ObjectInfo) {
    createTestBucket(tc.Context(), t, tc, bktName)
    bktInfo, err := tc.Layer().GetBucketInfo(tc.Context(), bktName)
    require.NoError(t, err)
    bktInfo := createTestBucket(tc.Context(), t, tc, bktName)

    objInfo := createTestObject(tc.Context(), t, tc, bktInfo, objName)
api/handler/encryption_test.go (new file, 311 lines)

@@ -0,0 +1,311 @@
package handler

import (
    "bytes"
    "crypto/rand"
    "fmt"
    "io"
    "net/http"
    "net/url"
    "strconv"
    "strings"
    "testing"

    "github.com/nspcc-dev/neofs-s3-gw/api"
    "github.com/nspcc-dev/neofs-s3-gw/api/layer"
    "github.com/stretchr/testify/require"
)

const (
    aes256Key       = "MTIzNDU2Nzg5MHF3ZXJ0eXVpb3Bhc2RmZ2hqa2x6eGM="
    aes256KeyMD5    = "NtkH/y2maPit+yUkhq4Q7A=="
    partNumberQuery = "partNumber"
    uploadIDQuery   = "uploadId"
)

func TestSimpleGetEncrypted(t *testing.T) {
    tc := prepareHandlerContext(t)

    bktName, objName := "bucket-for-sse-c", "object-to-encrypt"
    bktInfo := createTestBucket(tc.Context(), t, tc, bktName)

    content := "content"
    putEncryptedObject(t, tc, bktName, objName, content)

    objInfo, err := tc.Layer().GetObjectInfo(tc.Context(), &layer.HeadObjectParams{BktInfo: bktInfo, Object: objName})
    require.NoError(t, err)
    obj, err := tc.MockedPool().ReadObject(tc.Context(), layer.PrmObjectRead{Container: bktInfo.CID, Object: objInfo.ID})
    require.NoError(t, err)
    encryptedContent, err := io.ReadAll(obj.Payload)
    require.NoError(t, err)
    require.NotEqual(t, content, string(encryptedContent))

    response, _ := getEncryptedObject(t, tc, bktName, objName)
    require.Equal(t, content, string(response))
}

func TestGetEncryptedRange(t *testing.T) {
    tc := prepareHandlerContext(t)

    bktName, objName := "bucket-for-sse-c", "object-to-encrypt"
    createTestBucket(tc.Context(), t, tc, bktName)

    var sb strings.Builder
    for i := 0; i < 1<<16+11; i++ {
        switch i {
        case 0:
            sb.Write([]byte("b"))
        case 1<<16 - 2:
            sb.Write([]byte("c"))
        case 1<<16 - 1:
            sb.Write([]byte("d"))
        case 1 << 16:
            sb.Write([]byte("e"))
        case 1<<16 + 1:
            sb.Write([]byte("f"))
        case 1<<16 + 10:
            sb.Write([]byte("g"))
        default:
            sb.Write([]byte("a"))
        }
    }

    content := sb.String()
    putEncryptedObject(t, tc, bktName, objName, content)

    full := getEncryptedObjectRange(t, tc, bktName, objName, 0, sb.Len()-1)
    require.Equalf(t, content, string(full), "expected len: %d, actual len: %d", len(content), len(full))

    beginning := getEncryptedObjectRange(t, tc, bktName, objName, 0, 3)
    require.Equal(t, content[:4], string(beginning))

    middle := getEncryptedObjectRange(t, tc, bktName, objName, 1<<16-3, 1<<16+2)
    require.Equal(t, "acdefa", string(middle))

    end := getEncryptedObjectRange(t, tc, bktName, objName, 1<<16+2, len(content)-1)
    require.Equal(t, "aaaaaaaag", string(end))
}

func TestS3EncryptionSSECMultipartUpload(t *testing.T) {
    tc := prepareHandlerContext(t)
    bktName, objName := "bucket-for-sse-c-multipart-s3-tests", "multipart_enc"
    createTestBucket(tc.Context(), t, tc, bktName)

    objLen := 30 * 1024 * 1024
    partSize := objLen / 6
    headerMetaKey := api.MetadataPrefix + "foo"
    headers := map[string]string{
        headerMetaKey:   "bar",
        api.ContentType: "text/plain",
    }

    data := multipartUploadEncrypted(t, tc, bktName, objName, headers, objLen, partSize)
    require.Equal(t, objLen, len(data))

    resData, resHeader := getEncryptedObject(t, tc, bktName, objName)
    equalDataSlices(t, data, resData)
    require.Equal(t, headers[api.ContentType], resHeader.Get(api.ContentType))
    require.Equal(t, headers[headerMetaKey], resHeader[headerMetaKey][0])
    require.Equal(t, strconv.Itoa(objLen), resHeader.Get(api.ContentLength))

    checkContentUsingRangeEnc(t, tc, bktName, objName, data, 1000000)
    checkContentUsingRangeEnc(t, tc, bktName, objName, data, 10000000)
}

func equalDataSlices(t *testing.T, expected, actual []byte) {
    require.Equal(t, len(expected), len(actual), "sizes don't match")

    if bytes.Equal(expected, actual) {
        return
    }

    for i := 0; i < len(expected); i++ {
        if expected[i] != actual[i] {
            require.Equalf(t, expected[i], actual[i], "differ start with '%d' position, length: %d", i, len(expected))
        }
    }
}

func checkContentUsingRangeEnc(t *testing.T, tc *handlerContext, bktName, objName string, data []byte, step int) {
    var off, toRead, end int

    for off < len(data) {
        toRead = len(data) - off
        if toRead > step {
            toRead = step
        }
        end = off + toRead - 1

        rangeData := getEncryptedObjectRange(t, tc, bktName, objName, off, end)
        equalDataSlices(t, data[off:end+1], rangeData)

        off += step
    }
}

func multipartUploadEncrypted(t *testing.T, tc *handlerContext, bktName, objName string, headers map[string]string, objLen, partsSize int) (objData []byte) {
    multipartInfo := createMultipartUpload(t, tc, bktName, objName, headers)

    var sum, currentPart int
    var etags []string
    adjustedSize := partsSize

    for sum < objLen {
        currentPart++

        sum += partsSize
        if sum > objLen {
            adjustedSize = objLen - sum
        }

        etag, data := uploadPart(t, tc, bktName, objName, multipartInfo.UploadID, currentPart, adjustedSize)
        etags = append(etags, etag)
        objData = append(objData, data...)
    }

    completeMultipartUpload(t, tc, bktName, objName, multipartInfo.UploadID, etags)
    return
}

func createMultipartUpload(t *testing.T, tc *handlerContext, bktName, objName string, headers map[string]string) *InitiateMultipartUploadResponse {
    w, r := prepareTestRequest(t, bktName, objName, nil)
    setEncryptHeaders(r)
    setHeaders(r, headers)
    tc.Handler().CreateMultipartUploadHandler(w, r)
    multipartInitInfo := &InitiateMultipartUploadResponse{}
    readResponse(t, w, http.StatusOK, multipartInitInfo)

    return multipartInitInfo
}

func completeMultipartUpload(t *testing.T, tc *handlerContext, bktName, objName, uploadID string, partsETags []string) {
    query := make(url.Values)
    query.Set(uploadIDQuery, uploadID)
    complete := &CompleteMultipartUpload{
        Parts: []*layer.CompletedPart{},
    }
    for i, tag := range partsETags {
        complete.Parts = append(complete.Parts, &layer.CompletedPart{
            ETag:       tag,
            PartNumber: i + 1,
        })
    }

    w, r := prepareTestFullRequest(t, bktName, objName, query, complete)
    tc.Handler().CompleteMultipartUploadHandler(w, r)
    assertStatus(t, w, http.StatusOK)
}

func uploadPart(t *testing.T, tc *handlerContext, bktName, objName, uploadID string, num, size int) (string, []byte) {
    partBody := make([]byte, size)
    _, err := rand.Read(partBody)
    require.NoError(t, err)

    query := make(url.Values)
    query.Set(uploadIDQuery, uploadID)
    query.Set(partNumberQuery, strconv.Itoa(num))

    w, r := prepareTestRequestWithQuery(bktName, objName, query, partBody)
    setEncryptHeaders(r)
    tc.Handler().UploadPartHandler(w, r)
    assertStatus(t, w, http.StatusOK)

    return w.Header().Get(api.ETag), partBody
}

func TestMultipartEncrypted(t *testing.T) {
    partSize := 5*1048576 + 1<<16 - 5 // 5MB (min part size) + 64kb (cipher block size) - 5 (to check corner range)

    tc := prepareHandlerContext(t)

    bktName, objName := "bucket-for-sse-c-multipart", "object-to-encrypt-multipart"
    createTestBucket(tc.Context(), t, tc, bktName)

    w, r := prepareTestRequest(t, bktName, objName, nil)
    setEncryptHeaders(r)
    tc.Handler().CreateMultipartUploadHandler(w, r)
    multipartInitInfo := &InitiateMultipartUploadResponse{}
    readResponse(t, w, http.StatusOK, multipartInitInfo)

    part1 := make([]byte, partSize)
    for i := range part1 {
        part1[i] = 'a'
    }
    query := make(url.Values)
    query.Set(uploadIDQuery, multipartInitInfo.UploadID)
    query.Set(partNumberQuery, "1")
    w, r = prepareTestRequestWithQuery(bktName, objName, query, part1)
    setEncryptHeaders(r)
    tc.Handler().UploadPartHandler(w, r)
    assertStatus(t, w, http.StatusOK)
    part1ETag := w.Header().Get(api.ETag)

    part2 := []byte("part2")
    query = make(url.Values)
    query.Set(uploadIDQuery, multipartInitInfo.UploadID)
    query.Set(partNumberQuery, "2")
    w, r = prepareTestRequestWithQuery(bktName, objName, query, part2)
    setEncryptHeaders(r)
    tc.Handler().UploadPartHandler(w, r)
    assertStatus(t, w, http.StatusOK)
    part2ETag := w.Header().Get(api.ETag)

    query = make(url.Values)
    query.Set(uploadIDQuery, multipartInitInfo.UploadID)
    complete := &CompleteMultipartUpload{
        Parts: []*layer.CompletedPart{
            {ETag: part1ETag, PartNumber: 1},
            {ETag: part2ETag, PartNumber: 2},
        },
    }
    w, r = prepareTestFullRequest(t, bktName, objName, query, complete)
    tc.Handler().CompleteMultipartUploadHandler(w, r)
    assertStatus(t, w, http.StatusOK)

    res, _ := getEncryptedObject(t, tc, bktName, objName)
    require.Equal(t, len(part1)+len(part2), len(res))
    require.Equal(t, append(part1, part2...), res)

    part2Range := getEncryptedObjectRange(t, tc, bktName, objName, len(part1), len(part1)+len(part2)-1)
    require.Equal(t, part2[0:], part2Range)
}

func putEncryptedObject(t *testing.T, tc *handlerContext, bktName, objName, content string) {
    body := bytes.NewReader([]byte(content))
    w, r := prepareTestPayloadRequest(bktName, objName, body)
    setEncryptHeaders(r)
    tc.Handler().PutObjectHandler(w, r)
    assertStatus(t, w, http.StatusOK)
}

func getEncryptedObject(t *testing.T, tc *handlerContext, bktName, objName string) ([]byte, http.Header) {
    w, r := prepareTestRequest(t, bktName, objName, nil)
    setEncryptHeaders(r)
    tc.Handler().GetObjectHandler(w, r)
    assertStatus(t, w, http.StatusOK)
    content, err := io.ReadAll(w.Result().Body)
    require.NoError(t, err)
    return content, w.Header()
}

func getEncryptedObjectRange(t *testing.T, tc *handlerContext, bktName, objName string, start, end int) []byte {
    w, r := prepareTestRequest(t, bktName, objName, nil)
    setEncryptHeaders(r)
    r.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", start, end))
    tc.Handler().GetObjectHandler(w, r)
    assertStatus(t, w, http.StatusPartialContent)
    content, err := io.ReadAll(w.Result().Body)
    require.NoError(t, err)
    return content
}

func setEncryptHeaders(r *http.Request) {
    r.Header.Set(api.AmzServerSideEncryptionCustomerAlgorithm, layer.AESEncryptionAlgorithm)
    r.Header.Set(api.AmzServerSideEncryptionCustomerKey, aes256Key)
    r.Header.Set(api.AmzServerSideEncryptionCustomerKeyMD5, aes256KeyMD5)
}

func setHeaders(r *http.Request, header map[string]string) {
    for key, val := range header {
        r.Header.Set(key, val)
    }
}
@@ -12,6 +12,7 @@ import (
    "github.com/nspcc-dev/neofs-s3-gw/api/data"
    "github.com/nspcc-dev/neofs-s3-gw/api/errors"
    "github.com/nspcc-dev/neofs-s3-gw/api/layer"
    "go.uber.org/zap"
)

type conditionalArgs struct {

@@ -71,13 +72,25 @@ func overrideResponseHeaders(h http.Header, query url.Values) {
    }
}

func writeHeaders(h http.Header, extendedInfo *data.ExtendedObjectInfo, tagSetLength int, isBucketUnversioned bool) {
func addSSECHeaders(responseHeader http.Header, requestHeader http.Header) {
    responseHeader.Set(api.AmzServerSideEncryptionCustomerAlgorithm, requestHeader.Get(api.AmzServerSideEncryptionCustomerAlgorithm))
    responseHeader.Set(api.AmzServerSideEncryptionCustomerKeyMD5, requestHeader.Get(api.AmzServerSideEncryptionCustomerKeyMD5))
}

func writeHeaders(h http.Header, requestHeader http.Header, extendedInfo *data.ExtendedObjectInfo, tagSetLength int, isBucketUnversioned bool) {
    info := extendedInfo.ObjectInfo
    if len(info.ContentType) > 0 && h.Get(api.ContentType) == "" {
        h.Set(api.ContentType, info.ContentType)
    }
    h.Set(api.LastModified, info.Created.UTC().Format(http.TimeFormat))
    h.Set(api.ContentLength, strconv.FormatInt(info.Size, 10))

    if info.IsEncrypted() {
        h.Set(api.ContentLength, info.Headers[layer.AttributeDecryptedSize])
        addSSECHeaders(h, requestHeader)
    } else {
        h.Set(api.ContentLength, strconv.FormatInt(info.Size, 10))
    }

    h.Set(api.ETag, info.HashSum)
    h.Set(api.AmzTaggingCount, strconv.Itoa(tagSetLength))

@@ -137,7 +150,26 @@ func (h *handler) GetObjectHandler(w http.ResponseWriter, r *http.Request) {
        return
    }

    if params, err = fetchRangeHeader(r.Header, uint64(info.Size)); err != nil {
    encryption, err := formEncryptionParams(r.Header)
    if err != nil {
        h.logAndSendError(w, "invalid sse headers", reqInfo, err)
        return
    }

    if err = encryption.MatchObjectEncryption(info.EncryptionInfo); err != nil {
        h.logAndSendError(w, "encryption doesn't match object", reqInfo, errors.GetAPIError(errors.ErrBadRequest), zap.Error(err))
        return
    }

    fullSize := info.Size
    if encryption.Enabled() {
        if fullSize, err = strconv.ParseInt(info.Headers[layer.AttributeDecryptedSize], 10, 64); err != nil {
            h.logAndSendError(w, "invalid decrypted size header", reqInfo, errors.GetAPIError(errors.ErrBadRequest))
            return
        }
    }

    if params, err = fetchRangeHeader(r.Header, uint64(fullSize)); err != nil {
        h.logAndSendError(w, "could not parse range header", reqInfo, err)
        return
    }

@@ -169,7 +201,7 @@ func (h *handler) GetObjectHandler(w http.ResponseWriter, r *http.Request) {
        return
    }

    writeHeaders(w.Header(), extendedInfo, len(tagSet), bktSettings.Unversioned())
    writeHeaders(w.Header(), r.Header, extendedInfo, len(tagSet), bktSettings.Unversioned())
    if params != nil {
        writeRangeHeaders(w, params, info.Size)
    } else {

@@ -181,6 +213,7 @@ func (h *handler) GetObjectHandler(w http.ResponseWriter, r *http.Request) {
        Writer:     w,
        Range:      params,
        BucketInfo: bktInfo,
        Encryption: encryption,
    }
    if err = h.obj.GetObject(r.Context(), getParams); err != nil {
        h.logAndSendError(w, "could not get object", reqInfo, err)
@@ -1,6 +1,9 @@
package handler

import (
    "bytes"
    "fmt"
    "io"
    "net/http"
    "testing"
    "time"

@@ -144,3 +147,42 @@ func TestPreconditions(t *testing.T) {
        })
    }
}

func TestGetRange(t *testing.T) {
    tc := prepareHandlerContext(t)

    bktName, objName := "bucket-for-range", "object-to-range"
    createTestBucket(tc.Context(), t, tc, bktName)

    content := "123456789abcdef"
    putObjectContent(t, tc, bktName, objName, content)

    full := getObjectRange(t, tc, bktName, objName, 0, len(content)-1)
    require.Equal(t, content, string(full))

    beginning := getObjectRange(t, tc, bktName, objName, 0, 3)
    require.Equal(t, content[:4], string(beginning))

    middle := getObjectRange(t, tc, bktName, objName, 5, 10)
    require.Equal(t, "6789ab", string(middle))

    end := getObjectRange(t, tc, bktName, objName, 10, 15)
    require.Equal(t, "bcdef", string(end))
}

func putObjectContent(t *testing.T, tc *handlerContext, bktName, objName, content string) {
    body := bytes.NewReader([]byte(content))
    w, r := prepareTestPayloadRequest(bktName, objName, body)
    tc.Handler().PutObjectHandler(w, r)
    assertStatus(t, w, http.StatusOK)
}

func getObjectRange(t *testing.T, tc *handlerContext, bktName, objName string, start, end int) []byte {
    w, r := prepareTestRequest(t, bktName, objName, nil)
    r.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", start, end))
    tc.Handler().GetObjectHandler(w, r)
    assertStatus(t, w, http.StatusPartialContent)
    content, err := io.ReadAll(w.Result().Body)
    require.NoError(t, err)
    return content
}
@@ -81,12 +81,16 @@ func prepareHandlerContext(t *testing.T) *handlerContext {
    }
}

func createTestBucket(ctx context.Context, t *testing.T, h *handlerContext, bktName string) {
func createTestBucket(ctx context.Context, t *testing.T, h *handlerContext, bktName string) *data.BucketInfo {
    _, err := h.MockedPool().CreateContainer(ctx, layer.PrmContainerCreate{
        Creator: *usertest.ID(),
        Name:    bktName,
    })
    require.NoError(t, err)

    bktInfo, err := h.Layer().GetBucketInfo(ctx, bktName)
    require.NoError(t, err)
    return bktInfo
}

func createTestBucketWithLock(ctx context.Context, t *testing.T, h *handlerContext, bktName string, conf *data.ObjectLockConfiguration) *data.BucketInfo {

@@ -149,8 +153,12 @@ func prepareTestFullRequest(t *testing.T, bktName, objName string, query url.Val
    rawBody, err := xml.Marshal(body)
    require.NoError(t, err)

    return prepareTestRequestWithQuery(bktName, objName, query, rawBody)
}

func prepareTestRequestWithQuery(bktName, objName string, query url.Values, body []byte) (*httptest.ResponseRecorder, *http.Request) {
    w := httptest.NewRecorder()
    r := httptest.NewRequest(http.MethodPut, defaultURL, bytes.NewReader(rawBody))
    r := httptest.NewRequest(http.MethodPut, defaultURL, bytes.NewReader(body))
    r.URL.RawQuery = query.Encode()

    reqInfo := api.NewReqInfo(w, r, api.ObjectRequest{Bucket: bktName, Object: objName})

@@ -200,3 +208,17 @@ func listOIDsFromMockedNeoFS(t *testing.T, tc *handlerContext, bktName, objectNa

    return ids
}

func assertStatus(t *testing.T, w *httptest.ResponseRecorder, status int) {
    if w.Code != status {
        resp, err := io.ReadAll(w.Result().Body)
        require.NoError(t, err)
        require.Failf(t, "unexpected status", "expected: %d, actual: %d, resp: '%s'", status, w.Code, string(resp))
    }
}

func readResponse(t *testing.T, w *httptest.ResponseRecorder, status int, model interface{}) {
    assertStatus(t, w, status)
    err := xml.NewDecoder(w.Result().Body).Decode(model)
    require.NoError(t, err)
}
@@ -53,6 +53,17 @@ func (h *handler) HeadObjectHandler(w http.ResponseWriter, r *http.Request) {
    }
    info := extendedInfo.ObjectInfo

    encryption, err := formEncryptionParams(r.Header)
    if err != nil {
        h.logAndSendError(w, "invalid sse headers", reqInfo, err)
        return
    }

    if err = encryption.MatchObjectEncryption(info.EncryptionInfo); err != nil {
        h.logAndSendError(w, "encryption doesn't match object", reqInfo, errors.GetAPIError(errors.ErrBadRequest), zap.Error(err))
        return
    }

    if err = checkPreconditions(info, conditional); err != nil {
        h.logAndSendError(w, "precondition failed", reqInfo, err)
        return

@@ -98,7 +109,7 @@ func (h *handler) HeadObjectHandler(w http.ResponseWriter, r *http.Request) {
        return
    }

    writeHeaders(w.Header(), extendedInfo, len(tagSet), bktSettings.Unversioned())
    writeHeaders(w.Header(), r.Header, extendedInfo, len(tagSet), bktSettings.Unversioned())
    w.WriteHeader(http.StatusOK)
}
@@ -4,7 +4,6 @@ import (
    "bytes"
    "context"
    "encoding/xml"
    "io"
    "net/http"
    "net/http/httptest"
    "strconv"

@@ -589,11 +588,3 @@ func assertRetentionApproximate(t *testing.T, w *httptest.ResponseRecorder, rete

    require.InDelta(t, expectedUntil.Unix(), actualUntil.Unix(), delta)
}

func assertStatus(t *testing.T, w *httptest.ResponseRecorder, status int) {
    if w.Code != status {
        resp, err := io.ReadAll(w.Result().Body)
        require.NoError(t, err)
        require.Failf(t, "unexpected status", "expected: %d, actual: %d, resp: '%s'", status, w.Code, string(resp))
    }
}
@@ -137,16 +137,26 @@ func (h *handler) CreateMultipartUploadHandler(w http.ResponseWriter, r *http.Re
        }
    }

    p.Info.Encryption, err = formEncryptionParams(r.Header)
    if err != nil {
        h.logAndSendError(w, "invalid sse headers", reqInfo, err)
        return
    }

    p.Header = parseMetadata(r)
    if contentType := r.Header.Get(api.ContentType); len(contentType) > 0 {
        p.Header[api.ContentType] = contentType
    }

    if err = h.obj.CreateMultipartUpload(r.Context(), p); err != nil {
        h.logAndSendError(w, "could not upload a part", reqInfo, err, additional...)
        h.logAndSendError(w, "could not create multipart upload", reqInfo, err, additional...)
        return
    }

    if p.Info.Encryption.Enabled() {
        addSSECHeaders(w.Header(), r.Header)
    }

    resp := InitiateMultipartUploadResponse{
        Bucket: reqInfo.BucketName,
        Key:    reqInfo.ObjectName,

@@ -210,12 +220,22 @@ func (h *handler) UploadPartHandler(w http.ResponseWriter, r *http.Request) {
        Reader: r.Body,
    }

    p.Info.Encryption, err = formEncryptionParams(r.Header)
    if err != nil {
        h.logAndSendError(w, "invalid sse headers", reqInfo, err)
        return
    }

    hash, err := h.obj.UploadPart(r.Context(), p)
    if err != nil {
        h.logAndSendError(w, "could not upload a part", reqInfo, err, additional...)
        return
    }

    if p.Info.Encryption.Enabled() {
        addSSECHeaders(w.Header(), r.Header)
    }

    w.Header().Set(api.ETag, hash)
    api.WriteSuccessResponseHeadersOnly(w)
}

@@ -301,6 +321,17 @@ func (h *handler) UploadPartCopy(w http.ResponseWriter, r *http.Request) {
        Range: srcRange,
    }

    p.Info.Encryption, err = formEncryptionParams(r.Header)
    if err != nil {
        h.logAndSendError(w, "invalid sse headers", reqInfo, err)
        return
    }

    if err = p.Info.Encryption.MatchObjectEncryption(srcInfo.EncryptionInfo); err != nil {
        h.logAndSendError(w, "encryption doesn't match object", reqInfo, errors.GetAPIError(errors.ErrBadRequest), zap.Error(err))
        return
    }

    info, err := h.obj.UploadPartCopy(r.Context(), p)
    if err != nil {
        h.logAndSendError(w, "could not upload part copy", reqInfo, err, additional...)

@@ -312,6 +343,10 @@ func (h *handler) UploadPartCopy(w http.ResponseWriter, r *http.Request) {
        LastModified: info.Created.UTC().Format(time.RFC3339),
    }

    if p.Info.Encryption.Enabled() {
        addSSECHeaders(w.Header(), r.Header)
    }

    if err = api.EncodeToResponse(w, response); err != nil {
        h.logAndSendError(w, "something went wrong", reqInfo, err)
    }

@@ -353,6 +388,7 @@ func (h *handler) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.
        Info:  uploadInfo,
        Parts: reqBody.Parts,
    }

    uploadData, objInfo, err := h.obj.CompleteMultipartUpload(r.Context(), c)
    if err != nil {
        h.logAndSendError(w, "could not complete multipart upload", reqInfo, err, additional...)

@@ -522,6 +558,12 @@ func (h *handler) ListPartsHandler(w http.ResponseWriter, r *http.Request) {
        PartNumberMarker: partNumberMarker,
    }

    p.Info.Encryption, err = formEncryptionParams(r.Header)
    if err != nil {
        h.logAndSendError(w, "invalid sse headers", reqInfo, err)
        return
    }

    list, err := h.obj.ListParts(r.Context(), p)
    if err != nil {
        h.logAndSendError(w, "could not list parts", reqInfo, err, additional...)

@@ -551,6 +593,12 @@ func (h *handler) AbortMultipartUploadHandler(w http.ResponseWriter, r *http.Req
        Key: reqInfo.ObjectName,
    }

    p.Encryption, err = formEncryptionParams(r.Header)
    if err != nil {
        h.logAndSendError(w, "invalid sse headers", reqInfo, err)
        return
    }

    if err = h.obj.AbortMultipartUpload(r.Context(), p); err != nil {
        h.logAndSendError(w, "could not abort multipart upload", reqInfo, err, additional...)
        return
@@ -2,6 +2,7 @@ package handler

import (
    "bytes"
    "crypto/md5"
    "encoding/base64"
    "encoding/json"
    "encoding/xml"

@@ -209,12 +210,19 @@ func (h *handler) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
        metadata[api.Expires] = expires
    }

    encryption, err := formEncryptionParams(r.Header)
    if err != nil {
        h.logAndSendError(w, "invalid sse headers", reqInfo, err)
        return
    }

    params := &layer.PutObjectParams{
        BktInfo: bktInfo,
        Object:  reqInfo.ObjectName,
        Reader:  r.Body,
        Size:    r.ContentLength,
        Header:  metadata,
        BktInfo:    bktInfo,
        Object:     reqInfo.ObjectName,
        Reader:     r.Body,
        Size:       r.ContentLength,
        Header:     metadata,
        Encryption: encryption,
    }

    settings, err := h.obj.GetBucketSettings(r.Context(), bktInfo)

@@ -280,11 +288,52 @@ func (h *handler) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
    if settings.VersioningEnabled() {
        w.Header().Set(api.AmzVersionID, info.VersionID())
    }
    if encryption.Enabled() {
        addSSECHeaders(w.Header(), r.Header)
    }

    w.Header().Set(api.ETag, info.HashSum)
    api.WriteSuccessResponseHeadersOnly(w)
}

func formEncryptionParams(header http.Header) (enc layer.EncryptionParams, err error) {
    sseCustomerAlgorithm := header.Get(api.AmzServerSideEncryptionCustomerAlgorithm)
    sseCustomerKey := header.Get(api.AmzServerSideEncryptionCustomerKey)
    sseCustomerKeyMD5 := header.Get(api.AmzServerSideEncryptionCustomerKeyMD5)

    if len(sseCustomerAlgorithm) == 0 && len(sseCustomerKey) == 0 && len(sseCustomerKeyMD5) == 0 {
        return
    }

    if sseCustomerAlgorithm != layer.AESEncryptionAlgorithm {
        return enc, errors.GetAPIError(errors.ErrInvalidEncryptionAlgorithm)
    }

    key, err := base64.StdEncoding.DecodeString(sseCustomerKey)
    if err != nil {
        return enc, errors.GetAPIError(errors.ErrInvalidSSECustomerKey)
    }

    if len(key) != layer.AESKeySize {
        return enc, errors.GetAPIError(errors.ErrInvalidSSECustomerKey)
    }

    keyMD5, err := base64.StdEncoding.DecodeString(sseCustomerKeyMD5)
    if err != nil {
        return enc, errors.GetAPIError(errors.ErrSSECustomerKeyMD5Mismatch)
    }

    md5Sum := md5.Sum(key)
    if !bytes.Equal(md5Sum[:], keyMD5) {
        return enc, errors.GetAPIError(errors.ErrSSECustomerKeyMD5Mismatch)
    }

    var aesKey layer.AES256Key
    copy(aesKey[:], key)

    return layer.NewEncryptionParams(aesKey), nil
}

func (h *handler) PostObject(w http.ResponseWriter, r *http.Request) {
    var (
        newEaclTable *eacl.Table
@@ -57,6 +57,10 @@ const (
    AmzMaxParts         = "X-Amz-Max-Parts"
    AmzPartNumberMarker = "X-Amz-Part-Number-Marker"

    AmzServerSideEncryptionCustomerAlgorithm = "x-amz-server-side-encryption-customer-algorithm"
    AmzServerSideEncryptionCustomerKey       = "x-amz-server-side-encryption-customer-key"
    AmzServerSideEncryptionCustomerKeyMD5    = "x-amz-server-side-encryption-customer-key-MD5"

    ContainerID = "X-Container-Id"

    AccessControlAllowOrigin = "Access-Control-Allow-Origin"
api/layer/encryption_test.go (new file, 115 lines)

@@ -0,0 +1,115 @@
package layer

import (
    "encoding/hex"
    "strconv"
    "testing"

    "github.com/nspcc-dev/neofs-s3-gw/api/data"
    "github.com/stretchr/testify/require"
)

const (
    aes256Key = "1234567890qwertyuiopasdfghjklzxc"
)

func getAES256Key() AES256Key {
    var key AES256Key
    copy(key[:], aes256Key)
    return key
}

func TestHMAC(t *testing.T) {
    encParam := NewEncryptionParams(getAES256Key())

    hmacKey, hmacSalt, err := encParam.HMAC()
    require.NoError(t, err)

    encInfo := data.EncryptionInfo{
        Enabled:   true,
        Algorithm: "",
        HMACKey:   hex.EncodeToString(hmacKey),
        HMACSalt:  hex.EncodeToString(hmacSalt),
    }

    err = encParam.MatchObjectEncryption(encInfo)
    require.NoError(t, err)
}

const (
    objSize     = 30 * 1024 * 1024
    partNum     = 6
    partSize    = 5 * 1024 * 1024
    encObjSize  = 31472640 // objSize + enc headers
    encPartSize = 5245440  // partSize + enc headers
)

func getDecrypter() *decrypter {
    parts := make([]EncryptedPart, partNum)
    for i := range parts {
        parts[i] = EncryptedPart{
            Part: Part{
                PartNumber: i + 1,
                Size:       int64(partSize),
            },
            EncryptedSize: encPartSize,
        }
    }
    return &decrypter{
        parts:      parts,
        encryption: NewEncryptionParams(getAES256Key()),
    }
}

func TestDecrypterInitParams(t *testing.T) {
    decReader := getDecrypter()

    for i, tc := range []struct {
        rng                                       *RangeParams
        expSkipLen, expLn, expOff, expSeqNumber   uint64
        expDecLen, expDataRemain, expEncPartRange int64
    }{
        {
            rng:             &RangeParams{End: objSize - 1},
            expSkipLen:      0,
            expLn:           encObjSize,
            expOff:          0,
            expSeqNumber:    0,
            expDecLen:       objSize,
            expDataRemain:   partSize,
            expEncPartRange: encPartSize,
        },
        {
            rng:             &RangeParams{End: 999999},
            expSkipLen:      0,
            expLn:           1049088,
            expOff:          0,
            expSeqNumber:    0,
            expDecLen:       1000000,
            expDataRemain:   1000000,
            expEncPartRange: 1049088,
        },
        {
            rng:             &RangeParams{Start: 1000000, End: 1999999},
            expSkipLen:      16960,
            expLn:           1049088,
            expOff:          983520,
            expSeqNumber:    15,
            expDecLen:       1000000,
            expDataRemain:   1000000,
            expEncPartRange: 1049088,
        },
    } {
        t.Run(strconv.Itoa(i), func(t *testing.T) {
            decReader.rangeParam = tc.rng
            decReader.initRangeParams()
            require.Equal(t, tc.expSkipLen, decReader.skipLen)
            require.Equal(t, tc.expDecLen, decReader.decLen)
            require.Equal(t, tc.expLn, decReader.ln)
            require.Equal(t, tc.expOff, decReader.off)
            require.Equal(t, tc.expDataRemain, decReader.partDataRemain)
            require.Equal(t, tc.expEncPartRange, decReader.encPartRangeLen)
            require.Equal(t, tc.expSeqNumber, decReader.seqNumber)
        })
    }
}
@@ -1,14 +1,22 @@
package layer

import (
    "bytes"
    "context"
    "crypto/ecdsa"
    "crypto/hmac"
    "crypto/rand"
    "crypto/sha256"
    "encoding/hex"
    errorsStd "errors"
    "fmt"
    "io"
    "net/url"
    "strconv"
    "strings"
    "time"

    "github.com/minio/sio"
    "github.com/nats-io/nats.go"
    "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
    "github.com/nspcc-dev/neofs-s3-gw/api"

@@ -81,6 +89,7 @@ type (
        ObjectInfo *data.ObjectInfo
        BucketInfo *data.BucketInfo
        Writer     io.Writer
        Encryption EncryptionParams
    }

    // HeadObjectParams stores object head request parameters.

@@ -104,14 +113,23 @@ type (
        End uint64
    }

    // AES256Key is a key for encryption.
    AES256Key [32]byte

    EncryptionParams struct {
        enabled     bool
        customerKey AES256Key
    }

    // PutObjectParams stores object put request parameters.
    PutObjectParams struct {
        BktInfo *data.BucketInfo
        Object  string
        Size    int64
        Reader  io.Reader
        Header  map[string]string
        Lock    *data.ObjectLock
        BktInfo    *data.BucketInfo
        Object     string
        Size       int64
        Reader     io.Reader
        Header     map[string]string
        Lock       *data.ObjectLock
        Encryption EncryptionParams
    }

    DeleteObjectParams struct {

@@ -142,6 +160,7 @@ type (
        Header     map[string]string
        Range      *RangeParams
        Lock       *data.ObjectLock
        Encryption EncryptionParams
    }
    // CreateBucketParams stores bucket create request parameters.
    CreateBucketParams struct {

@@ -249,6 +268,13 @@ type (

const (
    tagPrefix = "S3-Tag-"

    AESEncryptionAlgorithm       = "AES256"
    AESKeySize                   = 32
    AttributeEncryptionAlgorithm = api.NeoFSSystemMetadataPrefix + "Algorithm"
    AttributeDecryptedSize       = api.NeoFSSystemMetadataPrefix + "Decrypted-Size"
    AttributeHMACSalt            = api.NeoFSSystemMetadataPrefix + "HMAC-Salt"
    AttributeHMACKey             = api.NeoFSSystemMetadataPrefix + "HMAC-Key"
)

func (t *VersionedObject) String() string {

@@ -259,6 +285,72 @@ func (f MsgHandlerFunc) HandleMessage(ctx context.Context, msg *nats.Msg) error
    return f(ctx, msg)
}

// NewEncryptionParams create new params to encrypt with provided key.
func NewEncryptionParams(key AES256Key) EncryptionParams {
    return EncryptionParams{
        enabled:     true,
        customerKey: key,
    }
}

// Key returns encryption key as slice.
func (p EncryptionParams) Key() []byte {
    return p.customerKey[:]
}

// AESKey returns encryption key.
func (p EncryptionParams) AESKey() AES256Key {
    return p.customerKey
}

// Enabled returns true if key isn't empty.
func (p EncryptionParams) Enabled() bool {
    return p.enabled
}

// HMAC compute salted HMAC.
func (p EncryptionParams) HMAC() ([]byte, []byte, error) {
    mac := hmac.New(sha256.New, p.Key())

    salt := make([]byte, 16)
    if _, err := rand.Read(salt); err != nil {
        return nil, nil, errorsStd.New("failed to create salt")
    }

    mac.Write(salt)
    return mac.Sum(nil), salt, nil
}

// MatchObjectEncryption check if encryption params are valid for provided object.
func (p EncryptionParams) MatchObjectEncryption(encInfo data.EncryptionInfo) error {
    if p.Enabled() != encInfo.Enabled {
        return errorsStd.New("invalid encryption view")
    }

    if !encInfo.Enabled {
        return nil
    }

    hmacSalt, err := hex.DecodeString(encInfo.HMACSalt)
    if err != nil {
        return fmt.Errorf("invalid hmacSalt '%s': %w", encInfo.HMACSalt, err)
    }

    hmacKey, err := hex.DecodeString(encInfo.HMACKey)
    if err != nil {
        return fmt.Errorf("invalid hmacKey '%s': %w", encInfo.HMACKey, err)
    }

    mac := hmac.New(sha256.New, p.Key())
    mac.Write(hmacSalt)
    expectedHmacKey := mac.Sum(nil)
    if !bytes.Equal(expectedHmacKey, hmacKey) {
        return errorsStd.New("mismatched hmac key")
    }

    return nil
}

// DefaultCachesConfigs returns filled configs.
func DefaultCachesConfigs(logger *zap.Logger) *CachesConfig {
    return &CachesConfig{
@@ -381,6 +473,253 @@ func (n *layer) ListBuckets(ctx context.Context) ([]*data.BucketInfo, error) {
    return n.containerList(ctx)
}

func formEncryptedParts(header string) ([]EncryptedPart, error) {
    partInfos := strings.Split(header, ",")
    result := make([]EncryptedPart, len(partInfos))

    for i, partInfo := range partInfos {
        part, err := parseCompletedPartHeader(partInfo)
        if err != nil {
            return nil, err
        }

        encPartSize, err := sio.EncryptedSize(uint64(part.Size))
        if err != nil {
            return nil, fmt.Errorf("compute encrypted size: %w", err)
        }

        result[i] = EncryptedPart{
            Part:          *part,
            EncryptedSize: int64(encPartSize),
        }
    }

    return result, nil
}

type decrypter struct {
    reader      io.Reader
    decReader   io.Reader
    parts       []EncryptedPart
    currentPart int
    encryption  EncryptionParams

    rangeParam *RangeParams

    partDataRemain  int64
    encPartRangeLen int64

    seqNumber uint64
    decLen    int64
    skipLen   uint64

    ln  uint64
    off uint64
}

func (d decrypter) decLength() int64 {
    return d.decLen
}

func (d decrypter) encLength() uint64 {
    return d.ln
}

func (d decrypter) encOffset() uint64 {
    return d.off
}

func getDecryptReader(p *GetObjectParams) (*decrypter, error) {
    if !p.Encryption.Enabled() {
        return nil, errorsStd.New("couldn't create decrypter with disabled encryption")
    }

    rangeParam := p.Range

    var err error
    var parts []EncryptedPart
    header := p.ObjectInfo.Headers[UploadCompletedParts]
    if len(header) != 0 {
        parts, err = formEncryptedParts(header)
        if err != nil {
            return nil, fmt.Errorf("form parts: %w", err)
        }
        if rangeParam == nil {
            decSizeHeader := p.ObjectInfo.Headers[AttributeDecryptedSize]
            size, err := strconv.ParseUint(decSizeHeader, 10, 64)
            if err != nil {
                return nil, fmt.Errorf("parse dec size header '%s': %w", decSizeHeader, err)
            }
            rangeParam = &RangeParams{
                Start: 0,
                End:   size - 1,
            }
        }
    } else {
        decSize, err := sio.DecryptedSize(uint64(p.ObjectInfo.Size))
        if err != nil {
            return nil, fmt.Errorf("compute decrypted size: %w", err)
        }

        parts = []EncryptedPart{{
            Part:          Part{Size: int64(decSize)},
            EncryptedSize: p.ObjectInfo.Size,
        }}
    }

    if rangeParam != nil && rangeParam.Start > rangeParam.End {
        return nil, fmt.Errorf("invalid range: %d %d", rangeParam.Start, rangeParam.End)
    }

    decReader := &decrypter{
        parts:      parts,
        rangeParam: rangeParam,
        encryption: p.Encryption,
    }

    decReader.initRangeParams()

    return decReader, nil
}

const (
    blockSize     = 1 << 16 // 64KB
    fullBlockSize = blockSize + 32
)

func (d *decrypter) initRangeParams() {
    d.partDataRemain = d.parts[d.currentPart].Size
    d.encPartRangeLen = d.parts[d.currentPart].EncryptedSize
    if d.rangeParam == nil {
        d.decLen = d.partDataRemain
        d.ln = uint64(d.encPartRangeLen)
        return
    }

    start, end := d.rangeParam.Start, d.rangeParam.End

    var sum, encSum uint64
    var partStart int
    for i, part := range d.parts {
        if start < sum+uint64(part.Size) {
            partStart = i
            break
        }
        sum += uint64(part.Size)
        encSum += uint64(part.EncryptedSize)
    }

    d.skipLen = (start - sum) % blockSize
    d.seqNumber = (start - sum) / blockSize
    encOffPart := d.seqNumber * fullBlockSize
    d.off = encSum + encOffPart
    d.encPartRangeLen = d.encPartRangeLen - int64(encOffPart)
    d.partDataRemain = d.partDataRemain + int64(sum-start)

    var partEnd int
    for i, part := range d.parts[partStart:] {
        index := partStart + i
        if end < sum+uint64(part.Size) {
            partEnd = index
            break
        }
        sum += uint64(part.Size)
        encSum += uint64(part.EncryptedSize)
    }

    payloadPartEnd := (end - sum) / blockSize
    endEnc := encSum + (payloadPartEnd+1)*fullBlockSize

    endPartEnc := encSum + uint64(d.parts[partEnd].EncryptedSize)
    if endPartEnc < endEnc {
        endEnc = endPartEnc
    }
    d.ln = endEnc - d.off
    d.decLen = int64(end - start + 1)

    if int64(d.ln) < d.encPartRangeLen {
        d.encPartRangeLen = int64(d.ln)
    }
    if d.decLen < d.partDataRemain {
        d.partDataRemain = d.decLen
    }
}
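A worked example of this mapping (each 64 KiB plaintext block is stored as fullBlockSize = 65568 encrypted bytes): take the third case of TestDecrypterInitParams above, 5 MiB parts and the requested range 1000000-1999999. The range starts inside the first part, so seqNumber = 1000000 / 65536 = 15, skipLen = 1000000 mod 65536 = 16960 and off = 15 * 65568 = 983520; the last touched block is 1999999 / 65536 = 30, so ln = 31 * 65568 - 983520 = 1049088 encrypted bytes are read from storage, out of which decLen = 1999999 - 1000000 + 1 = 1000000 plaintext bytes are returned. These are exactly the values the test expects.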
func (d *decrypter) updateRangeParams() {
    d.partDataRemain = d.parts[d.currentPart].Size
    d.encPartRangeLen = d.parts[d.currentPart].EncryptedSize
    d.seqNumber = 0
    d.skipLen = 0
}

func (d *decrypter) Read(p []byte) (int, error) {
    if int64(len(p)) < d.partDataRemain {
        n, err := d.decReader.Read(p)
        if err != nil {
            return n, err
        }
        d.partDataRemain -= int64(n)
        return n, nil
    }

    n1, err := io.ReadFull(d.decReader, p[:d.partDataRemain])
    if err != nil {
        return n1, err
    }

    d.currentPart++
    if d.currentPart == len(d.parts) {
        return n1, io.EOF
    }

    d.updateRangeParams()

    err = d.initNextDecReader()
    if err != nil {
        return n1, err
    }

    n2, err := d.decReader.Read(p[n1:])
    if err != nil {
        return n1 + n2, err
    }

    d.partDataRemain -= int64(n2)

    return n1 + n2, nil
}

func (d *decrypter) SetReader(r io.Reader) error {
    d.reader = r
    return d.initNextDecReader()
}

func (d *decrypter) initNextDecReader() error {
    if d.reader == nil {
        return errorsStd.New("reader isn't set")
    }

    r, err := sio.DecryptReader(io.LimitReader(d.reader, d.encPartRangeLen),
        sio.Config{
            MinVersion:     sio.Version20,
            SequenceNumber: uint32(d.seqNumber),
            Key:            d.encryption.Key(),
            CipherSuites:   []byte{sio.AES_256_GCM},
        })
    if err != nil {
        return fmt.Errorf("couldn't create decrypter: %w", err)
    }

    if d.skipLen > 0 {
        if _, err = io.CopyN(io.Discard, r, int64(d.skipLen)); err != nil {
            return fmt.Errorf("couldn't skip some bytes: %w", err)
        }
    }
    d.decReader = r

    return nil
}

// GetObject from storage.
func (n *layer) GetObject(ctx context.Context, p *GetObjectParams) error {
    var params getParams

@@ -388,13 +727,23 @@ func (n *layer) GetObject(ctx context.Context, p *GetObjectParams) error {
    params.oid = p.ObjectInfo.ID
    params.bktInfo = p.BucketInfo

    if p.Range != nil {
        if p.Range.Start > p.Range.End {
            panic("invalid range")
    var decReader *decrypter
    if p.Encryption.Enabled() {
        var err error
        decReader, err = getDecryptReader(p)
        if err != nil {
            return fmt.Errorf("creating decrypter: %w", err)
        }
        params.off = decReader.encOffset()
        params.ln = decReader.encLength()
    } else {
        if p.Range != nil {
            if p.Range.Start > p.Range.End {
                panic("invalid range")
            }
            params.ln = p.Range.End - p.Range.Start + 1
            params.off = p.Range.Start
        }

        params.off = p.Range.Start
        params.ln = p.Range.End - p.Range.Start + 1
    }

    payload, err := n.initObjectPayloadReader(ctx, params)

@@ -402,17 +751,26 @@ func (n *layer) GetObject(ctx context.Context, p *GetObjectParams) error {
        return fmt.Errorf("init object payload reader: %w", err)
    }

    if params.ln == 0 {
        params.ln = 4096 // configure?
    bufSize := uint64(32 * 1024) // configure?
    if params.ln != 0 && params.ln < bufSize {
        bufSize = params.ln
    }

    // alloc buffer for copying
    buf := make([]byte, params.ln) // sync-pool it?
    buf := make([]byte, bufSize) // sync-pool it?

    r := payload
    if decReader != nil {
        if err = decReader.SetReader(payload); err != nil {
            return fmt.Errorf("set reader to decrypter: %w", err)
        }
        r = io.LimitReader(decReader, decReader.decLength())
    }

    // copy full payload
    _, err = io.CopyBuffer(p.Writer, payload, buf)
    written, err := io.CopyBuffer(p.Writer, r, buf)
    if err != nil {
        return fmt.Errorf("copy object payload: %w", err)
        return fmt.Errorf("copy object payload written: '%d', decLength: '%d', params.ln: '%d' : %w", written, decReader.decLength(), params.ln, err)
    }

    return nil

@@ -447,6 +805,7 @@ func (n *layer) CopyObject(ctx context.Context, p *CopyObjectParams) (*data.Obje
        Writer:     pw,
        Range:      p.Range,
        BucketInfo: p.ScrBktInfo,
        Encryption: p.Encryption,
    })

    if err = pw.CloseWithError(err); err != nil {

@@ -455,11 +814,12 @@ func (n *layer) CopyObject(ctx context.Context, p *CopyObjectParams) (*data.Obje
    }()

    return n.PutObject(ctx, &PutObjectParams{
        BktInfo: p.DstBktInfo,
        Object:  p.DstObject,
        Size:    p.SrcSize,
        Reader:  pr,
        Header:  p.Header,
        BktInfo:    p.DstBktInfo,
        Object:     p.DstObject,
        Size:       p.SrcSize,
        Reader:     pr,
        Header:     p.Header,
        Encryption: p.Encryption,
    })
}
@ -11,6 +11,7 @@ import (
|
|||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/minio/sio"
|
||||
"github.com/nspcc-dev/neofs-s3-gw/api/data"
|
||||
"github.com/nspcc-dev/neofs-s3-gw/api/errors"
|
||||
oid "github.com/nspcc-dev/neofs-sdk-go/object/id"
|
||||
|
@ -36,9 +37,10 @@ const (
|
|||
|
||||
type (
|
||||
UploadInfoParams struct {
|
||||
UploadID string
|
||||
Bkt *data.BucketInfo
|
||||
Key string
|
||||
UploadID string
|
||||
Bkt *data.BucketInfo
|
||||
Key string
|
||||
Encryption EncryptionParams
|
||||
}
|
||||
|
||||
CreateMultipartParams struct {
|
||||
|
@ -77,6 +79,11 @@ type (
|
|||
PartNumber int
|
||||
}
|
||||
|
||||
EncryptedPart struct {
|
||||
Part
|
||||
EncryptedSize int64
|
||||
}
|
||||
|
||||
Part struct {
|
||||
ETag string
|
||||
LastModified string
|
||||
|
@ -152,6 +159,12 @@ func (n *layer) CreateMultipartUpload(ctx context.Context, p *CreateMultipartPar
|
|||
}
|
||||
}
|
||||
|
||||
if p.Info.Encryption.Enabled() {
|
||||
if err := addEncryptionHeaders(info.Meta, p.Info.Encryption); err != nil {
|
||||
return fmt.Errorf("add encryption header: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return n.treeService.CreateMultipartUpload(ctx, p.Info.Bkt.CID, info)
|
||||
}
|
||||
|
||||
|
@ -177,6 +190,12 @@ func (n *layer) UploadPart(ctx context.Context, p *UploadPartParams) (string, er
|
|||
}
|
||||
|
||||
func (n *layer) uploadPart(ctx context.Context, multipartInfo *data.MultipartInfo, p *UploadPartParams) (*data.ObjectInfo, error) {
|
||||
encInfo := formEncryptionInfo(multipartInfo.Meta)
|
||||
if err := p.Info.Encryption.MatchObjectEncryption(encInfo); err != nil {
|
||||
n.log.Warn("mismatched obj encryptionInfo", zap.Error(err))
|
||||
return nil, errors.GetAPIError(errors.ErrInvalidEncryptionParameters)
|
||||
}
|
||||
|
||||
bktInfo := p.Info.Bkt
|
||||
prm := PrmObjectCreate{
|
||||
Container: bktInfo.CID,
|
||||
|
@ -185,6 +204,17 @@ func (n *layer) uploadPart(ctx context.Context, multipartInfo *data.MultipartInf
|
|||
Payload: p.Reader,
|
||||
}
|
||||
|
||||
decSize := p.Size
|
||||
if p.Info.Encryption.Enabled() {
|
||||
r, encSize, err := encryptionReader(p.Reader, uint64(p.Size), p.Info.Encryption.Key())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create ecnrypted reader: %w", err)
|
||||
}
|
||||
prm.Attributes = append(prm.Attributes, [2]string{AttributeDecryptedSize, strconv.FormatInt(p.Size, 10)})
|
||||
prm.Payload = r
|
||||
p.Size = int64(encSize)
|
||||
}
|
||||
|
||||
prm.Attributes[0][0], prm.Attributes[0][1] = UploadIDAttributeName, p.Info.UploadID
|
||||
prm.Attributes[1][0], prm.Attributes[1][1] = UploadPartNumberAttributeName, strconv.Itoa(p.PartNumber)
|
||||
|
||||
|
@ -198,7 +228,7 @@ func (n *layer) uploadPart(ctx context.Context, multipartInfo *data.MultipartInf
|
|||
UploadID: p.Info.UploadID,
|
||||
Number: p.PartNumber,
|
||||
OID: id,
|
||||
Size: p.Size,
|
||||
Size: decSize,
|
||||
ETag: hex.EncodeToString(hash),
|
||||
Created: time.Now(),
|
||||
}
|
||||
|
@ -326,12 +356,14 @@ func (n *layer) CompleteMultipartUpload(ctx context.Context, p *CompleteMultipar
|
|||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
encInfo := formEncryptionInfo(multipartInfo.Meta)
|
||||
|
||||
if len(partsInfo) < len(p.Parts) {
|
||||
return nil, nil, errors.GetAPIError(errors.ErrInvalidPart)
|
||||
}
|
||||
|
||||
var multipartObjetSize int64
|
||||
var encMultipartObjectSize uint64
|
||||
parts := make([]*data.PartInfo, 0, len(p.Parts))
|
||||
|
||||
var completedPartsHeader strings.Builder
|
||||
|
@ -345,7 +377,15 @@ func (n *layer) CompleteMultipartUpload(ctx context.Context, p *CompleteMultipar
|
|||
return nil, nil, errors.GetAPIError(errors.ErrEntityTooSmall)
|
||||
}
|
||||
parts = append(parts, partInfo)
|
||||
multipartObjetSize += partInfo.Size
|
||||
multipartObjetSize += partInfo.Size // even if encryption is enabled size is actual (decrypted)
|
||||
|
||||
if encInfo.Enabled {
|
||||
encPartSize, err := sio.EncryptedSize(uint64(partInfo.Size))
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("compute encrypted size: %w", err)
|
||||
}
|
||||
encMultipartObjectSize += encPartSize
|
||||
}
|
||||
|
||||
partInfoStr := partInfo.ToHeaderString()
|
||||
if i != len(p.Parts)-1 {
|
||||
|
@ -373,6 +413,14 @@ func (n *layer) CompleteMultipartUpload(ctx context.Context, p *CompleteMultipar
|
|||
}
|
||||
}
|
||||
|
||||
if encInfo.Enabled {
|
||||
initMetadata[AttributeEncryptionAlgorithm] = encInfo.Algorithm
|
||||
initMetadata[AttributeHMACKey] = encInfo.HMACKey
|
||||
initMetadata[AttributeHMACSalt] = encInfo.HMACSalt
|
||||
initMetadata[AttributeDecryptedSize] = strconv.FormatInt(multipartObjetSize, 10)
|
||||
multipartObjetSize = int64(encMultipartObjectSize)
|
||||
}
|
||||
|
||||
r := &multiObjectReader{
|
||||
ctx: ctx,
|
||||
layer: n,
|
||||
|
@@ -382,11 +430,12 @@ func (n *layer) CompleteMultipartUpload(ctx context.Context, p *CompleteMultipar
	r.prm.bktInfo = p.Info.Bkt

	obj, err := n.PutObject(ctx, &PutObjectParams{
		BktInfo: p.Info.Bkt,
		Object:  p.Info.Key,
		Reader:  r,
		Header:  initMetadata,
		Size:    multipartObjetSize,
		BktInfo:    p.Info.Bkt,
		Object:     p.Info.Key,
		Reader:     r,
		Header:     initMetadata,
		Size:       multipartObjetSize,
		Encryption: p.Info.Encryption,
	})
	if err != nil {
		n.log.Error("could not put a completed object (multipart upload)",
@@ -496,6 +545,12 @@ func (n *layer) ListParts(ctx context.Context, p *ListPartsParams) (*ListPartsIn
		return nil, err
	}

	encInfo := formEncryptionInfo(multipartInfo.Meta)
	if err = p.Info.Encryption.MatchObjectEncryption(encInfo); err != nil {
		n.log.Warn("mismatched obj encryptionInfo", zap.Error(err))
		return nil, errors.GetAPIError(errors.ErrInvalidEncryptionParameters)
	}

	res.Owner = multipartInfo.Owner

	parts := make([]*Part, 0, len(partsInfo))
@@ -176,9 +176,16 @@ func (t *TestNeoFS) ReadObject(_ context.Context, prm PrmObjectRead) (*ObjectPar
	sAddr := addr.EncodeToString()

	if obj, ok := t.objects[sAddr]; ok {
		payload := obj.Payload()

		if prm.PayloadRange[0]+prm.PayloadRange[1] > 0 {
			off := prm.PayloadRange[0]
			payload = payload[off : off+prm.PayloadRange[1]]
		}

		return &ObjectPart{
			Head:    obj,
			Payload: io.NopCloser(bytes.NewReader(obj.Payload())),
			Payload: io.NopCloser(bytes.NewReader(payload)),
		}, nil
	}
@@ -10,10 +10,12 @@ import (
	"mime"
	"path/filepath"
	"sort"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/minio/sio"
	"github.com/nspcc-dev/neofs-s3-gw/api"
	"github.com/nspcc-dev/neofs-s3-gw/api/cache"
	"github.com/nspcc-dev/neofs-s3-gw/api/data"
@@ -145,6 +147,42 @@ func MimeByFileName(name string) string {
	return mime.TypeByExtension(ext)
}

func encryptionReader(r io.Reader, size uint64, key []byte) (io.Reader, uint64, error) {
	encSize, err := sio.EncryptedSize(size)
	if err != nil {
		return nil, 0, fmt.Errorf("failed to compute enc size: %w", err)
	}

	r, err = sio.EncryptReader(r, sio.Config{MinVersion: sio.Version20, MaxVersion: sio.Version20, Key: key, CipherSuites: []byte{sio.AES_256_GCM}})
	if err != nil {
		return nil, 0, fmt.Errorf("couldn't create encrypter: %w", err)
	}

	return r, encSize, nil
}
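The GET path needs the inverse of this helper. The decryption side is outside this excerpt, so the sketch below is only an assumed counterpart using the same DARE settings (Version20, AES-256-GCM); the name decryptionReader is hypothetical and relies on the same imports as this file:

// Hypothetical counterpart to encryptionReader; not part of this commit.
// It assumes the same sio configuration that encryptionReader uses above.
func decryptionReader(r io.Reader, key []byte) (io.Reader, error) {
	dr, err := sio.DecryptReader(r, sio.Config{
		MinVersion:   sio.Version20,
		MaxVersion:   sio.Version20,
		Key:          key,
		CipherSuites: []byte{sio.AES_256_GCM},
	})
	if err != nil {
		return nil, fmt.Errorf("couldn't create decrypter: %w", err)
	}
	return dr, nil
}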
func parseCompletedPartHeader(hdr string) (*Part, error) {
	// partInfo[0] -- part number, partInfo[1] -- part size, partInfo[2] -- checksum
	partInfo := strings.Split(hdr, "-")
	if len(partInfo) != 3 {
		return nil, fmt.Errorf("invalid completed part header")
	}
	num, err := strconv.Atoi(partInfo[0])
	if err != nil {
		return nil, fmt.Errorf("invalid completed part number '%s': %w", partInfo[0], err)
	}
	size, err := strconv.Atoi(partInfo[1])
	if err != nil {
		return nil, fmt.Errorf("invalid completed part size '%s': %w", partInfo[1], err)
	}

	return &Part{
		ETag:       partInfo[2],
		PartNumber: num,
		Size:       int64(size),
	}, nil
}
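The header parsed here is "<part number>-<part size>-<etag>", which partInfo.ToHeaderString() in CompleteMultipartUpload presumably produces. A small standalone round trip of that format; the concrete values are invented:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// Build a header the way parseCompletedPartHeader expects it:
	// "<part number>-<part size>-<etag>".
	hdr := strings.Join([]string{strconv.Itoa(3), strconv.Itoa(5 << 20), "deadbeef"}, "-")

	parts := strings.Split(hdr, "-")
	num, _ := strconv.Atoi(parts[0])
	size, _ := strconv.Atoi(parts[1])
	fmt.Printf("part %d, %d bytes, etag %s\n", num, size, parts[2])
}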
// PutObject stores an object in NeoFS, taking the payload from io.Reader.
func (n *layer) PutObject(ctx context.Context, p *PutObjectParams) (*data.ObjectInfo, error) {
	own := n.Owner(ctx)
@@ -163,6 +201,19 @@ func (n *layer) PutObject(ctx context.Context, p *PutObjectParams) (*data.Object
	}

	r := p.Reader
	if p.Encryption.Enabled() {
		p.Header[AttributeDecryptedSize] = strconv.FormatInt(p.Size, 10)
		if err = addEncryptionHeaders(p.Header, p.Encryption); err != nil {
			return nil, fmt.Errorf("add encryption header: %w", err)
		}

		var encSize uint64
		if r, encSize, err = encryptionReader(p.Reader, uint64(p.Size), p.Encryption.Key()); err != nil {
			return nil, fmt.Errorf("create encrypter: %w", err)
		}
		p.Size = int64(encSize)
	}

	if r != nil {
		if len(p.Header[api.ContentType]) == 0 {
			if contentType := MimeByFileName(p.Object); len(contentType) == 0 {
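Once encryption is enabled, p.Size is overwritten with the ciphertext size and the plaintext length survives only in AttributeDecryptedSize. For a well-formed DARE stream the two sizes are also mechanically related; a standalone sanity-check sketch (the 10 MiB figure is arbitrary):

package main

import (
	"fmt"

	"github.com/minio/sio"
)

func main() {
	plain := uint64(10 << 20) // arbitrary plaintext size

	enc, err := sio.EncryptedSize(plain)
	if err != nil {
		panic(err)
	}
	dec, err := sio.DecryptedSize(enc)
	if err != nil {
		panic(err)
	}
	// dec == plain: the stored attribute and the sio size arithmetic must agree.
	fmt.Printf("plain=%d enc=%d dec=%d\n", plain, enc, dec)
}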
@@ -217,8 +268,9 @@ func (n *layer) PutObject(ctx context.Context, p *PutObjectParams) (*data.Object
	n.listsCache.CleanCacheEntriesContainingObject(p.Object, p.BktInfo.CID)

	objInfo := &data.ObjectInfo{
		ID:  id,
		CID: p.BktInfo.CID,
		ID:             id,
		CID:            p.BktInfo.CID,
		EncryptionInfo: formEncryptionInfo(p.Header),

		Owner:  own,
		Bucket: p.BktInfo.Name,
@@ -83,9 +83,10 @@ func objectInfoFromMeta(bkt *data.BucketInfo, meta *object.Object) *data.ObjectI
	objID, _ := meta.ID()
	payloadChecksum, _ := meta.PayloadChecksum()
	return &data.ObjectInfo{
		ID:    objID,
		CID:   bkt.CID,
		IsDir: false,
		ID:             objID,
		CID:            bkt.CID,
		IsDir:          false,
		EncryptionInfo: formEncryptionInfo(headers),

		Bucket: bkt.Name,
		Name:   filenameFromObject(meta),
@@ -98,6 +99,28 @@ func objectInfoFromMeta(bkt *data.BucketInfo, meta *object.Object) *data.ObjectI
	}
}

func formEncryptionInfo(headers map[string]string) data.EncryptionInfo {
	algorithm := headers[AttributeEncryptionAlgorithm]
	return data.EncryptionInfo{
		Enabled:   len(algorithm) > 0,
		Algorithm: algorithm,
		HMACKey:   headers[AttributeHMACKey],
		HMACSalt:  headers[AttributeHMACSalt],
	}
}

func addEncryptionHeaders(meta map[string]string, enc EncryptionParams) error {
	meta[AttributeEncryptionAlgorithm] = AESEncryptionAlgorithm
	hmacKey, hmacSalt, err := enc.HMAC()
	if err != nil {
		return fmt.Errorf("get hmac: %w", err)
	}
	meta[AttributeHMACKey] = hex.EncodeToString(hmacKey)
	meta[AttributeHMACSalt] = hex.EncodeToString(hmacSalt)

	return nil
}
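addEncryptionHeaders and formEncryptionInfo are meant to be inverses over the object attributes. A hypothetical in-package sketch of that round trip; checkEncryptionHeaderRoundTrip is not part of the commit and assumes enc carries a valid customer key:

// Hypothetical in-package sketch (not in this commit) verifying that headers
// written by addEncryptionHeaders are parsed back by formEncryptionInfo.
func checkEncryptionHeaderRoundTrip(enc EncryptionParams) error {
	meta := make(map[string]string)
	if err := addEncryptionHeaders(meta, enc); err != nil {
		return err
	}

	info := formEncryptionInfo(meta)
	if !info.Enabled || info.Algorithm != AESEncryptionAlgorithm {
		return fmt.Errorf("unexpected encryption info: %+v", info)
	}
	return nil
}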
// processObjectInfoName fixes name in objectInfo structure based on prefix and
// delimiter from user request. If name does not contain prefix, nil value is
// returned. If name should be modified, then function returns copy of objectInfo
go.mod
@@ -3,10 +3,11 @@ module github.com/nspcc-dev/neofs-s3-gw
go 1.17

require (
	github.com/aws/aws-sdk-go v1.37.9
	github.com/aws/aws-sdk-go v1.44.6
	github.com/bluele/gcache v0.0.2
	github.com/google/uuid v1.2.0
	github.com/gorilla/mux v1.8.0
	github.com/minio/sio v0.3.0
	github.com/nats-io/nats.go v1.13.1-0.20220121202836-972a071d373d
	github.com/nspcc-dev/neo-go v0.99.1
	github.com/nspcc-dev/neofs-api-go/v2 v2.13.1
@@ -25,6 +26,7 @@ require (

require (
	github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20210521073959-f0d4d129b7f1 // indirect
	//github.com/aws/aws-sdk-go-v2 v1.16.7 // indirect
	github.com/beorn7/perks v1.0.1 // indirect
	github.com/btcsuite/btcd v0.22.0-beta // indirect
	github.com/cespare/xxhash/v2 v2.1.1 // indirect
@@ -68,10 +70,10 @@ require (
	github.com/urfave/cli v1.22.5 // indirect
	go.uber.org/atomic v1.9.0 // indirect
	go.uber.org/multierr v1.6.0 // indirect
	golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 // indirect
	golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd // indirect
	golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect
	golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect
	golang.org/x/term v0.0.0-20210429154555-c04ba851c2a4 // indirect
	golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
	golang.org/x/text v0.3.7 // indirect
	google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c // indirect
	gopkg.in/ini.v1 v1.62.0 // indirect
go.sum
@@ -61,8 +61,8 @@ github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20210521073959-f0d4d129b7f1/go.m
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/aws/aws-sdk-go v1.37.9 h1:sgRbr+heubkgSwkn9fQMF80l9xjXkmhzk9DLdsaYh+c=
github.com/aws/aws-sdk-go v1.37.9/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/aws/aws-sdk-go v1.44.6 h1:Y+uHxmZfhRTLX2X3khkdxCoTZAyGEX21aOUHe1U6geg=
github.com/aws/aws-sdk-go v1.44.6/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
@@ -298,6 +298,8 @@ github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/minio/highwayhash v1.0.1 h1:dZ6IIu8Z14VlC0VpfKofAhCy74wu/Qb5gcn52yWoz/0=
github.com/minio/highwayhash v1.0.1/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY=
github.com/minio/sio v0.3.0 h1:syEFBewzOMOYVzSTFpp1MqpSZk8rUNbz8VIIc+PNzus=
github.com/minio/sio v0.3.0/go.mod h1:8b0yPp2avGThviy/+OCJBI6OMpvxoUuiLvE6F1lebhw=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
@@ -503,6 +505,7 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
@@ -595,8 +598,9 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v
golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 h1:CIJ76btIcR3eFI5EgSo6k1qKw9KJexJuRLI9G7Hp5wE=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -687,12 +691,14 @@ golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220111092808-5a964db01320/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0=
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210429154555-c04ba851c2a4 h1:UPou2i3GzKgi6igR+/0C5XyHKBngHxBp/CL5CQ0p3Zk=
golang.org/x/term v0.0.0-20210429154555-c04ba851c2a4/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=