Compare commits: master...feature/mf (1 commit, 552bd7b932)

41 changed files with 1362 additions and 1069 deletions
@@ -1,6 +1,4 @@
 // This file is adopting https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/aws/signer/v4/stream.go for sigv4a.
-// with changes
-// * add VerifyTrailerSignature
 
 package v4a
 
@@ -90,39 +88,6 @@ func (s *StreamSigner) buildEventStreamStringToSign(headers, payload, previousSi
 	}, "\n")
 }
 
-func (s *StreamSigner) VerifyTrailerSignature(payload []byte, signingTime time.Time, signature []byte) error {
-	prevSignature := s.prevSignature
-
-	st := v4Internal.NewSigningTime(signingTime)
-
-	scope := buildCredentialScope(st, s.service)
-
-	stringToSign := s.buildEventStreamStringToSignTrailer(payload, prevSignature, scope, &st)
-
-	ok, err := signerCrypto.VerifySignature(&s.credentials.PrivateKey.PublicKey, makeHash(sha256.New(), []byte(stringToSign)), signature)
-	if err != nil {
-		return err
-	}
-	if !ok {
-		return fmt.Errorf("v4a: invalid signature")
-	}
-
-	s.prevSignature = signature
-
-	return nil
-}
-
-func (s *StreamSigner) buildEventStreamStringToSignTrailer(payload, previousSignature []byte, credentialScope string, signingTime *v4Internal.SigningTime) string {
-	hash := sha256.New()
-	return strings.Join([]string{
-		"AWS4-ECDSA-P256-SHA256-TRAILER",
-		signingTime.TimeFormat(),
-		credentialScope,
-		hex.EncodeToString(previousSignature),
-		hex.EncodeToString(makeHash(hash, payload)),
-	}, "\n")
-}
-
 func buildCredentialScope(st v4Internal.SigningTime, service string) string {
 	return strings.Join([]string{
 		st.Format(shortTimeFormat),
@@ -1,6 +1,4 @@
 // This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/aws/signer/v4/stream.go
-// with changes
-// * add GetTrailingSignature
 
 package v4
 
@@ -89,32 +87,3 @@ func (s *StreamSigner) buildEventStreamStringToSign(headers, payload, previousSi
 		hex.EncodeToString(makeHash(hash, payload)),
 	}, "\n")
 }
-
-// GetTrailerSignature signs the provided header and payload bytes.
-func (s *StreamSigner) GetTrailerSignature(payload []byte, signingTime time.Time) ([]byte, error) {
-	prevSignature := s.prevSignature
-
-	st := v4Internal.NewSigningTime(signingTime)
-
-	sigKey := s.signingKeyDeriver.DeriveKey(s.credentials, s.service, s.region, st)
-
-	scope := v4Internal.BuildCredentialScope(st, s.region, s.service)
-
-	stringToSign := s.buildEventStreamStringToSignTrailer(payload, prevSignature, scope, &st)
-
-	signature := v4Internal.HMACSHA256(sigKey, []byte(stringToSign))
-	s.prevSignature = signature
-
-	return signature, nil
-}
-
-func (s *StreamSigner) buildEventStreamStringToSignTrailer(payload, previousSignature []byte, credentialScope string, signingTime *v4Internal.SigningTime) string {
-	hash := sha256.New()
-	return strings.Join([]string{
-		"AWS4-HMAC-SHA256-TRAILER",
-		signingTime.TimeFormat(),
-		credentialScope,
-		hex.EncodeToString(previousSignature),
-		hex.EncodeToString(makeHash(hash, payload)),
-	}, "\n")
-}
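The HMAC counterpart removed above chains each trailer signature off the previous chunk signature. A sketch of the same computation using only the standard library; the derived signing key, previous signature, and scope values in main are placeholders, not real AWS material:

```go
package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"strings"
)

// trailerSignature mirrors the removed buildEventStreamStringToSignTrailer
// plus the final HMAC step; key derivation (DeriveKey) is elided.
func trailerSignature(sigKey, prevSignature, trailerBytes []byte, timestamp, scope string) string {
	payloadHash := sha256.Sum256(trailerBytes)
	stringToSign := strings.Join([]string{
		"AWS4-HMAC-SHA256-TRAILER", // trailer-specific constant, not AWS4-HMAC-SHA256-PAYLOAD
		timestamp,
		scope,
		hex.EncodeToString(prevSignature),
		hex.EncodeToString(payloadHash[:]),
	}, "\n")

	mac := hmac.New(sha256.New, sigKey)
	mac.Write([]byte(stringToSign))
	return hex.EncodeToString(mac.Sum(nil))
}

func main() {
	// Hypothetical inputs for illustration only.
	sig := trailerSignature([]byte("derived-key"), []byte{0x01},
		[]byte("x-amz-checksum-crc32c:sOO8/Q==\n"),
		"20130524T000000Z", "20130524/us-east-1/s3/aws4_request")
	fmt.Println(sig)
}
```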
api/cache/cache_test.go (vendored): 5 changes
@@ -173,7 +173,10 @@ func TestSettingsCacheType(t *testing.T) {
 	cache := NewSystemCache(DefaultSystemConfig(logger))
 
 	key := "key"
-	settings := &data.BucketSettings{Versioning: data.VersioningEnabled}
+	settings := &data.BucketSettings{Versioning: data.Versioning{
+		VersioningStatus: data.VersioningEnabled,
+		MFADeleteStatus:  data.MFADeleteEnabled,
+	}}
 
 	err := cache.PutSettings(key, settings)
 	require.NoError(t, err)
@@ -20,6 +20,9 @@ const (
 	VersioningUnversioned = "Unversioned"
 	VersioningEnabled     = "Enabled"
 	VersioningSuspended   = "Suspended"
+
+	MFADeleteDisabled = "Disabled"
+	MFADeleteEnabled  = "Enabled"
 )
 
 type (
@@ -55,12 +58,19 @@ type (
 
 	// BucketSettings stores settings such as versioning.
 	BucketSettings struct {
-		Versioning        string
+		Versioning        Versioning
 		LockConfiguration *ObjectLockConfiguration
 		CannedACL         string
 		OwnerKey          *keys.PublicKey
 	}
 
+	// Versioning stores bucket versioning settings.
+	Versioning struct {
+		VersioningStatus string
+		MFADeleteStatus  string
+		MFASerialNumber  string
+	}
+
 	// CORSConfiguration stores CORS configuration of a request.
 	CORSConfiguration struct {
 		XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CORSConfiguration" json:"-"`
@@ -130,15 +140,19 @@ func (o *ObjectInfo) ETag(md5Enabled bool) string {
 }
 
 func (b BucketSettings) Unversioned() bool {
-	return b.Versioning == VersioningUnversioned
+	return b.Versioning.VersioningStatus == VersioningUnversioned
 }
 
 func (b BucketSettings) VersioningEnabled() bool {
-	return b.Versioning == VersioningEnabled
+	return b.Versioning.VersioningStatus == VersioningEnabled
 }
 
 func (b BucketSettings) VersioningSuspended() bool {
-	return b.Versioning == VersioningSuspended
+	return b.Versioning.VersioningStatus == VersioningSuspended
 }
 
+func (b BucketSettings) MFADeleteEnabled() bool {
+	return b.Versioning.MFADeleteStatus == MFADeleteEnabled
+}
+
 func Quote(val string) string {
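The switch from a plain Versioning string to the Versioning struct lets the MFA-delete status and device serial travel with the versioning state. A short sketch of how the new type composes with the helper methods above; the ARN value is hypothetical:

```go
package main

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
)

func main() {
	settings := data.BucketSettings{
		Versioning: data.Versioning{
			VersioningStatus: data.VersioningEnabled,
			MFADeleteStatus:  data.MFADeleteEnabled,
			// MFASerialNumber holds the device ARN supplied at enable time.
			MFASerialNumber: "arn:aws:iam::123456789012:mfa/user", // hypothetical value
		},
	}

	fmt.Println(settings.VersioningEnabled()) // true: checks Versioning.VersioningStatus
	fmt.Println(settings.MFADeleteEnabled())  // true: checks Versioning.MFADeleteStatus
}
```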
@@ -109,6 +109,7 @@ type MultipartInfo struct {
 	Owner         user.ID
 	Created       time.Time
 	Meta          map[string]string
 	CopiesNumbers []uint32
 	Finished      bool
+	CreationEpoch uint64
 }
@@ -143,6 +143,7 @@ const (
 	ErrInvalidTagDirective
 	// Add new error codes here.
 	ErrNotSupported
+	ErrMFAAuthNeeded
 
 	// SSE-S3 related API errors.
 	ErrInvalidEncryptionMethod
@@ -287,7 +288,7 @@ const (
 
 	ErrPostPolicyConditionInvalidFormat
 
-	//CORS configuration errors.
+	// CORS configuration errors.
 	ErrCORSUnsupportedMethod
 	ErrCORSWildcardExposeHeaders
 
@@ -388,6 +389,12 @@ var errorCodes = errorCodeMap{
 		Description:    "Access Denied.",
 		HTTPStatusCode: http.StatusForbidden,
 	},
+	ErrMFAAuthNeeded: {
+		ErrCode:        ErrMFAAuthNeeded,
+		Code:           "AccessDenied",
+		Description:    "Mfa Authentication must be used for this request",
+		HTTPStatusCode: http.StatusForbidden,
+	},
 	ErrAccessControlListNotSupported: {
 		ErrCode:        ErrAccessControlListNotSupported,
 		Code:           "AccessControlListNotSupported",
@@ -11,6 +11,7 @@ import (
 
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/mfa"
 	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
 	"git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
@@ -24,6 +25,7 @@ type (
 		cfg       Config
 		ape       APE
 		frostfsid FrostFSID
+		mfa       *mfa.MFA
 	}
 
 	// Config contains data which handler needs to keep.
@@ -68,7 +70,7 @@ const (
 var _ api.Handler = (*handler)(nil)
 
 // New creates new api.Handler using given logger and client.
-func New(log *zap.Logger, obj *layer.Layer, cfg Config, storage APE, ffsid FrostFSID) (api.Handler, error) {
+func New(log *zap.Logger, obj *layer.Layer, cfg Config, storage APE, ffsid FrostFSID, mfaCli *mfa.MFA) (api.Handler, error) {
 	switch {
 	case obj == nil:
 		return nil, errors.New("empty FrostFS Object Layer")
@@ -86,6 +88,7 @@ func New(log *zap.Logger, obj *layer.Layer, cfg Config, storage APE, ffsid Frost
 		cfg:       cfg,
 		ape:       storage,
 		frostfsid: ffsid,
+		mfa:       mfaCli,
 	}, nil
 }
 
@@ -14,6 +14,7 @@ import (
 	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
 	"git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
+	"github.com/pquerna/otp/totp"
 )
 
 // limitation of AWS https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjects.html
@@ -81,6 +82,26 @@ func (h *handler) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 
+	if len(versionID) > 0 && bktSettings.MFADeleteEnabled() {
+		serialNumber, token, err := h.getMFAHeader(r)
+		if err != nil {
+			h.logAndSendError(ctx, w, "could not get mfa header", reqInfo, err)
+			return
+		}
+
+		device, err := h.mfa.GetMFADevice(ctx, reqInfo.Namespace, nameFromArn(serialNumber))
+		if err != nil {
+			h.logAndSendError(ctx, w, "could not get mfa device", reqInfo, err)
+			return
+		}
+
+		validate := totp.Validate(token, device.Key.Secret())
+		if !validate {
+			h.logAndSendError(ctx, w, "could not validate token", reqInfo, fmt.Errorf("mfa Authentication must be used for this request"))
+			return
+		}
+	}
+
 	networkInfo, err := h.obj.GetNetworkInfo(ctx)
 	if err != nil {
 		h.logAndSendError(ctx, w, "could not get network info", reqInfo, err)
@@ -188,6 +209,26 @@ func (h *handler) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Re
 		return
 	}
 
+	if haveVersionedObjects(requested.Objects) && bktSettings.MFADeleteEnabled() {
+		serialNumber, token, err := h.getMFAHeader(r)
+		if err != nil {
+			h.logAndSendError(ctx, w, "could not get mfa header", reqInfo, err)
+			return
+		}
+
+		device, err := h.mfa.GetMFADevice(ctx, reqInfo.Namespace, nameFromArn(serialNumber))
+		if err != nil {
+			h.logAndSendError(ctx, w, "could not get mfa device", reqInfo, err)
+			return
+		}
+
+		validate := totp.Validate(token, device.Key.Secret())
+		if !validate {
+			h.logAndSendError(ctx, w, "could not validate token", reqInfo, fmt.Errorf("mfa Authentication must be used for this request"))
+			return
+		}
+	}
+
 	networkInfo, err := h.obj.GetNetworkInfo(ctx)
 	if err != nil {
 		h.logAndSendError(ctx, w, "could not get network info", reqInfo, err)
@@ -287,3 +328,12 @@ func (h *handler) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) {
 
 	w.WriteHeader(http.StatusNoContent)
 }
+
+func haveVersionedObjects(objects []ObjectIdentifier) bool {
+	for _, o := range objects {
+		if len(o.VersionID) > 0 {
+			return true
+		}
+	}
+	return false
+}
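Both delete handlers above gate versioned deletes on a TOTP check from github.com/pquerna/otp/totp. A minimal sketch of that round trip; the generated key stands in for the device returned by h.mfa.GetMFADevice, and in the real flow the code arrives in the x-amz-mfa header:

```go
package main

import (
	"fmt"
	"time"

	"github.com/pquerna/otp/totp"
)

func main() {
	// Hypothetical enrollment standing in for h.mfa.GetMFADevice(...).
	key, err := totp.Generate(totp.GenerateOpts{Issuer: "frostfs-s3-gw", AccountName: "user"})
	if err != nil {
		panic(err)
	}

	// The client would put the current code in the x-amz-mfa header;
	// here we mint one locally to complete the loop.
	code, err := totp.GenerateCode(key.Secret(), time.Now())
	if err != nil {
		panic(err)
	}

	// Same call the handlers make: true only while the code is current.
	fmt.Println("valid:", totp.Validate(code, key.Secret()))
}
```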
@@ -437,7 +437,10 @@ func createTestBucketWithLock(hc *handlerContext, bktName string, conf *data.Obj
 	sp := &layer.PutSettingsParams{
 		BktInfo: bktInfo,
 		Settings: &data.BucketSettings{
-			Versioning: data.VersioningEnabled,
+			Versioning: data.Versioning{
+				VersioningStatus: data.VersioningEnabled,
+				MFADeleteStatus:  data.MFADeleteDisabled,
+			},
 			LockConfiguration: conf,
 			OwnerKey:          key.PublicKey(),
 		},
@@ -152,6 +152,12 @@ func (h *handler) CreateMultipartUploadHandler(w http.ResponseWriter, r *http.Re
 		p.Header[api.ContentLanguage] = contentLanguage
 	}
 
+	p.CopiesNumbers, err = h.pickCopiesNumbers(p.Header, reqInfo.Namespace, bktInfo.LocationConstraint)
+	if err != nil {
+		h.logAndSendError(ctx, w, "invalid copies number", reqInfo, err, additional...)
+		return
+	}
+
 	if err = h.obj.CreateMultipartUpload(ctx, p); err != nil {
 		h.logAndSendError(ctx, w, "could create multipart upload", reqInfo, err, additional...)
 		return
@@ -223,12 +229,6 @@ func (h *handler) UploadPartHandler(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 
-	p.CopiesNumbers, err = h.pickCopiesNumbers(parseMetadata(r), reqInfo.Namespace, bktInfo.LocationConstraint)
-	if err != nil {
-		h.logAndSendError(ctx, w, "invalid copies number", reqInfo, err, additional...)
-		return
-	}
-
 	hash, err := h.obj.UploadPart(ctx, p)
 	if err != nil {
 		h.logAndSendError(ctx, w, "could not upload a part", reqInfo, err, additional...)
@@ -354,12 +354,6 @@ func (h *handler) UploadPartCopy(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 
-	p.CopiesNumbers, err = h.pickCopiesNumbers(parseMetadata(r), reqInfo.Namespace, bktInfo.LocationConstraint)
-	if err != nil {
-		h.logAndSendError(ctx, w, "invalid copies number", reqInfo, err, additional...)
-		return
-	}
-
 	info, err := h.obj.UploadPartCopy(ctx, p)
 	if err != nil {
 		h.logAndSendError(ctx, w, "could not upload part copy", reqInfo, err, additional...)
@@ -422,12 +416,6 @@ func (h *handler) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.
 		Parts:    reqBody.Parts,
 	}
 
-	c.CopiesNumbers, err = h.pickCopiesNumbers(parseMetadata(r), reqInfo.Namespace, bktInfo.LocationConstraint)
-	if err != nil {
-		h.logAndSendError(ctx, w, "invalid copies number", reqInfo, err, additional...)
-		return
-	}
-
 	// Start complete multipart upload which may take some time to fetch object
 	// and re-upload it part by part.
 	objInfo, err := h.completeMultipartUpload(r, c, bktInfo)
@@ -81,26 +81,6 @@ func TestDeleteMultipartAllParts(t *testing.T) {
 	require.Empty(t, hc.tp.Objects())
 }
 
-func TestMultipartCopiesNumber(t *testing.T) {
-	hc := prepareHandlerContext(t)
-
-	bktName, objName := "bucket", "object"
-
-	createTestBucket(hc, bktName)
-
-	copies := []uint32{2, 0}
-
-	hc.config.copiesNumbers = map[string][]uint32{"default": copies}
-
-	multipartInfo := createMultipartUpload(hc, bktName, objName, nil)
-	uploadPart(hc, bktName, objName, multipartInfo.UploadID, 1, layer.UploadMinSize)
-
-	objs := hc.tp.Objects()
-	require.Len(t, objs, 1)
-
-	require.EqualValues(t, copies, hc.tp.CopiesNumbers(addrFromObject(objs[0]).EncodeToString()))
-}
-
 func TestSpecialMultipartName(t *testing.T) {
 	hc := prepareHandlerContextWithMinCache(t)
 
@@ -812,14 +792,3 @@ func listPartsBase(hc *handlerContext, bktName, objName string, encrypted bool,
 
 	return listPartsResponse
 }
-
-func addrFromObject(obj *object.Object) oid.Address {
-	var addr oid.Address
-	cnrID, _ := obj.ContainerID()
-	objID, _ := obj.ID()
-
-	addr.SetContainer(cnrID)
-	addr.SetObject(objID)
-
-	return addr
-}
@@ -311,23 +311,10 @@ func (h *handler) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
 	}
 }
 
-type BodyReader interface {
-	io.ReadCloser
-	TrailerHeaders() map[string]string
-}
-
-type noTrailerBodyReader struct {
-	io.ReadCloser
-}
-
-func (r *noTrailerBodyReader) TrailerHeaders() map[string]string {
-	return nil
-}
-
-func (h *handler) getBodyReader(r *http.Request) (BodyReader, error) {
+func (h *handler) getBodyReader(r *http.Request) (io.ReadCloser, error) {
 	shaType, streaming := api.IsSignedStreamingV4(r)
 	if !streaming {
-		return &noTrailerBodyReader{r.Body}, nil
+		return r.Body, nil
 	}
 
 	encodings := r.Header.Values(api.ContentEncoding)
@@ -363,15 +350,12 @@ func (h *handler) getBodyReader(r *http.Request) (BodyReader, error) {
 
 	var (
 		err         error
-		chunkReader BodyReader
+		chunkReader io.ReadCloser
 	)
-	switch shaType {
-	case api.StreamingContentSHA256, api.StreamingContentSHA256Trailer:
-		chunkReader, err = newSignV4ChunkedReader(r)
-	case api.StreamingContentV4aSHA256, api.StreamingContentV4aSHA256Trailer:
+	if shaType == api.StreamingContentV4aSHA256 {
 		chunkReader, err = newSignV4aChunkedReader(r)
-	default:
-		chunkReader, err = newUnsignedChunkedReader(r.Body)
+	} else {
+		chunkReader, err = newSignV4ChunkedReader(r)
 	}
 
 	if err != nil {
@@ -840,14 +824,17 @@ func (h *handler) createBucketHandlerPolicy(w http.ResponseWriter, r *http.Reque
 	sp := &layer.PutSettingsParams{
 		BktInfo: bktInfo,
 		Settings: &data.BucketSettings{
-			CannedACL:  cannedACL,
-			OwnerKey:   key,
-			Versioning: data.VersioningUnversioned,
+			CannedACL: cannedACL,
+			OwnerKey:  key,
+			Versioning: data.Versioning{
+				VersioningStatus: data.VersioningUnversioned,
+				MFADeleteStatus:  data.MFADeleteDisabled,
+			},
 		},
 	}
 
 	if p.ObjectLockEnabled {
-		sp.Settings.Versioning = data.VersioningEnabled
+		sp.Settings.Versioning.VersioningStatus = data.VersioningEnabled
 	}
 
 	err = retryer.MakeWithRetry(ctx, func() error {
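For context on what getBodyReader now routes to the two signed readers, this is the aws-chunked framing they decode. The sketch just assembles a signed two-chunk body as a string; the chunk-signature values are placeholders, not real HMACs:

```go
package main

import "fmt"

func main() {
	// aws-chunked framing: <hex-size>;chunk-signature=<sig>\r\n<bytes>\r\n,
	// terminated by a zero-sized chunk. Sizes are hex: 0x1b == 27 bytes.
	payload := "Testing with the {sdk-java}"
	body := fmt.Sprintf("1b;chunk-signature=%s\r\n%s\r\n", "<sig-over-chunk-1>", payload) +
		"0;chunk-signature=<sig-over-empty-chunk>\r\n" +
		"\r\n" // final CRLF closes the stream when no trailers follow

	fmt.Print(body)
}
```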
@@ -377,77 +377,6 @@ func TestPutObjectCheckContentSHA256(t *testing.T) {
 	}
 }
 
-func TestPutObjectWithStreamUnsignedBodySmall(t *testing.T) {
-	hc := prepareHandlerContext(t)
-
-	bktName, objName := "test2", "tmp.txt"
-	createTestBucket(hc, bktName)
-
-	w, req, chunk := getChunkedRequestUnsignedTrailingSmall(hc.context, t, bktName, objName)
-	hc.Handler().PutObjectHandler(w, req)
-	assertStatus(t, w, http.StatusOK)
-
-	w, req = prepareTestRequest(hc, bktName, objName, nil)
-	hc.Handler().HeadObjectHandler(w, req)
-	assertStatus(t, w, http.StatusOK)
-	require.Equal(t, "5", w.Header().Get(api.ContentLength))
-
-	data := getObjectRange(t, hc, bktName, objName, 0, 5)
-	for i := range chunk {
-		require.Equal(t, chunk[i], data[i])
-	}
-}
-
-func TestPutObjectWithStreamUnsignedBody(t *testing.T) {
-	hc := prepareHandlerContext(t)
-
-	bktName, objName := "examplebucket", "chunkObject.txt"
-	createTestBucket(hc, bktName)
-
-	w, req, chunk := getChunkedRequestUnsignedTrailing(hc.context, t, bktName, objName)
-	hc.Handler().PutObjectHandler(w, req)
-	assertStatus(t, w, http.StatusOK)
-
-	w, req = prepareTestRequest(hc, bktName, objName, nil)
-	hc.Handler().HeadObjectHandler(w, req)
-	assertStatus(t, w, http.StatusOK)
-	require.Equal(t, strconv.Itoa(awsChunkedRequestExampleDecodedContentLength), w.Header().Get(api.ContentLength))
-
-	data := getObjectRange(t, hc, bktName, objName, 0, awsChunkedRequestExampleDecodedContentLength)
-	for i := range chunk {
-		require.Equal(t, chunk[i], data[i])
-	}
-}
-
-func TestPutObjectWithStreamBodyAWSExampleTrailing(t *testing.T) {
-	hc := prepareHandlerContext(t)
-
-	bktName, objName := "examplebucket", "chunkObject.txt"
-	createTestBucket(hc, bktName)
-
-	t.Run("valid trailer signature", func(t *testing.T) {
-		w, req, chunk := getChunkedRequestTrailing(hc.context, t, bktName, objName)
-		hc.Handler().PutObjectHandler(w, req)
-		assertStatus(t, w, http.StatusOK)
-
-		w, req = prepareTestRequest(hc, bktName, objName, nil)
-		hc.Handler().HeadObjectHandler(w, req)
-		assertStatus(t, w, http.StatusOK)
-		require.Equal(t, strconv.Itoa(awsChunkedRequestExampleDecodedContentLength), w.Header().Get(api.ContentLength))
-
-		data := getObjectRange(t, hc, bktName, objName, 0, awsChunkedRequestExampleDecodedContentLength)
-		equalDataSlices(t, chunk, data)
-	})
-
-	t.Run("invalid trailer signature", func(t *testing.T) {
-		w, req, _ := getChunkedRequestTrailing(hc.context, t, bktName, objName)
-		body := req.Body.(*customNopCloser)
-		body.Bytes()[body.Len()-2] = 'a'
-		hc.Handler().PutObjectHandler(w, req)
-		assertStatus(t, w, http.StatusForbidden)
-	})
-}
-
 func TestPutObjectWithStreamBodyAWSExample(t *testing.T) {
 	hc := prepareHandlerContext(t)
 
@@ -547,9 +476,9 @@ func getChunkedRequest(ctx context.Context, t *testing.T, bktName, objName strin
 
 	req, err := http.NewRequest("PUT", "https://s3.amazonaws.com/"+bktName+"/"+objName, nil)
 	require.NoError(t, err)
-	req.Header.Set("content-encoding", api.AwsChunked)
+	req.Header.Set("content-encoding", "aws-chunked")
 	req.Header.Set("content-length", strconv.Itoa(awsChunkedRequestExampleContentLength))
-	req.Header.Set("x-amz-content-sha256", api.StreamingContentSHA256)
+	req.Header.Set("x-amz-content-sha256", "STREAMING-AWS4-HMAC-SHA256-PAYLOAD")
 	req.Header.Set("x-amz-decoded-content-length", strconv.Itoa(awsChunkedRequestExampleDecodedContentLength))
 	req.Header.Set("x-amz-storage-class", "REDUCED_REDUNDANCY")
 
@@ -581,202 +510,6 @@ func getChunkedRequest(ctx context.Context, t *testing.T, bktName, objName strin
 	return w, req, chunk
 }
 
-type customNopCloser struct {
-	*bytes.Buffer
-}
-
-func (c *customNopCloser) Close() error {
-	return nil
-}
-
-// getChunkedRequestTrailing implements request example from
-// https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming-trailers.html
-func getChunkedRequestTrailing(ctx context.Context, t *testing.T, bktName, objName string) (*httptest.ResponseRecorder, *http.Request, []byte) {
-	chunk := make([]byte, 65*1024)
-	for i := range chunk {
-		chunk[i] = 'a'
-	}
-	chunk1 := chunk[:64*1024]
-	chunk2 := chunk[64*1024:]
-
-	AWSAccessKeyID := "AKIAIOSFODNN7EXAMPLE"
-	AWSSecretAccessKey := "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"
-
-	awsCreds := aws.Credentials{AccessKeyID: AWSAccessKeyID, SecretAccessKey: AWSSecretAccessKey}
-	signer := v4.NewSigner()
-
-	reqBody := bytes.NewBufferString("10000;chunk-signature=b474d8862b1487a5145d686f57f013e54db672cee1c953b3010fb58501ef5aa2\r\n")
-	_, err := reqBody.Write(chunk1)
-	require.NoError(t, err)
-	_, err = reqBody.WriteString("\r\n400;chunk-signature=1c1344b170168f8e65b41376b44b20fe354e373826ccbbe2c1d40a8cae51e5c7\r\n")
-	require.NoError(t, err)
-	_, err = reqBody.Write(chunk2)
-	require.NoError(t, err)
-	_, err = reqBody.WriteString("\r\n0;chunk-signature=2ca2aba2005185cf7159c6277faf83795951dd77a3a99e6e65d5c9f85863f992\r\n")
-	require.NoError(t, err)
-	_, err = reqBody.WriteString("x-amz-checksum-crc32c:sOO8/Q==\n")
-	require.NoError(t, err)
-
-	// original signature is 63bddb248ad2590c92712055f51b8e78ab024eead08276b24f010b0efd74843f,
-	// but we use d81f82fc3505edab99d459891051a732e8730629a2e4a59689829ca17fe2e435
-	// because original signature is incorrect
-	// it was calculated using the`AWS4-HMAC-SHA256-PAYLOAD` constant in canonical string instead of
-	// `AWS4-HMAC-SHA256-TRAILER` that actually must be used by spec
-	// (java sdk use correct `AWS4-HMAC-SHA256-TRAILER` string).
-	_, err = reqBody.WriteString("x-amz-trailer-signature:d81f82fc3505edab99d459891051a732e8730629a2e4a59689829ca17fe2e435")
-	require.NoError(t, err)
-
-	req, err := http.NewRequest("PUT", "https://s3.amazonaws.com/"+bktName+"/"+objName, nil)
-	require.NoError(t, err)
-	req.Header.Set("content-encoding", api.AwsChunked)
-	req.Header.Set("content-length", strconv.Itoa(awsChunkedRequestExampleContentLength))
-	req.Header.Set("x-amz-content-sha256", api.StreamingContentSHA256Trailer)
-	req.Header.Set("x-amz-decoded-content-length", strconv.Itoa(awsChunkedRequestExampleDecodedContentLength))
-	req.Header.Set("x-amz-storage-class", "REDUCED_REDUNDANCY")
-	req.Header.Set("x-amz-trailer", "x-amz-checksum-crc32c")
-
-	signTime, err := time.Parse("20060102T150405Z", "20130524T000000Z")
-	require.NoError(t, err)
-
-	err = signer.SignHTTP(ctx, awsCreds, req, api.StreamingContentSHA256Trailer, "s3", "us-east-1", signTime)
-	require.NoError(t, err)
-
-	req.Body = &customNopCloser{Buffer: reqBody}
-
-	w := httptest.NewRecorder()
-	reqInfo := middleware.NewReqInfo(w, req, middleware.ObjectRequest{Bucket: bktName, Object: objName}, "")
-	req = req.WithContext(middleware.SetReqInfo(ctx, reqInfo))
-	req = req.WithContext(middleware.SetBox(req.Context(), &middleware.Box{
-		ClientTime: signTime,
-		AuthHeaders: &middleware.AuthHeader{
-			AccessKeyID: AWSAccessKeyID,
-			SignatureV4: "106e2a8a18243abcf37539882f36619c00e2dfc72633413f02d3b74544bfeb8e",
-			Region:      "us-east-1",
-		},
-		AccessBox: &accessbox.Box{
-			Gate: &accessbox.GateData{
-				SecretKey: AWSSecretAccessKey,
-			},
-		},
-	}))
-
-	return w, req, chunk
-}
-
-func getChunkedRequestUnsignedTrailing(ctx context.Context, t *testing.T, bktName, objName string) (*httptest.ResponseRecorder, *http.Request, []byte) {
-	chunk := make([]byte, 65*1024)
-	for i := range chunk {
-		chunk[i] = 'a'
-	}
-	//chunk1 := chunk[:64*1024]
-	//chunk2 := chunk[64*1024:]
-
-	AWSAccessKeyID := "9uEm8zMrGWsEDWiPCnVuQLKTiGtCEXpYXt8eBG7agupw0JDySJZMFuej7PTcPzRqBUyPtFowNu1RtvHULU8XHjie6"
-	AWSSecretAccessKey := "9f546428957ed7e189b7be928906ce7d1d9cb3042dd4d2d5194e28ce8c4c3b8e"
-
-	awsCreds := aws.Credentials{AccessKeyID: AWSAccessKeyID, SecretAccessKey: AWSSecretAccessKey}
-	signer := v4.NewSigner()
-
-	reqBody := bytes.NewBufferString("10400\r\n")
-	_, err := reqBody.Write(chunk)
-	require.NoError(t, err)
-	_, err = reqBody.WriteString("\r\n0\r\n")
-	require.NoError(t, err)
-	_, err = reqBody.WriteString("\r\nx-amz-checksum-crc64nvme:pRf+emrnL+A=\r\n\r\n")
-	require.NoError(t, err)
-
-	req, err := http.NewRequest("PUT", "https://localhost:8184/"+bktName+"/"+objName, nil)
-	//req, err := http.NewRequest("PUT", "https://localhost:8184/test2/body", nil)
-	require.NoError(t, err)
-	req.Header.Set("x-amz-sdk-checksum-algorithm", "CRC64NVME")
-	req.Header.Set("content-encoding", api.AwsChunked)
-	req.Header.Set("x-amz-trailer", "x-amz-checksum-crc64nvme")
-	req.Header.Set("x-amz-content-sha256", api.StreamingUnsignedPayloadTrailer)
-	req.Header.Set("x-amz-decoded-content-length", strconv.Itoa(awsChunkedRequestExampleDecodedContentLength))
-	req.Header.Set("x-amz-storage-class", "REDUCED_REDUNDANCY")
-
-	signTime, err := time.Parse("20060102T150405Z", "20250131T140527Z")
-	require.NoError(t, err)
-
-	err = signer.SignHTTP(ctx, awsCreds, req, api.StreamingContentSHA256Trailer, "s3", "ru", signTime)
-	require.NoError(t, err)
-
-	req.Body = io.NopCloser(reqBody)
-
-	w := httptest.NewRecorder()
-	reqInfo := middleware.NewReqInfo(w, req, middleware.ObjectRequest{Bucket: bktName, Object: objName}, "")
-	req = req.WithContext(middleware.SetReqInfo(ctx, reqInfo))
-	req = req.WithContext(middleware.SetBox(req.Context(), &middleware.Box{
-		ClientTime: signTime,
-		AuthHeaders: &middleware.AuthHeader{
-			AccessKeyID: AWSAccessKeyID,
-			SignatureV4: "a075c83779d1c3c02254fbe4c9eff0a21556d15556fc6a25db69147c4838226b",
-			Region:      "ru",
-		},
-		AccessBox: &accessbox.Box{
-			Gate: &accessbox.GateData{
-				SecretKey: AWSSecretAccessKey,
-			},
-		},
-	}))
-
-	return w, req, chunk
-}
-
-func getChunkedRequestUnsignedTrailingSmall(ctx context.Context, t *testing.T, bktName, objName string) (*httptest.ResponseRecorder, *http.Request, []byte) {
-	AWSAccessKeyID := "9uEm8zMrGWsEDWiPCnVuQLKTiGtCEXpYXt8eBG7agupw0JDySJZMFuej7PTcPzRqBUyPtFowNu1RtvHULU8XHjie6"
-	AWSSecretAccessKey := "9f546428957ed7e189b7be928906ce7d1d9cb3042dd4d2d5194e28ce8c4c3b8e"
-
-	awsCreds := aws.Credentials{AccessKeyID: AWSAccessKeyID, SecretAccessKey: AWSSecretAccessKey}
-	signer := v4.NewSigner()
-
-	chunk := "tmp2\n"
-
-	reqBody := bytes.NewBufferString("5\r\n")
-	_, err := reqBody.WriteString(chunk)
-	require.NoError(t, err)
-	_, err = reqBody.WriteString("\r\n0\r\n")
-	require.NoError(t, err)
-	_, err = reqBody.WriteString("x-amz-checksum-crc64nvme:q1EYl4rI0TU=\r\n\r\n")
-	require.NoError(t, err)
-
-	req, err := http.NewRequest("PUT", "https://localhost:8184/"+bktName+"/"+objName, nil)
-	require.NoError(t, err)
-	req.Header.Set("x-amz-sdk-checksum-algorithm", "CRC64NVME")
-	req.Header.Set("content-encoding", api.AwsChunked)
-	req.Header.Set("x-amz-trailer", "x-amz-checksum-crc64nvme")
-	req.Header.Set("x-amz-content-sha256", api.StreamingUnsignedPayloadTrailer)
-	req.Header.Set("x-amz-decoded-content-length", "5")
-	req.Header.Set("x-amz-storage-class", "REDUCED_REDUNDANCY")
-
-	signTime, err := time.Parse("20060102T150405Z", "20250203T063745Z")
-	require.NoError(t, err)
-
-	err = signer.SignHTTP(ctx, awsCreds, req, api.StreamingContentSHA256Trailer, "s3", "ru", signTime)
-	require.NoError(t, err)
-
-	req.Body = io.NopCloser(reqBody)
-
-	w := httptest.NewRecorder()
-	reqInfo := middleware.NewReqInfo(w, req, middleware.ObjectRequest{Bucket: bktName, Object: objName}, "")
-	req = req.WithContext(middleware.SetReqInfo(ctx, reqInfo))
-	req = req.WithContext(middleware.SetBox(req.Context(), &middleware.Box{
-		ClientTime: signTime,
-		AuthHeaders: &middleware.AuthHeader{
-			AccessKeyID: AWSAccessKeyID,
-			SignatureV4: "a075c83779d1c3c02254fbe4c9eff0a21556d15556fc6a25db69147c4838226b",
-			Region:      "ru",
-		},
-		AccessBox: &accessbox.Box{
-			Gate: &accessbox.GateData{
-				SecretKey: AWSSecretAccessKey,
-			},
-		},
-	}))
-
-	return w, req, []byte(chunk)
-}
-
 func getEmptyChunkedRequest(ctx context.Context, t *testing.T, bktName, objName string) (*httptest.ResponseRecorder, *http.Request) {
 	AWSAccessKeyID := "48c1K4PLVb7SvmV3PjDKEuXaMh8yZMXZ8Wx9msrkKcYw06dZeaxeiPe8vyFm2WsoeVaNt7UWEjNsVkagDs8oX4XXh"
 	AWSSecretAccessKey := "09260955b4eb0279dc017ba20a1ddac909cbd226c86cbb2d868e55534c8e64b0"
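The unsigned-trailer variant exercised by the removed helpers frames chunks without per-chunk signatures: bare hex sizes, a zero chunk, then name:value trailer lines. A sketch of that body, reusing the checksum value from the removed test:

```go
package main

import "fmt"

func main() {
	// STREAMING-UNSIGNED-PAYLOAD-TRAILER framing: no chunk-signature fields,
	// a zero-size chunk terminator, then "name:value" trailer lines.
	body := "5\r\n" + // chunk size in hex (5 bytes follow)
		"tmp2\n" + // chunk payload
		"\r\n0\r\n" + // zero chunk ends the payload
		"x-amz-checksum-crc64nvme:q1EYl4rI0TU=\r\n\r\n" // trailer from the removed test

	fmt.Print(body)
}
```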
@@ -8,8 +8,6 @@ import (
 	"errors"
 	"io"
 	"net/http"
-	"slices"
-	"strings"
 	"time"
 
 	v4 "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4sdk2/signer/v4"
@@ -29,19 +27,16 @@ type (
 		reader       *bufio.Reader
 		streamSigner *v4.StreamSigner
 
-		trailerHeaders []string
-		trailers       map[string]string
-		requestTime    time.Time
-		buffer         []byte
-		offset         int
-		err            error
+		requestTime time.Time
+		buffer      []byte
+		offset      int
+		err         error
 	}
 )
 
 var (
 	errGiantChunk               = errors.New("chunk too big: choose chunk size <= 16MiB")
 	errMalformedChunkedEncoding = errors.New("malformed chunked encoding")
-	errMalformedTrailerHeaders  = errors.New("malformed trailer headers")
 )
 
 func (c *s3ChunkReader) Close() (err error) {
@@ -112,9 +107,29 @@ func (c *s3ChunkReader) Read(buf []byte) (num int, err error) {
 		c.err = errMalformedChunkedEncoding
 		return num, c.err
 	}
 
-	if err = c.readCRLF(); err != nil {
-		return num, err
+	b, err := c.reader.ReadByte()
+	if err == io.EOF {
+		err = io.ErrUnexpectedEOF
 	}
+	if err != nil {
+		c.err = err
+		return num, c.err
+	}
+	if b != '\r' {
+		c.err = errMalformedChunkedEncoding
+		return num, c.err
+	}
+	b, err = c.reader.ReadByte()
+	if err == io.EOF {
+		err = io.ErrUnexpectedEOF
+	}
+	if err != nil {
+		c.err = err
+		return num, c.err
+	}
+	if b != '\n' {
+		c.err = errMalformedChunkedEncoding
+		return num, c.err
+	}
 
 	if cap(c.buffer) < size {
@@ -132,6 +147,23 @@ func (c *s3ChunkReader) Read(buf []byte) (num int, err error) {
 		c.err = err
 		return num, c.err
 	}
+	b, err = c.reader.ReadByte()
+	if b != '\r' || err != nil {
+		c.err = errMalformedChunkedEncoding
+		return num, c.err
+	}
+	b, err = c.reader.ReadByte()
+	if err == io.EOF {
+		err = io.ErrUnexpectedEOF
+	}
+	if err != nil {
+		c.err = err
+		return num, c.err
+	}
+	if b != '\n' {
+		c.err = errMalformedChunkedEncoding
+		return num, c.err
+	}
 
 	// Once we have read the entire chunk successfully, we verify
 	// that the received signature matches our computed signature.
@@ -149,99 +181,16 @@
 	// If the chunk size is zero we return io.EOF. As specified by AWS,
 	// only the last chunk is zero-sized.
 	if size == 0 {
-		if len(c.trailerHeaders) != 0 {
-			if err = c.readTrailers(); err != nil {
-				c.err = err
-				return num, c.err
-			}
-		} else if err = c.readCRLF(); err != nil {
-			return num, err
-		}
-
 		c.err = io.EOF
 		return num, c.err
 	}
 
-	if err = c.readCRLF(); err != nil {
-		return num, err
-	}
-
 	c.offset = copy(buf, c.buffer)
 	num += c.offset
 	return num, err
 }
 
-func (c *s3ChunkReader) readCRLF() error {
-	for _, ch := range [2]byte{'\r', '\n'} {
-		b, err := c.reader.ReadByte()
-		if err == io.EOF {
-			err = io.ErrUnexpectedEOF
-		}
-
-		if err != nil {
-			c.err = err
-			return c.err
-		}
-		if b != ch {
-			c.err = errMalformedChunkedEncoding
-			return c.err
-		}
-	}
-
-	return nil
-}
-
-func (c *s3ChunkReader) readTrailers() error {
-	var k, v []byte
-	var err error
-	for err == nil {
-		k, err = c.reader.ReadBytes(':')
-		if err != nil {
-			if err == io.EOF {
-				break
-			}
-			c.err = errMalformedTrailerHeaders
-			return c.err
-		}
-		v, err = c.reader.ReadBytes('\n')
-		if err != nil && err != io.EOF {
-			c.err = errMalformedTrailerHeaders
-			return c.err
-		}
-		if len(v) >= 2 && v[len(v)-2] == '\r' {
-			v[len(v)-2] = '\n'
-			v = v[:len(v)-1]
-		}
-
-		switch {
-		case slices.Contains(c.trailerHeaders, string(k[:len(k)-1])):
-			c.buffer = append(append(c.buffer, k...), v...) // todo use copy
-		case string(k) == "x-amz-trailer-signature:":
-			calculatedSignature, err := c.streamSigner.GetTrailerSignature(c.buffer, c.requestTime)
-			if err != nil {
-				c.err = err
-				return c.err
-			}
-			if string(v[:64]) != hex.EncodeToString(calculatedSignature) {
-				c.err = errs.GetAPIError(errs.ErrSignatureDoesNotMatch)
-				return c.err
-			}
-		default:
-			c.err = errMalformedTrailerHeaders
-			return c.err
-		}
-
-		c.trailers[string(k[:len(k)-1])] = string(v[:len(v)-1])
-	}
-
-	return nil
-}
-
-func (c *s3ChunkReader) TrailerHeaders() map[string]string {
-	return c.trailers
-}
-
-func newSignV4ChunkedReader(req *http.Request) (*s3ChunkReader, error) {
+func newSignV4ChunkedReader(req *http.Request) (io.ReadCloser, error) {
 	ctx := req.Context()
 	box, err := middleware.GetBoxData(ctx)
 	if err != nil {
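One side note on the trailer check removed above: it compared the received hex signature to the computed one with a plain string comparison, which is not constant-time. The usual pattern is hmac.Equal over raw bytes; this is a sketch of that pattern, not something this diff changes:

```go
package main

import (
	"crypto/hmac"
	"encoding/hex"
	"fmt"
)

func main() {
	// Hypothetical received (hex) and computed (raw) signatures.
	received := "aabbcc"
	computed := []byte{0xaa, 0xbb, 0xcc}

	got, err := hex.DecodeString(received)
	if err != nil {
		panic(err)
	}

	// hmac.Equal compares in constant time, avoiding the timing side
	// channel that a plain string comparison can leak.
	fmt.Println("match:", hmac.Equal(got, computed))
}
```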
@@ -265,19 +214,11 @@ func newSignV4ChunkedReader(req *http.Request) (*s3ChunkReader, error) {
 	}
 	newStreamSigner := v4.NewStreamSigner(currentCredentials, "s3", authHeaders.Region, seed)
 
-	var trailerHeaders []string
-	trailer := req.Header.Get("x-amz-trailer")
-	if trailer != "" {
-		trailerHeaders = strings.Split(trailer, ";")
-	}
-
 	return &s3ChunkReader{
-		ctx:            ctx,
-		reader:         bufio.NewReader(req.Body),
-		streamSigner:   newStreamSigner,
-		requestTime:    reqTime,
-		buffer:         make([]byte, 64*1024),
-		trailerHeaders: trailerHeaders,
-		trailers:       make(map[string]string, len(trailerHeaders)),
+		ctx:          ctx,
+		reader:       bufio.NewReader(req.Body),
+		streamSigner: newStreamSigner,
+		requestTime:  reqTime,
+		buffer:       make([]byte, 64*1024),
 	}, nil
 }
 
@@ -2,12 +2,8 @@ package handler
 
 import (
 	"bytes"
-	"context"
-	"fmt"
 	"io"
 	"net/http"
-	"strconv"
-	"strings"
 	"testing"
 	"time"
 
@@ -16,102 +12,22 @@ import (
 	"github.com/stretchr/testify/require"
 )
 
-func TestSigV4AChunkedReader(t *testing.T) {
-	t.Run("with trailers", func(t *testing.T) {
-		accessKeyID := "9uEm8zMrGWsEDWiPCnVuQLKTiGtCEXpYXt8eBG7agupw0JDySJZMFuej7PTcPzRqBUyPtFowNu1RtvHULU8XHjie6"
-		secretKey := "9f546428957ed7e189b7be928906ce7d1d9cb3042dd4d2d5194e28ce8c4c3b8e"
+func TestSigV4AStreaming(t *testing.T) {
+	accessKeyID := "2XEbqH4M3ym7a3E3esxfZ2gRLnMwDXrCN4y1SkQg5fHa09sThVmVL3EE6xeKsyMzaqu5jPi41YCaVbnwbwCTF3bx1"
+	secretKey := "00637f53f842573aaa06c2164c598973cd986880987111416cf71f1619def537"
 
-	chunk1 := "Testing with the {sdk-java}"
-	body := "1b;chunk-signature=3045022100956ca03d2166100b455b532de542892f73925fbcea2f6498674a39a61bb4860902202977c1d47aea548d434540f89640ce97e605d18353cbbd75a619874f02e3dd22**\r\n" +
-		chunk1 +
-		"\r\n0;chunk-signature=304502210097dcc1721675469910ef8712fc2af0678eb90c12216dd6228c6b621fb6f805a0022047d27d21ae2af8a8172f2ef83c81ce9d4746aa88fc9ee0ca783eaa5e71aaef6c**\r\n" +
-		"x-amz-checksum-crc32:Np6zMg==\r\n" +
-		"x-amz-trailer-signature:304502200ecacd9aa2c432af5a2327c22a2ff9b32f44ab8559de00309219aef105eaaac102210092cbc0e78c4bcd56490a73da8ceed1934be80f3affeffb14d8c743fc292dda4f**\r\n\r\n"
+	chunk1 := "Testing with the {sdk-java}"
+	reqBody := bytes.NewBufferString("1b;chunk-signature=3045022100b63692a1b20759bdabd342011823427a8952df75c93174d98ad043abca8052e002201695228a91ba986171b8d0ad20856d3d94ca3614d0a90a50a531ba8e52447b9b**\r\n")
+	_, err := reqBody.WriteString(chunk1)
+	require.NoError(t, err)
+	_, err = reqBody.WriteString("\r\n0;chunk-signature=30440220455885a2d4e9f705256ca6b0a5a22f7f784780ccbd1c0a371e5db3059c91745b022073259dd44746cbd63261d628a04d25be5a32a974c077c5c2d83c8157fb323b9f****\r\n\r\n")
+	require.NoError(t, err)
 
-	reqBody := bytes.NewBufferString(body)
-	req, err := http.NewRequest("PUT", "https://localhost:8184/test2/tmp", reqBody)
-	require.NoError(t, err)
-	req.Header.Set("x-amz-trailer", "x-amz-checksum-crc32")
+	req, err := http.NewRequest("PUT", "http://localhost:8084/test/tmp", reqBody)
+	require.NoError(t, err)
 
-	signature := "3045022100ddbc6ab11785d7f23d299de7db97379116f543377a44e38170a4e43b38b0d62b02201d8dca13c67f04f45491345152db4b704768eb8bb89b5215fd59bb4a4d9d7b61"
-	signingTime, err := time.Parse("20060102T150405Z", "20250203T144621Z")
 	require.NoError(t, err)
 
 	key, err := keys.NewPrivateKey()
 	require.NoError(t, err)
 
 	accessBox, err := newTestAccessBox(key)
 	require.NoError(t, err)
 	accessBox.Gate.SecretKey = secretKey
 
 	ctx := middleware.SetBox(req.Context(), &middleware.Box{
 		AccessBox: accessBox,
 		AuthHeaders: &middleware.AuthHeader{
 			AccessKeyID: accessKeyID,
 			SignatureV4: signature,
 		},
 		ClientTime: signingTime,
 	})
 	req = req.WithContext(ctx)
 
 	r, err := newSignV4aChunkedReader(req)
 	require.NoError(t, err)
 
 	data, err := io.ReadAll(r)
 	require.NoError(t, err)
 	require.Equal(t, chunk1, string(data))
-	})
-
-	t.Run("without trailers", func(t *testing.T) {
-		accessKeyID := "2XEbqH4M3ym7a3E3esxfZ2gRLnMwDXrCN4y1SkQg5fHa09sThVmVL3EE6xeKsyMzaqu5jPi41YCaVbnwbwCTF3bx1"
-		secretKey := "00637f53f842573aaa06c2164c598973cd986880987111416cf71f1619def537"
-
-		chunk1 := "Testing with the {sdk-java}"
-		reqBody := bytes.NewBufferString("1b;chunk-signature=3045022100b63692a1b20759bdabd342011823427a8952df75c93174d98ad043abca8052e002201695228a91ba986171b8d0ad20856d3d94ca3614d0a90a50a531ba8e52447b9b**\r\n")
-		_, err := reqBody.WriteString(chunk1)
-		require.NoError(t, err)
-		_, err = reqBody.WriteString("\r\n0;chunk-signature=30440220455885a2d4e9f705256ca6b0a5a22f7f784780ccbd1c0a371e5db3059c91745b022073259dd44746cbd63261d628a04d25be5a32a974c077c5c2d83c8157fb323b9f****\r\n\r\n")
-		require.NoError(t, err)
-
-		req, err := http.NewRequest("PUT", "http://localhost:8084/test/tmp", reqBody)
-		require.NoError(t, err)
-
-		signature := "30440220574244c5ff5deba388c4e3b0541a42113179b6839b3e6b4212d255a118fa9089022056f7b9b72c93f67dbcd25fe9ca67950b5913fc00bb7a62bc276c21e828c0b6c7"
-		signingTime, err := time.Parse("20060102T150405Z", "20240904T133253Z")
-		require.NoError(t, err)
-
-		key, err := keys.NewPrivateKey()
-		require.NoError(t, err)
-
-		accessBox, err := newTestAccessBox(key)
-		require.NoError(t, err)
-		accessBox.Gate.SecretKey = secretKey
-
-		ctx := middleware.SetBox(req.Context(), &middleware.Box{
-			AccessBox: accessBox,
-			AuthHeaders: &middleware.AuthHeader{
-				AccessKeyID: accessKeyID,
-				SignatureV4: signature,
-			},
-			ClientTime: signingTime,
-		})
-		req = req.WithContext(ctx)
-
-		r, err := newSignV4aChunkedReader(req)
-		require.NoError(t, err)
-
-		data, err := io.ReadAll(r)
-		require.NoError(t, err)
-		require.Equal(t, chunk1, string(data))
-	})
 }
 
 func TestSigV4ChunkedReader(t *testing.T) {
 	accessKeyID := "9uEm8zMrGWsEDWiPCnVuQLKTiGtCEXpYXt8eBG7agupw0JDySJZMFuej7PTcPzRqBUyPtFowNu1RtvHULU8XHjie6"
 	secretKey := "9f546428957ed7e189b7be928906ce7d1d9cb3042dd4d2d5194e28ce8c4c3b8e"
 
-	signature := "b740b3b2a08c541c3fc4bd155a448e25408b509a29af98a86356b894930b93e8"
-	signingTime, err := time.Parse("20060102T150405Z", "20250203T134442Z")
+	signature := "30440220574244c5ff5deba388c4e3b0541a42113179b6839b3e6b4212d255a118fa9089022056f7b9b72c93f67dbcd25fe9ca67950b5913fc00bb7a62bc276c21e828c0b6c7"
+	signingTime, err := time.Parse("20060102T150405Z", "20240904T133253Z")
 	require.NoError(t, err)
 
 	key, err := keys.NewPrivateKey()
@@ -121,117 +37,21 @@ func TestSigV4ChunkedReader(t *testing.T) {
 	require.NoError(t, err)
 	accessBox.Gate.SecretKey = secretKey
 
-	setBoxFn := func(ctx context.Context) context.Context {
-		return middleware.SetBox(ctx, &middleware.Box{
-			AccessBox: accessBox,
-			AuthHeaders: &middleware.AuthHeader{
-				AccessKeyID: accessKeyID,
-				SignatureV4: signature,
-				Region:      "us-east-1",
-			},
-			ClientTime: signingTime,
-		})
-	}
-
-	chunk1 := "Testing with the {sdk-java}"
-
-	t.Run("with trailers", func(t *testing.T) {
-		body := "1b;chunk-signature=a6a9be5fff05db0b542aedb2203d892b4162250885d06b1422b173ee0ea92ba5\r\n" +
-			chunk1 +
-			"\r\n0;chunk-signature=31afd083a57c416c46afaf101649d7f0c6c0627cfa60c0f93d1f7ea84396ee42\r\n" +
-			"x-amz-checksum-crc32:Np6zMg==\r\n" +
-			"x-amz-trailer-signature:40ec0046ac730fa27a1451d00d849056c49553ee753f5d158306d05671a42125\r\n\r\n"
-
-		reqBody := bytes.NewBufferString(body)
-		req, err := http.NewRequest("PUT", "https://localhost:8184/test2/tmp", reqBody)
-		require.NoError(t, err)
-		req.Header.Set("x-amz-trailer", "x-amz-checksum-crc32")
-
-		req = req.WithContext(setBoxFn(req.Context()))
-
-		r, err := newSignV4ChunkedReader(req)
-		require.NoError(t, err)
-
-		data, err := io.ReadAll(r)
-		require.NoError(t, err)
-		require.Equal(t, chunk1, string(data))
+	ctx := middleware.SetBox(req.Context(), &middleware.Box{
+		AccessBox: accessBox,
+		AuthHeaders: &middleware.AuthHeader{
+			AccessKeyID: accessKeyID,
+			SignatureV4: signature,
+		},
+		ClientTime: signingTime,
 	})
+	req = req.WithContext(ctx)
 
-	t.Run("without trailers", func(t *testing.T) {
-		body := "1b;chunk-signature=a6a9be5fff05db0b542aedb2203d892b4162250885d06b1422b173ee0ea92ba5\r\n" +
-			chunk1 +
-			"\r\n0;chunk-signature=31afd083a57c416c46afaf101649d7f0c6c0627cfa60c0f93d1f7ea84396ee42\r\n\r\n"
-		reqBody := bytes.NewBufferString(body)
-		req, err := http.NewRequest("PUT", "https://localhost:8184/test2/tmp", reqBody)
-		require.NoError(t, err)
-
-		req = req.WithContext(setBoxFn(req.Context()))
-
-		r, err := newSignV4ChunkedReader(req)
-		require.NoError(t, err)
-
-		data, err := io.ReadAll(r)
-		require.NoError(t, err)
-		require.Equal(t, chunk1, string(data))
-	})
-}
-
-func TestUnsignedChunkReader(t *testing.T) {
-	chunk1 := "chunk1"
-	chunk2 := "chunk2"
-
-	t.Run("with trailer", func(t *testing.T) {
-		chunks := []string{chunk1, chunk2}
-		trailer := map[string]string{"x-amz-checksum-crc64nvme": "q1EYl4rI0TU="}
-		body, expected := getChunkedBody(t, chunks, trailer)
-
-		r, err := newUnsignedChunkedReader(body)
-		require.NoError(t, err)
-
-		data, err := io.ReadAll(r)
-		require.NoError(t, err)
-		require.Equal(t, expected, string(data))
-
-		require.EqualValues(t, trailer, r.TrailerHeaders())
-	})
-
-	t.Run("without trailer", func(t *testing.T) {
-		chunks := []string{chunk1, chunk2}
-		body, expected := getChunkedBody(t, chunks, nil)
-
-		r, err := newUnsignedChunkedReader(body)
-		require.NoError(t, err)
-
-		data, err := io.ReadAll(r)
-		require.NoError(t, err)
-		require.Equal(t, expected, string(data))
-	})
-}
-
-func getChunkedBody(t *testing.T, chunks []string, trailers map[string]string) (*bytes.Buffer, string) {
-	res := bytes.NewBufferString("")
-
-	for i, chunk := range chunks {
-		meta := strconv.FormatInt(int64(len(chunk)), 16) + "\r\n"
-		if i != 0 {
-			meta = "\r\n" + meta
-		}
-		_, err := res.WriteString(meta)
-		require.NoError(t, err)
-		_, err = res.WriteString(chunk)
-		require.NoError(t, err)
-	}
-
-	_, err := res.WriteString("\r\n0\r\n")
+	r, err := newSignV4aChunkedReader(req)
 	require.NoError(t, err)
 
-	for k, v := range trailers {
-		_, err := res.WriteString(fmt.Sprintf("%s:%s\n", k, v))
-		require.NoError(t, err)
-	}
-
-	_, err = res.WriteString("\r\n")
+	data, err := io.ReadAll(r)
 	require.NoError(t, err)
 
-	return res, strings.Join(chunks, "")
+	require.Equal(t, chunk1, string(data))
 }
@@ -1,161 +0,0 @@
-package handler
-
-import (
-	"bufio"
-	"io"
-)
-
-type (
-	s3UnsignedChunkReader struct {
-		reader *bufio.Reader
-
-		trailers map[string]string
-		buffer   []byte
-		offset   int
-		err      error
-	}
-)
-
-func (c *s3UnsignedChunkReader) Close() (err error) {
-	return nil
-}
-
-func (c *s3UnsignedChunkReader) Read(buf []byte) (num int, err error) {
-	if c.offset > 0 {
-		num = copy(buf, c.buffer[c.offset:])
-		if num == len(buf) {
-			c.offset += num
-			return num, nil
-		}
-		c.offset = 0
-		buf = buf[num:]
-	}
-
-	var size int
-	var b byte
-	for {
-		b, err = c.reader.ReadByte()
-		if err == io.EOF {
-			err = io.ErrUnexpectedEOF
-		}
-		if err != nil {
-			c.err = err
-			return num, c.err
-		}
-		if b == '\r' {
-			break
-		}
-
-		// Manually deserialize the size since AWS specified
-		// the chunk size to be of variable width. In particular,
-		// a size of 16 is encoded as `10` while a size of 64 KB
-		// is `10000`.
-		switch {
-		case b >= '0' && b <= '9':
-			size = size<<4 | int(b-'0')
-		case b >= 'a' && b <= 'f':
-			size = size<<4 | int(b-('a'-10))
-		case b >= 'A' && b <= 'F':
-			size = size<<4 | int(b-('A'-10))
-		default:
-			c.err = errMalformedChunkedEncoding
-			return num, c.err
-		}
-		if size > maxChunkSize {
-			c.err = errGiantChunk
-			return num, c.err
-		}
-	}
-
-	if b != '\r' {
-		c.err = errMalformedChunkedEncoding
-		return num, c.err
-	}
-	b, err = c.reader.ReadByte()
-	if err == io.EOF {
-		err = io.ErrUnexpectedEOF
-	}
-	if err != nil {
-		c.err = err
-		return num, c.err
-	}
-	if b != '\n' {
-		c.err = errMalformedChunkedEncoding
-		return num, c.err
-	}
-
-	if cap(c.buffer) < size {
-		c.buffer = make([]byte, size)
-	} else {
-		c.buffer = c.buffer[:size]
-	}
-
-	// Now, we read the payload and compute its SHA-256 hash.
-	_, err = io.ReadFull(c.reader, c.buffer)
-	if err == io.EOF && size != 0 {
-		err = io.ErrUnexpectedEOF
-	}
-	if err != nil && err != io.EOF {
-		c.err = err
-		return num, c.err
-	}
-
-	// If the chunk size is zero we return io.EOF. As specified by AWS,
-	// only the last chunk is zero-sized.
-	if size == 0 {
-		var k, v string
-		for err == nil {
-			k, err = c.reader.ReadString(':')
-			if err != nil {
-				if err == io.EOF {
-					break
-				}
-				c.err = errMalformedTrailerHeaders
-				return num, c.err
-			}
-			v, err = c.reader.ReadString('\n')
-			if err != nil {
-				c.err = errMalformedTrailerHeaders
-				return num, c.err
-			}
-			c.trailers[k[:len(k)-1]] = v[:len(v)-1]
-		}
-
-		c.err = io.EOF
-		return num, c.err
-	}
-
-	b, err = c.reader.ReadByte()
-	if b != '\r' || err != nil {
-		c.err = errMalformedChunkedEncoding
-		return num, c.err
-	}
-	b, err = c.reader.ReadByte()
-	if err == io.EOF {
-		err = io.ErrUnexpectedEOF
-	}
-	if err != nil {
-		c.err = err
-		return num, c.err
-	}
-	if b != '\n' {
-		c.err = errMalformedChunkedEncoding
-		return num, c.err
-	}
-
-	c.offset = copy(buf, c.buffer)
-	num += c.offset
-	return num, err
-}
-
-func (c *s3UnsignedChunkReader) TrailerHeaders() map[string]string {
-	return c.trailers
-}
-
-func newUnsignedChunkedReader(body io.Reader) (*s3UnsignedChunkReader, error) {
-	return &s3UnsignedChunkReader{
-		reader:   bufio.NewReader(body),
-		trailers: map[string]string{},
-		buffer:   make([]byte, 64*1024),
-	}, nil
-}
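The deleted reader parses the hex chunk size byte-by-byte to avoid an intermediate string allocation. An equivalent sketch with the standard library, reading up to the CR and converting via strconv.ParseUint:

```go
package main

import (
	"bufio"
	"fmt"
	"strconv"
	"strings"
)

func main() {
	r := bufio.NewReader(strings.NewReader("10000\r\n...payload..."))

	// Read up to and including the CR; strip it before parsing.
	line, err := r.ReadString('\r')
	if err != nil {
		panic(err)
	}

	// Hex, as AWS encodes chunk sizes: "10000" is 64 KiB.
	size, err := strconv.ParseUint(strings.TrimSuffix(line, "\r"), 16, 64)
	if err != nil {
		panic(err)
	}
	fmt.Println(size) // 65536
}
```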
@@ -7,8 +7,6 @@ import (
 	"fmt"
 	"io"
 	"net/http"
-	"slices"
-	"strings"
 	"time"
 
 	v4a "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4asdk2"
@@ -22,12 +20,10 @@ type (
 		reader       *bufio.Reader
 		streamSigner *v4a.StreamSigner
 
-		trailerHeaders []string
-		trailers       map[string]string
-		requestTime    time.Time
-		buffer         []byte
-		offset         int
-		err            error
+		requestTime time.Time
+		buffer      []byte
+		offset      int
+		err         error
 	}
 )
 
@@ -91,9 +87,21 @@ func (c *s3v4aChunkReader) Read(buf []byte) (num int, err error) {
 		c.err = errMalformedChunkedEncoding
 		return num, c.err
 	}
 
-	if err = c.readCRLF(); err != nil {
-		return num, err
+	b, err := c.reader.ReadByte()
+	if err != nil {
+		return c.handleErr(num, err)
 	}
+	if b != '\r' {
+		c.err = errMalformedChunkedEncoding
+		return num, c.err
+	}
+	b, err = c.reader.ReadByte()
+	if err != nil {
+		return c.handleErr(num, err)
+	}
+	if b != '\n' {
+		c.err = errMalformedChunkedEncoding
+		return num, c.err
+	}
 
 	if cap(c.buffer) < size {
@@ -111,6 +119,19 @@ func (c *s3v4aChunkReader) Read(buf []byte) (num int, err error) {
 		c.err = err
 		return num, c.err
 	}
+	b, err = c.reader.ReadByte()
+	if b != '\r' || err != nil {
+		c.err = errMalformedChunkedEncoding
+		return num, c.err
+	}
+	b, err = c.reader.ReadByte()
+	if err != nil {
+		return c.handleErr(num, err)
+	}
+	if b != '\n' {
+		c.err = errMalformedChunkedEncoding
+		return num, c.err
+	}
 
 	// Once we have read the entire chunk successfully, we verify
 	// that the received signature is valid.
@@ -129,23 +150,10 @@ func (c *s3v4aChunkReader) Read(buf []byte) (num int, err error) {
 	// If the chunk size is zero we return io.EOF. As specified by AWS,
 	// only the last chunk is zero-sized.
 	if size == 0 {
-		if len(c.trailerHeaders) != 0 {
-			if err = c.readTrailers(); err != nil {
-				c.err = err
-				return num, c.err
-			}
-		} else if err = c.readCRLF(); err != nil {
-			return num, err
-		}
-
 		c.err = io.EOF
 		return num, c.err
 	}
 
-	if err = c.readCRLF(); err != nil {
-		return num, err
-	}
-
 	c.offset = copy(buf, c.buffer)
 	num += c.offset
 	return num, err
@@ -160,78 +168,7 @@ func (c *s3v4aChunkReader) handleErr(num int, err error) (int, error) {
 	return num, c.err
 }
 
-func (c *s3v4aChunkReader) readCRLF() error {
-	for _, ch := range [2]byte{'\r', '\n'} {
-		b, err := c.reader.ReadByte()
-		if err == io.EOF {
-			err = io.ErrUnexpectedEOF
-		}
-
-		if err != nil {
-			c.err = err
-			return c.err
-		}
-		if b != ch {
-			c.err = errMalformedChunkedEncoding
-			return c.err
-		}
-	}
-
-	return nil
-}
-
-func (c *s3v4aChunkReader) readTrailers() error {
-	var k, v []byte
-	var err error
-	for err == nil {
-		k, err = c.reader.ReadBytes(':')
-		if err != nil {
-			if err == io.EOF {
-				break
-			}
-			c.err = errMalformedTrailerHeaders
-			return c.err
-		}
-		v, err = c.reader.ReadBytes('\n')
-		if err != nil && err != io.EOF {
-			c.err = errMalformedTrailerHeaders
-			return c.err
-		}
-		if len(v) >= 2 && v[len(v)-2] == '\r' {
-			v[len(v)-2] = '\n'
-			v = v[:len(v)-1]
-		}
-
-		switch {
-		case slices.Contains(c.trailerHeaders, string(k[:len(k)-1])):
-			c.buffer = append(append(c.buffer, k...), v...) // todo use copy
-		case string(k) == "x-amz-trailer-signature:":
-			n, err := hex.Decode(v[:], bytes.TrimRight(v[:], "*\n"))
-			if err != nil {
-				c.err = errMalformedChunkedEncoding
-				return c.err
-			}
-
-			if err = c.streamSigner.VerifyTrailerSignature(c.buffer, c.requestTime, v[:n]); err != nil {
-				c.err = fmt.Errorf("%w: %s", errs.GetAPIError(errs.ErrSignatureDoesNotMatch), err.Error())
-				return c.err
-			}
-		default:
-			c.err = errMalformedTrailerHeaders
-			return c.err
-		}
-
-		c.trailers[string(k[:len(k)-1])] = string(v[:len(v)-1])
-	}
-
-	return nil
-}
-
-func (c *s3v4aChunkReader) TrailerHeaders() map[string]string {
-	return c.trailers
-}
-
-func newSignV4aChunkedReader(req *http.Request) (*s3v4aChunkReader, error) {
+func newSignV4aChunkedReader(req *http.Request) (io.ReadCloser, error) {
 	box, err := middleware.GetBoxData(req.Context())
 	if err != nil {
 		return nil, errs.GetAPIError(errs.ErrAuthorizationHeaderMalformed)
@ -263,18 +200,10 @@ func newSignV4aChunkedReader(req *http.Request) (*s3v4aChunkReader, error) {

newStreamSigner := v4a.NewStreamSigner(creds, "s3", seed)

var trailerHeaders []string
trailer := req.Header.Get("x-amz-trailer")
if trailer != "" {
trailerHeaders = strings.Split(trailer, ";")
}

return &s3v4aChunkReader{
reader: bufio.NewReader(req.Body),
streamSigner: newStreamSigner,
requestTime: reqTime,
buffer: make([]byte, 64*1024),
trailerHeaders: trailerHeaders,
trailers: make(map[string]string, len(trailerHeaders)),
reader: bufio.NewReader(req.Body),
streamSigner: newStreamSigner,
requestTime: reqTime,
buffer: make([]byte, 64*1024),
}, nil
}

@ -137,3 +137,17 @@ func parseRange(s string) (*layer.RangeParams, error) {
End: values[1],
}, nil
}

func nameFromArn(arn string) string {
pts := strings.Split(arn, "/")
return pts[len(pts)-1]
}

func (h *handler) getMFAHeader(r *http.Request) (string, string, error) {
parts := strings.Split(r.Header.Get(api.AmzMFA), " ")
if len(parts) != 2 {
return "", "", fmt.Errorf("%w: invalid mfa header", apierr.GetAPIError(apierr.ErrMFAAuthNeeded))
}

return parts[0], parts[1], nil
}
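For context, S3 clients send the MFA device serial and the current TOTP code as one space-separated value; a small sketch of what getMFAHeader and nameFromArn extract from such a value (the ARN and code below are made-up placeholders):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical header value illustrating the expected layout:
	// "<mfa-device-serial> <totp-code>", space-separated, as in AWS S3.
	header := "arn:aws:iam::123456789012:mfa/alice 123456"

	parts := strings.Split(header, " ") // what getMFAHeader returns as (serial, token)
	serial, token := parts[0], parts[1]

	pts := strings.Split(serial, "/") // nameFromArn: the device name is the last segment
	fmt.Println(pts[len(pts)-1], token) // alice 123456
}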
@ -7,6 +7,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
"github.com/pquerna/otp/totp"
)

func (h *handler) PutBucketVersioningHandler(w http.ResponseWriter, r *http.Request) {

@ -31,14 +32,47 @@ func (h *handler) PutBucketVersioningHandler(w http.ResponseWriter, r *http.Requ
return
}

// settings pointer is stored in the cache, so modify a copy of the settings
newSettings := *settings

if len(configuration.MfaDelete) > 0 {
serialNumber, token, err := h.getMFAHeader(r)
if err != nil {
h.logAndSendError(ctx, w, "invalid x-amz-mfa header", reqInfo, errors.GetAPIError(errors.ErrBadRequest))
return
}

name := nameFromArn(serialNumber)
device, err := h.mfa.GetMFADevice(ctx, reqInfo.Namespace, name)
if err != nil {
h.logAndSendError(ctx, w, "get mfa device", reqInfo, err)
return
}

ok := totp.Validate(token, device.Key.Secret())
if !ok {
h.logAndSendError(ctx, w, "validation error", reqInfo, nil)
return
}

switch configuration.MfaDelete {
case data.MFADeleteEnabled:
newSettings.Versioning.MFADeleteStatus = data.MFADeleteEnabled
newSettings.Versioning.MFASerialNumber = serialNumber
case data.MFADeleteDisabled:
newSettings.Versioning.MFADeleteStatus = data.MFADeleteDisabled
default:
h.logAndSendError(ctx, w, "failed to get mfa configuration", reqInfo, nil)
return
}
}

if configuration.Status != data.VersioningEnabled && configuration.Status != data.VersioningSuspended {
h.logAndSendError(ctx, w, "invalid versioning configuration", reqInfo, errors.GetAPIError(errors.ErrMalformedXML))
return
}

// settings pointer is stored in the cache, so modify a copy of the settings
newSettings := *settings
newSettings.Versioning = configuration.Status
newSettings.Versioning.VersioningStatus = configuration.Status

p := &layer.PutSettingsParams{
BktInfo: bktInfo,
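For context, the MfaDelete branch above is driven by a standard S3 PutBucketVersioning request; a minimal sketch of such a request, assuming the gateway listens on localhost:8080 (the bucket name, ARN, and code are placeholders):

package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	// Body that supplies configuration.Status and configuration.MfaDelete;
	// the handler above additionally requires the X-Amz-Mfa header when
	// MfaDelete is present.
	body := `<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
  <Status>Enabled</Status>
  <MfaDelete>Enabled</MfaDelete>
</VersioningConfiguration>`

	req, err := http.NewRequest(http.MethodPut, "http://localhost:8080/bucket?versioning", bytes.NewBufferString(body))
	if err != nil {
		panic(err)
	}
	req.Header.Set("X-Amz-Mfa", "arn:aws:iam::123456789012:mfa/alice 123456")
	fmt.Println(req.Method, req.URL.String())
}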
@ -80,7 +114,7 @@ func (h *handler) GetBucketVersioningHandler(w http.ResponseWriter, r *http.Requ
func formVersioningConfiguration(settings *data.BucketSettings) *VersioningConfiguration {
res := &VersioningConfiguration{}
if !settings.Unversioned() {
res.Status = settings.Versioning
res.Status = settings.Versioning.VersioningStatus
}

return res

@ -15,6 +15,7 @@ const (
AmzCopySource = "X-Amz-Copy-Source"
AmzCopySourceRange = "X-Amz-Copy-Source-Range"
AmzDate = "X-Amz-Date"
AmzMFA = "X-Amz-Mfa"

LastModified = "Last-Modified"
Date = "Date"

@ -94,11 +95,8 @@ const (

DefaultLocationConstraint = "default"

StreamingContentSHA256 = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD"
StreamingContentSHA256Trailer = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD-TRAILER"
StreamingContentV4aSHA256 = "STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD"
StreamingContentV4aSHA256Trailer = "STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD-TRAILER"
StreamingUnsignedPayloadTrailer = "STREAMING-UNSIGNED-PAYLOAD-TRAILER"
StreamingContentSHA256 = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD"
StreamingContentV4aSHA256 = "STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD"

DefaultStorageClass = "STANDARD"
)

@ -132,8 +130,6 @@ var SystemMetadata = map[string]struct{}{
func IsSignedStreamingV4(r *http.Request) (string, bool) {
shaHeader := r.Header.Get(AmzContentSha256)
return shaHeader,
(shaHeader == StreamingContentSHA256 || shaHeader == StreamingContentSHA256Trailer ||
shaHeader == StreamingContentV4aSHA256 || shaHeader == StreamingContentV4aSHA256Trailer ||
shaHeader == StreamingUnsignedPayloadTrailer) &&
(shaHeader == StreamingContentSHA256 || shaHeader == StreamingContentV4aSHA256) &&
r.Method == http.MethodPut
}

@ -76,7 +76,6 @@ var _ frostfs.FrostFS = (*TestFrostFS)(nil)

type TestFrostFS struct {
objects map[string]*object.Object
copiesNumbers map[string][]uint32
objectErrors map[string]error
objectPutErrors map[string]error
containers map[string]*container.Container

@ -89,7 +88,6 @@ type TestFrostFS struct {
func NewTestFrostFS(key *keys.PrivateKey) *TestFrostFS {
return &TestFrostFS{
objects: make(map[string]*object.Object),
copiesNumbers: make(map[string][]uint32),
objectErrors: make(map[string]error),
objectPutErrors: make(map[string]error),
containers: make(map[string]*container.Container),

@ -128,10 +126,6 @@ func (t *TestFrostFS) Objects() []*object.Object {
return res
}

func (t *TestFrostFS) CopiesNumbers(addr string) []uint32 {
return t.copiesNumbers[addr]
}

func (t *TestFrostFS) ObjectExists(objID oid.ID) bool {
for _, obj := range t.objects {
if id, _ := obj.ID(); id.Equals(objID) {

@ -352,8 +346,6 @@ func (t *TestFrostFS) CreateObject(ctx context.Context, prm frostfs.PrmObjectCre

addr := newAddress(cnrID, objID)
t.objects[addr.EncodeToString()] = obj
t.copiesNumbers[addr.EncodeToString()] = prm.CopiesNumber

return &frostfs.CreateObjectResult{
ObjectID: objID,
CreationEpoch: t.currentEpoch - 1,

@ -12,8 +12,11 @@ import (
func TestObjectLockAttributes(t *testing.T) {
tc := prepareContext(t)
err := tc.layer.PutBucketSettings(tc.ctx, &PutSettingsParams{
BktInfo: tc.bktInfo,
Settings: &data.BucketSettings{Versioning: data.VersioningEnabled},
BktInfo: tc.bktInfo,
Settings: &data.BucketSettings{Versioning: data.Versioning{
VersioningStatus: data.VersioningEnabled,
MFADeleteStatus: data.MFADeleteDisabled,
}},
})
require.NoError(t, err)

@ -58,9 +58,10 @@ type (
}

CreateMultipartParams struct {
Info *UploadInfoParams
Header map[string]string
Data *UploadData
Info *UploadInfoParams
Header map[string]string
Data *UploadData
CopiesNumbers []uint32
}

UploadData struct {

@ -74,7 +75,6 @@ type (
Reader io.Reader
ContentMD5 string
ContentSHA256Hash string
CopiesNumbers []uint32
}

UploadCopyParams struct {

@ -85,13 +85,11 @@ type (
SrcEncryption encryption.Params
PartNumber int
Range *RangeParams
CopiesNumbers []uint32
}

CompleteMultipartParams struct {
Info *UploadInfoParams
Parts []*CompletedPart
CopiesNumbers []uint32
Info *UploadInfoParams
Parts []*CompletedPart
}

CompletedPart struct {

@ -167,6 +165,7 @@ func (n *Layer) CreateMultipartUpload(ctx context.Context, p *CreateMultipartPar
Owner: n.gateOwner,
Created: TimeNow(ctx),
Meta: make(map[string]string, metaSize),
CopiesNumbers: p.CopiesNumbers,
CreationEpoch: networkInfo.CurrentEpoch(),
}

@ -223,7 +222,7 @@ func (n *Layer) uploadPart(ctx context.Context, multipartInfo *data.MultipartInf
Attributes: make([][2]string, 2),
Payload: p.Reader,
CreationTime: TimeNow(ctx),
CopiesNumber: p.CopiesNumbers,
CopiesNumber: multipartInfo.CopiesNumbers,
}

decSize := p.Size

@ -373,11 +372,10 @@ func (n *Layer) UploadPartCopy(ctx context.Context, p *UploadCopyParams) (*data.
}

params := &UploadPartParams{
Info: p.Info,
PartNumber: p.PartNumber,
Size: size,
Reader: objPayload,
CopiesNumbers: p.CopiesNumbers,
Info: p.Info,
PartNumber: p.PartNumber,
Size: size,
Reader: objPayload,
}

return n.uploadPart(ctx, multipartInfo, params)

@ -474,7 +472,7 @@ func (n *Layer) CompleteMultipartUpload(ctx context.Context, p *CompleteMultipar
Header: initMetadata,
Size: &multipartObjetSize,
Encryption: p.Info.Encryption,
CopiesNumbers: p.CopiesNumbers,
CopiesNumbers: multipartInfo.CopiesNumbers,
CompleteMD5Hash: hex.EncodeToString(md5Hash.Sum(nil)) + "-" + strconv.Itoa(len(p.Parts)),
})
if err != nil {

@ -216,7 +216,10 @@ func (n *Layer) GetBucketSettings(ctx context.Context, bktInfo *data.BucketInfo)
if !errors.Is(err, tree.ErrNodeNotFound) {
return nil, err
}
settings = &data.BucketSettings{Versioning: data.VersioningUnversioned}
settings = &data.BucketSettings{Versioning: data.Versioning{
VersioningStatus: data.VersioningUnversioned,
MFADeleteStatus: data.MFADeleteDisabled,
}}
n.reqLogger(ctx).Debug(logs.BucketSettingsNotFoundUseDefaults)
}

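The hunk above, together with the handler changes, implies that data.BucketSettings.Versioning changed from a plain status value to a struct; a rough sketch of the presumed type, inferred only from the fields this diff touches (the real definition may differ):

// Sketch of the presumed data.Versioning type; only the fields this diff
// references are shown, and field types are assumptions.
package data

type Versioning struct {
	VersioningStatus string // VersioningEnabled / VersioningSuspended / VersioningUnversioned
	MFADeleteStatus  string // MFADeleteEnabled / MFADeleteDisabled
	MFASerialNumber  string // ARN of the device that enabled MFA delete
}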
@ -196,8 +196,11 @@ func prepareContext(t *testing.T, cachesConfig ...*CachesConfig) *testContext {
func TestSimpleVersioning(t *testing.T) {
tc := prepareContext(t)
err := tc.layer.PutBucketSettings(tc.ctx, &PutSettingsParams{
BktInfo: tc.bktInfo,
Settings: &data.BucketSettings{Versioning: data.VersioningEnabled},
BktInfo: tc.bktInfo,
Settings: &data.BucketSettings{Versioning: data.Versioning{
VersioningStatus: data.VersioningEnabled,
MFADeleteStatus: data.MFADeleteDisabled,
}},
})
require.NoError(t, err)

@ -234,7 +237,10 @@ func TestSimpleNoVersioning(t *testing.T) {

func TestVersioningDeleteObject(t *testing.T) {
tc := prepareContext(t)
settings := &data.BucketSettings{Versioning: data.VersioningEnabled}
settings := &data.BucketSettings{Versioning: data.Versioning{
VersioningStatus: data.VersioningEnabled,
MFADeleteStatus: data.MFADeleteDisabled,
}}
err := tc.layer.PutBucketSettings(tc.ctx, &PutSettingsParams{
BktInfo: tc.bktInfo,
Settings: settings,

@ -256,7 +262,10 @@ func TestGetUnversioned(t *testing.T) {
objContent := []byte("content obj1 v1")
objInfo := tc.putObject(objContent)

settings := &data.BucketSettings{Versioning: data.VersioningUnversioned}
settings := &data.BucketSettings{Versioning: data.Versioning{
VersioningStatus: data.VersioningUnversioned,
MFADeleteStatus: data.MFADeleteDisabled,
}}
err := tc.layer.PutBucketSettings(tc.ctx, &PutSettingsParams{
BktInfo: tc.bktInfo,
Settings: settings,

@ -270,7 +279,10 @@ func TestGetUnversioned(t *testing.T) {

func TestVersioningDeleteSpecificObjectVersion(t *testing.T) {
tc := prepareContext(t)
settings := &data.BucketSettings{Versioning: data.VersioningEnabled}
settings := &data.BucketSettings{Versioning: data.Versioning{
VersioningStatus: data.VersioningEnabled,
MFADeleteStatus: data.MFADeleteDisabled,
}}
err := tc.layer.PutBucketSettings(tc.ctx, &PutSettingsParams{
BktInfo: tc.bktInfo,
Settings: settings,

@ -36,6 +36,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/policy/contract"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/services"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/mfa"
internalnet "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/net"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/version"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/wallet"

@ -595,7 +596,7 @@ func (s *appSettings) TombstoneLifetime() uint64 {

func (a *App) initAPI(ctx context.Context) {
a.initLayer(ctx)
a.initHandler()
a.initHandler(ctx)
}

func (a *App) initMetrics() {

@ -783,8 +784,9 @@ func (a *App) initPools(ctx context.Context) {
prm.SetNodeDialTimeout(connTimeout)
prmTree.SetNodeDialTimeout(connTimeout)

prm.SetNodeStreamTimeout(fetchStreamTimeout(a.config(), cfgStreamTimeout))
prmTree.SetNodeStreamTimeout(fetchStreamTimeout(a.config(), cfgTreeStreamTimeout))
streamTimeout := fetchStreamTimeout(a.config())
prm.SetNodeStreamTimeout(streamTimeout)
prmTree.SetNodeStreamTimeout(streamTimeout)

healthCheckTimeout := fetchHealthCheckTimeout(a.config())
prm.SetHealthcheckTimeout(healthCheckTimeout)

@ -1133,15 +1135,51 @@ func getFrostfsIDCacheConfig(v *viper.Viper, l *zap.Logger) *cache.Config {
return cacheCfg
}

func (a *App) initHandler() {
func (a *App) initHandler(ctx context.Context) {
var err error

a.api, err = handler.New(a.log, a.obj, a.settings, a.policyStorage, a.frostfsid)
var mfaCli *mfa.MFA
if a.cfg.IsSet(cfgContainersMFA) {
mfaCnrInfo, err := a.fetchContainerInfo(ctx, cfgContainersMFA)
if err != nil {
a.log.Fatal(logs.CouldNotFetchMFAContainerInfo, zap.Error(err))
}

mfaConfig, err := a.fetchMFAConfig(mfaCnrInfo.CID)
if err != nil {
a.log.Fatal(logs.CouldNotInitMFAClient, zap.Error(err))
}

mfaCli, err = mfa.NewMFA(mfaConfig)
if err != nil {
a.log.Fatal(logs.CouldNotInitMFAClient, zap.Error(err))
}
}

a.api, err = handler.New(a.log, a.obj, a.settings, a.policyStorage, a.frostfsid, mfaCli)
if err != nil {
a.log.Fatal(logs.CouldNotInitializeAPIHandler, zap.Error(err))
}
}

func (a *App) fetchMFAConfig(id cid.ID) (mfa.Config, error) {
mfaFrostFS := frostfs.NewMFAFrostFS(frostfs.MFAFrostFSConfig{
Pool: a.pool,
TreePool: a.treePool,
Key: a.key,
Logger: a.log,
})

config := mfa.Config{
Storage: mfaFrostFS,
Key: a.key,
Container: id,
Logger: a.log,
}

return config, nil
}

func (a *App) getServer(address string) Server {
for i := range a.servers {
if a.servers[i].Address() == address {

@ -123,7 +123,6 @@ const ( // Settings.
// Pool config.
cfgConnectTimeout = "connect_timeout"
cfgStreamTimeout = "stream_timeout"
cfgTreeStreamTimeout = "tree_stream_timeout"
cfgHealthcheckTimeout = "healthcheck_timeout"
cfgRebalanceInterval = "rebalance_interval"
cfgPoolErrorThreshold = "pool_error_threshold"

@ -223,6 +222,7 @@ const ( // Settings.
cfgContainersCORS = "containers.cors"
cfgContainersLifecycle = "containers.lifecycle"
cfgContainersAccessBox = "containers.accessbox"
cfgContainersMFA = "containers.mfa"

// Multinet.
cfgMultinetEnabled = "multinet.enabled"

@ -364,8 +364,8 @@ func fetchReconnectInterval(cfg *viper.Viper) time.Duration {
return reconnect
}

func fetchStreamTimeout(cfg *viper.Viper, cfgEntry string) time.Duration {
streamTimeout := cfg.GetDuration(cfgEntry)
func fetchStreamTimeout(cfg *viper.Viper) time.Duration {
streamTimeout := cfg.GetDuration(cfgStreamTimeout)
if streamTimeout <= 0 {
streamTimeout = defaultStreamTimeout
}

@ -80,10 +80,8 @@ S3_GW_PROMETHEUS_ADDRESS=localhost:8086

# Timeout to connect to a node
S3_GW_CONNECT_TIMEOUT=10s
# Timeout for individual operations in object pool streaming RPC.
# Timeout for individual operations in streaming RPC.
S3_GW_STREAM_TIMEOUT=10s
# Timeout for individual operations in tree pool streaming RPC.
S3_GW_TREE_STREAM_TIMEOUT=10s
# Timeout to check node health during rebalance.
S3_GW_HEALTHCHECK_TIMEOUT=15s
# Interval to check node health

@ -264,6 +262,7 @@ S3_GW_RETRY_STRATEGY=exponential
# Containers properties
S3_GW_CONTAINERS_CORS=AZjLTXfK4vs4ovxMic2xEJKSymMNLqdwq9JT64ASFCRj
S3_GW_CONTAINERS_LIFECYCLE=AZjLTXfK4vs4ovxMic2xEJKSymMNLqdwq9JT64ASFCRj
S3_GW_CONTAINERS_MFA=HV9h4zbp7Dti2VXef2oFSsBSRyJUR6NfMeswuv12fjZu

# Multinet properties
# Enable multinet support

@ -100,10 +100,8 @@ tracing:

# Timeout to connect to a node
connect_timeout: 10s
# Timeout for individual operations in object pool streaming RPC.
# Timeout for individual operations in streaming RPC.
stream_timeout: 10s
# Timeout for individual operations in tree pool streaming RPC.
tree_stream_timeout: 10s
# Timeout to check node health during rebalance
healthcheck_timeout: 15s
# Interval to check node health

@ -312,6 +310,7 @@ retry:
containers:
cors: AZjLTXfK4vs4ovxMic2xEJKSymMNLqdwq9JT64ASFCRj
lifecycle: AZjLTXfK4vs4ovxMic2xEJKSymMNLqdwq9JT64ASFCRj
mfa: HV9h4zbp7Dti2VXef2oFSsBSRyJUR6NfMeswuv12fjZu

# Multinet properties
multinet:

@ -330,7 +330,7 @@ func file_creds_accessbox_accessbox_proto_rawDescGZIP() []byte {
}

var file_creds_accessbox_accessbox_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
var file_creds_accessbox_accessbox_proto_goTypes = []any{
var file_creds_accessbox_accessbox_proto_goTypes = []interface{}{
(*AccessBox)(nil), // 0: accessbox.AccessBox
(*Tokens)(nil), // 1: accessbox.Tokens
(*AccessBox_Gate)(nil), // 2: accessbox.AccessBox.Gate

@ -352,7 +352,7 @@ func file_creds_accessbox_accessbox_proto_init() {
return
}
if !protoimpl.UnsafeEnabled {
file_creds_accessbox_accessbox_proto_msgTypes[0].Exporter = func(v any, i int) any {
file_creds_accessbox_accessbox_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*AccessBox); i {
case 0:
return &v.state

@ -364,7 +364,7 @@ func file_creds_accessbox_accessbox_proto_init() {
return nil
}
}
file_creds_accessbox_accessbox_proto_msgTypes[1].Exporter = func(v any, i int) any {
file_creds_accessbox_accessbox_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Tokens); i {
case 0:
return &v.state

@ -376,7 +376,7 @@ func file_creds_accessbox_accessbox_proto_init() {
return nil
}
}
file_creds_accessbox_accessbox_proto_msgTypes[2].Exporter = func(v any, i int) any {
file_creds_accessbox_accessbox_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*AccessBox_Gate); i {
case 0:
return &v.state

@ -388,7 +388,7 @@ func file_creds_accessbox_accessbox_proto_init() {
return nil
}
}
file_creds_accessbox_accessbox_proto_msgTypes[3].Exporter = func(v any, i int) any {
file_creds_accessbox_accessbox_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*AccessBox_ContainerPolicy); i {
case 0:
return &v.state

@ -215,7 +215,6 @@ resolve_order:

connect_timeout: 10s
stream_timeout: 10s
tree_stream_timeout: 10s
healthcheck_timeout: 15s
rebalance_interval: 60s
pool_error_threshold: 100

@ -238,8 +237,7 @@ source_ip_header: "Source-Ip"
| `rpc_endpoint` | `string` | no | | The address of the RPC host to which the gateway connects to resolve bucket names and interact with frostfs contracts (required to use the `nns` resolver and `frostfsid` contract). |
| `resolve_order` | `[]string` | yes | `[dns]` | Order of bucket name resolvers to use. Available resolvers: `dns`, `nns`. |
| `connect_timeout` | `duration` | no | `10s` | Timeout to connect to a node. |
| `stream_timeout` | `duration` | no | `10s` | Timeout for individual operations in object pool streaming RPC. |
| `tree_stream_timeout` | `duration` | no | `10s` | Timeout for individual operations in tree pool streaming RPC. |
| `stream_timeout` | `duration` | no | `10s` | Timeout for individual operations in streaming RPC. |
| `healthcheck_timeout` | `duration` | no | `15s` | Timeout to check node health during rebalance. |
| `rebalance_interval` | `duration` | no | `60s` | Interval to check node health. |
| `pool_error_threshold` | `uint32` | no | `100` | The number of errors on connection after which node is considered as unhealthy. |

@ -824,6 +822,7 @@ containers:
cors: AZjLTXfK4vs4ovxMic2xEJKSymMNLqdwq9JT64ASFCRj
lifecycle: AZjLTXfK4vs4ovxMic2xEJKSymMNLqdwq9JT64ASFCRj
accessbox: ExnA1gSY3kzgomi2wJxNyWo1ytWv9VAKXRE55fNXEPL2
mfa: HV9h4zbp7Dti2VXef2oFSsBSRyJUR6NfMeswuv12fjZu
```

| Parameter | Type | SIGHUP reload | Default value | Description |

@ -831,6 +830,7 @@ containers:
| `cors` | `string` | no | | Container name for CORS configurations. If not set, container of the bucket is used. |
| `lifecycle` | `string` | no | | Container name for lifecycle configurations. If not set, container of the bucket is used. |
| `accessbox` | `string` | no | | Container name to look up the accessbox when custom AWS credentials are used. If not set, custom credentials are not supported. |
| `mfa` | `string` | no | | Container name for virtual MFA devices. If not set, MFA delete is not supported. |

# `vhs` section

2
go.mod
@ -21,6 +21,7 @@ require (
github.com/mr-tron/base58 v1.2.0
github.com/nspcc-dev/neo-go v0.106.3
github.com/panjf2000/ants/v2 v2.5.0
github.com/pquerna/otp v1.4.0
github.com/prometheus/client_golang v1.19.0
github.com/prometheus/client_model v0.5.0
github.com/spf13/cobra v1.8.1

@ -64,6 +65,7 @@ require (
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.6 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.30.6 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc // indirect
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect

5
go.sum
@ -104,6 +104,8 @@ github.com/bits-and-blooms/bitset v1.8.0 h1:FD+XqgOZDUxxZ8hzoBFuV9+cGWY9CslN6d5M
github.com/bits-and-blooms/bitset v1.8.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
github.com/bluele/gcache v0.0.2 h1:WcbfdXICg7G/DGBh1PFfcirkWOQV+v077yF1pSy3DGw=
github.com/bluele/gcache v0.0.2/go.mod h1:m15KV+ECjptwSPxKhOhQoAFQVtUFjTVkc3H8o0t/fp0=
github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc h1:biVzkmvwrH8WK8raXaxBx6fRVTlJILwEwQGL1I/ByEI=
github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=

@ -309,6 +311,8 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pquerna/otp v1.4.0 h1:wZvl1TIVxKRThZIBiwOOHOGP/1+nZyWBil9Y2XNEDzg=
github.com/pquerna/otp v1.4.0/go.mod h1:dkJfzwRKNiegxyNb54X/3fLwhCynbMspSyWKnvi1AEg=
github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU=
github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=

@ -345,6 +349,7 @@ github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpE
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=

183
internal/frostfs/mfa.go
Normal file
@ -0,0 +1,183 @@
package frostfs

import (
"context"
"errors"
"fmt"
"io"
"strings"

"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/frostfs"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/mfa"
apitree "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/tree"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
treepool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"go.uber.org/zap"
)

// MFAFrostFS is a mediator which implements mfa.Storage through pool.Pool and treepool.Pool.
type MFAFrostFS struct {
frostFS *FrostFS
treePool *treepool.Pool
log *zap.Logger
}

type MFAFrostFSConfig struct {
Pool *pool.Pool
TreePool *treepool.Pool
Key *keys.PrivateKey
Logger *zap.Logger
}

type treeNode struct {
ID uint64
ParentID uint64
TimeStamp uint64
Meta map[string]string
}

type multiSystemNode struct {
// the first element is latest
nodes []*treeNode
}

const (
fileNameKey = "FileName"
mfaTreeName = "mfa"
)

// NewMFAFrostFS creates a new MFAFrostFS using the provided pool.Pool and treepool.Pool.
func NewMFAFrostFS(cfg MFAFrostFSConfig) *MFAFrostFS {
return &MFAFrostFS{
frostFS: NewFrostFS(cfg.Pool, cfg.Key),
treePool: cfg.TreePool,
log: cfg.Logger,
}
}

func (m *MFAFrostFS) GetObject(ctx context.Context, addr oid.Address) ([]byte, error) {
res, err := m.frostFS.GetObject(ctx, frostfs.PrmObjectGet{
Container: addr.Container(),
Object: addr.Object(),
})
if err != nil {
return nil, err
}

defer func() {
if closeErr := res.Payload.Close(); closeErr != nil {
// TODO add log
// middleware.GetReqLog(ctx).Warn(logs.CloseMFAObjectPayload, zap.Error(closeErr))
middleware.GetReqLog(ctx).Warn("logs.CloseMFAObjectPayload", zap.Error(closeErr))
}
}()

return io.ReadAll(res.Payload)
}

func (m *MFAFrostFS) GetTreeNode(ctx context.Context, cnrID cid.ID, name string) (*mfa.TreeMultiNode, error) {
multiNode, err := m.getTreeNode(ctx, cnrID, pathFromName(name))
if err != nil {
return nil, fmt.Errorf("couldn't get node: %w", err)
}

return multiNode.ToMFAMultiNode(), nil
}

func (m *MFAFrostFS) getTreeNode(ctx context.Context, cnrID cid.ID, path []string) (*multiSystemNode, error) {
prmGetNodes := treepool.GetNodesParams{
CID: cnrID,
TreeID: mfaTreeName,
Path: path,
PathAttribute: fileNameKey,
LatestOnly: true,
AllAttrs: true,
}

nodes, err := m.treePool.GetNodes(ctx, prmGetNodes)
if err != nil {
if errors.Is(err, treepool.ErrNodeNotFound) {
return nil, fmt.Errorf("%s: %s", "mfa.ErrTreeNodeNotFound", err.Error())
// return nil, fmt.Errorf("%w: %s", mfa.ErrTreeNodeNotFound, err.Error())
}
return nil, fmt.Errorf("get nodes: %w", err)
}

if len(nodes) == 0 {
// return nil, mfa.ErrTreeNodeNotFound
return nil, errors.New("mfa tree node not found")
}
// if len(nodes) != 1 {
// // m.log.Warn("logs.FoundMultiNode", zap.Strings("path", path))
// }

return newMultiNode(nodes)
}

func newMultiNode(nodes []*apitree.GetNodeByPathResponseInfo) (*multiSystemNode, error) {
var (
index int
maxTimestamp uint64
)

if len(nodes) == 0 {
return nil, errors.New("multi node must have at least one node")
}

treeNodes := make([]*treeNode, len(nodes))

for i, node := range nodes {
treeNodes[i] = newTreeNode(node)

if maxTimestamp < node.GetTimestamp() {
index = i
maxTimestamp = node.GetTimestamp()
}
}

treeNodes[0], treeNodes[index] = treeNodes[index], treeNodes[0]

return &multiSystemNode{
nodes: treeNodes,
}, nil
}

func (m *multiSystemNode) ToMFAMultiNode() *mfa.TreeMultiNode {
res := &mfa.TreeMultiNode{
Current: mfa.TreeNode{Meta: m.nodes[0].Meta},
Old: make([]*mfa.TreeNode, len(m.nodes[1:])),
}

for i, node := range m.nodes[1:] {
res.Old[i] = &mfa.TreeNode{Meta: node.Meta}
}

return res
}

func newTreeNode(nodeInfo *apitree.GetNodeByPathResponseInfo) *treeNode {
tNode := &treeNode{
ID: nodeInfo.GetNodeID(),
ParentID: nodeInfo.GetParentID(),
TimeStamp: nodeInfo.GetTimestamp(),
Meta: make(map[string]string, len(nodeInfo.GetMeta())),
}

for _, kv := range nodeInfo.GetMeta() {
tNode.Meta[kv.GetKey()] = string(kv.GetValue())
}

return tNode
}

// pathFromName splits name by '/'.
func pathFromName(name string) []string {
return strings.Split(name, "/")
}
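For orientation, a device record is addressed in the "mfa" tree by a two-segment path derived from its name; a tiny sketch of the mapping pathFromName performs (the namespace and device name are placeholders):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Mirrors pathFromName above: "ns/alice" becomes the tree path
	// ["ns", "alice"], matched against the FileName attribute.
	path := strings.Split("ns/alice", "/")
	fmt.Println(path) // [ns alice]
}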

@ -164,6 +164,8 @@ const (
CouldntDeleteLifecycleObject = "couldn't delete lifecycle configuration object"
CouldntCacheLifecycleConfiguration = "couldn't cache lifecycle configuration"
CouldNotFetchLifecycleContainerInfo = "couldn't fetch lifecycle container info"
CouldNotFetchMFAContainerInfo = "couldn't fetch mfa container info"
CouldNotInitMFAClient = "couldn't init MFA client"
BucketLifecycleNodeHasMultipleIDs = "bucket lifecycle node has multiple ids"
GetBucketLifecycle = "get bucket lifecycle"
WarnDuplicateNamespaceVHS = "duplicate namespace with enabled VHS, config value skipped"

161
internal/mfa/mfa.go
Normal file
@ -0,0 +1,161 @@
package mfa

import (
"context"
"errors"
"fmt"
"strings"

"github.com/pquerna/otp"

cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"go.uber.org/zap"
)

const (
FilePathKey = "FilePath"
OIDKey = "OID"
EnableDateKey = "EnableDate"
EnabledKey = "EnabledKey"
UserIDKey = "UserIDKey"
TagPrefix = "tag-"
)

type (
Storage interface {
GetObject(context.Context, oid.Address) ([]byte, error)
GetTreeNode(ctx context.Context, cnrID cid.ID, name string) (*TreeMultiNode, error)
}

TreeNode struct {
Meta map[string]string
}

TreeMultiNode struct {
Current TreeNode
Old []*TreeNode
}
)

type Device struct {
Namespace string
Name string
OID oid.ID
Meta map[string]string
}

type SecretDevice struct {
Device
Key *otp.Key
}

type MFA struct {
storage Storage
iamKey *keys.PrivateKey
container cid.ID
logger *zap.Logger
// settings Settings
}

type Config struct {
Storage Storage
Key *keys.PrivateKey
Container cid.ID
Logger *zap.Logger
// Settings Settings
}

func NewMFA(cfg Config) (*MFA, error) {
if cfg.Storage == nil {
return nil, errors.New("mfa storage is nil")
}
if cfg.Logger == nil {
return nil, errors.New("mfa logger is nil")
}
if cfg.Key == nil {
return nil, errors.New("mfa iam key is nil")
}

return &MFA{
storage: cfg.Storage,
container: cfg.Container,
iamKey: cfg.Key,
logger: cfg.Logger,
}, nil
}

type Settings interface {
ServicePubKeys() []*keys.PublicKey
}

func (m *MFA) GetMFADevice(ctx context.Context, ns, mfaName string) (*SecretDevice, error) {
node, err := m.storage.GetTreeNode(ctx, m.container, getTreePath(ns, mfaName))
if err != nil {
return nil, fmt.Errorf("get mfa nodes: %w", err)
}

var objID oid.ID
if err = objID.DecodeString(node.Current.Meta[OIDKey]); err != nil {
return nil, fmt.Errorf("decode oid '%s': %w", node.Current.Meta[OIDKey], err)
}

var addr oid.Address
addr.SetContainer(m.container)
addr.SetObject(objID)

boxData, err := m.storage.GetObject(ctx, addr)
if err != nil {
return nil, fmt.Errorf("get object '%s': %w", addr.EncodeToString(), err)
}

mfaBox := new(MFABox)
if err = mfaBox.Unmarshal(boxData); err != nil {
return nil, fmt.Errorf("unmarshal box data: %w", err)
}

secrets, err := UnpackMFABox(m.iamKey, mfaBox)
if err != nil {
return nil, fmt.Errorf("unpack mfa box: %w", err)
}

key, err := otp.NewKeyFromURL(secrets.GetMFAURL())
if err != nil {
return nil, err
}

dev, err := newDevice(&node.Current)
if err != nil {
return nil, err
}

return &SecretDevice{
Device: *dev,
Key: key,
}, nil
}

func newDevice(node *TreeNode) (*Device, error) {
meta := node.Meta
filepathArr := strings.Split(meta[FilePathKey], "/")
if len(filepathArr) != 2 {
return nil, fmt.Errorf("invalid device filepath: '%s'", meta[FilePathKey])
}

var objID oid.ID
if err := objID.DecodeString(meta[OIDKey]); err != nil {
return nil, fmt.Errorf("decode oid '%s': %w", meta[OIDKey], err)
}

return &Device{
Namespace: filepathArr[0],
Name: filepathArr[1],
OID: objID,
Meta: meta,
}, nil
}

func getTreePath(ns, mfaName string) string {
return ns + "/" + mfaName
}
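Putting the pieces together, a handler is expected to resolve a device and then check the submitted code; a minimal sketch of that flow under the same assumptions as the versioning handler above (namespace, name, and token are placeholders):

package main

import (
	"context"
	"errors"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/mfa"
	"github.com/pquerna/otp/totp"
)

// checkMFA is a hypothetical helper mirroring the handler flow above:
// resolve the device by namespace and name, then validate the TOTP code
// against the device's secret.
func checkMFA(ctx context.Context, cli *mfa.MFA, ns, name, token string) error {
	device, err := cli.GetMFADevice(ctx, ns, name)
	if err != nil {
		return err
	}
	if !totp.Validate(token, device.Key.Secret()) {
		return errors.New("mfa token mismatch")
	}
	return nil
}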
247
internal/mfa/mfabox.go
Normal file
@ -0,0 +1,247 @@
package mfa

import (
"bytes"
"crypto/cipher"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/sha256"
"errors"
"fmt"
"io"

"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/pquerna/otp"
"golang.org/x/crypto/chacha20poly1305"
"golang.org/x/crypto/hkdf"
"google.golang.org/protobuf/proto"
)

const (
secretLength = 32
saltLength = 16
)

// Marshal returns the wire-format of MFABox.
func (x *MFABox) Marshal() ([]byte, error) {
return proto.Marshal(x)
}

// Unmarshal parses the wire-format message and puts the data into x.
func (x *MFABox) Unmarshal(data []byte) error {
return proto.Unmarshal(data, x)
}

func PackMFABox(key *otp.Key, iamSvcKeys []*keys.PublicKey, useSalt bool) (*MFABox, error) {
if len(iamSvcKeys) == 0 {
return nil, errors.New("list of public keys to encrypt box must not be empty")
}

symmetricKey, err := generateRandomBytes(secretLength)
if err != nil {
return nil, fmt.Errorf("generate symmetric key: %w", err)
}

data, err := proto.Marshal(&Secrets{MFAURL: key.URL()})
if err != nil {
return nil, fmt.Errorf("marshal secrets: %w", err)
}

var salt []byte
if useSalt {
salt, err = generateRandomBytes(saltLength)
if err != nil {
return nil, fmt.Errorf("generate salt for mfa secrets: %w", err)
}
}

encryptedSecrets, err := encryptSymmetric(symmetricKey, data, salt)
if err != nil {
return nil, fmt.Errorf("failed to encrypt mfa secrets: %w", err)
}

boxKey, err := keys.NewPrivateKey()
if err != nil {
return nil, fmt.Errorf("create ephemeral key: %w", err)
}

iamSvcEncrypted, err := encryptedSymmetricKeyForEachIAMSvc(boxKey, iamSvcKeys, symmetricKey, useSalt)
if err != nil {
return nil, fmt.Errorf("failed to add tokens to mfabox: %w", err)
}

return &MFABox{
IAMServices: iamSvcEncrypted,
SeedPublicKey: boxKey.PublicKey().Bytes(),
EncryptedSecrets: encryptedSecrets,
SecretsSalt: salt,
}, nil
}

func UnpackMFABox(iamPrivateKey *keys.PrivateKey, box *MFABox) (*Secrets, error) {
iamPublicKeyBytes := iamPrivateKey.PublicKey().Bytes()
boxPublicKey, err := keys.NewPublicKeyFromBytes(box.SeedPublicKey, elliptic.P256())
if err != nil {
return nil, err
}

for _, iamSvc := range box.IAMServices {
if !bytes.Equal(iamPublicKeyBytes, iamSvc.GetSvcPublicKey()) {
continue
}

symmetricKey, err := decryptECDH(iamPrivateKey, boxPublicKey, iamSvc.GetEncryptedSymmetricKey(), iamSvc.SymmetricKeySalt)
if err != nil {
return nil, fmt.Errorf("decrypt symmetric key: %w", err)
}

secretsData, err := decryptSymmetric(symmetricKey, box.GetEncryptedSecrets(), box.SecretsSalt)
if err != nil {
return nil, fmt.Errorf("decrypt secrets: %w", err)
}

secrets := new(Secrets)
if err = proto.Unmarshal(secretsData, secrets); err != nil {
return nil, fmt.Errorf("unmarshal secrets: %w", err)
}
return secrets, nil
}

return nil, fmt.Errorf("no box data for key '%x' was found", iamPublicKeyBytes)
}

func encryptedSymmetricKeyForEachIAMSvc(boxKey *keys.PrivateKey, iamSvcKeys []*keys.PublicKey, symmetricKey []byte, useSalt bool) ([]*MFABox_IAMService, error) {
var err error

res := make([]*MFABox_IAMService, len(iamSvcKeys))
for i, iamKey := range iamSvcKeys {
var salt []byte
if useSalt {
salt, err = generateRandomBytes(saltLength)
if err != nil {
return nil, fmt.Errorf("generate salt for symmetric key: %w", err)
}
}

res[i], err = encryptSymmetricKeyForIAMSvc(boxKey, iamKey, symmetricKey, salt)
if err != nil {
return nil, fmt.Errorf("encode symmetric key for iam svc: %w", err)
}
}

return res, nil
}

func encryptSymmetricKeyForIAMSvc(boxKey *keys.PrivateKey, iamPublicKey *keys.PublicKey, symmetricKey, salt []byte) (*MFABox_IAMService, error) {
encrypted, err := encryptECDH(boxKey, iamPublicKey, symmetricKey, salt)
if err != nil {
return nil, fmt.Errorf("encrypt symmetricKey: %w", err)
}

s3svc := new(MFABox_IAMService)
s3svc.SvcPublicKey = iamPublicKey.Bytes()
s3svc.EncryptedSymmetricKey = encrypted
s3svc.SymmetricKeySalt = salt
return s3svc, nil
}

func encryptECDH(boxPrivateKey *keys.PrivateKey, iamPublicKey *keys.PublicKey, data, salt []byte) ([]byte, error) {
enc, err := getCipherECDH(boxPrivateKey, iamPublicKey, salt)
if err != nil {
return nil, fmt.Errorf("get cipher ecdh: %w", err)
}

return encrypt(enc, data)
}

func decryptECDH(iamPrivateKey *keys.PrivateKey, boxPublicKey *keys.PublicKey, data, salt []byte) ([]byte, error) {
dec, err := getCipherECDH(iamPrivateKey, boxPublicKey, salt)
if err != nil {
return nil, fmt.Errorf("get cipher ecdh: %w", err)
}

return decrypt(dec, data)
}

func encryptSymmetric(secret, data, salt []byte) ([]byte, error) {
enc, err := getCipher(secret, salt)
if err != nil {
return nil, fmt.Errorf("get cipher: %w", err)
}

return encrypt(enc, data)
}

func decryptSymmetric(secret, data, salt []byte) ([]byte, error) {
dec, err := getCipher(secret, salt)
if err != nil {
return nil, fmt.Errorf("get cipher: %w", err)
}

return decrypt(dec, data)
}

func encrypt(enc cipher.AEAD, data []byte) ([]byte, error) {
nonce := make([]byte, enc.NonceSize())
if _, err := rand.Read(nonce); err != nil {
return nil, fmt.Errorf("generate random nonce: %w", err)
}

return enc.Seal(nonce, nonce, data, nil), nil
}

func decrypt(dec cipher.AEAD, data []byte) ([]byte, error) {
if ld, ns := len(data), dec.NonceSize(); ld < ns {
return nil, fmt.Errorf("wrong data size (%d), should be greater than %d", ld, ns)
}

nonce, cypher := data[:dec.NonceSize()], data[dec.NonceSize():]
return dec.Open(nil, nonce, cypher, nil)
}

func getCipherECDH(owner *keys.PrivateKey, sender *keys.PublicKey, salt []byte) (cipher.AEAD, error) {
secret, err := generateECDH(owner, sender)
if err != nil {
return nil, fmt.Errorf("generate shared key: %w", err)
}

return getCipher(secret, salt)
}

func getCipher(secret, salt []byte) (cipher.AEAD, error) {
key, err := deriveKey(secret, salt)
if err != nil {
return nil, fmt.Errorf("derive key: %w", err)
}

return chacha20poly1305.NewX(key)
}

func generateECDH(prv *keys.PrivateKey, pub *keys.PublicKey) (sk []byte, err error) {
prvECDH, err := prv.ECDH()
if err != nil {
return nil, fmt.Errorf("invalid ECDH private key: %w", err)
}

pubECDH, err := (*ecdsa.PublicKey)(pub).ECDH()
if err != nil {
return nil, fmt.Errorf("invalid ECDH public key: %w", err)
}

return prvECDH.ECDH(pubECDH)
}

func deriveKey(secret, salt []byte) ([]byte, error) {
hash := sha256.New
kdf := hkdf.New(hash, secret, salt, nil)
key := make([]byte, 32)
_, err := io.ReadFull(kdf, key)
return key, err
}

func generateRandomBytes(length int) ([]byte, error) {
b := make([]byte, length)
_, err := rand.Read(b)
return b, err
}
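For illustration, the intended pack/unpack round trip under the scheme above: generate a TOTP key, encrypt it for one IAM service key, and recover it with the matching private key. A minimal sketch, assuming the package compiles as shown (the issuer and account name are placeholders):

package main

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/mfa"
	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
	"github.com/pquerna/otp/totp"
)

func main() {
	// Hypothetical IAM service key; in production this is the gateway's key.
	iamKey, err := keys.NewPrivateKey()
	if err != nil {
		panic(err)
	}

	// Enroll a fresh TOTP secret for a user.
	otpKey, err := totp.Generate(totp.GenerateOpts{Issuer: "frostfs-s3-gw", AccountName: "alice"})
	if err != nil {
		panic(err)
	}

	// Pack: the TOTP URL is sealed with a random symmetric key, which is in
	// turn sealed via ECDH for each IAM service public key.
	box, err := mfa.PackMFABox(otpKey, []*keys.PublicKey{iamKey.PublicKey()}, true)
	if err != nil {
		panic(err)
	}

	// Unpack with the matching private key recovers the original secrets.
	secrets, err := mfa.UnpackMFABox(iamKey, box)
	if err != nil {
		panic(err)
	}
	fmt.Println(secrets.GetMFAURL() == otpKey.URL()) // true
}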
333
internal/mfa/mfabox.pb.go
Normal file
@ -0,0 +1,333 @@
|
|||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.34.2
|
||||
// protoc v3.21.9
|
||||
// source: internal/mfa/mfabox.proto
|
||||
|
||||
package mfa
|
||||
|
||||
import (
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
type MFABox struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// iam-service, contains iam keys and encrypted data for that keys
|
||||
IAMServices []*MFABox_IAMService `protobuf:"bytes,1,rep,name=IAMServices,json=iamServices,proto3" json:"IAMServices,omitempty"`
|
||||
// Seed public key for asymmetric encryption of IAMServicesKey
|
||||
SeedPublicKey []byte `protobuf:"bytes,2,opt,name=seedPublicKey,proto3" json:"seedPublicKey,omitempty"`
|
||||
// MFA secrets, which are encrypted by symmetric cipher
|
||||
EncryptedSecrets []byte `protobuf:"bytes,3,opt,name=encryptedSecrets,proto3" json:"encryptedSecrets,omitempty"`
|
||||
// salt used to derivation encrypted key to encrypt/decrypt MFA secrets
|
||||
SecretsSalt []byte `protobuf:"bytes,4,opt,name=secretsSalt,proto3" json:"secretsSalt,omitempty"`
|
||||
}
|
||||
|
||||
func (x *MFABox) Reset() {
|
||||
*x = MFABox{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_internal_mfa_mfabox_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *MFABox) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*MFABox) ProtoMessage() {}
|
||||
|
||||
func (x *MFABox) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_internal_mfa_mfabox_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use MFABox.ProtoReflect.Descriptor instead.
|
||||
func (*MFABox) Descriptor() ([]byte, []int) {
|
||||
return file_internal_mfa_mfabox_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *MFABox) GetIAMServices() []*MFABox_IAMService {
|
||||
if x != nil {
|
||||
return x.IAMServices
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *MFABox) GetSeedPublicKey() []byte {
|
||||
if x != nil {
|
||||
return x.SeedPublicKey
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *MFABox) GetEncryptedSecrets() []byte {
|
||||
if x != nil {
|
||||
return x.EncryptedSecrets
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *MFABox) GetSecretsSalt() []byte {
|
||||
if x != nil {
|
||||
return x.SecretsSalt
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Secrets struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// MFA URL
|
||||
MFAURL string `protobuf:"bytes,2,opt,name=MFAURL,json=mfaURL,proto3" json:"MFAURL,omitempty"`
|
||||
}
|
||||
|
||||
func (x *Secrets) Reset() {
|
||||
*x = Secrets{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_internal_mfa_mfabox_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *Secrets) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*Secrets) ProtoMessage() {}
|
||||
|
||||
func (x *Secrets) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_internal_mfa_mfabox_proto_msgTypes[1]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use Secrets.ProtoReflect.Descriptor instead.
|
||||
func (*Secrets) Descriptor() ([]byte, []int) {
|
||||
return file_internal_mfa_mfabox_proto_rawDescGZIP(), []int{1}
|
||||
}
|
||||
|
||||
func (x *Secrets) GetMFAURL() string {
|
||||
if x != nil {
|
||||
return x.MFAURL
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type MFABox_IAMService struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// iam-service public key
|
||||
SvcPublicKey []byte `protobuf:"bytes,1,opt,name=svcPublicKey,proto3" json:"svcPublicKey,omitempty"`
|
||||
// encrypted symmetric key used to encrypt/decrypt mfa secrets
|
||||
EncryptedSymmetricKey []byte `protobuf:"bytes,2,opt,name=encryptedSymmetricKey,proto3" json:"encryptedSymmetricKey,omitempty"`
|
||||
// salt used to derivation encrypted key to encrypt/decrypt symmetricKey
|
||||
SymmetricKeySalt []byte `protobuf:"bytes,3,opt,name=symmetricKeySalt,proto3" json:"symmetricKeySalt,omitempty"`
|
||||
}
|
||||
|
||||
func (x *MFABox_IAMService) Reset() {
|
||||
*x = MFABox_IAMService{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_internal_mfa_mfabox_proto_msgTypes[2]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *MFABox_IAMService) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*MFABox_IAMService) ProtoMessage() {}
|
||||
|
||||
func (x *MFABox_IAMService) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_internal_mfa_mfabox_proto_msgTypes[2]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use MFABox_IAMService.ProtoReflect.Descriptor instead.
|
||||
func (*MFABox_IAMService) Descriptor() ([]byte, []int) {
|
||||
return file_internal_mfa_mfabox_proto_rawDescGZIP(), []int{0, 0}
|
||||
}
|
||||
|
||||
func (x *MFABox_IAMService) GetSvcPublicKey() []byte {
|
||||
if x != nil {
|
||||
return x.SvcPublicKey
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *MFABox_IAMService) GetEncryptedSymmetricKey() []byte {
|
||||
if x != nil {
|
||||
return x.EncryptedSymmetricKey
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *MFABox_IAMService) GetSymmetricKeySalt() []byte {
|
||||
if x != nil {
|
||||
return x.SymmetricKeySalt
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var File_internal_mfa_mfabox_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_internal_mfa_mfabox_proto_rawDesc = []byte{
|
||||
0x0a, 0x19, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x6d, 0x66, 0x61, 0x2f, 0x6d,
|
||||
	0x66, 0x61, 0x62, 0x6f, 0x78, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x03, 0x6d, 0x66, 0x61,
	0x22, 0xcb, 0x02, 0x0a, 0x06, 0x4d, 0x46, 0x41, 0x42, 0x6f, 0x78, 0x12, 0x38, 0x0a, 0x0b, 0x49,
	0x41, 0x4d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b,
	0x32, 0x16, 0x2e, 0x6d, 0x66, 0x61, 0x2e, 0x4d, 0x46, 0x41, 0x42, 0x6f, 0x78, 0x2e, 0x49, 0x41,
	0x4d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x0b, 0x69, 0x61, 0x6d, 0x53, 0x65, 0x72,
	0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x24, 0x0a, 0x0d, 0x73, 0x65, 0x65, 0x64, 0x50, 0x75, 0x62,
	0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d, 0x73, 0x65,
	0x65, 0x64, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x2a, 0x0a, 0x10, 0x65,
	0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x18,
	0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x10, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64,
	0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x12, 0x20, 0x0a, 0x0b, 0x73, 0x65, 0x63, 0x72, 0x65,
	0x74, 0x73, 0x53, 0x61, 0x6c, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x65,
	0x63, 0x72, 0x65, 0x74, 0x73, 0x53, 0x61, 0x6c, 0x74, 0x1a, 0x92, 0x01, 0x0a, 0x0a, 0x49, 0x41,
	0x4d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x73, 0x76, 0x63, 0x50,
	0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c,
	0x73, 0x76, 0x63, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x34, 0x0a, 0x15,
	0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x53, 0x79, 0x6d, 0x6d, 0x65, 0x74, 0x72,
	0x69, 0x63, 0x4b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x15, 0x65, 0x6e, 0x63,
	0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x53, 0x79, 0x6d, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x4b,
	0x65, 0x79, 0x12, 0x2a, 0x0a, 0x10, 0x73, 0x79, 0x6d, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x4b,
	0x65, 0x79, 0x53, 0x61, 0x6c, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x10, 0x73, 0x79,
	0x6d, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x53, 0x61, 0x6c, 0x74, 0x22, 0x21,
	0x0a, 0x07, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x4d, 0x46, 0x41,
	0x55, 0x52, 0x4c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6d, 0x66, 0x61, 0x55, 0x52,
	0x4c, 0x42, 0x19, 0x5a, 0x17, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x6d, 0x66,
	0x61, 0x2f, 0x6d, 0x66, 0x61, 0x62, 0x6f, 0x78, 0x3b, 0x6d, 0x66, 0x61, 0x62, 0x06, 0x70, 0x72,
	0x6f, 0x74, 0x6f, 0x33,
}

var (
	file_internal_mfa_mfabox_proto_rawDescOnce sync.Once
	file_internal_mfa_mfabox_proto_rawDescData = file_internal_mfa_mfabox_proto_rawDesc
)

func file_internal_mfa_mfabox_proto_rawDescGZIP() []byte {
	file_internal_mfa_mfabox_proto_rawDescOnce.Do(func() {
		file_internal_mfa_mfabox_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_mfa_mfabox_proto_rawDescData)
	})
	return file_internal_mfa_mfabox_proto_rawDescData
}

var file_internal_mfa_mfabox_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
var file_internal_mfa_mfabox_proto_goTypes = []interface{}{
	(*MFABox)(nil),            // 0: mfa.MFABox
	(*Secrets)(nil),           // 1: mfa.Secrets
	(*MFABox_IAMService)(nil), // 2: mfa.MFABox.IAMService
}
var file_internal_mfa_mfabox_proto_depIdxs = []int32{
	2, // 0: mfa.MFABox.IAMServices:type_name -> mfa.MFABox.IAMService
	1, // [1:1] is the sub-list for method output_type
	1, // [1:1] is the sub-list for method input_type
	1, // [1:1] is the sub-list for extension type_name
	1, // [1:1] is the sub-list for extension extendee
	0, // [0:1] is the sub-list for field type_name
}

func init() { file_internal_mfa_mfabox_proto_init() }
func file_internal_mfa_mfabox_proto_init() {
	if File_internal_mfa_mfabox_proto != nil {
		return
	}
	if !protoimpl.UnsafeEnabled {
		file_internal_mfa_mfabox_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*MFABox); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_internal_mfa_mfabox_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*Secrets); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_internal_mfa_mfabox_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*MFABox_IAMService); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: file_internal_mfa_mfabox_proto_rawDesc,
			NumEnums:      0,
			NumMessages:   3,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_internal_mfa_mfabox_proto_goTypes,
		DependencyIndexes: file_internal_mfa_mfabox_proto_depIdxs,
		MessageInfos:      file_internal_mfa_mfabox_proto_msgTypes,
	}.Build()
	File_internal_mfa_mfabox_proto = out.File
	file_internal_mfa_mfabox_proto_rawDesc = nil
	file_internal_mfa_mfabox_proto_goTypes = nil
	file_internal_mfa_mfabox_proto_depIdxs = nil
}
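
Once this generated package is compiled, the new MFABox message round-trips through the standard protobuf API. A minimal sketch, assuming the generated code is importable under the module path shown (the module prefix is hypothetical; the go_package option only fixes the tail "internal/mfa/mfabox;mfa") and the usual protoc-gen-go field naming:

package main

import (
	"fmt"
	"log"

	"google.golang.org/protobuf/proto"

	mfa "example.com/s3-gw/internal/mfa/mfabox" // hypothetical module prefix
)

func main() {
	// Byte values are placeholders; real boxes carry keys and ciphertexts.
	box := &mfa.MFABox{
		IAMServices: []*mfa.MFABox_IAMService{{
			SvcPublicKey:          []byte("svc-pub"),
			EncryptedSymmetricKey: []byte("wrapped-key"),
			SymmetricKeySalt:      []byte("salt-1"),
		}},
		SeedPublicKey:    []byte("seed-pub"),
		EncryptedSecrets: []byte("ciphertext"),
		SecretsSalt:      []byte("salt-2"),
	}

	raw, err := proto.Marshal(box)
	if err != nil {
		log.Fatal(err)
	}

	var decoded mfa.MFABox
	if err := proto.Unmarshal(raw, &decoded); err != nil {
		log.Fatal(err)
	}
	fmt.Println("iam services:", len(decoded.GetIAMServices()))
}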
34
internal/mfa/mfabox.proto
Normal file

@ -0,0 +1,34 @@
syntax = "proto3";

package mfa;

option go_package = "internal/mfa/mfabox;mfa";

message MFABox {

  message IAMService {
    // iam-service public key
    bytes svcPublicKey = 1 [json_name = "svcPublicKey"];
    // encrypted symmetric key used to encrypt/decrypt mfa secrets
    bytes encryptedSymmetricKey = 2 [json_name = "encryptedSymmetricKey"];
    // salt used to derive the key that encrypts/decrypts symmetricKey
    bytes symmetricKeySalt = 3 [json_name = "symmetricKeySalt"];
  }

  // iam-service entries; each contains an IAM key and the data encrypted for that key
  repeated IAMService IAMServices = 1 [json_name = "iamServices"];

  // Seed public key for asymmetric encryption of IAMServicesKey
  bytes seedPublicKey = 2 [json_name = "seedPublicKey"];

  // MFA secrets, encrypted with a symmetric cipher
  bytes encryptedSecrets = 3 [json_name = "encryptedSecrets"];

  // salt used to derive the key that encrypts/decrypts the MFA secrets
  bytes secretsSalt = 4 [json_name = "secretsSalt"];
}

message Secrets {
  // MFA URL
  string MFAURL = 2 [json_name = "mfaURL"];
}
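The field comments above imply envelope encryption: the Secrets blob is sealed with a symmetric key derived via a salted KDF, and that key is in turn wrapped per iam-service. This diff does not pin down the KDF or the cipher, so the sketch below is illustrative only: it assumes Argon2id for key derivation and AES-GCM with a nonce-prefixed ciphertext, neither of which is confirmed by the source.

package main

import (
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/argon2"
)

// deriveKey stands in for the unspecified KDF: passphrase + salt -> 32-byte key.
func deriveKey(passphrase, salt []byte) []byte {
	return argon2.IDKey(passphrase, salt, 1, 64*1024, 4, 32) // assumed parameters
}

// seal encrypts plaintext and prepends the nonce so open can recover it.
func seal(key, plaintext []byte) ([]byte, error) {
	block, err := aes.NewCipher(key)
	if err != nil {
		return nil, err
	}
	gcm, err := cipher.NewGCM(block)
	if err != nil {
		return nil, err
	}
	nonce := make([]byte, gcm.NonceSize())
	if _, err := rand.Read(nonce); err != nil {
		return nil, err
	}
	return gcm.Seal(nonce, nonce, plaintext, nil), nil
}

// open splits off the nonce and decrypts the remainder.
func open(key, ciphertext []byte) ([]byte, error) {
	block, err := aes.NewCipher(key)
	if err != nil {
		return nil, err
	}
	gcm, err := cipher.NewGCM(block)
	if err != nil {
		return nil, err
	}
	if len(ciphertext) < gcm.NonceSize() {
		return nil, fmt.Errorf("ciphertext too short")
	}
	nonce, ct := ciphertext[:gcm.NonceSize()], ciphertext[gcm.NonceSize():]
	return gcm.Open(nil, nonce, ct, nil)
}

func main() {
	salt := []byte("per-box salt")              // plays the role of MFABox.secretsSalt
	key := deriveKey([]byte("passphrase"), salt)

	box, err := seal(key, []byte(`{"mfaURL":"otpauth://..."}`)) // role of encryptedSecrets
	if err != nil {
		panic(err)
	}
	secrets, err := open(key, box)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(secrets))
}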
@ -89,6 +89,8 @@ var (

const (
	versioningKV        = "Versioning"
	mfaDeleteEnabledKV  = "MFADelete"
	mfaSerialNumberKV   = "SerialNumber"
	cannedACLKV         = "cannedACL"
	ownerKeyKV          = "ownerKey"
	lockConfigurationKV = "LockConfiguration"
@ -500,9 +502,20 @@ func (c *Tree) GetSettingsNode(ctx context.Context, bktInfo *data.BucketInfo) (*

	node := multiNode.Latest()

	settings := &data.BucketSettings{Versioning: data.VersioningUnversioned}
	settings := &data.BucketSettings{Versioning: data.Versioning{
		VersioningStatus: data.VersioningUnversioned,
		MFADeleteStatus:  data.MFADeleteDisabled,
	}}
	if versioningValue, ok := node.Get(versioningKV); ok {
		settings.Versioning = versioningValue
		settings.Versioning.VersioningStatus = versioningValue
	}
	if mfaDeleteValue, ok := node.Get(mfaDeleteEnabledKV); ok {
		settings.Versioning.MFADeleteStatus = mfaDeleteValue
		if settings.MFADeleteEnabled() {
			if mfaSerialNumberValue, ok := node.Get(mfaSerialNumberKV); ok {
				settings.Versioning.MFASerialNumber = mfaSerialNumberValue
			}
		}
	}

	if lockConfigurationValue, ok := node.Get(lockConfigurationKV); ok {
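This hunk leans on a data.Versioning struct and a BucketSettings.MFADeleteEnabled helper defined elsewhere in the change set. A minimal sketch of the shape the hunk implies; the constant name MFADeleteEnabled and the status string values are assumptions, while VersioningUnversioned, VersioningEnabled, and MFADeleteDisabled are taken from the diff:

package data

const (
	VersioningUnversioned = "Unversioned" // assumed value
	VersioningEnabled     = "Enabled"     // assumed value
	MFADeleteDisabled     = "Disabled"    // assumed value
	MFADeleteEnabled      = "Enabled"     // hypothetical constant
)

// Versioning groups the per-bucket versioning and MFA-delete state that
// GetSettingsNode reads from the settings tree node.
type Versioning struct {
	VersioningStatus string
	MFADeleteStatus  string
	MFASerialNumber  string
}

type BucketSettings struct {
	Versioning Versioning
	// other fields (LockConfiguration, CannedACL, OwnerKey, ...) elided
}

// MFADeleteEnabled reports whether MFA delete is switched on; it gates the
// SerialNumber lookup in GetSettingsNode above.
func (b *BucketSettings) MFADeleteEnabled() bool {
	return b.Versioning.MFADeleteStatus == MFADeleteEnabled
}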
@ -1789,7 +1802,8 @@ func metaFromSettings(settings *data.BucketSettings) map[string]string {
	results := make(map[string]string, 3)

	results[FileNameKey] = settingsFileName
	results[versioningKV] = settings.Versioning
	results[versioningKV] = settings.Versioning.VersioningStatus
	results[mfaDeleteEnabledKV] = settings.Versioning.MFADeleteStatus
	results[lockConfigurationKV] = encodeLockConfiguration(settings.LockConfiguration)
	results[cannedACLKV] = settings.CannedACL
	if settings.OwnerKey != nil {
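metaFromSettings is the write-side counterpart of GetSettingsNode: settings are flattened into string KV pairs on the settings node. A standalone sketch of that flattening for an MFA-enabled, versioned bucket, using the key constants from the hunk above (the status strings are assumptions, and serial-number handling does not appear in this hunk, so it is omitted):

package main

import "fmt"

// Key names match the constants added earlier in this file.
const (
	versioningKV       = "Versioning"
	mfaDeleteEnabledKV = "MFADelete"
)

func main() {
	// Assumed status strings for an MFA-enabled, versioned bucket.
	results := map[string]string{
		versioningKV:       "Enabled",
		mfaDeleteEnabledKV: "Enabled",
	}
	fmt.Println(results) // map[MFADelete:Enabled Versioning:Enabled]
}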
@ -119,7 +119,10 @@ func TestTreeServiceSettings(t *testing.T) {
	require.NoError(t, err)

	settings := &data.BucketSettings{
		Versioning: "Versioning",
		Versioning: data.Versioning{
			VersioningStatus: data.VersioningEnabled,
			MFADeleteStatus:  data.MFADeleteDisabled,
		},
		LockConfiguration: &data.ObjectLockConfiguration{
			ObjectLockEnabled: "Enabled",
			Rule: &data.ObjectLockRule{