From 089e2c8bc89b5d5035b83446506cf9cf5e4d97c1 Mon Sep 17 00:00:00 2001 From: Denis Kirillov Date: Thu, 1 Jun 2023 14:30:49 +0300 Subject: [PATCH 1/4] [#125] api/auth: DisableURIPathEscaping for presign Don't use escaping when presign url. Escape manually before. Signed-off-by: Denis Kirillov --- api/auth/center.go | 2 +- api/auth/presign.go | 46 ++++++++++++++++++++ api/auth/presign_test.go | 91 ++++++++++++++++++++++++++++++++++++++++ cmd/s3-authmate/main.go | 26 +++++++----- 4 files changed, 153 insertions(+), 12 deletions(-) create mode 100644 api/auth/presign.go create mode 100644 api/auth/presign_test.go diff --git a/api/auth/center.go b/api/auth/center.go index fb610da..18a07ef 100644 --- a/api/auth/center.go +++ b/api/auth/center.go @@ -291,6 +291,7 @@ func cloneRequest(r *http.Request, authHeader *authHeader) *http.Request { func (c *center) checkSign(authHeader *authHeader, box *accessbox.Box, request *http.Request, signatureDateTime time.Time) error { awsCreds := credentials.NewStaticCredentials(authHeader.AccessKeyID, box.Gate.AccessKey, "") signer := v4.NewSigner(awsCreds) + signer.DisableURIPathEscaping = true var signature string if authHeader.IsPresigned { @@ -306,7 +307,6 @@ func (c *center) checkSign(authHeader *authHeader, box *accessbox.Box, request * } signature = request.URL.Query().Get(AmzSignature) } else { - signer.DisableURIPathEscaping = true if _, err := signer.Sign(request, nil, authHeader.Service, authHeader.Region, signatureDateTime); err != nil { return fmt.Errorf("failed to sign temporary HTTP request: %w", err) } diff --git a/api/auth/presign.go b/api/auth/presign.go new file mode 100644 index 0000000..755ada9 --- /dev/null +++ b/api/auth/presign.go @@ -0,0 +1,46 @@ +package auth + +import ( + "fmt" + "net/http" + "strings" + "time" + + v4 "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/private/protocol/rest" +) + +type RequestData struct { + Method string + Endpoint string + Bucket string + Object string +} + +type PresignData struct { + Service string + Region string + Lifetime time.Duration + SignTime time.Time +} + +// PresignRequest forms pre-signed request to access objects without aws credentials. 
+func PresignRequest(creds *credentials.Credentials, reqData RequestData, presignData PresignData) (*http.Request, error) { + urlStr := fmt.Sprintf("%s/%s/%s", reqData.Endpoint, rest.EscapePath(reqData.Bucket, false), rest.EscapePath(reqData.Object, false)) + req, err := http.NewRequest(strings.ToUpper(reqData.Method), urlStr, nil) + if err != nil { + return nil, fmt.Errorf("failed to create new request: %w", err) + } + + req.Header.Set(AmzDate, presignData.SignTime.Format("20060102T150405Z")) + + signer := v4.NewSigner(creds) + signer.DisableURIPathEscaping = true + + if _, err = signer.Presign(req, nil, presignData.Service, presignData.Region, presignData.Lifetime, presignData.SignTime); err != nil { + return nil, fmt.Errorf("presign: %w", err) + } + + return req, nil +} diff --git a/api/auth/presign_test.go b/api/auth/presign_test.go new file mode 100644 index 0000000..898a6e9 --- /dev/null +++ b/api/auth/presign_test.go @@ -0,0 +1,91 @@ +package auth + +import ( + "context" + "strings" + "testing" + "time" + + "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox" + "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/tokens" + apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" + cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" + oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" + "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/nspcc-dev/neo-go/pkg/crypto/keys" + "github.com/stretchr/testify/require" +) + +var _ tokens.Credentials = (*credentialsMock)(nil) + +type credentialsMock struct { + boxes map[string]*accessbox.Box +} + +func newTokensFrostfsMock() *credentialsMock { + return &credentialsMock{ + boxes: make(map[string]*accessbox.Box), + } +} + +func (m credentialsMock) addBox(addr oid.Address, box *accessbox.Box) { + m.boxes[addr.String()] = box +} + +func (m credentialsMock) GetBox(_ context.Context, addr oid.Address) (*accessbox.Box, error) { + box, ok := m.boxes[addr.String()] + if !ok { + return nil, apistatus.ObjectNotFound{} + } + + return box, nil +} + +func (m credentialsMock) Put(context.Context, cid.ID, user.ID, *accessbox.AccessBox, uint64, ...*keys.PublicKey) (oid.Address, error) { + return oid.Address{}, nil +} + +func TestCheckSign(t *testing.T) { + var accessKeyAddr oid.Address + err := accessKeyAddr.DecodeString("8N7CYBY74kxZXoyvA5UNdmovaXqFpwNfvEPsqaN81es2/3tDwq5tR8fByrJcyJwyiuYX7Dae8tyDT7pd8oaL1MBto") + require.NoError(t, err) + + accessKeyID := strings.ReplaceAll(accessKeyAddr.String(), "/", "0") + secretKey := "713d0a0b9efc7d22923e17b0402a6a89b4273bc711c8bacb2da1b643d0006aeb" + awsCreds := credentials.NewStaticCredentials(accessKeyID, secretKey, "") + + reqData := RequestData{ + Method: "GET", + Endpoint: "http://localhost:8084", + Bucket: "my-bucket", + Object: "@obj/name", + } + presignData := PresignData{ + Service: "s3", + Region: "spb", + Lifetime: 10 * time.Minute, + SignTime: time.Now().UTC(), + } + + req, err := PresignRequest(awsCreds, reqData, presignData) + require.NoError(t, err) + + expBox := &accessbox.Box{ + Gate: &accessbox.GateData{ + AccessKey: secretKey, + }, + } + + mock := newTokensFrostfsMock() + mock.addBox(accessKeyAddr, expBox) + + c := ¢er{ + cli: mock, + reg: NewRegexpMatcher(authorizationFieldRegexp), + postReg: NewRegexpMatcher(postPolicyCredentialRegexp), + } + box, err := c.Authenticate(req) + require.NoError(t, err) + require.EqualValues(t, expBox, box.AccessBox) +} diff --git a/cmd/s3-authmate/main.go 
b/cmd/s3-authmate/main.go index 3ac0f4c..26a0599 100644 --- a/cmd/s3-authmate/main.go +++ b/cmd/s3-authmate/main.go @@ -5,7 +5,6 @@ import ( "crypto/ecdsa" "encoding/json" "fmt" - "net/http" "os" "os/signal" "runtime" @@ -14,6 +13,7 @@ import ( "time" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api" + "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/authmate" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/version" @@ -23,7 +23,6 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/session" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/nspcc-dev/neo-go/pkg/crypto/keys" "github.com/spf13/viper" "github.com/urfave/cli/v2" @@ -482,17 +481,22 @@ It will be ceil rounded to the nearest amount of epoch.`, return fmt.Errorf("couldn't get credentials: %w", err) } - signer := v4.NewSigner(sess.Config.Credentials) - req, err := http.NewRequest(strings.ToUpper(methodFlag), fmt.Sprintf("%s/%s/%s", endpointFlag, bucketFlag, objectFlag), nil) - if err != nil { - return fmt.Errorf("failed to create new request: %w", err) + reqData := auth.RequestData{ + Method: methodFlag, + Endpoint: endpointFlag, + Bucket: bucketFlag, + Object: objectFlag, + } + presignData := auth.PresignData{ + Service: "s3", + Region: *sess.Config.Region, + Lifetime: lifetimeFlag, + SignTime: time.Now().UTC(), } - date := time.Now().UTC() - req.Header.Set(api.AmzDate, date.Format("20060102T150405Z")) - - if _, err = signer.Presign(req, nil, "s3", *sess.Config.Region, lifetimeFlag, date); err != nil { - return fmt.Errorf("presign: %w", err) + req, err := auth.PresignRequest(sess.Config.Credentials, reqData, presignData) + if err != nil { + return err } res := &struct{ URL string }{ -- 2.45.2 From 843cd4c09478a2f9f0f86d42b5ef9174499ff99b Mon Sep 17 00:00:00 2001 From: Denis Kirillov Date: Thu, 1 Jun 2023 16:45:28 +0300 Subject: [PATCH 2/4] [#125] Handle negative Content-Length on put Add computing actual object size during calculating hash on put. 
Use this actual value to save in tree and cache Signed-off-by: Denis Kirillov --- api/data/info.go | 4 +-- api/data/tree.go | 6 ++--- api/handler/attributes.go | 2 +- api/handler/get.go | 8 +++--- api/handler/handlers_test.go | 2 +- api/handler/head.go | 4 +-- api/handler/multipart_upload.go | 7 +++++- api/handler/put.go | 17 ++++++++----- api/handler/put_test.go | 20 +++++++++++++++ api/handler/response.go | 4 +-- api/layer/cors.go | 2 +- api/layer/layer.go | 4 +-- api/layer/multipart_upload.go | 29 ++++++++++++---------- api/layer/notifications.go | 2 +- api/layer/object.go | 44 +++++++++++++++++---------------- api/layer/system_object.go | 2 +- api/layer/util.go | 2 +- api/layer/util_test.go | 2 +- api/layer/versioning_test.go | 2 +- api/notifications/controller.go | 2 +- pkg/service/tree/tree.go | 10 ++++---- 21 files changed, 105 insertions(+), 70 deletions(-) diff --git a/api/data/info.go b/api/data/info.go index f3e69f5..b612ee4 100644 --- a/api/data/info.go +++ b/api/data/info.go @@ -40,7 +40,7 @@ type ( Bucket string Name string - Size int64 + Size uint64 ContentType string Created time.Time HashSum string @@ -52,7 +52,7 @@ type ( NotificationInfo struct { Name string Version string - Size int64 + Size uint64 HashSum string } diff --git a/api/data/tree.go b/api/data/tree.go index e24d7d8..02db82c 100644 --- a/api/data/tree.go +++ b/api/data/tree.go @@ -53,7 +53,7 @@ type BaseNodeVersion struct { ParenID uint64 OID oid.ID Timestamp uint64 - Size int64 + Size uint64 ETag string FilePath string } @@ -83,14 +83,14 @@ type PartInfo struct { UploadID string Number int OID oid.ID - Size int64 + Size uint64 ETag string Created time.Time } // ToHeaderString form short part representation to use in S3-Completed-Parts header. func (p *PartInfo) ToHeaderString() string { - return strconv.Itoa(p.Number) + "-" + strconv.FormatInt(p.Size, 10) + "-" + p.ETag + return strconv.Itoa(p.Number) + "-" + strconv.FormatUint(p.Size, 10) + "-" + p.ETag } // LockInfo is lock information to create appropriate tree node. 
diff --git a/api/handler/attributes.go b/api/handler/attributes.go index 758b43d..c5125f1 100644 --- a/api/handler/attributes.go +++ b/api/handler/attributes.go @@ -17,7 +17,7 @@ type ( GetObjectAttributesResponse struct { ETag string `xml:"ETag,omitempty"` Checksum *Checksum `xml:"Checksum,omitempty"` - ObjectSize int64 `xml:"ObjectSize,omitempty"` + ObjectSize uint64 `xml:"ObjectSize,omitempty"` StorageClass string `xml:"StorageClass,omitempty"` ObjectParts *ObjectParts `xml:"ObjectParts,omitempty"` } diff --git a/api/handler/get.go b/api/handler/get.go index 58e3a93..a5f2605 100644 --- a/api/handler/get.go +++ b/api/handler/get.go @@ -88,7 +88,7 @@ func writeHeaders(h http.Header, requestHeader http.Header, extendedInfo *data.E h.Set(api.ContentLength, info.Headers[layer.AttributeDecryptedSize]) addSSECHeaders(h, requestHeader) } else { - h.Set(api.ContentLength, strconv.FormatInt(info.Size, 10)) + h.Set(api.ContentLength, strconv.FormatUint(info.Size, 10)) } h.Set(api.ETag, info.HashSum) @@ -163,13 +163,13 @@ func (h *handler) GetObjectHandler(w http.ResponseWriter, r *http.Request) { fullSize := info.Size if encryptionParams.Enabled() { - if fullSize, err = strconv.ParseInt(info.Headers[layer.AttributeDecryptedSize], 10, 64); err != nil { + if fullSize, err = strconv.ParseUint(info.Headers[layer.AttributeDecryptedSize], 10, 64); err != nil { h.logAndSendError(w, "invalid decrypted size header", reqInfo, errors.GetAPIError(errors.ErrBadRequest)) return } } - if params, err = fetchRangeHeader(r.Header, uint64(fullSize)); err != nil { + if params, err = fetchRangeHeader(r.Header, fullSize); err != nil { h.logAndSendError(w, "could not parse range header", reqInfo, err) return } @@ -268,7 +268,7 @@ func parseHTTPTime(data string) (*time.Time, error) { return &result, nil } -func writeRangeHeaders(w http.ResponseWriter, params *layer.RangeParams, size int64) { +func writeRangeHeaders(w http.ResponseWriter, params *layer.RangeParams, size uint64) { w.Header().Set(api.AcceptRanges, "bytes") w.Header().Set(api.ContentRange, fmt.Sprintf("bytes %d-%d/%d", params.Start, params.End, size)) w.Header().Set(api.ContentLength, strconv.FormatUint(params.End-params.Start+1, 10)) diff --git a/api/handler/handlers_test.go b/api/handler/handlers_test.go index c567b7f..b817600 100644 --- a/api/handler/handlers_test.go +++ b/api/handler/handlers_test.go @@ -190,7 +190,7 @@ func createTestObject(hc *handlerContext, bktInfo *data.BucketInfo, objName stri extObjInfo, err := hc.Layer().PutObject(hc.Context(), &layer.PutObjectParams{ BktInfo: bktInfo, Object: objName, - Size: int64(len(content)), + Size: uint64(len(content)), Reader: bytes.NewReader(content), Header: header, }) diff --git a/api/handler/head.go b/api/handler/head.go index 0ac519e..7217b6f 100644 --- a/api/handler/head.go +++ b/api/handler/head.go @@ -13,8 +13,8 @@ import ( const sizeToDetectType = 512 -func getRangeToDetectContentType(maxSize int64) *layer.RangeParams { - end := uint64(maxSize) +func getRangeToDetectContentType(maxSize uint64) *layer.RangeParams { + end := maxSize if sizeToDetectType < end { end = sizeToDetectType } diff --git a/api/handler/multipart_upload.go b/api/handler/multipart_upload.go index 1db3bd5..01a2636 100644 --- a/api/handler/multipart_upload.go +++ b/api/handler/multipart_upload.go @@ -216,6 +216,11 @@ func (h *handler) UploadPartHandler(w http.ResponseWriter, r *http.Request) { return } + var size uint64 + if r.ContentLength > 0 { + size = uint64(r.ContentLength) + } + p := &layer.UploadPartParams{ Info: 
&layer.UploadInfoParams{ UploadID: uploadID, @@ -223,7 +228,7 @@ func (h *handler) UploadPartHandler(w http.ResponseWriter, r *http.Request) { Key: reqInfo.ObjectName, }, PartNumber: partNumber, - Size: r.ContentLength, + Size: size, Reader: r.Body, } diff --git a/api/handler/put.go b/api/handler/put.go index 8972468..c8f3760 100644 --- a/api/handler/put.go +++ b/api/handler/put.go @@ -43,13 +43,13 @@ func (p *postPolicy) condition(key string) *policyCondition { return nil } -func (p *postPolicy) CheckContentLength(size int64) bool { +func (p *postPolicy) CheckContentLength(size uint64) bool { if p.empty { return true } for _, condition := range p.Conditions { if condition.Matching == "content-length-range" { - length := strconv.FormatInt(size, 10) + length := strconv.FormatUint(size, 10) return condition.Key <= length && length <= condition.Value } } @@ -218,11 +218,16 @@ func (h *handler) PutObjectHandler(w http.ResponseWriter, r *http.Request) { return } + var size uint64 + if r.ContentLength > 0 { + size = uint64(r.ContentLength) + } + params := &layer.PutObjectParams{ BktInfo: bktInfo, Object: reqInfo.ObjectName, Reader: r.Body, - Size: r.ContentLength, + Size: size, Header: metadata, Encryption: encryptionParams, } @@ -388,10 +393,10 @@ func (h *handler) PostObject(w http.ResponseWriter, r *http.Request) { } var contentReader io.Reader - var size int64 + var size uint64 if content, ok := r.MultipartForm.Value["file"]; ok { contentReader = bytes.NewBufferString(content[0]) - size = int64(len(content[0])) + size = uint64(len(content[0])) } else { file, head, err := r.FormFile("file") if err != nil { @@ -399,7 +404,7 @@ func (h *handler) PostObject(w http.ResponseWriter, r *http.Request) { return } contentReader = file - size = head.Size + size = uint64(head.Size) reqInfo.ObjectName = strings.ReplaceAll(reqInfo.ObjectName, "${filename}", head.Filename) } if !policy.CheckContentLength(size) { diff --git a/api/handler/put_test.go b/api/handler/put_test.go index aac8c3b..9bdc0bc 100644 --- a/api/handler/put_test.go +++ b/api/handler/put_test.go @@ -1,9 +1,11 @@ package handler import ( + "bytes" "encoding/json" "mime/multipart" "net/http" + "strconv" "strings" "testing" "time" @@ -126,3 +128,21 @@ func TestPutObjectOverrideCopiesNumber(t *testing.T) { require.NoError(t, err) require.Equal(t, "1", objInfo.Headers[layer.AttributeFrostfsCopiesNumber]) } + +func TestPutObjectWithNegativeContentLength(t *testing.T) { + tc := prepareHandlerContext(t) + + bktName, objName := "bucket-for-put", "object-for-put" + createTestBucket(tc, bktName) + + content := []byte("content") + w, r := prepareTestPayloadRequest(tc, bktName, objName, bytes.NewReader(content)) + r.ContentLength = -1 + tc.Handler().PutObjectHandler(w, r) + assertStatus(t, w, http.StatusOK) + + w, r = prepareTestRequest(tc, bktName, objName, nil) + tc.Handler().HeadObjectHandler(w, r) + assertStatus(t, w, http.StatusOK) + require.Equal(t, strconv.Itoa(len(content)), w.Header().Get(api.ContentLength)) +} diff --git a/api/handler/response.go b/api/handler/response.go index 70eb32d..9d07b33 100644 --- a/api/handler/response.go +++ b/api/handler/response.go @@ -104,7 +104,7 @@ type Object struct { Key string LastModified string // time string of format "2006-01-02T15:04:05.000Z" ETag string `xml:"ETag,omitempty"` - Size int64 + Size uint64 // Owner of the object. 
Owner *Owner `xml:"Owner,omitempty"` @@ -120,7 +120,7 @@ type ObjectVersionResponse struct { Key string `xml:"Key"` LastModified string `xml:"LastModified"` Owner Owner `xml:"Owner"` - Size int64 `xml:"Size"` + Size uint64 `xml:"Size"` StorageClass string `xml:"StorageClass,omitempty"` // is empty!! VersionID string `xml:"VersionId"` } diff --git a/api/layer/cors.go b/api/layer/cors.go index cad882d..02f731a 100644 --- a/api/layer/cors.go +++ b/api/layer/cors.go @@ -45,7 +45,7 @@ func (n *layer) PutBucketCORS(ctx context.Context, p *PutCORSParams) error { CopiesNumber: p.CopiesNumbers, } - objID, _, err := n.objectPutAndHash(ctx, prm, p.BktInfo) + _, objID, _, err := n.objectPutAndHash(ctx, prm, p.BktInfo) if err != nil { return fmt.Errorf("put system object: %w", err) } diff --git a/api/layer/layer.go b/api/layer/layer.go index 4ee38b5..8c3a627 100644 --- a/api/layer/layer.go +++ b/api/layer/layer.go @@ -102,7 +102,7 @@ type ( PutObjectParams struct { BktInfo *data.BucketInfo Object string - Size int64 + Size uint64 Reader io.Reader Header map[string]string Lock *data.ObjectLock @@ -135,7 +135,7 @@ type ( ScrBktInfo *data.BucketInfo DstBktInfo *data.BucketInfo DstObject string - SrcSize int64 + SrcSize uint64 Header map[string]string Range *RangeParams Lock *data.ObjectLock diff --git a/api/layer/multipart_upload.go b/api/layer/multipart_upload.go index 322f55c..9ce93fa 100644 --- a/api/layer/multipart_upload.go +++ b/api/layer/multipart_upload.go @@ -60,7 +60,7 @@ type ( UploadPartParams struct { Info *UploadInfoParams PartNumber int - Size int64 + Size uint64 Reader io.Reader } @@ -91,7 +91,7 @@ type ( ETag string LastModified string PartNumber int - Size int64 + Size uint64 } ListMultipartUploadsParams struct { @@ -212,22 +212,25 @@ func (n *layer) uploadPart(ctx context.Context, multipartInfo *data.MultipartInf decSize := p.Size if p.Info.Encryption.Enabled() { - r, encSize, err := encryptionReader(p.Reader, uint64(p.Size), p.Info.Encryption.Key()) + r, encSize, err := encryptionReader(p.Reader, p.Size, p.Info.Encryption.Key()) if err != nil { return nil, fmt.Errorf("failed to create ecnrypted reader: %w", err) } - prm.Attributes = append(prm.Attributes, [2]string{AttributeDecryptedSize, strconv.FormatInt(p.Size, 10)}) + prm.Attributes = append(prm.Attributes, [2]string{AttributeDecryptedSize, strconv.FormatUint(p.Size, 10)}) prm.Payload = r - p.Size = int64(encSize) + p.Size = encSize } prm.Attributes[0][0], prm.Attributes[0][1] = UploadIDAttributeName, p.Info.UploadID prm.Attributes[1][0], prm.Attributes[1][1] = UploadPartNumberAttributeName, strconv.Itoa(p.PartNumber) - id, hash, err := n.objectPutAndHash(ctx, prm, bktInfo) + size, id, hash, err := n.objectPutAndHash(ctx, prm, bktInfo) if err != nil { return nil, err } + if p.Info.Encryption.Enabled() { + size = decSize + } reqInfo := api.GetReqInfo(ctx) n.log.Debug("upload part", @@ -241,7 +244,7 @@ func (n *layer) uploadPart(ctx context.Context, multipartInfo *data.MultipartInf UploadID: p.Info.UploadID, Number: p.PartNumber, OID: id, - Size: decSize, + Size: size, ETag: hex.EncodeToString(hash), Created: prm.CreationTime, } @@ -285,8 +288,8 @@ func (n *layer) UploadPartCopy(ctx context.Context, p *UploadCopyParams) (*data. 
size := p.SrcObjInfo.Size if p.Range != nil { - size = int64(p.Range.End - p.Range.Start + 1) - if p.Range.End > uint64(p.SrcObjInfo.Size) { + size = p.Range.End - p.Range.Start + 1 + if p.Range.End > p.SrcObjInfo.Size { return nil, errors.GetAPIError(errors.ErrInvalidCopyPartRangeSource) } } @@ -375,7 +378,7 @@ func (n *layer) CompleteMultipartUpload(ctx context.Context, p *CompleteMultipar return nil, nil, errors.GetAPIError(errors.ErrInvalidPart) } - var multipartObjetSize int64 + var multipartObjetSize uint64 var encMultipartObjectSize uint64 parts := make([]*data.PartInfo, 0, len(p.Parts)) @@ -393,7 +396,7 @@ func (n *layer) CompleteMultipartUpload(ctx context.Context, p *CompleteMultipar multipartObjetSize += partInfo.Size // even if encryption is enabled size is actual (decrypted) if encInfo.Enabled { - encPartSize, err := sio.EncryptedSize(uint64(partInfo.Size)) + encPartSize, err := sio.EncryptedSize(partInfo.Size) if err != nil { return nil, nil, fmt.Errorf("compute encrypted size: %w", err) } @@ -430,8 +433,8 @@ func (n *layer) CompleteMultipartUpload(ctx context.Context, p *CompleteMultipar initMetadata[AttributeEncryptionAlgorithm] = encInfo.Algorithm initMetadata[AttributeHMACKey] = encInfo.HMACKey initMetadata[AttributeHMACSalt] = encInfo.HMACSalt - initMetadata[AttributeDecryptedSize] = strconv.FormatInt(multipartObjetSize, 10) - multipartObjetSize = int64(encMultipartObjectSize) + initMetadata[AttributeDecryptedSize] = strconv.FormatUint(multipartObjetSize, 10) + multipartObjetSize = encMultipartObjectSize } r := &multiObjectReader{ diff --git a/api/layer/notifications.go b/api/layer/notifications.go index 3b6a9cf..b6b59b7 100644 --- a/api/layer/notifications.go +++ b/api/layer/notifications.go @@ -34,7 +34,7 @@ func (n *layer) PutBucketNotificationConfiguration(ctx context.Context, p *PutBu CopiesNumber: p.CopiesNumbers, } - objID, _, err := n.objectPutAndHash(ctx, prm, p.BktInfo) + _, objID, _, err := n.objectPutAndHash(ctx, prm, p.BktInfo) if err != nil { return err } diff --git a/api/layer/object.go b/api/layer/object.go index 7dda084..fde374e 100644 --- a/api/layer/object.go +++ b/api/layer/object.go @@ -170,7 +170,7 @@ func ParseCompletedPartHeader(hdr string) (*Part, error) { if err != nil { return nil, fmt.Errorf("invalid completed part number '%s': %w", partInfo[0], err) } - size, err := strconv.Atoi(partInfo[1]) + size, err := strconv.ParseUint(partInfo[1], 10, 64) if err != nil { return nil, fmt.Errorf("invalid completed part size '%s': %w", partInfo[1], err) } @@ -178,7 +178,7 @@ func ParseCompletedPartHeader(hdr string) (*Part, error) { return &Part{ ETag: partInfo[2], PartNumber: num, - Size: int64(size), + Size: size, }, nil } @@ -191,26 +191,18 @@ func (n *layer) PutObject(ctx context.Context, p *PutObjectParams) (*data.Extend return nil, fmt.Errorf("couldn't get versioning settings object: %w", err) } - newVersion := &data.NodeVersion{ - BaseNodeVersion: data.BaseNodeVersion{ - FilePath: p.Object, - Size: p.Size, - }, - IsUnversioned: !bktSettings.VersioningEnabled(), - } - r := p.Reader if p.Encryption.Enabled() { - p.Header[AttributeDecryptedSize] = strconv.FormatInt(p.Size, 10) + p.Header[AttributeDecryptedSize] = strconv.FormatUint(p.Size, 10) if err = addEncryptionHeaders(p.Header, p.Encryption); err != nil { return nil, fmt.Errorf("add encryption header: %w", err) } var encSize uint64 - if r, encSize, err = encryptionReader(p.Reader, uint64(p.Size), p.Encryption.Key()); err != nil { + if r, encSize, err = encryptionReader(p.Reader, p.Size, 
p.Encryption.Key()); err != nil { return nil, fmt.Errorf("create encrypter: %w", err) } - p.Size = int64(encSize) + p.Size = encSize } if r != nil { @@ -230,7 +222,7 @@ func (n *layer) PutObject(ctx context.Context, p *PutObjectParams) (*data.Extend prm := PrmObjectCreate{ Container: p.BktInfo.CID, Creator: owner, - PayloadSize: uint64(p.Size), + PayloadSize: p.Size, Filepath: p.Object, Payload: r, CreationTime: TimeNow(ctx), @@ -243,7 +235,7 @@ func (n *layer) PutObject(ctx context.Context, p *PutObjectParams) (*data.Extend prm.Attributes = append(prm.Attributes, [2]string{k, v}) } - id, hash, err := n.objectPutAndHash(ctx, prm, p.BktInfo) + size, id, hash, err := n.objectPutAndHash(ctx, prm, p.BktInfo) if err != nil { return nil, err } @@ -254,8 +246,16 @@ func (n *layer) PutObject(ctx context.Context, p *PutObjectParams) (*data.Extend zap.String("bucket", p.BktInfo.Name), zap.Stringer("cid", p.BktInfo.CID), zap.String("object", p.Object), zap.Stringer("oid", id)) - newVersion.OID = id - newVersion.ETag = hex.EncodeToString(hash) + newVersion := &data.NodeVersion{ + BaseNodeVersion: data.BaseNodeVersion{ + OID: id, + ETag: hex.EncodeToString(hash), + FilePath: p.Object, + Size: size, + }, + IsUnversioned: !bktSettings.VersioningEnabled(), + } + if newVersion.ID, err = n.treeService.AddVersion(ctx, p.BktInfo, newVersion); err != nil { return nil, fmt.Errorf("couldn't add new verion to tree service: %w", err) } @@ -286,7 +286,7 @@ func (n *layer) PutObject(ctx context.Context, p *PutObjectParams) (*data.Extend Owner: owner, Bucket: p.BktInfo.Name, Name: p.Object, - Size: p.Size, + Size: size, Created: prm.CreationTime, Headers: p.Header, ContentType: p.Header[api.ContentType], @@ -405,17 +405,19 @@ func (n *layer) objectDelete(ctx context.Context, bktInfo *data.BucketInfo, idOb // objectPutAndHash prepare auth parameters and invoke frostfs.CreateObject. // Returns object ID and payload sha256 hash. -func (n *layer) objectPutAndHash(ctx context.Context, prm PrmObjectCreate, bktInfo *data.BucketInfo) (oid.ID, []byte, error) { +func (n *layer) objectPutAndHash(ctx context.Context, prm PrmObjectCreate, bktInfo *data.BucketInfo) (uint64, oid.ID, []byte, error) { n.prepareAuthParameters(ctx, &prm.PrmAuth, bktInfo.Owner) + var size uint64 hash := sha256.New() prm.Payload = wrapReader(prm.Payload, 64*1024, func(buf []byte) { + size += uint64(len(buf)) hash.Write(buf) }) id, err := n.frostFS.CreateObject(ctx, prm) if err != nil { - return oid.ID{}, nil, err + return 0, oid.ID{}, nil, err } - return id, hash.Sum(nil), nil + return size, id, hash.Sum(nil), nil } // ListObjectsV1 returns objects in a bucket for requests of Version 1. 
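Editor's note on the change above: the gateway no longer trusts `r.ContentLength` (which is negative for chunked uploads with no declared length) and instead measures the payload while hashing it inside `objectPutAndHash`, returning the actual byte count alongside the object ID and checksum. Below is a minimal, self-contained sketch of that counting-and-hashing reader idea; the repository's real helper is `wrapReader`, whose implementation is not part of this patch, so the type and names here are illustrative only.

```go
package main

import (
	"bytes"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
)

// callbackReader illustrates the idea behind wrapReader: every chunk read
// from the underlying reader is passed to cb before being handed to the caller.
type callbackReader struct {
	r  io.Reader
	cb func([]byte)
}

func (c *callbackReader) Read(p []byte) (int, error) {
	n, err := c.r.Read(p)
	if n > 0 {
		c.cb(p[:n])
	}
	return n, err
}

func main() {
	payload := bytes.NewBufferString("object payload of unknown length")

	var size uint64
	hash := sha256.New()

	// Count and hash the payload as it streams to storage, instead of
	// trusting a possibly negative Content-Length header.
	wrapped := &callbackReader{r: payload, cb: func(buf []byte) {
		size += uint64(len(buf))
		hash.Write(buf)
	}}

	// io.Copy stands in for the actual upload (frostFS.CreateObject in the patch).
	if _, err := io.Copy(io.Discard, wrapped); err != nil {
		panic(err)
	}

	fmt.Println("actual size:", size, "sha256:", hex.EncodeToString(hash.Sum(nil)))
}
```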
diff --git a/api/layer/system_object.go b/api/layer/system_object.go index 3e5d41d..0563341 100644 --- a/api/layer/system_object.go +++ b/api/layer/system_object.go @@ -126,7 +126,7 @@ func (n *layer) putLockObject(ctx context.Context, bktInfo *data.BucketInfo, obj return oid.ID{}, err } - id, _, err := n.objectPutAndHash(ctx, prm, bktInfo) + _, id, _, err := n.objectPutAndHash(ctx, prm, bktInfo) return id, err } diff --git a/api/layer/util.go b/api/layer/util.go index a0370e0..a16a1af 100644 --- a/api/layer/util.go +++ b/api/layer/util.go @@ -94,7 +94,7 @@ func objectInfoFromMeta(bkt *data.BucketInfo, meta *object.Object) *data.ObjectI ContentType: mimeType, Headers: headers, Owner: *meta.OwnerID(), - Size: int64(meta.PayloadSize()), + Size: meta.PayloadSize(), HashSum: hex.EncodeToString(payloadChecksum.Value()), } } diff --git a/api/layer/util_test.go b/api/layer/util_test.go index 574f83e..446f2e2 100644 --- a/api/layer/util_test.go +++ b/api/layer/util_test.go @@ -17,7 +17,7 @@ import ( var ( defaultTestCreated = time.Now() defaultTestPayload = []byte("test object payload") - defaultTestPayloadLength = int64(len(defaultTestPayload)) + defaultTestPayloadLength = uint64(len(defaultTestPayload)) defaultTestContentType = http.DetectContentType(defaultTestPayload) ) diff --git a/api/layer/versioning_test.go b/api/layer/versioning_test.go index e1132f5..72cd965 100644 --- a/api/layer/versioning_test.go +++ b/api/layer/versioning_test.go @@ -21,7 +21,7 @@ func (tc *testContext) putObject(content []byte) *data.ObjectInfo { extObjInfo, err := tc.layer.PutObject(tc.ctx, &PutObjectParams{ BktInfo: tc.bktInfo, Object: tc.obj, - Size: int64(len(content)), + Size: uint64(len(content)), Reader: bytes.NewReader(content), Header: make(map[string]string), }) diff --git a/api/notifications/controller.go b/api/notifications/controller.go index dca6acf..2909ab0 100644 --- a/api/notifications/controller.go +++ b/api/notifications/controller.go @@ -94,7 +94,7 @@ type ( Object struct { Key string `json:"key"` - Size int64 `json:"size,omitempty"` + Size uint64 `json:"size,omitempty"` VersionID string `json:"versionId,omitempty"` ETag string `json:"eTag,omitempty"` Sequencer string `json:"sequencer,omitempty"` diff --git a/pkg/service/tree/tree.go b/pkg/service/tree/tree.go index 97fb6d9..87de268 100644 --- a/pkg/service/tree/tree.go +++ b/pkg/service/tree/tree.go @@ -38,7 +38,7 @@ type ( ParentID uint64 ObjID oid.ID TimeStamp uint64 - Size int64 + Size uint64 Meta map[string]string } @@ -143,7 +143,7 @@ func newTreeNode(nodeInfo NodeResponse) (*treeNode, error) { case sizeKV: if sizeStr := string(kv.GetValue()); len(sizeStr) > 0 { var err error - if treeNode.Size, err = strconv.ParseInt(sizeStr, 10, 64); err != nil { + if treeNode.Size, err = strconv.ParseUint(sizeStr, 10, 64); err != nil { return nil, fmt.Errorf("invalid size value '%s': %w", sizeStr, err) } } @@ -261,7 +261,7 @@ func newPartInfo(node NodeResponse) (*data.PartInfo, error) { case etagKV: partInfo.ETag = value case sizeKV: - if partInfo.Size, err = strconv.ParseInt(value, 10, 64); err != nil { + if partInfo.Size, err = strconv.ParseUint(value, 10, 64); err != nil { return nil, fmt.Errorf("invalid part size: %w", err) } case createdKV: @@ -921,7 +921,7 @@ func (c *Tree) AddPart(ctx context.Context, bktInfo *data.BucketInfo, multipartN meta := map[string]string{ partNumberKV: strconv.Itoa(info.Number), oidKV: info.OID.EncodeToString(), - sizeKV: strconv.FormatInt(info.Size, 10), + sizeKV: strconv.FormatUint(info.Size, 10), createdKV: 
strconv.FormatInt(info.Created.UTC().UnixMilli(), 10), etagKV: info.ETag, } @@ -1057,7 +1057,7 @@ func (c *Tree) addVersion(ctx context.Context, bktInfo *data.BucketInfo, treeID } if version.Size > 0 { - meta[sizeKV] = strconv.FormatInt(version.Size, 10) + meta[sizeKV] = strconv.FormatUint(version.Size, 10) } if len(version.ETag) > 0 { meta[etagKV] = version.ETag -- 2.45.2 From 138102030041a0e60529c4ccd1c28dd09ffacde1 Mon Sep 17 00:00:00 2001 From: Denis Kirillov Date: Thu, 1 Jun 2023 16:47:29 +0300 Subject: [PATCH 3/4] [#125] Fix trailing whitespaces Signed-off-by: Denis Kirillov --- .forgejo/workflows/tests.yml | 4 ++-- docs/authmate.md | 4 ++-- docs/configuration.md | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.forgejo/workflows/tests.yml b/.forgejo/workflows/tests.yml index f64b816..c5e9d6b 100644 --- a/.forgejo/workflows/tests.yml +++ b/.forgejo/workflows/tests.yml @@ -29,7 +29,7 @@ jobs: uses: actions/setup-go@v3 with: go-version: '${{ matrix.go_versions }}' - + - name: Sync tree service run: make sync-tree @@ -37,4 +37,4 @@ jobs: run: make dep - name: Run tests - run: make test \ No newline at end of file + run: make test diff --git a/docs/authmate.md b/docs/authmate.md index e9cb8cc..42dc994 100644 --- a/docs/authmate.md +++ b/docs/authmate.md @@ -140,10 +140,10 @@ the secret. Format of `access_key_id`: `%cid0%oid`, where 0(zero) is a delimiter Creation of bearer tokens is mandatory. By default, bearer token will be created with `impersonate` flag and won't have eACL table. It means that gate which will use such token -to interact with node can have access to your private containers or to containers in which eACL grants access to you +to interact with node can have access to your private containers or to containers in which eACL grants access to you by public key. -Rules for a bearer token can be set via parameter `--bearer-rules` (json-string and file path allowed). +Rules for a bearer token can be set via parameter `--bearer-rules` (json-string and file path allowed). But you must provide `--disable-impersonate` flag: ```shell diff --git a/docs/configuration.md b/docs/configuration.md index 36724ee..4ffc0bb 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -502,7 +502,7 @@ prometheus: # `frostfs` section Contains parameters of requests to FrostFS. -This value can be overridden with `X-Amz-Meta-Frostfs-Copies-Number` (value is comma separated numbers: `1,2,3`) +This value can be overridden with `X-Amz-Meta-Frostfs-Copies-Number` (value is comma separated numbers: `1,2,3`) header for `PutObject`, `CopyObject`, `CreateMultipartUpload`. ```yaml -- 2.45.2 From 0b82bd51d3bb2f7e6dbcc9cd0e37708002f8d537 Mon Sep 17 00:00:00 2001 From: Denis Kirillov Date: Thu, 1 Jun 2023 17:11:04 +0300 Subject: [PATCH 4/4] [#125] Update CHANGELOG.md Signed-off-by: Denis Kirillov --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 498328c..0312c15 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,8 @@ This document outlines major changes between releases. - Get empty bucket CORS from frostfs (TrueCloudLab#36) - Don't count pool error on client abort (#35) - Don't create unnecessary delete-markers (#83) +- Handle negative `Content-Length` on put (#125) +- Use `DisableURIPathEscaping` to presign urls (#125) ### Added - Reload default and custom copies numbers on SIGHUP (#104) -- 2.45.2
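Editor's note: for reference, a minimal usage sketch of the `auth.PresignRequest` helper introduced in patch 1/4, mirroring what the updated `s3-authmate` command does. This is an illustration only: the endpoint, region, bucket, object name, and credentials below are placeholders, not values taken from this change set.

```go
package main

import (
	"fmt"
	"time"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth"
	"github.com/aws/aws-sdk-go/aws/credentials"
)

func main() {
	// Placeholder credentials; in authmate they come from the obtained secret.
	awsCreds := credentials.NewStaticCredentials("<access-key-id>", "<secret-key>", "")

	reqData := auth.RequestData{
		Method:   "GET",
		Endpoint: "http://localhost:8084", // S3 gateway endpoint (placeholder)
		Bucket:   "my-bucket",
		Object:   "@obj/name", // escaped by PresignRequest itself via rest.EscapePath
	}
	presignData := auth.PresignData{
		Service:  "s3",
		Region:   "us-east-1",
		Lifetime: 10 * time.Minute,
		SignTime: time.Now().UTC(),
	}

	req, err := auth.PresignRequest(awsCreds, reqData, presignData)
	if err != nil {
		panic(err)
	}

	// The resulting URL carries the X-Amz-* query parameters and can be used
	// without AWS credentials until the lifetime expires.
	fmt.Println(req.URL.String())
}
```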