Fix GitLab integration #125

Merged
alexvanin merged 4 commits from dkirillov/frostfs-s3-gw:bugfix/content-length-presigned-url into master 2023-06-02 14:16:21 +00:00
29 changed files with 265 additions and 87 deletions

View file

@@ -29,7 +29,7 @@ jobs:
       uses: actions/setup-go@v3
       with:
         go-version: '${{ matrix.go_versions }}'
     - name: Sync tree service
       run: make sync-tree
@@ -37,4 +37,4 @@ jobs:
       run: make dep
     - name: Run tests
       run: make test

View file

@@ -9,6 +9,8 @@ This document outlines major changes between releases.
 - Get empty bucket CORS from frostfs (TrueCloudLab#36)
 - Don't count pool error on client abort (#35)
 - Don't create unnecessary delete-markers (#83)
+- Handle negative `Content-Length` on put (#125)
+- Use `DisableURIPathEscaping` to presign urls (#125)

 ### Added
 - Reload default and custom copies numbers on SIGHUP (#104)

View file

@@ -291,6 +291,7 @@ func cloneRequest(r *http.Request, authHeader *authHeader) *http.Request {
 func (c *center) checkSign(authHeader *authHeader, box *accessbox.Box, request *http.Request, signatureDateTime time.Time) error {
     awsCreds := credentials.NewStaticCredentials(authHeader.AccessKeyID, box.Gate.AccessKey, "")
     signer := v4.NewSigner(awsCreds)
+    signer.DisableURIPathEscaping = true
alexvanin marked this conversation as resolved
Review

Is it compatible with `aws s3 presign`?
Review

Yes, `aws presign` generates a URL like the one that we couldn't handle in the GitLab integration.
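
For context, a minimal sketch (not the gate's code) of why the flag matters, using the upstream `github.com/aws/aws-sdk-go` v4 signer; the endpoint, credentials, and region are placeholders. With URI path escaping enabled the signer percent-encodes characters such as `@` into the canonical request, so a signature computed over the raw path — which is how the AWS CLI signs S3 requests — no longer verifies:

```go
package main

import (
	"fmt"
	"net/http"
	"time"

	"github.com/aws/aws-sdk-go/aws/credentials"
	v4 "github.com/aws/aws-sdk-go/aws/signer/v4"
)

// presign signs a GET for an object key containing "@" and returns the query signature.
func presign(disableEscaping bool, signTime time.Time) string {
	signer := v4.NewSigner(credentials.NewStaticCredentials("AKID", "SECRET", ""))
	signer.DisableURIPathEscaping = disableEscaping

	req, _ := http.NewRequest(http.MethodGet, "http://localhost:8084/bucket/@obj/name", nil)
	if _, err := signer.Presign(req, nil, "s3", "us-east-1", 10*time.Minute, signTime); err != nil {
		panic(err)
	}

	return req.URL.Query().Get("X-Amz-Signature")
}

func main() {
	t := time.Date(2023, 6, 1, 0, 0, 0, 0, time.UTC)
	// With escaping the canonical path becomes /bucket/%40obj/name, so the signatures differ.
	fmt.Println(presign(false, t) == presign(true, t)) // false
}
```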
     var signature string

     if authHeader.IsPresigned {
@@ -306,7 +307,6 @@ func (c *center) checkSign(authHeader *authHeader, box *accessbox.Box, request *
         }
         signature = request.URL.Query().Get(AmzSignature)
     } else {
-        signer.DisableURIPathEscaping = true
         if _, err := signer.Sign(request, nil, authHeader.Service, authHeader.Region, signatureDateTime); err != nil {
             return fmt.Errorf("failed to sign temporary HTTP request: %w", err)
         }

api/auth/presign.go Normal file
View file

@@ -0,0 +1,46 @@
package auth

import (
	"fmt"
	"net/http"
	"strings"
	"time"

	v4 "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/private/protocol/rest"
)

type RequestData struct {
	Method   string
	Endpoint string
	Bucket   string
	Object   string
}

type PresignData struct {
	Service  string
	Region   string
	Lifetime time.Duration
	SignTime time.Time
}

// PresignRequest forms a pre-signed request to access objects without aws credentials.
func PresignRequest(creds *credentials.Credentials, reqData RequestData, presignData PresignData) (*http.Request, error) {
	urlStr := fmt.Sprintf("%s/%s/%s", reqData.Endpoint, rest.EscapePath(reqData.Bucket, false), rest.EscapePath(reqData.Object, false))
	req, err := http.NewRequest(strings.ToUpper(reqData.Method), urlStr, nil)
	if err != nil {
		return nil, fmt.Errorf("failed to create new request: %w", err)
	}

	req.Header.Set(AmzDate, presignData.SignTime.Format("20060102T150405Z"))

	signer := v4.NewSigner(creds)
	signer.DisableURIPathEscaping = true

	if _, err = signer.Presign(req, nil, presignData.Service, presignData.Region, presignData.Lifetime, presignData.SignTime); err != nil {
		return nil, fmt.Errorf("presign: %w", err)
	}

	return req, nil
}
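
A usage sketch of the new helper; the endpoint, credentials, and region here are placeholders (the same shapes appear in the test below and in authmate):

```go
package main

import (
	"fmt"
	"time"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth"
	"github.com/aws/aws-sdk-go/aws/credentials"
)

func main() {
	// Placeholder credentials: the S3 access key ID and the gate secret access key.
	creds := credentials.NewStaticCredentials("accessKeyID", "secretKey", "")

	req, err := auth.PresignRequest(creds,
		auth.RequestData{
			Method:   "GET",
			Endpoint: "http://localhost:8084",
			Bucket:   "my-bucket",
			Object:   "@obj/name", // keys with special characters are signed unescaped
		},
		auth.PresignData{
			Service:  "s3",
			Region:   "us-east-1",
			Lifetime: 10 * time.Minute,
			SignTime: time.Now().UTC(),
		})
	if err != nil {
		panic(err)
	}

	fmt.Println(req.URL.String()) // ready-to-share presigned URL
}
```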

api/auth/presign_test.go Normal file
View file

@@ -0,0 +1,91 @@
package auth

import (
	"context"
	"strings"
	"testing"
	"time"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/tokens"
	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
	"github.com/stretchr/testify/require"
)

var _ tokens.Credentials = (*credentialsMock)(nil)

type credentialsMock struct {
	boxes map[string]*accessbox.Box
}

func newTokensFrostfsMock() *credentialsMock {
	return &credentialsMock{
		boxes: make(map[string]*accessbox.Box),
	}
}

func (m credentialsMock) addBox(addr oid.Address, box *accessbox.Box) {
	m.boxes[addr.String()] = box
}

func (m credentialsMock) GetBox(_ context.Context, addr oid.Address) (*accessbox.Box, error) {
	box, ok := m.boxes[addr.String()]
	if !ok {
		return nil, apistatus.ObjectNotFound{}
	}

	return box, nil
}

func (m credentialsMock) Put(context.Context, cid.ID, user.ID, *accessbox.AccessBox, uint64, ...*keys.PublicKey) (oid.Address, error) {
	return oid.Address{}, nil
}

func TestCheckSign(t *testing.T) {
	var accessKeyAddr oid.Address
	err := accessKeyAddr.DecodeString("8N7CYBY74kxZXoyvA5UNdmovaXqFpwNfvEPsqaN81es2/3tDwq5tR8fByrJcyJwyiuYX7Dae8tyDT7pd8oaL1MBto")
	require.NoError(t, err)

	accessKeyID := strings.ReplaceAll(accessKeyAddr.String(), "/", "0")
	secretKey := "713d0a0b9efc7d22923e17b0402a6a89b4273bc711c8bacb2da1b643d0006aeb"
	awsCreds := credentials.NewStaticCredentials(accessKeyID, secretKey, "")

	reqData := RequestData{
		Method:   "GET",
		Endpoint: "http://localhost:8084",
		Bucket:   "my-bucket",
		Object:   "@obj/name",
	}
	presignData := PresignData{
		Service:  "s3",
		Region:   "spb",
		Lifetime: 10 * time.Minute,
		SignTime: time.Now().UTC(),
	}

	req, err := PresignRequest(awsCreds, reqData, presignData)
	require.NoError(t, err)

	expBox := &accessbox.Box{
		Gate: &accessbox.GateData{
			AccessKey: secretKey,
		},
	}

	mock := newTokensFrostfsMock()
	mock.addBox(accessKeyAddr, expBox)

	c := &center{
		cli:     mock,
		reg:     NewRegexpMatcher(authorizationFieldRegexp),
		postReg: NewRegexpMatcher(postPolicyCredentialRegexp),
	}
	box, err := c.Authenticate(req)
	require.NoError(t, err)
	require.EqualValues(t, expBox, box.AccessBox)
}

View file

@@ -40,7 +40,7 @@ type (
         Bucket      string
         Name        string
-        Size        int64
+        Size        uint64
         ContentType string
         Created     time.Time
         HashSum     string
@@ -52,7 +52,7 @@ type (
     NotificationInfo struct {
         Name    string
         Version string
-        Size    int64
+        Size    uint64
         HashSum string
     }

View file

@@ -53,7 +53,7 @@ type BaseNodeVersion struct {
     ParenID   uint64
     OID       oid.ID
     Timestamp uint64
-    Size      int64
+    Size      uint64
     ETag      string
     FilePath  string
 }
@@ -83,14 +83,14 @@ type PartInfo struct {
     UploadID string
     Number   int
     OID      oid.ID
-    Size     int64
+    Size     uint64
     ETag     string
     Created  time.Time
 }

 // ToHeaderString form short part representation to use in S3-Completed-Parts header.
 func (p *PartInfo) ToHeaderString() string {
-    return strconv.Itoa(p.Number) + "-" + strconv.FormatInt(p.Size, 10) + "-" + p.ETag
+    return strconv.Itoa(p.Number) + "-" + strconv.FormatUint(p.Size, 10) + "-" + p.ETag
 }

 // LockInfo is lock information to create appropriate tree node.

View file

@@ -17,7 +17,7 @@ type (
     GetObjectAttributesResponse struct {
         ETag         string       `xml:"ETag,omitempty"`
         Checksum     *Checksum    `xml:"Checksum,omitempty"`
-        ObjectSize   int64        `xml:"ObjectSize,omitempty"`
+        ObjectSize   uint64       `xml:"ObjectSize,omitempty"`
         StorageClass string       `xml:"StorageClass,omitempty"`
         ObjectParts  *ObjectParts `xml:"ObjectParts,omitempty"`
     }

View file

@@ -88,7 +88,7 @@ func writeHeaders(h http.Header, requestHeader http.Header, extendedInfo *data.E
         h.Set(api.ContentLength, info.Headers[layer.AttributeDecryptedSize])
         addSSECHeaders(h, requestHeader)
     } else {
-        h.Set(api.ContentLength, strconv.FormatInt(info.Size, 10))
+        h.Set(api.ContentLength, strconv.FormatUint(info.Size, 10))
     }

     h.Set(api.ETag, info.HashSum)
@@ -163,13 +163,13 @@ func (h *handler) GetObjectHandler(w http.ResponseWriter, r *http.Request) {
     fullSize := info.Size
     if encryptionParams.Enabled() {
-        if fullSize, err = strconv.ParseInt(info.Headers[layer.AttributeDecryptedSize], 10, 64); err != nil {
+        if fullSize, err = strconv.ParseUint(info.Headers[layer.AttributeDecryptedSize], 10, 64); err != nil {
             h.logAndSendError(w, "invalid decrypted size header", reqInfo, errors.GetAPIError(errors.ErrBadRequest))
             return
         }
     }

-    if params, err = fetchRangeHeader(r.Header, uint64(fullSize)); err != nil {
+    if params, err = fetchRangeHeader(r.Header, fullSize); err != nil {
         h.logAndSendError(w, "could not parse range header", reqInfo, err)
         return
     }
@@ -268,7 +268,7 @@ func parseHTTPTime(data string) (*time.Time, error) {
     return &result, nil
 }

-func writeRangeHeaders(w http.ResponseWriter, params *layer.RangeParams, size int64) {
+func writeRangeHeaders(w http.ResponseWriter, params *layer.RangeParams, size uint64) {
     w.Header().Set(api.AcceptRanges, "bytes")
     w.Header().Set(api.ContentRange, fmt.Sprintf("bytes %d-%d/%d", params.Start, params.End, size))
     w.Header().Set(api.ContentLength, strconv.FormatUint(params.End-params.Start+1, 10))

View file

@@ -190,7 +190,7 @@ func createTestObject(hc *handlerContext, bktInfo *data.BucketInfo, objName stri
     extObjInfo, err := hc.Layer().PutObject(hc.Context(), &layer.PutObjectParams{
         BktInfo: bktInfo,
         Object:  objName,
-        Size:    int64(len(content)),
+        Size:    uint64(len(content)),
         Reader:  bytes.NewReader(content),
         Header:  header,
     })

View file

@@ -13,8 +13,8 @@ import (
 const sizeToDetectType = 512

-func getRangeToDetectContentType(maxSize int64) *layer.RangeParams {
-    end := uint64(maxSize)
+func getRangeToDetectContentType(maxSize uint64) *layer.RangeParams {
+    end := maxSize
     if sizeToDetectType < end {
         end = sizeToDetectType
     }

View file

@@ -216,6 +216,11 @@ func (h *handler) UploadPartHandler(w http.ResponseWriter, r *http.Request) {
         return
     }

+    var size uint64
+    if r.ContentLength > 0 {
+        size = uint64(r.ContentLength)
+    }
+
     p := &layer.UploadPartParams{
         Info: &layer.UploadInfoParams{
             UploadID: uploadID,
@@ -223,7 +228,7 @@ func (h *handler) UploadPartHandler(w http.ResponseWriter, r *http.Request) {
             Key:      reqInfo.ObjectName,
         },
         PartNumber: partNumber,
-        Size:       r.ContentLength,
+        Size:       size,
         Reader:     r.Body,
     }
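
The clamp above guards against `r.ContentLength == -1`, which `net/http` reports when the request length is unknown (e.g. chunked transfer encoding); casting that straight to `uint64` would yield a huge bogus size. A self-contained sketch of the behavior (server and request are throwaway fixtures, not gate code):

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
	"strings"
)

func main() {
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		var size uint64
		if r.ContentLength > 0 { // -1 means "unknown", e.g. chunked encoding
			size = uint64(r.ContentLength)
		}
		n, _ := io.Copy(io.Discard, r.Body) // the real size is whatever was streamed
		fmt.Printf("declared=%d clamped=%d streamed=%d\n", r.ContentLength, size, n)
	}))
	defer srv.Close()

	req, _ := http.NewRequest(http.MethodPut, srv.URL, strings.NewReader("content"))
	req.ContentLength = -1 // force chunked transfer encoding, no Content-Length header
	if _, err := http.DefaultClient.Do(req); err != nil {
		panic(err)
	}
	// Output: declared=-1 clamped=0 streamed=7
}
```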

View file

@@ -43,13 +43,13 @@ func (p *postPolicy) condition(key string) *policyCondition {
     return nil
 }

-func (p *postPolicy) CheckContentLength(size int64) bool {
+func (p *postPolicy) CheckContentLength(size uint64) bool {
     if p.empty {
         return true
     }

     for _, condition := range p.Conditions {
         if condition.Matching == "content-length-range" {
-            length := strconv.FormatInt(size, 10)
+            length := strconv.FormatUint(size, 10)
             return condition.Key <= length && length <= condition.Value
         }
     }
@@ -218,11 +218,16 @@ func (h *handler) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
         return
     }

+    var size uint64
+    if r.ContentLength > 0 {
+        size = uint64(r.ContentLength)
+    }
+
     params := &layer.PutObjectParams{
         BktInfo:    bktInfo,
         Object:     reqInfo.ObjectName,
         Reader:     r.Body,
-        Size:       r.ContentLength,
+        Size:       size,
         Header:     metadata,
         Encryption: encryptionParams,
     }
@@ -388,10 +393,10 @@ func (h *handler) PostObject(w http.ResponseWriter, r *http.Request) {
     }

     var contentReader io.Reader
-    var size int64
+    var size uint64
     if content, ok := r.MultipartForm.Value["file"]; ok {
         contentReader = bytes.NewBufferString(content[0])
-        size = int64(len(content[0]))
+        size = uint64(len(content[0]))
     } else {
         file, head, err := r.FormFile("file")
         if err != nil {
@@ -399,7 +404,7 @@ func (h *handler) PostObject(w http.ResponseWriter, r *http.Request) {
             return
         }
         contentReader = file
-        size = head.Size
+        size = uint64(head.Size)
         reqInfo.ObjectName = strings.ReplaceAll(reqInfo.ObjectName, "${filename}", head.Filename)
     }

     if !policy.CheckContentLength(size) {

View file

@@ -1,9 +1,11 @@
 package handler

 import (
+    "bytes"
     "encoding/json"
     "mime/multipart"
     "net/http"
+    "strconv"
     "strings"
     "testing"
     "time"
@@ -126,3 +128,21 @@
     require.NoError(t, err)
     require.Equal(t, "1", objInfo.Headers[layer.AttributeFrostfsCopiesNumber])
 }
+
+func TestPutObjectWithNegativeContentLength(t *testing.T) {
+    tc := prepareHandlerContext(t)
+
+    bktName, objName := "bucket-for-put", "object-for-put"
+    createTestBucket(tc, bktName)
+
+    content := []byte("content")
+    w, r := prepareTestPayloadRequest(tc, bktName, objName, bytes.NewReader(content))
+    r.ContentLength = -1
+    tc.Handler().PutObjectHandler(w, r)
+    assertStatus(t, w, http.StatusOK)
+
+    w, r = prepareTestRequest(tc, bktName, objName, nil)
+    tc.Handler().HeadObjectHandler(w, r)
+    assertStatus(t, w, http.StatusOK)
+    require.Equal(t, strconv.Itoa(len(content)), w.Header().Get(api.ContentLength))
+}

View file

@@ -104,7 +104,7 @@ type Object struct {
     Key          string
     LastModified string // time string of format "2006-01-02T15:04:05.000Z"
     ETag         string `xml:"ETag,omitempty"`
-    Size         int64
+    Size         uint64

     // Owner of the object.
     Owner *Owner `xml:"Owner,omitempty"`
@@ -120,7 +120,7 @@ type ObjectVersionResponse struct {
     Key          string `xml:"Key"`
     LastModified string `xml:"LastModified"`
     Owner        Owner  `xml:"Owner"`
-    Size         int64  `xml:"Size"`
+    Size         uint64 `xml:"Size"`
     StorageClass string `xml:"StorageClass,omitempty"` // is empty!!
     VersionID    string `xml:"VersionId"`
 }

View file

@@ -45,7 +45,7 @@ func (n *layer) PutBucketCORS(ctx context.Context, p *PutCORSParams) error {
         CopiesNumber: p.CopiesNumbers,
     }

-    objID, _, err := n.objectPutAndHash(ctx, prm, p.BktInfo)
+    _, objID, _, err := n.objectPutAndHash(ctx, prm, p.BktInfo)
     if err != nil {
         return fmt.Errorf("put system object: %w", err)
     }

View file

@@ -102,7 +102,7 @@ type (
     PutObjectParams struct {
         BktInfo *data.BucketInfo
         Object  string
-        Size    int64
+        Size    uint64
         Reader  io.Reader
         Header  map[string]string
         Lock    *data.ObjectLock
@@ -135,7 +135,7 @@ type (
         ScrBktInfo *data.BucketInfo
         DstBktInfo *data.BucketInfo
         DstObject  string
-        SrcSize    int64
+        SrcSize    uint64
         Header     map[string]string
         Range      *RangeParams
         Lock       *data.ObjectLock

View file

@@ -60,7 +60,7 @@ type (
     UploadPartParams struct {
         Info       *UploadInfoParams
         PartNumber int
-        Size       int64
+        Size       uint64
         Reader     io.Reader
     }
@@ -91,7 +91,7 @@ type (
         ETag         string
         LastModified string
         PartNumber   int
-        Size         int64
+        Size         uint64
     }

     ListMultipartUploadsParams struct {
@@ -212,22 +212,25 @@ func (n *layer) uploadPart(ctx context.Context, multipartInfo *data.MultipartInf
     decSize := p.Size
     if p.Info.Encryption.Enabled() {
-        r, encSize, err := encryptionReader(p.Reader, uint64(p.Size), p.Info.Encryption.Key())
+        r, encSize, err := encryptionReader(p.Reader, p.Size, p.Info.Encryption.Key())
         if err != nil {
             return nil, fmt.Errorf("failed to create ecnrypted reader: %w", err)
         }
-        prm.Attributes = append(prm.Attributes, [2]string{AttributeDecryptedSize, strconv.FormatInt(p.Size, 10)})
+        prm.Attributes = append(prm.Attributes, [2]string{AttributeDecryptedSize, strconv.FormatUint(p.Size, 10)})
         prm.Payload = r
-        p.Size = int64(encSize)
+        p.Size = encSize
     }

     prm.Attributes[0][0], prm.Attributes[0][1] = UploadIDAttributeName, p.Info.UploadID
     prm.Attributes[1][0], prm.Attributes[1][1] = UploadPartNumberAttributeName, strconv.Itoa(p.PartNumber)

-    id, hash, err := n.objectPutAndHash(ctx, prm, bktInfo)
+    size, id, hash, err := n.objectPutAndHash(ctx, prm, bktInfo)
     if err != nil {
         return nil, err
     }
+    if p.Info.Encryption.Enabled() {
+        size = decSize
alexvanin marked this conversation as resolved
Review

Can you provide some examples why `size` depends on `p.Info.Encryption.Enabled()`? Can't we just always use `size` from the line above?
Review

The encrypted payload can differ from the plain one, so we want to know the real size (for HEAD requests, for example); that's why we store the real size in the object attributes and in the tree node attribute.
Review

Makes sense, thanks.
+    }

     reqInfo := api.GetReqInfo(ctx)
     n.log.Debug("upload part",
@@ -241,7 +244,7 @@ func (n *layer) uploadPart(ctx context.Context, multipartInfo *data.MultipartInf
         UploadID: p.Info.UploadID,
         Number:   p.PartNumber,
         OID:      id,
-        Size:     decSize,
+        Size:     size,
         ETag:     hex.EncodeToString(hash),
         Created:  prm.CreationTime,
     }
@@ -285,8 +288,8 @@ func (n *layer) UploadPartCopy(ctx context.Context, p *UploadCopyParams) (*data.
     size := p.SrcObjInfo.Size
     if p.Range != nil {
-        size = int64(p.Range.End - p.Range.Start + 1)
-        if p.Range.End > uint64(p.SrcObjInfo.Size) {
+        size = p.Range.End - p.Range.Start + 1
+        if p.Range.End > p.SrcObjInfo.Size {
             return nil, errors.GetAPIError(errors.ErrInvalidCopyPartRangeSource)
         }
     }
@@ -375,7 +378,7 @@ func (n *layer) CompleteMultipartUpload(ctx context.Context, p *CompleteMultipar
         return nil, nil, errors.GetAPIError(errors.ErrInvalidPart)
     }

-    var multipartObjetSize int64
+    var multipartObjetSize uint64
     var encMultipartObjectSize uint64

     parts := make([]*data.PartInfo, 0, len(p.Parts))
@@ -393,7 +396,7 @@ func (n *layer) CompleteMultipartUpload(ctx context.Context, p *CompleteMultipar
         multipartObjetSize += partInfo.Size // even if encryption is enabled size is actual (decrypted)

         if encInfo.Enabled {
-            encPartSize, err := sio.EncryptedSize(uint64(partInfo.Size))
+            encPartSize, err := sio.EncryptedSize(partInfo.Size)
             if err != nil {
                 return nil, nil, fmt.Errorf("compute encrypted size: %w", err)
             }
@@ -430,8 +433,8 @@ func (n *layer) CompleteMultipartUpload(ctx context.Context, p *CompleteMultipar
         initMetadata[AttributeEncryptionAlgorithm] = encInfo.Algorithm
         initMetadata[AttributeHMACKey] = encInfo.HMACKey
         initMetadata[AttributeHMACSalt] = encInfo.HMACSalt
-        initMetadata[AttributeDecryptedSize] = strconv.FormatInt(multipartObjetSize, 10)
-        multipartObjetSize = int64(encMultipartObjectSize)
+        initMetadata[AttributeDecryptedSize] = strconv.FormatUint(multipartObjetSize, 10)
+        multipartObjetSize = encMultipartObjectSize
     }

     r := &multiObjectReader{
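
Following up on the thread above: the encrypted payload written to FrostFS is strictly larger than the plain one (`github.com/minio/sio` adds per-package DARE overhead), which is why both sizes have to be tracked. A small sketch of the relationship:

```go
package main

import (
	"fmt"

	"github.com/minio/sio"
)

func main() {
	for _, plain := range []uint64{1, 64 * 1024, 5 * 1024 * 1024} {
		enc, err := sio.EncryptedSize(plain)
		if err != nil {
			panic(err)
		}
		// Each 64 KiB package carries a constant header/tag overhead.
		fmt.Printf("plain=%d encrypted=%d overhead=%d\n", plain, enc, enc-plain)
	}
}
```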

View file

@@ -34,7 +34,7 @@ func (n *layer) PutBucketNotificationConfiguration(ctx context.Context, p *PutBu
         CopiesNumber: p.CopiesNumbers,
     }

-    objID, _, err := n.objectPutAndHash(ctx, prm, p.BktInfo)
+    _, objID, _, err := n.objectPutAndHash(ctx, prm, p.BktInfo)
     if err != nil {
         return err
     }

View file

@@ -170,7 +170,7 @@ func ParseCompletedPartHeader(hdr string) (*Part, error) {
     if err != nil {
         return nil, fmt.Errorf("invalid completed part number '%s': %w", partInfo[0], err)
     }

-    size, err := strconv.Atoi(partInfo[1])
+    size, err := strconv.ParseUint(partInfo[1], 10, 64)
     if err != nil {
         return nil, fmt.Errorf("invalid completed part size '%s': %w", partInfo[1], err)
     }
@@ -178,7 +178,7 @@ func ParseCompletedPartHeader(hdr string) (*Part, error) {
     return &Part{
         ETag:       partInfo[2],
         PartNumber: num,
-        Size:       int64(size),
+        Size:       size,
     }, nil
 }
@@ -191,26 +191,18 @@ func (n *layer) PutObject(ctx context.Context, p *PutObjectParams) (*data.Extend
         return nil, fmt.Errorf("couldn't get versioning settings object: %w", err)
     }

-    newVersion := &data.NodeVersion{
-        BaseNodeVersion: data.BaseNodeVersion{
-            FilePath: p.Object,
-            Size:     p.Size,
-        },
-        IsUnversioned: !bktSettings.VersioningEnabled(),
-    }
-
     r := p.Reader
     if p.Encryption.Enabled() {
-        p.Header[AttributeDecryptedSize] = strconv.FormatInt(p.Size, 10)
+        p.Header[AttributeDecryptedSize] = strconv.FormatUint(p.Size, 10)
         if err = addEncryptionHeaders(p.Header, p.Encryption); err != nil {
             return nil, fmt.Errorf("add encryption header: %w", err)
         }

         var encSize uint64
-        if r, encSize, err = encryptionReader(p.Reader, uint64(p.Size), p.Encryption.Key()); err != nil {
+        if r, encSize, err = encryptionReader(p.Reader, p.Size, p.Encryption.Key()); err != nil {
             return nil, fmt.Errorf("create encrypter: %w", err)
         }
-        p.Size = int64(encSize)
+        p.Size = encSize
     }

     if r != nil {
@@ -230,7 +222,7 @@ func (n *layer) PutObject(ctx context.Context, p *PutObjectParams) (*data.Extend
     prm := PrmObjectCreate{
         Container:    p.BktInfo.CID,
         Creator:      owner,
-        PayloadSize:  uint64(p.Size),
+        PayloadSize:  p.Size,
         Filepath:     p.Object,
         Payload:      r,
         CreationTime: TimeNow(ctx),
@@ -243,7 +235,7 @@ func (n *layer) PutObject(ctx context.Context, p *PutObjectParams) (*data.Extend
         prm.Attributes = append(prm.Attributes, [2]string{k, v})
     }

-    id, hash, err := n.objectPutAndHash(ctx, prm, p.BktInfo)
+    size, id, hash, err := n.objectPutAndHash(ctx, prm, p.BktInfo)
     if err != nil {
         return nil, err
     }
@@ -254,8 +246,16 @@ func (n *layer) PutObject(ctx context.Context, p *PutObjectParams) (*data.Extend
         zap.String("bucket", p.BktInfo.Name), zap.Stringer("cid", p.BktInfo.CID),
         zap.String("object", p.Object), zap.Stringer("oid", id))

-    newVersion.OID = id
-    newVersion.ETag = hex.EncodeToString(hash)
+    newVersion := &data.NodeVersion{
+        BaseNodeVersion: data.BaseNodeVersion{
+            OID:      id,
+            ETag:     hex.EncodeToString(hash),
+            FilePath: p.Object,
+            Size:     size,
+        },
+        IsUnversioned: !bktSettings.VersioningEnabled(),
+    }
+
     if newVersion.ID, err = n.treeService.AddVersion(ctx, p.BktInfo, newVersion); err != nil {
         return nil, fmt.Errorf("couldn't add new verion to tree service: %w", err)
     }
@@ -286,7 +286,7 @@ func (n *layer) PutObject(ctx context.Context, p *PutObjectParams) (*data.Extend
         Owner:       owner,
         Bucket:      p.BktInfo.Name,
         Name:        p.Object,
-        Size:        p.Size,
+        Size:        size,
         Created:     prm.CreationTime,
         Headers:     p.Header,
         ContentType: p.Header[api.ContentType],
@@ -405,17 +405,19 @@ func (n *layer) objectDelete(ctx context.Context, bktInfo *data.BucketInfo, idOb

 // objectPutAndHash prepare auth parameters and invoke frostfs.CreateObject.
 // Returns object ID and payload sha256 hash.
-func (n *layer) objectPutAndHash(ctx context.Context, prm PrmObjectCreate, bktInfo *data.BucketInfo) (oid.ID, []byte, error) {
+func (n *layer) objectPutAndHash(ctx context.Context, prm PrmObjectCreate, bktInfo *data.BucketInfo) (uint64, oid.ID, []byte, error) {
     n.prepareAuthParameters(ctx, &prm.PrmAuth, bktInfo.Owner)
+    var size uint64
     hash := sha256.New()
     prm.Payload = wrapReader(prm.Payload, 64*1024, func(buf []byte) {
+        size += uint64(len(buf))
         hash.Write(buf)
     })

     id, err := n.frostFS.CreateObject(ctx, prm)
     if err != nil {
-        return oid.ID{}, nil, err
+        return 0, oid.ID{}, nil, err
     }

-    return id, hash.Sum(nil), nil
+    return size, id, hash.Sum(nil), nil
 }

 // ListObjectsV1 returns objects in a bucket for requests of Version 1.
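
Since handlers may now pass `Size == 0` when `Content-Length` is unknown, the authoritative size comes from counting the bytes that actually stream through the payload wrapper. A standalone sketch of that pattern (the gate's real `wrapReader` helper may differ in detail):

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"strings"
)

// countingReader invokes cb on every chunk read, mirroring the
// wrapReader(payload, 64*1024, cb) call in objectPutAndHash.
type countingReader struct {
	r  io.Reader
	cb func([]byte)
}

func (c *countingReader) Read(p []byte) (int, error) {
	n, err := c.r.Read(p)
	if n > 0 {
		c.cb(p[:n])
	}
	return n, err
}

func main() {
	var size uint64
	hash := sha256.New()

	r := &countingReader{r: strings.NewReader("payload"), cb: func(buf []byte) {
		size += uint64(len(buf)) // size observed on the wire, not the declared one
		hash.Write(buf)
	}}

	if _, err := io.Copy(io.Discard, r); err != nil {
		panic(err)
	}

	fmt.Println(size, hex.EncodeToString(hash.Sum(nil)))
}
```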

View file

@@ -126,7 +126,7 @@ func (n *layer) putLockObject(ctx context.Context, bktInfo *data.BucketInfo, obj
         return oid.ID{}, err
     }

-    id, _, err := n.objectPutAndHash(ctx, prm, bktInfo)
+    _, id, _, err := n.objectPutAndHash(ctx, prm, bktInfo)
     return id, err
 }

View file

@@ -94,7 +94,7 @@ func objectInfoFromMeta(bkt *data.BucketInfo, meta *object.Object) *data.ObjectI
         ContentType: mimeType,
         Headers:     headers,
         Owner:       *meta.OwnerID(),
-        Size:        int64(meta.PayloadSize()),
+        Size:        meta.PayloadSize(),
         HashSum:     hex.EncodeToString(payloadChecksum.Value()),
     }
 }

View file

@@ -17,7 +17,7 @@ import (
 var (
     defaultTestCreated       = time.Now()
     defaultTestPayload       = []byte("test object payload")
-    defaultTestPayloadLength = int64(len(defaultTestPayload))
+    defaultTestPayloadLength = uint64(len(defaultTestPayload))
     defaultTestContentType   = http.DetectContentType(defaultTestPayload)
 )

View file

@@ -21,7 +21,7 @@ func (tc *testContext) putObject(content []byte) *data.ObjectInfo {
     extObjInfo, err := tc.layer.PutObject(tc.ctx, &PutObjectParams{
         BktInfo: tc.bktInfo,
         Object:  tc.obj,
-        Size:    int64(len(content)),
+        Size:    uint64(len(content)),
         Reader:  bytes.NewReader(content),
         Header:  make(map[string]string),
     })

View file

@@ -94,7 +94,7 @@ type (
     Object struct {
         Key       string `json:"key"`
-        Size      int64  `json:"size,omitempty"`
+        Size      uint64 `json:"size,omitempty"`
         VersionID string `json:"versionId,omitempty"`
         ETag      string `json:"eTag,omitempty"`
         Sequencer string `json:"sequencer,omitempty"`

View file

@@ -5,7 +5,6 @@ import (
     "crypto/ecdsa"
     "encoding/json"
     "fmt"
-    "net/http"
     "os"
     "os/signal"
     "runtime"
@@ -14,6 +13,7 @@ import (
     "time"

     "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
+    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth"
     "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/authmate"
     "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs"
     "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/version"
@@ -23,7 +23,6 @@ import (
     "github.com/aws/aws-sdk-go/aws"
     "github.com/aws/aws-sdk-go/aws/credentials"
     "github.com/aws/aws-sdk-go/aws/session"
-    v4 "github.com/aws/aws-sdk-go/aws/signer/v4"
     "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
     "github.com/spf13/viper"
     "github.com/urfave/cli/v2"
@@ -482,17 +481,22 @@ It will be ceil rounded to the nearest amount of epoch.`,
         return fmt.Errorf("couldn't get credentials: %w", err)
     }

-    signer := v4.NewSigner(sess.Config.Credentials)
-    req, err := http.NewRequest(strings.ToUpper(methodFlag), fmt.Sprintf("%s/%s/%s", endpointFlag, bucketFlag, objectFlag), nil)
-    if err != nil {
-        return fmt.Errorf("failed to create new request: %w", err)
-    }
-
-    date := time.Now().UTC()
-    req.Header.Set(api.AmzDate, date.Format("20060102T150405Z"))
-
-    if _, err = signer.Presign(req, nil, "s3", *sess.Config.Region, lifetimeFlag, date); err != nil {
-        return fmt.Errorf("presign: %w", err)
+    reqData := auth.RequestData{
+        Method:   methodFlag,
+        Endpoint: endpointFlag,
+        Bucket:   bucketFlag,
+        Object:   objectFlag,
+    }
+    presignData := auth.PresignData{
+        Service:  "s3",
+        Region:   *sess.Config.Region,
+        Lifetime: lifetimeFlag,
+        SignTime: time.Now().UTC(),
+    }
+
+    req, err := auth.PresignRequest(sess.Config.Credentials, reqData, presignData)
+    if err != nil {
+        return err
     }

     res := &struct{ URL string }{

View file

@@ -140,10 +140,10 @@ the secret. Format of `access_key_id`: `%cid0%oid`, where 0(zero) is a delimiter
 Creation of bearer tokens is mandatory.

 By default, bearer token will be created with `impersonate` flag and won't have eACL table. It means that gate which will use such token
 to interact with node can have access to your private containers or to containers in which eACL grants access to you
 by public key.

 Rules for a bearer token can be set via parameter `--bearer-rules` (json-string and file path allowed).
 But you must provide `--disable-impersonate` flag:

 ```shell

View file

@@ -502,7 +502,7 @@ prometheus:
 # `frostfs` section

 Contains parameters of requests to FrostFS.
 This value can be overridden with `X-Amz-Meta-Frostfs-Copies-Number` (value is comma separated numbers: `1,2,3`)
 header for `PutObject`, `CopyObject`, `CreateMultipartUpload`.

 ```yaml

View file

@@ -38,7 +38,7 @@ type (
     ParentID  uint64
     ObjID     oid.ID
     TimeStamp uint64
-    Size      int64
+    Size      uint64
     Meta      map[string]string
 }
@@ -143,7 +143,7 @@ func newTreeNode(nodeInfo NodeResponse) (*treeNode, error) {
     case sizeKV:
         if sizeStr := string(kv.GetValue()); len(sizeStr) > 0 {
             var err error
-            if treeNode.Size, err = strconv.ParseInt(sizeStr, 10, 64); err != nil {
+            if treeNode.Size, err = strconv.ParseUint(sizeStr, 10, 64); err != nil {
                 return nil, fmt.Errorf("invalid size value '%s': %w", sizeStr, err)
             }
         }
@@ -261,7 +261,7 @@ func newPartInfo(node NodeResponse) (*data.PartInfo, error) {
     case etagKV:
         partInfo.ETag = value
     case sizeKV:
-        if partInfo.Size, err = strconv.ParseInt(value, 10, 64); err != nil {
+        if partInfo.Size, err = strconv.ParseUint(value, 10, 64); err != nil {
             return nil, fmt.Errorf("invalid part size: %w", err)
         }
     case createdKV:
@@ -921,7 +921,7 @@ func (c *Tree) AddPart(ctx context.Context, bktInfo *data.BucketInfo, multipartN
     meta := map[string]string{
         partNumberKV: strconv.Itoa(info.Number),
         oidKV:        info.OID.EncodeToString(),
-        sizeKV:       strconv.FormatInt(info.Size, 10),
+        sizeKV:       strconv.FormatUint(info.Size, 10),
         createdKV:    strconv.FormatInt(info.Created.UTC().UnixMilli(), 10),
         etagKV:       info.ETag,
     }
@@ -1057,7 +1057,7 @@ func (c *Tree) addVersion(ctx context.Context, bktInfo *data.BucketInfo, treeID
     }

     if version.Size > 0 {
-        meta[sizeKV] = strconv.FormatInt(version.Size, 10)
+        meta[sizeKV] = strconv.FormatUint(version.Size, 10)
     }

     if len(version.ETag) > 0 {
         meta[etagKV] = version.ETag