support/v0.28 #289

Closed
pogpp wants to merge 11 commits from support/v0.28 into master
42 changed files with 576 additions and 135 deletions

.gitignore (vendored)
View file

@@ -22,6 +22,7 @@ coverage.html
 # debian package build files
 debian/files
+debian/changelog
 debian/*.log
 debian/*.substvars
 debian/frostfs-s3-gw/

View file

@@ -4,6 +4,8 @@ This document outlines major changes between releases.
 ## [Unreleased]

+## [0.28.0] - Academy of Sciences - 2023-12-07
+
 ### Fixed
 - Handle negative `Content-Length` on put (#125)
 - Use `DisableURIPathEscaping` to presign urls (#125)
@@ -15,11 +17,15 @@ This document outlines major changes between releases.
 - Fix parsing signed headers in presigned urls (#182)
 - Fix url escaping (#188)
 - Use correct keys in `list-multipart-uploads` response (#185)
+- Fix parsing `key-marker` for object list versions (#237)
+- `GetSubTree` failures (#179)
+- Unexpected EOF during multipart download (#210)
+- Produce clean version in debian build (#245)

 ### Added
 - Add `trace_id` value into log record when tracing is enabled (#142)
 - Add basic error types and exit codes to `frostfs-s3-authmate` (#152)
-- Add a metric with addresses of nodes of the same and highest priority that are currently healthy (#51)
+- Add a metric with addresses of nodes of the same and highest priority that are currently healthy (#186)
 - Support dump metrics descriptions (#80)
 - Add `copies_numbers` section to `placement_policy` in config file and support vectors of copies numbers (#70, #101)
 - Support impersonate bearer token (#81, #105)
@@ -30,6 +36,9 @@ This document outlines major changes between releases.
 - Implement chunk uploading (#106)
 - Add new `kludge.bypass_content_encoding_check_in_chunks` config param (#146)
 - Add new `frostfs.client_cut` config param (#192)
+- Add selection of the node of the latest version of the object (#231)
+- Soft memory limit with `runtime.soft_memory_limit` (#196)
+- `server_health` metric for every S3 endpoint status (#199)

 ### Changed
 - Update prometheus to v1.15.0 (#94)
@@ -40,10 +49,16 @@ This document outlines major changes between releases.
 - Use request scope logger (#111)
 - Add `s3-authmate update-secret` command (#131)
 - Use default registerer for app metrics (#155)
-- Use chi router instead of archived gorlilla/mux (#149)
+- Use chi router instead of archived gorlilla/mux (#149, #174, #188)
 - Complete multipart upload doesn't unnecessary copy now. Thus, the total time of multipart upload was reduced by 2 times (#63)
 - Use gate key to form object owner (#175)
 - Apply placement policies and copies if there is at least one valid value (#168)
+- `statistic_tx_bytes_total` and `statistic_rx_bytes_total` metric to `statistic_bytes_total` metric with `direction` label (#153)
+- Refactor of context-stored data receivers (#137)
+- Refactor fetch/parse config parameters functions (#117)
+- Move all log messages to constants (#96)
+- Allow zero value of `part-number-marker` (#207)
+- Clean tag node in the tree service instead of removal (#233)

 ### Removed
 - Drop `tree.service` param (now endpoints from `peers` section are used) (#133)
@@ -92,4 +107,5 @@ This project is a fork of [NeoFS S3 Gateway](https://github.com/nspcc-dev/neofs-s3-gw).
 To see CHANGELOG for older versions, refer to https://github.com/nspcc-dev/neofs-s3-gw/blob/master/CHANGELOG.md.

 [0.27.0]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/b2148cc3...v0.27.0
-[Unreleased]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.27.0...master
+[0.28.0]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.27.0...v0.28.0
+[Unreleased]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.28.0...master

View file

@@ -160,6 +160,7 @@ protoc:

 # Package for Debian
 debpackage:
+	cp debian/changelog.init debian/changelog
 	dch --package frostfs-s3-gw \
 		--controlmaint \
 		--newversion $(PKG_VERSION) \

View file

@@ -1 +1 @@
-v0.27.0
+v0.28.0

View file

@@ -45,6 +45,7 @@ type (
 		Created       time.Time
 		CreationEpoch uint64
 		HashSum       string
+		MD5Sum        string
 		Owner         user.ID
 		Headers       map[string]string
 	}
@@ -81,12 +82,12 @@
 )

 // NotificationInfoFromObject creates new NotificationInfo from ObjectInfo.
-func NotificationInfoFromObject(objInfo *ObjectInfo) *NotificationInfo {
+func NotificationInfoFromObject(objInfo *ObjectInfo, md5Enabled bool) *NotificationInfo {
 	return &NotificationInfo{
 		Name:    objInfo.Name,
 		Version: objInfo.VersionID(),
 		Size:    objInfo.Size,
-		HashSum: objInfo.HashSum,
+		HashSum: objInfo.ETag(md5Enabled),
 	}
 }
@@ -115,6 +116,13 @@ func (o *ObjectInfo) Address() oid.Address {
 	return addr
 }

+func (o *ObjectInfo) ETag(md5Enabled bool) string {
+	if md5Enabled && len(o.MD5Sum) > 0 {
+		return o.MD5Sum
+	}
+	return o.HashSum
+}
+
 func (b BucketSettings) Unversioned() bool {
 	return b.Versioning == VersioningUnversioned
 }
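The new `ETag` accessor is the heart of this PR: every response path now goes through it instead of reading `HashSum` directly. A minimal standalone sketch of the selection rule (type and field names shortened here; not the gateway's actual package):

```go
package main

import "fmt"

// objectInfo mirrors only the two checksum fields of data.ObjectInfo
// that the new ETag method consults.
type objectInfo struct {
	HashSum string // hex-encoded SHA256 of the payload
	MD5Sum  string // hex-encoded MD5; empty for objects stored before this change
}

// eTag reproduces the rule from the hunk above: prefer the stored MD5
// when the feature is on and a value exists, otherwise fall back to SHA256.
func (o objectInfo) eTag(md5Enabled bool) string {
	if md5Enabled && len(o.MD5Sum) > 0 {
		return o.MD5Sum
	}
	return o.HashSum
}

func main() {
	oldObj := objectInfo{HashSum: "sha-old"} // written before MD5 support
	newObj := objectInfo{HashSum: "sha-new", MD5Sum: "md5-new"}

	fmt.Println(oldObj.eTag(true))  // "sha-old": no MD5 recorded, SHA256 fallback
	fmt.Println(newObj.eTag(true))  // "md5-new": MD5 wins when the feature is on
	fmt.Println(newObj.eTag(false)) // "sha-new": feature off, behavior unchanged
}
```

The fallback matters for compatibility: objects written before this release have no stored MD5, so they keep serving their old SHA256-based ETag even after the feature is switched on.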

View file

@@ -56,6 +56,7 @@ type BaseNodeVersion struct {
 	Timestamp uint64
 	Size      uint64
 	ETag      string
+	MD5       string
 	FilePath  string
 }
@@ -86,14 +87,23 @@ type PartInfo struct {
 	OID     oid.ID    `json:"oid"`
 	Size    uint64    `json:"size"`
 	ETag    string    `json:"etag"`
+	MD5     string    `json:"md5"`
 	Created time.Time `json:"created"`
 }

 // ToHeaderString form short part representation to use in S3-Completed-Parts header.
 func (p *PartInfo) ToHeaderString() string {
+	// ETag value contains SHA256 checksum which is used while getting object parts attributes.
 	return strconv.Itoa(p.Number) + "-" + strconv.FormatUint(p.Size, 10) + "-" + p.ETag
 }

+func (p *PartInfo) GetETag(md5Enabled bool) string {
+	if md5Enabled && len(p.MD5) > 0 {
+		return p.MD5
+	}
+	return p.ETag
+}
+
 // LockInfo is lock information to create appropriate tree node.
 type LockInfo struct {
 	id uint64

View file

@@ -466,7 +466,7 @@ func (h *handler) PutObjectACLHandler(w http.ResponseWriter, r *http.Request) {
 	if updated {
 		s := &SendNotificationParams{
 			Event:            EventObjectACLPut,
-			NotificationInfo: data.NotificationInfoFromObject(objInfo),
+			NotificationInfo: data.NotificationInfoFromObject(objInfo, h.cfg.Features.MD5Enabled()),
 			BktInfo:          bktInfo,
 			ReqInfo:          reqInfo,
 		}

View file

@@ -39,6 +39,7 @@ type (
 		IsResolveListAllow         bool // True if ResolveZoneList contains allowed zones
 		CompleteMultipartKeepalive time.Duration
 		Kludge                     KludgeSettings
+		Features                   layer.FeatureSettings
 	}

 	PlacementPolicy interface {

View file

@@ -1,6 +1,8 @@
 package handler

 import (
+	"encoding/base64"
+	"encoding/hex"
 	"fmt"
 	"net/http"
 	"strconv"
@@ -106,7 +108,7 @@ func (h *handler) GetObjectAttributesHandler(w http.ResponseWriter, r *http.Request) {
 		return
 	}

-	if err = checkPreconditions(info, params.Conditional); err != nil {
+	if err = checkPreconditions(info, params.Conditional, h.cfg.Features.MD5Enabled()); err != nil {
 		h.logAndSendError(w, "precondition failed", reqInfo, err)
 		return
 	}
@@ -117,7 +119,7 @@
 		return
 	}

-	response, err := encodeToObjectAttributesResponse(info, params)
+	response, err := encodeToObjectAttributesResponse(info, params, h.cfg.Features.MD5Enabled())
 	if err != nil {
 		h.logAndSendError(w, "couldn't encode object info to response", reqInfo, err)
 		return
@@ -179,19 +181,23 @@ func parseGetObjectAttributeArgs(r *http.Request) (*GetObjectAttributesArgs, error) {
 	return res, err
 }

-func encodeToObjectAttributesResponse(info *data.ObjectInfo, p *GetObjectAttributesArgs) (*GetObjectAttributesResponse, error) {
+func encodeToObjectAttributesResponse(info *data.ObjectInfo, p *GetObjectAttributesArgs, md5Enabled bool) (*GetObjectAttributesResponse, error) {
 	resp := &GetObjectAttributesResponse{}

 	for _, attr := range p.Attributes {
 		switch attr {
 		case eTag:
-			resp.ETag = info.HashSum
+			resp.ETag = info.ETag(md5Enabled)
 		case storageClass:
 			resp.StorageClass = "STANDARD"
 		case objectSize:
 			resp.ObjectSize = info.Size
 		case checksum:
-			resp.Checksum = &Checksum{ChecksumSHA256: info.HashSum}
+			checksumBytes, err := hex.DecodeString(info.HashSum)
+			if err != nil {
+				return nil, fmt.Errorf("form upload attributes: %w", err)
+			}
+			resp.Checksum = &Checksum{ChecksumSHA256: base64.StdEncoding.EncodeToString(checksumBytes)}
 		case objectParts:
 			parts, err := formUploadAttributes(info, p.MaxParts, p.PartNumberMarker)
 			if err != nil {
@@ -219,10 +225,15 @@ func formUploadAttributes(info *data.ObjectInfo, maxParts, marker int) (*ObjectParts, error) {
 		if err != nil {
 			return nil, fmt.Errorf("invalid completed part: %w", err)
 		}
+		// ETag value contains SHA256 checksum.
+		checksumBytes, err := hex.DecodeString(part.ETag)
+		if err != nil {
+			return nil, fmt.Errorf("invalid sha256 checksum in completed part: %w", err)
+		}
 		parts[i] = Part{
 			PartNumber:     part.PartNumber,
 			Size:           int(part.Size),
-			ChecksumSHA256: part.ETag,
+			ChecksumSHA256: base64.StdEncoding.EncodeToString(checksumBytes),
 		}
 	}
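The fix in this file is an encoding mismatch: S3 clients expect `ChecksumSHA256` in `GetObjectAttributes` responses as base64 of the raw digest, while the gateway stores checksums hex-encoded, so the handler now decodes and re-encodes. A small sketch of the conversion (standalone, standard library only):

```go
package main

import (
	"crypto/sha256"
	"encoding/base64"
	"encoding/hex"
	"fmt"
)

func main() {
	// The gateway stores payload checksums as hex strings.
	sum := sha256.Sum256([]byte("hello"))
	hexSum := hex.EncodeToString(sum[:])

	// S3 clients expect ChecksumSHA256 as base64 of the raw digest,
	// so the handler decodes the hex form and re-encodes it.
	raw, err := hex.DecodeString(hexSum)
	if err != nil {
		panic(err) // a non-hex stored checksum would indicate corrupted metadata
	}
	fmt.Println(base64.StdEncoding.EncodeToString(raw))
	// Output: LPJNul+wow4m6DsqxbninhsWHlwfp0JecwQzYpOLmCQ=
}
```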

View file

@@ -1,6 +1,8 @@
 package handler

 import (
+	"encoding/base64"
+	"encoding/hex"
 	"strings"
 	"testing"
@@ -24,11 +26,13 @@ func TestGetObjectPartsAttributes(t *testing.T) {
 	multipartUpload := createMultipartUpload(hc, bktName, objMultipartName, map[string]string{})
 	etag, _ := uploadPart(hc, bktName, objMultipartName, multipartUpload.UploadID, 1, partSize)
 	completeMultipartUpload(hc, bktName, objMultipartName, multipartUpload.UploadID, []string{etag})
+	etagBytes, err := hex.DecodeString(etag)
+	require.NoError(t, err)

 	result = getObjectAttributes(hc, bktName, objMultipartName, objectParts)
 	require.NotNil(t, result.ObjectParts)
 	require.Len(t, result.ObjectParts.Parts, 1)
-	require.Equal(t, etag, result.ObjectParts.Parts[0].ChecksumSHA256)
+	require.Equal(t, base64.StdEncoding.EncodeToString(etagBytes), result.ObjectParts.Parts[0].ChecksumSHA256)
 	require.Equal(t, partSize, result.ObjectParts.Parts[0].Size)
 	require.Equal(t, 1, result.ObjectParts.PartsCount)
 }

View file

@@ -164,7 +164,7 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
 		}
 	}

-	if err = checkPreconditions(srcObjInfo, args.Conditional); err != nil {
+	if err = checkPreconditions(srcObjInfo, args.Conditional, h.cfg.Features.MD5Enabled()); err != nil {
 		h.logAndSendError(w, "precondition failed", reqInfo, errors.GetAPIError(errors.ErrPreconditionFailed))
 		return
 	}
@@ -210,7 +210,7 @@
 	}
 	dstObjInfo := extendedDstObjInfo.ObjectInfo

-	if err = middleware.EncodeToResponse(w, &CopyObjectResponse{LastModified: dstObjInfo.Created.UTC().Format(time.RFC3339), ETag: dstObjInfo.HashSum}); err != nil {
+	if err = middleware.EncodeToResponse(w, &CopyObjectResponse{LastModified: dstObjInfo.Created.UTC().Format(time.RFC3339), ETag: dstObjInfo.ETag(h.cfg.Features.MD5Enabled())}); err != nil {
 		h.logAndSendError(w, "something went wrong", reqInfo, err, additional...)
 		return
 	}
@@ -254,7 +254,7 @@
 		s := &SendNotificationParams{
 			Event:            EventObjectCreatedCopy,
-			NotificationInfo: data.NotificationInfoFromObject(dstObjInfo),
+			NotificationInfo: data.NotificationInfoFromObject(dstObjInfo, h.cfg.Features.MD5Enabled()),
 			BktInfo:          dstBktInfo,
 			ReqInfo:          reqInfo,
 		}

View file

@@ -456,13 +456,8 @@ func headObjectBase(hc *handlerContext, bktName, objName, version string) *httptest.ResponseRecorder {
 	return w
 }

-func listVersions(t *testing.T, tc *handlerContext, bktName string) *ListObjectsVersionsResponse {
-	w, r := prepareTestRequest(tc, bktName, "", nil)
-	tc.Handler().ListBucketObjectVersionsHandler(w, r)
-	assertStatus(t, w, http.StatusOK)
-	res := &ListObjectsVersionsResponse{}
-	parseTestResponse(t, w, res)
-	return res
+func listVersions(_ *testing.T, tc *handlerContext, bktName string) *ListObjectsVersionsResponse {
+	return listObjectsVersions(tc, bktName, "", "", "", "", -1)
 }

 func getVersion(resp *ListObjectsVersionsResponse, objName string) []*ObjectVersionResponse {

View file

@@ -78,7 +78,8 @@ func addSSECHeaders(responseHeader http.Header, requestHeader http.Header) {
 	responseHeader.Set(api.AmzServerSideEncryptionCustomerKeyMD5, requestHeader.Get(api.AmzServerSideEncryptionCustomerKeyMD5))
 }

-func writeHeaders(h http.Header, requestHeader http.Header, extendedInfo *data.ExtendedObjectInfo, tagSetLength int, isBucketUnversioned bool) {
+func writeHeaders(h http.Header, requestHeader http.Header, extendedInfo *data.ExtendedObjectInfo, tagSetLength int,
+	isBucketUnversioned, md5Enabled bool) {
 	info := extendedInfo.ObjectInfo
 	if len(info.ContentType) > 0 && h.Get(api.ContentType) == "" {
 		h.Set(api.ContentType, info.ContentType)
@@ -94,7 +95,8 @@
 		h.Set(api.ContentLength, strconv.FormatUint(info.Size, 10))
 	}

-	h.Set(api.ETag, info.HashSum)
+	h.Set(api.ETag, info.ETag(md5Enabled))
+
 	h.Set(api.AmzTaggingCount, strconv.Itoa(tagSetLength))

 	if !isBucketUnversioned {
@@ -151,7 +153,7 @@ func (h *handler) GetObjectHandler(w http.ResponseWriter, r *http.Request) {
 	}
 	info := extendedInfo.ObjectInfo

-	if err = checkPreconditions(info, conditional); err != nil {
+	if err = checkPreconditions(info, conditional, h.cfg.Features.MD5Enabled()); err != nil {
 		h.logAndSendError(w, "precondition failed", reqInfo, err)
 		return
 	}
@@ -219,7 +221,7 @@
 		return
 	}

-	writeHeaders(w.Header(), r.Header, extendedInfo, len(tagSet), bktSettings.Unversioned())
+	writeHeaders(w.Header(), r.Header, extendedInfo, len(tagSet), bktSettings.Unversioned(), h.cfg.Features.MD5Enabled())
 	if params != nil {
 		writeRangeHeaders(w, params, fullSize)
 	} else {
@@ -232,12 +234,13 @@
 	}
 }

-func checkPreconditions(info *data.ObjectInfo, args *conditionalArgs) error {
-	if len(args.IfMatch) > 0 && args.IfMatch != info.HashSum {
-		return fmt.Errorf("%w: etag mismatched: '%s', '%s'", errors.GetAPIError(errors.ErrPreconditionFailed), args.IfMatch, info.HashSum)
+func checkPreconditions(info *data.ObjectInfo, args *conditionalArgs, md5Enabled bool) error {
+	etag := info.ETag(md5Enabled)
+	if len(args.IfMatch) > 0 && args.IfMatch != etag {
+		return fmt.Errorf("%w: etag mismatched: '%s', '%s'", errors.GetAPIError(errors.ErrPreconditionFailed), args.IfMatch, etag)
 	}
-	if len(args.IfNoneMatch) > 0 && args.IfNoneMatch == info.HashSum {
-		return fmt.Errorf("%w: etag matched: '%s', '%s'", errors.GetAPIError(errors.ErrNotModified), args.IfNoneMatch, info.HashSum)
+	if len(args.IfNoneMatch) > 0 && args.IfNoneMatch == etag {
+		return fmt.Errorf("%w: etag matched: '%s', '%s'", errors.GetAPIError(errors.ErrNotModified), args.IfNoneMatch, etag)
 	}
 	if args.IfModifiedSince != nil && info.Created.Before(*args.IfModifiedSince) {
 		return fmt.Errorf("%w: not modified since '%s', last modified '%s'", errors.GetAPIError(errors.ErrNotModified),
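Threading `md5Enabled` into `checkPreconditions` is not cosmetic: clients echo back whatever ETag the gateway served them, so `If-Match`/`If-None-Match` must be compared against the same representation the client saw. A toy version of the ETag half of the check (error values simplified from the gateway's `errors.GetAPIError` codes):

```go
package main

import (
	"errors"
	"fmt"
)

var (
	errPreconditionFailed = errors.New("412 Precondition Failed")
	errNotModified        = errors.New("304 Not Modified")
)

// check mirrors the ETag half of checkPreconditions: If-Match must equal
// the object's current ETag, If-None-Match must differ from it.
func check(etag, ifMatch, ifNoneMatch string) error {
	if ifMatch != "" && ifMatch != etag {
		return fmt.Errorf("%w: etag mismatched: %q, %q", errPreconditionFailed, ifMatch, etag)
	}
	if ifNoneMatch != "" && ifNoneMatch == etag {
		return fmt.Errorf("%w: etag matched: %q, %q", errNotModified, ifNoneMatch, etag)
	}
	return nil
}

func main() {
	const stored = "d41d8cd98f00b204e9800998ecf8427e" // MD5-style ETag when the feature is on
	fmt.Println(check(stored, stored, ""))  // <nil>: If-Match hit, request proceeds
	fmt.Println(check(stored, "other", "")) // 412: client's copy is stale
	fmt.Println(check(stored, "", stored))  // 304: client cache is still valid
}
```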

View file

@@ -147,7 +147,7 @@ func TestPreconditions(t *testing.T) {
 		},
 	} {
 		t.Run(tc.name, func(t *testing.T) {
-			actual := checkPreconditions(tc.info, tc.args)
+			actual := checkPreconditions(tc.info, tc.args, false)
 			if tc.expected == nil {
 				require.NoError(t, actual)
 			} else {
@@ -197,6 +197,19 @@ func TestGetObject(t *testing.T) {
 	getObjectAssertS3Error(hc, bktName, objName, emptyVersion, errors.ErrNoSuchKey)
 }

+func TestGetObjectEnabledMD5(t *testing.T) {
+	hc := prepareHandlerContext(t)
+	bktName, objName := "bucket", "obj"
+	_, objInfo := createBucketAndObject(hc, bktName, objName)
+
+	_, headers := getObject(hc, bktName, objName)
+	require.Equal(t, objInfo.HashSum, headers.Get(api.ETag))
+
+	hc.features.SetMD5Enabled(true)
+
+	_, headers = getObject(hc, bktName, objName)
+	require.Equal(t, objInfo.MD5Sum, headers.Get(api.ETag))
+}
+
 func putObjectContent(hc *handlerContext, bktName, objName, content string) {
 	body := bytes.NewReader([]byte(content))
 	w, r := prepareTestPayloadRequest(hc, bktName, objName, body)

View file

@@ -39,7 +39,7 @@ type handlerContext struct {
 	context context.Context
 	kludge  *kludgeSettingsMock

-	layerFeatures *layer.FeatureSettingsMock
+	features *layer.FeatureSettingsMock
 }

 func (hc *handlerContext) Handler() *handler {
@@ -148,6 +148,7 @@ func prepareHandlerContextBase(t *testing.T, minCache bool) *handlerContext {
 			Policy:     &placementPolicyMock{defaultPolicy: pp},
 			XMLDecoder: &xmlDecoderProviderMock{},
 			Kludge:     kludge,
+			Features:   features,
 		},
 	}
@@ -160,7 +161,7 @@
 		context: middleware.SetBoxData(context.Background(), newTestAccessBox(t, key)),
 		kludge:  kludge,

-		layerFeatures: features,
+		features: features,
 	}
 }

View file

@@ -65,7 +65,7 @@ func (h *handler) HeadObjectHandler(w http.ResponseWriter, r *http.Request) {
 		return
 	}

-	if err = checkPreconditions(info, conditional); err != nil {
+	if err = checkPreconditions(info, conditional, h.cfg.Features.MD5Enabled()); err != nil {
 		h.logAndSendError(w, "precondition failed", reqInfo, err)
 		return
 	}
@@ -118,7 +118,7 @@
 		return
 	}

-	writeHeaders(w.Header(), r.Header, extendedInfo, len(tagSet), bktSettings.Unversioned())
+	writeHeaders(w.Header(), r.Header, extendedInfo, len(tagSet), bktSettings.Unversioned(), h.cfg.Features.MD5Enabled())
 	w.WriteHeader(http.StatusOK)
 }

View file

@@ -243,6 +243,7 @@ func (h *handler) UploadPartHandler(w http.ResponseWriter, r *http.Request) {
 		PartNumber: partNumber,
 		Size:       size,
 		Reader:     body,
+		ContentMD5: r.Header.Get(api.ContentMD5),
 	}

 	p.Info.Encryption, err = formEncryptionParams(r)
@@ -336,7 +337,7 @@ func (h *handler) UploadPartCopy(w http.ResponseWriter, r *http.Request) {
 		return
 	}

-	if err = checkPreconditions(srcInfo, args.Conditional); err != nil {
+	if err = checkPreconditions(srcInfo, args.Conditional, h.cfg.Features.MD5Enabled()); err != nil {
 		h.logAndSendError(w, "precondition failed", reqInfo, errors.GetAPIError(errors.ErrPreconditionFailed),
 			additional...)
 		return
@@ -373,8 +374,8 @@
 	}

 	response := UploadPartCopyResponse{
-		ETag:         info.HashSum,
 		LastModified: info.Created.UTC().Format(time.RFC3339),
+		ETag:         info.ETag(h.cfg.Features.MD5Enabled()),
 	}

 	if p.Info.Encryption.Enabled() {
@@ -449,8 +450,8 @@ func (h *handler) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
 	response := CompleteMultipartUploadResponse{
 		Bucket: objInfo.Bucket,
-		ETag:   objInfo.HashSum,
 		Key:    objInfo.Name,
+		ETag:   objInfo.ETag(h.cfg.Features.MD5Enabled()),
 	}

 	// Here we previously set api.AmzVersionID header for versioned bucket.
@@ -514,7 +515,7 @@ func (h *handler) completeMultipartUpload(r *http.Request, c *layer.CompleteMultipartParams,
 	s := &SendNotificationParams{
 		Event:            EventObjectCreatedCompleteMultipartUpload,
-		NotificationInfo: data.NotificationInfoFromObject(objInfo),
+		NotificationInfo: data.NotificationInfoFromObject(objInfo, h.cfg.Features.MD5Enabled()),
 		BktInfo:          bktInfo,
 		ReqInfo:          reqInfo,
 	}

View file

@@ -2,6 +2,8 @@ package handler

 import (
 	"bytes"
+	"crypto/md5"
+	"encoding/hex"
 	"encoding/xml"
 	"fmt"
 	"net/http"
@@ -21,6 +23,8 @@ const (
 )

 func TestPeriodicWriter(t *testing.T) {
+	t.Skip()
+
 	const dur = 100 * time.Millisecond
 	const whitespaces = 8
 	expected := []byte(xml.Header)
@@ -253,6 +257,32 @@ func TestListParts(t *testing.T) {
 	require.Len(t, list.Parts, 0)
 }

+func TestMultipartUploadEnabledMD5(t *testing.T) {
+	hc := prepareHandlerContext(t)
+	hc.features.SetMD5Enabled(true)
+
+	bktName, objName := "bucket-md5", "object-md5"
+	createTestBucket(hc, bktName)
+
+	partSize := 5 * 1024 * 1024
+	multipartUpload := createMultipartUpload(hc, bktName, objName, map[string]string{})
+	etag1, partBody1 := uploadPart(hc, bktName, objName, multipartUpload.UploadID, 1, partSize)
+	md5Sum1 := md5.Sum(partBody1)
+	require.Equal(t, hex.EncodeToString(md5Sum1[:]), etag1)
+
+	etag2, partBody2 := uploadPart(hc, bktName, objName, multipartUpload.UploadID, 2, partSize)
+	md5Sum2 := md5.Sum(partBody2)
+	require.Equal(t, hex.EncodeToString(md5Sum2[:]), etag2)
+
+	w := completeMultipartUploadBase(hc, bktName, objName, multipartUpload.UploadID, []string{etag1, etag2})
+	assertStatus(t, w, http.StatusOK)
+	resp := &CompleteMultipartUploadResponse{}
+	err := xml.NewDecoder(w.Result().Body).Decode(resp)
+	require.NoError(t, err)
+	completeMD5Sum := md5.Sum(append(md5Sum1[:], md5Sum2[:]...))
+	require.Equal(t, hex.EncodeToString(completeMD5Sum[:])+"-2", resp.ETag)
+}
+
 func uploadPartCopy(hc *handlerContext, bktName, objName, uploadID string, num int, srcObj string, start, end int) *UploadPartCopyResponse {
 	return uploadPartCopyBase(hc, bktName, objName, false, uploadID, num, srcObj, start, end)
 }
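The test above pins down the usual S3 multipart ETag convention: the MD5 over the concatenation of the raw part MD5s, suffixed with `-<part count>`. A standalone sketch of that computation:

```go
package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
)

// multipartETag computes the S3-style ETag for a completed multipart
// upload: MD5 of the concatenated raw part digests, plus the part count.
func multipartETag(parts [][]byte) string {
	all := md5.New()
	for _, part := range parts {
		sum := md5.Sum(part) // per-part ETag is the part's own MD5
		all.Write(sum[:])    // feed the raw digest, not its hex form
	}
	return fmt.Sprintf("%s-%d", hex.EncodeToString(all.Sum(nil)), len(parts))
}

func main() {
	parts := [][]byte{
		[]byte("first part payload"),
		[]byte("second part payload"),
	}
	fmt.Println(multipartETag(parts)) // "<32 hex chars>-2"
}
```

This is why `CompleteMultipartUpload` below accumulates the decoded part MD5s into a running hash and passes the result down as `CompleteMD5Hash` instead of recomputing anything over the assembled payload.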

View file

@@ -33,12 +33,12 @@ func (h *handler) ListObjectsV1Handler(w http.ResponseWriter, r *http.Request) {
 		return
 	}

-	if err = middleware.EncodeToResponse(w, encodeV1(params, list)); err != nil {
+	if err = middleware.EncodeToResponse(w, h.encodeV1(params, list)); err != nil {
 		h.logAndSendError(w, "something went wrong", reqInfo, err)
 	}
 }

-func encodeV1(p *layer.ListObjectsParamsV1, list *layer.ListObjectsInfoV1) *ListObjectsV1Response {
+func (h *handler) encodeV1(p *layer.ListObjectsParamsV1, list *layer.ListObjectsInfoV1) *ListObjectsV1Response {
 	res := &ListObjectsV1Response{
 		Name:         p.BktInfo.Name,
 		EncodingType: p.Encode,
@@ -52,7 +52,7 @@
 	res.CommonPrefixes = fillPrefixes(list.Prefixes, p.Encode)

-	res.Contents = fillContentsWithOwner(list.Objects, p.Encode)
+	res.Contents = fillContentsWithOwner(list.Objects, p.Encode, h.cfg.Features.MD5Enabled())

 	return res
 }
@@ -77,12 +77,12 @@ func (h *handler) ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) {
 		return
 	}

-	if err = middleware.EncodeToResponse(w, encodeV2(params, list)); err != nil {
+	if err = middleware.EncodeToResponse(w, h.encodeV2(params, list)); err != nil {
 		h.logAndSendError(w, "something went wrong", reqInfo, err)
 	}
 }

-func encodeV2(p *layer.ListObjectsParamsV2, list *layer.ListObjectsInfoV2) *ListObjectsV2Response {
+func (h *handler) encodeV2(p *layer.ListObjectsParamsV2, list *layer.ListObjectsInfoV2) *ListObjectsV2Response {
 	res := &ListObjectsV2Response{
 		Name:         p.BktInfo.Name,
 		EncodingType: p.Encode,
@@ -98,7 +98,7 @@
 	res.CommonPrefixes = fillPrefixes(list.Prefixes, p.Encode)

-	res.Contents = fillContents(list.Objects, p.Encode, p.FetchOwner)
+	res.Contents = fillContents(list.Objects, p.Encode, p.FetchOwner, h.cfg.Features.MD5Enabled())

 	return res
 }
@@ -184,18 +184,18 @@ func fillPrefixes(src []string, encode string) []CommonPrefix {
 	return dst
 }

-func fillContentsWithOwner(src []*data.ObjectInfo, encode string) []Object {
-	return fillContents(src, encode, true)
+func fillContentsWithOwner(src []*data.ObjectInfo, encode string, md5Enabled bool) []Object {
+	return fillContents(src, encode, true, md5Enabled)
 }

-func fillContents(src []*data.ObjectInfo, encode string, fetchOwner bool) []Object {
+func fillContents(src []*data.ObjectInfo, encode string, fetchOwner, md5Enabled bool) []Object {
 	var dst []Object
 	for _, obj := range src {
 		res := Object{
 			Key:          s3PathEncode(obj.Name, encode),
 			Size:         obj.Size,
 			LastModified: obj.Created.UTC().Format(time.RFC3339),
-			ETag:         obj.HashSum,
+			ETag:         obj.ETag(md5Enabled),
 		}

 		if size, err := layer.GetObjectSize(obj); err == nil {
@@ -233,7 +233,7 @@ func (h *handler) ListBucketObjectVersionsHandler(w http.ResponseWriter, r *http.Request) {
 		return
 	}

-	response := encodeListObjectVersionsToResponse(info, p.BktInfo.Name)
+	response := encodeListObjectVersionsToResponse(info, p.BktInfo.Name, h.cfg.Features.MD5Enabled())
 	if err = middleware.EncodeToResponse(w, response); err != nil {
 		h.logAndSendError(w, "something went wrong", reqInfo, err)
 	}
@@ -253,7 +253,7 @@ func parseListObjectVersionsRequest(reqInfo *middleware.ReqInfo) (*layer.ListObjectVersionsParams, error) {
 	}

 	res.Prefix = queryValues.Get("prefix")
-	res.KeyMarker = queryValues.Get("marker")
+	res.KeyMarker = queryValues.Get("key-marker")
 	res.Delimiter = queryValues.Get("delimiter")
 	res.Encode = queryValues.Get("encoding-type")
 	res.VersionIDMarker = queryValues.Get("version-id-marker")
@@ -261,7 +261,7 @@
 	return &res, nil
 }

-func encodeListObjectVersionsToResponse(info *layer.ListObjectVersionsInfo, bucketName string) *ListObjectsVersionsResponse {
+func encodeListObjectVersionsToResponse(info *layer.ListObjectVersionsInfo, bucketName string, md5Enabled bool) *ListObjectsVersionsResponse {
 	res := ListObjectsVersionsResponse{
 		Name:        bucketName,
 		IsTruncated: info.IsTruncated,
@@ -286,7 +286,7 @@
 			},
 			Size:      ver.ObjectInfo.Size,
 			VersionID: ver.Version(),
-			ETag:      ver.ObjectInfo.HashSum,
+			ETag:      ver.ObjectInfo.ETag(md5Enabled),
 		})
 	}
 	// this loop is not starting till versioning is not implemented

View file

@@ -3,6 +3,7 @@ package handler
 import (
 	"net/http"
 	"net/url"
+	"sort"
 	"strconv"
 	"testing"
@@ -56,6 +57,36 @@ func TestListObjectNullVersions(t *testing.T) {
 	require.Equal(t, data.UnversionedObjectVersionID, result.Version[1].VersionID)
 }

+func TestListObjectsPaging(t *testing.T) {
+	hc := prepareHandlerContext(t)
+
+	bktName := "bucket-versioning-enabled"
+	createTestBucket(hc, bktName)
+
+	n := 10
+
+	var objects []string
+	for i := 0; i < n; i++ {
+		objects = append(objects, "objects"+strconv.Itoa(i))
+		putObjectContent(hc, bktName, objects[i], "content")
+	}
+	sort.Strings(objects)
+
+	result := &ListObjectsVersionsResponse{IsTruncated: true}
+	for result.IsTruncated {
+		result = listObjectsVersions(hc, bktName, "", "", result.NextKeyMarker, result.NextVersionIDMarker, n/3)
+
+		for i, version := range result.Version {
+			if objects[i] != version.Key {
+				t.Errorf("expected: '%s', got: '%s'", objects[i], version.Key)
+			}
+		}
+		objects = objects[len(result.Version):]
+	}
+
+	require.Empty(t, objects)
+}
+
 func TestS3CompatibilityBucketListV2BothContinuationTokenStartAfter(t *testing.T) {
 	tc := prepareHandlerContext(t)
@@ -215,3 +246,20 @@ func listObjectsV1(hc *handlerContext, bktName, prefix, delimiter, marker string) *ListObjectsV1Response {
 	parseTestResponse(hc.t, w, res)
 	return res
 }
+
+func listObjectsVersions(hc *handlerContext, bktName, prefix, delimiter, keyMarker, versionIDMarker string, maxKeys int) *ListObjectsVersionsResponse {
+	query := prepareCommonListObjectsQuery(prefix, delimiter, maxKeys)
+	if len(keyMarker) != 0 {
+		query.Add("key-marker", keyMarker)
+	}
+	if len(versionIDMarker) != 0 {
+		query.Add("version-id-marker", versionIDMarker)
+	}
+
+	w, r := prepareTestFullRequest(hc, bktName, "", query, nil)
+	hc.Handler().ListBucketObjectVersionsHandler(w, r)
+	assertStatus(hc.t, w, http.StatusOK)
+	res := &ListObjectsVersionsResponse{}
+	parseTestResponse(hc.t, w, res)
+	return res
+}

View file

@@ -242,6 +242,7 @@ func (h *handler) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
 		Size:       size,
 		Header:     metadata,
 		Encryption: encryptionParams,
+		ContentMD5: r.Header.Get(api.ContentMD5),
 	}

 	params.CopiesNumbers, err = h.pickCopiesNumbers(metadata, bktInfo.LocationConstraint)
@@ -273,7 +274,7 @@
 	s := &SendNotificationParams{
 		Event:            EventObjectCreatedPut,
-		NotificationInfo: data.NotificationInfoFromObject(objInfo),
+		NotificationInfo: data.NotificationInfoFromObject(objInfo, h.cfg.Features.MD5Enabled()),
 		BktInfo:          bktInfo,
 		ReqInfo:          reqInfo,
 	}
@@ -324,7 +325,8 @@
 		addSSECHeaders(w.Header(), r.Header)
 	}

-	w.Header().Set(api.ETag, objInfo.HashSum)
+	w.Header().Set(api.ETag, objInfo.ETag(h.cfg.Features.MD5Enabled()))
+
 	middleware.WriteSuccessResponseHeadersOnly(w)
 }
@@ -490,7 +492,7 @@ func (h *handler) PostObject(w http.ResponseWriter, r *http.Request) {
 	s := &SendNotificationParams{
 		Event:            EventObjectCreatedPost,
-		NotificationInfo: data.NotificationInfoFromObject(objInfo),
+		NotificationInfo: data.NotificationInfoFromObject(objInfo, h.cfg.Features.MD5Enabled()),
 		BktInfo:          bktInfo,
 		ReqInfo:          reqInfo,
 	}
@@ -559,7 +561,7 @@
 		resp := &PostResponse{
 			Bucket: objInfo.Bucket,
 			Key:    objInfo.Name,
-			ETag:   objInfo.HashSum,
+			ETag:   objInfo.ETag(h.cfg.Features.MD5Enabled()),
 		}
 		w.WriteHeader(status)
 		if _, err = w.Write(middleware.EncodeResponse(resp)); err != nil {
@@ -569,7 +571,7 @@
 		}
 	}

-	w.Header().Set(api.ETag, objInfo.HashSum)
+	w.Header().Set(api.ETag, objInfo.ETag(h.cfg.Features.MD5Enabled()))
 	w.WriteHeader(status)
 }

View file

@@ -3,7 +3,10 @@ package handler
 import (
 	"bytes"
 	"context"
+	"crypto/md5"
 	"crypto/rand"
+	"encoding/base64"
+	"encoding/hex"
 	"encoding/json"
 	"errors"
 	"io"
@@ -194,6 +197,37 @@ func TestPutObjectWithWrapReaderDiscardOnError(t *testing.T) {
 	require.Equal(t, numGoroutineBefore, numGoroutineAfter, "goroutines shouldn't leak during put object")
 }

+func TestPutObjectWithInvalidContentMD5(t *testing.T) {
+	tc := prepareHandlerContext(t)
+	tc.features.SetMD5Enabled(true)
+
+	bktName, objName := "bucket-for-put", "object-for-put"
+	createTestBucket(tc, bktName)
+
+	content := []byte("content")
+	w, r := prepareTestPayloadRequest(tc, bktName, objName, bytes.NewReader(content))
+	r.Header.Set(api.ContentMD5, base64.StdEncoding.EncodeToString([]byte("invalid")))
+	tc.Handler().PutObjectHandler(w, r)
+	assertS3Error(t, w, s3errors.GetAPIError(s3errors.ErrInvalidDigest))
+
+	checkNotFound(t, tc, bktName, objName, emptyVersion)
+}
+
+func TestPutObjectWithEnabledMD5(t *testing.T) {
+	tc := prepareHandlerContext(t)
+	tc.features.SetMD5Enabled(true)
+
+	bktName, objName := "bucket-for-put", "object-for-put"
+	createTestBucket(tc, bktName)
+
+	content := []byte("content")
+	md5Hash := md5.New()
+	md5Hash.Write(content)
+
+	w, r := prepareTestPayloadRequest(tc, bktName, objName, bytes.NewReader(content))
+	tc.Handler().PutObjectHandler(w, r)
+	require.Equal(t, hex.EncodeToString(md5Hash.Sum(nil)), w.Header().Get(api.ETag))
+}
+
 func TestPutObjectWithStreamBodyAWSExample(t *testing.T) {
 	hc := prepareHandlerContext(t)
@@ -320,7 +354,7 @@ func TestPutObjectClientCut(t *testing.T) {
 	obj1 := getObjectFromLayer(hc, objName1)[0]
 	require.Empty(t, getObjectAttribute(obj1, "s3-client-cut"))

-	hc.layerFeatures.SetClientCut(true)
+	hc.features.SetClientCut(true)
 	putObject(hc, bktName, objName2)
 	obj2 := getObjectFromLayer(hc, objName2)[0]
 	require.Equal(t, "true", getObjectAttribute(obj2, "s3-client-cut"))

View file

@@ -45,7 +45,7 @@ func (n *layer) PutBucketCORS(ctx context.Context, p *PutCORSParams) error {
 		CopiesNumber: p.CopiesNumbers,
 	}

-	_, objID, _, err := n.objectPutAndHash(ctx, prm, p.BktInfo)
+	_, objID, _, _, err := n.objectPutAndHash(ctx, prm, p.BktInfo)
 	if err != nil {
 		return fmt.Errorf("put system object: %w", err)
 	}

View file

@@ -26,7 +26,8 @@ import (
 )

 type FeatureSettingsMock struct {
 	clientCut  bool
+	md5Enabled bool
 }

 func (k *FeatureSettingsMock) ClientCut() bool {
@@ -37,6 +38,14 @@ func (k *FeatureSettingsMock) SetClientCut(clientCut bool) {
 	k.clientCut = clientCut
 }

+func (k *FeatureSettingsMock) MD5Enabled() bool {
+	return k.md5Enabled
+}
+
+func (k *FeatureSettingsMock) SetMD5Enabled(md5Enabled bool) {
+	k.md5Enabled = md5Enabled
+}
+
 type TestFrostFS struct {
 	FrostFS

View file

@@ -48,6 +48,7 @@ type (
 	FeatureSettings interface {
 		ClientCut() bool
+		MD5Enabled() bool
 	}

 	layer struct {
@@ -109,14 +110,16 @@ type (
 	// PutObjectParams stores object put request parameters.
 	PutObjectParams struct {
 		BktInfo         *data.BucketInfo
 		Object          string
 		Size            uint64
 		Reader          io.Reader
 		Header          map[string]string
 		Lock            *data.ObjectLock
 		Encryption      encryption.Params
 		CopiesNumbers   []uint32
+		CompleteMD5Hash string
+		ContentMD5      string
 	}

 	PutCombinedObjectParams struct {

View file

@@ -3,6 +3,8 @@ package layer
 import (
 	"bytes"
 	"context"
+	"crypto/md5"
+	"encoding/base64"
 	"encoding/hex"
 	"encoding/json"
 	"errors"
@@ -68,6 +70,7 @@ type (
 		PartNumber int
 		Size       uint64
 		Reader     io.Reader
+		ContentMD5 string
 	}

 	UploadCopyParams struct {
@@ -197,7 +200,7 @@ func (n *layer) UploadPart(ctx context.Context, p *UploadPartParams) (string, error) {
 		return "", err
 	}

-	return objInfo.HashSum, nil
+	return objInfo.ETag(n.features.MD5Enabled()), nil
 }

 func (n *layer) uploadPart(ctx context.Context, multipartInfo *data.MultipartInfo, p *UploadPartParams) (*data.ObjectInfo, error) {
@@ -230,10 +233,28 @@
 	prm.Attributes[0][0], prm.Attributes[0][1] = UploadIDAttributeName, p.Info.UploadID
 	prm.Attributes[1][0], prm.Attributes[1][1] = UploadPartNumberAttributeName, strconv.Itoa(p.PartNumber)

-	size, id, hash, err := n.objectPutAndHash(ctx, prm, bktInfo)
+	size, id, hash, md5Hash, err := n.objectPutAndHash(ctx, prm, bktInfo)
 	if err != nil {
 		return nil, err
 	}
+	if len(p.ContentMD5) > 0 {
+		hashBytes, err := base64.StdEncoding.DecodeString(p.ContentMD5)
+		if err != nil {
+			return nil, s3errors.GetAPIError(s3errors.ErrInvalidDigest)
+		}
+		if hex.EncodeToString(hashBytes) != hex.EncodeToString(md5Hash) {
+			prm := PrmObjectDelete{
+				Object:    id,
+				Container: bktInfo.CID,
+			}
+			n.prepareAuthParameters(ctx, &prm.PrmAuth, bktInfo.Owner)
+			err = n.frostFS.DeleteObject(ctx, prm)
+			if err != nil {
+				n.reqLogger(ctx).Debug(logs.FailedToDeleteObject, zap.Stringer("cid", bktInfo.CID), zap.Stringer("oid", id))
+			}
+			return nil, s3errors.GetAPIError(s3errors.ErrInvalidDigest)
+		}
+	}
 	if p.Info.Encryption.Enabled() {
 		size = decSize
 	}
@@ -250,6 +271,7 @@
 		Size:    size,
 		ETag:    hex.EncodeToString(hash),
 		Created: prm.CreationTime,
+		MD5:     hex.EncodeToString(md5Hash),
 	}

 	oldPartID, err := n.treeService.AddPart(ctx, bktInfo, multipartInfo.ID, partInfo)
@@ -274,6 +296,7 @@
 		Size:    partInfo.Size,
 		Created: partInfo.Created,
 		HashSum: partInfo.ETag,
+		MD5Sum:  partInfo.MD5,
 	}

 	return objInfo, nil
@@ -347,9 +370,10 @@ func (n *layer) CompleteMultipartUpload
 	parts := make([]*data.PartInfo, 0, len(p.Parts))

 	var completedPartsHeader strings.Builder
+	md5Hash := md5.New()
 	for i, part := range p.Parts {
 		partInfo := partsInfo[part.PartNumber]
-		if partInfo == nil || part.ETag != partInfo.ETag {
+		if partInfo == nil || strings.Trim(part.ETag, "\"") != partInfo.GetETag(n.features.MD5Enabled()) {
 			return nil, nil, fmt.Errorf("%w: unknown part %d or etag mismatched", s3errors.GetAPIError(s3errors.ErrInvalidPart), part.PartNumber)
 		}
 		delete(partsInfo, part.PartNumber)
@@ -376,6 +400,12 @@
 		if _, err = completedPartsHeader.WriteString(partInfoStr); err != nil {
 			return nil, nil, err
 		}
+
+		bytesHash, err := hex.DecodeString(partInfo.MD5)
+		if err != nil {
+			return nil, nil, fmt.Errorf("couldn't decode MD5 checksum of part: %w", err)
+		}
+		md5Hash.Write(bytesHash)
 	}

 	initMetadata := make(map[string]string, len(multipartInfo.Meta)+1)
@@ -410,13 +440,14 @@
 	}

 	extObjInfo, err := n.PutObject(ctx, &PutObjectParams{
 		BktInfo:         p.Info.Bkt,
 		Object:          p.Info.Key,
 		Reader:          bytes.NewReader(partsData),
 		Header:          initMetadata,
 		Size:            multipartObjetSize,
 		Encryption:      p.Info.Encryption,
 		CopiesNumbers:   multipartInfo.CopiesNumbers,
+		CompleteMD5Hash: hex.EncodeToString(md5Hash.Sum(nil)) + "-" + strconv.Itoa(len(p.Parts)),
 	})
 	if err != nil {
 		n.reqLogger(ctx).Error(logs.CouldNotPutCompletedObject,
@@ -537,7 +568,7 @@ func (n *layer) ListParts(ctx context.Context, p *ListPartsParams) (*ListPartsInfo, error) {
 	for _, partInfo := range partsInfo {
 		parts = append(parts, &Part{
-			ETag:         partInfo.ETag,
+			ETag:         partInfo.GetETag(n.features.MD5Enabled()),
 			LastModified: partInfo.Created.UTC().Format(time.RFC3339),
 			PartNumber:   partInfo.Number,
 			Size:         partInfo.Size,

View file

@@ -34,7 +34,7 @@ func (n *layer) PutBucketNotificationConfiguration(ctx context.Context, p *PutBucketNotificationConfigurationParams) error {
 		CopiesNumber: p.CopiesNumbers,
 	}

-	_, objID, _, err := n.objectPutAndHash(ctx, prm, p.BktInfo)
+	_, objID, _, _, err := n.objectPutAndHash(ctx, prm, p.BktInfo)
 	if err != nil {
 		return err
 	}

View file

@@ -1,8 +1,11 @@
 package layer

 import (
+	"bytes"
 	"context"
+	"crypto/md5"
 	"crypto/sha256"
+	"encoding/base64"
 	"encoding/hex"
 	"encoding/json"
 	"errors"
@@ -287,10 +290,23 @@ func (n *layer) PutObject(ctx context.Context, p *PutObjectParams) (*data.ExtendedObjectInfo, error) {
 		prm.Attributes = append(prm.Attributes, [2]string{k, v})
 	}

-	size, id, hash, err := n.objectPutAndHash(ctx, prm, p.BktInfo)
+	size, id, hash, md5Hash, err := n.objectPutAndHash(ctx, prm, p.BktInfo)
 	if err != nil {
 		return nil, err
 	}
+	if len(p.ContentMD5) > 0 {
+		headerMd5Hash, err := base64.StdEncoding.DecodeString(p.ContentMD5)
+		if err != nil {
+			return nil, apiErrors.GetAPIError(apiErrors.ErrInvalidDigest)
+		}
+		if !bytes.Equal(headerMd5Hash, md5Hash) {
+			err = n.objectDelete(ctx, p.BktInfo, id)
+			if err != nil {
+				n.reqLogger(ctx).Debug(logs.FailedToDeleteObject, zap.Stringer("cid", p.BktInfo.CID), zap.Stringer("oid", id))
+			}
+			return nil, apiErrors.GetAPIError(apiErrors.ErrInvalidDigest)
+		}
+	}

 	n.reqLogger(ctx).Debug(logs.PutObject, zap.Stringer("cid", p.BktInfo.CID), zap.Stringer("oid", id))
@@ -304,6 +320,11 @@
 		IsUnversioned: !bktSettings.VersioningEnabled(),
 		IsCombined:    p.Header[MultipartObjectSize] != "",
 	}
+	if len(p.CompleteMD5Hash) > 0 {
+		newVersion.MD5 = p.CompleteMD5Hash
+	} else {
+		newVersion.MD5 = hex.EncodeToString(md5Hash)
+	}

 	if newVersion.ID, err = n.treeService.AddVersion(ctx, p.BktInfo, newVersion); err != nil {
 		return nil, fmt.Errorf("couldn't add new verion to tree service: %w", err)
@@ -340,6 +361,7 @@
 		Headers:     p.Header,
 		ContentType: p.Header[api.ContentType],
 		HashSum:     newVersion.ETag,
+		MD5Sum:      newVersion.MD5,
 	}

 	extendedObjInfo := &data.ExtendedObjectInfo{
@@ -378,6 +400,7 @@ func (n *layer) headLastVersionIfNotDeleted(ctx context.Context, bkt *data.BucketInfo, objectName string) (*data.ExtendedObjectInfo, error) {
 		return nil, err
 	}

 	objInfo := objectInfoFromMeta(bkt, meta)
+	objInfo.MD5Sum = node.MD5

 	extObjInfo := &data.ExtendedObjectInfo{
 		ObjectInfo: objInfo,
@@ -430,6 +453,7 @@ func (n *layer) headVersion(ctx context.Context, bkt *data.BucketInfo, p *HeadObjectParams) (*data.ExtendedObjectInfo, error) {
 		return nil, err
 	}

 	objInfo := objectInfoFromMeta(bkt, meta)
+	objInfo.MD5Sum = foundVersion.MD5

 	extObjInfo := &data.ExtendedObjectInfo{
 		ObjectInfo: objInfo,
@@ -457,14 +481,16 @@
 // objectPutAndHash prepare auth parameters and invoke frostfs.CreateObject.
 // Returns object ID and payload sha256 hash.
-func (n *layer) objectPutAndHash(ctx context.Context, prm PrmObjectCreate, bktInfo *data.BucketInfo) (uint64, oid.ID, []byte, error) {
+func (n *layer) objectPutAndHash(ctx context.Context, prm PrmObjectCreate, bktInfo *data.BucketInfo) (uint64, oid.ID, []byte, []byte, error) {
 	n.prepareAuthParameters(ctx, &prm.PrmAuth, bktInfo.Owner)
 	prm.ClientCut = n.features.ClientCut()
 	var size uint64
 	hash := sha256.New()
+	md5Hash := md5.New()
 	prm.Payload = wrapReader(prm.Payload, 64*1024, func(buf []byte) {
 		size += uint64(len(buf))
 		hash.Write(buf)
+		md5Hash.Write(buf)
 	})
 	id, err := n.frostFS.CreateObject(ctx, prm)
 	if err != nil {
@@ -472,9 +498,9 @@
 			n.reqLogger(ctx).Warn(logs.FailedToDiscardPutPayloadProbablyGoroutineLeaks, zap.Error(errDiscard))
 		}
-		return 0, oid.ID{}, nil, err
+		return 0, oid.ID{}, nil, nil, err
 	}
-	return size, id, hash.Sum(nil), nil
+	return size, id, hash.Sum(nil), md5Hash.Sum(nil), nil
 }

 // ListObjectsV1 returns objects in a bucket for requests of Version 1.
@@ -805,6 +831,7 @@ func (n *layer) objectInfoFromObjectsCacheOrFrostFS(ctx context.Context, bktInfo *data.BucketInfo,
 	}

 	oi = objectInfoFromMeta(bktInfo, meta)
+	oi.MD5Sum = node.MD5
 	n.cache.PutObject(owner, &data.ExtendedObjectInfo{ObjectInfo: oi, NodeVersion: node})

 	return oi
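Two things happen in this file: `objectPutAndHash` now feeds every payload chunk to SHA256 and MD5 in the same pass, and `PutObject` validates an optional base64 `Content-MD5` header (RFC 1864), deleting the just-written object on mismatch. A compressed sketch of both steps, with storage replaced by a buffer (the `clientSum`/`stored` names are illustrative, not the gateway's):

```go
package main

import (
	"bytes"
	"crypto/md5"
	"crypto/sha256"
	"encoding/base64"
	"fmt"
	"io"
	"strings"
)

func main() {
	body := "object payload"

	// What a client sends: Content-MD5 is the base64 of the raw MD5
	// digest of the body (RFC 1864).
	clientSum := md5.Sum([]byte(body))
	contentMD5 := base64.StdEncoding.EncodeToString(clientSum[:])

	// Server side, as in objectPutAndHash: a single pass over the payload
	// feeds storage and both digests, so nothing is read twice.
	sha := sha256.New()
	md := md5.New()
	var stored bytes.Buffer // stand-in for the FrostFS write
	if _, err := io.Copy(io.MultiWriter(sha, md, &stored), strings.NewReader(body)); err != nil {
		panic(err)
	}

	// As in PutObject: decode the header and compare digests; on a
	// mismatch the gateway deletes the just-written object and returns
	// ErrInvalidDigest.
	sent, err := base64.StdEncoding.DecodeString(contentMD5)
	if err != nil || !bytes.Equal(sent, md.Sum(nil)) {
		panic("Content-MD5 mismatch: would roll back the object")
	}
	fmt.Printf("stored %d bytes, sha256=%x, md5=%x\n", stored.Len(), sha.Sum(nil), md.Sum(nil))
}
```

Note the ordering the diff implies: the object is written first and only rolled back afterwards, because the digest can't be known until the whole streamed payload has passed through.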

View file

@@ -125,7 +125,7 @@ func (n *layer) putLockObject(ctx context.Context, bktInfo *data.BucketInfo, objVersion *data.NodeVersion,
 		return oid.ID{}, err
 	}

-	_, id, _, err := n.objectPutAndHash(ctx, prm, bktInfo)
+	_, id, _, _, err := n.objectPutAndHash(ctx, prm, bktInfo)
 	return id, err
 }

View file

@@ -49,14 +49,8 @@ func (t *TreeServiceMock) PutObjectTagging(_ context.Context, bktInfo *data.BucketInfo,
 	return nil
 }

-func (t *TreeServiceMock) DeleteObjectTagging(_ context.Context, bktInfo *data.BucketInfo, objVersion *data.NodeVersion) error {
-	cnrTagsMap, ok := t.tags[bktInfo.CID.EncodeToString()]
-	if !ok {
-		return nil
-	}
-	delete(cnrTagsMap, objVersion.ID)
-	return nil
+func (t *TreeServiceMock) DeleteObjectTagging(ctx context.Context, bktInfo *data.BucketInfo, objVersion *data.NodeVersion) error {
+	return t.PutObjectTagging(ctx, bktInfo, objVersion, nil)
 }

 func (t *TreeServiceMock) GetBucketTagging(context.Context, *data.BucketInfo) (map[string]string, error) {

View file

@@ -10,6 +10,7 @@ import (
 	"sync"

 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
+	treepool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree"
 	"github.com/go-chi/chi/v5"
 	"github.com/google/uuid"
 	"go.uber.org/zap"
@@ -206,6 +207,8 @@ func Request(log *zap.Logger) Func {
 				r.Context(), HdrAmzRequestID, reqInfo.RequestID,
 			))

+			r = r.WithContext(treepool.SetRequestID(r.Context(), reqInfo.RequestID))
+
 			reqLogger := log.With(zap.String("request_id", reqInfo.RequestID))
 			r = r.WithContext(SetReqLogger(r.Context(), reqLogger))

View file

@ -73,6 +73,7 @@ type (
		maxClient                     maxClientsConfig
		bypassContentEncodingInChunks atomic.Bool
		clientCut                     atomic.Bool
+		md5Enabled                    atomic.Bool
	}
	maxClientsConfig struct {
@ -176,6 +177,7 @@ func newAppSettings(log *Logger, v *viper.Viper) *appSettings {
	settings.setBypassContentEncodingInChunks(v.GetBool(cfgKludgeBypassContentEncodingCheckInChunks))
	settings.setClientCut(v.GetBool(cfgClientCut))
+	settings.setMD5Enabled(v.GetBool(cfgMD5Enabled))
	return settings
}
@ -196,6 +198,14 @@ func (s *appSettings) setClientCut(clientCut bool) {
	s.clientCut.Store(clientCut)
}
+func (s *appSettings) MD5Enabled() bool {
+	return s.md5Enabled.Load()
+}
+
+func (s *appSettings) setMD5Enabled(md5Enabled bool) {
+	s.md5Enabled.Store(md5Enabled)
+}
func (a *App) initAPI(ctx context.Context) {
	a.initLayer(ctx)
	a.initHandler()
@ -315,6 +325,8 @@ func getPools(ctx context.Context, logger *zap.Logger, cfg *viper.Viper) (*pool.
	prm.SetLogger(logger)
	prmTree.SetLogger(logger)
+	prmTree.SetMaxRequestAttempts(cfg.GetInt(cfgTreePoolMaxAttempts))
	var apiGRPCDialOpts []grpc.DialOption
	var treeGRPCDialOpts []grpc.DialOption
	if cfg.GetBool(cfgTracingEnabled) {
@ -536,6 +548,7 @@ func (a *App) updateSettings() {
	a.settings.xmlDecoder.UseDefaultNamespaceForCompleteMultipart(a.cfg.GetBool(cfgKludgeUseDefaultXMLNSForCompleteMultipartUpload))
	a.settings.setBypassContentEncodingInChunks(a.cfg.GetBool(cfgKludgeBypassContentEncodingCheckInChunks))
	a.settings.setClientCut(a.cfg.GetBool(cfgClientCut))
+	a.settings.setMD5Enabled(a.cfg.GetBool(cfgMD5Enabled))
}
func (a *App) startServices() {
@ -679,6 +692,7 @@ func (a *App) initHandler() {
	cfg.CompleteMultipartKeepalive = a.cfg.GetDuration(cfgKludgeCompleteMultipartUploadKeepalive)
	cfg.Kludge = a.settings
+	cfg.Features = a.settings
	var err error
	a.api, err = handler.New(a.log, a.obj, a.nc, cfg)
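Reviewer note: the new `features.md5.enabled` key reaches the app through viper, and with the `S3_GW` prefix from this PR it maps to the `S3_GW_FEATURES_MD5_ENABLED` environment variable. A minimal sketch of that mapping, assuming the app binds env variables with a dot-to-underscore replacer (the binding code itself is outside this diff):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/spf13/viper"
)

func main() {
	v := viper.New()
	// "S3_GW" and "features.md5.enabled" are taken from this PR; the
	// replacer setup is an assumption about how the app binds env vars.
	v.SetEnvPrefix("S3_GW")
	v.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
	v.AutomaticEnv()

	// Exporting S3_GW_FEATURES_MD5_ENABLED=true would flip this at startup.
	fmt.Println("md5 enabled:", v.GetBool("features.md5.enabled"))
}
```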


@ -149,6 +149,8 @@ const ( // Settings.
	cfgSetCopiesNumber = "frostfs.set_copies_number"
	// Enabling client side object preparing for PUT operations.
	cfgClientCut = "frostfs.client_cut"
+	// Maximum number of attempts to make a successful tree request.
+	cfgTreePoolMaxAttempts = "frostfs.tree_pool_max_attempts"
	// List of allowed AccessKeyID prefixes.
	cfgAllowedAccessKeyIDPrefixes = "allowed_access_key_id_prefixes"
@ -160,6 +162,9 @@ const ( // Settings.
	// Runtime.
	cfgSoftMemoryLimit = "runtime.soft_memory_limit"
+	// Enable returning the MD5 checksum in the ETag.
+	cfgMD5Enabled = "features.md5.enabled"
	// envPrefix is an environment variables prefix used for configuration.
	envPrefix = "S3_GW"
)


@ -127,6 +127,9 @@ S3_GW_CORS_DEFAULT_MAX_AGE=600
S3_GW_FROSTFS_SET_COPIES_NUMBER=0
# This flag enables client side object preparing.
S3_GW_FROSTFS_CLIENT_CUT=false
+# Maximum number of attempts to make a successful tree request.
+# The default value 0 means the number of attempts equals the number of nodes in the pool.
+S3_GW_FROSTFS_TREE_POOL_MAX_ATTEMPTS=0
# List of allowed AccessKeyID prefixes
# If not set, S3 GW will accept all AccessKeyIDs
@ -147,4 +150,6 @@
S3_GW_TRACING_ENABLED=false
S3_GW_TRACING_ENDPOINT="localhost:4318"
S3_GW_TRACING_EXPORTER="otlp_grpc"
S3_GW_RUNTIME_SOFT_MEMORY_LIMIT=1073741824
+S3_GW_FEATURES_MD5_ENABLED=false


@ -152,6 +152,9 @@
frostfs:
  set_copies_number: [0]
  # This flag enables client side object preparing.
  client_cut: false
+  # Maximum number of attempts to make a successful tree request.
+  # The default value 0 means the number of attempts equals the number of nodes in the pool.
+  tree_pool_max_attempts: 0
# List of allowed AccessKeyID prefixes
# If the parameter is omitted, S3 GW will accept all AccessKeyIDs
@ -173,4 +176,8 @@
kludge:
  bypass_content_encoding_check_in_chunks: false
runtime:
  soft_memory_limit: 1gb
+features:
+  md5:
+    enabled: false


@ -186,6 +186,7 @@ There are some custom types used for brevity:
| `resolve_bucket` | [Bucket name resolving configuration](#resolve_bucket-section) |
| `kludge`         | [Different kludge configuration](#kludge-section)              |
| `runtime`        | [Runtime configuration](#runtime-section)                      |
+| `features`      | [Features configuration](#features-section)                    |
### General section
@ -509,12 +510,14 @@
header for `PutObject`, `CopyObject`, `CreateMultipartUpload`.

```yaml
frostfs:
  set_copies_number: [0]
  client_cut: false
+  tree_pool_max_attempts: 0
```

| Parameter                | Type       | SIGHUP reload | Default value | Description                                                                                                                                                                                                      |
|--------------------------|------------|---------------|---------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `set_copies_number`      | `[]uint32` | yes           | `[0]`         | Numbers of the object copies (for each replica) to consider PUT to FrostFS successful. <br/>Default value `[0]` or empty list means that object will be processed according to the container's placement policy |
| `client_cut`             | `bool`     | yes           | `false`       | This flag enables client side object preparing.                                                                                                                                                                  |
+| `tree_pool_max_attempts` | `uint32`  | no            | `0`           | Maximum number of attempts to make a successful tree request. Value `0` means the number of attempts equals the number of nodes in the pool.                                                                     |
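Reviewer note: this setting is applied to the SDK tree pool before the pool is built (see the `prmTree.SetMaxRequestAttempts` call in the app wiring above). A minimal sketch of that wiring, assuming the SDK's `treepool.InitParameters` type name; only the import path and the `SetMaxRequestAttempts` call appear verbatim in this PR:

```go
package main

import (
	treepool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree"
	"github.com/spf13/viper"
)

// configureTreePool shows how frostfs.tree_pool_max_attempts could be fed
// into the tree pool parameters. InitParameters is an assumption about the
// SDK type; SetMaxRequestAttempts is the call used in this PR.
func configureTreePool(cfg *viper.Viper) treepool.InitParameters {
	var prmTree treepool.InitParameters
	// 0 keeps the default behaviour: one attempt per node in the pool.
	prmTree.SetMaxRequestAttempts(cfg.GetInt("frostfs.tree_pool_max_attempts"))
	return prmTree
}

func main() {
	v := viper.New()
	v.SetDefault("frostfs.tree_pool_max_attempts", 0)
	_ = configureTreePool(v)
}
```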
# `resolve_bucket` section # `resolve_bucket` section
@ -559,4 +562,17 @@
runtime:
| Parameter           | Type   | SIGHUP reload | Default value | Description                                                                                                                                                                   |
|---------------------|--------|---------------|---------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `soft_memory_limit` | `size` | yes           | maxint64      | Soft memory limit for the runtime. Zero or no value stands for no limit. If `GOMEMLIMIT` environment variable is set, the value from the configuration file will be ignored. |
+
+# `features` section
+
+Contains parameters for enabling features.
+
+```yaml
+features:
+  md5:
+    enabled: false
+```
+
+| Parameter     | Type   | SIGHUP reload | Default value | Description                                                            |
+|---------------|--------|---------------|---------------|------------------------------------------------------------------------|
+| `md5.enabled` | `bool` | yes           | false         | Flag to enable returning the MD5 checksum in ETag headers and fields.  |
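Reviewer note on what the flag changes on the wire: with `md5.enabled: true` the ETag becomes the plain MD5 of the payload, so a client can verify an upload directly. A hedged sketch using aws-sdk-go (already a dependency in go.mod); the bucket/key names, region, and endpoint are placeholders, not part of this PR:

```go
package main

import (
	"bytes"
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	payload := []byte("hello frostfs")
	sess := session.Must(session.NewSession(&aws.Config{
		Region:           aws.String("us-east-1"),             // placeholder
		Endpoint:         aws.String("http://localhost:8080"), // placeholder gateway address
		S3ForcePathStyle: aws.Bool(true),
	}))
	client := s3.New(sess)

	if _, err := client.PutObject(&s3.PutObjectInput{
		Bucket: aws.String("example-bucket"),
		Key:    aws.String("example-key"),
		Body:   bytes.NewReader(payload),
	}); err != nil {
		panic(err)
	}

	head, err := client.HeadObject(&s3.HeadObjectInput{
		Bucket: aws.String("example-bucket"),
		Key:    aws.String("example-key"),
	})
	if err != nil {
		panic(err)
	}

	// With md5.enabled, the unquoted ETag should equal the payload's MD5.
	sum := md5.Sum(payload)
	got := strings.Trim(aws.StringValue(head.ETag), `"`)
	fmt.Println("etag matches md5:", got == hex.EncodeToString(sum[:]))
}
```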

go.mod

@ -3,9 +3,9 @@ module git.frostfs.info/TrueCloudLab/frostfs-s3-gw
go 1.20

require (
-	git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.15.1-0.20230802075510-964c3edb3f44
+	git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20231121085847-241a9f1ad0a4
	git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20230531082742-c97d21411eb6
-	git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20230821090303-202412230a05
+	git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20231107114540-ab75edd70939
	github.com/aws/aws-sdk-go v1.44.6
	github.com/bluele/gcache v0.0.2
	github.com/go-chi/chi/v5 v5.0.8

go.sum

@ -36,16 +36,16 @@ cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RX
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
-git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.15.1-0.20230802075510-964c3edb3f44 h1:v6JqBD/VzZx3QSxbaXnUwnnJ1KEYheU4LzLGr3IhsAE=
-git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.15.1-0.20230802075510-964c3edb3f44/go.mod h1:pKJJRLOChW4zDQsAt1e8k/snWKljJtpkiPfxV53ngjI=
+git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20231121085847-241a9f1ad0a4 h1:wjLfZ3WCt7qNGsQv+Jl0TXnmtg0uVk/jToKPFTBc/jo=
+git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20231121085847-241a9f1ad0a4/go.mod h1:uY0AYmCznjZdghDnAk7THFIe1Vlg531IxUcus7ZfUJI=
git.frostfs.info/TrueCloudLab/frostfs-contract v0.0.0-20230307110621-19a8ef2d02fb h1:S/TrbOOu9qEXZRZ9/Ddw7crnxbBUQLo68PSzQWYrc9M=
git.frostfs.info/TrueCloudLab/frostfs-contract v0.0.0-20230307110621-19a8ef2d02fb/go.mod h1:nkR5gaGeez3Zv2SE7aceP0YwxG2FzIB5cGKpQO2vV2o=
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk=
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0/go.mod h1:RUIKZATQLJ+TaYQa60X2fTDwfuhMfm8Ar60bQ5fr+vU=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20230531082742-c97d21411eb6 h1:aGQ6QaAnTerQ5Dq5b2/f9DUQtSqPkZZ/bkMx/HKuLCo=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20230531082742-c97d21411eb6/go.mod h1:W8Nn08/l6aQ7UlIbpF7FsQou7TVpcRD1ZT1KG4TrFhE=
-git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20230821090303-202412230a05 h1:OuViMF54N87FXmaBEpYw3jhzaLrJ/EWOlPL1wUkimE0=
-git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20230821090303-202412230a05/go.mod h1:t1akKcUH7iBrFHX8rSXScYMP17k2kYQXMbZooiL5Juw=
+git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20231107114540-ab75edd70939 h1:jZEepi9yWmqrWgLRQcHQu4YPJaudmd7d2AEhpmM3m4U=
+git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20231107114540-ab75edd70939/go.mod h1:t1akKcUH7iBrFHX8rSXScYMP17k2kYQXMbZooiL5Juw=
git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc=
git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM=
git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0 h1:M2KR3iBj7WpY3hP10IevfIB9MURr4O9mwVfJ+SjT3HA=


@ -75,6 +75,7 @@ const (
ResolveBucket = "resolve bucket" // Info in ../../api/layer/layer.go ResolveBucket = "resolve bucket" // Info in ../../api/layer/layer.go
CouldntDeleteCorsObject = "couldn't delete cors object" // Error in ../../api/layer/cors.go CouldntDeleteCorsObject = "couldn't delete cors object" // Error in ../../api/layer/cors.go
PutObject = "put object" // Debug in ../../api/layer/object.go PutObject = "put object" // Debug in ../../api/layer/object.go
FailedToDeleteObject = "failed to delete object" // Debug in ../../api/layer/object.go
FailedToDiscardPutPayloadProbablyGoroutineLeaks = "failed to discard put payload, probably goroutine leaks" // Warn in ../../api/layer/object.go FailedToDiscardPutPayloadProbablyGoroutineLeaks = "failed to discard put payload, probably goroutine leaks" // Warn in ../../api/layer/object.go
FailedToSubmitTaskToPool = "failed to submit task to pool" // Warn in ../../api/layer/object.go FailedToSubmitTaskToPool = "failed to submit task to pool" // Warn in ../../api/layer/object.go
CouldNotFetchObjectMeta = "could not fetch object meta" // Warn in ../../api/layer/object.go CouldNotFetchObjectMeta = "could not fetch object meta" // Warn in ../../api/layer/object.go


@ -81,6 +81,7 @@ const (
partNumberKV = "Number" partNumberKV = "Number"
sizeKV = "Size" sizeKV = "Size"
etagKV = "ETag" etagKV = "ETag"
md5KV = "MD5"
// keys for lock. // keys for lock.
isLockKV = "IsLock" isLockKV = "IsLock"
@ -185,6 +186,7 @@ func newNodeVersionFromTreeNode(filePath string, treeNode *treeNode) *data.NodeV
	_, isDeleteMarker := treeNode.Get(isDeleteMarkerKV)
	_, isCombined := treeNode.Get(isCombinedKV)
	eTag, _ := treeNode.Get(etagKV)
+	md5, _ := treeNode.Get(md5KV)
	version := &data.NodeVersion{
		BaseNodeVersion: data.BaseNodeVersion{
@ -193,6 +195,7 @@ func newNodeVersionFromTreeNode(filePath string, treeNode *treeNode) *data.NodeV
			OID:       treeNode.ObjID,
			Timestamp: treeNode.TimeStamp,
			ETag:      eTag,
+			MD5:       md5,
			Size:      treeNode.Size,
			FilePath:  filePath,
		},
@ -302,6 +305,8 @@ func newPartInfo(node NodeResponse) (*data.PartInfo, error) {
			return nil, fmt.Errorf("invalid created timestamp: %w", err)
		}
		partInfo.Created = time.UnixMilli(utcMilli)
+	case md5KV:
+		partInfo.MD5 = value
	}
}
@ -471,16 +476,7 @@ func (c *Tree) PutObjectTagging(ctx context.Context, bktInfo *data.BucketInfo, o
}
func (c *Tree) DeleteObjectTagging(ctx context.Context, bktInfo *data.BucketInfo, objVersion *data.NodeVersion) error {
-	tagNode, err := c.getTreeNode(ctx, bktInfo, objVersion.ID, isTagKV)
-	if err != nil {
-		return err
-	}
-	if tagNode == nil {
-		return nil
-	}
-	return c.service.RemoveNode(ctx, bktInfo, versionTree, tagNode.ID)
+	return c.PutObjectTagging(ctx, bktInfo, objVersion, nil)
}
func (c *Tree) GetBucketTagging(ctx context.Context, bktInfo *data.BucketInfo) (map[string]string, error) {
@ -524,16 +520,7 @@ func (c *Tree) PutBucketTagging(ctx context.Context, bktInfo *data.BucketInfo, t
}
func (c *Tree) DeleteBucketTagging(ctx context.Context, bktInfo *data.BucketInfo) error {
-	node, err := c.getSystemNode(ctx, bktInfo, []string{bucketTaggingFilename}, nil)
-	if err != nil && !errors.Is(err, layer.ErrNodeNotFound) {
-		return err
-	}
-	if node != nil {
-		return c.service.RemoveNode(ctx, bktInfo, systemTree, node.ID)
-	}
-	return nil
+	return c.PutBucketTagging(ctx, bktInfo, nil)
}
func (c *Tree) getTreeNode(ctx context.Context, bktInfo *data.BucketInfo, nodeID uint64, key string) (*treeNode, error) {
@ -578,7 +565,7 @@ func (c *Tree) GetVersions(ctx context.Context, bktInfo *data.BucketInfo, filepa
}
func (c *Tree) GetLatestVersion(ctx context.Context, bktInfo *data.BucketInfo, objectName string) (*data.NodeVersion, error) {
-	meta := []string{oidKV, isUnversionedKV, isDeleteMarkerKV, etagKV, sizeKV}
+	meta := []string{oidKV, isUnversionedKV, isDeleteMarkerKV, etagKV, sizeKV, md5KV}
	path := pathFromName(objectName)
	p := &GetNodesParams{
@ -586,7 +573,7 @@ func (c *Tree) GetLatestVersion(ctx context.Context, bktInfo *data.BucketInfo, o
		TreeID:     versionTree,
		Path:       path,
		Meta:       meta,
-		LatestOnly: true,
+		LatestOnly: false,
		AllAttrs:   false,
	}
	nodes, err := c.service.GetNodes(ctx, p)
@ -594,11 +581,43 @@ func (c *Tree) GetLatestVersion(ctx context.Context, bktInfo *data.BucketInfo, o
		return nil, err
	}
-	if len(nodes) == 0 {
+	latestNode, err := getLatestNode(nodes)
+	if err != nil {
+		return nil, err
+	}
+	return newNodeVersion(objectName, latestNode)
+}
+
+func getLatestNode(nodes []NodeResponse) (NodeResponse, error) {
+	var (
+		maxCreationTime uint64
+		targetIndexNode = -1
+	)
+	for i, node := range nodes {
+		currentCreationTime := node.GetTimestamp()
+		if checkExistOID(node.GetMeta()) && currentCreationTime > maxCreationTime {
+			maxCreationTime = currentCreationTime
+			targetIndexNode = i
+		}
+	}
+	if targetIndexNode == -1 {
		return nil, layer.ErrNodeNotFound
	}
-	return newNodeVersion(objectName, nodes[0])
+	return nodes[targetIndexNode], nil
+}
+
+func checkExistOID(meta []Meta) bool {
+	for _, kv := range meta {
+		if kv.GetKey() == "OID" {
+			return true
+		}
+	}
+	return false
}
// pathFromName splits name by '/'.
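Reviewer note on the selection rule above: because `LatestOnly` is now `false`, the tree service can return several nodes for one path, including auxiliary nodes that carry no `OID` meta key; `getLatestNode` picks the newest node that actually has one. A self-contained sketch of the same rule on toy data (the types are simplified stand-ins for illustration, not the gateway's):

```go
package main

import "fmt"

// node is a simplified stand-in for the tree service's NodeResponse.
type node struct {
	id        uint64
	timestamp uint64
	hasOID    bool
}

// latest mirrors getLatestNode: among all nodes returned for a path,
// pick the newest one that carries an OID meta key.
func latest(nodes []node) (node, bool) {
	var (
		best  node
		found bool
	)
	for _, n := range nodes {
		if n.hasOID && (!found || n.timestamp > best.timestamp) {
			best, found = n, true
		}
	}
	return best, found
}

func main() {
	nodes := []node{
		{id: 1, timestamp: 1, hasOID: true},
		{id: 3, timestamp: 3, hasOID: false}, // secondary object without OID
		{id: 4, timestamp: 4, hasOID: true},
	}
	if n, ok := latest(nodes); ok {
		fmt.Println("latest node:", n.id) // prints: latest node: 4
	}
}
```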
@ -992,6 +1011,7 @@ func (c *Tree) AddPart(ctx context.Context, bktInfo *data.BucketInfo, multipartN
		sizeKV:    strconv.FormatUint(info.Size, 10),
		createdKV: strconv.FormatInt(info.Created.UTC().UnixMilli(), 10),
		etagKV:    info.ETag,
+		md5KV:     info.MD5,
	}
	for _, part := range parts {
@ -1124,6 +1144,9 @@ func (c *Tree) addVersion(ctx context.Context, bktInfo *data.BucketInfo, treeID
	if len(version.ETag) > 0 {
		meta[etagKV] = version.ETag
	}
+	if len(version.MD5) > 0 {
+		meta[md5KV] = version.MD5
+	}
	if version.IsDeleteMarker() {
		meta[isDeleteMarkerKV] = "true"
@ -1168,7 +1191,7 @@ func (c *Tree) clearOutdatedVersionInfo(ctx context.Context, bktInfo *data.Bucke
}
func (c *Tree) getVersions(ctx context.Context, bktInfo *data.BucketInfo, treeID, filepath string, onlyUnversioned bool) ([]*data.NodeVersion, error) {
-	keysToReturn := []string{oidKV, isUnversionedKV, isDeleteMarkerKV, etagKV, sizeKV}
+	keysToReturn := []string{oidKV, isUnversionedKV, isDeleteMarkerKV, etagKV, sizeKV, md5KV}
	path := pathFromName(filepath)
	p := &GetNodesParams{
		BktInfo: bktInfo,


@ -168,3 +168,127 @@ func TestTreeServiceAddVersion(t *testing.T) {
	require.Len(t, versions, 1)
	require.Equal(t, storedNode, versions[0])
}
func TestGetLatestNode(t *testing.T) {
	for _, tc := range []struct {
		name           string
		nodes          []NodeResponse
		expectedNodeID uint64
		error          bool
	}{
		{
			name:  "empty",
			nodes: []NodeResponse{},
			error: true,
		},
		{
			name: "one node of the object version",
			nodes: []NodeResponse{
				nodeResponse{
					nodeID:    1,
					parentID:  0,
					timestamp: 1,
					meta: []nodeMeta{
						{
							key:   oidKV,
							value: []byte(oidtest.ID().String()),
						},
					},
				},
			},
			expectedNodeID: 1,
		},
		{
			name: "one node of the object version and one node of the secondary object",
			nodes: []NodeResponse{
				nodeResponse{
					nodeID:    2,
					parentID:  0,
					timestamp: 3,
					meta:      []nodeMeta{},
				},
				nodeResponse{
					nodeID:    1,
					parentID:  0,
					timestamp: 1,
					meta: []nodeMeta{
						{
							key:   oidKV,
							value: []byte(oidtest.ID().String()),
						},
					},
				},
			},
			expectedNodeID: 1,
		},
		{
			name: "all nodes represent a secondary object",
			nodes: []NodeResponse{
				nodeResponse{
					nodeID:    2,
					parentID:  0,
					timestamp: 3,
					meta:      []nodeMeta{},
				},
				nodeResponse{
					nodeID:    4,
					parentID:  0,
					timestamp: 5,
					meta:      []nodeMeta{},
				},
			},
			error: true,
		},
		{
			name: "several nodes of different types and with different timestamp",
			nodes: []NodeResponse{
				nodeResponse{
					nodeID:    1,
					parentID:  0,
					timestamp: 1,
					meta: []nodeMeta{
						{
							key:   oidKV,
							value: []byte(oidtest.ID().String()),
						},
					},
				},
				nodeResponse{
					nodeID:    3,
					parentID:  0,
					timestamp: 3,
					meta:      []nodeMeta{},
				},
				nodeResponse{
					nodeID:    4,
					parentID:  0,
					timestamp: 4,
					meta: []nodeMeta{
						{
							key:   oidKV,
							value: []byte(oidtest.ID().String()),
						},
					},
				},
				nodeResponse{
					nodeID:    6,
					parentID:  0,
					timestamp: 6,
					meta:      []nodeMeta{},
				},
			},
			expectedNodeID: 4,
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			actualNode, err := getLatestNode(tc.nodes)
			if tc.error {
				require.Error(t, err)
				return
			}
			require.NoError(t, err)
			require.Equal(t, tc.expectedNodeID, actualNode.GetNodeID())
		})
	}
}