Compare commits

...

42 commits

Author SHA1 Message Date
52b89d3497 [#153] Add labels in metrics of total bytes
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2023-08-16 10:30:40 +03:00
6b109eee92 [#182] Fix parsing signed headers in presigned urls
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-08-08 13:04:53 +03:00
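A minimal sketch of the parsing change, based on the auth handling hunk later in this comparison: `X-Amz-SignedHeaders` arrives as a single semicolon-separated query value, so it has to be split into individual header names instead of being used as-is. Identifier names other than the header constant are illustrative.

package main

import (
	"fmt"
	"net/url"
	"strings"
)

const AmzSignedHeaders = "X-Amz-SignedHeaders"

// signedFields extracts the list of signed header names from a presigned URL query.
func signedFields(rawQuery string) ([]string, error) {
	queryValues, err := url.ParseQuery(rawQuery)
	if err != nil {
		return nil, err
	}
	// Before the fix, queryValues[AmzSignedHeaders] produced a one-element slice
	// like []string{"host;x-amz-date"}; splitting on ";" yields the real field list.
	return strings.Split(queryValues.Get(AmzSignedHeaders), ";"), nil
}

func main() {
	fields, _ := signedFields("X-Amz-SignedHeaders=host%3Bx-amz-date")
	fmt.Println(fields) // [host x-amz-date]
}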
18878b66d3 [#175] Use gate owner as object owner
This is required because the node checks the session token owner
(TrueCloudLab/frostfs-node#528).
For client cut (TrueCloudLab/frostfs-sdk-go#114)
such an owner will be the gate owner.

Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-08-08 12:22:33 +03:00
46eae4a356 [#179] Fix GetSubTree failures with updated SDK
Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2023-08-02 14:56:23 +00:00
fe897ec588 [#178] wrapReader: Fix goroutine leak
In case of an error in FrostFS.CreateObject, the wrapped reader
can be blocked because of the synchronous pipe. We have to read out the whole payload in that case.

Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-08-01 17:59:34 +03:00
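A self-contained sketch of the failure mode and the fix, assuming an io.Pipe-based producer like the one described above; the helper names are illustrative, not the gateway's actual code.

package main

import (
	"bytes"
	"errors"
	"io"
)

// createObject stands in for FrostFS.CreateObject failing after a partial read.
func createObject(payload io.Reader) error {
	buf := make([]byte, 4)
	_, _ = payload.Read(buf)
	return errors.New("storage error")
}

func upload(payload io.Reader) error {
	pr, pw := io.Pipe()
	go func() {
		_, err := io.Copy(pw, payload) // blocks until the reader side consumes the data
		pw.CloseWithError(err)
	}()

	if err := createObject(pr); err != nil {
		// Drain the pipe so the copying goroutine can finish and exit;
		// without this it leaks, blocked forever on a pipe write.
		_, _ = io.Copy(io.Discard, pr)
		return err
	}
	return nil
}

func main() {
	_ = upload(bytes.NewReader(bytes.Repeat([]byte("x"), 1<<20)))
}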
52931663e1 [#176] multipart: Replace part on re-upload
We want to have exactly one object and tree node for each part number

Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-07-21 16:20:04 +03:00
1a09041cd1 [#63] Simplify multiObjectReader and add tests
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-07-20 14:00:15 +03:00
631b7f67b4 [#63] multipart: Log upload id for every failed request
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-07-20 14:00:15 +03:00
8ca2998297 [#63] Update CHANGELOG.md
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-07-20 14:00:15 +03:00
bcf5a85aab [#63] multipart: Fix copying
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-07-20 14:00:15 +03:00
ad81b599dd [#63] Add fast multipart upload
Add a new flag `isCombined` to the object tree meta, meaning
the object payload is a list of parts that forms the real payload.
Set this attribute on complete multipart upload to avoid unnecessary copying.

Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-07-20 14:00:15 +03:00
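An illustrative sketch of the idea, assuming (as the PartInfo json tags later in this comparison suggest) that a combined object's payload is the JSON-encoded list of its parts rather than the concatenated part data; the types here are simplified.

package main

import (
	"encoding/json"
	"fmt"
)

type partInfo struct {
	Number int    `json:"number"`
	OID    string `json:"oid"`
	Size   uint64 `json:"size"`
	ETag   string `json:"etag"`
}

// completeMultipart builds the payload of a "combined" object: a small listing of
// parts that readers resolve later, instead of copying every part's bytes now.
func completeMultipart(parts []partInfo) ([]byte, error) {
	return json.Marshal(parts)
}

func main() {
	payload, _ := completeMultipart([]partInfo{
		{Number: 1, OID: "part-oid-1", Size: 5 << 20, ETag: "etag1"},
		{Number: 2, OID: "part-oid-2", Size: 3 << 20, ETag: "etag2"},
	})
	fmt.Println(string(payload))
}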
361d10cc78 [#174] Fix query for listing multipart uploads
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-07-20 12:30:17 +03:00
62e6b49254 [#174] Log unmatched requests
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-07-20 12:30:17 +03:00
80c4982bd4 [#174] Add router tests
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-07-20 12:30:17 +03:00
73ed3f7782 [#174] Fix router filter query matching
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-07-20 12:30:17 +03:00
6e3595e35b [#174] Fix object keys with slashes in chi
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-07-20 12:30:17 +03:00
57add29643 [#173] Use forked actions in workflow
Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2023-07-18 15:18:34 +03:00
b59aa06637 [#146] Add kludge.bypass_content_encoding_check_in_chunks flag
The flag allows skipping the `Content-Encoding` check for the `aws-chunked` value

Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-07-18 14:49:52 +03:00
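A hedged sketch of what the kludge does, based only on the flag name and description above; the function and parameter names are illustrative and not the gateway's real API.

package main

import (
	"fmt"
	"strings"
)

// checkContentEncoding reports whether Content-Encoding is acceptable for an
// aws-chunked upload; with the kludge enabled the check is skipped entirely.
func checkContentEncoding(contentEncoding string, bypass bool) error {
	if bypass { // kludge.bypass_content_encoding_check_in_chunks enabled
		return nil
	}
	for _, enc := range strings.Split(contentEncoding, ",") {
		if strings.TrimSpace(enc) == "aws-chunked" {
			return nil
		}
	}
	return fmt.Errorf("chunked upload expects aws-chunked in Content-Encoding, got %q", contentEncoding)
}

func main() {
	fmt.Println(checkContentEncoding("gzip", false)) // error
	fmt.Println(checkContentEncoding("gzip", true))  // <nil>, check bypassed
}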
d62aa7b979 [#146] Fix preconditions: trim quotes in etags
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-07-18 14:49:03 +03:00
751a9be7cc [#146] Move getting chunk payload reader to separate function
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-07-18 14:49:03 +03:00
e58ea40463 Release v0.27.0
Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2023-07-14 10:53:28 +03:00
14ef9ff091 [#158] Separate init object reader from read itself
To be able to handle such cases and return the appropriate HTTP status code
when an object is missing in storage but the gate cache still contains its metadata,
we need to run code after the object reader is initialized.
So we separate reader initialization from the actual reading.

Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-07-11 17:32:05 +03:00
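A simplified sketch of the init/stream split, loosely following the GetObject/StreamTo shape visible later in this comparison; ObjectPayload and initObjectReader here are stand-ins, not the gateway's exact types.

package main

import (
	"io"
	"os"
	"strings"
)

type ObjectPayload struct {
	r io.Reader
}

// initObjectReader does everything that can fail with a meaningful S3 error
// (resolving the version, opening the payload in storage) before any response
// headers are written, so a missing object can still become a clean 404.
func initObjectReader(open func() (io.Reader, error)) (*ObjectPayload, error) {
	r, err := open()
	if err != nil {
		return nil, err // caller maps this to NoSuchKey / NoSuchVersion
	}
	return &ObjectPayload{r: r}, nil
}

// StreamTo performs the actual reading; failures here happen after headers
// have been sent and can only abort the response body.
func (p *ObjectPayload) StreamTo(w io.Writer) error {
	_, err := io.Copy(w, p.r)
	return err
}

func main() {
	payload, err := initObjectReader(func() (io.Reader, error) { return strings.NewReader("data"), nil })
	if err != nil {
		return
	}
	_ = payload.StreamTo(os.Stdout)
}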
fc90981c03 [#149] Update inner imports after moving middlewares
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-07-11 17:25:09 +03:00
83cdfbee78 [#149] Move middlewares to separate package
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-07-11 17:25:09 +03:00
37f2f468fe [#149] Add host bucket router
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-07-11 17:25:09 +03:00
e30a452cdb [#149] Use chi instead of gorilla mux
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-07-11 17:25:09 +03:00
7be70243f7 [#166] Update sdk to support grpc schemes in tree pool
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-07-11 17:25:03 +03:00
7f708b3a2d [#111] auth: Get log from real request context
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-07-06 11:57:04 +00:00
d531b13866 [#143] Add more context to some s3 errors
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-06-30 12:08:33 +03:00
f921bc8af5 [#143] Fix typo
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-06-30 12:04:52 +03:00
be03c5178f [#143] Fix NoSuchKey error on get/head
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-06-30 12:04:52 +03:00
499f4c6495 [#155] metrics: Use default registerer for app metrics
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-06-28 12:00:47 +03:00
2cbe3b9a27 [#131] Update docs
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-06-27 14:18:16 +03:00
c588d485fa [#131] authmate: Add update-secret cobra cmd
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-06-27 14:18:16 +03:00
3927223bb0 [#131] authmate: Add generate-presigned-url cobra cmd
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-06-27 14:18:16 +03:00
eba85b50b6 [#131] authmate: Add obtain-secret cobra cmd
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-06-27 14:18:16 +03:00
043447600e [#131] authmate: Add issue-secret cobra cmd
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-06-27 14:18:16 +03:00
0cd353707a [#131] authmate: Make authmate use cobra
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-06-27 14:18:16 +03:00
f74ab12f91 [#131] authmate: Add agent.UpdateSecret
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-06-27 14:18:16 +03:00
dea7b39805 [#131] Fix session token limit by container
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-06-27 14:18:16 +03:00
9df8695463 [#143] Fix transformToS3Error function
Unwrap error before checking for s3 error

Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-06-21 17:16:40 +03:00
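A minimal sketch of the fix using the standard errors package; unwrapAll stands in for the gateway's internal UnwrapErr helper referenced in the IsS3Error change later in this comparison.

package main

import (
	"errors"
	"fmt"
)

type s3Error struct{ code string }

func (e s3Error) Error() string { return e.code }

// unwrapAll peels wrapping layers added via fmt.Errorf("%w", ...) and friends.
func unwrapAll(err error) error {
	for {
		inner := errors.Unwrap(err)
		if inner == nil {
			return err
		}
		err = inner
	}
}

func isS3Error(err error, code string) bool {
	err = unwrapAll(err) // without this, wrapped errors never match the s3 error type
	e, ok := err.(s3Error)
	return ok && e.code == code
}

func main() {
	wrapped := fmt.Errorf("precondition check: %w", s3Error{code: "NoSuchKey"})
	fmt.Println(isS3Error(wrapped, "NoSuchKey")) // true
}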
614d703726 [#106] Add chunk uploading
Signed-off-by: Artem Tataurov <a.tataurov@yadro.com>
2023-06-21 10:23:57 +03:00
93 changed files with 4396 additions and 2225 deletions

View file

@ -15,6 +15,6 @@ jobs:
go-version: '1.20'
- name: Run commit format checker
uses: https://git.alexvan.in/alexvanin/dco-go@v1
uses: https://git.frostfs.info/TrueCloudLab/dco-go@v1
with:
from: 3fbad97a

View file

@ -5,54 +5,84 @@ This document outlines major changes between releases.
## [Unreleased]
### Fixed
- Clean up List and Name caches when object is missing in Tree service (#57)
- Get empty bucket CORS from frostfs (TrueCloudLab#36)
- Don't count pool error on client abort (#35)
- Don't create unnecessary delete-markers (#83)
- Handle negative `Content-Length` on put (#125)
- Use `DisableURIPathEscaping` to presign urls (#125)
- Use specific s3 errors instead of `InternalError` where possible (#143)
- `grpc` schemas in tree configuration (#166)
- Return appropriate 404 code when an object is missing in storage but still present in the gate cache (#158)
- Replace part on re-upload when using multipart upload (#176)
- Fix goroutine leak on put object error (#178)
- Fix parsing signed headers in presigned urls (#182)
### Added
- Reload default and custom copies numbers on SIGHUP (#104)
- Add `copies_numbers` section to `placement_policy` in config file and support vectors of copies numbers (#70)
- Return `X-Owner-Id` in `head-bucket` response (#79)
- Return container name in `head-bucket` response (TrueCloudLab#18)
- Billing metrics (TrueCloudLab#5)
- Multiple configs support (TrueCloudLab#21)
- Bucket name resolving policy (TrueCloudLab#25)
- Support string `Action` and `Resource` fields in `bucketPolicy.Statement` (TrueCloudLab#32)
- Add new `kludge.use_default_xmlns_for_complete_multipart` config param (TrueCloudLab#40)
- Support dump metrics descriptions (#80)
- Support impersonate bearer token (#81)
- Add `copies_numbers` section to `placement_policy` in config file and support vectors of copies numbers (#70, #101)
- Support impersonate bearer token (#81, #105)
- Reload default and custom copies numbers on SIGHUP (#104)
- Tracing support (#84, #140)
- Return bearer token in `s3-authmate obtain-secret` result (#132)
- Support multiple version credentials using GSet (#135)
- Implement chunk uploading (#106)
- Add new `kludge.bypass_content_encoding_check_in_chunks` config param (#146)
### Changed
- Remove object from tree and reset its cache on object deletion when it is already removed from storage (#78)
- Update prometheus to v1.15.0 (#94)
- Update syncTree.sh due to recent renaming (#73)
- Update neo-go to v0.101.0 (#14)
- Update viper to v1.15.0 (#14)
- Using multiple servers requires only one healthy (TrueCloudLab#12)
- Update go version to go1.18 (TrueCloudLab#16)
- Update go version to go1.19 (#118)
- Return error on invalid LocationConstraint (TrueCloudLab#23)
- Place billing metrics to separate url path (TrueCloudLab#26)
- Add generated deb builder files to .gitignore, and fix typo (TrueCloudLab#28)
- Limit number of objects to delete at one time (TrueCloudLab#37)
- CompleteMultipartUpload handler now sends whitespace characters to keep the client's connection alive (#60)
- Support new system attributes (#64)
- Changed values for `frostfs_s3_gw_state_health` metric (#91)
- Support multiple tree service endpoints (#74)
- Remove object from tree and reset its cache on object deletion when it is already removed from storage (#78)
- Finish rebranding (#2)
- Timeout errors now have code 504 (#103)
- Support multiple version credentials using GSet (#135)
- Use request scope logger (#111)
- Add `s3-authmate update-secret` command (#131)
- Use default registerer for app metrics (#155)
- Use chi router instead of archived gorilla/mux (#149)
- Complete multipart upload no longer performs an unnecessary copy, roughly halving the total multipart upload time (#63)
- Use gate key to form object owner (#175)
### Removed
- Drop `tree.service` param (now endpoints from `peers` section are used) (#133)
## [0.27.0] - Karpinsky - 2023-07-12
This is the first FrostFS S3 Gateway release, named after
[Karpinsky glacier](https://en.wikipedia.org/wiki/Karpinsky_Glacier).
### Fixed
- Using multiple servers requires only one healthy (#12)
- Renew token before it expires (#20)
- Add generated deb builder files to .gitignore, and fix typo (#28)
- Get empty bucket CORS from frostfs (#36)
- Don't count pool error on client abort (#35)
- Handle request cancelling (#69)
- Clean up List and Name caches when object is missing in Tree service (#57)
- Don't create unnecessary delete-markers (#83)
- `Too many pings` error (#145)
### Added
- Billing metrics (#5, #26, #29)
- Return container name in `head-bucket` response (#18)
- Multiple configs support (#21)
- Bucket name resolving policy (#25)
- Support string `Action` and `Resource` fields in `bucketPolicy.Statement` (#32)
- Add new `kludge.use_default_xmlns_for_complete_multipart` config param (#40)
- Return `X-Owner-Id` in `head-bucket` response (#79)
- Support multiple tree service endpoints (#74, #110, #114)
### Changed
- Repository rebranding (#1)
- Update neo-go to v0.101.0 (#14)
- Update viper to v1.15.0 (#14)
- Update go version to go1.18 (#16)
- Return error on invalid LocationConstraint (#23)
- Limit number of objects to delete at one time (#37)
- CompleteMultipartUpload handler now sends whitespace characters to keep the client's connection alive (#60)
- Support new system attributes (#64)
- Abstract network communication in TreeClient (#59, #75)
- Changed values for `frostfs_s3_gw_state_health` metric (#91)
## Older versions
This project is a fork of [NeoFS S3 Gateway](https://github.com/nspcc-dev/neofs-s3-gw) from version v0.26.0.
To see CHANGELOG for older versions, refer to https://github.com/nspcc-dev/neofs-s3-gw/blob/master/CHANGELOG.md.
[Unreleased]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/b2148cc3...master
[0.27.0]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/b2148cc3...v0.27.0
[Unreleased]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.27.0...master

View file

@ -1 +1 @@
v0.26.0
v0.27.0

View file

@ -38,8 +38,9 @@ type (
// Box contains access box and additional info.
Box struct {
AccessBox *accessbox.Box
ClientTime time.Time
AccessBox *accessbox.Box
ClientTime time.Time
AuthHeaders *AuthHeader
}
center struct {
@ -51,7 +52,8 @@ type (
prs int
authHeader struct {
//nolint:revive
AuthHeader struct {
AccessKeyID string
Service string
Region string
@ -101,7 +103,7 @@ func New(frostFS tokens.FrostFS, key *keys.PrivateKey, prefixes []string, config
}
}
func (c *center) parseAuthHeader(header string) (*authHeader, error) {
func (c *center) parseAuthHeader(header string) (*AuthHeader, error) {
submatches := c.reg.GetSubmatches(header)
if len(submatches) != authHeaderPartsNum {
return nil, apiErrors.GetAPIError(apiErrors.ErrAuthorizationHeaderMalformed)
@ -114,7 +116,7 @@ func (c *center) parseAuthHeader(header string) (*authHeader, error) {
signedFields := strings.Split(submatches["signed_header_fields"], ";")
return &authHeader{
return &AuthHeader{
AccessKeyID: submatches["access_key_id"],
Service: submatches["service"],
Region: submatches["region"],
@ -124,7 +126,7 @@ func (c *center) parseAuthHeader(header string) (*authHeader, error) {
}, nil
}
func (a *authHeader) getAddress() (oid.Address, error) {
func (a *AuthHeader) getAddress() (oid.Address, error) {
var addr oid.Address
if err := addr.DecodeString(strings.ReplaceAll(a.AccessKeyID, "0", "/")); err != nil {
return addr, apiErrors.GetAPIError(apiErrors.ErrInvalidAccessKeyID)
@ -135,7 +137,7 @@ func (a *authHeader) getAddress() (oid.Address, error) {
func (c *center) Authenticate(r *http.Request) (*Box, error) {
var (
err error
authHdr *authHeader
authHdr *AuthHeader
signatureDateTimeStr string
needClientTime bool
)
@ -146,12 +148,12 @@ func (c *center) Authenticate(r *http.Request) (*Box, error) {
if len(creds) != 5 || creds[4] != "aws4_request" {
return nil, fmt.Errorf("bad X-Amz-Credential")
}
authHdr = &authHeader{
authHdr = &AuthHeader{
AccessKeyID: creds[0],
Service: creds[3],
Region: creds[2],
SignatureV4: queryValues.Get(AmzSignature),
SignedFields: queryValues[AmzSignedHeaders],
SignedFields: strings.Split(queryValues.Get(AmzSignedHeaders), ";"),
Date: creds[1],
IsPresigned: true,
}
@ -200,7 +202,10 @@ func (c *center) Authenticate(r *http.Request) (*Box, error) {
return nil, err
}
result := &Box{AccessBox: box}
result := &Box{
AccessBox: box,
AuthHeaders: authHdr,
}
if needClientTime {
result.ClientTime = signatureDateTime
}
@ -267,7 +272,7 @@ func (c *center) checkFormData(r *http.Request) (*Box, error) {
return &Box{AccessBox: box}, nil
}
func cloneRequest(r *http.Request, authHeader *authHeader) *http.Request {
func cloneRequest(r *http.Request, authHeader *AuthHeader) *http.Request {
otherRequest := r.Clone(context.TODO())
otherRequest.Header = make(http.Header)
@ -288,7 +293,7 @@ func cloneRequest(r *http.Request, authHeader *authHeader) *http.Request {
return otherRequest
}
func (c *center) checkSign(authHeader *authHeader, box *accessbox.Box, request *http.Request, signatureDateTime time.Time) error {
func (c *center) checkSign(authHeader *AuthHeader, box *accessbox.Box, request *http.Request, signatureDateTime time.Time) error {
awsCreds := credentials.NewStaticCredentials(authHeader.AccessKeyID, box.Gate.AccessKey, "")
signer := v4.NewSigner(awsCreds)
signer.DisableURIPathEscaping = true

View file

@ -19,12 +19,12 @@ func TestAuthHeaderParse(t *testing.T) {
for _, tc := range []struct {
header string
err error
expected *authHeader
expected *AuthHeader
}{
{
header: defaultHeader,
err: nil,
expected: &authHeader{
expected: &AuthHeader{
AccessKeyID: "oid0cid",
Service: "s3",
Region: "us-east-1",
@ -54,29 +54,29 @@ func TestAuthHeaderGetAddress(t *testing.T) {
defaulErr := errors.GetAPIError(errors.ErrInvalidAccessKeyID)
for _, tc := range []struct {
authHeader *authHeader
authHeader *AuthHeader
err error
}{
{
authHeader: &authHeader{
authHeader: &AuthHeader{
AccessKeyID: "vWqF8cMDRbJcvnPLALoQGnABPPhw8NyYMcGsfDPfZJM0HrgjonN8CgFvCZ3kh9BUXw4W2tJ5E7EAGhueSF122HB",
},
err: nil,
},
{
authHeader: &authHeader{
authHeader: &AuthHeader{
AccessKeyID: "vWqF8cMDRbJcvnPLALoQGnABPPhw8NyYMcGsfDPfZJMHrgjonN8CgFvCZ3kh9BUXw4W2tJ5E7EAGhueSF122HB",
},
err: defaulErr,
},
{
authHeader: &authHeader{
authHeader: &AuthHeader{
AccessKeyID: "oid0cid",
},
err: defaulErr,
},
{
authHeader: &authHeader{
authHeader: &AuthHeader{
AccessKeyID: "oidcid",
},
err: defaulErr,

View file

@ -34,6 +34,7 @@ func PresignRequest(creds *credentials.Credentials, reqData RequestData, presign
}
req.Header.Set(AmzDate, presignData.SignTime.Format("20060102T150405Z"))
req.Header.Set(ContentTypeHdr, "text/plain")
signer := v4.NewSigner(creds)
signer.DisableURIPathEscaping = true

View file

@ -18,6 +18,7 @@ type NodeVersion struct {
BaseNodeVersion
DeleteMarker *DeleteMarkerInfo
IsUnversioned bool
IsCombined bool
}
func (v NodeVersion) IsDeleteMarker() bool {
@ -79,13 +80,13 @@ type MultipartInfo struct {
// PartInfo is upload information about part.
type PartInfo struct {
Key string
UploadID string
Number int
OID oid.ID
Size uint64
ETag string
Created time.Time
Key string `json:"key"`
UploadID string `json:"uploadId"`
Number int `json:"number"`
OID oid.ID `json:"oid"`
Size uint64 `json:"size"`
ETag string `json:"etag"`
Created time.Time `json:"created"`
}
// ToHeaderString form short part representation to use in S3-Completed-Parts header.

View file

@ -3,6 +3,8 @@ package errors
import (
"fmt"
"net/http"
frosterrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/errors"
)
type (
@ -178,6 +180,7 @@ const (
ErrGatewayTimeout
ErrOperationMaxedOut
ErrInvalidRequest
ErrInvalidRequestLargeCopy
ErrInvalidStorageClass
ErrMalformedJSON
@ -1159,6 +1162,12 @@ var errorCodes = errorCodeMap{
Description: "Invalid Request",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidRequestLargeCopy: {
ErrCode: ErrInvalidRequestLargeCopy,
Code: "InvalidRequest",
Description: "CopyObject request made on objects larger than 5GB in size.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrIncorrectContinuationToken: {
ErrCode: ErrIncorrectContinuationToken,
Code: "InvalidArgument",
@ -1700,6 +1709,7 @@ var errorCodes = errorCodeMap{
// IsS3Error checks if the provided error is a specific s3 error.
func IsS3Error(err error, code ErrorCode) bool {
err = frosterrors.UnwrapErr(err)
e, ok := err.(Error)
return ok && e.ErrCode == code
}

View file

@ -19,6 +19,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@ -244,7 +245,7 @@ func (s *statement) UnmarshalJSON(data []byte) error {
func (h *handler) GetBucketACLHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
reqInfo := api.GetReqInfo(ctx)
reqInfo := middleware.GetReqInfo(ctx)
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil {
@ -258,7 +259,7 @@ func (h *handler) GetBucketACLHandler(w http.ResponseWriter, r *http.Request) {
return
}
if err = api.EncodeToResponse(w, h.encodeBucketACL(ctx, bktInfo.Name, bucketACL)); err != nil {
if err = middleware.EncodeToResponse(w, h.encodeBucketACL(ctx, bktInfo.Name, bucketACL)); err != nil {
h.logAndSendError(w, "something went wrong", reqInfo, err)
return
}
@ -282,7 +283,7 @@ func (h *handler) bearerTokenIssuerKey(ctx context.Context) (*keys.PublicKey, er
}
func (h *handler) PutBucketACLHandler(w http.ResponseWriter, r *http.Request) {
reqInfo := api.GetReqInfo(r.Context())
reqInfo := middleware.GetReqInfo(r.Context())
key, err := h.bearerTokenIssuerKey(r.Context())
if err != nil {
h.logAndSendError(w, "couldn't get bearer token issuer key", reqInfo, err)
@ -367,7 +368,7 @@ func (h *handler) updateBucketACL(r *http.Request, astChild *ast, bktInfo *data.
func (h *handler) GetObjectACLHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
reqInfo := api.GetReqInfo(ctx)
reqInfo := middleware.GetReqInfo(ctx)
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil {
@ -393,14 +394,14 @@ func (h *handler) GetObjectACLHandler(w http.ResponseWriter, r *http.Request) {
return
}
if err = api.EncodeToResponse(w, h.encodeObjectACL(ctx, bucketACL, reqInfo.BucketName, objInfo.VersionID())); err != nil {
if err = middleware.EncodeToResponse(w, h.encodeObjectACL(ctx, bucketACL, reqInfo.BucketName, objInfo.VersionID())); err != nil {
h.logAndSendError(w, "failed to encode response", reqInfo, err)
}
}
func (h *handler) PutObjectACLHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
reqInfo := api.GetReqInfo(ctx)
reqInfo := middleware.GetReqInfo(ctx)
versionID := reqInfo.URL.Query().Get(api.QueryVersionID)
key, err := h.bearerTokenIssuerKey(ctx)
if err != nil {
@ -476,7 +477,7 @@ func (h *handler) PutObjectACLHandler(w http.ResponseWriter, r *http.Request) {
}
func (h *handler) GetBucketPolicyHandler(w http.ResponseWriter, r *http.Request) {
reqInfo := api.GetReqInfo(r.Context())
reqInfo := middleware.GetReqInfo(r.Context())
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil {
@ -507,14 +508,14 @@ func checkOwner(info *data.BucketInfo, owner string) error {
// may need to convert owner to appropriate format
if info.Owner.String() != owner {
return errors.GetAPIError(errors.ErrAccessDenied)
return fmt.Errorf("%w: mismatch owner", errors.GetAPIError(errors.ErrAccessDenied))
}
return nil
}
func (h *handler) PutBucketPolicyHandler(w http.ResponseWriter, r *http.Request) {
reqInfo := api.GetReqInfo(r.Context())
reqInfo := middleware.GetReqInfo(r.Context())
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil {

View file

@ -11,10 +11,13 @@ import (
"fmt"
"io"
"net/http"
"net/http/httptest"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
s3errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
@ -1425,7 +1428,7 @@ func TestPutBucketPolicy(t *testing.T) {
createBucket(t, hc, bktName, box)
w, r := prepareTestPayloadRequest(hc, bktName, "", bytes.NewReader([]byte(bktPolicy)))
ctx := context.WithValue(r.Context(), api.BoxData, box)
ctx := context.WithValue(r.Context(), middleware.BoxData, box)
r = r.WithContext(ctx)
hc.Handler().PutBucketPolicyHandler(w, r)
assertStatus(hc.t, w, http.StatusOK)
@ -1447,7 +1450,7 @@ func putBucketPolicy(hc *handlerContext, bktName string, bktPolicy *bucketPolicy
require.NoError(hc.t, err)
w, r := prepareTestPayloadRequest(hc, bktName, "", bytes.NewReader(body))
ctx := context.WithValue(r.Context(), api.BoxData, box)
ctx := context.WithValue(r.Context(), middleware.BoxData, box)
r = r.WithContext(ctx)
hc.Handler().PutBucketPolicyHandler(w, r)
assertStatus(hc.t, w, status)
@ -1480,9 +1483,14 @@ func createAccessBox(t *testing.T) (*accessbox.Box, *keys.PrivateKey) {
tok := new(session.Container)
tok.ForVerb(session.VerbContainerSetEACL)
err = tok.Sign(key.PrivateKey)
require.NoError(t, err)
tok2 := new(session.Container)
tok2.ForVerb(session.VerbContainerPut)
err = tok2.Sign(key.PrivateKey)
require.NoError(t, err)
box := &accessbox.Box{
Gate: &accessbox.GateData{
SessionTokens: []*session.Container{tok, tok2},
@ -1493,24 +1501,34 @@ func createAccessBox(t *testing.T) (*accessbox.Box, *keys.PrivateKey) {
return box, key
}
func createBucket(t *testing.T, tc *handlerContext, bktName string, box *accessbox.Box) *data.BucketInfo {
w, r := prepareTestRequest(tc, bktName, "", nil)
ctx := context.WithValue(r.Context(), api.BoxData, box)
r = r.WithContext(ctx)
tc.Handler().CreateBucketHandler(w, r)
func createBucket(t *testing.T, hc *handlerContext, bktName string, box *accessbox.Box) *data.BucketInfo {
w := createBucketBase(hc, bktName, box)
assertStatus(t, w, http.StatusOK)
bktInfo, err := tc.Layer().GetBucketInfo(tc.Context(), bktName)
bktInfo, err := hc.Layer().GetBucketInfo(hc.Context(), bktName)
require.NoError(t, err)
return bktInfo
}
func createBucketAssertS3Error(hc *handlerContext, bktName string, box *accessbox.Box, code s3errors.ErrorCode) {
w := createBucketBase(hc, bktName, box)
assertS3Error(hc.t, w, s3errors.GetAPIError(code))
}
func createBucketBase(hc *handlerContext, bktName string, box *accessbox.Box) *httptest.ResponseRecorder {
w, r := prepareTestRequest(hc, bktName, "", nil)
ctx := context.WithValue(r.Context(), middleware.BoxData, box)
r = r.WithContext(ctx)
hc.Handler().CreateBucketHandler(w, r)
return w
}
func putBucketACL(t *testing.T, tc *handlerContext, bktName string, box *accessbox.Box, header map[string]string) {
w, r := prepareTestRequest(tc, bktName, "", nil)
for key, val := range header {
r.Header.Set(key, val)
}
ctx := context.WithValue(r.Context(), api.BoxData, box)
ctx := context.WithValue(r.Context(), middleware.BoxData, box)
r = r.WithContext(ctx)
tc.Handler().PutBucketACLHandler(w, r)
assertStatus(t, w, http.StatusOK)

View file

@ -37,6 +37,7 @@ type (
ResolveZoneList []string
IsResolveListAllow bool // True if ResolveZoneList contains allowed zones
CompleteMultipartKeepalive time.Duration
Kludge KludgeSettings
}
PlacementPolicy interface {
@ -49,6 +50,10 @@ type (
XMLDecoderProvider interface {
NewCompleteMultipartDecoder(io.Reader) *xml.Decoder
}
KludgeSettings interface {
BypassContentEncodingInChunks() bool
}
)
const (

View file

@ -10,6 +10,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
"go.uber.org/zap"
)
@ -67,7 +68,7 @@ var validAttributes = map[string]struct{}{
}
func (h *handler) GetObjectAttributesHandler(w http.ResponseWriter, r *http.Request) {
reqInfo := api.GetReqInfo(r.Context())
reqInfo := middleware.GetReqInfo(r.Context())
params, err := parseGetObjectAttributeArgs(r)
if err != nil {
@ -123,7 +124,7 @@ func (h *handler) GetObjectAttributesHandler(w http.ResponseWriter, r *http.Requ
}
writeAttributesHeaders(w.Header(), extendedInfo, bktSettings.Unversioned())
if err = api.EncodeToResponse(w, response); err != nil {
if err = middleware.EncodeToResponse(w, response); err != nil {
h.logAndSendError(w, "something went wrong", reqInfo, err)
}
}

View file

@ -11,6 +11,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
"go.uber.org/zap"
)
@ -47,7 +48,7 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
sessionTokenEACL *session.Container
ctx = r.Context()
reqInfo = api.GetReqInfo(ctx)
reqInfo = middleware.GetReqInfo(ctx)
containsACL = containsACLHeaders(r)
)
@ -105,6 +106,25 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
}
srcObjInfo := extendedSrcObjInfo.ObjectInfo
encryptionParams, err := formEncryptionParams(r)
if err != nil {
h.logAndSendError(w, "invalid sse headers", reqInfo, err)
return
}
if err = encryptionParams.MatchObjectEncryption(layer.FormEncryptionInfo(srcObjInfo.Headers)); err != nil {
h.logAndSendError(w, "encryption doesn't match object", reqInfo, errors.GetAPIError(errors.ErrBadRequest), zap.Error(err))
return
}
if srcSize, err := getObjectSize(extendedSrcObjInfo, encryptionParams); err != nil {
h.logAndSendError(w, "failed to get source object size", reqInfo, err)
return
} else if srcSize > layer.UploadMaxSize { //https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html
h.logAndSendError(w, "too bid object to copy with single copy operation, use multipart upload copy instead", reqInfo, errors.GetAPIError(errors.ErrInvalidRequestLargeCopy))
return
}
args, err := parseCopyObjectArgs(r.Header)
if err != nil {
h.logAndSendError(w, "could not parse request params", reqInfo, err)
@ -143,17 +163,6 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
}
}
encryptionParams, err := formEncryptionParams(r)
if err != nil {
h.logAndSendError(w, "invalid sse headers", reqInfo, err)
return
}
if err = encryptionParams.MatchObjectEncryption(layer.FormEncryptionInfo(srcObjInfo.Headers)); err != nil {
h.logAndSendError(w, "encryption doesn't match object", reqInfo, errors.GetAPIError(errors.ErrBadRequest), zap.Error(err))
return
}
if err = checkPreconditions(srcObjInfo, args.Conditional); err != nil {
h.logAndSendError(w, "precondition failed", reqInfo, errors.GetAPIError(errors.ErrPreconditionFailed))
return
@ -163,19 +172,21 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
if len(srcObjInfo.ContentType) > 0 {
srcObjInfo.Headers[api.ContentType] = srcObjInfo.ContentType
}
metadata = srcObjInfo.Headers
metadata = makeCopyMap(srcObjInfo.Headers)
delete(metadata, layer.MultipartObjectSize) // object payload will be the real one rather than a list of compound parts
} else if contentType := r.Header.Get(api.ContentType); len(contentType) > 0 {
metadata[api.ContentType] = contentType
}
params := &layer.CopyObjectParams{
SrcObject: srcObjInfo,
ScrBktInfo: srcObjPrm.BktInfo,
DstBktInfo: dstBktInfo,
DstObject: reqInfo.ObjectName,
SrcSize: srcObjInfo.Size,
Header: metadata,
Encryption: encryptionParams,
SrcVersioned: srcObjPrm.Versioned(),
SrcObject: srcObjInfo,
ScrBktInfo: srcObjPrm.BktInfo,
DstBktInfo: dstBktInfo,
DstObject: reqInfo.ObjectName,
SrcSize: srcObjInfo.Size,
Header: metadata,
Encryption: encryptionParams,
}
params.CopiesNumbers, err = h.pickCopiesNumbers(metadata, dstBktInfo.LocationConstraint)
@ -198,7 +209,7 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
}
dstObjInfo := extendedDstObjInfo.ObjectInfo
if err = api.EncodeToResponse(w, &CopyObjectResponse{LastModified: dstObjInfo.Created.UTC().Format(time.RFC3339), ETag: dstObjInfo.HashSum}); err != nil {
if err = middleware.EncodeToResponse(w, &CopyObjectResponse{LastModified: dstObjInfo.Created.UTC().Format(time.RFC3339), ETag: dstObjInfo.HashSum}); err != nil {
h.logAndSendError(w, "something went wrong", reqInfo, err, additional...)
return
}
@ -255,7 +266,15 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
}
}
func isCopyingToItselfForbidden(reqInfo *api.ReqInfo, srcBucket string, srcObject string, settings *data.BucketSettings, args *copyObjectArgs) bool {
func makeCopyMap(headers map[string]string) map[string]string {
res := make(map[string]string, len(headers))
for key, val := range headers {
res[key] = val
}
return res
}
func isCopyingToItselfForbidden(reqInfo *middleware.ReqInfo, srcBucket string, srcObject string, settings *data.BucketSettings, args *copyObjectArgs) bool {
if reqInfo.BucketName != srcBucket || reqInfo.ObjectName != srcObject {
return false
}

View file

@ -7,6 +7,7 @@ import (
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
"github.com/stretchr/testify/require"
)
@ -29,14 +30,14 @@ func TestCopyWithTaggingDirective(t *testing.T) {
copyMeta := CopyMeta{
Tags: map[string]string{"key2": "val"},
}
copyObject(t, tc, bktName, objName, objToCopy, copyMeta, http.StatusOK)
copyObject(tc, bktName, objName, objToCopy, copyMeta, http.StatusOK)
tagging := getObjectTagging(t, tc, bktName, objToCopy, emptyVersion)
require.Len(t, tagging.TagSet, 1)
require.Equal(t, "key", tagging.TagSet[0].Key)
require.Equal(t, "val", tagging.TagSet[0].Value)
copyMeta.TaggingDirective = replaceDirective
copyObject(t, tc, bktName, objName, objToCopy2, copyMeta, http.StatusOK)
copyObject(tc, bktName, objName, objToCopy2, copyMeta, http.StatusOK)
tagging = getObjectTagging(t, tc, bktName, objToCopy2, emptyVersion)
require.Len(t, tagging.TagSet, 1)
require.Equal(t, "key2", tagging.TagSet[0].Key)
@ -51,20 +52,54 @@ func TestCopyToItself(t *testing.T) {
copyMeta := CopyMeta{MetadataDirective: replaceDirective}
copyObject(t, tc, bktName, objName, objName, CopyMeta{}, http.StatusBadRequest)
copyObject(t, tc, bktName, objName, objName, copyMeta, http.StatusOK)
copyObject(tc, bktName, objName, objName, CopyMeta{}, http.StatusBadRequest)
copyObject(tc, bktName, objName, objName, copyMeta, http.StatusOK)
putBucketVersioning(t, tc, bktName, true)
copyObject(t, tc, bktName, objName, objName, CopyMeta{}, http.StatusOK)
copyObject(t, tc, bktName, objName, objName, copyMeta, http.StatusOK)
copyObject(tc, bktName, objName, objName, CopyMeta{}, http.StatusOK)
copyObject(tc, bktName, objName, objName, copyMeta, http.StatusOK)
putBucketVersioning(t, tc, bktName, false)
copyObject(t, tc, bktName, objName, objName, CopyMeta{}, http.StatusOK)
copyObject(t, tc, bktName, objName, objName, copyMeta, http.StatusOK)
copyObject(tc, bktName, objName, objName, CopyMeta{}, http.StatusOK)
copyObject(tc, bktName, objName, objName, copyMeta, http.StatusOK)
}
func copyObject(t *testing.T, tc *handlerContext, bktName, fromObject, toObject string, copyMeta CopyMeta, statusCode int) {
w, r := prepareTestRequest(tc, bktName, toObject, nil)
func TestCopyMultipart(t *testing.T) {
hc := prepareHandlerContext(t)
bktName, objName := "bucket-for-copy", "object-for-copy"
createTestBucket(hc, bktName)
partSize := layer.UploadMinSize
objLen := 6 * partSize
headers := map[string]string{}
data := multipartUpload(hc, bktName, objName, headers, objLen, partSize)
require.Equal(t, objLen, len(data))
objToCopy := "copy-target"
var copyMeta CopyMeta
copyObject(hc, bktName, objName, objToCopy, copyMeta, http.StatusOK)
copiedData, _ := getObject(hc, bktName, objToCopy)
equalDataSlices(t, data, copiedData)
result := getObjectAttributes(hc, bktName, objToCopy, objectParts)
require.NotNil(t, result.ObjectParts)
objToCopy2 := "copy-target2"
copyMeta.MetadataDirective = replaceDirective
copyObject(hc, bktName, objName, objToCopy2, copyMeta, http.StatusOK)
result = getObjectAttributes(hc, bktName, objToCopy2, objectParts)
require.Nil(t, result.ObjectParts)
copiedData, _ = getObject(hc, bktName, objToCopy2)
equalDataSlices(t, data, copiedData)
}
func copyObject(hc *handlerContext, bktName, fromObject, toObject string, copyMeta CopyMeta, statusCode int) {
w, r := prepareTestRequest(hc, bktName, toObject, nil)
r.Header.Set(api.AmzCopySource, bktName+"/"+fromObject)
r.Header.Set(api.AmzMetadataDirective, copyMeta.MetadataDirective)
@ -79,8 +114,8 @@ func copyObject(t *testing.T, tc *handlerContext, bktName, fromObject, toObject
}
r.Header.Set(api.AmzTagging, tagsQuery.Encode())
tc.Handler().CopyObjectHandler(w, r)
assertStatus(t, w, statusCode)
hc.Handler().CopyObjectHandler(w, r)
assertStatus(hc.t, w, statusCode)
}
func putObjectTagging(t *testing.T, tc *handlerContext, bktName, objName string, tags map[string]string) {

View file

@ -8,6 +8,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
"go.uber.org/zap"
)
@ -18,7 +19,7 @@ const (
)
func (h *handler) GetBucketCorsHandler(w http.ResponseWriter, r *http.Request) {
reqInfo := api.GetReqInfo(r.Context())
reqInfo := middleware.GetReqInfo(r.Context())
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil {
@ -32,14 +33,14 @@ func (h *handler) GetBucketCorsHandler(w http.ResponseWriter, r *http.Request) {
return
}
if err = api.EncodeToResponse(w, cors); err != nil {
if err = middleware.EncodeToResponse(w, cors); err != nil {
h.logAndSendError(w, "could not encode cors to response", reqInfo, err)
return
}
}
func (h *handler) PutBucketCorsHandler(w http.ResponseWriter, r *http.Request) {
reqInfo := api.GetReqInfo(r.Context())
reqInfo := middleware.GetReqInfo(r.Context())
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil {
@ -63,11 +64,11 @@ func (h *handler) PutBucketCorsHandler(w http.ResponseWriter, r *http.Request) {
return
}
api.WriteSuccessResponseHeadersOnly(w)
middleware.WriteSuccessResponseHeadersOnly(w)
}
func (h *handler) DeleteBucketCorsHandler(w http.ResponseWriter, r *http.Request) {
reqInfo := api.GetReqInfo(r.Context())
reqInfo := middleware.GetReqInfo(r.Context())
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil {
@ -92,7 +93,7 @@ func (h *handler) AppendCORSHeaders(w http.ResponseWriter, r *http.Request) {
}
ctx := r.Context()
reqInfo := api.GetReqInfo(ctx)
reqInfo := middleware.GetReqInfo(ctx)
if reqInfo.BucketName == "" {
return
}
@ -143,7 +144,7 @@ func (h *handler) AppendCORSHeaders(w http.ResponseWriter, r *http.Request) {
}
func (h *handler) Preflight(w http.ResponseWriter, r *http.Request) {
reqInfo := api.GetReqInfo(r.Context())
reqInfo := middleware.GetReqInfo(r.Context())
bktInfo, err := h.obj.GetBucketInfo(r.Context(), reqInfo.BucketName)
if err != nil {
h.logAndSendError(w, "could not get bucket info", reqInfo, err)
@ -197,7 +198,7 @@ func (h *handler) Preflight(w http.ResponseWriter, r *http.Request) {
if o != wildcard {
w.Header().Set(api.AccessControlAllowCredentials, "true")
}
api.WriteSuccessResponseHeadersOnly(w)
middleware.WriteSuccessResponseHeadersOnly(w)
return
}
}

View file

@ -7,6 +7,7 @@ import (
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
)
func TestCORSOriginWildcard(t *testing.T) {
@ -23,14 +24,14 @@ func TestCORSOriginWildcard(t *testing.T) {
bktName := "bucket-for-cors"
box, _ := createAccessBox(t)
w, r := prepareTestRequest(hc, bktName, "", nil)
ctx := context.WithValue(r.Context(), api.BoxData, box)
ctx := context.WithValue(r.Context(), middleware.BoxData, box)
r = r.WithContext(ctx)
r.Header.Add(api.AmzACL, "public-read")
hc.Handler().CreateBucketHandler(w, r)
assertStatus(t, w, http.StatusOK)
w, r = prepareTestPayloadRequest(hc, bktName, "", strings.NewReader(body))
ctx = context.WithValue(r.Context(), api.BoxData, box)
ctx = context.WithValue(r.Context(), middleware.BoxData, box)
r = r.WithContext(ctx)
hc.Handler().PutBucketCorsHandler(w, r)
assertStatus(t, w, http.StatusOK)

View file

@ -10,6 +10,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
@ -62,7 +63,7 @@ type DeleteObjectsResponse struct {
func (h *handler) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
reqInfo := api.GetReqInfo(ctx)
reqInfo := middleware.GetReqInfo(ctx)
versionID := reqInfo.URL.Query().Get(api.QueryVersionID)
versionedObject := []*layer.VersionedObject{{
Name: reqInfo.ObjectName,
@ -158,7 +159,7 @@ func isErrObjectLocked(err error) bool {
// DeleteMultipleObjectsHandler handles multiple delete requests.
func (h *handler) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
reqInfo := api.GetReqInfo(ctx)
reqInfo := middleware.GetReqInfo(ctx)
// Content-Md5 is required and should be set
// http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
@ -264,14 +265,14 @@ func (h *handler) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Re
h.reqLogger(ctx).Error("couldn't delete objects", fields...)
}
if err = api.EncodeToResponse(w, response); err != nil {
if err = middleware.EncodeToResponse(w, response); err != nil {
h.logAndSendError(w, "could not write response", reqInfo, err, zap.Array("objects", marshaler))
return
}
}
func (h *handler) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) {
reqInfo := api.GetReqInfo(r.Context())
reqInfo := middleware.GetReqInfo(r.Context())
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil {
h.logAndSendError(w, "could not get bucket info", reqInfo, err)

View file

@ -3,11 +3,13 @@ package handler
import (
"bytes"
"net/http"
"net/http/httptest"
"net/url"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
apiErrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/stretchr/testify/require"
@ -25,11 +27,7 @@ func TestDeleteBucketOnAlreadyRemovedError(t *testing.T) {
putObject(t, hc, bktName, objName)
nodeVersion, err := hc.tree.GetUnversioned(hc.context, bktInfo, objName)
require.NoError(t, err)
var addr oid.Address
addr.SetContainer(bktInfo.CID)
addr.SetObject(nodeVersion.OID)
addr := getAddressOfLastVersion(hc, bktInfo, objName)
hc.tp.SetObjectError(addr, apistatus.ObjectAlreadyRemoved{})
deleteObjects(t, hc, bktName, [][2]string{{objName, emptyVersion}})
@ -37,6 +35,15 @@ func TestDeleteBucketOnAlreadyRemovedError(t *testing.T) {
deleteBucket(t, hc, bktName, http.StatusNoContent)
}
func getAddressOfLastVersion(hc *handlerContext, bktInfo *data.BucketInfo, objName string) oid.Address {
nodeVersion, err := hc.tree.GetLatestVersion(hc.context, bktInfo, objName)
require.NoError(hc.t, err)
var addr oid.Address
addr.SetContainer(bktInfo.CID)
addr.SetObject(nodeVersion.OID)
return addr
}
func TestDeleteBucket(t *testing.T) {
tc := prepareHandlerContext(t)
@ -425,22 +432,28 @@ func deleteBucket(t *testing.T, tc *handlerContext, bktName string, code int) {
assertStatus(t, w, code)
}
func checkNotFound(t *testing.T, tc *handlerContext, bktName, objName, version string) {
query := make(url.Values)
query.Add(api.QueryVersionID, version)
w, r := prepareTestFullRequest(tc, bktName, objName, query, nil)
tc.Handler().HeadObjectHandler(w, r)
func checkNotFound(t *testing.T, hc *handlerContext, bktName, objName, version string) {
w := headObjectBase(hc, bktName, objName, version)
assertStatus(t, w, http.StatusNotFound)
}
func checkFound(t *testing.T, tc *handlerContext, bktName, objName, version string) {
func headObjectAssertS3Error(hc *handlerContext, bktName, objName, version string, code apiErrors.ErrorCode) {
w := headObjectBase(hc, bktName, objName, version)
assertS3Error(hc.t, w, apiErrors.GetAPIError(code))
}
func checkFound(t *testing.T, hc *handlerContext, bktName, objName, version string) {
w := headObjectBase(hc, bktName, objName, version)
assertStatus(t, w, http.StatusOK)
}
func headObjectBase(hc *handlerContext, bktName, objName, version string) *httptest.ResponseRecorder {
query := make(url.Values)
query.Add(api.QueryVersionID, version)
w, r := prepareTestFullRequest(tc, bktName, objName, query, nil)
tc.Handler().HeadObjectHandler(w, r)
assertStatus(t, w, http.StatusOK)
w, r := prepareTestFullRequest(hc, bktName, objName, query, nil)
hc.Handler().HeadObjectHandler(w, r)
return w
}
func listVersions(t *testing.T, tc *handlerContext, bktName string) *ListObjectsVersionsResponse {

View file

@ -7,6 +7,7 @@ import (
"fmt"
"io"
"net/http"
"net/http/httptest"
"net/url"
"strconv"
"strings"
@ -41,7 +42,7 @@ func TestSimpleGetEncrypted(t *testing.T) {
require.NoError(t, err)
require.NotEqual(t, content, string(encryptedContent))
response, _ := getEncryptedObject(t, tc, bktName, objName)
response, _ := getEncryptedObject(tc, bktName, objName)
require.Equal(t, content, string(response))
}
@ -103,14 +104,40 @@ func TestS3EncryptionSSECMultipartUpload(t *testing.T) {
data := multipartUploadEncrypted(tc, bktName, objName, headers, objLen, partSize)
require.Equal(t, objLen, len(data))
resData, resHeader := getEncryptedObject(t, tc, bktName, objName)
resData, resHeader := getEncryptedObject(tc, bktName, objName)
equalDataSlices(t, data, resData)
require.Equal(t, headers[api.ContentType], resHeader.Get(api.ContentType))
require.Equal(t, headers[headerMetaKey], resHeader[headerMetaKey][0])
require.Equal(t, strconv.Itoa(objLen), resHeader.Get(api.ContentLength))
checkContentUsingRangeEnc(t, tc, bktName, objName, data, 1000000)
checkContentUsingRangeEnc(t, tc, bktName, objName, data, 10000000)
checkContentUsingRangeEnc(tc, bktName, objName, data, 1000000)
checkContentUsingRangeEnc(tc, bktName, objName, data, 10000000)
}
func TestMultipartUploadGetRange(t *testing.T) {
hc := prepareHandlerContext(t)
bktName, objName := "bucket-for-multipart-s3-tests", "multipart_obj"
createTestBucket(hc, bktName)
objLen := 30 * 1024 * 1024
partSize := objLen / 6
headerMetaKey := api.MetadataPrefix + "foo"
headers := map[string]string{
headerMetaKey: "bar",
api.ContentType: "text/plain",
}
data := multipartUpload(hc, bktName, objName, headers, objLen, partSize)
require.Equal(t, objLen, len(data))
resData, resHeader := getObject(hc, bktName, objName)
equalDataSlices(t, data, resData)
require.Equal(t, headers[api.ContentType], resHeader.Get(api.ContentType))
require.Equal(t, headers[headerMetaKey], resHeader[headerMetaKey][0])
require.Equal(t, strconv.Itoa(objLen), resHeader.Get(api.ContentLength))
checkContentUsingRange(hc, bktName, objName, data, 1000000)
checkContentUsingRange(hc, bktName, objName, data, 10000000)
}
func equalDataSlices(t *testing.T, expected, actual []byte) {
@ -127,7 +154,15 @@ func equalDataSlices(t *testing.T, expected, actual []byte) {
}
}
func checkContentUsingRangeEnc(t *testing.T, tc *handlerContext, bktName, objName string, data []byte, step int) {
func checkContentUsingRangeEnc(hc *handlerContext, bktName, objName string, data []byte, step int) {
checkContentUsingRangeBase(hc, bktName, objName, data, step, true)
}
func checkContentUsingRange(hc *handlerContext, bktName, objName string, data []byte, step int) {
checkContentUsingRangeBase(hc, bktName, objName, data, step, false)
}
func checkContentUsingRangeBase(hc *handlerContext, bktName, objName string, data []byte, step int, encrypted bool) {
var off, toRead, end int
for off < len(data) {
@ -137,8 +172,14 @@ func checkContentUsingRangeEnc(t *testing.T, tc *handlerContext, bktName, objNam
}
end = off + toRead - 1
rangeData := getEncryptedObjectRange(t, tc, bktName, objName, off, end)
equalDataSlices(t, data[off:end+1], rangeData)
var rangeData []byte
if encrypted {
rangeData = getEncryptedObjectRange(hc.t, hc, bktName, objName, off, end)
} else {
rangeData = getObjectRange(hc.t, hc, bktName, objName, off, end)
}
equalDataSlices(hc.t, data[off:end+1], rangeData)
off += step
}
@ -168,6 +209,30 @@ func multipartUploadEncrypted(hc *handlerContext, bktName, objName string, heade
return
}
func multipartUpload(hc *handlerContext, bktName, objName string, headers map[string]string, objLen, partsSize int) (objData []byte) {
multipartInfo := createMultipartUpload(hc, bktName, objName, headers)
var sum, currentPart int
var etags []string
adjustedSize := partsSize
for sum < objLen {
currentPart++
sum += partsSize
if sum > objLen {
adjustedSize = objLen - sum
}
etag, data := uploadPart(hc, bktName, objName, multipartInfo.UploadID, currentPart, adjustedSize)
etags = append(etags, etag)
objData = append(objData, data...)
}
completeMultipartUpload(hc, bktName, objName, multipartInfo.UploadID, etags)
return
}
func createMultipartUploadEncrypted(hc *handlerContext, bktName, objName string, headers map[string]string) *InitiateMultipartUploadResponse {
return createMultipartUploadBase(hc, bktName, objName, true, headers)
}
@ -190,6 +255,11 @@ func createMultipartUploadBase(hc *handlerContext, bktName, objName string, encr
}
func completeMultipartUpload(hc *handlerContext, bktName, objName, uploadID string, partsETags []string) {
w := completeMultipartUploadBase(hc, bktName, objName, uploadID, partsETags)
assertStatus(hc.t, w, http.StatusOK)
}
func completeMultipartUploadBase(hc *handlerContext, bktName, objName, uploadID string, partsETags []string) *httptest.ResponseRecorder {
query := make(url.Values)
query.Set(uploadIDQuery, uploadID)
complete := &CompleteMultipartUpload{
@ -204,7 +274,8 @@ func completeMultipartUpload(hc *handlerContext, bktName, objName, uploadID stri
w, r := prepareTestFullRequest(hc, bktName, objName, query, complete)
hc.Handler().CompleteMultipartUploadHandler(w, r)
assertStatus(hc.t, w, http.StatusOK)
return w
}
func uploadPartEncrypted(hc *handlerContext, bktName, objName, uploadID string, num, size int) (string, []byte) {
@ -247,7 +318,7 @@ func TestMultipartEncrypted(t *testing.T) {
part2ETag, part2 := uploadPartEncrypted(hc, bktName, objName, multipartInitInfo.UploadID, 2, 5)
completeMultipartUpload(hc, bktName, objName, multipartInitInfo.UploadID, []string{part1ETag, part2ETag})
res, _ := getEncryptedObject(t, hc, bktName, objName)
res, _ := getEncryptedObject(hc, bktName, objName)
require.Equal(t, len(part1)+len(part2), len(res))
require.Equal(t, append(part1, part2...), res)
@ -263,13 +334,22 @@ func putEncryptedObject(t *testing.T, tc *handlerContext, bktName, objName, cont
assertStatus(t, w, http.StatusOK)
}
func getEncryptedObject(t *testing.T, tc *handlerContext, bktName, objName string) ([]byte, http.Header) {
w, r := prepareTestRequest(tc, bktName, objName, nil)
func getEncryptedObject(hc *handlerContext, bktName, objName string) ([]byte, http.Header) {
w, r := prepareTestRequest(hc, bktName, objName, nil)
setEncryptHeaders(r)
tc.Handler().GetObjectHandler(w, r)
assertStatus(t, w, http.StatusOK)
return getObjectBase(hc, w, r)
}
func getObject(hc *handlerContext, bktName, objName string) ([]byte, http.Header) {
w, r := prepareTestRequest(hc, bktName, objName, nil)
return getObjectBase(hc, w, r)
}
func getObjectBase(hc *handlerContext, w *httptest.ResponseRecorder, r *http.Request) ([]byte, http.Header) {
hc.Handler().GetObjectHandler(w, r)
assertStatus(hc.t, w, http.StatusOK)
content, err := io.ReadAll(w.Result().Body)
require.NoError(t, err)
require.NoError(hc.t, err)
return content, w.Header()
}

View file

@ -12,6 +12,8 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/encryption"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
"go.uber.org/zap"
)
@ -87,6 +89,8 @@ func writeHeaders(h http.Header, requestHeader http.Header, extendedInfo *data.E
if len(info.Headers[layer.AttributeEncryptionAlgorithm]) > 0 {
h.Set(api.ContentLength, info.Headers[layer.AttributeDecryptedSize])
addSSECHeaders(h, requestHeader)
} else if len(info.Headers[layer.MultipartObjectSize]) > 0 {
h.Set(api.ContentLength, info.Headers[layer.MultipartObjectSize])
} else {
h.Set(api.ContentLength, strconv.FormatUint(info.Size, 10))
}
@ -104,6 +108,9 @@ func writeHeaders(h http.Header, requestHeader http.Header, extendedInfo *data.E
if expires := info.Headers[api.Expires]; expires != "" {
h.Set(api.Expires, expires)
}
if encodings := info.Headers[api.ContentEncoding]; encodings != "" {
h.Set(api.ContentEncoding, encodings)
}
for key, val := range info.Headers {
if layer.IsSystemHeader(key) {
@ -117,7 +124,7 @@ func (h *handler) GetObjectHandler(w http.ResponseWriter, r *http.Request) {
var (
params *layer.RangeParams
reqInfo = api.GetReqInfo(r.Context())
reqInfo = middleware.GetReqInfo(r.Context())
)
conditional, err := parseConditionalHeaders(r.Header)
@ -161,12 +168,10 @@ func (h *handler) GetObjectHandler(w http.ResponseWriter, r *http.Request) {
return
}
fullSize := info.Size
if encryptionParams.Enabled() {
if fullSize, err = strconv.ParseUint(info.Headers[layer.AttributeDecryptedSize], 10, 64); err != nil {
h.logAndSendError(w, "invalid decrypted size header", reqInfo, errors.GetAPIError(errors.ErrBadRequest))
return
}
fullSize, err := getObjectSize(extendedInfo, encryptionParams)
if err != nil {
h.logAndSendError(w, "invalid size header", reqInfo, errors.GetAPIError(errors.ErrBadRequest))
return
}
if params, err = fetchRangeHeader(r.Header, fullSize); err != nil {
@ -201,38 +206,70 @@ func (h *handler) GetObjectHandler(w http.ResponseWriter, r *http.Request) {
return
}
writeHeaders(w.Header(), r.Header, extendedInfo, len(tagSet), bktSettings.Unversioned())
if params != nil {
writeRangeHeaders(w, params, info.Size)
} else {
w.WriteHeader(http.StatusOK)
}
getParams := &layer.GetObjectParams{
getPayloadParams := &layer.GetObjectParams{
ObjectInfo: info,
Writer: w,
Versioned: p.Versioned(),
Range: params,
BucketInfo: bktInfo,
Encryption: encryptionParams,
}
if err = h.obj.GetObject(r.Context(), getParams); err != nil {
h.logAndSendError(w, "could not get object", reqInfo, err)
objPayload, err := h.obj.GetObject(r.Context(), getPayloadParams)
if err != nil {
h.logAndSendError(w, "could not get object payload", reqInfo, err)
return
}
writeHeaders(w.Header(), r.Header, extendedInfo, len(tagSet), bktSettings.Unversioned())
if params != nil {
writeRangeHeaders(w, params, fullSize)
} else {
w.WriteHeader(http.StatusOK)
}
if err = objPayload.StreamTo(w); err != nil {
h.logAndSendError(w, "could not stream object payload", reqInfo, err)
return
}
}
func getObjectSize(extendedInfo *data.ExtendedObjectInfo, encryptionParams encryption.Params) (uint64, error) {
var err error
fullSize := extendedInfo.ObjectInfo.Size
if encryptionParams.Enabled() {
if fullSize, err = strconv.ParseUint(extendedInfo.ObjectInfo.Headers[layer.AttributeDecryptedSize], 10, 64); err != nil {
return 0, fmt.Errorf("invalid decrypted size header: %w", err)
}
} else if extendedInfo.NodeVersion.IsCombined {
if fullSize, err = strconv.ParseUint(extendedInfo.ObjectInfo.Headers[layer.MultipartObjectSize], 10, 64); err != nil {
return 0, fmt.Errorf("invalid multipart size header: %w", err)
}
}
return fullSize, nil
}
func checkPreconditions(info *data.ObjectInfo, args *conditionalArgs) error {
if len(args.IfMatch) > 0 && args.IfMatch != info.HashSum {
return errors.GetAPIError(errors.ErrPreconditionFailed)
return fmt.Errorf("%w: etag mismatched: '%s', '%s'", errors.GetAPIError(errors.ErrPreconditionFailed), args.IfMatch, info.HashSum)
}
if len(args.IfNoneMatch) > 0 && args.IfNoneMatch == info.HashSum {
return errors.GetAPIError(errors.ErrNotModified)
return fmt.Errorf("%w: etag matched: '%s', '%s'", errors.GetAPIError(errors.ErrNotModified), args.IfNoneMatch, info.HashSum)
}
if args.IfModifiedSince != nil && info.Created.Before(*args.IfModifiedSince) {
return errors.GetAPIError(errors.ErrNotModified)
return fmt.Errorf("%w: not modified since '%s', last modified '%s'", errors.GetAPIError(errors.ErrNotModified),
args.IfModifiedSince.Format(time.RFC3339), info.Created.Format(time.RFC3339))
}
if args.IfUnmodifiedSince != nil && info.Created.After(*args.IfUnmodifiedSince) {
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html#API_GetObject_RequestSyntax
// If both of the If-Match and If-Unmodified-Since headers are present in the request as follows:
// If-Match condition evaluates to true, and;
// If-Unmodified-Since condition evaluates to false;
// then, S3 returns 200 OK and the data requested.
if len(args.IfMatch) == 0 {
return errors.GetAPIError(errors.ErrPreconditionFailed)
return fmt.Errorf("%w: modified since '%s', last modified '%s'", errors.GetAPIError(errors.ErrPreconditionFailed),
args.IfUnmodifiedSince.Format(time.RFC3339), info.Created.Format(time.RFC3339))
}
}
@ -242,8 +279,8 @@ func checkPreconditions(info *data.ObjectInfo, args *conditionalArgs) error {
func parseConditionalHeaders(headers http.Header) (*conditionalArgs, error) {
var err error
args := &conditionalArgs{
IfMatch: headers.Get(api.IfMatch),
IfNoneMatch: headers.Get(api.IfNoneMatch),
IfMatch: strings.Trim(headers.Get(api.IfMatch), "\""),
IfNoneMatch: strings.Trim(headers.Get(api.IfNoneMatch), "\""),
}
if args.IfModifiedSince, err = parseHTTPTime(headers.Get(api.IfModifiedSince)); err != nil {

View file

@ -2,15 +2,22 @@ package handler
import (
"bytes"
stderrors "errors"
"fmt"
"io"
"net/http"
"net/http/httptest"
"net/url"
"testing"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
apiErrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
s3errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
"github.com/stretchr/testify/require"
)
@ -143,7 +150,11 @@ func TestPreconditions(t *testing.T) {
} {
t.Run(tc.name, func(t *testing.T) {
actual := checkPreconditions(tc.info, tc.args)
require.Equal(t, tc.expected, actual)
if tc.expected == nil {
require.NoError(t, actual)
} else {
require.True(t, stderrors.Is(actual, tc.expected), tc.expected, actual)
}
})
}
}
@ -170,6 +181,24 @@ func TestGetRange(t *testing.T) {
require.Equal(t, "bcdef", string(end))
}
func TestGetObject(t *testing.T) {
hc := prepareHandlerContext(t)
bktName, objName := "bucket", "obj"
bktInfo, objInfo := createVersionedBucketAndObject(hc.t, hc, bktName, objName)
putObject(hc.t, hc, bktName, objName)
checkFound(hc.t, hc, bktName, objName, objInfo.VersionID())
checkFound(hc.t, hc, bktName, objName, emptyVersion)
addr := getAddressOfLastVersion(hc, bktInfo, objName)
hc.tp.SetObjectError(addr, apistatus.ObjectNotFound{})
hc.tp.SetObjectError(objInfo.Address(), apistatus.ObjectNotFound{})
getObjectAssertS3Error(hc, bktName, objName, objInfo.VersionID(), s3errors.ErrNoSuchVersion)
getObjectAssertS3Error(hc, bktName, objName, emptyVersion, s3errors.ErrNoSuchKey)
}
func putObjectContent(hc *handlerContext, bktName, objName, content string) {
body := bytes.NewReader([]byte(content))
w, r := prepareTestPayloadRequest(hc, bktName, objName, body)
@ -186,3 +215,17 @@ func getObjectRange(t *testing.T, tc *handlerContext, bktName, objName string, s
require.NoError(t, err)
return content
}
func getObjectAssertS3Error(hc *handlerContext, bktName, objName, version string, code apiErrors.ErrorCode) {
w := getObjectBaseResponse(hc, bktName, objName, version)
assertS3Error(hc.t, w, apiErrors.GetAPIError(code))
}
func getObjectBaseResponse(hc *handlerContext, bktName, objName, version string) *httptest.ResponseRecorder {
query := make(url.Values)
query.Add(api.QueryVersionID, version)
w, r := prepareTestFullRequest(hc, bktName, objName, query, nil)
hc.Handler().GetObjectHandler(w, r)
return w
}


@ -13,11 +13,13 @@ import (
"testing"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/cache"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/resolver"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/pkg/service/tree"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@ -35,6 +37,7 @@ type handlerContext struct {
tp *layer.TestFrostFS
tree *tree.Tree
context context.Context
kludge *kludgeSettingsMock
}
func (hc *handlerContext) Handler() *handler {
@ -82,12 +85,28 @@ func (p *xmlDecoderProviderMock) NewCompleteMultipartDecoder(r io.Reader) *xml.D
return xml.NewDecoder(r)
}
type kludgeSettingsMock struct {
bypassContentEncodingInChunks bool
}
func (k *kludgeSettingsMock) BypassContentEncodingInChunks() bool {
return k.bypassContentEncodingInChunks
}
func prepareHandlerContext(t *testing.T) *handlerContext {
return prepareHandlerContextBase(t, false)
}
func prepareHandlerContextWithMinCache(t *testing.T) *handlerContext {
return prepareHandlerContextBase(t, true)
}
func prepareHandlerContextBase(t *testing.T, minCache bool) *handlerContext {
key, err := keys.NewPrivateKey()
require.NoError(t, err)
l := zap.NewExample()
tp := layer.NewTestFrostFS()
tp := layer.NewTestFrostFS(key)
testResolver := &resolver.Resolver{Name: "test_resolver"}
testResolver.SetResolveFunc(func(_ context.Context, name string) (cid.ID, error) {
@ -99,8 +118,13 @@ func prepareHandlerContext(t *testing.T) *handlerContext {
treeMock := NewTreeServiceMock(t)
cacheCfg := layer.DefaultCachesConfigs(l)
if minCache {
cacheCfg = getMinCacheConfig(l)
}
layerCfg := &layer.Config{
Caches: layer.DefaultCachesConfigs(zap.NewExample()),
Caches: cacheCfg,
AnonKey: layer.AnonymousKey{Key: key},
Resolver: testResolver,
TreeService: treeMock,
@ -110,12 +134,15 @@ func prepareHandlerContext(t *testing.T) *handlerContext {
err = pp.DecodeString("REP 1")
require.NoError(t, err)
kludge := &kludgeSettingsMock{}
h := &handler{
log: l,
obj: layer.NewLayer(l, tp, layerCfg),
cfg: &Config{
Policy: &placementPolicyMock{defaultPolicy: pp},
XMLDecoder: &xmlDecoderProviderMock{},
Kludge: kludge,
},
}
@ -125,7 +152,25 @@ func prepareHandlerContext(t *testing.T) *handlerContext {
h: h,
tp: tp,
tree: treeMock,
context: context.WithValue(context.Background(), api.BoxData, newTestAccessBox(t, key)),
context: context.WithValue(context.Background(), middleware.BoxData, newTestAccessBox(t, key)),
kludge: kludge,
}
}
func getMinCacheConfig(logger *zap.Logger) *layer.CachesConfig {
minCacheCfg := &cache.Config{
Size: 1,
Lifetime: 1,
Logger: logger,
}
return &layer.CachesConfig{
Logger: logger,
Objects: minCacheCfg,
ObjectsList: minCacheCfg,
Names: minCacheCfg,
Buckets: minCacheCfg,
System: minCacheCfg,
AccessControl: minCacheCfg,
}
}
@ -137,8 +182,9 @@ func NewTreeServiceMock(t *testing.T) *tree.Tree {
func createTestBucket(hc *handlerContext, bktName string) *data.BucketInfo {
_, err := hc.MockedPool().CreateContainer(hc.Context(), layer.PrmContainerCreate{
Creator: hc.owner,
Name: bktName,
Creator: hc.owner,
Name: bktName,
BasicACL: acl.PublicRWExtended,
})
require.NoError(hc.t, err)
@ -215,8 +261,8 @@ func prepareTestRequestWithQuery(hc *handlerContext, bktName, objName string, qu
r := httptest.NewRequest(http.MethodPut, defaultURL, bytes.NewReader(body))
r.URL.RawQuery = query.Encode()
reqInfo := api.NewReqInfo(w, r, api.ObjectRequest{Bucket: bktName, Object: objName})
r = r.WithContext(api.SetReqInfo(hc.Context(), reqInfo))
reqInfo := middleware.NewReqInfo(w, r, middleware.ObjectRequest{Bucket: bktName, Object: objName})
r = r.WithContext(middleware.SetReqInfo(hc.Context(), reqInfo))
return w, r
}
@ -225,8 +271,8 @@ func prepareTestPayloadRequest(hc *handlerContext, bktName, objName string, payl
w := httptest.NewRecorder()
r := httptest.NewRequest(http.MethodPut, defaultURL, payload)
reqInfo := api.NewReqInfo(w, r, api.ObjectRequest{Bucket: bktName, Object: objName})
r = r.WithContext(api.SetReqInfo(hc.Context(), reqInfo))
reqInfo := middleware.NewReqInfo(w, r, middleware.ObjectRequest{Bucket: bktName, Object: objName})
r = r.WithContext(middleware.SetReqInfo(hc.Context(), reqInfo))
return w, r
}
@ -241,10 +287,16 @@ func existInMockedFrostFS(tc *handlerContext, bktInfo *data.BucketInfo, objInfo
p := &layer.GetObjectParams{
BucketInfo: bktInfo,
ObjectInfo: objInfo,
Writer: io.Discard,
}
return tc.Layer().GetObject(tc.Context(), p) == nil
objPayload, err := tc.Layer().GetObject(tc.Context(), p)
if err != nil {
return false
}
_, err = io.ReadAll(objPayload)
require.NoError(tc.t, err)
return true
}
func listOIDsFromMockedFrostFS(t *testing.T, tc *handlerContext, bktName string) []oid.ID {


@ -1,13 +1,14 @@
package handler
import (
"bytes"
"io"
"net/http"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
"go.uber.org/zap"
)
@ -26,7 +27,7 @@ func getRangeToDetectContentType(maxSize uint64) *layer.RangeParams {
}
func (h *handler) HeadObjectHandler(w http.ResponseWriter, r *http.Request) {
reqInfo := api.GetReqInfo(r.Context())
reqInfo := middleware.GetReqInfo(r.Context())
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil {
@ -83,18 +84,26 @@ func (h *handler) HeadObjectHandler(w http.ResponseWriter, r *http.Request) {
if len(info.ContentType) == 0 {
if info.ContentType = layer.MimeByFilePath(info.Name); len(info.ContentType) == 0 {
buffer := bytes.NewBuffer(make([]byte, 0, sizeToDetectType))
getParams := &layer.GetObjectParams{
ObjectInfo: info,
Writer: buffer,
Versioned: p.Versioned(),
Range: getRangeToDetectContentType(info.Size),
BucketInfo: bktInfo,
}
if err = h.obj.GetObject(r.Context(), getParams); err != nil {
objPayload, err := h.obj.GetObject(r.Context(), getParams)
if err != nil {
h.logAndSendError(w, "could not get object", reqInfo, err, zap.Stringer("oid", info.ID))
return
}
info.ContentType = http.DetectContentType(buffer.Bytes())
buffer, err := io.ReadAll(objPayload)
if err != nil {
h.logAndSendError(w, "could not partly read payload to detect content type", reqInfo, err, zap.Stringer("oid", info.ID))
return
}
info.ContentType = http.DetectContentType(buffer)
}
}
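The content-type fallback only needs a short prefix of the object: http.DetectContentType never inspects more than the first 512 bytes, which is why the handler requests a small range and reads it fully before sniffing. A quick standalone example of the sniffing step:

package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Pretend this is the range read from the object payload.
	head := []byte("<!DOCTYPE html><html><body>hi</body></html>")
	if len(head) > 512 {
		head = head[:512] // DetectContentType ignores anything past 512 bytes anyway
	}
	fmt.Println(http.DetectContentType(head)) // text/html; charset=utf-8
}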
@ -114,7 +123,7 @@ func (h *handler) HeadObjectHandler(w http.ResponseWriter, r *http.Request) {
}
func (h *handler) HeadBucketHandler(w http.ResponseWriter, r *http.Request) {
reqInfo := api.GetReqInfo(r.Context())
reqInfo := middleware.GetReqInfo(r.Context())
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil {
@ -131,7 +140,7 @@ func (h *handler) HeadBucketHandler(w http.ResponseWriter, r *http.Request) {
w.Header().Set(api.ContainerZone, bktInfo.Zone)
}
api.WriteResponse(w, http.StatusOK, nil, api.MimeNone)
middleware.WriteResponse(w, http.StatusOK, nil, middleware.MimeNone)
}
func (h *handler) setLockingHeaders(bktInfo *data.BucketInfo, lockInfo *data.LockInfo, header http.Header) error {


@ -7,8 +7,12 @@ import (
"time"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
s3errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/stretchr/testify/require"
@ -24,10 +28,14 @@ func TestConditionalHead(t *testing.T) {
tc.Handler().HeadObjectHandler(w, r)
assertStatus(t, w, http.StatusOK)
etag := w.Result().Header.Get(api.ETag)
etagQuoted := "\"" + etag + "\""
headers := map[string]string{api.IfMatch: etag}
headObject(t, tc, bktName, objName, headers, http.StatusOK)
headers = map[string]string{api.IfMatch: etagQuoted}
headObject(t, tc, bktName, objName, headers, http.StatusOK)
headers = map[string]string{api.IfMatch: "etag"}
headObject(t, tc, bktName, objName, headers, http.StatusPreconditionFailed)
@ -47,6 +55,9 @@ func TestConditionalHead(t *testing.T) {
headers = map[string]string{api.IfNoneMatch: etag}
headObject(t, tc, bktName, objName, headers, http.StatusNotModified)
headers = map[string]string{api.IfNoneMatch: etagQuoted}
headObject(t, tc, bktName, objName, headers, http.StatusNotModified)
headers = map[string]string{api.IfNoneMatch: "etag"}
headObject(t, tc, bktName, objName, headers, http.StatusOK)
@ -75,17 +86,48 @@ func headObject(t *testing.T, tc *handlerContext, bktName, objName string, heade
}
func TestInvalidAccessThroughCache(t *testing.T) {
tc := prepareHandlerContext(t)
hc := prepareHandlerContext(t)
bktName, objName := "bucket-for-cache", "obj-for-cache"
createBucketAndObject(tc, bktName, objName)
bktInfo, _ := createBucketAndObject(hc, bktName, objName)
setContainerEACL(hc, bktInfo.CID)
headObject(t, tc, bktName, objName, nil, http.StatusOK)
headObject(t, hc, bktName, objName, nil, http.StatusOK)
w, r := prepareTestRequest(tc, bktName, objName, nil)
tc.Handler().HeadObjectHandler(w, r.WithContext(context.WithValue(r.Context(), api.BoxData, newTestAccessBox(t, nil))))
w, r := prepareTestRequest(hc, bktName, objName, nil)
hc.Handler().HeadObjectHandler(w, r.WithContext(context.WithValue(r.Context(), middleware.BoxData, newTestAccessBox(t, nil))))
assertStatus(t, w, http.StatusForbidden)
}
func setContainerEACL(hc *handlerContext, cnrID cid.ID) {
table := eacl.NewTable()
table.SetCID(cnrID)
for _, op := range fullOps {
table.AddRecord(getOthersRecord(op, eacl.ActionDeny))
}
err := hc.MockedPool().SetContainerEACL(hc.Context(), *table, nil)
require.NoError(hc.t, err)
}
func TestHeadObject(t *testing.T) {
hc := prepareHandlerContextWithMinCache(t)
bktName, objName := "bucket", "obj"
bktInfo, objInfo := createVersionedBucketAndObject(hc.t, hc, bktName, objName)
putObject(hc.t, hc, bktName, objName)
checkFound(hc.t, hc, bktName, objName, objInfo.VersionID())
checkFound(hc.t, hc, bktName, objName, emptyVersion)
addr := getAddressOfLastVersion(hc, bktInfo, objName)
hc.tp.SetObjectError(addr, apistatus.ObjectNotFound{})
hc.tp.SetObjectError(objInfo.Address(), apistatus.ObjectNotFound{})
headObjectAssertS3Error(hc, bktName, objName, objInfo.VersionID(), s3errors.ErrNoSuchVersion)
headObjectAssertS3Error(hc, bktName, objName, emptyVersion, s3errors.ErrNoSuchKey)
}
func TestIsAvailableToResolve(t *testing.T) {
list := []string{"container", "s3"}


@ -3,11 +3,11 @@ package handler
import (
"net/http"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
)
func (h *handler) GetBucketLocationHandler(w http.ResponseWriter, r *http.Request) {
reqInfo := api.GetReqInfo(r.Context())
reqInfo := middleware.GetReqInfo(r.Context())
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil {
@ -15,7 +15,7 @@ func (h *handler) GetBucketLocationHandler(w http.ResponseWriter, r *http.Reques
return
}
if err = api.EncodeToResponse(w, LocationResponse{Location: bktInfo.LocationConstraint}); err != nil {
if err = middleware.EncodeToResponse(w, LocationResponse{Location: bktInfo.LocationConstraint}); err != nil {
h.logAndSendError(w, "couldn't encode bucket location response", reqInfo, err)
}
}


@ -4,7 +4,7 @@ import (
"net/http"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
)
@ -15,7 +15,7 @@ func (h *handler) ListBucketsHandler(w http.ResponseWriter, r *http.Request) {
var (
own user.ID
res *ListBucketsResponse
reqInfo = api.GetReqInfo(r.Context())
reqInfo = middleware.GetReqInfo(r.Context())
)
list, err := h.obj.ListBuckets(r.Context())
@ -42,7 +42,7 @@ func (h *handler) ListBucketsHandler(w http.ResponseWriter, r *http.Request) {
})
}
if err = api.EncodeToResponse(w, res); err != nil {
if err = middleware.EncodeToResponse(w, res); err != nil {
h.logAndSendError(w, "something went wrong", reqInfo, err)
}
}


@ -12,6 +12,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
apiErrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
)
const (
@ -26,7 +27,7 @@ const (
)
func (h *handler) PutBucketObjectLockConfigHandler(w http.ResponseWriter, r *http.Request) {
reqInfo := api.GetReqInfo(r.Context())
reqInfo := middleware.GetReqInfo(r.Context())
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil {
@ -73,7 +74,7 @@ func (h *handler) PutBucketObjectLockConfigHandler(w http.ResponseWriter, r *htt
}
func (h *handler) GetBucketObjectLockConfigHandler(w http.ResponseWriter, r *http.Request) {
reqInfo := api.GetReqInfo(r.Context())
reqInfo := middleware.GetReqInfo(r.Context())
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil {
@ -100,13 +101,13 @@ func (h *handler) GetBucketObjectLockConfigHandler(w http.ResponseWriter, r *htt
settings.LockConfiguration.ObjectLockEnabled = enabledValue
}
if err = api.EncodeToResponse(w, settings.LockConfiguration); err != nil {
if err = middleware.EncodeToResponse(w, settings.LockConfiguration); err != nil {
h.logAndSendError(w, "something went wrong", reqInfo, err)
}
}
func (h *handler) PutObjectLegalHoldHandler(w http.ResponseWriter, r *http.Request) {
reqInfo := api.GetReqInfo(r.Context())
reqInfo := middleware.GetReqInfo(r.Context())
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil {
@ -158,7 +159,7 @@ func (h *handler) PutObjectLegalHoldHandler(w http.ResponseWriter, r *http.Reque
}
func (h *handler) GetObjectLegalHoldHandler(w http.ResponseWriter, r *http.Request) {
reqInfo := api.GetReqInfo(r.Context())
reqInfo := middleware.GetReqInfo(r.Context())
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil {
@ -189,13 +190,13 @@ func (h *handler) GetObjectLegalHoldHandler(w http.ResponseWriter, r *http.Reque
legalHold.Status = legalHoldOn
}
if err = api.EncodeToResponse(w, legalHold); err != nil {
if err = middleware.EncodeToResponse(w, legalHold); err != nil {
h.logAndSendError(w, "something went wrong", reqInfo, err)
}
}
func (h *handler) PutObjectRetentionHandler(w http.ResponseWriter, r *http.Request) {
reqInfo := api.GetReqInfo(r.Context())
reqInfo := middleware.GetReqInfo(r.Context())
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil {
@ -242,7 +243,7 @@ func (h *handler) PutObjectRetentionHandler(w http.ResponseWriter, r *http.Reque
}
func (h *handler) GetObjectRetentionHandler(w http.ResponseWriter, r *http.Request) {
reqInfo := api.GetReqInfo(r.Context())
reqInfo := middleware.GetReqInfo(r.Context())
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil {
@ -281,7 +282,7 @@ func (h *handler) GetObjectRetentionHandler(w http.ResponseWriter, r *http.Reque
retention.Mode = complianceMode
}
if err = api.EncodeToResponse(w, retention); err != nil {
if err = middleware.EncodeToResponse(w, retention); err != nil {
h.logAndSendError(w, "something went wrong", reqInfo, err)
}
}


@ -13,6 +13,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
apiErrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
"github.com/stretchr/testify/require"
)
@ -313,7 +314,7 @@ func TestPutBucketLockConfigurationHandler(t *testing.T) {
w := httptest.NewRecorder()
r := httptest.NewRequest(http.MethodPut, defaultURL, bytes.NewReader(body))
r = r.WithContext(api.SetReqInfo(r.Context(), api.NewReqInfo(w, r, api.ObjectRequest{Bucket: tc.bucket})))
r = r.WithContext(middleware.SetReqInfo(r.Context(), middleware.NewReqInfo(w, r, middleware.ObjectRequest{Bucket: tc.bucket})))
hc.Handler().PutBucketObjectLockConfigHandler(w, r)
@ -386,7 +387,7 @@ func TestGetBucketLockConfigurationHandler(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
w := httptest.NewRecorder()
r := httptest.NewRequest(http.MethodPut, defaultURL, bytes.NewReader(nil))
r = r.WithContext(api.SetReqInfo(r.Context(), api.NewReqInfo(w, r, api.ObjectRequest{Bucket: tc.bucket})))
r = r.WithContext(middleware.SetReqInfo(r.Context(), middleware.NewReqInfo(w, r, middleware.ObjectRequest{Bucket: tc.bucket})))
hc.Handler().GetBucketObjectLockConfigHandler(w, r)
@ -406,7 +407,7 @@ func TestGetBucketLockConfigurationHandler(t *testing.T) {
}
func assertS3Error(t *testing.T, w *httptest.ResponseRecorder, expectedError apiErrors.Error) {
actualErrorResponse := &api.ErrorResponse{}
actualErrorResponse := &middleware.ErrorResponse{}
err := xml.NewDecoder(w.Result().Body).Decode(actualErrorResponse)
require.NoError(t, err)


@ -13,6 +13,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
"github.com/google/uuid"
"go.uber.org/zap"
)
@ -94,7 +95,7 @@ const (
)
func (h *handler) CreateMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
reqInfo := api.GetReqInfo(r.Context())
reqInfo := middleware.GetReqInfo(r.Context())
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil {
@ -103,10 +104,7 @@ func (h *handler) CreateMultipartUploadHandler(w http.ResponseWriter, r *http.Re
}
uploadID := uuid.New()
additional := []zap.Field{
zap.String("uploadID", uploadID.String()),
zap.String("Key", reqInfo.ObjectName),
}
additional := []zap.Field{zap.String("uploadID", uploadID.String())}
p := &layer.CreateMultipartParams{
Info: &layer.UploadInfoParams{
@ -120,11 +118,11 @@ func (h *handler) CreateMultipartUploadHandler(w http.ResponseWriter, r *http.Re
if containsACLHeaders(r) {
key, err := h.bearerTokenIssuerKey(r.Context())
if err != nil {
h.logAndSendError(w, "couldn't get gate key", reqInfo, err)
h.logAndSendError(w, "couldn't get gate key", reqInfo, err, additional...)
return
}
if _, err = parseACLHeaders(r.Header, key); err != nil {
h.logAndSendError(w, "could not parse acl", reqInfo, err)
h.logAndSendError(w, "could not parse acl", reqInfo, err, additional...)
return
}
p.Data.ACLHeaders = formACLHeadersForMultipart(r.Header)
@ -140,7 +138,7 @@ func (h *handler) CreateMultipartUploadHandler(w http.ResponseWriter, r *http.Re
p.Info.Encryption, err = formEncryptionParams(r)
if err != nil {
h.logAndSendError(w, "invalid sse headers", reqInfo, err)
h.logAndSendError(w, "invalid sse headers", reqInfo, err, additional...)
return
}
@ -151,7 +149,7 @@ func (h *handler) CreateMultipartUploadHandler(w http.ResponseWriter, r *http.Re
p.CopiesNumbers, err = h.pickCopiesNumbers(p.Header, bktInfo.LocationConstraint)
if err != nil {
h.logAndSendError(w, "invalid copies number", reqInfo, err)
h.logAndSendError(w, "invalid copies number", reqInfo, err, additional...)
return
}
@ -170,7 +168,7 @@ func (h *handler) CreateMultipartUploadHandler(w http.ResponseWriter, r *http.Re
UploadID: uploadID.String(),
}
if err = api.EncodeToResponse(w, resp); err != nil {
if err = middleware.EncodeToResponse(w, resp); err != nil {
h.logAndSendError(w, "could not encode InitiateMultipartUploadResponse to response", reqInfo, err, additional...)
return
}
@ -196,7 +194,7 @@ func formACLHeadersForMultipart(header http.Header) map[string]string {
}
func (h *handler) UploadPartHandler(w http.ResponseWriter, r *http.Request) {
reqInfo := api.GetReqInfo(r.Context())
reqInfo := middleware.GetReqInfo(r.Context())
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil {
@ -207,12 +205,19 @@ func (h *handler) UploadPartHandler(w http.ResponseWriter, r *http.Request) {
var (
queryValues = r.URL.Query()
uploadID = queryValues.Get(uploadIDHeaderName)
additional = []zap.Field{zap.String("uploadID", uploadID), zap.String("Key", reqInfo.ObjectName)}
partNumStr = queryValues.Get(partNumberHeaderName)
additional = []zap.Field{zap.String("uploadID", uploadID), zap.String("partNumber", partNumStr)}
)
partNumber, err := strconv.Atoi(queryValues.Get(partNumberHeaderName))
partNumber, err := strconv.Atoi(partNumStr)
if err != nil || partNumber < layer.UploadMinPartNumber || partNumber > layer.UploadMaxPartNumber {
h.logAndSendError(w, "invalid part number", reqInfo, errors.GetAPIError(errors.ErrInvalidPartNumber))
h.logAndSendError(w, "invalid part number", reqInfo, errors.GetAPIError(errors.ErrInvalidPartNumber), additional...)
return
}
body, err := h.getBodyReader(r)
if err != nil {
h.logAndSendError(w, "failed to get body reader", reqInfo, err, additional...)
return
}
@ -229,12 +234,12 @@ func (h *handler) UploadPartHandler(w http.ResponseWriter, r *http.Request) {
},
PartNumber: partNumber,
Size: size,
Reader: r.Body,
Reader: body,
}
p.Info.Encryption, err = formEncryptionParams(r)
if err != nil {
h.logAndSendError(w, "invalid sse headers", reqInfo, err)
h.logAndSendError(w, "invalid sse headers", reqInfo, err, additional...)
return
}
@ -249,22 +254,23 @@ func (h *handler) UploadPartHandler(w http.ResponseWriter, r *http.Request) {
}
w.Header().Set(api.ETag, hash)
api.WriteSuccessResponseHeadersOnly(w)
middleware.WriteSuccessResponseHeadersOnly(w)
}
func (h *handler) UploadPartCopy(w http.ResponseWriter, r *http.Request) {
var (
versionID string
ctx = r.Context()
reqInfo = api.GetReqInfo(ctx)
reqInfo = middleware.GetReqInfo(ctx)
queryValues = reqInfo.URL.Query()
uploadID = queryValues.Get(uploadIDHeaderName)
additional = []zap.Field{zap.String("uploadID", uploadID), zap.String("Key", reqInfo.ObjectName)}
partNumStr = queryValues.Get(partNumberHeaderName)
additional = []zap.Field{zap.String("uploadID", uploadID), zap.String("partNumber", partNumStr)}
)
partNumber, err := strconv.Atoi(queryValues.Get(partNumberHeaderName))
partNumber, err := strconv.Atoi(partNumStr)
if err != nil || partNumber < layer.UploadMinPartNumber || partNumber > layer.UploadMaxPartNumber {
h.logAndSendError(w, "invalid part number", reqInfo, errors.GetAPIError(errors.ErrInvalidPartNumber))
h.logAndSendError(w, "invalid part number", reqInfo, errors.GetAPIError(errors.ErrInvalidPartNumber), additional...)
return
}
@ -275,7 +281,7 @@ func (h *handler) UploadPartCopy(w http.ResponseWriter, r *http.Request) {
}
srcBucket, srcObject, err := path2BucketObject(src)
if err != nil {
h.logAndSendError(w, "invalid source copy", reqInfo, err)
h.logAndSendError(w, "invalid source copy", reqInfo, err, additional...)
return
}
@ -288,21 +294,23 @@ func (h *handler) UploadPartCopy(w http.ResponseWriter, r *http.Request) {
srcBktInfo, err := h.getBucketAndCheckOwner(r, srcBucket, api.AmzSourceExpectedBucketOwner)
if err != nil {
h.logAndSendError(w, "could not get source bucket info", reqInfo, err)
h.logAndSendError(w, "could not get source bucket info", reqInfo, err, additional...)
return
}
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil {
h.logAndSendError(w, "could not get target bucket info", reqInfo, err)
h.logAndSendError(w, "could not get target bucket info", reqInfo, err, additional...)
return
}
srcInfo, err := h.obj.GetObjectInfo(ctx, &layer.HeadObjectParams{
headPrm := &layer.HeadObjectParams{
BktInfo: srcBktInfo,
Object: srcObject,
VersionID: versionID,
})
}
srcInfo, err := h.obj.GetObjectInfo(ctx, headPrm)
if err != nil {
if errors.IsS3Error(err, errors.ErrNoSuchKey) && versionID != "" {
h.logAndSendError(w, "could not head source object version", reqInfo,
@ -327,6 +335,7 @@ func (h *handler) UploadPartCopy(w http.ResponseWriter, r *http.Request) {
}
p := &layer.UploadCopyParams{
Versioned: headPrm.Versioned(),
Info: &layer.UploadInfoParams{
UploadID: uploadID,
Bkt: bktInfo,
@ -340,12 +349,12 @@ func (h *handler) UploadPartCopy(w http.ResponseWriter, r *http.Request) {
p.Info.Encryption, err = formEncryptionParams(r)
if err != nil {
h.logAndSendError(w, "invalid sse headers", reqInfo, err)
h.logAndSendError(w, "invalid sse headers", reqInfo, err, additional...)
return
}
if err = p.Info.Encryption.MatchObjectEncryption(layer.FormEncryptionInfo(srcInfo.Headers)); err != nil {
h.logAndSendError(w, "encryption doesn't match object", reqInfo, errors.GetAPIError(errors.ErrBadRequest), zap.Error(err))
h.logAndSendError(w, "encryption doesn't match object", reqInfo, fmt.Errorf("%w: %s", errors.GetAPIError(errors.ErrBadRequest), err), additional...)
return
}
@ -364,13 +373,13 @@ func (h *handler) UploadPartCopy(w http.ResponseWriter, r *http.Request) {
addSSECHeaders(w.Header(), r.Header)
}
if err = api.EncodeToResponse(w, response); err != nil {
h.logAndSendError(w, "something went wrong", reqInfo, err)
if err = middleware.EncodeToResponse(w, response); err != nil {
h.logAndSendError(w, "something went wrong", reqInfo, err, additional...)
}
}
func (h *handler) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
reqInfo := api.GetReqInfo(r.Context())
reqInfo := middleware.GetReqInfo(r.Context())
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil {
@ -385,7 +394,7 @@ func (h *handler) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.
Bkt: bktInfo,
Key: reqInfo.ObjectName,
}
additional = []zap.Field{zap.String("uploadID", uploadID), zap.String("Key", reqInfo.ObjectName)}
additional = []zap.Field{zap.String("uploadID", uploadID)}
)
reqBody := new(CompleteMultipartUpload)
@ -417,11 +426,11 @@ func (h *handler) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.
// successfully or not.
headerIsWritten := stopPeriodicResponseWriter()
responseWriter := api.EncodeToResponse
responseWriter := middleware.EncodeToResponse
errLogger := h.logAndSendError
// Do not send XML and HTTP headers if periodic writer was invoked at this point.
if headerIsWritten {
responseWriter = api.EncodeToResponseNoHeader
responseWriter = middleware.EncodeToResponseNoHeader
errLogger = h.logAndSendErrorNoHeader
}
@ -441,11 +450,11 @@ func (h *handler) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.
// space XML writer to keep connection with the client.
if err = responseWriter(w, response); err != nil {
errLogger(w, "something went wrong", reqInfo, err)
errLogger(w, "something went wrong", reqInfo, err, additional...)
}
}
func (h *handler) completeMultipartUpload(r *http.Request, c *layer.CompleteMultipartParams, bktInfo *data.BucketInfo, reqInfo *api.ReqInfo) (*data.ObjectInfo, error) {
func (h *handler) completeMultipartUpload(r *http.Request, c *layer.CompleteMultipartParams, bktInfo *data.BucketInfo, reqInfo *middleware.ReqInfo) (*data.ObjectInfo, error) {
ctx := r.Context()
uploadData, extendedObjInfo, err := h.obj.CompleteMultipartUpload(ctx, c)
if err != nil {
@ -509,7 +518,7 @@ func (h *handler) completeMultipartUpload(r *http.Request, c *layer.CompleteMult
}
func (h *handler) ListMultipartUploadsHandler(w http.ResponseWriter, r *http.Request) {
reqInfo := api.GetReqInfo(r.Context())
reqInfo := middleware.GetReqInfo(r.Context())
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil {
@ -551,13 +560,13 @@ func (h *handler) ListMultipartUploadsHandler(w http.ResponseWriter, r *http.Req
return
}
if err = api.EncodeToResponse(w, encodeListMultipartUploadsToResponse(list, p)); err != nil {
if err = middleware.EncodeToResponse(w, encodeListMultipartUploadsToResponse(list, p)); err != nil {
h.logAndSendError(w, "something went wrong", reqInfo, err)
}
}
func (h *handler) ListPartsHandler(w http.ResponseWriter, r *http.Request) {
reqInfo := api.GetReqInfo(r.Context())
reqInfo := middleware.GetReqInfo(r.Context())
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil {
@ -614,13 +623,13 @@ func (h *handler) ListPartsHandler(w http.ResponseWriter, r *http.Request) {
return
}
if err = api.EncodeToResponse(w, encodeListPartsToResponse(list, p)); err != nil {
if err = middleware.EncodeToResponse(w, encodeListPartsToResponse(list, p)); err != nil {
h.logAndSendError(w, "something went wrong", reqInfo, err)
}
}
func (h *handler) AbortMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
reqInfo := api.GetReqInfo(r.Context())
reqInfo := middleware.GetReqInfo(r.Context())
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil {


@ -3,9 +3,12 @@ package handler
import (
"bytes"
"encoding/xml"
"net/http"
"net/url"
"testing"
"time"
s3Errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
"github.com/stretchr/testify/require"
)
@ -46,3 +49,78 @@ func TestPeriodicWriter(t *testing.T) {
})
})
}
func TestMultipartUploadInvalidPart(t *testing.T) {
hc := prepareHandlerContext(t)
bktName, objName := "bucket-to-upload-part", "object-multipart"
createTestBucket(hc, bktName)
partSize := 8 // less than min part size
multipartUpload := createMultipartUpload(hc, bktName, objName, map[string]string{})
etag1, _ := uploadPart(hc, bktName, objName, multipartUpload.UploadID, 1, partSize)
etag2, _ := uploadPart(hc, bktName, objName, multipartUpload.UploadID, 2, partSize)
w := completeMultipartUploadBase(hc, bktName, objName, multipartUpload.UploadID, []string{etag1, etag2})
assertS3Error(hc.t, w, s3Errors.GetAPIError(s3Errors.ErrEntityTooSmall))
}
func TestMultipartReUploadPart(t *testing.T) {
hc := prepareHandlerContext(t)
bktName, objName := "bucket-to-upload-part", "object-multipart"
bktInfo := createTestBucket(hc, bktName)
partSizeLast := 8 // less than min part size
partSizeFirst := 5 * 1024 * 1024
uploadInfo := createMultipartUpload(hc, bktName, objName, map[string]string{})
etag1, _ := uploadPart(hc, bktName, objName, uploadInfo.UploadID, 1, partSizeLast)
etag2, _ := uploadPart(hc, bktName, objName, uploadInfo.UploadID, 2, partSizeFirst)
list := listParts(hc, bktName, objName, uploadInfo.UploadID)
require.Len(t, list.Parts, 2)
require.Equal(t, etag1, list.Parts[0].ETag)
require.Equal(t, etag2, list.Parts[1].ETag)
w := completeMultipartUploadBase(hc, bktName, objName, uploadInfo.UploadID, []string{etag1, etag2})
assertS3Error(hc.t, w, s3Errors.GetAPIError(s3Errors.ErrEntityTooSmall))
etag1, data1 := uploadPart(hc, bktName, objName, uploadInfo.UploadID, 1, partSizeFirst)
etag2, data2 := uploadPart(hc, bktName, objName, uploadInfo.UploadID, 2, partSizeLast)
list = listParts(hc, bktName, objName, uploadInfo.UploadID)
require.Len(t, list.Parts, 2)
require.Equal(t, etag1, list.Parts[0].ETag)
require.Equal(t, etag2, list.Parts[1].ETag)
innerUploadInfo, err := hc.tree.GetMultipartUpload(hc.context, bktInfo, objName, uploadInfo.UploadID)
require.NoError(t, err)
treeParts, err := hc.tree.GetParts(hc.Context(), bktInfo, innerUploadInfo.ID)
require.NoError(t, err)
require.Len(t, treeParts, len(list.Parts))
w = completeMultipartUploadBase(hc, bktName, objName, uploadInfo.UploadID, []string{etag1, etag2})
assertStatus(hc.t, w, http.StatusOK)
data, _ := getObject(hc, bktName, objName)
equalDataSlices(t, append(data1, data2...), data)
}
func listParts(hc *handlerContext, bktName, objName string, uploadID string) *ListPartsResponse {
return listPartsBase(hc, bktName, objName, false, uploadID)
}
func listPartsBase(hc *handlerContext, bktName, objName string, encrypted bool, uploadID string) *ListPartsResponse {
query := make(url.Values)
query.Set(uploadIDQuery, uploadID)
w, r := prepareTestRequestWithQuery(hc, bktName, objName, query, nil)
if encrypted {
setEncryptHeaders(r)
}
hc.Handler().ListPartsHandler(w, r)
listPartsResponse := &ListPartsResponse{}
readResponse(hc.t, w, http.StatusOK, listPartsResponse)
return listPartsResponse
}


@ -3,18 +3,18 @@ package handler
import (
"net/http"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
)
func (h *handler) DeleteBucketPolicyHandler(w http.ResponseWriter, r *http.Request) {
h.logAndSendError(w, "not supported", api.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotSupported))
h.logAndSendError(w, "not supported", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotSupported))
}
func (h *handler) DeleteBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {
h.logAndSendError(w, "not supported", api.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotSupported))
h.logAndSendError(w, "not supported", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotSupported))
}
func (h *handler) DeleteBucketEncryptionHandler(w http.ResponseWriter, r *http.Request) {
h.logAndSendError(w, "not supported", api.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotSupported))
h.logAndSendError(w, "not supported", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotSupported))
}


@ -8,10 +8,10 @@ import (
"strings"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
"github.com/google/uuid"
)
@ -21,7 +21,7 @@ type (
Event string
NotificationInfo *data.NotificationInfo
BktInfo *data.BucketInfo
ReqInfo *api.ReqInfo
ReqInfo *middleware.ReqInfo
User string
Time time.Time
}
@ -96,7 +96,7 @@ var validEvents = map[string]struct{}{
}
func (h *handler) PutBucketNotificationHandler(w http.ResponseWriter, r *http.Request) {
reqInfo := api.GetReqInfo(r.Context())
reqInfo := middleware.GetReqInfo(r.Context())
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil {
h.logAndSendError(w, "could not get bucket info", reqInfo, err)
@ -133,7 +133,7 @@ func (h *handler) PutBucketNotificationHandler(w http.ResponseWriter, r *http.Re
}
func (h *handler) GetBucketNotificationHandler(w http.ResponseWriter, r *http.Request) {
reqInfo := api.GetReqInfo(r.Context())
reqInfo := middleware.GetReqInfo(r.Context())
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil {
@ -147,7 +147,7 @@ func (h *handler) GetBucketNotificationHandler(w http.ResponseWriter, r *http.Re
return
}
if err = api.EncodeToResponse(w, conf); err != nil {
if err = middleware.EncodeToResponse(w, conf); err != nil {
h.logAndSendError(w, "could not encode bucket notification configuration to response", reqInfo, err)
return
}
@ -179,7 +179,7 @@ func (h *handler) sendNotifications(ctx context.Context, p *SendNotificationPara
}
// checkBucketConfiguration checks notification configuration and generates an ID for configurations with empty ids.
func (h *handler) checkBucketConfiguration(ctx context.Context, conf *data.NotificationConfiguration, r *api.ReqInfo) (completed bool, err error) {
func (h *handler) checkBucketConfiguration(ctx context.Context, conf *data.NotificationConfiguration, r *middleware.ReqInfo) (completed bool, err error) {
if conf == nil {
return
}


@ -6,16 +6,16 @@ import (
"strconv"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)
// ListObjectsV1Handler handles objects listing requests for API version 1.
func (h *handler) ListObjectsV1Handler(w http.ResponseWriter, r *http.Request) {
reqInfo := api.GetReqInfo(r.Context())
reqInfo := middleware.GetReqInfo(r.Context())
params, err := parseListObjectsArgsV1(reqInfo)
if err != nil {
h.logAndSendError(w, "failed to parse arguments", reqInfo, err)
@ -33,7 +33,7 @@ func (h *handler) ListObjectsV1Handler(w http.ResponseWriter, r *http.Request) {
return
}
if err = api.EncodeToResponse(w, encodeV1(params, list)); err != nil {
if err = middleware.EncodeToResponse(w, encodeV1(params, list)); err != nil {
h.logAndSendError(w, "something went wrong", reqInfo, err)
}
}
@ -59,7 +59,7 @@ func encodeV1(p *layer.ListObjectsParamsV1, list *layer.ListObjectsInfoV1) *List
// ListObjectsV2Handler handles objects listing requests for API version 2.
func (h *handler) ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) {
reqInfo := api.GetReqInfo(r.Context())
reqInfo := middleware.GetReqInfo(r.Context())
params, err := parseListObjectsArgsV2(reqInfo)
if err != nil {
h.logAndSendError(w, "failed to parse arguments", reqInfo, err)
@ -77,7 +77,7 @@ func (h *handler) ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) {
return
}
if err = api.EncodeToResponse(w, encodeV2(params, list)); err != nil {
if err = middleware.EncodeToResponse(w, encodeV2(params, list)); err != nil {
h.logAndSendError(w, "something went wrong", reqInfo, err)
}
}
@ -103,7 +103,7 @@ func encodeV2(p *layer.ListObjectsParamsV2, list *layer.ListObjectsInfoV2) *List
return res
}
func parseListObjectsArgsV1(reqInfo *api.ReqInfo) (*layer.ListObjectsParamsV1, error) {
func parseListObjectsArgsV1(reqInfo *middleware.ReqInfo) (*layer.ListObjectsParamsV1, error) {
var (
res layer.ListObjectsParamsV1
queryValues = reqInfo.URL.Query()
@ -120,7 +120,7 @@ func parseListObjectsArgsV1(reqInfo *api.ReqInfo) (*layer.ListObjectsParamsV1, e
return &res, nil
}
func parseListObjectsArgsV2(reqInfo *api.ReqInfo) (*layer.ListObjectsParamsV2, error) {
func parseListObjectsArgsV2(reqInfo *middleware.ReqInfo) (*layer.ListObjectsParamsV2, error) {
var (
res layer.ListObjectsParamsV2
queryValues = reqInfo.URL.Query()
@ -142,7 +142,7 @@ func parseListObjectsArgsV2(reqInfo *api.ReqInfo) (*layer.ListObjectsParamsV2, e
return &res, nil
}
func parseListObjectArgs(reqInfo *api.ReqInfo) (*layer.ListObjectsParamsCommon, error) {
func parseListObjectArgs(reqInfo *middleware.ReqInfo) (*layer.ListObjectsParamsCommon, error) {
var (
err error
res layer.ListObjectsParamsCommon
@ -211,7 +211,7 @@ func fillContents(src []*data.ObjectInfo, encode string, fetchOwner bool) []Obje
}
func (h *handler) ListBucketObjectVersionsHandler(w http.ResponseWriter, r *http.Request) {
reqInfo := api.GetReqInfo(r.Context())
reqInfo := middleware.GetReqInfo(r.Context())
p, err := parseListObjectVersionsRequest(reqInfo)
if err != nil {
h.logAndSendError(w, "failed to parse request", reqInfo, err)
@ -230,12 +230,12 @@ func (h *handler) ListBucketObjectVersionsHandler(w http.ResponseWriter, r *http
}
response := encodeListObjectVersionsToResponse(info, p.BktInfo.Name)
if err = api.EncodeToResponse(w, response); err != nil {
if err = middleware.EncodeToResponse(w, response); err != nil {
h.logAndSendError(w, "something went wrong", reqInfo, err)
}
}
func parseListObjectVersionsRequest(reqInfo *api.ReqInfo) (*layer.ListObjectVersionsParams, error) {
func parseListObjectVersionsRequest(reqInfo *middleware.ReqInfo) (*layer.ListObjectVersionsParams, error) {
var (
err error
res layer.ListObjectVersionsParams


@ -22,6 +22,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/encryption"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
@ -180,7 +181,7 @@ func (h *handler) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
sessionTokenEACL *session.Container
containsACL = containsACLHeaders(r)
ctx = r.Context()
reqInfo = api.GetReqInfo(ctx)
reqInfo = middleware.GetReqInfo(ctx)
)
if containsACL {
@ -219,6 +220,15 @@ func (h *handler) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
return
}
body, err := h.getBodyReader(r)
if err != nil {
h.logAndSendError(w, "failed to get body reader", reqInfo, err)
return
}
if encodings := r.Header.Get(api.ContentEncoding); len(encodings) > 0 {
metadata[api.ContentEncoding] = encodings
}
var size uint64
if r.ContentLength > 0 {
size = uint64(r.ContentLength)
@ -227,7 +237,7 @@ func (h *handler) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
params := &layer.PutObjectParams{
BktInfo: bktInfo,
Object: reqInfo.ObjectName,
Reader: r.Body,
Reader: body,
Size: size,
Header: metadata,
Encryption: encryptionParams,
@ -253,8 +263,8 @@ func (h *handler) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
extendedObjInfo, err := h.obj.PutObject(ctx, params)
if err != nil {
_, err2 := io.Copy(io.Discard, r.Body)
err3 := r.Body.Close()
_, err2 := io.Copy(io.Discard, body)
err3 := body.Close()
h.logAndSendError(w, "could not upload object", reqInfo, err, zap.Errors("body close errors", []error{err2, err3}))
return
}
@ -314,7 +324,49 @@ func (h *handler) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
}
w.Header().Set(api.ETag, objInfo.HashSum)
api.WriteSuccessResponseHeadersOnly(w)
middleware.WriteSuccessResponseHeadersOnly(w)
}
func (h *handler) getBodyReader(r *http.Request) (io.ReadCloser, error) {
if !api.IsSignedStreamingV4(r) {
return r.Body, nil
}
encodings := r.Header.Values(api.ContentEncoding)
var chunkedEncoding bool
resultContentEncoding := make([]string, 0, len(encodings))
for _, enc := range encodings {
for _, e := range strings.Split(enc, ",") {
e = strings.TrimSpace(e)
if e == api.AwsChunked { // probably we should also check position of this header value
chunkedEncoding = true
} else {
resultContentEncoding = append(resultContentEncoding, e)
}
}
}
r.Header.Set(api.ContentEncoding, strings.Join(resultContentEncoding, ","))
if !chunkedEncoding && !h.cfg.Kludge.BypassContentEncodingInChunks() {
return nil, fmt.Errorf("%w: request is not chunk encoded, encodings '%s'",
errors.GetAPIError(errors.ErrInvalidEncodingMethod), strings.Join(encodings, ","))
}
decodeContentSize := r.Header.Get(api.AmzDecodedContentLength)
if len(decodeContentSize) == 0 {
return nil, errors.GetAPIError(errors.ErrMissingContentLength)
}
if _, err := strconv.Atoi(decodeContentSize); err != nil {
return nil, fmt.Errorf("%w: parse decoded content length: %s", errors.GetAPIError(errors.ErrMissingContentLength), err.Error())
}
chunkReader, err := newSignV4ChunkedReader(r)
if err != nil {
return nil, fmt.Errorf("initialize chunk reader: %w", err)
}
return chunkReader, nil
}
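For reference, the request shape getBodyReader accepts is the one the AWS-example test below constructs: an aws-chunked content encoding plus a decoded-length header alongside the streaming payload hash. Roughly:

PUT /examplebucket/chunkObject.txt HTTP/1.1
Content-Encoding: aws-chunked
Content-Length: 66824
x-amz-content-sha256: STREAMING-AWS4-HMAC-SHA256-PAYLOAD
x-amz-decoded-content-length: 66560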
func formEncryptionParams(r *http.Request) (enc encryption.Params, err error) {
@ -367,7 +419,7 @@ func (h *handler) PostObject(w http.ResponseWriter, r *http.Request) {
tagSet map[string]string
sessionTokenEACL *session.Container
ctx = r.Context()
reqInfo = api.GetReqInfo(ctx)
reqInfo = middleware.GetReqInfo(ctx)
metadata = make(map[string]string)
containsACL = containsACLHeaders(r)
)
@ -509,7 +561,7 @@ func (h *handler) PostObject(w http.ResponseWriter, r *http.Request) {
ETag: objInfo.HashSum,
}
w.WriteHeader(status)
if _, err = w.Write(api.EncodeResponse(resp)); err != nil {
if _, err = w.Write(middleware.EncodeResponse(resp)); err != nil {
h.logAndSendError(w, "something went wrong", reqInfo, err)
}
return
@ -520,7 +572,7 @@ func (h *handler) PostObject(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(status)
}
func checkPostPolicy(r *http.Request, reqInfo *api.ReqInfo, metadata map[string]string) (*postPolicy, error) {
func checkPostPolicy(r *http.Request, reqInfo *middleware.ReqInfo, metadata map[string]string) (*postPolicy, error) {
policy := &postPolicy{empty: true}
if policyStr := auth.MultipartFormValue(r, "policy"); policyStr != "" {
policyData, err := base64.StdEncoding.DecodeString(policyStr)
@ -660,7 +712,7 @@ func parseMetadata(r *http.Request) map[string]string {
func (h *handler) CreateBucketHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
reqInfo := api.GetReqInfo(ctx)
reqInfo := middleware.GetReqInfo(ctx)
p := &layer.CreateBucketParams{
Name: reqInfo.BucketName,
}
@ -740,7 +792,7 @@ func (h *handler) CreateBucketHandler(w http.ResponseWriter, r *http.Request) {
}
}
api.WriteSuccessResponseHeadersOnly(w)
middleware.WriteSuccessResponseHeadersOnly(w)
}
func (h handler) setPolicy(prm *layer.CreateBucketParams, locationConstraint string, userPolicies []*accessbox.ContainerPolicy) error {


@ -2,16 +2,28 @@ package handler
import (
"bytes"
"context"
"crypto/rand"
"encoding/json"
"errors"
"io"
"mime/multipart"
"net/http"
"net/http/httptest"
"runtime"
"strconv"
"strings"
"testing"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth"
v4 "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4"
s3errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/stretchr/testify/require"
)
@ -102,7 +114,7 @@ func TestEmptyPostPolicy(t *testing.T) {
},
},
}
reqInfo := &api.ReqInfo{}
reqInfo := &middleware.ReqInfo{}
metadata := make(map[string]string)
_, err := checkPostPolicy(r, reqInfo, metadata)
@ -146,3 +158,154 @@ func TestPutObjectWithNegativeContentLength(t *testing.T) {
assertStatus(t, w, http.StatusOK)
require.Equal(t, strconv.Itoa(len(content)), w.Header().Get(api.ContentLength))
}
func TestPutObjectWithStreamBodyError(t *testing.T) {
tc := prepareHandlerContext(t)
bktName, objName := "bucket-for-put", "object-for-put"
createTestBucket(tc, bktName)
content := []byte("content")
w, r := prepareTestPayloadRequest(tc, bktName, objName, bytes.NewReader(content))
r.Header.Set(api.AmzContentSha256, api.StreamingContentSHA256)
r.Header.Set(api.ContentEncoding, api.AwsChunked)
tc.Handler().PutObjectHandler(w, r)
assertS3Error(t, w, s3errors.GetAPIError(s3errors.ErrMissingContentLength))
checkNotFound(t, tc, bktName, objName, emptyVersion)
}
func TestPutObjectWithWrapReaderDiscardOnError(t *testing.T) {
tc := prepareHandlerContext(t)
bktName, objName := "bucket-for-put", "object-for-put"
createTestBucket(tc, bktName)
content := make([]byte, 128*1024)
_, err := rand.Read(content)
require.NoError(t, err)
w, r := prepareTestPayloadRequest(tc, bktName, objName, bytes.NewReader(content))
tc.tp.SetObjectPutError(objName, errors.New("some error"))
numGoroutineBefore := runtime.NumGoroutine()
tc.Handler().PutObjectHandler(w, r)
numGoroutineAfter := runtime.NumGoroutine()
require.Equal(t, numGoroutineBefore, numGoroutineAfter, "goroutines shouldn't leak during put object")
}
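The goroutine check above works because a synchronous pipe keeps its writer goroutine blocked until every byte is consumed; if the put fails and nobody drains the wrapped body, that goroutine never exits. A minimal reproduction of the drain-on-error idea, independent of the gateway types:

package main

import (
	"fmt"
	"io"
	"runtime"
	"strings"
	"time"
)

func main() {
	pr, pw := io.Pipe()
	go func() {
		// Blocks on the synchronous pipe until the reader consumes (or closes) it.
		_, _ = io.Copy(pw, strings.NewReader(strings.Repeat("a", 128*1024)))
		_ = pw.Close()
	}()

	before := runtime.NumGoroutine()
	_, _ = io.Copy(io.Discard, pr) // draining the reader lets the writer goroutine finish
	time.Sleep(10 * time.Millisecond)
	fmt.Println(runtime.NumGoroutine() <= before) // true: nothing left blocked on the pipe
}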
func TestPutObjectWithStreamBodyAWSExample(t *testing.T) {
hc := prepareHandlerContext(t)
bktName, objName := "examplebucket", "chunkObject.txt"
createTestBucket(hc, bktName)
w, req, chunk := getChunkedRequest(hc.context, t, bktName, objName)
hc.Handler().PutObjectHandler(w, req)
assertStatus(t, w, http.StatusOK)
data := getObjectRange(t, hc, bktName, objName, 0, 66824)
for i := range chunk {
require.Equal(t, chunk[i], data[i])
}
}
func TestPutChunkedTestContentEncoding(t *testing.T) {
hc := prepareHandlerContext(t)
bktName, objName := "examplebucket", "chunkObject.txt"
createTestBucket(hc, bktName)
w, req, _ := getChunkedRequest(hc.context, t, bktName, objName)
req.Header.Set(api.ContentEncoding, api.AwsChunked+",gzip")
hc.Handler().PutObjectHandler(w, req)
assertStatus(t, w, http.StatusOK)
resp := headObjectBase(hc, bktName, objName, emptyVersion)
require.Equal(t, "gzip", resp.Header().Get(api.ContentEncoding))
w, req, _ = getChunkedRequest(hc.context, t, bktName, objName)
req.Header.Set(api.ContentEncoding, "gzip")
hc.Handler().PutObjectHandler(w, req)
assertS3Error(t, w, s3errors.GetAPIError(s3errors.ErrInvalidEncodingMethod))
hc.kludge.bypassContentEncodingInChunks = true
w, req, _ = getChunkedRequest(hc.context, t, bktName, objName)
req.Header.Set(api.ContentEncoding, "gzip")
hc.Handler().PutObjectHandler(w, req)
assertStatus(t, w, http.StatusOK)
resp = headObjectBase(hc, bktName, objName, emptyVersion)
require.Equal(t, "gzip", resp.Header().Get(api.ContentEncoding))
}
func getChunkedRequest(ctx context.Context, t *testing.T, bktName, objName string) (*httptest.ResponseRecorder, *http.Request, []byte) {
chunk := make([]byte, 65*1024)
for i := range chunk {
chunk[i] = 'a'
}
chunk1 := chunk[:64*1024]
chunk2 := chunk[64*1024:]
AWSAccessKeyID := "AKIAIOSFODNN7EXAMPLE"
AWSSecretAccessKey := "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"
awsCreds := credentials.NewStaticCredentials(AWSAccessKeyID, AWSSecretAccessKey, "")
signer := v4.NewSigner(awsCreds)
reqBody := bytes.NewBufferString("10000;chunk-signature=ad80c730a21e5b8d04586a2213dd63b9a0e99e0e2307b0ade35a65485a288648\r\n")
_, err := reqBody.Write(chunk1)
require.NoError(t, err)
_, err = reqBody.WriteString("\r\n400;chunk-signature=0055627c9e194cb4542bae2aa5492e3c1575bbb81b612b7d234b86a503ef5497\r\n")
require.NoError(t, err)
_, err = reqBody.Write(chunk2)
require.NoError(t, err)
_, err = reqBody.WriteString("\r\n0;chunk-signature=b6c6ea8a5354eaf15b3cb7646744f4275b71ea724fed81ceb9323e279d449df9\r\n\r\n")
require.NoError(t, err)
req, err := http.NewRequest("PUT", "https://s3.amazonaws.com/"+bktName+"/"+objName, nil)
require.NoError(t, err)
req.Header.Set("content-encoding", "aws-chunked")
req.Header.Set("content-length", "66824")
req.Header.Set("x-amz-content-sha256", "STREAMING-AWS4-HMAC-SHA256-PAYLOAD")
req.Header.Set("x-amz-decoded-content-length", "66560")
req.Header.Set("x-amz-storage-class", "REDUCED_REDUNDANCY")
signTime, err := time.Parse("20060102T150405Z", "20130524T000000Z")
require.NoError(t, err)
_, err = signer.Sign(req, nil, "s3", "us-east-1", signTime)
require.NoError(t, err)
req.Body = io.NopCloser(reqBody)
w := httptest.NewRecorder()
reqInfo := middleware.NewReqInfo(w, req, middleware.ObjectRequest{Bucket: bktName, Object: objName})
req = req.WithContext(middleware.SetReqInfo(ctx, reqInfo))
req = req.WithContext(context.WithValue(req.Context(), middleware.ClientTime, signTime))
req = req.WithContext(context.WithValue(req.Context(), middleware.AuthHeaders, &auth.AuthHeader{
AccessKeyID: AWSAccessKeyID,
SignatureV4: "4f232c4386841ef735655705268965c44a0e4690baa4adea153f7db9fa80a0a9",
Service: "s3",
Region: "us-east-1",
}))
req = req.WithContext(context.WithValue(req.Context(), middleware.BoxData, &accessbox.Box{
Gate: &accessbox.GateData{
AccessKey: AWSSecretAccessKey,
},
}))
return w, req, chunk
}
func TestCreateBucket(t *testing.T) {
hc := prepareHandlerContext(t)
bktName := "bkt-name"
box, _ := createAccessBox(t)
createBucket(t, hc, bktName, box)
createBucketAssertS3Error(hc, bktName, box, s3errors.ErrBucketAlreadyOwnedByYou)
box2, _ := createAccessBox(t)
createBucketAssertS3Error(hc, bktName, box2, s3errors.ErrBucketAlreadyExists)
}

api/handler/s3reader.go (new file, 224 lines)

@ -0,0 +1,224 @@
package handler
import (
"bufio"
"bytes"
"encoding/hex"
"errors"
"io"
"net/http"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth"
v4 "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4"
errs "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
"github.com/aws/aws-sdk-go/aws/credentials"
)
const (
chunkSignatureHeader = "chunk-signature="
maxChunkSize = 16 << 20
)
type (
s3ChunkReader struct {
reader *bufio.Reader
streamSigner *v4.StreamSigner
requestTime time.Time
buffer []byte
offset int
err error
}
)
var (
errGiantChunk = errors.New("chunk too big: choose chunk size <= 16MiB")
errMalformedChunkedEncoding = errors.New("malformed chunked encoding")
)
func (c *s3ChunkReader) Close() (err error) {
return nil
}
func (c *s3ChunkReader) Read(buf []byte) (num int, err error) {
if c.offset > 0 {
num = copy(buf, c.buffer[c.offset:])
if num == len(buf) {
c.offset += num
return num, nil
}
c.offset = 0
buf = buf[num:]
}
var size int
for {
b, err := c.reader.ReadByte()
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
if err != nil {
c.err = err
return num, c.err
}
if b == ';' { // separating character
break
}
// Manually deserialize the size since AWS specified
// the chunk size to be of variable width. In particular,
// a size of 16 is encoded as `10` while a size of 64 KB
// is `10000`.
switch {
case b >= '0' && b <= '9':
size = size<<4 | int(b-'0')
case b >= 'a' && b <= 'f':
size = size<<4 | int(b-('a'-10))
case b >= 'A' && b <= 'F':
size = size<<4 | int(b-('A'-10))
default:
c.err = errMalformedChunkedEncoding
return num, c.err
}
if size > maxChunkSize {
c.err = errGiantChunk
return num, c.err
}
}
// Now, we read the signature of the following payload and expect:
// chunk-signature=" + <signature-as-hex> + "\r\n"
//
// The signature is 64 bytes long (hex-encoded SHA256 hash) and
// starts with a 16 byte header: len("chunk-signature=") + 64 == 80.
var signature [80]byte
_, err = io.ReadFull(c.reader, signature[:])
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
if err != nil {
c.err = err
return num, c.err
}
if !bytes.HasPrefix(signature[:], []byte(chunkSignatureHeader)) {
c.err = errMalformedChunkedEncoding
return num, c.err
}
b, err := c.reader.ReadByte()
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
if err != nil {
c.err = err
return num, c.err
}
if b != '\r' {
c.err = errMalformedChunkedEncoding
return num, c.err
}
b, err = c.reader.ReadByte()
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
if err != nil {
c.err = err
return num, c.err
}
if b != '\n' {
c.err = errMalformedChunkedEncoding
return num, c.err
}
if cap(c.buffer) < size {
c.buffer = make([]byte, size)
} else {
c.buffer = c.buffer[:size]
}
// Now, we read the payload and compute its SHA-256 hash.
_, err = io.ReadFull(c.reader, c.buffer)
if err == io.EOF && size != 0 {
err = io.ErrUnexpectedEOF
}
if err != nil && err != io.EOF {
c.err = err
return num, c.err
}
b, err = c.reader.ReadByte()
if b != '\r' || err != nil {
c.err = errMalformedChunkedEncoding
return num, c.err
}
b, err = c.reader.ReadByte()
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
if err != nil {
c.err = err
return num, c.err
}
if b != '\n' {
c.err = errMalformedChunkedEncoding
return num, c.err
}
// Once we have read the entire chunk successfully, we verify
// that the received signature matches our computed signature.
calculatedSignature, err := c.streamSigner.GetSignature(nil, c.buffer, c.requestTime)
if err != nil {
c.err = err
return num, c.err
}
if string(signature[16:]) != hex.EncodeToString(calculatedSignature) {
c.err = errs.GetAPIError(errs.ErrSignatureDoesNotMatch)
return num, c.err
}
// If the chunk size is zero we return io.EOF. As specified by AWS,
// only the last chunk is zero-sized.
if size == 0 {
c.err = io.EOF
return num, c.err
}
c.offset = copy(buf, c.buffer)
num += c.offset
return num, err
}
func newSignV4ChunkedReader(req *http.Request) (io.ReadCloser, error) {
// Expecting to refactor this in future:
// https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/issues/137
box, ok := req.Context().Value(middleware.BoxData).(*accessbox.Box)
if !ok {
return nil, errs.GetAPIError(errs.ErrAuthorizationHeaderMalformed)
}
authHeaders, ok := req.Context().Value(middleware.AuthHeaders).(*auth.AuthHeader)
if !ok {
return nil, errs.GetAPIError(errs.ErrAuthorizationHeaderMalformed)
}
currentCredentials := credentials.NewStaticCredentials(authHeaders.AccessKeyID, box.Gate.AccessKey, "")
seed, err := hex.DecodeString(authHeaders.SignatureV4)
if err != nil {
return nil, errs.GetAPIError(errs.ErrSignatureDoesNotMatch)
}
reqTime, ok := req.Context().Value(middleware.ClientTime).(time.Time)
if !ok {
return nil, errs.GetAPIError(errs.ErrMalformedDate)
}
newStreamSigner := v4.NewStreamSigner(authHeaders.Region, "s3", seed, currentCredentials)
return &s3ChunkReader{
reader: bufio.NewReader(req.Body),
streamSigner: newStreamSigner,
requestTime: reqTime,
buffer: make([]byte, 64*1024),
}, nil
}
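For reference, a minimal sketch of the wire framing this reader parses: each chunk is "<hex-size>;chunk-signature=<64 hex chars>\r\n<payload>\r\n", and a zero-sized chunk terminates the stream. The signature below is a dummy placeholder; real uploads derive it with the SigV4 stream signer seeded from the request signature.

package main

import "fmt"

// Illustrative only: the aws-chunked framing expected by s3ChunkReader.
// The signature is a 64-character hex placeholder, not a real HMAC-SHA256.
func main() {
	const fakeSig = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
	payload := "hello world"

	body := fmt.Sprintf("%x;chunk-signature=%s\r\n%s\r\n", len(payload), fakeSig, payload) +
		// The final chunk has size 0; on it the reader verifies the signature and returns io.EOF.
		fmt.Sprintf("0;chunk-signature=%s\r\n\r\n", fakeSig)

	fmt.Printf("%q\n", body)
}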

View file

@ -12,6 +12,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
"go.uber.org/zap"
)
@ -25,7 +26,7 @@ const (
func (h *handler) PutObjectTaggingHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
reqInfo := api.GetReqInfo(ctx)
reqInfo := middleware.GetReqInfo(ctx)
tagSet, err := readTagSet(r.Body)
if err != nil {
@ -72,7 +73,7 @@ func (h *handler) PutObjectTaggingHandler(w http.ResponseWriter, r *http.Request
}
func (h *handler) GetObjectTaggingHandler(w http.ResponseWriter, r *http.Request) {
reqInfo := api.GetReqInfo(r.Context())
reqInfo := middleware.GetReqInfo(r.Context())
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil {
@ -103,14 +104,14 @@ func (h *handler) GetObjectTaggingHandler(w http.ResponseWriter, r *http.Request
if settings.VersioningEnabled() {
w.Header().Set(api.AmzVersionID, versionID)
}
if err = api.EncodeToResponse(w, encodeTagging(tagSet)); err != nil {
if err = middleware.EncodeToResponse(w, encodeTagging(tagSet)); err != nil {
h.logAndSendError(w, "something went wrong", reqInfo, err)
}
}
func (h *handler) DeleteObjectTaggingHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
reqInfo := api.GetReqInfo(ctx)
reqInfo := middleware.GetReqInfo(ctx)
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil {
@ -149,7 +150,7 @@ func (h *handler) DeleteObjectTaggingHandler(w http.ResponseWriter, r *http.Requ
}
func (h *handler) PutBucketTaggingHandler(w http.ResponseWriter, r *http.Request) {
reqInfo := api.GetReqInfo(r.Context())
reqInfo := middleware.GetReqInfo(r.Context())
tagSet, err := readTagSet(r.Body)
if err != nil {
@ -170,7 +171,7 @@ func (h *handler) PutBucketTaggingHandler(w http.ResponseWriter, r *http.Request
}
func (h *handler) GetBucketTaggingHandler(w http.ResponseWriter, r *http.Request) {
reqInfo := api.GetReqInfo(r.Context())
reqInfo := middleware.GetReqInfo(r.Context())
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil {
@ -184,14 +185,14 @@ func (h *handler) GetBucketTaggingHandler(w http.ResponseWriter, r *http.Request
return
}
if err = api.EncodeToResponse(w, encodeTagging(tagSet)); err != nil {
if err = middleware.EncodeToResponse(w, encodeTagging(tagSet)); err != nil {
h.logAndSendError(w, "something went wrong", reqInfo, err)
return
}
}
func (h *handler) DeleteBucketTaggingHandler(w http.ResponseWriter, r *http.Request) {
reqInfo := api.GetReqInfo(r.Context())
reqInfo := middleware.GetReqInfo(r.Context())
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil {

View file

@ -3,58 +3,58 @@ package handler
import (
"net/http"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
)
func (h *handler) SelectObjectContentHandler(w http.ResponseWriter, r *http.Request) {
h.logAndSendError(w, "not implemented", api.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
h.logAndSendError(w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
}
func (h *handler) GetBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {
h.logAndSendError(w, "not implemented", api.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
h.logAndSendError(w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
}
func (h *handler) GetBucketEncryptionHandler(w http.ResponseWriter, r *http.Request) {
h.logAndSendError(w, "not implemented", api.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
h.logAndSendError(w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
}
func (h *handler) GetBucketWebsiteHandler(w http.ResponseWriter, r *http.Request) {
h.logAndSendError(w, "not implemented", api.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
h.logAndSendError(w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
}
func (h *handler) GetBucketAccelerateHandler(w http.ResponseWriter, r *http.Request) {
h.logAndSendError(w, "not implemented", api.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
h.logAndSendError(w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
}
func (h *handler) GetBucketRequestPaymentHandler(w http.ResponseWriter, r *http.Request) {
h.logAndSendError(w, "not implemented", api.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
h.logAndSendError(w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
}
func (h *handler) GetBucketLoggingHandler(w http.ResponseWriter, r *http.Request) {
h.logAndSendError(w, "not implemented", api.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
h.logAndSendError(w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
}
func (h *handler) GetBucketReplicationHandler(w http.ResponseWriter, r *http.Request) {
h.logAndSendError(w, "not implemented", api.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
h.logAndSendError(w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
}
func (h *handler) DeleteBucketWebsiteHandler(w http.ResponseWriter, r *http.Request) {
h.logAndSendError(w, "not implemented", api.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
h.logAndSendError(w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
}
func (h *handler) ListenBucketNotificationHandler(w http.ResponseWriter, r *http.Request) {
h.logAndSendError(w, "not implemented", api.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
h.logAndSendError(w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
}
func (h *handler) ListObjectsV2MHandler(w http.ResponseWriter, r *http.Request) {
h.logAndSendError(w, "not implemented", api.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
h.logAndSendError(w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
}
func (h *handler) PutBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {
h.logAndSendError(w, "not implemented", api.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
h.logAndSendError(w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
}
func (h *handler) PutBucketEncryptionHandler(w http.ResponseWriter, r *http.Request) {
h.logAndSendError(w, "not implemented", api.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
h.logAndSendError(w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
}

View file

@ -2,29 +2,31 @@ package handler
import (
"context"
errorsStd "errors"
"errors"
"net/http"
"strconv"
"strings"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
s3errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
frosterrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/errors"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
"go.uber.org/zap"
)
func (h *handler) reqLogger(ctx context.Context) *zap.Logger {
reqLogger := api.GetReqLog(ctx)
reqLogger := middleware.GetReqLog(ctx)
if reqLogger != nil {
return reqLogger
}
return h.log
}
func (h *handler) logAndSendError(w http.ResponseWriter, logText string, reqInfo *api.ReqInfo, err error, additional ...zap.Field) {
code := api.WriteErrorResponse(w, reqInfo, transformToS3Error(err))
func (h *handler) logAndSendError(w http.ResponseWriter, logText string, reqInfo *middleware.ReqInfo, err error, additional ...zap.Field) {
code := middleware.WriteErrorResponse(w, reqInfo, transformToS3Error(err))
fields := []zap.Field{
zap.Int("status", code),
zap.String("request_id", reqInfo.RequestID),
@ -34,11 +36,11 @@ func (h *handler) logAndSendError(w http.ResponseWriter, logText string, reqInfo
zap.String("description", logText),
zap.Error(err)}
fields = append(fields, additional...)
h.log.Error("reqeust failed", fields...) // consider using h.reqLogger (it requires accept context.Context or http.Request)
h.log.Error("request failed", fields...) // consider using h.reqLogger (it requires accept context.Context or http.Request)
}
func (h *handler) logAndSendErrorNoHeader(w http.ResponseWriter, logText string, reqInfo *api.ReqInfo, err error, additional ...zap.Field) {
api.WriteErrorResponseNoHeader(w, reqInfo, transformToS3Error(err))
func (h *handler) logAndSendErrorNoHeader(w http.ResponseWriter, logText string, reqInfo *middleware.ReqInfo, err error, additional ...zap.Field) {
middleware.WriteErrorResponseNoHeader(w, reqInfo, transformToS3Error(err))
fields := []zap.Field{
zap.String("request_id", reqInfo.RequestID),
zap.String("method", reqInfo.API),
@ -47,24 +49,25 @@ func (h *handler) logAndSendErrorNoHeader(w http.ResponseWriter, logText string,
zap.String("description", logText),
zap.Error(err)}
fields = append(fields, additional...)
h.log.Error("reqeust failed", fields...) // consider using h.reqLogger (it requires accept context.Context or http.Request)
h.log.Error("request failed", fields...) // consider using h.reqLogger (it requires accept context.Context or http.Request)
}
func transformToS3Error(err error) error {
if _, ok := err.(errors.Error); ok {
err = frosterrors.UnwrapErr(err) // this wouldn't work with errors.Join
if _, ok := err.(s3errors.Error); ok {
return err
}
if errorsStd.Is(err, layer.ErrAccessDenied) ||
errorsStd.Is(err, layer.ErrNodeAccessDenied) {
return errors.GetAPIError(errors.ErrAccessDenied)
if errors.Is(err, layer.ErrAccessDenied) ||
errors.Is(err, layer.ErrNodeAccessDenied) {
return s3errors.GetAPIError(s3errors.ErrAccessDenied)
}
if errorsStd.Is(err, layer.ErrGatewayTimeout) {
return errors.GetAPIError(errors.ErrGatewayTimeout)
if errors.Is(err, layer.ErrGatewayTimeout) {
return s3errors.GetAPIError(s3errors.ErrGatewayTimeout)
}
return errors.GetAPIError(errors.ErrInternalError)
return s3errors.GetAPIError(s3errors.ErrInternalError)
}
func (h *handler) ResolveBucket(ctx context.Context, bucket string) (*data.BucketInfo, error) {
@ -99,26 +102,26 @@ func parseRange(s string) (*layer.RangeParams, error) {
prefix := "bytes="
if !strings.HasPrefix(s, prefix) {
return nil, errors.GetAPIError(errors.ErrInvalidRange)
return nil, s3errors.GetAPIError(s3errors.ErrInvalidRange)
}
s = strings.TrimPrefix(s, prefix)
valuesStr := strings.Split(s, "-")
if len(valuesStr) != 2 {
return nil, errors.GetAPIError(errors.ErrInvalidRange)
return nil, s3errors.GetAPIError(s3errors.ErrInvalidRange)
}
values := make([]uint64, 0, len(valuesStr))
for _, v := range valuesStr {
num, err := strconv.ParseUint(v, 10, 64)
if err != nil {
return nil, errors.GetAPIError(errors.ErrInvalidRange)
return nil, s3errors.GetAPIError(s3errors.ErrInvalidRange)
}
values = append(values, num)
}
if values[0] > values[1] {
return nil, errors.GetAPIError(errors.ErrInvalidRange)
return nil, s3errors.GetAPIError(s3errors.ErrInvalidRange)
}
return &layer.RangeParams{
@ -134,7 +137,7 @@ func getSessionTokenSetEACL(ctx context.Context) (*session.Container, error) {
}
sessionToken := boxData.Gate.SessionTokenForSetEACL()
if sessionToken == nil {
return nil, errors.GetAPIError(errors.ErrAccessDenied)
return nil, s3errors.GetAPIError(s3errors.ErrAccessDenied)
}
return sessionToken, nil

api/handler/util_test.go (new file, 64 lines)
View file

@ -0,0 +1,64 @@
package handler
import (
"errors"
"fmt"
"testing"
s3errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
"github.com/stretchr/testify/require"
)
func TestTransformS3Errors(t *testing.T) {
for _, tc := range []struct {
name string
err error
expected s3errors.ErrorCode
}{
{
name: "simple std error to internal error",
err: errors.New("some error"),
expected: s3errors.ErrInternalError,
},
{
name: "layer access denied error to s3 access denied error",
err: layer.ErrAccessDenied,
expected: s3errors.ErrAccessDenied,
},
{
name: "wrapped layer access denied error to s3 access denied error",
err: fmt.Errorf("wrap: %w", layer.ErrAccessDenied),
expected: s3errors.ErrAccessDenied,
},
{
name: "layer node access denied error to s3 access denied error",
err: layer.ErrNodeAccessDenied,
expected: s3errors.ErrAccessDenied,
},
{
name: "layer gateway timeout error to s3 gateway timeout error",
err: layer.ErrGatewayTimeout,
expected: s3errors.ErrGatewayTimeout,
},
{
name: "s3 error to s3 error",
err: s3errors.GetAPIError(s3errors.ErrInvalidPart),
expected: s3errors.ErrInvalidPart,
},
{
name: "wrapped s3 error to s3 error",
err: fmt.Errorf("wrap: %w", s3errors.GetAPIError(s3errors.ErrInvalidPart)),
expected: s3errors.ErrInvalidPart,
},
} {
t.Run(tc.name, func(t *testing.T) {
err := transformToS3Error(tc.err)
s3err, ok := err.(s3errors.Error)
require.True(t, ok, "error must be s3 error")
require.Equalf(t, tc.expected, s3err.ErrCode,
"expected: '%s', got: '%s'",
s3errors.GetAPIError(tc.expected).Code, s3errors.GetAPIError(s3err.ErrCode).Code)
})
}
}
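The wrapping convention these tests exercise, fmt.Errorf("%w: %s", apiErr, cause), keeps the S3 error detectable while preserving the underlying cause in the message. A minimal standalone sketch with plain standard-library sentinels (not the gateway's s3errors type):

package main

import (
	"errors"
	"fmt"
)

var errNoSuchKey = errors.New("NoSuchKey")

func lookup() error {
	cause := errors.New("tree node not found")
	// Same pattern as in the handlers above: wrap the API-level error, keep the cause in the text.
	return fmt.Errorf("%w: %s", errNoSuchKey, cause.Error())
}

func main() {
	err := lookup()
	fmt.Println(errors.Is(err, errNoSuchKey)) // true
	fmt.Println(err)                          // NoSuchKey: tree node not found
}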

View file

@ -4,14 +4,14 @@ import (
"encoding/xml"
"net/http"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
)
func (h *handler) PutBucketVersioningHandler(w http.ResponseWriter, r *http.Request) {
reqInfo := api.GetReqInfo(r.Context())
reqInfo := middleware.GetReqInfo(r.Context())
configuration := new(VersioningConfiguration)
if err := xml.NewDecoder(r.Body).Decode(configuration); err != nil {
@ -57,7 +57,7 @@ func (h *handler) PutBucketVersioningHandler(w http.ResponseWriter, r *http.Requ
// GetBucketVersioningHandler implements bucket versioning getter handler.
func (h *handler) GetBucketVersioningHandler(w http.ResponseWriter, r *http.Request) {
reqInfo := api.GetReqInfo(r.Context())
reqInfo := middleware.GetReqInfo(r.Context())
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil {
@ -71,7 +71,7 @@ func (h *handler) GetBucketVersioningHandler(w http.ResponseWriter, r *http.Requ
return
}
if err = api.EncodeToResponse(w, formVersioningConfiguration(settings)); err != nil {
if err = middleware.EncodeToResponse(w, formVersioningConfiguration(settings)); err != nil {
h.logAndSendError(w, "something went wrong", reqInfo, err)
}
}

View file

@ -1,5 +1,7 @@
package api
import "net/http"
// Standard S3 HTTP request/response constants.
const (
MetadataPrefix = "X-Amz-Meta-"
@ -39,11 +41,13 @@ const (
IfMatch = "If-Match"
IfNoneMatch = "If-None-Match"
AmzContentSha256 = "X-Amz-Content-Sha256"
AmzCopyIfModifiedSince = "X-Amz-Copy-Source-If-Modified-Since"
AmzCopyIfUnmodifiedSince = "X-Amz-Copy-Source-If-Unmodified-Since"
AmzCopyIfMatch = "X-Amz-Copy-Source-If-Match"
AmzCopyIfNoneMatch = "X-Amz-Copy-Source-If-None-Match"
AmzACL = "X-Amz-Acl"
AmzDecodedContentLength = "X-Amz-Decoded-Content-Length"
AmzGrantFullControl = "X-Amz-Grant-Full-Control"
AmzGrantRead = "X-Amz-Grant-Read"
AmzGrantWrite = "X-Amz-Grant-Write"
@ -78,9 +82,13 @@ const (
AccessControlRequestMethod = "Access-Control-Request-Method"
AccessControlRequestHeaders = "Access-Control-Request-Headers"
AwsChunked = "aws-chunked"
Vary = "Vary"
DefaultLocationConstraint = "default"
StreamingContentSHA256 = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD"
)
// S3 request query params.
@ -107,3 +115,8 @@ var SystemMetadata = map[string]struct{}{
LastModified: {},
ETag: {},
}
func IsSignedStreamingV4(r *http.Request) bool {
return r.Header.Get(AmzContentSha256) == StreamingContentSHA256 &&
r.Method == http.MethodPut
}
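A short sketch of a request that the new IsSignedStreamingV4 helper reports as a signed streaming (aws-chunked) upload:

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
)

func main() {
	// PUT with the streaming payload marker in X-Amz-Content-Sha256.
	r := httptest.NewRequest(http.MethodPut, "/bucket/object", nil)
	r.Header.Set(api.AmzContentSha256, api.StreamingContentSHA256)

	fmt.Println(api.IsSignedStreamingV4(r)) // true
}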

api/host_bucket_router.go (new file, 71 lines)
View file

@ -0,0 +1,71 @@
package api
import (
"net/http"
"strings"
"github.com/go-chi/chi/v5"
)
type HostBucketRouter struct {
routes map[string]chi.Router
bktParam string
defaultRouter chi.Router
}
func NewHostBucketRouter(bktParam string) HostBucketRouter {
return HostBucketRouter{
routes: make(map[string]chi.Router),
bktParam: bktParam,
}
}
func (hr *HostBucketRouter) Default(router chi.Router) {
hr.defaultRouter = router
}
func (hr HostBucketRouter) Map(host string, h chi.Router) {
hr.routes[strings.ToLower(host)] = h
}
func (hr HostBucketRouter) ServeHTTP(w http.ResponseWriter, r *http.Request) {
bucket, domain := getBucketDomain(getHost(r))
router, ok := hr.routes[strings.ToLower(domain)]
if !ok {
router = hr.defaultRouter
if router == nil {
http.Error(w, http.StatusText(404), 404)
return
}
}
if rctx := chi.RouteContext(r.Context()); rctx != nil && bucket != "" {
rctx.URLParams.Add(hr.bktParam, bucket)
}
router.ServeHTTP(w, r)
}
func getBucketDomain(host string) (bucket string, domain string) {
parts := strings.Split(host, ".")
if len(parts) > 1 {
return parts[0], strings.Join(parts[1:], ".")
}
return "", host
}
// getHost tries its best to return the request host.
// According to section 14.23 of RFC 2616 the Host header
// can include the port number if the default value of 80 is not used.
func getHost(r *http.Request) string {
host := r.Host
if r.URL.IsAbs() {
host = r.URL.Host
}
if i := strings.Index(host, ":"); i != -1 {
host = host[:i]
}
return host
}
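A wiring sketch for this router, assuming it is mounted inside an outer chi router so a routing context already exists (the bucket parameter is only injected when chi.RouteContext is non-nil); the hosts, handlers and listen address below are illustrative.

package main

import (
	"log"
	"net/http"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
	"github.com/go-chi/chi/v5"
)

func main() {
	// Default (path-style) router: the bucket name comes from the URL path.
	pathStyle := chi.NewRouter()
	pathStyle.Get("/{bucket}", func(w http.ResponseWriter, r *http.Request) {
		_, _ = w.Write([]byte("path-style bucket: " + chi.URLParam(r, "bucket")))
	})

	// Host-style router: the bucket name is injected by HostBucketRouter.
	hostStyle := chi.NewRouter()
	hostStyle.Get("/", func(w http.ResponseWriter, r *http.Request) {
		_, _ = w.Write([]byte("host-style bucket: " + chi.URLParam(r, "bucket")))
	})

	hr := api.NewHostBucketRouter("bucket")
	hr.Default(pathStyle)
	hr.Map("s3.example.com", hostStyle) // bucket.s3.example.com -> hostStyle

	// Mount under a chi router so chi.RouteContext is populated before hr runs.
	root := chi.NewRouter()
	root.Mount("/", hr)

	log.Fatal(http.ListenAndServe(":8080", root))
}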

View file

@ -2,15 +2,16 @@ package layer
import (
"context"
errorsStd "errors"
"errors"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
s3errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
)
func (n *layer) GetObjectTaggingAndLock(ctx context.Context, objVersion *ObjectVersion, nodeVersion *data.NodeVersion) (map[string]string, *data.LockInfo, error) {
var err error
owner := n.Owner(ctx)
owner := n.BearerOwner(ctx)
tags := n.cache.GetTagging(owner, objectTaggingCacheKey(objVersion))
lockInfo := n.cache.GetLockInfo(owner, lockObjectKey(objVersion))
@ -28,8 +29,8 @@ func (n *layer) GetObjectTaggingAndLock(ctx context.Context, objVersion *ObjectV
tags, lockInfo, err = n.treeService.GetObjectTaggingAndLock(ctx, objVersion.BktInfo, nodeVersion)
if err != nil {
if errorsStd.Is(err, ErrNodeNotFound) {
return nil, nil, errors.GetAPIError(errors.ErrNoSuchKey)
if errors.Is(err, ErrNodeNotFound) {
return nil, nil, fmt.Errorf("%w: %s", s3errors.GetAPIError(s3errors.ErrNoSuchKey), err.Error())
}
return nil, nil, err
}

View file

@ -8,7 +8,7 @@ import (
v2container "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
s3errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
@ -43,10 +43,8 @@ func (n *layer) containerInfo(ctx context.Context, idCnr cid.ID) (*data.BucketIn
)
res, err = n.frostFS.Container(ctx, idCnr)
if err != nil {
log.Error("could not fetch container", zap.Error(err))
if client.IsErrContainerNotFound(err) {
return nil, errors.GetAPIError(errors.ErrNoSuchBucket)
return nil, fmt.Errorf("%w: %s", s3errors.GetAPIError(s3errors.ErrNoSuchBucket), err.Error())
}
return nil, fmt.Errorf("get frostfs container: %w", err)
}
@ -78,12 +76,7 @@ func (n *layer) containerInfo(ctx context.Context, idCnr cid.ID) (*data.BucketIn
}
func (n *layer) containerList(ctx context.Context) ([]*data.BucketInfo, error) {
var (
err error
own = n.Owner(ctx)
res []cid.ID
)
res, err = n.frostFS.UserContainers(ctx, own)
res, err := n.frostFS.UserContainers(ctx, n.BearerOwner(ctx))
if err != nil {
n.reqLogger(ctx).Error("could not list user containers", zap.Error(err))
return nil, err
@ -104,14 +97,13 @@ func (n *layer) containerList(ctx context.Context) ([]*data.BucketInfo, error) {
}
func (n *layer) createContainer(ctx context.Context, p *CreateBucketParams) (*data.BucketInfo, error) {
ownerID := n.Owner(ctx)
if p.LocationConstraint == "" {
p.LocationConstraint = api.DefaultLocationConstraint // s3tests_boto3.functional.test_s3:test_bucket_get_location
}
bktInfo := &data.BucketInfo{
Name: p.Name,
Zone: v2container.SysAttributeZoneDefault,
Owner: ownerID,
Owner: n.BearerOwner(ctx),
Created: TimeNow(ctx),
LocationConstraint: p.LocationConstraint,
ObjectLockEnabled: p.ObjectLockEnabled,

View file

@ -38,7 +38,6 @@ func (n *layer) PutBucketCORS(ctx context.Context, p *PutCORSParams) error {
prm := PrmObjectCreate{
Container: p.BktInfo.CID,
Creator: p.BktInfo.Owner,
Payload: &buf,
Filepath: p.BktInfo.CORSObjectName(),
CreationTime: TimeNow(ctx),
@ -64,7 +63,7 @@ func (n *layer) PutBucketCORS(ctx context.Context, p *PutCORSParams) error {
}
}
n.cache.PutCORS(n.Owner(ctx), p.BktInfo, cors)
n.cache.PutCORS(n.BearerOwner(ctx), p.BktInfo, cors)
return nil
}
@ -73,7 +72,7 @@ func (n *layer) GetBucketCORS(ctx context.Context, bktInfo *data.BucketInfo) (*d
cors, err := n.getCORS(ctx, bktInfo)
if err != nil {
if errorsStd.Is(err, ErrNodeNotFound) {
return nil, errors.GetAPIError(errors.ErrNoSuchCORSConfiguration)
return nil, fmt.Errorf("%w: %s", errors.GetAPIError(errors.ErrNoSuchCORSConfiguration), err.Error())
}
return nil, err
}

View file

@ -91,9 +91,6 @@ type PrmObjectCreate struct {
// Container to store the object.
Container cid.ID
// FrostFS identifier of the object creator.
Creator user.ID
// Key-value object attributes.
Attributes [][2]string

View file

@ -11,7 +11,7 @@ import (
"time"
objectv2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
@ -23,24 +23,29 @@ import (
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
)
type TestFrostFS struct {
FrostFS
objects map[string]*object.Object
objectErrors map[string]error
containers map[string]*container.Container
eaclTables map[string]*eacl.Table
currentEpoch uint64
objects map[string]*object.Object
objectErrors map[string]error
objectPutErrors map[string]error
containers map[string]*container.Container
eaclTables map[string]*eacl.Table
currentEpoch uint64
key *keys.PrivateKey
}
func NewTestFrostFS() *TestFrostFS {
func NewTestFrostFS(key *keys.PrivateKey) *TestFrostFS {
return &TestFrostFS{
objects: make(map[string]*object.Object),
objectErrors: make(map[string]error),
containers: make(map[string]*container.Container),
eaclTables: make(map[string]*eacl.Table),
objects: make(map[string]*object.Object),
objectErrors: make(map[string]error),
objectPutErrors: make(map[string]error),
containers: make(map[string]*container.Container),
eaclTables: make(map[string]*eacl.Table),
key: key,
}
}
@ -56,6 +61,14 @@ func (t *TestFrostFS) SetObjectError(addr oid.Address, err error) {
}
}
func (t *TestFrostFS) SetObjectPutError(fileName string, err error) {
if err == nil {
delete(t.objectPutErrors, fileName)
} else {
t.objectPutErrors[fileName] = err
}
}
func (t *TestFrostFS) Objects() []*object.Object {
res := make([]*object.Object, 0, len(t.objects))
@ -168,8 +181,8 @@ func (t *TestFrostFS) ReadObject(ctx context.Context, prm PrmObjectRead) (*Objec
}
if obj, ok := t.objects[sAddr]; ok {
owner := getOwner(ctx)
if !obj.OwnerID().Equals(owner) && !t.isPublicRead(prm.Container) {
owner := getBearerOwner(ctx)
if !t.checkAccess(prm.Container, owner, eacl.OperationGet) {
return nil, ErrAccessDenied
}
@ -199,6 +212,10 @@ func (t *TestFrostFS) CreateObject(_ context.Context, prm PrmObjectCreate) (oid.
attrs := make([]object.Attribute, 0)
if err := t.objectPutErrors[prm.Filepath]; err != nil {
return oid.ID{}, err
}
if prm.Filepath != "" {
a := object.NewAttribute()
a.SetKey(object.AttributeFilePath)
@ -213,13 +230,16 @@ func (t *TestFrostFS) CreateObject(_ context.Context, prm PrmObjectCreate) (oid.
attrs = append(attrs, *a)
}
var owner user.ID
user.IDFromKey(&owner, t.key.PrivateKey.PublicKey)
obj := object.New()
obj.SetContainerID(prm.Container)
obj.SetID(id)
obj.SetPayloadSize(prm.PayloadSize)
obj.SetAttributes(attrs...)
obj.SetCreationEpoch(t.currentEpoch)
obj.SetOwnerID(&prm.Creator)
obj.SetOwnerID(&owner)
t.currentEpoch++
if len(prm.Locks) > 0 {
@ -257,9 +277,9 @@ func (t *TestFrostFS) DeleteObject(ctx context.Context, prm PrmObjectDelete) err
return err
}
if obj, ok := t.objects[addr.EncodeToString()]; ok {
owner := getOwner(ctx)
if !obj.OwnerID().Equals(owner) {
if _, ok := t.objects[addr.EncodeToString()]; ok {
owner := getBearerOwner(ctx)
if !t.checkAccess(prm.Container, owner, eacl.OperationDelete) {
return ErrAccessDenied
}
@ -311,27 +331,43 @@ func (t *TestFrostFS) ContainerEACL(_ context.Context, cnrID cid.ID) (*eacl.Tabl
return table, nil
}
func (t *TestFrostFS) isPublicRead(cnrID cid.ID) bool {
table, ok := t.eaclTables[cnrID.EncodeToString()]
func (t *TestFrostFS) checkAccess(cnrID cid.ID, owner user.ID, op eacl.Operation) bool {
cnr, ok := t.containers[cnrID.EncodeToString()]
if !ok {
return false
}
if !cnr.BasicACL().Extendable() {
return cnr.Owner().Equals(owner)
}
table, ok := t.eaclTables[cnrID.EncodeToString()]
if !ok {
return true
}
for _, rec := range table.Records() {
if rec.Operation() == eacl.OperationGet && len(rec.Filters()) == 0 {
if rec.Operation() == op && len(rec.Filters()) == 0 {
for _, trgt := range rec.Targets() {
if trgt.Role() == eacl.RoleOthers {
return rec.Action() == eacl.ActionAllow
}
var targetOwner user.ID
for _, pk := range eacl.TargetECDSAKeys(&trgt) {
user.IDFromKey(&targetOwner, *pk)
if targetOwner.Equals(owner) {
return rec.Action() == eacl.ActionAllow
}
}
}
}
}
return false
return true
}
func getOwner(ctx context.Context) user.ID {
if bd, ok := ctx.Value(api.BoxData).(*accessbox.Box); ok && bd != nil && bd.Gate != nil && bd.Gate.BearerToken != nil {
func getBearerOwner(ctx context.Context) user.ID {
if bd, ok := ctx.Value(middleware.BoxData).(*accessbox.Box); ok && bd != nil && bd.Gate != nil && bd.Gate.BearerToken != nil {
return bearer.ResolveIssuer(*bd.Gate.BearerToken)
}
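A small usage sketch for the reworked mock, assuming it lives in the api/layer package alongside the types it implements; the key now determines the owner of objects the mock creates, and SetObjectPutError makes CreateObject fail for a given file path.

package main

import (
	"errors"
	"log"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
)

func main() {
	key, err := keys.NewPrivateKey()
	if err != nil {
		log.Fatal(err)
	}

	// Objects created through the mock are owned by the user ID derived from this key.
	mock := layer.NewTestFrostFS(key)

	// Simulate a failing CreateObject for one file path (illustrative name).
	mock.SetObjectPutError("bucket/object", errors.New("simulated put failure"))
}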

View file

@ -15,6 +15,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/encryption"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
@ -47,6 +48,7 @@ type (
layer struct {
frostFS FrostFS
gateOwner user.ID
log *zap.Logger
anonKey AnonymousKey
resolver BucketResolver
@ -56,6 +58,7 @@ type (
}
Config struct {
GateOwner user.ID
ChainAddress string
Caches *CachesConfig
AnonKey AnonymousKey
@ -73,7 +76,7 @@ type (
Range *RangeParams
ObjectInfo *data.ObjectInfo
BucketInfo *data.BucketInfo
Writer io.Writer
Versioned bool
Encryption encryption.Params
}
@ -110,6 +113,15 @@ type (
CopiesNumbers []uint32
}
PutCombinedObjectParams struct {
BktInfo *data.BucketInfo
Object string
Size uint64
Header map[string]string
Lock *data.ObjectLock
Encryption encryption.Params
}
DeleteObjectParams struct {
BktInfo *data.BucketInfo
Objects []*VersionedObject
@ -131,6 +143,7 @@ type (
// CopyObjectParams stores object copy request parameters.
CopyObjectParams struct {
SrcVersioned bool
SrcObject *data.ObjectInfo
ScrBktInfo *data.BucketInfo
DstBktInfo *data.BucketInfo
@ -184,6 +197,13 @@ type (
Error error
}
ObjectPayload struct {
r io.Reader
params getParams
encrypted bool
decryptedLen uint64
}
// Client provides S3 API client interface.
Client interface {
Initialize(ctx context.Context, c EventListener) error
@ -203,7 +223,7 @@ type (
CreateBucket(ctx context.Context, p *CreateBucketParams) (*data.BucketInfo, error)
DeleteBucket(ctx context.Context, p *DeleteBucketParams) error
GetObject(ctx context.Context, p *GetObjectParams) error
GetObject(ctx context.Context, p *GetObjectParams) (*ObjectPayload, error)
GetObjectInfo(ctx context.Context, p *HeadObjectParams) (*data.ObjectInfo, error)
GetExtendedObjectInfo(ctx context.Context, p *HeadObjectParams) (*data.ExtendedObjectInfo, error)
@ -267,12 +287,17 @@ func (f MsgHandlerFunc) HandleMessage(ctx context.Context, msg *nats.Msg) error
return f(ctx, msg)
}
func (p HeadObjectParams) Versioned() bool {
return len(p.VersionID) > 0
}
// NewLayer creates an instance of a layer. It checks credentials
// and establishes gRPC connection with the node.
func NewLayer(log *zap.Logger, frostFS FrostFS, config *Config) Client {
return &layer{
frostFS: frostFS,
log: log,
gateOwner: config.GateOwner,
anonKey: config.AnonKey,
resolver: config.Resolver,
cache: NewCache(config.Caches),
@ -303,22 +328,22 @@ func (n *layer) IsNotificationEnabled() bool {
// IsAuthenticatedRequest checks if access box exists in the current request.
func IsAuthenticatedRequest(ctx context.Context) bool {
_, ok := ctx.Value(api.BoxData).(*accessbox.Box)
_, ok := ctx.Value(middleware.BoxData).(*accessbox.Box)
return ok
}
// TimeNow returns client time from request or time.Now().
func TimeNow(ctx context.Context) time.Time {
if now, ok := ctx.Value(api.ClientTime).(time.Time); ok {
if now, ok := ctx.Value(middleware.ClientTime).(time.Time); ok {
return now
}
return time.Now()
}
// Owner returns owner id from BearerToken (context) or from client owner.
func (n *layer) Owner(ctx context.Context) user.ID {
if bd, ok := ctx.Value(api.BoxData).(*accessbox.Box); ok && bd != nil && bd.Gate != nil && bd.Gate.BearerToken != nil {
// BearerOwner returns owner id from BearerToken (context) or from client owner.
func (n *layer) BearerOwner(ctx context.Context) user.ID {
if bd, ok := ctx.Value(middleware.BoxData).(*accessbox.Box); ok && bd != nil && bd.Gate != nil && bd.Gate.BearerToken != nil {
return bearer.ResolveIssuer(*bd.Gate.BearerToken)
}
@ -329,7 +354,7 @@ func (n *layer) Owner(ctx context.Context) user.ID {
}
func (n *layer) reqLogger(ctx context.Context) *zap.Logger {
reqLogger := api.GetReqLog(ctx)
reqLogger := middleware.GetReqLog(ctx)
if reqLogger != nil {
return reqLogger
}
@ -337,7 +362,7 @@ func (n *layer) reqLogger(ctx context.Context) *zap.Logger {
}
func (n *layer) prepareAuthParameters(ctx context.Context, prm *PrmAuth, bktOwner user.ID) {
if bd, ok := ctx.Value(api.BoxData).(*accessbox.Box); ok && bd != nil && bd.Gate != nil && bd.Gate.BearerToken != nil {
if bd, ok := ctx.Value(middleware.BoxData).(*accessbox.Box); ok && bd != nil && bd.Gate != nil && bd.Gate.BearerToken != nil {
if bd.Gate.BearerToken.Impersonate() || bktOwner.Equals(bearer.ResolveIssuer(*bd.Gate.BearerToken)) {
prm.BearerToken = bd.Gate.BearerToken
return
@ -361,7 +386,7 @@ func (n *layer) GetBucketInfo(ctx context.Context, name string) (*data.BucketInf
containerID, err := n.ResolveBucket(ctx, name)
if err != nil {
if strings.Contains(err.Error(), "not found") {
return nil, errors.GetAPIError(errors.ErrNoSuchBucket)
return nil, fmt.Errorf("%w: %s", errors.GetAPIError(errors.ErrNoSuchBucket), err.Error())
}
return nil, err
}
@ -394,10 +419,10 @@ func (n *layer) ListBuckets(ctx context.Context) ([]*data.BucketInfo, error) {
}
// GetObject from storage.
func (n *layer) GetObject(ctx context.Context, p *GetObjectParams) error {
func (n *layer) GetObject(ctx context.Context, p *GetObjectParams) (*ObjectPayload, error) {
var params getParams
params.oid = p.ObjectInfo.ID
params.objInfo = p.ObjectInfo
params.bktInfo = p.BucketInfo
var decReader *encryption.Decrypter
@ -405,7 +430,7 @@ func (n *layer) GetObject(ctx context.Context, p *GetObjectParams) error {
var err error
decReader, err = getDecrypter(p)
if err != nil {
return fmt.Errorf("creating decrypter: %w", err)
return nil, fmt.Errorf("creating decrypter: %w", err)
}
params.off = decReader.EncryptedOffset()
params.ln = decReader.EncryptedLength()
@ -419,32 +444,58 @@ func (n *layer) GetObject(ctx context.Context, p *GetObjectParams) error {
}
}
payload, err := n.initObjectPayloadReader(ctx, params)
r, err := n.initObjectPayloadReader(ctx, params)
if err != nil {
return fmt.Errorf("init object payload reader: %w", err)
if client.IsErrObjectNotFound(err) {
if p.Versioned {
err = fmt.Errorf("%w: %s", errors.GetAPIError(errors.ErrNoSuchVersion), err.Error())
} else {
err = fmt.Errorf("%w: %s", errors.GetAPIError(errors.ErrNoSuchKey), err.Error())
}
}
return nil, fmt.Errorf("init object payload reader: %w", err)
}
var decryptedLen uint64
if decReader != nil {
if err = decReader.SetReader(r); err != nil {
return nil, fmt.Errorf("set reader to decrypter: %w", err)
}
r = io.LimitReader(decReader, int64(decReader.DecryptedLength()))
decryptedLen = decReader.DecryptedLength()
}
return &ObjectPayload{
r: r,
params: params,
encrypted: decReader != nil,
decryptedLen: decryptedLen,
}, nil
}
// Read implements io.Reader. If you want to use ObjectPayload as io.Reader
// you must not use ObjectPayload.StreamTo method and vice versa.
func (o *ObjectPayload) Read(p []byte) (int, error) {
return o.r.Read(p)
}
// StreamTo reads all payload to provided writer.
// If you want to use this method you must not use ObjectPayload.Read and vice versa.
func (o *ObjectPayload) StreamTo(w io.Writer) error {
bufSize := uint64(32 * 1024) // configure?
if params.ln != 0 && params.ln < bufSize {
bufSize = params.ln
if o.params.ln != 0 && o.params.ln < bufSize {
bufSize = o.params.ln
}
// alloc buffer for copying
buf := make([]byte, bufSize) // sync-pool it?
r := payload
if decReader != nil {
if err = decReader.SetReader(payload); err != nil {
return fmt.Errorf("set reader to decrypter: %w", err)
}
r = io.LimitReader(decReader, int64(decReader.DecryptedLength()))
}
// copy full payload
written, err := io.CopyBuffer(p.Writer, r, buf)
written, err := io.CopyBuffer(w, o.r, buf)
if err != nil {
if decReader != nil {
return fmt.Errorf("copy object payload written: '%d', decLength: '%d', params.ln: '%d' : %w", written, decReader.DecryptedLength(), params.ln, err)
if o.encrypted {
return fmt.Errorf("copy object payload written: '%d', decLength: '%d', params.ln: '%d' : %w", written, o.decryptedLen, o.params.ln, err)
}
return fmt.Errorf("copy object payload written: '%d': %w", written, err)
}
@ -496,10 +547,10 @@ func (n *layer) GetExtendedObjectInfo(ctx context.Context, p *HeadObjectParams)
var objInfo *data.ExtendedObjectInfo
var err error
if len(p.VersionID) == 0 {
objInfo, err = n.headLastVersionIfNotDeleted(ctx, p.BktInfo, p.Object)
} else {
if p.Versioned() {
objInfo, err = n.headVersion(ctx, p.BktInfo, p)
} else {
objInfo, err = n.headLastVersionIfNotDeleted(ctx, p.BktInfo, p.Object)
}
if err != nil {
return nil, err
@ -514,27 +565,22 @@ func (n *layer) GetExtendedObjectInfo(ctx context.Context, p *HeadObjectParams)
// CopyObject from one bucket into another bucket.
func (n *layer) CopyObject(ctx context.Context, p *CopyObjectParams) (*data.ExtendedObjectInfo, error) {
pr, pw := io.Pipe()
go func() {
err := n.GetObject(ctx, &GetObjectParams{
ObjectInfo: p.SrcObject,
Writer: pw,
Range: p.Range,
BucketInfo: p.ScrBktInfo,
Encryption: p.Encryption,
})
if err = pw.CloseWithError(err); err != nil {
n.reqLogger(ctx).Error("could not get object", zap.Error(err))
}
}()
objPayload, err := n.GetObject(ctx, &GetObjectParams{
ObjectInfo: p.SrcObject,
Versioned: p.SrcVersioned,
Range: p.Range,
BucketInfo: p.ScrBktInfo,
Encryption: p.Encryption,
})
if err != nil {
return nil, fmt.Errorf("get object to copy: %w", err)
}
return n.PutObject(ctx, &PutObjectParams{
BktInfo: p.DstBktInfo,
Object: p.DstObject,
Size: p.SrcSize,
Reader: pr,
Reader: objPayload,
Header: p.Header,
Encryption: p.Encryption,
CopiesNumbers: p.CopiesNumbers,
@ -615,7 +661,7 @@ func (n *layer) deleteObject(ctx context.Context, bkt *data.BucketInfo, settings
},
DeleteMarker: &data.DeleteMarkerInfo{
Created: TimeNow(ctx),
Owner: n.Owner(ctx),
Owner: n.gateOwner,
},
IsUnversioned: settings.VersioningSuspended(),
}

View file

@ -0,0 +1,149 @@
package layer
import (
"context"
"errors"
"fmt"
"io"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)
type partObj struct {
OID oid.ID
Size uint64
}
type readerInitiator interface {
initFrostFSObjectPayloadReader(ctx context.Context, p getFrostFSParams) (io.Reader, error)
}
// implements io.Reader of payloads of the object list stored in the FrostFS network.
type multiObjectReader struct {
ctx context.Context
layer readerInitiator
startPartOffset uint64
endPartLength uint64
prm getFrostFSParams
curIndex int
curReader io.Reader
parts []partObj
}
type multiObjectReaderConfig struct {
layer readerInitiator
// the offset of complete object and total size to read
off, ln uint64
bktInfo *data.BucketInfo
parts []partObj
}
var (
errOffsetIsOutOfRange = errors.New("offset is out of payload range")
errLengthIsOutOfRange = errors.New("length is out of payload range")
errEmptyPartsList = errors.New("empty parts list")
errorZeroRangeLength = errors.New("zero range length")
)
func newMultiObjectReader(ctx context.Context, cfg multiObjectReaderConfig) (*multiObjectReader, error) {
if len(cfg.parts) == 0 {
return nil, errEmptyPartsList
}
r := &multiObjectReader{
ctx: ctx,
layer: cfg.layer,
prm: getFrostFSParams{
bktInfo: cfg.bktInfo,
},
parts: cfg.parts,
}
if cfg.off+cfg.ln == 0 {
return r, nil
}
if cfg.off > 0 && cfg.ln == 0 {
return nil, errorZeroRangeLength
}
startPartIndex, startPartOffset := findStartPart(cfg)
if startPartIndex == -1 {
return nil, errOffsetIsOutOfRange
}
r.startPartOffset = startPartOffset
endPartIndex, endPartLength := findEndPart(cfg)
if endPartIndex == -1 {
return nil, errLengthIsOutOfRange
}
r.endPartLength = endPartLength
r.parts = cfg.parts[startPartIndex : endPartIndex+1]
return r, nil
}
func findStartPart(cfg multiObjectReaderConfig) (index int, offset uint64) {
return findPartByPosition(cfg.off, cfg.parts)
}
func findEndPart(cfg multiObjectReaderConfig) (index int, length uint64) {
return findPartByPosition(cfg.off+cfg.ln, cfg.parts)
}
func findPartByPosition(position uint64, parts []partObj) (index int, positionInPart uint64) {
for i, part := range parts {
if position <= part.Size {
return i, position
}
position -= part.Size
}
return -1, 0
}
func (x *multiObjectReader) Read(p []byte) (n int, err error) {
if x.curReader != nil {
n, err = x.curReader.Read(p)
if !errors.Is(err, io.EOF) {
return n, err
}
x.curIndex++
}
if x.curIndex == len(x.parts) {
return n, io.EOF
}
x.prm.oid = x.parts[x.curIndex].OID
if x.curIndex == 0 {
x.prm.off = x.startPartOffset
x.prm.ln = x.parts[x.curIndex].Size - x.startPartOffset
}
if x.curIndex == len(x.parts)-1 {
x.prm.ln = x.endPartLength - x.prm.off
}
x.curReader, err = x.layer.initFrostFSObjectPayloadReader(x.ctx, x.prm)
if err != nil {
return n, fmt.Errorf("init payload reader for the next part: %w", err)
}
x.prm.off = 0
x.prm.ln = 0
next, err := x.Read(p[n:])
return n + next, err
}

View file

@ -0,0 +1,127 @@
package layer
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"testing"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
"github.com/stretchr/testify/require"
)
type readerInitiatorMock struct {
parts map[oid.ID][]byte
}
func (r *readerInitiatorMock) initFrostFSObjectPayloadReader(_ context.Context, p getFrostFSParams) (io.Reader, error) {
partPayload, ok := r.parts[p.oid]
if !ok {
return nil, errors.New("part not found")
}
if p.off+p.ln == 0 {
return bytes.NewReader(partPayload), nil
}
if p.off > uint64(len(partPayload)-1) {
return nil, fmt.Errorf("invalid offset: %d/%d", p.off, len(partPayload))
}
if p.off+p.ln > uint64(len(partPayload)) {
return nil, fmt.Errorf("invalid range: %d-%d/%d", p.off, p.off+p.ln, len(partPayload))
}
return bytes.NewReader(partPayload[p.off : p.off+p.ln]), nil
}
func prepareDataReader() ([]byte, []partObj, *readerInitiatorMock) {
mockInitReader := &readerInitiatorMock{
parts: map[oid.ID][]byte{
oidtest.ID(): []byte("first part 1"),
oidtest.ID(): []byte("second part 2"),
oidtest.ID(): []byte("third part 3"),
},
}
var fullPayload []byte
parts := make([]partObj, 0, len(mockInitReader.parts))
for id, payload := range mockInitReader.parts {
parts = append(parts, partObj{OID: id, Size: uint64(len(payload))})
fullPayload = append(fullPayload, payload...)
}
return fullPayload, parts, mockInitReader
}
func TestMultiReader(t *testing.T) {
ctx := context.Background()
fullPayload, parts, mockInitReader := prepareDataReader()
for _, tc := range []struct {
name string
off uint64
ln uint64
err error
}{
{
name: "simple read all",
},
{
name: "simple read with length",
ln: uint64(len(fullPayload)),
},
{
name: "middle of parts",
off: parts[0].Size + 2,
ln: 4,
},
{
name: "first and second",
off: parts[0].Size - 4,
ln: 8,
},
{
name: "first and third",
off: parts[0].Size - 4,
ln: parts[1].Size + 8,
},
{
name: "offset out of range",
off: uint64(len(fullPayload) + 1),
ln: 1,
err: errOffsetIsOutOfRange,
},
{
name: "zero length",
off: parts[1].Size + 1,
ln: 0,
err: errorZeroRangeLength,
},
} {
t.Run(tc.name, func(t *testing.T) {
multiReader, err := newMultiObjectReader(ctx, multiObjectReaderConfig{
layer: mockInitReader,
parts: parts,
off: tc.off,
ln: tc.ln,
})
require.ErrorIs(t, err, tc.err)
if tc.err == nil {
off := tc.off
ln := tc.ln
if off+ln == 0 {
ln = uint64(len(fullPayload))
}
data, err := io.ReadAll(multiReader)
require.NoError(t, err)
require.Equal(t, fullPayload[off:off+ln], data)
}
})
}
}

View file

@ -1,9 +1,11 @@
package layer
import (
"bytes"
"context"
"encoding/hex"
stderrors "errors"
"encoding/json"
"errors"
"fmt"
"io"
"sort"
@ -12,7 +14,7 @@ import (
"time"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
s3errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/encryption"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
@ -25,6 +27,10 @@ const (
UploadPartNumberAttributeName = "S3-Upload-Part-Number"
UploadCompletedParts = "S3-Completed-Parts"
// MultipartObjectSize contains the real object size if object is combined (payload contains list of parts).
// This header is used to determine if object is combined.
MultipartObjectSize = "S3-Multipart-Object-Size"
metaPrefix = "meta-"
aclPrefix = "acl-"
@ -32,8 +38,8 @@ const (
MaxSizePartsList = 1000
UploadMinPartNumber = 1
UploadMaxPartNumber = 10000
uploadMinSize = 5 * 1048576 // 5MB
uploadMaxSize = 5 * 1073741824 // 5GB
UploadMinSize = 5 * 1024 * 1024 // 5MB
UploadMaxSize = 1024 * UploadMinSize // 5GB
)
type (
@ -64,6 +70,7 @@ type (
}
UploadCopyParams struct {
Versioned bool
Info *UploadInfoParams
SrcObjInfo *data.ObjectInfo
SrcBktInfo *data.BucketInfo
@ -142,7 +149,7 @@ func (n *layer) CreateMultipartUpload(ctx context.Context, p *CreateMultipartPar
info := &data.MultipartInfo{
Key: p.Info.Key,
UploadID: p.Info.UploadID,
Owner: n.Owner(ctx),
Owner: n.gateOwner,
Created: TimeNow(ctx),
Meta: make(map[string]string, metaSize),
CopiesNumbers: p.CopiesNumbers,
@ -174,14 +181,14 @@ func (n *layer) CreateMultipartUpload(ctx context.Context, p *CreateMultipartPar
func (n *layer) UploadPart(ctx context.Context, p *UploadPartParams) (string, error) {
multipartInfo, err := n.treeService.GetMultipartUpload(ctx, p.Info.Bkt, p.Info.Key, p.Info.UploadID)
if err != nil {
if stderrors.Is(err, ErrNodeNotFound) {
return "", errors.GetAPIError(errors.ErrNoSuchUpload)
if errors.Is(err, ErrNodeNotFound) {
return "", fmt.Errorf("%w: %s", s3errors.GetAPIError(s3errors.ErrNoSuchUpload), err.Error())
}
return "", err
}
if p.Size > uploadMaxSize {
return "", errors.GetAPIError(errors.ErrEntityTooLarge)
if p.Size > UploadMaxSize {
return "", fmt.Errorf("%w: %d/%d", s3errors.GetAPIError(s3errors.ErrEntityTooLarge), p.Size, UploadMaxSize)
}
objInfo, err := n.uploadPart(ctx, multipartInfo, p)
@ -196,13 +203,12 @@ func (n *layer) uploadPart(ctx context.Context, multipartInfo *data.MultipartInf
encInfo := FormEncryptionInfo(multipartInfo.Meta)
if err := p.Info.Encryption.MatchObjectEncryption(encInfo); err != nil {
n.reqLogger(ctx).Warn("mismatched obj encryptionInfo", zap.Error(err))
return nil, errors.GetAPIError(errors.ErrInvalidEncryptionParameters)
return nil, s3errors.GetAPIError(s3errors.ErrInvalidEncryptionParameters)
}
bktInfo := p.Info.Bkt
prm := PrmObjectCreate{
Container: bktInfo.CID,
Creator: bktInfo.Owner,
Attributes: make([][2]string, 2),
Payload: p.Reader,
CreationTime: TimeNow(ctx),
@ -246,7 +252,7 @@ func (n *layer) uploadPart(ctx context.Context, multipartInfo *data.MultipartInf
}
oldPartID, err := n.treeService.AddPart(ctx, bktInfo, multipartInfo.ID, partInfo)
oldPartIDNotFound := stderrors.Is(err, ErrNoNodeToRemove)
oldPartIDNotFound := errors.Is(err, ErrNoNodeToRemove)
if err != nil && !oldPartIDNotFound {
return nil, err
}
@ -275,8 +281,8 @@ func (n *layer) uploadPart(ctx context.Context, multipartInfo *data.MultipartInf
func (n *layer) UploadPartCopy(ctx context.Context, p *UploadCopyParams) (*data.ObjectInfo, error) {
multipartInfo, err := n.treeService.GetMultipartUpload(ctx, p.Info.Bkt, p.Info.Key, p.Info.UploadID)
if err != nil {
if stderrors.Is(err, ErrNodeNotFound) {
return nil, errors.GetAPIError(errors.ErrNoSuchUpload)
if errors.Is(err, ErrNodeNotFound) {
return nil, fmt.Errorf("%w: %s", s3errors.GetAPIError(s3errors.ErrNoSuchUpload), err.Error())
}
return nil, err
}
@ -285,81 +291,37 @@ func (n *layer) UploadPartCopy(ctx context.Context, p *UploadCopyParams) (*data.
if p.Range != nil {
size = p.Range.End - p.Range.Start + 1
if p.Range.End > p.SrcObjInfo.Size {
return nil, errors.GetAPIError(errors.ErrInvalidCopyPartRangeSource)
return nil, fmt.Errorf("%w: %d-%d/%d", s3errors.GetAPIError(s3errors.ErrInvalidCopyPartRangeSource), p.Range.Start, p.Range.End, p.SrcObjInfo.Size)
}
}
if size > uploadMaxSize {
return nil, errors.GetAPIError(errors.ErrEntityTooLarge)
if size > UploadMaxSize {
return nil, fmt.Errorf("%w: %d/%d", s3errors.GetAPIError(s3errors.ErrEntityTooLarge), size, UploadMaxSize)
}
pr, pw := io.Pipe()
go func() {
err = n.GetObject(ctx, &GetObjectParams{
ObjectInfo: p.SrcObjInfo,
Writer: pw,
Range: p.Range,
BucketInfo: p.SrcBktInfo,
})
if err = pw.CloseWithError(err); err != nil {
n.reqLogger(ctx).Error("could not get object", zap.Error(err))
}
}()
objPayload, err := n.GetObject(ctx, &GetObjectParams{
ObjectInfo: p.SrcObjInfo,
Versioned: p.Versioned,
Range: p.Range,
BucketInfo: p.SrcBktInfo,
})
if err != nil {
return nil, fmt.Errorf("get object to upload copy: %w", err)
}
params := &UploadPartParams{
Info: p.Info,
PartNumber: p.PartNumber,
Size: size,
Reader: pr,
Reader: objPayload,
}
return n.uploadPart(ctx, multipartInfo, params)
}
// implements io.Reader of payloads of the object list stored in the FrostFS network.
type multiObjectReader struct {
ctx context.Context
layer *layer
prm getParams
curReader io.Reader
parts []*data.PartInfo
}
func (x *multiObjectReader) Read(p []byte) (n int, err error) {
if x.curReader != nil {
n, err = x.curReader.Read(p)
if !stderrors.Is(err, io.EOF) {
return n, err
}
}
if len(x.parts) == 0 {
return n, io.EOF
}
x.prm.oid = x.parts[0].OID
x.curReader, err = x.layer.initObjectPayloadReader(x.ctx, x.prm)
if err != nil {
return n, fmt.Errorf("init payload reader for the next part: %w", err)
}
x.parts = x.parts[1:]
next, err := x.Read(p[n:])
return n + next, err
}
func (n *layer) CompleteMultipartUpload(ctx context.Context, p *CompleteMultipartParams) (*UploadData, *data.ExtendedObjectInfo, error) {
for i := 1; i < len(p.Parts); i++ {
if p.Parts[i].PartNumber <= p.Parts[i-1].PartNumber {
return nil, nil, errors.GetAPIError(errors.ErrInvalidPartOrder)
return nil, nil, s3errors.GetAPIError(s3errors.ErrInvalidPartOrder)
}
}
@ -370,7 +332,7 @@ func (n *layer) CompleteMultipartUpload(ctx context.Context, p *CompleteMultipar
encInfo := FormEncryptionInfo(multipartInfo.Meta)
if len(partsInfo) < len(p.Parts) {
return nil, nil, errors.GetAPIError(errors.ErrInvalidPart)
return nil, nil, fmt.Errorf("%w: found %d parts, need %d", s3errors.GetAPIError(s3errors.ErrInvalidPart), len(partsInfo), len(p.Parts))
}
var multipartObjetSize uint64
@ -381,11 +343,13 @@ func (n *layer) CompleteMultipartUpload(ctx context.Context, p *CompleteMultipar
for i, part := range p.Parts {
partInfo := partsInfo[part.PartNumber]
if partInfo == nil || part.ETag != partInfo.ETag {
return nil, nil, errors.GetAPIError(errors.ErrInvalidPart)
return nil, nil, fmt.Errorf("%w: unknown part %d or etag mismatched", s3errors.GetAPIError(s3errors.ErrInvalidPart), part.PartNumber)
}
delete(partsInfo, part.PartNumber)
// for the last part we have no minimum size limit
if i != len(p.Parts)-1 && partInfo.Size < uploadMinSize {
return nil, nil, errors.GetAPIError(errors.ErrEntityTooSmall)
if i != len(p.Parts)-1 && partInfo.Size < UploadMinSize {
return nil, nil, fmt.Errorf("%w: %d/%d", s3errors.GetAPIError(s3errors.ErrEntityTooSmall), partInfo.Size, UploadMinSize)
}
parts = append(parts, partInfo)
multipartObjetSize += partInfo.Size // even if encryption is enabled size is actual (decrypted)
@ -409,6 +373,7 @@ func (n *layer) CompleteMultipartUpload(ctx context.Context, p *CompleteMultipar
initMetadata := make(map[string]string, len(multipartInfo.Meta)+1)
initMetadata[UploadCompletedParts] = completedPartsHeader.String()
initMetadata[MultipartObjectSize] = strconv.FormatUint(multipartObjetSize, 10)
uploadData := &UploadData{
TagSet: make(map[string]string),
@ -432,18 +397,15 @@ func (n *layer) CompleteMultipartUpload(ctx context.Context, p *CompleteMultipar
multipartObjetSize = encMultipartObjectSize
}
r := &multiObjectReader{
ctx: ctx,
layer: n,
parts: parts,
partsData, err := json.Marshal(parts)
if err != nil {
return nil, nil, fmt.Errorf("marshal parst for combined object: %w", err)
}
r.prm.bktInfo = p.Info.Bkt
extObjInfo, err := n.PutObject(ctx, &PutObjectParams{
BktInfo: p.Info.Bkt,
Object: p.Info.Key,
Reader: r,
Reader: bytes.NewReader(partsData),
Header: initMetadata,
Size: multipartObjetSize,
Encryption: p.Info.Encryption,
@ -455,7 +417,7 @@ func (n *layer) CompleteMultipartUpload(ctx context.Context, p *CompleteMultipar
zap.String("uploadKey", p.Info.Key),
zap.Error(err))
return nil, nil, errors.GetAPIError(errors.ErrInternalError)
return nil, nil, s3errors.GetAPIError(s3errors.ErrInternalError)
}
var addr oid.Address
@ -559,7 +521,7 @@ func (n *layer) ListParts(ctx context.Context, p *ListPartsParams) (*ListPartsIn
encInfo := FormEncryptionInfo(multipartInfo.Meta)
if err = p.Info.Encryption.MatchObjectEncryption(encInfo); err != nil {
n.reqLogger(ctx).Warn("mismatched obj encryptionInfo", zap.Error(err))
return nil, errors.GetAPIError(errors.ErrInvalidEncryptionParameters)
return nil, s3errors.GetAPIError(s3errors.ErrInvalidEncryptionParameters)
}
res.Owner = multipartInfo.Owner
@ -602,8 +564,8 @@ func (n *layer) ListParts(ctx context.Context, p *ListPartsParams) (*ListPartsIn
func (n *layer) getUploadParts(ctx context.Context, p *UploadInfoParams) (*data.MultipartInfo, map[int]*data.PartInfo, error) {
multipartInfo, err := n.treeService.GetMultipartUpload(ctx, p.Bkt, p.Key, p.UploadID)
if err != nil {
if stderrors.Is(err, ErrNodeNotFound) {
return nil, nil, errors.GetAPIError(errors.ErrNoSuchUpload)
if errors.Is(err, ErrNodeNotFound) {
return nil, nil, fmt.Errorf("%w: %s", s3errors.GetAPIError(s3errors.ErrNoSuchUpload), err.Error())
}
return nil, nil, err
}
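With this change the payload of a completed multipart object is no longer the concatenated part data: it is a JSON-encoded list describing the parts, and the S3-Multipart-Object-Size header marks the object as combined so reads go through the multi-object reader. A rough sketch of that payload shape (the struct is simplified; the gateway marshals data.PartInfo values):

package main

import (
	"encoding/json"
	"fmt"
)

// Simplified stand-in for the stored part records.
type partRef struct {
	OID  string `json:"oid"`
	Size uint64 `json:"size"`
}

func main() {
	parts := []partRef{
		{OID: "oid-of-part-1", Size: 5 << 20},
		{OID: "oid-of-part-2", Size: 3 << 20},
	}

	payload, err := json.Marshal(parts)
	if err != nil {
		panic(err)
	}

	// This JSON becomes the stored object's payload for a combined object.
	fmt.Println(string(payload))
}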

View file

@ -7,13 +7,13 @@ import (
errorsStd "errors"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
"go.uber.org/zap"
)
type PutBucketNotificationConfigurationParams struct {
RequestInfo *api.ReqInfo
RequestInfo *middleware.ReqInfo
BktInfo *data.BucketInfo
Configuration *data.NotificationConfiguration
CopiesNumbers []uint32
@ -27,7 +27,6 @@ func (n *layer) PutBucketNotificationConfiguration(ctx context.Context, p *PutBu
prm := PrmObjectCreate{
Container: p.BktInfo.CID,
Creator: p.BktInfo.Owner,
Payload: bytes.NewReader(confXML),
Filepath: p.BktInfo.NotificationConfigurationObjectName(),
CreationTime: TimeNow(ctx),
@ -53,13 +52,13 @@ func (n *layer) PutBucketNotificationConfiguration(ctx context.Context, p *PutBu
}
}
n.cache.PutNotificationConfiguration(n.Owner(ctx), p.BktInfo, p.Configuration)
n.cache.PutNotificationConfiguration(n.BearerOwner(ctx), p.BktInfo, p.Configuration)
return nil
}
func (n *layer) GetBucketNotificationConfiguration(ctx context.Context, bktInfo *data.BucketInfo) (*data.NotificationConfiguration, error) {
owner := n.Owner(ctx)
owner := n.BearerOwner(ctx)
if conf := n.cache.GetNotificationConfiguration(owner, bktInfo); conf != nil {
return conf, nil
}

View file

@ -4,6 +4,7 @@ import (
"context"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io"
@ -32,6 +33,14 @@ type (
// payload range
off, ln uint64
objInfo *data.ObjectInfo
bktInfo *data.BucketInfo
}
getFrostFSParams struct {
// payload range
off, ln uint64
oid oid.ID
bktInfo *data.BucketInfo
}
@ -98,9 +107,54 @@ func (n *layer) objectHead(ctx context.Context, bktInfo *data.BucketInfo, idObj
return res.Head, nil
}
func (n *layer) initObjectPayloadReader(ctx context.Context, p getParams) (io.Reader, error) {
if _, isCombined := p.objInfo.Headers[MultipartObjectSize]; !isCombined {
return n.initFrostFSObjectPayloadReader(ctx, getFrostFSParams{
off: p.off,
ln: p.ln,
oid: p.objInfo.ID,
bktInfo: p.bktInfo,
})
}
combinedObj, err := n.objectGet(ctx, p.bktInfo, p.objInfo.ID)
if err != nil {
return nil, fmt.Errorf("get combined object '%s': %w", p.objInfo.ID.EncodeToString(), err)
}
var parts []*data.PartInfo
if err = json.Unmarshal(combinedObj.Payload(), &parts); err != nil {
return nil, fmt.Errorf("unmarshal combined object parts: %w", err)
}
isEncrypted := FormEncryptionInfo(p.objInfo.Headers).Enabled
objParts := make([]partObj, len(parts))
for i, part := range parts {
size := part.Size
if isEncrypted {
if size, err = sio.EncryptedSize(part.Size); err != nil {
return nil, fmt.Errorf("compute encrypted size: %w", err)
}
}
objParts[i] = partObj{
OID: part.OID,
Size: size,
}
}
return newMultiObjectReader(ctx, multiObjectReaderConfig{
layer: n,
off: p.off,
ln: p.ln,
parts: objParts,
bktInfo: p.bktInfo,
})
}
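
A "combined" object's payload, as read above, is just a JSON array of part descriptors, each pointing at a separate FrostFS object. A rough sketch of the reassembly idea, with partRef, fetchPart and combinedReader as illustrative stand-ins for data.PartInfo and the layer's per-part reader (the real multiObjectReader additionally honors the off/ln range and the encrypted part sizes computed above):

    // Sketch only; assumes "encoding/json", "fmt" and "io" are imported.
    type partRef struct {
        OID  string `json:"oid"`
        Size uint64 `json:"size"`
    }

    func combinedReader(payload []byte, fetchPart func(oid string) (io.Reader, error)) (io.Reader, error) {
        var parts []partRef
        if err := json.Unmarshal(payload, &parts); err != nil {
            return nil, fmt.Errorf("unmarshal combined object parts: %w", err)
        }
        readers := make([]io.Reader, 0, len(parts))
        for _, part := range parts {
            r, err := fetchPart(part.OID)
            if err != nil {
                return nil, err
            }
            readers = append(readers, r)
        }
        // Reading the parts back-to-back reconstructs the original payload.
        return io.MultiReader(readers...), nil
    }
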
// initializes payload reader of the FrostFS object.
// Zero range corresponds to full payload (panics if only offset is set).
func (n *layer) initObjectPayloadReader(ctx context.Context, p getParams) (io.Reader, error) {
func (n *layer) initFrostFSObjectPayloadReader(ctx context.Context, p getFrostFSParams) (io.Reader, error) {
prm := PrmObjectRead{
Container: p.bktInfo.CID,
Object: p.oid,
@ -184,8 +238,6 @@ func ParseCompletedPartHeader(hdr string) (*Part, error) {
// PutObject stores object into FrostFS, took payload from io.Reader.
func (n *layer) PutObject(ctx context.Context, p *PutObjectParams) (*data.ExtendedObjectInfo, error) {
owner := n.Owner(ctx)
bktSettings, err := n.GetBucketSettings(ctx, p.BktInfo)
if err != nil {
return nil, fmt.Errorf("couldn't get versioning settings object: %w", err)
@ -221,7 +273,6 @@ func (n *layer) PutObject(ctx context.Context, p *PutObjectParams) (*data.Extend
prm := PrmObjectCreate{
Container: p.BktInfo.CID,
Creator: owner,
PayloadSize: p.Size,
Filepath: p.Object,
Payload: r,
@ -250,6 +301,7 @@ func (n *layer) PutObject(ctx context.Context, p *PutObjectParams) (*data.Extend
Size: size,
},
IsUnversioned: !bktSettings.VersioningEnabled(),
IsCombined: p.Header[MultipartObjectSize] != "",
}
if newVersion.ID, err = n.treeService.AddVersion(ctx, p.BktInfo, newVersion); err != nil {
@ -279,7 +331,7 @@ func (n *layer) PutObject(ctx context.Context, p *PutObjectParams) (*data.Extend
ID: id,
CID: p.BktInfo.CID,
Owner: owner,
Owner: n.gateOwner,
Bucket: p.BktInfo.Name,
Name: p.Object,
Size: size,
@ -294,13 +346,13 @@ func (n *layer) PutObject(ctx context.Context, p *PutObjectParams) (*data.Extend
NodeVersion: newVersion,
}
n.cache.PutObjectWithName(owner, extendedObjInfo)
n.cache.PutObjectWithName(n.BearerOwner(ctx), extendedObjInfo)
return extendedObjInfo, nil
}
func (n *layer) headLastVersionIfNotDeleted(ctx context.Context, bkt *data.BucketInfo, objectName string) (*data.ExtendedObjectInfo, error) {
owner := n.Owner(ctx)
owner := n.BearerOwner(ctx)
if extObjInfo := n.cache.GetLastObject(owner, bkt.Name, objectName); extObjInfo != nil {
return extObjInfo, nil
}
@ -308,17 +360,20 @@ func (n *layer) headLastVersionIfNotDeleted(ctx context.Context, bkt *data.Bucke
node, err := n.treeService.GetLatestVersion(ctx, bkt, objectName)
if err != nil {
if errors.Is(err, ErrNodeNotFound) {
return nil, apiErrors.GetAPIError(apiErrors.ErrNoSuchKey)
return nil, fmt.Errorf("%w: %s", apiErrors.GetAPIError(apiErrors.ErrNoSuchKey), err.Error())
}
return nil, err
}
if node.IsDeleteMarker() {
return nil, apiErrors.GetAPIError(apiErrors.ErrNoSuchKey)
return nil, fmt.Errorf("%w: found version is delete marker", apiErrors.GetAPIError(apiErrors.ErrNoSuchKey))
}
meta, err := n.objectHead(ctx, bkt, node.OID)
if err != nil {
if client.IsErrObjectNotFound(err) {
return nil, fmt.Errorf("%w: %s", apiErrors.GetAPIError(apiErrors.ErrNoSuchKey), err.Error())
}
return nil, err
}
objInfo := objectInfoFromMeta(bkt, meta)
@ -340,7 +395,7 @@ func (n *layer) headVersion(ctx context.Context, bkt *data.BucketInfo, p *HeadOb
foundVersion, err = n.treeService.GetUnversioned(ctx, bkt, p.Object)
if err != nil {
if errors.Is(err, ErrNodeNotFound) {
return nil, apiErrors.GetAPIError(apiErrors.ErrNoSuchVersion)
return nil, fmt.Errorf("%w: %s", apiErrors.GetAPIError(apiErrors.ErrNoSuchVersion), err.Error())
}
return nil, err
}
@ -357,11 +412,11 @@ func (n *layer) headVersion(ctx context.Context, bkt *data.BucketInfo, p *HeadOb
}
}
if foundVersion == nil {
return nil, apiErrors.GetAPIError(apiErrors.ErrNoSuchVersion)
return nil, fmt.Errorf("%w: there isn't tree node with requested version id", apiErrors.GetAPIError(apiErrors.ErrNoSuchVersion))
}
}
owner := n.Owner(ctx)
owner := n.BearerOwner(ctx)
if extObjInfo := n.cache.GetObject(owner, newAddress(bkt.CID, foundVersion.OID)); extObjInfo != nil {
return extObjInfo, nil
}
@ -369,7 +424,7 @@ func (n *layer) headVersion(ctx context.Context, bkt *data.BucketInfo, p *HeadOb
meta, err := n.objectHead(ctx, bkt, foundVersion.OID)
if err != nil {
if client.IsErrObjectNotFound(err) {
return nil, apiErrors.GetAPIError(apiErrors.ErrNoSuchVersion)
return nil, fmt.Errorf("%w: %s", apiErrors.GetAPIError(apiErrors.ErrNoSuchVersion), err.Error())
}
return nil, err
}
@ -411,6 +466,10 @@ func (n *layer) objectPutAndHash(ctx context.Context, prm PrmObjectCreate, bktIn
})
id, err := n.frostFS.CreateObject(ctx, prm)
if err != nil {
if _, errDiscard := io.Copy(io.Discard, prm.Payload); errDiscard != nil {
n.reqLogger(ctx).Warn("failed to discard put payload, probably goroutine leaks", zap.Error(errDiscard))
}
return 0, oid.ID{}, nil, err
}
return size, id, hash.Sum(nil), nil
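
The io.Copy(io.Discard, prm.Payload) added on the error path matters when the payload comes from a synchronous io.Pipe: if the consumer stops reading mid-stream, the writing goroutine stays blocked forever. A standalone, stdlib-only illustration of the failure mode this drain prevents:

    package main

    import (
        "bytes"
        "io"
    )

    func main() {
        pr, pw := io.Pipe()

        // Producer goroutine: Write blocks until the data is consumed or the pipe closes.
        go func() {
            defer pw.Close()
            pw.Write(bytes.Repeat([]byte("x"), 1<<20))
        }()

        // Simulate CreateObject failing after reading only part of the payload.
        io.CopyN(io.Discard, pr, 10)

        // Without this drain the producer would stay blocked in Write (a goroutine leak);
        // reading the rest (or pr.CloseWithError) lets it finish.
        io.Copy(io.Discard, pr)
    }
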
@ -484,7 +543,7 @@ func (n *layer) getLatestObjectsVersions(ctx context.Context, p allObjectParams)
return nil, nil, nil
}
owner := n.Owner(ctx)
owner := n.BearerOwner(ctx)
cacheKey := cache.CreateObjectsListCacheKey(p.Bucket.CID, p.Prefix, true)
nodeVersions := n.cache.GetList(owner, cacheKey)
@ -612,7 +671,7 @@ func (n *layer) initWorkerPool(ctx context.Context, size int, p allObjectParams,
func (n *layer) bucketNodeVersions(ctx context.Context, bkt *data.BucketInfo, prefix string) ([]*data.NodeVersion, error) {
var err error
owner := n.Owner(ctx)
owner := n.BearerOwner(ctx)
cacheKey := cache.CreateObjectsListCacheKey(bkt.CID, prefix, false)
nodeVersions := n.cache.GetList(owner, cacheKey)
@ -732,7 +791,7 @@ func (n *layer) objectInfoFromObjectsCacheOrFrostFS(ctx context.Context, bktInfo
return oiDir
}
owner := n.Owner(ctx)
owner := n.BearerOwner(ctx)
if extInfo := n.cache.GetObject(owner, newAddress(bktInfo.CID, node.OID)); extInfo != nil {
return extInfo.ObjectInfo
}


@ -95,14 +95,14 @@ func (n *layer) PutLockInfo(ctx context.Context, p *PutLockInfoParams) (err erro
return fmt.Errorf("couldn't put lock into tree: %w", err)
}
n.cache.PutLockInfo(n.Owner(ctx), lockObjectKey(p.ObjVersion), lockInfo)
n.cache.PutLockInfo(n.BearerOwner(ctx), lockObjectKey(p.ObjVersion), lockInfo)
return nil
}
func (n *layer) getNodeVersionFromCacheOrFrostfs(ctx context.Context, objVersion *ObjectVersion) (nodeVersion *data.NodeVersion, err error) {
// check cache if node version is stored inside extendedObjectVersion
nodeVersion = n.getNodeVersionFromCache(n.Owner(ctx), objVersion)
nodeVersion = n.getNodeVersionFromCache(n.BearerOwner(ctx), objVersion)
if nodeVersion == nil {
// else get node version from tree service
return n.getNodeVersion(ctx, objVersion)
@ -114,7 +114,6 @@ func (n *layer) getNodeVersionFromCacheOrFrostfs(ctx context.Context, objVersion
func (n *layer) putLockObject(ctx context.Context, bktInfo *data.BucketInfo, objID oid.ID, lock *data.ObjectLock, copiesNumber []uint32) (oid.ID, error) {
prm := PrmObjectCreate{
Container: bktInfo.CID,
Creator: bktInfo.Owner,
Locks: []oid.ID{objID},
CreationTime: TimeNow(ctx),
CopiesNumber: copiesNumber,
@ -131,7 +130,7 @@ func (n *layer) putLockObject(ctx context.Context, bktInfo *data.BucketInfo, obj
}
func (n *layer) GetLockInfo(ctx context.Context, objVersion *ObjectVersion) (*data.LockInfo, error) {
owner := n.Owner(ctx)
owner := n.BearerOwner(ctx)
if lockInfo := n.cache.GetLockInfo(owner, lockObjectKey(objVersion)); lockInfo != nil {
return lockInfo, nil
}
@ -155,7 +154,7 @@ func (n *layer) GetLockInfo(ctx context.Context, objVersion *ObjectVersion) (*da
}
func (n *layer) getCORS(ctx context.Context, bkt *data.BucketInfo) (*data.CORSConfiguration, error) {
owner := n.Owner(ctx)
owner := n.BearerOwner(ctx)
if cors := n.cache.GetCORS(owner, bkt); cors != nil {
return cors, nil
}
@ -167,7 +166,7 @@ func (n *layer) getCORS(ctx context.Context, bkt *data.BucketInfo) (*data.CORSCo
}
if objIDNotFound {
return nil, errors.GetAPIError(errors.ErrNoSuchCORSConfiguration)
return nil, fmt.Errorf("%w: %s", errors.GetAPIError(errors.ErrNoSuchCORSConfiguration), err.Error())
}
obj, err := n.objectGet(ctx, bkt, objID)
@ -192,7 +191,7 @@ func lockObjectKey(objVersion *ObjectVersion) string {
}
func (n *layer) GetBucketSettings(ctx context.Context, bktInfo *data.BucketInfo) (*data.BucketSettings, error) {
owner := n.Owner(ctx)
owner := n.BearerOwner(ctx)
if settings := n.cache.GetSettings(owner, bktInfo); settings != nil {
return settings, nil
}
@ -215,7 +214,7 @@ func (n *layer) PutBucketSettings(ctx context.Context, p *PutSettingsParams) err
return fmt.Errorf("failed to get settings node: %w", err)
}
n.cache.PutSettings(n.Owner(ctx), p.BktInfo, p.Settings)
n.cache.PutSettings(n.BearerOwner(ctx), p.BktInfo, p.Settings)
return nil
}


@ -2,10 +2,11 @@ package layer
import (
"context"
errorsStd "errors"
"errors"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
s3errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
@ -29,7 +30,7 @@ type PutObjectTaggingParams struct {
func (n *layer) GetObjectTagging(ctx context.Context, p *GetObjectTaggingParams) (string, map[string]string, error) {
var err error
owner := n.Owner(ctx)
owner := n.BearerOwner(ctx)
if len(p.ObjectVersion.VersionID) != 0 && p.ObjectVersion.VersionID != data.UnversionedObjectVersionID {
if tags := n.cache.GetTagging(owner, objectTaggingCacheKey(p.ObjectVersion)); tags != nil {
@ -52,8 +53,8 @@ func (n *layer) GetObjectTagging(ctx context.Context, p *GetObjectTaggingParams)
tags, err := n.treeService.GetObjectTagging(ctx, p.ObjectVersion.BktInfo, nodeVersion)
if err != nil {
if errorsStd.Is(err, ErrNodeNotFound) {
return "", nil, errors.GetAPIError(errors.ErrNoSuchKey)
if errors.Is(err, ErrNodeNotFound) {
return "", nil, fmt.Errorf("%w: %s", s3errors.GetAPIError(s3errors.ErrNoSuchKey), err.Error())
}
return "", nil, err
}
@ -75,13 +76,13 @@ func (n *layer) PutObjectTagging(ctx context.Context, p *PutObjectTaggingParams)
err = n.treeService.PutObjectTagging(ctx, p.ObjectVersion.BktInfo, nodeVersion, p.TagSet)
if err != nil {
if errorsStd.Is(err, ErrNodeNotFound) {
return nil, errors.GetAPIError(errors.ErrNoSuchKey)
if errors.Is(err, ErrNodeNotFound) {
return nil, fmt.Errorf("%w: %s", s3errors.GetAPIError(s3errors.ErrNoSuchKey), err.Error())
}
return nil, err
}
n.cache.PutTagging(n.Owner(ctx), objectTaggingCacheKey(p.ObjectVersion), p.TagSet)
n.cache.PutTagging(n.BearerOwner(ctx), objectTaggingCacheKey(p.ObjectVersion), p.TagSet)
return nodeVersion, nil
}
@ -94,8 +95,8 @@ func (n *layer) DeleteObjectTagging(ctx context.Context, p *ObjectVersion) (*dat
err = n.treeService.DeleteObjectTagging(ctx, p.BktInfo, version)
if err != nil {
if errorsStd.Is(err, ErrNodeNotFound) {
return nil, errors.GetAPIError(errors.ErrNoSuchKey)
if errors.Is(err, ErrNodeNotFound) {
return nil, fmt.Errorf("%w: %s", s3errors.GetAPIError(s3errors.ErrNoSuchKey), err.Error())
}
return nil, err
}
@ -108,14 +109,14 @@ func (n *layer) DeleteObjectTagging(ctx context.Context, p *ObjectVersion) (*dat
}
func (n *layer) GetBucketTagging(ctx context.Context, bktInfo *data.BucketInfo) (map[string]string, error) {
owner := n.Owner(ctx)
owner := n.BearerOwner(ctx)
if tags := n.cache.GetTagging(owner, bucketTaggingCacheKey(bktInfo.CID)); tags != nil {
return tags, nil
}
tags, err := n.treeService.GetBucketTagging(ctx, bktInfo)
if err != nil && !errorsStd.Is(err, ErrNodeNotFound) {
if err != nil && !errors.Is(err, ErrNodeNotFound) {
return nil, err
}
@ -129,7 +130,7 @@ func (n *layer) PutBucketTagging(ctx context.Context, bktInfo *data.BucketInfo,
return err
}
n.cache.PutTagging(n.Owner(ctx), bucketTaggingCacheKey(bktInfo.CID), tagSet)
n.cache.PutTagging(n.BearerOwner(ctx), bucketTaggingCacheKey(bktInfo.CID), tagSet)
return nil
}
@ -168,12 +169,14 @@ func (n *layer) getNodeVersion(ctx context.Context, objVersion *ObjectVersion) (
}
}
if version == nil {
err = errors.GetAPIError(errors.ErrNoSuchVersion)
err = fmt.Errorf("%w: there isn't tree node with requested version id", s3errors.GetAPIError(s3errors.ErrNoSuchVersion))
}
}
if err == nil && version.IsDeleteMarker() && !objVersion.NoErrorOnDeleteMarker || errorsStd.Is(err, ErrNodeNotFound) {
return nil, errors.GetAPIError(errors.ErrNoSuchKey)
if err == nil && version.IsDeleteMarker() && !objVersion.NoErrorOnDeleteMarker {
return nil, fmt.Errorf("%w: found version is delete marker", s3errors.GetAPIError(s3errors.ErrNoSuchKey))
} else if errors.Is(err, ErrNodeNotFound) {
return nil, fmt.Errorf("%w: %s", s3errors.GetAPIError(s3errors.ErrNoSuchKey), err.Error())
}
if err == nil && version != nil && !version.IsDeleteMarker() {


@ -9,9 +9,9 @@ import (
"strings"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/encryption"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
)
@ -141,7 +141,7 @@ func NameFromString(name string) (string, string) {
// GetBoxData extracts accessbox.Box from context.
func GetBoxData(ctx context.Context) (*accessbox.Box, error) {
var boxData *accessbox.Box
data, ok := ctx.Value(api.BoxData).(*accessbox.Box)
data, ok := ctx.Value(middleware.BoxData).(*accessbox.Box)
if !ok || data == nil {
return nil, fmt.Errorf("couldn't get box data from context")
}


@ -3,10 +3,11 @@ package layer
import (
"bytes"
"context"
"io"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
bearertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer/test"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@ -31,26 +32,29 @@ func (tc *testContext) putObject(content []byte) *data.ObjectInfo {
}
func (tc *testContext) getObject(objectName, versionID string, needError bool) (*data.ObjectInfo, []byte) {
objInfo, err := tc.layer.GetObjectInfo(tc.ctx, &HeadObjectParams{
headPrm := &HeadObjectParams{
BktInfo: tc.bktInfo,
Object: objectName,
VersionID: versionID,
})
}
objInfo, err := tc.layer.GetObjectInfo(tc.ctx, headPrm)
if needError {
require.Error(tc.t, err)
return nil, nil
}
require.NoError(tc.t, err)
content := bytes.NewBuffer(nil)
err = tc.layer.GetObject(tc.ctx, &GetObjectParams{
objPayload, err := tc.layer.GetObject(tc.ctx, &GetObjectParams{
ObjectInfo: objInfo,
Writer: content,
Versioned: headPrm.Versioned(),
BucketInfo: tc.bktInfo,
})
require.NoError(tc.t, err)
return objInfo, content.Bytes()
payload, err := io.ReadAll(objPayload)
require.NoError(tc.t, err)
return objInfo, payload
}
func (tc *testContext) deleteObject(objectName, versionID string, settings *data.BucketSettings) {
@ -140,13 +144,13 @@ func prepareContext(t *testing.T, cachesConfig ...*CachesConfig) *testContext {
bearerToken := bearertest.Token()
require.NoError(t, bearerToken.Sign(key.PrivateKey))
ctx := context.WithValue(context.Background(), api.BoxData, &accessbox.Box{
ctx := context.WithValue(context.Background(), middleware.BoxData, &accessbox.Box{
Gate: &accessbox.GateData{
BearerToken: &bearerToken,
GateKey: key.PublicKey(),
},
})
tp := NewTestFrostFS()
tp := NewTestFrostFS(key)
bktName := "testbucket1"
bktID, err := tp.CreateContainer(ctx, PrmContainerCreate{


@ -1,60 +0,0 @@
package api
import (
"net/http"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
)
type (
// MaxClients provides HTTP handler wrapper with the client limit.
MaxClients interface {
Handle(http.HandlerFunc) http.HandlerFunc
}
maxClients struct {
pool chan struct{}
timeout time.Duration
}
)
const defaultRequestDeadline = time.Second * 30
// NewMaxClientsMiddleware returns MaxClients interface with handler wrapper based on
// the provided count and the timeout limits.
func NewMaxClientsMiddleware(count int, timeout time.Duration) MaxClients {
if timeout <= 0 {
timeout = defaultRequestDeadline
}
return &maxClients{
pool: make(chan struct{}, count),
timeout: timeout,
}
}
// Handler wraps HTTP handler function with logic limiting access to it.
func (m *maxClients) Handle(f http.HandlerFunc) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
if m.pool == nil {
f.ServeHTTP(w, r)
return
}
deadline := time.NewTimer(m.timeout)
defer deadline.Stop()
select {
case m.pool <- struct{}{}:
defer func() { <-m.pool }()
f.ServeHTTP(w, r)
case <-deadline.C:
// Send a http timeout message
WriteErrorResponse(w, GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrOperationTimedOut))
return
case <-r.Context().Done():
return
}
}
}
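
This client-limiting wrapper is dropped in favor of chi's built-in throttling (middleware.ThrottleWithOpts in the router changes below). A plausible equivalent configuration, shown only as a sketch: the variable names are hypothetical, the field names are those of go-chi/chi v5, and the gateway's actual wiring is not part of this diff:

    throttle := middleware.ThrottleOpts{
        Limit:          maxClientsCount,    // concurrent requests allowed, like the old pool size
        BacklogTimeout: maxClientsDeadline, // how long a queued request may wait, like the old timeout
    }

A value like this is what AttachChi then passes straight into middleware.ThrottleWithOpts.
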


@ -1,4 +1,4 @@
package api
package middleware
import (
"context"
@ -6,29 +6,29 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
"github.com/gorilla/mux"
"go.uber.org/zap"
)
// KeyWrapper is wrapper for context keys.
type KeyWrapper string
// AuthHeaders is a wrapper for authentication headers of a request.
var AuthHeaders = KeyWrapper("__context_auth_headers_key")
// BoxData is an ID used to store accessbox.Box in a context.
var BoxData = KeyWrapper("__context_box_key")
// ClientTime is an ID used to store client time.Time in a context.
var ClientTime = KeyWrapper("__context_client_time")
// AuthMiddleware adds user authentication via center to router using log for logging.
func AuthMiddleware(log *zap.Logger, center auth.Center) mux.MiddlewareFunc {
func Auth(center auth.Center, log *zap.Logger) Func {
return func(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
var ctx context.Context
ctx := r.Context()
box, err := center.Authenticate(r)
if err != nil {
if err == auth.ErrNoAuthorizationHeader {
reqLogOrDefault(ctx, log).Debug("couldn't receive access box for gate key, random key will be used")
ctx = r.Context()
} else {
reqLogOrDefault(ctx, log).Error("failed to pass authentication", zap.Error(err))
if _, ok := err.(errors.Error); !ok {
@ -38,21 +38,14 @@ func AuthMiddleware(log *zap.Logger, center auth.Center) mux.MiddlewareFunc {
return
}
} else {
ctx = context.WithValue(r.Context(), BoxData, box.AccessBox)
ctx = context.WithValue(ctx, BoxData, box.AccessBox)
if !box.ClientTime.IsZero() {
ctx = context.WithValue(ctx, ClientTime, box.ClientTime)
}
ctx = context.WithValue(ctx, AuthHeaders, box.AuthHeaders)
}
h.ServeHTTP(w, r.WithContext(ctx))
})
}
}
func reqLogOrDefault(ctx context.Context, log *zap.Logger) *zap.Logger {
reqLog := GetReqLog(ctx)
if reqLog != nil {
return reqLog
}
return log
}


@ -1,4 +1,4 @@
package api
package middleware
import (
"context"
@ -9,12 +9,102 @@ import (
"sync/atomic"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/metrics"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
"go.uber.org/zap"
)
func RequestTypeFromAPI(api string) metrics.RequestType {
type (
UsersStat interface {
Update(user, bucket, cnrID string, reqType int, in, out uint64)
}
readCounter struct {
io.ReadCloser
countBytes uint64
}
writeCounter struct {
http.ResponseWriter
countBytes uint64
}
responseWrapper struct {
sync.Once
http.ResponseWriter
statusCode int
startTime time.Time
}
// BucketResolveFunc is a func to resolve bucket info by name.
BucketResolveFunc func(ctx context.Context, bucket string) (*data.BucketInfo, error)
// cidResolveFunc is a func to resolve CID in Stats handler.
cidResolveFunc func(ctx context.Context, reqInfo *ReqInfo) (cnrID string)
)
const systemPath = "/system"
// Metrics wraps http handler for api with basic statistics collection.
func Metrics(log *zap.Logger, resolveBucket BucketResolveFunc, appMetrics *metrics.AppMetrics) Func {
return func(h http.Handler) http.Handler {
return stats(h.ServeHTTP, resolveCID(log, resolveBucket), appMetrics)
}
}
// stats is a handler that updates metrics.
func stats(f http.HandlerFunc, resolveCID cidResolveFunc, appMetrics *metrics.AppMetrics) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
reqInfo := GetReqInfo(r.Context())
appMetrics.Statistic().CurrentS3RequestsInc(reqInfo.API)
defer appMetrics.Statistic().CurrentS3RequestsDec(reqInfo.API)
in := &readCounter{ReadCloser: r.Body}
out := &writeCounter{ResponseWriter: w}
r.Body = in
statsWriter := &responseWrapper{
ResponseWriter: out,
startTime: time.Now(),
}
f(statsWriter, r)
// Time duration in secs since the call started.
// We don't need to do nanosecond precision here
// simply for the fact that it is not human-readable.
durationSecs := time.Since(statsWriter.startTime).Seconds()
user := resolveUser(r.Context())
cnrID := resolveCID(r.Context(), reqInfo)
appMetrics.Update(user, reqInfo.BucketName, cnrID, requestTypeFromAPI(reqInfo.API), in.countBytes, out.countBytes)
code := statsWriter.statusCode
// A successful request has a 2xx response code
successReq := code >= http.StatusOK && code < http.StatusMultipleChoices
if !strings.HasSuffix(r.URL.Path, systemPath) {
appMetrics.Statistic().TotalS3RequestsInc(reqInfo.API)
if !successReq && code != 0 {
appMetrics.Statistic().TotalS3ErrorsInc(reqInfo.API)
}
}
if r.Method == http.MethodGet {
// Increment the prometheus http request response histogram with appropriate label
appMetrics.Statistic().RequestDurationsUpdate(reqInfo.API, durationSecs)
}
appMetrics.Statistic().TotalInputBytesAdd(in.countBytes)
appMetrics.Statistic().TotalOutputBytesAdd(out.countBytes)
}
}
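
The Read/Write methods of readCounter and writeCounter are outside this hunk; presumably they only accumulate the byte counts consumed by the Update call above, along the lines of this in-package sketch (using the sync/atomic import already present in the file):

    func (r *readCounter) Read(p []byte) (int, error) {
        n, err := r.ReadCloser.Read(p)
        atomic.AddUint64(&r.countBytes, uint64(n))
        return n, err
    }

    func (w *writeCounter) Write(p []byte) (int, error) {
        n, err := w.ResponseWriter.Write(p)
        atomic.AddUint64(&w.countBytes, uint64(n))
        return n, err
    }
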
func requestTypeFromAPI(api string) metrics.RequestType {
switch api {
case "Options", "HeadObject", "HeadBucket":
return metrics.HEADRequest
@ -43,83 +133,20 @@ func RequestTypeFromAPI(api string) metrics.RequestType {
}
}
type (
UsersStat interface {
Update(user, bucket, cnrID string, reqType int, in, out uint64)
}
readCounter struct {
io.ReadCloser
countBytes uint64
}
writeCounter struct {
http.ResponseWriter
countBytes uint64
}
responseWrapper struct {
sync.Once
http.ResponseWriter
statusCode int
startTime time.Time
}
)
const systemPath = "/system"
//var apiStatMetrics = metrics.newApiStatMetrics()
// CIDResolveFunc is a func to resolve CID in Stats handler.
type CIDResolveFunc func(ctx context.Context, reqInfo *ReqInfo) (cnrID string)
// Stats is a handler that update metrics.
func Stats(f http.HandlerFunc, resolveCID CIDResolveFunc, appMetrics *metrics.AppMetrics) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
reqInfo := GetReqInfo(r.Context())
appMetrics.Statistic().CurrentS3RequestsInc(reqInfo.API)
defer appMetrics.Statistic().CurrentS3RequestsDec(reqInfo.API)
in := &readCounter{ReadCloser: r.Body}
out := &writeCounter{ResponseWriter: w}
r.Body = in
statsWriter := &responseWrapper{
ResponseWriter: out,
startTime: time.Now(),
// resolveCID forms CIDResolveFunc using BucketResolveFunc.
func resolveCID(log *zap.Logger, resolveBucket BucketResolveFunc) cidResolveFunc {
return func(ctx context.Context, reqInfo *ReqInfo) (cnrID string) {
if reqInfo.BucketName == "" || reqInfo.API == "CreateBucket" || reqInfo.API == "" {
return ""
}
f(statsWriter, r)
// Time duration in secs since the call started.
// We don't need to do nanosecond precision here
// simply for the fact that it is not human-readable.
durationSecs := time.Since(statsWriter.startTime).Seconds()
user := resolveUser(r.Context())
cnrID := resolveCID(r.Context(), reqInfo)
appMetrics.Update(user, reqInfo.BucketName, cnrID, RequestTypeFromAPI(reqInfo.API), in.countBytes, out.countBytes)
code := statsWriter.statusCode
// A successful request has a 2xx response code
successReq := code >= http.StatusOK && code < http.StatusMultipleChoices
if !strings.HasSuffix(r.URL.Path, systemPath) {
appMetrics.Statistic().TotalS3RequestsInc(reqInfo.API)
if !successReq && code != 0 {
appMetrics.Statistic().TotalS3ErrorsInc(reqInfo.API)
}
bktInfo, err := resolveBucket(ctx, reqInfo.BucketName)
if err != nil {
reqLogOrDefault(ctx, log).Debug("failed to resolve CID", zap.Error(err))
return ""
}
if r.Method == http.MethodGet {
// Increment the prometheus http request response histogram with appropriate label
appMetrics.Statistic().RequestDurationsUpdate(reqInfo.API, durationSecs)
}
appMetrics.Statistic().TotalInputBytesAdd(in.countBytes)
appMetrics.Statistic().TotalOutputBytesAdd(out.countBytes)
return bktInfo.CID.EncodeToString()
}
}


@ -0,0 +1,27 @@
package middleware
import (
"context"
"net/http"
"go.uber.org/zap"
)
type Func func(h http.Handler) http.Handler
func WrapHandler(handler http.HandlerFunc) Func {
return func(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
handler(w, r)
h.ServeHTTP(w, r)
})
}
}
func reqLogOrDefault(ctx context.Context, log *zap.Logger) *zap.Logger {
reqLog := GetReqLog(ctx)
if reqLog != nil {
return reqLog
}
return log
}
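
Func is the composition unit the router below builds on; a minimal sketch of manual chaining (chi's Use does this for the gateway, so the chain helper here is purely illustrative):

    // chain applies middlewares right to left so the first listed one ends up outermost,
    // matching the order in which chi's Use wraps handlers.
    func chain(h http.Handler, mws ...Func) http.Handler {
        for i := len(mws) - 1; i >= 0; i-- {
            h = mws[i](h)
        }
        return h
    }

    // e.g. chain(mux, Request(log), Auth(center, log)) wraps mux with Request outermost.
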


@ -1,4 +1,4 @@
package api
package middleware
import (
"context"
@ -9,8 +9,10 @@ import (
"strings"
"sync"
"github.com/gorilla/mux"
"github.com/go-chi/chi/v5"
"github.com/google/uuid"
"go.uber.org/zap"
"google.golang.org/grpc/metadata"
)
type (
@ -41,23 +43,29 @@ type (
Object string
Method string
}
)
// Key used for custom key/value in context.
type contextKeyType string
// Key used for custom key/value in context.
contextKeyType string
)
const (
ctxRequestInfo = contextKeyType("FrostFS-S3-GW")
ctxRequestLogger = contextKeyType("FrostFS-S3-GW-Logger")
)
const HdrAmzRequestID = "x-amz-request-id"
const (
BucketURLPrm = "bucket"
)
var deploymentID = uuid.Must(uuid.NewRandom())
var (
// De-facto standard header keys.
xForwardedFor = http.CanonicalHeaderKey("X-Forwarded-For")
xRealIP = http.CanonicalHeaderKey("X-Real-IP")
)
var (
// RFC7239 defines a new "Forwarded: " header designed to replace the
// existing use of X-Forwarded-* headers.
// e.g. Forwarded: for=192.0.2.60;proto=https;by=203.0.113.43.
@ -67,68 +75,6 @@ var (
forRegex = regexp.MustCompile(`(?i)(?:for=)([^(;|, )]+)(.*)`)
)
// GetSourceIP retrieves the IP from the X-Forwarded-For, X-Real-IP and RFC7239
// Forwarded headers (in that order), falls back to r.RemoteAddr when everything
// else fails.
func GetSourceIP(r *http.Request) string {
var addr string
if fwd := r.Header.Get(xForwardedFor); fwd != "" {
// Only grabs the first (client) address. Note that '192.168.0.1,
// 10.1.1.1' is a valid key for X-Forwarded-For where addresses after
// the first one may represent forwarding proxies earlier in the chain.
s := strings.Index(fwd, ", ")
if s == -1 {
s = len(fwd)
}
addr = fwd[:s]
} else if fwd := r.Header.Get(xRealIP); fwd != "" {
// X-Real-IP should only contain one IP address (the client making the
// request).
addr = fwd
} else if fwd := r.Header.Get(forwarded); fwd != "" {
// match should contain at least two elements if the protocol was
// specified in the Forwarded header. The first element will always be
// the 'for=' capture, which we ignore. In the case of multiple IP
// addresses (for=8.8.8.8, 8.8.4.4, 172.16.1.20 is valid) we only
// extract the first, which should be the client IP.
if match := forRegex.FindStringSubmatch(fwd); len(match) > 1 {
// IPv6 addresses in Forwarded headers are quoted-strings. We strip
// these quotes.
addr = strings.Trim(match[1], `"`)
}
}
if addr != "" {
return addr
}
// Default to remote address if headers not set.
addr, _, _ = net.SplitHostPort(r.RemoteAddr)
return addr
}
func prepareReqInfo(w http.ResponseWriter, r *http.Request) *ReqInfo {
vars := mux.Vars(r)
bucket := vars["bucket"]
object, err := url.PathUnescape(vars["object"])
if err != nil {
object = vars["object"]
}
prefix, err := url.QueryUnescape(vars["prefix"])
if err != nil {
prefix = vars["prefix"]
}
if prefix != "" {
object = prefix
}
return NewReqInfo(w, r, ObjectRequest{
Bucket: bucket,
Object: object,
Method: mux.CurrentRoute(r).GetName(),
})
}
// NewReqInfo returns new ReqInfo based on parameters.
func NewReqInfo(w http.ResponseWriter, r *http.Request, req ObjectRequest) *ReqInfo {
return &ReqInfo{
@ -136,7 +82,7 @@ func NewReqInfo(w http.ResponseWriter, r *http.Request, req ObjectRequest) *ReqI
BucketName: req.Bucket,
ObjectName: req.Object,
UserAgent: r.UserAgent(),
RemoteHost: GetSourceIP(r),
RemoteHost: getSourceIP(r),
RequestID: GetRequestID(w),
DeploymentID: deploymentID.String(),
URL: r.URL,
@ -187,6 +133,18 @@ func (r *ReqInfo) GetTags() []KeyVal {
return append([]KeyVal(nil), r.tags...)
}
// GetRequestID returns the request ID from the response writer or the context.
func GetRequestID(v interface{}) string {
switch t := v.(type) {
case context.Context:
return GetReqInfo(t).RequestID
case http.ResponseWriter:
return t.Header().Get(HdrAmzRequestID)
default:
panic("unknown type")
}
}
// SetReqInfo sets ReqInfo in the context.
func SetReqInfo(ctx context.Context, req *ReqInfo) context.Context {
if ctx == nil {
@ -224,3 +182,120 @@ func GetReqLog(ctx context.Context) *zap.Logger {
}
return nil
}
func Request(log *zap.Logger) Func {
return func(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// generate random UUIDv4
id, _ := uuid.NewRandom()
// set request id into response header
// also we have to set request id here
// to be able to get it in NewReqInfo
w.Header().Set(HdrAmzRequestID, id.String())
// set request info into context
// bucket name and object will be set in reqInfo later (limitation of go-chi)
reqInfo := NewReqInfo(w, r, ObjectRequest{})
r = r.WithContext(SetReqInfo(r.Context(), reqInfo))
// set request id into gRPC meta header
r = r.WithContext(metadata.AppendToOutgoingContext(
r.Context(), HdrAmzRequestID, reqInfo.RequestID,
))
reqLogger := log.With(zap.String("request_id", reqInfo.RequestID))
r = r.WithContext(SetReqLogger(r.Context(), reqLogger))
reqLogger.Info("request start", zap.String("host", r.Host),
zap.String("remote_host", reqInfo.RemoteHost))
// continue execution
h.ServeHTTP(w, r)
})
}
}
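
Since Request stores the id both in the x-amz-request-id header and in ReqInfo, a handler behind this middleware can recover it either way via GetRequestID (defined above); a tiny usage sketch, with exampleHandler as a hypothetical handler:

    func exampleHandler(w http.ResponseWriter, r *http.Request) {
        // Both calls return the id generated by Request for this request.
        fromCtx := GetRequestID(r.Context())
        fromHeader := GetRequestID(w)
        _ = fromCtx == fromHeader // true once Request has run
    }
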
// AddBucketName adds bucket name to ReqInfo from context.
func AddBucketName(l *zap.Logger) Func {
return func(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
reqInfo := GetReqInfo(ctx)
reqInfo.BucketName = chi.URLParam(r, BucketURLPrm)
reqLogger := reqLogOrDefault(ctx, l)
r = r.WithContext(SetReqLogger(ctx, reqLogger.With(zap.String("bucket", reqInfo.BucketName))))
h.ServeHTTP(w, r)
})
}
}
// AddObjectName adds object name to ReqInfo from context.
func AddObjectName(l *zap.Logger) Func {
return func(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
reqInfo := GetReqInfo(ctx)
rctx := chi.RouteContext(ctx)
// trim leading slash (always present)
obj := rctx.RoutePath[1:]
object, err := url.PathUnescape(obj)
if err != nil {
object = obj
}
reqInfo.ObjectName = object
reqLogger := reqLogOrDefault(ctx, l)
r = r.WithContext(SetReqLogger(ctx, reqLogger.With(zap.String("object", reqInfo.ObjectName))))
h.ServeHTTP(w, r)
})
}
}
// getSourceIP retrieves the IP from the X-Forwarded-For, X-Real-IP and RFC7239
// Forwarded headers (in that order), falls back to r.RemoteAddr when everything
// else fails.
func getSourceIP(r *http.Request) string {
var addr string
if fwd := r.Header.Get(xForwardedFor); fwd != "" {
// Only grabs the first (client) address. Note that '192.168.0.1,
// 10.1.1.1' is a valid key for X-Forwarded-For where addresses after
// the first one may represent forwarding proxies earlier in the chain.
s := strings.Index(fwd, ", ")
if s == -1 {
s = len(fwd)
}
addr = fwd[:s]
} else if fwd := r.Header.Get(xRealIP); fwd != "" {
// X-Real-IP should only contain one IP address (the client making the
// request).
addr = fwd
} else if fwd := r.Header.Get(forwarded); fwd != "" {
// match should contain at least two elements if the protocol was
// specified in the Forwarded header. The first element will always be
// the 'for=' capture, which we ignore. In the case of multiple IP
// addresses (for=8.8.8.8, 8.8.4.4, 172.16.1.20 is valid) we only
// extract the first, which should be the client IP.
if match := forRegex.FindStringSubmatch(fwd); len(match) > 1 {
// IPv6 addresses in Forwarded headers are quoted-strings. We strip
// these quotes.
addr = strings.Trim(match[1], `"`)
}
}
if addr != "" {
return addr
}
// Default to remote address if headers not set.
addr, _, _ = net.SplitHostPort(r.RemoteAddr)
return addr
}


@ -1,4 +1,4 @@
package api
package middleware
import (
"bytes"
@ -6,10 +6,11 @@ import (
"fmt"
"net/http"
"strconv"
"sync"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/version"
"github.com/google/uuid"
"go.uber.org/zap"
)
type (
@ -34,19 +35,26 @@ type (
// Underlying HTTP status code for the returned error.
StatusCode int `xml:"-" json:"-"`
}
// mimeType represents various MIME types used in API responses.
mimeType string
)
const (
// MimeNone means no response type.
MimeNone mimeType = ""
// MimeXML means response type is XML.
MimeXML mimeType = "application/xml"
hdrServerInfo = "Server"
hdrAcceptRanges = "Accept-Ranges"
hdrContentType = "Content-Type"
hdrContentLength = "Content-Length"
hdrRetryAfter = "Retry-After"
hdrAmzCopySource = "X-Amz-Copy-Source"
// Response request id.
hdrAmzRequestID = "x-amz-request-id"
// hdrSSE is the general AWS SSE HTTP header key.
hdrSSE = "X-Amz-Server-Side-Encryption"
@ -61,8 +69,6 @@ const (
)
var (
deploymentID, _ = uuid.NewRandom()
xmlHeader = []byte(xml.Header)
)
@ -140,16 +146,6 @@ func WriteErrorResponseNoHeader(w http.ResponseWriter, reqInfo *ReqInfo, err err
WriteResponseBody(w, encodedErrorResponse)
}
// If none of the http routes match respond with appropriate errors.
func errorResponseHandler(w http.ResponseWriter, r *http.Request) {
desc := fmt.Sprintf("Unknown API request at %s", r.URL.Path)
WriteErrorResponse(w, GetReqInfo(r.Context()), errors.Error{
Code: "UnknownAPIRequest",
Description: desc,
HTTPStatusCode: http.StatusBadRequest,
})
}
// Write http common headers.
func setCommonHeaders(w http.ResponseWriter) {
w.Header().Set(hdrServerInfo, version.Server)
@ -280,3 +276,56 @@ func getAPIErrorResponse(info *ReqInfo, err error) ErrorResponse {
HostID: info.DeploymentID,
}
}
type logResponseWriter struct {
sync.Once
http.ResponseWriter
statusCode int
}
func (lrw *logResponseWriter) WriteHeader(code int) {
lrw.Do(func() {
lrw.statusCode = code
lrw.ResponseWriter.WriteHeader(code)
})
}
func (lrw *logResponseWriter) Flush() {
if f, ok := lrw.ResponseWriter.(http.Flusher); ok {
f.Flush()
}
}
func LogSuccessResponse(l *zap.Logger) Func {
return func(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
lw := &logResponseWriter{ResponseWriter: w}
// here reqInfo doesn't contain bucket name and object name
// pass execution:
h.ServeHTTP(lw, r)
// here reqInfo contains bucket name and object name because of
// addBucketName and addObjectName middlewares
// Ignore >400 status codes
if lw.statusCode >= http.StatusBadRequest {
return
}
ctx := r.Context()
reqLogger := reqLogOrDefault(ctx, l)
reqInfo := GetReqInfo(ctx)
reqLogger.Info("request end",
zap.String("method", reqInfo.API),
zap.String("bucket", reqInfo.BucketName),
zap.String("object", reqInfo.ObjectName),
zap.Int("status", lw.statusCode),
zap.String("description", http.StatusText(lw.statusCode)),
)
})
}
}


@ -1,4 +1,4 @@
package api
package middleware
import (
"context"
@ -6,15 +6,14 @@ import (
"sync"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"github.com/gorilla/mux"
"go.opentelemetry.io/otel/attribute"
semconv "go.opentelemetry.io/otel/semconv/v1.18.0"
"go.opentelemetry.io/otel/trace"
)
// TracingMiddleware adds tracing support for requests.
// Tracing adds tracing support for requests.
// Must be placed after prepareRequest middleware.
func TracingMiddleware() mux.MiddlewareFunc {
func Tracing() Func {
return func(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
appCtx, span := StartHTTPServerSpan(r, "REQUEST S3")


@ -2,16 +2,17 @@ package api
import (
"context"
"fmt"
"net/http"
"sync"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
s3middleware "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/metrics"
"github.com/google/uuid"
"github.com/gorilla/mux"
"github.com/go-chi/chi/v5"
"github.com/go-chi/chi/v5/middleware"
"go.uber.org/zap"
"google.golang.org/grpc/metadata"
)
type (
@ -86,554 +87,301 @@ type (
ResolveBucket(ctx context.Context, bucket string) (*data.BucketInfo, error)
}
// mimeType represents various MIME types used in API responses.
mimeType string
logResponseWriter struct {
sync.Once
http.ResponseWriter
statusCode int
}
)
const (
// SlashSeparator -- slash separator.
SlashSeparator = "/"
// MimeNone means no response type.
MimeNone mimeType = ""
// MimeXML means response type is XML.
MimeXML mimeType = "application/xml"
)
var _ = logSuccessResponse
func (lrw *logResponseWriter) WriteHeader(code int) {
lrw.Do(func() {
lrw.statusCode = code
lrw.ResponseWriter.WriteHeader(code)
})
}
func (lrw *logResponseWriter) Flush() {
if f, ok := lrw.ResponseWriter.(http.Flusher); ok {
f.Flush()
}
}
func prepareRequest(log *zap.Logger) mux.MiddlewareFunc {
return func(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// generate random UUIDv4
id, _ := uuid.NewRandom()
// set request id into response header
// also we have to set request id here
// to be able to get it in prepareReqInfo
w.Header().Set(hdrAmzRequestID, id.String())
// set request info into context
reqInfo := prepareReqInfo(w, r)
r = r.WithContext(SetReqInfo(r.Context(), reqInfo))
// set request id into gRPC meta header
r = r.WithContext(metadata.AppendToOutgoingContext(
r.Context(), hdrAmzRequestID, reqInfo.RequestID,
))
// set request scoped child logger into context
additionalFields := []zap.Field{zap.String("request_id", reqInfo.RequestID),
zap.String("method", reqInfo.API), zap.String("bucket", reqInfo.BucketName)}
if isObjectRequest(reqInfo) {
additionalFields = append(additionalFields, zap.String("object", reqInfo.ObjectName))
}
reqLogger := log.With(additionalFields...)
r = r.WithContext(SetReqLogger(r.Context(), reqLogger))
reqLogger.Info("request start", zap.String("host", r.Host),
zap.String("remote_host", reqInfo.RemoteHost))
// continue execution
h.ServeHTTP(w, r)
})
}
}
var objectMethods = []string{
"HeadObject", "GetObject", "DeleteObject", "PutObject", "PostObject", "CopyObject",
"CreateMultipartUpload", "UploadPartCopy", "UploadPart", "ListObjectParts",
"CompleteMultipartUpload", "AbortMultipartUpload",
"PutObjectACL", "GetObjectACL",
"PutObjectTagging", "GetObjectTagging", "DeleteObjectTagging",
"PutObjectRetention", "GetObjectRetention", "PutObjectLegalHold", "getobjectlegalhold",
"SelectObjectContent", "GetObjectAttributes",
}
func isObjectRequest(info *ReqInfo) bool {
for _, method := range objectMethods {
if info.API == method {
return true
}
}
return false
}
func appendCORS(handler Handler) mux.MiddlewareFunc {
return func(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
handler.AppendCORSHeaders(w, r)
h.ServeHTTP(w, r)
})
}
}
// BucketResolveFunc is a func to resolve bucket info by name.
type BucketResolveFunc func(ctx context.Context, bucket string) (*data.BucketInfo, error)
// metricsMiddleware wraps http handler for api with basic statistics collection.
func metricsMiddleware(log *zap.Logger, resolveBucket BucketResolveFunc, appMetrics *metrics.AppMetrics) mux.MiddlewareFunc {
return func(h http.Handler) http.Handler {
return Stats(h.ServeHTTP, resolveCID(log, resolveBucket), appMetrics)
}
}
// resolveCID forms CIDResolveFunc using BucketResolveFunc.
func resolveCID(log *zap.Logger, resolveBucket BucketResolveFunc) CIDResolveFunc {
return func(ctx context.Context, reqInfo *ReqInfo) (cnrID string) {
if reqInfo.BucketName == "" || reqInfo.API == "CreateBucket" || reqInfo.API == "" {
return ""
}
bktInfo, err := resolveBucket(ctx, reqInfo.BucketName)
if err != nil {
reqLogOrDefault(ctx, log).Debug("failed to resolve CID", zap.Error(err))
return ""
}
return bktInfo.CID.EncodeToString()
}
}
func logSuccessResponse(l *zap.Logger) mux.MiddlewareFunc {
return func(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
lw := &logResponseWriter{ResponseWriter: w}
reqLogger := reqLogOrDefault(r.Context(), l)
// pass execution:
h.ServeHTTP(lw, r)
// Ignore >400 status codes
if lw.statusCode >= http.StatusBadRequest {
return
}
reqLogger.Info("request end",
zap.Int("status", lw.statusCode),
zap.String("description", http.StatusText(lw.statusCode)))
})
}
}
// GetRequestID returns the request ID from the response writer or the context.
func GetRequestID(v interface{}) string {
switch t := v.(type) {
case context.Context:
return GetReqInfo(t).RequestID
case http.ResponseWriter:
return t.Header().Get(hdrAmzRequestID)
default:
panic("unknown type")
}
}
func setErrorAPI(apiName string, h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
ctx := SetReqInfo(r.Context(), &ReqInfo{API: apiName})
h.ServeHTTP(w, r.WithContext(ctx))
})
}
// attachErrorHandler set NotFoundHandler and MethodNotAllowedHandler for mux.Router.
func attachErrorHandler(api *mux.Router, log *zap.Logger, h Handler, center auth.Center, appMetrics *metrics.AppMetrics) {
middlewares := []mux.MiddlewareFunc{
AuthMiddleware(log, center),
metricsMiddleware(log, h.ResolveBucket, appMetrics),
}
var errorHandler http.Handler = http.HandlerFunc(errorResponseHandler)
for i := len(middlewares) - 1; i >= 0; i-- {
errorHandler = middlewares[i](errorHandler)
}
// If none of the routes match, add default error handler routes
api.NotFoundHandler = setErrorAPI("NotFound", errorHandler)
api.MethodNotAllowedHandler = setErrorAPI("MethodNotAllowed", errorHandler)
}
// Attach adds S3 API handlers from h to r for domains with m client limit using
// center authentication and log logger.
func Attach(r *mux.Router, domains []string, m MaxClients, h Handler, center auth.Center, log *zap.Logger, appMetrics *metrics.AppMetrics) {
api := r.PathPrefix(SlashSeparator).Subrouter()
func AttachChi(api *chi.Mux, domains []string, throttle middleware.ThrottleOpts, h Handler, center auth.Center, log *zap.Logger, appMetrics *metrics.AppMetrics) {
api.Use(
// -- prepare request
prepareRequest(log),
// Attach user authentication for all S3 routes.
AuthMiddleware(log, center),
TracingMiddleware(),
metricsMiddleware(log, h.ResolveBucket, appMetrics),
// -- logging error requests
logSuccessResponse(log),
s3middleware.Request(log),
middleware.ThrottleWithOpts(throttle),
middleware.Recoverer,
s3middleware.Tracing(),
s3middleware.Metrics(log, h.ResolveBucket, appMetrics),
s3middleware.LogSuccessResponse(log),
s3middleware.Auth(center, log),
)
attachErrorHandler(api, log, h, center, appMetrics)
buckets := make([]*mux.Router, 0, len(domains)+1)
buckets = append(buckets, api.PathPrefix("/{bucket}").Subrouter())
defaultRouter := chi.NewRouter()
defaultRouter.Mount(fmt.Sprintf("/{%s}", s3middleware.BucketURLPrm), bucketRouter(h, log))
defaultRouter.Get("/", named("ListBuckets", h.ListBucketsHandler))
hr := NewHostBucketRouter("bucket")
hr.Default(defaultRouter)
for _, domain := range domains {
buckets = append(buckets, api.Host("{bucket:.+}."+domain).Subrouter())
hr.Map(domain, bucketRouter(h, log))
}
api.Mount("/", hr)
for _, bucket := range buckets {
// Object operations
// HeadObject
bucket.Use(
// -- append CORS headers to a response for
appendCORS(h),
)
bucket.Methods(http.MethodOptions).HandlerFunc(
m.Handle(h.Preflight)).
Name("Options")
bucket.Methods(http.MethodHead).Path("/{object:.+}").HandlerFunc(
m.Handle(h.HeadObjectHandler)).
Name("HeadObject")
// CopyObjectPart
bucket.Methods(http.MethodPut).Path("/{object:.+}").Headers(hdrAmzCopySource, "").HandlerFunc(
m.Handle(h.UploadPartCopy)).
Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}").
Name("UploadPartCopy")
// PutObjectPart
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
m.Handle(h.UploadPartHandler)).
Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}").
Name("UploadPart")
// ListParts
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
m.Handle(h.ListPartsHandler)).
Queries("uploadId", "{uploadId:.*}").
Name("ListObjectParts")
// CompleteMultipartUpload
bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
m.Handle(h.CompleteMultipartUploadHandler)).
Queries("uploadId", "{uploadId:.*}").
Name("CompleteMultipartUpload")
// CreateMultipartUpload
bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
m.Handle(h.CreateMultipartUploadHandler)).
Queries("uploads", "").
Name("CreateMultipartUpload")
// AbortMultipartUpload
bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(
m.Handle(h.AbortMultipartUploadHandler)).
Queries("uploadId", "{uploadId:.*}").
Name("AbortMultipartUpload")
// ListMultipartUploads
bucket.Methods(http.MethodGet).HandlerFunc(
m.Handle(h.ListMultipartUploadsHandler)).
Queries("uploads", "").
Name("ListMultipartUploads")
// GetObjectACL -- this is a dummy call.
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
m.Handle(h.GetObjectACLHandler)).
Queries("acl", "").
Name("GetObjectACL")
// PutObjectACL -- this is a dummy call.
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
m.Handle(h.PutObjectACLHandler)).
Queries("acl", "").
Name("PutObjectACL")
// GetObjectTagging
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
m.Handle(h.GetObjectTaggingHandler)).
Queries("tagging", "").
Name("GetObjectTagging")
// PutObjectTagging
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
m.Handle(h.PutObjectTaggingHandler)).
Queries("tagging", "").
Name("PutObjectTagging")
// DeleteObjectTagging
bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(
m.Handle(h.DeleteObjectTaggingHandler)).
Queries("tagging", "").
Name("DeleteObjectTagging")
// SelectObjectContent
bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
m.Handle(h.SelectObjectContentHandler)).
Queries("select", "").Queries("select-type", "2").
Name("SelectObjectContent")
// GetObjectRetention
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
m.Handle(h.GetObjectRetentionHandler)).
Queries("retention", "").
Name("GetObjectRetention")
// GetObjectLegalHold
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
m.Handle(h.GetObjectLegalHoldHandler)).
Queries("legal-hold", "").
Name("GetObjectLegalHold")
// GetObjectAttributes
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
m.Handle(h.GetObjectAttributesHandler)).
Queries("attributes", "").
Name("GetObjectAttributes")
// GetObject
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
m.Handle(h.GetObjectHandler)).
Name("GetObject")
// CopyObject
bucket.Methods(http.MethodPut).Path("/{object:.+}").Headers(hdrAmzCopySource, "").HandlerFunc(
m.Handle(h.CopyObjectHandler)).
Name("CopyObject")
// PutObjectRetention
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
m.Handle(h.PutObjectRetentionHandler)).
Queries("retention", "").
Name("PutObjectRetention")
// PutObjectLegalHold
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
m.Handle(h.PutObjectLegalHoldHandler)).
Queries("legal-hold", "").
Name("PutObjectLegalHold")
// PutObject
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
m.Handle(h.PutObjectHandler)).
Name("PutObject")
// DeleteObject
bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(
m.Handle(h.DeleteObjectHandler)).
Name("DeleteObject")
// Bucket operations
// GetBucketLocation
bucket.Methods(http.MethodGet).HandlerFunc(
m.Handle(h.GetBucketLocationHandler)).
Queries("location", "").
Name("GetBucketLocation")
// GetBucketPolicy
bucket.Methods(http.MethodGet).HandlerFunc(
m.Handle(h.GetBucketPolicyHandler)).
Queries("policy", "").
Name("GetBucketPolicy")
// GetBucketLifecycle
bucket.Methods(http.MethodGet).HandlerFunc(
m.Handle(h.GetBucketLifecycleHandler)).
Queries("lifecycle", "").
Name("GetBucketLifecycle")
// GetBucketEncryption
bucket.Methods(http.MethodGet).HandlerFunc(
m.Handle(h.GetBucketEncryptionHandler)).
Queries("encryption", "").
Name("GetBucketEncryption")
bucket.Methods(http.MethodGet).HandlerFunc(
m.Handle(h.GetBucketCorsHandler)).
Queries("cors", "").
Name("GetBucketCors")
bucket.Methods(http.MethodPut).HandlerFunc(
m.Handle(h.PutBucketCorsHandler)).
Queries("cors", "").
Name("PutBucketCors")
bucket.Methods(http.MethodDelete).HandlerFunc(
m.Handle(h.DeleteBucketCorsHandler)).
Queries("cors", "").
Name("DeleteBucketCors")
// Dummy Bucket Calls
// GetBucketACL -- this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(
m.Handle(h.GetBucketACLHandler)).
Queries("acl", "").
Name("GetBucketACL")
// PutBucketACL -- this is a dummy call.
bucket.Methods(http.MethodPut).HandlerFunc(
m.Handle(h.PutBucketACLHandler)).
Queries("acl", "").
Name("PutBucketACL")
// GetBucketWebsiteHandler -- this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(
m.Handle(h.GetBucketWebsiteHandler)).
Queries("website", "").
Name("GetBucketWebsite")
// GetBucketAccelerateHandler -- this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(
m.Handle(h.GetBucketAccelerateHandler)).
Queries("accelerate", "").
Name("GetBucketAccelerate")
// GetBucketRequestPaymentHandler -- this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(
m.Handle(h.GetBucketRequestPaymentHandler)).
Queries("requestPayment", "").
Name("GetBucketRequestPayment")
// GetBucketLoggingHandler -- this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(
m.Handle(h.GetBucketLoggingHandler)).
Queries("logging", "").
Name("GetBucketLogging")
// GetBucketReplicationHandler -- this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(
m.Handle(h.GetBucketReplicationHandler)).
Queries("replication", "").
Name("GetBucketReplication")
// GetBucketTaggingHandler
bucket.Methods(http.MethodGet).HandlerFunc(
m.Handle(h.GetBucketTaggingHandler)).
Queries("tagging", "").
Name("GetBucketTagging")
// DeleteBucketWebsiteHandler
bucket.Methods(http.MethodDelete).HandlerFunc(
m.Handle(h.DeleteBucketWebsiteHandler)).
Queries("website", "").
Name("DeleteBucketWebsite")
// DeleteBucketTaggingHandler
bucket.Methods(http.MethodDelete).HandlerFunc(
m.Handle(h.DeleteBucketTaggingHandler)).
Queries("tagging", "").
Name("DeleteBucketTagging")
// GetBucketObjectLockConfig
bucket.Methods(http.MethodGet).HandlerFunc(
m.Handle(h.GetBucketObjectLockConfigHandler)).
Queries("object-lock", "").
Name("GetBucketObjectLockConfig")
// GetBucketVersioning
bucket.Methods(http.MethodGet).HandlerFunc(
m.Handle(h.GetBucketVersioningHandler)).
Queries("versioning", "").
Name("GetBucketVersioning")
// GetBucketNotification
bucket.Methods(http.MethodGet).HandlerFunc(
m.Handle(h.GetBucketNotificationHandler)).
Queries("notification", "").
Name("GetBucketNotification")
// ListenBucketNotification
bucket.Methods(http.MethodGet).HandlerFunc(h.ListenBucketNotificationHandler).
Queries("events", "{events:.*}").
Name("ListenBucketNotification")
// ListObjectsV2M
bucket.Methods(http.MethodGet).HandlerFunc(
m.Handle(h.ListObjectsV2MHandler)).
Queries("list-type", "2", "metadata", "true").
Name("ListObjectsV2M")
// ListObjectsV2
bucket.Methods(http.MethodGet).HandlerFunc(
m.Handle(h.ListObjectsV2Handler)).
Queries("list-type", "2").
Name("ListObjectsV2")
// ListBucketVersions
bucket.Methods(http.MethodGet).HandlerFunc(
m.Handle(h.ListBucketObjectVersionsHandler)).
Queries("versions", "").
Name("ListBucketVersions")
// ListObjectsV1 (Legacy)
bucket.Methods(http.MethodGet).HandlerFunc(
m.Handle(h.ListObjectsV1Handler)).
Name("ListObjectsV1")
// PutBucketLifecycle
bucket.Methods(http.MethodPut).HandlerFunc(
m.Handle(h.PutBucketLifecycleHandler)).
Queries("lifecycle", "").
Name("PutBucketLifecycle")
// PutBucketEncryption
bucket.Methods(http.MethodPut).HandlerFunc(
m.Handle(h.PutBucketEncryptionHandler)).
Queries("encryption", "").
Name("PutBucketEncryption")
// PutBucketPolicy
bucket.Methods(http.MethodPut).HandlerFunc(
m.Handle(h.PutBucketPolicyHandler)).
Queries("policy", "").
Name("PutBucketPolicy")
// PutBucketObjectLockConfig
bucket.Methods(http.MethodPut).HandlerFunc(
m.Handle(h.PutBucketObjectLockConfigHandler)).
Queries("object-lock", "").
Name("PutBucketObjectLockConfig")
// PutBucketTaggingHandler
bucket.Methods(http.MethodPut).HandlerFunc(
m.Handle(h.PutBucketTaggingHandler)).
Queries("tagging", "").
Name("PutBucketTagging")
// PutBucketVersioning
bucket.Methods(http.MethodPut).HandlerFunc(
m.Handle(h.PutBucketVersioningHandler)).
Queries("versioning", "").
Name("PutBucketVersioning")
// PutBucketNotification
bucket.Methods(http.MethodPut).HandlerFunc(
m.Handle(h.PutBucketNotificationHandler)).
Queries("notification", "").
Name("PutBucketNotification")
// CreateBucket
bucket.Methods(http.MethodPut).HandlerFunc(
m.Handle(h.CreateBucketHandler)).
Name("CreateBucket")
// HeadBucket
bucket.Methods(http.MethodHead).HandlerFunc(
m.Handle(h.HeadBucketHandler)).
Name("HeadBucket")
// PostPolicy
bucket.Methods(http.MethodPost).HeadersRegexp(hdrContentType, "multipart/form-data*").HandlerFunc(
m.Handle(h.PostObject)).
Name("PostObject")
// DeleteMultipleObjects
bucket.Methods(http.MethodPost).HandlerFunc(
m.Handle(h.DeleteMultipleObjectsHandler)).
Queries("delete", "").
Name("DeleteMultipleObjects")
// DeleteBucketPolicy
bucket.Methods(http.MethodDelete).HandlerFunc(
m.Handle(h.DeleteBucketPolicyHandler)).
Queries("policy", "").
Name("DeleteBucketPolicy")
// DeleteBucketLifecycle
bucket.Methods(http.MethodDelete).HandlerFunc(
m.Handle(h.DeleteBucketLifecycleHandler)).
Queries("lifecycle", "").
Name("DeleteBucketLifecycle")
// DeleteBucketEncryption
bucket.Methods(http.MethodDelete).HandlerFunc(
m.Handle(h.DeleteBucketEncryptionHandler)).
Queries("encryption", "").
Name("DeleteBucketEncryption")
// DeleteBucket
bucket.Methods(http.MethodDelete).HandlerFunc(
m.Handle(h.DeleteBucketHandler)).
Name("DeleteBucket")
}
// Root operation
// ListBuckets
api.Methods(http.MethodGet).Path(SlashSeparator).HandlerFunc(
m.Handle(h.ListBucketsHandler)).
Name("ListBuckets")
// S3 browser with signature v4 adds '//' for ListBuckets request, so rather
// than failing with UnknownAPIRequest we simply handle it for now.
api.Methods(http.MethodGet).Path(SlashSeparator + SlashSeparator).HandlerFunc(
m.Handle(h.ListBucketsHandler)).
Name("ListBuckets")
attachErrorHandler(api)
}
func named(name string, handlerFunc http.HandlerFunc) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
reqInfo := s3middleware.GetReqInfo(r.Context())
reqInfo.API = name
handlerFunc.ServeHTTP(w, r)
}
}
// If none of the http routes match respond with appropriate errors.
func errorResponseHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
reqInfo := s3middleware.GetReqInfo(ctx)
desc := fmt.Sprintf("Unknown API request at %s", r.URL.Path)
s3middleware.WriteErrorResponse(w, reqInfo, errors.Error{
Code: "UnknownAPIRequest",
Description: desc,
HTTPStatusCode: http.StatusBadRequest,
})
if log := s3middleware.GetReqLog(ctx); log != nil {
log.Error("request unmatched", zap.String("method", reqInfo.API))
}
}
// attachErrorHandler set NotFoundHandler and MethodNotAllowedHandler for chi.Router.
func attachErrorHandler(api *chi.Mux) {
errorHandler := http.HandlerFunc(errorResponseHandler)
// If none of the routes match, add default error handler routes
api.NotFound(named("NotFound", errorHandler))
api.MethodNotAllowed(named("MethodNotAllowed", errorHandler))
}
func bucketRouter(h Handler, log *zap.Logger) chi.Router {
bktRouter := chi.NewRouter()
bktRouter.Use(
s3middleware.AddBucketName(log),
s3middleware.WrapHandler(h.AppendCORSHeaders),
)
bktRouter.Mount("/", objectRouter(h, log))
bktRouter.Options("/", h.Preflight)
bktRouter.Head("/", named("HeadBucket", h.HeadBucketHandler))
// GET method handlers
bktRouter.Group(func(r chi.Router) {
r.Method(http.MethodGet, "/", NewHandlerFilter().
Add(NewFilter().
Queries("uploads").
Handler(named("ListMultipartUploads", h.ListMultipartUploadsHandler))).
Add(NewFilter().
Queries("location").
Handler(named("GetBucketLocation", h.GetBucketLocationHandler))).
Add(NewFilter().
Queries("policy").
Handler(named("GetBucketPolicy", h.GetBucketPolicyHandler))).
Add(NewFilter().
Queries("lifecycle").
Handler(named("GetBucketLifecycle", h.GetBucketLifecycleHandler))).
Add(NewFilter().
Queries("encryption").
Handler(named("GetBucketEncryption", h.GetBucketEncryptionHandler))).
Add(NewFilter().
Queries("cors").
Handler(named("GetBucketCors", h.GetBucketCorsHandler))).
Add(NewFilter().
Queries("acl").
Handler(named("GetBucketACL", h.GetBucketACLHandler))).
Add(NewFilter().
Queries("website").
Handler(named("GetBucketWebsite", h.GetBucketWebsiteHandler))).
Add(NewFilter().
Queries("accelerate").
Handler(named("GetBucketAccelerate", h.GetBucketAccelerateHandler))).
Add(NewFilter().
Queries("requestPayment").
Handler(named("GetBucketRequestPayment", h.GetBucketRequestPaymentHandler))).
Add(NewFilter().
Queries("logging").
Handler(named("GetBucketLogging", h.GetBucketLoggingHandler))).
Add(NewFilter().
Queries("replication").
Handler(named("GetBucketReplication", h.GetBucketReplicationHandler))).
Add(NewFilter().
Queries("tagging").
Handler(named("GetBucketTagging", h.GetBucketTaggingHandler))).
Add(NewFilter().
Queries("object-lock").
Handler(named("GetBucketObjectLockConfig", h.GetBucketObjectLockConfigHandler))).
Add(NewFilter().
Queries("versioning").
Handler(named("GetBucketVersioning", h.GetBucketVersioningHandler))).
Add(NewFilter().
Queries("notification").
Handler(named("GetBucketNotification", h.GetBucketNotificationHandler))).
Add(NewFilter().
Queries("events").
Handler(named("ListenBucketNotification", h.ListenBucketNotificationHandler))).
Add(NewFilter().
QueriesMatch("list-type", "2", "metadata", "true").
Handler(named("ListObjectsV2M", h.ListObjectsV2MHandler))).
Add(NewFilter().
QueriesMatch("list-type", "2").
Handler(named("ListObjectsV2", h.ListObjectsV2Handler))).
Add(NewFilter().
Queries("versions").
Handler(named("ListBucketObjectVersions", h.ListBucketObjectVersionsHandler))).
DefaultHandler(named("ListObjectsV1", h.ListObjectsV1Handler)))
})
// PUT method handlers
bktRouter.Group(func(r chi.Router) {
r.Method(http.MethodPut, "/", NewHandlerFilter().
Add(NewFilter().
Queries("cors").
Handler(named("PutBucketCors", h.PutBucketCorsHandler))).
Add(NewFilter().
Queries("acl").
Handler(named("PutBucketACL", h.PutBucketACLHandler))).
Add(NewFilter().
Queries("lifecycle").
Handler(named("PutBucketLifecycle", h.PutBucketLifecycleHandler))).
Add(NewFilter().
Queries("encryption").
Handler(named("PutBucketEncryption", h.PutBucketEncryptionHandler))).
Add(NewFilter().
Queries("policy").
Handler(named("PutBucketPolicy", h.PutBucketPolicyHandler))).
Add(NewFilter().
Queries("object-lock").
Handler(named("PutBucketObjectLockConfig", h.PutBucketObjectLockConfigHandler))).
Add(NewFilter().
Queries("tagging").
Handler(named("PutBucketTagging", h.PutBucketTaggingHandler))).
Add(NewFilter().
Queries("versioning").
Handler(named("PutBucketVersioning", h.PutBucketVersioningHandler))).
Add(NewFilter().
Queries("notification").
Handler(named("PutBucketNotification", h.PutBucketNotificationHandler))).
DefaultHandler(named("CreateBucket", h.CreateBucketHandler)))
})
// POST method handlers
bktRouter.Group(func(r chi.Router) {
r.Method(http.MethodPost, "/", NewHandlerFilter().
Add(NewFilter().
Queries("delete").
Handler(named("DeleteMultipleObjects", h.DeleteMultipleObjectsHandler))).
// TODO: consider adding a filter for the default handler that matches the hdrContentType header against "multipart/form-data*"
DefaultHandler(named("PostObject", h.PostObject)))
})
// DELETE method handlers
bktRouter.Group(func(r chi.Router) {
r.Method(http.MethodDelete, "/", NewHandlerFilter().
Add(NewFilter().
Queries("cors").
Handler(named("DeleteBucketCors", h.DeleteBucketCorsHandler))).
Add(NewFilter().
Queries("website").
Handler(named("DeleteBucketWebsite", h.DeleteBucketWebsiteHandler))).
Add(NewFilter().
Queries("tagging").
Handler(named("DeleteBucketTagging", h.DeleteBucketTaggingHandler))).
Add(NewFilter().
Queries("policy").
Handler(named("PutBucketPolicy", h.PutBucketPolicyHandler))).
Add(NewFilter().
Queries("lifecycle").
Handler(named("PutBucketLifecycle", h.PutBucketLifecycleHandler))).
Add(NewFilter().
Queries("encryption").
Handler(named("DeleteBucketEncryption", h.DeleteBucketEncryptionHandler))).
DefaultHandler(named("DeleteBucket", h.DeleteBucketHandler)))
})
attachErrorHandler(bktRouter)
return bktRouter
}
func objectRouter(h Handler, l *zap.Logger) chi.Router {
objRouter := chi.NewRouter()
objRouter.Use(s3middleware.AddObjectName(l))
objRouter.Head("/*", named("HeadObject", h.HeadObjectHandler))
// GET method handlers
objRouter.Group(func(r chi.Router) {
r.Method(http.MethodGet, "/*", NewHandlerFilter().
Add(NewFilter().
Queries("uploadId").
Handler(named("ListParts", h.ListPartsHandler))).
Add(NewFilter().
Queries("acl").
Handler(named("GetObjectACL", h.GetObjectACLHandler))).
Add(NewFilter().
Queries("tagging").
Handler(named("GetObjectTagging", h.GetObjectTaggingHandler))).
Add(NewFilter().
Queries("retention").
Handler(named("GetObjectRetention", h.GetObjectRetentionHandler))).
Add(NewFilter().
Queries("legal-hold").
Handler(named("GetObjectLegalHold", h.GetObjectLegalHoldHandler))).
Add(NewFilter().
Queries("attributes").
Handler(named("GetObjectAttributes", h.GetObjectAttributesHandler))).
DefaultHandler(named("GetObject", h.GetObjectHandler)))
})
// PUT method handlers
objRouter.Group(func(r chi.Router) {
r.Method(http.MethodPut, "/*", NewHandlerFilter().
Add(NewFilter().
Headers(AmzCopySource).
Queries("partNumber", "uploadId").
Handler(named("UploadPartCopy", h.UploadPartCopy))).
Add(NewFilter().
Queries("partNumber", "uploadId").
Handler(named("UploadPart", h.UploadPartHandler))).
Add(NewFilter().
Queries("acl").
Handler(named("PutObjectACL", h.PutObjectACLHandler))).
Add(NewFilter().
Queries("tagging").
Handler(named("PutObjectTagging", h.PutObjectTaggingHandler))).
Add(NewFilter().
Headers(AmzCopySource).
Handler(named("CopyObject", h.CopyObjectHandler))).
Add(NewFilter().
Queries("retention").
Handler(named("PutObjectRetention", h.PutObjectRetentionHandler))).
Add(NewFilter().
Queries("legal-hold").
Handler(named("PutObjectLegalHold", h.PutObjectLegalHoldHandler))).
DefaultHandler(named("PutObject", h.PutObjectHandler)))
})
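// Note: the PUT filters above are order-sensitive. A request that carries both the
// x-amz-copy-source header and the partNumber/uploadId query parameters satisfies the
// UploadPart and CopyObject filters too, so UploadPartCopy must be registered first.
// Illustrative request (values made up) that should resolve to UploadPartCopy:
//
//	r := httptest.NewRequest(http.MethodPut, "/bucket/object?partNumber=1&uploadId=abc", nil)
//	r.Header.Set(AmzCopySource, "/src-bucket/src-object")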
// POST method handlers
objRouter.Group(func(r chi.Router) {
r.Method(http.MethodPost, "/*", NewHandlerFilter().
Add(NewFilter().
Queries("uploadId").
Handler(named("CompleteMultipartUpload", h.CompleteMultipartUploadHandler))).
Add(NewFilter().
Queries("uploads").
Handler(named("CreateMultipartUpload", h.CreateMultipartUploadHandler))).
DefaultHandler(named("SelectObjectContent", h.SelectObjectContentHandler)))
})
// DELETE method handlers
objRouter.Group(func(r chi.Router) {
r.Method(http.MethodDelete, "/*", NewHandlerFilter().
Add(NewFilter().
Queries("uploadId").
Handler(named("AbortMultipartUpload", h.AbortMultipartUploadHandler))).
Add(NewFilter().
Queries("tagging").
Handler(named("DeleteObjectTagging", h.DeleteObjectTaggingHandler))).
DefaultHandler(named("DeleteObject", h.DeleteObjectHandler)))
})
attachErrorHandler(objRouter)
return objRouter
}

api/router_filter.go Normal file

@ -0,0 +1,141 @@
package api
import (
"fmt"
"net/http"
)
type HandlerFilters struct {
filters []Filter
defaultHandler http.Handler
}
type Filter struct {
queries []Pair
headers []Pair
h http.Handler
}
type Pair struct {
Key string
Value string
}
func NewHandlerFilter() *HandlerFilters {
return &HandlerFilters{}
}
func NewFilter() *Filter {
return &Filter{}
}
func (hf *HandlerFilters) Add(filter *Filter) *HandlerFilters {
hf.filters = append(hf.filters, *filter)
return hf
}
// HeadersMatch adds a matcher for header values.
// It accepts a sequence of key/value pairs and panics if the number of parameters is odd.
// Only exact matching is supported.
// If the value is an empty string, it matches any value as long as the key is set.
func (f *Filter) HeadersMatch(pairs ...string) *Filter {
length := len(pairs)
if length%2 != 0 {
panic(fmt.Errorf("filter headers: number of parameters must be multiple of 2, got %v", pairs))
}
for i := 0; i < length; i += 2 {
f.headers = append(f.headers, Pair{
Key: pairs[i],
Value: pairs[i+1],
})
}
return f
}
// Headers is similar to HeadersMatch but accepts only header keys; values are set to empty strings internally.
func (f *Filter) Headers(headers ...string) *Filter {
for _, header := range headers {
f.headers = append(f.headers, Pair{
Key: header,
Value: "",
})
}
return f
}
func (f *Filter) Handler(handler http.HandlerFunc) *Filter {
f.h = handler
return f
}
// QueriesMatch adds a matcher for URL query values.
// It accepts a sequence of key/value pairs and panics if the number of parameters is odd.
// Only exact matching is supported.
// If the value is an empty string, it matches any value as long as the key is set.
func (f *Filter) QueriesMatch(pairs ...string) *Filter {
length := len(pairs)
if length%2 != 0 {
panic(fmt.Errorf("filter headers: number of parameters must be multiple of 2, got %v", pairs))
}
for i := 0; i < length; i += 2 {
f.queries = append(f.queries, Pair{
Key: pairs[i],
Value: pairs[i+1],
})
}
return f
}
// Queries is similar to QueriesMatch but accepts only query keys; values are set to empty strings internally.
func (f *Filter) Queries(queries ...string) *Filter {
for _, query := range queries {
f.queries = append(f.queries, Pair{
Key: query,
Value: "",
})
}
return f
}
func (hf *HandlerFilters) DefaultHandler(handler http.HandlerFunc) *HandlerFilters {
hf.defaultHandler = handler
return hf
}
func (hf *HandlerFilters) ServeHTTP(w http.ResponseWriter, r *http.Request) {
if handler := hf.match(r); handler != nil {
handler.ServeHTTP(w, r)
return
}
hf.defaultHandler.ServeHTTP(w, r)
}
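// match returns the handler of the first registered filter whose header and
// query conditions are all satisfied by the request, or nil if no filter matches.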
func (hf *HandlerFilters) match(r *http.Request) http.Handler {
LOOP:
for _, filter := range hf.filters {
for _, header := range filter.headers {
hdrVals := r.Header.Values(header.Key)
if len(hdrVals) == 0 || header.Value != "" && header.Value != hdrVals[0] {
continue LOOP
}
}
for _, query := range filter.queries {
queryVal := r.URL.Query().Get(query.Key)
if !r.URL.Query().Has(query.Key) || query.Value != "" && query.Value != queryVal {
continue LOOP
}
}
return filter.h
}
return nil
}
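Filters are evaluated in the order they are added, so more specific filters must be registered before less specific ones, and the default handler is the fallback when nothing matches. A minimal sketch of wiring HandlerFilters into a chi route (hypothetical example, not gateway code; exampleBucketRoute and its handlers are made up):

// Hypothetical usage sketch for HandlerFilters; not part of the gateway.
package api

import (
	"net/http"

	"github.com/go-chi/chi/v5"
)

func exampleBucketRoute() chi.Router {
	listUploads := http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusOK) // would serve GET /bucket?uploads
	})
	listObjects := http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusOK) // fallback for a plain GET /bucket
	})

	router := chi.NewRouter()
	router.Method(http.MethodGet, "/{bucket}", NewHandlerFilter().
		Add(NewFilter().
			Queries("uploads").
			Handler(listUploads)).
		DefaultHandler(listObjects))
	return router
}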

api/router_filter_test.go Normal file

@ -0,0 +1,68 @@
package api
import (
"net/http"
"net/url"
"testing"
"github.com/stretchr/testify/require"
)
func TestFilter(t *testing.T) {
key1, val1 := "key1", "val1"
key2, val2 := "key2", "val2"
key3, val3 := "key3", "val3"
anyVal := ""
notNilHandler := http.HandlerFunc(func(http.ResponseWriter, *http.Request) {})
t.Run("queries", func(t *testing.T) {
f := NewHandlerFilter().
Add(NewFilter().
QueriesMatch(key1, val1, key2, anyVal).
Queries(key3).
Handler(notNilHandler))
r, err := http.NewRequest(http.MethodGet, "https://localhost:8084", nil)
require.NoError(t, err)
query := make(url.Values)
query.Set(key1, val1)
query.Set(key2, val2)
query.Set(key3, val3)
r.URL.RawQuery = query.Encode()
h := f.match(r)
require.NotNil(t, h)
query.Set(key1, val2)
r.URL.RawQuery = query.Encode()
h = f.match(r)
require.Nil(t, h)
})
t.Run("headers", func(t *testing.T) {
f := NewHandlerFilter().
Add(NewFilter().
HeadersMatch(key1, val1, key2, anyVal).
Headers(key3).
Handler(notNilHandler))
r, err := http.NewRequest(http.MethodGet, "https://localhost:8084", nil)
require.NoError(t, err)
r.Header.Set(key1, val1)
r.Header.Set(key2, val2)
r.Header.Set(key3, val3)
h := f.match(r)
require.NotNil(t, h)
r.Header.Set(key1, val2)
h = f.match(r)
require.Nil(t, h)
})
}

api/router_mock_test.go Normal file

@ -0,0 +1,389 @@
package api
import (
"context"
"encoding/json"
"net/http"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
"github.com/stretchr/testify/require"
)
type centerMock struct {
}
func (c *centerMock) Authenticate(*http.Request) (*auth.Box, error) {
return &auth.Box{}, nil
}
type handlerMock struct {
t *testing.T
}
type handlerResult struct {
Method string
ReqInfo *middleware.ReqInfo
}
func (h *handlerMock) HeadObjectHandler(http.ResponseWriter, *http.Request) {
//TODO implement me
panic("implement me")
}
func (h *handlerMock) GetObjectACLHandler(http.ResponseWriter, *http.Request) {
//TODO implement me
panic("implement me")
}
func (h *handlerMock) PutObjectACLHandler(http.ResponseWriter, *http.Request) {
//TODO implement me
panic("implement me")
}
func (h *handlerMock) GetObjectTaggingHandler(http.ResponseWriter, *http.Request) {
//TODO implement me
panic("implement me")
}
func (h *handlerMock) PutObjectTaggingHandler(http.ResponseWriter, *http.Request) {
//TODO implement me
panic("implement me")
}
func (h *handlerMock) DeleteObjectTaggingHandler(http.ResponseWriter, *http.Request) {
//TODO implement me
panic("implement me")
}
func (h *handlerMock) SelectObjectContentHandler(http.ResponseWriter, *http.Request) {
//TODO implement me
panic("implement me")
}
func (h *handlerMock) GetObjectRetentionHandler(http.ResponseWriter, *http.Request) {
//TODO implement me
panic("implement me")
}
func (h *handlerMock) GetObjectLegalHoldHandler(http.ResponseWriter, *http.Request) {
//TODO implement me
panic("implement me")
}
func (h *handlerMock) GetObjectHandler(http.ResponseWriter, *http.Request) {
//TODO implement me
panic("implement me")
}
func (h *handlerMock) GetObjectAttributesHandler(http.ResponseWriter, *http.Request) {
//TODO implement me
panic("implement me")
}
func (h *handlerMock) CopyObjectHandler(http.ResponseWriter, *http.Request) {
//TODO implement me
panic("implement me")
}
func (h *handlerMock) PutObjectRetentionHandler(http.ResponseWriter, *http.Request) {
//TODO implement me
panic("implement me")
}
func (h *handlerMock) PutObjectLegalHoldHandler(http.ResponseWriter, *http.Request) {
//TODO implement me
panic("implement me")
}
func (h *handlerMock) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
res := &handlerResult{
Method: "PutObject",
ReqInfo: middleware.GetReqInfo(r.Context()),
}
h.writeResponse(w, res)
}
func (h *handlerMock) DeleteObjectHandler(http.ResponseWriter, *http.Request) {
//TODO implement me
panic("implement me")
}
func (h *handlerMock) GetBucketLocationHandler(http.ResponseWriter, *http.Request) {
//TODO implement me
panic("implement me")
}
func (h *handlerMock) GetBucketPolicyHandler(http.ResponseWriter, *http.Request) {
//TODO implement me
panic("implement me")
}
func (h *handlerMock) GetBucketLifecycleHandler(http.ResponseWriter, *http.Request) {
//TODO implement me
panic("implement me")
}
func (h *handlerMock) GetBucketEncryptionHandler(http.ResponseWriter, *http.Request) {
//TODO implement me
panic("implement me")
}
func (h *handlerMock) GetBucketACLHandler(http.ResponseWriter, *http.Request) {
//TODO implement me
panic("implement me")
}
func (h *handlerMock) PutBucketACLHandler(http.ResponseWriter, *http.Request) {
//TODO implement me
panic("implement me")
}
func (h *handlerMock) GetBucketCorsHandler(http.ResponseWriter, *http.Request) {
//TODO implement me
panic("implement me")
}
func (h *handlerMock) PutBucketCorsHandler(http.ResponseWriter, *http.Request) {
//TODO implement me
panic("implement me")
}
func (h *handlerMock) DeleteBucketCorsHandler(http.ResponseWriter, *http.Request) {
//TODO implement me
panic("implement me")
}
func (h *handlerMock) GetBucketWebsiteHandler(http.ResponseWriter, *http.Request) {
//TODO implement me
panic("implement me")
}
func (h *handlerMock) GetBucketAccelerateHandler(http.ResponseWriter, *http.Request) {
//TODO implement me
panic("implement me")
}
func (h *handlerMock) GetBucketRequestPaymentHandler(http.ResponseWriter, *http.Request) {
//TODO implement me
panic("implement me")
}
func (h *handlerMock) GetBucketLoggingHandler(http.ResponseWriter, *http.Request) {
//TODO implement me
panic("implement me")
}
func (h *handlerMock) GetBucketReplicationHandler(http.ResponseWriter, *http.Request) {
//TODO implement me
panic("implement me")
}
func (h *handlerMock) GetBucketTaggingHandler(http.ResponseWriter, *http.Request) {
//TODO implement me
panic("implement me")
}
func (h *handlerMock) DeleteBucketWebsiteHandler(http.ResponseWriter, *http.Request) {
//TODO implement me
panic("implement me")
}
func (h *handlerMock) DeleteBucketTaggingHandler(http.ResponseWriter, *http.Request) {
//TODO implement me
panic("implement me")
}
func (h *handlerMock) GetBucketObjectLockConfigHandler(http.ResponseWriter, *http.Request) {
//TODO implement me
panic("implement me")
}
func (h *handlerMock) GetBucketVersioningHandler(http.ResponseWriter, *http.Request) {
//TODO implement me
panic("implement me")
}
func (h *handlerMock) GetBucketNotificationHandler(http.ResponseWriter, *http.Request) {
//TODO implement me
panic("implement me")
}
func (h *handlerMock) ListenBucketNotificationHandler(http.ResponseWriter, *http.Request) {
//TODO implement me
panic("implement me")
}
func (h *handlerMock) ListObjectsV2MHandler(http.ResponseWriter, *http.Request) {
//TODO implement me
panic("implement me")
}
func (h *handlerMock) ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) {
res := &handlerResult{
Method: "ListObjectsV2",
ReqInfo: middleware.GetReqInfo(r.Context()),
}
h.writeResponse(w, res)
}
func (h *handlerMock) ListBucketObjectVersionsHandler(http.ResponseWriter, *http.Request) {
//TODO implement me
panic("implement me")
}
func (h *handlerMock) ListObjectsV1Handler(w http.ResponseWriter, r *http.Request) {
res := &handlerResult{
Method: "ListObjectsV1",
ReqInfo: middleware.GetReqInfo(r.Context()),
}
h.writeResponse(w, res)
}
func (h *handlerMock) PutBucketLifecycleHandler(http.ResponseWriter, *http.Request) {
//TODO implement me
panic("implement me")
}
func (h *handlerMock) PutBucketEncryptionHandler(http.ResponseWriter, *http.Request) {
//TODO implement me
panic("implement me")
}
func (h *handlerMock) PutBucketPolicyHandler(http.ResponseWriter, *http.Request) {
//TODO implement me
panic("implement me")
}
func (h *handlerMock) PutBucketObjectLockConfigHandler(http.ResponseWriter, *http.Request) {
//TODO implement me
panic("implement me")
}
func (h *handlerMock) PutBucketTaggingHandler(http.ResponseWriter, *http.Request) {
//TODO implement me
panic("implement me")
}
func (h *handlerMock) PutBucketVersioningHandler(http.ResponseWriter, *http.Request) {
//TODO implement me
panic("implement me")
}
func (h *handlerMock) PutBucketNotificationHandler(http.ResponseWriter, *http.Request) {
//TODO implement me
panic("implement me")
}
func (h *handlerMock) CreateBucketHandler(http.ResponseWriter, *http.Request) {
//TODO implement me
panic("implement me")
}
func (h *handlerMock) HeadBucketHandler(http.ResponseWriter, *http.Request) {
//TODO implement me
panic("implement me")
}
func (h *handlerMock) PostObject(http.ResponseWriter, *http.Request) {
//TODO implement me
panic("implement me")
}
func (h *handlerMock) DeleteMultipleObjectsHandler(http.ResponseWriter, *http.Request) {
//TODO implement me
panic("implement me")
}
func (h *handlerMock) DeleteBucketPolicyHandler(http.ResponseWriter, *http.Request) {
//TODO implement me
panic("implement me")
}
func (h *handlerMock) DeleteBucketLifecycleHandler(http.ResponseWriter, *http.Request) {
//TODO implement me
panic("implement me")
}
func (h *handlerMock) DeleteBucketEncryptionHandler(http.ResponseWriter, *http.Request) {
//TODO implement me
panic("implement me")
}
func (h *handlerMock) DeleteBucketHandler(http.ResponseWriter, *http.Request) {
//TODO implement me
panic("implement me")
}
func (h *handlerMock) ListBucketsHandler(http.ResponseWriter, *http.Request) {
//TODO implement me
panic("implement me")
}
func (h *handlerMock) Preflight(http.ResponseWriter, *http.Request) {
//TODO implement me
panic("implement me")
}
func (h *handlerMock) AppendCORSHeaders(http.ResponseWriter, *http.Request) {
}
func (h *handlerMock) CreateMultipartUploadHandler(http.ResponseWriter, *http.Request) {
//TODO implement me
panic("implement me")
}
func (h *handlerMock) UploadPartHandler(w http.ResponseWriter, r *http.Request) {
res := &handlerResult{
Method: "UploadPart",
ReqInfo: middleware.GetReqInfo(r.Context()),
}
h.writeResponse(w, res)
}
func (h *handlerMock) UploadPartCopy(http.ResponseWriter, *http.Request) {
//TODO implement me
panic("implement me")
}
func (h *handlerMock) CompleteMultipartUploadHandler(http.ResponseWriter, *http.Request) {
//TODO implement me
panic("implement me")
}
func (h *handlerMock) AbortMultipartUploadHandler(http.ResponseWriter, *http.Request) {
//TODO implement me
panic("implement me")
}
func (h *handlerMock) ListPartsHandler(http.ResponseWriter, *http.Request) {
//TODO implement me
panic("implement me")
}
func (h *handlerMock) ListMultipartUploadsHandler(w http.ResponseWriter, r *http.Request) {
res := &handlerResult{
Method: "ListMultipartUploads",
ReqInfo: middleware.GetReqInfo(r.Context()),
}
h.writeResponse(w, res)
}
func (h *handlerMock) ResolveBucket(context.Context, string) (*data.BucketInfo, error) {
return &data.BucketInfo{}, nil
}
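// writeResponse marshals the mock handler result to JSON so the router tests
// can assert which handler was invoked and with what request info.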
func (h *handlerMock) writeResponse(w http.ResponseWriter, resp *handlerResult) {
respData, err := json.Marshal(resp)
require.NoError(h.t, err)
_, err = w.Write(respData)
require.NoError(h.t, err)
}

api/router_test.go Normal file

@ -0,0 +1,89 @@
package api
import (
"encoding/json"
"fmt"
"io"
"net/http"
"net/http/httptest"
"net/url"
"testing"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/metrics"
"github.com/go-chi/chi/v5"
"github.com/go-chi/chi/v5/middleware"
"github.com/stretchr/testify/require"
"go.uber.org/zap/zaptest"
)
func TestRouterUploadPart(t *testing.T) {
chiRouter := prepareRouter(t)
w := httptest.NewRecorder()
r := httptest.NewRequest(http.MethodPut, "/dkirillov/fix-object", nil)
query := make(url.Values)
query.Set("uploadId", "some-id")
query.Set("partNumber", "1")
r.URL.RawQuery = query.Encode()
chiRouter.ServeHTTP(w, r)
resp := readResponse(t, w)
require.Equal(t, "UploadPart", resp.Method)
}
func TestRouterListMultipartUploads(t *testing.T) {
chiRouter := prepareRouter(t)
w := httptest.NewRecorder()
r := httptest.NewRequest(http.MethodGet, "/test-bucket", nil)
query := make(url.Values)
query.Set("uploads", "")
r.URL.RawQuery = query.Encode()
chiRouter.ServeHTTP(w, r)
resp := readResponse(t, w)
require.Equal(t, "ListMultipartUploads", resp.Method)
}
func TestRouterObjectWithSlashes(t *testing.T) {
chiRouter := prepareRouter(t)
bktName, objName := "dkirillov", "/fix/object"
target := fmt.Sprintf("/%s/%s", bktName, objName)
w := httptest.NewRecorder()
r := httptest.NewRequest(http.MethodPut, target, nil)
chiRouter.ServeHTTP(w, r)
resp := readResponse(t, w)
require.Equal(t, "PutObject", resp.Method)
require.Equal(t, objName, resp.ReqInfo.ObjectName)
}
func prepareRouter(t *testing.T) *chi.Mux {
throttleOps := middleware.ThrottleOpts{
Limit: 10,
BacklogTimeout: 30 * time.Second,
}
handleMock := &handlerMock{t: t}
cntrMock := &centerMock{}
log := zaptest.NewLogger(t)
metric := &metrics.AppMetrics{}
chiRouter := chi.NewRouter()
AttachChi(chiRouter, nil, throttleOps, handleMock, cntrMock, log, metric)
return chiRouter
}
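// readResponse decodes the JSON body written by handlerMock.writeResponse,
// so tests can check the resolved method name and request info.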
func readResponse(t *testing.T, w *httptest.ResponseRecorder) handlerResult {
var res handlerResult
resData, err := io.ReadAll(w.Result().Body)
require.NoError(t, err)
err = json.Unmarshal(resData, &res)
require.NoErrorf(t, err, "actual body: '%s'", string(resData))
return res
}


@ -11,6 +11,8 @@ import (
"os"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/acl"
sessionv2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/cache"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/tokens"
@ -105,7 +107,21 @@ type (
Lifetime time.Duration
AwsCliCredentialsFile string
ContainerPolicies ContainerPolicies
UpdateCreds *UpdateOptions
}
// UpdateSecretOptions contains options to pass to the Agent.UpdateSecret method.
UpdateSecretOptions struct {
FrostFSKey *keys.PrivateKey
GatesPublicKeys []*keys.PublicKey
Address oid.Address
GatePrivateKey *keys.PrivateKey
}
tokenUpdateOptions struct {
frostFSKey *keys.PrivateKey
gatesPublicKeys []*keys.PublicKey
lifetime lifetimeOptions
box *accessbox.Box
}
// ContainerOptions groups parameters of auth container to put the secret into.
@ -136,8 +152,8 @@ type lifetimeOptions struct {
type (
issuingResult struct {
AccessKeyID string `json:"access_key_id"`
InitialAccessKeyID string `json:"initial_access_key_id"`
AccessKeyID string `json:"access_key_id"`
SecretAccessKey string `json:"secret_access_key"`
OwnerPrivateKey string `json:"owner_private_key"`
WalletPublicKey string `json:"wallet_public_key"`
@ -237,12 +253,7 @@ func (a *Agent) IssueSecret(ctx context.Context, w io.Writer, options *IssueSecr
return fmt.Errorf("create tokens: %w", err)
}
var secret []byte
if options.UpdateCreds != nil {
secret = options.UpdateCreds.SecretAccessKey
}
box, secrets, err := accessbox.PackTokens(gatesData, secret)
box, secrets, err := accessbox.PackTokens(gatesData, nil)
if err != nil {
return fmt.Errorf("pack tokens: %w", err)
}
@ -261,24 +272,15 @@ func (a *Agent) IssueSecret(ctx context.Context, w io.Writer, options *IssueSecr
creds := tokens.New(a.frostFS, secrets.EphemeralKey, cache.DefaultAccessBoxConfig(a.log))
var addr oid.Address
var oldAddr oid.Address
if options.UpdateCreds != nil {
oldAddr = options.UpdateCreds.Address
addr, err = creds.Update(ctx, oldAddr, idOwner, box, lifetime.Exp, options.GatesPublicKeys...)
} else {
addr, err = creds.Put(ctx, id, idOwner, box, lifetime.Exp, options.GatesPublicKeys...)
oldAddr = addr
}
addr, err := creds.Put(ctx, id, idOwner, box, lifetime.Exp, options.GatesPublicKeys...)
if err != nil {
return fmt.Errorf("failed to put creds: %w", err)
}
accessKeyID := addr.Container().EncodeToString() + "0" + addr.Object().EncodeToString()
accessKeyID := accessKeyIDFromAddr(addr)
ir := &issuingResult{
InitialAccessKeyID: accessKeyID,
AccessKeyID: accessKeyID,
InitialAccessKeyID: oldAddr.Container().EncodeToString() + "0" + oldAddr.Object().EncodeToString(),
SecretAccessKey: secrets.AccessKey,
OwnerPrivateKey: hex.EncodeToString(secrets.EphemeralKey.Bytes()),
WalletPublicKey: hex.EncodeToString(options.FrostFSKey.PublicKey().Bytes()),
@ -309,6 +311,73 @@ func (a *Agent) IssueSecret(ctx context.Context, w io.Writer, options *IssueSecr
return nil
}
// UpdateSecret updates an auth token (changes the list of gates that can use the credential), puts a new credential version to the FrostFS network and writes the result to io.Writer.
func (a *Agent) UpdateSecret(ctx context.Context, w io.Writer, options *UpdateSecretOptions) error {
creds := tokens.New(a.frostFS, options.GatePrivateKey, cache.DefaultAccessBoxConfig(a.log))
box, err := creds.GetBox(ctx, options.Address)
if err != nil {
return fmt.Errorf("get accessbox: %w", err)
}
secret, err := hex.DecodeString(box.Gate.AccessKey)
if err != nil {
return fmt.Errorf("failed to decode secret key access box: %w", err)
}
lifetime := getLifetimeFromGateData(box.Gate)
tokenOptions := tokenUpdateOptions{
frostFSKey: options.FrostFSKey,
gatesPublicKeys: options.GatesPublicKeys,
lifetime: lifetime,
box: box,
}
gatesData, err := formTokensToUpdate(tokenOptions)
if err != nil {
return fmt.Errorf("create tokens: %w", err)
}
updatedBox, secrets, err := accessbox.PackTokens(gatesData, secret)
if err != nil {
return fmt.Errorf("pack tokens: %w", err)
}
var idOwner user.ID
user.IDFromKey(&idOwner, options.FrostFSKey.PrivateKey.PublicKey)
a.log.Info("update access cred object into FrostFS",
zap.Stringer("owner_tkn", idOwner))
oldAddr := options.Address
addr, err := creds.Update(ctx, oldAddr, idOwner, updatedBox, lifetime.Exp, options.GatesPublicKeys...)
if err != nil {
return fmt.Errorf("failed to update creds: %w", err)
}
ir := &issuingResult{
AccessKeyID: accessKeyIDFromAddr(addr),
InitialAccessKeyID: accessKeyIDFromAddr(oldAddr),
SecretAccessKey: secrets.AccessKey,
OwnerPrivateKey: hex.EncodeToString(secrets.EphemeralKey.Bytes()),
WalletPublicKey: hex.EncodeToString(options.FrostFSKey.PublicKey().Bytes()),
ContainerID: addr.Container().EncodeToString(),
}
enc := json.NewEncoder(w)
enc.SetIndent("", " ")
return enc.Encode(ir)
}
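// getLifetimeFromGateData extracts the iat/exp epochs from the bearer token stored
// in the access box, so the updated credentials keep the original lifetime.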
func getLifetimeFromGateData(gateData *accessbox.GateData) lifetimeOptions {
var btokenv2 acl.BearerToken
gateData.BearerToken.WriteToV2(&btokenv2)
return lifetimeOptions{
Iat: btokenv2.GetBody().GetLifetime().GetIat(),
Exp: btokenv2.GetBody().GetLifetime().GetExp(),
}
}
// ObtainSecret receives an existing secret access key from FrostFS and
// writes it to io.Writer.
func (a *Agent) ObtainSecret(ctx context.Context, w io.Writer, options *ObtainSecretOptions) error {
@ -404,7 +473,9 @@ func buildBearerTokens(key *keys.PrivateKey, impersonate bool, table *eacl.Table
func buildSessionToken(key *keys.PrivateKey, lifetime lifetimeOptions, ctx sessionTokenContext, gateKey *keys.PublicKey) (*session.Container, error) {
tok := new(session.Container)
tok.ForVerb(ctx.verb)
tok.AppliedTo(ctx.containerID)
if !ctx.containerID.Equals(cid.ID{}) {
tok.ApplyOnlyTo(ctx.containerID)
}
tok.SetID(uuid.New())
tok.SetAuthKey((*frostfsecdsa.PublicKey)(gateKey))
@ -465,3 +536,55 @@ func createTokens(options *IssueSecretOptions, lifetime lifetimeOptions) ([]*acc
return gates, nil
}
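// formTokensToUpdate rebuilds bearer and session tokens from an existing access box
// for the new set of gate public keys, preserving the original lifetime, impersonation
// flag, eACL table and container session rules.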
func formTokensToUpdate(options tokenUpdateOptions) ([]*accessbox.GateData, error) {
btoken := options.box.Gate.BearerToken
table := btoken.EACLTable()
bearerTokens, err := buildBearerTokens(options.frostFSKey, btoken.Impersonate(), &table, options.lifetime, options.gatesPublicKeys)
if err != nil {
return nil, fmt.Errorf("failed to build bearer tokens: %w", err)
}
gates := make([]*accessbox.GateData, len(options.gatesPublicKeys))
for i, gateKey := range options.gatesPublicKeys {
gates[i] = accessbox.NewGateData(gateKey, bearerTokens[i])
}
sessionRules := make([]sessionTokenContext, len(options.box.Gate.SessionTokens))
for i, token := range options.box.Gate.SessionTokens {
var stoken sessionv2.Token
token.WriteToV2(&stoken)
sessionCtx, ok := stoken.GetBody().GetContext().(*sessionv2.ContainerSessionContext)
if !ok {
return nil, fmt.Errorf("get context from session token: %w", err)
}
var cnrID cid.ID
if cnrIDv2 := sessionCtx.ContainerID(); cnrIDv2 != nil {
if err = cnrID.ReadFromV2(*cnrIDv2); err != nil {
return nil, fmt.Errorf("read from v2 container id: %w", err)
}
}
sessionRules[i] = sessionTokenContext{
verb: session.ContainerVerb(sessionCtx.Verb()),
containerID: cnrID,
}
}
sessionTokens, err := buildSessionTokens(options.frostFSKey, options.lifetime, sessionRules, options.gatesPublicKeys)
if err != nil {
return nil, fmt.Errorf("failed to biuild session token: %w", err)
}
for i, sessionTkns := range sessionTokens {
gates[i].SessionTokens = sessionTkns
}
return gates, nil
}
func accessKeyIDFromAddr(addr oid.Address) string {
return addr.Container().EncodeToString() + "0" + addr.Object().EncodeToString()
}
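The access key id is just the credential address with the "/" separator replaced by a literal "0": the container and object ids are base58-encoded, and that alphabet should never produce a "0", so the first "0" unambiguously splits the two parts. A minimal sketch of the reverse conversion, mirroring the strings.Replace the CLI uses when parsing --access-key-id (hypothetical helper; assumes the standard strings package is in scope):

// addrFromAccessKeyID is a hypothetical inverse of accessKeyIDFromAddr.
func addrFromAccessKeyID(accessKeyID string) (oid.Address, error) {
	var addr oid.Address
	// "<cnrID>0<objID>" -> "<cnrID>/<objID>", the form oid.Address can decode.
	if err := addr.DecodeString(strings.Replace(accessKeyID, "0", "/", 1)); err != nil {
		return addr, fmt.Errorf("failed to parse creds address: %w", err)
	}
	return addr, nil
}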


@ -2,758 +2,19 @@ package main
import (
"context"
"crypto/ecdsa"
"encoding/hex"
"encoding/json"
"fmt"
"os"
"os/signal"
"runtime"
"strings"
"syscall"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/authmate"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/version"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/wallet"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/spf13/viper"
"github.com/urfave/cli/v2"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/cmd/s3-authmate/modules"
)
const (
poolDialTimeout = 5 * time.Second
poolHealthcheckTimeout = 5 * time.Second
poolRebalanceInterval = 30 * time.Second
poolStreamTimeout = 10 * time.Second
// a month.
defaultLifetime = 30 * 24 * time.Hour
defaultPresignedLifetime = 12 * time.Hour
)
type PoolConfig struct {
Key *ecdsa.PrivateKey
Address string
DialTimeout time.Duration
HealthcheckTimeout time.Duration
StreamTimeout time.Duration
RebalanceInterval time.Duration
}
var (
walletPathFlag string
accountAddressFlag string
peerAddressFlag string
eaclRulesFlag string
disableImpersonateFlag bool
gateWalletPathFlag string
gateAccountAddressFlag string
accessKeyIDFlag string
containerIDFlag string
containerFriendlyName string
containerPlacementPolicy string
gatesPublicKeysFlag cli.StringSlice
logEnabledFlag bool
logDebugEnabledFlag bool
sessionTokenFlag string
lifetimeFlag time.Duration
endpointFlag string
bucketFlag string
objectFlag string
methodFlag string
profileFlag string
regionFlag string
secretAccessKeyFlag string
containerPolicies string
awcCliCredFile string
timeoutFlag time.Duration
// pool timeouts flag.
poolDialTimeoutFlag time.Duration
poolHealthcheckTimeoutFlag time.Duration
poolRebalanceIntervalFlag time.Duration
poolStreamTimeoutFlag time.Duration
)
const (
envWalletPassphrase = "wallet.passphrase"
envWalletGatePassphrase = "wallet.gate.passphrase"
envSecretAccessKey = "secret.access.key"
)
var zapConfig = zap.Config{
Development: true,
Encoding: "console",
Level: zap.NewAtomicLevelAt(zapcore.FatalLevel),
OutputPaths: []string{"stdout"},
EncoderConfig: zapcore.EncoderConfig{
MessageKey: "message",
LevelKey: "level",
EncodeLevel: zapcore.CapitalLevelEncoder,
TimeKey: "time",
EncodeTime: zapcore.ISO8601TimeEncoder,
CallerKey: "caller",
EncodeCaller: zapcore.ShortCallerEncoder,
},
}
func prepare() (context.Context, *zap.Logger) {
var (
err error
log = zap.NewNop()
ctx, _ = signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP)
)
if !logEnabledFlag {
return ctx, log
} else if logDebugEnabledFlag {
zapConfig.Level = zap.NewAtomicLevelAt(zapcore.DebugLevel)
}
if log, err = zapConfig.Build(); err != nil {
panic(fmt.Errorf("create logger: %w", err))
}
return ctx, log
}
func main() {
app := &cli.App{
Name: "FrostFS S3 Authmate",
Usage: "Helps manage delegated access via gates to data stored in FrostFS network",
Version: version.Version,
Flags: appFlags(),
Commands: appCommands(),
}
cli.VersionPrinter = func(c *cli.Context) {
fmt.Printf("%s\nVersion: %s\nGoVersion: %s\n", c.App.Name, c.App.Version, runtime.Version())
}
ctx, _ := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP)
viper.AutomaticEnv()
viper.SetEnvPrefix("AUTHMATE")
viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
viper.AllowEmptyEnv(true)
if err := app.Run(os.Args); err != nil {
_, _ = fmt.Fprintf(os.Stderr, "error: %s\n", err)
os.Exit(100)
if cmd, err := modules.Execute(ctx); err != nil {
cmd.PrintErrln("Error:", err.Error())
cmd.PrintErrf("Run '%v --help' for usage.\n", cmd.CommandPath())
os.Exit(1)
}
}
func appFlags() []cli.Flag {
return []cli.Flag{
&cli.BoolFlag{
Name: "with-log",
Usage: "Enable logger",
Destination: &logEnabledFlag,
},
&cli.BoolFlag{
Name: "debug",
Usage: "Enable debug logger level",
Destination: &logDebugEnabledFlag,
},
&cli.DurationFlag{
Name: "timeout",
Usage: "timeout of processing of the command, for example 2m " +
"(note: max time unit is an hour so to set a day you should use 24h)",
Destination: &timeoutFlag,
Value: 1 * time.Minute,
},
}
}
func appCommands() []*cli.Command {
return []*cli.Command{
issueSecret(),
obtainSecret(),
generatePresignedURL(),
}
}
func issueSecret() *cli.Command {
return &cli.Command{
Name: "issue-secret",
Usage: "Issue a secret in FrostFS network",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "wallet",
Value: "",
Usage: "path to the wallet",
Required: true,
Destination: &walletPathFlag,
},
&cli.StringFlag{
Name: "address",
Value: "",
Usage: "address of wallet account",
Required: false,
Destination: &accountAddressFlag,
},
&cli.StringFlag{
Name: "peer",
Value: "",
Usage: "address of a frostfs peer to connect to",
Required: true,
Destination: &peerAddressFlag,
},
&cli.StringFlag{
Name: "bearer-rules",
Usage: "rules for bearer token (filepath or a plain json string are allowed, can be used only with --disable-impersonate)",
Required: false,
Destination: &eaclRulesFlag,
},
&cli.BoolFlag{
Name: "disable-impersonate",
Usage: "mark token as not impersonate to don't consider token signer as request owner (must be provided to use --bearer-rules flag)",
Required: false,
Destination: &disableImpersonateFlag,
},
&cli.StringSliceFlag{
Name: "gate-public-key",
Usage: "public 256r1 key of a gate (use flags repeatedly for multiple gates)",
Required: true,
Destination: &gatesPublicKeysFlag,
},
&cli.StringFlag{
Name: "container-id",
Usage: "auth container id to put the secret into",
Required: false,
Destination: &containerIDFlag,
},
&cli.StringFlag{
Name: "access-key-id",
Usage: "access key id for s3 (use this flag to update existing creds, if this flag is provided '--container-id', '--container-friendly-name' and '--container-placement-policy' are ineffective)",
Required: false,
Destination: &accessKeyIDFlag,
},
&cli.StringFlag{
Name: "container-friendly-name",
Usage: "friendly name of auth container to put the secret into",
Required: false,
Destination: &containerFriendlyName,
},
&cli.StringFlag{
Name: "container-placement-policy",
Usage: "placement policy of auth container to put the secret into",
Required: false,
Destination: &containerPlacementPolicy,
Value: "REP 2 IN X CBF 3 SELECT 2 FROM * AS X",
},
&cli.StringFlag{
Name: "session-tokens",
Usage: "create session tokens with rules, if the rules are set as 'none', no session tokens will be created",
Required: false,
Destination: &sessionTokenFlag,
Value: "",
},
&cli.DurationFlag{
Name: "lifetime",
Usage: `Lifetime of tokens. For example 50h30m (note: max time unit is an hour so to set a day you should use 24h).
It will be ceil rounded to the nearest amount of epoch.`,
Required: false,
Destination: &lifetimeFlag,
Value: defaultLifetime,
},
&cli.StringFlag{
Name: "container-policy",
Usage: "mapping AWS storage class to FrostFS storage policy as plain json string or path to json file",
Required: false,
Destination: &containerPolicies,
},
&cli.StringFlag{
Name: "aws-cli-credentials",
Usage: "path to the aws cli credential file",
Required: false,
Destination: &awcCliCredFile,
},
&cli.DurationFlag{
Name: "pool-dial-timeout",
Usage: `Timeout for connection to the node in pool to be established`,
Required: false,
Destination: &poolDialTimeoutFlag,
Value: poolDialTimeout,
},
&cli.DurationFlag{
Name: "pool-healthcheck-timeout",
Usage: `Timeout for request to node to decide if it is alive`,
Required: false,
Destination: &poolHealthcheckTimeoutFlag,
Value: poolHealthcheckTimeout,
},
&cli.DurationFlag{
Name: "pool-rebalance-interval",
Usage: `Interval for updating nodes health status`,
Required: false,
Destination: &poolRebalanceIntervalFlag,
Value: poolRebalanceInterval,
},
&cli.DurationFlag{
Name: "pool-stream-timeout",
Usage: `Timeout for individual operation in streaming RPC`,
Required: false,
Destination: &poolStreamTimeoutFlag,
Value: poolStreamTimeout,
},
},
Action: func(c *cli.Context) error {
ctx, log := prepare()
password := wallet.GetPassword(viper.GetViper(), envWalletPassphrase)
key, err := wallet.GetKeyFromPath(walletPathFlag, accountAddressFlag, password)
if err != nil {
return cli.Exit(fmt.Sprintf("failed to load frostfs private key: %s", err), 1)
}
ctx, cancel := context.WithCancel(ctx)
defer cancel()
poolCfg := PoolConfig{
Key: &key.PrivateKey,
Address: peerAddressFlag,
DialTimeout: poolDialTimeoutFlag,
HealthcheckTimeout: poolHealthcheckTimeoutFlag,
StreamTimeout: poolStreamTimeoutFlag,
RebalanceInterval: poolRebalanceIntervalFlag,
}
frostFS, err := createFrostFS(ctx, log, poolCfg)
if err != nil {
return cli.Exit(fmt.Sprintf("failed to create FrostFS component: %s", err), 2)
}
agent := authmate.New(log, frostFS)
var containerID cid.ID
if len(containerIDFlag) > 0 {
if err = containerID.DecodeString(containerIDFlag); err != nil {
return cli.Exit(fmt.Sprintf("failed to parse auth container id: %s", err), 3)
}
}
var credsToUpdate *authmate.UpdateOptions
if len(accessKeyIDFlag) > 0 {
secretAccessKeyStr := wallet.GetPassword(viper.GetViper(), envSecretAccessKey)
if secretAccessKeyStr == nil {
return fmt.Errorf("you must provide AUTHMATE_SECRET_ACCESS_KEY env to update existing creds")
}
secretAccessKey, err := hex.DecodeString(*secretAccessKeyStr)
if err != nil {
return fmt.Errorf("access key must be hex encoded")
}
var addr oid.Address
credAddr := strings.Replace(accessKeyIDFlag, "0", "/", 1)
if err = addr.DecodeString(credAddr); err != nil {
return fmt.Errorf("failed to parse creds address: %w", err)
}
// we can create new creds version only in the same container
containerID = addr.Container()
credsToUpdate = &authmate.UpdateOptions{
Address: addr,
SecretAccessKey: secretAccessKey,
}
}
var gatesPublicKeys []*keys.PublicKey
for _, key := range gatesPublicKeysFlag.Value() {
gpk, err := keys.NewPublicKeyFromString(key)
if err != nil {
return cli.Exit(fmt.Sprintf("failed to load gate's public key: %s", err), 4)
}
gatesPublicKeys = append(gatesPublicKeys, gpk)
}
if lifetimeFlag <= 0 {
return cli.Exit(fmt.Sprintf("lifetime must be greater 0, current value: %d", lifetimeFlag), 5)
}
policies, err := parsePolicies(containerPolicies)
if err != nil {
return cli.Exit(fmt.Sprintf("couldn't parse container policy: %s", err.Error()), 6)
}
if !disableImpersonateFlag && eaclRulesFlag != "" {
return cli.Exit("--bearer-rules flag can be used only with --disable-impersonate", 6)
}
bearerRules, err := getJSONRules(eaclRulesFlag)
if err != nil {
return cli.Exit(fmt.Sprintf("couldn't parse 'bearer-rules' flag: %s", err.Error()), 7)
}
sessionRules, skipSessionRules, err := getSessionRules(sessionTokenFlag)
if err != nil {
return cli.Exit(fmt.Sprintf("couldn't parse 'session-tokens' flag: %s", err.Error()), 8)
}
issueSecretOptions := &authmate.IssueSecretOptions{
Container: authmate.ContainerOptions{
ID: containerID,
FriendlyName: containerFriendlyName,
PlacementPolicy: containerPlacementPolicy,
},
FrostFSKey: key,
GatesPublicKeys: gatesPublicKeys,
EACLRules: bearerRules,
Impersonate: !disableImpersonateFlag,
SessionTokenRules: sessionRules,
SkipSessionRules: skipSessionRules,
ContainerPolicies: policies,
Lifetime: lifetimeFlag,
AwsCliCredentialsFile: awcCliCredFile,
UpdateCreds: credsToUpdate,
}
var tcancel context.CancelFunc
ctx, tcancel = context.WithTimeout(ctx, timeoutFlag)
defer tcancel()
if err = agent.IssueSecret(ctx, os.Stdout, issueSecretOptions); err != nil {
return cli.Exit(fmt.Sprintf("failed to issue secret: %s", err), 7)
}
return nil
},
}
}
func generatePresignedURL() *cli.Command {
return &cli.Command{
Name: "generate-presigned-url",
Description: `Generate a presigned URL using AWS credentials. Credentials must be placed in ~/.aws/credentials.
Provide a profile to load with the --profile flag, or explicitly provide credentials and region using
--aws-access-key-id, --aws-secret-access-key, --region.
Note: to override credentials you must provide both the access key and the secret key.`,
Usage: "generate-presigned-url --endpoint http://s3.frostfs.devenv:8080 --bucket bucket-name --object object-name --method get --profile aws-profile",
Flags: []cli.Flag{
&cli.DurationFlag{
Name: "lifetime",
Usage: `Lifetime of presigned URL. For example 50h30m (note: max time unit is an hour so to set a day you should use 24h).
It will be ceil rounded to the nearest amount of epoch.`,
Required: false,
Destination: &lifetimeFlag,
Value: defaultPresignedLifetime,
},
&cli.StringFlag{
Name: "endpoint",
Usage: `Endpoint of s3-gw`,
Required: true,
Destination: &endpointFlag,
},
&cli.StringFlag{
Name: "bucket",
Usage: `Bucket name to perform action`,
Required: true,
Destination: &bucketFlag,
},
&cli.StringFlag{
Name: "object",
Usage: `Object name to perform action`,
Required: true,
Destination: &objectFlag,
},
&cli.StringFlag{
Name: "method",
Usage: `HTTP method to perform action`,
Required: true,
Destination: &methodFlag,
},
&cli.StringFlag{
Name: "profile",
Usage: `AWS profile to load`,
Required: false,
Destination: &profileFlag,
},
&cli.StringFlag{
Name: "region",
Usage: `AWS region to use in signature (default is taken from ~/.aws/config)`,
Required: false,
Destination: &regionFlag,
},
&cli.StringFlag{
Name: "aws-access-key-id",
Usage: `AWS access key id to sign the URL (default is taken from ~/.aws/credentials)`,
Required: false,
Destination: &accessKeyIDFlag,
},
&cli.StringFlag{
Name: "aws-secret-access-key",
Usage: `AWS access secret access key to sign the URL (default is taken from ~/.aws/credentials)`,
Required: false,
Destination: &secretAccessKeyFlag,
},
},
Action: func(c *cli.Context) error {
var cfg aws.Config
if regionFlag != "" {
cfg.Region = &regionFlag
}
if accessKeyIDFlag != "" && secretAccessKeyFlag != "" {
cfg.Credentials = credentials.NewStaticCredentialsFromCreds(credentials.Value{
AccessKeyID: accessKeyIDFlag,
SecretAccessKey: secretAccessKeyFlag,
})
}
sess, err := session.NewSessionWithOptions(session.Options{
Config: cfg,
Profile: profileFlag,
SharedConfigState: session.SharedConfigEnable,
})
if err != nil {
return fmt.Errorf("couldn't get credentials: %w", err)
}
reqData := auth.RequestData{
Method: methodFlag,
Endpoint: endpointFlag,
Bucket: bucketFlag,
Object: objectFlag,
}
presignData := auth.PresignData{
Service: "s3",
Region: *sess.Config.Region,
Lifetime: lifetimeFlag,
SignTime: time.Now().UTC(),
}
req, err := auth.PresignRequest(sess.Config.Credentials, reqData, presignData)
if err != nil {
return err
}
res := &struct{ URL string }{
URL: req.URL.String(),
}
enc := json.NewEncoder(os.Stdout)
enc.SetIndent("", " ")
enc.SetEscapeHTML(false)
return enc.Encode(res)
},
}
}
func parsePolicies(val string) (authmate.ContainerPolicies, error) {
if val == "" {
return nil, nil
}
var (
data = []byte(val)
err error
)
if !json.Valid(data) {
if data, err = os.ReadFile(val); err != nil {
return nil, fmt.Errorf("coudln't read json file or provided json is invalid")
}
}
var policies authmate.ContainerPolicies
if err = json.Unmarshal(data, &policies); err != nil {
return nil, fmt.Errorf("unmarshal policies: %w", err)
}
if _, ok := policies[api.DefaultLocationConstraint]; ok {
return nil, fmt.Errorf("config overrides %s location constraint", api.DefaultLocationConstraint)
}
return policies, nil
}
func getJSONRules(val string) ([]byte, error) {
if val == "" {
return nil, nil
}
data := []byte(val)
if json.Valid(data) {
return data, nil
}
if data, err := os.ReadFile(val); err == nil {
if json.Valid(data) {
return data, nil
}
}
return nil, fmt.Errorf("coudln't read json file or provided json is invalid")
}
// getSessionRules reads json session rules.
// It returns true if rules must be skipped.
func getSessionRules(r string) ([]byte, bool, error) {
if r == "none" {
return nil, true, nil
}
data, err := getJSONRules(r)
return data, false, err
}
func obtainSecret() *cli.Command {
command := &cli.Command{
Name: "obtain-secret",
Usage: "Obtain a secret from FrostFS network",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "wallet",
Value: "",
Usage: "path to the wallet",
Required: true,
Destination: &walletPathFlag,
},
&cli.StringFlag{
Name: "address",
Value: "",
Usage: "address of wallet account",
Required: false,
Destination: &accountAddressFlag,
},
&cli.StringFlag{
Name: "peer",
Value: "",
Usage: "address of frostfs peer to connect to",
Required: true,
Destination: &peerAddressFlag,
},
&cli.StringFlag{
Name: "gate-wallet",
Value: "",
Usage: "path to the wallet",
Required: true,
Destination: &gateWalletPathFlag,
},
&cli.StringFlag{
Name: "gate-address",
Value: "",
Usage: "address of wallet account",
Required: false,
Destination: &gateAccountAddressFlag,
},
&cli.StringFlag{
Name: "access-key-id",
Usage: "access key id for s3",
Required: true,
Destination: &accessKeyIDFlag,
},
&cli.DurationFlag{
Name: "pool-dial-timeout",
Usage: `Timeout for connection to the node in pool to be established`,
Required: false,
Destination: &poolDialTimeoutFlag,
Value: poolDialTimeout,
},
&cli.DurationFlag{
Name: "pool-healthcheck-timeout",
Usage: `Timeout for request to node to decide if it is alive`,
Required: false,
Destination: &poolHealthcheckTimeoutFlag,
Value: poolHealthcheckTimeout,
},
&cli.DurationFlag{
Name: "pool-rebalance-interval",
Usage: `Interval for updating nodes health status`,
Required: false,
Destination: &poolRebalanceIntervalFlag,
Value: poolRebalanceInterval,
},
&cli.DurationFlag{
Name: "pool-stream-timeout",
Usage: `Timeout for individual operation in streaming RPC`,
Required: false,
Destination: &poolStreamTimeoutFlag,
Value: poolStreamTimeout,
},
},
Action: func(c *cli.Context) error {
ctx, log := prepare()
password := wallet.GetPassword(viper.GetViper(), envWalletPassphrase)
key, err := wallet.GetKeyFromPath(walletPathFlag, accountAddressFlag, password)
if err != nil {
return cli.Exit(fmt.Sprintf("failed to load frostfs private key: %s", err), 1)
}
ctx, cancel := context.WithCancel(ctx)
defer cancel()
poolCfg := PoolConfig{
Key: &key.PrivateKey,
Address: peerAddressFlag,
DialTimeout: poolDialTimeoutFlag,
HealthcheckTimeout: poolHealthcheckTimeoutFlag,
StreamTimeout: poolStreamTimeoutFlag,
RebalanceInterval: poolRebalanceIntervalFlag,
}
frostFS, err := createFrostFS(ctx, log, poolCfg)
if err != nil {
return cli.Exit(fmt.Sprintf("failed to create FrostFS component: %s", err), 2)
}
agent := authmate.New(log, frostFS)
var _ = agent
password = wallet.GetPassword(viper.GetViper(), envWalletGatePassphrase)
gateCreds, err := wallet.GetKeyFromPath(gateWalletPathFlag, gateAccountAddressFlag, password)
if err != nil {
return cli.Exit(fmt.Sprintf("failed to create owner's private key: %s", err), 4)
}
secretAddress := strings.Replace(accessKeyIDFlag, "0", "/", 1)
obtainSecretOptions := &authmate.ObtainSecretOptions{
SecretAddress: secretAddress,
GatePrivateKey: gateCreds,
}
var tcancel context.CancelFunc
ctx, tcancel = context.WithTimeout(ctx, timeoutFlag)
defer tcancel()
if err = agent.ObtainSecret(ctx, os.Stdout, obtainSecretOptions); err != nil {
return cli.Exit(fmt.Sprintf("failed to obtain secret: %s", err), 5)
}
return nil
},
}
return command
}
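// createFrostFS dials a FrostFS connection pool with the configured timeouts and
// wraps it in the authmate FrostFS adapter.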
func createFrostFS(ctx context.Context, log *zap.Logger, cfg PoolConfig) (authmate.FrostFS, error) {
log.Debug("prepare connection pool")
var prm pool.InitParameters
prm.SetKey(cfg.Key)
prm.SetNodeDialTimeout(cfg.DialTimeout)
prm.SetHealthcheckTimeout(cfg.HealthcheckTimeout)
prm.SetNodeStreamTimeout(cfg.StreamTimeout)
prm.SetClientRebalanceInterval(cfg.RebalanceInterval)
prm.AddNode(pool.NewNodeParam(1, cfg.Address, 1))
p, err := pool.NewPool(prm)
if err != nil {
return nil, fmt.Errorf("create pool: %w", err)
}
if err = p.Dial(ctx); err != nil {
return nil, fmt.Errorf("dial pool: %w", err)
}
return frostfs.NewAuthmateFrostFS(p), nil
}


@ -0,0 +1,108 @@
package modules
import (
"encoding/json"
"fmt"
"os"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
var generatePresignedURLCmd = &cobra.Command{
Use: "generate-presigned-url",
Short: "Generate presigned url using AWS credentials",
Long: `Generate a presigned URL using AWS credentials. Credentials must be placed in ~/.aws/credentials.
Provide a profile to load with the --profile flag, or explicitly provide credentials and region using
--aws-access-key-id, --aws-secret-access-key, --region.
Note: to override credentials you must provide both the access key and the secret key.`,
Example: `frostfs-s3-authmate generate-presigned-url --method put --bucket my-bucket --object my-object --endpoint http://localhost:8084 --lifetime 12h --region ru --aws-access-key-id ETaA2CadPcA7bAkLsML2PbTudXY8uRt2PDjCCwkvRv9s0FDCxWDXYc1SA1vKv8KbyCNsLY2AmAjJ92Vz5rgvsFCy --aws-secret-access-key c2d65ef2980f03f4f495bdebedeeae760496697880d61d106bb9a4e5cd2e0607`,
RunE: runGeneratePresignedURLCmd,
}
const defaultPresignedLifetime = 12 * time.Hour
const (
endpointFlag = "endpoint"
bucketFlag = "bucket"
objectFlag = "object"
methodFlag = "method"
profileFlag = "profile"
regionFlag = "region"
awsAccessKeyIDFlag = "aws-access-key-id"
awsSecretAccessKeyFlag = "aws-secret-access-key"
)
func initGeneratePresignedURLCmd() {
generatePresignedURLCmd.Flags().Duration(lifetimeFlag, defaultPresignedLifetime, "Lifetime of presigned URL. For example 50h30m (note: max time unit is an hour so to set a day you should use 24h).\nIt will be ceil rounded to the nearest amount of epoch.")
generatePresignedURLCmd.Flags().String(endpointFlag, "", "S3 gateway endpoint")
generatePresignedURLCmd.Flags().String(bucketFlag, "", "Bucket name to perform action")
generatePresignedURLCmd.Flags().String(objectFlag, "", "Object name to perform action")
generatePresignedURLCmd.Flags().String(methodFlag, "", "HTTP method to perform action")
generatePresignedURLCmd.Flags().String(profileFlag, "", "AWS profile to load")
generatePresignedURLCmd.Flags().String(regionFlag, "", "AWS region to use in signature (default is taken from ~/.aws/config)")
generatePresignedURLCmd.Flags().String(awsAccessKeyIDFlag, "", "AWS access key id to sign the URL (default is taken from ~/.aws/credentials)")
generatePresignedURLCmd.Flags().String(awsSecretAccessKeyFlag, "", "AWS secret access key to sign the URL (default is taken from ~/.aws/credentials)")
_ = generatePresignedURLCmd.MarkFlagRequired(endpointFlag)
_ = generatePresignedURLCmd.MarkFlagRequired(bucketFlag)
_ = generatePresignedURLCmd.MarkFlagRequired(objectFlag)
}
func runGeneratePresignedURLCmd(*cobra.Command, []string) error {
var cfg aws.Config
if region := viper.GetString(regionFlag); region != "" {
cfg.Region = &region
}
accessKeyID := viper.GetString(awsAccessKeyIDFlag)
secretAccessKey := viper.GetString(awsSecretAccessKeyFlag)
if accessKeyID != "" && secretAccessKey != "" {
cfg.Credentials = credentials.NewStaticCredentialsFromCreds(credentials.Value{
AccessKeyID: accessKeyID,
SecretAccessKey: secretAccessKey,
})
}
sess, err := session.NewSessionWithOptions(session.Options{
Config: cfg,
Profile: viper.GetString(profileFlag),
SharedConfigState: session.SharedConfigEnable,
})
if err != nil {
return fmt.Errorf("couldn't get aws credentials: %w", err)
}
reqData := auth.RequestData{
Method: viper.GetString(methodFlag),
Endpoint: viper.GetString(endpointFlag),
Bucket: viper.GetString(bucketFlag),
Object: viper.GetString(objectFlag),
}
presignData := auth.PresignData{
Service: "s3",
Region: *sess.Config.Region,
Lifetime: viper.GetDuration(lifetimeFlag),
SignTime: time.Now().UTC(),
}
req, err := auth.PresignRequest(sess.Config.Credentials, reqData, presignData)
if err != nil {
return err
}
res := &struct{ URL string }{
URL: req.URL.String(),
}
enc := json.NewEncoder(os.Stdout)
enc.SetIndent("", " ")
enc.SetEscapeHTML(false)
return enc.Encode(res)
}


@ -0,0 +1,176 @@
package modules
import (
"context"
"errors"
"fmt"
"os"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/authmate"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/wallet"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
var issueSecretCmd = &cobra.Command{
Use: "issue-secret",
Short: "Issue a secret in FrostFS network",
Long: "Creates new s3 credentials to use with frostfs-s3-gw",
Example: `frostfs-s3-authmate issue-secret --wallet wallet.json --peer s01.frostfs.devenv:8080 --gate-public-key 031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a`,
RunE: runIssueSecretCmd,
}
const (
walletFlag = "wallet"
addressFlag = "address"
peerFlag = "peer"
bearerRulesFlag = "bearer-rules"
disableImpersonateFlag = "disable-impersonate"
gatePublicKeyFlag = "gate-public-key"
containerIDFlag = "container-id"
containerFriendlyNameFlag = "container-friendly-name"
containerPlacementPolicyFlag = "container-placement-policy"
sessionTokensFlag = "session-tokens"
lifetimeFlag = "lifetime"
containerPolicyFlag = "container-policy"
awsCLICredentialFlag = "aws-cli-credentials"
)
const (
walletPassphraseCfg = "wallet.passphrase"
)
const (
defaultAccessBoxLifetime = 30 * 24 * time.Hour
defaultPoolDialTimeout = 5 * time.Second
defaultPoolHealthcheckTimeout = 5 * time.Second
defaultPoolRebalanceInterval = 30 * time.Second
defaultPoolStreamTimeout = 10 * time.Second
)
const (
poolDialTimeoutFlag = "pool-dial-timeout"
poolHealthcheckTimeoutFlag = "pool-healthcheck-timeout"
poolRebalanceIntervalFlag = "pool-rebalance-interval"
poolStreamTimeoutFlag = "pool-stream-timeout"
)
func initIssueSecretCmd() {
issueSecretCmd.Flags().String(walletFlag, "", "Path to the wallet that will be owner of the credentials")
issueSecretCmd.Flags().String(addressFlag, "", "Address of the wallet account")
issueSecretCmd.Flags().String(peerFlag, "", "Address of a frostfs peer to connect to")
issueSecretCmd.Flags().String(bearerRulesFlag, "", "Rules for the bearer token (a file path or a plain JSON string is allowed; can be used only with --disable-impersonate)")
issueSecretCmd.Flags().Bool(disableImpersonateFlag, false, "Mark the bearer token as non-impersonated so that the token signer is not considered the request owner (must be provided to use the --bearer-rules flag)")
issueSecretCmd.Flags().StringSlice(gatePublicKeyFlag, nil, "Public secp256r1 key of a gate (use the flag repeatedly for multiple gates or separate keys with commas)")
issueSecretCmd.Flags().String(containerIDFlag, "", "Auth container id to put the secret into (if not provided, a new container will be created)")
issueSecretCmd.Flags().String(containerFriendlyNameFlag, "", "Friendly name of the auth container to put the secret into (used only if --container-id is omitted)")
issueSecretCmd.Flags().String(containerPlacementPolicyFlag, "REP 2 IN X CBF 3 SELECT 2 FROM * AS X", "Placement policy of the auth container to put the secret into (used only if --container-id is omitted)")
issueSecretCmd.Flags().String(sessionTokensFlag, "", "Create session tokens with rules; if the rules are set to 'none', no session tokens will be created")
issueSecretCmd.Flags().Duration(lifetimeFlag, defaultAccessBoxLifetime, "Lifetime of tokens, for example 50h30m (note: the maximum time unit is an hour, so use 24h to set a day).\nIt will be rounded up to the nearest whole number of epochs.")
issueSecretCmd.Flags().String(containerPolicyFlag, "", "Mapping AWS storage class to FrostFS storage policy as plain json string or path to json file")
issueSecretCmd.Flags().String(awsCLICredentialFlag, "", "Path to the aws cli credential file")
issueSecretCmd.Flags().Duration(poolDialTimeoutFlag, defaultPoolDialTimeout, "Timeout for connection to the node in pool to be established")
issueSecretCmd.Flags().Duration(poolHealthcheckTimeoutFlag, defaultPoolHealthcheckTimeout, "Timeout for request to node to decide if it is alive")
issueSecretCmd.Flags().Duration(poolRebalanceIntervalFlag, defaultPoolRebalanceInterval, "Interval for updating nodes health status")
issueSecretCmd.Flags().Duration(poolStreamTimeoutFlag, defaultPoolStreamTimeout, "Timeout for individual operation in streaming RPC")
_ = issueSecretCmd.MarkFlagRequired(walletFlag)
_ = issueSecretCmd.MarkFlagRequired(peerFlag)
_ = issueSecretCmd.MarkFlagRequired(gatePublicKeyFlag)
}
func runIssueSecretCmd(cmd *cobra.Command, _ []string) error {
ctx, cancel := context.WithTimeout(cmd.Context(), viper.GetDuration(timeoutFlag))
defer cancel()
log := getLogger()
password := wallet.GetPassword(viper.GetViper(), walletPassphraseCfg)
key, err := wallet.GetKeyFromPath(viper.GetString(walletFlag), viper.GetString(addressFlag), password)
if err != nil {
return fmt.Errorf("failed to load frostfs private key: %s", err)
}
var cnrID cid.ID
containerID := viper.GetString(containerIDFlag)
if len(containerID) > 0 {
if err = cnrID.DecodeString(containerID); err != nil {
return fmt.Errorf("failed to parse auth container id: %s", err)
}
}
var gatesPublicKeys []*keys.PublicKey
for _, keyStr := range viper.GetStringSlice(gatePublicKeyFlag) {
gpk, err := keys.NewPublicKeyFromString(keyStr)
if err != nil {
return fmt.Errorf("failed to load gate's public key: %s", err)
}
gatesPublicKeys = append(gatesPublicKeys, gpk)
}
lifetime := viper.GetDuration(lifetimeFlag)
if lifetime <= 0 {
return fmt.Errorf("lifetime must be greater 0, current value: %d", lifetime)
}
policies, err := parsePolicies(viper.GetString(containerPolicyFlag))
if err != nil {
return fmt.Errorf("couldn't parse container policy: %s", err.Error())
}
disableImpersonate := viper.GetBool(disableImpersonateFlag)
eaclRules := viper.GetString(bearerRulesFlag)
if !disableImpersonate && eaclRules != "" {
return errors.New("--bearer-rules flag can be used only with --disable-impersonate")
}
bearerRules, err := getJSONRules(eaclRules)
if err != nil {
return fmt.Errorf("couldn't parse 'bearer-rules' flag: %s", err.Error())
}
sessionRules, skipSessionRules, err := getSessionRules(viper.GetString(sessionTokensFlag))
if err != nil {
return fmt.Errorf("couldn't parse 'session-tokens' flag: %s", err.Error())
}
poolCfg := PoolConfig{
Key: key,
Address: viper.GetString(peerFlag),
DialTimeout: viper.GetDuration(poolDialTimeoutFlag),
HealthcheckTimeout: viper.GetDuration(poolHealthcheckTimeoutFlag),
StreamTimeout: viper.GetDuration(poolStreamTimeoutFlag),
RebalanceInterval: viper.GetDuration(poolRebalanceIntervalFlag),
}
frostFS, err := createFrostFS(ctx, log, poolCfg)
if err != nil {
return fmt.Errorf("failed to create FrostFS component: %s", err)
}
issueSecretOptions := &authmate.IssueSecretOptions{
Container: authmate.ContainerOptions{
ID: cnrID,
FriendlyName: viper.GetString(containerFriendlyNameFlag),
PlacementPolicy: viper.GetString(containerPlacementPolicyFlag),
},
FrostFSKey: key,
GatesPublicKeys: gatesPublicKeys,
EACLRules: bearerRules,
Impersonate: !disableImpersonate,
SessionTokenRules: sessionRules,
SkipSessionRules: skipSessionRules,
ContainerPolicies: policies,
Lifetime: lifetime,
AwsCliCredentialsFile: viper.GetString(awsCLICredentialFlag),
}
if err = authmate.New(log, frostFS).IssueSecret(ctx, os.Stdout, issueSecretOptions); err != nil {
return fmt.Errorf("failed to issue secret: %s", err)
}
return nil
}


@ -0,0 +1,94 @@
package modules
import (
"context"
"fmt"
"os"
"strings"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/authmate"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/wallet"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"github.com/urfave/cli/v2"
)
var obtainSecretCmd = &cobra.Command{
Use: "obtain-secret",
Short: "Obtain a secret from FrostFS network",
Long: "Gets generated secret from credential object (accessbox)",
Example: `frostfs-s3-authmate obtain-secret --wallet wallet.json --peer s01.neofs.devenv:8080 --gate-wallet s3-wallet.json --access-key-id EC3tyWpTEKfGNS888PFBpwQzZTrnwDXReGjgAxa8Em1h037VoWktUZCAk1LVA5SvVbVd2NHHb2NQm9jhcd5WFU5VD`,
RunE: runObtainSecretCmd,
}
const (
gateWalletFlag = "gate-wallet"
gateAddressFlag = "gate-address"
accessKeyIDFlag = "access-key-id"
)
const (
walletGatePassphraseCfg = "wallet.gate.passphrase"
)
func initObtainSecretCmd() {
obtainSecretCmd.Flags().String(walletFlag, "", "Path to the wallet that will be owner of the credentials")
obtainSecretCmd.Flags().String(addressFlag, "", "Address of the wallet account")
obtainSecretCmd.Flags().String(peerFlag, "", "Address of a frostfs peer to connect to")
obtainSecretCmd.Flags().String(gateWalletFlag, "", "Path to the s3 gateway wallet to decrypt accessbox")
obtainSecretCmd.Flags().String(gateAddressFlag, "", "Address of the s3 gateway wallet account")
obtainSecretCmd.Flags().String(accessKeyIDFlag, "", "Access key id of s3 credential for which secret must be obtained")
obtainSecretCmd.Flags().Duration(poolDialTimeoutFlag, defaultPoolDialTimeout, "Timeout for connection to the node in pool to be established")
obtainSecretCmd.Flags().Duration(poolHealthcheckTimeoutFlag, defaultPoolHealthcheckTimeout, "Timeout for request to node to decide if it is alive")
obtainSecretCmd.Flags().Duration(poolRebalanceIntervalFlag, defaultPoolRebalanceInterval, "Interval for updating nodes health status")
obtainSecretCmd.Flags().Duration(poolStreamTimeoutFlag, defaultPoolStreamTimeout, "Timeout for individual operation in streaming RPC")
_ = obtainSecretCmd.MarkFlagRequired(walletFlag)
_ = obtainSecretCmd.MarkFlagRequired(peerFlag)
_ = obtainSecretCmd.MarkFlagRequired(gateWalletFlag)
_ = obtainSecretCmd.MarkFlagRequired(accessKeyIDFlag)
}
func runObtainSecretCmd(cmd *cobra.Command, _ []string) error {
ctx, cancel := context.WithTimeout(cmd.Context(), viper.GetDuration(timeoutFlag))
defer cancel()
log := getLogger()
password := wallet.GetPassword(viper.GetViper(), walletPassphraseCfg)
key, err := wallet.GetKeyFromPath(viper.GetString(walletFlag), viper.GetString(addressFlag), password)
if err != nil {
return fmt.Errorf("failed to load frostfs private key: %s", err)
}
gatePassword := wallet.GetPassword(viper.GetViper(), walletGatePassphraseCfg)
gateKey, err := wallet.GetKeyFromPath(viper.GetString(gateWalletFlag), viper.GetString(gateAddressFlag), gatePassword)
if err != nil {
return fmt.Errorf("failed to load s3 gate private key: %s", err)
}
poolCfg := PoolConfig{
Key: key,
Address: viper.GetString(peerFlag),
DialTimeout: viper.GetDuration(poolDialTimeoutFlag),
HealthcheckTimeout: viper.GetDuration(poolHealthcheckTimeoutFlag),
StreamTimeout: viper.GetDuration(poolStreamTimeoutFlag),
RebalanceInterval: viper.GetDuration(poolRebalanceIntervalFlag),
}
frostFS, err := createFrostFS(ctx, log, poolCfg)
if err != nil {
return cli.Exit(fmt.Sprintf("failed to create FrostFS component: %s", err), 2)
}
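// The access-key-id issued by authmate is "<container-id>0<object-id>"; replacing the
// first '0' with '/' restores the address of the accessbox object that stores the secret.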
obtainSecretOptions := &authmate.ObtainSecretOptions{
SecretAddress: strings.Replace(viper.GetString(accessKeyIDFlag), "0", "/", 1),
GatePrivateKey: gateKey,
}
if err = authmate.New(log, frostFS).ObtainSecret(ctx, os.Stdout, obtainSecretOptions); err != nil {
return fmt.Errorf("failed to obtain secret: %s", err)
}
return nil
}


@ -0,0 +1,68 @@
package modules
import (
"context"
"runtime"
"strings"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/version"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
// rootCmd represents the base command when called without any subcommands.
var rootCmd = &cobra.Command{
Use: "frostfs-s3-authmate",
Version: version.Version,
Short: "FrostFS S3 Authmate",
Long: "Helps manage delegated access via gates to data stored in FrostFS network",
Example: "frostfs-s3-authmate --version",
SilenceErrors: true,
SilenceUsage: true,
PersistentPreRunE: func(cmd *cobra.Command, _ []string) error {
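// Settings can also be supplied via environment variables with the AUTHMATE_ prefix;
// dots in config keys map to underscores (e.g. wallet.passphrase -> AUTHMATE_WALLET_PASSPHRASE).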
viper.AutomaticEnv()
viper.SetEnvPrefix("AUTHMATE")
viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
viper.AllowEmptyEnv(true)
return viper.BindPFlags(cmd.Flags())
},
RunE: func(cmd *cobra.Command, _ []string) error {
return cmd.Help()
},
}
const (
withLogFlag = "with-log"
debugFlag = "debug"
timeoutFlag = "timeout"
)
func Execute(ctx context.Context) (*cobra.Command, error) {
return rootCmd.ExecuteContextC(ctx)
}
func init() {
rootCmd.PersistentFlags().Bool(withLogFlag, false, "Enable logger")
rootCmd.PersistentFlags().Bool(debugFlag, false, "Enable debug logger level")
rootCmd.PersistentFlags().Duration(timeoutFlag, time.Minute, "Timeout for processing the command, for example 2m (note: the maximum time unit is an hour, so use 24h to set a day)")
cobra.AddTemplateFunc("runtimeVersion", runtime.Version)
rootCmd.SetVersionTemplate(`Frostfs S3 Authmate
{{printf "Version: %s" .Version }}
GoVersion: {{ runtimeVersion }}
`)
rootCmd.AddCommand(issueSecretCmd)
initIssueSecretCmd()
rootCmd.AddCommand(obtainSecretCmd)
initObtainSecretCmd()
rootCmd.AddCommand(generatePresignedURLCmd)
initGeneratePresignedURLCmd()
rootCmd.AddCommand(updateSecretCmd)
initUpdateSecretCmd()
}


@ -0,0 +1,108 @@
package modules
import (
"context"
"fmt"
"os"
"strings"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/authmate"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/wallet"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
var updateSecretCmd = &cobra.Command{
Use: "update-secret",
Short: "Update a secret in FrostFS network",
Long: `Creates a new access box that is available to an extended list of s3 gates and preserves all timeouts from the initial credentials.
After using this command you can use the initial access-key-id to interact with the newly added gates`,
Example: `To extend list of s3 gates that can use existing credentials run:
frostfs-s3-authmate update-secret --wallet wallet.json --peer s01.neofs.devenv:8080 --gate-wallet s3-wallet.json \
--gate-public-key 031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a \
--gate-public-key 021dc56fc6d81d581ae7605a8e00e0e0bab6cbad566a924a527339475a97a8e38e \
--access-key-id EC3tyWpTEKfGNS888PFBpwQzZTrnwDXReGjgAxa8Em1h037VoWktUZCAk1LVA5SvVbVd2NHHb2NQm9jhcd5WFU5VD`,
RunE: runUpdateSecretCmd,
}
func initUpdateSecretCmd() {
updateSecretCmd.Flags().String(walletFlag, "", "Path to the wallet that will be owner of the credentials")
updateSecretCmd.Flags().String(addressFlag, "", "Address of the wallet account")
updateSecretCmd.Flags().String(peerFlag, "", "Address of a frostfs peer to connect to")
updateSecretCmd.Flags().String(gateWalletFlag, "", "Path to the s3 gateway wallet to decrypt accessbox")
updateSecretCmd.Flags().String(gateAddressFlag, "", "Address of the s3 gateway wallet account")
updateSecretCmd.Flags().String(accessKeyIDFlag, "", "Access key id of the s3 credential to update")
updateSecretCmd.Flags().StringSlice(gatePublicKeyFlag, nil, "Public secp256r1 key of a gate (use the flag repeatedly for multiple gates or separate keys with commas)")
updateSecretCmd.Flags().Duration(poolDialTimeoutFlag, defaultPoolDialTimeout, "Timeout for connection to the node in pool to be established")
updateSecretCmd.Flags().Duration(poolHealthcheckTimeoutFlag, defaultPoolHealthcheckTimeout, "Timeout for request to node to decide if it is alive")
updateSecretCmd.Flags().Duration(poolRebalanceIntervalFlag, defaultPoolRebalanceInterval, "Interval for updating nodes health status")
updateSecretCmd.Flags().Duration(poolStreamTimeoutFlag, defaultPoolStreamTimeout, "Timeout for individual operation in streaming RPC")
_ = updateSecretCmd.MarkFlagRequired(walletFlag)
_ = updateSecretCmd.MarkFlagRequired(peerFlag)
_ = updateSecretCmd.MarkFlagRequired(gateWalletFlag)
_ = updateSecretCmd.MarkFlagRequired(accessKeyIDFlag)
_ = updateSecretCmd.MarkFlagRequired(gatePublicKeyFlag)
}
func runUpdateSecretCmd(cmd *cobra.Command, _ []string) error {
ctx, cancel := context.WithTimeout(cmd.Context(), viper.GetDuration(timeoutFlag))
defer cancel()
log := getLogger()
password := wallet.GetPassword(viper.GetViper(), walletPassphraseCfg)
key, err := wallet.GetKeyFromPath(viper.GetString(walletFlag), viper.GetString(addressFlag), password)
if err != nil {
return fmt.Errorf("failed to load frostfs private key: %s", err)
}
gatePassword := wallet.GetPassword(viper.GetViper(), walletGatePassphraseCfg)
gateKey, err := wallet.GetKeyFromPath(viper.GetString(gateWalletFlag), viper.GetString(gateAddressFlag), gatePassword)
if err != nil {
return fmt.Errorf("failed to load s3 gate private key: %s", err)
}
var accessBoxAddress oid.Address
credAddr := strings.Replace(viper.GetString(accessKeyIDFlag), "0", "/", 1)
if err = accessBoxAddress.DecodeString(credAddr); err != nil {
return fmt.Errorf("failed to parse creds address: %w", err)
}
var gatesPublicKeys []*keys.PublicKey
for _, keyStr := range viper.GetStringSlice(gatePublicKeyFlag) {
gpk, err := keys.NewPublicKeyFromString(keyStr)
if err != nil {
return fmt.Errorf("failed to load gate's public key: %s", err)
}
gatesPublicKeys = append(gatesPublicKeys, gpk)
}
poolCfg := PoolConfig{
Key: key,
Address: viper.GetString(peerFlag),
DialTimeout: viper.GetDuration(poolDialTimeoutFlag),
HealthcheckTimeout: viper.GetDuration(poolHealthcheckTimeoutFlag),
StreamTimeout: viper.GetDuration(poolStreamTimeoutFlag),
RebalanceInterval: viper.GetDuration(poolRebalanceIntervalFlag),
}
frostFS, err := createFrostFS(ctx, log, poolCfg)
if err != nil {
return fmt.Errorf("failed to create FrostFS component: %s", err)
}
updateSecretOptions := &authmate.UpdateSecretOptions{
Address: accessBoxAddress,
FrostFSKey: key,
GatesPublicKeys: gatesPublicKeys,
GatePrivateKey: gateKey,
}
if err = authmate.New(log, frostFS).UpdateSecret(ctx, os.Stdout, updateSecretOptions); err != nil {
return fmt.Errorf("failed to update secret: %s", err)
}
return nil
}


@ -0,0 +1,142 @@
package modules
import (
"context"
"encoding/json"
"fmt"
"os"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/authmate"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/spf13/viper"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
type PoolConfig struct {
Key *keys.PrivateKey
Address string
DialTimeout time.Duration
HealthcheckTimeout time.Duration
StreamTimeout time.Duration
RebalanceInterval time.Duration
}
func createFrostFS(ctx context.Context, log *zap.Logger, cfg PoolConfig) (authmate.FrostFS, error) {
log.Debug("prepare connection pool")
var prm pool.InitParameters
prm.SetKey(&cfg.Key.PrivateKey)
prm.SetNodeDialTimeout(cfg.DialTimeout)
prm.SetHealthcheckTimeout(cfg.HealthcheckTimeout)
prm.SetNodeStreamTimeout(cfg.StreamTimeout)
prm.SetClientRebalanceInterval(cfg.RebalanceInterval)
prm.SetLogger(log)
prm.AddNode(pool.NewNodeParam(1, cfg.Address, 1))
p, err := pool.NewPool(prm)
if err != nil {
return nil, fmt.Errorf("create pool: %w", err)
}
if err = p.Dial(ctx); err != nil {
return nil, fmt.Errorf("dial pool: %w", err)
}
return frostfs.NewAuthmateFrostFS(p, cfg.Key), nil
}
func parsePolicies(val string) (authmate.ContainerPolicies, error) {
if val == "" {
return nil, nil
}
var (
data = []byte(val)
err error
)
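// Treat the value as inline JSON first; if it is not valid JSON, fall back to reading it as a path to a JSON file.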
if !json.Valid(data) {
if data, err = os.ReadFile(val); err != nil {
return nil, fmt.Errorf("coudln't read json file or provided json is invalid")
}
}
var policies authmate.ContainerPolicies
if err = json.Unmarshal(data, &policies); err != nil {
return nil, fmt.Errorf("unmarshal policies: %w", err)
}
if _, ok := policies[api.DefaultLocationConstraint]; ok {
return nil, fmt.Errorf("config overrides %s location constraint", api.DefaultLocationConstraint)
}
return policies, nil
}
func getJSONRules(val string) ([]byte, error) {
if val == "" {
return nil, nil
}
data := []byte(val)
if json.Valid(data) {
return data, nil
}
if data, err := os.ReadFile(val); err == nil {
if json.Valid(data) {
return data, nil
}
}
return nil, fmt.Errorf("coudln't read json file or provided json is invalid")
}
// getSessionRules reads JSON session rules.
// The second return value is true when session rules must be skipped entirely (the 'none' value).
func getSessionRules(r string) ([]byte, bool, error) {
if r == "none" {
return nil, true, nil
}
data, err := getJSONRules(r)
return data, false, err
}
// getLogger returns a new logger configured from the relevant values in viper.Viper.
// It panics if the logger cannot be built.
func getLogger() *zap.Logger {
if !viper.GetBool(withLogFlag) {
return zap.NewNop()
}
var zapConfig = zap.Config{
Development: true,
Encoding: "console",
Level: zap.NewAtomicLevelAt(zapcore.FatalLevel),
OutputPaths: []string{"stdout"},
EncoderConfig: zapcore.EncoderConfig{
MessageKey: "message",
LevelKey: "level",
EncodeLevel: zapcore.CapitalLevelEncoder,
TimeKey: "time",
EncodeTime: zapcore.ISO8601TimeEncoder,
CallerKey: "caller",
EncodeCaller: zapcore.ShortCallerEncoder,
},
}
if viper.GetBool(debugFlag) {
zapConfig.Level = zap.NewAtomicLevelAt(zapcore.DebugLevel)
}
log, err := zapConfig.Build()
if err != nil {
panic(fmt.Errorf("create logger: %w", err))
}
return log
}


@ -10,6 +10,7 @@ import (
"os/signal"
"strconv"
"sync"
"sync/atomic"
"syscall"
"time"
@ -32,12 +33,13 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
treepool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree"
"github.com/gorilla/mux"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/go-chi/chi/v5"
"github.com/go-chi/chi/v5/middleware"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/spf13/viper"
"go.uber.org/zap"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
)
type (
@ -59,16 +61,22 @@ type (
bucketResolver *resolver.BucketResolver
services []*Service
settings *appSettings
maxClients api.MaxClients
webDone chan struct{}
wrkDone chan struct{}
}
appSettings struct {
logLevel zap.AtomicLevel
policies *placementPolicy
xmlDecoder *xml.DecoderProvider
logLevel zap.AtomicLevel
policies *placementPolicy
xmlDecoder *xml.DecoderProvider
maxClient maxClientsConfig
bypassContentEncodingInChunks atomic.Bool
}
maxClientsConfig struct {
deadline time.Duration
count int
}
Logger struct {
@ -89,7 +97,7 @@ func newApp(ctx context.Context, log *Logger, v *viper.Viper) *App {
objPool, treePool, key := getPools(ctx, log.logger, v)
// prepare auth center
ctr := auth.New(frostfs.NewAuthmateFrostFS(objPool), key, v.GetStringSlice(cfgAllowedAccessKeyIDPrefixes), getAccessBoxCacheConfig(v, log.logger))
ctr := auth.New(frostfs.NewAuthmateFrostFS(objPool, key), key, v.GetStringSlice(cfgAllowedAccessKeyIDPrefixes), getAccessBoxCacheConfig(v, log.logger))
app := &App{
ctr: ctr,
@ -102,8 +110,7 @@ func newApp(ctx context.Context, log *Logger, v *viper.Viper) *App {
webDone: make(chan struct{}, 1),
wrkDone: make(chan struct{}, 1),
maxClients: newMaxClients(v),
settings: newAppSettings(log, v),
settings: newAppSettings(log, v),
}
app.init(ctx)
@ -127,17 +134,21 @@ func (a *App) initLayer(ctx context.Context) {
a.log.Fatal("couldn't generate random key", zap.Error(err))
}
var gateOwner user.ID
user.IDFromKey(&gateOwner, a.key.PrivateKey.PublicKey)
layerCfg := &layer.Config{
Caches: getCacheOptions(a.cfg, a.log),
AnonKey: layer.AnonymousKey{
Key: randomKey,
},
GateOwner: gateOwner,
Resolver: a.bucketResolver,
TreeService: tree.NewTree(services.NewPoolWrapper(a.treePool), a.log),
}
// prepare object layer
a.obj = layer.NewLayer(a.log, frostfs.NewFrostFS(a.pool), layerCfg)
a.obj = layer.NewLayer(a.log, frostfs.NewFrostFS(a.pool, a.key), layerCfg)
if a.cfg.GetBool(cfgEnableNATS) {
nopts := getNotificationsOptions(a.cfg, a.log)
@ -158,11 +169,24 @@ func newAppSettings(log *Logger, v *viper.Viper) *appSettings {
log.logger.Fatal("failed to create new policy mapping", zap.Error(err))
}
return &appSettings{
settings := &appSettings{
logLevel: log.lvl,
policies: policies,
xmlDecoder: xml.NewDecoderProvider(v.GetBool(cfgKludgeUseDefaultXMLNSForCompleteMultipartUpload)),
maxClient: newMaxClients(v),
}
settings.setBypassContentEncodingInChunks(v.GetBool(cfgKludgeBypassContentEncodingCheckInChunks))
return settings
}
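// BypassContentEncodingInChunks reports whether the aws-chunked Content-Encoding check is skipped.
// The flag is stored in an atomic.Bool because it can be updated on SIGHUP configuration reload.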
func (s *appSettings) BypassContentEncodingInChunks() bool {
return s.bypassContentEncodingInChunks.Load()
}
func (s *appSettings) setBypassContentEncodingInChunks(bypass bool) {
s.bypassContentEncodingInChunks.Store(bypass)
}
func getDefaultPolicyValue(v *viper.Viper) string {
@ -243,18 +267,20 @@ func (a *App) shutdownTracing() {
}
}
func newMaxClients(cfg *viper.Viper) api.MaxClients {
maxClientsCount := cfg.GetInt(cfgMaxClientsCount)
if maxClientsCount <= 0 {
maxClientsCount = defaultMaxClientsCount
func newMaxClients(cfg *viper.Viper) maxClientsConfig {
config := maxClientsConfig{}
config.count = cfg.GetInt(cfgMaxClientsCount)
if config.count <= 0 {
config.count = defaultMaxClientsCount
}
maxClientsDeadline := cfg.GetDuration(cfgMaxClientsDeadline)
if maxClientsDeadline <= 0 {
maxClientsDeadline = defaultMaxClientsDeadline
config.deadline = cfg.GetDuration(cfgMaxClientsDeadline)
if config.deadline <= 0 {
config.deadline = defaultMaxClientsDeadline
}
return api.NewMaxClientsMiddleware(maxClientsCount, maxClientsDeadline)
return config
}
func getPools(ctx context.Context, logger *zap.Logger, cfg *viper.Viper) (*pool.Pool, *treepool.Pool, *keys.PrivateKey) {
@ -313,7 +339,7 @@ func getPools(ctx context.Context, logger *zap.Logger, cfg *viper.Viper) (*pool.
prmTree.SetLogger(logger)
var apiGRPCDialOpts []grpc.DialOption
var treeGRPCDialOpts = []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
var treeGRPCDialOpts []grpc.DialOption
if cfg.GetBool(cfgTracingEnabled) {
interceptors := []grpc.DialOption{
grpc.WithUnaryInterceptor(grpctracing.NewUnaryClientInteceptor()),
@ -491,12 +517,18 @@ func (a *App) Serve(ctx context.Context) {
// Attach S3 API:
domains := a.cfg.GetStringSlice(cfgListenDomains)
a.log.Info("fetch domains, prepare to use API", zap.Strings("domains", domains))
router := mux.NewRouter().SkipClean(true).UseEncodedPath()
api.Attach(router, domains, a.maxClients, a.api, a.ctr, a.log, a.metrics)
throttleOps := middleware.ThrottleOpts{
Limit: a.settings.maxClient.count,
BacklogTimeout: a.settings.maxClient.deadline,
}
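// Limit the number of concurrently processed requests; excess requests wait in a backlog for at most the configured deadline.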
chiRouter := chi.NewRouter()
api.AttachChi(chiRouter, domains, throttleOps, a.api, a.ctr, a.log, a.metrics)
// Use chi Router as http.Handler
srv := new(http.Server)
srv.Handler = router
srv.Handler = chiRouter
srv.ErrorLog = zap.NewStdLog(a.log)
a.startServices()
@ -582,6 +614,7 @@ func (a *App) updateSettings() {
a.settings.policies.update(a.log, a.cfg)
a.settings.xmlDecoder.UseDefaultNamespaceForCompleteMultipart(a.cfg.GetBool(cfgKludgeUseDefaultXMLNSForCompleteMultipartUpload))
a.settings.setBypassContentEncodingInChunks(a.cfg.GetBool(cfgKludgeBypassContentEncodingCheckInChunks))
}
func (a *App) startServices() {
@ -770,6 +803,7 @@ func (a *App) initHandler() {
}
cfg.CompleteMultipartKeepalive = a.cfg.GetDuration(cfgKludgeCompleteMultipartUploadKeepalive)
cfg.Kludge = a.settings
var err error
a.api, err = handler.New(a.log, a.obj, a.nc, cfg)


@ -120,6 +120,7 @@ const ( // Settings.
// Kludge.
cfgKludgeUseDefaultXMLNSForCompleteMultipartUpload = "kludge.use_default_xmlns_for_complete_multipart"
cfgKludgeCompleteMultipartUploadKeepalive = "kludge.complete_multipart_keepalive"
cfgKludgeBypassContentEncodingCheckInChunks = "kludge.bypass_content_encoding_check_in_chunks"
// Command line args.
cmdHelp = "help"
@ -306,6 +307,7 @@ func newSettings() *viper.Viper {
// kludge
v.SetDefault(cfgKludgeUseDefaultXMLNSForCompleteMultipartUpload, false)
v.SetDefault(cfgKludgeCompleteMultipartUploadKeepalive, 10*time.Second)
v.SetDefault(cfgKludgeBypassContentEncodingCheckInChunks, false)
// Bind flags
if err := bindFlags(v, flags); err != nil {


@ -138,6 +138,8 @@ S3_GW_RESOLVE_BUCKET_ALLOW=container
S3_GW_KLUDGE_USE_DEFAULT_XMLNS_FOR_COMPLETE_MULTIPART=false
# Set timeout between whitespace transmissions during CompleteMultipartUpload processing.
S3_GW_KLUDGE_COMPLETE_MULTIPART_KEEPALIVE=10s
# Use this flag to allow the chunked upload approach without the `aws-chunked` value in the `Content-Encoding` header.
S3_GW_KLUDGE_BYPASS_CONTENT_ENCODING_CHECK_IN_CHUNKS=false
S3_GW_TRACING_ENABLED=false
S3_GW_TRACING_ENDPOINT="localhost:4318"


@ -167,3 +167,5 @@ kludge:
use_default_xmlns_for_complete_multipart: false
# Set timeout between whitespace transmissions during CompleteMultipartUpload processing.
complete_multipart_keepalive: 10s
# Use this flag to allow the chunked upload approach without the `aws-chunked` value in the `Content-Encoding` header.
bypass_content_encoding_check_in_chunks: false


@ -26,6 +26,7 @@ potentially).
4. [Containers policy](#containers-policy)
3. [Obtainment of a secret](#obtaining-credential-secrets)
4. [Generate presigned url](#generate-presigned-url)
5. [Update secrets](#update-secret)
## Generation of wallet
@ -334,3 +335,39 @@ $ aws s3 --endpoint http://localhost:8084 presign s3://pregigned/obj
http://localhost:8084/pregigned/obj?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=6UpmiuYspPLMWfyhEKYmZQSsTGkFLS5MhQVdsda3fhz908Hw9eo9urTmaJtfvHMHUpY8SWAptk61bns2Js8f1M5tZ%2F20220615%2Fus-east-2%2Fs3%2Faws4_request&X-Amz-Date=20220615T072348Z&X-Amz-Expires=3600&X-Amz-SignedHeaders=host&X-Amz-Signature=b82c13952534b1bba699a718f2d42d135c2833a1e64030d4ce0e198af46551d4
```
## Update secret
You can extend the list of s3 gates that can accept already issued credentials.
To do this, use the `frostfs-s3-authmate update-secret` command:
**Required parameters:**
* `--wallet` is a path to a user wallet `.json` file. You can provide a passphrase to decrypt
a wallet via environment variable `AUTHMATE_WALLET_PASSPHRASE`, or you will be asked to enter a passphrase
interactively. You can also specify an account address to use from a wallet using the `--address` parameter.
* `--gate-wallet` is a path to a gate wallet `.json` file (needed to decrypt the current access box version). You can provide a passphrase to decrypt
a wallet via environment variable `AUTHMATE_WALLET_GATE_PASSPHRASE`, or you will be asked to enter a passphrase
interactively. You can also specify an account address to use from a wallet using the `--gate-address` parameter.
* `--peer` is an address of a FrostFS peer to connect to
* `--gate-public-key` is a public `secp256r1` 33-byte short key of a gate (use flags repeatedly for multiple gates).
* `--access-key-id` is a credential id to update.
```shell
$ frostfs-s3-authmate update-secret --wallet wallet.json --gate-wallet s3-wallet.json \
--peer 192.168.130.71:8080 \
--gate-public-key 0313b1ac3a8076e155a7e797b24f0b650cccad5941ea59d7cfd51a024a8b2a06bf \
--gate-public-key 0317585fa8274f7afdf1fc5f2a2e7bece549d5175c4e5182e37924f30229aef967 \
--gate-public-key 0223450b9db6d0c083e9c6de1f7d8fd22858d70829e09afa39828bb2416bf190fc \
--access-key-id HwrdXgetdGcEWAQwi68r1PMvw4iSm1Y5Z1fsFNSD6sQP04QomYDfYsspMhENEDhzTGwGxm86Q6R2Weugf3PG4sJ3M
Enter password for wallet.json >
Enter password for s3-wallet.json >
{
"initial_access_key_id": "HwrdXgetdGcEWAQwi68r1PMvw4iSm1Y5Z1fsFNSD6sQP04QomYDfYsspMhENEDhzTGwGxm86Q6R2Weugf3PG4sJ3M",
"access_key_id": "HwrdXgetdGcEWAQwi68r1PMvw4iSm1Y5Z1fsFNSD6sQP0xXf1ahGndNkydG9MrL9WmCebrPwdSHTAysQa9w6yCNJ",
"secret_access_key": "f6a65481fd2752e69e4aa80a6fdcad70cfbf8304d2b3b8c2f9c15212aeee3ae7",
"owner_private_key": "7f40233893e4f4a54e4f2f52455a0e6d563f7eb0233a985094937ed69faef681",
"wallet_public_key": "031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a",
"container_id": "HwrdXgetdGcEWAQwi68r1PMvw4iSm1Y5Z1fsFNSD6sQP"
}
```


@ -536,9 +536,11 @@ Workarounds for non-standard use cases.
kludge:
use_default_xmlns_for_complete_multipart: false
complete_multipart_keepalive: 10s
bypass_content_encoding_check_in_chunks: false
```
| Parameter | Type | SIGHUP reload | Default value | Description |
|--------------------------------------------|------------|---------------|---------------|-----------------------------------------------------------------------------------------------------------------------------|
| `use_default_xmlns_for_complete_multipart` | `bool` | yes | false | Enable using default xml namespace `http://s3.amazonaws.com/doc/2006-03-01/` when parse `CompleteMultipartUpload` xml body. |
| `complete_multipart_keepalive` | `duration` | no | 10s | Set timeout between whitespace transmissions during CompleteMultipartUpload processing. |
| Parameter                                   | Type       | SIGHUP reload | Default value | Description                                                                                                                                                                                   |
|---------------------------------------------|------------|---------------|---------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `use_default_xmlns_for_complete_multipart`  | `bool`     | yes           | false         | Enable using the default xml namespace `http://s3.amazonaws.com/doc/2006-03-01/` when parsing the `CompleteMultipartUpload` xml body.                                                        |
| `complete_multipart_keepalive`              | `duration` | no            | 10s           | Set timeout between whitespace transmissions during CompleteMultipartUpload processing.                                                                                                      |
| `bypass_content_encoding_check_in_chunks`   | `bool`     | yes           | false         | Use this flag to allow the [chunked upload approach](https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html) without the `aws-chunked` value in the `Content-Encoding` header. |

go.mod

@ -3,19 +3,20 @@ module git.frostfs.info/TrueCloudLab/frostfs-s3-gw
go 1.19
require (
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.15.1-0.20230531114046-62edd68f47ac
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.15.1-0.20230802075510-964c3edb3f44
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20230531082742-c97d21411eb6
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20230608140155-9d40228cecbe
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20230802103237-363f153eafa6
github.com/aws/aws-sdk-go v1.44.6
github.com/bluele/gcache v0.0.2
github.com/go-chi/chi/v5 v5.0.8
github.com/google/uuid v1.3.0
github.com/gorilla/mux v1.8.0
github.com/minio/sio v0.3.0
github.com/nats-io/nats.go v1.13.1-0.20220121202836-972a071d373d
github.com/nspcc-dev/neo-go v0.101.1
github.com/nspcc-dev/neo-go v0.101.2-0.20230601131642-a0117042e8fc
github.com/panjf2000/ants/v2 v2.5.0
github.com/prometheus/client_golang v1.15.1
github.com/prometheus/client_model v0.3.0
github.com/spf13/cobra v1.7.0
github.com/spf13/pflag v1.0.5
github.com/spf13/viper v1.15.0
github.com/stretchr/testify v1.8.3
@ -52,6 +53,7 @@ require (
github.com/hashicorp/golang-lru v0.6.0 // indirect
github.com/hashicorp/golang-lru/v2 v2.0.2 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/magiconair/properties v1.8.7 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
@ -61,7 +63,7 @@ require (
github.com/nats-io/nkeys v0.3.0 // indirect
github.com/nats-io/nuid v1.0.1 // indirect
github.com/nspcc-dev/go-ordered-json v0.0.0-20220111165707-25110be27d22 // indirect
github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20221202075445-cb5c18dc73eb // indirect
github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20230615193820-9185820289ce // indirect
github.com/nspcc-dev/rfc6979 v0.2.0 // indirect
github.com/pelletier/go-toml/v2 v2.0.6 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect

go.sum

@ -36,16 +36,16 @@ cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RX
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.15.1-0.20230531114046-62edd68f47ac h1:a6/Zc5BejflmguShwbllgJdEehnM9gshkLrLbKQHCU0=
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.15.1-0.20230531114046-62edd68f47ac/go.mod h1:pKJJRLOChW4zDQsAt1e8k/snWKljJtpkiPfxV53ngjI=
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.15.1-0.20230802075510-964c3edb3f44 h1:v6JqBD/VzZx3QSxbaXnUwnnJ1KEYheU4LzLGr3IhsAE=
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.15.1-0.20230802075510-964c3edb3f44/go.mod h1:pKJJRLOChW4zDQsAt1e8k/snWKljJtpkiPfxV53ngjI=
git.frostfs.info/TrueCloudLab/frostfs-contract v0.0.0-20230307110621-19a8ef2d02fb h1:S/TrbOOu9qEXZRZ9/Ddw7crnxbBUQLo68PSzQWYrc9M=
git.frostfs.info/TrueCloudLab/frostfs-contract v0.0.0-20230307110621-19a8ef2d02fb/go.mod h1:nkR5gaGeez3Zv2SE7aceP0YwxG2FzIB5cGKpQO2vV2o=
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk=
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0/go.mod h1:RUIKZATQLJ+TaYQa60X2fTDwfuhMfm8Ar60bQ5fr+vU=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20230531082742-c97d21411eb6 h1:aGQ6QaAnTerQ5Dq5b2/f9DUQtSqPkZZ/bkMx/HKuLCo=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20230531082742-c97d21411eb6/go.mod h1:W8Nn08/l6aQ7UlIbpF7FsQou7TVpcRD1ZT1KG4TrFhE=
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20230608140155-9d40228cecbe h1:47lrWXcl36ayN7AJ9IW7sDDnTj//RUyHoIZOsjbYAYA=
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20230608140155-9d40228cecbe/go.mod h1:w+s3ozlbFfTDFHhjX0A3Iif3BRtnTkwiACxFZD+Q0cQ=
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20230802103237-363f153eafa6 h1:u6lzNotV6MEMNEG/XeS7g+FjPrrf+j4gnOHtvun2KJc=
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20230802103237-363f153eafa6/go.mod h1:LI2GOj0pEx0jYTjB3QHja2PNhQFYL2pCm71RAFwDv0M=
git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc=
git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM=
git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0 h1:M2KR3iBj7WpY3hP10IevfIB9MURr4O9mwVfJ+SjT3HA=
@ -143,12 +143,14 @@ github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5Kwzbycv
github.com/fatih/color v1.12.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM=
github.com/flynn-archive/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:rZfgFAXFS/z/lEd6LJmf9HVZ1LkgYiHx5pHhV5DR16M=
github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og=
github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE=
github.com/frankban/quicktest v1.14.5 h1:dfYrrRyLtiqT9GyKXgdh+k4inNeTvmGbuSgZ3lx3GhA=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-chi/chi/v5 v5.0.8 h1:lD+NLqFcAi1ovnVZpsnObHGW4xb4J8lNmoYVfECH1Y0=
github.com/go-chi/chi/v5 v5.0.8/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
@ -243,8 +245,6 @@ github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
@ -264,6 +264,8 @@ github.com/holiman/uint256 v1.2.0/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
@ -345,11 +347,11 @@ github.com/nspcc-dev/hrw v1.0.9/go.mod h1:l/W2vx83vMQo6aStyx2AuZrJ+07lGv2JQGlVkP
github.com/nspcc-dev/neo-go v0.73.1-pre.0.20200303142215-f5a1b928ce09/go.mod h1:pPYwPZ2ks+uMnlRLUyXOpLieaDQSEaf4NM3zHVbRjmg=
github.com/nspcc-dev/neo-go v0.98.0/go.mod h1:E3cc1x6RXSXrJb2nDWXTXjnXk3rIqVN8YdFyWv+FrqM=
github.com/nspcc-dev/neo-go v0.99.4/go.mod h1:mKTolfRUfKjFso5HPvGSQtUZc70n0VKBMs16eGuC5gA=
github.com/nspcc-dev/neo-go v0.101.1 h1:TVdcIpH/+bxQBTLRwWE3+Pw3j6j/JwguENbBSGAGid0=
github.com/nspcc-dev/neo-go v0.101.1/go.mod h1:J4tspxWw7jknX06F+VSMsKvIiNpYGfVTb2IxVC005YU=
github.com/nspcc-dev/neo-go v0.101.2-0.20230601131642-a0117042e8fc h1:fySIWvUQsitK5e5qYIHnTDCXuPpwzz89SEUEIyY11sg=
github.com/nspcc-dev/neo-go v0.101.2-0.20230601131642-a0117042e8fc/go.mod h1:s9QhjMC784MWqTURovMbyYduIJc86mnCruxcMiAebpc=
github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20220927123257-24c107e3a262/go.mod h1:23bBw0v6pBYcrWs8CBEEDIEDJNbcFoIh8pGGcf2Vv8s=
github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20221202075445-cb5c18dc73eb h1:GFxfkpXEYAbMIr69JpKOsQWeLOaGrd49HNAor8uDW+A=
github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20221202075445-cb5c18dc73eb/go.mod h1:23bBw0v6pBYcrWs8CBEEDIEDJNbcFoIh8pGGcf2Vv8s=
github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20230615193820-9185820289ce h1:vLGuUNDkmQrWMa4rr4vTd1u8ULqejWxVmNz1L7ocTEI=
github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20230615193820-9185820289ce/go.mod h1:ZUuXOkdtHZgaC13za/zMgXfQFncZ0jLzfQTe+OsDOtg=
github.com/nspcc-dev/neofs-api-go/v2 v2.11.0-pre.0.20211201134523-3604d96f3fe1/go.mod h1:oS8dycEh8PPf2Jjp6+8dlwWyEv2Dy77h/XhhcdxYEFs=
github.com/nspcc-dev/neofs-api-go/v2 v2.11.1/go.mod h1:oS8dycEh8PPf2Jjp6+8dlwWyEv2Dy77h/XhhcdxYEFs=
github.com/nspcc-dev/neofs-crypto v0.2.0/go.mod h1:F/96fUzPM3wR+UGsPi3faVNmFlA9KAEAUQR7dMxZmNA=
@ -433,6 +435,8 @@ github.com/spf13/afero v1.9.3 h1:41FoI0fD7OR7mGcKE/aOiLkGreyf8ifIOQmJANWogMk=
github.com/spf13/afero v1.9.3/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y=
github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w=
github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU=
github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I=
github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0=
github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=


@ -17,6 +17,7 @@ import (
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
)
const (
@ -29,8 +30,8 @@ type AuthmateFrostFS struct {
}
// NewAuthmateFrostFS creates new AuthmateFrostFS using provided pool.Pool.
func NewAuthmateFrostFS(p *pool.Pool) *AuthmateFrostFS {
return &AuthmateFrostFS{frostFS: NewFrostFS(p)}
func NewAuthmateFrostFS(p *pool.Pool, key *keys.PrivateKey) *AuthmateFrostFS {
return &AuthmateFrostFS{frostFS: NewFrostFS(p, key)}
}
// ContainerExists implements authmate.FrostFS interface method.
@ -116,7 +117,6 @@ func (x *AuthmateFrostFS) CreateObject(ctx context.Context, prm tokens.PrmObject
}
return x.frostFS.CreateObject(ctx, layer.PrmObjectCreate{
Creator: prm.Creator,
Container: prm.Container,
Filepath: prm.Filepath,
Attributes: attributes,


@ -12,6 +12,7 @@ import (
objectv2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
errorsFrost "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/errors"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
@ -21,6 +22,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
)
// FrostFS represents virtual connection to the FrostFS network.
@ -29,6 +31,7 @@ import (
type FrostFS struct {
pool *pool.Pool
await pool.WaitParams
owner user.ID
}
const (
@ -37,14 +40,18 @@ const (
)
// NewFrostFS creates new FrostFS using provided pool.Pool.
func NewFrostFS(p *pool.Pool) *FrostFS {
func NewFrostFS(p *pool.Pool, key *keys.PrivateKey) *FrostFS {
var await pool.WaitParams
await.SetPollInterval(defaultPollInterval)
await.SetTimeout(defaultPollTimeout)
var owner user.ID
user.IDFromKey(&owner, key.PrivateKey.PublicKey)
return &FrostFS{
pool: p,
await: await,
owner: owner,
}
}
@ -136,12 +143,12 @@ func (x *FrostFS) CreateContainer(ctx context.Context, prm layer.PrmContainerCre
return cid.ID{}, handleObjectError("sync container with the network state", err)
}
var prmPut pool.PrmContainerPut
prmPut.SetContainer(cnr)
prmPut.SetWaitParams(x.await)
if prm.SessionToken != nil {
prmPut.WithinSession(*prm.SessionToken)
prmPut := pool.PrmContainerPut{
ClientParams: client.PrmContainerPut{
Container: &cnr,
Session: prm.SessionToken,
},
WaitParams: &x.await,
}
// send request to save the container
@ -237,7 +244,7 @@ func (x *FrostFS) CreateObject(ctx context.Context, prm layer.PrmObjectCreate) (
obj := object.New()
obj.SetContainerID(prm.Container)
obj.SetOwnerID(&prm.Creator)
obj.SetOwnerID(&x.owner)
obj.SetAttributes(attrs...)
obj.SetPayloadSize(prm.PayloadSize)


@ -6,8 +6,8 @@ import (
"fmt"
"io"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
errorsFrost "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/errors"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/pkg/service/tree"
@ -169,7 +169,7 @@ func (w *PoolWrapper) RemoveNode(ctx context.Context, bktInfo *data.BucketInfo,
}
func getBearer(ctx context.Context, bktInfo *data.BucketInfo) []byte {
if bd, ok := ctx.Value(api.BoxData).(*accessbox.Box); ok && bd != nil && bd.Gate != nil {
if bd, ok := ctx.Value(middleware.BoxData).(*accessbox.Box); ok && bd != nil && bd.Gate != nil {
if bd.Gate.BearerToken != nil {
if bd.Gate.BearerToken.Impersonate() || bktInfo.Owner.Equals(bearer.ResolveIssuer(*bd.Gate.BearerToken)) {
return bd.Gate.BearerToken.Marshal()


@ -117,19 +117,13 @@ var appMetricsDesc = map[string]map[string]Description{
Help: "Total number of s3 errors in current FrostFS S3 Gate instance",
VariableLabels: []string{"api"},
},
txBytesTotalMetric: Description{
Type: dto.MetricType_GAUGE,
Namespace: namespace,
Subsystem: statisticSubsystem,
Name: txBytesTotalMetric,
Help: "Total number of bytes sent by current FrostFS S3 Gate instance",
},
rxBytesTotalMetric: Description{
Type: dto.MetricType_GAUGE,
Namespace: namespace,
Subsystem: statisticSubsystem,
Name: rxBytesTotalMetric,
Help: "Total number of bytes received by current FrostFS S3 Gate instance",
bytesTotalMetric: Description{
Type: dto.MetricType_GAUGE,
Namespace: namespace,
Subsystem: statisticSubsystem,
Name: bytesTotalMetric,
Help: "Total number of bytes sent/received by current FrostFS S3 Gate instance",
VariableLabels: []string{"direction"},
},
},
}


@ -16,7 +16,7 @@ type StatisticScraper interface {
}
type GateMetrics struct {
registry *prometheus.Registry
registry prometheus.Registerer
State *StateMetrics
Pool *poolMetricsCollector
Billing *billingMetrics
@ -24,7 +24,7 @@ type GateMetrics struct {
}
func NewGateMetrics(scraper StatisticScraper) *GateMetrics {
registry := prometheus.NewRegistry()
registry := prometheus.DefaultRegisterer
stateMetric := newStateMetrics()
registry.MustRegister(stateMetric)


@ -28,8 +28,7 @@ type (
currentS3RequestsDesc *prometheus.Desc
totalS3RequestsDesc *prometheus.Desc
totalS3ErrorsDesc *prometheus.Desc
txBytesTotalDesc *prometheus.Desc
rxBytesTotalDesc *prometheus.Desc
bytesTotalDesc *prometheus.Desc
}
APIStatMetrics struct {
@ -47,8 +46,12 @@ const (
requestsCurrentMetric = "requests_current"
requestsTotalMetric = "requests_total"
errorsTotalMetric = "errors_total"
txBytesTotalMetric = "tx_bytes_total"
rxBytesTotalMetric = "rx_bytes_total"
bytesTotalMetric = "bytes_total"
)
const (
INDirection = "IN"
OUTDirection = "OUT"
)
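// A single bytes_total metric with a "direction" label (IN/OUT) replaces the former
// tx_bytes_total and rx_bytes_total gauges.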
func newAPIStatMetrics() *APIStatMetrics {
@ -132,8 +135,7 @@ func newHTTPStats() *httpStats {
currentS3RequestsDesc: newDesc(appMetricsDesc[statisticSubsystem][requestsCurrentMetric]),
totalS3RequestsDesc: newDesc(appMetricsDesc[statisticSubsystem][requestsTotalMetric]),
totalS3ErrorsDesc: newDesc(appMetricsDesc[statisticSubsystem][errorsTotalMetric]),
txBytesTotalDesc: newDesc(appMetricsDesc[statisticSubsystem][txBytesTotalMetric]),
rxBytesTotalDesc: newDesc(appMetricsDesc[statisticSubsystem][rxBytesTotalMetric]),
bytesTotalDesc: newDesc(appMetricsDesc[statisticSubsystem][bytesTotalMetric]),
}
}
@ -141,8 +143,7 @@ func (s *httpStats) Describe(desc chan<- *prometheus.Desc) {
desc <- s.currentS3RequestsDesc
desc <- s.totalS3RequestsDesc
desc <- s.totalS3ErrorsDesc
desc <- s.txBytesTotalDesc
desc <- s.rxBytesTotalDesc
desc <- s.bytesTotalDesc
}
func (s *httpStats) Collect(ch chan<- prometheus.Metric) {
@ -159,8 +160,8 @@ func (s *httpStats) Collect(ch chan<- prometheus.Metric) {
}
// Network bytes received/sent, reported with a direction label (IN/OUT)
ch <- prometheus.MustNewConstMetric(s.txBytesTotalDesc, prometheus.CounterValue, float64(s.getInputBytes()))
ch <- prometheus.MustNewConstMetric(s.rxBytesTotalDesc, prometheus.CounterValue, float64(s.getOutputBytes()))
ch <- prometheus.MustNewConstMetric(s.bytesTotalDesc, prometheus.CounterValue, float64(s.getInputBytes()), INDirection)
ch <- prometheus.MustNewConstMetric(s.bytesTotalDesc, prometheus.CounterValue, float64(s.getOutputBytes()), OUTDirection)
}
// Inc increments the api stats counter.


@ -9,9 +9,9 @@ import (
"strings"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"go.uber.org/zap"
@ -73,6 +73,7 @@ const (
lockConfigurationKV = "LockConfiguration"
oidKV = "OID"
isCombinedKV = "IsCombined"
isUnversionedKV = "IsUnversioned"
isTagKV = "IsTag"
uploadIDKV = "UploadId"
@ -181,6 +182,7 @@ func newNodeVersion(filePath string, node NodeResponse) (*data.NodeVersion, erro
func newNodeVersionFromTreeNode(filePath string, treeNode *treeNode) *data.NodeVersion {
_, isUnversioned := treeNode.Get(isUnversionedKV)
_, isDeleteMarker := treeNode.Get(isDeleteMarkerKV)
_, isCombined := treeNode.Get(isCombinedKV)
eTag, _ := treeNode.Get(etagKV)
version := &data.NodeVersion{
@ -194,6 +196,7 @@ func newNodeVersionFromTreeNode(filePath string, treeNode *treeNode) *data.NodeV
FilePath: filePath,
},
IsUnversioned: isUnversioned,
IsCombined: isCombined,
}
if isDeleteMarker {
@ -930,7 +933,6 @@ func (c *Tree) AddPart(ctx context.Context, bktInfo *data.BucketInfo, multipartN
etagKV: info.ETag,
}
var foundPartID uint64
for _, part := range parts {
if part.GetNodeID() == multipartNodeID {
continue
@ -940,20 +942,15 @@ func (c *Tree) AddPart(ctx context.Context, bktInfo *data.BucketInfo, multipartN
continue
}
if partInfo.Number == info.Number {
foundPartID = part.GetNodeID()
oldObjIDToDelete = partInfo.OID
break
return partInfo.OID, c.service.MoveNode(ctx, bktInfo, systemTree, part.GetNodeID(), multipartNodeID, meta)
}
}
if foundPartID != multipartNodeID {
if _, err = c.service.AddNode(ctx, bktInfo, systemTree, multipartNodeID, meta); err != nil {
return oid.ID{}, err
}
return oid.ID{}, layer.ErrNoNodeToRemove
if _, err = c.service.AddNode(ctx, bktInfo, systemTree, multipartNodeID, meta); err != nil {
return oid.ID{}, err
}
return oldObjIDToDelete, c.service.MoveNode(ctx, bktInfo, systemTree, foundPartID, multipartNodeID, meta)
return oid.ID{}, layer.ErrNoNodeToRemove
}
func (c *Tree) GetParts(ctx context.Context, bktInfo *data.BucketInfo, multipartNodeID uint64) ([]*data.PartInfo, error) {
@ -1073,6 +1070,10 @@ func (c *Tree) addVersion(ctx context.Context, bktInfo *data.BucketInfo, treeID
meta[createdKV] = strconv.FormatInt(version.DeleteMarker.Created.UTC().UnixMilli(), 10)
}
if version.IsCombined {
meta[isCombinedKV] = "true"
}
if version.IsUnversioned {
meta[isUnversionedKV] = "true"
@ -1192,7 +1193,7 @@ func (c *Tree) getNode(ctx context.Context, bktInfo *data.BucketInfo, treeID str
}
func (c *Tree) reqLogger(ctx context.Context) *zap.Logger {
reqLogger := api.GetReqLog(ctx)
reqLogger := middleware.GetReqLog(ctx)
if reqLogger != nil {
return reqLogger
}