forked from TrueCloudLab/frostfs-s3-gw

Compare commits: master...support/v0 (12 commits)

Commits (SHA1):

- 5e308b65e7
- 4286f7945b
- 809bc2eac8
- aa2c016f83
- 962c120125
- d1a1d489b1
- 2f29229383
- ff5c55e14d
- e87c3715c5
- 5f9555afad
- 4f9811fe70
- 670576248a
148 changed files with 3020 additions and 14904 deletions
@ -18,6 +18,3 @@ jobs:
- name: Build binary
run: make
- name: Check dirty suffix
run: if [[ $(make version) == *"dirty"* ]]; then echo "Version has dirty suffix" && exit 1; fi
@ -15,6 +15,6 @@ jobs:
go-version: '1.21'
- name: Run commit format checker
uses: https://git.frostfs.info/TrueCloudLab/dco-go@v3
uses: https://git.frostfs.info/TrueCloudLab/dco-go@v1
with:
from: 'origin/${{ github.event.pull_request.base.ref }}'
from: 3fbad97a
1 .gitignore vendored

@ -22,6 +22,7 @@ coverage.html
# debian package build files
debian/files
debian/changelog
debian/*.log
debian/*.substvars
debian/frostfs-s3-gw/
51 CHANGELOG.md

@ -4,54 +4,6 @@ This document outlines major changes between releases.

## [Unreleased]

### Fixed
- Fix marshaling errors in `DeleteObjects` method (#222)
- Fix status code in GET/HEAD delete marker (#226)
- Fix `NextVersionIDMarker` in `list-object-versions` (#248)
- Fix possibility of panic during SIGHUP (#288)
- Fix flaky `TestErrorTimeoutChecking` (`make test` sometimes failed) (#290)
- Fix user owner ID in billing metrics (#321)
- Fix HTTP/2 requests (#341)

### Added
- Add new `frostfs.buffer_max_size_for_put` config param and sync TZ hash for PUT operations (#197)
- Add `X-Amz-Version-Id` header after complete multipart upload (#227)
- Add handling of `X-Amz-Copy-Source-Server-Side-Encryption-Customer-*` headers during copy (#217)
- Add new `logger.destination` config param (#236)
- Add `X-Amz-Content-Sha256` header validation (#218)
- Support frostfsid contract. See `frostfsid` config section (#260)
- Support per namespace placement policies configuration (see `namespaces.config` config param) (#266)
- Support control api to manage policies. See `control` config section (#258)
- Add `namespace` label to billing metrics (#271)
- Support policy-engine (#257)
- Support `policy` contract (#259)
- Support `proxy` contract (#287)
- Authmate: support custom attributes (#292)
- Add new `reconnect_interval` config param (#291)
- Support `GetBucketPolicyStatus` (#301)

### Changed
- Generalise config param `use_default_xmlns_for_complete_multipart` to `use_default_xmlns` so that use default xmlns for all requests (#221)
- Set server IdleTimeout and ReadHeaderTimeout to `30s` and allow to configure them (#220)
- Return `ETag` value in quotes (#219)
- Use tombstone when delete multipart upload (#275)
- Support new parameter `cache.accessbox.removing_check_interval` (#305)
- Use APE rules instead of eACL in container creation (#306)

### Removed
- Drop sending whitespace characters during complete multipart upload and related config param `kludge.complete_multipart_keepalive` (#227)

## [0.28.1] - 2024-01-24

### Added
- MD5 hash as ETag and response header (#205)
- Tree pool traversal limit (#262)

### Updating from 0.28.0

See new `features.md5.enabled` and `frostfs.tree_pool_max_attempts` config parameters.

## [0.28.0] - Academy of Sciences - 2023-12-07

### Fixed

@ -156,5 +108,4 @@ To see CHANGELOG for older versions, refer to https://github.com/nspcc-dev/neofs

[0.27.0]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/b2148cc3...v0.27.0
[0.28.0]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.27.0...v0.28.0
[0.28.1]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.28.0...v0.28.1
[Unreleased]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.28.1...master
[Unreleased]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.28.0...master
13 Makefile

@ -4,8 +4,8 @@
REPO ?= $(shell go list -m)
VERSION ?= $(shell git describe --tags --dirty --match "v*" --always --abbrev=8 2>/dev/null || cat VERSION 2>/dev/null || echo "develop")
GO_VERSION ?= 1.20
LINT_VERSION ?= 1.56.1
TRUECLOUDLAB_LINT_VERSION ?= 0.0.5
LINT_VERSION ?= 1.54.0
TRUECLOUDLAB_LINT_VERSION ?= 0.0.2
BINDIR = bin

METRICS_DUMP_OUT ?= ./metrics-dump.json

@ -151,21 +151,16 @@ clean:

# Generate code from .proto files
protoc:
# Install specific version for protobuf lib
@GOBIN=$(abspath $(BINDIR)) go install -mod=mod -v git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/protogen
@for f in `find . -type f -name '*.proto' -not -path './vendor/*'`; do \
echo "⇒ Processing $$f "; \
protoc \
--go_out=paths=source_relative:. \
--plugin=protoc-gen-go-frostfs=$(BINDIR)/protogen \
--go-frostfs_out=. --go-frostfs_opt=paths=source_relative \
--go-grpc_opt=require_unimplemented_servers=false \
--go-grpc_out=. --go-grpc_opt=paths=source_relative $$f; \
--go_out=paths=source_relative:. $$f; \
done
rm -rf vendor

# Package for Debian
debpackage:
cp debian/changelog.init debian/changelog
dch --package frostfs-s3-gw \
--controlmaint \
--newversion $(PKG_VERSION) \
2 VERSION

@ -1 +1 @@
v0.28.1
v0.28.0
@ -5,6 +5,7 @@ import (
|
|||
"crypto/hmac"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"mime/multipart"
|
||||
|
@ -14,12 +15,13 @@ import (
|
|||
"time"
|
||||
|
||||
v4 "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/cache"
|
||||
apiErrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/tokens"
|
||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||
)
|
||||
|
||||
// authorizationFieldRegexp -- is regexp for credentials with Base58 encoded cid and oid and '0' (zero) as delimiter.
|
||||
|
@ -29,13 +31,27 @@ var authorizationFieldRegexp = regexp.MustCompile(`AWS4-HMAC-SHA256 Credential=(
|
|||
var postPolicyCredentialRegexp = regexp.MustCompile(`(?P<access_key_id>[^/]+)/(?P<date>[^/]+)/(?P<region>[^/]*)/(?P<service>[^/]+)/aws4_request`)
|
||||
|
||||
type (
|
||||
Center struct {
|
||||
// Center is a user authentication interface.
|
||||
Center interface {
|
||||
Authenticate(request *http.Request) (*Box, error)
|
||||
}
|
||||
|
||||
// Box contains access box and additional info.
|
||||
Box struct {
|
||||
AccessBox *accessbox.Box
|
||||
ClientTime time.Time
|
||||
AuthHeaders *AuthHeader
|
||||
}
|
||||
|
||||
center struct {
|
||||
reg *RegexpSubmatcher
|
||||
postReg *RegexpSubmatcher
|
||||
cli tokens.Credentials
|
||||
allowedAccessKeyIDPrefixes []string // empty slice means all access key ids are allowed
|
||||
}
|
||||
|
||||
prs int
|
||||
|
||||
//nolint:revive
|
||||
AuthHeader struct {
|
||||
AccessKeyID string
|
||||
|
@ -60,46 +76,42 @@ const (
|
|||
AmzSignedHeaders = "X-Amz-SignedHeaders"
|
||||
AmzExpires = "X-Amz-Expires"
|
||||
AmzDate = "X-Amz-Date"
|
||||
AmzContentSHA256 = "X-Amz-Content-Sha256"
|
||||
AuthorizationHdr = "Authorization"
|
||||
ContentTypeHdr = "Content-Type"
|
||||
|
||||
UnsignedPayload = "UNSIGNED-PAYLOAD"
|
||||
StreamingUnsignedPayloadTrailer = "STREAMING-UNSIGNED-PAYLOAD-TRAILER"
|
||||
StreamingContentSHA256 = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD"
|
||||
StreamingContentSHA256Trailer = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD-TRAILER"
|
||||
StreamingContentECDSASHA256 = "STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD"
|
||||
StreamingContentECDSASHA256Trailer = "STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD-TRAILER"
|
||||
)
|
||||
|
||||
var ContentSHA256HeaderStandardValue = map[string]struct{}{
|
||||
UnsignedPayload: {},
|
||||
StreamingUnsignedPayloadTrailer: {},
|
||||
StreamingContentSHA256: {},
|
||||
StreamingContentSHA256Trailer: {},
|
||||
StreamingContentECDSASHA256: {},
|
||||
StreamingContentECDSASHA256Trailer: {},
|
||||
// ErrNoAuthorizationHeader is returned for unauthenticated requests.
|
||||
var ErrNoAuthorizationHeader = errors.New("no authorization header")
|
||||
|
||||
func (p prs) Read(_ []byte) (n int, err error) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (p prs) Seek(_ int64, _ int) (int64, error) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
var _ io.ReadSeeker = prs(0)
|
||||
|
||||
// New creates an instance of AuthCenter.
|
||||
func New(creds tokens.Credentials, prefixes []string) *Center {
|
||||
return &Center{
|
||||
cli: creds,
|
||||
func New(frostFS tokens.FrostFS, key *keys.PrivateKey, prefixes []string, config *cache.Config) Center {
|
||||
return &center{
|
||||
cli: tokens.New(frostFS, key, config),
|
||||
reg: NewRegexpMatcher(authorizationFieldRegexp),
|
||||
postReg: NewRegexpMatcher(postPolicyCredentialRegexp),
|
||||
allowedAccessKeyIDPrefixes: prefixes,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Center) parseAuthHeader(header string) (*AuthHeader, error) {
|
||||
func (c *center) parseAuthHeader(header string) (*AuthHeader, error) {
|
||||
submatches := c.reg.GetSubmatches(header)
|
||||
if len(submatches) != authHeaderPartsNum {
|
||||
return nil, fmt.Errorf("%w: %s", apiErrors.GetAPIError(apiErrors.ErrAuthorizationHeaderMalformed), header)
|
||||
return nil, apiErrors.GetAPIError(apiErrors.ErrAuthorizationHeaderMalformed)
|
||||
}
|
||||
|
||||
accessKey := strings.Split(submatches["access_key_id"], "0")
|
||||
if len(accessKey) != accessKeyPartsNum {
|
||||
return nil, fmt.Errorf("%w: %s", apiErrors.GetAPIError(apiErrors.ErrInvalidAccessKeyID), accessKey)
|
||||
return nil, apiErrors.GetAPIError(apiErrors.ErrInvalidAccessKeyID)
|
||||
}
|
||||
|
||||
signedFields := strings.Split(submatches["signed_header_fields"], ";")
|
||||
|
@ -114,21 +126,15 @@ func (c *Center) parseAuthHeader(header string) (*AuthHeader, error) {
|
|||
}, nil
|
||||
}
|
||||
|
||||
func getAddress(accessKeyID string) (oid.Address, error) {
|
||||
func (a *AuthHeader) getAddress() (oid.Address, error) {
|
||||
var addr oid.Address
|
||||
if err := addr.DecodeString(strings.ReplaceAll(accessKeyID, "0", "/")); err != nil {
|
||||
return addr, fmt.Errorf("%w: %s", apiErrors.GetAPIError(apiErrors.ErrInvalidAccessKeyID), accessKeyID)
|
||||
if err := addr.DecodeString(strings.ReplaceAll(a.AccessKeyID, "0", "/")); err != nil {
|
||||
return addr, apiErrors.GetAPIError(apiErrors.ErrInvalidAccessKeyID)
|
||||
}
|
||||
|
||||
return addr, nil
|
||||
}
|
||||
|
||||
func IsStandardContentSHA256(key string) bool {
|
||||
_, ok := ContentSHA256HeaderStandardValue[key]
|
||||
return ok
|
||||
}
|
||||
|
||||
func (c *Center) Authenticate(r *http.Request) (*middleware.Box, error) {
|
||||
func (c *center) Authenticate(r *http.Request) (*Box, error) {
|
||||
var (
|
||||
err error
|
||||
authHdr *AuthHeader
|
||||
|
@ -162,7 +168,7 @@ func (c *Center) Authenticate(r *http.Request) (*middleware.Box, error) {
|
|||
if strings.HasPrefix(r.Header.Get(ContentTypeHdr), "multipart/form-data") {
|
||||
return c.checkFormData(r)
|
||||
}
|
||||
return nil, fmt.Errorf("%w: %v", middleware.ErrNoAuthorizationHeader, authHeaderField)
|
||||
return nil, ErrNoAuthorizationHeader
|
||||
}
|
||||
authHdr, err = c.parseAuthHeader(authHeaderField[0])
|
||||
if err != nil {
|
||||
|
@ -177,22 +183,18 @@ func (c *Center) Authenticate(r *http.Request) (*middleware.Box, error) {
|
|||
return nil, fmt.Errorf("failed to parse x-amz-date header field: %w", err)
|
||||
}
|
||||
|
||||
if err = c.checkAccessKeyID(authHdr.AccessKeyID); err != nil {
|
||||
if err := c.checkAccessKeyID(authHdr.AccessKeyID); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
addr, err := getAddress(authHdr.AccessKeyID)
|
||||
addr, err := authHdr.getAddress()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
box, err := c.cli.GetBox(r.Context(), addr)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("get box '%s': %w", addr, err)
|
||||
}
|
||||
|
||||
if err = checkFormatHashContentSHA256(r.Header.Get(AmzContentSHA256)); err != nil {
|
||||
return nil, err
|
||||
return nil, fmt.Errorf("get box: %w", err)
|
||||
}
|
||||
|
||||
clonedRequest := cloneRequest(r, authHdr)
|
||||
|
@ -200,13 +202,9 @@ func (c *Center) Authenticate(r *http.Request) (*middleware.Box, error) {
|
|||
return nil, err
|
||||
}
|
||||
|
||||
result := &middleware.Box{
|
||||
AccessBox: box,
|
||||
AuthHeaders: &middleware.AuthHeader{
|
||||
AccessKeyID: authHdr.AccessKeyID,
|
||||
Region: authHdr.Region,
|
||||
SignatureV4: authHdr.SignatureV4,
|
||||
},
|
||||
result := &Box{
|
||||
AccessBox: box,
|
||||
AuthHeaders: authHdr,
|
||||
}
|
||||
if needClientTime {
|
||||
result.ClientTime = signatureDateTime
|
||||
|
@ -215,22 +213,7 @@ func (c *Center) Authenticate(r *http.Request) (*middleware.Box, error) {
|
|||
return result, nil
|
||||
}
|
||||
|
||||
func checkFormatHashContentSHA256(hash string) error {
|
||||
if !IsStandardContentSHA256(hash) {
|
||||
hashBinary, err := hex.DecodeString(hash)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%w: decode hash: %s: %s", apiErrors.GetAPIError(apiErrors.ErrContentSHA256Mismatch),
|
||||
hash, err.Error())
|
||||
}
|
||||
if len(hashBinary) != sha256.Size && len(hash) != 0 {
|
||||
return fmt.Errorf("%w: invalid hash size %d", apiErrors.GetAPIError(apiErrors.ErrContentSHA256Mismatch), len(hashBinary))
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c Center) checkAccessKeyID(accessKeyID string) error {
|
||||
func (c center) checkAccessKeyID(accessKeyID string) error {
|
||||
if len(c.allowedAccessKeyIDPrefixes) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
@ -241,12 +224,12 @@ func (c Center) checkAccessKeyID(accessKeyID string) error {
|
|||
}
|
||||
}
|
||||
|
||||
return fmt.Errorf("%w: accesskeyID prefix isn't allowed", apiErrors.GetAPIError(apiErrors.ErrAccessDenied))
|
||||
return apiErrors.GetAPIError(apiErrors.ErrAccessDenied)
|
||||
}
|
||||
|
||||
func (c *Center) checkFormData(r *http.Request) (*middleware.Box, error) {
|
||||
func (c *center) checkFormData(r *http.Request) (*Box, error) {
|
||||
if err := r.ParseMultipartForm(maxFormSizeMemory); err != nil {
|
||||
return nil, fmt.Errorf("%w: parse multipart form with max size %d", apiErrors.GetAPIError(apiErrors.ErrInvalidArgument), maxFormSizeMemory)
|
||||
return nil, apiErrors.GetAPIError(apiErrors.ErrInvalidArgument)
|
||||
}
|
||||
|
||||
if err := prepareForm(r.MultipartForm); err != nil {
|
||||
|
@ -255,13 +238,12 @@ func (c *Center) checkFormData(r *http.Request) (*middleware.Box, error) {
|
|||
|
||||
policy := MultipartFormValue(r, "policy")
|
||||
if policy == "" {
|
||||
return nil, fmt.Errorf("%w: missing policy", middleware.ErrNoAuthorizationHeader)
|
||||
return nil, ErrNoAuthorizationHeader
|
||||
}
|
||||
|
||||
creds := MultipartFormValue(r, "x-amz-credential")
|
||||
submatches := c.postReg.GetSubmatches(creds)
|
||||
submatches := c.postReg.GetSubmatches(MultipartFormValue(r, "x-amz-credential"))
|
||||
if len(submatches) != 4 {
|
||||
return nil, fmt.Errorf("%w: %s", apiErrors.GetAPIError(apiErrors.ErrAuthorizationHeaderMalformed), creds)
|
||||
return nil, apiErrors.GetAPIError(apiErrors.ErrAuthorizationHeaderMalformed)
|
||||
}
|
||||
|
||||
signatureDateTime, err := time.Parse("20060102T150405Z", MultipartFormValue(r, "x-amz-date"))
|
||||
|
@ -269,27 +251,25 @@ func (c *Center) checkFormData(r *http.Request) (*middleware.Box, error) {
|
|||
return nil, fmt.Errorf("failed to parse x-amz-date field: %w", err)
|
||||
}
|
||||
|
||||
addr, err := getAddress(submatches["access_key_id"])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
var addr oid.Address
|
||||
if err = addr.DecodeString(strings.ReplaceAll(submatches["access_key_id"], "0", "/")); err != nil {
|
||||
return nil, apiErrors.GetAPIError(apiErrors.ErrInvalidAccessKeyID)
|
||||
}
|
||||
|
||||
box, err := c.cli.GetBox(r.Context(), addr)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("get box '%s': %w", addr, err)
|
||||
return nil, fmt.Errorf("get box: %w", err)
|
||||
}
|
||||
|
||||
secret := box.Gate.SecretKey
|
||||
secret := box.Gate.AccessKey
|
||||
service, region := submatches["service"], submatches["region"]
|
||||
|
||||
signature := signStr(secret, service, region, signatureDateTime, policy)
|
||||
reqSignature := MultipartFormValue(r, "x-amz-signature")
|
||||
if signature != reqSignature {
|
||||
return nil, fmt.Errorf("%w: %s != %s", apiErrors.GetAPIError(apiErrors.ErrSignatureDoesNotMatch),
|
||||
reqSignature, signature)
|
||||
if signature != MultipartFormValue(r, "x-amz-signature") {
|
||||
return nil, apiErrors.GetAPIError(apiErrors.ErrSignatureDoesNotMatch)
|
||||
}
|
||||
|
||||
return &middleware.Box{AccessBox: box}, nil
|
||||
return &Box{AccessBox: box}, nil
|
||||
}
|
||||
|
||||
func cloneRequest(r *http.Request, authHeader *AuthHeader) *http.Request {
|
||||
|
@ -313,8 +293,8 @@ func cloneRequest(r *http.Request, authHeader *AuthHeader) *http.Request {
|
|||
return otherRequest
|
||||
}
|
||||
|
||||
func (c *Center) checkSign(authHeader *AuthHeader, box *accessbox.Box, request *http.Request, signatureDateTime time.Time) error {
|
||||
awsCreds := credentials.NewStaticCredentials(authHeader.AccessKeyID, box.Gate.SecretKey, "")
|
||||
func (c *center) checkSign(authHeader *AuthHeader, box *accessbox.Box, request *http.Request, signatureDateTime time.Time) error {
|
||||
awsCreds := credentials.NewStaticCredentials(authHeader.AccessKeyID, box.Gate.AccessKey, "")
|
||||
signer := v4.NewSigner(awsCreds)
|
||||
signer.DisableURIPathEscaping = true
|
||||
|
||||
|
@ -322,12 +302,10 @@ func (c *Center) checkSign(authHeader *AuthHeader, box *accessbox.Box, request *
|
|||
if authHeader.IsPresigned {
|
||||
now := time.Now()
|
||||
if signatureDateTime.Add(authHeader.Expiration).Before(now) {
|
||||
return fmt.Errorf("%w: expired: now %s, signature %s", apiErrors.GetAPIError(apiErrors.ErrExpiredPresignRequest),
|
||||
now.Format(time.RFC3339), signatureDateTime.Format(time.RFC3339))
|
||||
return apiErrors.GetAPIError(apiErrors.ErrExpiredPresignRequest)
|
||||
}
|
||||
if now.Before(signatureDateTime) {
|
||||
return fmt.Errorf("%w: signature time from the future: now %s, signature %s", apiErrors.GetAPIError(apiErrors.ErrBadRequest),
|
||||
now.Format(time.RFC3339), signatureDateTime.Format(time.RFC3339))
|
||||
return apiErrors.GetAPIError(apiErrors.ErrBadRequest)
|
||||
}
|
||||
if _, err := signer.Presign(request, nil, authHeader.Service, authHeader.Region, authHeader.Expiration, signatureDateTime); err != nil {
|
||||
return fmt.Errorf("failed to pre-sign temporary HTTP request: %w", err)
|
||||
|
@ -341,8 +319,7 @@ func (c *Center) checkSign(authHeader *AuthHeader, box *accessbox.Box, request *
|
|||
}
|
||||
|
||||
if authHeader.SignatureV4 != signature {
|
||||
return fmt.Errorf("%w: %s != %s: headers %v", apiErrors.GetAPIError(apiErrors.ErrSignatureDoesNotMatch),
|
||||
authHeader.SignatureV4, signature, authHeader.SignedFields)
|
||||
return apiErrors.GetAPIError(apiErrors.ErrSignatureDoesNotMatch)
|
||||
}
|
||||
|
||||
return nil
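The access-key handling in this file relies on the format described by `authorizationFieldRegexp`: the S3 access key ID is a Base58-encoded container ID and object ID joined with '0' (Base58 alphabets contain no '0', so the delimiter is unambiguous), and both branches recover the credential object's address by swapping '0' for '/'. Below is a minimal standalone sketch of that conversion using only the FrostFS SDK calls visible in the diff; `addressFromAccessKeyID` is a hypothetical helper name and the round-trip uses a zero-value address purely for illustration.

```go
package main

import (
	"fmt"
	"strings"

	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)

// addressFromAccessKeyID mirrors the gateway's getAddress logic:
// "<cid>0<oid>" becomes "<cid>/<oid>", the canonical object address string.
func addressFromAccessKeyID(accessKeyID string) (oid.Address, error) {
	var addr oid.Address
	if err := addr.DecodeString(strings.ReplaceAll(accessKeyID, "0", "/")); err != nil {
		return addr, fmt.Errorf("invalid access key id %q: %w", accessKeyID, err)
	}
	return addr, nil
}

func main() {
	// Build an access key ID from an (empty) address and decode it back.
	var src oid.Address
	accessKeyID := strings.ReplaceAll(src.EncodeToString(), "/", "0")

	addr, err := addressFromAccessKeyID(accessKeyID)
	fmt.Println(addr, err)
}
```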
@ -12,7 +12,7 @@ import (
|
|||
func TestAuthHeaderParse(t *testing.T) {
|
||||
defaultHeader := "AWS4-HMAC-SHA256 Credential=oid0cid/20210809/us-east-1/s3/aws4_request, SignedHeaders=host;x-amz-content-sha256;x-amz-date, Signature=2811ccb9e242f41426738fb1f"
|
||||
|
||||
center := &Center{
|
||||
center := &center{
|
||||
reg: NewRegexpMatcher(authorizationFieldRegexp),
|
||||
}
|
||||
|
||||
|
@ -45,7 +45,7 @@ func TestAuthHeaderParse(t *testing.T) {
|
|||
},
|
||||
} {
|
||||
authHeader, err := center.parseAuthHeader(tc.header)
|
||||
require.ErrorIs(t, err, tc.err, tc.header)
|
||||
require.Equal(t, tc.err, err, tc.header)
|
||||
require.Equal(t, tc.expected, authHeader, tc.header)
|
||||
}
|
||||
}
|
||||
|
@ -82,8 +82,8 @@ func TestAuthHeaderGetAddress(t *testing.T) {
|
|||
err: defaulErr,
|
||||
},
|
||||
} {
|
||||
_, err := getAddress(tc.authHeader.AccessKeyID)
|
||||
require.ErrorIs(t, err, tc.err, tc.authHeader.AccessKeyID)
|
||||
_, err := tc.authHeader.getAddress()
|
||||
require.Equal(t, tc.err, err, tc.authHeader.AccessKeyID)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -99,49 +99,3 @@ func TestSignature(t *testing.T) {
|
|||
signature := signStr(secret, "s3", "us-east-1", signTime, strToSign)
|
||||
require.Equal(t, "dfbe886241d9e369cf4b329ca0f15eb27306c97aa1022cc0bb5a914c4ef87634", signature)
|
||||
}
|
||||
|
||||
func TestCheckFormatContentSHA256(t *testing.T) {
|
||||
defaultErr := errors.GetAPIError(errors.ErrContentSHA256Mismatch)
|
||||
|
||||
for _, tc := range []struct {
|
||||
name string
|
||||
hash string
|
||||
error error
|
||||
}{
|
||||
{
|
||||
name: "invalid hash format: length and character",
|
||||
hash: "invalid-hash",
|
||||
error: defaultErr,
|
||||
},
|
||||
{
|
||||
name: "invalid hash format: length (63 characters)",
|
||||
hash: "ed7002b439e9ac845f22357d822bac1444730fbdb6016d3ec9432297b9ec9f7",
|
||||
error: defaultErr,
|
||||
},
|
||||
{
|
||||
name: "invalid hash format: character",
|
||||
hash: "ed7002b439e9ac845f22357d822bac1444730fbdb6016d3ec9432297b9ec9f7s",
|
||||
error: defaultErr,
|
||||
},
|
||||
{
|
||||
name: "unsigned payload",
|
||||
hash: "UNSIGNED-PAYLOAD",
|
||||
error: nil,
|
||||
},
|
||||
{
|
||||
name: "no hash",
|
||||
hash: "",
|
||||
error: nil,
|
||||
},
|
||||
{
|
||||
name: "correct hash format",
|
||||
hash: "ed7002b439e9ac845f22357d822bac1444730fbdb6016d3ec9432297b9ec9f73",
|
||||
error: nil,
|
||||
},
|
||||
} {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
err := checkFormatHashContentSHA256(tc.hash)
|
||||
require.ErrorIs(t, err, tc.error)
|
||||
})
|
||||
}
|
||||
}
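Many of the test changes above (`require.ErrorIs` on master versus `require.Equal` on support/v0) follow from the error-handling shift in `center.go`: master wraps the static API errors with request-specific context via `%w`, so callers must match them with `errors.Is` rather than strict equality. A small sketch of that pattern, assuming only the gateway's `api/errors` package:

```go
package main

import (
	"errors"
	"fmt"

	apiErrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
)

func main() {
	base := apiErrors.GetAPIError(apiErrors.ErrInvalidAccessKeyID)

	// master-style: keep the API error as the wrapped cause, add context for logs.
	wrapped := fmt.Errorf("%w: %s", base, "oid0cid")

	// errors.Is unwraps the context and still matches the underlying API error,
	// which is why the master-side tests switched to require.ErrorIs.
	fmt.Println(errors.Is(wrapped, base)) // true
}
```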
@ -11,7 +11,9 @@ import (
|
|||
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
|
||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
|
@ -40,11 +42,11 @@ func (m credentialsMock) GetBox(_ context.Context, addr oid.Address) (*accessbox
|
|||
return box, nil
|
||||
}
|
||||
|
||||
func (m credentialsMock) Put(context.Context, cid.ID, tokens.CredentialsParam) (oid.Address, error) {
|
||||
func (m credentialsMock) Put(context.Context, cid.ID, user.ID, *accessbox.AccessBox, uint64, ...*keys.PublicKey) (oid.Address, error) {
|
||||
return oid.Address{}, nil
|
||||
}
|
||||
|
||||
func (m credentialsMock) Update(context.Context, oid.Address, tokens.CredentialsParam) (oid.Address, error) {
|
||||
func (m credentialsMock) Update(context.Context, oid.Address, user.ID, *accessbox.AccessBox, uint64, ...*keys.PublicKey) (oid.Address, error) {
|
||||
return oid.Address{}, nil
|
||||
}
|
||||
|
||||
|
@ -75,14 +77,14 @@ func TestCheckSign(t *testing.T) {
|
|||
|
||||
expBox := &accessbox.Box{
|
||||
Gate: &accessbox.GateData{
|
||||
SecretKey: secretKey,
|
||||
AccessKey: secretKey,
|
||||
},
|
||||
}
|
||||
|
||||
mock := newTokensFrostfsMock()
|
||||
mock.addBox(accessKeyAddr, expBox)
|
||||
|
||||
c := &Center{
|
||||
c := &center{
|
||||
cli: mock,
|
||||
reg: NewRegexpMatcher(authorizationFieldRegexp),
|
||||
postReg: NewRegexpMatcher(postPolicyCredentialRegexp),
|
||||
|
|
26 api/cache/accessbox.go vendored
|
@ -24,11 +24,6 @@ type (
|
|||
Lifetime time.Duration
|
||||
Logger *zap.Logger
|
||||
}
|
||||
|
||||
AccessBoxCacheValue struct {
|
||||
Box *accessbox.Box
|
||||
PutTime time.Time
|
||||
}
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -47,21 +42,21 @@ func DefaultAccessBoxConfig(logger *zap.Logger) *Config {
|
|||
}
|
||||
}
|
||||
|
||||
// NewAccessBoxCache creates an object of AccessBoxCache.
|
||||
// NewAccessBoxCache creates an object of BucketCache.
|
||||
func NewAccessBoxCache(config *Config) *AccessBoxCache {
|
||||
gc := gcache.New(config.Size).LRU().Expiration(config.Lifetime).Build()
|
||||
|
||||
return &AccessBoxCache{cache: gc, logger: config.Logger}
|
||||
}
|
||||
|
||||
// Get returns a cached accessbox.
|
||||
func (o *AccessBoxCache) Get(address oid.Address) *AccessBoxCacheValue {
|
||||
// Get returns a cached object.
|
||||
func (o *AccessBoxCache) Get(address oid.Address) *accessbox.Box {
|
||||
entry, err := o.cache.Get(address)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
result, ok := entry.(*AccessBoxCacheValue)
|
||||
result, ok := entry.(*accessbox.Box)
|
||||
if !ok {
|
||||
o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
|
||||
zap.String("expected", fmt.Sprintf("%T", result)))
|
||||
|
@ -71,16 +66,7 @@ func (o *AccessBoxCache) Get(address oid.Address) *AccessBoxCacheValue {
|
|||
return result
|
||||
}
|
||||
|
||||
// Put stores an accessbox to cache.
|
||||
// Put stores an object to cache.
|
||||
func (o *AccessBoxCache) Put(address oid.Address, box *accessbox.Box) error {
|
||||
val := &AccessBoxCacheValue{
|
||||
Box: box,
|
||||
PutTime: time.Now(),
|
||||
}
|
||||
return o.cache.Set(address, val)
|
||||
}
|
||||
|
||||
// Delete removes an accessbox from cache.
|
||||
func (o *AccessBoxCache) Delete(address oid.Address) {
|
||||
o.cache.Remove(address)
|
||||
return o.cache.Set(address, box)
|
||||
}
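A quick usage sketch of the access-box cache as it looks on the master side of this hunk, where `Get` returns an `AccessBoxCacheValue` wrapper whose `PutTime` feeds the `cache.accessbox.removing_check_interval` behaviour mentioned in the changelog (on support/v0 `Get` returns the `*accessbox.Box` directly). The constructors and types are the ones visible in the diff; the zero-value address is just a placeholder:

```go
package main

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/cache"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
	"go.uber.org/zap"
)

func main() {
	c := cache.NewAccessBoxCache(cache.DefaultAccessBoxConfig(zap.NewNop()))

	var addr oid.Address // zero-value address, purely illustrative
	box := &accessbox.Box{}

	if err := c.Put(addr, box); err != nil {
		panic(err)
	}

	// Get returns the wrapper; PutTime lets callers decide when a cached box
	// is old enough to be re-checked or evicted.
	if val := c.Get(addr); val != nil {
		fmt.Println(val.Box == box, val.PutTime)
	}
}
```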
14 api/cache/buckets.go vendored
|
@ -39,8 +39,8 @@ func NewBucketCache(config *Config) *BucketCache {
|
|||
}
|
||||
|
||||
// Get returns a cached object.
|
||||
func (o *BucketCache) Get(ns, bktName string) *data.BucketInfo {
|
||||
entry, err := o.cache.Get(formKey(ns, bktName))
|
||||
func (o *BucketCache) Get(key string) *data.BucketInfo {
|
||||
entry, err := o.cache.Get(key)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
@ -57,14 +57,10 @@ func (o *BucketCache) Get(ns, bktName string) *data.BucketInfo {
|
|||
|
||||
// Put puts an object to cache.
|
||||
func (o *BucketCache) Put(bkt *data.BucketInfo) error {
|
||||
return o.cache.Set(formKey(bkt.Zone, bkt.Name), bkt)
|
||||
return o.cache.Set(bkt.Name, bkt)
|
||||
}
|
||||
|
||||
// Delete deletes an object from cache.
|
||||
func (o *BucketCache) Delete(bkt *data.BucketInfo) bool {
|
||||
return o.cache.Remove(formKey(bkt.Zone, bkt.Name))
|
||||
}
|
||||
|
||||
func formKey(zone, name string) string {
|
||||
return name + "." + zone
|
||||
func (o *BucketCache) Delete(key string) bool {
|
||||
return o.cache.Remove(key)
|
||||
}
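The bucket-cache difference above is essentially the key schema: on master `Put` stores the bucket under `formKey(zone, name)`, i.e. "<name>.<zone>", and `Get` takes the namespace and bucket name separately, while support/v0 keys entries by the bare bucket name. A rough master-side sketch; it assumes `cache.Config` exposes a `Size` field (only the `config.Size` usage is visible in the diff) and treats the namespace passed to `Get` as the container zone:

```go
package main

import (
	"fmt"
	"time"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/cache"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
	"go.uber.org/zap"
)

func main() {
	c := cache.NewBucketCache(&cache.Config{
		Size:     100,
		Lifetime: time.Minute,
		Logger:   zap.NewNop(),
	})

	bkt := &data.BucketInfo{Name: "my-bucket", Zone: "zone1"}
	if err := c.Put(bkt); err != nil {
		panic(err)
	}

	// Lookup must use the same zone/namespace the bucket was stored with,
	// since the internal key is "my-bucket.zone1".
	fmt.Println(c.Get("zone1", "my-bucket") != nil) // true
	fmt.Println(c.Get("", "my-bucket") != nil)      // false: different key
}
```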
8 api/cache/cache_test.go vendored
|
@ -22,7 +22,7 @@ func TestAccessBoxCacheType(t *testing.T) {
|
|||
err := cache.Put(addr, box)
|
||||
require.NoError(t, err)
|
||||
val := cache.Get(addr)
|
||||
require.Equal(t, box, val.Box)
|
||||
require.Equal(t, box, val)
|
||||
require.Equal(t, 0, observedLog.Len())
|
||||
|
||||
err = cache.cache.Set(addr, "tmp")
|
||||
|
@ -38,13 +38,13 @@ func TestBucketsCacheType(t *testing.T) {
|
|||
|
||||
err := cache.Put(bktInfo)
|
||||
require.NoError(t, err)
|
||||
val := cache.Get("", bktInfo.Name)
|
||||
val := cache.Get(bktInfo.Name)
|
||||
require.Equal(t, bktInfo, val)
|
||||
require.Equal(t, 0, observedLog.Len())
|
||||
|
||||
err = cache.cache.Set(bktInfo.Name+"."+bktInfo.Zone, "tmp")
|
||||
err = cache.cache.Set(bktInfo.Name, "tmp")
|
||||
require.NoError(t, err)
|
||||
assertInvalidCacheEntry(t, cache.Get(bktInfo.Zone, bktInfo.Name), observedLog)
|
||||
assertInvalidCacheEntry(t, cache.Get(bktInfo.Name), observedLog)
|
||||
}
|
||||
|
||||
func TestObjectNamesCacheType(t *testing.T) {
|
||||
|
|
107 api/cache/listsession.go vendored
|
@ -1,107 +0,0 @@
|
|||
package cache
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
|
||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||
"github.com/bluele/gcache"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
type (
|
||||
// ListSessionCache contains cache for list session (during pagination).
|
||||
ListSessionCache struct {
|
||||
cache gcache.Cache
|
||||
logger *zap.Logger
|
||||
}
|
||||
|
||||
// ListSessionKey is a key to find a ListSessionCache's entry.
|
||||
ListSessionKey struct {
|
||||
cid cid.ID
|
||||
prefix string
|
||||
token string
|
||||
}
|
||||
)
|
||||
|
||||
const (
|
||||
// DefaultListSessionCacheLifetime is a default lifetime of entries in cache of ListObjects.
|
||||
DefaultListSessionCacheLifetime = time.Second * 60
|
||||
// DefaultListSessionCacheSize is a default size of cache of ListObjects.
|
||||
DefaultListSessionCacheSize = 100
|
||||
)
|
||||
|
||||
// DefaultListSessionConfig returns new default cache expiration values.
|
||||
func DefaultListSessionConfig(logger *zap.Logger) *Config {
|
||||
return &Config{
|
||||
Size: DefaultListSessionCacheSize,
|
||||
Lifetime: DefaultListSessionCacheLifetime,
|
||||
Logger: logger,
|
||||
}
|
||||
}
|
||||
|
||||
func (k *ListSessionKey) String() string {
|
||||
return k.cid.EncodeToString() + k.prefix + k.token
|
||||
}
|
||||
|
||||
// NewListSessionCache is a constructor which creates an object of ListObjectsCache with the given lifetime of entries.
|
||||
func NewListSessionCache(config *Config) *ListSessionCache {
|
||||
gc := gcache.New(config.Size).LRU().Expiration(config.Lifetime).EvictedFunc(func(_ interface{}, val interface{}) {
|
||||
session, ok := val.(*data.ListSession)
|
||||
if !ok {
|
||||
config.Logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", val)),
|
||||
zap.String("expected", fmt.Sprintf("%T", session)))
|
||||
}
|
||||
|
||||
if !session.Acquired.Load() {
|
||||
session.Cancel()
|
||||
}
|
||||
}).Build()
|
||||
return &ListSessionCache{cache: gc, logger: config.Logger}
|
||||
}
|
||||
|
||||
// GetListSession returns a list of ObjectInfo.
|
||||
func (l *ListSessionCache) GetListSession(key ListSessionKey) *data.ListSession {
|
||||
entry, err := l.cache.Get(key)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
result, ok := entry.(*data.ListSession)
|
||||
if !ok {
|
||||
l.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
|
||||
zap.String("expected", fmt.Sprintf("%T", result)))
|
||||
return nil
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// PutListSession puts a list of object versions to cache.
|
||||
func (l *ListSessionCache) PutListSession(key ListSessionKey, session *data.ListSession) error {
|
||||
s := l.GetListSession(key)
|
||||
if s != nil && s != session {
|
||||
if !s.Acquired.Load() {
|
||||
s.Cancel()
|
||||
}
|
||||
}
|
||||
return l.cache.Set(key, session)
|
||||
}
|
||||
|
||||
// DeleteListSession removes key from cache.
|
||||
func (l *ListSessionCache) DeleteListSession(key ListSessionKey) {
|
||||
l.cache.Remove(key)
|
||||
}
|
||||
|
||||
// CreateListSessionCacheKey returns ListSessionKey with the given CID, prefix and token.
|
||||
func CreateListSessionCacheKey(cnr cid.ID, prefix, token string) ListSessionKey {
|
||||
p := ListSessionKey{
|
||||
cid: cnr,
|
||||
prefix: prefix,
|
||||
token: token,
|
||||
}
|
||||
|
||||
return p
|
||||
}
|
72 api/cache/policy.go vendored
|
@ -1,72 +0,0 @@
|
|||
package cache
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
|
||||
"git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
|
||||
"git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
|
||||
"github.com/bluele/gcache"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// MorphPolicyCache provides lru cache for listing policies stored in policy contract.
|
||||
type MorphPolicyCache struct {
|
||||
cache gcache.Cache
|
||||
logger *zap.Logger
|
||||
}
|
||||
|
||||
type MorphPolicyCacheKey struct {
|
||||
Target engine.Target
|
||||
Name chain.Name
|
||||
}
|
||||
|
||||
const (
|
||||
// DefaultMorphPolicyCacheSize is a default maximum number of entries in cache.
|
||||
DefaultMorphPolicyCacheSize = 1e4
|
||||
// DefaultMorphPolicyCacheLifetime is a default lifetime of entries in cache.
|
||||
DefaultMorphPolicyCacheLifetime = time.Minute
|
||||
)
|
||||
|
||||
// DefaultMorphPolicyConfig returns new default cache expiration values.
|
||||
func DefaultMorphPolicyConfig(logger *zap.Logger) *Config {
|
||||
return &Config{
|
||||
Size: DefaultMorphPolicyCacheSize,
|
||||
Lifetime: DefaultMorphPolicyCacheLifetime,
|
||||
Logger: logger,
|
||||
}
|
||||
}
|
||||
|
||||
// NewMorphPolicyCache creates an object of MorphPolicyCache.
|
||||
func NewMorphPolicyCache(config *Config) *MorphPolicyCache {
|
||||
gc := gcache.New(config.Size).LRU().Expiration(config.Lifetime).Build()
|
||||
return &MorphPolicyCache{cache: gc, logger: config.Logger}
|
||||
}
|
||||
|
||||
// Get returns a cached object. Returns nil if value is missing.
|
||||
func (o *MorphPolicyCache) Get(key MorphPolicyCacheKey) []*chain.Chain {
|
||||
entry, err := o.cache.Get(key)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
result, ok := entry.([]*chain.Chain)
|
||||
if !ok {
|
||||
o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
|
||||
zap.String("expected", fmt.Sprintf("%T", result)))
|
||||
return nil
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// Put puts an object to cache.
|
||||
func (o *MorphPolicyCache) Put(key MorphPolicyCacheKey, list []*chain.Chain) error {
|
||||
return o.cache.Set(key, list)
|
||||
}
|
||||
|
||||
// Delete deletes an object from cache.
|
||||
func (o *MorphPolicyCache) Delete(key MorphPolicyCacheKey) bool {
|
||||
return o.cache.Remove(key)
|
||||
}
|
|
@ -2,13 +2,11 @@ package data
|
|||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
|
||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -24,21 +22,21 @@ const (
|
|||
type (
|
||||
// BucketInfo stores basic bucket data.
|
||||
BucketInfo struct {
|
||||
Name string // container name from system attribute
|
||||
Zone string // container zone from system attribute
|
||||
CID cid.ID
|
||||
Owner user.ID
|
||||
Created time.Time
|
||||
LocationConstraint string
|
||||
ObjectLockEnabled bool
|
||||
HomomorphicHashDisabled bool
|
||||
APEEnabled bool
|
||||
Name string // container name from system attribute
|
||||
Zone string // container zone from system attribute
|
||||
CID cid.ID
|
||||
Owner user.ID
|
||||
Created time.Time
|
||||
LocationConstraint string
|
||||
ObjectLockEnabled bool
|
||||
}
|
||||
|
||||
// ObjectInfo holds S3 object data.
|
||||
ObjectInfo struct {
|
||||
ID oid.ID
|
||||
CID cid.ID
|
||||
ID oid.ID
|
||||
CID cid.ID
|
||||
IsDir bool
|
||||
IsDeleteMarker bool
|
||||
|
||||
Bucket string
|
||||
Name string
|
||||
|
@ -62,10 +60,8 @@ type (
|
|||
|
||||
// BucketSettings stores settings such as versioning.
|
||||
BucketSettings struct {
|
||||
Versioning string
|
||||
LockConfiguration *ObjectLockConfiguration
|
||||
CannedACL string
|
||||
OwnerKey *keys.PublicKey
|
||||
Versioning string `json:"versioning"`
|
||||
LockConfiguration *ObjectLockConfiguration `json:"lock_configuration"`
|
||||
}
|
||||
|
||||
// CORSConfiguration stores CORS configuration of a request.
|
||||
|
@ -91,7 +87,7 @@ func NotificationInfoFromObject(objInfo *ObjectInfo, md5Enabled bool) *Notificat
|
|||
Name: objInfo.Name,
|
||||
Version: objInfo.VersionID(),
|
||||
Size: objInfo.Size,
|
||||
HashSum: Quote(objInfo.ETag(md5Enabled)),
|
||||
HashSum: objInfo.ETag(md5Enabled),
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -138,11 +134,3 @@ func (b BucketSettings) VersioningEnabled() bool {
|
|||
func (b BucketSettings) VersioningSuspended() bool {
|
||||
return b.Versioning == VersioningSuspended
|
||||
}
|
||||
|
||||
func Quote(val string) string {
|
||||
return "\"" + val + "\""
|
||||
}
|
||||
|
||||
func UnQuote(val string) string {
|
||||
return strings.Trim(val, "\"")
|
||||
}
|
||||
|
|
|
@ -1,19 +0,0 @@
|
|||
package data
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
type VersionsStream interface {
|
||||
Next(ctx context.Context) (*NodeVersion, error)
|
||||
}
|
||||
|
||||
type ListSession struct {
|
||||
Next []*ExtendedNodeVersion
|
||||
Stream VersionsStream
|
||||
NamesMap map[string]struct{}
|
||||
Context context.Context
|
||||
Cancel context.CancelFunc
|
||||
Acquired atomic.Bool
|
||||
}
|
|
@ -1,10 +1,7 @@
|
|||
package data
|
||||
|
||||
import "encoding/xml"
|
||||
|
||||
type (
|
||||
NotificationConfiguration struct {
|
||||
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ NotificationConfiguration" json:"-"`
|
||||
QueueConfigurations []QueueConfiguration `xml:"QueueConfiguration" json:"QueueConfigurations"`
|
||||
// Not supported topics
|
||||
TopicConfigurations []TopicConfiguration `xml:"TopicConfiguration" json:"TopicConfigurations"`
|
||||
|
|
|
@ -16,31 +16,20 @@ const (
|
|||
// NodeVersion represent node from tree service.
|
||||
type NodeVersion struct {
|
||||
BaseNodeVersion
|
||||
DeleteMarker *DeleteMarkerInfo
|
||||
IsUnversioned bool
|
||||
IsCombined bool
|
||||
}
|
||||
|
||||
// ExtendedNodeVersion contains additional node info to be able to sort versions by timestamp.
|
||||
type ExtendedNodeVersion struct {
|
||||
NodeVersion *NodeVersion
|
||||
IsLatest bool
|
||||
DirName string
|
||||
func (v NodeVersion) IsDeleteMarker() bool {
|
||||
return v.DeleteMarker != nil
|
||||
}
|
||||
|
||||
func (e ExtendedNodeVersion) Version() string {
|
||||
if e.NodeVersion.IsUnversioned {
|
||||
return UnversionedObjectVersionID
|
||||
}
|
||||
|
||||
return e.NodeVersion.OID.EncodeToString()
|
||||
}
|
||||
|
||||
func (e ExtendedNodeVersion) Name() string {
|
||||
if e.DirName != "" {
|
||||
return e.DirName
|
||||
}
|
||||
|
||||
return e.NodeVersion.FilePath
|
||||
// DeleteMarkerInfo is used to save object info if node in the tree service is delete marker.
|
||||
// We need this information because the "delete marker" object is no longer stored in FrostFS.
|
||||
type DeleteMarkerInfo struct {
|
||||
Created time.Time
|
||||
Owner user.ID
|
||||
}
|
||||
|
||||
// ExtendedObjectInfo contains additional node info to be able to sort versions by timestamp.
|
||||
|
@ -61,35 +50,14 @@ func (e ExtendedObjectInfo) Version() string {
|
|||
// BaseNodeVersion is minimal node info from tree service.
|
||||
// Basically used for "system" object.
|
||||
type BaseNodeVersion struct {
|
||||
ID uint64
|
||||
ParenID uint64
|
||||
OID oid.ID
|
||||
Timestamp uint64
|
||||
Size uint64
|
||||
ETag string
|
||||
MD5 string
|
||||
FilePath string
|
||||
Created *time.Time
|
||||
Owner *user.ID
|
||||
IsDeleteMarker bool
|
||||
}
|
||||
|
||||
func (v *BaseNodeVersion) GetETag(md5Enabled bool) string {
|
||||
if md5Enabled && len(v.MD5) > 0 {
|
||||
return v.MD5
|
||||
}
|
||||
return v.ETag
|
||||
}
|
||||
|
||||
// IsFilledExtra returns true is node was created by version of gate v0.29.x and later.
|
||||
func (v BaseNodeVersion) IsFilledExtra() bool {
|
||||
return v.Created != nil && v.Owner != nil
|
||||
}
|
||||
|
||||
func (v *BaseNodeVersion) FillExtra(owner *user.ID, created *time.Time, realSize uint64) {
|
||||
v.Owner = owner
|
||||
v.Created = created
|
||||
v.Size = realSize
|
||||
ID uint64
|
||||
ParenID uint64
|
||||
OID oid.ID
|
||||
Timestamp uint64
|
||||
Size uint64
|
||||
ETag string
|
||||
MD5 string
|
||||
FilePath string
|
||||
}
|
||||
|
||||
type ObjectTaggingInfo struct {
|
||||
|
@ -109,7 +77,6 @@ type MultipartInfo struct {
|
|||
Created time.Time
|
||||
Meta map[string]string
|
||||
CopiesNumbers []uint32
|
||||
Finished bool
|
||||
}
|
||||
|
||||
// PartInfo is upload information about part.
|
||||
|
|
|
@ -26,7 +26,6 @@ type (
|
|||
const (
|
||||
_ ErrorCode = iota
|
||||
ErrAccessDenied
|
||||
ErrAccessControlListNotSupported
|
||||
ErrBadDigest
|
||||
ErrEntityTooSmall
|
||||
ErrEntityTooLarge
|
||||
|
@ -74,7 +73,6 @@ const (
|
|||
ErrInvalidArgument
|
||||
ErrInvalidTagKey
|
||||
ErrInvalidTagValue
|
||||
ErrInvalidTagKeyUniqueness
|
||||
ErrInvalidTagsSizeExceed
|
||||
ErrNotImplemented
|
||||
ErrPreconditionFailed
|
||||
|
@ -91,7 +89,6 @@ const (
|
|||
ErrBucketNotEmpty
|
||||
ErrAllAccessDisabled
|
||||
ErrMalformedPolicy
|
||||
ErrMalformedPolicyNotPrincipal
|
||||
ErrMissingFields
|
||||
ErrMissingCredTag
|
||||
ErrCredMalformed
|
||||
|
@ -151,7 +148,6 @@ const (
|
|||
ErrInvalidEncryptionAlgorithm
|
||||
ErrInvalidSSECustomerKey
|
||||
ErrMissingSSECustomerKey
|
||||
ErrMissingSSECustomerAlgorithm
|
||||
ErrMissingSSECustomerKeyMD5
|
||||
ErrSSECustomerKeyMD5Mismatch
|
||||
ErrInvalidSSECustomerParameters
|
||||
|
@ -186,7 +182,6 @@ const (
|
|||
ErrInvalidRequest
|
||||
ErrInvalidRequestLargeCopy
|
||||
ErrInvalidStorageClass
|
||||
VersionIDMarkerWithoutKeyMarker
|
||||
|
||||
ErrMalformedJSON
|
||||
ErrInsecureClientRequest
|
||||
|
@ -318,12 +313,6 @@ var errorCodes = errorCodeMap{
|
|||
Description: "Invalid storage class.",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
VersionIDMarkerWithoutKeyMarker: {
|
||||
ErrCode: VersionIDMarkerWithoutKeyMarker,
|
||||
Code: "VersionIDMarkerWithoutKeyMarker",
|
||||
Description: "A version-id marker cannot be specified without a key marker.",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrInvalidRequestBody: {
|
||||
ErrCode: ErrInvalidRequestBody,
|
||||
Code: "InvalidArgument",
|
||||
|
@ -378,12 +367,6 @@ var errorCodes = errorCodeMap{
|
|||
Description: "Access Denied.",
|
||||
HTTPStatusCode: http.StatusForbidden,
|
||||
},
|
||||
ErrAccessControlListNotSupported: {
|
||||
ErrCode: ErrAccessControlListNotSupported,
|
||||
Code: "AccessControlListNotSupported",
|
||||
Description: "The bucket does not allow ACLs.",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrBadDigest: {
|
||||
ErrCode: ErrBadDigest,
|
||||
Code: "BadDigest",
|
||||
|
@ -543,19 +526,13 @@ var errorCodes = errorCodeMap{
|
|||
ErrInvalidTagKey: {
|
||||
ErrCode: ErrInvalidTagKey,
|
||||
Code: "InvalidTag",
|
||||
Description: "The TagKey you have provided is invalid",
|
||||
Description: "The TagValue you have provided is invalid",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrInvalidTagValue: {
|
||||
ErrCode: ErrInvalidTagValue,
|
||||
Code: "InvalidTag",
|
||||
Description: "The TagValue you have provided is invalid",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrInvalidTagKeyUniqueness: {
|
||||
ErrCode: ErrInvalidTagKeyUniqueness,
|
||||
Code: "InvalidTag",
|
||||
Description: "Cannot provide multiple Tags with the same key",
|
||||
Description: "The TagKey you have provided is invalid",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrInvalidTagsSizeExceed: {
|
||||
|
@ -621,7 +598,7 @@ var errorCodes = errorCodeMap{
|
|||
ErrAuthorizationHeaderMalformed: {
|
||||
ErrCode: ErrAuthorizationHeaderMalformed,
|
||||
Code: "AuthorizationHeaderMalformed",
|
||||
Description: "The authorization header that you provided is not valid.",
|
||||
Description: "The authorization header is malformed; the region is wrong; expecting 'us-east-1'.",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrMalformedPOSTRequest: {
|
||||
|
@ -666,12 +643,6 @@ var errorCodes = errorCodeMap{
|
|||
Description: "Policy has invalid resource.",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrMalformedPolicyNotPrincipal: {
|
||||
ErrCode: ErrMalformedPolicyNotPrincipal,
|
||||
Code: "MalformedPolicy",
|
||||
Description: "Allow with NotPrincipal is not allowed.",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrMissingFields: {
|
||||
ErrCode: ErrMissingFields,
|
||||
Code: "MissingFields",
|
||||
|
@ -1077,12 +1048,6 @@ var errorCodes = errorCodeMap{
|
|||
Description: "Requests specifying Server Side Encryption with Customer provided keys must provide an appropriate secret key.",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrMissingSSECustomerAlgorithm: {
|
||||
ErrCode: ErrMissingSSECustomerAlgorithm,
|
||||
Code: "InvalidArgument",
|
||||
Description: "Requests specifying Server Side Encryption with Customer provided keys must provide a valid encryption algorithm.",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrMissingSSECustomerKeyMD5: {
|
||||
ErrCode: ErrMissingSSECustomerKeyMD5,
|
||||
Code: "InvalidArgument",
|
||||
|
|
|
@ -6,9 +6,9 @@ import (
|
|||
"crypto/elliptic"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"encoding/xml"
|
||||
stderrors "errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"sort"
|
||||
"strconv"
|
||||
|
@ -20,14 +20,11 @@ import (
|
|||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
|
||||
engineiam "git.frostfs.info/TrueCloudLab/policy-engine/iam"
|
||||
"git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
|
||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
@ -257,20 +254,6 @@ func (h *handler) GetBucketACLHandler(w http.ResponseWriter, r *http.Request) {
|
|||
return
|
||||
}
|
||||
|
||||
settings, err := h.obj.GetBucketSettings(r.Context(), bktInfo)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "couldn't get bucket settings", reqInfo, err)
|
||||
return
|
||||
}
|
||||
|
||||
if bktInfo.APEEnabled || len(settings.CannedACL) != 0 {
|
||||
if err = middleware.EncodeToResponse(w, h.encodeBucketCannedACL(ctx, bktInfo, settings)); err != nil {
|
||||
h.logAndSendError(w, "something went wrong", reqInfo, err)
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
bucketACL, err := h.obj.GetBucketACL(ctx, bktInfo)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "could not fetch bucket acl", reqInfo, err)
|
||||
|
@ -283,75 +266,16 @@ func (h *handler) GetBucketACLHandler(w http.ResponseWriter, r *http.Request) {
|
|||
}
|
||||
}
|
||||
|
||||
func (h *handler) encodeBucketCannedACL(ctx context.Context, bktInfo *data.BucketInfo, settings *data.BucketSettings) *AccessControlPolicy {
|
||||
res := h.encodePrivateCannedACL(ctx, bktInfo, settings)
|
||||
|
||||
switch settings.CannedACL {
|
||||
case basicACLPublic:
|
||||
grantee := NewGrantee(acpGroup)
|
||||
grantee.URI = allUsersGroup
|
||||
|
||||
res.AccessControlList = append(res.AccessControlList, &Grant{
|
||||
Grantee: grantee,
|
||||
Permission: aclWrite,
|
||||
})
|
||||
fallthrough
|
||||
case basicACLReadOnly:
|
||||
grantee := NewGrantee(acpGroup)
|
||||
grantee.URI = allUsersGroup
|
||||
|
||||
res.AccessControlList = append(res.AccessControlList, &Grant{
|
||||
Grantee: grantee,
|
||||
Permission: aclRead,
|
||||
})
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
func (h *handler) encodePrivateCannedACL(ctx context.Context, bktInfo *data.BucketInfo, settings *data.BucketSettings) *AccessControlPolicy {
|
||||
ownerDisplayName := bktInfo.Owner.EncodeToString()
|
||||
ownerEncodedID := ownerDisplayName
|
||||
|
||||
if settings.OwnerKey == nil {
|
||||
h.reqLogger(ctx).Warn(logs.BucketOwnerKeyIsMissing, zap.String("owner", bktInfo.Owner.String()))
|
||||
} else {
|
||||
ownerDisplayName = settings.OwnerKey.Address()
|
||||
ownerEncodedID = hex.EncodeToString(settings.OwnerKey.Bytes())
|
||||
}
|
||||
|
||||
res := &AccessControlPolicy{Owner: Owner{
|
||||
ID: ownerEncodedID,
|
||||
DisplayName: ownerDisplayName,
|
||||
}}
|
||||
|
||||
granteeOwner := NewGrantee(acpCanonicalUser)
|
||||
granteeOwner.ID = ownerEncodedID
|
||||
granteeOwner.DisplayName = ownerDisplayName
|
||||
|
||||
res.AccessControlList = []*Grant{{
|
||||
Grantee: granteeOwner,
|
||||
Permission: aclFullControl,
|
||||
}}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
func (h *handler) bearerTokenIssuerKey(ctx context.Context) (*keys.PublicKey, error) {
|
||||
box, err := middleware.GetBoxData(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return getTokenIssuerKey(box)
|
||||
}
|
||||
var btoken v2acl.BearerToken
|
||||
box.Gate.BearerToken.WriteToV2(&btoken)
|
||||
|
||||
func getTokenIssuerKey(box *accessbox.Box) (*keys.PublicKey, error) {
|
||||
if box.Gate.BearerToken == nil {
|
||||
return nil, stderrors.New("bearer token is missing")
|
||||
}
|
||||
|
||||
key, err := keys.NewPublicKeyFromBytes(box.Gate.BearerToken.SigningKeyBytes(), elliptic.P256())
|
||||
key, err := keys.NewPublicKeyFromBytes(btoken.GetSignature().GetKey(), elliptic.P256())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("public key from bytes: %w", err)
|
||||
}
|
||||
|
@ -361,24 +285,6 @@ func getTokenIssuerKey(box *accessbox.Box) (*keys.PublicKey, error) {
|
|||
|
||||
func (h *handler) PutBucketACLHandler(w http.ResponseWriter, r *http.Request) {
|
||||
reqInfo := middleware.GetReqInfo(r.Context())
|
||||
|
||||
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "could not get bucket info", reqInfo, err)
|
||||
return
|
||||
}
|
||||
|
||||
settings, err := h.obj.GetBucketSettings(r.Context(), bktInfo)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "couldn't get bucket settings", reqInfo, err)
|
||||
return
|
||||
}
|
||||
|
||||
if bktInfo.APEEnabled || len(settings.CannedACL) != 0 {
|
||||
h.putBucketACLAPEHandler(w, r, reqInfo, bktInfo, settings)
|
||||
return
|
||||
}
|
||||
|
||||
key, err := h.bearerTokenIssuerKey(r.Context())
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "couldn't get bearer token issuer key", reqInfo, err)
|
||||
|
@ -398,7 +304,7 @@ func (h *handler) PutBucketACLHandler(w http.ResponseWriter, r *http.Request) {
|
|||
h.logAndSendError(w, "could not parse bucket acl", reqInfo, err)
|
||||
return
|
||||
}
|
||||
} else if err = h.cfg.NewXMLDecoder(r.Body).Decode(list); err != nil {
|
||||
} else if err = xml.NewDecoder(r.Body).Decode(list); err != nil {
|
||||
h.logAndSendError(w, "could not parse bucket acl", reqInfo, errors.GetAPIError(errors.ErrMalformedXML))
|
||||
return
|
||||
}
|
||||
|
@ -410,6 +316,12 @@ func (h *handler) PutBucketACLHandler(w http.ResponseWriter, r *http.Request) {
|
|||
return
|
||||
}
|
||||
|
||||
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "could not get bucket info", reqInfo, err)
|
||||
return
|
||||
}
|
||||
|
||||
if _, err = h.updateBucketACL(r, astBucket, bktInfo, token); err != nil {
|
||||
h.logAndSendError(w, "could not update bucket acl", reqInfo, err)
|
||||
return
|
||||
|
@ -417,60 +329,6 @@ func (h *handler) PutBucketACLHandler(w http.ResponseWriter, r *http.Request) {
|
|||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
|
||||
func (h *handler) putBucketACLAPEHandler(w http.ResponseWriter, r *http.Request, reqInfo *middleware.ReqInfo, bktInfo *data.BucketInfo, settings *data.BucketSettings) {
|
||||
ctx := r.Context()
|
||||
|
||||
defer func() {
|
||||
if errBody := r.Body.Close(); errBody != nil {
|
||||
h.reqLogger(r.Context()).Warn(logs.CouldNotCloseRequestBody, zap.Error(errBody))
|
||||
}
|
||||
}()
|
||||
|
||||
written, err := io.Copy(io.Discard, r.Body)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "couldn't read request body", reqInfo, err)
|
||||
return
|
||||
}
|
||||
|
||||
if written != 0 || len(r.Header.Get(api.AmzACL)) == 0 {
|
||||
h.logAndSendError(w, "acl not supported for this bucket", reqInfo, errors.GetAPIError(errors.ErrAccessControlListNotSupported))
|
||||
return
|
||||
}
|
||||
|
||||
cannedACL, err := parseCannedACL(r.Header)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "could not parse canned ACL", reqInfo, err)
|
||||
return
|
||||
}
|
||||
|
||||
key, err := h.bearerTokenIssuerKey(ctx)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "couldn't get bearer token issuer key", reqInfo, err)
|
||||
return
|
||||
}
|
||||
|
||||
chainRules := bucketCannedACLToAPERules(cannedACL, reqInfo, key, bktInfo.CID)
|
||||
if err = h.ape.SaveACLChains(bktInfo.CID.EncodeToString(), chainRules); err != nil {
|
||||
h.logAndSendError(w, "failed to add morph rule chains", reqInfo, err)
|
||||
return
|
||||
}
|
||||
|
||||
settings.CannedACL = cannedACL
|
||||
|
||||
sp := &layer.PutSettingsParams{
|
||||
BktInfo: bktInfo,
|
||||
Settings: settings,
|
||||
}
|
||||
|
||||
if err = h.obj.PutBucketSettings(ctx, sp); err != nil {
|
||||
h.logAndSendError(w, "couldn't save bucket settings", reqInfo, err,
|
||||
zap.String("container_id", bktInfo.CID.EncodeToString()))
|
||||
return
|
||||
}
|
||||
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
|
||||
func (h *handler) updateBucketACL(r *http.Request, astChild *ast, bktInfo *data.BucketInfo, sessionToken *session.Container) (bool, error) {
|
||||
bucketACL, err := h.obj.GetBucketACL(r.Context(), bktInfo)
|
||||
if err != nil {
|
||||
|
@ -519,20 +377,6 @@ func (h *handler) GetObjectACLHandler(w http.ResponseWriter, r *http.Request) {
|
|||
return
|
||||
}
|
||||
|
||||
settings, err := h.obj.GetBucketSettings(r.Context(), bktInfo)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "couldn't get bucket settings", reqInfo, err)
|
||||
return
|
||||
}
|
||||
|
||||
if bktInfo.APEEnabled || len(settings.CannedACL) != 0 {
|
||||
if err = middleware.EncodeToResponse(w, h.encodePrivateCannedACL(ctx, bktInfo, settings)); err != nil {
|
||||
h.logAndSendError(w, "something went wrong", reqInfo, err)
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
bucketACL, err := h.obj.GetBucketACL(ctx, bktInfo)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "could not fetch bucket acl", reqInfo, err)
|
||||
|
@ -547,7 +391,7 @@ func (h *handler) GetObjectACLHandler(w http.ResponseWriter, r *http.Request) {
|
|||
|
||||
objInfo, err := h.obj.GetObjectInfo(ctx, prm)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "could not get object info", reqInfo, err)
|
||||
h.logAndSendError(w, "could not object info", reqInfo, err)
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -559,29 +403,6 @@ func (h *handler) GetObjectACLHandler(w http.ResponseWriter, r *http.Request) {
|
|||
func (h *handler) PutObjectACLHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
reqInfo := middleware.GetReqInfo(ctx)
|
||||
|
||||
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "could not get bucket info", reqInfo, err)
|
||||
return
|
||||
}
|
||||
|
||||
apeEnabled := bktInfo.APEEnabled
|
||||
|
||||
if !apeEnabled {
|
||||
settings, err := h.obj.GetBucketSettings(r.Context(), bktInfo)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "couldn't get bucket settings", reqInfo, err)
|
||||
return
|
||||
}
|
||||
apeEnabled = len(settings.CannedACL) != 0
|
||||
}
|
||||
|
||||
if apeEnabled {
|
||||
h.logAndSendError(w, "acl not supported for this bucket", reqInfo, errors.GetAPIError(errors.ErrAccessControlListNotSupported))
|
||||
return
|
||||
}
|
||||
|
||||
versionID := reqInfo.URL.Query().Get(api.QueryVersionID)
|
||||
key, err := h.bearerTokenIssuerKey(ctx)
|
||||
if err != nil {
|
||||
|
@ -595,6 +416,12 @@ func (h *handler) PutObjectACLHandler(w http.ResponseWriter, r *http.Request) {
|
|||
return
|
||||
}
|
||||
|
||||
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "could not get bucket info", reqInfo, err)
|
||||
return
|
||||
}
|
||||
|
||||
p := &layer.HeadObjectParams{
|
||||
BktInfo: bktInfo,
|
||||
Object: reqInfo.ObjectName,
|
||||
|
@ -614,7 +441,7 @@ func (h *handler) PutObjectACLHandler(w http.ResponseWriter, r *http.Request) {
|
|||
h.logAndSendError(w, "could not parse bucket acl", reqInfo, err)
|
||||
return
|
||||
}
|
||||
} else if err = h.cfg.NewXMLDecoder(r.Body).Decode(list); err != nil {
|
||||
} else if err = xml.NewDecoder(r.Body).Decode(list); err != nil {
|
||||
h.logAndSendError(w, "could not parse bucket acl", reqInfo, errors.GetAPIError(errors.ErrMalformedXML))
|
||||
return
|
||||
}
|
||||
|
@ -639,7 +466,7 @@ func (h *handler) PutObjectACLHandler(w http.ResponseWriter, r *http.Request) {
|
|||
if updated {
|
||||
s := &SendNotificationParams{
|
||||
Event: EventObjectACLPut,
|
||||
NotificationInfo: data.NotificationInfoFromObject(objInfo, h.cfg.MD5Enabled()),
|
||||
NotificationInfo: data.NotificationInfoFromObject(objInfo, h.cfg.Features.MD5Enabled()),
|
||||
BktInfo: bktInfo,
|
||||
ReqInfo: reqInfo,
|
||||
}
|
||||
|
@ -650,48 +477,6 @@ func (h *handler) PutObjectACLHandler(w http.ResponseWriter, r *http.Request) {
|
|||
w.WriteHeader(http.StatusOK)
}

func (h *handler) GetBucketPolicyStatusHandler(w http.ResponseWriter, r *http.Request) {
reqInfo := middleware.GetReqInfo(r.Context())

bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil {
h.logAndSendError(w, "could not get bucket info", reqInfo, err)
return
}

jsonPolicy, err := h.ape.GetBucketPolicy(reqInfo.Namespace, bktInfo.CID)
if err != nil {
if strings.Contains(err.Error(), "not found") {
err = fmt.Errorf("%w: %s", errors.GetAPIError(errors.ErrNoSuchBucketPolicy), err.Error())
}
h.logAndSendError(w, "failed to get policy from storage", reqInfo, err)
return
}

var bktPolicy engineiam.Policy
if err = json.Unmarshal(jsonPolicy, &bktPolicy); err != nil {
h.logAndSendError(w, "could not parse bucket policy", reqInfo, err)
return
}

policyStatus := &PolicyStatus{
IsPublic: PolicyStatusIsPublicFalse,
}

for _, st := range bktPolicy.Statement {
// https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-control-block-public-access.html#access-control-block-public-access-policy-status
if _, ok := st.Principal[engineiam.Wildcard]; ok {
policyStatus.IsPublic = PolicyStatusIsPublicTrue
break
}
}

if err = middleware.EncodeToResponse(w, policyStatus); err != nil {
h.logAndSendError(w, "encode and write response", reqInfo, err)
return
}
}
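The wildcard-principal rule applied above (see the AWS doc linked in the comment) can be isolated into a small helper; a minimal sketch using only the `engineiam` types this handler already imports:

```go
// Sketch: a policy is treated as public when any statement lists the
// wildcard principal, mirroring GetBucketPolicyStatusHandler above.
func isPublicPolicy(raw []byte) (bool, error) {
	var p engineiam.Policy
	if err := json.Unmarshal(raw, &p); err != nil {
		return false, err
	}
	for _, st := range p.Statement {
		if _, ok := st.Principal[engineiam.Wildcard]; ok {
			return true, nil
		}
	}
	return false, nil
}
```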
func (h *handler) GetBucketPolicyHandler(w http.ResponseWriter, r *http.Request) {
|
||||
reqInfo := middleware.GetReqInfo(r.Context())
|
||||
|
||||
|
@ -701,36 +486,19 @@ func (h *handler) GetBucketPolicyHandler(w http.ResponseWriter, r *http.Request)
|
|||
return
|
||||
}
|
||||
|
||||
jsonPolicy, err := h.ape.GetBucketPolicy(reqInfo.Namespace, bktInfo.CID)
|
||||
bucketACL, err := h.obj.GetBucketACL(r.Context(), bktInfo)
|
||||
if err != nil {
|
||||
if strings.Contains(err.Error(), "not found") {
|
||||
err = fmt.Errorf("%w: %s", errors.GetAPIError(errors.ErrNoSuchBucketPolicy), err.Error())
|
||||
}
|
||||
h.logAndSendError(w, "failed to get policy from storage", reqInfo, err)
|
||||
h.logAndSendError(w, "could not fetch bucket acl", reqInfo, err)
|
||||
return
|
||||
}
|
||||
|
||||
w.Header().Set(api.ContentType, "application/json")
|
||||
ast := tableToAst(bucketACL.EACL, reqInfo.BucketName)
|
||||
bktPolicy := astToPolicy(ast)
|
||||
|
||||
w.WriteHeader(http.StatusOK)
|
||||
|
||||
if _, err = w.Write(jsonPolicy); err != nil {
|
||||
h.logAndSendError(w, "write json policy to client", reqInfo, err)
|
||||
}
|
||||
}
|
||||
|
||||
func (h *handler) DeleteBucketPolicyHandler(w http.ResponseWriter, r *http.Request) {
|
||||
reqInfo := middleware.GetReqInfo(r.Context())
|
||||
|
||||
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "could not get bucket info", reqInfo, err)
|
||||
return
|
||||
}
|
||||
|
||||
chainIDs := []chain.ID{getBucketChainID(chain.S3, bktInfo), getBucketChainID(chain.Ingress, bktInfo)}
|
||||
if err = h.ape.DeleteBucketPolicy(reqInfo.Namespace, bktInfo.CID, chainIDs); err != nil {
|
||||
h.logAndSendError(w, "failed to delete policy from storage", reqInfo, err)
|
||||
return
|
||||
if err = json.NewEncoder(w).Encode(bktPolicy); err != nil {
|
||||
h.logAndSendError(w, "something went wrong", reqInfo, err)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -756,91 +524,30 @@ func (h *handler) PutBucketPolicyHandler(w http.ResponseWriter, r *http.Request)
|
|||
return
|
||||
}
|
||||
|
||||
jsonPolicy, err := io.ReadAll(r.Body)
|
||||
token, err := getSessionTokenSetEACL(r.Context())
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "read body", reqInfo, err)
|
||||
h.logAndSendError(w, "couldn't get eacl token", reqInfo, err)
|
||||
return
|
||||
}
|
||||
|
||||
var bktPolicy engineiam.Policy
|
||||
if err = json.Unmarshal(jsonPolicy, &bktPolicy); err != nil {
|
||||
bktPolicy := &bucketPolicy{Bucket: reqInfo.BucketName}
|
||||
if err = json.NewDecoder(r.Body).Decode(bktPolicy); err != nil {
|
||||
h.logAndSendError(w, "could not parse bucket policy", reqInfo, err)
|
||||
return
|
||||
}
|
||||
|
||||
for _, stat := range bktPolicy.Statement {
|
||||
if len(stat.NotResource) != 0 {
|
||||
h.logAndSendError(w, "policy resource mismatched bucket", reqInfo, errors.GetAPIError(errors.ErrMalformedPolicy))
|
||||
return
|
||||
}
|
||||
|
||||
if len(stat.NotPrincipal) != 0 && stat.Effect == engineiam.AllowEffect {
|
||||
h.logAndSendError(w, "invalid NotPrincipal", reqInfo, errors.GetAPIError(errors.ErrMalformedPolicyNotPrincipal))
|
||||
return
|
||||
}
|
||||
|
||||
for _, resource := range stat.Resource {
|
||||
if reqInfo.BucketName != strings.Split(strings.TrimPrefix(resource, arnAwsPrefix), "/")[0] {
|
||||
h.logAndSendError(w, "policy resource mismatched bucket", reqInfo, errors.GetAPIError(errors.ErrMalformedPolicy))
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
s3Chain, err := engineiam.ConvertToS3Chain(bktPolicy, h.frostfsid)
|
||||
astPolicy, err := policyToAst(bktPolicy)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "could not convert s3 policy to chain policy", reqInfo, err)
|
||||
h.logAndSendError(w, "could not translate policy to ast", reqInfo, err)
|
||||
return
|
||||
}
|
||||
s3Chain.ID = getBucketChainID(chain.S3, bktInfo)
|
||||
|
||||
nativeChain, err := engineiam.ConvertToNativeChain(bktPolicy, h.nativeResolver(reqInfo.Namespace, bktInfo))
|
||||
if err == nil {
|
||||
nativeChain.ID = getBucketChainID(chain.Ingress, bktInfo)
|
||||
} else if !stderrors.Is(err, engineiam.ErrActionsNotApplicable) {
|
||||
h.logAndSendError(w, "could not convert s3 policy to native chain policy", reqInfo, err)
|
||||
return
|
||||
} else {
|
||||
h.reqLogger(r.Context()).Warn(logs.PolicyCouldntBeConvertedToNativeRules)
|
||||
}
|
||||
|
||||
chainsToSave := []*chain.Chain{s3Chain}
|
||||
if nativeChain != nil {
|
||||
chainsToSave = append(chainsToSave, nativeChain)
|
||||
}
|
||||
|
||||
if err = h.ape.PutBucketPolicy(reqInfo.Namespace, bktInfo.CID, jsonPolicy, chainsToSave); err != nil {
|
||||
h.logAndSendError(w, "failed to update policy in contract", reqInfo, err)
|
||||
if _, err = h.updateBucketACL(r, astPolicy, bktInfo, token); err != nil {
|
||||
h.logAndSendError(w, "could not update bucket acl", reqInfo, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
type nativeResolver struct {
|
||||
FrostFSID
|
||||
namespace string
|
||||
bktInfo *data.BucketInfo
|
||||
}
|
||||
|
||||
func (n *nativeResolver) GetBucketInfo(bucket string) (*engineiam.BucketInfo, error) {
|
||||
if n.bktInfo.Name != bucket {
|
||||
return nil, fmt.Errorf("invalid bucket %s: %w", bucket, errors.GetAPIError(errors.ErrMalformedPolicy))
|
||||
}
|
||||
|
||||
return &engineiam.BucketInfo{Namespace: n.namespace, Container: n.bktInfo.CID.EncodeToString()}, nil
|
||||
}
|
||||
|
||||
func (h *handler) nativeResolver(ns string, bktInfo *data.BucketInfo) engineiam.NativeResolver {
|
||||
return &nativeResolver{
|
||||
FrostFSID: h.frostfsid,
|
||||
namespace: ns,
|
||||
bktInfo: bktInfo,
|
||||
}
|
||||
}
|
||||
|
||||
func getBucketChainID(prefix chain.Name, bktInfo *data.BucketInfo) chain.ID {
|
||||
return chain.ID(string(prefix) + ":bkt" + string(bktInfo.CID[:]))
|
||||
}
|
||||
|
||||
func parseACLHeaders(header http.Header, key *keys.PublicKey) (*AccessControlPolicy, error) {
|
||||
var err error
|
||||
acp := &AccessControlPolicy{Owner: Owner{
|
||||
|
@ -936,7 +643,7 @@ func parseGrantee(grantees string) ([]*Grantee, error) {
|
|||
}
|
||||
|
||||
func formGrantee(granteeType, value string) (*Grantee, error) {
|
||||
value = data.UnQuote(value)
|
||||
value = strings.Trim(value, "\"")
|
||||
switch granteeType {
|
||||
case "id":
|
||||
return &Grantee{
|
||||
|
@ -1428,6 +1135,73 @@ func resourceInfoFromName(name, bucketName string) resourceInfo {
|
|||
return resInfo
|
||||
}
|
||||
|
||||
func astToPolicy(ast *ast) *bucketPolicy {
|
||||
bktPolicy := &bucketPolicy{}
|
||||
|
||||
for _, resource := range ast.Resources {
|
||||
allowed, denied := triageOperations(resource.Operations)
|
||||
handleResourceOperations(bktPolicy, allowed, eacl.ActionAllow, resource.Name())
|
||||
handleResourceOperations(bktPolicy, denied, eacl.ActionDeny, resource.Name())
|
||||
}
|
||||
|
||||
return bktPolicy
|
||||
}
|
||||
|
||||
func handleResourceOperations(bktPolicy *bucketPolicy, list []*astOperation, eaclAction eacl.Action, resourceName string) {
|
||||
userOpsMap := make(map[string][]eacl.Operation)
|
||||
|
||||
for _, op := range list {
|
||||
if !op.IsGroupGrantee() {
|
||||
for _, user := range op.Users {
|
||||
userOps := userOpsMap[user]
|
||||
userOps = append(userOps, op.Op)
|
||||
userOpsMap[user] = userOps
|
||||
}
|
||||
} else {
|
||||
userOps := userOpsMap[allUsersGroup]
|
||||
userOps = append(userOps, op.Op)
|
||||
userOpsMap[allUsersGroup] = userOps
|
||||
}
|
||||
}
|
||||
|
||||
for user, userOps := range userOpsMap {
|
||||
var actions []string
|
||||
LOOP:
|
||||
for action, ops := range actionToOpMap {
|
||||
for _, op := range ops {
|
||||
if !contains(userOps, op) {
|
||||
continue LOOP
|
||||
}
|
||||
}
|
||||
actions = append(actions, action)
|
||||
}
|
||||
if len(actions) != 0 {
|
||||
state := statement{
|
||||
Effect: actionToEffect(eaclAction),
|
||||
Principal: principal{CanonicalUser: user},
|
||||
Action: actions,
|
||||
Resource: []string{arnAwsPrefix + resourceName},
|
||||
}
|
||||
if user == allUsersGroup {
|
||||
state.Principal = principal{AWS: allUsersWildcard}
|
||||
}
|
||||
bktPolicy.Statement = append(bktPolicy.Statement, state)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func triageOperations(operations []*astOperation) ([]*astOperation, []*astOperation) {
|
||||
var allowed, denied []*astOperation
|
||||
for _, op := range operations {
|
||||
if op.Action == eacl.ActionAllow {
|
||||
allowed = append(allowed, op)
|
||||
} else {
|
||||
denied = append(denied, op)
|
||||
}
|
||||
}
|
||||
return allowed, denied
|
||||
}
|
||||
|
||||
func addTo(list []*astOperation, userID string, op eacl.Operation, groupGrantee bool, action eacl.Action) []*astOperation {
|
||||
var found *astOperation
|
||||
for _, astop := range list {
|
||||
|
@ -1614,6 +1388,17 @@ func effectToAction(effect string) eacl.Action {
|
|||
return eacl.ActionUnknown
|
||||
}
|
||||
|
||||
func actionToEffect(action eacl.Action) string {
|
||||
switch action {
|
||||
case eacl.ActionAllow:
|
||||
return "Allow"
|
||||
case eacl.ActionDeny:
|
||||
return "Deny"
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
}
|
||||
|
||||
func permissionToOperations(permission AWSACL) []eacl.Operation {
|
||||
switch permission {
|
||||
case aclFullControl:
|
||||
|
@ -1630,26 +1415,6 @@ func isWriteOperation(op eacl.Operation) bool {
|
|||
return op == eacl.OperationDelete || op == eacl.OperationPut
|
||||
}
|
||||
|
||||
type access struct {
recipient string
operations []eacl.Operation
}

type accessList struct {
list []access
}

func (c *accessList) addAccess(recipient string, operation eacl.Operation) {
for i, v := range c.list {
if v.recipient == recipient {
c.list[i].operations = append(c.list[i].operations, operation)
return
}
}

c.list = append(c.list, access{recipient, []eacl.Operation{operation}})
}

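A short usage sketch of the accessList helper above (the type master uses instead of the plain map): repeated calls for the same recipient are merged in place, new recipients are appended in encounter order. The recipient IDs below are made up.

```go
// Illustrative only; exercises the addAccess grouping behavior shown above.
func TestAccessListSketch(t *testing.T) {
	var acl accessList
	acl.addAccess("1111", eacl.OperationGet) // hypothetical recipient ID
	acl.addAccess("1111", eacl.OperationPut)
	acl.addAccess(allUsersGroup, eacl.OperationGet)

	require.Len(t, acl.list, 2)
	require.Equal(t, []eacl.Operation{eacl.OperationGet, eacl.OperationPut}, acl.list[0].operations)
	require.Equal(t, allUsersGroup, acl.list[1].recipient)
}
```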
func (h *handler) encodeObjectACL(ctx context.Context, bucketACL *layer.BucketACL, bucketName, objectVersion string) *AccessControlPolicy {
|
||||
res := &AccessControlPolicy{
|
||||
Owner: Owner{
|
||||
|
@ -1658,7 +1423,7 @@ func (h *handler) encodeObjectACL(ctx context.Context, bucketACL *layer.BucketAC
|
|||
},
|
||||
}
|
||||
|
||||
m := &accessList{}
|
||||
m := make(map[string][]eacl.Operation)
|
||||
|
||||
astList := tableToAst(bucketACL.EACL, bucketName)
|
||||
|
||||
|
@ -1673,20 +1438,22 @@ func (h *handler) encodeObjectACL(ctx context.Context, bucketACL *layer.BucketAC
|
|||
}
|
||||
|
||||
if len(op.Users) == 0 {
|
||||
m.addAccess(allUsersGroup, op.Op)
|
||||
list := append(m[allUsersGroup], op.Op)
|
||||
m[allUsersGroup] = list
|
||||
} else {
|
||||
for _, user := range op.Users {
|
||||
m.addAccess(user, op.Op)
|
||||
list := append(m[user], op.Op)
|
||||
m[user] = list
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, val := range m.list {
|
||||
for key, val := range m {
|
||||
permission := aclFullControl
|
||||
read := true
|
||||
for op := eacl.OperationGet; op <= eacl.OperationRangeHash; op++ {
|
||||
if !contains(val.operations, op) && !isWriteOperation(op) {
|
||||
if !contains(val, op) && !isWriteOperation(op) {
|
||||
read = false
|
||||
}
|
||||
}
|
||||
|
@ -1698,12 +1465,12 @@ func (h *handler) encodeObjectACL(ctx context.Context, bucketACL *layer.BucketAC
|
|||
}
|
||||
|
||||
var grantee *Grantee
|
||||
if val.recipient == allUsersGroup {
|
||||
if key == allUsersGroup {
|
||||
grantee = NewGrantee(acpGroup)
|
||||
grantee.URI = allUsersGroup
|
||||
} else {
|
||||
grantee = NewGrantee(acpCanonicalUser)
|
||||
grantee.ID = val.recipient
|
||||
grantee.ID = key
|
||||
}
|
||||
|
||||
grant := &Grant{
|
||||
|
|
|
@ -7,7 +7,6 @@ import (
|
|||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
|
@ -17,7 +16,6 @@ import (
|
|||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
||||
s3errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
|
||||
|
@ -25,8 +23,6 @@ import (
|
|||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
|
||||
engineiam "git.frostfs.info/TrueCloudLab/policy-engine/iam"
|
||||
"git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
|
||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
@ -1303,235 +1299,59 @@ func TestBucketAclToAst(t *testing.T) {
|
|||
|
||||
func TestPutBucketACL(t *testing.T) {
|
||||
tc := prepareHandlerContext(t)
|
||||
tc.config.aclEnabled = true
|
||||
bktName := "bucket-for-acl"
|
||||
|
||||
info := createBucket(tc, bktName)
|
||||
box, _ := createAccessBox(t)
|
||||
bktInfo := createBucket(t, tc, bktName, box)
|
||||
|
||||
header := map[string]string{api.AmzACL: "public-read"}
|
||||
putBucketACL(tc, bktName, info.Box, header)
|
||||
putBucketACL(t, tc, bktName, box, header)
|
||||
|
||||
header = map[string]string{api.AmzACL: "private"}
|
||||
putBucketACL(tc, bktName, info.Box, header)
|
||||
checkLastRecords(t, tc, info.BktInfo, eacl.ActionDeny)
|
||||
}
|
||||
|
||||
func TestPutBucketAPE(t *testing.T) {
|
||||
hc := prepareHandlerContext(t)
|
||||
bktName := "bucket-for-acl-ape"
|
||||
|
||||
info := createBucket(hc, bktName)
|
||||
|
||||
_, err := hc.tp.ContainerEACL(hc.Context(), layer.PrmContainerEACL{ContainerID: info.BktInfo.CID})
|
||||
require.ErrorContains(t, err, "not found")
|
||||
|
||||
chains, err := hc.h.ape.(*apeMock).ListChains(engine.ContainerTarget(info.BktInfo.CID.EncodeToString()))
|
||||
require.NoError(t, err)
|
||||
require.Len(t, chains, 2)
|
||||
}
|
||||
|
||||
func TestPutObjectACLErrorAPE(t *testing.T) {
|
||||
hc := prepareHandlerContext(t)
|
||||
bktName, objName := "bucket-for-acl-ape", "object"
|
||||
|
||||
info := createBucket(hc, bktName)
|
||||
|
||||
putObjectWithHeadersAssertS3Error(hc, bktName, objName, map[string]string{api.AmzACL: basicACLPublic}, s3errors.ErrAccessControlListNotSupported)
|
||||
putObjectWithHeaders(hc, bktName, objName, map[string]string{api.AmzACL: basicACLPrivate}) // only `private` canned acl is allowed, that is actually ignored
|
||||
putObjectWithHeaders(hc, bktName, objName, nil)
|
||||
|
||||
aclBody := &AccessControlPolicy{}
|
||||
putObjectACLAssertS3Error(hc, bktName, objName, info.Box, nil, aclBody, s3errors.ErrAccessControlListNotSupported)
|
||||
|
||||
aclRes := getObjectACL(hc, bktName, objName)
|
||||
checkPrivateACL(t, aclRes, info.Key.PublicKey())
|
||||
}
|
||||
|
||||
func TestCreateObjectACLErrorAPE(t *testing.T) {
|
||||
hc := prepareHandlerContext(t)
|
||||
bktName, objName, objNameCopy := "bucket-for-acl-ape", "object", "copy"
|
||||
|
||||
createBucket(hc, bktName)
|
||||
|
||||
putObject(hc, bktName, objName)
|
||||
copyObject(hc, bktName, objName, objNameCopy, CopyMeta{Headers: map[string]string{api.AmzACL: basicACLPublic}}, http.StatusBadRequest)
|
||||
copyObject(hc, bktName, objName, objNameCopy, CopyMeta{Headers: map[string]string{api.AmzACL: basicACLPrivate}}, http.StatusOK)
|
||||
|
||||
createMultipartUploadAssertS3Error(hc, bktName, objName, map[string]string{api.AmzACL: basicACLPublic}, s3errors.ErrAccessControlListNotSupported)
|
||||
createMultipartUpload(hc, bktName, objName, map[string]string{api.AmzACL: basicACLPrivate})
|
||||
}
|
||||
|
||||
func TestPutObjectACLBackwardCompatibility(t *testing.T) {
|
||||
hc := prepareHandlerContext(t)
|
||||
hc.config.aclEnabled = true
|
||||
bktName, objName := "bucket-for-acl-ape", "object"
|
||||
|
||||
info := createBucket(hc, bktName)
|
||||
|
||||
putObjectWithHeadersBase(hc, bktName, objName, map[string]string{api.AmzACL: basicACLPrivate}, info.Box, nil)
|
||||
putObjectWithHeadersBase(hc, bktName, objName, map[string]string{api.AmzACL: basicACLPublic}, info.Box, nil)
|
||||
|
||||
aclRes := getObjectACL(hc, bktName, objName)
|
||||
require.Len(t, aclRes.AccessControlList, 2)
|
||||
require.Equal(t, hex.EncodeToString(info.Key.PublicKey().Bytes()), aclRes.AccessControlList[0].Grantee.ID)
|
||||
require.Equal(t, aclFullControl, aclRes.AccessControlList[0].Permission)
|
||||
require.Equal(t, allUsersGroup, aclRes.AccessControlList[1].Grantee.URI)
|
||||
require.Equal(t, aclFullControl, aclRes.AccessControlList[1].Permission)
|
||||
|
||||
aclBody := &AccessControlPolicy{}
|
||||
putObjectACLBase(hc, bktName, objName, info.Box, nil, aclBody)
|
||||
}
|
||||
|
||||
func TestBucketACLAPE(t *testing.T) {
|
||||
hc := prepareHandlerContext(t)
|
||||
bktName := "bucket-for-acl-ape"
|
||||
|
||||
info := createBucket(hc, bktName)
|
||||
|
||||
aclBody := &AccessControlPolicy{}
|
||||
putBucketACLAssertS3Error(hc, bktName, info.Box, nil, aclBody, s3errors.ErrAccessControlListNotSupported)
|
||||
|
||||
aclRes := getBucketACL(hc, bktName)
|
||||
checkPrivateACL(t, aclRes, info.Key.PublicKey())
|
||||
|
||||
putBucketACL(hc, bktName, info.Box, map[string]string{api.AmzACL: basicACLPrivate})
|
||||
aclRes = getBucketACL(hc, bktName)
|
||||
checkPrivateACL(t, aclRes, info.Key.PublicKey())
|
||||
|
||||
putBucketACL(hc, bktName, info.Box, map[string]string{api.AmzACL: basicACLReadOnly})
|
||||
aclRes = getBucketACL(hc, bktName)
|
||||
checkPublicReadACL(t, aclRes, info.Key.PublicKey())
|
||||
|
||||
putBucketACL(hc, bktName, info.Box, map[string]string{api.AmzACL: basicACLPublic})
|
||||
aclRes = getBucketACL(hc, bktName)
|
||||
checkPublicReadWriteACL(t, aclRes, info.Key.PublicKey())
|
||||
}
|
||||
|
||||
func checkPrivateACL(t *testing.T, aclRes *AccessControlPolicy, ownerKey *keys.PublicKey) {
|
||||
checkACLOwner(t, aclRes, ownerKey, 1)
|
||||
}
|
||||
|
||||
func checkPublicReadACL(t *testing.T, aclRes *AccessControlPolicy, ownerKey *keys.PublicKey) {
|
||||
checkACLOwner(t, aclRes, ownerKey, 2)
|
||||
|
||||
require.Equal(t, allUsersGroup, aclRes.AccessControlList[1].Grantee.URI)
|
||||
require.Equal(t, aclRead, aclRes.AccessControlList[1].Permission)
|
||||
}
|
||||
|
||||
func checkPublicReadWriteACL(t *testing.T, aclRes *AccessControlPolicy, ownerKey *keys.PublicKey) {
|
||||
checkACLOwner(t, aclRes, ownerKey, 3)
|
||||
|
||||
require.Equal(t, allUsersGroup, aclRes.AccessControlList[1].Grantee.URI)
|
||||
require.Equal(t, aclWrite, aclRes.AccessControlList[1].Permission)
|
||||
|
||||
require.Equal(t, allUsersGroup, aclRes.AccessControlList[2].Grantee.URI)
|
||||
require.Equal(t, aclRead, aclRes.AccessControlList[2].Permission)
|
||||
}
|
||||
|
||||
func checkACLOwner(t *testing.T, aclRes *AccessControlPolicy, ownerKey *keys.PublicKey, ln int) {
|
||||
ownerIDStr := hex.EncodeToString(ownerKey.Bytes())
|
||||
ownerNameStr := ownerKey.Address()
|
||||
|
||||
require.Equal(t, ownerIDStr, aclRes.Owner.ID)
|
||||
require.Equal(t, ownerNameStr, aclRes.Owner.DisplayName)
|
||||
|
||||
require.Len(t, aclRes.AccessControlList, ln)
|
||||
|
||||
require.Equal(t, ownerIDStr, aclRes.AccessControlList[0].Grantee.ID)
|
||||
require.Equal(t, ownerNameStr, aclRes.AccessControlList[0].Grantee.DisplayName)
|
||||
require.Equal(t, aclFullControl, aclRes.AccessControlList[0].Permission)
|
||||
putBucketACL(t, tc, bktName, box, header)
|
||||
checkLastRecords(t, tc, bktInfo, eacl.ActionDeny)
|
||||
}
|
||||
|
||||
func TestBucketPolicy(t *testing.T) {
|
||||
hc := prepareHandlerContext(t)
|
||||
bktName := "bucket-for-policy"
|
||||
|
||||
createTestBucket(hc, bktName)
|
||||
|
||||
getBucketPolicy(hc, bktName, s3errors.ErrNoSuchBucketPolicy)
|
||||
|
||||
newPolicy := engineiam.Policy{
|
||||
Version: "2012-10-17",
|
||||
Statement: []engineiam.Statement{{
|
||||
Principal: map[engineiam.PrincipalType][]string{engineiam.Wildcard: {}},
|
||||
Effect: engineiam.DenyEffect,
|
||||
Action: engineiam.Action{"s3:PutObject"},
|
||||
Resource: engineiam.Resource{"arn:aws:s3:::test/*"},
|
||||
}},
|
||||
}
|
||||
|
||||
putBucketPolicy(hc, bktName, newPolicy, s3errors.ErrMalformedPolicy)
|
||||
|
||||
newPolicy.Statement[0].Resource[0] = arnAwsPrefix + bktName + "/*"
|
||||
putBucketPolicy(hc, bktName, newPolicy)
|
||||
box, key := createAccessBox(t)
|
||||
createBucket(t, hc, bktName, box)
|
||||
|
||||
bktPolicy := getBucketPolicy(hc, bktName)
|
||||
require.Equal(t, newPolicy, bktPolicy)
|
||||
}
|
||||
for _, st := range bktPolicy.Statement {
|
||||
if st.Effect == "Allow" {
|
||||
require.Equal(t, hex.EncodeToString(key.PublicKey().Bytes()), st.Principal.CanonicalUser)
|
||||
require.Equal(t, []string{arnAwsPrefix + bktName}, st.Resource)
|
||||
} else {
|
||||
require.Equal(t, allUsersWildcard, st.Principal.AWS)
|
||||
require.Equal(t, "Deny", st.Effect)
|
||||
require.Equal(t, []string{arnAwsPrefix + bktName}, st.Resource)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBucketPolicyStatus(t *testing.T) {
|
||||
hc := prepareHandlerContext(t)
|
||||
bktName := "bucket-for-policy"
|
||||
|
||||
createTestBucket(hc, bktName)
|
||||
|
||||
getBucketPolicy(hc, bktName, s3errors.ErrNoSuchBucketPolicy)
|
||||
|
||||
newPolicy := engineiam.Policy{
|
||||
Version: "2012-10-17",
|
||||
Statement: []engineiam.Statement{{
|
||||
NotPrincipal: engineiam.Principal{engineiam.Wildcard: {}},
|
||||
Effect: engineiam.AllowEffect,
|
||||
Action: engineiam.Action{"s3:PutObject"},
|
||||
Resource: engineiam.Resource{arnAwsPrefix + bktName + "/*"},
|
||||
newPolicy := &bucketPolicy{
|
||||
Statement: []statement{{
|
||||
Effect: "Allow",
|
||||
Principal: principal{AWS: allUsersWildcard},
|
||||
Action: []string{s3GetObject},
|
||||
Resource: []string{arnAwsPrefix + "dummy"},
|
||||
}},
|
||||
}
|
||||
|
||||
putBucketPolicy(hc, bktName, newPolicy, s3errors.ErrMalformedPolicyNotPrincipal)
|
||||
putBucketPolicy(hc, bktName, newPolicy, box, http.StatusInternalServerError)
|
||||
|
||||
newPolicy.Statement[0].NotPrincipal = nil
|
||||
newPolicy.Statement[0].Principal = map[engineiam.PrincipalType][]string{engineiam.Wildcard: {}}
|
||||
putBucketPolicy(hc, bktName, newPolicy)
|
||||
bktPolicyStatus := getBucketPolicyStatus(hc, bktName)
|
||||
require.True(t, PolicyStatusIsPublicTrue == bktPolicyStatus.IsPublic)
|
||||
newPolicy.Statement[0].Resource[0] = arnAwsPrefix + bktName
|
||||
putBucketPolicy(hc, bktName, newPolicy, box, http.StatusOK)
|
||||
|
||||
key, err := keys.NewPrivateKey()
|
||||
require.NoError(t, err)
|
||||
hc.Handler().frostfsid.(*frostfsidMock).data["devenv"] = key.PublicKey()
|
||||
|
||||
newPolicy.Statement[0].Principal = map[engineiam.PrincipalType][]string{engineiam.AWSPrincipalType: {"arn:aws:iam:::user/devenv"}}
|
||||
putBucketPolicy(hc, bktName, newPolicy)
|
||||
bktPolicyStatus = getBucketPolicyStatus(hc, bktName)
|
||||
require.True(t, PolicyStatusIsPublicFalse == bktPolicyStatus.IsPublic)
|
||||
}
|
||||
|
||||
func TestDeleteBucketWithPolicy(t *testing.T) {
|
||||
hc := prepareHandlerContext(t)
|
||||
|
||||
bktName := "bucket-for-policy"
|
||||
bi := createTestBucket(hc, bktName)
|
||||
|
||||
newPolicy := engineiam.Policy{
|
||||
Version: "2012-10-17",
|
||||
Statement: []engineiam.Statement{{
|
||||
Principal: map[engineiam.PrincipalType][]string{engineiam.Wildcard: {}},
|
||||
Effect: engineiam.AllowEffect,
|
||||
Action: engineiam.Action{"s3:PutObject"},
|
||||
Resource: engineiam.Resource{"arn:aws:s3:::bucket-for-policy/*"},
|
||||
}},
|
||||
bktPolicy = getBucketPolicy(hc, bktName)
|
||||
for _, st := range bktPolicy.Statement {
|
||||
if st.Effect == "Allow" && st.Principal.AWS == allUsersWildcard {
|
||||
require.Equal(t, []string{arnAwsPrefix + bktName}, st.Resource)
|
||||
require.ElementsMatch(t, []string{s3GetObject, s3ListBucket}, st.Action)
|
||||
}
|
||||
}
|
||||
|
||||
putBucketPolicy(hc, bktName, newPolicy)
|
||||
|
||||
require.Len(t, hc.h.ape.(*apeMock).policyMap, 1)
|
||||
require.Len(t, hc.h.ape.(*apeMock).chainMap[engine.ContainerTarget(bi.CID.EncodeToString())], 4)
|
||||
|
||||
deleteBucket(t, hc, bktName, http.StatusNoContent)
|
||||
|
||||
require.Empty(t, hc.h.ape.(*apeMock).policyMap)
|
||||
chains, err := hc.h.ape.(*apeMock).ListChains(engine.ContainerTarget(bi.CID.EncodeToString()))
|
||||
require.NoError(t, err)
|
||||
require.Empty(t, chains)
|
||||
}
|
||||
|
||||
func TestBucketPolicyUnmarshal(t *testing.T) {
|
||||
|
@ -1591,7 +1411,9 @@ func TestPutBucketPolicy(t *testing.T) {
|
|||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [{
|
||||
"Principal": "*",
|
||||
"Principal": {
|
||||
"AWS": "*"
|
||||
},
|
||||
"Effect": "Deny",
|
||||
"Action": "s3:GetObject",
|
||||
"Resource": "arn:aws:s3:::bucket-for-policy/*"
|
||||
|
@ -1601,57 +1423,36 @@ func TestPutBucketPolicy(t *testing.T) {
|
|||
hc := prepareHandlerContext(t)
|
||||
bktName := "bucket-for-policy"
|
||||
|
||||
createTestBucket(hc, bktName)
|
||||
box, _ := createAccessBox(t)
|
||||
createBucket(t, hc, bktName, box)
|
||||
|
||||
w, r := prepareTestPayloadRequest(hc, bktName, "", bytes.NewReader([]byte(bktPolicy)))
|
||||
ctx := middleware.SetBoxData(r.Context(), box)
|
||||
r = r.WithContext(ctx)
|
||||
hc.Handler().PutBucketPolicyHandler(w, r)
|
||||
assertStatus(hc.t, w, http.StatusOK)
|
||||
}
|
||||
|
||||
func getBucketPolicy(hc *handlerContext, bktName string, errCode ...s3errors.ErrorCode) engineiam.Policy {
|
||||
func getBucketPolicy(hc *handlerContext, bktName string) *bucketPolicy {
|
||||
w, r := prepareTestRequest(hc, bktName, "", nil)
|
||||
hc.Handler().GetBucketPolicyHandler(w, r)
|
||||
|
||||
var policy engineiam.Policy
|
||||
if len(errCode) == 0 {
|
||||
assertStatus(hc.t, w, http.StatusOK)
|
||||
err := json.NewDecoder(w.Result().Body).Decode(&policy)
|
||||
require.NoError(hc.t, err)
|
||||
} else {
|
||||
assertS3Error(hc.t, w, s3errors.GetAPIError(errCode[0]))
|
||||
}
|
||||
|
||||
assertStatus(hc.t, w, http.StatusOK)
|
||||
policy := &bucketPolicy{}
|
||||
err := json.NewDecoder(w.Result().Body).Decode(policy)
|
||||
require.NoError(hc.t, err)
|
||||
return policy
|
||||
}
|
||||
|
||||
func getBucketPolicyStatus(hc *handlerContext, bktName string, errCode ...s3errors.ErrorCode) PolicyStatus {
|
||||
w, r := prepareTestRequest(hc, bktName, "", nil)
|
||||
hc.Handler().GetBucketPolicyStatusHandler(w, r)
|
||||
|
||||
var policyStatus PolicyStatus
|
||||
if len(errCode) == 0 {
|
||||
assertStatus(hc.t, w, http.StatusOK)
|
||||
err := xml.NewDecoder(w.Result().Body).Decode(&policyStatus)
|
||||
require.NoError(hc.t, err)
|
||||
} else {
|
||||
assertS3Error(hc.t, w, s3errors.GetAPIError(errCode[0]))
|
||||
}
|
||||
|
||||
return policyStatus
|
||||
}
|
||||
|
||||
func putBucketPolicy(hc *handlerContext, bktName string, bktPolicy engineiam.Policy, errCode ...s3errors.ErrorCode) {
|
||||
func putBucketPolicy(hc *handlerContext, bktName string, bktPolicy *bucketPolicy, box *accessbox.Box, status int) {
|
||||
body, err := json.Marshal(bktPolicy)
|
||||
require.NoError(hc.t, err)
|
||||
|
||||
w, r := prepareTestPayloadRequest(hc, bktName, "", bytes.NewReader(body))
|
||||
ctx := middleware.SetBoxData(r.Context(), box)
|
||||
r = r.WithContext(ctx)
|
||||
hc.Handler().PutBucketPolicyHandler(w, r)
|
||||
|
||||
if len(errCode) == 0 {
|
||||
assertStatus(hc.t, w, http.StatusOK)
|
||||
} else {
|
||||
assertS3Error(hc.t, w, s3errors.GetAPIError(errCode[0]))
|
||||
}
|
||||
assertStatus(hc.t, w, status)
|
||||
}
|
||||
|
||||
func checkLastRecords(t *testing.T, tc *handlerContext, bktInfo *data.BucketInfo, action eacl.Action) {
|
||||
|
@ -1699,26 +1500,13 @@ func createAccessBox(t *testing.T) (*accessbox.Box, *keys.PrivateKey) {
|
|||
return box, key
|
||||
}
|
||||
|
||||
type createBucketInfo struct {
|
||||
BktInfo *data.BucketInfo
|
||||
Box *accessbox.Box
|
||||
Key *keys.PrivateKey
|
||||
}
|
||||
|
||||
func createBucket(hc *handlerContext, bktName string) *createBucketInfo {
|
||||
box, key := createAccessBox(hc.t)
|
||||
|
||||
func createBucket(t *testing.T, hc *handlerContext, bktName string, box *accessbox.Box) *data.BucketInfo {
|
||||
w := createBucketBase(hc, bktName, box)
|
||||
assertStatus(hc.t, w, http.StatusOK)
|
||||
assertStatus(t, w, http.StatusOK)
|
||||
|
||||
bktInfo, err := hc.Layer().GetBucketInfo(hc.Context(), bktName)
|
||||
require.NoError(hc.t, err)
|
||||
|
||||
return &createBucketInfo{
|
||||
BktInfo: bktInfo,
|
||||
Box: box,
|
||||
Key: key,
|
||||
}
|
||||
require.NoError(t, err)
|
||||
return bktInfo
|
||||
}
|
||||
|
||||
func createBucketAssertS3Error(hc *handlerContext, bktName string, box *accessbox.Box, code s3errors.ErrorCode) {
|
||||
|
@ -1734,93 +1522,13 @@ func createBucketBase(hc *handlerContext, bktName string, box *accessbox.Box) *h
|
|||
return w
|
||||
}
|
||||
|
||||
func putBucketACL(hc *handlerContext, bktName string, box *accessbox.Box, header map[string]string) {
|
||||
w := putBucketACLBase(hc, bktName, box, header, nil)
|
||||
assertStatus(hc.t, w, http.StatusOK)
|
||||
}
|
||||
|
||||
func putBucketACLAssertS3Error(hc *handlerContext, bktName string, box *accessbox.Box, header map[string]string, body *AccessControlPolicy, code s3errors.ErrorCode) {
|
||||
w := putBucketACLBase(hc, bktName, box, header, body)
|
||||
assertS3Error(hc.t, w, s3errors.GetAPIError(code))
|
||||
}
|
||||
|
||||
func putBucketACLBase(hc *handlerContext, bktName string, box *accessbox.Box, header map[string]string, body *AccessControlPolicy) *httptest.ResponseRecorder {
|
||||
w, r := prepareTestRequest(hc, bktName, "", body)
|
||||
func putBucketACL(t *testing.T, tc *handlerContext, bktName string, box *accessbox.Box, header map[string]string) {
|
||||
w, r := prepareTestRequest(tc, bktName, "", nil)
|
||||
for key, val := range header {
|
||||
r.Header.Set(key, val)
|
||||
}
|
||||
ctx := middleware.SetBoxData(r.Context(), box)
|
||||
r = r.WithContext(ctx)
|
||||
hc.Handler().PutBucketACLHandler(w, r)
|
||||
return w
|
||||
}
|
||||
|
||||
func getBucketACL(hc *handlerContext, bktName string) *AccessControlPolicy {
|
||||
w := getBucketACLBase(hc, bktName)
|
||||
assertStatus(hc.t, w, http.StatusOK)
|
||||
res := &AccessControlPolicy{}
|
||||
parseTestResponse(hc.t, w, res)
|
||||
return res
|
||||
}
|
||||
|
||||
func getBucketACLBase(hc *handlerContext, bktName string) *httptest.ResponseRecorder {
|
||||
w, r := prepareTestRequest(hc, bktName, "", nil)
|
||||
hc.Handler().GetBucketACLHandler(w, r)
|
||||
return w
|
||||
}
|
||||
|
||||
func putObjectACLAssertS3Error(hc *handlerContext, bktName, objName string, box *accessbox.Box, header map[string]string, body *AccessControlPolicy, code s3errors.ErrorCode) {
|
||||
w := putObjectACLBase(hc, bktName, objName, box, header, body)
|
||||
assertS3Error(hc.t, w, s3errors.GetAPIError(code))
|
||||
}
|
||||
|
||||
func putObjectACLBase(hc *handlerContext, bktName, objName string, box *accessbox.Box, header map[string]string, body *AccessControlPolicy) *httptest.ResponseRecorder {
|
||||
w, r := prepareTestRequest(hc, bktName, objName, body)
|
||||
for key, val := range header {
|
||||
r.Header.Set(key, val)
|
||||
}
|
||||
ctx := middleware.SetBoxData(r.Context(), box)
|
||||
r = r.WithContext(ctx)
|
||||
hc.Handler().PutObjectACLHandler(w, r)
|
||||
return w
|
||||
}
|
||||
|
||||
func getObjectACL(hc *handlerContext, bktName, objName string) *AccessControlPolicy {
|
||||
w := getObjectACLBase(hc, bktName, objName)
|
||||
assertStatus(hc.t, w, http.StatusOK)
|
||||
res := &AccessControlPolicy{}
|
||||
parseTestResponse(hc.t, w, res)
|
||||
return res
|
||||
}
|
||||
|
||||
func getObjectACLBase(hc *handlerContext, bktName, objName string) *httptest.ResponseRecorder {
|
||||
w, r := prepareTestRequest(hc, bktName, objName, nil)
|
||||
hc.Handler().GetObjectACLHandler(w, r)
|
||||
return w
|
||||
}
|
||||
|
||||
func putObjectWithHeaders(hc *handlerContext, bktName, objName string, headers map[string]string) http.Header {
|
||||
w := putObjectWithHeadersBase(hc, bktName, objName, headers, nil, nil)
|
||||
assertStatus(hc.t, w, http.StatusOK)
|
||||
return w.Header()
|
||||
}
|
||||
|
||||
func putObjectWithHeadersAssertS3Error(hc *handlerContext, bktName, objName string, headers map[string]string, code s3errors.ErrorCode) {
|
||||
w := putObjectWithHeadersBase(hc, bktName, objName, headers, nil, nil)
|
||||
assertS3Error(hc.t, w, s3errors.GetAPIError(code))
|
||||
}
|
||||
|
||||
func putObjectWithHeadersBase(hc *handlerContext, bktName, objName string, headers map[string]string, box *accessbox.Box, data []byte) *httptest.ResponseRecorder {
|
||||
body := bytes.NewReader(data)
|
||||
w, r := prepareTestPayloadRequest(hc, bktName, objName, body)
|
||||
|
||||
for k, v := range headers {
|
||||
r.Header.Set(k, v)
|
||||
}
|
||||
|
||||
ctx := middleware.SetBoxData(r.Context(), box)
|
||||
r = r.WithContext(ctx)
|
||||
|
||||
hc.Handler().PutObjectHandler(w, r)
|
||||
return w
|
||||
tc.Handler().PutBucketACLHandler(w, r)
|
||||
assertStatus(t, w, http.StatusOK)
|
||||
}
|
||||
|
|
|
@ -12,9 +12,7 @@ import (
|
|||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
|
||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
|
||||
"git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
|
@ -23,9 +21,7 @@ type (
|
|||
log *zap.Logger
|
||||
obj layer.Client
|
||||
notificator Notificator
|
||||
cfg Config
|
||||
ape APE
|
||||
frostfsid FrostFSID
|
||||
cfg *Config
|
||||
}
|
||||
|
||||
Notificator interface {
|
||||
|
@ -34,51 +30,46 @@ type (
|
|||
}
|
||||
|
||||
// Config contains data which handler needs to keep.
Config interface {
DefaultPlacementPolicy(namespace string) netmap.PlacementPolicy
PlacementPolicy(namespace, constraint string) (netmap.PlacementPolicy, bool)
CopiesNumbers(namespace, constraint string) ([]uint32, bool)
DefaultCopiesNumbers(namespace string) []uint32
NewXMLDecoder(io.Reader) *xml.Decoder
DefaultMaxAge() int
NotificatorEnabled() bool
ResolveZoneList() []string
IsResolveListAllow() bool
Config struct {
Policy PlacementPolicy
XMLDecoder XMLDecoderProvider
DefaultMaxAge int
NotificatorEnabled bool
ResolveZoneList []string
IsResolveListAllow bool // True if ResolveZoneList contains allowed zones
CompleteMultipartKeepalive time.Duration
Kludge KludgeSettings
Features layer.FeatureSettings
}

PlacementPolicy interface {
DefaultPlacementPolicy() netmap.PlacementPolicy
PlacementPolicy(string) (netmap.PlacementPolicy, bool)
CopiesNumbers(string) ([]uint32, bool)
DefaultCopiesNumbers() []uint32
}

XMLDecoderProvider interface {
NewCompleteMultipartDecoder(io.Reader) *xml.Decoder
}

KludgeSettings interface {
BypassContentEncodingInChunks() bool
MD5Enabled() bool
ACLEnabled() bool
}

FrostFSID interface {
GetUserAddress(account, user string) (string, error)
GetUserKey(account, name string) (string, error)
}

// APE is Access Policy Engine that needs to save policy and acl info to different places.
APE interface {
PutBucketPolicy(ns string, cnrID cid.ID, policy []byte, chains []*chain.Chain) error
DeleteBucketPolicy(ns string, cnrID cid.ID, chainIDs []chain.ID) error
GetBucketPolicy(ns string, cnrID cid.ID) ([]byte, error)
SaveACLChains(cid string, chains []*chain.Chain) error
}
)
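For orientation, the support branch wires the concrete Config struct above into New, while master accepts the Config interface instead. A minimal construction sketch under the old struct layout; the placement, decoder, kludge and feature implementations are placeholders supplied by the application, and the numeric value is illustrative:

```go
// Sketch of constructing the support/v0 handler; New (below) validates
// these fields and only warns when the notificator is disabled.
func newHandlerSketch(log *zap.Logger, obj layer.Client,
	policy PlacementPolicy, dec XMLDecoderProvider,
	kludge KludgeSettings, features layer.FeatureSettings) (api.Handler, error) {
	cfg := &Config{
		Policy:             policy,
		XMLDecoder:         dec,
		DefaultMaxAge:      3600, // illustrative value only
		NotificatorEnabled: false,
		Kludge:             kludge,
		Features:           features,
	}
	return New(log, obj, nil, cfg)
}
```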
|
||||
var _ api.Handler = (*handler)(nil)
|
||||
|
||||
// New creates new api.Handler using given logger and client.
|
||||
func New(log *zap.Logger, obj layer.Client, notificator Notificator, cfg Config, storage APE, ffsid FrostFSID) (api.Handler, error) {
|
||||
func New(log *zap.Logger, obj layer.Client, notificator Notificator, cfg *Config) (api.Handler, error) {
|
||||
switch {
|
||||
case obj == nil:
|
||||
return nil, errors.New("empty FrostFS Object Layer")
|
||||
case log == nil:
|
||||
return nil, errors.New("empty logger")
|
||||
case storage == nil:
|
||||
return nil, errors.New("empty policy storage")
|
||||
case ffsid == nil:
|
||||
return nil, errors.New("empty frostfsid")
|
||||
}
|
||||
|
||||
if !cfg.NotificatorEnabled() {
|
||||
if !cfg.NotificatorEnabled {
|
||||
log.Warn(logs.NotificatorIsDisabledS3WontProduceNotificationEvents)
|
||||
} else if notificator == nil {
|
||||
return nil, errors.New("empty notificator")
|
||||
|
@ -88,9 +79,7 @@ func New(log *zap.Logger, obj layer.Client, notificator Notificator, cfg Config,
|
|||
log: log,
|
||||
obj: obj,
|
||||
cfg: cfg,
|
||||
ape: storage,
|
||||
notificator: notificator,
|
||||
frostfsid: ffsid,
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
@@ -98,7 +87,7 @@ func New(log *zap.Logger, obj layer.Client, notificator Notificator, cfg Config,
// 1) array of copies numbers sent in request's header has the highest priority.
// 2) array of copies numbers with corresponding location constraint provided in the config file.
// 3) default copies number from the config file wrapped into array.
func (h *handler) pickCopiesNumbers(metadata map[string]string, namespace, locationConstraint string) ([]uint32, error) {
func (h *handler) pickCopiesNumbers(metadata map[string]string, locationConstraint string) ([]uint32, error) {
copiesNumbersStr, ok := metadata[layer.AttributeFrostfsCopiesNumber]
if ok {
result, err := parseCopiesNumbers(copiesNumbersStr)
@@ -108,12 +97,12 @@ func (h *handler) pickCopiesNumbers(metadata map[string]string, namespace, locat
return result, nil
}

copiesNumbers, ok := h.cfg.CopiesNumbers(namespace, locationConstraint)
copiesNumbers, ok := h.cfg.Policy.CopiesNumbers(locationConstraint)
if ok {
return copiesNumbers, nil
}

return h.cfg.DefaultCopiesNumbers(namespace), nil
return h.cfg.Policy.DefaultCopiesNumbers(), nil
}

func parseCopiesNumbers(copiesNumbersStr string) ([]uint32, error) {
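A small test-style sketch of the priority order documented above the function (request header first, then the per-location-constraint table, then the global default), mirroring the existing TestCopiesNumberPicker cases and the support-branch two-argument signature; the mock values are illustrative:

```go
func TestPickCopiesNumbersPrioritySketch(t *testing.T) {
	// Illustrative only: header metadata wins, then the constraint table,
	// then the default (see the comment above pickCopiesNumbers).
	h := handler{cfg: &Config{Policy: &placementPolicyMock{
		copiesNumbers:        map[string][]uint32{"one": {2, 3, 4}},
		defaultCopiesNumbers: []uint32{1},
	}}}

	got, err := h.pickCopiesNumbers(map[string]string{layer.AttributeFrostfsCopiesNumber: "7, 8, 9"}, "one")
	require.NoError(t, err)
	require.Equal(t, []uint32{7, 8, 9}, got) // 1) header value overrides config

	got, err = h.pickCopiesNumbers(nil, "one")
	require.NoError(t, err)
	require.Equal(t, []uint32{2, 3, 4}, got) // 2) constraint table; 3) default otherwise
}
```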
@ -12,9 +12,11 @@ func TestCopiesNumberPicker(t *testing.T) {
|
|||
locationConstraint2 := "two"
|
||||
locationConstraints[locationConstraint1] = []uint32{2, 3, 4}
|
||||
|
||||
config := &configMock{
|
||||
copiesNumbers: locationConstraints,
|
||||
defaultCopiesNumbers: []uint32{1},
|
||||
config := &Config{
|
||||
Policy: &placementPolicyMock{
|
||||
copiesNumbers: locationConstraints,
|
||||
defaultCopiesNumbers: []uint32{1},
|
||||
},
|
||||
}
|
||||
h := handler{
|
||||
cfg: config,
|
||||
|
@ -26,7 +28,7 @@ func TestCopiesNumberPicker(t *testing.T) {
|
|||
metadata["somekey1"] = "5, 6, 7"
|
||||
expectedCopiesNumbers := []uint32{1}
|
||||
|
||||
actualCopiesNumbers, err := h.pickCopiesNumbers(metadata, "", locationConstraint2)
|
||||
actualCopiesNumbers, err := h.pickCopiesNumbers(metadata, locationConstraint2)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expectedCopiesNumbers, actualCopiesNumbers)
|
||||
})
|
||||
|
@ -35,7 +37,7 @@ func TestCopiesNumberPicker(t *testing.T) {
|
|||
metadata["somekey2"] = "6, 7, 8"
|
||||
expectedCopiesNumbers := []uint32{2, 3, 4}
|
||||
|
||||
actualCopiesNumbers, err := h.pickCopiesNumbers(metadata, "", locationConstraint1)
|
||||
actualCopiesNumbers, err := h.pickCopiesNumbers(metadata, locationConstraint1)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expectedCopiesNumbers, actualCopiesNumbers)
|
||||
})
|
||||
|
@ -44,7 +46,7 @@ func TestCopiesNumberPicker(t *testing.T) {
|
|||
metadata["frostfs-copies-number"] = "7, 8, 9"
|
||||
expectedCopiesNumbers := []uint32{7, 8, 9}
|
||||
|
||||
actualCopiesNumbers, err := h.pickCopiesNumbers(metadata, "", locationConstraint2)
|
||||
actualCopiesNumbers, err := h.pickCopiesNumbers(metadata, locationConstraint2)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expectedCopiesNumbers, actualCopiesNumbers)
|
||||
})
|
||||
|
@ -53,7 +55,7 @@ func TestCopiesNumberPicker(t *testing.T) {
|
|||
metadata["frostfs-copies-number"] = "7,8,9"
|
||||
expectedCopiesNumbers := []uint32{7, 8, 9}
|
||||
|
||||
actualCopiesNumbers, err := h.pickCopiesNumbers(metadata, "", locationConstraint2)
|
||||
actualCopiesNumbers, err := h.pickCopiesNumbers(metadata, locationConstraint2)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expectedCopiesNumbers, actualCopiesNumbers)
|
||||
})
|
||||
|
@ -62,7 +64,7 @@ func TestCopiesNumberPicker(t *testing.T) {
|
|||
metadata["frostfs-copies-number"] = "11, 12, 13, "
|
||||
expectedCopiesNumbers := []uint32{11, 12, 13}
|
||||
|
||||
actualCopiesNumbers, err := h.pickCopiesNumbers(metadata, "", locationConstraint2)
|
||||
actualCopiesNumbers, err := h.pickCopiesNumbers(metadata, locationConstraint2)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expectedCopiesNumbers, actualCopiesNumbers)
|
||||
})
|
||||
|
|
|
@@ -108,7 +108,7 @@ func (h *handler) GetObjectAttributesHandler(w http.ResponseWriter, r *http.Requ
return
}

if err = checkPreconditions(info, params.Conditional, h.cfg.MD5Enabled()); err != nil {
if err = checkPreconditions(info, params.Conditional, h.cfg.Features.MD5Enabled()); err != nil {
h.logAndSendError(w, "precondition failed", reqInfo, err)
return
}
@@ -119,7 +119,7 @@ func (h *handler) GetObjectAttributesHandler(w http.ResponseWriter, r *http.Requ
return
}

response, err := encodeToObjectAttributesResponse(info, params, h.cfg.MD5Enabled())
response, err := encodeToObjectAttributesResponse(info, params, h.cfg.Features.MD5Enabled())
if err != nil {
h.logAndSendError(w, "couldn't encode object info to response", reqInfo, err)
return
@@ -137,7 +137,7 @@ func writeAttributesHeaders(h http.Header, info *data.ExtendedObjectInfo, isBuck
h.Set(api.AmzVersionID, info.Version())
}

if info.NodeVersion.IsDeleteMarker {
if info.NodeVersion.IsDeleteMarker() {
h.Set(api.AmzDeleteMarker, strconv.FormatBool(true))
}

@@ -187,9 +187,9 @@ func encodeToObjectAttributesResponse(info *data.ObjectInfo, p *GetObjectAttribu
for _, attr := range p.Attributes {
switch attr {
case eTag:
resp.ETag = data.Quote(info.ETag(md5Enabled))
resp.ETag = info.ETag(md5Enabled)
case storageClass:
resp.StorageClass = api.DefaultStorageClass
resp.StorageClass = "STANDARD"
case objectSize:
resp.ObjectSize = info.Size
case checksum:
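The ETag change above is about quoting: master wraps ETags with data.Quote in responses and strips quotes from conditional headers with data.UnQuote, while the support branch returns the bare value. A tiny sketch of the assumed behavior of that helper pair; the real functions live in api/data, and the bodies below are illustrative:

```go
// Assumed semantics of data.Quote / data.UnQuote as used in this diff:
// responses carry the ETag wrapped in double quotes, request headers may
// arrive either quoted or bare.
func quoteETag(etag string) string { return `"` + etag + `"` }

func unQuoteETag(etag string) string {
	if len(etag) >= 2 && etag[0] == '"' && etag[len(etag)-1] == '"' {
		return etag[1 : len(etag)-1]
	}
	return etag
}
```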
@ -26,7 +26,7 @@ func TestGetObjectPartsAttributes(t *testing.T) {
|
|||
multipartUpload := createMultipartUpload(hc, bktName, objMultipartName, map[string]string{})
|
||||
etag, _ := uploadPart(hc, bktName, objMultipartName, multipartUpload.UploadID, 1, partSize)
|
||||
completeMultipartUpload(hc, bktName, objMultipartName, multipartUpload.UploadID, []string{etag})
|
||||
etagBytes, err := hex.DecodeString(etag[1 : len(etag)-1])
|
||||
etagBytes, err := hex.DecodeString(etag)
|
||||
require.NoError(t, err)
|
||||
|
||||
result = getObjectAttributes(hc, bktName, objMultipartName, objectParts)
|
||||
|
|
|
@ -51,7 +51,7 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
|
|||
ctx = r.Context()
|
||||
reqInfo = middleware.GetReqInfo(ctx)
|
||||
|
||||
cannedACLStatus = aclHeadersStatus(r)
|
||||
containsACL = containsACLHeaders(r)
|
||||
)
|
||||
|
||||
src := r.Header.Get(api.AmzCopySource)
|
||||
|
@ -93,14 +93,7 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
|
|||
return
|
||||
}
|
||||
|
||||
apeEnabled := dstBktInfo.APEEnabled || settings.CannedACL != ""
|
||||
if apeEnabled && cannedACLStatus == aclStatusYes {
|
||||
h.logAndSendError(w, "acl not supported for this bucket", reqInfo, errors.GetAPIError(errors.ErrAccessControlListNotSupported))
|
||||
return
|
||||
}
|
||||
|
||||
needUpdateEACLTable := !(apeEnabled || cannedACLStatus == aclStatusNo)
|
||||
if needUpdateEACLTable {
|
||||
if containsACL {
|
||||
if sessionTokenEACL, err = getSessionTokenSetEACL(ctx); err != nil {
|
||||
h.logAndSendError(w, "could not get eacl session token from a box", reqInfo, err)
|
||||
return
|
||||
|
@ -114,37 +107,24 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
|
|||
}
|
||||
srcObjInfo := extendedSrcObjInfo.ObjectInfo
|
||||
|
||||
srcEncryptionParams, err := formCopySourceEncryptionParams(r)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "invalid sse headers", reqInfo, err)
|
||||
return
|
||||
}
|
||||
dstEncryptionParams, err := formEncryptionParams(r)
|
||||
encryptionParams, err := formEncryptionParams(r)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "invalid sse headers", reqInfo, err)
|
||||
return
|
||||
}
|
||||
|
||||
if err = srcEncryptionParams.MatchObjectEncryption(layer.FormEncryptionInfo(srcObjInfo.Headers)); err != nil {
|
||||
if errors.IsS3Error(err, errors.ErrInvalidEncryptionParameters) || errors.IsS3Error(err, errors.ErrSSEEncryptedObject) ||
|
||||
errors.IsS3Error(err, errors.ErrInvalidSSECustomerParameters) {
|
||||
h.logAndSendError(w, "encryption doesn't match object", reqInfo, err, zap.Error(err))
|
||||
return
|
||||
}
|
||||
if err = encryptionParams.MatchObjectEncryption(layer.FormEncryptionInfo(srcObjInfo.Headers)); err != nil {
|
||||
h.logAndSendError(w, "encryption doesn't match object", reqInfo, errors.GetAPIError(errors.ErrBadRequest), zap.Error(err))
|
||||
return
|
||||
}
|
||||
|
||||
var dstSize uint64
|
||||
srcSize, err := layer.GetObjectSize(srcObjInfo)
|
||||
if err != nil {
|
||||
if srcSize, err := layer.GetObjectSize(srcObjInfo); err != nil {
|
||||
h.logAndSendError(w, "failed to get source object size", reqInfo, err)
|
||||
return
|
||||
} else if srcSize > layer.UploadMaxSize { // https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html
|
||||
} else if srcSize > layer.UploadMaxSize { //https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html
|
||||
h.logAndSendError(w, "too bid object to copy with single copy operation, use multipart upload copy instead", reqInfo, errors.GetAPIError(errors.ErrInvalidRequestLargeCopy))
|
||||
return
|
||||
}
|
||||
dstSize = srcSize
|
||||
|
||||
args, err := parseCopyObjectArgs(r.Header)
|
||||
if err != nil {
|
||||
|
@ -184,7 +164,7 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
|
|||
}
|
||||
}
|
||||
|
||||
if err = checkPreconditions(srcObjInfo, args.Conditional, h.cfg.MD5Enabled()); err != nil {
|
||||
if err = checkPreconditions(srcObjInfo, args.Conditional, h.cfg.Features.MD5Enabled()); err != nil {
|
||||
h.logAndSendError(w, "precondition failed", reqInfo, errors.GetAPIError(errors.ErrPreconditionFailed))
|
||||
return
|
||||
}
|
||||
|
@ -194,24 +174,23 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
|
|||
srcObjInfo.Headers[api.ContentType] = srcObjInfo.ContentType
|
||||
}
|
||||
metadata = makeCopyMap(srcObjInfo.Headers)
|
||||
filterMetadataMap(metadata)
|
||||
delete(metadata, layer.MultipartObjectSize) // object payload will be real one rather than list of compound parts
|
||||
} else if contentType := r.Header.Get(api.ContentType); len(contentType) > 0 {
|
||||
metadata[api.ContentType] = contentType
|
||||
}
|
||||
|
||||
params := &layer.CopyObjectParams{
|
||||
SrcVersioned: srcObjPrm.Versioned(),
|
||||
SrcObject: srcObjInfo,
|
||||
ScrBktInfo: srcObjPrm.BktInfo,
|
||||
DstBktInfo: dstBktInfo,
|
||||
DstObject: reqInfo.ObjectName,
|
||||
DstSize: dstSize,
|
||||
Header: metadata,
|
||||
SrcEncryption: srcEncryptionParams,
|
||||
DstEncryption: dstEncryptionParams,
|
||||
SrcVersioned: srcObjPrm.Versioned(),
|
||||
SrcObject: srcObjInfo,
|
||||
ScrBktInfo: srcObjPrm.BktInfo,
|
||||
DstBktInfo: dstBktInfo,
|
||||
DstObject: reqInfo.ObjectName,
|
||||
SrcSize: srcObjInfo.Size,
|
||||
Header: metadata,
|
||||
Encryption: encryptionParams,
|
||||
}
|
||||
|
||||
params.CopiesNumbers, err = h.pickCopiesNumbers(metadata, reqInfo.Namespace, dstBktInfo.LocationConstraint)
|
||||
params.CopiesNumbers, err = h.pickCopiesNumbers(metadata, dstBktInfo.LocationConstraint)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "invalid copies number", reqInfo, err)
|
||||
return
|
||||
|
@ -231,15 +210,12 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
|
|||
}
|
||||
dstObjInfo := extendedDstObjInfo.ObjectInfo
|
||||
|
||||
if err = middleware.EncodeToResponse(w, &CopyObjectResponse{
|
||||
LastModified: dstObjInfo.Created.UTC().Format(time.RFC3339),
|
||||
ETag: data.Quote(dstObjInfo.ETag(h.cfg.MD5Enabled())),
|
||||
}); err != nil {
|
||||
if err = middleware.EncodeToResponse(w, &CopyObjectResponse{LastModified: dstObjInfo.Created.UTC().Format(time.RFC3339), ETag: dstObjInfo.ETag(h.cfg.Features.MD5Enabled())}); err != nil {
|
||||
h.logAndSendError(w, "something went wrong", reqInfo, err, additional...)
|
||||
return
|
||||
}
|
||||
|
||||
if needUpdateEACLTable {
|
||||
if containsACL {
|
||||
newEaclTable, err := h.getNewEAclTable(r, dstBktInfo, dstObjInfo)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "could not get new eacl table", reqInfo, err)
|
||||
|
@ -278,7 +254,7 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
|
|||
|
||||
s := &SendNotificationParams{
|
||||
Event: EventObjectCreatedCopy,
|
||||
NotificationInfo: data.NotificationInfoFromObject(dstObjInfo, h.cfg.MD5Enabled()),
|
||||
NotificationInfo: data.NotificationInfoFromObject(dstObjInfo, h.cfg.Features.MD5Enabled()),
|
||||
BktInfo: dstBktInfo,
|
||||
ReqInfo: reqInfo,
|
||||
}
|
||||
|
@ -286,7 +262,7 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
|
|||
h.reqLogger(ctx).Error(logs.CouldntSendNotification, zap.Error(err))
|
||||
}
|
||||
|
||||
if dstEncryptionParams.Enabled() {
|
||||
if encryptionParams.Enabled() {
|
||||
addSSECHeaders(w.Header(), r.Header)
|
||||
}
|
||||
}
|
||||
|
@ -299,13 +275,6 @@ func makeCopyMap(headers map[string]string) map[string]string {
|
|||
return res
|
||||
}
|
||||
|
||||
func filterMetadataMap(metadata map[string]string) {
|
||||
delete(metadata, layer.MultipartObjectSize) // object payload will be real one rather than list of compound parts
|
||||
for key := range layer.EncryptionMetadata {
|
||||
delete(metadata, key)
|
||||
}
|
||||
}
|
||||
|
||||
func isCopyingToItselfForbidden(reqInfo *middleware.ReqInfo, srcBucket string, srcObject string, settings *data.BucketSettings, args *copyObjectArgs) bool {
|
||||
if reqInfo.BucketName != srcBucket || reqInfo.ObjectName != srcObject {
|
||||
return false
|
||||
|
@ -321,8 +290,8 @@ func isCopyingToItselfForbidden(reqInfo *middleware.ReqInfo, srcBucket string, s
|
|||
func parseCopyObjectArgs(headers http.Header) (*copyObjectArgs, error) {
|
||||
var err error
|
||||
args := &conditionalArgs{
|
||||
IfMatch: data.UnQuote(headers.Get(api.AmzCopyIfMatch)),
|
||||
IfNoneMatch: data.UnQuote(headers.Get(api.AmzCopyIfNoneMatch)),
|
||||
IfMatch: headers.Get(api.AmzCopyIfMatch),
|
||||
IfNoneMatch: headers.Get(api.AmzCopyIfNoneMatch),
|
||||
}
|
||||
|
||||
if args.IfModifiedSince, err = parseHTTPTime(headers.Get(api.AmzCopyIfModifiedSince)); err != nil {
|
||||
|
|
|
@@ -1,19 +1,13 @@
package handler

import (
	"crypto/md5"
	"crypto/tls"
	"encoding/base64"
	"encoding/xml"
	"net/http"
	"net/url"
	"strconv"
	"testing"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/encryption"
	"github.com/stretchr/testify/require"
)

@@ -22,7 +16,6 @@ type CopyMeta struct {
	Tags map[string]string
	MetadataDirective string
	Metadata map[string]string
	Headers map[string]string
}

func TestCopyWithTaggingDirective(t *testing.T) {

@@ -105,165 +98,6 @@ func TestCopyMultipart(t *testing.T) {
	equalDataSlices(t, data, copiedData)
}

func TestCopyEncryptedToUnencrypted(t *testing.T) {
	tc := prepareHandlerContext(t)

	bktName, srcObjName := "bucket-for-copy", "object-for-copy"
	key1 := []byte("firstencriptionkeyofsourceobject")
	key1Md5 := md5.Sum(key1)
	key2 := []byte("anotherencriptionkeysourceobject")
	key2Md5 := md5.Sum(key2)
	bktInfo := createTestBucket(tc, bktName)

	srcEnc, err := encryption.NewParams(key1)
	require.NoError(t, err)
	srcObjInfo := createTestObject(tc, bktInfo, srcObjName, *srcEnc)
	require.True(t, containEncryptionMetadataHeaders(srcObjInfo.Headers))

	dstObjName := "copy-object"

	// empty copy-source-sse headers
	w, r := prepareTestRequest(tc, bktName, dstObjName, nil)
	r.TLS = &tls.ConnectionState{}
	r.Header.Set(api.AmzCopySource, bktName+"/"+srcObjName)
	tc.Handler().CopyObjectHandler(w, r)

	assertStatus(t, w, http.StatusBadRequest)
	assertS3Error(t, w, errors.GetAPIError(errors.ErrSSEEncryptedObject))

	// empty copy-source-sse-custom-key
	w, r = prepareTestRequest(tc, bktName, dstObjName, nil)
	r.TLS = &tls.ConnectionState{}
	r.Header.Set(api.AmzCopySource, bktName+"/"+srcObjName)
	r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerAlgorithm, layer.AESEncryptionAlgorithm)
	tc.Handler().CopyObjectHandler(w, r)

	assertStatus(t, w, http.StatusBadRequest)
	assertS3Error(t, w, errors.GetAPIError(errors.ErrMissingSSECustomerKey))

	// empty copy-source-sse-custom-algorithm
	w, r = prepareTestRequest(tc, bktName, dstObjName, nil)
	r.TLS = &tls.ConnectionState{}
	r.Header.Set(api.AmzCopySource, bktName+"/"+srcObjName)
	r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerKey, base64.StdEncoding.EncodeToString(key1))
	tc.Handler().CopyObjectHandler(w, r)

	assertStatus(t, w, http.StatusBadRequest)
	assertS3Error(t, w, errors.GetAPIError(errors.ErrMissingSSECustomerAlgorithm))

	// invalid copy-source-sse-custom-key
	w, r = prepareTestRequest(tc, bktName, dstObjName, nil)
	r.TLS = &tls.ConnectionState{}
	r.Header.Set(api.AmzCopySource, bktName+"/"+srcObjName)
	r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerAlgorithm, layer.AESEncryptionAlgorithm)
	r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerKey, base64.StdEncoding.EncodeToString(key2))
	r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerKeyMD5, base64.StdEncoding.EncodeToString(key2Md5[:]))
	tc.Handler().CopyObjectHandler(w, r)

	assertStatus(t, w, http.StatusBadRequest)
	assertS3Error(t, w, errors.GetAPIError(errors.ErrInvalidSSECustomerParameters))

	// success copy
	w, r = prepareTestRequest(tc, bktName, dstObjName, nil)
	r.TLS = &tls.ConnectionState{}
	r.Header.Set(api.AmzCopySource, bktName+"/"+srcObjName)
	r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerAlgorithm, layer.AESEncryptionAlgorithm)
	r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerKey, base64.StdEncoding.EncodeToString(key1))
	r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerKeyMD5, base64.StdEncoding.EncodeToString(key1Md5[:]))
	tc.Handler().CopyObjectHandler(w, r)

	assertStatus(t, w, http.StatusOK)
	dstObjInfo, err := tc.Layer().GetObjectInfo(tc.Context(), &layer.HeadObjectParams{BktInfo: bktInfo, Object: dstObjName})
	require.NoError(t, err)
	require.Equal(t, srcObjInfo.Headers[layer.AttributeDecryptedSize], strconv.Itoa(int(dstObjInfo.Size)))
	require.False(t, containEncryptionMetadataHeaders(dstObjInfo.Headers))
}

func TestCopyUnencryptedToEncrypted(t *testing.T) {
	tc := prepareHandlerContext(t)

	bktName, srcObjName := "bucket-for-copy", "object-for-copy"
	key := []byte("firstencriptionkeyofsourceobject")
	keyMd5 := md5.Sum(key)
	bktInfo := createTestBucket(tc, bktName)

	srcObjInfo := createTestObject(tc, bktInfo, srcObjName, encryption.Params{})
	require.False(t, containEncryptionMetadataHeaders(srcObjInfo.Headers))

	dstObjName := "copy-object"

	// invalid copy-source-sse headers
	w, r := prepareTestRequest(tc, bktName, dstObjName, nil)
	r.TLS = &tls.ConnectionState{}
	r.Header.Set(api.AmzCopySource, bktName+"/"+srcObjName)
	r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerAlgorithm, layer.AESEncryptionAlgorithm)
	r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerKey, base64.StdEncoding.EncodeToString(key))
	r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerKeyMD5, base64.StdEncoding.EncodeToString(keyMd5[:]))
	tc.Handler().CopyObjectHandler(w, r)

	assertStatus(t, w, http.StatusBadRequest)
	assertS3Error(t, w, errors.GetAPIError(errors.ErrInvalidEncryptionParameters))

	// success copy
	w, r = prepareTestRequest(tc, bktName, dstObjName, nil)
	r.TLS = &tls.ConnectionState{}
	r.Header.Set(api.AmzCopySource, bktName+"/"+srcObjName)
	r.Header.Set(api.AmzServerSideEncryptionCustomerAlgorithm, layer.AESEncryptionAlgorithm)
	r.Header.Set(api.AmzServerSideEncryptionCustomerKey, base64.StdEncoding.EncodeToString(key))
	r.Header.Set(api.AmzServerSideEncryptionCustomerKeyMD5, base64.StdEncoding.EncodeToString(keyMd5[:]))
	tc.Handler().CopyObjectHandler(w, r)

	assertStatus(t, w, http.StatusOK)
	dstObjInfo, err := tc.Layer().GetObjectInfo(tc.Context(), &layer.HeadObjectParams{BktInfo: bktInfo, Object: dstObjName})
	require.NoError(t, err)
	require.True(t, containEncryptionMetadataHeaders(dstObjInfo.Headers))
	require.Equal(t, strconv.Itoa(int(srcObjInfo.Size)), dstObjInfo.Headers[layer.AttributeDecryptedSize])
}

func TestCopyEncryptedToEncryptedWithAnotherKey(t *testing.T) {
	tc := prepareHandlerContext(t)

	bktName, srcObjName := "bucket-for-copy", "object-for-copy"
	key1 := []byte("firstencriptionkeyofsourceobject")
	key1Md5 := md5.Sum(key1)
	key2 := []byte("anotherencriptionkeysourceobject")
	key2Md5 := md5.Sum(key2)
	bktInfo := createTestBucket(tc, bktName)

	srcEnc, err := encryption.NewParams(key1)
	require.NoError(t, err)
	srcObjInfo := createTestObject(tc, bktInfo, srcObjName, *srcEnc)
	require.True(t, containEncryptionMetadataHeaders(srcObjInfo.Headers))

	dstObjName := "copy-object"

	w, r := prepareTestRequest(tc, bktName, dstObjName, nil)
	r.TLS = &tls.ConnectionState{}
	r.Header.Set(api.AmzCopySource, bktName+"/"+srcObjName)
	r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerAlgorithm, layer.AESEncryptionAlgorithm)
	r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerKey, base64.StdEncoding.EncodeToString(key1))
	r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerKeyMD5, base64.StdEncoding.EncodeToString(key1Md5[:]))
	r.Header.Set(api.AmzServerSideEncryptionCustomerAlgorithm, layer.AESEncryptionAlgorithm)
	r.Header.Set(api.AmzServerSideEncryptionCustomerKey, base64.StdEncoding.EncodeToString(key2))
	r.Header.Set(api.AmzServerSideEncryptionCustomerKeyMD5, base64.StdEncoding.EncodeToString(key2Md5[:]))
	tc.Handler().CopyObjectHandler(w, r)

	assertStatus(t, w, http.StatusOK)
	dstObjInfo, err := tc.Layer().GetObjectInfo(tc.Context(), &layer.HeadObjectParams{BktInfo: bktInfo, Object: dstObjName})
	require.NoError(t, err)
	require.True(t, containEncryptionMetadataHeaders(dstObjInfo.Headers))
	require.Equal(t, srcObjInfo.Headers[layer.AttributeDecryptedSize], dstObjInfo.Headers[layer.AttributeDecryptedSize])
}

func containEncryptionMetadataHeaders(headers map[string]string) bool {
	for k := range headers {
		if _, ok := layer.EncryptionMetadata[k]; ok {
			return true
		}
	}
	return false
}

func copyObject(hc *handlerContext, bktName, fromObject, toObject string, copyMeta CopyMeta, statusCode int) {
	w, r := prepareTestRequest(hc, bktName, toObject, nil)
	r.Header.Set(api.AmzCopySource, bktName+"/"+fromObject)

@@ -280,10 +114,6 @@ func copyObject(hc *handlerContext, bktName, fromObject, toObject string, copyMe
	}
	r.Header.Set(api.AmzTagging, tagsQuery.Encode())

	for key, val := range copyMeta.Headers {
		r.Header.Set(key, val)
	}

	hc.Handler().CopyObjectHandler(w, r)
	assertStatus(hc.t, w, statusCode)
}

@@ -50,12 +50,11 @@ func (h *handler) PutBucketCorsHandler(w http.ResponseWriter, r *http.Request) {
	}

	p := &layer.PutCORSParams{
		BktInfo: bktInfo,
		Reader: r.Body,
		NewDecoder: h.cfg.NewXMLDecoder,
		BktInfo: bktInfo,
		Reader: r.Body,
	}

	p.CopiesNumbers, err = h.pickCopiesNumbers(parseMetadata(r), reqInfo.Namespace, bktInfo.LocationConstraint)
	p.CopiesNumbers, err = h.pickCopiesNumbers(parseMetadata(r), bktInfo.LocationConstraint)
	if err != nil {
		h.logAndSendError(w, "invalid copies number", reqInfo, err)
		return

@@ -66,10 +65,7 @@ func (h *handler) PutBucketCorsHandler(w http.ResponseWriter, r *http.Request) {
		return
	}

	if err = middleware.WriteSuccessResponseHeadersOnly(w); err != nil {
		h.logAndSendError(w, "write response", reqInfo, err)
		return
	}
	middleware.WriteSuccessResponseHeadersOnly(w)
}

func (h *handler) DeleteBucketCorsHandler(w http.ResponseWriter, r *http.Request) {

@@ -198,15 +194,12 @@ func (h *handler) Preflight(w http.ResponseWriter, r *http.Request) {
			if rule.MaxAgeSeconds > 0 || rule.MaxAgeSeconds == -1 {
				w.Header().Set(api.AccessControlMaxAge, strconv.Itoa(rule.MaxAgeSeconds))
			} else {
				w.Header().Set(api.AccessControlMaxAge, strconv.Itoa(h.cfg.DefaultMaxAge()))
				w.Header().Set(api.AccessControlMaxAge, strconv.Itoa(h.cfg.DefaultMaxAge))
			}
			if o != wildcard {
				w.Header().Set(api.AccessControlAllowCredentials, "true")
			}
			if err = middleware.WriteSuccessResponseHeadersOnly(w); err != nil {
				h.logAndSendError(w, "write response", reqInfo, err)
				return
			}
			middleware.WriteSuccessResponseHeadersOnly(w)
			return
		}
	}

@@ -15,8 +15,8 @@ import (
	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
	"git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

// limitation of AWS https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjects.html

@@ -24,9 +24,8 @@ const maxObjectsToDelete = 1000

// DeleteObjectsRequest -- xml carrying the object key names which should be deleted.
type DeleteObjectsRequest struct {
	XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Delete" json:"-"`
	// Element to enable quiet mode for the request
	Quiet bool `xml:"Quiet,omitempty"`
	Quiet bool
	// List of objects to be deleted
	Objects []ObjectIdentifier `xml:"Object"`
}

@@ -46,10 +45,10 @@ type DeletedObject struct {

// DeleteError structure.
type DeleteError struct {
	Code string `xml:"Code,omitempty"`
	Message string `xml:"Message,omitempty"`
	Key string `xml:"Key,omitempty"`
	VersionID string `xml:"VersionId,omitempty"`
	Code string
	Message string
	Key string
	VersionID string `xml:"versionId,omitempty"`
}

// DeleteObjectsResponse container for multiple object deletes.

@@ -178,7 +177,7 @@ func (h *handler) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Re
	// Unmarshal list of keys to be deleted.
	requested := &DeleteObjectsRequest{}
	if err := h.cfg.NewXMLDecoder(r.Body).Decode(requested); err != nil {
	if err := xml.NewDecoder(r.Body).Decode(requested); err != nil {
		h.logAndSendError(w, "couldn't decode body", reqInfo, errors.GetAPIError(errors.ErrMalformedXML))
		return
	}

@@ -216,14 +215,21 @@ func (h *handler) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Re
		return
	}

	marshaler := zapcore.ArrayMarshalerFunc(func(encoder zapcore.ArrayEncoder) error {
		for _, obj := range toRemove {
			encoder.AppendString(obj.String())
		}
		return nil
	})

	p := &layer.DeleteObjectParams{
		BktInfo: bktInfo,
		Objects: toRemove,
		Settings: bktSettings,
		IsMultiple: true,
		BktInfo: bktInfo,
		Objects: toRemove,
		Settings: bktSettings,
	}
	deletedObjects := h.obj.DeleteObjects(ctx, p)

	var errs []error
	for _, obj := range deletedObjects {
		if obj.Error != nil {
			code := "BadRequest"

@@ -236,6 +242,7 @@ func (h *handler) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Re
				Key: obj.Name,
				VersionID: obj.VersionID,
			})
			errs = append(errs, obj.Error)
		} else if !requested.Quiet {
			deletedObj := DeletedObject{
				ObjectIdentifier: ObjectIdentifier{

@@ -250,9 +257,16 @@ func (h *handler) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Re
			response.DeletedObjects = append(response.DeletedObjects, deletedObj)
		}
	}
	if len(errs) != 0 {
		fields := []zap.Field{
			zap.Array("objects", marshaler),
			zap.Errors("errors", errs),
		}
		h.reqLogger(ctx).Error(logs.CouldntDeleteObjects, fields...)
	}

	if err = middleware.EncodeToResponse(w, response); err != nil {
		h.logAndSendError(w, "could not write response", reqInfo, err)
		h.logAndSendError(w, "could not write response", reqInfo, err, zap.Array("objects", marshaler))
		return
	}
}

@@ -278,17 +292,5 @@ func (h *handler) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) {
	}); err != nil {
		h.logAndSendError(w, "couldn't delete bucket", reqInfo, err)
	}

	chainIDs := []chain.ID{
		getBucketChainID(chain.S3, bktInfo),
		getBucketChainID(chain.Ingress, bktInfo),
		getBucketCannedChainID(chain.S3, bktInfo.CID),
		getBucketCannedChainID(chain.Ingress, bktInfo.CID),
	}
	if err = h.ape.DeleteBucketPolicy(reqInfo.Namespace, bktInfo.CID, chainIDs); err != nil {
		h.logAndSendError(w, "failed to delete policy from storage", reqInfo, err)
		return
	}

	w.WriteHeader(http.StatusNoContent)
}

@ -2,7 +2,6 @@ package handler
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/xml"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"net/url"
|
||||
|
@ -11,12 +10,8 @@ import (
|
|||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
||||
apiErrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/encryption"
|
||||
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
|
||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
|
||||
"github.com/aws/aws-sdk-go/service/s3"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
|
@ -85,38 +80,6 @@ func TestDeleteBucketOnNotFoundError(t *testing.T) {
|
|||
deleteBucket(t, hc, bktName, http.StatusNoContent)
|
||||
}
|
||||
|
||||
func TestDeleteObjectsError(t *testing.T) {
|
||||
hc := prepareHandlerContext(t)
|
||||
|
||||
bktName, objName := "bucket-for-removal", "object-to-delete"
|
||||
bktInfo := createTestBucket(hc, bktName)
|
||||
putBucketVersioning(t, hc, bktName, true)
|
||||
|
||||
putObject(hc, bktName, objName)
|
||||
|
||||
nodeVersion, err := hc.tree.GetLatestVersion(hc.context, bktInfo, objName)
|
||||
require.NoError(t, err)
|
||||
var addr oid.Address
|
||||
addr.SetContainer(bktInfo.CID)
|
||||
addr.SetObject(nodeVersion.OID)
|
||||
|
||||
expectedError := apiErrors.GetAPIError(apiErrors.ErrAccessDenied)
|
||||
hc.tp.SetObjectError(addr, expectedError)
|
||||
|
||||
w := deleteObjectsBase(hc, bktName, [][2]string{{objName, nodeVersion.OID.EncodeToString()}})
|
||||
|
||||
res := &s3.DeleteObjectsOutput{}
|
||||
err = xmlutil.UnmarshalXML(res, xml.NewDecoder(w.Result().Body), "")
|
||||
require.NoError(t, err)
|
||||
|
||||
require.ElementsMatch(t, []*s3.Error{{
|
||||
Code: aws.String(expectedError.Code),
|
||||
Key: aws.String(objName),
|
||||
Message: aws.String(expectedError.Error()),
|
||||
VersionId: aws.String(nodeVersion.OID.EncodeToString()),
|
||||
}}, res.Errors)
|
||||
}
|
||||
|
||||
func TestDeleteObject(t *testing.T) {
|
||||
tc := prepareHandlerContext(t)
|
||||
|
||||
|
@ -168,7 +131,7 @@ func TestDeleteDeletedObject(t *testing.T) {
|
|||
})
|
||||
|
||||
t.Run("versioned bucket not found obj", func(t *testing.T) {
|
||||
bktName, objName := "bucket-versioned-for-removal-not-found", "object-to-delete"
|
||||
bktName, objName := "bucket-versioned-for-removal", "object-to-delete"
|
||||
_, objInfo := createVersionedBucketAndObject(t, tc, bktName, objName)
|
||||
|
||||
versionID, isDeleteMarker := deleteObject(t, tc, bktName, objName, objInfo.VersionID())
|
||||
|
@ -243,7 +206,6 @@ func TestDeleteMarkerVersioned(t *testing.T) {
|
|||
deleteMarkerVersion, isDeleteMarker := deleteObject(t, tc, bktName, objName, emptyVersion)
|
||||
require.True(t, isDeleteMarker)
|
||||
versions := listVersions(t, tc, bktName)
|
||||
require.Len(t, versions.DeleteMarker, 1)
|
||||
require.Equal(t, deleteMarkerVersion, versions.DeleteMarker[0].VersionID)
|
||||
|
||||
_, isDeleteMarker = deleteObject(t, tc, bktName, objName, emptyVersion)
|
||||
|
@ -365,27 +327,6 @@ func TestDeleteMarkers(t *testing.T) {
|
|||
require.Len(t, listOIDsFromMockedFrostFS(t, tc, bktName), 0, "shouldn't be any object in frostfs")
|
||||
}
|
||||
|
||||
func TestGetHeadDeleteMarker(t *testing.T) {
|
||||
hc := prepareHandlerContext(t)
|
||||
|
||||
bktName, objName := "bucket-for-removal", "object-to-delete"
|
||||
createTestBucket(hc, bktName)
|
||||
putBucketVersioning(t, hc, bktName, true)
|
||||
|
||||
putObject(hc, bktName, objName)
|
||||
|
||||
deleteMarkerVersionID, _ := deleteObject(t, hc, bktName, objName, emptyVersion)
|
||||
|
||||
w := headObjectBase(hc, bktName, objName, deleteMarkerVersionID)
|
||||
require.Equal(t, w.Code, http.StatusMethodNotAllowed)
|
||||
require.Equal(t, w.Result().Header.Get(api.AmzDeleteMarker), "true")
|
||||
|
||||
w, r := prepareTestRequest(hc, bktName, objName, nil)
|
||||
hc.Handler().GetObjectHandler(w, r)
|
||||
assertStatus(hc.t, w, http.StatusNotFound)
|
||||
require.Equal(t, w.Result().Header.Get(api.AmzDeleteMarker), "true")
|
||||
}
|
||||
|
||||
func TestDeleteObjectFromListCache(t *testing.T) {
|
||||
tc := prepareHandlerContext(t)
|
||||
|
||||
|
@ -429,25 +370,22 @@ func TestDeleteObjectCheckMarkerReturn(t *testing.T) {
|
|||
func createBucketAndObject(tc *handlerContext, bktName, objName string) (*data.BucketInfo, *data.ObjectInfo) {
|
||||
bktInfo := createTestBucket(tc, bktName)
|
||||
|
||||
objInfo := createTestObject(tc, bktInfo, objName, encryption.Params{})
|
||||
objInfo := createTestObject(tc, bktInfo, objName)
|
||||
|
||||
return bktInfo, objInfo
|
||||
}
|
||||
|
||||
func createVersionedBucketAndObject(_ *testing.T, tc *handlerContext, bktName, objName string) (*data.BucketInfo, *data.ObjectInfo) {
|
||||
bktInfo := createVersionedBucket(tc, bktName)
|
||||
objInfo := createTestObject(tc, bktInfo, objName, encryption.Params{})
|
||||
func createVersionedBucketAndObject(t *testing.T, tc *handlerContext, bktName, objName string) (*data.BucketInfo, *data.ObjectInfo) {
|
||||
createTestBucket(tc, bktName)
|
||||
bktInfo, err := tc.Layer().GetBucketInfo(tc.Context(), bktName)
|
||||
require.NoError(t, err)
|
||||
putBucketVersioning(t, tc, bktName, true)
|
||||
|
||||
objInfo := createTestObject(tc, bktInfo, objName)
|
||||
|
||||
return bktInfo, objInfo
|
||||
}
|
||||
|
||||
func createVersionedBucket(hc *handlerContext, bktName string) *data.BucketInfo {
|
||||
bktInfo := createTestBucket(hc, bktName)
|
||||
putBucketVersioning(hc.t, hc, bktName, true)
|
||||
|
||||
return bktInfo
|
||||
}
|
||||
|
||||
func putBucketVersioning(t *testing.T, tc *handlerContext, bktName string, enabled bool) {
|
||||
cfg := &VersioningConfiguration{Status: "Suspended"}
|
||||
if enabled {
|
||||
|
@ -470,14 +408,6 @@ func deleteObject(t *testing.T, tc *handlerContext, bktName, objName, version st
|
|||
}
|
||||
|
||||
func deleteObjects(t *testing.T, tc *handlerContext, bktName string, objVersions [][2]string) *DeleteObjectsResponse {
|
||||
w := deleteObjectsBase(tc, bktName, objVersions)
|
||||
|
||||
res := &DeleteObjectsResponse{}
|
||||
parseTestResponse(t, w, res)
|
||||
return res
|
||||
}
|
||||
|
||||
func deleteObjectsBase(hc *handlerContext, bktName string, objVersions [][2]string) *httptest.ResponseRecorder {
|
||||
req := &DeleteObjectsRequest{}
|
||||
for _, version := range objVersions {
|
||||
req.Objects = append(req.Objects, ObjectIdentifier{
|
||||
|
@ -486,12 +416,14 @@ func deleteObjectsBase(hc *handlerContext, bktName string, objVersions [][2]stri
|
|||
})
|
||||
}
|
||||
|
||||
w, r := prepareTestRequest(hc, bktName, "", req)
|
||||
w, r := prepareTestRequest(tc, bktName, "", req)
|
||||
r.Header.Set(api.ContentMD5, "")
|
||||
hc.Handler().DeleteMultipleObjectsHandler(w, r)
|
||||
assertStatus(hc.t, w, http.StatusOK)
|
||||
tc.Handler().DeleteMultipleObjectsHandler(w, r)
|
||||
assertStatus(t, w, http.StatusOK)
|
||||
|
||||
return w
|
||||
res := &DeleteObjectsResponse{}
|
||||
parseTestResponse(t, w, res)
|
||||
return res
|
||||
}
|
||||
|
||||
func deleteBucket(t *testing.T, tc *handlerContext, bktName string, code int) {
|
||||
|
|
|
@@ -14,7 +14,6 @@ import (
	"testing"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
	"github.com/stretchr/testify/require"
)

@@ -235,33 +234,24 @@ func multipartUpload(hc *handlerContext, bktName, objName string, headers map[st
}

func createMultipartUploadEncrypted(hc *handlerContext, bktName, objName string, headers map[string]string) *InitiateMultipartUploadResponse {
	return createMultipartUploadOkBase(hc, bktName, objName, true, headers)
	return createMultipartUploadBase(hc, bktName, objName, true, headers)
}

func createMultipartUpload(hc *handlerContext, bktName, objName string, headers map[string]string) *InitiateMultipartUploadResponse {
	return createMultipartUploadOkBase(hc, bktName, objName, false, headers)
	return createMultipartUploadBase(hc, bktName, objName, false, headers)
}

func createMultipartUploadOkBase(hc *handlerContext, bktName, objName string, encrypted bool, headers map[string]string) *InitiateMultipartUploadResponse {
	w := createMultipartUploadBase(hc, bktName, objName, encrypted, headers)
	multipartInitInfo := &InitiateMultipartUploadResponse{}
	readResponse(hc.t, w, http.StatusOK, multipartInitInfo)
	return multipartInitInfo
}

func createMultipartUploadAssertS3Error(hc *handlerContext, bktName, objName string, headers map[string]string, code errors.ErrorCode) {
	w := createMultipartUploadBase(hc, bktName, objName, false, headers)
	assertS3Error(hc.t, w, errors.GetAPIError(code))
}

func createMultipartUploadBase(hc *handlerContext, bktName, objName string, encrypted bool, headers map[string]string) *httptest.ResponseRecorder {
func createMultipartUploadBase(hc *handlerContext, bktName, objName string, encrypted bool, headers map[string]string) *InitiateMultipartUploadResponse {
	w, r := prepareTestRequest(hc, bktName, objName, nil)
	if encrypted {
		setEncryptHeaders(r)
	}
	setHeaders(r, headers)
	hc.Handler().CreateMultipartUploadHandler(w, r)
	return w
	multipartInitInfo := &InitiateMultipartUploadResponse{}
	readResponse(hc.t, w, http.StatusOK, multipartInitInfo)

	return multipartInitInfo
}

func completeMultipartUpload(hc *handlerContext, bktName, objName, uploadID string, partsETags []string) {

@@ -95,10 +95,9 @@ func writeHeaders(h http.Header, requestHeader http.Header, extendedInfo *data.E
		h.Set(api.ContentLength, strconv.FormatUint(info.Size, 10))
	}

	h.Set(api.ETag, data.Quote(info.ETag(md5Enabled)))
	h.Set(api.ETag, info.ETag(md5Enabled))

	h.Set(api.AmzTaggingCount, strconv.Itoa(tagSetLength))
	h.Set(api.AmzStorageClass, api.DefaultStorageClass)

	if !isBucketUnversioned {
		h.Set(api.AmzVersionID, extendedInfo.Version())

@@ -113,9 +112,6 @@ func writeHeaders(h http.Header, requestHeader http.Header, extendedInfo *data.E
	if encodings := info.Headers[api.ContentEncoding]; encodings != "" {
		h.Set(api.ContentEncoding, encodings)
	}
	if contentLanguage := info.Headers[api.ContentLanguage]; contentLanguage != "" {
		h.Set(api.ContentLanguage, contentLanguage)
	}

	for key, val := range info.Headers {
		if layer.IsSystemHeader(key) {

@@ -157,7 +153,7 @@ func (h *handler) GetObjectHandler(w http.ResponseWriter, r *http.Request) {
	}
	info := extendedInfo.ObjectInfo

	if err = checkPreconditions(info, conditional, h.cfg.MD5Enabled()); err != nil {
	if err = checkPreconditions(info, conditional, h.cfg.Features.MD5Enabled()); err != nil {
		h.logAndSendError(w, "precondition failed", reqInfo, err)
		return
	}

@@ -225,7 +221,7 @@ func (h *handler) GetObjectHandler(w http.ResponseWriter, r *http.Request) {
		return
	}

	writeHeaders(w.Header(), r.Header, extendedInfo, len(tagSet), bktSettings.Unversioned(), h.cfg.MD5Enabled())
	writeHeaders(w.Header(), r.Header, extendedInfo, len(tagSet), bktSettings.Unversioned(), h.cfg.Features.MD5Enabled())
	if params != nil {
		writeRangeHeaders(w, params, fullSize)
	} else {

@@ -268,8 +264,8 @@ func checkPreconditions(info *data.ObjectInfo, args *conditionalArgs, md5Enabled
func parseConditionalHeaders(headers http.Header) (*conditionalArgs, error) {
	var err error
	args := &conditionalArgs{
		IfMatch: data.UnQuote(headers.Get(api.IfMatch)),
		IfNoneMatch: data.UnQuote(headers.Get(api.IfNoneMatch)),
		IfMatch: strings.Trim(headers.Get(api.IfMatch), "\""),
		IfNoneMatch: strings.Trim(headers.Get(api.IfNoneMatch), "\""),
	}

	if args.IfModifiedSince, err = parseHTTPTime(headers.Get(api.IfModifiedSince)); err != nil {

@@ -203,19 +203,18 @@ func TestGetObjectEnabledMD5(t *testing.T) {
	_, objInfo := createBucketAndObject(hc, bktName, objName)

	_, headers := getObject(hc, bktName, objName)
	require.Equal(t, data.Quote(objInfo.HashSum), headers.Get(api.ETag))
	require.Equal(t, objInfo.HashSum, headers.Get(api.ETag))

	hc.config.md5Enabled = true
	hc.features.SetMD5Enabled(true)
	_, headers = getObject(hc, bktName, objName)
	require.Equal(t, data.Quote(objInfo.MD5Sum), headers.Get(api.ETag))
	require.Equal(t, objInfo.MD5Sum, headers.Get(api.ETag))
}

func putObjectContent(hc *handlerContext, bktName, objName, content string) http.Header {
func putObjectContent(hc *handlerContext, bktName, objName, content string) {
	body := bytes.NewReader([]byte(content))
	w, r := prepareTestPayloadRequest(hc, bktName, objName, body)
	hc.Handler().PutObjectHandler(w, r)
	assertStatus(hc.t, w, http.StatusOK)
	return w.Result().Header
}

func getObjectRange(t *testing.T, tc *handlerContext, bktName, objName string, start, end int) []byte {

@ -4,10 +4,7 @@ import (
|
|||
"bytes"
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"encoding/hex"
|
||||
"encoding/xml"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
|
@ -19,21 +16,18 @@ import (
|
|||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/cache"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/encryption"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/resolver"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/pkg/service/tree"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
|
||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
|
||||
"git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
|
||||
"git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
|
||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.uber.org/zap"
|
||||
"golang.org/x/exp/slices"
|
||||
)
|
||||
|
||||
type handlerContext struct {
|
||||
|
@ -43,11 +37,9 @@ type handlerContext struct {
|
|||
tp *layer.TestFrostFS
|
||||
tree *tree.Tree
|
||||
context context.Context
|
||||
config *configMock
|
||||
kludge *kludgeSettingsMock
|
||||
|
||||
layerFeatures *layer.FeatureSettingsMock
|
||||
treeMock *tree.ServiceClientMemory
|
||||
cache *layer.Cache
|
||||
features *layer.FeatureSettingsMock
|
||||
}
|
||||
|
||||
func (hc *handlerContext) Handler() *handler {
|
||||
|
@ -66,81 +58,52 @@ func (hc *handlerContext) Context() context.Context {
|
|||
return hc.context
|
||||
}
|
||||
|
||||
type configMock struct {
|
||||
defaultPolicy netmap.PlacementPolicy
|
||||
copiesNumbers map[string][]uint32
|
||||
defaultCopiesNumbers []uint32
|
||||
bypassContentEncodingInChunks bool
|
||||
md5Enabled bool
|
||||
aclEnabled bool
|
||||
type placementPolicyMock struct {
|
||||
defaultPolicy netmap.PlacementPolicy
|
||||
copiesNumbers map[string][]uint32
|
||||
defaultCopiesNumbers []uint32
|
||||
}
|
||||
|
||||
func (c *configMock) DefaultPlacementPolicy(_ string) netmap.PlacementPolicy {
|
||||
return c.defaultPolicy
|
||||
func (p *placementPolicyMock) DefaultPlacementPolicy() netmap.PlacementPolicy {
|
||||
return p.defaultPolicy
|
||||
}
|
||||
|
||||
func (c *configMock) PlacementPolicy(_, _ string) (netmap.PlacementPolicy, bool) {
|
||||
func (p *placementPolicyMock) PlacementPolicy(string) (netmap.PlacementPolicy, bool) {
|
||||
return netmap.PlacementPolicy{}, false
|
||||
}
|
||||
|
||||
func (c *configMock) CopiesNumbers(_, locationConstraint string) ([]uint32, bool) {
|
||||
result, ok := c.copiesNumbers[locationConstraint]
|
||||
func (p *placementPolicyMock) CopiesNumbers(locationConstraint string) ([]uint32, bool) {
|
||||
result, ok := p.copiesNumbers[locationConstraint]
|
||||
return result, ok
|
||||
}
|
||||
|
||||
func (c *configMock) DefaultCopiesNumbers(_ string) []uint32 {
|
||||
return c.defaultCopiesNumbers
|
||||
func (p *placementPolicyMock) DefaultCopiesNumbers() []uint32 {
|
||||
return p.defaultCopiesNumbers
|
||||
}
|
||||
|
||||
func (c *configMock) NewXMLDecoder(r io.Reader) *xml.Decoder {
|
||||
type xmlDecoderProviderMock struct{}
|
||||
|
||||
func (p *xmlDecoderProviderMock) NewCompleteMultipartDecoder(r io.Reader) *xml.Decoder {
|
||||
return xml.NewDecoder(r)
|
||||
}
|
||||
|
||||
func (c *configMock) BypassContentEncodingInChunks() bool {
|
||||
return c.bypassContentEncodingInChunks
|
||||
type kludgeSettingsMock struct {
|
||||
bypassContentEncodingInChunks bool
|
||||
}
|
||||
|
||||
func (c *configMock) DefaultMaxAge() int {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (c *configMock) NotificatorEnabled() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (c *configMock) ResolveZoneList() []string {
|
||||
return []string{}
|
||||
}
|
||||
|
||||
func (c *configMock) IsResolveListAllow() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (c *configMock) CompleteMultipartKeepalive() time.Duration {
|
||||
return time.Duration(0)
|
||||
}
|
||||
|
||||
func (c *configMock) MD5Enabled() bool {
|
||||
return c.md5Enabled
|
||||
}
|
||||
|
||||
func (c *configMock) ACLEnabled() bool {
|
||||
return c.aclEnabled
|
||||
}
|
||||
|
||||
func (c *configMock) ResolveNamespaceAlias(ns string) string {
|
||||
return ns
|
||||
func (k *kludgeSettingsMock) BypassContentEncodingInChunks() bool {
|
||||
return k.bypassContentEncodingInChunks
|
||||
}
|
||||
|
||||
func prepareHandlerContext(t *testing.T) *handlerContext {
|
||||
return prepareHandlerContextBase(t, layer.DefaultCachesConfigs(zap.NewExample()))
|
||||
return prepareHandlerContextBase(t, false)
|
||||
}
|
||||
|
||||
func prepareHandlerContextWithMinCache(t *testing.T) *handlerContext {
|
||||
return prepareHandlerContextBase(t, getMinCacheConfig(zap.NewExample()))
|
||||
return prepareHandlerContextBase(t, true)
|
||||
}
|
||||
|
||||
func prepareHandlerContextBase(t *testing.T, cacheCfg *layer.CachesConfig) *handlerContext {
|
||||
func prepareHandlerContextBase(t *testing.T, minCache bool) *handlerContext {
|
||||
key, err := keys.NewPrivateKey()
|
||||
require.NoError(t, err)
|
||||
|
||||
|
@ -155,35 +118,38 @@ func prepareHandlerContextBase(t *testing.T, cacheCfg *layer.CachesConfig) *hand
|
|||
var owner user.ID
|
||||
user.IDFromKey(&owner, key.PrivateKey.PublicKey)
|
||||
|
||||
memCli, err := tree.NewTreeServiceClientMemory()
|
||||
require.NoError(t, err)
|
||||
treeMock := NewTreeServiceMock(t)
|
||||
|
||||
treeMock := tree.NewTree(memCli, zap.NewExample())
|
||||
cacheCfg := layer.DefaultCachesConfigs(l)
|
||||
if minCache {
|
||||
cacheCfg = getMinCacheConfig(l)
|
||||
}
|
||||
|
||||
features := &layer.FeatureSettingsMock{}
|
||||
|
||||
layerCfg := &layer.Config{
|
||||
Cache: layer.NewCache(cacheCfg),
|
||||
Caches: cacheCfg,
|
||||
AnonKey: layer.AnonymousKey{Key: key},
|
||||
Resolver: testResolver,
|
||||
TreeService: treeMock,
|
||||
Features: features,
|
||||
GateOwner: owner,
|
||||
}
|
||||
|
||||
var pp netmap.PlacementPolicy
|
||||
err = pp.DecodeString("REP 1")
|
||||
require.NoError(t, err)
|
||||
|
||||
cfg := &configMock{
|
||||
defaultPolicy: pp,
|
||||
}
|
||||
kludge := &kludgeSettingsMock{}
|
||||
|
||||
h := &handler{
|
||||
log: l,
|
||||
obj: layer.NewLayer(l, tp, layerCfg),
|
||||
cfg: cfg,
|
||||
ape: newAPEMock(),
|
||||
frostfsid: newFrostfsIDMock(),
|
||||
log: l,
|
||||
obj: layer.NewLayer(l, tp, layerCfg),
|
||||
cfg: &Config{
|
||||
Policy: &placementPolicyMock{defaultPolicy: pp},
|
||||
XMLDecoder: &xmlDecoderProviderMock{},
|
||||
Kludge: kludge,
|
||||
Features: features,
|
||||
},
|
||||
}
|
||||
|
||||
return &handlerContext{
|
||||
|
@ -193,11 +159,9 @@ func prepareHandlerContextBase(t *testing.T, cacheCfg *layer.CachesConfig) *hand
|
|||
tp: tp,
|
||||
tree: treeMock,
|
||||
context: middleware.SetBoxData(context.Background(), newTestAccessBox(t, key)),
|
||||
config: cfg,
|
||||
kludge: kludge,
|
||||
|
||||
layerFeatures: features,
|
||||
treeMock: memCli,
|
||||
cache: layerCfg.Cache,
|
||||
features: features,
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -211,7 +175,6 @@ func getMinCacheConfig(logger *zap.Logger) *layer.CachesConfig {
|
|||
Logger: logger,
|
||||
Objects: minCacheCfg,
|
||||
ObjectsList: minCacheCfg,
|
||||
SessionList: minCacheCfg,
|
||||
Names: minCacheCfg,
|
||||
Buckets: minCacheCfg,
|
||||
System: minCacheCfg,
|
||||
|
@ -219,130 +182,27 @@ func getMinCacheConfig(logger *zap.Logger) *layer.CachesConfig {
|
|||
}
|
||||
}
|
||||
|
||||
type apeMock struct {
|
||||
chainMap map[engine.Target][]*chain.Chain
|
||||
policyMap map[string][]byte
|
||||
}
|
||||
|
||||
func newAPEMock() *apeMock {
|
||||
return &apeMock{
|
||||
chainMap: map[engine.Target][]*chain.Chain{},
|
||||
policyMap: map[string][]byte{},
|
||||
}
|
||||
}
|
||||
|
||||
func (a *apeMock) AddChain(target engine.Target, c *chain.Chain) error {
|
||||
list := a.chainMap[target]
|
||||
|
||||
ind := slices.IndexFunc(list, func(item *chain.Chain) bool { return bytes.Equal(item.ID, c.ID) })
|
||||
if ind != -1 {
|
||||
list[ind] = c
|
||||
} else {
|
||||
list = append(list, c)
|
||||
}
|
||||
|
||||
a.chainMap[target] = list
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *apeMock) RemoveChain(target engine.Target, chainID chain.ID) error {
|
||||
a.chainMap[target] = slices.DeleteFunc(a.chainMap[target], func(item *chain.Chain) bool { return bytes.Equal(item.ID, chainID) })
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *apeMock) ListChains(target engine.Target) ([]*chain.Chain, error) {
|
||||
return a.chainMap[target], nil
|
||||
}
|
||||
|
||||
func (a *apeMock) PutPolicy(namespace string, cnrID cid.ID, policy []byte) error {
|
||||
a.policyMap[namespace+cnrID.EncodeToString()] = policy
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *apeMock) DeletePolicy(namespace string, cnrID cid.ID) error {
|
||||
delete(a.policyMap, namespace+cnrID.EncodeToString())
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *apeMock) PutBucketPolicy(ns string, cnrID cid.ID, policy []byte, chain []*chain.Chain) error {
|
||||
if err := a.PutPolicy(ns, cnrID, policy); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for i := range chain {
|
||||
if err := a.AddChain(engine.ContainerTarget(cnrID.EncodeToString()), chain[i]); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *apeMock) DeleteBucketPolicy(ns string, cnrID cid.ID, chainIDs []chain.ID) error {
|
||||
if err := a.DeletePolicy(ns, cnrID); err != nil {
|
||||
return err
|
||||
}
|
||||
for i := range chainIDs {
|
||||
if err := a.RemoveChain(engine.ContainerTarget(cnrID.EncodeToString()), chainIDs[i]); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *apeMock) GetBucketPolicy(ns string, cnrID cid.ID) ([]byte, error) {
|
||||
policy, ok := a.policyMap[ns+cnrID.EncodeToString()]
|
||||
if !ok {
|
||||
return nil, errors.New("not found")
|
||||
}
|
||||
|
||||
return policy, nil
|
||||
}
|
||||
|
||||
func (a *apeMock) SaveACLChains(cid string, chains []*chain.Chain) error {
|
||||
for i := range chains {
|
||||
if err := a.AddChain(engine.ContainerTarget(cid), chains[i]); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type frostfsidMock struct {
|
||||
data map[string]*keys.PublicKey
|
||||
}
|
||||
|
||||
func newFrostfsIDMock() *frostfsidMock {
|
||||
return &frostfsidMock{data: map[string]*keys.PublicKey{}}
|
||||
}
|
||||
|
||||
func (f *frostfsidMock) GetUserAddress(account, user string) (string, error) {
|
||||
res, ok := f.data[account+user]
|
||||
if !ok {
|
||||
return "", fmt.Errorf("not found")
|
||||
}
|
||||
|
||||
return res.Address(), nil
|
||||
}
|
||||
|
||||
func (f *frostfsidMock) GetUserKey(account, user string) (string, error) {
|
||||
res, ok := f.data[account+user]
|
||||
if !ok {
|
||||
return "", fmt.Errorf("not found")
|
||||
}
|
||||
|
||||
return hex.EncodeToString(res.Bytes()), nil
|
||||
func NewTreeServiceMock(t *testing.T) *tree.Tree {
|
||||
memCli, err := tree.NewTreeServiceClientMemory()
|
||||
require.NoError(t, err)
|
||||
return tree.NewTree(memCli, zap.NewExample())
|
||||
}
|
||||
|
||||
func createTestBucket(hc *handlerContext, bktName string) *data.BucketInfo {
|
||||
info := createBucket(hc, bktName)
|
||||
return info.BktInfo
|
||||
_, err := hc.MockedPool().CreateContainer(hc.Context(), layer.PrmContainerCreate{
|
||||
Creator: hc.owner,
|
||||
Name: bktName,
|
||||
BasicACL: acl.PublicRWExtended,
|
||||
})
|
||||
require.NoError(hc.t, err)
|
||||
|
||||
bktInfo, err := hc.Layer().GetBucketInfo(hc.Context(), bktName)
|
||||
require.NoError(hc.t, err)
|
||||
return bktInfo
|
||||
}
|
||||
|
||||
func createTestBucketWithLock(hc *handlerContext, bktName string, conf *data.ObjectLockConfiguration) *data.BucketInfo {
|
||||
res, err := hc.MockedPool().CreateContainer(hc.Context(), layer.PrmContainerCreate{
|
||||
cnrID, err := hc.MockedPool().CreateContainer(hc.Context(), layer.PrmContainerCreate{
|
||||
Creator: hc.owner,
|
||||
Name: bktName,
|
||||
AdditionalAttributes: [][2]string{{layer.AttributeLockEnabled, "true"}},
|
||||
|
@ -352,22 +212,17 @@ func createTestBucketWithLock(hc *handlerContext, bktName string, conf *data.Obj
|
|||
var ownerID user.ID
|
||||
|
||||
bktInfo := &data.BucketInfo{
|
||||
CID: res.ContainerID,
|
||||
Name: bktName,
|
||||
ObjectLockEnabled: true,
|
||||
Owner: ownerID,
|
||||
HomomorphicHashDisabled: res.HomomorphicHashDisabled,
|
||||
CID: cnrID,
|
||||
Name: bktName,
|
||||
ObjectLockEnabled: true,
|
||||
Owner: ownerID,
|
||||
}
|
||||
|
||||
key, err := keys.NewPrivateKey()
|
||||
require.NoError(hc.t, err)
|
||||
|
||||
sp := &layer.PutSettingsParams{
|
||||
BktInfo: bktInfo,
|
||||
Settings: &data.BucketSettings{
|
||||
Versioning: data.VersioningEnabled,
|
||||
LockConfiguration: conf,
|
||||
OwnerKey: key.PublicKey(),
|
||||
},
|
||||
}
|
||||
|
||||
|
@ -377,7 +232,7 @@ func createTestBucketWithLock(hc *handlerContext, bktName string, conf *data.Obj
|
|||
return bktInfo
|
||||
}
|
||||
|
||||
func createTestObject(hc *handlerContext, bktInfo *data.BucketInfo, objName string, encryption encryption.Params) *data.ObjectInfo {
|
||||
func createTestObject(hc *handlerContext, bktInfo *data.BucketInfo, objName string) *data.ObjectInfo {
|
||||
content := make([]byte, 1024)
|
||||
_, err := rand.Read(content)
|
||||
require.NoError(hc.t, err)
|
||||
|
@ -387,12 +242,11 @@ func createTestObject(hc *handlerContext, bktInfo *data.BucketInfo, objName stri
|
|||
}
|
||||
|
||||
extObjInfo, err := hc.Layer().PutObject(hc.Context(), &layer.PutObjectParams{
|
||||
BktInfo: bktInfo,
|
||||
Object: objName,
|
||||
Size: uint64(len(content)),
|
||||
Reader: bytes.NewReader(content),
|
||||
Header: header,
|
||||
Encryption: encryption,
|
||||
BktInfo: bktInfo,
|
||||
Object: objName,
|
||||
Size: uint64(len(content)),
|
||||
Reader: bytes.NewReader(content),
|
||||
Header: header,
|
||||
})
|
||||
require.NoError(hc.t, err)
|
||||
|
||||
|
|
|
@@ -65,7 +65,7 @@ func (h *handler) HeadObjectHandler(w http.ResponseWriter, r *http.Request) {
		return
	}

	if err = checkPreconditions(info, conditional, h.cfg.MD5Enabled()); err != nil {
	if err = checkPreconditions(info, conditional, h.cfg.Features.MD5Enabled()); err != nil {
		h.logAndSendError(w, "precondition failed", reqInfo, err)
		return
	}

@@ -118,7 +118,7 @@ func (h *handler) HeadObjectHandler(w http.ResponseWriter, r *http.Request) {
		return
	}

	writeHeaders(w.Header(), r.Header, extendedInfo, len(tagSet), bktSettings.Unversioned(), h.cfg.MD5Enabled())
	writeHeaders(w.Header(), r.Header, extendedInfo, len(tagSet), bktSettings.Unversioned(), h.cfg.Features.MD5Enabled())
	w.WriteHeader(http.StatusOK)
}

@@ -135,18 +135,15 @@ func (h *handler) HeadBucketHandler(w http.ResponseWriter, r *http.Request) {
	w.Header().Set(api.ContainerID, bktInfo.CID.EncodeToString())
	w.Header().Set(api.AmzBucketRegion, bktInfo.LocationConstraint)

	if isAvailableToResolve(bktInfo.Zone, h.cfg.ResolveZoneList(), h.cfg.IsResolveListAllow()) {
	if isAvailableToResolve(bktInfo.Zone, h.cfg.ResolveZoneList, h.cfg.IsResolveListAllow) {
		w.Header().Set(api.ContainerName, bktInfo.Name)
		w.Header().Set(api.ContainerZone, bktInfo.Zone)
	}

	if err = middleware.WriteResponse(w, http.StatusOK, nil, middleware.MimeNone); err != nil {
		h.logAndSendError(w, "write response", reqInfo, err)
		return
	}
	middleware.WriteResponse(w, http.StatusOK, nil, middleware.MimeNone)
}

func (h *handler) setLockingHeaders(bktInfo *data.BucketInfo, lockInfo data.LockInfo, header http.Header) error {
func (h *handler) setLockingHeaders(bktInfo *data.BucketInfo, lockInfo *data.LockInfo, header http.Header) error {
	if !bktInfo.ObjectLockEnabled {
		return nil
	}

@@ -2,6 +2,7 @@ package handler

import (
	"context"
	"encoding/xml"
	"fmt"
	"net/http"
	"strconv"

@@ -41,7 +42,7 @@ func (h *handler) PutBucketObjectLockConfigHandler(w http.ResponseWriter, r *htt
	}

	lockingConf := &data.ObjectLockConfiguration{}
	if err = h.cfg.NewXMLDecoder(r.Body).Decode(lockingConf); err != nil {
	if err = xml.NewDecoder(r.Body).Decode(lockingConf); err != nil {
		h.logAndSendError(w, "couldn't parse locking configuration", reqInfo, err)
		return
	}

@@ -121,7 +122,7 @@ func (h *handler) PutObjectLegalHoldHandler(w http.ResponseWriter, r *http.Reque
	}

	legalHold := &data.LegalHold{}
	if err = h.cfg.NewXMLDecoder(r.Body).Decode(legalHold); err != nil {
	if err = xml.NewDecoder(r.Body).Decode(legalHold); err != nil {
		h.logAndSendError(w, "couldn't parse legal hold configuration", reqInfo, err)
		return
	}

@@ -145,7 +146,7 @@ func (h *handler) PutObjectLegalHoldHandler(w http.ResponseWriter, r *http.Reque
		},
	}

	p.CopiesNumbers, err = h.pickCopiesNumbers(parseMetadata(r), reqInfo.Namespace, bktInfo.LocationConstraint)
	p.CopiesNumbers, err = h.pickCopiesNumbers(parseMetadata(r), bktInfo.LocationConstraint)
	if err != nil {
		h.logAndSendError(w, "invalid copies number", reqInfo, err)
		return

@@ -209,7 +210,7 @@ func (h *handler) PutObjectRetentionHandler(w http.ResponseWriter, r *http.Reque
	}

	retention := &data.Retention{}
	if err = h.cfg.NewXMLDecoder(r.Body).Decode(retention); err != nil {
	if err = xml.NewDecoder(r.Body).Decode(retention); err != nil {
		h.logAndSendError(w, "couldn't parse object retention", reqInfo, err)
		return
	}

@@ -229,7 +230,7 @@ func (h *handler) PutObjectRetentionHandler(w http.ResponseWriter, r *http.Reque
		NewLock: lock,
	}

	p.CopiesNumbers, err = h.pickCopiesNumbers(parseMetadata(r), reqInfo.Namespace, bktInfo.LocationConstraint)
	p.CopiesNumbers, err = h.pickCopiesNumbers(parseMetadata(r), bktInfo.LocationConstraint)
	if err != nil {
		h.logAndSendError(w, "invalid copies number", reqInfo, err)
		return

@@ -13,7 +13,6 @@ import (
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
	apiErrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/encryption"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
	"github.com/stretchr/testify/require"
)

@@ -427,7 +426,7 @@ func TestObjectLegalHold(t *testing.T) {
	bktInfo := createTestBucketWithLock(hc, bktName, nil)

	objName := "obj-for-legal-hold"
	createTestObject(hc, bktInfo, objName, encryption.Params{})
	createTestObject(hc, bktInfo, objName)

	getObjectLegalHold(hc, bktName, objName, legalHoldOff)

@@ -471,7 +470,7 @@ func TestObjectRetention(t *testing.T) {
	bktInfo := createTestBucketWithLock(hc, bktName, nil)

	objName := "obj-for-retention"
	createTestObject(hc, bktInfo, objName, encryption.Params{})
	createTestObject(hc, bktInfo, objName)

	getObjectRetention(hc, bktName, objName, nil, apiErrors.ErrNoSuchKey)

@ -3,6 +3,7 @@ package handler
|
|||
import (
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
|
@ -60,7 +61,7 @@ type (
|
|||
Owner Owner `xml:"Owner"`
|
||||
Parts []*layer.Part `xml:"Part"`
|
||||
PartNumberMarker int `xml:"PartNumberMarker,omitempty"`
|
||||
StorageClass string `xml:"StorageClass"`
|
||||
StorageClass string `xml:"StorageClass,omitempty"`
|
||||
UploadID string `xml:"UploadId"`
|
||||
}
|
||||
|
||||
|
@ -69,7 +70,7 @@ type (
|
|||
Initiator Initiator `xml:"Initiator"`
|
||||
Key string `xml:"Key"`
|
||||
Owner Owner `xml:"Owner"`
|
||||
StorageClass string `xml:"StorageClass"`
|
||||
StorageClass string `xml:"StorageClass,omitempty"`
|
||||
UploadID string `xml:"UploadId"`
|
||||
}
|
||||
|
||||
|
@ -103,9 +104,6 @@ const (
|
|||
|
||||
func (h *handler) CreateMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
|
||||
reqInfo := middleware.GetReqInfo(r.Context())
|
||||
uploadID := uuid.New()
|
||||
cannedACLStatus := aclHeadersStatus(r)
|
||||
additional := []zap.Field{zap.String("uploadID", uploadID.String())}
|
||||
|
||||
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
||||
if err != nil {
|
||||
|
@ -113,17 +111,8 @@ func (h *handler) CreateMultipartUploadHandler(w http.ResponseWriter, r *http.Re
|
|||
return
|
||||
}
|
||||
|
||||
settings, err := h.obj.GetBucketSettings(r.Context(), bktInfo)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "couldn't get bucket settings", reqInfo, err)
|
||||
return
|
||||
}
|
||||
|
||||
apeEnabled := bktInfo.APEEnabled || settings.CannedACL != ""
|
||||
if apeEnabled && cannedACLStatus == aclStatusYes {
|
||||
h.logAndSendError(w, "acl not supported for this bucket", reqInfo, errors.GetAPIError(errors.ErrAccessControlListNotSupported))
|
||||
return
|
||||
}
|
||||
uploadID := uuid.New()
|
||||
additional := []zap.Field{zap.String("uploadID", uploadID.String())}
|
||||
|
||||
p := &layer.CreateMultipartParams{
|
||||
Info: &layer.UploadInfoParams{
|
||||
|
@ -134,8 +123,7 @@ func (h *handler) CreateMultipartUploadHandler(w http.ResponseWriter, r *http.Re
|
|||
Data: &layer.UploadData{},
|
||||
}
|
||||
|
||||
needUpdateEACLTable := !(apeEnabled || cannedACLStatus == aclStatusNo)
|
||||
if needUpdateEACLTable {
|
||||
if containsACLHeaders(r) {
|
||||
key, err := h.bearerTokenIssuerKey(r.Context())
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "couldn't get gate key", reqInfo, err, additional...)
|
||||
|
@ -166,11 +154,8 @@ func (h *handler) CreateMultipartUploadHandler(w http.ResponseWriter, r *http.Re
|
|||
if contentType := r.Header.Get(api.ContentType); len(contentType) > 0 {
|
||||
p.Header[api.ContentType] = contentType
|
||||
}
|
||||
if contentLanguage := r.Header.Get(api.ContentLanguage); len(contentLanguage) > 0 {
|
||||
p.Header[api.ContentLanguage] = contentLanguage
|
||||
}
|
||||
|
||||
p.CopiesNumbers, err = h.pickCopiesNumbers(p.Header, reqInfo.Namespace, bktInfo.LocationConstraint)
|
||||
p.CopiesNumbers, err = h.pickCopiesNumbers(p.Header, bktInfo.LocationConstraint)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "invalid copies number", reqInfo, err, additional...)
|
||||
return
|
||||
|
@ -255,11 +240,10 @@ func (h *handler) UploadPartHandler(w http.ResponseWriter, r *http.Request) {
|
|||
Bkt: bktInfo,
|
||||
Key: reqInfo.ObjectName,
|
||||
},
|
||||
PartNumber: partNumber,
|
||||
Size: size,
|
||||
Reader: body,
|
||||
ContentMD5: r.Header.Get(api.ContentMD5),
|
||||
ContentSHA256Hash: r.Header.Get(api.AmzContentSha256),
|
||||
PartNumber: partNumber,
|
||||
Size: size,
|
||||
Reader: body,
|
||||
ContentMD5: r.Header.Get(api.ContentMD5),
|
||||
}
|
||||
|
||||
p.Info.Encryption, err = formEncryptionParams(r)
|
||||
|
@ -278,11 +262,8 @@ func (h *handler) UploadPartHandler(w http.ResponseWriter, r *http.Request) {
|
|||
addSSECHeaders(w.Header(), r.Header)
|
||||
}
|
||||
|
||||
w.Header().Set(api.ETag, data.Quote(hash))
|
||||
if err = middleware.WriteSuccessResponseHeadersOnly(w); err != nil {
|
||||
h.logAndSendError(w, "write response", reqInfo, err)
|
||||
return
|
||||
}
|
||||
w.Header().Set(api.ETag, hash)
|
||||
middleware.WriteSuccessResponseHeadersOnly(w)
|
||||
}
|
||||
|
||||
func (h *handler) UploadPartCopy(w http.ResponseWriter, r *http.Request) {
|
||||
|
@ -356,23 +337,12 @@ func (h *handler) UploadPartCopy(w http.ResponseWriter, r *http.Request) {
|
|||
return
|
||||
}
|
||||
|
||||
if err = checkPreconditions(srcInfo, args.Conditional, h.cfg.MD5Enabled()); err != nil {
|
||||
if err = checkPreconditions(srcInfo, args.Conditional, h.cfg.Features.MD5Enabled()); err != nil {
|
||||
h.logAndSendError(w, "precondition failed", reqInfo, errors.GetAPIError(errors.ErrPreconditionFailed),
|
||||
additional...)
|
||||
return
|
||||
}
|
||||
|
||||
srcEncryptionParams, err := formCopySourceEncryptionParams(r)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "invalid sse headers", reqInfo, err)
|
||||
return
|
||||
}
|
||||
|
||||
if err = srcEncryptionParams.MatchObjectEncryption(layer.FormEncryptionInfo(srcInfo.Headers)); err != nil {
|
||||
h.logAndSendError(w, "encryption doesn't match object", reqInfo, fmt.Errorf("%w: %s", errors.GetAPIError(errors.ErrBadRequest), err), additional...)
|
||||
return
|
||||
}
|
||||
|
||||
p := &layer.UploadCopyParams{
|
||||
Versioned: headPrm.Versioned(),
|
||||
Info: &layer.UploadInfoParams{
|
||||
|
@@ -380,11 +350,10 @@ func (h *handler) UploadPartCopy(w http.ResponseWriter, r *http.Request) {
			Bkt: bktInfo,
			Key: reqInfo.ObjectName,
		},
		SrcObjInfo: srcInfo,
		SrcBktInfo: srcBktInfo,
		SrcEncryption: srcEncryptionParams,
		PartNumber: partNumber,
		Range: srcRange,
		SrcObjInfo: srcInfo,
		SrcBktInfo: srcBktInfo,
		PartNumber: partNumber,
		Range: srcRange,
	}

	p.Info.Encryption, err = formEncryptionParams(r)
@@ -393,6 +362,11 @@ func (h *handler) UploadPartCopy(w http.ResponseWriter, r *http.Request) {
		return
	}

	if err = p.Info.Encryption.MatchObjectEncryption(layer.FormEncryptionInfo(srcInfo.Headers)); err != nil {
		h.logAndSendError(w, "encryption doesn't match object", reqInfo, fmt.Errorf("%w: %s", errors.GetAPIError(errors.ErrBadRequest), err), additional...)
		return
	}

	info, err := h.obj.UploadPartCopy(ctx, p)
	if err != nil {
		h.logAndSendError(w, "could not upload part copy", reqInfo, err, additional...)
@@ -401,7 +375,7 @@ func (h *handler) UploadPartCopy(w http.ResponseWriter, r *http.Request) {

	response := UploadPartCopyResponse{
		LastModified: info.Created.UTC().Format(time.RFC3339),
		ETag: data.Quote(info.ETag(h.cfg.MD5Enabled())),
		ETag: info.ETag(h.cfg.Features.MD5Enabled()),
	}

	if p.Info.Encryption.Enabled() {
@@ -422,12 +396,6 @@ func (h *handler) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.
		return
	}

	settings, err := h.obj.GetBucketSettings(r.Context(), bktInfo)
	if err != nil {
		h.logAndSendError(w, "could not get bucket settings", reqInfo, err)
		return
	}

	var (
		uploadID = r.URL.Query().Get(uploadIDHeaderName)
		uploadInfo = &layer.UploadInfoParams{
@@ -439,7 +407,7 @@ func (h *handler) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.
	)

	reqBody := new(CompleteMultipartUpload)
	if err = h.cfg.NewXMLDecoder(r.Body).Decode(reqBody); err != nil {
	if err = h.cfg.XMLDecoder.NewCompleteMultipartDecoder(r.Body).Decode(reqBody); err != nil {
		h.logAndSendError(w, "could not read complete multipart upload xml", reqInfo,
			errors.GetAPIError(errors.ErrMalformedXML), additional...)
		return
@@ -454,27 +422,44 @@ func (h *handler) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.
		Parts: reqBody.Parts,
	}

	// Next operations might take some time, so we want to keep client's
	// connection alive. To do so, gateway sends periodic white spaces
	// back to the client the same way as Amazon S3 service does.
	stopPeriodicResponseWriter := periodicXMLWriter(w, h.cfg.CompleteMultipartKeepalive)

	// Start complete multipart upload which may take some time to fetch object
	// and re-upload it part by part.
	objInfo, err := h.completeMultipartUpload(r, c, bktInfo, reqInfo)

	// Stop periodic writer as complete multipart upload is finished
	// successfully or not.
	headerIsWritten := stopPeriodicResponseWriter()

	responseWriter := middleware.EncodeToResponse
	errLogger := h.logAndSendError
	// Do not send XML and HTTP headers if periodic writer was invoked at this point.
	if headerIsWritten {
		responseWriter = middleware.EncodeToResponseNoHeader
		errLogger = h.logAndSendErrorNoHeader
	}

	if err != nil {
		h.logAndSendError(w, "complete multipart error", reqInfo, err, additional...)
		errLogger(w, "complete multipart error", reqInfo, err, additional...)
		return
	}

	response := CompleteMultipartUploadResponse{
		Bucket: objInfo.Bucket,
		Key: objInfo.Name,
		ETag: data.Quote(objInfo.ETag(h.cfg.MD5Enabled())),
		ETag: objInfo.ETag(h.cfg.Features.MD5Enabled()),
	}

	if settings.VersioningEnabled() {
		w.Header().Set(api.AmzVersionID, objInfo.VersionID())
	}
	// Here we previously set api.AmzVersionID header for versioned bucket.
	// It is not possible after #60, because of periodic white
	// space XML writer to keep connection with the client.

	if err = middleware.EncodeToResponse(w, response); err != nil {
		h.logAndSendError(w, "something went wrong", reqInfo, err, additional...)
	if err = responseWriter(w, response); err != nil {
		errLogger(w, "something went wrong", reqInfo, err, additional...)
	}
}

@@ -530,7 +515,7 @@ func (h *handler) completeMultipartUpload(r *http.Request, c *layer.CompleteMult

	s := &SendNotificationParams{
		Event: EventObjectCreatedCompleteMultipartUpload,
		NotificationInfo: data.NotificationInfoFromObject(objInfo, h.cfg.MD5Enabled()),
		NotificationInfo: data.NotificationInfoFromObject(objInfo, h.cfg.Features.MD5Enabled()),
		BktInfo: bktInfo,
		ReqInfo: reqInfo,
	}
@@ -709,8 +694,7 @@ func encodeListMultipartUploadsToResponse(info *layer.ListMultipartUploadsInfo,
				ID: u.Owner.String(),
				DisplayName: u.Owner.String(),
			},
			UploadID: u.UploadID,
			StorageClass: api.DefaultStorageClass,
			UploadID: u.UploadID,
		}
		uploads = append(uploads, m)
	}
@@ -739,6 +723,55 @@ func encodeListPartsToResponse(info *layer.ListPartsInfo, params *layer.ListPart
		PartNumberMarker: params.PartNumberMarker,
		UploadID: params.Info.UploadID,
		Parts: info.Parts,
		StorageClass: api.DefaultStorageClass,
	}
}

// periodicXMLWriter creates go routine to write xml header and whitespaces
// over time to avoid connection drop from the client. To work properly,
// pass `http.ResponseWriter` with implemented `http.Flusher` interface.
// Returns stop function which returns boolean if writer has been used
// during goroutine execution. To disable writer, pass 0 duration value.
func periodicXMLWriter(w io.Writer, dur time.Duration) (stop func() bool) {
	if dur == 0 { // 0 duration disables periodic writer
		return func() bool { return false }
	}

	whitespaceChar := []byte(" ")
	closer := make(chan struct{})
	done := make(chan struct{})
	headerWritten := false

	go func() {
		defer close(done)

		tick := time.NewTicker(dur)
		defer tick.Stop()

		for {
			select {
			case <-tick.C:
				if !headerWritten {
					_, err := w.Write([]byte(xml.Header))
					headerWritten = err == nil
				}
				_, err := w.Write(whitespaceChar)
				if err != nil {
					return // is there anything we can do better than ignore error?
				}
				if buffered, ok := w.(http.Flusher); ok {
					buffered.Flush()
				}
			case <-closer:
				return
			}
		}
	}()

	stop = func() bool {
		close(closer)
		<-done // wait for goroutine to stop
		return headerWritten
	}

	return stop
}

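For readers unfamiliar with the keepalive trick restored above: the gateway periodically writes the XML header and single spaces to the response while a long CompleteMultipartUpload is in flight, the same way Amazon S3 does, so that clients and proxies do not drop the idle connection. The sketch below reproduces the idea against plain net/http; the handler, route, and 10-second interval are illustrative assumptions rather than values from this repository.

```go
package main

import (
	"encoding/xml"
	"net/http"
	"time"
)

// keepAlive writes the XML header once and then a whitespace byte on every
// tick, flushing after each write so intermediaries see traffic while a slow
// backend operation is still running. The returned stop function reports
// whether anything was written (i.e. whether headers already went out).
func keepAlive(w http.ResponseWriter, interval time.Duration) (stop func() bool) {
	done := make(chan struct{})
	finished := make(chan bool)

	go func() {
		wrote := false
		tick := time.NewTicker(interval)
		defer tick.Stop()
		for {
			select {
			case <-tick.C:
				if !wrote {
					_, _ = w.Write([]byte(xml.Header))
					wrote = true
				}
				_, _ = w.Write([]byte(" "))
				if f, ok := w.(http.Flusher); ok {
					f.Flush()
				}
			case <-done:
				finished <- wrote
				return
			}
		}
	}()

	return func() bool {
		close(done)
		return <-finished
	}
}

func slowHandler(w http.ResponseWriter, r *http.Request) {
	stop := keepAlive(w, 10*time.Second) // illustrative interval
	time.Sleep(25 * time.Second)         // stand-in for the long backend operation
	headerWritten := stop()
	_ = headerWritten // if true, status and headers are already on the wire
	_, _ = w.Write([]byte("<Result>ok</Result>"))
}

func main() {
	http.HandleFunc("/complete", slowHandler)
	_ = http.ListenAndServe(":8080", nil)
}
```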
@@ -1,9 +1,8 @@
package handler

import (
	"bytes"
	"crypto/md5"
	"crypto/tls"
	"encoding/base64"
	"encoding/hex"
	"encoding/xml"
	"fmt"
@@ -11,12 +10,11 @@ import (
	"net/url"
	"strconv"
	"testing"
	"time"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
	s3Errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/encryption"
	"github.com/stretchr/testify/require"
)

@@ -24,6 +22,46 @@ const (
	partNumberMarkerQuery = "part-number-marker"
)

func TestPeriodicWriter(t *testing.T) {
	t.Skip()

	const dur = 100 * time.Millisecond
	const whitespaces = 8
	expected := []byte(xml.Header)
	for i := 0; i < whitespaces; i++ {
		expected = append(expected, []byte(" ")...)
	}

	t.Run("writes data", func(t *testing.T) {
		buf := bytes.NewBuffer(nil)
		stop := periodicXMLWriter(buf, dur)

		// N number of whitespaces + half durations to guarantee at least N writes in buffer
		time.Sleep(whitespaces*dur + dur/2)
		require.True(t, stop())
		require.Equal(t, expected, buf.Bytes())

		t.Run("no additional data after stop", func(t *testing.T) {
			time.Sleep(2 * dur)
			require.Equal(t, expected, buf.Bytes())
		})
	})

	t.Run("does not write data", func(t *testing.T) {
		buf := bytes.NewBuffer(nil)
		stop := periodicXMLWriter(buf, dur)
		time.Sleep(dur / 2)
		require.False(t, stop())
		require.Empty(t, buf.Bytes())

		t.Run("disabled", func(t *testing.T) {
			stop = periodicXMLWriter(buf, 0)
			require.False(t, stop())
			require.Empty(t, buf.Bytes())
		})
	})
}

func TestMultipartUploadInvalidPart(t *testing.T) {
	hc := prepareHandlerContext(t)

@@ -38,36 +76,6 @@ func TestMultipartUploadInvalidPart(t *testing.T) {
	assertS3Error(hc.t, w, s3Errors.GetAPIError(s3Errors.ErrEntityTooSmall))
}

func TestDeleteMultipartAllParts(t *testing.T) {
	hc := prepareHandlerContext(t)

	partSize := layer.UploadMinSize
	objLen := 6 * partSize

	bktName, bktName2, objName := "bucket", "bucket2", "object"

	// unversioned bucket
	createTestBucket(hc, bktName)
	multipartUpload(hc, bktName, objName, nil, objLen, partSize)
	deleteObject(t, hc, bktName, objName, emptyVersion)
	require.Empty(t, hc.tp.Objects())

	// encrypted multipart
	multipartUploadEncrypted(hc, bktName, objName, nil, objLen, partSize)
	deleteObject(t, hc, bktName, objName, emptyVersion)
	require.Empty(t, hc.tp.Objects())

	// versions bucket
	createTestBucket(hc, bktName2)
	putBucketVersioning(t, hc, bktName2, true)
	multipartUpload(hc, bktName2, objName, nil, objLen, partSize)
	_, hdr := getObject(hc, bktName2, objName)
	versionID := hdr.Get("X-Amz-Version-Id")
	deleteObject(t, hc, bktName2, objName, emptyVersion)
	deleteObject(t, hc, bktName2, objName, versionID)
	require.Empty(t, hc.tp.Objects())
}

func TestMultipartReUploadPart(t *testing.T) {
	hc := prepareHandlerContext(t)

@@ -210,58 +218,13 @@ func TestMultipartUploadSize(t *testing.T) {
		equalDataSlices(t, data[partSize:], part)
	})

	t.Run("check correct size when part copy", func(_ *testing.T) {
	t.Run("check correct size when part copy", func(t *testing.T) {
		objName2 := "obj2"
		uploadInfo := createMultipartUpload(hc, bktName, objName2, headers)
		sourceCopy := bktName + "/" + objName
		uploadPartCopy(hc, bktName, objName2, uploadInfo.UploadID, 1, sourceCopy, 0, 0)
		uploadPartCopy(hc, bktName, objName2, uploadInfo.UploadID, 2, sourceCopy, 0, partSize)
	})

	t.Run("check correct size when copy part from encrypted source", func(t *testing.T) {
		newBucket, newObjName := "new-bucket", "new-object-multipart"
		bktInfo := createTestBucket(hc, newBucket)

		srcObjName := "source-object"
		key := []byte("firstencriptionkeyofsourceobject")
		keyMd5 := md5.Sum(key)
		srcEnc, err := encryption.NewParams(key)
		require.NoError(t, err)
		srcObjInfo := createTestObject(hc, bktInfo, srcObjName, *srcEnc)

		multipartInfo := createMultipartUpload(hc, newBucket, newObjName, headers)

		sourceCopy := newBucket + "/" + srcObjName

		query := make(url.Values)
		query.Set(uploadIDQuery, multipartInfo.UploadID)
		query.Set(partNumberQuery, "1")

		// empty copy-source-sse headers
		w, r := prepareTestRequestWithQuery(hc, newBucket, newObjName, query, nil)
		r.TLS = &tls.ConnectionState{}
		r.Header.Set(api.AmzCopySource, sourceCopy)
		hc.Handler().UploadPartCopy(w, r)

		assertStatus(t, w, http.StatusBadRequest)

		// success copy
		w, r = prepareTestRequestWithQuery(hc, newBucket, newObjName, query, nil)
		r.TLS = &tls.ConnectionState{}
		r.Header.Set(api.AmzCopySource, sourceCopy)
		r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerAlgorithm, layer.AESEncryptionAlgorithm)
		r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerKey, base64.StdEncoding.EncodeToString(key))
		r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerKeyMD5, base64.StdEncoding.EncodeToString(keyMd5[:]))
		hc.Handler().UploadPartCopy(w, r)

		uploadPartCopyResponse := &UploadPartCopyResponse{}
		readResponse(hc.t, w, http.StatusOK, uploadPartCopyResponse)

		completeMultipartUpload(hc, newBucket, newObjName, multipartInfo.UploadID, []string{uploadPartCopyResponse.ETag})
		attr := getObjectAttributes(hc, newBucket, newObjName, objectParts)
		require.Equal(t, 1, attr.ObjectParts.PartsCount)
		require.Equal(t, srcObjInfo.Headers[layer.AttributeDecryptedSize], strconv.Itoa(attr.ObjectParts.Parts[0].Size))
	})
}

func TestListParts(t *testing.T) {
@@ -294,33 +257,9 @@ func TestListParts(t *testing.T) {
	require.Len(t, list.Parts, 0)
}

func TestMultipartUploadWithContentLanguage(t *testing.T) {
	hc := prepareHandlerContext(t)

	bktName, objName := "bucket-1", "object-1"
	createTestBucket(hc, bktName)

	partSize := 5 * 1024 * 1024
	exceptedContentLanguage := "en"
	headers := map[string]string{
		api.ContentLanguage: exceptedContentLanguage,
	}

	multipartUpload := createMultipartUpload(hc, bktName, objName, headers)
	etag1, _ := uploadPart(hc, bktName, objName, multipartUpload.UploadID, 1, partSize)
	etag2, _ := uploadPart(hc, bktName, objName, multipartUpload.UploadID, 2, partSize)
	w := completeMultipartUploadBase(hc, bktName, objName, multipartUpload.UploadID, []string{etag1, etag2})
	assertStatus(t, w, http.StatusOK)

	w, r := prepareTestRequest(hc, bktName, objName, nil)
	hc.Handler().HeadObjectHandler(w, r)
	require.Equal(t, exceptedContentLanguage, w.Header().Get(api.ContentLanguage))
}

func TestMultipartUploadEnabledMD5(t *testing.T) {
	hc := prepareHandlerContext(t)
	hc.config.md5Enabled = true
	hc.layerFeatures.SetMD5Enabled(true)
	hc.features.SetMD5Enabled(true)

	bktName, objName := "bucket-md5", "object-md5"
	createTestBucket(hc, bktName)

@@ -329,11 +268,11 @@ func TestMultipartUploadEnabledMD5(t *testing.T) {
	multipartUpload := createMultipartUpload(hc, bktName, objName, map[string]string{})
	etag1, partBody1 := uploadPart(hc, bktName, objName, multipartUpload.UploadID, 1, partSize)
	md5Sum1 := md5.Sum(partBody1)
	require.Equal(t, data.Quote(hex.EncodeToString(md5Sum1[:])), etag1)
	require.Equal(t, hex.EncodeToString(md5Sum1[:]), etag1)

	etag2, partBody2 := uploadPart(hc, bktName, objName, multipartUpload.UploadID, 2, partSize)
	md5Sum2 := md5.Sum(partBody2)
	require.Equal(t, data.Quote(hex.EncodeToString(md5Sum2[:])), etag2)
	require.Equal(t, hex.EncodeToString(md5Sum2[:]), etag2)

	w := completeMultipartUploadBase(hc, bktName, objName, multipartUpload.UploadID, []string{etag1, etag2})
	assertStatus(t, w, http.StatusOK)
@@ -341,85 +280,7 @@ func TestMultipartUploadEnabledMD5(t *testing.T) {
	err := xml.NewDecoder(w.Result().Body).Decode(resp)
	require.NoError(t, err)
	completeMD5Sum := md5.Sum(append(md5Sum1[:], md5Sum2[:]...))
	require.Equal(t, data.Quote(hex.EncodeToString(completeMD5Sum[:])+"-2"), resp.ETag)
}

func TestUploadPartCheckContentSHA256(t *testing.T) {
	hc := prepareHandlerContext(t)

	bktName, objName := "bucket-1", "object-1"
	createTestBucket(hc, bktName)
	partSize := 5 * 1024 * 1024

	for _, tc := range []struct {
		name string
		hash string
		content []byte
		error bool
	}{
		{
			name: "invalid hash value",
			hash: "d1b2a59fbea7e20077af9f91b27e95e865061b270be03ff539ab3b73587882e8",
			content: []byte("content"),
			error: true,
		},
		{
			name: "correct hash for empty payload",
			hash: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
			content: []byte(""),
			error: false,
		},
		{
			name: "unsigned payload",
			hash: "UNSIGNED-PAYLOAD",
			content: []byte("content"),
			error: false,
		},
		{
			name: "correct hash",
			hash: "ed7002b439e9ac845f22357d822bac1444730fbdb6016d3ec9432297b9ec9f73",
			content: []byte("content"),
			error: false,
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			multipartUpload := createMultipartUpload(hc, bktName, objName, map[string]string{})

			etag1, data1 := uploadPart(hc, bktName, objName, multipartUpload.UploadID, 1, partSize)

			query := make(url.Values)
			query.Set(uploadIDQuery, multipartUpload.UploadID)
			query.Set(partNumberQuery, strconv.Itoa(2))

			w, r := prepareTestRequestWithQuery(hc, bktName, objName, query, tc.content)
			r.Header.Set(api.AmzContentSha256, tc.hash)
			hc.Handler().UploadPartHandler(w, r)
			if tc.error {
				assertS3Error(t, w, s3Errors.GetAPIError(s3Errors.ErrContentSHA256Mismatch))

				list := listParts(hc, bktName, objName, multipartUpload.UploadID, "0", http.StatusOK)
				require.Len(t, list.Parts, 1)

				w := completeMultipartUploadBase(hc, bktName, objName, multipartUpload.UploadID, []string{etag1})
				assertStatus(t, w, http.StatusOK)

				data, _ := getObject(hc, bktName, objName)
				equalDataSlices(t, data1, data)
				return
			}
			assertStatus(t, w, http.StatusOK)

			list := listParts(hc, bktName, objName, multipartUpload.UploadID, "0", http.StatusOK)
			require.Len(t, list.Parts, 2)

			etag2 := w.Header().Get(api.ETag)
			w = completeMultipartUploadBase(hc, bktName, objName, multipartUpload.UploadID, []string{etag1, etag2})
			assertStatus(t, w, http.StatusOK)

			data, _ := getObject(hc, bktName, objName)
			equalDataSlices(t, append(data1, tc.content...), data)
		})
	}
	require.Equal(t, hex.EncodeToString(completeMD5Sum[:])+"-2", resp.ETag)
}

func uploadPartCopy(hc *handlerContext, bktName, objName, uploadID string, num int, srcObj string, start, end int) *UploadPartCopyResponse {

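TestUploadPartCheckContentSHA256 above exercises the X-Amz-Content-Sha256 check: the header must either be `UNSIGNED-PAYLOAD` or the hex-encoded SHA-256 of the request body. A short sketch of how a client computes that value is shown below; this is generic AWS SigV4 behaviour rather than code from this repository, and the expected outputs in the comments are the hashes used by the test cases above.

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// payloadHash returns the value expected in X-Amz-Content-Sha256 for a fully
// buffered request body: the lowercase hex SHA-256 of the payload bytes.
func payloadHash(body []byte) string {
	sum := sha256.Sum256(body)
	return hex.EncodeToString(sum[:])
}

func main() {
	fmt.Println(payloadHash([]byte("content")))
	// ed7002b439e9ac845f22357d822bac1444730fbdb6016d3ec9432297b9ec9f73
	fmt.Println(payloadHash(nil))
	// e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
}
```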
@@ -7,6 +7,10 @@ import (
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
)

func (h *handler) DeleteBucketPolicyHandler(w http.ResponseWriter, r *http.Request) {
	h.logAndSendError(w, "not supported", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotSupported))
}

func (h *handler) DeleteBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {
	h.logAndSendError(w, "not supported", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotSupported))
}

@@ -2,6 +2,7 @@ package handler

import (
	"context"
	"encoding/xml"
	"fmt"
	"net/http"
	"strings"
@@ -25,6 +26,11 @@ type (
		User string
		Time time.Time
	}

	NotificationConfiguration struct {
		XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ NotificationConfiguation"`
		NotificationConfiguration data.NotificationConfiguration
	}
)

const (
@@ -99,7 +105,7 @@ func (h *handler) PutBucketNotificationHandler(w http.ResponseWriter, r *http.Re
	}

	conf := &data.NotificationConfiguration{}
	if err = h.cfg.NewXMLDecoder(r.Body).Decode(conf); err != nil {
	if err = xml.NewDecoder(r.Body).Decode(conf); err != nil {
		h.logAndSendError(w, "couldn't decode notification configuration", reqInfo, errors.GetAPIError(errors.ErrMalformedXML))
		return
	}
@@ -115,7 +121,7 @@ func (h *handler) PutBucketNotificationHandler(w http.ResponseWriter, r *http.Re
		Configuration: conf,
	}

	p.CopiesNumbers, err = h.pickCopiesNumbers(parseMetadata(r), reqInfo.Namespace, bktInfo.LocationConstraint)
	p.CopiesNumbers, err = h.pickCopiesNumbers(parseMetadata(r), bktInfo.LocationConstraint)
	if err != nil {
		h.logAndSendError(w, "invalid copies number", reqInfo, err)
		return
@@ -149,7 +155,7 @@ func (h *handler) GetBucketNotificationHandler(w http.ResponseWriter, r *http.Re
	}

func (h *handler) sendNotifications(ctx context.Context, p *SendNotificationParams) error {
	if !h.cfg.NotificatorEnabled() {
	if !h.cfg.NotificatorEnabled {
		return nil
	}

@@ -192,7 +198,7 @@ func (h *handler) checkBucketConfiguration(ctx context.Context, conf *data.Notif
		return
	}

	if h.cfg.NotificatorEnabled() {
	if h.cfg.NotificatorEnabled {
		if err = h.notificator.SendTestNotification(q.QueueArn, r.BucketName, r.RequestID, r.Host, layer.TimeNow(ctx)); err != nil {
			return
		}

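The notification diff above adds a NotificationConfiguration wrapper with an explicit XMLName so the handler can decode the S3-style configuration document. A hedged sketch of how such a wrapper decodes a minimal configuration follows; the type below is a cut-down stand-in for the gateway's struct, and the field set and sample ARN are illustrative only.

```go
package main

import (
	"encoding/xml"
	"fmt"
	"strings"
)

// notificationConfiguration is a reduced stand-in for the gateway's type:
// XMLName pins the expected root element and namespace, the rest is payload.
type notificationConfiguration struct {
	XMLName      xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ NotificationConfiguration"`
	QueueConfigs []struct {
		QueueArn string   `xml:"Queue"`
		Events   []string `xml:"Event"`
	} `xml:"QueueConfiguration"`
}

func main() {
	body := `<NotificationConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
  <QueueConfiguration>
    <Queue>arn:aws:sqs:us-west-2:444455556666:queue1</Queue>
    <Event>s3:ObjectCreated:*</Event>
  </QueueConfiguration>
</NotificationConfiguration>`

	var conf notificationConfiguration
	if err := xml.NewDecoder(strings.NewReader(body)).Decode(&conf); err != nil {
		panic(err)
	}
	fmt.Printf("%d queue configuration(s), first queue: %s\n",
		len(conf.QueueConfigs), conf.QueueConfigs[0].QueueArn)
}
```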
@@ -6,7 +6,6 @@ import (
	"strconv"
	"time"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
@@ -53,7 +52,7 @@ func (h *handler) encodeV1(p *layer.ListObjectsParamsV1, list *layer.ListObjects

	res.CommonPrefixes = fillPrefixes(list.Prefixes, p.Encode)

	res.Contents = fillContentsWithOwner(list.Objects, p.Encode, h.cfg.MD5Enabled())
	res.Contents = fillContentsWithOwner(list.Objects, p.Encode, h.cfg.Features.MD5Enabled())

	return res
}
@@ -99,7 +98,7 @@ func (h *handler) encodeV2(p *layer.ListObjectsParamsV2, list *layer.ListObjects

	res.CommonPrefixes = fillPrefixes(list.Prefixes, p.Encode)

	res.Contents = fillContents(list.Objects, p.Encode, p.FetchOwner, h.cfg.MD5Enabled())
	res.Contents = fillContents(list.Objects, p.Encode, p.FetchOwner, h.cfg.Features.MD5Enabled())

	return res
}
@@ -185,26 +184,28 @@ func fillPrefixes(src []string, encode string) []CommonPrefix {
	return dst
}

func fillContentsWithOwner(src []*data.ExtendedNodeVersion, encode string, md5Enabled bool) []Object {
func fillContentsWithOwner(src []*data.ObjectInfo, encode string, md5Enabled bool) []Object {
	return fillContents(src, encode, true, md5Enabled)
}

func fillContents(src []*data.ExtendedNodeVersion, encode string, fetchOwner, md5Enabled bool) []Object {
func fillContents(src []*data.ObjectInfo, encode string, fetchOwner, md5Enabled bool) []Object {
	var dst []Object
	for _, obj := range src {
		res := Object{
			Key: s3PathEncode(obj.NodeVersion.FilePath, encode),
			Size: obj.NodeVersion.Size,
			LastModified: obj.NodeVersion.Created.UTC().Format(time.RFC3339),
			ETag: data.Quote(obj.NodeVersion.GetETag(md5Enabled)),
			StorageClass: api.DefaultStorageClass,
			Key: s3PathEncode(obj.Name, encode),
			Size: obj.Size,
			LastModified: obj.Created.UTC().Format(time.RFC3339),
			ETag: obj.ETag(md5Enabled),
		}

		if size, err := layer.GetObjectSize(obj); err == nil {
			res.Size = size
		}

		if fetchOwner {
			owner := obj.NodeVersion.Owner.String()
			res.Owner = &Owner{
				ID: owner,
				DisplayName: owner,
				ID: obj.Owner.String(),
				DisplayName: obj.Owner.String(),
			}
		}

@@ -232,7 +233,7 @@ func (h *handler) ListBucketObjectVersionsHandler(w http.ResponseWriter, r *http
		return
	}

	response := encodeListObjectVersionsToResponse(info, p.BktInfo.Name, h.cfg.MD5Enabled())
	response := encodeListObjectVersionsToResponse(info, p.BktInfo.Name, h.cfg.Features.MD5Enabled())
	if err = middleware.EncodeToResponse(w, response); err != nil {
		h.logAndSendError(w, "something went wrong", reqInfo, err)
	}
@@ -257,10 +258,6 @@ func parseListObjectVersionsRequest(reqInfo *middleware.ReqInfo) (*layer.ListObj
	res.Encode = queryValues.Get("encoding-type")
	res.VersionIDMarker = queryValues.Get("version-id-marker")

	if res.VersionIDMarker != "" && res.KeyMarker == "" {
		return nil, errors.GetAPIError(errors.VersionIDMarkerWithoutKeyMarker)
	}

	return &res, nil
}

@@ -281,27 +278,26 @@ func encodeListObjectVersionsToResponse(info *layer.ListObjectVersionsInfo, buck
	for _, ver := range info.Version {
		res.Version = append(res.Version, ObjectVersionResponse{
			IsLatest: ver.IsLatest,
			Key: ver.NodeVersion.FilePath,
			LastModified: ver.NodeVersion.Created.UTC().Format(time.RFC3339),
			Key: ver.ObjectInfo.Name,
			LastModified: ver.ObjectInfo.Created.UTC().Format(time.RFC3339),
			Owner: Owner{
				ID: ver.NodeVersion.Owner.String(),
				DisplayName: ver.NodeVersion.Owner.String(),
				ID: ver.ObjectInfo.Owner.String(),
				DisplayName: ver.ObjectInfo.Owner.String(),
			},
			Size: ver.NodeVersion.Size,
			VersionID: ver.Version(),
			ETag: data.Quote(ver.NodeVersion.GetETag(md5Enabled)),
			StorageClass: api.DefaultStorageClass,
			Size: ver.ObjectInfo.Size,
			VersionID: ver.Version(),
			ETag: ver.ObjectInfo.ETag(md5Enabled),
		})
	}
	// this loop is not starting till versioning is not implemented
	for _, del := range info.DeleteMarker {
		res.DeleteMarker = append(res.DeleteMarker, DeleteMarkerEntry{
			IsLatest: del.IsLatest,
			Key: del.NodeVersion.FilePath,
			LastModified: del.NodeVersion.Created.UTC().Format(time.RFC3339),
			Key: del.ObjectInfo.Name,
			LastModified: del.ObjectInfo.Created.UTC().Format(time.RFC3339),
			Owner: Owner{
				ID: del.NodeVersion.Owner.String(),
				DisplayName: del.NodeVersion.Owner.String(),
				ID: del.ObjectInfo.Owner.String(),
				DisplayName: del.ObjectInfo.Owner.String(),
			},
			VersionID: del.Version(),
		})

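For context on the listing changes above: S3 reports ETag values wrapped in double quotes, which is what the data.Quote call on the removed side adds before this branch drops it. A minimal sketch of that convention is shown below; the quote helper mirrors what data.Quote is assumed to do, and the entry type is a made-up stand-in for the gateway's Object response struct.

```go
package main

import (
	"fmt"
	"time"
)

// quote mirrors what a helper like data.Quote is expected to do: S3 returns
// ETag values wrapped in double quotes, both in headers and in XML listings.
func quote(etag string) string {
	return "\"" + etag + "\""
}

// listEntry is a trimmed stand-in for the gateway's Object response type.
type listEntry struct {
	Key          string
	ETag         string
	Size         uint64
	LastModified string
	StorageClass string
}

func main() {
	e := listEntry{
		Key:          "docs/report.pdf",
		ETag:         quote("599b5bf480b3a4bbd80fa7f0e0a62621"),
		Size:         1024,
		LastModified: time.Now().UTC().Format(time.RFC3339),
		StorageClass: "STANDARD", // api.DefaultStorageClass plays this role in the gateway
	}
	fmt.Printf("%+v\n", e)
}
```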
@@ -1,22 +1,14 @@
package handler

import (
	"context"
	"fmt"
	"net/http"
	"net/url"
	"sort"
	"strconv"
	"testing"
	"time"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/cache"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/encryption"
	"github.com/stretchr/testify/require"
	"go.uber.org/zap/zaptest"
)

func TestParseContinuationToken(t *testing.T) {
@@ -65,164 +57,13 @@ func TestListObjectNullVersions(t *testing.T) {
	require.Equal(t, data.UnversionedObjectVersionID, result.Version[1].VersionID)
}

func TestListObjectsWithOldTreeNodes(t *testing.T) {
func TestListObjectsPaging(t *testing.T) {
	hc := prepareHandlerContext(t)

	bktName, objName := "bucket-versioning-enabled", "object"
	bktInfo := createTestBucket(hc, bktName)

	srcEnc, err := encryption.NewParams([]byte("1234567890qwertyuiopasdfghjklzxc"))
	require.NoError(t, err)
	bktName := "bucket-versioning-enabled"
	createTestBucket(hc, bktName)

	n := 10
	objInfos := make([]*data.ObjectInfo, n)
	for i := 0; i < n; i++ {
		objInfos[i] = createTestObject(hc, bktInfo, objName+strconv.Itoa(i), *srcEnc)
	}
	sort.Slice(objInfos, func(i, j int) bool { return objInfos[i].Name < objInfos[j].Name })

	makeAllTreeObjectsOld(hc, bktInfo)

	listV1 := listObjectsV1(hc, bktName, "", "", "", -1)
	checkListOldNodes(hc, listV1.Contents, objInfos)

	listV2 := listObjectsV2(hc, bktName, "", "", "", "", -1)
	checkListOldNodes(hc, listV2.Contents, objInfos)

	listVers := listObjectsVersions(hc, bktName, "", "", "", "", -1)
	checkListVersionsOldNodes(hc, listVers.Version, objInfos)
}

func makeAllTreeObjectsOld(hc *handlerContext, bktInfo *data.BucketInfo) {
	nodes, err := hc.treeMock.GetSubTree(hc.Context(), bktInfo, "version", 0, 0)
	require.NoError(hc.t, err)

	for _, node := range nodes {
		if node.GetNodeID() == 0 {
			continue
		}
		meta := make(map[string]string, len(node.GetMeta()))
		for _, m := range node.GetMeta() {
			if m.GetKey() != "Created" && m.GetKey() != "Owner" {
				meta[m.GetKey()] = string(m.GetValue())
			}
		}

		err = hc.treeMock.MoveNode(hc.Context(), bktInfo, "version", node.GetNodeID(), node.GetParentID(), meta)
		require.NoError(hc.t, err)
	}
}

func checkListOldNodes(hc *handlerContext, list []Object, objInfos []*data.ObjectInfo) {
	require.Len(hc.t, list, len(objInfos))
	for i := range list {
		require.Equal(hc.t, objInfos[i].Name, list[i].Key)
		realSize, err := layer.GetObjectSize(objInfos[i])
		require.NoError(hc.t, err)
		require.Equal(hc.t, objInfos[i].Owner.EncodeToString(), list[i].Owner.ID)
		require.Equal(hc.t, objInfos[i].Created.UTC().Format(time.RFC3339), list[i].LastModified)
		require.Equal(hc.t, realSize, list[i].Size)
	}
}

func checkListVersionsOldNodes(hc *handlerContext, list []ObjectVersionResponse, objInfos []*data.ObjectInfo) {
	require.Len(hc.t, list, len(objInfos))
	for i := range list {
		require.Equal(hc.t, objInfos[i].Name, list[i].Key)
		realSize, err := layer.GetObjectSize(objInfos[i])
		require.NoError(hc.t, err)
		require.Equal(hc.t, objInfos[i].Owner.EncodeToString(), list[i].Owner.ID)
		require.Equal(hc.t, objInfos[i].Created.UTC().Format(time.RFC3339), list[i].LastModified)
		require.Equal(hc.t, realSize, list[i].Size)
	}
}

func TestListObjectsContextCanceled(t *testing.T) {
	layerCfg := layer.DefaultCachesConfigs(zaptest.NewLogger(t))
	layerCfg.SessionList.Lifetime = time.Hour
	layerCfg.SessionList.Size = 1

	hc := prepareHandlerContextBase(t, layerCfg)

	bktName := "bucket-versioning-enabled"
	bktInfo := createTestBucket(hc, bktName)

	for i := 0; i < 4; i++ {
		putObject(hc, bktName, "object"+strconv.Itoa(i))
	}

	result := listObjectsV1(hc, bktName, "", "", "", 2)
	session := hc.cache.GetListSession(hc.owner, cache.CreateListSessionCacheKey(bktInfo.CID, "", result.NextMarker))
	// invoke list again to trigger cache eviction
	// (use empty prefix to check that context canceled on replace)
	listObjectsV1(hc, bktName, "", "", "", 2)
	checkContextCanceled(session.Context, t)

	result2 := listObjectsV2(hc, bktName, "", "", "", "", 2)
	session2 := hc.cache.GetListSession(hc.owner, cache.CreateListSessionCacheKey(bktInfo.CID, "", result2.NextContinuationToken))
	// invoke list again to trigger cache eviction
	// (use non-empty prefix to check that context canceled on cache eviction)
	listObjectsV2(hc, bktName, "o", "", "", "", 2)
	checkContextCanceled(session2.Context, t)

	result3 := listObjectsVersions(hc, bktName, "", "", "", "", 2)
	session3 := hc.cache.GetListSession(hc.owner, cache.CreateListSessionCacheKey(bktInfo.CID, "", result3.NextVersionIDMarker))
	// invoke list again to trigger cache eviction
	listObjectsVersions(hc, bktName, "o", "", "", "", 2)
	checkContextCanceled(session3.Context, t)
}

func checkContextCanceled(ctx context.Context, t *testing.T) {
	select {
	case <-ctx.Done():
	case <-time.After(10 * time.Second):
	}
	require.ErrorIs(t, ctx.Err(), context.Canceled)
}

func TestListObjectsLatestVersions(t *testing.T) {
	hc := prepareHandlerContext(t)

	bktName := "bucket-versioning-enabled"
	createTestBucket(hc, bktName)
	putBucketVersioning(t, hc, bktName, true)

	objName1, objName2 := "object1", "object2"
	objContent1, objContent2 := "content1", "content2"

	putObjectContent(hc, bktName, objName1, objContent1)
	hdr1 := putObjectContent(hc, bktName, objName1, objContent2)
	putObjectContent(hc, bktName, objName2, objContent1)
	hdr2 := putObjectContent(hc, bktName, objName2, objContent2)

	t.Run("listv1", func(t *testing.T) {
		result := listObjectsV1(hc, bktName, "", "", "", -1)

		require.Len(t, result.Contents, 2)
		require.Equal(t, objName1, result.Contents[0].Key)
		require.Equal(t, hdr1.Get(api.ETag), result.Contents[0].ETag)
		require.Equal(t, objName2, result.Contents[1].Key)
		require.Equal(t, hdr2.Get(api.ETag), result.Contents[1].ETag)
	})

	t.Run("listv2", func(t *testing.T) {
		result := listObjectsV2(hc, bktName, "", "", "", "", -1)

		require.Len(t, result.Contents, 2)
		require.Equal(t, objName1, result.Contents[0].Key)
		require.Equal(t, hdr1.Get(api.ETag), result.Contents[0].ETag)
		require.Equal(t, objName2, result.Contents[1].Key)
		require.Equal(t, hdr2.Get(api.ETag), result.Contents[1].ETag)
	})
}

func TestListObjectsVersionsPaging(t *testing.T) {
	hc := prepareHandlerContext(t)

	bktName := "bucket-versioning-enabled"
	createTestBucket(hc, bktName)

	n := 12

	var objects []string
	for i := 0; i < n; i++ {
@@ -246,65 +87,6 @@ func TestListObjectsVersionsPaging(t *testing.T) {
	require.Empty(t, objects)
}

func TestListObjectsVersionsCorrectIsLatestFlag(t *testing.T) {
	hc := prepareHandlerContext(t)

	bktName := "bucket-versioning-enabled"
	createVersionedBucket(hc, bktName)

	objName1, objName2 := "obj1", "obj2"

	n := 9
	listSize := 3
	headers := make([]http.Header, n)
	// objects uploaded: ["obj1"-v1, "obj1"-v2, "obj1"-v3, "obj2"-v1, "obj2"-v2, "obj2"-v3, "obj2"-v4, "obj2"-v5, "obj2"-v6]
	for i := 0; i < n; i++ {
		objName := objName1
		if i >= listSize {
			objName = objName2
		}
		headers[i] = putObjectContent(hc, bktName, objName, fmt.Sprintf("content/%d", i))
	}

	versions := listObjectsVersions(hc, bktName, "", "", "", "", listSize)
	// expected objects: ["obj1"-v3, "obj1"-v2, "obj1"-v1]
	checkListVersionsParts(t, versions, formReverseVersionResponse(objName1, headers[:listSize], true))

	versions = listObjectsVersions(hc, bktName, "", "", versions.NextKeyMarker, versions.NextVersionIDMarker, listSize)
	// expected objects: ["obj2"-v6, "obj2"-v5, "obj2"-v4]
	checkListVersionsParts(t, versions, formReverseVersionResponse(objName2, headers[2*listSize:], true))

	versions = listObjectsVersions(hc, bktName, "", "", versions.NextKeyMarker, versions.NextVersionIDMarker, listSize)
	// expected objects: ["obj2"-v3, "obj2"-v2, "obj2"-v1]
	checkListVersionsParts(t, versions, formReverseVersionResponse(objName2, headers[listSize:2*listSize], false))
}

func formReverseVersionResponse(objName string, headers []http.Header, isLatest bool) []ObjectVersionResponse {
	res := make([]ObjectVersionResponse, len(headers))

	for i, h := range headers {
		ind := len(headers) - 1 - i
		res[ind] = ObjectVersionResponse{
			ETag: h.Get(api.ETag),
			IsLatest: isLatest && ind == 0,
			Key: objName,
			VersionID: h.Get(api.AmzVersionID),
		}
	}

	return res
}

func checkListVersionsParts(t *testing.T, versions *ListObjectsVersionsResponse, expected []ObjectVersionResponse) {
	require.Len(t, versions.Version, len(expected))
	for i, res := range versions.Version {
		require.Equal(t, expected[i].Key, res.Key)
		require.Equal(t, expected[i].ETag, res.ETag)
		require.Equal(t, expected[i].VersionID, res.VersionID)
		require.Equal(t, expected[i].IsLatest, res.IsLatest)
	}
}

func TestS3CompatibilityBucketListV2BothContinuationTokenStartAfter(t *testing.T) {
	tc := prepareHandlerContext(t)

@@ -313,7 +95,7 @@ func TestS3CompatibilityBucketListV2BothContinuationTokenStartAfter(t *testing.T
	bktInfo, _ := createBucketAndObject(tc, bktName, objects[0])

	for _, objName := range objects[1:] {
		createTestObject(tc, bktInfo, objName, encryption.Params{})
		createTestObject(tc, bktInfo, objName)
	}

	listV2Response1 := listObjectsV2(tc, bktName, "", "", "bar", "", 1)
@@ -330,36 +112,6 @@ func TestS3CompatibilityBucketListV2BothContinuationTokenStartAfter(t *testing.T
	require.Equal(t, "quxx", listV2Response2.Contents[1].Key)
}

func TestS3BucketListV2EncodingBasic(t *testing.T) {
	hc := prepareHandlerContext(t)

	bktName := "bucket-for-listing-v1-encoding"
	bktInfo := createTestBucket(hc, bktName)

	objects := []string{"foo+1/bar", "foo/bar/xyzzy", "quux ab/thud", "asdf+b"}
	for _, objName := range objects {
		createTestObject(hc, bktInfo, objName, encryption.Params{})
	}

	query := make(url.Values)
	query.Add("delimiter", "/")
	query.Add("encoding-type", "url")

	w, r := prepareTestFullRequest(hc, bktName, "", query, nil)
	hc.Handler().ListObjectsV2Handler(w, r)
	assertStatus(hc.t, w, http.StatusOK)
	listV2Response := &ListObjectsV2Response{}
	parseTestResponse(hc.t, w, listV2Response)

	require.Equal(t, "/", listV2Response.Delimiter)
	require.Len(t, listV2Response.Contents, 1)
	require.Equal(t, "asdf%2Bb", listV2Response.Contents[0].Key)
	require.Len(t, listV2Response.CommonPrefixes, 3)
	require.Equal(t, "foo%2B1/", listV2Response.CommonPrefixes[0].Prefix)
	require.Equal(t, "foo/", listV2Response.CommonPrefixes[1].Prefix)
	require.Equal(t, "quux%20ab/", listV2Response.CommonPrefixes[2].Prefix)
}

func TestS3BucketListDelimiterBasic(t *testing.T) {
	tc := prepareHandlerContext(t)

@@ -368,7 +120,7 @@ func TestS3BucketListDelimiterBasic(t *testing.T) {
	bktInfo, _ := createBucketAndObject(tc, bktName, objects[0])

	for _, objName := range objects[1:] {
		createTestObject(tc, bktInfo, objName, encryption.Params{})
		createTestObject(tc, bktInfo, objName)
	}

	listV1Response := listObjectsV1(tc, bktName, "", "/", "", -1)
@@ -379,132 +131,6 @@ func TestS3BucketListDelimiterBasic(t *testing.T) {
	require.Equal(t, "quux/", listV1Response.CommonPrefixes[1].Prefix)
}

func TestS3BucketListEmpty(t *testing.T) {
	hc := prepareHandlerContext(t)

	bktName := "bucket-for-listing"
	createTestBucket(hc, bktName)

	versions := listObjectsVersions(hc, bktName, "", "", "", "", -1)
	require.Empty(t, versions.Version)
	require.Empty(t, versions.DeleteMarker)
	require.Empty(t, versions.CommonPrefixes)
}

func TestS3BucketListV2PrefixAlt(t *testing.T) {
	hc := prepareHandlerContext(t)

	bktName := "bucket-for-listing"
	createTestBucket(hc, bktName)

	objects := []string{"bar", "baz", "foo"}
	for _, objName := range objects {
		putObject(hc, bktName, objName)
	}

	response := listObjectsV2(hc, bktName, "ba", "", "", "", -1)

	require.Equal(t, "ba", response.Prefix)
	require.Len(t, response.Contents, 2)
	require.Equal(t, "bar", response.Contents[0].Key)
	require.Equal(t, "baz", response.Contents[1].Key)
	require.Empty(t, response.CommonPrefixes)
}

func TestS3BucketListV2PrefixNotExist(t *testing.T) {
	hc := prepareHandlerContext(t)

	bktName := "bucket-for-listing"
	createTestBucket(hc, bktName)

	objects := []string{"foo/bar", "foo/baz", "quux"}
	for _, objName := range objects {
		putObject(hc, bktName, objName)
	}

	response := listObjectsV2(hc, bktName, "d", "", "", "", -1)

	require.Equal(t, "d", response.Prefix)
	require.Empty(t, response.Contents)
	require.Empty(t, response.CommonPrefixes)
}

func TestS3BucketListV2PrefixUnreadable(t *testing.T) {
	hc := prepareHandlerContext(t)

	bktName := "bucket-for-listing"
	createTestBucket(hc, bktName)

	objects := []string{"foo/bar", "foo/baz", "quux"}
	for _, objName := range objects {
		putObject(hc, bktName, objName)
	}

	response := listObjectsV2(hc, bktName, "\x0a", "", "", "", -1)

	require.Equal(t, "\x0a", response.Prefix)
	require.Empty(t, response.Contents)
	require.Empty(t, response.CommonPrefixes)
}

func TestS3BucketListV2PrefixDelimiterAlt(t *testing.T) {
	hc := prepareHandlerContext(t)

	bktName := "bucket-for-listing"
	createTestBucket(hc, bktName)

	objects := []string{"bar", "bazar", "cab", "foo"}
	for _, objName := range objects {
		putObject(hc, bktName, objName)
	}

	response := listObjectsV2(hc, bktName, "ba", "a", "", "", -1)

	require.Equal(t, "ba", response.Prefix)
	require.Equal(t, "a", response.Delimiter)
	require.Len(t, response.Contents, 1)
	require.Equal(t, "bar", response.Contents[0].Key)
	require.Len(t, response.CommonPrefixes, 1)
	require.Equal(t, "baza", response.CommonPrefixes[0].Prefix)
}

func TestS3BucketListV2PrefixDelimiterDelimiterNotExist(t *testing.T) {
	hc := prepareHandlerContext(t)

	bktName := "bucket-for-listing"
	createTestBucket(hc, bktName)

	objects := []string{"b/a/c", "b/a/g", "b/a/r", "g"}
	for _, objName := range objects {
		putObject(hc, bktName, objName)
	}

	response := listObjectsV2(hc, bktName, "b", "z", "", "", -1)

	require.Len(t, response.Contents, 3)
	require.Equal(t, "b/a/c", response.Contents[0].Key)
	require.Equal(t, "b/a/g", response.Contents[1].Key)
	require.Equal(t, "b/a/r", response.Contents[2].Key)
	require.Empty(t, response.CommonPrefixes)
}

func TestS3BucketListV2PrefixDelimiterPrefixDelimiterNotExist(t *testing.T) {
	hc := prepareHandlerContext(t)

	bktName := "bucket-for-listing"
	createTestBucket(hc, bktName)

	objects := []string{"b/a/c", "b/a/g", "b/a/r", "g"}
	for _, objName := range objects {
		putObject(hc, bktName, objName)
	}

	response := listObjectsV2(hc, bktName, "y", "z", "", "", -1)

	require.Empty(t, response.Contents)
	require.Empty(t, response.CommonPrefixes)
}

func TestS3BucketListV2DelimiterPercentage(t *testing.T) {
	tc := prepareHandlerContext(t)

@@ -513,7 +139,7 @@ func TestS3BucketListV2DelimiterPercentage(t *testing.T) {
	bktInfo, _ := createBucketAndObject(tc, bktName, objects[0])

	for _, objName := range objects[1:] {
		createTestObject(tc, bktInfo, objName, encryption.Params{})
		createTestObject(tc, bktInfo, objName)
	}

	listV2Response := listObjectsV2(tc, bktName, "", "%", "", "", -1)
@@ -525,35 +151,6 @@ func TestS3BucketListV2DelimiterPercentage(t *testing.T) {
	require.Equal(t, "c%", listV2Response.CommonPrefixes[1].Prefix)
}

func TestS3BucketListDelimiterPrefix(t *testing.T) {
	hc := prepareHandlerContext(t)

	bktName := "bucket-for-listing"
	bktInfo := createTestBucket(hc, bktName)

	objects := []string{"asdf", "boo/bar", "boo/baz/xyzzy", "cquux/thud", "cquux/bla"}
	for _, objName := range objects {
		createTestObject(hc, bktInfo, objName, encryption.Params{})
	}

	var empty []string
	delim := "/"
	prefix := ""

	marker := validateListV1(t, hc, bktName, prefix, delim, "", 1, true, []string{"asdf"}, empty, "asdf")
	marker = validateListV1(t, hc, bktName, prefix, delim, marker, 1, true, empty, []string{"boo/"}, "boo/")
	validateListV1(t, hc, bktName, prefix, delim, marker, 1, false, empty, []string{"cquux/"}, "")

	marker = validateListV1(t, hc, bktName, prefix, delim, "", 2, true, []string{"asdf"}, []string{"boo/"}, "boo/")
	validateListV1(t, hc, bktName, prefix, delim, marker, 2, false, empty, []string{"cquux/"}, "")

	prefix = "boo/"
	marker = validateListV1(t, hc, bktName, prefix, delim, "", 1, true, []string{"boo/bar"}, empty, "boo/bar")
	validateListV1(t, hc, bktName, prefix, delim, marker, 1, false, empty, []string{"boo/baz/"}, "")

	validateListV1(t, hc, bktName, prefix, delim, "", 2, false, []string{"boo/bar"}, []string{"boo/baz/"}, "")
}

func TestS3BucketListV2DelimiterPrefix(t *testing.T) {
	tc := prepareHandlerContext(t)

@@ -562,7 +159,7 @@ func TestS3BucketListV2DelimiterPrefix(t *testing.T) {
	bktInfo, _ := createBucketAndObject(tc, bktName, objects[0])

	for _, objName := range objects[1:] {
		createTestObject(tc, bktInfo, objName, encryption.Params{})
		createTestObject(tc, bktInfo, objName)
	}

	var empty []string
@@ -583,120 +180,14 @@ func TestS3BucketListV2DelimiterPrefix(t *testing.T) {
	validateListV2(t, tc, bktName, prefix, delim, "", 2, false, true, []string{"boo/bar"}, []string{"boo/baz/"})
}

func TestS3BucketListDelimiterPrefixUnderscore(t *testing.T) {
	hc := prepareHandlerContext(t)

	bktName := "bucket-for-listing"
	bktInfo := createTestBucket(hc, bktName)

	objects := []string{"_obj1_", "_under1/bar", "_under1/baz/xyzzy", "_under2/thud", "_under2/bla"}
	for _, objName := range objects {
		createTestObject(hc, bktInfo, objName, encryption.Params{})
	}

	var empty []string
	delim := "/"
	prefix := ""

	marker := validateListV1(t, hc, bktName, prefix, delim, "", 1, true, []string{"_obj1_"}, empty, "_obj1_")
	marker = validateListV1(t, hc, bktName, prefix, delim, marker, 1, true, empty, []string{"_under1/"}, "_under1/")
	validateListV1(t, hc, bktName, prefix, delim, marker, 1, false, empty, []string{"_under2/"}, "")

	marker = validateListV1(t, hc, bktName, prefix, delim, "", 2, true, []string{"_obj1_"}, []string{"_under1/"}, "_under1/")
	validateListV1(t, hc, bktName, prefix, delim, marker, 2, false, empty, []string{"_under2/"}, "")

	prefix = "_under1/"
	marker = validateListV1(t, hc, bktName, prefix, delim, "", 1, true, []string{"_under1/bar"}, empty, "_under1/bar")
	validateListV1(t, hc, bktName, prefix, delim, marker, 1, false, empty, []string{"_under1/baz/"}, "")

	validateListV1(t, hc, bktName, prefix, delim, "", 2, false, []string{"_under1/bar"}, []string{"_under1/baz/"}, "")
}

func TestS3BucketListDelimiterNotSkipSpecial(t *testing.T) {
	hc := prepareHandlerContext(t)

	bktName := "bucket-for-listing"
	bktInfo := createTestBucket(hc, bktName)

	objects := []string{"0/"}
	for i := 1000; i < 1999; i++ {
		objects = append(objects, fmt.Sprintf("0/%d", i))
	}

	objects2 := []string{"1999", "1999#", "1999+", "2000"}
	objects = append(objects, objects2...)

	for _, objName := range objects {
		createTestObject(hc, bktInfo, objName, encryption.Params{})
	}

	delimiter := "/"
	list := listObjectsV1(hc, bktName, "", delimiter, "", -1)

	require.Equal(t, delimiter, list.Delimiter)
	require.Equal(t, []CommonPrefix{{Prefix: "0/"}}, list.CommonPrefixes)

	require.Len(t, list.Contents, len(objects2))
	for i := 0; i < len(list.Contents); i++ {
		require.Equal(t, objects2[i], list.Contents[i].Key)
	}
}

func TestMintVersioningListObjectVersionsVersionIDContinuation(t *testing.T) {
	hc := prepareHandlerContext(t)

	bktName, objName := "mint-bucket-for-listing-versions", "objName"
	createTestBucket(hc, bktName)
	putBucketVersioning(t, hc, bktName, true)

	length := 10
	objects := make([]string, length)
	for i := 0; i < length; i++ {
		objects[i] = objName
		putObject(hc, bktName, objName)
	}

	maxKeys := 5

	page1 := listObjectsVersions(hc, bktName, "", "", "", "", maxKeys)
	require.Len(t, page1.Version, maxKeys)
	checkVersionsNames(t, page1, objects)
	require.Equal(t, page1.Version[maxKeys-1].VersionID, page1.NextVersionIDMarker)
	require.True(t, page1.IsTruncated)
	require.Empty(t, page1.KeyMarker)
	require.Empty(t, page1.VersionIDMarker)

	page2 := listObjectsVersions(hc, bktName, "", "", page1.NextKeyMarker, page1.NextVersionIDMarker, maxKeys)
	require.Len(t, page2.Version, maxKeys)
	checkVersionsNames(t, page1, objects)
	require.Empty(t, page2.NextVersionIDMarker)
	require.False(t, page2.IsTruncated)
	require.Equal(t, page1.NextKeyMarker, page2.KeyMarker)
	require.Equal(t, page1.NextVersionIDMarker, page2.VersionIDMarker)
}

func checkVersionsNames(t *testing.T, versions *ListObjectsVersionsResponse, names []string) {
	for i, v := range versions.Version {
		require.Equal(t, names[i], v.Key)
	}
}

func listObjectsV2(hc *handlerContext, bktName, prefix, delimiter, startAfter, continuationToken string, maxKeys int) *ListObjectsV2Response {
	return listObjectsV2Ext(hc, bktName, prefix, delimiter, startAfter, continuationToken, "", maxKeys)
}

func listObjectsV2Ext(hc *handlerContext, bktName, prefix, delimiter, startAfter, continuationToken, encodingType string, maxKeys int) *ListObjectsV2Response {
	query := prepareCommonListObjectsQuery(prefix, delimiter, maxKeys)
	query.Add("fetch-owner", "true")
	if len(startAfter) != 0 {
		query.Add("start-after", startAfter)
	}
	if len(continuationToken) != 0 {
		query.Add("continuation-token", continuationToken)
	}
	if len(encodingType) != 0 {
		query.Add("encoding-type", encodingType)
	}

	w, r := prepareTestFullRequest(hc, bktName, "", query, nil)
	hc.Handler().ListObjectsV2Handler(w, r)
@@ -706,26 +197,6 @@ func listObjectsV2Ext(hc *handlerContext, bktName, prefix, delimiter, startAfter
	return res
}

func validateListV1(t *testing.T, tc *handlerContext, bktName, prefix, delimiter, marker string, maxKeys int,
	isTruncated bool, checkObjects, checkPrefixes []string, nextMarker string) string {
	response := listObjectsV1(tc, bktName, prefix, delimiter, marker, maxKeys)

	require.Equal(t, isTruncated, response.IsTruncated)
	require.Equal(t, nextMarker, response.NextMarker)

	require.Len(t, response.Contents, len(checkObjects))
	for i := 0; i < len(checkObjects); i++ {
		require.Equal(t, checkObjects[i], response.Contents[i].Key)
	}

	require.Len(t, response.CommonPrefixes, len(checkPrefixes))
	for i := 0; i < len(checkPrefixes); i++ {
		require.Equal(t, checkPrefixes[i], response.CommonPrefixes[i].Prefix)
	}

	return response.NextMarker
}

func validateListV2(t *testing.T, tc *handlerContext, bktName, prefix, delimiter, continuationToken string, maxKeys int,
	isTruncated, last bool, checkObjects, checkPrefixes []string) string {
	response := listObjectsV2(tc, bktName, prefix, delimiter, "", continuationToken, maxKeys)

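The listing tests above drive ListObjectsV2 purely through query parameters (prefix, delimiter, start-after, continuation-token). For reference, a client-side pagination loop over the same parameters might look like the sketch below; the endpoint URL, bucket name, and page size are placeholders, request signing is omitted, and response parsing is trimmed to the two fields the loop needs.

```go
package main

import (
	"encoding/xml"
	"fmt"
	"net/http"
	"net/url"
)

// listPage mirrors the two fields a pagination loop needs from a
// ListObjectsV2 response, plus the returned keys.
type listPage struct {
	IsTruncated           bool     `xml:"IsTruncated"`
	NextContinuationToken string   `xml:"NextContinuationToken"`
	Keys                  []string `xml:"Contents>Key"`
}

// listAll pages through a bucket using the continuation-token query
// parameter, the same mechanism the tests above exercise against the handler.
func listAll(endpoint, bucket string) ([]string, error) {
	var keys []string
	token := ""
	for {
		q := url.Values{}
		q.Set("list-type", "2")
		q.Set("max-keys", "2") // small page size just to force pagination
		if token != "" {
			q.Set("continuation-token", token)
		}

		resp, err := http.Get(endpoint + "/" + bucket + "?" + q.Encode())
		if err != nil {
			return nil, err
		}
		var page listPage
		err = xml.NewDecoder(resp.Body).Decode(&page)
		resp.Body.Close()
		if err != nil {
			return nil, err
		}

		keys = append(keys, page.Keys...)
		if !page.IsTruncated {
			return keys, nil
		}
		token = page.NextContinuationToken
	}
}

func main() {
	keys, err := listAll("http://localhost:8080", "bucket-for-listing") // placeholder endpoint
	if err != nil {
		panic(err)
	}
	fmt.Println(keys)
}
```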
@ -4,10 +4,9 @@ import (
|
|||
"bytes"
|
||||
"crypto/md5"
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"encoding/xml"
|
||||
stderrors "errors"
|
||||
errorsStd "errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
|
@ -26,13 +25,8 @@ import (
|
|||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
|
||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
|
||||
"git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
|
||||
"git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
|
||||
"git.frostfs.info/TrueCloudLab/policy-engine/schema/s3"
|
||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
|
@ -186,31 +180,12 @@ func (h *handler) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
|
|||
err error
|
||||
newEaclTable *eacl.Table
|
||||
sessionTokenEACL *session.Container
|
||||
cannedACLStatus = aclHeadersStatus(r)
|
||||
containsACL = containsACLHeaders(r)
|
||||
ctx = r.Context()
|
||||
reqInfo = middleware.GetReqInfo(ctx)
|
||||
)
|
||||
|
||||
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "could not get bucket objInfo", reqInfo, err)
|
||||
return
|
||||
}
|
||||
|
||||
settings, err := h.obj.GetBucketSettings(ctx, bktInfo)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "could not get bucket settings", reqInfo, err)
|
||||
return
|
||||
}
|
||||
|
||||
apeEnabled := bktInfo.APEEnabled || settings.CannedACL != ""
|
||||
if apeEnabled && cannedACLStatus == aclStatusYes {
|
||||
h.logAndSendError(w, "acl not supported for this bucket", reqInfo, errors.GetAPIError(errors.ErrAccessControlListNotSupported))
|
||||
return
|
||||
}
|
||||
|
||||
needUpdateEACLTable := !(apeEnabled || cannedACLStatus == aclStatusNo)
|
||||
if needUpdateEACLTable {
|
||||
if containsACL {
|
||||
if sessionTokenEACL, err = getSessionTokenSetEACL(r.Context()); err != nil {
|
||||
h.logAndSendError(w, "could not get eacl session token from a box", reqInfo, err)
|
||||
return
|
||||
|
@ -223,6 +198,12 @@ func (h *handler) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
|
|||
return
|
||||
}
|
||||
|
||||
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "could not get bucket objInfo", reqInfo, err)
|
||||
return
|
||||
}
|
||||
|
||||
metadata := parseMetadata(r)
|
||||
if contentType := r.Header.Get(api.ContentType); len(contentType) > 0 {
|
||||
metadata[api.ContentType] = contentType
|
||||
|
@ -233,9 +214,6 @@ func (h *handler) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
|
|||
if expires := r.Header.Get(api.Expires); len(expires) > 0 {
|
||||
metadata[api.Expires] = expires
|
||||
}
|
||||
if contentLanguage := r.Header.Get(api.ContentLanguage); len(contentLanguage) > 0 {
|
||||
metadata[api.ContentLanguage] = contentLanguage
|
||||
}
|
||||
|
||||
encryptionParams, err := formEncryptionParams(r)
|
||||
if err != nil {
|
||||
|
@ -258,22 +236,27 @@ func (h *handler) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
|
|||
}
|
||||
|
||||
params := &layer.PutObjectParams{
|
||||
BktInfo: bktInfo,
|
||||
Object: reqInfo.ObjectName,
|
||||
Reader: body,
|
||||
Size: size,
|
||||
Header: metadata,
|
||||
Encryption: encryptionParams,
|
||||
ContentMD5: r.Header.Get(api.ContentMD5),
|
||||
ContentSHA256Hash: r.Header.Get(api.AmzContentSha256),
|
||||
BktInfo: bktInfo,
|
||||
Object: reqInfo.ObjectName,
|
||||
Reader: body,
|
||||
Size: size,
|
||||
Header: metadata,
|
||||
Encryption: encryptionParams,
|
||||
ContentMD5: r.Header.Get(api.ContentMD5),
|
||||
}
|
||||
|
||||
params.CopiesNumbers, err = h.pickCopiesNumbers(metadata, reqInfo.Namespace, bktInfo.LocationConstraint)
|
||||
params.CopiesNumbers, err = h.pickCopiesNumbers(metadata, bktInfo.LocationConstraint)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "invalid copies number", reqInfo, err)
|
||||
return
|
||||
}
|
||||
|
||||
settings, err := h.obj.GetBucketSettings(ctx, bktInfo)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "could not get bucket settings", reqInfo, err)
|
||||
return
|
||||
}
|
||||
|
||||
params.Lock, err = formObjectLock(ctx, bktInfo, settings.LockConfiguration, r.Header)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "could not form object lock", reqInfo, err)
|
||||
|
@ -291,7 +274,7 @@ func (h *handler) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
|
|||
|
||||
s := &SendNotificationParams{
|
||||
Event: EventObjectCreatedPut,
|
||||
NotificationInfo: data.NotificationInfoFromObject(objInfo, h.cfg.MD5Enabled()),
|
||||
NotificationInfo: data.NotificationInfoFromObject(objInfo, h.cfg.Features.MD5Enabled()),
|
||||
BktInfo: bktInfo,
|
||||
ReqInfo: reqInfo,
|
||||
}
|
||||
|
@ -299,7 +282,7 @@ func (h *handler) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
|
|||
h.reqLogger(ctx).Error(logs.CouldntSendNotification, zap.Error(err))
|
||||
}
|
||||
|
||||
if needUpdateEACLTable {
|
||||
if containsACL {
|
||||
if newEaclTable, err = h.getNewEAclTable(r, bktInfo, objInfo); err != nil {
|
||||
h.logAndSendError(w, "could not get new eacl table", reqInfo, err)
|
||||
return
|
||||
|
@ -342,12 +325,9 @@ func (h *handler) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
|
|||
addSSECHeaders(w.Header(), r.Header)
|
||||
}
|
||||
|
||||
w.Header().Set(api.ETag, data.Quote(objInfo.ETag(h.cfg.MD5Enabled())))
|
||||
w.Header().Set(api.ETag, objInfo.ETag(h.cfg.Features.MD5Enabled()))
|
||||
|
||||
if err = middleware.WriteSuccessResponseHeadersOnly(w); err != nil {
|
||||
h.logAndSendError(w, "write response", reqInfo, err)
|
||||
return
|
||||
}
|
||||
middleware.WriteSuccessResponseHeadersOnly(w)
|
||||
}
|
||||
|
||||
func (h *handler) getBodyReader(r *http.Request) (io.ReadCloser, error) {
|
||||
|
@ -370,7 +350,7 @@ func (h *handler) getBodyReader(r *http.Request) (io.ReadCloser, error) {
|
|||
}
|
||||
r.Header.Set(api.ContentEncoding, strings.Join(resultContentEncoding, ","))
|
||||
|
||||
if !chunkedEncoding && !h.cfg.BypassContentEncodingInChunks() {
|
||||
if !chunkedEncoding && !h.cfg.Kludge.BypassContentEncodingInChunks() {
|
||||
return nil, fmt.Errorf("%w: request is not chunk encoded, encodings '%s'",
|
||||
errors.GetAPIError(errors.ErrInvalidEncodingMethod), strings.Join(encodings, ","))
|
||||
}
|
||||
|
@@ -393,38 +373,16 @@ func (h *handler) getBodyReader(r *http.Request) (io.ReadCloser, error) {
}

func formEncryptionParams(r *http.Request) (enc encryption.Params, err error) {
    return formEncryptionParamsBase(r, false)
}

func formCopySourceEncryptionParams(r *http.Request) (enc encryption.Params, err error) {
    return formEncryptionParamsBase(r, true)
}

func formEncryptionParamsBase(r *http.Request, isCopySource bool) (enc encryption.Params, err error) {
    var sseCustomerAlgorithm, sseCustomerKey, sseCustomerKeyMD5 string
    if isCopySource {
        sseCustomerAlgorithm = r.Header.Get(api.AmzCopySourceServerSideEncryptionCustomerAlgorithm)
        sseCustomerKey = r.Header.Get(api.AmzCopySourceServerSideEncryptionCustomerKey)
        sseCustomerKeyMD5 = r.Header.Get(api.AmzCopySourceServerSideEncryptionCustomerKeyMD5)
    } else {
        sseCustomerAlgorithm = r.Header.Get(api.AmzServerSideEncryptionCustomerAlgorithm)
        sseCustomerKey = r.Header.Get(api.AmzServerSideEncryptionCustomerKey)
        sseCustomerKeyMD5 = r.Header.Get(api.AmzServerSideEncryptionCustomerKeyMD5)
    }
    sseCustomerAlgorithm := r.Header.Get(api.AmzServerSideEncryptionCustomerAlgorithm)
    sseCustomerKey := r.Header.Get(api.AmzServerSideEncryptionCustomerKey)
    sseCustomerKeyMD5 := r.Header.Get(api.AmzServerSideEncryptionCustomerKeyMD5)

    if len(sseCustomerAlgorithm) == 0 && len(sseCustomerKey) == 0 && len(sseCustomerKeyMD5) == 0 {
        return
    }

    if r.TLS == nil {
        return enc, errors.GetAPIError(errors.ErrInsecureSSECustomerRequest)
    }

    if len(sseCustomerKey) > 0 && len(sseCustomerAlgorithm) == 0 {
        return enc, errors.GetAPIError(errors.ErrMissingSSECustomerAlgorithm)
    }
    if len(sseCustomerAlgorithm) > 0 && len(sseCustomerKey) == 0 {
        return enc, errors.GetAPIError(errors.ErrMissingSSECustomerKey)
        return enc, errorsStd.New("encryption available only when TLS is enabled")
    }

    if sseCustomerAlgorithm != layer.AESEncryptionAlgorithm {
@@ -433,16 +391,10 @@ func formEncryptionParamsBase(r *http.Request, isCopySource bool) (enc encryptio

    key, err := base64.StdEncoding.DecodeString(sseCustomerKey)
    if err != nil {
        if isCopySource {
            return enc, errors.GetAPIError(errors.ErrInvalidSSECustomerParameters)
        }
        return enc, errors.GetAPIError(errors.ErrInvalidSSECustomerKey)
    }

    if len(key) != layer.AESKeySize {
        if isCopySource {
            return enc, errors.GetAPIError(errors.ErrInvalidSSECustomerParameters)
        }
        return enc, errors.GetAPIError(errors.ErrInvalidSSECustomerKey)
    }
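The two branches above read either the regular or the copy-source SSE-C headers, whose names are defined in the api constants further down in this diff. A minimal client-side sketch of supplying these headers follows; the "AES256" algorithm value and the 32-byte key size (layer.AESKeySize) are assumptions, not taken from this diff.

package main

import (
    "crypto/md5"
    "crypto/rand"
    "encoding/base64"
    "net/http"
)

// setSSECHeaders is a hypothetical helper that fills the customer-provided
// encryption headers which formEncryptionParams validates on a PUT over TLS.
func setSSECHeaders(r *http.Request, key []byte) {
    sum := md5.Sum(key)
    r.Header.Set("x-amz-server-side-encryption-customer-algorithm", "AES256") // assumed algorithm value
    r.Header.Set("x-amz-server-side-encryption-customer-key", base64.StdEncoding.EncodeToString(key))
    r.Header.Set("x-amz-server-side-encryption-customer-key-MD5", base64.StdEncoding.EncodeToString(sum[:]))
}

func main() {
    key := make([]byte, 32) // assumed AES-256 key size
    if _, err := rand.Read(key); err != nil {
        panic(err)
    }
    req, _ := http.NewRequest(http.MethodPut, "https://s3.example.com/bucket/object", nil) // hypothetical endpoint
    setSSECHeaders(req, key)
}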
@ -472,7 +424,7 @@ func (h *handler) PostObject(w http.ResponseWriter, r *http.Request) {
|
|||
ctx = r.Context()
|
||||
reqInfo = middleware.GetReqInfo(ctx)
|
||||
metadata = make(map[string]string)
|
||||
cannedACLStatus = aclHeadersStatus(r)
|
||||
containsACL = containsACLHeaders(r)
|
||||
)
|
||||
|
||||
policy, err := checkPostPolicy(r, reqInfo, metadata)
|
||||
|
@ -483,33 +435,14 @@ func (h *handler) PostObject(w http.ResponseWriter, r *http.Request) {
|
|||
|
||||
if tagging := auth.MultipartFormValue(r, "tagging"); tagging != "" {
|
||||
buffer := bytes.NewBufferString(tagging)
|
||||
tagSet, err = h.readTagSet(buffer)
|
||||
tagSet, err = readTagSet(buffer)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "could not read tag set", reqInfo, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "could not get bucket objInfo", reqInfo, err)
|
||||
return
|
||||
}
|
||||
|
||||
settings, err := h.obj.GetBucketSettings(ctx, bktInfo)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "could not get bucket settings", reqInfo, err)
|
||||
return
|
||||
}
|
||||
|
||||
apeEnabled := bktInfo.APEEnabled || settings.CannedACL != ""
|
||||
if apeEnabled && cannedACLStatus == aclStatusYes {
|
||||
h.logAndSendError(w, "acl not supported for this bucket", reqInfo, errors.GetAPIError(errors.ErrAccessControlListNotSupported))
|
||||
return
|
||||
}
|
||||
|
||||
needUpdateEACLTable := !(apeEnabled || cannedACLStatus == aclStatusNo)
|
||||
if needUpdateEACLTable {
|
||||
if containsACL {
|
||||
if sessionTokenEACL, err = getSessionTokenSetEACL(ctx); err != nil {
|
||||
h.logAndSendError(w, "could not get eacl session token from a box", reqInfo, err)
|
||||
return
|
||||
|
@ -536,6 +469,12 @@ func (h *handler) PostObject(w http.ResponseWriter, r *http.Request) {
|
|||
return
|
||||
}
|
||||
|
||||
bktInfo, err := h.obj.GetBucketInfo(ctx, reqInfo.BucketName)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "could not get bucket info", reqInfo, err)
|
||||
return
|
||||
}
|
||||
|
||||
params := &layer.PutObjectParams{
|
||||
BktInfo: bktInfo,
|
||||
Object: reqInfo.ObjectName,
|
||||
|
@ -553,7 +492,7 @@ func (h *handler) PostObject(w http.ResponseWriter, r *http.Request) {
|
|||
|
||||
s := &SendNotificationParams{
|
||||
Event: EventObjectCreatedPost,
|
||||
NotificationInfo: data.NotificationInfoFromObject(objInfo, h.cfg.MD5Enabled()),
|
||||
NotificationInfo: data.NotificationInfoFromObject(objInfo, h.cfg.Features.MD5Enabled()),
|
||||
BktInfo: bktInfo,
|
||||
ReqInfo: reqInfo,
|
||||
}
|
||||
|
@ -602,7 +541,9 @@ func (h *handler) PostObject(w http.ResponseWriter, r *http.Request) {
|
|||
}
|
||||
}
|
||||
|
||||
if settings.VersioningEnabled() {
|
||||
if settings, err := h.obj.GetBucketSettings(ctx, bktInfo); err != nil {
|
||||
h.reqLogger(ctx).Warn(logs.CouldntGetBucketVersioning, zap.String("bucket name", reqInfo.BucketName), zap.Error(err))
|
||||
} else if settings.VersioningEnabled() {
|
||||
w.Header().Set(api.AmzVersionID, objInfo.VersionID())
|
||||
}
|
||||
|
||||
|
@ -620,21 +561,17 @@ func (h *handler) PostObject(w http.ResponseWriter, r *http.Request) {
|
|||
resp := &PostResponse{
|
||||
Bucket: objInfo.Bucket,
|
||||
Key: objInfo.Name,
|
||||
ETag: data.Quote(objInfo.ETag(h.cfg.MD5Enabled())),
|
||||
ETag: objInfo.ETag(h.cfg.Features.MD5Enabled()),
|
||||
}
|
||||
w.WriteHeader(status)
|
||||
respData, err := middleware.EncodeResponse(resp)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "encode response", reqInfo, err)
|
||||
}
|
||||
if _, err = w.Write(respData); err != nil {
|
||||
if _, err = w.Write(middleware.EncodeResponse(resp)); err != nil {
|
||||
h.logAndSendError(w, "something went wrong", reqInfo, err)
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
w.Header().Set(api.ETag, data.Quote(objInfo.ETag(h.cfg.MD5Enabled())))
|
||||
w.Header().Set(api.ETag, objInfo.ETag(h.cfg.Features.MD5Enabled()))
|
||||
w.WriteHeader(status)
|
||||
}
@@ -654,21 +591,11 @@ func checkPostPolicy(r *http.Request, reqInfo *middleware.ReqInfo, metadata map[
        policy.empty = false
    }

    if r.MultipartForm == nil {
        return nil, stderrors.New("empty multipart form")
    }

    for key, v := range r.MultipartForm.Value {
        value := v[0]
        if key == "file" || key == "policy" || key == "x-amz-signature" || strings.HasPrefix(key, "x-ignore-") {
            continue
        }

        if len(v) != 1 {
            return nil, fmt.Errorf("empty multipart value for key '%s'", key)
        }

        value := v[0]

        if err := policy.CheckField(key, value); err != nil {
            return nil, fmt.Errorf("'%s' form field doesn't match the policy: %w", key, err)
        }
@@ -698,33 +625,9 @@ func checkPostPolicy(r *http.Request, reqInfo *middleware.ReqInfo, metadata map[
    return policy, nil
}

type aclStatus int

const (
    // aclStatusNo means no acl headers at all.
    aclStatusNo aclStatus = iota
    // aclStatusYesAPECompatible means that only X-Amz-Acl present and equals to private.
    aclStatusYesAPECompatible
    // aclStatusYes means any other acl headers configuration.
    aclStatusYes
)

func aclHeadersStatus(r *http.Request) aclStatus {
    if r.Header.Get(api.AmzGrantRead) != "" ||
        r.Header.Get(api.AmzGrantFullControl) != "" ||
        r.Header.Get(api.AmzGrantWrite) != "" {
        return aclStatusYes
    }

    cannedACL := r.Header.Get(api.AmzACL)
    if cannedACL != "" {
        if cannedACL == basicACLPrivate {
            return aclStatusYesAPECompatible
        }
        return aclStatusYes
    }

    return aclStatusNo
func containsACLHeaders(r *http.Request) bool {
    return r.Header.Get(api.AmzACL) != "" || r.Header.Get(api.AmzGrantRead) != "" ||
        r.Header.Get(api.AmzGrantFullControl) != "" || r.Header.Get(api.AmzGrantWrite) != ""
}

func (h *handler) getNewEAclTable(r *http.Request, bktInfo *data.BucketInfo, objInfo *data.ObjectInfo) (*eacl.Table, error) {
@ -810,143 +713,21 @@ func parseMetadata(r *http.Request) map[string]string {
|
|||
return res
|
||||
}
|
||||
|
||||
func parseCannedACL(header http.Header) (string, error) {
|
||||
acl := header.Get(api.AmzACL)
|
||||
if len(acl) == 0 {
|
||||
return basicACLPrivate, nil
|
||||
}
|
||||
|
||||
if acl == basicACLPrivate || acl == basicACLPublic ||
|
||||
acl == cannedACLAuthRead || acl == basicACLReadOnly {
|
||||
return acl, nil
|
||||
}
|
||||
|
||||
return "", fmt.Errorf("unknown acl: %s", acl)
|
||||
}
|
||||
|
||||
func (h *handler) CreateBucketHandler(w http.ResponseWriter, r *http.Request) {
|
||||
if h.cfg.ACLEnabled() {
|
||||
h.createBucketHandlerACL(w, r)
|
||||
return
|
||||
}
|
||||
|
||||
h.createBucketHandlerPolicy(w, r)
|
||||
}
|
||||
|
||||
func (h *handler) parseCommonCreateBucketParams(reqInfo *middleware.ReqInfo, boxData *accessbox.Box, r *http.Request) (*keys.PublicKey, *layer.CreateBucketParams, error) {
|
||||
ctx := r.Context()
|
||||
reqInfo := middleware.GetReqInfo(ctx)
|
||||
p := &layer.CreateBucketParams{
|
||||
Name: reqInfo.BucketName,
|
||||
Namespace: reqInfo.Namespace,
|
||||
SessionContainerCreation: boxData.Gate.SessionTokenForPut(),
|
||||
}
|
||||
|
||||
if p.SessionContainerCreation == nil {
|
||||
return nil, nil, fmt.Errorf("%w: couldn't find session token for put", errors.GetAPIError(errors.ErrAccessDenied))
|
||||
Name: reqInfo.BucketName,
|
||||
}
|
||||
|
||||
if err := checkBucketName(reqInfo.BucketName); err != nil {
|
||||
return nil, nil, fmt.Errorf("invalid bucket name: %w", err)
|
||||
h.logAndSendError(w, "invalid bucket name", reqInfo, err)
|
||||
return
|
||||
}
|
||||
|
||||
key, err := getTokenIssuerKey(boxData)
|
||||
key, err := h.bearerTokenIssuerKey(ctx)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("couldn't get bearer token signature key: %w", err)
|
||||
}
|
||||
|
||||
createParams, err := h.parseLocationConstraint(r)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("could not parse location contraint: %w", err)
|
||||
}
|
||||
|
||||
if err = h.setPlacementPolicy(p, reqInfo.Namespace, createParams.LocationConstraint, boxData.Policies); err != nil {
|
||||
return nil, nil, fmt.Errorf("couldn't set placement policy: %w", err)
|
||||
}
|
||||
|
||||
p.ObjectLockEnabled = isLockEnabled(h.reqLogger(r.Context()), r.Header)
|
||||
|
||||
return key, p, nil
|
||||
}
|
||||
|
||||
func (h *handler) createBucketHandlerPolicy(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
reqInfo := middleware.GetReqInfo(ctx)
|
||||
|
||||
boxData, err := middleware.GetBoxData(ctx)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "get access box from request", reqInfo, err)
|
||||
return
|
||||
}
|
||||
|
||||
key, p, err := h.parseCommonCreateBucketParams(reqInfo, boxData, r)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "parse create bucket params", reqInfo, err)
|
||||
return
|
||||
}
|
||||
|
||||
cannedACL, err := parseCannedACL(r.Header)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "could not parse canned ACL", reqInfo, err)
|
||||
return
|
||||
}
|
||||
|
||||
p.APEEnabled = true
|
||||
bktInfo, err := h.obj.CreateBucket(ctx, p)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "could not create bucket", reqInfo, err)
|
||||
return
|
||||
}
|
||||
h.reqLogger(ctx).Info(logs.BucketIsCreated, zap.Stringer("container_id", bktInfo.CID))
|
||||
|
||||
chains := bucketCannedACLToAPERules(cannedACL, reqInfo, key, bktInfo.CID)
|
||||
if err = h.ape.SaveACLChains(bktInfo.CID.EncodeToString(), chains); err != nil {
|
||||
h.logAndSendError(w, "failed to add morph rule chain", reqInfo, err)
|
||||
return
|
||||
}
|
||||
|
||||
sp := &layer.PutSettingsParams{
|
||||
BktInfo: bktInfo,
|
||||
Settings: &data.BucketSettings{
|
||||
CannedACL: cannedACL,
|
||||
OwnerKey: key,
|
||||
Versioning: data.VersioningUnversioned,
|
||||
},
|
||||
}
|
||||
|
||||
if p.ObjectLockEnabled {
|
||||
sp.Settings.Versioning = data.VersioningEnabled
|
||||
}
|
||||
|
||||
if err = h.obj.PutBucketSettings(ctx, sp); err != nil {
|
||||
h.logAndSendError(w, "couldn't save bucket settings", reqInfo, err,
|
||||
zap.String("container_id", bktInfo.CID.EncodeToString()))
|
||||
return
|
||||
}
|
||||
|
||||
if err = middleware.WriteSuccessResponseHeadersOnly(w); err != nil {
|
||||
h.logAndSendError(w, "write response", reqInfo, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (h *handler) createBucketHandlerACL(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
reqInfo := middleware.GetReqInfo(ctx)
|
||||
|
||||
boxData, err := middleware.GetBoxData(ctx)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "get access box from request", reqInfo, err)
|
||||
return
|
||||
}
|
||||
|
||||
key, p, err := h.parseCommonCreateBucketParams(reqInfo, boxData, r)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "parse create bucket params", reqInfo, err)
|
||||
return
|
||||
}
|
||||
|
||||
aclPrm := &layer.PutBucketACLParams{SessionToken: boxData.Gate.SessionTokenForSetEACL()}
|
||||
if aclPrm.SessionToken == nil {
|
||||
h.logAndSendError(w, "couldn't find session token for setEACL", reqInfo, errors.GetAPIError(errors.ErrAccessDenied))
|
||||
h.logAndSendError(w, "couldn't get bearer token signature key", reqInfo, err)
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -957,179 +738,68 @@ func (h *handler) createBucketHandlerACL(w http.ResponseWriter, r *http.Request)
|
|||
}
|
||||
resInfo := &resourceInfo{Bucket: reqInfo.BucketName}
|
||||
|
||||
aclPrm.EACL, err = bucketACLToTable(bktACL, resInfo)
|
||||
p.EACL, err = bucketACLToTable(bktACL, resInfo)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "could translate bucket acl to eacl", reqInfo, err)
|
||||
return
|
||||
}
|
||||
|
||||
createParams, err := parseLocationConstraint(r)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "could not parse body", reqInfo, err)
|
||||
return
|
||||
}
|
||||
|
||||
var policies []*accessbox.ContainerPolicy
|
||||
boxData, err := middleware.GetBoxData(ctx)
|
||||
if err == nil {
|
||||
policies = boxData.Policies
|
||||
p.SessionContainerCreation = boxData.Gate.SessionTokenForPut()
|
||||
p.SessionEACL = boxData.Gate.SessionTokenForSetEACL()
|
||||
}
|
||||
|
||||
if p.SessionContainerCreation == nil {
|
||||
h.logAndSendError(w, "couldn't find session token for put", reqInfo, errors.GetAPIError(errors.ErrAccessDenied))
|
||||
return
|
||||
}
|
||||
|
||||
if p.SessionEACL == nil {
|
||||
h.logAndSendError(w, "couldn't find session token for setEACL", reqInfo, errors.GetAPIError(errors.ErrAccessDenied))
|
||||
return
|
||||
}
|
||||
|
||||
if err = h.setPolicy(p, createParams.LocationConstraint, policies); err != nil {
|
||||
h.logAndSendError(w, "couldn't set placement policy", reqInfo, err)
|
||||
return
|
||||
}
|
||||
|
||||
p.ObjectLockEnabled = isLockEnabled(r.Header)
|
||||
|
||||
bktInfo, err := h.obj.CreateBucket(ctx, p)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "could not create bucket", reqInfo, err)
|
||||
return
|
||||
}
|
||||
|
||||
h.reqLogger(ctx).Info(logs.BucketIsCreated, zap.Stringer("container_id", bktInfo.CID))
|
||||
|
||||
aclPrm.BktInfo = bktInfo
|
||||
if err = h.obj.PutBucketACL(r.Context(), aclPrm); err != nil {
|
||||
h.logAndSendError(w, "could not put bucket e/ACL", reqInfo, err)
|
||||
return
|
||||
}
|
||||
|
||||
sp := &layer.PutSettingsParams{
|
||||
BktInfo: bktInfo,
|
||||
Settings: &data.BucketSettings{
|
||||
OwnerKey: key,
|
||||
Versioning: data.VersioningUnversioned,
|
||||
},
|
||||
}
|
||||
|
||||
if p.ObjectLockEnabled {
|
||||
sp.Settings.Versioning = data.VersioningEnabled
|
||||
sp := &layer.PutSettingsParams{
|
||||
BktInfo: bktInfo,
|
||||
Settings: &data.BucketSettings{Versioning: data.VersioningEnabled},
|
||||
}
|
||||
if err = h.obj.PutBucketSettings(ctx, sp); err != nil {
|
||||
h.logAndSendError(w, "couldn't enable bucket versioning", reqInfo, err,
|
||||
zap.String("container_id", bktInfo.CID.EncodeToString()))
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if err = h.obj.PutBucketSettings(ctx, sp); err != nil {
|
||||
h.logAndSendError(w, "couldn't save bucket settings", reqInfo, err,
|
||||
zap.String("container_id", bktInfo.CID.EncodeToString()))
|
||||
return
|
||||
}
|
||||
|
||||
if err = middleware.WriteSuccessResponseHeadersOnly(w); err != nil {
|
||||
h.logAndSendError(w, "write response", reqInfo, err)
|
||||
return
|
||||
}
|
||||
middleware.WriteSuccessResponseHeadersOnly(w)
|
||||
}
|
||||
|
||||
const s3ActionPrefix = "s3:"
|
||||
|
||||
var (
|
||||
// https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html
|
||||
|
||||
writeACLBucketS3Actions = []string{
|
||||
s3ActionPrefix + middleware.PutObjectOperation,
|
||||
s3ActionPrefix + middleware.PostObjectOperation,
|
||||
s3ActionPrefix + middleware.CopyObjectOperation,
|
||||
s3ActionPrefix + middleware.UploadPartOperation,
|
||||
s3ActionPrefix + middleware.UploadPartCopyOperation,
|
||||
s3ActionPrefix + middleware.CreateMultipartUploadOperation,
|
||||
s3ActionPrefix + middleware.CompleteMultipartUploadOperation,
|
||||
}
|
||||
|
||||
readACLBucketS3Actions = []string{
|
||||
s3ActionPrefix + middleware.HeadBucketOperation,
|
||||
s3ActionPrefix + middleware.GetBucketLocationOperation,
|
||||
s3ActionPrefix + middleware.ListObjectsV1Operation,
|
||||
s3ActionPrefix + middleware.ListObjectsV2Operation,
|
||||
s3ActionPrefix + middleware.ListBucketObjectVersionsOperation,
|
||||
s3ActionPrefix + middleware.ListMultipartUploadsOperation,
|
||||
}
|
||||
|
||||
writeACLBucketNativeActions = []string{
|
||||
native.MethodPutObject,
|
||||
}
|
||||
|
||||
readACLBucketNativeActions = []string{
|
||||
native.MethodGetContainer,
|
||||
native.MethodGetObject,
|
||||
native.MethodHeadObject,
|
||||
native.MethodSearchObject,
|
||||
native.MethodRangeObject,
|
||||
native.MethodHashObject,
|
||||
}
|
||||
)
|
||||
|
||||
func bucketCannedACLToAPERules(cannedACL string, reqInfo *middleware.ReqInfo, key *keys.PublicKey, cnrID cid.ID) []*chain.Chain {
|
||||
cnrIDStr := cnrID.EncodeToString()
|
||||
|
||||
chains := []*chain.Chain{
|
||||
{
|
||||
ID: getBucketCannedChainID(chain.S3, cnrID),
|
||||
Rules: []chain.Rule{{
|
||||
Status: chain.Allow,
|
||||
Actions: chain.Actions{Names: []string{"s3:*"}},
|
||||
Resources: chain.Resources{Names: []string{
|
||||
fmt.Sprintf(s3.ResourceFormatS3Bucket, reqInfo.BucketName),
|
||||
fmt.Sprintf(s3.ResourceFormatS3BucketObjects, reqInfo.BucketName),
|
||||
}},
|
||||
Condition: []chain.Condition{{
|
||||
Op: chain.CondStringEquals,
|
||||
Object: chain.ObjectRequest,
|
||||
Key: s3.PropertyKeyOwner,
|
||||
Value: key.Address(),
|
||||
}},
|
||||
}}},
|
||||
{
|
||||
ID: getBucketCannedChainID(chain.Ingress, cnrID),
|
||||
Rules: []chain.Rule{{
|
||||
Status: chain.Allow,
|
||||
Actions: chain.Actions{Names: []string{"*"}},
|
||||
Resources: chain.Resources{Names: []string{
|
||||
fmt.Sprintf(native.ResourceFormatNamespaceContainer, reqInfo.Namespace, cnrIDStr),
|
||||
fmt.Sprintf(native.ResourceFormatNamespaceContainerObjects, reqInfo.Namespace, cnrIDStr),
|
||||
}},
|
||||
Condition: []chain.Condition{{
|
||||
Op: chain.CondStringEquals,
|
||||
Object: chain.ObjectRequest,
|
||||
Key: native.PropertyKeyActorPublicKey,
|
||||
Value: hex.EncodeToString(key.Bytes()),
|
||||
}},
|
||||
}},
|
||||
},
|
||||
}
|
||||
|
||||
switch cannedACL {
|
||||
case basicACLPrivate:
|
||||
case cannedACLAuthRead:
|
||||
fallthrough
|
||||
case basicACLReadOnly:
|
||||
chains[0].Rules = append(chains[0].Rules, chain.Rule{
|
||||
Status: chain.Allow,
|
||||
Actions: chain.Actions{Names: readACLBucketS3Actions},
|
||||
Resources: chain.Resources{Names: []string{
|
||||
fmt.Sprintf(s3.ResourceFormatS3Bucket, reqInfo.BucketName),
|
||||
fmt.Sprintf(s3.ResourceFormatS3BucketObjects, reqInfo.BucketName),
|
||||
}},
|
||||
})
|
||||
|
||||
chains[1].Rules = append(chains[1].Rules, chain.Rule{
|
||||
Status: chain.Allow,
|
||||
Actions: chain.Actions{Names: readACLBucketNativeActions},
|
||||
Resources: chain.Resources{Names: []string{
|
||||
fmt.Sprintf(native.ResourceFormatNamespaceContainer, reqInfo.Namespace, cnrIDStr),
|
||||
fmt.Sprintf(native.ResourceFormatNamespaceContainerObjects, reqInfo.Namespace, cnrIDStr),
|
||||
}},
|
||||
})
|
||||
case basicACLPublic:
|
||||
chains[0].Rules = append(chains[0].Rules, chain.Rule{
|
||||
Status: chain.Allow,
|
||||
Actions: chain.Actions{Names: append(readACLBucketS3Actions, writeACLBucketS3Actions...)},
|
||||
Resources: chain.Resources{Names: []string{
|
||||
fmt.Sprintf(s3.ResourceFormatS3Bucket, reqInfo.BucketName),
|
||||
fmt.Sprintf(s3.ResourceFormatS3BucketObjects, reqInfo.BucketName),
|
||||
}},
|
||||
})
|
||||
|
||||
chains[1].Rules = append(chains[1].Rules, chain.Rule{
|
||||
Status: chain.Allow,
|
||||
Actions: chain.Actions{Names: append(readACLBucketNativeActions, writeACLBucketNativeActions...)},
|
||||
Resources: chain.Resources{Names: []string{
|
||||
fmt.Sprintf(native.ResourceFormatNamespaceContainer, reqInfo.Namespace, cnrIDStr),
|
||||
fmt.Sprintf(native.ResourceFormatNamespaceContainerObjects, reqInfo.Namespace, cnrIDStr),
|
||||
}},
|
||||
})
|
||||
default:
|
||||
panic("unknown canned acl") // this should never happen
|
||||
}
|
||||
|
||||
return chains
|
||||
}
|
||||
|
||||
func getBucketCannedChainID(prefix chain.Name, cnrID cid.ID) chain.ID {
|
||||
return chain.ID(string(prefix) + ":bktCanned" + string(cnrID[:]))
|
||||
}
|
||||
|
||||
func (h handler) setPlacementPolicy(prm *layer.CreateBucketParams, namespace, locationConstraint string, userPolicies []*accessbox.ContainerPolicy) error {
|
||||
prm.Policy = h.cfg.DefaultPlacementPolicy(namespace)
|
||||
func (h handler) setPolicy(prm *layer.CreateBucketParams, locationConstraint string, userPolicies []*accessbox.ContainerPolicy) error {
|
||||
prm.Policy = h.cfg.Policy.DefaultPlacementPolicy()
|
||||
prm.LocationConstraint = locationConstraint
|
||||
|
||||
if locationConstraint == "" {
|
||||
|
@ -1143,7 +813,7 @@ func (h handler) setPlacementPolicy(prm *layer.CreateBucketParams, namespace, lo
|
|||
}
|
||||
}
|
||||
|
||||
if policy, ok := h.cfg.PlacementPolicy(namespace, locationConstraint); ok {
|
||||
if policy, ok := h.cfg.Policy.PlacementPolicy(locationConstraint); ok {
|
||||
prm.Policy = policy
|
||||
return nil
|
||||
}
|
||||
|
@@ -1151,17 +821,9 @@ func (h handler) setPlacementPolicy(prm *layer.CreateBucketParams, namespace, lo
    return errors.GetAPIError(errors.ErrInvalidLocationConstraint)
}

func isLockEnabled(log *zap.Logger, header http.Header) bool {
func isLockEnabled(header http.Header) bool {
    lockEnabledStr := header.Get(api.AmzBucketObjectLockEnabled)
    if len(lockEnabledStr) == 0 {
        return false
    }

    lockEnabled, err := strconv.ParseBool(lockEnabledStr)
    if err != nil {
        log.Warn(logs.InvalidBucketObjectLockEnabledHeader, zap.String("header", lockEnabledStr), zap.Error(err))
    }

    lockEnabled, _ := strconv.ParseBool(lockEnabledStr)
    return lockEnabled
}
@ -1199,13 +861,13 @@ func isAlphaNum(char int32) bool {
|
|||
return 'a' <= char && char <= 'z' || '0' <= char && char <= '9'
|
||||
}
|
||||
|
||||
func (h *handler) parseLocationConstraint(r *http.Request) (*createBucketParams, error) {
|
||||
func parseLocationConstraint(r *http.Request) (*createBucketParams, error) {
|
||||
if r.ContentLength == 0 {
|
||||
return new(createBucketParams), nil
|
||||
}
|
||||
|
||||
params := new(createBucketParams)
|
||||
if err := h.cfg.NewXMLDecoder(r.Body).Decode(params); err != nil {
|
||||
if err := xml.NewDecoder(r.Body).Decode(params); err != nil {
|
||||
return nil, errors.GetAPIError(errors.ErrMalformedXML)
|
||||
}
|
||||
return params, nil
|
||||
|
|
|
@ -4,21 +4,24 @@ import (
|
|||
"bytes"
|
||||
"context"
|
||||
"crypto/md5"
|
||||
"crypto/rand"
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"io"
|
||||
"mime/multipart"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth"
|
||||
v4 "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
||||
s3errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
|
||||
|
@ -176,9 +179,27 @@ func TestPutObjectWithStreamBodyError(t *testing.T) {
|
|||
checkNotFound(t, tc, bktName, objName, emptyVersion)
|
||||
}
|
||||
|
||||
func TestPutObjectWithWrapReaderDiscardOnError(t *testing.T) {
|
||||
tc := prepareHandlerContext(t)
|
||||
|
||||
bktName, objName := "bucket-for-put", "object-for-put"
|
||||
createTestBucket(tc, bktName)
|
||||
|
||||
content := make([]byte, 128*1024)
|
||||
_, err := rand.Read(content)
|
||||
require.NoError(t, err)
|
||||
|
||||
w, r := prepareTestPayloadRequest(tc, bktName, objName, bytes.NewReader(content))
|
||||
tc.tp.SetObjectPutError(objName, errors.New("some error"))
|
||||
numGoroutineBefore := runtime.NumGoroutine()
|
||||
tc.Handler().PutObjectHandler(w, r)
|
||||
numGoroutineAfter := runtime.NumGoroutine()
|
||||
require.Equal(t, numGoroutineBefore, numGoroutineAfter, "goroutines shouldn't leak during put object")
|
||||
}
|
||||
|
||||
func TestPutObjectWithInvalidContentMD5(t *testing.T) {
|
||||
tc := prepareHandlerContext(t)
|
||||
tc.config.md5Enabled = true
|
||||
tc.features.SetMD5Enabled(true)
|
||||
|
||||
bktName, objName := "bucket-for-put", "object-for-put"
|
||||
createTestBucket(tc, bktName)
|
||||
|
@ -194,7 +215,7 @@ func TestPutObjectWithInvalidContentMD5(t *testing.T) {
|
|||
|
||||
func TestPutObjectWithEnabledMD5(t *testing.T) {
|
||||
tc := prepareHandlerContext(t)
|
||||
tc.config.md5Enabled = true
|
||||
tc.features.SetMD5Enabled(true)
|
||||
|
||||
bktName, objName := "bucket-for-put", "object-for-put"
|
||||
createTestBucket(tc, bktName)
|
||||
|
@ -204,64 +225,7 @@ func TestPutObjectWithEnabledMD5(t *testing.T) {
|
|||
md5Hash.Write(content)
|
||||
w, r := prepareTestPayloadRequest(tc, bktName, objName, bytes.NewReader(content))
|
||||
tc.Handler().PutObjectHandler(w, r)
|
||||
require.Equal(t, data.Quote(hex.EncodeToString(md5Hash.Sum(nil))), w.Header().Get(api.ETag))
|
||||
}
|
||||
|
||||
func TestPutObjectCheckContentSHA256(t *testing.T) {
|
||||
hc := prepareHandlerContext(t)
|
||||
|
||||
bktName, objName := "bucket-for-put", "object-for-put"
|
||||
createTestBucket(hc, bktName)
|
||||
|
||||
for _, tc := range []struct {
|
||||
name string
|
||||
hash string
|
||||
content []byte
|
||||
error bool
|
||||
}{
|
||||
{
|
||||
name: "invalid hash value",
|
||||
hash: "d1b2a59fbea7e20077af9f91b27e95e865061b270be03ff539ab3b73587882e8",
|
||||
content: []byte("content"),
|
||||
error: true,
|
||||
},
|
||||
{
|
||||
name: "correct hash for empty payload",
|
||||
hash: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
|
||||
content: []byte(""),
|
||||
error: false,
|
||||
},
|
||||
{
|
||||
name: "unsigned payload",
|
||||
hash: "UNSIGNED-PAYLOAD",
|
||||
content: []byte("content"),
|
||||
error: false,
|
||||
},
|
||||
{
|
||||
name: "correct hash",
|
||||
hash: "ed7002b439e9ac845f22357d822bac1444730fbdb6016d3ec9432297b9ec9f73",
|
||||
content: []byte("content"),
|
||||
error: false,
|
||||
},
|
||||
} {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
w, r := prepareTestPayloadRequest(hc, bktName, objName, bytes.NewReader(tc.content))
|
||||
r.Header.Set("X-Amz-Content-Sha256", tc.hash)
|
||||
hc.Handler().PutObjectHandler(w, r)
|
||||
|
||||
if tc.error {
|
||||
assertS3Error(t, w, s3errors.GetAPIError(s3errors.ErrContentSHA256Mismatch))
|
||||
|
||||
w, r := prepareTestRequest(hc, bktName, objName, nil)
|
||||
hc.Handler().GetObjectHandler(w, r)
|
||||
|
||||
assertStatus(t, w, http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
|
||||
assertStatus(t, w, http.StatusOK)
|
||||
})
|
||||
}
|
||||
require.Equal(t, hex.EncodeToString(md5Hash.Sum(nil)), w.Header().Get(api.ETag))
|
||||
}
|
||||
|
||||
func TestPutObjectWithStreamBodyAWSExample(t *testing.T) {
|
||||
|
@ -300,7 +264,7 @@ func TestPutChunkedTestContentEncoding(t *testing.T) {
|
|||
hc.Handler().PutObjectHandler(w, req)
|
||||
assertS3Error(t, w, s3errors.GetAPIError(s3errors.ErrInvalidEncodingMethod))
|
||||
|
||||
hc.config.bypassContentEncodingInChunks = true
|
||||
hc.kludge.bypassContentEncodingInChunks = true
|
||||
w, req, _ = getChunkedRequest(hc.context, t, bktName, objName)
|
||||
req.Header.Set(api.ContentEncoding, "gzip")
|
||||
hc.Handler().PutObjectHandler(w, req)
|
||||
|
@ -354,14 +318,15 @@ func getChunkedRequest(ctx context.Context, t *testing.T, bktName, objName strin
|
|||
reqInfo := middleware.NewReqInfo(w, req, middleware.ObjectRequest{Bucket: bktName, Object: objName})
|
||||
req = req.WithContext(middleware.SetReqInfo(ctx, reqInfo))
|
||||
req = req.WithContext(middleware.SetClientTime(req.Context(), signTime))
|
||||
req = req.WithContext(middleware.SetAuthHeaders(req.Context(), &middleware.AuthHeader{
|
||||
req = req.WithContext(middleware.SetAuthHeaders(req.Context(), &auth.AuthHeader{
|
||||
AccessKeyID: AWSAccessKeyID,
|
||||
SignatureV4: "4f232c4386841ef735655705268965c44a0e4690baa4adea153f7db9fa80a0a9",
|
||||
Service: "s3",
|
||||
Region: "us-east-1",
|
||||
}))
|
||||
req = req.WithContext(middleware.SetBoxData(req.Context(), &accessbox.Box{
|
||||
Gate: &accessbox.GateData{
|
||||
SecretKey: AWSSecretAccessKey,
|
||||
AccessKey: AWSSecretAccessKey,
|
||||
},
|
||||
}))
|
||||
|
||||
|
@ -372,48 +337,14 @@ func TestCreateBucket(t *testing.T) {
|
|||
hc := prepareHandlerContext(t)
|
||||
bktName := "bkt-name"
|
||||
|
||||
info := createBucket(hc, bktName)
|
||||
createBucketAssertS3Error(hc, bktName, info.Box, s3errors.ErrBucketAlreadyOwnedByYou)
|
||||
box, _ := createAccessBox(t)
|
||||
createBucket(t, hc, bktName, box)
|
||||
createBucketAssertS3Error(hc, bktName, box, s3errors.ErrBucketAlreadyOwnedByYou)
|
||||
|
||||
box2, _ := createAccessBox(t)
|
||||
createBucketAssertS3Error(hc, bktName, box2, s3errors.ErrBucketAlreadyExists)
|
||||
}
|
||||
|
||||
func TestCreateOldBucketPutVersioning(t *testing.T) {
|
||||
hc := prepareHandlerContext(t)
|
||||
hc.config.aclEnabled = true
|
||||
bktName := "bkt-name"
|
||||
|
||||
info := createBucket(hc, bktName)
|
||||
settings, err := hc.tree.GetSettingsNode(hc.Context(), info.BktInfo)
|
||||
require.NoError(t, err)
|
||||
settings.OwnerKey = nil
|
||||
err = hc.tree.PutSettingsNode(hc.Context(), info.BktInfo, settings)
|
||||
require.NoError(t, err)
|
||||
|
||||
putBucketVersioning(t, hc, bktName, true)
|
||||
}
|
||||
|
||||
func TestCreateNamespacedBucket(t *testing.T) {
|
||||
hc := prepareHandlerContext(t)
|
||||
bktName := "bkt-name"
|
||||
namespace := "yabloko"
|
||||
|
||||
box, _ := createAccessBox(t)
|
||||
w, r := prepareTestRequest(hc, bktName, "", nil)
|
||||
ctx := middleware.SetBoxData(r.Context(), box)
|
||||
reqInfo := middleware.GetReqInfo(ctx)
|
||||
reqInfo.Namespace = namespace
|
||||
r = r.WithContext(middleware.SetReqInfo(ctx, reqInfo))
|
||||
hc.Handler().CreateBucketHandler(w, r)
|
||||
assertStatus(t, w, http.StatusOK)
|
||||
|
||||
bktInfo, err := hc.Layer().GetBucketInfo(middleware.SetReqInfo(hc.Context(), reqInfo), bktName)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, namespace+".ns", bktInfo.Zone)
|
||||
}
|
||||
|
||||
func TestPutObjectClientCut(t *testing.T) {
|
||||
hc := prepareHandlerContext(t)
|
||||
bktName, objName1, objName2 := "bkt-name", "obj-name1", "obj-name2"
|
||||
|
@ -423,7 +354,7 @@ func TestPutObjectClientCut(t *testing.T) {
|
|||
obj1 := getObjectFromLayer(hc, objName1)[0]
|
||||
require.Empty(t, getObjectAttribute(obj1, "s3-client-cut"))
|
||||
|
||||
hc.layerFeatures.SetClientCut(true)
|
||||
hc.features.SetClientCut(true)
|
||||
putObject(hc, bktName, objName2)
|
||||
obj2 := getObjectFromLayer(hc, objName2)[0]
|
||||
require.Equal(t, "true", getObjectAttribute(obj2, "s3-client-cut"))
|
||||
|
@ -447,18 +378,3 @@ func getObjectAttribute(obj *object.Object, attrName string) string {
|
|||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func TestPutObjectWithContentLanguage(t *testing.T) {
|
||||
tc := prepareHandlerContext(t)
|
||||
|
||||
exceptedContentLanguage := "en"
|
||||
bktName, objName := "bucket-1", "object-1"
|
||||
createTestBucket(tc, bktName)
|
||||
|
||||
w, r := prepareTestRequest(tc, bktName, objName, nil)
|
||||
r.Header.Set(api.ContentLanguage, exceptedContentLanguage)
|
||||
tc.Handler().PutObjectHandler(w, r)
|
||||
|
||||
tc.Handler().HeadObjectHandler(w, r)
|
||||
require.Equal(t, exceptedContentLanguage, w.Header().Get(api.ContentLanguage))
|
||||
}
|
||||
|
|
|
@ -55,19 +55,6 @@ type Bucket struct {
|
|||
CreationDate string // time string of format "2006-01-02T15:04:05.000Z"
|
||||
}
|
||||
|
||||
// PolicyStatus contains status of bucket policy.
|
||||
type PolicyStatus struct {
|
||||
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ PolicyStatus" json:"-"`
|
||||
IsPublic PolicyStatusIsPublic `xml:"IsPublic"`
|
||||
}
|
||||
|
||||
type PolicyStatusIsPublic string
|
||||
|
||||
const (
|
||||
PolicyStatusIsPublicFalse = "FALSE"
|
||||
PolicyStatusIsPublicTrue = "TRUE"
|
||||
)
|
||||
|
||||
// AccessControlPolicy contains ACL.
|
||||
type AccessControlPolicy struct {
|
||||
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ AccessControlPolicy" json:"-"`
|
||||
|
@ -123,7 +110,7 @@ type Object struct {
|
|||
Owner *Owner `xml:"Owner,omitempty"`
|
||||
|
||||
// Class of storage used to store the object.
|
||||
StorageClass string `xml:"StorageClass"`
|
||||
StorageClass string `xml:"StorageClass,omitempty"`
|
||||
}
|
||||
|
||||
// ObjectVersionResponse container for object version in the response of ListBucketObjectVersionsHandler.
|
||||
|
@ -134,7 +121,7 @@ type ObjectVersionResponse struct {
|
|||
LastModified string `xml:"LastModified"`
|
||||
Owner Owner `xml:"Owner"`
|
||||
Size uint64 `xml:"Size"`
|
||||
StorageClass string `xml:"StorageClass"`
|
||||
StorageClass string `xml:"StorageClass,omitempty"` // is empty!!
|
||||
VersionID string `xml:"VersionId"`
|
||||
}
|
||||
|
||||
|
|
|
@ -199,7 +199,7 @@ func newSignV4ChunkedReader(req *http.Request) (io.ReadCloser, error) {
|
|||
return nil, errs.GetAPIError(errs.ErrAuthorizationHeaderMalformed)
|
||||
}
|
||||
|
||||
currentCredentials := credentials.NewStaticCredentials(authHeaders.AccessKeyID, box.Gate.SecretKey, "")
|
||||
currentCredentials := credentials.NewStaticCredentials(authHeaders.AccessKeyID, box.Gate.AccessKey, "")
|
||||
seed, err := hex.DecodeString(authHeaders.SignatureV4)
|
||||
if err != nil {
|
||||
return nil, errs.GetAPIError(errs.ErrSignatureDoesNotMatch)
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
package handler
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"io"
|
||||
"net/http"
|
||||
"sort"
|
||||
|
@ -28,7 +29,7 @@ func (h *handler) PutObjectTaggingHandler(w http.ResponseWriter, r *http.Request
|
|||
ctx := r.Context()
|
||||
reqInfo := middleware.GetReqInfo(ctx)
|
||||
|
||||
tagSet, err := h.readTagSet(r.Body)
|
||||
tagSet, err := readTagSet(r.Body)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "could not read tag set", reqInfo, err)
|
||||
return
|
||||
|
@ -152,7 +153,7 @@ func (h *handler) DeleteObjectTaggingHandler(w http.ResponseWriter, r *http.Requ
|
|||
func (h *handler) PutBucketTaggingHandler(w http.ResponseWriter, r *http.Request) {
|
||||
reqInfo := middleware.GetReqInfo(r.Context())
|
||||
|
||||
tagSet, err := h.readTagSet(r.Body)
|
||||
tagSet, err := readTagSet(r.Body)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "could not read tag set", reqInfo, err)
|
||||
return
|
||||
|
@@ -207,9 +208,9 @@ func (h *handler) DeleteBucketTaggingHandler(w http.ResponseWriter, r *http.Requ
    w.WriteHeader(http.StatusNoContent)
}

func (h *handler) readTagSet(reader io.Reader) (map[string]string, error) {
func readTagSet(reader io.Reader) (map[string]string, error) {
    tagging := new(Tagging)
    if err := h.cfg.NewXMLDecoder(reader).Decode(tagging); err != nil {
    if err := xml.NewDecoder(reader).Decode(tagging); err != nil {
        return nil, errors.GetAPIError(errors.ErrMalformedXML)
    }

@@ -219,9 +220,6 @@ func (h *handler) readTagSet(reader io.Reader) (map[string]string, error) {

    tagSet := make(map[string]string, len(tagging.TagSet))
    for _, tag := range tagging.TagSet {
        if _, ok := tagSet[tag.Key]; ok {
            return nil, errors.GetAPIError(errors.ErrInvalidTagKeyUniqueness)
        }
        tagSet[tag.Key] = tag.Value
    }
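The readTagSet hunk above rejects duplicate tag keys with ErrInvalidTagKeyUniqueness on the master side, which the TestPutObjectTaggingCheckUniqueness test below exercises. A standalone sketch of the same check is given here; the Tag and Tagging stand-in types only approximate the handler's own XML types.

package main

import (
    "encoding/xml"
    "fmt"
    "strings"
)

// Tag and Tagging are local stand-ins for the handler's tagging types;
// the field layout follows the S3 Tagging schema and is an assumption here.
type Tag struct {
    Key   string
    Value string
}

type Tagging struct {
    XMLName xml.Name `xml:"Tagging"`
    TagSet  []Tag    `xml:"TagSet>Tag"`
}

func main() {
    body := `<Tagging><TagSet>
        <Tag><Key>key-1</Key><Value>val-1</Value></Tag>
        <Tag><Key>key-1</Key><Value>val-2</Value></Tag>
    </TagSet></Tagging>`

    var t Tagging
    if err := xml.NewDecoder(strings.NewReader(body)).Decode(&t); err != nil {
        panic(err)
    }

    seen := make(map[string]string, len(t.TagSet))
    for _, tag := range t.TagSet {
        if _, ok := seen[tag.Key]; ok {
            fmt.Println("duplicate tag key rejected:", tag.Key) // readTagSet would return ErrInvalidTagKeyUniqueness
            return
        }
        seen[tag.Key] = tag.Value
    }
    fmt.Println("tag set accepted:", seen)
}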
@ -1,11 +1,9 @@
|
|||
package handler
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
apiErrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
|
@ -46,66 +44,3 @@ func TestTagsValidity(t *testing.T) {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestPutObjectTaggingCheckUniqueness(t *testing.T) {
|
||||
hc := prepareHandlerContext(t)
|
||||
|
||||
bktName, objName := "bucket-1", "object-1"
|
||||
createBucketAndObject(hc, bktName, objName)
|
||||
|
||||
for _, tc := range []struct {
|
||||
name string
|
||||
body *Tagging
|
||||
error bool
|
||||
}{
|
||||
{
|
||||
name: "Two tags with unique keys",
|
||||
body: &Tagging{
|
||||
TagSet: []Tag{
|
||||
{
|
||||
Key: "key-1",
|
||||
Value: "val-1",
|
||||
},
|
||||
{
|
||||
Key: "key-2",
|
||||
Value: "val-2",
|
||||
},
|
||||
},
|
||||
},
|
||||
error: false,
|
||||
},
|
||||
{
|
||||
name: "Two tags with the same keys",
|
||||
body: &Tagging{
|
||||
TagSet: []Tag{
|
||||
{
|
||||
Key: "key-1",
|
||||
Value: "val-1",
|
||||
},
|
||||
{
|
||||
Key: "key-1",
|
||||
Value: "val-2",
|
||||
},
|
||||
},
|
||||
},
|
||||
error: true,
|
||||
},
|
||||
} {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
w, r := prepareTestRequest(hc, bktName, objName, tc.body)
|
||||
hc.Handler().PutObjectTaggingHandler(w, r)
|
||||
if tc.error {
|
||||
assertS3Error(t, w, apiErrors.GetAPIError(apiErrors.ErrInvalidTagKeyUniqueness))
|
||||
return
|
||||
}
|
||||
assertStatus(t, w, http.StatusOK)
|
||||
|
||||
tagging := getObjectTagging(t, hc, bktName, objName, emptyVersion)
|
||||
require.Len(t, tagging.TagSet, 2)
|
||||
require.Equal(t, "key-1", tagging.TagSet[0].Key)
|
||||
require.Equal(t, "val-1", tagging.TagSet[0].Value)
|
||||
require.Equal(t, "key-2", tagging.TagSet[1].Key)
|
||||
require.Equal(t, "val-2", tagging.TagSet[1].Value)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
|
@ -3,7 +3,6 @@ package handler
|
|||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
@ -15,7 +14,6 @@ import (
|
|||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
|
||||
frosterrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/errors"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
|
||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
"go.uber.org/zap"
|
||||
|
@ -30,13 +28,9 @@ func (h *handler) reqLogger(ctx context.Context) *zap.Logger {
|
|||
}
|
||||
|
||||
func (h *handler) logAndSendError(w http.ResponseWriter, logText string, reqInfo *middleware.ReqInfo, err error, additional ...zap.Field) {
|
||||
err = handleDeleteMarker(w, err)
|
||||
if code, wrErr := middleware.WriteErrorResponse(w, reqInfo, transformToS3Error(err)); wrErr != nil {
|
||||
additional = append(additional, zap.NamedError("write_response_error", wrErr))
|
||||
} else {
|
||||
additional = append(additional, zap.Int("status", code))
|
||||
}
|
||||
code := middleware.WriteErrorResponse(w, reqInfo, transformToS3Error(err))
|
||||
fields := []zap.Field{
|
||||
zap.Int("status", code),
|
||||
zap.String("request_id", reqInfo.RequestID),
|
||||
zap.String("method", reqInfo.API),
|
||||
zap.String("bucket", reqInfo.BucketName),
|
||||
|
@ -50,14 +44,20 @@ func (h *handler) logAndSendError(w http.ResponseWriter, logText string, reqInfo
|
|||
h.log.Error(logs.RequestFailed, fields...) // consider using h.reqLogger (it requires accept context.Context or http.Request)
|
||||
}
|
||||
|
||||
func handleDeleteMarker(w http.ResponseWriter, err error) error {
|
||||
var target layer.DeleteMarkerError
|
||||
if !errors.As(err, &target) {
|
||||
return err
|
||||
func (h *handler) logAndSendErrorNoHeader(w http.ResponseWriter, logText string, reqInfo *middleware.ReqInfo, err error, additional ...zap.Field) {
|
||||
middleware.WriteErrorResponseNoHeader(w, reqInfo, transformToS3Error(err))
|
||||
fields := []zap.Field{
|
||||
zap.String("request_id", reqInfo.RequestID),
|
||||
zap.String("method", reqInfo.API),
|
||||
zap.String("bucket", reqInfo.BucketName),
|
||||
zap.String("object", reqInfo.ObjectName),
|
||||
zap.String("description", logText),
|
||||
zap.Error(err)}
|
||||
fields = append(fields, additional...)
|
||||
if traceID, err := trace.TraceIDFromHex(reqInfo.TraceID); err == nil && traceID.IsValid() {
|
||||
fields = append(fields, zap.String("trace_id", reqInfo.TraceID))
|
||||
}
|
||||
|
||||
w.Header().Set(api.AmzDeleteMarker, "true")
|
||||
return fmt.Errorf("%w: %s", s3errors.GetAPIError(target.ErrorCode), err)
|
||||
h.log.Error(logs.RequestFailed, fields...) // consider using h.reqLogger (it requires accept context.Context or http.Request)
|
||||
}
|
||||
|
||||
func transformToS3Error(err error) error {
|
||||
|
@ -82,10 +82,6 @@ func (h *handler) ResolveBucket(ctx context.Context, bucket string) (*data.Bucke
|
|||
return h.obj.GetBucketInfo(ctx, bucket)
|
||||
}
|
||||
|
||||
func (h *handler) ResolveCID(ctx context.Context, bucket string) (cid.ID, error) {
|
||||
return h.obj.ResolveCID(ctx, bucket)
|
||||
}
|
||||
|
||||
func (h *handler) getBucketAndCheckOwner(r *http.Request, bucket string, header ...string) (*data.BucketInfo, error) {
|
||||
bktInfo, err := h.obj.GetBucketInfo(r.Context(), bucket)
|
||||
if err != nil {
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
package handler
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"net/http"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
||||
|
@ -13,7 +14,7 @@ func (h *handler) PutBucketVersioningHandler(w http.ResponseWriter, r *http.Requ
|
|||
reqInfo := middleware.GetReqInfo(r.Context())
|
||||
|
||||
configuration := new(VersioningConfiguration)
|
||||
if err := h.cfg.NewXMLDecoder(r.Body).Decode(configuration); err != nil {
|
||||
if err := xml.NewDecoder(r.Body).Decode(configuration); err != nil {
|
||||
h.logAndSendError(w, "couldn't decode versioning configuration", reqInfo, errors.GetAPIError(errors.ErrIllegalVersioningConfigurationException))
|
||||
return
|
||||
}
|
||||
|
|
|
@ -61,16 +61,11 @@ const (
|
|||
AmzObjectAttributes = "X-Amz-Object-Attributes"
|
||||
AmzMaxParts = "X-Amz-Max-Parts"
|
||||
AmzPartNumberMarker = "X-Amz-Part-Number-Marker"
|
||||
AmzStorageClass = "X-Amz-Storage-Class"
|
||||
|
||||
AmzServerSideEncryptionCustomerAlgorithm = "x-amz-server-side-encryption-customer-algorithm"
|
||||
AmzServerSideEncryptionCustomerKey = "x-amz-server-side-encryption-customer-key"
|
||||
AmzServerSideEncryptionCustomerKeyMD5 = "x-amz-server-side-encryption-customer-key-MD5"
|
||||
|
||||
AmzCopySourceServerSideEncryptionCustomerAlgorithm = "x-amz-copy-source-server-side-encryption-customer-algorithm"
|
||||
AmzCopySourceServerSideEncryptionCustomerKey = "x-amz-copy-source-server-side-encryption-customer-key"
|
||||
AmzCopySourceServerSideEncryptionCustomerKeyMD5 = "x-amz-copy-source-server-side-encryption-customer-key-MD5"
|
||||
|
||||
OwnerID = "X-Owner-Id"
|
||||
ContainerID = "X-Container-Id"
|
||||
ContainerName = "X-Container-Name"
|
||||
|
@ -94,8 +89,6 @@ const (
|
|||
DefaultLocationConstraint = "default"
|
||||
|
||||
StreamingContentSHA256 = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD"
|
||||
|
||||
DefaultStorageClass = "STANDARD"
|
||||
)
|
||||
|
||||
// S3 request query params.
|
||||
|
@ -121,7 +114,6 @@ var SystemMetadata = map[string]struct{}{
|
|||
ContentType: {},
|
||||
LastModified: {},
|
||||
ETag: {},
|
||||
ContentLanguage: {},
|
||||
}
|
||||
|
||||
func IsSignedStreamingV4(r *http.Request) bool {
|
||||
|
|
|
@ -11,14 +11,13 @@ import (
|
|||
)
|
||||
|
||||
type Cache struct {
|
||||
logger *zap.Logger
|
||||
listsCache *cache.ObjectsListCache
|
||||
sessionListCache *cache.ListSessionCache
|
||||
objCache *cache.ObjectsCache
|
||||
namesCache *cache.ObjectsNameCache
|
||||
bucketCache *cache.BucketCache
|
||||
systemCache *cache.SystemCache
|
||||
accessCache *cache.AccessControlCache
|
||||
logger *zap.Logger
|
||||
listsCache *cache.ObjectsListCache
|
||||
objCache *cache.ObjectsCache
|
||||
namesCache *cache.ObjectsNameCache
|
||||
bucketCache *cache.BucketCache
|
||||
systemCache *cache.SystemCache
|
||||
accessCache *cache.AccessControlCache
|
||||
}
|
||||
|
||||
// CachesConfig contains params for caches.
|
||||
|
@ -26,7 +25,6 @@ type CachesConfig struct {
|
|||
Logger *zap.Logger
|
||||
Objects *cache.Config
|
||||
ObjectsList *cache.Config
|
||||
SessionList *cache.Config
|
||||
Names *cache.Config
|
||||
Buckets *cache.Config
|
||||
System *cache.Config
|
||||
|
@ -39,7 +37,6 @@ func DefaultCachesConfigs(logger *zap.Logger) *CachesConfig {
|
|||
Logger: logger,
|
||||
Objects: cache.DefaultObjectsConfig(logger),
|
||||
ObjectsList: cache.DefaultObjectsListConfig(logger),
|
||||
SessionList: cache.DefaultListSessionConfig(logger),
|
||||
Names: cache.DefaultObjectsNameConfig(logger),
|
||||
Buckets: cache.DefaultBucketConfig(logger),
|
||||
System: cache.DefaultSystemConfig(logger),
|
||||
|
@ -49,33 +46,31 @@ func DefaultCachesConfigs(logger *zap.Logger) *CachesConfig {
|
|||
|
||||
func NewCache(cfg *CachesConfig) *Cache {
|
||||
return &Cache{
|
||||
logger: cfg.Logger,
|
||||
listsCache: cache.NewObjectsListCache(cfg.ObjectsList),
|
||||
sessionListCache: cache.NewListSessionCache(cfg.SessionList),
|
||||
objCache: cache.New(cfg.Objects),
|
||||
namesCache: cache.NewObjectsNameCache(cfg.Names),
|
||||
bucketCache: cache.NewBucketCache(cfg.Buckets),
|
||||
systemCache: cache.NewSystemCache(cfg.System),
|
||||
accessCache: cache.NewAccessControlCache(cfg.AccessControl),
|
||||
logger: cfg.Logger,
|
||||
listsCache: cache.NewObjectsListCache(cfg.ObjectsList),
|
||||
objCache: cache.New(cfg.Objects),
|
||||
namesCache: cache.NewObjectsNameCache(cfg.Names),
|
||||
bucketCache: cache.NewBucketCache(cfg.Buckets),
|
||||
systemCache: cache.NewSystemCache(cfg.System),
|
||||
accessCache: cache.NewAccessControlCache(cfg.AccessControl),
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Cache) GetBucket(zone, name string) *data.BucketInfo {
|
||||
return c.bucketCache.Get(zone, name)
|
||||
func (c *Cache) GetBucket(name string) *data.BucketInfo {
|
||||
return c.bucketCache.Get(name)
|
||||
}
|
||||
|
||||
func (c *Cache) PutBucket(bktInfo *data.BucketInfo) {
|
||||
if err := c.bucketCache.Put(bktInfo); err != nil {
|
||||
c.logger.Warn(logs.CouldntPutBucketInfoIntoCache,
|
||||
zap.String("zone", bktInfo.Zone),
|
||||
zap.String("bucket name", bktInfo.Name),
|
||||
zap.Stringer("bucket cid", bktInfo.CID),
|
||||
zap.Error(err))
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Cache) DeleteBucket(bktInfo *data.BucketInfo) {
|
||||
c.bucketCache.Delete(bktInfo)
|
||||
func (c *Cache) DeleteBucket(name string) {
|
||||
c.bucketCache.Delete(name)
|
||||
}
|
||||
|
||||
func (c *Cache) CleanListCacheEntriesContainingObject(objectName string, cnrID cid.ID) {
|
||||
|
@ -148,29 +143,6 @@ func (c *Cache) PutList(owner user.ID, key cache.ObjectsListKey, list []*data.No
|
|||
}
|
||||
}
|
||||
|
||||
func (c *Cache) GetListSession(owner user.ID, key cache.ListSessionKey) *data.ListSession {
|
||||
if !c.accessCache.Get(owner, key.String()) {
|
||||
return nil
|
||||
}
|
||||
|
||||
return c.sessionListCache.GetListSession(key)
|
||||
}
|
||||
|
||||
func (c *Cache) PutListSession(owner user.ID, key cache.ListSessionKey, session *data.ListSession) {
|
||||
if err := c.sessionListCache.PutListSession(key, session); err != nil {
|
||||
c.logger.Warn(logs.CouldntCacheListSession, zap.Error(err))
|
||||
}
|
||||
|
||||
if err := c.accessCache.Put(owner, key.String()); err != nil {
|
||||
c.logger.Warn(logs.CouldntCacheAccessControlOperation, zap.Error(err))
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Cache) DeleteListSession(owner user.ID, key cache.ListSessionKey) {
|
||||
c.sessionListCache.DeleteListSession(key)
|
||||
c.accessCache.Delete(owner, key.String())
|
||||
}
|
||||
|
||||
func (c *Cache) GetTagging(owner user.ID, key string) map[string]string {
|
||||
if !c.accessCache.Get(owner, key) {
|
||||
return nil
|
||||
|
|
|
@ -9,7 +9,7 @@ import (
|
|||
s3errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
||||
)
|
||||
|
||||
func (n *layer) GetObjectTaggingAndLock(ctx context.Context, objVersion *ObjectVersion, nodeVersion *data.NodeVersion) (map[string]string, data.LockInfo, error) {
|
||||
func (n *layer) GetObjectTaggingAndLock(ctx context.Context, objVersion *ObjectVersion, nodeVersion *data.NodeVersion) (map[string]string, *data.LockInfo, error) {
|
||||
var err error
|
||||
owner := n.BearerOwner(ctx)
|
||||
|
||||
|
@ -17,26 +17,26 @@ func (n *layer) GetObjectTaggingAndLock(ctx context.Context, objVersion *ObjectV
|
|||
lockInfo := n.cache.GetLockInfo(owner, lockObjectKey(objVersion))
|
||||
|
||||
if tags != nil && lockInfo != nil {
|
||||
return tags, *lockInfo, nil
|
||||
return tags, lockInfo, nil
|
||||
}
|
||||
|
||||
if nodeVersion == nil {
|
||||
nodeVersion, err = n.getNodeVersion(ctx, objVersion)
|
||||
if err != nil {
|
||||
return nil, data.LockInfo{}, err
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
|
||||
tags, lockInfo, err = n.treeService.GetObjectTaggingAndLock(ctx, objVersion.BktInfo, nodeVersion)
|
||||
if err != nil {
|
||||
if errors.Is(err, ErrNodeNotFound) {
|
||||
return nil, data.LockInfo{}, fmt.Errorf("%w: %s", s3errors.GetAPIError(s3errors.ErrNoSuchKey), err.Error())
|
||||
return nil, nil, fmt.Errorf("%w: %s", s3errors.GetAPIError(s3errors.ErrNoSuchKey), err.Error())
|
||||
}
|
||||
return nil, data.LockInfo{}, err
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
n.cache.PutTagging(owner, objectTaggingCacheKey(objVersion), tags)
|
||||
n.cache.PutLockInfo(owner, lockObjectKey(objVersion), lockInfo)
|
||||
|
||||
return tags, *lockInfo, nil
|
||||
return tags, lockInfo, nil
|
||||
}
|
||||
|
|
|
@ -5,14 +5,13 @@ import (
|
|||
"fmt"
|
||||
"strconv"
|
||||
|
||||
v2container "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
||||
s3errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
|
||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
|
||||
|
@ -32,21 +31,18 @@ const (
|
|||
AttributeLockEnabled = "LockEnabled"
|
||||
)
|
||||
|
||||
func (n *layer) containerInfo(ctx context.Context, prm PrmContainer) (*data.BucketInfo, error) {
|
||||
func (n *layer) containerInfo(ctx context.Context, idCnr cid.ID) (*data.BucketInfo, error) {
|
||||
var (
|
||||
err error
|
||||
res *container.Container
|
||||
log = n.reqLogger(ctx).With(zap.Stringer("cid", prm.ContainerID))
|
||||
log = n.reqLogger(ctx).With(zap.Stringer("cid", idCnr))
|
||||
|
||||
info = &data.BucketInfo{
|
||||
CID: prm.ContainerID,
|
||||
Name: prm.ContainerID.EncodeToString(),
|
||||
CID: idCnr,
|
||||
Name: idCnr.EncodeToString(),
|
||||
}
|
||||
|
||||
reqInfo = middleware.GetReqInfo(ctx)
|
||||
)
|
||||
|
||||
res, err = n.frostFS.Container(ctx, prm)
|
||||
res, err = n.frostFS.Container(ctx, idCnr)
|
||||
if err != nil {
|
||||
if client.IsErrContainerNotFound(err) {
|
||||
return nil, fmt.Errorf("%w: %s", s3errors.GetAPIError(s3errors.ErrNoSuchBucket), err.Error())
|
||||
|
@ -63,8 +59,6 @@ func (n *layer) containerInfo(ctx context.Context, prm PrmContainer) (*data.Buck
|
|||
}
|
||||
info.Created = container.CreatedAt(cnr)
|
||||
info.LocationConstraint = cnr.Attribute(attributeLocationConstraint)
|
||||
info.HomomorphicHashDisabled = container.IsHomomorphicHashingDisabled(cnr)
|
||||
info.APEEnabled = cnr.BasicACL().Bits() == 0
|
||||
|
||||
attrLockEnabled := cnr.Attribute(AttributeLockEnabled)
|
||||
if len(attrLockEnabled) > 0 {
|
||||
|
@ -77,25 +71,13 @@ func (n *layer) containerInfo(ctx context.Context, prm PrmContainer) (*data.Buck
|
|||
}
|
||||
}
|
||||
|
||||
zone, _ := n.features.FormContainerZone(reqInfo.Namespace)
|
||||
if zone != info.Zone {
|
||||
return nil, fmt.Errorf("ns '%s' and zone '%s' are mismatched for container '%s'", zone, info.Zone, prm.ContainerID)
|
||||
}
|
||||
|
||||
n.cache.PutBucket(info)
|
||||
|
||||
return info, nil
|
||||
}
|
||||
|
||||
func (n *layer) containerList(ctx context.Context) ([]*data.BucketInfo, error) {
|
||||
stoken := n.SessionTokenForRead(ctx)
|
||||
|
||||
prm := PrmUserContainers{
|
||||
UserID: n.BearerOwner(ctx),
|
||||
SessionToken: stoken,
|
||||
}
|
||||
|
||||
res, err := n.frostFS.UserContainers(ctx, prm)
|
||||
res, err := n.frostFS.UserContainers(ctx, n.BearerOwner(ctx))
|
||||
if err != nil {
|
||||
n.reqLogger(ctx).Error(logs.CouldNotListUserContainers, zap.Error(err))
|
||||
return nil, err
|
||||
|
@ -103,11 +85,7 @@ func (n *layer) containerList(ctx context.Context) ([]*data.BucketInfo, error) {
|
|||
|
||||
list := make([]*data.BucketInfo, 0, len(res))
|
||||
for i := range res {
|
||||
getPrm := PrmContainer{
|
||||
ContainerID: res[i],
|
||||
SessionToken: stoken,
|
||||
}
|
||||
info, err := n.containerInfo(ctx, getPrm)
|
||||
info, err := n.containerInfo(ctx, res[i])
|
||||
if err != nil {
|
||||
n.reqLogger(ctx).Error(logs.CouldNotFetchContainerInfo, zap.Error(err))
|
||||
continue
|
||||
|
@ -123,22 +101,20 @@ func (n *layer) createContainer(ctx context.Context, p *CreateBucketParams) (*da
|
|||
if p.LocationConstraint == "" {
|
||||
p.LocationConstraint = api.DefaultLocationConstraint // s3tests_boto3.functional.test_s3:test_bucket_get_location
|
||||
}
|
||||
|
||||
zone, _ := n.features.FormContainerZone(p.Namespace)
|
||||
|
||||
bktInfo := &data.BucketInfo{
|
||||
Name: p.Name,
|
||||
Zone: zone,
|
||||
Zone: v2container.SysAttributeZoneDefault,
|
||||
Owner: n.BearerOwner(ctx),
|
||||
Created: TimeNow(ctx),
|
||||
LocationConstraint: p.LocationConstraint,
|
||||
ObjectLockEnabled: p.ObjectLockEnabled,
|
||||
APEEnabled: p.APEEnabled,
|
||||
}
|
||||
|
||||
attributes := [][2]string{
|
||||
{attributeLocationConstraint, p.LocationConstraint},
|
||||
}
|
||||
var attributes [][2]string
|
||||
|
||||
attributes = append(attributes, [2]string{
|
||||
attributeLocationConstraint, p.LocationConstraint,
|
||||
})
|
||||
|
||||
if p.ObjectLockEnabled {
|
||||
attributes = append(attributes, [2]string{
|
||||
|
@ -146,27 +122,23 @@ func (n *layer) createContainer(ctx context.Context, p *CreateBucketParams) (*da
|
|||
})
|
||||
}
|
||||
|
||||
basicACL := acl.PublicRWExtended
|
||||
if p.APEEnabled {
|
||||
basicACL = 0
|
||||
}
|
||||
|
||||
res, err := n.frostFS.CreateContainer(ctx, PrmContainerCreate{
|
||||
idCnr, err := n.frostFS.CreateContainer(ctx, PrmContainerCreate{
|
||||
Creator: bktInfo.Owner,
|
||||
Policy: p.Policy,
|
||||
Name: p.Name,
|
||||
Zone: zone,
|
||||
SessionToken: p.SessionContainerCreation,
|
||||
CreationTime: bktInfo.Created,
|
||||
AdditionalAttributes: attributes,
|
||||
BasicACL: basicACL,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("create container: %w", err)
|
||||
}
|
||||
|
||||
bktInfo.CID = res.ContainerID
|
||||
bktInfo.HomomorphicHashDisabled = res.HomomorphicHashDisabled
|
||||
bktInfo.CID = idCnr
|
||||
|
||||
if err = n.setContainerEACLTable(ctx, bktInfo.CID, p.EACL, p.SessionEACL); err != nil {
|
||||
return nil, fmt.Errorf("set container eacl: %w", err)
|
||||
}
|
||||
|
||||
n.cache.PutBucket(bktInfo)
|
||||
|
||||
|
@ -179,10 +151,6 @@ func (n *layer) setContainerEACLTable(ctx context.Context, idCnr cid.ID, table *
|
|||
return n.frostFS.SetContainerEACL(ctx, *table, sessionToken)
|
||||
}
|
||||
|
||||
func (n *layer) GetContainerEACL(ctx context.Context, cnrID cid.ID) (*eacl.Table, error) {
|
||||
prm := PrmContainerEACL{
|
||||
ContainerID: cnrID,
|
||||
SessionToken: n.SessionTokenForRead(ctx),
|
||||
}
|
||||
return n.frostFS.ContainerEACL(ctx, prm)
|
||||
func (n *layer) GetContainerEACL(ctx context.Context, idCnr cid.ID) (*eacl.Table, error) {
|
||||
return n.frostFS.ContainerEACL(ctx, idCnr)
|
||||
}
|
||||
|
|
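The bucket.go hunks above show that master derives a container zone from the request namespace and rejects buckets whose container zone does not match it, while support/v0 always registers buckets in the default zone. Below is a small, self-contained sketch of that zone derivation, modelled on the FeatureSettingsMock.FormContainerZone implementation that appears later in this diff; the literal "container" stands in for v2container.SysAttributeZoneDefault and the sample namespace is made up.

```go
package main

import "fmt"

// Illustrative only: mirrors FeatureSettingsMock.FormContainerZone shown further
// down in this diff. "container" stands in for v2container.SysAttributeZoneDefault.
func formContainerZone(ns string) (zone string, isDefault bool) {
	if ns == "" {
		return "container", true
	}
	return ns + ".ns", false
}

func main() {
	for _, ns := range []string{"", "tenant1"} {
		zone, def := formContainerZone(ns)
		fmt.Printf("namespace=%q -> zone=%q (default=%v)\n", ns, zone, def)
	}

	// master's containerInfo rejects a bucket whose container zone does not
	// match the zone derived from the request namespace.
	requestZone, _ := formContainerZone("tenant1")
	if bucketZone := "container"; requestZone != bucketZone {
		fmt.Printf("ns zone %q and container zone %q are mismatched\n", requestZone, bucketZone)
	}
}
```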
@@ -3,6 +3,7 @@ package layer
import (
	"bytes"
	"context"
	"encoding/xml"
	errorsStd "errors"
	"fmt"
	"io"

@@ -24,7 +25,7 @@ func (n *layer) PutBucketCORS(ctx context.Context, p *PutCORSParams) error {
		cors = &data.CORSConfiguration{}
	)

	if err := p.NewDecoder(tee).Decode(cors); err != nil {
	if err := xml.NewDecoder(tee).Decode(cors); err != nil {
		return fmt.Errorf("xml decode cors: %w", err)
	}

@@ -10,7 +10,6 @@ import (
	"fmt"
	"io"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
	"github.com/minio/sio"
)

@@ -101,11 +100,8 @@ func (p Params) HMAC() ([]byte, []byte, error) {

// MatchObjectEncryption checks if encryption params are valid for provided object.
func (p Params) MatchObjectEncryption(encInfo ObjectEncryption) error {
	if p.Enabled() && !encInfo.Enabled {
		return errors.GetAPIError(errors.ErrInvalidEncryptionParameters)
	}
	if !p.Enabled() && encInfo.Enabled {
		return errors.GetAPIError(errors.ErrSSEEncryptedObject)
	if p.Enabled() != encInfo.Enabled {
		return errorsStd.New("invalid encryption view")
	}

	if !encInfo.Enabled {

@@ -126,7 +122,7 @@ func (p Params) MatchObjectEncryption(encInfo ObjectEncryption) error {
	mac.Write(hmacSalt)
	expectedHmacKey := mac.Sum(nil)
	if !bytes.Equal(expectedHmacKey, hmacKey) {
		return errors.GetAPIError(errors.ErrInvalidSSECustomerParameters)
		return errorsStd.New("mismatched hmac key")
	}

	return nil
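Both branches of MatchObjectEncryption end with the same HMAC comparison: the gateway stores a salt and an HMAC key for each SSE-C object and recomputes the HMAC of the salt with the key supplied on the current request. The following is a runnable sketch of just that check, with made-up key material and none of the gateway's attribute plumbing.

```go
package main

import (
	"bytes"
	"crypto/hmac"
	"crypto/sha256"
	"fmt"
)

// deriveHMAC mirrors the check above: recompute the HMAC of the stored salt
// with the customer-provided key and compare it to the stored HMAC key.
func deriveHMAC(customerKey, salt []byte) []byte {
	mac := hmac.New(sha256.New, customerKey)
	mac.Write(salt)
	return mac.Sum(nil)
}

func main() {
	key := bytes.Repeat([]byte{0x42}, 32) // SSE-C keys are 32 bytes
	salt := []byte("0123456789abcdef")

	stored := deriveHMAC(key, salt) // persisted next to the object

	// The same key passes the comparison from the hunk above...
	fmt.Println("same key matches:", bytes.Equal(deriveHMAC(key, salt), stored))

	// ...while a different key fails it (ErrInvalidSSECustomerParameters on master).
	wrong := bytes.Repeat([]byte{0x24}, 32)
	fmt.Println("wrong key matches:", bytes.Equal(deriveHMAC(wrong, salt), stored))
}
```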
@ -30,9 +30,6 @@ type PrmContainerCreate struct {
|
|||
// Name for the container.
|
||||
Name string
|
||||
|
||||
// Zone for container registration.
|
||||
Zone string
|
||||
|
||||
// CreationTime value for Timestamp attribute
|
||||
CreationTime time.Time
|
||||
|
||||
|
@ -46,39 +43,6 @@ type PrmContainerCreate struct {
|
|||
AdditionalAttributes [][2]string
|
||||
}
|
||||
|
||||
// PrmContainer groups parameters of FrostFS.Container operation.
|
||||
type PrmContainer struct {
|
||||
// Container identifier.
|
||||
ContainerID cid.ID
|
||||
|
||||
// Token of the container's creation session. Nil means session absence.
|
||||
SessionToken *session.Container
|
||||
}
|
||||
|
||||
// PrmUserContainers groups parameters of FrostFS.UserContainers operation.
|
||||
type PrmUserContainers struct {
|
||||
// User identifier.
|
||||
UserID user.ID
|
||||
|
||||
// Token of the container's creation session. Nil means session absence.
|
||||
SessionToken *session.Container
|
||||
}
|
||||
|
||||
// PrmContainerEACL groups parameters of FrostFS.ContainerEACL operation.
|
||||
type PrmContainerEACL struct {
|
||||
// Container identifier.
|
||||
ContainerID cid.ID
|
||||
|
||||
// Token of the container's creation session. Nil means session absence.
|
||||
SessionToken *session.Container
|
||||
}
|
||||
|
||||
// ContainerCreateResult is a result parameter of FrostFS.CreateContainer operation.
|
||||
type ContainerCreateResult struct {
|
||||
ContainerID cid.ID
|
||||
HomomorphicHashDisabled bool
|
||||
}
|
||||
|
||||
// PrmAuth groups authentication parameters for the FrostFS operation.
|
||||
type PrmAuth struct {
|
||||
// Bearer token to be used for the operation. Overlaps PrivateKey. Optional.
|
||||
|
@ -150,12 +114,6 @@ type PrmObjectCreate struct {
|
|||
|
||||
// Enables client side object preparing.
|
||||
ClientCut bool
|
||||
|
||||
// Disables using Tillich-Zémor hash for payload.
|
||||
WithoutHomomorphicHash bool
|
||||
|
||||
// Sets max buffer size to read payload.
|
||||
BufferMaxSize uint64
|
||||
}
|
||||
|
||||
// PrmObjectDelete groups parameters of FrostFS.DeleteObject operation.
|
||||
|
@@ -200,21 +158,23 @@ type FrostFS interface {
	// It sets 'Timestamp' attribute to the current time.
	// It returns the ID of the saved container.
	//
	// Created container is public with enabled ACL extension.
	//
	// It returns exactly one non-zero value. It returns any error encountered which
	// prevented the container from being created.
	CreateContainer(context.Context, PrmContainerCreate) (*ContainerCreateResult, error)
	CreateContainer(context.Context, PrmContainerCreate) (cid.ID, error)

	// Container reads a container from FrostFS by ID.
	//
	// It returns exactly one non-nil value. It returns any error encountered which
	// prevented the container from being read.
	Container(context.Context, PrmContainer) (*container.Container, error)
	Container(context.Context, cid.ID) (*container.Container, error)

	// UserContainers reads a list of the containers owned by the specified user.
	//
	// It returns exactly one non-nil value. It returns any error encountered which
	// prevented the containers from being listed.
	UserContainers(context.Context, PrmUserContainers) ([]cid.ID, error)
	UserContainers(context.Context, user.ID) ([]cid.ID, error)

	// SetContainerEACL saves the eACL table of the container in FrostFS. The
	// extended ACL is modified within session if session token is not nil.

@@ -226,7 +186,7 @@ type FrostFS interface {
	//
	// It returns exactly one non-nil value. It returns any error encountered which
	// prevented the eACL from being read.
	ContainerEACL(context.Context, PrmContainerEACL) (*eacl.Table, error)
	ContainerEACL(context.Context, cid.ID) (*eacl.Table, error)

	// DeleteContainer marks the container to be removed from FrostFS by ID.
	// Request is sent within session if the session token is specified.
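The interface hunk above is the core of this divergence: master passes Prm* parameter structs, support/v0 passes bare IDs. One plausible reason for the struct form, suggested by the fields visible in this diff, is that optional inputs such as a session token can be added without breaking every implementation. A toy model of the two shapes follows; all types and names in it are illustrative, not the gateway's.

```go
package main

import "fmt"

// ReaderV0 mirrors the support/v0 shape (bare ID argument); ReaderMaster mirrors
// the master shape (a Prm struct that can grow optional fields).
type ContainerID string

type ReaderV0 interface {
	Container(id ContainerID) (string, error)
}

type PrmContainer struct {
	ContainerID  ContainerID
	SessionToken *string // optional; nil means no session
}

type ReaderMaster interface {
	Container(prm PrmContainer) (string, error)
}

// mem is a trivial in-memory implementation of the master-style interface.
type mem map[ContainerID]string

func (m mem) Container(prm PrmContainer) (string, error) {
	if name, ok := m[prm.ContainerID]; ok {
		return name, nil
	}
	return "", fmt.Errorf("container not found %s", prm.ContainerID)
}

func main() {
	var r ReaderMaster = mem{"cnr-1": "bucket-1"}
	name, err := r.Container(PrmContainer{ContainerID: "cnr-1"})
	fmt.Println(name, err)
}
```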
@ -10,8 +10,6 @@ import (
|
|||
"io"
|
||||
"time"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/acl"
|
||||
v2container "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
|
||||
objectv2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
|
||||
|
@ -32,10 +30,6 @@ type FeatureSettingsMock struct {
|
|||
md5Enabled bool
|
||||
}
|
||||
|
||||
func (k *FeatureSettingsMock) BufferMaxSizeForPut() uint64 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (k *FeatureSettingsMock) ClientCut() bool {
|
||||
return k.clientCut
|
||||
}
|
||||
|
@ -52,14 +46,6 @@ func (k *FeatureSettingsMock) SetMD5Enabled(md5Enabled bool) {
|
|||
k.md5Enabled = md5Enabled
|
||||
}
|
||||
|
||||
func (k *FeatureSettingsMock) FormContainerZone(ns string) (zone string, isDefault bool) {
|
||||
if ns == "" {
|
||||
return v2container.SysAttributeZoneDefault, true
|
||||
}
|
||||
|
||||
return ns + ".ns", false
|
||||
}
|
||||
|
||||
type TestFrostFS struct {
|
||||
FrostFS
|
||||
|
||||
|
@ -137,11 +123,7 @@ func (t *TestFrostFS) ContainerID(name string) (cid.ID, error) {
|
|||
return cid.ID{}, fmt.Errorf("not found")
|
||||
}
|
||||
|
||||
func (t *TestFrostFS) SetContainer(cnrID cid.ID, cnr *container.Container) {
|
||||
t.containers[cnrID.EncodeToString()] = cnr
|
||||
}
|
||||
|
||||
func (t *TestFrostFS) CreateContainer(_ context.Context, prm PrmContainerCreate) (*ContainerCreateResult, error) {
|
||||
func (t *TestFrostFS) CreateContainer(_ context.Context, prm PrmContainerCreate) (cid.ID, error) {
|
||||
var cnr container.Container
|
||||
cnr.Init()
|
||||
cnr.SetOwner(prm.Creator)
|
||||
|
@ -157,7 +139,6 @@ func (t *TestFrostFS) CreateContainer(_ context.Context, prm PrmContainerCreate)
|
|||
if prm.Name != "" {
|
||||
var d container.Domain
|
||||
d.SetName(prm.Name)
|
||||
d.SetZone(prm.Zone)
|
||||
|
||||
container.WriteDomain(&cnr, d)
|
||||
container.SetName(&cnr, prm.Name)
|
||||
|
@ -169,14 +150,14 @@ func (t *TestFrostFS) CreateContainer(_ context.Context, prm PrmContainerCreate)
|
|||
|
||||
b := make([]byte, 32)
|
||||
if _, err := io.ReadFull(rand.Reader, b); err != nil {
|
||||
return nil, err
|
||||
return cid.ID{}, err
|
||||
}
|
||||
|
||||
var id cid.ID
|
||||
id.SetSHA256(sha256.Sum256(b))
|
||||
t.containers[id.EncodeToString()] = &cnr
|
||||
|
||||
return &ContainerCreateResult{ContainerID: id}, nil
|
||||
return id, nil
|
||||
}
|
||||
|
||||
func (t *TestFrostFS) DeleteContainer(_ context.Context, cnrID cid.ID, _ *session.Container) error {
|
||||
|
@ -185,17 +166,17 @@ func (t *TestFrostFS) DeleteContainer(_ context.Context, cnrID cid.ID, _ *sessio
|
|||
return nil
|
||||
}
|
||||
|
||||
func (t *TestFrostFS) Container(_ context.Context, prm PrmContainer) (*container.Container, error) {
|
||||
func (t *TestFrostFS) Container(_ context.Context, id cid.ID) (*container.Container, error) {
|
||||
for k, v := range t.containers {
|
||||
if k == prm.ContainerID.EncodeToString() {
|
||||
if k == id.EncodeToString() {
|
||||
return v, nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("container not found %s", prm.ContainerID)
|
||||
return nil, fmt.Errorf("container not found %s", id)
|
||||
}
|
||||
|
||||
func (t *TestFrostFS) UserContainers(context.Context, PrmUserContainers) ([]cid.ID, error) {
|
||||
func (t *TestFrostFS) UserContainers(_ context.Context, _ user.ID) ([]cid.ID, error) {
|
||||
var res []cid.ID
|
||||
for k := range t.containers {
|
||||
var idCnr cid.ID
|
||||
|
@ -221,7 +202,7 @@ func (t *TestFrostFS) ReadObject(ctx context.Context, prm PrmObjectRead) (*Objec
|
|||
|
||||
if obj, ok := t.objects[sAddr]; ok {
|
||||
owner := getBearerOwner(ctx)
|
||||
if !t.checkAccess(prm.Container, owner, eacl.OperationGet, obj) {
|
||||
if !t.checkAccess(prm.Container, owner, eacl.OperationGet) {
|
||||
return nil, ErrAccessDenied
|
||||
}
|
||||
|
||||
|
@ -285,7 +266,7 @@ func (t *TestFrostFS) CreateObject(_ context.Context, prm PrmObjectCreate) (oid.
|
|||
obj.SetPayloadSize(prm.PayloadSize)
|
||||
obj.SetAttributes(attrs...)
|
||||
obj.SetCreationEpoch(t.currentEpoch)
|
||||
obj.SetOwnerID(owner)
|
||||
obj.SetOwnerID(&owner)
|
||||
t.currentEpoch++
|
||||
|
||||
if len(prm.Locks) > 0 {
|
||||
|
@ -323,9 +304,9 @@ func (t *TestFrostFS) DeleteObject(ctx context.Context, prm PrmObjectDelete) err
|
|||
return err
|
||||
}
|
||||
|
||||
if obj, ok := t.objects[addr.EncodeToString()]; ok {
|
||||
if _, ok := t.objects[addr.EncodeToString()]; ok {
|
||||
owner := getBearerOwner(ctx)
|
||||
if !t.checkAccess(prm.Container, owner, eacl.OperationDelete, obj) {
|
||||
if !t.checkAccess(prm.Container, owner, eacl.OperationDelete) {
|
||||
return ErrAccessDenied
|
||||
}
|
||||
|
||||
|
@ -368,8 +349,8 @@ func (t *TestFrostFS) SetContainerEACL(_ context.Context, table eacl.Table, _ *s
|
|||
return nil
|
||||
}
|
||||
|
||||
func (t *TestFrostFS) ContainerEACL(_ context.Context, prm PrmContainerEACL) (*eacl.Table, error) {
|
||||
table, ok := t.eaclTables[prm.ContainerID.EncodeToString()]
|
||||
func (t *TestFrostFS) ContainerEACL(_ context.Context, cnrID cid.ID) (*eacl.Table, error) {
|
||||
table, ok := t.eaclTables[cnrID.EncodeToString()]
|
||||
if !ok {
|
||||
return nil, errors.New("not found")
|
||||
}
|
||||
|
@ -377,7 +358,7 @@ func (t *TestFrostFS) ContainerEACL(_ context.Context, prm PrmContainerEACL) (*e
|
|||
return table, nil
|
||||
}
|
||||
|
||||
func (t *TestFrostFS) checkAccess(cnrID cid.ID, owner user.ID, op eacl.Operation, obj *object.Object) bool {
|
||||
func (t *TestFrostFS) checkAccess(cnrID cid.ID, owner user.ID, op eacl.Operation) bool {
|
||||
cnr, ok := t.containers[cnrID.EncodeToString()]
|
||||
if !ok {
|
||||
return false
|
||||
|
@ -393,51 +374,22 @@ func (t *TestFrostFS) checkAccess(cnrID cid.ID, owner user.ID, op eacl.Operation
|
|||
}
|
||||
|
||||
for _, rec := range table.Records() {
|
||||
if rec.Operation() != op {
|
||||
continue
|
||||
}
|
||||
|
||||
if !matchTarget(rec, owner) {
|
||||
continue
|
||||
}
|
||||
|
||||
if matchFilter(rec.Filters(), obj) {
|
||||
return rec.Action() == eacl.ActionAllow
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func matchTarget(rec eacl.Record, owner user.ID) bool {
|
||||
for _, trgt := range rec.Targets() {
|
||||
if trgt.Role() == eacl.RoleOthers {
|
||||
return true
|
||||
}
|
||||
var targetOwner user.ID
|
||||
for _, pk := range eacl.TargetECDSAKeys(&trgt) {
|
||||
user.IDFromKey(&targetOwner, *pk)
|
||||
if targetOwner.Equals(owner) {
|
||||
return true
|
||||
if rec.Operation() == op && len(rec.Filters()) == 0 {
|
||||
for _, trgt := range rec.Targets() {
|
||||
if trgt.Role() == eacl.RoleOthers {
|
||||
return rec.Action() == eacl.ActionAllow
|
||||
}
|
||||
var targetOwner user.ID
|
||||
for _, pk := range eacl.TargetECDSAKeys(&trgt) {
|
||||
user.IDFromKey(&targetOwner, *pk)
|
||||
if targetOwner.Equals(owner) {
|
||||
return rec.Action() == eacl.ActionAllow
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func matchFilter(filters []eacl.Filter, obj *object.Object) bool {
|
||||
objID, _ := obj.ID()
|
||||
for _, f := range filters {
|
||||
fv2 := f.ToV2()
|
||||
if fv2.GetMatchType() != acl.MatchTypeStringEqual ||
|
||||
fv2.GetHeaderType() != acl.HeaderTypeObject ||
|
||||
fv2.GetKey() != acl.FilterObjectID ||
|
||||
fv2.GetValue() != objID.EncodeToString() {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
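The checkAccess/matchTarget/matchFilter code above walks eACL records until one matches the operation and target, and falls back to allowing access when nothing matches. Here is a simplified, self-contained model of that evaluation order; filters are omitted and the record type is a plain struct rather than the frostfs-sdk-go eacl types used in the mock.

```go
package main

import "fmt"

type action int

const (
	allow action = iota
	deny
)

type record struct {
	op      string
	roleAll bool     // corresponds to eacl.RoleOthers
	owners  []string // target owners derived from record public keys
	act     action
}

// checkAccess walks the records in order: the first record whose operation and
// target match decides the outcome; no matching record means access is allowed,
// mirroring the `return true` fallthrough in the mock above.
func checkAccess(records []record, op, owner string) bool {
	for _, r := range records {
		if r.op != op {
			continue
		}
		if r.roleAll {
			return r.act == allow
		}
		for _, o := range r.owners {
			if o == owner {
				return r.act == allow
			}
		}
	}
	return true
}

func main() {
	table := []record{
		{op: "DELETE", owners: []string{"owner-A"}, act: allow},
		{op: "DELETE", roleAll: true, act: deny},
	}
	fmt.Println(checkAccess(table, "DELETE", "owner-A")) // true
	fmt.Println(checkAccess(table, "DELETE", "owner-B")) // false
	fmt.Println(checkAccess(table, "GET", "owner-B"))    // true (no matching record)
}
```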
@ -4,8 +4,6 @@ import (
|
|||
"context"
|
||||
"crypto/ecdsa"
|
||||
"crypto/rand"
|
||||
"encoding/json"
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/url"
|
||||
|
@ -50,9 +48,7 @@ type (
|
|||
|
||||
FeatureSettings interface {
|
||||
ClientCut() bool
|
||||
BufferMaxSizeForPut() uint64
|
||||
MD5Enabled() bool
|
||||
FormContainerZone(ns string) (zone string, isDefault bool)
|
||||
}
|
||||
|
||||
layer struct {
|
||||
|
@ -70,7 +66,7 @@ type (
|
|||
Config struct {
|
||||
GateOwner user.ID
|
||||
ChainAddress string
|
||||
Cache *Cache
|
||||
Caches *CachesConfig
|
||||
AnonKey AnonymousKey
|
||||
Resolver BucketResolver
|
||||
TreeService TreeService
|
||||
|
@ -114,17 +110,16 @@ type (
|
|||
|
||||
// PutObjectParams stores object put request parameters.
|
||||
PutObjectParams struct {
|
||||
BktInfo *data.BucketInfo
|
||||
Object string
|
||||
Size uint64
|
||||
Reader io.Reader
|
||||
Header map[string]string
|
||||
Lock *data.ObjectLock
|
||||
Encryption encryption.Params
|
||||
CopiesNumbers []uint32
|
||||
CompleteMD5Hash string
|
||||
ContentMD5 string
|
||||
ContentSHA256Hash string
|
||||
BktInfo *data.BucketInfo
|
||||
Object string
|
||||
Size uint64
|
||||
Reader io.Reader
|
||||
Header map[string]string
|
||||
Lock *data.ObjectLock
|
||||
Encryption encryption.Params
|
||||
CopiesNumbers []uint32
|
||||
CompleteMD5Hash string
|
||||
ContentMD5 string
|
||||
}
|
||||
|
||||
PutCombinedObjectParams struct {
|
||||
|
@ -137,10 +132,9 @@ type (
|
|||
}
|
||||
|
||||
DeleteObjectParams struct {
|
||||
BktInfo *data.BucketInfo
|
||||
Objects []*VersionedObject
|
||||
Settings *data.BucketSettings
|
||||
IsMultiple bool
|
||||
BktInfo *data.BucketInfo
|
||||
Objects []*VersionedObject
|
||||
Settings *data.BucketSettings
|
||||
}
|
||||
|
||||
// PutSettingsParams stores object copy request parameters.
|
||||
|
@ -154,7 +148,6 @@ type (
|
|||
BktInfo *data.BucketInfo
|
||||
Reader io.Reader
|
||||
CopiesNumbers []uint32
|
||||
NewDecoder func(io.Reader) *xml.Decoder
|
||||
}
|
||||
|
||||
// CopyObjectParams stores object copy request parameters.
|
||||
|
@ -164,23 +157,22 @@ type (
|
|||
ScrBktInfo *data.BucketInfo
|
||||
DstBktInfo *data.BucketInfo
|
||||
DstObject string
|
||||
DstSize uint64
|
||||
SrcSize uint64
|
||||
Header map[string]string
|
||||
Range *RangeParams
|
||||
Lock *data.ObjectLock
|
||||
SrcEncryption encryption.Params
|
||||
DstEncryption encryption.Params
|
||||
Encryption encryption.Params
|
||||
CopiesNumbers []uint32
|
||||
}
|
||||
// CreateBucketParams stores bucket create request parameters.
|
||||
CreateBucketParams struct {
|
||||
Name string
|
||||
Namespace string
|
||||
Policy netmap.PlacementPolicy
|
||||
EACL *eacl.Table
|
||||
SessionContainerCreation *session.Container
|
||||
SessionEACL *session.Container
|
||||
LocationConstraint string
|
||||
ObjectLockEnabled bool
|
||||
APEEnabled bool
|
||||
}
|
||||
// PutBucketACLParams stores put bucket acl request parameters.
|
||||
PutBucketACLParams struct {
|
||||
|
@ -235,7 +227,6 @@ type (
|
|||
|
||||
ListBuckets(ctx context.Context) ([]*data.BucketInfo, error)
|
||||
GetBucketInfo(ctx context.Context, name string) (*data.BucketInfo, error)
|
||||
ResolveCID(ctx context.Context, name string) (cid.ID, error)
|
||||
GetBucketACL(ctx context.Context, bktInfo *data.BucketInfo) (*BucketACL, error)
|
||||
PutBucketACL(ctx context.Context, p *PutBucketACLParams) error
|
||||
CreateBucket(ctx context.Context, p *CreateBucketParams) (*data.BucketInfo, error)
|
||||
|
@ -280,7 +271,7 @@ type (
|
|||
// Compound methods for optimizations
|
||||
|
||||
// GetObjectTaggingAndLock unifies GetObjectTagging and GetLock methods in single tree service invocation.
|
||||
GetObjectTaggingAndLock(ctx context.Context, p *ObjectVersion, nodeVersion *data.NodeVersion) (map[string]string, data.LockInfo, error)
|
||||
GetObjectTaggingAndLock(ctx context.Context, p *ObjectVersion, nodeVersion *data.NodeVersion) (map[string]string, *data.LockInfo, error)
|
||||
}
|
||||
)
|
||||
|
||||
|
@ -297,13 +288,6 @@ const (
|
|||
AttributeFrostfsCopiesNumber = "frostfs-copies-number" // such format to match X-Amz-Meta-Frostfs-Copies-Number header
|
||||
)
|
||||
|
||||
var EncryptionMetadata = map[string]struct{}{
|
||||
AttributeEncryptionAlgorithm: {},
|
||||
AttributeDecryptedSize: {},
|
||||
AttributeHMACSalt: {},
|
||||
AttributeHMACKey: {},
|
||||
}
|
||||
|
||||
func (t *VersionedObject) String() string {
|
||||
return t.Name + ":" + t.VersionID
|
||||
}
|
||||
|
@ -325,7 +309,7 @@ func NewLayer(log *zap.Logger, frostFS FrostFS, config *Config) Client {
|
|||
gateOwner: config.GateOwner,
|
||||
anonKey: config.AnonKey,
|
||||
resolver: config.Resolver,
|
||||
cache: config.Cache,
|
||||
cache: NewCache(config.Caches),
|
||||
treeService: config.TreeService,
|
||||
features: config.Features,
|
||||
}
|
||||
|
@ -379,15 +363,6 @@ func (n *layer) BearerOwner(ctx context.Context) user.ID {
|
|||
return ownerID
|
||||
}
|
||||
|
||||
// SessionTokenForRead returns session container token.
|
||||
func (n *layer) SessionTokenForRead(ctx context.Context) *session.Container {
|
||||
if bd, err := middleware.GetBoxData(ctx); err == nil && bd.Gate != nil {
|
||||
return bd.Gate.SessionToken()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n *layer) reqLogger(ctx context.Context) *zap.Logger {
|
||||
reqLogger := middleware.GetReqLog(ctx)
|
||||
if reqLogger != nil {
|
||||
|
@ -414,10 +389,7 @@ func (n *layer) GetBucketInfo(ctx context.Context, name string) (*data.BucketInf
|
|||
return nil, fmt.Errorf("unescape bucket name: %w", err)
|
||||
}
|
||||
|
||||
reqInfo := middleware.GetReqInfo(ctx)
|
||||
zone, _ := n.features.FormContainerZone(reqInfo.Namespace)
|
||||
|
||||
if bktInfo := n.cache.GetBucket(zone, name); bktInfo != nil {
|
||||
if bktInfo := n.cache.GetBucket(name); bktInfo != nil {
|
||||
return bktInfo, nil
|
||||
}
|
||||
|
||||
|
@ -429,29 +401,7 @@ func (n *layer) GetBucketInfo(ctx context.Context, name string) (*data.BucketInf
|
|||
return nil, err
|
||||
}
|
||||
|
||||
prm := PrmContainer{
|
||||
ContainerID: containerID,
|
||||
SessionToken: n.SessionTokenForRead(ctx),
|
||||
}
|
||||
|
||||
return n.containerInfo(ctx, prm)
|
||||
}
|
||||
|
||||
// ResolveCID returns container id by name.
|
||||
func (n *layer) ResolveCID(ctx context.Context, name string) (cid.ID, error) {
|
||||
name, err := url.QueryUnescape(name)
|
||||
if err != nil {
|
||||
return cid.ID{}, fmt.Errorf("unescape bucket name: %w", err)
|
||||
}
|
||||
|
||||
reqInfo := middleware.GetReqInfo(ctx)
|
||||
zone, _ := n.features.FormContainerZone(reqInfo.Namespace)
|
||||
|
||||
if bktInfo := n.cache.GetBucket(zone, name); bktInfo != nil {
|
||||
return bktInfo.CID, nil
|
||||
}
|
||||
|
||||
return n.ResolveBucket(ctx, name)
|
||||
return n.containerInfo(ctx, containerID)
|
||||
}
|
||||
|
||||
// GetBucketACL returns bucket acl info by name.
|
||||
|
@ -630,7 +580,7 @@ func (n *layer) CopyObject(ctx context.Context, p *CopyObjectParams) (*data.Exte
|
|||
Versioned: p.SrcVersioned,
|
||||
Range: p.Range,
|
||||
BucketInfo: p.ScrBktInfo,
|
||||
Encryption: p.SrcEncryption,
|
||||
Encryption: p.Encryption,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("get object to copy: %w", err)
|
||||
|
@ -639,10 +589,10 @@ func (n *layer) CopyObject(ctx context.Context, p *CopyObjectParams) (*data.Exte
|
|||
return n.PutObject(ctx, &PutObjectParams{
|
||||
BktInfo: p.DstBktInfo,
|
||||
Object: p.DstObject,
|
||||
Size: p.DstSize,
|
||||
Size: p.SrcSize,
|
||||
Reader: objPayload,
|
||||
Header: p.Header,
|
||||
Encryption: p.DstEncryption,
|
||||
Encryption: p.Encryption,
|
||||
CopiesNumbers: p.CopiesNumbers,
|
||||
})
|
||||
}
|
||||
|
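As the CopyObject hunks above show, a copy is performed as a read of the source object followed by a regular put into the destination bucket, which is why the parameters carry separate source and destination sizes and, on master, separate source and destination encryption settings. Below is a toy, in-memory version of that read-then-put flow; the map-backed buckets are illustrative only.

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

// bucket is a stand-in for a FrostFS-backed bucket.
type bucket map[string][]byte

func (b bucket) get(key string) (io.Reader, error) {
	payload, ok := b[key]
	if !ok {
		return nil, fmt.Errorf("no such key %q", key)
	}
	return bytes.NewReader(payload), nil
}

func (b bucket) put(key string, r io.Reader) error {
	payload, err := io.ReadAll(r)
	if err != nil {
		return err
	}
	b[key] = payload
	return nil
}

// copyObject reads the source as a stream and feeds it into a put on the
// destination, the same shape as GetObject followed by PutObject above.
func copyObject(src, dst bucket, srcKey, dstKey string) error {
	r, err := src.get(srcKey)
	if err != nil {
		return fmt.Errorf("get object to copy: %w", err)
	}
	return dst.put(dstKey, r)
}

func main() {
	src := bucket{"report.txt": []byte("quarterly numbers")}
	dst := bucket{}
	if err := copyObject(src, dst, "report.txt", "backup/report.txt"); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(string(dst["backup/report.txt"]))
}
```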
@ -685,7 +635,7 @@ func (n *layer) deleteObject(ctx context.Context, bkt *data.BucketInfo, settings
|
|||
|
||||
var nullVersionToDelete *data.NodeVersion
|
||||
if lastVersion.IsUnversioned {
|
||||
if !lastVersion.IsDeleteMarker {
|
||||
if !lastVersion.IsDeleteMarker() {
|
||||
nullVersionToDelete = lastVersion
|
||||
}
|
||||
} else if nullVersionToDelete, obj.Error = n.getNodeVersionToDelete(ctx, bkt, obj); obj.Error != nil {
|
||||
|
@ -701,7 +651,7 @@ func (n *layer) deleteObject(ctx context.Context, bkt *data.BucketInfo, settings
|
|||
}
|
||||
}
|
||||
|
||||
if lastVersion.IsDeleteMarker {
|
||||
if lastVersion.IsDeleteMarker() {
|
||||
obj.DeleteMarkVersion = lastVersion.OID.EncodeToString()
|
||||
return obj
|
||||
}
|
||||
|
@ -713,14 +663,15 @@ func (n *layer) deleteObject(ctx context.Context, bkt *data.BucketInfo, settings
|
|||
}
|
||||
|
||||
obj.DeleteMarkVersion = randOID.EncodeToString()
|
||||
now := TimeNow(ctx)
|
||||
|
||||
newVersion := &data.NodeVersion{
|
||||
BaseNodeVersion: data.BaseNodeVersion{
|
||||
OID: randOID,
|
||||
FilePath: obj.Name,
|
||||
Created: &now,
|
||||
Owner: &n.gateOwner,
|
||||
IsDeleteMarker: true,
|
||||
OID: randOID,
|
||||
FilePath: obj.Name,
|
||||
},
|
||||
DeleteMarker: &data.DeleteMarkerInfo{
|
||||
Created: TimeNow(ctx),
|
||||
Owner: n.gateOwner,
|
||||
},
|
||||
IsUnversioned: settings.VersioningSuspended(),
|
||||
}
|
||||
|
@ -745,15 +696,24 @@ func (n *layer) handleNotFoundError(bkt *data.BucketInfo, obj *VersionedObject)
|
|||
}
|
||||
|
||||
func (n *layer) handleObjectDeleteErrors(ctx context.Context, bkt *data.BucketInfo, obj *VersionedObject, nodeID uint64) *VersionedObject {
|
||||
if !client.IsErrObjectAlreadyRemoved(obj.Error) && !client.IsErrObjectNotFound(obj.Error) {
|
||||
return obj
|
||||
if client.IsErrObjectAlreadyRemoved(obj.Error) {
|
||||
n.reqLogger(ctx).Debug(logs.ObjectAlreadyRemoved,
|
||||
zap.Stringer("cid", bkt.CID), zap.String("oid", obj.VersionID))
|
||||
|
||||
obj.Error = n.treeService.RemoveVersion(ctx, bkt, nodeID)
|
||||
if obj.Error != nil {
|
||||
return obj
|
||||
}
|
||||
|
||||
n.cache.DeleteObjectName(bkt.CID, bkt.Name, obj.Name)
|
||||
}
|
||||
|
||||
n.reqLogger(ctx).Debug(logs.CouldntDeleteObjectFromStorageContinueDeleting,
|
||||
zap.Stringer("cid", bkt.CID), zap.String("oid", obj.VersionID), zap.Error(obj.Error))
|
||||
if client.IsErrObjectNotFound(obj.Error) {
|
||||
n.reqLogger(ctx).Debug(logs.ObjectNotFound,
|
||||
zap.Stringer("cid", bkt.CID), zap.String("oid", obj.VersionID))
|
||||
|
||||
obj.Error = nil
|
||||
|
||||
obj.Error = n.treeService.RemoveVersion(ctx, bkt, nodeID)
|
||||
if obj.Error == nil {
|
||||
n.cache.DeleteObjectName(bkt.CID, bkt.Name, obj.Name)
|
||||
}
|
||||
|
||||
|
@ -788,51 +748,17 @@ func (n *layer) getLastNodeVersion(ctx context.Context, bkt *data.BucketInfo, ob
|
|||
}
|
||||
|
||||
func (n *layer) removeOldVersion(ctx context.Context, bkt *data.BucketInfo, nodeVersion *data.NodeVersion, obj *VersionedObject) (string, error) {
|
||||
if nodeVersion.IsDeleteMarker {
|
||||
if nodeVersion.IsDeleteMarker() {
|
||||
return obj.VersionID, nil
|
||||
}
|
||||
|
||||
if nodeVersion.IsCombined {
|
||||
return "", n.removeCombinedObject(ctx, bkt, nodeVersion)
|
||||
}
|
||||
|
||||
return "", n.objectDelete(ctx, bkt, nodeVersion.OID)
|
||||
}
|
||||
|
||||
func (n *layer) removeCombinedObject(ctx context.Context, bkt *data.BucketInfo, nodeVersion *data.NodeVersion) error {
|
||||
combinedObj, err := n.objectGet(ctx, bkt, nodeVersion.OID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("get combined object '%s': %w", nodeVersion.OID.EncodeToString(), err)
|
||||
}
|
||||
|
||||
var parts []*data.PartInfo
|
||||
if err = json.Unmarshal(combinedObj.Payload(), &parts); err != nil {
|
||||
return fmt.Errorf("unmarshal combined object parts: %w", err)
|
||||
}
|
||||
|
||||
for _, part := range parts {
|
||||
if err = n.objectDelete(ctx, bkt, part.OID); err == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if !client.IsErrObjectAlreadyRemoved(err) && !client.IsErrObjectNotFound(err) {
|
||||
return fmt.Errorf("couldn't delete part '%s': %w", part.OID.EncodeToString(), err)
|
||||
}
|
||||
|
||||
n.reqLogger(ctx).Warn(logs.CouldntDeletePart, zap.String("cid", bkt.CID.EncodeToString()),
|
||||
zap.String("oid", part.OID.EncodeToString()), zap.Int("part number", part.Number), zap.Error(err))
|
||||
}
|
||||
|
||||
return n.objectDelete(ctx, bkt, nodeVersion.OID)
|
||||
}
|
||||
|
||||
// DeleteObjects from the storage.
|
||||
func (n *layer) DeleteObjects(ctx context.Context, p *DeleteObjectParams) []*VersionedObject {
|
||||
for i, obj := range p.Objects {
|
||||
p.Objects[i] = n.deleteObject(ctx, p.BktInfo, p.Settings, obj)
|
||||
if p.IsMultiple && p.Objects[i].Error != nil {
|
||||
n.reqLogger(ctx).Error(logs.CouldntDeleteObject, zap.String("object", obj.String()), zap.Error(p.Objects[i].Error))
|
||||
}
|
||||
}
|
||||
|
||||
return p.Objects
|
||||
|
@ -868,18 +794,14 @@ func (n *layer) ResolveBucket(ctx context.Context, name string) (cid.ID, error)
|
|||
}
|
||||
|
||||
func (n *layer) DeleteBucket(ctx context.Context, p *DeleteBucketParams) error {
|
||||
res, _, err := n.getAllObjectsVersions(ctx, commonVersionsListingParams{
|
||||
BktInfo: p.BktInfo,
|
||||
MaxKeys: 1,
|
||||
})
|
||||
|
||||
nodeVersions, err := n.getAllObjectsVersions(ctx, p.BktInfo, "", "")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(res) != 0 {
|
||||
if len(nodeVersions) != 0 {
|
||||
return errors.GetAPIError(errors.ErrBucketNotEmpty)
|
||||
}
|
||||
|
||||
n.cache.DeleteBucket(p.BktInfo)
|
||||
n.cache.DeleteBucket(p.BktInfo.Name)
|
||||
return n.frostFS.DeleteContainer(ctx, p.BktInfo.CID, p.SessionToken)
|
||||
}
|
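The handleObjectDeleteErrors hunk above treats "already removed" and "not found" storage errors as a signal to finish the logical delete anyway: the tree version is still removed and the cached object name is dropped. The sketch below shows that pattern with stand-in sentinel errors and a toy tree service; the real code uses client.IsErrObjectAlreadyRemoved and client.IsErrObjectNotFound.

```go
package main

import (
	"errors"
	"fmt"
)

var (
	errAlreadyRemoved = errors.New("object already removed")
	errNotFound       = errors.New("object not found")
)

// tree stands in for the tree service that tracks object versions.
type tree struct{ versions map[uint64]string }

func (t *tree) RemoveVersion(id uint64) error { delete(t.versions, id); return nil }

// handleDeleteError finishes the logical delete when storage reports the object
// as already gone, and propagates any other error unchanged.
func handleDeleteError(t *tree, nodeID uint64, delErr error) error {
	if !errors.Is(delErr, errAlreadyRemoved) && !errors.Is(delErr, errNotFound) {
		return delErr // a real failure, propagate it
	}
	return t.RemoveVersion(nodeID)
}

func main() {
	t := &tree{versions: map[uint64]string{7: "photos/cat.jpg"}}
	fmt.Println(handleDeleteError(t, 7, errNotFound)) // <nil>, version 7 removed
	fmt.Println(len(t.versions))                      // 0
	fmt.Println(handleDeleteError(t, 7, errors.New("network timeout")))
}
```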
@ -1,725 +0,0 @@
|
|||
package layer
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/cache"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
||||
s3errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
|
||||
"github.com/panjf2000/ants/v2"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
type (
|
||||
// ListObjectsParamsCommon contains common parameters for ListObjectsV1 and ListObjectsV2.
|
||||
ListObjectsParamsCommon struct {
|
||||
BktInfo *data.BucketInfo
|
||||
Delimiter string
|
||||
Encode string
|
||||
MaxKeys int
|
||||
Prefix string
|
||||
}
|
||||
|
||||
// ListObjectsParamsV1 contains params for ListObjectsV1.
|
||||
ListObjectsParamsV1 struct {
|
||||
ListObjectsParamsCommon
|
||||
Marker string
|
||||
}
|
||||
|
||||
// ListObjectsParamsV2 contains params for ListObjectsV2.
|
||||
ListObjectsParamsV2 struct {
|
||||
ListObjectsParamsCommon
|
||||
ContinuationToken string
|
||||
StartAfter string
|
||||
FetchOwner bool
|
||||
}
|
||||
|
||||
// ListObjectsInfo contains common fields of data for ListObjectsV1 and ListObjectsV2.
|
||||
ListObjectsInfo struct {
|
||||
Prefixes []string
|
||||
Objects []*data.ExtendedNodeVersion
|
||||
IsTruncated bool
|
||||
}
|
||||
|
||||
// ListObjectsInfoV1 holds data which ListObjectsV1 returns.
|
||||
ListObjectsInfoV1 struct {
|
||||
ListObjectsInfo
|
||||
NextMarker string
|
||||
}
|
||||
|
||||
// ListObjectsInfoV2 holds data which ListObjectsV2 returns.
|
||||
ListObjectsInfoV2 struct {
|
||||
ListObjectsInfo
|
||||
NextContinuationToken string
|
||||
}
|
||||
|
||||
// ListObjectVersionsInfo stores info and list of objects versions.
|
||||
ListObjectVersionsInfo struct {
|
||||
CommonPrefixes []string
|
||||
IsTruncated bool
|
||||
KeyMarker string
|
||||
NextKeyMarker string
|
||||
NextVersionIDMarker string
|
||||
Version []*data.ExtendedNodeVersion
|
||||
DeleteMarker []*data.ExtendedNodeVersion
|
||||
VersionIDMarker string
|
||||
}
|
||||
|
||||
commonVersionsListingParams struct {
|
||||
BktInfo *data.BucketInfo
|
||||
Delimiter string
|
||||
Prefix string
|
||||
MaxKeys int
|
||||
Marker string
|
||||
Bookmark string
|
||||
}
|
||||
|
||||
commonLatestVersionsListingParams struct {
|
||||
commonVersionsListingParams
|
||||
ListType ListType
|
||||
}
|
||||
)
|
||||
|
||||
type ListType int
|
||||
|
||||
const (
|
||||
ListObjectsV1Type ListType = iota + 1
|
||||
ListObjectsV2Type ListType = iota + 1
|
||||
)
|
||||
|
||||
// ListObjectsV1 returns objects in a bucket for requests of Version 1.
|
||||
func (n *layer) ListObjectsV1(ctx context.Context, p *ListObjectsParamsV1) (*ListObjectsInfoV1, error) {
|
||||
var result ListObjectsInfoV1
|
||||
|
||||
prm := commonLatestVersionsListingParams{
|
||||
commonVersionsListingParams: commonVersionsListingParams{
|
||||
BktInfo: p.BktInfo,
|
||||
Delimiter: p.Delimiter,
|
||||
Prefix: p.Prefix,
|
||||
MaxKeys: p.MaxKeys,
|
||||
Marker: p.Marker,
|
||||
Bookmark: p.Marker,
|
||||
},
|
||||
ListType: ListObjectsV1Type,
|
||||
}
|
||||
|
||||
objects, next, err := n.getLatestObjectsVersions(ctx, prm)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if next != nil {
|
||||
result.IsTruncated = true
|
||||
result.NextMarker = objects[len(objects)-1].Name()
|
||||
}
|
||||
|
||||
result.Prefixes, result.Objects = triageExtendedObjects(objects)
|
||||
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
// ListObjectsV2 returns objects in a bucket for requests of Version 2.
|
||||
func (n *layer) ListObjectsV2(ctx context.Context, p *ListObjectsParamsV2) (*ListObjectsInfoV2, error) {
|
||||
var result ListObjectsInfoV2
|
||||
|
||||
prm := commonLatestVersionsListingParams{
|
||||
commonVersionsListingParams: commonVersionsListingParams{
|
||||
BktInfo: p.BktInfo,
|
||||
Delimiter: p.Delimiter,
|
||||
Prefix: p.Prefix,
|
||||
MaxKeys: p.MaxKeys,
|
||||
Marker: p.StartAfter,
|
||||
Bookmark: p.ContinuationToken,
|
||||
},
|
||||
ListType: ListObjectsV2Type,
|
||||
}
|
||||
|
||||
objects, next, err := n.getLatestObjectsVersions(ctx, prm)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if next != nil {
|
||||
result.IsTruncated = true
|
||||
result.NextContinuationToken = next.NodeVersion.OID.EncodeToString()
|
||||
}
|
||||
|
||||
result.Prefixes, result.Objects = triageExtendedObjects(objects)
|
||||
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
func (n *layer) ListObjectVersions(ctx context.Context, p *ListObjectVersionsParams) (*ListObjectVersionsInfo, error) {
|
||||
prm := commonVersionsListingParams{
|
||||
BktInfo: p.BktInfo,
|
||||
Delimiter: p.Delimiter,
|
||||
Prefix: p.Prefix,
|
||||
MaxKeys: p.MaxKeys,
|
||||
Marker: p.KeyMarker,
|
||||
Bookmark: p.VersionIDMarker,
|
||||
}
|
||||
|
||||
objects, isTruncated, err := n.getAllObjectsVersions(ctx, prm)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
res := &ListObjectVersionsInfo{
|
||||
KeyMarker: p.KeyMarker,
|
||||
VersionIDMarker: p.VersionIDMarker,
|
||||
IsTruncated: isTruncated,
|
||||
}
|
||||
|
||||
if res.IsTruncated {
|
||||
res.NextKeyMarker = objects[p.MaxKeys-1].NodeVersion.FilePath
|
||||
res.NextVersionIDMarker = objects[p.MaxKeys-1].NodeVersion.OID.EncodeToString()
|
||||
}
|
||||
|
||||
res.CommonPrefixes, objects = triageExtendedObjects(objects)
|
||||
res.Version, res.DeleteMarker = triageVersions(objects)
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (n *layer) getLatestObjectsVersions(ctx context.Context, p commonLatestVersionsListingParams) (objects []*data.ExtendedNodeVersion, next *data.ExtendedNodeVersion, err error) {
|
||||
if p.MaxKeys == 0 {
|
||||
return nil, nil, nil
|
||||
}
|
||||
|
||||
session, err := n.getListLatestVersionsSession(ctx, p)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
generator, errorCh := nodesGeneratorStream(ctx, p.commonVersionsListingParams, session)
|
||||
objOutCh, err := n.initWorkerPool(ctx, 2, p.commonVersionsListingParams, generator)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to init worker pool: %w", err)
|
||||
}
|
||||
|
||||
objects = make([]*data.ExtendedNodeVersion, 0, p.MaxKeys+1)
|
||||
objects = append(objects, session.Next...)
|
||||
for obj := range objOutCh {
|
||||
objects = append(objects, obj)
|
||||
}
|
||||
|
||||
if err = <-errorCh; err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to get next object from stream: %w", err)
|
||||
}
|
||||
|
||||
sort.Slice(objects, func(i, j int) bool { return objects[i].NodeVersion.FilePath < objects[j].NodeVersion.FilePath })
|
||||
|
||||
if len(objects) > p.MaxKeys {
|
||||
next = objects[p.MaxKeys]
|
||||
n.putListLatestVersionsSession(ctx, p, session, objects)
|
||||
objects = objects[:p.MaxKeys]
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (n *layer) getAllObjectsVersions(ctx context.Context, p commonVersionsListingParams) ([]*data.ExtendedNodeVersion, bool, error) {
|
||||
if p.MaxKeys == 0 {
|
||||
return nil, false, nil
|
||||
}
|
||||
|
||||
session, err := n.getListAllVersionsSession(ctx, p)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
|
||||
generator, errorCh := nodesGeneratorVersions(ctx, p, session)
|
||||
objOutCh, err := n.initWorkerPool(ctx, 2, p, generator)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
|
||||
allObjects := handleGeneratedVersions(objOutCh, p, session)
|
||||
|
||||
sort.SliceStable(allObjects, func(i, j int) bool { return allObjects[i].NodeVersion.FilePath < allObjects[j].NodeVersion.FilePath })
|
||||
|
||||
if err = <-errorCh; err != nil {
|
||||
return nil, false, fmt.Errorf("failed to get next object from stream: %w", err)
|
||||
}
|
||||
|
||||
var isTruncated bool
|
||||
if len(allObjects) > p.MaxKeys {
|
||||
isTruncated = true
|
||||
n.putListAllVersionsSession(ctx, p, session, allObjects)
|
||||
allObjects = allObjects[:p.MaxKeys]
|
||||
}
|
||||
|
||||
return allObjects, isTruncated, nil
|
||||
}
|
||||
|
||||
func handleGeneratedVersions(objOutCh <-chan *data.ExtendedNodeVersion, p commonVersionsListingParams, session *data.ListSession) []*data.ExtendedNodeVersion {
|
||||
var lastName string
|
||||
var listRowStartIndex int
|
||||
allObjects := make([]*data.ExtendedNodeVersion, 0, p.MaxKeys)
|
||||
for eoi := range objOutCh {
|
||||
name := eoi.NodeVersion.FilePath
|
||||
if eoi.DirName != "" {
|
||||
name = eoi.DirName
|
||||
}
|
||||
|
||||
if lastName != name {
|
||||
formVersionsListRow(allObjects, listRowStartIndex, session)
|
||||
listRowStartIndex = len(allObjects)
|
||||
allObjects = append(allObjects, eoi)
|
||||
} else if eoi.DirName == "" {
|
||||
allObjects = append(allObjects, eoi)
|
||||
}
|
||||
lastName = name
|
||||
}
|
||||
|
||||
formVersionsListRow(allObjects, listRowStartIndex, session)
|
||||
|
||||
return allObjects
|
||||
}
|
||||
|
||||
func formVersionsListRow(objects []*data.ExtendedNodeVersion, rowStartIndex int, session *data.ListSession) {
|
||||
if len(objects) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
prevVersions := objects[rowStartIndex:]
|
||||
sort.Slice(prevVersions, func(i, j int) bool {
|
||||
return prevVersions[j].NodeVersion.Timestamp < prevVersions[i].NodeVersion.Timestamp // sort in reverse order to have last added first
|
||||
})
|
||||
|
||||
prevVersions[0].IsLatest = len(session.Next) == 0 || session.Next[0].NodeVersion.FilePath != prevVersions[0].NodeVersion.FilePath
|
||||
|
||||
for _, version := range prevVersions[1:] {
|
||||
version.IsLatest = false
|
||||
}
|
||||
}
|
||||
|
||||
func (n *layer) getListLatestVersionsSession(ctx context.Context, p commonLatestVersionsListingParams) (*data.ListSession, error) {
|
||||
return n.getListVersionsSession(ctx, p.commonVersionsListingParams, true)
|
||||
}
|
||||
|
||||
func (n *layer) getListAllVersionsSession(ctx context.Context, p commonVersionsListingParams) (*data.ListSession, error) {
|
||||
return n.getListVersionsSession(ctx, p, false)
|
||||
}
|
||||
|
||||
func (n *layer) getListVersionsSession(ctx context.Context, p commonVersionsListingParams, latestOnly bool) (*data.ListSession, error) {
|
||||
owner := n.BearerOwner(ctx)
|
||||
|
||||
cacheKey := cache.CreateListSessionCacheKey(p.BktInfo.CID, p.Prefix, p.Bookmark)
|
||||
session := n.cache.GetListSession(owner, cacheKey)
|
||||
if session == nil {
|
||||
return n.initNewVersionsByPrefixSession(ctx, p, latestOnly)
|
||||
}
|
||||
|
||||
if session.Acquired.Swap(true) {
|
||||
return n.initNewVersionsByPrefixSession(ctx, p, latestOnly)
|
||||
}
|
||||
|
||||
// after reading next object from stream in session
|
||||
// the current cache value already doesn't match with next token in cache key
|
||||
n.cache.DeleteListSession(owner, cacheKey)
|
||||
|
||||
return session, nil
|
||||
}
|
||||
|
||||
func (n *layer) initNewVersionsByPrefixSession(ctx context.Context, p commonVersionsListingParams, latestOnly bool) (session *data.ListSession, err error) {
|
||||
session = &data.ListSession{NamesMap: make(map[string]struct{})}
|
||||
session.Context, session.Cancel = context.WithCancel(context.Background())
|
||||
|
||||
if bd, err := middleware.GetBoxData(ctx); err == nil {
|
||||
session.Context = middleware.SetBoxData(session.Context, bd)
|
||||
}
|
||||
|
||||
session.Stream, err = n.treeService.InitVersionsByPrefixStream(session.Context, p.BktInfo, p.Prefix, latestOnly)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return session, nil
|
||||
}
|
||||
|
||||
func (n *layer) putListLatestVersionsSession(ctx context.Context, p commonLatestVersionsListingParams, session *data.ListSession, allObjects []*data.ExtendedNodeVersion) {
|
||||
if len(allObjects) <= p.MaxKeys {
|
||||
return
|
||||
}
|
||||
|
||||
var cacheKey cache.ListSessionKey
|
||||
switch p.ListType {
|
||||
case ListObjectsV1Type:
|
||||
cacheKey = cache.CreateListSessionCacheKey(p.BktInfo.CID, p.Prefix, allObjects[p.MaxKeys-1].Name())
|
||||
case ListObjectsV2Type:
|
||||
cacheKey = cache.CreateListSessionCacheKey(p.BktInfo.CID, p.Prefix, allObjects[p.MaxKeys].NodeVersion.OID.EncodeToString())
|
||||
default:
|
||||
// should never happen
|
||||
panic("invalid list type")
|
||||
}
|
||||
|
||||
session.Acquired.Store(false)
|
||||
session.Next = []*data.ExtendedNodeVersion{allObjects[p.MaxKeys]}
|
||||
n.cache.PutListSession(n.BearerOwner(ctx), cacheKey, session)
|
||||
}
|
||||
|
||||
func (n *layer) putListAllVersionsSession(ctx context.Context, p commonVersionsListingParams, session *data.ListSession, allObjects []*data.ExtendedNodeVersion) {
|
||||
if len(allObjects) <= p.MaxKeys {
|
||||
return
|
||||
}
|
||||
|
||||
session.Acquired.Store(false)
|
||||
|
||||
session.Next = make([]*data.ExtendedNodeVersion, len(allObjects)-p.MaxKeys+1)
|
||||
session.Next[0] = allObjects[p.MaxKeys-1]
|
||||
for i, node := range allObjects[p.MaxKeys:] {
|
||||
session.Next[i+1] = node
|
||||
}
|
||||
|
||||
cacheKey := cache.CreateListSessionCacheKey(p.BktInfo.CID, p.Prefix, session.Next[0].NodeVersion.OID.EncodeToString())
|
||||
n.cache.PutListSession(n.BearerOwner(ctx), cacheKey, session)
|
||||
}
|
||||
|
||||
func nodesGeneratorStream(ctx context.Context, p commonVersionsListingParams, stream *data.ListSession) (<-chan *data.ExtendedNodeVersion, <-chan error) {
|
||||
nodeCh := make(chan *data.ExtendedNodeVersion, 1000)
|
||||
errCh := make(chan error, 1)
|
||||
existed := stream.NamesMap
|
||||
|
||||
if len(stream.Next) != 0 {
|
||||
existed[continuationToken] = struct{}{}
|
||||
}
|
||||
|
||||
limit := p.MaxKeys
|
||||
if len(stream.Next) == 0 {
|
||||
limit++
|
||||
}
|
||||
|
||||
go func() {
|
||||
var generated int
|
||||
var err error
|
||||
|
||||
LOOP:
|
||||
for err == nil {
|
||||
node, err := stream.Stream.Next(ctx)
|
||||
if err != nil {
|
||||
if !errors.Is(err, io.EOF) {
|
||||
errCh <- fmt.Errorf("stream next: %w", err)
|
||||
}
|
||||
break LOOP
|
||||
}
|
||||
|
||||
nodeExt := &data.ExtendedNodeVersion{
|
||||
NodeVersion: node,
|
||||
IsLatest: true,
|
||||
DirName: tryDirectoryName(node, p.Prefix, p.Delimiter),
|
||||
}
|
||||
|
||||
if shouldSkip(nodeExt, p, existed) {
|
||||
continue
|
||||
}
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
break LOOP
|
||||
case nodeCh <- nodeExt:
|
||||
generated++
|
||||
|
||||
if generated == limit { // we use maxKeys+1 to be able to know nextMarker/nextContinuationToken
|
||||
break LOOP
|
||||
}
|
||||
}
|
||||
}
|
||||
close(nodeCh)
|
||||
close(errCh)
|
||||
}()
|
||||
|
||||
return nodeCh, errCh
|
||||
}
|
||||
|
||||
func nodesGeneratorVersions(ctx context.Context, p commonVersionsListingParams, stream *data.ListSession) (<-chan *data.ExtendedNodeVersion, <-chan error) {
|
||||
nodeCh := make(chan *data.ExtendedNodeVersion, 1000)
|
||||
errCh := make(chan error, 1)
|
||||
existed := stream.NamesMap
|
||||
|
||||
delete(existed, continuationToken)
|
||||
|
||||
go func() {
|
||||
var (
|
||||
generated int
|
||||
ind int
|
||||
err error
|
||||
lastName string
|
||||
node *data.NodeVersion
|
||||
nodeExt *data.ExtendedNodeVersion
|
||||
)
|
||||
|
||||
LOOP:
|
||||
for err == nil {
|
||||
if ind < len(stream.Next) {
|
||||
nodeExt = stream.Next[ind]
|
||||
ind++
|
||||
} else {
|
||||
node, err = stream.Stream.Next(ctx)
|
||||
if err != nil {
|
||||
if !errors.Is(err, io.EOF) {
|
||||
errCh <- fmt.Errorf("stream next: %w", err)
|
||||
}
|
||||
break LOOP
|
||||
}
|
||||
|
||||
nodeExt = &data.ExtendedNodeVersion{
|
||||
NodeVersion: node,
|
||||
DirName: tryDirectoryName(node, p.Prefix, p.Delimiter),
|
||||
}
|
||||
}
|
||||
|
||||
if shouldSkipVersions(nodeExt, p, existed) {
|
||||
continue
|
||||
}
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
break LOOP
|
||||
case nodeCh <- nodeExt:
|
||||
generated++
|
||||
if generated > p.MaxKeys && nodeExt.NodeVersion.FilePath != lastName {
|
||||
break LOOP
|
||||
}
|
||||
lastName = nodeExt.NodeVersion.FilePath
|
||||
}
|
||||
}
|
||||
close(nodeCh)
|
||||
close(errCh)
|
||||
}()
|
||||
|
||||
return nodeCh, errCh
|
||||
}
|
||||
|
||||
func (n *layer) initWorkerPool(ctx context.Context, size int, p commonVersionsListingParams, input <-chan *data.ExtendedNodeVersion) (<-chan *data.ExtendedNodeVersion, error) {
|
||||
reqLog := n.reqLogger(ctx)
|
||||
pool, err := ants.NewPool(size, ants.WithLogger(&logWrapper{reqLog}))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("coudln't init go pool for listing: %w", err)
|
||||
}
|
||||
objCh := make(chan *data.ExtendedNodeVersion, size)
|
||||
|
||||
go func() {
|
||||
var wg sync.WaitGroup
|
||||
|
||||
LOOP:
|
||||
for node := range input {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
break LOOP
|
||||
default:
|
||||
}
|
||||
|
||||
if node.DirName != "" || node.NodeVersion.IsFilledExtra() {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
case objCh <- node:
|
||||
}
|
||||
} else {
|
||||
// We have to make a copy of pointer to data.NodeVersion
|
||||
// to get correct value in submitted task function.
|
||||
func(node *data.ExtendedNodeVersion) {
|
||||
wg.Add(1)
|
||||
err = pool.Submit(func() {
|
||||
defer wg.Done()
|
||||
|
||||
oi := n.objectInfoFromObjectsCacheOrFrostFS(ctx, p.BktInfo, node.NodeVersion)
|
||||
if oi == nil {
|
||||
// try to get object again
|
||||
if oi = n.objectInfoFromObjectsCacheOrFrostFS(ctx, p.BktInfo, node.NodeVersion); oi == nil {
|
||||
// do not process object which are definitely missing in object service
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
realSize, err := GetObjectSize(oi)
|
||||
if err != nil {
|
||||
reqLog.Debug(logs.FailedToGetRealObjectSize, zap.Error(err))
|
||||
realSize = oi.Size
|
||||
}
|
||||
|
||||
node.NodeVersion.FillExtra(&oi.Owner, &oi.Created, realSize)
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
case objCh <- node:
|
||||
}
|
||||
})
|
||||
if err != nil {
|
||||
wg.Done()
|
||||
reqLog.Warn(logs.FailedToSubmitTaskToPool, zap.Error(err))
|
||||
}
|
||||
}(node)
|
||||
}
|
||||
}
|
||||
wg.Wait()
|
||||
close(objCh)
|
||||
pool.Release()
|
||||
}()
|
||||
|
||||
return objCh, nil
|
||||
}
|
||||
|
||||
func shouldSkip(node *data.ExtendedNodeVersion, p commonVersionsListingParams, existed map[string]struct{}) bool {
|
||||
if node.NodeVersion.IsDeleteMarker {
|
||||
return true
|
||||
}
|
||||
|
||||
filePath := node.NodeVersion.FilePath
|
||||
if node.DirName != "" {
|
||||
filePath = node.DirName
|
||||
}
|
||||
|
||||
if _, ok := existed[filePath]; ok {
|
||||
return true
|
||||
}
|
||||
|
||||
if filePath <= p.Marker {
|
||||
return true
|
||||
}
|
||||
|
||||
if p.Bookmark != "" {
|
||||
if _, ok := existed[continuationToken]; !ok {
|
||||
if p.Bookmark != node.NodeVersion.OID.EncodeToString() {
|
||||
return true
|
||||
}
|
||||
existed[continuationToken] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
existed[filePath] = struct{}{}
|
||||
return false
|
||||
}
|
||||
|
||||
func shouldSkipVersions(node *data.ExtendedNodeVersion, p commonVersionsListingParams, existed map[string]struct{}) bool {
|
||||
filePath := node.NodeVersion.FilePath
|
||||
if node.DirName != "" {
|
||||
filePath = node.DirName
|
||||
if _, ok := existed[filePath]; ok {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
if filePath < p.Marker {
|
||||
return true
|
||||
}
|
||||
|
||||
if p.Bookmark != "" {
|
||||
if _, ok := existed[continuationToken]; !ok {
|
||||
if p.Bookmark != node.NodeVersion.OID.EncodeToString() {
|
||||
return true
|
||||
}
|
||||
existed[continuationToken] = struct{}{}
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
existed[filePath] = struct{}{}
|
||||
return false
|
||||
}
|
||||
|
||||
func triageExtendedObjects(allObjects []*data.ExtendedNodeVersion) (prefixes []string, objects []*data.ExtendedNodeVersion) {
|
||||
for _, ov := range allObjects {
|
||||
if ov.DirName != "" {
|
||||
prefixes = append(prefixes, ov.DirName)
|
||||
} else {
|
||||
objects = append(objects, ov)
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (n *layer) objectInfoFromObjectsCacheOrFrostFS(ctx context.Context, bktInfo *data.BucketInfo, node *data.NodeVersion) (oi *data.ObjectInfo) {
|
||||
owner := n.BearerOwner(ctx)
|
||||
if extInfo := n.cache.GetObject(owner, newAddress(bktInfo.CID, node.OID)); extInfo != nil {
|
||||
return extInfo.ObjectInfo
|
||||
}
|
||||
|
||||
meta, err := n.objectHead(ctx, bktInfo, node.OID)
|
||||
if err != nil {
|
||||
n.reqLogger(ctx).Warn(logs.CouldNotFetchObjectMeta, zap.Error(err))
|
||||
return nil
|
||||
}
|
||||
|
||||
oi = objectInfoFromMeta(bktInfo, meta)
|
||||
oi.MD5Sum = node.MD5
|
||||
n.cache.PutObject(owner, &data.ExtendedObjectInfo{ObjectInfo: oi, NodeVersion: node})
|
||||
|
||||
return oi
|
||||
}
|
||||
|
||||
// tryDirectoryName forms directory name by prefix and delimiter.
|
||||
// If node isn't a directory empty string is returned.
|
||||
// This function doesn't check if node has a prefix. It must do a caller.
|
||||
func tryDirectoryName(node *data.NodeVersion, prefix, delimiter string) string {
|
||||
if len(delimiter) == 0 {
|
||||
return ""
|
||||
}
|
||||
|
||||
tail := strings.TrimPrefix(node.FilePath, prefix)
|
||||
index := strings.Index(tail, delimiter)
|
||||
if index >= 0 {
|
||||
return prefix + tail[:index+1]
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
func filterVersionsByMarker(objects []*data.ExtendedNodeVersion, p *ListObjectVersionsParams) ([]*data.ExtendedNodeVersion, error) {
|
||||
if p.KeyMarker == "" {
|
||||
return objects, nil
|
||||
}
|
||||
|
||||
for i, obj := range objects {
|
||||
if obj.NodeVersion.FilePath == p.KeyMarker {
|
||||
for j := i; j < len(objects); j++ {
|
||||
if objects[j].NodeVersion.FilePath != obj.NodeVersion.FilePath {
|
||||
if p.VersionIDMarker == "" {
|
||||
return objects[j:], nil
|
||||
}
|
||||
break
|
||||
}
|
||||
if objects[j].NodeVersion.OID.EncodeToString() == p.VersionIDMarker {
|
||||
return objects[j+1:], nil
|
||||
}
|
||||
}
|
||||
return nil, s3errors.GetAPIError(s3errors.ErrInvalidVersion)
|
||||
} else if obj.NodeVersion.FilePath > p.KeyMarker {
|
||||
if p.VersionIDMarker != "" {
|
||||
return nil, s3errors.GetAPIError(s3errors.ErrInvalidVersion)
|
||||
}
|
||||
return objects[i:], nil
|
||||
}
|
||||
}
|
||||
|
||||
// don't use nil as empty slice to be consistent with `return objects[j+1:], nil` above
|
||||
// that can be empty
|
||||
return []*data.ExtendedNodeVersion{}, nil
|
||||
}
|
||||
|
||||
func triageVersions(objVersions []*data.ExtendedNodeVersion) ([]*data.ExtendedNodeVersion, []*data.ExtendedNodeVersion) {
|
||||
if len(objVersions) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
var resVersion []*data.ExtendedNodeVersion
|
||||
var resDelMarkVersions []*data.ExtendedNodeVersion
|
||||
|
||||
for _, version := range objVersions {
|
||||
if version.NodeVersion.IsDeleteMarker {
|
||||
resDelMarkVersions = append(resDelMarkVersions, version)
|
||||
} else {
|
||||
resVersion = append(resVersion, version)
|
||||
}
|
||||
}
|
||||
|
||||
return resVersion, resDelMarkVersions
|
||||
}
|
|
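The file removed above contains the listing pipeline, including the tryDirectoryName helper that turns object names into common prefixes. The snippet below repeats that helper, adapted to take the object name directly instead of a *data.NodeVersion, and runs it on two made-up keys to show which names collapse into a prefix.

```go
package main

import (
	"fmt"
	"strings"
)

// tryDirectoryName is copied from the deleted listing.go above, with the object
// name passed directly. It returns the common prefix for nested names and an
// empty string for direct children.
func tryDirectoryName(filePath, prefix, delimiter string) string {
	if len(delimiter) == 0 {
		return ""
	}
	tail := strings.TrimPrefix(filePath, prefix)
	if index := strings.Index(tail, delimiter); index >= 0 {
		return prefix + tail[:index+1]
	}
	return ""
}

func main() {
	const prefix, delimiter = "photos/", "/"
	for _, name := range []string{
		"photos/2021/january/cat.jpg", // nested: collapses to the "photos/2021/" prefix
		"photos/readme.txt",           // direct child: listed as an object, not a prefix
	} {
		fmt.Printf("%-30s -> %q\n", name, tryDirectoryName(name, prefix, delimiter))
	}
}
```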
@ -15,7 +15,6 @@ import (
|
|||
"strings"
|
||||
"time"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
||||
s3errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/encryption"
|
||||
|
@ -67,22 +66,20 @@ type (
|
|||
}
|
||||
|
||||
UploadPartParams struct {
|
||||
Info *UploadInfoParams
|
||||
PartNumber int
|
||||
Size uint64
|
||||
Reader io.Reader
|
||||
ContentMD5 string
|
||||
ContentSHA256Hash string
|
||||
Info *UploadInfoParams
|
||||
PartNumber int
|
||||
Size uint64
|
||||
Reader io.Reader
|
||||
ContentMD5 string
|
||||
}
|
||||
|
||||
UploadCopyParams struct {
|
||||
Versioned bool
|
||||
Info *UploadInfoParams
|
||||
SrcObjInfo *data.ObjectInfo
|
||||
SrcBktInfo *data.BucketInfo
|
||||
SrcEncryption encryption.Params
|
||||
PartNumber int
|
||||
Range *RangeParams
|
||||
Versioned bool
|
||||
Info *UploadInfoParams
|
||||
SrcObjInfo *data.ObjectInfo
|
||||
SrcBktInfo *data.BucketInfo
|
||||
PartNumber int
|
||||
Range *RangeParams
|
||||
}
|
||||
|
||||
CompleteMultipartParams struct {
|
||||
|
@ -262,20 +259,6 @@ func (n *layer) uploadPart(ctx context.Context, multipartInfo *data.MultipartInf
|
|||
size = decSize
|
||||
}
|
||||
|
||||
if !p.Info.Encryption.Enabled() && len(p.ContentSHA256Hash) > 0 && !auth.IsStandardContentSHA256(p.ContentSHA256Hash) {
|
||||
contentHashBytes, err := hex.DecodeString(p.ContentSHA256Hash)
|
||||
if err != nil {
|
||||
return nil, s3errors.GetAPIError(s3errors.ErrContentSHA256Mismatch)
|
||||
}
|
||||
if !bytes.Equal(contentHashBytes, hash) {
|
||||
err = n.objectDelete(ctx, bktInfo, id)
|
||||
if err != nil {
|
||||
n.reqLogger(ctx).Debug(logs.FailedToDeleteObject, zap.Stringer("cid", bktInfo.CID), zap.Stringer("oid", id))
|
||||
}
|
||||
return nil, s3errors.GetAPIError(s3errors.ErrContentSHA256Mismatch)
|
||||
}
|
||||
}
|
||||
|
||||
n.reqLogger(ctx).Debug(logs.UploadPart,
|
||||
zap.String("multipart upload", p.Info.UploadID), zap.Int("part number", p.PartNumber),
|
||||
zap.Stringer("cid", bktInfo.CID), zap.Stringer("oid", id))
|
||||
|
@ -333,7 +316,6 @@ func (n *layer) UploadPartCopy(ctx context.Context, p *UploadCopyParams) (*data.
|
|||
|
||||
if objSize, err := GetObjectSize(p.SrcObjInfo); err == nil {
|
||||
srcObjectSize = objSize
|
||||
size = objSize
|
||||
}
|
||||
|
||||
if p.Range != nil {
|
||||
|
@ -351,7 +333,6 @@ func (n *layer) UploadPartCopy(ctx context.Context, p *UploadCopyParams) (*data.
|
|||
Versioned: p.Versioned,
|
||||
Range: p.Range,
|
||||
BucketInfo: p.SrcBktInfo,
|
||||
Encryption: p.SrcEncryption,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("get object to upload copy: %w", err)
|
||||
|
@ -392,7 +373,7 @@ func (n *layer) CompleteMultipartUpload(ctx context.Context, p *CompleteMultipar
|
|||
md5Hash := md5.New()
|
||||
for i, part := range p.Parts {
|
||||
partInfo := partsInfo[part.PartNumber]
|
||||
if partInfo == nil || data.UnQuote(part.ETag) != partInfo.GetETag(n.features.MD5Enabled()) {
|
||||
if partInfo == nil || strings.Trim(part.ETag, "\"") != partInfo.GetETag(n.features.MD5Enabled()) {
|
||||
return nil, nil, fmt.Errorf("%w: unknown part %d or etag mismatched", s3errors.GetAPIError(s3errors.ErrInvalidPart), part.PartNumber)
|
||||
}
|
||||
delete(partsInfo, part.PartNumber)
|
||||
|
@ -489,7 +470,7 @@ func (n *layer) CompleteMultipartUpload(ctx context.Context, p *CompleteMultipar
|
|||
n.cache.DeleteObject(addr)
|
||||
}
|
||||
|
||||
return uploadData, extObjInfo, n.treeService.DeleteMultipartUpload(ctx, p.Info.Bkt, multipartInfo)
|
||||
return uploadData, extObjInfo, n.treeService.DeleteMultipartUpload(ctx, p.Info.Bkt, multipartInfo.ID)
|
||||
}
|
||||
|
||||
func (n *layer) ListMultipartUploads(ctx context.Context, p *ListMultipartUploadsParams) (*ListMultipartUploadsInfo, error) {
|
||||
|
@ -565,7 +546,7 @@ func (n *layer) AbortMultipartUpload(ctx context.Context, p *UploadInfoParams) e
|
|||
}
|
||||
}
|
||||
|
||||
return n.treeService.DeleteMultipartUpload(ctx, p.Bkt, multipartInfo)
|
||||
return n.treeService.DeleteMultipartUpload(ctx, p.Bkt, multipartInfo.ID)
|
||||
}
|
||||
|
||||
func (n *layer) ListParts(ctx context.Context, p *ListPartsParams) (*ListPartsInfo, error) {
|
||||
|
@ -587,7 +568,7 @@ func (n *layer) ListParts(ctx context.Context, p *ListPartsParams) (*ListPartsIn
|
|||
|
||||
for _, partInfo := range partsInfo {
|
||||
parts = append(parts, &Part{
|
||||
ETag: data.Quote(partInfo.GetETag(n.features.MD5Enabled())),
|
||||
ETag: partInfo.GetETag(n.features.MD5Enabled()),
|
||||
LastModified: partInfo.Created.UTC().Format(time.RFC3339),
|
||||
PartNumber: partInfo.Number,
|
||||
Size: partInfo.Size,
|
||||
|
|
|
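The CompleteMultipartUpload and ListParts hunks above differ mainly in how ETags are quoted: master wraps stored ETags with data.Quote and unwraps client input with data.UnQuote, while support/v0 trims quotes in place. A minimal sketch of that round trip follows; quote and unQuote here are stand-ins and may not match the data package's exact edge-case behaviour.

```go
package main

import (
	"fmt"
	"strings"
)

// quote/unQuote model the helpers referenced above. S3 clients usually echo
// part ETags wrapped in double quotes, so the complete-multipart handler has
// to unquote them before comparing with the stored part info.
func quote(s string) string { return `"` + s + `"` }

func unQuote(s string) string {
	if len(s) >= 2 && strings.HasPrefix(s, `"`) && strings.HasSuffix(s, `"`) {
		return s[1 : len(s)-1]
	}
	return s
}

func main() {
	stored := "9bb58f26192e4ba00f01e2e7b136bbd8" // ETag kept in the part info
	fromClient := quote(stored)                  // what the client echoes back

	fmt.Println(unQuote(fromClient) == stored)            // true
	fmt.Println(strings.Trim(fromClient, `"`) == stored)  // also true for this input
}
```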
@ -13,11 +13,13 @@ import (
|
|||
"io"
|
||||
"mime"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/cache"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
||||
apiErrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
|
||||
|
@ -26,6 +28,7 @@ import (
|
|||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||
"github.com/minio/sio"
|
||||
"github.com/panjf2000/ants/v2"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
|
@ -46,15 +49,39 @@ type (
|
|||
bktInfo *data.BucketInfo
|
||||
}
|
||||
|
||||
DeleteMarkerError struct {
|
||||
ErrorCode apiErrors.ErrorCode
|
||||
// ListObjectsParamsCommon contains common parameters for ListObjectsV1 and ListObjectsV2.
|
||||
ListObjectsParamsCommon struct {
|
||||
BktInfo *data.BucketInfo
|
||||
Delimiter string
|
||||
Encode string
|
||||
MaxKeys int
|
||||
Prefix string
|
||||
}
|
||||
|
||||
// ListObjectsParamsV1 contains params for ListObjectsV1.
|
||||
ListObjectsParamsV1 struct {
|
||||
ListObjectsParamsCommon
|
||||
Marker string
|
||||
}
|
||||
|
||||
// ListObjectsParamsV2 contains params for ListObjectsV2.
|
||||
ListObjectsParamsV2 struct {
|
||||
ListObjectsParamsCommon
|
||||
ContinuationToken string
|
||||
StartAfter string
|
||||
FetchOwner bool
|
||||
}
|
||||
|
||||
allObjectParams struct {
|
||||
Bucket *data.BucketInfo
|
||||
Delimiter string
|
||||
Prefix string
|
||||
MaxKeys int
|
||||
Marker string
|
||||
ContinuationToken string
|
||||
}
|
||||
)
|
||||
|
||||
func (e DeleteMarkerError) Error() string {
|
||||
return "object is delete marker"
|
||||
}
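The layer surfaces delete markers through DeleteMarkerError with a caller-chosen error code. Below is a minimal sketch of how a caller might map it onto an S3 API error; the handler-side wiring is not shown in this diff, so the function is hypothetical and assumes the standard errors package plus this package's apiErrors import.

// Sketch (not from the diff): translating DeleteMarkerError into an S3 API error.
// Assumed imports: "errors", apiErrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors".
func apiErrorForDeleteMarker(err error) error {
	var dmErr DeleteMarkerError
	if errors.As(err, &dmErr) {
		// The layer sets ErrorCode to ErrNoSuchKey (latest version requested)
		// or ErrMethodNotAllowed (specific version requested).
		return apiErrors.GetAPIError(dmErr.ErrorCode)
	}
	return err
}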
|
||||
|
||||
const (
|
||||
continuationToken = "<continuation-token>"
|
||||
)
|
||||
|
@ -281,30 +308,14 @@ func (n *layer) PutObject(ctx context.Context, p *PutObjectParams) (*data.Extend
|
|||
}
|
||||
}
|
||||
|
||||
if !p.Encryption.Enabled() && len(p.ContentSHA256Hash) > 0 && !auth.IsStandardContentSHA256(p.ContentSHA256Hash) {
|
||||
contentHashBytes, err := hex.DecodeString(p.ContentSHA256Hash)
|
||||
if err != nil {
|
||||
return nil, apiErrors.GetAPIError(apiErrors.ErrContentSHA256Mismatch)
|
||||
}
|
||||
if !bytes.Equal(contentHashBytes, hash) {
|
||||
err = n.objectDelete(ctx, p.BktInfo, id)
|
||||
if err != nil {
|
||||
n.reqLogger(ctx).Debug(logs.FailedToDeleteObject, zap.Stringer("cid", p.BktInfo.CID), zap.Stringer("oid", id))
|
||||
}
|
||||
return nil, apiErrors.GetAPIError(apiErrors.ErrContentSHA256Mismatch)
|
||||
}
|
||||
}
|
||||
|
||||
n.reqLogger(ctx).Debug(logs.PutObject, zap.Stringer("cid", p.BktInfo.CID), zap.Stringer("oid", id))
|
||||
now := TimeNow(ctx)
|
||||
|
||||
newVersion := &data.NodeVersion{
|
||||
BaseNodeVersion: data.BaseNodeVersion{
|
||||
OID: id,
|
||||
ETag: hex.EncodeToString(hash),
|
||||
FilePath: p.Object,
|
||||
Size: p.Size,
|
||||
Created: &now,
|
||||
Owner: &n.gateOwner,
|
||||
Size: size,
|
||||
},
|
||||
IsUnversioned: !bktSettings.VersioningEnabled(),
|
||||
IsCombined: p.Header[MultipartObjectSize] != "",
|
||||
|
@ -377,14 +388,14 @@ func (n *layer) headLastVersionIfNotDeleted(ctx context.Context, bkt *data.Bucke
|
|||
return nil, err
|
||||
}
|
||||
|
||||
if node.IsDeleteMarker {
|
||||
return nil, DeleteMarkerError{ErrorCode: apiErrors.ErrNoSuchKey}
|
||||
if node.IsDeleteMarker() {
|
||||
return nil, fmt.Errorf("%w: found version is delete marker", apiErrors.GetAPIError(apiErrors.ErrNoSuchKey))
|
||||
}
|
||||
|
||||
meta, err := n.objectHead(ctx, bkt, node.OID)
|
||||
if err != nil {
|
||||
if client.IsErrObjectNotFound(err) {
|
||||
return nil, fmt.Errorf("%w: %s; %s", apiErrors.GetAPIError(apiErrors.ErrNoSuchKey), err.Error(), node.OID.EncodeToString())
|
||||
return nil, fmt.Errorf("%w: %s", apiErrors.GetAPIError(apiErrors.ErrNoSuchKey), err.Error())
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
@ -434,10 +445,6 @@ func (n *layer) headVersion(ctx context.Context, bkt *data.BucketInfo, p *HeadOb
|
|||
return extObjInfo, nil
|
||||
}
|
||||
|
||||
if foundVersion.IsDeleteMarker {
|
||||
return nil, DeleteMarkerError{ErrorCode: apiErrors.ErrMethodNotAllowed}
|
||||
}
|
||||
|
||||
meta, err := n.objectHead(ctx, bkt, foundVersion.OID)
|
||||
if err != nil {
|
||||
if client.IsErrObjectNotFound(err) {
|
||||
|
@ -477,8 +484,6 @@ func (n *layer) objectDelete(ctx context.Context, bktInfo *data.BucketInfo, idOb
|
|||
func (n *layer) objectPutAndHash(ctx context.Context, prm PrmObjectCreate, bktInfo *data.BucketInfo) (uint64, oid.ID, []byte, []byte, error) {
|
||||
n.prepareAuthParameters(ctx, &prm.PrmAuth, bktInfo.Owner)
|
||||
prm.ClientCut = n.features.ClientCut()
|
||||
prm.BufferMaxSize = n.features.BufferMaxSizeForPut()
|
||||
prm.WithoutHomomorphicHash = bktInfo.HomomorphicHashDisabled
|
||||
var size uint64
|
||||
hash := sha256.New()
|
||||
md5Hash := md5.New()
|
||||
|
@ -498,6 +503,61 @@ func (n *layer) objectPutAndHash(ctx context.Context, prm PrmObjectCreate, bktIn
|
|||
return size, id, hash.Sum(nil), md5Hash.Sum(nil), nil
|
||||
}
|
||||
|
||||
// ListObjectsV1 returns objects in a bucket for requests of Version 1.
|
||||
func (n *layer) ListObjectsV1(ctx context.Context, p *ListObjectsParamsV1) (*ListObjectsInfoV1, error) {
|
||||
var result ListObjectsInfoV1
|
||||
|
||||
prm := allObjectParams{
|
||||
Bucket: p.BktInfo,
|
||||
Delimiter: p.Delimiter,
|
||||
Prefix: p.Prefix,
|
||||
MaxKeys: p.MaxKeys,
|
||||
Marker: p.Marker,
|
||||
}
|
||||
|
||||
objects, next, err := n.getLatestObjectsVersions(ctx, prm)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if next != nil {
|
||||
result.IsTruncated = true
|
||||
result.NextMarker = objects[len(objects)-1].Name
|
||||
}
|
||||
|
||||
result.Prefixes, result.Objects = triageObjects(objects)
|
||||
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
// ListObjectsV2 returns objects in a bucket for requests of Version 2.
|
||||
func (n *layer) ListObjectsV2(ctx context.Context, p *ListObjectsParamsV2) (*ListObjectsInfoV2, error) {
|
||||
var result ListObjectsInfoV2
|
||||
|
||||
prm := allObjectParams{
|
||||
Bucket: p.BktInfo,
|
||||
Delimiter: p.Delimiter,
|
||||
Prefix: p.Prefix,
|
||||
MaxKeys: p.MaxKeys,
|
||||
Marker: p.StartAfter,
|
||||
ContinuationToken: p.ContinuationToken,
|
||||
}
|
||||
|
||||
objects, next, err := n.getLatestObjectsVersions(ctx, prm)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if next != nil {
|
||||
result.IsTruncated = true
|
||||
result.NextContinuationToken = next.ID.EncodeToString()
|
||||
}
|
||||
|
||||
result.Prefixes, result.Objects = triageObjects(objects)
|
||||
|
||||
return &result, nil
|
||||
}
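Both listings page by MaxKeys; for V2 the continuation token is the OID of the first object of the next page (see NextContinuationToken above). A hedged sketch of walking a whole bucket with the V2 listing follows; the parameter and result fields are the ones defined in this file, but the helper itself is illustrative and not part of the diff.

// Sketch (not from the diff): draining a bucket with ListObjectsV2 pagination.
func listAllObjectsV2(ctx context.Context, n *layer, bkt *data.BucketInfo) ([]*data.ObjectInfo, error) {
	var (
		all    []*data.ObjectInfo
		params = ListObjectsParamsV2{
			ListObjectsParamsCommon: ListObjectsParamsCommon{BktInfo: bkt, MaxKeys: 1000},
		}
	)
	for {
		page, err := n.ListObjectsV2(ctx, &params)
		if err != nil {
			return nil, err
		}
		all = append(all, page.Objects...)
		if !page.IsTruncated {
			return all, nil
		}
		// Continue from the token returned by the previous page.
		params.ContinuationToken = page.NextContinuationToken
	}
}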
|
||||
|
||||
type logWrapper struct {
|
||||
log *zap.Logger
|
||||
}
|
||||
|
@ -506,11 +566,310 @@ func (l *logWrapper) Printf(format string, args ...interface{}) {
|
|||
l.log.Info(fmt.Sprintf(format, args...))
|
||||
}
|
||||
|
||||
func (n *layer) getLatestObjectsVersions(ctx context.Context, p allObjectParams) (objects []*data.ObjectInfo, next *data.ObjectInfo, err error) {
|
||||
if p.MaxKeys == 0 {
|
||||
return nil, nil, nil
|
||||
}
|
||||
|
||||
owner := n.BearerOwner(ctx)
|
||||
cacheKey := cache.CreateObjectsListCacheKey(p.Bucket.CID, p.Prefix, true)
|
||||
nodeVersions := n.cache.GetList(owner, cacheKey)
|
||||
|
||||
if nodeVersions == nil {
|
||||
nodeVersions, err = n.treeService.GetLatestVersionsByPrefix(ctx, p.Bucket, p.Prefix)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
n.cache.PutList(owner, cacheKey, nodeVersions)
|
||||
}
|
||||
|
||||
if len(nodeVersions) == 0 {
|
||||
return nil, nil, nil
|
||||
}
|
||||
|
||||
sort.Slice(nodeVersions, func(i, j int) bool {
|
||||
return nodeVersions[i].FilePath < nodeVersions[j].FilePath
|
||||
})
|
||||
|
||||
poolCtx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
objOutCh, err := n.initWorkerPool(poolCtx, 2, p, nodesGenerator(poolCtx, p, nodeVersions))
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to init worker pool: %w", err)
|
||||
}
|
||||
|
||||
objects = make([]*data.ObjectInfo, 0, p.MaxKeys)
|
||||
|
||||
for obj := range objOutCh {
|
||||
objects = append(objects, obj)
|
||||
}
|
||||
|
||||
sort.Slice(objects, func(i, j int) bool {
|
||||
return objects[i].Name < objects[j].Name
|
||||
})
|
||||
|
||||
if len(objects) > p.MaxKeys {
|
||||
next = objects[p.MaxKeys]
|
||||
objects = objects[:p.MaxKeys]
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func nodesGenerator(ctx context.Context, p allObjectParams, nodeVersions []*data.NodeVersion) <-chan *data.NodeVersion {
|
||||
nodeCh := make(chan *data.NodeVersion)
|
||||
existed := make(map[string]struct{}, len(nodeVersions)) // to squash the same directories
|
||||
|
||||
go func() {
|
||||
var generated int
|
||||
LOOP:
|
||||
for _, node := range nodeVersions {
|
||||
if shouldSkip(node, p, existed) {
|
||||
continue
|
||||
}
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
break LOOP
|
||||
case nodeCh <- node:
|
||||
generated++
|
||||
if generated == p.MaxKeys+1 { // we use maxKeys+1 to be able to know nextMarker/nextContinuationToken
|
||||
break LOOP
|
||||
}
|
||||
}
|
||||
}
|
||||
close(nodeCh)
|
||||
}()
|
||||
|
||||
return nodeCh
|
||||
}
|
||||
|
||||
func (n *layer) initWorkerPool(ctx context.Context, size int, p allObjectParams, input <-chan *data.NodeVersion) (<-chan *data.ObjectInfo, error) {
|
||||
reqLog := n.reqLogger(ctx)
|
||||
pool, err := ants.NewPool(size, ants.WithLogger(&logWrapper{reqLog}))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("coudln't init go pool for listing: %w", err)
|
||||
}
|
||||
objCh := make(chan *data.ObjectInfo)
|
||||
|
||||
go func() {
|
||||
var wg sync.WaitGroup
|
||||
|
||||
LOOP:
|
||||
for node := range input {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
break LOOP
|
||||
default:
|
||||
}
|
||||
|
||||
// We have to make a copy of the pointer to data.NodeVersion
// to get the correct value in the submitted task function.
|
||||
func(node *data.NodeVersion) {
|
||||
wg.Add(1)
|
||||
err = pool.Submit(func() {
|
||||
defer wg.Done()
|
||||
oi := n.objectInfoFromObjectsCacheOrFrostFS(ctx, p.Bucket, node, p.Prefix, p.Delimiter)
|
||||
if oi == nil {
|
||||
// try to get object again
|
||||
if oi = n.objectInfoFromObjectsCacheOrFrostFS(ctx, p.Bucket, node, p.Prefix, p.Delimiter); oi == nil {
|
||||
// do not process objects that are definitely missing in the object service
|
||||
return
|
||||
}
|
||||
}
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
case objCh <- oi:
|
||||
}
|
||||
})
|
||||
if err != nil {
|
||||
wg.Done()
|
||||
reqLog.Warn(logs.FailedToSubmitTaskToPool, zap.Error(err))
|
||||
}
|
||||
}(node)
|
||||
}
|
||||
wg.Wait()
|
||||
close(objCh)
|
||||
pool.Release()
|
||||
}()
|
||||
|
||||
return objCh, nil
|
||||
}
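The functions above form a generator/bounded-worker-pool pipeline: nodesGenerator feeds node versions into a channel, initWorkerPool resolves them concurrently, and getLatestObjectsVersions collects and truncates the result. The same shape, reduced to plain integers, is sketched below; it is illustrative only and not part of the diff, though it uses the same ants pool package.

// Sketch (not from the diff): generator plus bounded worker pool, as used by the listing code.
package main

import (
	"context"
	"fmt"
	"sync"

	"github.com/panjf2000/ants/v2"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Generator: emits work items until done or cancelled.
	in := make(chan int)
	go func() {
		defer close(in)
		for i := 0; i < 10; i++ {
			select {
			case <-ctx.Done():
				return
			case in <- i:
			}
		}
	}()

	// Bounded pool: two workers consume items and publish results.
	pool, err := ants.NewPool(2)
	if err != nil {
		panic(err) // sketch only
	}
	defer pool.Release()

	out := make(chan int)
	go func() {
		var wg sync.WaitGroup
		for v := range in {
			v := v
			wg.Add(1)
			if submitErr := pool.Submit(func() {
				defer wg.Done()
				select {
				case <-ctx.Done():
				case out <- v * v:
				}
			}); submitErr != nil {
				wg.Done()
			}
		}
		wg.Wait()
		close(out)
	}()

	for r := range out {
		fmt.Println(r)
	}
}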
|
||||
|
||||
func (n *layer) bucketNodeVersions(ctx context.Context, bkt *data.BucketInfo, prefix string) ([]*data.NodeVersion, error) {
|
||||
var err error
|
||||
|
||||
owner := n.BearerOwner(ctx)
|
||||
cacheKey := cache.CreateObjectsListCacheKey(bkt.CID, prefix, false)
|
||||
nodeVersions := n.cache.GetList(owner, cacheKey)
|
||||
|
||||
if nodeVersions == nil {
|
||||
nodeVersions, err = n.treeService.GetAllVersionsByPrefix(ctx, bkt, prefix)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("get all versions from tree service: %w", err)
|
||||
}
|
||||
|
||||
n.cache.PutList(owner, cacheKey, nodeVersions)
|
||||
}
|
||||
|
||||
return nodeVersions, nil
|
||||
}
|
||||
|
||||
func (n *layer) getAllObjectsVersions(ctx context.Context, bkt *data.BucketInfo, prefix, delimiter string) (map[string][]*data.ExtendedObjectInfo, error) {
|
||||
nodeVersions, err := n.bucketNodeVersions(ctx, bkt, prefix)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
versions := make(map[string][]*data.ExtendedObjectInfo, len(nodeVersions))
|
||||
|
||||
for _, nodeVersion := range nodeVersions {
|
||||
oi := &data.ObjectInfo{}
|
||||
|
||||
if nodeVersion.IsDeleteMarker() { // delete marker does not match any object in FrostFS
|
||||
oi.ID = nodeVersion.OID
|
||||
oi.Name = nodeVersion.FilePath
|
||||
oi.Owner = nodeVersion.DeleteMarker.Owner
|
||||
oi.Created = nodeVersion.DeleteMarker.Created
|
||||
oi.IsDeleteMarker = true
|
||||
} else {
|
||||
if oi = n.objectInfoFromObjectsCacheOrFrostFS(ctx, bkt, nodeVersion, prefix, delimiter); oi == nil {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
eoi := &data.ExtendedObjectInfo{
|
||||
ObjectInfo: oi,
|
||||
NodeVersion: nodeVersion,
|
||||
}
|
||||
|
||||
objVersions, ok := versions[oi.Name]
|
||||
if !ok {
|
||||
objVersions = []*data.ExtendedObjectInfo{eoi}
|
||||
} else if !oi.IsDir {
|
||||
objVersions = append(objVersions, eoi)
|
||||
}
|
||||
versions[oi.Name] = objVersions
|
||||
}
|
||||
|
||||
return versions, nil
|
||||
}
|
||||
|
||||
func IsSystemHeader(key string) bool {
|
||||
_, ok := api.SystemMetadata[key]
|
||||
return ok || strings.HasPrefix(key, api.FrostFSSystemMetadataPrefix)
|
||||
}
|
||||
|
||||
func shouldSkip(node *data.NodeVersion, p allObjectParams, existed map[string]struct{}) bool {
|
||||
if node.IsDeleteMarker() {
|
||||
return true
|
||||
}
|
||||
|
||||
filePath := node.FilePath
|
||||
if dirName := tryDirectoryName(node, p.Prefix, p.Delimiter); len(dirName) != 0 {
|
||||
filePath = dirName
|
||||
}
|
||||
if _, ok := existed[filePath]; ok {
|
||||
return true
|
||||
}
|
||||
|
||||
if filePath <= p.Marker {
|
||||
return true
|
||||
}
|
||||
|
||||
if p.ContinuationToken != "" {
|
||||
if _, ok := existed[continuationToken]; !ok {
|
||||
if p.ContinuationToken != node.OID.EncodeToString() {
|
||||
return true
|
||||
}
|
||||
existed[continuationToken] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
existed[filePath] = struct{}{}
|
||||
return false
|
||||
}
|
||||
|
||||
func triageObjects(allObjects []*data.ObjectInfo) (prefixes []string, objects []*data.ObjectInfo) {
|
||||
for _, ov := range allObjects {
|
||||
if ov.IsDir {
|
||||
prefixes = append(prefixes, ov.Name)
|
||||
} else {
|
||||
objects = append(objects, ov)
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func triageExtendedObjects(allObjects []*data.ExtendedObjectInfo) (prefixes []string, objects []*data.ExtendedObjectInfo) {
|
||||
for _, ov := range allObjects {
|
||||
if ov.ObjectInfo.IsDir {
|
||||
prefixes = append(prefixes, ov.ObjectInfo.Name)
|
||||
} else {
|
||||
objects = append(objects, ov)
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (n *layer) objectInfoFromObjectsCacheOrFrostFS(ctx context.Context, bktInfo *data.BucketInfo, node *data.NodeVersion, prefix, delimiter string) (oi *data.ObjectInfo) {
|
||||
if oiDir := tryDirectory(bktInfo, node, prefix, delimiter); oiDir != nil {
|
||||
return oiDir
|
||||
}
|
||||
|
||||
owner := n.BearerOwner(ctx)
|
||||
if extInfo := n.cache.GetObject(owner, newAddress(bktInfo.CID, node.OID)); extInfo != nil {
|
||||
return extInfo.ObjectInfo
|
||||
}
|
||||
|
||||
meta, err := n.objectHead(ctx, bktInfo, node.OID)
|
||||
if err != nil {
|
||||
n.reqLogger(ctx).Warn(logs.CouldNotFetchObjectMeta, zap.Error(err))
|
||||
return nil
|
||||
}
|
||||
|
||||
oi = objectInfoFromMeta(bktInfo, meta)
|
||||
oi.MD5Sum = node.MD5
|
||||
n.cache.PutObject(owner, &data.ExtendedObjectInfo{ObjectInfo: oi, NodeVersion: node})
|
||||
|
||||
return oi
|
||||
}
|
||||
|
||||
func tryDirectory(bktInfo *data.BucketInfo, node *data.NodeVersion, prefix, delimiter string) *data.ObjectInfo {
|
||||
dirName := tryDirectoryName(node, prefix, delimiter)
|
||||
if len(dirName) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
return &data.ObjectInfo{
|
||||
ID: node.OID, // to use it as continuation token
|
||||
CID: bktInfo.CID,
|
||||
IsDir: true,
|
||||
IsDeleteMarker: node.IsDeleteMarker(),
|
||||
Bucket: bktInfo.Name,
|
||||
Name: dirName,
|
||||
}
|
||||
}
|
||||
|
||||
// tryDirectoryName forms a directory name from the prefix and delimiter.
// If the node isn't a directory, an empty string is returned.
// This function doesn't check whether the node has the prefix; the caller must do that.
|
||||
func tryDirectoryName(node *data.NodeVersion, prefix, delimiter string) string {
|
||||
if len(delimiter) == 0 {
|
||||
return ""
|
||||
}
|
||||
|
||||
tail := strings.TrimPrefix(node.FilePath, prefix)
|
||||
index := strings.Index(tail, delimiter)
|
||||
if index >= 0 {
|
||||
return prefix + tail[:index+1]
|
||||
}
|
||||
|
||||
return ""
|
||||
}
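A short, hypothetical snippet showing the expected outputs of tryDirectoryName for a few prefix/delimiter combinations, mirroring the table test later in this diff; it assumes the package's fmt and data imports.

// Sketch (not from the diff): behaviour of tryDirectoryName for one FilePath.
func exampleTryDirectoryName() {
	node := &data.NodeVersion{BaseNodeVersion: data.BaseNodeVersion{FilePath: "a/b/c/small.jpg"}}

	fmt.Println(tryDirectoryName(node, "a/", "/"))      // "a/b/"
	fmt.Println(tryDirectoryName(node, "a/b/", "/"))    // "a/b/c/"
	fmt.Println(tryDirectoryName(node, "a/b/c/s", "/")) // "" (no delimiter after the prefix tail)
	fmt.Println(tryDirectoryName(node, "a/", ""))       // "" (no delimiter configured)
}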
|
||||
|
||||
func wrapReader(input io.Reader, bufSize int, f func(buf []byte)) io.Reader {
|
||||
if input == nil {
|
||||
return nil
|
||||
|
|
|
@ -4,7 +4,6 @@ import (
|
|||
"bytes"
|
||||
"crypto/rand"
|
||||
"crypto/sha256"
|
||||
"errors"
|
||||
"io"
|
||||
"testing"
|
||||
|
||||
|
@ -28,25 +27,3 @@ func TestWrapReader(t *testing.T) {
|
|||
require.Equal(t, src, dst)
|
||||
require.Equal(t, h[:], streamHash.Sum(nil))
|
||||
}
|
||||
|
||||
func TestGoroutinesDontLeakInPutAndHash(t *testing.T) {
|
||||
tc := prepareContext(t)
|
||||
l, ok := tc.layer.(*layer)
|
||||
require.True(t, ok)
|
||||
|
||||
content := make([]byte, 128*1024)
|
||||
_, err := rand.Read(content)
|
||||
require.NoError(t, err)
|
||||
payload := bytes.NewReader(content)
|
||||
|
||||
prm := PrmObjectCreate{
|
||||
Filepath: tc.obj,
|
||||
Payload: payload,
|
||||
}
|
||||
|
||||
expErr := errors.New("some error")
|
||||
tc.testFrostFS.SetObjectPutError(tc.obj, expErr)
|
||||
_, _, _, _, err = l.objectPutAndHash(tc.ctx, prm, tc.bktInfo)
|
||||
require.ErrorIs(t, err, expErr)
|
||||
require.Empty(t, payload.Len(), "body must be read out otherwise goroutines can leak in wrapReader")
|
||||
}
|
||||
|
|
|
@ -174,13 +174,13 @@ func (n *layer) getNodeVersion(ctx context.Context, objVersion *ObjectVersion) (
|
|||
}
|
||||
}
|
||||
|
||||
if err == nil && version.IsDeleteMarker && !objVersion.NoErrorOnDeleteMarker {
|
||||
if err == nil && version.IsDeleteMarker() && !objVersion.NoErrorOnDeleteMarker {
|
||||
return nil, fmt.Errorf("%w: found version is delete marker", s3errors.GetAPIError(s3errors.ErrNoSuchKey))
|
||||
} else if errors.Is(err, ErrNodeNotFound) {
|
||||
return nil, fmt.Errorf("%w: %s", s3errors.GetAPIError(s3errors.ErrNoSuchKey), err.Error())
|
||||
}
|
||||
|
||||
if err == nil && version != nil && !version.IsDeleteMarker {
|
||||
if err == nil && version != nil && !version.IsDeleteMarker() {
|
||||
n.reqLogger(ctx).Debug(logs.GetTreeNode,
|
||||
zap.Stringer("cid", objVersion.BktInfo.CID), zap.Stringer("oid", version.OID))
|
||||
}
|
||||
|
|
|
@ -3,7 +3,6 @@ package layer
|
|||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
|
@ -11,21 +10,6 @@ import (
|
|||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||
)
|
||||
|
||||
type VersionsByPrefixStreamMock struct {
|
||||
result []*data.NodeVersion
|
||||
offset int
|
||||
}
|
||||
|
||||
func (s *VersionsByPrefixStreamMock) Next(context.Context) (*data.NodeVersion, error) {
|
||||
if s.offset > len(s.result)-1 {
|
||||
return nil, io.EOF
|
||||
}
|
||||
|
||||
res := s.result[s.offset]
|
||||
s.offset++
|
||||
return res, nil
|
||||
}
|
||||
|
||||
type TreeServiceMock struct {
|
||||
settings map[string]*data.BucketSettings
|
||||
versions map[string]map[string][]*data.NodeVersion
|
||||
|
@ -187,7 +171,7 @@ func (t *TreeServiceMock) GetLatestVersion(_ context.Context, bktInfo *data.Buck
|
|||
return nil, ErrNodeNotFound
|
||||
}
|
||||
|
||||
func (t *TreeServiceMock) InitVersionsByPrefixStream(_ context.Context, bktInfo *data.BucketInfo, prefix string, latestOnly bool) (data.VersionsStream, error) {
|
||||
func (t *TreeServiceMock) GetLatestVersionsByPrefix(_ context.Context, bktInfo *data.BucketInfo, prefix string) ([]*data.NodeVersion, error) {
|
||||
cnrVersionsMap, ok := t.versions[bktInfo.CID.EncodeToString()]
|
||||
if !ok {
|
||||
return nil, ErrNodeNotFound
|
||||
|
@ -200,11 +184,6 @@ func (t *TreeServiceMock) InitVersionsByPrefixStream(_ context.Context, bktInfo
|
|||
continue
|
||||
}
|
||||
|
||||
if !latestOnly {
|
||||
result = append(result, versions...)
|
||||
continue
|
||||
}
|
||||
|
||||
sort.Slice(versions, func(i, j int) bool {
|
||||
return versions[i].ID < versions[j].ID
|
||||
})
|
||||
|
@ -214,9 +193,7 @@ func (t *TreeServiceMock) InitVersionsByPrefixStream(_ context.Context, bktInfo
|
|||
}
|
||||
}
|
||||
|
||||
return &VersionsByPrefixStreamMock{
|
||||
result: result,
|
||||
}, nil
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (t *TreeServiceMock) GetUnversioned(_ context.Context, bktInfo *data.BucketInfo, objectName string) (*data.NodeVersion, error) {
|
||||
|
@ -400,7 +377,7 @@ LOOP:
|
|||
return result, nil
|
||||
}
|
||||
|
||||
func (t *TreeServiceMock) DeleteMultipartUpload(_ context.Context, bktInfo *data.BucketInfo, multipartInfo *data.MultipartInfo) error {
|
||||
func (t *TreeServiceMock) DeleteMultipartUpload(_ context.Context, bktInfo *data.BucketInfo, multipartNodeID uint64) error {
|
||||
cnrMultipartsMap := t.multiparts[bktInfo.CID.EncodeToString()]
|
||||
|
||||
var uploadID string
|
||||
|
@ -408,7 +385,7 @@ func (t *TreeServiceMock) DeleteMultipartUpload(_ context.Context, bktInfo *data
|
|||
LOOP:
|
||||
for key, multiparts := range cnrMultipartsMap {
|
||||
for i, multipart := range multiparts {
|
||||
if multipart.ID == multipartInfo.ID {
|
||||
if multipart.ID == multipartNodeID {
|
||||
uploadID = multipart.UploadID
|
||||
cnrMultipartsMap[key] = append(multiparts[:i], multiparts[i+1:]...)
|
||||
break LOOP
|
||||
|
|
|
@ -54,7 +54,8 @@ type TreeService interface {
|
|||
|
||||
GetVersions(ctx context.Context, bktInfo *data.BucketInfo, objectName string) ([]*data.NodeVersion, error)
|
||||
GetLatestVersion(ctx context.Context, bktInfo *data.BucketInfo, objectName string) (*data.NodeVersion, error)
|
||||
InitVersionsByPrefixStream(ctx context.Context, bktInfo *data.BucketInfo, prefix string, latestOnly bool) (data.VersionsStream, error)
|
||||
GetLatestVersionsByPrefix(ctx context.Context, bktInfo *data.BucketInfo, prefix string) ([]*data.NodeVersion, error)
|
||||
GetAllVersionsByPrefix(ctx context.Context, bktInfo *data.BucketInfo, prefix string) ([]*data.NodeVersion, error)
|
||||
GetUnversioned(ctx context.Context, bktInfo *data.BucketInfo, objectName string) (*data.NodeVersion, error)
|
||||
AddVersion(ctx context.Context, bktInfo *data.BucketInfo, newVersion *data.NodeVersion) (uint64, error)
|
||||
RemoveVersion(ctx context.Context, bktInfo *data.BucketInfo, nodeID uint64) error
|
||||
|
@ -63,7 +64,7 @@ type TreeService interface {
|
|||
GetLock(ctx context.Context, bktInfo *data.BucketInfo, nodeID uint64) (*data.LockInfo, error)
|
||||
|
||||
CreateMultipartUpload(ctx context.Context, bktInfo *data.BucketInfo, info *data.MultipartInfo) error
|
||||
DeleteMultipartUpload(ctx context.Context, bktInfo *data.BucketInfo, info *data.MultipartInfo) error
|
||||
DeleteMultipartUpload(ctx context.Context, bktInfo *data.BucketInfo, multipartNodeID uint64) error
|
||||
GetMultipartUploadsByPrefix(ctx context.Context, bktInfo *data.BucketInfo, prefix string) ([]*data.MultipartInfo, error)
|
||||
GetMultipartUpload(ctx context.Context, bktInfo *data.BucketInfo, objectName, uploadID string) (*data.MultipartInfo, error)
|
||||
|
||||
|
|
|
@ -13,6 +13,39 @@ import (
|
|||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
||||
)
|
||||
|
||||
type (
|
||||
// ListObjectsInfo contains common fields of data for ListObjectsV1 and ListObjectsV2.
|
||||
ListObjectsInfo struct {
|
||||
Prefixes []string
|
||||
Objects []*data.ObjectInfo
|
||||
IsTruncated bool
|
||||
}
|
||||
|
||||
// ListObjectsInfoV1 holds data which ListObjectsV1 returns.
|
||||
ListObjectsInfoV1 struct {
|
||||
ListObjectsInfo
|
||||
NextMarker string
|
||||
}
|
||||
|
||||
// ListObjectsInfoV2 holds data which ListObjectsV2 returns.
|
||||
ListObjectsInfoV2 struct {
|
||||
ListObjectsInfo
|
||||
NextContinuationToken string
|
||||
}
|
||||
|
||||
// ListObjectVersionsInfo stores info and list of objects versions.
|
||||
ListObjectVersionsInfo struct {
|
||||
CommonPrefixes []string
|
||||
IsTruncated bool
|
||||
KeyMarker string
|
||||
NextKeyMarker string
|
||||
NextVersionIDMarker string
|
||||
Version []*data.ExtendedObjectInfo
|
||||
DeleteMarker []*data.ExtendedObjectInfo
|
||||
VersionIDMarker string
|
||||
}
|
||||
)
|
||||
|
||||
// PathSeparator is the path component separator string.
|
||||
const PathSeparator = string(os.PathSeparator)
|
||||
|
||||
|
@ -48,15 +81,16 @@ func objectInfoFromMeta(bkt *data.BucketInfo, meta *object.Object) *data.ObjectI
|
|||
objID, _ := meta.ID()
|
||||
payloadChecksum, _ := meta.PayloadChecksum()
|
||||
return &data.ObjectInfo{
|
||||
ID: objID,
|
||||
CID: bkt.CID,
|
||||
ID: objID,
|
||||
CID: bkt.CID,
|
||||
IsDir: false,
|
||||
|
||||
Bucket: bkt.Name,
|
||||
Name: filepathFromObject(meta),
|
||||
Created: creation,
|
||||
ContentType: mimeType,
|
||||
Headers: headers,
|
||||
Owner: meta.OwnerID(),
|
||||
Owner: *meta.OwnerID(),
|
||||
Size: meta.PayloadSize(),
|
||||
CreationEpoch: meta.CreationEpoch(),
|
||||
HashSum: hex.EncodeToString(payloadChecksum.Value()),
|
||||
|
|
|
@ -1,13 +1,51 @@
|
|||
package layer
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"net/http"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
|
||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
var (
|
||||
defaultTestCreated = time.Now()
|
||||
defaultTestPayload = []byte("test object payload")
|
||||
defaultTestPayloadLength = uint64(len(defaultTestPayload))
|
||||
defaultTestContentType = http.DetectContentType(defaultTestPayload)
|
||||
)
|
||||
|
||||
func newTestInfo(obj oid.ID, bkt *data.BucketInfo, name string, isDir bool) *data.ObjectInfo {
|
||||
var hashSum checksum.Checksum
|
||||
info := &data.ObjectInfo{
|
||||
ID: obj,
|
||||
Name: name,
|
||||
Bucket: bkt.Name,
|
||||
CID: bkt.CID,
|
||||
Size: defaultTestPayloadLength,
|
||||
ContentType: defaultTestContentType,
|
||||
Created: time.Unix(defaultTestCreated.Unix(), 0),
|
||||
Owner: bkt.Owner,
|
||||
Headers: make(map[string]string),
|
||||
HashSum: hex.EncodeToString(hashSum.Value()),
|
||||
}
|
||||
|
||||
if isDir {
|
||||
info.IsDir = true
|
||||
info.Size = 0
|
||||
info.ContentType = ""
|
||||
info.Headers = nil
|
||||
}
|
||||
|
||||
return info
|
||||
}
|
||||
|
||||
func newTestNodeVersion(id oid.ID, name string) *data.NodeVersion {
|
||||
return &data.NodeVersion{
|
||||
BaseNodeVersion: data.BaseNodeVersion{
|
||||
|
@ -18,84 +56,98 @@ func newTestNodeVersion(id oid.ID, name string) *data.NodeVersion {
|
|||
}
|
||||
|
||||
func TestTryDirectory(t *testing.T) {
|
||||
var uid user.ID
|
||||
var id oid.ID
|
||||
var containerID cid.ID
|
||||
|
||||
bkt := &data.BucketInfo{
|
||||
Name: "test-container",
|
||||
CID: containerID,
|
||||
Owner: uid,
|
||||
Created: time.Now(),
|
||||
}
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
prefix string
|
||||
result string
|
||||
result *data.ObjectInfo
|
||||
node *data.NodeVersion
|
||||
delimiter string
|
||||
}{
|
||||
{
|
||||
name: "small.jpg",
|
||||
result: "",
|
||||
result: nil,
|
||||
node: newTestNodeVersion(id, "small.jpg"),
|
||||
},
|
||||
{
|
||||
name: "small.jpg not matched prefix",
|
||||
prefix: "big",
|
||||
result: "",
|
||||
result: nil,
|
||||
node: newTestNodeVersion(id, "small.jpg"),
|
||||
},
|
||||
{
|
||||
name: "small.jpg delimiter",
|
||||
delimiter: "/",
|
||||
result: "",
|
||||
result: nil,
|
||||
node: newTestNodeVersion(id, "small.jpg"),
|
||||
},
|
||||
{
|
||||
name: "test/small.jpg",
|
||||
result: "",
|
||||
result: nil,
|
||||
node: newTestNodeVersion(id, "test/small.jpg"),
|
||||
},
|
||||
{
|
||||
name: "test/small.jpg with prefix and delimiter",
|
||||
prefix: "test/",
|
||||
delimiter: "/",
|
||||
result: "",
|
||||
result: nil,
|
||||
node: newTestNodeVersion(id, "test/small.jpg"),
|
||||
},
|
||||
{
|
||||
name: "a/b/small.jpg",
|
||||
prefix: "a",
|
||||
result: "",
|
||||
result: nil,
|
||||
node: newTestNodeVersion(id, "a/b/small.jpg"),
|
||||
},
|
||||
{
|
||||
name: "a/b/small.jpg",
|
||||
prefix: "a/",
|
||||
delimiter: "/",
|
||||
result: "a/b/",
|
||||
result: newTestInfo(id, bkt, "a/b/", true),
|
||||
node: newTestNodeVersion(id, "a/b/small.jpg"),
|
||||
},
|
||||
{
|
||||
name: "a/b/c/small.jpg",
|
||||
prefix: "a/",
|
||||
delimiter: "/",
|
||||
result: "a/b/",
|
||||
result: newTestInfo(id, bkt, "a/b/", true),
|
||||
node: newTestNodeVersion(id, "a/b/c/small.jpg"),
|
||||
},
|
||||
{
|
||||
name: "a/b/c/small.jpg",
|
||||
prefix: "a/b/c/s",
|
||||
delimiter: "/",
|
||||
result: "",
|
||||
result: nil,
|
||||
node: newTestNodeVersion(id, "a/b/c/small.jpg"),
|
||||
},
|
||||
{
|
||||
name: "a/b/c/big.jpg",
|
||||
prefix: "a/b/",
|
||||
delimiter: "/",
|
||||
result: "a/b/c/",
|
||||
result: newTestInfo(id, bkt, "a/b/c/", true),
|
||||
node: newTestNodeVersion(id, "a/b/c/big.jpg"),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
dirName := tryDirectoryName(tc.node, tc.prefix, tc.delimiter)
|
||||
require.Equal(t, tc.result, dirName)
|
||||
info := tryDirectory(bkt, tc.node, tc.prefix, tc.delimiter)
|
||||
if tc.result != nil {
|
||||
tc.result.Created = time.Time{}
|
||||
tc.result.Owner = user.ID{}
|
||||
}
|
||||
|
||||
require.Equal(t, tc.result, info)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
79 api/layer/versioning.go (new file)
@ -0,0 +1,79 @@
package layer

import (
	"context"
	"sort"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
)

func (n *layer) ListObjectVersions(ctx context.Context, p *ListObjectVersionsParams) (*ListObjectVersionsInfo, error) {
	var (
		allObjects = make([]*data.ExtendedObjectInfo, 0, p.MaxKeys)
		res        = &ListObjectVersionsInfo{}
	)

	versions, err := n.getAllObjectsVersions(ctx, p.BktInfo, p.Prefix, p.Delimiter)
	if err != nil {
		return nil, err
	}

	sortedNames := make([]string, 0, len(versions))
	for k := range versions {
		sortedNames = append(sortedNames, k)
	}
	sort.Strings(sortedNames)

	for _, name := range sortedNames {
		sortedVersions := versions[name]
		sort.Slice(sortedVersions, func(i, j int) bool {
			return sortedVersions[j].NodeVersion.Timestamp < sortedVersions[i].NodeVersion.Timestamp // sort in reverse order
		})

		for i, version := range sortedVersions {
			version.IsLatest = i == 0
			allObjects = append(allObjects, version)
		}
	}

	for i, obj := range allObjects {
		if obj.ObjectInfo.Name >= p.KeyMarker && obj.ObjectInfo.VersionID() >= p.VersionIDMarker {
			allObjects = allObjects[i:]
			break
		}
	}

	res.CommonPrefixes, allObjects = triageExtendedObjects(allObjects)

	if len(allObjects) > p.MaxKeys {
		res.IsTruncated = true
		res.NextKeyMarker = allObjects[p.MaxKeys].ObjectInfo.Name
		res.NextVersionIDMarker = allObjects[p.MaxKeys].ObjectInfo.VersionID()

		allObjects = allObjects[:p.MaxKeys]
		res.KeyMarker = allObjects[p.MaxKeys-1].ObjectInfo.Name
		res.VersionIDMarker = allObjects[p.MaxKeys-1].ObjectInfo.VersionID()
	}

	res.Version, res.DeleteMarker = triageVersions(allObjects)
	return res, nil
}

func triageVersions(objVersions []*data.ExtendedObjectInfo) ([]*data.ExtendedObjectInfo, []*data.ExtendedObjectInfo) {
	if len(objVersions) == 0 {
		return nil, nil
	}

	var resVersion []*data.ExtendedObjectInfo
	var resDelMarkVersions []*data.ExtendedObjectInfo

	for _, version := range objVersions {
		if version.NodeVersion.IsDeleteMarker() {
			resDelMarkVersions = append(resDelMarkVersions, version)
		} else {
			resVersion = append(resVersion, version)
		}
	}

	return resVersion, resDelMarkVersions
}
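A hedged sketch of paging through ListObjectVersions using the markers returned above; the field names come from this file's usage of ListObjectVersionsParams and ListObjectVersionsInfo, but the helper itself is illustrative and not part of the diff.

// Sketch (not from the diff): draining all versions of a bucket page by page.
func listAllVersions(ctx context.Context, n *layer, bkt *data.BucketInfo) ([]*data.ExtendedObjectInfo, error) {
	var (
		all    []*data.ExtendedObjectInfo
		params = ListObjectVersionsParams{BktInfo: bkt, MaxKeys: 1000}
	)
	for {
		page, err := n.ListObjectVersions(ctx, &params)
		if err != nil {
			return nil, err
		}
		all = append(all, page.Version...)
		all = append(all, page.DeleteMarker...)
		if !page.IsTruncated {
			return all, nil
		}
		// Continue from the markers returned by the previous page.
		params.KeyMarker = page.NextKeyMarker
		params.VersionIDMarker = page.NextVersionIDMarker
	}
}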
|
|
@ -12,7 +12,6 @@ import (
|
|||
bearertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer/test"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
|
||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
@ -72,7 +71,7 @@ func (tc *testContext) deleteObject(objectName, versionID string, settings *data
|
|||
}
|
||||
}
|
||||
|
||||
func (tc *testContext) listObjectsV1() []*data.ExtendedNodeVersion {
|
||||
func (tc *testContext) listObjectsV1() []*data.ObjectInfo {
|
||||
res, err := tc.layer.ListObjectsV1(tc.ctx, &ListObjectsParamsV1{
|
||||
ListObjectsParamsCommon: ListObjectsParamsCommon{
|
||||
BktInfo: tc.bktInfo,
|
||||
|
@ -83,7 +82,7 @@ func (tc *testContext) listObjectsV1() []*data.ExtendedNodeVersion {
|
|||
return res.Objects
|
||||
}
|
||||
|
||||
func (tc *testContext) listObjectsV2() []*data.ExtendedNodeVersion {
|
||||
func (tc *testContext) listObjectsV2() []*data.ObjectInfo {
|
||||
res, err := tc.layer.ListObjectsV2(tc.ctx, &ListObjectsParamsV2{
|
||||
ListObjectsParamsCommon: ListObjectsParamsCommon{
|
||||
BktInfo: tc.bktInfo,
|
||||
|
@ -154,7 +153,7 @@ func prepareContext(t *testing.T, cachesConfig ...*CachesConfig) *testContext {
|
|||
tp := NewTestFrostFS(key)
|
||||
|
||||
bktName := "testbucket1"
|
||||
res, err := tp.CreateContainer(ctx, PrmContainerCreate{
|
||||
bktID, err := tp.CreateContainer(ctx, PrmContainerCreate{
|
||||
Name: bktName,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
@ -168,21 +167,19 @@ func prepareContext(t *testing.T, cachesConfig ...*CachesConfig) *testContext {
|
|||
user.IDFromKey(&owner, key.PrivateKey.PublicKey)
|
||||
|
||||
layerCfg := &Config{
|
||||
Cache: NewCache(config),
|
||||
Caches: config,
|
||||
AnonKey: AnonymousKey{Key: key},
|
||||
TreeService: NewTreeService(),
|
||||
Features: &FeatureSettingsMock{},
|
||||
GateOwner: owner,
|
||||
}
|
||||
|
||||
return &testContext{
|
||||
ctx: ctx,
|
||||
layer: NewLayer(logger, tp, layerCfg),
|
||||
bktInfo: &data.BucketInfo{
|
||||
Name: bktName,
|
||||
Owner: owner,
|
||||
CID: res.ContainerID,
|
||||
HomomorphicHashDisabled: res.HomomorphicHashDisabled,
|
||||
Name: bktName,
|
||||
Owner: owner,
|
||||
CID: bktID,
|
||||
},
|
||||
obj: "obj1",
|
||||
t: t,
|
||||
|
@ -289,10 +286,9 @@ func TestVersioningDeleteSpecificObjectVersion(t *testing.T) {
|
|||
tc.getObject(tc.obj, "", true)
|
||||
|
||||
versions := tc.listVersions()
|
||||
require.Len(t, versions.DeleteMarker, 1)
|
||||
for _, ver := range versions.DeleteMarker {
|
||||
if ver.IsLatest {
|
||||
tc.deleteObject(tc.obj, ver.NodeVersion.OID.EncodeToString(), settings)
|
||||
tc.deleteObject(tc.obj, ver.ObjectInfo.VersionID(), settings)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -314,133 +310,3 @@ func TestNoVersioningDeleteObject(t *testing.T) {
|
|||
tc.getObject(tc.obj, "", true)
|
||||
tc.checkListObjects()
|
||||
}
|
||||
|
||||
func TestFilterVersionsByMarker(t *testing.T) {
|
||||
n := 10
|
||||
testOIDs := make([]oid.ID, n)
|
||||
for i := 0; i < n; i++ {
|
||||
testOIDs[i] = oidtest.ID()
|
||||
}
|
||||
|
||||
for _, tc := range []struct {
|
||||
name string
|
||||
objects []*data.ExtendedNodeVersion
|
||||
params *ListObjectVersionsParams
|
||||
expected []*data.ExtendedNodeVersion
|
||||
error bool
|
||||
}{
|
||||
{
|
||||
name: "missed key marker",
|
||||
objects: []*data.ExtendedNodeVersion{
|
||||
{NodeVersion: &data.NodeVersion{BaseNodeVersion: data.BaseNodeVersion{FilePath: "obj0", OID: testOIDs[0]}}},
|
||||
{NodeVersion: &data.NodeVersion{BaseNodeVersion: data.BaseNodeVersion{FilePath: "obj0", OID: testOIDs[1]}}},
|
||||
},
|
||||
params: &ListObjectVersionsParams{KeyMarker: "", VersionIDMarker: "dummy"},
|
||||
expected: []*data.ExtendedNodeVersion{
|
||||
{NodeVersion: &data.NodeVersion{BaseNodeVersion: data.BaseNodeVersion{FilePath: "obj0", OID: testOIDs[0]}}},
|
||||
{NodeVersion: &data.NodeVersion{BaseNodeVersion: data.BaseNodeVersion{FilePath: "obj0", OID: testOIDs[1]}}},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "last version id",
|
||||
objects: []*data.ExtendedNodeVersion{
|
||||
{NodeVersion: &data.NodeVersion{BaseNodeVersion: data.BaseNodeVersion{FilePath: "obj0", OID: testOIDs[0]}}},
|
||||
{NodeVersion: &data.NodeVersion{BaseNodeVersion: data.BaseNodeVersion{FilePath: "obj0", OID: testOIDs[1]}}},
|
||||
},
|
||||
params: &ListObjectVersionsParams{KeyMarker: "obj0", VersionIDMarker: testOIDs[1].EncodeToString()},
|
||||
expected: []*data.ExtendedNodeVersion{},
|
||||
},
|
||||
{
|
||||
name: "same name, different versions",
|
||||
objects: []*data.ExtendedNodeVersion{
|
||||
{NodeVersion: &data.NodeVersion{BaseNodeVersion: data.BaseNodeVersion{FilePath: "obj0", OID: testOIDs[0]}}},
|
||||
{NodeVersion: &data.NodeVersion{BaseNodeVersion: data.BaseNodeVersion{FilePath: "obj0", OID: testOIDs[1]}}},
|
||||
},
|
||||
params: &ListObjectVersionsParams{KeyMarker: "obj0", VersionIDMarker: testOIDs[0].EncodeToString()},
|
||||
expected: []*data.ExtendedNodeVersion{
|
||||
{NodeVersion: &data.NodeVersion{BaseNodeVersion: data.BaseNodeVersion{FilePath: "obj0", OID: testOIDs[1]}}},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "different name, different versions",
|
||||
objects: []*data.ExtendedNodeVersion{
|
||||
{NodeVersion: &data.NodeVersion{BaseNodeVersion: data.BaseNodeVersion{FilePath: "obj0", OID: testOIDs[0]}}},
|
||||
{NodeVersion: &data.NodeVersion{BaseNodeVersion: data.BaseNodeVersion{FilePath: "obj1", OID: testOIDs[1]}}},
|
||||
},
|
||||
params: &ListObjectVersionsParams{KeyMarker: "obj0", VersionIDMarker: testOIDs[0].EncodeToString()},
|
||||
expected: []*data.ExtendedNodeVersion{
|
||||
{NodeVersion: &data.NodeVersion{BaseNodeVersion: data.BaseNodeVersion{FilePath: "obj1", OID: testOIDs[1]}}},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "not matched name alphabetically less",
|
||||
objects: []*data.ExtendedNodeVersion{
|
||||
{NodeVersion: &data.NodeVersion{BaseNodeVersion: data.BaseNodeVersion{FilePath: "obj0", OID: testOIDs[0]}}},
|
||||
{NodeVersion: &data.NodeVersion{BaseNodeVersion: data.BaseNodeVersion{FilePath: "obj1", OID: testOIDs[1]}}},
|
||||
},
|
||||
params: &ListObjectVersionsParams{KeyMarker: "obj", VersionIDMarker: ""},
|
||||
expected: []*data.ExtendedNodeVersion{
|
||||
{NodeVersion: &data.NodeVersion{BaseNodeVersion: data.BaseNodeVersion{FilePath: "obj0", OID: testOIDs[0]}}},
|
||||
{NodeVersion: &data.NodeVersion{BaseNodeVersion: data.BaseNodeVersion{FilePath: "obj1", OID: testOIDs[1]}}},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "not matched name alphabetically less with dummy version id",
|
||||
objects: []*data.ExtendedNodeVersion{
|
||||
{NodeVersion: &data.NodeVersion{BaseNodeVersion: data.BaseNodeVersion{FilePath: "obj0", OID: testOIDs[0]}}},
|
||||
},
|
||||
params: &ListObjectVersionsParams{KeyMarker: "obj", VersionIDMarker: "dummy"},
|
||||
error: true,
|
||||
},
|
||||
{
|
||||
name: "not matched name alphabetically greater",
|
||||
objects: []*data.ExtendedNodeVersion{
|
||||
{NodeVersion: &data.NodeVersion{BaseNodeVersion: data.BaseNodeVersion{FilePath: "obj0", OID: testOIDs[0]}}},
|
||||
{NodeVersion: &data.NodeVersion{BaseNodeVersion: data.BaseNodeVersion{FilePath: "obj1", OID: testOIDs[1]}}},
|
||||
},
|
||||
params: &ListObjectVersionsParams{KeyMarker: "obj2", VersionIDMarker: testOIDs[2].EncodeToString()},
|
||||
expected: []*data.ExtendedNodeVersion{},
|
||||
},
|
||||
{
|
||||
name: "not found version id",
|
||||
objects: []*data.ExtendedNodeVersion{
|
||||
{NodeVersion: &data.NodeVersion{BaseNodeVersion: data.BaseNodeVersion{FilePath: "obj0", OID: testOIDs[0]}}},
|
||||
{NodeVersion: &data.NodeVersion{BaseNodeVersion: data.BaseNodeVersion{FilePath: "obj0", OID: testOIDs[1]}}},
|
||||
{NodeVersion: &data.NodeVersion{BaseNodeVersion: data.BaseNodeVersion{FilePath: "obj1", OID: testOIDs[2]}}},
|
||||
},
|
||||
params: &ListObjectVersionsParams{KeyMarker: "obj0", VersionIDMarker: "dummy"},
|
||||
error: true,
|
||||
},
|
||||
{
|
||||
name: "not found version id, obj last",
|
||||
objects: []*data.ExtendedNodeVersion{
|
||||
{NodeVersion: &data.NodeVersion{BaseNodeVersion: data.BaseNodeVersion{FilePath: "obj0", OID: testOIDs[0]}}},
|
||||
{NodeVersion: &data.NodeVersion{BaseNodeVersion: data.BaseNodeVersion{FilePath: "obj0", OID: testOIDs[1]}}},
|
||||
},
|
||||
params: &ListObjectVersionsParams{KeyMarker: "obj0", VersionIDMarker: "dummy"},
|
||||
error: true,
|
||||
},
|
||||
{
|
||||
name: "not found version id, obj last",
|
||||
objects: []*data.ExtendedNodeVersion{
|
||||
{NodeVersion: &data.NodeVersion{BaseNodeVersion: data.BaseNodeVersion{FilePath: "obj0", OID: testOIDs[0]}}},
|
||||
{NodeVersion: &data.NodeVersion{BaseNodeVersion: data.BaseNodeVersion{FilePath: "obj0", OID: testOIDs[1]}}},
|
||||
{NodeVersion: &data.NodeVersion{BaseNodeVersion: data.BaseNodeVersion{FilePath: "obj1", OID: testOIDs[2]}}},
|
||||
},
|
||||
params: &ListObjectVersionsParams{KeyMarker: "obj0", VersionIDMarker: ""},
|
||||
expected: []*data.ExtendedNodeVersion{
|
||||
{NodeVersion: &data.NodeVersion{BaseNodeVersion: data.BaseNodeVersion{FilePath: "obj1", OID: testOIDs[2]}}},
|
||||
},
|
||||
},
|
||||
} {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
actual, err := filterVersionsByMarker(tc.objects, tc.params)
|
||||
if tc.error {
|
||||
require.Error(t, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tc.expected, actual)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,67 +1,28 @@
|
|||
package middleware
|
||||
|
||||
import (
|
||||
"crypto/elliptic"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/acl"
|
||||
apiErrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
|
||||
frostfsErrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/errors"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
|
||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
type (
|
||||
// Box contains access box and additional info.
|
||||
Box struct {
|
||||
AccessBox *accessbox.Box
|
||||
ClientTime time.Time
|
||||
AuthHeaders *AuthHeader
|
||||
}
|
||||
|
||||
// Center is a user authentication interface.
|
||||
Center interface {
|
||||
// Authenticate validates and authenticates the request.
// Must return ErrNoAuthorizationHeader if the auth header is missing.
|
||||
Authenticate(request *http.Request) (*Box, error)
|
||||
}
|
||||
|
||||
//nolint:revive
|
||||
AuthHeader struct {
|
||||
AccessKeyID string
|
||||
Region string
|
||||
SignatureV4 string
|
||||
}
|
||||
)
|
||||
|
||||
// ErrNoAuthorizationHeader is returned for unauthenticated requests.
|
||||
var ErrNoAuthorizationHeader = errors.New("no authorization header")
|
||||
|
||||
func Auth(center Center, log *zap.Logger) Func {
|
||||
func Auth(center auth.Center, log *zap.Logger) Func {
|
||||
return func(h http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
reqInfo := GetReqInfo(ctx)
|
||||
reqInfo.User = "anon"
|
||||
box, err := center.Authenticate(r)
|
||||
if err != nil {
|
||||
if errors.Is(err, ErrNoAuthorizationHeader) {
|
||||
reqLogOrDefault(ctx, log).Debug(logs.CouldntReceiveAccessBoxForGateKeyRandomKeyWillBeUsed, zap.Error(err))
|
||||
if err == auth.ErrNoAuthorizationHeader {
|
||||
reqLogOrDefault(ctx, log).Debug(logs.CouldntReceiveAccessBoxForGateKeyRandomKeyWillBeUsed)
|
||||
} else {
|
||||
reqLogOrDefault(ctx, log).Error(logs.FailedToPassAuthentication, zap.Error(err))
|
||||
err = frostfsErrors.UnwrapErr(err)
|
||||
if _, ok := err.(apiErrors.Error); !ok {
|
||||
err = apiErrors.GetAPIError(apiErrors.ErrAccessDenied)
|
||||
}
|
||||
if _, wrErr := WriteErrorResponse(w, GetReqInfo(r.Context()), err); wrErr != nil {
|
||||
reqLogOrDefault(ctx, log).Error(logs.FailedToWriteResponse, zap.Error(wrErr))
|
||||
if _, ok := err.(errors.Error); !ok {
|
||||
err = errors.GetAPIError(errors.ErrAccessDenied)
|
||||
}
|
||||
WriteErrorResponse(w, GetReqInfo(r.Context()), err)
|
||||
return
|
||||
}
|
||||
} else {
|
||||
|
@ -70,58 +31,9 @@ func Auth(center Center, log *zap.Logger) Func {
|
|||
ctx = SetClientTime(ctx, box.ClientTime)
|
||||
}
|
||||
ctx = SetAuthHeaders(ctx, box.AuthHeaders)
|
||||
|
||||
if box.AccessBox.Gate.BearerToken != nil {
|
||||
reqInfo.User = bearer.ResolveIssuer(*box.AccessBox.Gate.BearerToken).String()
|
||||
}
|
||||
reqLogOrDefault(ctx, log).Debug(logs.SuccessfulAuth, zap.String("accessKeyID", box.AuthHeaders.AccessKeyID))
|
||||
}
|
||||
|
||||
h.ServeHTTP(w, r.WithContext(ctx))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
type FrostFSIDValidator interface {
|
||||
ValidatePublicKey(key *keys.PublicKey) error
|
||||
}
|
||||
|
||||
func FrostfsIDValidation(frostfsID FrostFSIDValidator, log *zap.Logger) Func {
|
||||
return func(h http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
bd, err := GetBoxData(ctx)
|
||||
if err != nil || bd.Gate.BearerToken == nil {
|
||||
reqLogOrDefault(ctx, log).Debug(logs.AnonRequestSkipFrostfsIDValidation)
|
||||
h.ServeHTTP(w, r)
|
||||
return
|
||||
}
|
||||
|
||||
if err = validateBearerToken(frostfsID, bd.Gate.BearerToken); err != nil {
|
||||
reqLogOrDefault(ctx, log).Error(logs.FrostfsIDValidationFailed, zap.Error(err))
|
||||
if _, wrErr := WriteErrorResponse(w, GetReqInfo(r.Context()), err); wrErr != nil {
|
||||
reqLogOrDefault(ctx, log).Error(logs.FailedToWriteResponse, zap.Error(wrErr))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
h.ServeHTTP(w, r)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func validateBearerToken(frostfsID FrostFSIDValidator, bt *bearer.Token) error {
|
||||
m := new(acl.BearerToken)
|
||||
bt.WriteToV2(m)
|
||||
|
||||
pk, err := keys.NewPublicKeyFromBytes(m.GetSignature().GetKey(), elliptic.P256())
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid bearer token public key: %w", err)
|
||||
}
|
||||
|
||||
if err = frostfsID.ValidatePublicKey(pk); err != nil {
|
||||
return fmt.Errorf("validation data user key failed: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -1,107 +0,0 @@
|
|||
package middleware
|
||||
|
||||
const (
|
||||
ListBucketsOperation = "ListBuckets"
|
||||
|
||||
// bucket operations.
|
||||
|
||||
OptionsOperation = "Options"
|
||||
HeadBucketOperation = "HeadBucket"
|
||||
ListMultipartUploadsOperation = "ListMultipartUploads"
|
||||
GetBucketLocationOperation = "GetBucketLocation"
|
||||
GetBucketPolicyStatusOperation = "GetBucketPolicyStatus"
|
||||
GetBucketPolicyOperation = "GetBucketPolicy"
|
||||
GetBucketLifecycleOperation = "GetBucketLifecycle"
|
||||
GetBucketEncryptionOperation = "GetBucketEncryption"
|
||||
GetBucketCorsOperation = "GetBucketCors"
|
||||
GetBucketACLOperation = "GetBucketACL"
|
||||
GetBucketWebsiteOperation = "GetBucketWebsite"
|
||||
GetBucketAccelerateOperation = "GetBucketAccelerate"
|
||||
GetBucketRequestPaymentOperation = "GetBucketRequestPayment"
|
||||
GetBucketLoggingOperation = "GetBucketLogging"
|
||||
GetBucketReplicationOperation = "GetBucketReplication"
|
||||
GetBucketTaggingOperation = "GetBucketTagging"
|
||||
GetBucketObjectLockConfigOperation = "GetBucketObjectLockConfig"
|
||||
GetBucketVersioningOperation = "GetBucketVersioning"
|
||||
GetBucketNotificationOperation = "GetBucketNotification"
|
||||
ListenBucketNotificationOperation = "ListenBucketNotification"
|
||||
ListBucketObjectVersionsOperation = "ListBucketObjectVersions"
|
||||
ListObjectsV2MOperation = "ListObjectsV2M"
|
||||
ListObjectsV2Operation = "ListObjectsV2"
|
||||
ListObjectsV1Operation = "ListObjectsV1"
|
||||
PutBucketCorsOperation = "PutBucketCors"
|
||||
PutBucketACLOperation = "PutBucketACL"
|
||||
PutBucketLifecycleOperation = "PutBucketLifecycle"
|
||||
PutBucketEncryptionOperation = "PutBucketEncryption"
|
||||
PutBucketPolicyOperation = "PutBucketPolicy"
|
||||
PutBucketObjectLockConfigOperation = "PutBucketObjectLockConfig"
|
||||
PutBucketTaggingOperation = "PutBucketTagging"
|
||||
PutBucketVersioningOperation = "PutBucketVersioning"
|
||||
PutBucketNotificationOperation = "PutBucketNotification"
|
||||
CreateBucketOperation = "CreateBucket"
|
||||
DeleteMultipleObjectsOperation = "DeleteMultipleObjects"
|
||||
PostObjectOperation = "PostObject"
|
||||
DeleteBucketCorsOperation = "DeleteBucketCors"
|
||||
DeleteBucketWebsiteOperation = "DeleteBucketWebsite"
|
||||
DeleteBucketTaggingOperation = "DeleteBucketTagging"
|
||||
DeleteBucketPolicyOperation = "DeleteBucketPolicy"
|
||||
DeleteBucketLifecycleOperation = "DeleteBucketLifecycle"
|
||||
DeleteBucketEncryptionOperation = "DeleteBucketEncryption"
|
||||
DeleteBucketOperation = "DeleteBucket"
|
||||
|
||||
// object operations.
|
||||
|
||||
HeadObjectOperation = "HeadObject"
|
||||
ListPartsOperation = "ListParts"
|
||||
GetObjectACLOperation = "GetObjectACL"
|
||||
GetObjectTaggingOperation = "GetObjectTagging"
|
||||
GetObjectRetentionOperation = "GetObjectRetention"
|
||||
GetObjectLegalHoldOperation = "GetObjectLegalHold"
|
||||
GetObjectAttributesOperation = "GetObjectAttributes"
|
||||
GetObjectOperation = "GetObject"
|
||||
UploadPartCopyOperation = "UploadPartCopy"
|
||||
UploadPartOperation = "UploadPart"
|
||||
PutObjectACLOperation = "PutObjectACL"
|
||||
PutObjectTaggingOperation = "PutObjectTagging"
|
||||
CopyObjectOperation = "CopyObject"
|
||||
PutObjectRetentionOperation = "PutObjectRetention"
|
||||
PutObjectLegalHoldOperation = "PutObjectLegalHold"
|
||||
PutObjectOperation = "PutObject"
|
||||
CompleteMultipartUploadOperation = "CompleteMultipartUpload"
|
||||
CreateMultipartUploadOperation = "CreateMultipartUpload"
|
||||
SelectObjectContentOperation = "SelectObjectContent"
|
||||
AbortMultipartUploadOperation = "AbortMultipartUpload"
|
||||
DeleteObjectTaggingOperation = "DeleteObjectTagging"
|
||||
DeleteObjectOperation = "DeleteObject"
|
||||
)
|
||||
|
||||
const (
|
||||
UploadsQuery = "uploads"
|
||||
LocationQuery = "location"
|
||||
PolicyStatusQuery = "policyStatus"
|
||||
PolicyQuery = "policy"
|
||||
LifecycleQuery = "lifecycle"
|
||||
EncryptionQuery = "encryption"
|
||||
CorsQuery = "cors"
|
||||
ACLQuery = "acl"
|
||||
WebsiteQuery = "website"
|
||||
AccelerateQuery = "accelerate"
|
||||
RequestPaymentQuery = "requestPayment"
|
||||
LoggingQuery = "logging"
|
||||
ReplicationQuery = "replication"
|
||||
TaggingQuery = "tagging"
|
||||
ObjectLockQuery = "object-lock"
|
||||
VersioningQuery = "versioning"
|
||||
NotificationQuery = "notification"
|
||||
EventsQuery = "events"
|
||||
VersionsQuery = "versions"
|
||||
ListTypeQuery = "list-type"
|
||||
MetadataQuery = "metadata"
|
||||
DeleteQuery = "delete"
|
||||
UploadIDQuery = "uploadId"
|
||||
RetentionQuery = "retention"
|
||||
LegalQuery = "legal"
|
||||
AttributesQuery = "attributes"
|
||||
PartNumberQuery = "partNumber"
|
||||
LegalHoldQuery = "legal-hold"
|
||||
)
|
|
@ -9,13 +9,18 @@ import (
|
|||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/metrics"
|
||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
type (
|
||||
UsersStat interface {
|
||||
Update(user, bucket, cnrID string, reqType int, in, out uint64)
|
||||
}
|
||||
|
||||
readCounter struct {
|
||||
io.ReadCloser
|
||||
countBytes uint64
|
||||
|
@ -34,12 +39,8 @@ type (
|
|||
startTime time.Time
|
||||
}
|
||||
|
||||
MetricsSettings interface {
|
||||
ResolveNamespaceAlias(namespace string) string
|
||||
}
|
||||
|
||||
// ContainerIDResolveFunc is a func to resolve container id by name.
|
||||
ContainerIDResolveFunc func(ctx context.Context, bucket string) (cid.ID, error)
|
||||
// BucketResolveFunc is a func to resolve bucket info by name.
|
||||
BucketResolveFunc func(ctx context.Context, bucket string) (*data.BucketInfo, error)
|
||||
|
||||
// cidResolveFunc is a func to resolve CID in Stats handler.
|
||||
cidResolveFunc func(ctx context.Context, reqInfo *ReqInfo) (cnrID string)
|
||||
|
@ -48,14 +49,14 @@ type (
|
|||
const systemPath = "/system"
|
||||
|
||||
// Metrics wraps http handler for api with basic statistics collection.
|
||||
func Metrics(log *zap.Logger, resolveBucket ContainerIDResolveFunc, appMetrics *metrics.AppMetrics, settings MetricsSettings) Func {
|
||||
func Metrics(log *zap.Logger, resolveBucket BucketResolveFunc, appMetrics *metrics.AppMetrics) Func {
|
||||
return func(h http.Handler) http.Handler {
|
||||
return stats(h.ServeHTTP, resolveCID(log, resolveBucket), appMetrics, settings)
|
||||
return stats(h.ServeHTTP, resolveCID(log, resolveBucket), appMetrics)
|
||||
}
|
||||
}
|
||||
|
||||
// Stats is a handler that updates metrics.
|
||||
func stats(f http.HandlerFunc, resolveCID cidResolveFunc, appMetrics *metrics.AppMetrics, settings MetricsSettings) http.HandlerFunc {
|
||||
func stats(f http.HandlerFunc, resolveCID cidResolveFunc, appMetrics *metrics.AppMetrics) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
reqInfo := GetReqInfo(r.Context())
|
||||
|
||||
|
@ -79,9 +80,9 @@ func stats(f http.HandlerFunc, resolveCID cidResolveFunc, appMetrics *metrics.Ap
|
|||
// simply for the fact that it is not human-readable.
|
||||
durationSecs := time.Since(statsWriter.startTime).Seconds()
|
||||
|
||||
user := resolveUser(r.Context())
|
||||
cnrID := resolveCID(r.Context(), reqInfo)
|
||||
appMetrics.UsersAPIStats().Update(reqInfo.User, reqInfo.BucketName, cnrID, settings.ResolveNamespaceAlias(reqInfo.Namespace),
|
||||
requestTypeFromAPI(reqInfo.API), in.countBytes, out.countBytes)
|
||||
appMetrics.Update(user, reqInfo.BucketName, cnrID, requestTypeFromAPI(reqInfo.API), in.countBytes, out.countBytes)
|
||||
|
||||
code := statsWriter.statusCode
|
||||
// A successful request has a 2xx response code
|
||||
|
@@ -103,27 +104,27 @@ func stats(f http.HandlerFunc, resolveCID cidResolveFunc, appMetrics *metrics.Ap
 func requestTypeFromAPI(api string) metrics.RequestType {
 	switch api {
-	case OptionsOperation, HeadObjectOperation, HeadBucketOperation:
+	case "Options", "HeadObject", "HeadBucket":
 		return metrics.HEADRequest
-	case CreateMultipartUploadOperation, UploadPartCopyOperation, UploadPartOperation, CompleteMultipartUploadOperation,
-		PutObjectACLOperation, PutObjectTaggingOperation, CopyObjectOperation, PutObjectRetentionOperation, PutObjectLegalHoldOperation,
-		PutObjectOperation, PutBucketCorsOperation, PutBucketACLOperation, PutBucketLifecycleOperation, PutBucketEncryptionOperation,
-		PutBucketPolicyOperation, PutBucketObjectLockConfigOperation, PutBucketTaggingOperation, PutBucketVersioningOperation,
-		PutBucketNotificationOperation, CreateBucketOperation, PostObjectOperation:
+	case "CreateMultipartUpload", "UploadPartCopy", "UploadPart", "CompleteMultipartUpload",
+		"PutObjectACL", "PutObjectTagging", "CopyObject", "PutObjectRetention", "PutObjectLegalHold",
+		"PutObject", "PutBucketCors", "PutBucketACL", "PutBucketLifecycle", "PutBucketEncryption",
+		"PutBucketPolicy", "PutBucketObjectLockConfig", "PutBucketTagging", "PutBucketVersioning",
+		"PutBucketNotification", "CreateBucket", "PostObject":
 		return metrics.PUTRequest
-	case ListPartsOperation, ListMultipartUploadsOperation, ListObjectsV2MOperation, ListObjectsV2Operation,
-		ListObjectsV1Operation, ListBucketsOperation:
+	case "ListObjectParts", "ListMultipartUploads", "ListObjectsV2M", "ListObjectsV2", "ListBucketVersions",
+		"ListObjectsV1", "ListBuckets":
 		return metrics.LISTRequest
-	case GetObjectACLOperation, GetObjectTaggingOperation, SelectObjectContentOperation, GetObjectRetentionOperation, GetObjectLegalHoldOperation,
-		GetObjectAttributesOperation, GetObjectOperation, GetBucketLocationOperation, GetBucketPolicyOperation,
-		GetBucketLifecycleOperation, GetBucketEncryptionOperation, GetBucketCorsOperation, GetBucketACLOperation,
-		GetBucketWebsiteOperation, GetBucketAccelerateOperation, GetBucketRequestPaymentOperation, GetBucketLoggingOperation,
-		GetBucketReplicationOperation, GetBucketTaggingOperation, GetBucketObjectLockConfigOperation,
-		GetBucketVersioningOperation, GetBucketNotificationOperation, ListenBucketNotificationOperation:
+	case "GetObjectACL", "GetObjectTagging", "SelectObjectContent", "GetObjectRetention", "getobjectlegalhold",
+		"GetObjectAttributes", "GetObject", "GetBucketLocation", "GetBucketPolicy",
+		"GetBucketLifecycle", "GetBucketEncryption", "GetBucketCors", "GetBucketACL",
+		"GetBucketWebsite", "GetBucketAccelerate", "GetBucketRequestPayment", "GetBucketLogging",
+		"GetBucketReplication", "GetBucketTagging", "GetBucketObjectLockConfig",
+		"GetBucketVersioning", "GetBucketNotification", "ListenBucketNotification":
 		return metrics.GETRequest
-	case AbortMultipartUploadOperation, DeleteObjectTaggingOperation, DeleteObjectOperation, DeleteBucketCorsOperation,
-		DeleteBucketWebsiteOperation, DeleteBucketTaggingOperation, DeleteMultipleObjectsOperation, DeleteBucketPolicyOperation,
-		DeleteBucketLifecycleOperation, DeleteBucketEncryptionOperation, DeleteBucketOperation:
+	case "AbortMultipartUpload", "DeleteObjectTagging", "DeleteObject", "DeleteBucketCors",
+		"DeleteBucketWebsite", "DeleteBucketTagging", "DeleteMultipleObjects", "DeleteBucketPolicy",
+		"DeleteBucketLifecycle", "DeleteBucketEncryption", "DeleteBucket":
 		return metrics.DELETERequest
 	default:
 		return metrics.UNKNOWNRequest
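A rough illustration of the classification idea above, as a standalone sketch. It is not the gateway's code: the RequestType values and the small mapping below are simplified stand-ins for metrics.RequestType and the full switch.

package main

import "fmt"

type RequestType int

const (
	UNKNOWNRequest RequestType = iota
	HEADRequest
	PUTRequest
	GETRequest
	DELETERequest
)

// classify mimics the switch above with a tiny subset of operation names.
func classify(api string) RequestType {
	switch api {
	case "HeadObject", "HeadBucket":
		return HEADRequest
	case "PutObject", "CopyObject", "CreateBucket":
		return PUTRequest
	case "GetObject", "GetBucketLocation":
		return GETRequest
	case "DeleteObject", "DeleteBucket":
		return DELETERequest
	default:
		return UNKNOWNRequest
	}
}

func main() {
	for _, op := range []string{"GetObject", "PutObject", "SelectObjectContent"} {
		fmt.Println(op, "->", classify(op))
	}
}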
@@ -131,22 +132,30 @@ func requestTypeFromAPI(api string) metrics.RequestType {
 	}
 
 // resolveCID forms CIDResolveFunc using BucketResolveFunc.
-func resolveCID(log *zap.Logger, resolveContainerID ContainerIDResolveFunc) cidResolveFunc {
+func resolveCID(log *zap.Logger, resolveBucket BucketResolveFunc) cidResolveFunc {
 	return func(ctx context.Context, reqInfo *ReqInfo) (cnrID string) {
-		if reqInfo.BucketName == "" || reqInfo.API == CreateBucketOperation || reqInfo.API == "" {
+		if reqInfo.BucketName == "" || reqInfo.API == "CreateBucket" || reqInfo.API == "" {
 			return ""
 		}
 
-		containerID, err := resolveContainerID(ctx, reqInfo.BucketName)
+		bktInfo, err := resolveBucket(ctx, reqInfo.BucketName)
 		if err != nil {
 			reqLogOrDefault(ctx, log).Debug(logs.FailedToResolveCID, zap.Error(err))
 			return ""
 		}
 
-		return containerID.EncodeToString()
+		return bktInfo.CID.EncodeToString()
 	}
 }
 
+func resolveUser(ctx context.Context) string {
+	user := "anon"
+	if bd, err := GetBoxData(ctx); err == nil && bd.Gate.BearerToken != nil {
+		user = bearer.ResolveIssuer(*bd.Gate.BearerToken).String()
+	}
+	return user
+}
+
 // WriteHeader -- writes http status code.
 func (w *responseWrapper) WriteHeader(code int) {
 	w.Do(func() {
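For a sense of what gets plugged into resolveCID, here is a toy resolver closure keyed by bucket name. It is a hypothetical test helper: the real callback goes through the gateway's bucket/container resolution machinery and returns richer types than a plain string.

package main

import (
	"context"
	"errors"
	"fmt"
)

// resolveFunc stands in for the bucket-resolving callback: name in, container ID out.
type resolveFunc func(ctx context.Context, bucket string) (string, error)

// mapResolver builds a resolver from a fixed table, e.g. for tests.
func mapResolver(table map[string]string) resolveFunc {
	return func(_ context.Context, bucket string) (string, error) {
		if id, ok := table[bucket]; ok {
			return id, nil
		}
		return "", errors.New("bucket not found: " + bucket)
	}
}

func main() {
	resolve := mapResolver(map[string]string{"photos": "stub-container-id"})
	id, err := resolve(context.Background(), "photos")
	fmt.Println(id, err)
}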
@ -1,406 +0,0 @@
|
|||
package middleware
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/elliptic"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
||||
apiErr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
|
||||
"git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
|
||||
"git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
|
||||
"git.frostfs.info/TrueCloudLab/policy-engine/pkg/resource/testutil"
|
||||
"git.frostfs.info/TrueCloudLab/policy-engine/schema/common"
|
||||
"git.frostfs.info/TrueCloudLab/policy-engine/schema/s3"
|
||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||
"github.com/nspcc-dev/neo-go/pkg/util"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
const (
|
||||
QueryVersionID = "versionId"
|
||||
QueryPrefix = "prefix"
|
||||
QueryDelimiter = "delimiter"
|
||||
QueryMaxKeys = "max-keys"
|
||||
)
|
||||
|
||||
type PolicySettings interface {
|
||||
PolicyDenyByDefault() bool
|
||||
ACLEnabled() bool
|
||||
}
|
||||
|
||||
type FrostFSIDInformer interface {
|
||||
GetUserGroupIDs(userHash util.Uint160) ([]string, error)
|
||||
}
|
||||
|
||||
// BucketResolveFunc is a func to resolve bucket info by name.
|
||||
type BucketResolveFunc func(ctx context.Context, bucket string) (*data.BucketInfo, error)
|
||||
|
||||
type PolicyConfig struct {
|
||||
Storage engine.ChainRouter
|
||||
FrostfsID FrostFSIDInformer
|
||||
Settings PolicySettings
|
||||
Domains []string
|
||||
Log *zap.Logger
|
||||
BucketResolver BucketResolveFunc
|
||||
}
|
||||
|
||||
func PolicyCheck(cfg PolicyConfig) Func {
|
||||
return func(h http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
if err := policyCheck(r, cfg); err != nil {
|
||||
reqLogOrDefault(ctx, cfg.Log).Error(logs.PolicyValidationFailed, zap.Error(err))
|
||||
if _, wrErr := WriteErrorResponse(w, GetReqInfo(ctx), err); wrErr != nil {
|
||||
reqLogOrDefault(ctx, cfg.Log).Error(logs.FailedToWriteResponse, zap.Error(wrErr))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
h.ServeHTTP(w, r)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func policyCheck(r *http.Request, cfg PolicyConfig) error {
|
||||
reqType, bktName, objName := getBucketObject(r, cfg.Domains)
|
||||
req, err := getPolicyRequest(r, cfg.FrostfsID, reqType, bktName, objName, cfg.Log)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var bktInfo *data.BucketInfo
|
||||
if reqType != noneType && !strings.HasSuffix(req.Operation(), CreateBucketOperation) {
|
||||
bktInfo, err = cfg.BucketResolver(r.Context(), bktName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
reqInfo := GetReqInfo(r.Context())
|
||||
target := engine.NewRequestTargetWithNamespace(reqInfo.Namespace)
|
||||
if bktInfo != nil {
|
||||
cnrTarget := engine.ContainerTarget(bktInfo.CID.EncodeToString())
|
||||
target.Container = &cnrTarget
|
||||
}
|
||||
|
||||
st, found, err := cfg.Storage.IsAllowed(chain.S3, target, req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !found {
|
||||
st = chain.NoRuleFound
|
||||
}
|
||||
|
||||
switch {
|
||||
case st == chain.Allow:
|
||||
return nil
|
||||
case st != chain.NoRuleFound:
|
||||
return apiErr.GetAPIErrorWithError(apiErr.ErrAccessDenied, fmt.Errorf("policy check: %s", st.String()))
|
||||
}
|
||||
|
||||
isAPE := !cfg.Settings.ACLEnabled()
|
||||
if bktInfo != nil {
|
||||
isAPE = bktInfo.APEEnabled
|
||||
}
|
||||
|
||||
if isAPE && cfg.Settings.PolicyDenyByDefault() {
|
||||
return apiErr.GetAPIErrorWithError(apiErr.ErrAccessDenied, fmt.Errorf("policy check: %s", st.String()))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func getPolicyRequest(r *http.Request, frostfsid FrostFSIDInformer, reqType ReqType, bktName string, objName string, log *zap.Logger) (*testutil.Request, error) {
|
||||
var (
|
||||
owner string
|
||||
groups []string
|
||||
)
|
||||
|
||||
ctx := r.Context()
|
||||
bd, err := GetBoxData(ctx)
|
||||
if err == nil && bd.Gate.BearerToken != nil {
|
||||
pk, err := keys.NewPublicKeyFromBytes(bd.Gate.BearerToken.SigningKeyBytes(), elliptic.P256())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("parse pubclic key from btoken: %w", err)
|
||||
}
|
||||
owner = pk.Address()
|
||||
|
||||
groups, err = frostfsid.GetUserGroupIDs(pk.GetScriptHash())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("get group ids: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
op := determineOperation(r, reqType)
|
||||
var res string
|
||||
switch reqType {
|
||||
case objectType:
|
||||
res = fmt.Sprintf(s3.ResourceFormatS3BucketObject, bktName, objName)
|
||||
default:
|
||||
res = fmt.Sprintf(s3.ResourceFormatS3Bucket, bktName)
|
||||
}
|
||||
|
||||
properties := determineProperties(ctx, reqType, op, owner, groups)
|
||||
|
||||
reqLogOrDefault(r.Context(), log).Debug(logs.PolicyRequest, zap.String("action", op),
|
||||
zap.String("resource", res), zap.Any("properties", properties))
|
||||
|
||||
return testutil.NewRequest(op, testutil.NewResource(res, nil), properties), nil
|
||||
}
|
||||
|
||||
type ReqType int
|
||||
|
||||
const (
|
||||
noneType ReqType = iota
|
||||
bucketType
|
||||
objectType
|
||||
)
|
||||
|
||||
func getBucketObject(r *http.Request, domains []string) (reqType ReqType, bktName string, objName string) {
	for _, domain := range domains {
		ind := strings.Index(r.Host, "."+domain)
		if ind == -1 {
			continue
		}

		bkt := r.Host[:ind]
		if obj := strings.TrimPrefix(r.URL.Path, "/"); obj != "" {
			return objectType, bkt, obj
		}

		return bucketType, bkt, ""
	}

	bktObj := strings.TrimPrefix(r.URL.Path, "/")
	if bktObj == "" {
		return noneType, "", ""
	}

	if ind := strings.IndexByte(bktObj, '/'); ind != -1 {
		return objectType, bktObj[:ind], bktObj[ind+1:]
	}

	return bucketType, bktObj, ""
}
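To make the two addressing styles this function distinguishes concrete, a small self-contained sketch follows; splitBucketObject and the example hosts are made up for illustration and are not this package's code.

package main

import (
	"fmt"
	"net/http/httptest"
	"strings"
)

// splitBucketObject restates the parsing idea above: virtual-hosted style takes
// the bucket from the Host header, path style takes it from the URL path.
func splitBucketObject(host, path string, domains []string) (bkt, obj string) {
	for _, d := range domains {
		if i := strings.Index(host, "."+d); i != -1 {
			return host[:i], strings.TrimPrefix(path, "/")
		}
	}
	parts := strings.SplitN(strings.TrimPrefix(path, "/"), "/", 2)
	bkt = parts[0]
	if len(parts) == 2 {
		obj = parts[1]
	}
	return bkt, obj
}

func main() {
	domains := []string{"s3.example.com"}

	r1 := httptest.NewRequest("GET", "http://photos.s3.example.com/cat.jpg", nil)
	fmt.Println(splitBucketObject(r1.Host, r1.URL.Path, domains)) // photos cat.jpg

	r2 := httptest.NewRequest("GET", "http://s3.example.com/photos/cat.jpg", nil)
	fmt.Println(splitBucketObject(r2.Host, r2.URL.Path, domains)) // photos cat.jpg
}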
|
||||
|
||||
func determineOperation(r *http.Request, reqType ReqType) (operation string) {
|
||||
switch reqType {
|
||||
case objectType:
|
||||
operation = determineObjectOperation(r)
|
||||
case bucketType:
|
||||
operation = determineBucketOperation(r)
|
||||
default:
|
||||
operation = determineGeneralOperation(r)
|
||||
}
|
||||
|
||||
return "s3:" + operation
|
||||
}
|
||||
|
||||
func determineBucketOperation(r *http.Request) string {
|
||||
query := r.URL.Query()
|
||||
switch r.Method {
|
||||
case http.MethodOptions:
|
||||
return OptionsOperation
|
||||
case http.MethodHead:
|
||||
return HeadBucketOperation
|
||||
case http.MethodGet:
|
||||
switch {
|
||||
case query.Has(UploadsQuery):
|
||||
return ListMultipartUploadsOperation
|
||||
case query.Has(LocationQuery):
|
||||
return GetBucketLocationOperation
|
||||
case query.Has(PolicyQuery):
|
||||
return GetBucketPolicyOperation
|
||||
case query.Has(LifecycleQuery):
|
||||
return GetBucketLifecycleOperation
|
||||
case query.Has(EncryptionQuery):
|
||||
return GetBucketEncryptionOperation
|
||||
case query.Has(CorsQuery):
|
||||
return GetBucketCorsOperation
|
||||
case query.Has(ACLQuery):
|
||||
return GetBucketACLOperation
|
||||
case query.Has(WebsiteQuery):
|
||||
return GetBucketWebsiteOperation
|
||||
case query.Has(AccelerateQuery):
|
||||
return GetBucketAccelerateOperation
|
||||
case query.Has(RequestPaymentQuery):
|
||||
return GetBucketRequestPaymentOperation
|
||||
case query.Has(LoggingQuery):
|
||||
return GetBucketLoggingOperation
|
||||
case query.Has(ReplicationQuery):
|
||||
return GetBucketReplicationOperation
|
||||
case query.Has(TaggingQuery):
|
||||
return GetBucketTaggingOperation
|
||||
case query.Has(ObjectLockQuery):
|
||||
return GetBucketObjectLockConfigOperation
|
||||
case query.Has(VersioningQuery):
|
||||
return GetBucketVersioningOperation
|
||||
case query.Has(NotificationQuery):
|
||||
return GetBucketNotificationOperation
|
||||
case query.Has(EventsQuery):
|
||||
return ListenBucketNotificationOperation
|
||||
case query.Has(VersionsQuery):
|
||||
return ListBucketObjectVersionsOperation
|
||||
case query.Get(ListTypeQuery) == "2" && query.Get(MetadataQuery) == "true":
|
||||
return ListObjectsV2MOperation
|
||||
case query.Get(ListTypeQuery) == "2":
|
||||
return ListObjectsV2Operation
|
||||
default:
|
||||
return ListObjectsV1Operation
|
||||
}
|
||||
case http.MethodPut:
|
||||
switch {
|
||||
case query.Has(CorsQuery):
|
||||
return PutBucketCorsOperation
|
||||
case query.Has(ACLQuery):
|
||||
return PutBucketACLOperation
|
||||
case query.Has(LifecycleQuery):
|
||||
return PutBucketLifecycleOperation
|
||||
case query.Has(EncryptionQuery):
|
||||
return PutBucketEncryptionOperation
|
||||
case query.Has(PolicyQuery):
|
||||
return PutBucketPolicyOperation
|
||||
case query.Has(ObjectLockQuery):
|
||||
return PutBucketObjectLockConfigOperation
|
||||
case query.Has(TaggingQuery):
|
||||
return PutBucketTaggingOperation
|
||||
case query.Has(VersioningQuery):
|
||||
return PutBucketVersioningOperation
|
||||
case query.Has(NotificationQuery):
|
||||
return PutBucketNotificationOperation
|
||||
default:
|
||||
return CreateBucketOperation
|
||||
}
|
||||
case http.MethodPost:
|
||||
switch {
|
||||
case query.Has(DeleteQuery):
|
||||
return DeleteMultipleObjectsOperation
|
||||
default:
|
||||
return PostObjectOperation
|
||||
}
|
||||
case http.MethodDelete:
|
||||
switch {
|
||||
case query.Has(CorsQuery):
|
||||
return DeleteBucketCorsOperation
|
||||
case query.Has(WebsiteQuery):
|
||||
return DeleteBucketWebsiteOperation
|
||||
case query.Has(TaggingQuery):
|
||||
return DeleteBucketTaggingOperation
|
||||
case query.Has(PolicyQuery):
|
||||
return DeleteBucketPolicyOperation
|
||||
case query.Has(LifecycleQuery):
|
||||
return DeleteBucketLifecycleOperation
|
||||
case query.Has(EncryptionQuery):
|
||||
return DeleteBucketEncryptionOperation
|
||||
default:
|
||||
return DeleteBucketOperation
|
||||
}
|
||||
}
|
||||
|
||||
return "UnmatchedBucketOperation"
|
||||
}
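The dispatch above keys purely off which query parameter is present on the request. A compressed illustration of that idea (a made-up helper covering three cases, not the gateway's router):

package main

import (
	"fmt"
	"net/url"
)

// bucketOpForGet picks an operation name for a GET on a bucket from its query string.
func bucketOpForGet(rawQuery string) string {
	q, _ := url.ParseQuery(rawQuery)
	switch {
	case q.Has("versioning"):
		return "GetBucketVersioning"
	case q.Get("list-type") == "2":
		return "ListObjectsV2"
	default:
		return "ListObjectsV1"
	}
}

func main() {
	fmt.Println(bucketOpForGet("versioning="))
	fmt.Println(bucketOpForGet("list-type=2&prefix=logs/"))
	fmt.Println(bucketOpForGet(""))
}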
|
||||
|
||||
func determineObjectOperation(r *http.Request) string {
|
||||
query := r.URL.Query()
|
||||
switch r.Method {
|
||||
case http.MethodHead:
|
||||
return HeadObjectOperation
|
||||
case http.MethodGet:
|
||||
switch {
|
||||
case query.Has(UploadIDQuery):
|
||||
return ListPartsOperation
|
||||
case query.Has(ACLQuery):
|
||||
return GetObjectACLOperation
|
||||
case query.Has(TaggingQuery):
|
||||
return GetObjectTaggingOperation
|
||||
case query.Has(RetentionQuery):
|
||||
return GetObjectRetentionOperation
|
||||
case query.Has(LegalQuery):
|
||||
return GetObjectLegalHoldOperation
|
||||
case query.Has(AttributesQuery):
|
||||
return GetObjectAttributesOperation
|
||||
default:
|
||||
return GetObjectOperation
|
||||
}
|
||||
case http.MethodPut:
|
||||
switch {
|
||||
case query.Has(PartNumberQuery) && query.Has(UploadIDQuery) && r.Header.Get("X-Amz-Copy-Source") != "":
|
||||
return UploadPartCopyOperation
|
||||
case query.Has(PartNumberQuery) && query.Has(UploadIDQuery):
|
||||
return UploadPartOperation
|
||||
case query.Has(ACLQuery):
|
||||
return PutObjectACLOperation
|
||||
case query.Has(TaggingQuery):
|
||||
return PutObjectTaggingOperation
|
||||
case r.Header.Get("X-Amz-Copy-Source") != "":
|
||||
return CopyObjectOperation
|
||||
case query.Has(RetentionQuery):
|
||||
return PutObjectRetentionOperation
|
||||
case query.Has(LegalHoldQuery):
|
||||
return PutObjectLegalHoldOperation
|
||||
default:
|
||||
return PutObjectOperation
|
||||
}
|
||||
case http.MethodPost:
|
||||
switch {
|
||||
case query.Has(UploadIDQuery):
|
||||
return CompleteMultipartUploadOperation
|
||||
case query.Has(UploadsQuery):
|
||||
return CreateMultipartUploadOperation
|
||||
default:
|
||||
return SelectObjectContentOperation
|
||||
}
|
||||
case http.MethodDelete:
|
||||
switch {
|
||||
case query.Has(UploadIDQuery):
|
||||
return AbortMultipartUploadOperation
|
||||
case query.Has(TaggingQuery):
|
||||
return DeleteObjectTaggingOperation
|
||||
default:
|
||||
return DeleteObjectOperation
|
||||
}
|
||||
}
|
||||
|
||||
return "UnmatchedObjectOperation"
|
||||
}
|
||||
|
||||
func determineGeneralOperation(r *http.Request) string {
|
||||
if r.Method == http.MethodGet {
|
||||
return ListBucketsOperation
|
||||
}
|
||||
return "UnmatchedOperation"
|
||||
}
|
||||
|
||||
func determineProperties(ctx context.Context, reqType ReqType, op, owner string, groups []string) map[string]string {
|
||||
res := map[string]string{
|
||||
s3.PropertyKeyOwner: owner,
|
||||
common.PropertyKeyFrostFSIDGroupID: chain.FormCondSliceContainsValue(groups),
|
||||
}
|
||||
queries := GetReqInfo(ctx).URL.Query()
|
||||
|
||||
if reqType == objectType {
|
||||
if versionID := queries.Get(QueryVersionID); len(versionID) > 0 {
|
||||
res[s3.PropertyKeyVersionID] = versionID
|
||||
}
|
||||
}
|
||||
|
||||
if reqType == bucketType && (strings.HasSuffix(op, ListObjectsV1Operation) || strings.HasSuffix(op, ListObjectsV2Operation) ||
|
||||
strings.HasSuffix(op, ListBucketObjectVersionsOperation) || strings.HasSuffix(op, ListMultipartUploadsOperation)) {
|
||||
if prefix := queries.Get(QueryPrefix); len(prefix) > 0 {
|
||||
res[s3.PropertyKeyPrefix] = prefix
|
||||
}
|
||||
if delimiter := queries.Get(QueryDelimiter); len(delimiter) > 0 {
|
||||
res[s3.PropertyKeyDelimiter] = delimiter
|
||||
}
|
||||
if maxKeys := queries.Get(QueryMaxKeys); len(maxKeys) > 0 {
|
||||
res[s3.PropertyKeyMaxKeys] = maxKeys
|
||||
}
|
||||
}
|
||||
|
||||
return res
|
||||
}
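A small sketch of the property-map construction shown above, for a listing request. The key names below are illustrative strings, not the s3 schema constants, and the owner value is a placeholder.

package main

import (
	"fmt"
	"net/url"
)

// listProperties mirrors the idea above: copy the listing-related query
// parameters into the property map a policy engine would evaluate.
func listProperties(owner string, query url.Values) map[string]string {
	props := map[string]string{"Owner": owner}
	for _, key := range []string{"prefix", "delimiter", "max-keys"} {
		if v := query.Get(key); v != "" {
			props[key] = v
		}
	}
	return props
}

func main() {
	q, _ := url.ParseQuery("list-type=2&prefix=backups/&max-keys=100")
	fmt.Println(listProperties("owner-address-placeholder", q))
}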
|
|
@ -9,7 +9,6 @@ import (
|
|||
"strings"
|
||||
"sync"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
|
||||
treepool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree"
|
||||
"github.com/go-chi/chi/v5"
|
||||
|
@ -38,8 +37,6 @@ type (
|
|||
ObjectName string // Object name
|
||||
TraceID string // Trace ID
|
||||
URL *url.URL // Request url
|
||||
Namespace string
|
||||
User string // User owner id
|
||||
tags []KeyVal // Any additional info not accommodated by above fields
|
||||
}
|
||||
|
||||
|
@@ -189,19 +186,11 @@ func GetReqLog(ctx context.Context) *zap.Logger {
 	return nil
 }
 
-type RequestSettings interface {
-	NamespaceHeader() string
-	ResolveNamespaceAlias(string) string
-}
-
-func Request(log *zap.Logger, settings RequestSettings) Func {
+func Request(log *zap.Logger) Func {
 	return func(h http.Handler) http.Handler {
 		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 			// generate random UUIDv4
-			id, err := uuid.NewRandom()
-			if err != nil {
-				log.Error(logs.FailedToGenerateRequestID, zap.Error(err))
-			}
+			id, _ := uuid.NewRandom()
 
 			// set request id into response header
 			// also we have to set request id here
|
@ -211,7 +200,6 @@ func Request(log *zap.Logger, settings RequestSettings) Func {
|
|||
// set request info into context
|
||||
// bucket name and object will be set in reqInfo later (limitation of go-chi)
|
||||
reqInfo := NewReqInfo(w, r, ObjectRequest{})
|
||||
reqInfo.Namespace = settings.ResolveNamespaceAlias(r.Header.Get(settings.NamespaceHeader()))
|
||||
r = r.WithContext(SetReqInfo(r.Context(), reqInfo))
|
||||
|
||||
// set request id into gRPC meta header
|
||||
|
@ -225,7 +213,7 @@ func Request(log *zap.Logger, settings RequestSettings) Func {
|
|||
r = r.WithContext(SetReqLogger(r.Context(), reqLogger))
|
||||
|
||||
reqLogger.Info(logs.RequestStart, zap.String("host", r.Host),
|
||||
zap.String("remote_host", reqInfo.RemoteHost), zap.String("namespace", reqInfo.Namespace))
|
||||
zap.String("remote_host", reqInfo.RemoteHost))
|
||||
|
||||
// continue execution
|
||||
h.ServeHTTP(w, r)
|
||||
|
@ -242,10 +230,8 @@ func AddBucketName(l *zap.Logger) Func {
|
|||
reqInfo := GetReqInfo(ctx)
|
||||
reqInfo.BucketName = chi.URLParam(r, BucketURLPrm)
|
||||
|
||||
if reqInfo.BucketName != "" {
|
||||
reqLogger := reqLogOrDefault(ctx, l)
|
||||
r = r.WithContext(SetReqLogger(ctx, reqLogger.With(zap.String("bucket", reqInfo.BucketName))))
|
||||
}
|
||||
reqLogger := reqLogOrDefault(ctx, l)
|
||||
r = r.WithContext(SetReqLogger(ctx, reqLogger.With(zap.String("bucket", reqInfo.BucketName))))
|
||||
|
||||
h.ServeHTTP(w, r)
|
||||
})
|
||||
|
@ -258,26 +244,13 @@ func AddObjectName(l *zap.Logger) Func {
|
|||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
reqInfo := GetReqInfo(ctx)
|
||||
reqLogger := reqLogOrDefault(ctx, l)
|
||||
|
||||
rctx := chi.RouteContext(ctx)
|
||||
// trim leading slash (always present)
|
||||
reqInfo.ObjectName = rctx.RoutePath[1:]
|
||||
|
||||
if r.URL.RawPath != "" {
|
||||
// we have to do this because of
|
||||
// https://github.com/go-chi/chi/issues/641
|
||||
// https://github.com/go-chi/chi/issues/642
|
||||
if obj, err := url.PathUnescape(reqInfo.ObjectName); err != nil {
|
||||
reqLogger.Warn(logs.FailedToUnescapeObjectName, zap.Error(err))
|
||||
} else {
|
||||
reqInfo.ObjectName = obj
|
||||
}
|
||||
}
|
||||
|
||||
if reqInfo.ObjectName != "" {
|
||||
r = r.WithContext(SetReqLogger(ctx, reqLogger.With(zap.String("object", reqInfo.ObjectName))))
|
||||
}
|
||||
reqLogger := reqLogOrDefault(ctx, l)
|
||||
r = r.WithContext(SetReqLogger(ctx, reqLogger.With(zap.String("object", reqInfo.ObjectName))))
|
||||
|
||||
h.ServeHTTP(w, r)
|
||||
})
|
||||
|
@ -312,7 +285,7 @@ func getSourceIP(r *http.Request) string {
|
|||
if match := forRegex.FindStringSubmatch(fwd); len(match) > 1 {
|
||||
// IPv6 addresses in Forwarded headers are quoted-strings. We strip
|
||||
// these quotes.
|
||||
addr = data.UnQuote(match[1])
|
||||
addr = strings.Trim(match[1], `"`)
|
||||
}
|
||||
}
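For context on the Forwarded header handling above, a minimal sketch of extracting the client address from the for= directive. The regular expression here is an approximation of RFC 7239 syntax and may differ from the pattern the gateway actually compiles.

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// forRegex approximates the RFC 7239 "for=" directive match; illustrative only.
var forRegex = regexp.MustCompile(`(?i)for=("?[^;,"]+"?)`)

func sourceFromForwarded(header string) string {
	m := forRegex.FindStringSubmatch(header)
	if len(m) < 2 {
		return ""
	}
	// IPv6 addresses arrive as quoted-strings, so strip the quotes.
	return strings.Trim(m[1], `"`)
}

func main() {
	fmt.Println(sourceFromForwarded(`for="[2001:db8::1]:4711"`))
	fmt.Println(sourceFromForwarded("for=192.0.2.60;proto=http"))
}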
|
||||
|
||||
|
|
|
@ -118,8 +118,7 @@ var s3ErrorResponseMap = map[string]string{
|
|||
}
|
||||
|
||||
// WriteErrorResponse writes error headers.
|
||||
// returns http error code and error in case of failure of response writing.
|
||||
func WriteErrorResponse(w http.ResponseWriter, reqInfo *ReqInfo, err error) (int, error) {
|
||||
func WriteErrorResponse(w http.ResponseWriter, reqInfo *ReqInfo, err error) int {
|
||||
code := http.StatusInternalServerError
|
||||
|
||||
if e, ok := err.(errors.Error); ok {
|
||||
|
@ -130,19 +129,23 @@ func WriteErrorResponse(w http.ResponseWriter, reqInfo *ReqInfo, err error) (int
|
|||
// Set retry-after header to indicate user-agents to retry request after 120secs.
|
||||
// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After
|
||||
w.Header().Set(hdrRetryAfter, "120")
|
||||
case "AccessDenied":
|
||||
// TODO process when the request is from browser and also if browser
|
||||
}
|
||||
}
|
||||
|
||||
// Generates error response.
|
||||
errorResponse := getAPIErrorResponse(reqInfo, err)
|
||||
encodedErrorResponse, err := EncodeResponse(errorResponse)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("encode response: %w", err)
|
||||
}
|
||||
if err = WriteResponse(w, code, encodedErrorResponse, MimeXML); err != nil {
|
||||
return 0, fmt.Errorf("write response: %w", err)
|
||||
}
|
||||
return code, nil
|
||||
encodedErrorResponse := EncodeResponse(errorResponse)
|
||||
WriteResponse(w, code, encodedErrorResponse, MimeXML)
|
||||
return code
|
||||
}
|
||||
|
||||
// WriteErrorResponseNoHeader writes XML encoded error to the response body.
|
||||
func WriteErrorResponseNoHeader(w http.ResponseWriter, reqInfo *ReqInfo, err error) {
|
||||
errorResponse := getAPIErrorResponse(reqInfo, err)
|
||||
encodedErrorResponse := EncodeResponse(errorResponse)
|
||||
WriteResponseBody(w, encodedErrorResponse)
|
||||
}
|
||||
|
||||
// Write http common headers.
|
||||
|
@ -163,7 +166,7 @@ func removeSensitiveHeaders(h http.Header) {
|
|||
}
|
||||
|
||||
// WriteResponse writes given statusCode and response into w (with mType header if set).
|
||||
func WriteResponse(w http.ResponseWriter, statusCode int, response []byte, mType mimeType) error {
|
||||
func WriteResponse(w http.ResponseWriter, statusCode int, response []byte, mType mimeType) {
|
||||
setCommonHeaders(w)
|
||||
if mType != MimeNone {
|
||||
w.Header().Set(hdrContentType, string(mType))
|
||||
|
@ -171,46 +174,37 @@ func WriteResponse(w http.ResponseWriter, statusCode int, response []byte, mType
|
|||
w.Header().Set(hdrContentLength, strconv.Itoa(len(response)))
|
||||
w.WriteHeader(statusCode)
|
||||
if response == nil {
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
return WriteResponseBody(w, response)
|
||||
WriteResponseBody(w, response)
|
||||
}
|
||||
|
||||
// WriteResponseBody writes response into w.
|
||||
func WriteResponseBody(w http.ResponseWriter, response []byte) error {
|
||||
if _, err := w.Write(response); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
func WriteResponseBody(w http.ResponseWriter, response []byte) {
|
||||
_, _ = w.Write(response)
|
||||
if flusher, ok := w.(http.Flusher); ok {
|
||||
flusher.Flush()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// EncodeResponse encodes the response headers into XML format.
|
||||
func EncodeResponse(response interface{}) ([]byte, error) {
|
||||
func EncodeResponse(response interface{}) []byte {
|
||||
var bytesBuffer bytes.Buffer
|
||||
bytesBuffer.WriteString(xml.Header)
|
||||
if err := xml.NewEncoder(&bytesBuffer).Encode(response); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return bytesBuffer.Bytes(), nil
|
||||
_ = xml.
|
||||
NewEncoder(&bytesBuffer).
|
||||
Encode(response)
|
||||
return bytesBuffer.Bytes()
|
||||
}
|
||||
|
||||
// EncodeResponseNoHeader encodes response without setting xml.Header.
|
||||
// Should be used with periodicXMLWriter which sends xml.Header to the client
|
||||
// with whitespaces to keep connection alive.
|
||||
func EncodeResponseNoHeader(response interface{}) ([]byte, error) {
|
||||
func EncodeResponseNoHeader(response interface{}) []byte {
|
||||
var bytesBuffer bytes.Buffer
|
||||
if err := xml.NewEncoder(&bytesBuffer).Encode(response); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return bytesBuffer.Bytes(), nil
|
||||
_ = xml.NewEncoder(&bytesBuffer).Encode(response)
|
||||
return bytesBuffer.Bytes()
|
||||
}
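To show the shape of the XML encoding done by these helpers, a self-contained sketch follows. The errorBody struct is a stand-in for the gateway's error payload type, not its actual definition.

package main

import (
	"bytes"
	"encoding/xml"
	"fmt"
)

// errorBody is a stand-in for the gateway's XML error payload.
type errorBody struct {
	XMLName xml.Name `xml:"Error"`
	Code    string   `xml:"Code"`
	Message string   `xml:"Message"`
}

// encode prepends the XML header and serializes the value, as EncodeResponse does above.
func encode(v interface{}) ([]byte, error) {
	var buf bytes.Buffer
	buf.WriteString(xml.Header)
	if err := xml.NewEncoder(&buf).Encode(v); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}

func main() {
	out, err := encode(errorBody{Code: "NoSuchKey", Message: "The specified key does not exist."})
	fmt.Println(string(out), err)
}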
|
||||
|
||||
// EncodeToResponse encodes the response into ResponseWriter.
|
||||
|
@ -242,8 +236,8 @@ func EncodeToResponseNoHeader(w http.ResponseWriter, response interface{}) error
|
|||
|
||||
// WriteSuccessResponseHeadersOnly writes HTTP (200) OK response with no data
|
||||
// to the client.
|
||||
func WriteSuccessResponseHeadersOnly(w http.ResponseWriter) error {
|
||||
return WriteResponse(w, http.StatusOK, nil, MimeNone)
|
||||
func WriteSuccessResponseHeadersOnly(w http.ResponseWriter) {
|
||||
WriteResponse(w, http.StatusOK, nil, MimeNone)
|
||||
}
|
||||
|
||||
// Error -- Returns S3 error string.
|
||||
|
@ -327,20 +321,12 @@ func LogSuccessResponse(l *zap.Logger) Func {
|
|||
reqLogger := reqLogOrDefault(ctx, l)
|
||||
reqInfo := GetReqInfo(ctx)
|
||||
|
||||
fields := make([]zap.Field, 0, 6)
|
||||
fields = append(fields,
|
||||
zap.Int("status", lw.statusCode),
|
||||
zap.String("description", http.StatusText(lw.statusCode)),
|
||||
fields := []zap.Field{
|
||||
zap.String("method", reqInfo.API),
|
||||
)
|
||||
|
||||
if reqInfo.BucketName != "" {
|
||||
fields = append(fields, zap.String("bucket", reqInfo.BucketName))
|
||||
}
|
||||
if reqInfo.ObjectName != "" {
|
||||
fields = append(fields, zap.String("object", reqInfo.ObjectName))
|
||||
}
|
||||
|
||||
zap.String("bucket", reqInfo.BucketName),
|
||||
zap.String("object", reqInfo.ObjectName),
|
||||
zap.Int("status", lw.statusCode),
|
||||
zap.String("description", http.StatusText(lw.statusCode))}
|
||||
if traceID, err := trace.TraceIDFromHex(reqInfo.TraceID); err == nil && traceID.IsValid() {
|
||||
fields = append(fields, zap.String("trace_id", reqInfo.TraceID))
|
||||
}
|
||||
|
|
|
@ -5,6 +5,7 @@ import (
|
|||
"fmt"
|
||||
"time"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
|
||||
)
|
||||
|
||||
|
@ -36,8 +37,8 @@ func GetBoxData(ctx context.Context) (*accessbox.Box, error) {
|
|||
}
|
||||
|
||||
// GetAuthHeaders extracts auth.AuthHeader from context.
|
||||
func GetAuthHeaders(ctx context.Context) (*AuthHeader, error) {
|
||||
authHeaders, ok := ctx.Value(authHeadersKey).(*AuthHeader)
|
||||
func GetAuthHeaders(ctx context.Context) (*auth.AuthHeader, error) {
|
||||
authHeaders, ok := ctx.Value(authHeadersKey).(*auth.AuthHeader)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("couldn't get auth headers from context")
|
||||
}
|
||||
|
@ -61,7 +62,7 @@ func SetBoxData(ctx context.Context, box *accessbox.Box) context.Context {
|
|||
}
|
||||
|
||||
// SetAuthHeaders sets auth.AuthHeader in the context.
|
||||
func SetAuthHeaders(ctx context.Context, header *AuthHeader) context.Context {
|
||||
func SetAuthHeaders(ctx context.Context, header *auth.AuthHeader) context.Context {
|
||||
return context.WithValue(ctx, authHeadersKey, header)
|
||||
}
|
||||
|
||||
|
|
|
@@ -136,11 +136,10 @@ func (c *Controller) Subscribe(_ context.Context, topic string, handler layer.Ms
 	ch := make(chan *nats.Msg, 1)
 
 	c.mu.RLock()
-	_, ok := c.handlers[topic]
-	c.mu.RUnlock()
-	if ok {
+	if _, ok := c.handlers[topic]; ok {
 		return fmt.Errorf("already subscribed to topic '%s'", topic)
 	}
+	c.mu.RUnlock()
 
 	if _, err := c.jsClient.AddStream(&nats.StreamConfig{Name: topic}); err != nil {
 		return fmt.Errorf("add stream: %w", err)
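As general background for this hunk, a sketch of the usual pattern for guarding a subscription map with sync.RWMutex. This is a generic illustration, not the controller's code; the registry type and topics below are made up.

package main

import (
	"fmt"
	"sync"
)

type registry struct {
	mu       sync.RWMutex
	handlers map[string]func([]byte)
}

func (r *registry) subscribe(topic string, h func([]byte)) error {
	r.mu.RLock()
	_, exists := r.handlers[topic]
	r.mu.RUnlock() // always release the read lock before any early return

	if exists {
		return fmt.Errorf("already subscribed to topic '%s'", topic)
	}

	// A real implementation would re-check under the write lock to close the
	// window between the read above and the write below.
	r.mu.Lock()
	defer r.mu.Unlock()
	r.handlers[topic] = h
	return nil
}

func main() {
	r := &registry{handlers: map[string]func([]byte){}}
	fmt.Println(r.subscribe("events", func([]byte) {}))
	fmt.Println(r.subscribe("events", func([]byte) {}))
}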
@ -6,7 +6,6 @@ import (
|
|||
"fmt"
|
||||
"sync"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
|
||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ns"
|
||||
|
@ -29,21 +28,12 @@ type FrostFS interface {
|
|||
SystemDNS(context.Context) (string, error)
|
||||
}
|
||||
|
||||
type Settings interface {
|
||||
FormContainerZone(ns string) (zone string, isDefault bool)
|
||||
}
|
||||
|
||||
type Config struct {
|
||||
FrostFS FrostFS
|
||||
RPCAddress string
|
||||
Settings Settings
|
||||
}
|
||||
|
||||
type BucketResolver struct {
|
||||
rpcAddress string
|
||||
frostfs FrostFS
|
||||
settings Settings
|
||||
|
||||
mu sync.RWMutex
|
||||
resolvers []*Resolver
|
||||
}
|
||||
|
@ -68,9 +58,7 @@ func NewBucketResolver(resolverNames []string, cfg *Config) (*BucketResolver, er
|
|||
}
|
||||
|
||||
return &BucketResolver{
|
||||
rpcAddress: cfg.RPCAddress,
|
||||
frostfs: cfg.FrostFS,
|
||||
resolvers: resolvers,
|
||||
resolvers: resolvers,
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
@ -112,7 +100,7 @@ func (r *BucketResolver) Resolve(ctx context.Context, bktName string) (cnrID cid
|
|||
return cnrID, ErrNoResolvers
|
||||
}
|
||||
|
||||
func (r *BucketResolver) UpdateResolvers(resolverNames []string) error {
|
||||
func (r *BucketResolver) UpdateResolvers(resolverNames []string, cfg *Config) error {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
|
||||
|
@ -120,12 +108,6 @@ func (r *BucketResolver) UpdateResolvers(resolverNames []string) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
cfg := &Config{
|
||||
FrostFS: r.frostfs,
|
||||
RPCAddress: r.rpcAddress,
|
||||
Settings: r.settings,
|
||||
}
|
||||
|
||||
resolvers, err := createResolvers(resolverNames, cfg)
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -152,37 +134,28 @@ func (r *BucketResolver) equals(resolverNames []string) bool {
|
|||
func newResolver(name string, cfg *Config) (*Resolver, error) {
|
||||
switch name {
|
||||
case DNSResolver:
|
||||
return NewDNSResolver(cfg.FrostFS, cfg.Settings)
|
||||
return NewDNSResolver(cfg.FrostFS)
|
||||
case NNSResolver:
|
||||
return NewNNSResolver(cfg.RPCAddress, cfg.Settings)
|
||||
return NewNNSResolver(cfg.RPCAddress)
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown resolver: %s", name)
|
||||
}
|
||||
}
|
||||
|
||||
func NewDNSResolver(frostFS FrostFS, settings Settings) (*Resolver, error) {
|
||||
func NewDNSResolver(frostFS FrostFS) (*Resolver, error) {
|
||||
if frostFS == nil {
|
||||
return nil, fmt.Errorf("pool must not be nil for DNS resolver")
|
||||
}
|
||||
if settings == nil {
|
||||
return nil, fmt.Errorf("resolver settings must not be nil for DNS resolver")
|
||||
}
|
||||
|
||||
var dns ns.DNS
|
||||
|
||||
resolveFunc := func(ctx context.Context, name string) (cid.ID, error) {
|
||||
var err error
|
||||
reqInfo := middleware.GetReqInfo(ctx)
|
||||
|
||||
zone, isDefault := settings.FormContainerZone(reqInfo.Namespace)
|
||||
if isDefault {
|
||||
zone, err = frostFS.SystemDNS(ctx)
|
||||
if err != nil {
|
||||
return cid.ID{}, fmt.Errorf("read system DNS parameter of the FrostFS: %w", err)
|
||||
}
|
||||
domain, err := frostFS.SystemDNS(ctx)
|
||||
if err != nil {
|
||||
return cid.ID{}, fmt.Errorf("read system DNS parameter of the FrostFS: %w", err)
|
||||
}
|
||||
|
||||
domain := name + "." + zone
|
||||
domain = name + "." + domain
|
||||
cnrID, err := dns.ResolveContainerName(domain)
|
||||
if err != nil {
|
||||
return cid.ID{}, fmt.Errorf("couldn't resolve container '%s' as '%s': %w", name, domain, err)
|
||||
|
@ -196,13 +169,10 @@ func NewDNSResolver(frostFS FrostFS, settings Settings) (*Resolver, error) {
|
|||
}, nil
|
||||
}
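The DNS resolver above builds the lookup name from the bucket name plus a zone (either a namespace-derived zone or the system DNS domain reported by the network). A trivial sketch of that composition, with purely illustrative values:

package main

import "fmt"

// containerDomain shows how the DNS lookup name is composed; values are examples only.
func containerDomain(bucket, zone string) string {
	return bucket + "." + zone
}

func main() {
	fmt.Println(containerDomain("photos", "container"))          // system DNS zone
	fmt.Println(containerDomain("photos", "tenant-a.container")) // hypothetical namespace zone
}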
|
||||
|
||||
func NewNNSResolver(address string, settings Settings) (*Resolver, error) {
|
||||
func NewNNSResolver(address string) (*Resolver, error) {
|
||||
if address == "" {
|
||||
return nil, fmt.Errorf("rpc address must not be empty for NNS resolver")
|
||||
}
|
||||
if settings == nil {
|
||||
return nil, fmt.Errorf("resolver settings must not be nil for NNS resolver")
|
||||
}
|
||||
|
||||
var nns ns.NNS
|
||||
|
||||
|
@ -210,14 +180,10 @@ func NewNNSResolver(address string, settings Settings) (*Resolver, error) {
|
|||
return nil, fmt.Errorf("dial %s: %w", address, err)
|
||||
}
|
||||
|
||||
resolveFunc := func(ctx context.Context, name string) (cid.ID, error) {
|
||||
resolveFunc := func(_ context.Context, name string) (cid.ID, error) {
|
||||
var d container.Domain
|
||||
d.SetName(name)
|
||||
|
||||
reqInfo := middleware.GetReqInfo(ctx)
|
||||
zone, _ := settings.FormContainerZone(reqInfo.Namespace)
|
||||
d.SetZone(zone)
|
||||
|
||||
cnrID, err := nns.ResolveContainerDomain(d)
|
||||
if err != nil {
|
||||
return cid.ID{}, fmt.Errorf("couldn't resolve container '%s': %w", name, err)
|
||||
|
|
api/router.go (319 changed lines)
@ -5,13 +5,12 @@ import (
|
|||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
||||
s3middleware "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/metrics"
|
||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||
"git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/go-chi/chi/v5/middleware"
|
||||
"go.uber.org/zap"
|
||||
|
@ -37,7 +36,6 @@ type (
|
|||
PutObjectHandler(http.ResponseWriter, *http.Request)
|
||||
DeleteObjectHandler(http.ResponseWriter, *http.Request)
|
||||
GetBucketLocationHandler(http.ResponseWriter, *http.Request)
|
||||
GetBucketPolicyStatusHandler(http.ResponseWriter, *http.Request)
|
||||
GetBucketPolicyHandler(http.ResponseWriter, *http.Request)
|
||||
GetBucketLifecycleHandler(http.ResponseWriter, *http.Request)
|
||||
GetBucketEncryptionHandler(http.ResponseWriter, *http.Request)
|
||||
|
@ -89,80 +87,32 @@ type (
|
|||
ListMultipartUploadsHandler(http.ResponseWriter, *http.Request)
|
||||
|
||||
ResolveBucket(ctx context.Context, bucket string) (*data.BucketInfo, error)
|
||||
ResolveCID(ctx context.Context, bucket string) (cid.ID, error)
|
||||
}
|
||||
)
|
||||
|
||||
type Settings interface {
|
||||
s3middleware.RequestSettings
|
||||
s3middleware.PolicySettings
|
||||
s3middleware.MetricsSettings
|
||||
}
|
||||
|
||||
type FrostFSID interface {
|
||||
s3middleware.FrostFSIDValidator
|
||||
s3middleware.FrostFSIDInformer
|
||||
}
|
||||
|
||||
type Config struct {
|
||||
Throttle middleware.ThrottleOpts
|
||||
Handler Handler
|
||||
Center s3middleware.Center
|
||||
Log *zap.Logger
|
||||
Metrics *metrics.AppMetrics
|
||||
|
||||
MiddlewareSettings Settings
|
||||
|
||||
// Domains optional. If empty no virtual hosted domains will be attached.
|
||||
Domains []string
|
||||
|
||||
FrostfsID FrostFSID
|
||||
|
||||
FrostFSIDValidation bool
|
||||
|
||||
PolicyChecker engine.ChainRouter
|
||||
}
|
||||
|
||||
func NewRouter(cfg Config) *chi.Mux {
|
||||
api := chi.NewRouter()
|
||||
func AttachChi(api *chi.Mux, domains []string, throttle middleware.ThrottleOpts, h Handler, center auth.Center, log *zap.Logger, appMetrics *metrics.AppMetrics) {
|
||||
api.Use(
|
||||
s3middleware.Request(cfg.Log, cfg.MiddlewareSettings),
|
||||
middleware.ThrottleWithOpts(cfg.Throttle),
|
||||
s3middleware.Request(log),
|
||||
middleware.ThrottleWithOpts(throttle),
|
||||
middleware.Recoverer,
|
||||
s3middleware.Tracing(),
|
||||
s3middleware.Metrics(cfg.Log, cfg.Handler.ResolveCID, cfg.Metrics, cfg.MiddlewareSettings),
|
||||
s3middleware.LogSuccessResponse(cfg.Log),
|
||||
s3middleware.Auth(cfg.Center, cfg.Log),
|
||||
s3middleware.Metrics(log, h.ResolveBucket, appMetrics),
|
||||
s3middleware.LogSuccessResponse(log),
|
||||
s3middleware.Auth(center, log),
|
||||
)
|
||||
|
||||
if cfg.FrostFSIDValidation {
|
||||
api.Use(s3middleware.FrostfsIDValidation(cfg.FrostfsID, cfg.Log))
|
||||
}
|
||||
|
||||
api.Use(s3middleware.PolicyCheck(s3middleware.PolicyConfig{
|
||||
Storage: cfg.PolicyChecker,
|
||||
FrostfsID: cfg.FrostfsID,
|
||||
Settings: cfg.MiddlewareSettings,
|
||||
Domains: cfg.Domains,
|
||||
Log: cfg.Log,
|
||||
BucketResolver: cfg.Handler.ResolveBucket,
|
||||
}))
|
||||
|
||||
defaultRouter := chi.NewRouter()
|
||||
defaultRouter.Mount(fmt.Sprintf("/{%s}", s3middleware.BucketURLPrm), bucketRouter(cfg.Handler, cfg.Log))
|
||||
defaultRouter.Get("/", named("ListBuckets", cfg.Handler.ListBucketsHandler))
|
||||
attachErrorHandler(defaultRouter)
|
||||
defaultRouter.Mount(fmt.Sprintf("/{%s}", s3middleware.BucketURLPrm), bucketRouter(h, log))
|
||||
defaultRouter.Get("/", named("ListBuckets", h.ListBucketsHandler))
|
||||
|
||||
hr := NewHostBucketRouter("bucket")
|
||||
hr.Default(defaultRouter)
|
||||
for _, domain := range cfg.Domains {
|
||||
hr.Map(domain, bucketRouter(cfg.Handler, cfg.Log))
|
||||
for _, domain := range domains {
|
||||
hr.Map(domain, bucketRouter(h, log))
|
||||
}
|
||||
api.Mount("/", hr)
|
||||
|
||||
attachErrorHandler(api)
|
||||
|
||||
return api
|
||||
}
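For orientation, a minimal sketch of the same chi wiring shape used here: global middlewares first, then a default router mounted at the root with a bucket sub-router. The handlers are stubs and the middlewares are chi's stock ones, not the gateway's.

package main

import (
	"net/http"

	"github.com/go-chi/chi/v5"
	"github.com/go-chi/chi/v5/middleware"
)

func main() {
	api := chi.NewRouter()
	api.Use(middleware.RequestID, middleware.Recoverer)

	bucketRouter := chi.NewRouter()
	bucketRouter.Get("/*", func(w http.ResponseWriter, _ *http.Request) {
		w.Write([]byte("GetObject stub"))
	})

	defaultRouter := chi.NewRouter()
	defaultRouter.Get("/", func(w http.ResponseWriter, _ *http.Request) {
		w.Write([]byte("ListBuckets stub"))
	})
	defaultRouter.Mount("/{bucket}", bucketRouter)

	api.Mount("/", defaultRouter)
	http.ListenAndServe(":8080", api)
}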
|
||||
|
||||
func named(name string, handlerFunc http.HandlerFunc) http.HandlerFunc {
|
||||
|
@ -179,24 +129,14 @@ func errorResponseHandler(w http.ResponseWriter, r *http.Request) {
|
|||
reqInfo := s3middleware.GetReqInfo(ctx)
|
||||
|
||||
desc := fmt.Sprintf("Unknown API request at %s", r.URL.Path)
|
||||
_, wrErr := s3middleware.WriteErrorResponse(w, reqInfo, errors.Error{
|
||||
s3middleware.WriteErrorResponse(w, reqInfo, errors.Error{
|
||||
Code: "UnknownAPIRequest",
|
||||
Description: desc,
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
})
|
||||
|
||||
if log := s3middleware.GetReqLog(ctx); log != nil {
|
||||
fields := []zap.Field{
|
||||
zap.String("method", reqInfo.API),
|
||||
zap.String("http method", r.Method),
|
||||
zap.String("url", r.RequestURI),
|
||||
}
|
||||
|
||||
if wrErr != nil {
|
||||
fields = append(fields, zap.NamedError("write_response_error", wrErr))
|
||||
}
|
||||
|
||||
log.Error(logs.RequestUnmatched, fields...)
|
||||
log.Error(logs.RequestUnmatched, zap.String("method", reqInfo.API))
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -220,142 +160,139 @@ func bucketRouter(h Handler, log *zap.Logger) chi.Router {
|
|||
|
||||
bktRouter.Options("/", h.Preflight)
|
||||
|
||||
bktRouter.Head("/", named(s3middleware.HeadBucketOperation, h.HeadBucketHandler))
|
||||
bktRouter.Head("/", named("HeadBucket", h.HeadBucketHandler))
|
||||
|
||||
// GET method handlers
|
||||
bktRouter.Group(func(r chi.Router) {
|
||||
r.Method(http.MethodGet, "/", NewHandlerFilter().
|
||||
Add(NewFilter().
|
||||
Queries(s3middleware.UploadsQuery).
|
||||
Handler(named(s3middleware.ListMultipartUploadsOperation, h.ListMultipartUploadsHandler))).
|
||||
Queries("uploads").
|
||||
Handler(named("ListMultipartUploads", h.ListMultipartUploadsHandler))).
|
||||
Add(NewFilter().
|
||||
Queries(s3middleware.LocationQuery).
|
||||
Handler(named(s3middleware.GetBucketLocationOperation, h.GetBucketLocationHandler))).
|
||||
Queries("location").
|
||||
Handler(named("GetBucketLocation", h.GetBucketLocationHandler))).
|
||||
Add(NewFilter().
|
||||
Queries(s3middleware.PolicyStatusQuery).
|
||||
Handler(named(s3middleware.GetBucketPolicyStatusOperation, h.GetBucketPolicyStatusHandler))).
|
||||
Queries("policy").
|
||||
Handler(named("GetBucketPolicy", h.GetBucketPolicyHandler))).
|
||||
Add(NewFilter().
|
||||
Queries(s3middleware.PolicyQuery).
|
||||
Handler(named(s3middleware.GetBucketPolicyOperation, h.GetBucketPolicyHandler))).
|
||||
Queries("lifecycle").
|
||||
Handler(named("GetBucketLifecycle", h.GetBucketLifecycleHandler))).
|
||||
Add(NewFilter().
|
||||
Queries(s3middleware.LifecycleQuery).
|
||||
Handler(named(s3middleware.GetBucketLifecycleOperation, h.GetBucketLifecycleHandler))).
|
||||
Queries("encryption").
|
||||
Handler(named("GetBucketEncryption", h.GetBucketEncryptionHandler))).
|
||||
Add(NewFilter().
|
||||
Queries(s3middleware.EncryptionQuery).
|
||||
Handler(named(s3middleware.GetBucketEncryptionOperation, h.GetBucketEncryptionHandler))).
|
||||
Queries("cors").
|
||||
Handler(named("GetBucketCors", h.GetBucketCorsHandler))).
|
||||
Add(NewFilter().
|
||||
Queries(s3middleware.CorsQuery).
|
||||
Handler(named(s3middleware.GetBucketCorsOperation, h.GetBucketCorsHandler))).
|
||||
Queries("acl").
|
||||
Handler(named("GetBucketACL", h.GetBucketACLHandler))).
|
||||
Add(NewFilter().
|
||||
Queries(s3middleware.ACLQuery).
|
||||
Handler(named(s3middleware.GetBucketACLOperation, h.GetBucketACLHandler))).
|
||||
Queries("website").
|
||||
Handler(named("GetBucketWebsite", h.GetBucketWebsiteHandler))).
|
||||
Add(NewFilter().
|
||||
Queries(s3middleware.WebsiteQuery).
|
||||
Handler(named(s3middleware.GetBucketWebsiteOperation, h.GetBucketWebsiteHandler))).
|
||||
Queries("accelerate").
|
||||
Handler(named("GetBucketAccelerate", h.GetBucketAccelerateHandler))).
|
||||
Add(NewFilter().
|
||||
Queries(s3middleware.AccelerateQuery).
|
||||
Handler(named(s3middleware.GetBucketAccelerateOperation, h.GetBucketAccelerateHandler))).
|
||||
Queries("requestPayment").
|
||||
Handler(named("GetBucketRequestPayment", h.GetBucketRequestPaymentHandler))).
|
||||
Add(NewFilter().
|
||||
Queries(s3middleware.RequestPaymentQuery).
|
||||
Handler(named(s3middleware.GetBucketRequestPaymentOperation, h.GetBucketRequestPaymentHandler))).
|
||||
Queries("logging").
|
||||
Handler(named("GetBucketLogging", h.GetBucketLoggingHandler))).
|
||||
Add(NewFilter().
|
||||
Queries(s3middleware.LoggingQuery).
|
||||
Handler(named(s3middleware.GetBucketLoggingOperation, h.GetBucketLoggingHandler))).
|
||||
Queries("replication").
|
||||
Handler(named("GetBucketReplication", h.GetBucketReplicationHandler))).
|
||||
Add(NewFilter().
|
||||
Queries(s3middleware.ReplicationQuery).
|
||||
Handler(named(s3middleware.GetBucketReplicationOperation, h.GetBucketReplicationHandler))).
|
||||
Queries("tagging").
|
||||
Handler(named("GetBucketTagging", h.GetBucketTaggingHandler))).
|
||||
Add(NewFilter().
|
||||
Queries(s3middleware.TaggingQuery).
|
||||
Handler(named(s3middleware.GetBucketTaggingOperation, h.GetBucketTaggingHandler))).
|
||||
Queries("object-lock").
|
||||
Handler(named("GetBucketObjectLockConfig", h.GetBucketObjectLockConfigHandler))).
|
||||
Add(NewFilter().
|
||||
Queries(s3middleware.ObjectLockQuery).
|
||||
Handler(named(s3middleware.GetBucketObjectLockConfigOperation, h.GetBucketObjectLockConfigHandler))).
|
||||
Queries("versioning").
|
||||
Handler(named("GetBucketVersioning", h.GetBucketVersioningHandler))).
|
||||
Add(NewFilter().
|
||||
Queries(s3middleware.VersioningQuery).
|
||||
Handler(named(s3middleware.GetBucketVersioningOperation, h.GetBucketVersioningHandler))).
|
||||
Queries("notification").
|
||||
Handler(named("GetBucketNotification", h.GetBucketNotificationHandler))).
|
||||
Add(NewFilter().
|
||||
Queries(s3middleware.NotificationQuery).
|
||||
Handler(named(s3middleware.GetBucketNotificationOperation, h.GetBucketNotificationHandler))).
|
||||
Queries("events").
|
||||
Handler(named("ListenBucketNotification", h.ListenBucketNotificationHandler))).
|
||||
Add(NewFilter().
|
||||
Queries(s3middleware.EventsQuery).
|
||||
Handler(named(s3middleware.ListenBucketNotificationOperation, h.ListenBucketNotificationHandler))).
|
||||
QueriesMatch("list-type", "2", "metadata", "true").
|
||||
Handler(named("ListObjectsV2M", h.ListObjectsV2MHandler))).
|
||||
Add(NewFilter().
|
||||
QueriesMatch(s3middleware.ListTypeQuery, "2", s3middleware.MetadataQuery, "true").
|
||||
Handler(named(s3middleware.ListObjectsV2MOperation, h.ListObjectsV2MHandler))).
|
||||
QueriesMatch("list-type", "2").
|
||||
Handler(named("ListObjectsV2", h.ListObjectsV2Handler))).
|
||||
Add(NewFilter().
|
||||
QueriesMatch(s3middleware.ListTypeQuery, "2").
|
||||
Handler(named(s3middleware.ListObjectsV2Operation, h.ListObjectsV2Handler))).
|
||||
Add(NewFilter().
|
||||
Queries(s3middleware.VersionsQuery).
|
||||
Handler(named(s3middleware.ListBucketObjectVersionsOperation, h.ListBucketObjectVersionsHandler))).
|
||||
DefaultHandler(named(s3middleware.ListObjectsV1Operation, h.ListObjectsV1Handler)))
|
||||
Queries("versions").
|
||||
Handler(named("ListBucketObjectVersions", h.ListBucketObjectVersionsHandler))).
|
||||
DefaultHandler(named("ListObjectsV1", h.ListObjectsV1Handler)))
|
||||
})
|
||||
|
||||
// PUT method handlers
|
||||
bktRouter.Group(func(r chi.Router) {
|
||||
r.Method(http.MethodPut, "/", NewHandlerFilter().
|
||||
Add(NewFilter().
|
||||
Queries(s3middleware.CorsQuery).
|
||||
Handler(named(s3middleware.PutBucketCorsOperation, h.PutBucketCorsHandler))).
|
||||
Queries("cors").
|
||||
Handler(named("PutBucketCors", h.PutBucketCorsHandler))).
|
||||
Add(NewFilter().
|
||||
Queries(s3middleware.ACLQuery).
|
||||
Handler(named(s3middleware.PutBucketACLOperation, h.PutBucketACLHandler))).
|
||||
Queries("acl").
|
||||
Handler(named("PutBucketACL", h.PutBucketACLHandler))).
|
||||
Add(NewFilter().
|
||||
Queries(s3middleware.LifecycleQuery).
|
||||
Handler(named(s3middleware.PutBucketLifecycleOperation, h.PutBucketLifecycleHandler))).
|
||||
Queries("lifecycle").
|
||||
Handler(named("PutBucketLifecycle", h.PutBucketLifecycleHandler))).
|
||||
Add(NewFilter().
|
||||
Queries(s3middleware.EncryptionQuery).
|
||||
Handler(named(s3middleware.PutBucketEncryptionOperation, h.PutBucketEncryptionHandler))).
|
||||
Queries("encryption").
|
||||
Handler(named("PutBucketEncryption", h.PutBucketEncryptionHandler))).
|
||||
Add(NewFilter().
|
||||
Queries(s3middleware.PolicyQuery).
|
||||
Handler(named(s3middleware.PutBucketPolicyOperation, h.PutBucketPolicyHandler))).
|
||||
Queries("policy").
|
||||
Handler(named("PutBucketPolicy", h.PutBucketPolicyHandler))).
|
||||
Add(NewFilter().
|
||||
Queries(s3middleware.ObjectLockQuery).
|
||||
Handler(named(s3middleware.PutBucketObjectLockConfigOperation, h.PutBucketObjectLockConfigHandler))).
|
||||
Queries("object-lock").
|
||||
Handler(named("PutBucketObjectLockConfig", h.PutBucketObjectLockConfigHandler))).
|
||||
Add(NewFilter().
|
||||
Queries(s3middleware.TaggingQuery).
|
||||
Handler(named(s3middleware.PutBucketTaggingOperation, h.PutBucketTaggingHandler))).
|
||||
Queries("tagging").
|
||||
Handler(named("PutBucketTagging", h.PutBucketTaggingHandler))).
|
||||
Add(NewFilter().
|
||||
Queries(s3middleware.VersioningQuery).
|
||||
Handler(named(s3middleware.PutBucketVersioningOperation, h.PutBucketVersioningHandler))).
|
||||
Queries("versioning").
|
||||
Handler(named("PutBucketVersioning", h.PutBucketVersioningHandler))).
|
||||
Add(NewFilter().
|
||||
Queries(s3middleware.NotificationQuery).
|
||||
Handler(named(s3middleware.PutBucketNotificationOperation, h.PutBucketNotificationHandler))).
|
||||
DefaultHandler(named(s3middleware.CreateBucketOperation, h.CreateBucketHandler)))
|
||||
Queries("notification").
|
||||
Handler(named("PutBucketNotification", h.PutBucketNotificationHandler))).
|
||||
DefaultHandler(named("CreateBucket", h.CreateBucketHandler)))
|
||||
})
|
||||
|
||||
// POST method handlers
|
||||
bktRouter.Group(func(r chi.Router) {
|
||||
r.Method(http.MethodPost, "/", NewHandlerFilter().
|
||||
Add(NewFilter().
|
||||
Queries(s3middleware.DeleteQuery).
|
||||
Handler(named(s3middleware.DeleteMultipleObjectsOperation, h.DeleteMultipleObjectsHandler))).
|
||||
Queries("delete").
|
||||
Handler(named("DeleteMultipleObjects", h.DeleteMultipleObjectsHandler))).
|
||||
// todo consider add filter to match header for defaultHandler: hdrContentType, "multipart/form-data*"
|
||||
DefaultHandler(named(s3middleware.PostObjectOperation, h.PostObject)))
|
||||
DefaultHandler(named("PostObject", h.PostObject)))
|
||||
})
|
||||
|
||||
// DELETE method handlers
|
||||
bktRouter.Group(func(r chi.Router) {
|
||||
r.Method(http.MethodDelete, "/", NewHandlerFilter().
|
||||
Add(NewFilter().
|
||||
Queries(s3middleware.CorsQuery).
|
||||
Handler(named(s3middleware.DeleteBucketCorsOperation, h.DeleteBucketCorsHandler))).
|
||||
Queries("cors").
|
||||
Handler(named("DeleteBucketCors", h.DeleteBucketCorsHandler))).
|
||||
Add(NewFilter().
|
||||
Queries(s3middleware.WebsiteQuery).
|
||||
Handler(named(s3middleware.DeleteBucketWebsiteOperation, h.DeleteBucketWebsiteHandler))).
|
||||
Queries("website").
|
||||
Handler(named("DeleteBucketWebsite", h.DeleteBucketWebsiteHandler))).
|
||||
Add(NewFilter().
|
||||
Queries(s3middleware.TaggingQuery).
|
||||
Handler(named(s3middleware.DeleteBucketTaggingOperation, h.DeleteBucketTaggingHandler))).
|
||||
Queries("tagging").
|
||||
Handler(named("DeleteBucketTagging", h.DeleteBucketTaggingHandler))).
|
||||
Add(NewFilter().
|
||||
Queries(s3middleware.PolicyQuery).
|
||||
Handler(named(s3middleware.DeleteBucketPolicyOperation, h.DeleteBucketPolicyHandler))).
|
||||
Queries("policy").
|
||||
Handler(named("PutBucketPolicy", h.PutBucketPolicyHandler))).
|
||||
Add(NewFilter().
|
||||
Queries(s3middleware.LifecycleQuery).
|
||||
Handler(named(s3middleware.PutBucketLifecycleOperation, h.PutBucketLifecycleHandler))).
|
||||
Queries("lifecycle").
|
||||
Handler(named("PutBucketLifecycle", h.PutBucketLifecycleHandler))).
|
||||
Add(NewFilter().
|
||||
Queries(s3middleware.EncryptionQuery).
|
||||
Handler(named(s3middleware.DeleteBucketEncryptionOperation, h.DeleteBucketEncryptionHandler))).
|
||||
DefaultHandler(named(s3middleware.DeleteBucketOperation, h.DeleteBucketHandler)))
|
||||
Queries("encryption").
|
||||
Handler(named("DeleteBucketEncryption", h.DeleteBucketEncryptionHandler))).
|
||||
DefaultHandler(named("DeleteBucket", h.DeleteBucketHandler)))
|
||||
})
|
||||
|
||||
attachErrorHandler(bktRouter)
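The filters registered above select a handler by which query parameters are present, falling back to a default. A self-contained sketch of that selection idea follows; queryFilter and dispatch are illustrative stand-ins, not the NewHandlerFilter implementation in this repository.

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// queryFilter pairs a set of required query parameters with a handler.
type queryFilter struct {
	params  []string
	handler http.HandlerFunc
}

// dispatch picks the first filter whose parameters are all present, else the fallback.
func dispatch(filters []queryFilter, fallback http.HandlerFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		q := r.URL.Query()
		for _, f := range filters {
			matched := true
			for _, p := range f.params {
				if !q.Has(p) {
					matched = false
					break
				}
			}
			if matched {
				f.handler(w, r)
				return
			}
		}
		fallback(w, r)
	}
}

func main() {
	h := dispatch([]queryFilter{
		{params: []string{"tagging"}, handler: func(w http.ResponseWriter, _ *http.Request) { fmt.Fprint(w, "GetBucketTagging") }},
	}, func(w http.ResponseWriter, _ *http.Request) { fmt.Fprint(w, "ListObjectsV1") })

	rec := httptest.NewRecorder()
	h(rec, httptest.NewRequest(http.MethodGet, "/bucket?tagging=", nil))
	fmt.Println(rec.Body.String())
}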
|
||||
|
@ -367,30 +304,30 @@ func objectRouter(h Handler, l *zap.Logger) chi.Router {
|
|||
objRouter := chi.NewRouter()
|
||||
objRouter.Use(s3middleware.AddObjectName(l))
|
||||
|
||||
objRouter.Head("/*", named(s3middleware.HeadObjectOperation, h.HeadObjectHandler))
|
||||
objRouter.Head("/*", named("HeadObject", h.HeadObjectHandler))
|
||||
|
||||
// GET method handlers
|
||||
objRouter.Group(func(r chi.Router) {
|
||||
r.Method(http.MethodGet, "/*", NewHandlerFilter().
|
||||
Add(NewFilter().
|
||||
Queries(s3middleware.UploadIDQuery).
|
||||
Handler(named(s3middleware.ListPartsOperation, h.ListPartsHandler))).
|
||||
Queries("uploadId").
|
||||
Handler(named("ListParts", h.ListPartsHandler))).
|
||||
Add(NewFilter().
|
||||
Queries(s3middleware.ACLQuery).
|
||||
Handler(named(s3middleware.GetObjectACLOperation, h.GetObjectACLHandler))).
|
||||
Queries("acl").
|
||||
Handler(named("GetObjectACL", h.GetObjectACLHandler))).
|
||||
Add(NewFilter().
|
||||
Queries(s3middleware.TaggingQuery).
|
||||
Handler(named(s3middleware.GetObjectTaggingOperation, h.GetObjectTaggingHandler))).
|
||||
Queries("tagging").
|
||||
Handler(named("GetObjectTagging", h.GetObjectTaggingHandler))).
|
||||
Add(NewFilter().
|
||||
Queries(s3middleware.RetentionQuery).
|
||||
Handler(named(s3middleware.GetObjectRetentionOperation, h.GetObjectRetentionHandler))).
|
||||
Queries("retention").
|
||||
Handler(named("GetObjectRetention", h.GetObjectRetentionHandler))).
|
||||
Add(NewFilter().
|
||||
Queries(s3middleware.LegalHoldQuery).
|
||||
Handler(named(s3middleware.GetObjectLegalHoldOperation, h.GetObjectLegalHoldHandler))).
|
||||
Queries("legal-hold").
|
||||
Handler(named("GetObjectLegalHold", h.GetObjectLegalHoldHandler))).
|
||||
Add(NewFilter().
|
||||
Queries(s3middleware.AttributesQuery).
|
||||
Handler(named(s3middleware.GetObjectAttributesOperation, h.GetObjectAttributesHandler))).
|
||||
DefaultHandler(named(s3middleware.GetObjectOperation, h.GetObjectHandler)))
|
||||
Queries("attributes").
|
||||
Handler(named("GetObjectAttributes", h.GetObjectAttributesHandler))).
|
||||
DefaultHandler(named("GetObject", h.GetObjectHandler)))
|
||||
})
|
||||
|
||||
// PUT method handlers
|
||||
|
@ -398,51 +335,51 @@ func objectRouter(h Handler, l *zap.Logger) chi.Router {
		r.Method(http.MethodPut, "/*", NewHandlerFilter().
			Add(NewFilter().
				Headers(AmzCopySource).
				Queries(s3middleware.PartNumberQuery, s3middleware.UploadIDQuery).
				Handler(named(s3middleware.UploadPartCopyOperation, h.UploadPartCopy))).
				Queries("partNumber", "uploadId").
				Handler(named("UploadPartCopy", h.UploadPartCopy))).
			Add(NewFilter().
				Queries(s3middleware.PartNumberQuery, s3middleware.UploadIDQuery).
				Handler(named(s3middleware.UploadPartOperation, h.UploadPartHandler))).
				Queries("partNumber", "uploadId").
				Handler(named("UploadPart", h.UploadPartHandler))).
			Add(NewFilter().
				Queries(s3middleware.ACLQuery).
				Handler(named(s3middleware.PutObjectACLOperation, h.PutObjectACLHandler))).
				Queries("acl").
				Handler(named("PutObjectACL", h.PutObjectACLHandler))).
			Add(NewFilter().
				Queries(s3middleware.TaggingQuery).
				Handler(named(s3middleware.PutObjectTaggingOperation, h.PutObjectTaggingHandler))).
				Queries("tagging").
				Handler(named("PutObjectTagging", h.PutObjectTaggingHandler))).
			Add(NewFilter().
				Headers(AmzCopySource).
				Handler(named(s3middleware.CopyObjectOperation, h.CopyObjectHandler))).
				Handler(named("CopyObject", h.CopyObjectHandler))).
			Add(NewFilter().
				Queries(s3middleware.RetentionQuery).
				Handler(named(s3middleware.PutObjectRetentionOperation, h.PutObjectRetentionHandler))).
				Queries("retention").
				Handler(named("PutObjectRetention", h.PutObjectRetentionHandler))).
			Add(NewFilter().
				Queries(s3middleware.LegalHoldQuery).
				Handler(named(s3middleware.PutObjectLegalHoldOperation, h.PutObjectLegalHoldHandler))).
				DefaultHandler(named(s3middleware.PutObjectOperation, h.PutObjectHandler)))
				Queries("legal-hold").
				Handler(named("PutObjectLegalHold", h.PutObjectLegalHoldHandler))).
			DefaultHandler(named("PutObject", h.PutObjectHandler)))
	})

	// POST method handlers
	objRouter.Group(func(r chi.Router) {
		r.Method(http.MethodPost, "/*", NewHandlerFilter().
			Add(NewFilter().
				Queries(s3middleware.UploadIDQuery).
				Handler(named(s3middleware.CompleteMultipartUploadOperation, h.CompleteMultipartUploadHandler))).
				Queries("uploadId").
				Handler(named("CompleteMultipartUpload", h.CompleteMultipartUploadHandler))).
			Add(NewFilter().
				Queries(s3middleware.UploadsQuery).
				Handler(named(s3middleware.CreateMultipartUploadOperation, h.CreateMultipartUploadHandler))).
				DefaultHandler(named(s3middleware.SelectObjectContentOperation, h.SelectObjectContentHandler)))
				Queries("uploads").
				Handler(named("CreateMultipartUpload", h.CreateMultipartUploadHandler))).
			DefaultHandler(named("SelectObjectContent", h.SelectObjectContentHandler)))
	})

	// DELETE method handlers
	objRouter.Group(func(r chi.Router) {
		r.Method(http.MethodDelete, "/*", NewHandlerFilter().
			Add(NewFilter().
				Queries(s3middleware.UploadIDQuery).
				Handler(named(s3middleware.AbortMultipartUploadOperation, h.AbortMultipartUploadHandler))).
				Queries("uploadId").
				Handler(named("AbortMultipartUpload", h.AbortMultipartUploadHandler))).
			Add(NewFilter().
				Queries(s3middleware.TaggingQuery).
				Handler(named(s3middleware.DeleteObjectTaggingOperation, h.DeleteObjectTaggingHandler))).
				DefaultHandler(named(s3middleware.DeleteObjectOperation, h.DeleteObjectHandler)))
				Queries("tagging").
				Handler(named("DeleteObjectTagging", h.DeleteObjectTaggingHandler))).
			DefaultHandler(named("DeleteObject", h.DeleteObjectHandler)))
	})

	attachErrorHandler(objRouter)
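The filter chain above dispatches a single HTTP method to different S3 operations depending on which headers and query parameters are present. The snippet below is a minimal, self-contained sketch of that matching idea; the `filter` struct and `dispatch` helper are hypothetical stand-ins (not the gateway's real `NewHandlerFilter`/`NewFilter` types) and only illustrate the precedence shown in the diff: copy-source header plus part parameters selects UploadPartCopy, part parameters alone select UploadPart, and everything else falls through to the default PutObject handler.

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// filter is a hypothetical stand-in for the router's filter builder:
// a request matches when all listed headers and query parameters are present.
type filter struct {
	headers []string
	queries []string
	handler http.HandlerFunc
}

func (f filter) matches(r *http.Request) bool {
	for _, h := range f.headers {
		if r.Header.Get(h) == "" {
			return false
		}
	}
	q := r.URL.Query()
	for _, k := range f.queries {
		if !q.Has(k) {
			return false
		}
	}
	return true
}

// dispatch tries filters in order and falls back to a default handler,
// mirroring Add(...).Add(...).DefaultHandler(...) in the router.
func dispatch(filters []filter, def http.HandlerFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		for _, f := range filters {
			if f.matches(r) {
				f.handler(w, r)
				return
			}
		}
		def(w, r)
	}
}

func main() {
	named := func(name string) http.HandlerFunc {
		return func(w http.ResponseWriter, _ *http.Request) { fmt.Fprintln(w, name) }
	}
	put := dispatch([]filter{
		{headers: []string{"X-Amz-Copy-Source"}, queries: []string{"partNumber", "uploadId"}, handler: named("UploadPartCopy")},
		{queries: []string{"partNumber", "uploadId"}, handler: named("UploadPart")},
		{queries: []string{"tagging"}, handler: named("PutObjectTagging")},
	}, named("PutObject"))

	w := httptest.NewRecorder()
	r := httptest.NewRequest(http.MethodPut, "/bucket/object?partNumber=1&uploadId=abc", nil)
	put(w, r)
	fmt.Print(w.Body.String()) // UploadPart
}
```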
@ -3,93 +3,24 @@ package api

import (
	"context"
	"encoding/json"
	"errors"
	"net/http"
	"testing"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
	bearertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer/test"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
	"github.com/nspcc-dev/neo-go/pkg/util"
	"github.com/stretchr/testify/require"
)

const FrostfsNamespaceHeader = "X-Frostfs-Namespace"

type poolStatisticMock struct {
}

func (p *poolStatisticMock) Statistic() pool.Statistic {
	return pool.Statistic{}
}

type centerMock struct {
	t    *testing.T
	anon bool
}

func (c *centerMock) Authenticate(*http.Request) (*middleware.Box, error) {
	var token *bearer.Token

	if !c.anon {
		bt := bearertest.Token()
		token = &bt
		key, err := keys.NewPrivateKey()
		require.NoError(c.t, err)
		require.NoError(c.t, token.Sign(key.PrivateKey))
	}

	return &middleware.Box{
		AuthHeaders: &middleware.AuthHeader{},
		AccessBox: &accessbox.Box{
			Gate: &accessbox.GateData{
				BearerToken: token,
			},
		},
	}, nil
}

type middlewareSettingsMock struct {
	denyByDefault bool
	aclEnabled    bool
}

func (r *middlewareSettingsMock) NamespaceHeader() string {
	return FrostfsNamespaceHeader
}

func (r *middlewareSettingsMock) ResolveNamespaceAlias(ns string) string {
	return ns
}

func (r *middlewareSettingsMock) PolicyDenyByDefault() bool {
	return r.denyByDefault
}

func (r *middlewareSettingsMock) ACLEnabled() bool {
	return r.aclEnabled
}

type frostFSIDMock struct {
}

func (f *frostFSIDMock) ValidatePublicKey(*keys.PublicKey) error {
	return nil
}

func (f *frostFSIDMock) GetUserGroupIDs(util.Uint160) ([]string, error) {
	return []string{}, nil
func (c *centerMock) Authenticate(*http.Request) (*auth.Box, error) {
	return &auth.Box{}, nil
}

type handlerMock struct {
	t       *testing.T
	cfg     *middlewareSettingsMock
	buckets map[string]*data.BucketInfo
	t *testing.T
}

type handlerResult struct {

@ -169,7 +100,7 @@ func (h *handlerMock) PutObjectLegalHoldHandler(http.ResponseWriter, *http.Reque

func (h *handlerMock) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
	res := &handlerResult{
		Method:  middleware.PutObjectOperation,
		Method:  "PutObject",
		ReqInfo: middleware.GetReqInfo(r.Context()),
	}

@ -186,11 +117,6 @@ func (h *handlerMock) GetBucketLocationHandler(http.ResponseWriter, *http.Reques
	panic("implement me")
}

func (h *handlerMock) GetBucketPolicyStatusHandler(http.ResponseWriter, *http.Request) {
	//TODO implement me
	panic("implement me")
}

func (h *handlerMock) GetBucketPolicyHandler(http.ResponseWriter, *http.Request) {
	//TODO implement me
	panic("implement me")

@ -354,29 +280,14 @@ func (h *handlerMock) PutBucketNotificationHandler(http.ResponseWriter, *http.Re
	panic("implement me")
}

func (h *handlerMock) CreateBucketHandler(w http.ResponseWriter, r *http.Request) {
	reqInfo := middleware.GetReqInfo(r.Context())

	h.buckets[reqInfo.Namespace+reqInfo.BucketName] = &data.BucketInfo{
		Name:       reqInfo.BucketName,
		APEEnabled: !h.cfg.ACLEnabled(),
	}

	res := &handlerResult{
		Method:  middleware.CreateBucketOperation,
		ReqInfo: middleware.GetReqInfo(r.Context()),
	}

	h.writeResponse(w, res)
func (h *handlerMock) CreateBucketHandler(http.ResponseWriter, *http.Request) {
	//TODO implement me
	panic("implement me")
}

func (h *handlerMock) HeadBucketHandler(w http.ResponseWriter, r *http.Request) {
	res := &handlerResult{
		Method:  middleware.HeadBucketOperation,
		ReqInfo: middleware.GetReqInfo(r.Context()),
	}

	h.writeResponse(w, res)
func (h *handlerMock) HeadBucketHandler(http.ResponseWriter, *http.Request) {
	//TODO implement me
	panic("implement me")
}

func (h *handlerMock) PostObject(http.ResponseWriter, *http.Request) {

@ -409,13 +320,9 @@ func (h *handlerMock) DeleteBucketHandler(http.ResponseWriter, *http.Request) {
	panic("implement me")
}

func (h *handlerMock) ListBucketsHandler(w http.ResponseWriter, r *http.Request) {
	res := &handlerResult{
		Method:  middleware.ListBucketsOperation,
		ReqInfo: middleware.GetReqInfo(r.Context()),
	}

	h.writeResponse(w, res)
func (h *handlerMock) ListBucketsHandler(http.ResponseWriter, *http.Request) {
	//TODO implement me
	panic("implement me")
}

func (h *handlerMock) Preflight(http.ResponseWriter, *http.Request) {

@ -469,21 +376,8 @@ func (h *handlerMock) ListMultipartUploadsHandler(w http.ResponseWriter, r *http
	h.writeResponse(w, res)
}

func (h *handlerMock) ResolveBucket(ctx context.Context, name string) (*data.BucketInfo, error) {
	reqInfo := middleware.GetReqInfo(ctx)
	bktInfo, ok := h.buckets[reqInfo.Namespace+name]
	if !ok {
		return nil, errors.New("not found")
	}
	return bktInfo, nil
}

func (h *handlerMock) ResolveCID(ctx context.Context, bucket string) (cid.ID, error) {
	bktInfo, err := h.ResolveBucket(ctx, bucket)
	if err != nil {
		return cid.ID{}, err
	}
	return bktInfo.CID, nil
func (h *handlerMock) ResolveBucket(context.Context, string) (*data.BucketInfo, error) {
	return &data.BucketInfo{}, nil
}

func (h *handlerMock) writeResponse(w http.ResponseWriter, resp *handlerResult) {
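In the mock above, created buckets are stored in a map keyed by namespace plus bucket name, and ResolveBucket looks them up with the same key, so the same bucket name can exist independently in different namespaces. Below is a small standalone sketch of that keying scheme; the `bucketInfo` and `bucketStore` types are simplified stand-ins, not the gateway's real `data.BucketInfo`.

```go
package main

import (
	"errors"
	"fmt"
)

// bucketInfo is a simplified stand-in for the gateway's bucket record.
type bucketInfo struct {
	Name string
}

type bucketStore struct {
	buckets map[string]*bucketInfo
}

// key combines namespace and bucket name, like reqInfo.Namespace+name in the mock.
func key(namespace, name string) string { return namespace + name }

func (s *bucketStore) create(namespace, name string) {
	s.buckets[key(namespace, name)] = &bucketInfo{Name: name}
}

func (s *bucketStore) resolve(namespace, name string) (*bucketInfo, error) {
	info, ok := s.buckets[key(namespace, name)]
	if !ok {
		return nil, errors.New("not found")
	}
	return info, nil
}

func main() {
	store := &bucketStore{buckets: map[string]*bucketInfo{}}
	store.create("", "bucket")          // default namespace
	store.create("custom-ns", "bucket") // same name, different namespace

	if _, err := store.resolve("other-ns", "bucket"); err != nil {
		fmt.Println("other-ns/bucket:", err) // not found
	}
}
```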
@ -2,85 +2,24 @@ package api
|
|||
|
||||
import (
|
||||
"encoding/json"
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
||||
apiErrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
||||
s3middleware "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/metrics"
|
||||
engineiam "git.frostfs.info/TrueCloudLab/policy-engine/iam"
|
||||
"git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
|
||||
"git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
|
||||
"git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine/inmemory"
|
||||
"git.frostfs.info/TrueCloudLab/policy-engine/schema/s3"
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/go-chi/chi/v5/middleware"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.uber.org/zap/zaptest"
|
||||
)
|
||||
|
||||
type routerMock struct {
|
||||
t *testing.T
|
||||
router *chi.Mux
|
||||
cfg Config
|
||||
middlewareSettings *middlewareSettingsMock
|
||||
policyChecker engine.LocalOverrideEngine
|
||||
}
|
||||
|
||||
func (m *routerMock) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
m.router.ServeHTTP(w, r)
|
||||
}
|
||||
|
||||
func prepareRouter(t *testing.T) *routerMock {
|
||||
middlewareSettings := &middlewareSettingsMock{}
|
||||
policyChecker := inmemory.NewInMemoryLocalOverrides()
|
||||
|
||||
logger := zaptest.NewLogger(t)
|
||||
|
||||
metricsConfig := metrics.AppMetricsConfig{
|
||||
Logger: logger,
|
||||
PoolStatistics: &poolStatisticMock{},
|
||||
Registerer: prometheus.NewRegistry(),
|
||||
Enabled: true,
|
||||
}
|
||||
|
||||
cfg := Config{
|
||||
Throttle: middleware.ThrottleOpts{
|
||||
Limit: 10,
|
||||
BacklogTimeout: 30 * time.Second,
|
||||
},
|
||||
Handler: &handlerMock{t: t, cfg: middlewareSettings, buckets: map[string]*data.BucketInfo{}},
|
||||
Center: ¢erMock{t: t},
|
||||
Log: logger,
|
||||
Metrics: metrics.NewAppMetrics(metricsConfig),
|
||||
MiddlewareSettings: middlewareSettings,
|
||||
PolicyChecker: policyChecker,
|
||||
Domains: []string{"domain1", "domain2"},
|
||||
FrostfsID: &frostFSIDMock{},
|
||||
}
|
||||
return &routerMock{
|
||||
t: t,
|
||||
router: NewRouter(cfg),
|
||||
cfg: cfg,
|
||||
middlewareSettings: middlewareSettings,
|
||||
policyChecker: policyChecker,
|
||||
}
|
||||
}
|
||||
|
||||
func TestRouterUploadPart(t *testing.T) {
|
||||
chiRouter := prepareRouter(t)
|
||||
|
||||
createBucket(chiRouter, "", "dkirillov")
|
||||
|
||||
w := httptest.NewRecorder()
|
||||
r := httptest.NewRequest(http.MethodPut, "/dkirillov/fix-object", nil)
|
||||
query := make(url.Values)
|
||||
|
@ -96,8 +35,6 @@ func TestRouterUploadPart(t *testing.T) {
|
|||
func TestRouterListMultipartUploads(t *testing.T) {
|
||||
chiRouter := prepareRouter(t)
|
||||
|
||||
createBucket(chiRouter, "", "test-bucket")
|
||||
|
||||
w := httptest.NewRecorder()
|
||||
r := httptest.NewRequest(http.MethodGet, "/test-bucket", nil)
|
||||
query := make(url.Values)
|
||||
|
@ -112,18 +49,22 @@ func TestRouterListMultipartUploads(t *testing.T) {
|
|||
func TestRouterObjectWithSlashes(t *testing.T) {
|
||||
chiRouter := prepareRouter(t)
|
||||
|
||||
ns, bktName, objName := "", "dkirillov", "/fix/object"
|
||||
bktName, objName := "dkirillov", "/fix/object"
|
||||
target := fmt.Sprintf("/%s/%s", bktName, objName)
|
||||
|
||||
createBucket(chiRouter, ns, bktName)
|
||||
resp := putObject(chiRouter, ns, bktName, objName)
|
||||
w := httptest.NewRecorder()
|
||||
r := httptest.NewRequest(http.MethodPut, target, nil)
|
||||
|
||||
chiRouter.ServeHTTP(w, r)
|
||||
resp := readResponse(t, w)
|
||||
require.Equal(t, "PutObject", resp.Method)
|
||||
require.Equal(t, objName, resp.ReqInfo.ObjectName)
|
||||
}
|
||||
|
||||
func TestRouterObjectEscaping(t *testing.T) {
|
||||
chiRouter := prepareRouter(t)
|
||||
|
||||
ns, bktName := "", "dkirillov"
|
||||
createBucket(chiRouter, ns, bktName)
|
||||
bktName := "dkirillov"
|
||||
|
||||
for _, tc := range []struct {
|
||||
name string
|
||||
|
@ -141,487 +82,44 @@ func TestRouterObjectEscaping(t *testing.T) {
|
|||
objName: "fix/object",
|
||||
},
|
||||
{
|
||||
name: "with slash escaped",
|
||||
expectedObjName: "/foo/bar",
|
||||
objName: "/foo%2fbar",
|
||||
name: "with percentage",
|
||||
expectedObjName: "fix/object%ac",
|
||||
objName: "fix/object%ac",
|
||||
},
|
||||
{
|
||||
name: "with percentage escaped",
|
||||
expectedObjName: "fix/object%ac",
|
||||
objName: "fix/object%25ac",
|
||||
},
|
||||
{
|
||||
name: "with awful mint name",
|
||||
expectedObjName: "äöüex ®©µÄÆÐÕæŒƕƩDž 01000000 0x40 \u0040 amȡȹɆple&0a!-_.*'()&$@=;:+,?<>.pdf",
|
||||
objName: "%C3%A4%C3%B6%C3%BCex%20%C2%AE%C2%A9%C2%B5%C3%84%C3%86%C3%90%C3%95%C3%A6%C5%92%C6%95%C6%A9%C7%85%2001000000%200x40%20%40%20am%C8%A1%C8%B9%C9%86ple%260a%21-_.%2A%27%28%29%26%24%40%3D%3B%3A%2B%2C%3F%3C%3E.pdf",
|
||||
},
|
||||
} {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
resp := putObject(chiRouter, ns, bktName, tc.objName)
|
||||
target := fmt.Sprintf("/%s/%s", bktName, tc.objName)
|
||||
|
||||
w := httptest.NewRecorder()
|
||||
r := httptest.NewRequest(http.MethodPut, target, nil)
|
||||
|
||||
chiRouter.ServeHTTP(w, r)
|
||||
resp := readResponse(t, w)
|
||||
require.Equal(t, "PutObject", resp.Method)
|
||||
require.Equal(t, tc.expectedObjName, resp.ReqInfo.ObjectName)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestPolicyChecker(t *testing.T) {
|
||||
chiRouter := prepareRouter(t)
|
||||
ns1, bktName1, objName1 := "", "bucket", "object"
|
||||
ns2, bktName2, objName2 := "custom-ns", "other-bucket", "object"
|
||||
|
||||
createBucket(chiRouter, ns1, bktName1)
|
||||
createBucket(chiRouter, ns2, bktName1)
|
||||
createBucket(chiRouter, ns2, bktName2)
|
||||
|
||||
ruleChain := &chain.Chain{
|
||||
ID: chain.ID("id"),
|
||||
Rules: []chain.Rule{{
|
||||
Status: chain.AccessDenied,
|
||||
Actions: chain.Actions{Names: []string{"*"}},
|
||||
Resources: chain.Resources{Names: []string{fmt.Sprintf(s3.ResourceFormatS3BucketObjects, bktName1)}},
|
||||
}},
|
||||
func prepareRouter(t *testing.T) *chi.Mux {
|
||||
throttleOps := middleware.ThrottleOpts{
|
||||
Limit: 10,
|
||||
BacklogTimeout: 30 * time.Second,
|
||||
}
|
||||
|
||||
_, _, err := chiRouter.policyChecker.MorphRuleChainStorage().AddMorphRuleChain(chain.S3, engine.NamespaceTarget(ns2), ruleChain)
|
||||
require.NoError(t, err)
|
||||
|
||||
// check we can access 'bucket' in default namespace
|
||||
putObject(chiRouter, ns1, bktName1, objName1)
|
||||
|
||||
// check we can access 'other-bucket' in custom namespace
|
||||
putObject(chiRouter, ns2, bktName2, objName2)
|
||||
|
||||
// check we cannot access 'bucket' in custom namespace
|
||||
putObjectErr(chiRouter, ns2, bktName1, objName2, apiErrors.ErrAccessDenied)
|
||||
}
|
||||
|
||||
func TestPolicyCheckerReqTypeDetermination(t *testing.T) {
|
||||
chiRouter := prepareRouter(t)
|
||||
bktName, objName := "bucket", "object"
|
||||
createBucket(chiRouter, "", bktName)
|
||||
|
||||
policy := engineiam.Policy{
|
||||
Version: "2012-10-17",
|
||||
Statement: []engineiam.Statement{{
|
||||
Principal: map[engineiam.PrincipalType][]string{engineiam.Wildcard: {}},
|
||||
Effect: engineiam.AllowEffect,
|
||||
Action: engineiam.Action{"s3:*"},
|
||||
Resource: engineiam.Resource{fmt.Sprintf(s3.ResourceFormatS3All)},
|
||||
}},
|
||||
}
|
||||
|
||||
ruleChain, err := engineiam.ConvertToS3Chain(policy, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, _, err = chiRouter.policyChecker.MorphRuleChainStorage().AddMorphRuleChain(chain.S3, engine.NamespaceTarget(""), ruleChain)
|
||||
require.NoError(t, err)
|
||||
|
||||
chiRouter.middlewareSettings.denyByDefault = true
|
||||
t.Run("can list buckets", func(t *testing.T) {
|
||||
w, r := httptest.NewRecorder(), httptest.NewRequest(http.MethodGet, "/", nil)
|
||||
chiRouter.ServeHTTP(w, r)
|
||||
resp := readResponse(t, w)
|
||||
require.Equal(t, s3middleware.ListBucketsOperation, resp.Method)
|
||||
})
|
||||
|
||||
t.Run("can head 'bucket'", func(t *testing.T) {
|
||||
w, r := httptest.NewRecorder(), httptest.NewRequest(http.MethodHead, "/"+bktName, nil)
|
||||
chiRouter.ServeHTTP(w, r)
|
||||
resp := readResponse(t, w)
|
||||
require.Equal(t, s3middleware.HeadBucketOperation, resp.Method)
|
||||
})
|
||||
|
||||
t.Run("can put object into 'bucket'", func(t *testing.T) {
|
||||
w, r := httptest.NewRecorder(), httptest.NewRequest(http.MethodPut, fmt.Sprintf("/%s/%s", bktName, objName), nil)
|
||||
chiRouter.ServeHTTP(w, r)
|
||||
resp := readResponse(t, w)
|
||||
require.Equal(t, s3middleware.PutObjectOperation, resp.Method)
|
||||
})
|
||||
}
|
||||
|
||||
func TestDefaultBehaviorPolicyChecker(t *testing.T) {
|
||||
chiRouter := prepareRouter(t)
|
||||
ns, bktName := "", "bucket"
|
||||
|
||||
// check we can access bucket if rules not found
|
||||
createBucket(chiRouter, ns, bktName)
|
||||
|
||||
// check we cannot access if rules not found when settings is enabled
|
||||
chiRouter.middlewareSettings.denyByDefault = true
|
||||
createBucketErr(chiRouter, ns, bktName, apiErrors.ErrAccessDenied)
|
||||
}
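TestDefaultBehaviorPolicyChecker above pins down what happens when no matching rule chain exists: the request is allowed unless the gateway is configured to deny by default. A compact sketch of that decision follows, with a hypothetical `decision` type standing in for the policy engine's result.

```go
package main

import "fmt"

type decision int

const (
	noRuleFound decision = iota
	ruleAllow
	ruleDeny
)

// allowed mirrors the behavior exercised by the test: explicit rules win,
// and absence of rules falls back to the deny-by-default setting.
func allowed(d decision, denyByDefault bool) bool {
	switch d {
	case ruleAllow:
		return true
	case ruleDeny:
		return false
	default: // no rule chain matched
		return !denyByDefault
	}
}

func main() {
	fmt.Println(allowed(noRuleFound, false)) // true: allow by default
	fmt.Println(allowed(noRuleFound, true))  // false: deny by default
	fmt.Println(allowed(ruleAllow, true))    // true: explicit allow wins
}
```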
func TestACLAPE(t *testing.T) {
|
||||
t.Run("acl disabled, ape deny by default", func(t *testing.T) {
|
||||
router := prepareRouter(t)
|
||||
|
||||
ns, bktName, objName := "", "bucket", "object"
|
||||
bktNameOld, bktNameNew := "old-bucket", "new-bucket"
|
||||
createOldBucket(router, bktNameOld)
|
||||
createNewBucket(router, bktNameNew)
|
||||
|
||||
router.middlewareSettings.aclEnabled = false
|
||||
router.middlewareSettings.denyByDefault = true
|
||||
|
||||
// Allow because of using old bucket
|
||||
putObject(router, ns, bktNameOld, objName)
|
||||
// Deny because of deny by default
|
||||
putObjectErr(router, ns, bktNameNew, objName, apiErrors.ErrAccessDenied)
|
||||
|
||||
// Deny because of deny by default
|
||||
createBucketErr(router, ns, bktName, apiErrors.ErrAccessDenied)
|
||||
listBucketsErr(router, ns, apiErrors.ErrAccessDenied)
|
||||
|
||||
// Allow operations and check
|
||||
allowOperations(router, ns, []string{"s3:CreateBucket", "s3:ListAllMyBuckets"}, nil)
|
||||
createBucket(router, ns, bktName)
|
||||
listBuckets(router, ns)
|
||||
})
|
||||
|
||||
t.Run("acl disabled, ape allow by default", func(t *testing.T) {
|
||||
router := prepareRouter(t)
|
||||
|
||||
ns, bktName, objName := "", "bucket", "object"
|
||||
bktNameOld, bktNameNew := "old-bucket", "new-bucket"
|
||||
createOldBucket(router, bktNameOld)
|
||||
createNewBucket(router, bktNameNew)
|
||||
|
||||
router.middlewareSettings.aclEnabled = false
|
||||
router.middlewareSettings.denyByDefault = false
|
||||
|
||||
// Allow because of using old bucket
|
||||
putObject(router, ns, bktNameOld, objName)
|
||||
// Allow because of allow by default
|
||||
putObject(router, ns, bktNameNew, objName)
|
||||
|
||||
// Allow because of deny by default
|
||||
createBucket(router, ns, bktName)
|
||||
listBuckets(router, ns)
|
||||
|
||||
// Deny operations and check
|
||||
denyOperations(router, ns, []string{"s3:CreateBucket", "s3:ListAllMyBuckets"}, nil)
|
||||
createBucketErr(router, ns, bktName, apiErrors.ErrAccessDenied)
|
||||
listBucketsErr(router, ns, apiErrors.ErrAccessDenied)
|
||||
})
|
||||
|
||||
t.Run("acl enabled, ape deny by default", func(t *testing.T) {
|
||||
router := prepareRouter(t)
|
||||
|
||||
ns, bktName, objName := "", "bucket", "object"
|
||||
bktNameOld, bktNameNew := "old-bucket", "new-bucket"
|
||||
createOldBucket(router, bktNameOld)
|
||||
createNewBucket(router, bktNameNew)
|
||||
|
||||
router.middlewareSettings.aclEnabled = true
|
||||
router.middlewareSettings.denyByDefault = true
|
||||
|
||||
// Allow because of using old bucket
|
||||
putObject(router, ns, bktNameOld, objName)
|
||||
// Deny because of deny by default
|
||||
putObjectErr(router, ns, bktNameNew, objName, apiErrors.ErrAccessDenied)
|
||||
|
||||
// Allow because of old behavior
|
||||
createBucket(router, ns, bktName)
|
||||
listBuckets(router, ns)
|
||||
})
|
||||
|
||||
t.Run("acl enabled, ape allow by default", func(t *testing.T) {
|
||||
router := prepareRouter(t)
|
||||
|
||||
ns, bktName, objName := "", "bucket", "object"
|
||||
bktNameOld, bktNameNew := "old-bucket", "new-bucket"
|
||||
createOldBucket(router, bktNameOld)
|
||||
createNewBucket(router, bktNameNew)
|
||||
|
||||
router.middlewareSettings.aclEnabled = true
|
||||
router.middlewareSettings.denyByDefault = false
|
||||
|
||||
// Allow because of using old bucket
|
||||
putObject(router, ns, bktNameOld, objName)
|
||||
// Allow because of allow by default
|
||||
putObject(router, ns, bktNameNew, objName)
|
||||
|
||||
// Allow because of old behavior
|
||||
createBucket(router, ns, bktName)
|
||||
listBuckets(router, ns)
|
||||
})
|
||||
}
|
||||
|
||||
func TestRequestParametersCheck(t *testing.T) {
|
||||
t.Run("prefix parameter, allow specific value", func(t *testing.T) {
|
||||
router := prepareRouter(t)
|
||||
|
||||
ns, bktName, prefix := "", "bucket", "prefix"
|
||||
router.middlewareSettings.denyByDefault = true
|
||||
|
||||
allowOperations(router, ns, []string{"s3:CreateBucket"}, nil)
|
||||
createBucket(router, ns, bktName)
|
||||
|
||||
// Add policies and check
|
||||
denyOperations(router, ns, []string{"s3:ListBucket"}, engineiam.Conditions{
|
||||
engineiam.CondStringNotEquals: engineiam.Condition{s3.PropertyKeyPrefix: []string{prefix}},
|
||||
})
|
||||
allowOperations(router, ns, []string{"s3:ListBucket"}, engineiam.Conditions{
|
||||
engineiam.CondStringEquals: engineiam.Condition{s3.PropertyKeyPrefix: []string{prefix}},
|
||||
})
|
||||
|
||||
listObjectsV1(router, ns, bktName, prefix, "", "")
|
||||
listObjectsV1Err(router, ns, bktName, "", "", "", apiErrors.ErrAccessDenied)
|
||||
listObjectsV1Err(router, ns, bktName, "invalid", "", "", apiErrors.ErrAccessDenied)
|
||||
})
|
||||
|
||||
t.Run("delimiter parameter, prohibit specific value", func(t *testing.T) {
|
||||
router := prepareRouter(t)
|
||||
|
||||
ns, bktName, delimiter := "", "bucket", "delimiter"
|
||||
router.middlewareSettings.denyByDefault = true
|
||||
|
||||
allowOperations(router, ns, []string{"s3:CreateBucket"}, nil)
|
||||
createBucket(router, ns, bktName)
|
||||
|
||||
// Add policies and check
|
||||
denyOperations(router, ns, []string{"s3:ListBucket"}, engineiam.Conditions{
|
||||
engineiam.CondStringEquals: engineiam.Condition{s3.PropertyKeyDelimiter: []string{delimiter}},
|
||||
})
|
||||
allowOperations(router, ns, []string{"s3:ListBucket"}, engineiam.Conditions{
|
||||
engineiam.CondStringNotEquals: engineiam.Condition{s3.PropertyKeyDelimiter: []string{delimiter}},
|
||||
})
|
||||
|
||||
listObjectsV1(router, ns, bktName, "", "", "")
|
||||
listObjectsV1(router, ns, bktName, "", "some-delimiter", "")
|
||||
listObjectsV1Err(router, ns, bktName, "", delimiter, "", apiErrors.ErrAccessDenied)
|
||||
})
|
||||
|
||||
t.Run("max-keys parameter, allow specific value", func(t *testing.T) {
|
||||
router := prepareRouter(t)
|
||||
|
||||
ns, bktName, maxKeys := "", "bucket", 10
|
||||
router.middlewareSettings.denyByDefault = true
|
||||
|
||||
allowOperations(router, ns, []string{"s3:CreateBucket"}, nil)
|
||||
createBucket(router, ns, bktName)
|
||||
|
||||
// Add policies and check
|
||||
denyOperations(router, ns, []string{"s3:ListBucket"}, engineiam.Conditions{
|
||||
engineiam.CondNumericNotEquals: engineiam.Condition{s3.PropertyKeyMaxKeys: []string{strconv.Itoa(maxKeys)}},
|
||||
})
|
||||
allowOperations(router, ns, []string{"s3:ListBucket"}, engineiam.Conditions{
|
||||
engineiam.CondNumericEquals: engineiam.Condition{s3.PropertyKeyMaxKeys: []string{strconv.Itoa(maxKeys)}},
|
||||
})
|
||||
|
||||
listObjectsV1(router, ns, bktName, "", "", strconv.Itoa(maxKeys))
|
||||
listObjectsV1Err(router, ns, bktName, "", "", "", apiErrors.ErrAccessDenied)
|
||||
listObjectsV1Err(router, ns, bktName, "", "", strconv.Itoa(maxKeys-1), apiErrors.ErrAccessDenied)
|
||||
listObjectsV1Err(router, ns, bktName, "", "", "invalid", apiErrors.ErrAccessDenied)
|
||||
})
|
||||
|
||||
t.Run("max-keys parameter, allow range of values", func(t *testing.T) {
|
||||
router := prepareRouter(t)
|
||||
|
||||
ns, bktName, maxKeys := "", "bucket", 10
|
||||
router.middlewareSettings.denyByDefault = true
|
||||
|
||||
allowOperations(router, ns, []string{"s3:CreateBucket"}, nil)
|
||||
createBucket(router, ns, bktName)
|
||||
|
||||
// Add policies and check
|
||||
denyOperations(router, ns, []string{"s3:ListBucket"}, engineiam.Conditions{
|
||||
engineiam.CondNumericGreaterThan: engineiam.Condition{s3.PropertyKeyMaxKeys: []string{strconv.Itoa(maxKeys)}},
|
||||
})
|
||||
allowOperations(router, ns, []string{"s3:ListBucket"}, engineiam.Conditions{
|
||||
engineiam.CondNumericLessThanEquals: engineiam.Condition{s3.PropertyKeyMaxKeys: []string{strconv.Itoa(maxKeys)}},
|
||||
})
|
||||
|
||||
listObjectsV1(router, ns, bktName, "", "", strconv.Itoa(maxKeys))
|
||||
listObjectsV1(router, ns, bktName, "", "", strconv.Itoa(maxKeys-1))
|
||||
listObjectsV1Err(router, ns, bktName, "", "", strconv.Itoa(maxKeys+1), apiErrors.ErrAccessDenied)
|
||||
})
|
||||
|
||||
t.Run("max-keys parameter, prohibit specific value", func(t *testing.T) {
|
||||
router := prepareRouter(t)
|
||||
|
||||
ns, bktName, maxKeys := "", "bucket", 10
|
||||
router.middlewareSettings.denyByDefault = true
|
||||
|
||||
allowOperations(router, ns, []string{"s3:CreateBucket"}, nil)
|
||||
createBucket(router, ns, bktName)
|
||||
|
||||
// Add policies and check
|
||||
denyOperations(router, ns, []string{"s3:ListBucket"}, engineiam.Conditions{
|
||||
engineiam.CondNumericEquals: engineiam.Condition{s3.PropertyKeyMaxKeys: []string{strconv.Itoa(maxKeys)}},
|
||||
})
|
||||
allowOperations(router, ns, []string{"s3:ListBucket"}, engineiam.Conditions{
|
||||
engineiam.CondNumericNotEquals: engineiam.Condition{s3.PropertyKeyMaxKeys: []string{strconv.Itoa(maxKeys)}},
|
||||
})
|
||||
|
||||
listObjectsV1(router, ns, bktName, "", "", "")
|
||||
listObjectsV1(router, ns, bktName, "", "", strconv.Itoa(maxKeys-1))
|
||||
listObjectsV1Err(router, ns, bktName, "", "", strconv.Itoa(maxKeys), apiErrors.ErrAccessDenied)
|
||||
})
|
||||
}
|
||||
|
||||
func allowOperations(router *routerMock, ns string, operations []string, conditions engineiam.Conditions) {
|
||||
addPolicy(router, ns, "allow", engineiam.AllowEffect, operations, conditions)
|
||||
}
|
||||
|
||||
func denyOperations(router *routerMock, ns string, operations []string, conditions engineiam.Conditions) {
|
||||
addPolicy(router, ns, "deny", engineiam.DenyEffect, operations, conditions)
|
||||
}
|
||||
|
||||
func addPolicy(router *routerMock, ns string, id string, effect engineiam.Effect, operations []string, conditions engineiam.Conditions) {
|
||||
policy := engineiam.Policy{
|
||||
Version: "2012-10-17",
|
||||
Statement: []engineiam.Statement{{
|
||||
Principal: map[engineiam.PrincipalType][]string{engineiam.Wildcard: {}},
|
||||
Effect: effect,
|
||||
Action: engineiam.Action(operations),
|
||||
Resource: engineiam.Resource{fmt.Sprintf(s3.ResourceFormatS3All)},
|
||||
Conditions: conditions,
|
||||
}},
|
||||
}
|
||||
|
||||
ruleChain, err := engineiam.ConvertToS3Chain(policy, nil)
|
||||
require.NoError(router.t, err)
|
||||
ruleChain.ID = chain.ID(id)
|
||||
|
||||
_, _, err = router.policyChecker.MorphRuleChainStorage().AddMorphRuleChain(chain.S3, engine.NamespaceTarget(ns), ruleChain)
|
||||
require.NoError(router.t, err)
|
||||
}
|
||||
|
||||
func createOldBucket(router *routerMock, bktName string) {
|
||||
createSpecificBucket(router, bktName, true)
|
||||
}
|
||||
|
||||
func createNewBucket(router *routerMock, bktName string) {
|
||||
createSpecificBucket(router, bktName, false)
|
||||
}
|
||||
|
||||
func createSpecificBucket(router *routerMock, bktName string, old bool) {
|
||||
aclEnabled := router.middlewareSettings.ACLEnabled()
|
||||
router.middlewareSettings.aclEnabled = old
|
||||
createBucket(router, "", bktName)
|
||||
router.middlewareSettings.aclEnabled = aclEnabled
|
||||
}
|
||||
|
||||
func createBucket(router *routerMock, namespace, bktName string) {
|
||||
w := createBucketBase(router, namespace, bktName)
|
||||
resp := readResponse(router.t, w)
|
||||
require.Equal(router.t, s3middleware.CreateBucketOperation, resp.Method)
|
||||
}
|
||||
|
||||
func createBucketErr(router *routerMock, namespace, bktName string, errCode apiErrors.ErrorCode) {
|
||||
w := createBucketBase(router, namespace, bktName)
|
||||
assertAPIError(router.t, w, errCode)
|
||||
}
|
||||
|
||||
func createBucketBase(router *routerMock, namespace, bktName string) *httptest.ResponseRecorder {
|
||||
w, r := httptest.NewRecorder(), httptest.NewRequest(http.MethodPut, "/"+bktName, nil)
|
||||
r.Header.Set(FrostfsNamespaceHeader, namespace)
|
||||
router.ServeHTTP(w, r)
|
||||
return w
|
||||
}
|
||||
|
||||
func listBuckets(router *routerMock, namespace string) {
|
||||
w := listBucketsBase(router, namespace)
|
||||
resp := readResponse(router.t, w)
|
||||
require.Equal(router.t, s3middleware.ListBucketsOperation, resp.Method)
|
||||
}
|
||||
|
||||
func listBucketsErr(router *routerMock, namespace string, errCode apiErrors.ErrorCode) {
|
||||
w := listBucketsBase(router, namespace)
|
||||
assertAPIError(router.t, w, errCode)
|
||||
}
|
||||
|
||||
func listBucketsBase(router *routerMock, namespace string) *httptest.ResponseRecorder {
|
||||
w, r := httptest.NewRecorder(), httptest.NewRequest(http.MethodGet, "/", nil)
|
||||
r.Header.Set(FrostfsNamespaceHeader, namespace)
|
||||
router.ServeHTTP(w, r)
|
||||
return w
|
||||
}
|
||||
|
||||
func putObject(router *routerMock, namespace, bktName, objName string) handlerResult {
|
||||
w := putObjectBase(router, namespace, bktName, objName)
|
||||
resp := readResponse(router.t, w)
|
||||
require.Equal(router.t, s3middleware.PutObjectOperation, resp.Method)
|
||||
return resp
|
||||
}
|
||||
|
||||
func putObjectErr(router *routerMock, namespace, bktName, objName string, errCode apiErrors.ErrorCode) {
|
||||
w := putObjectBase(router, namespace, bktName, objName)
|
||||
assertAPIError(router.t, w, errCode)
|
||||
}
|
||||
|
||||
func putObjectBase(router *routerMock, namespace, bktName, objName string) *httptest.ResponseRecorder {
|
||||
w, r := httptest.NewRecorder(), httptest.NewRequest(http.MethodPut, "/"+bktName+"/"+objName, nil)
|
||||
r.Header.Set(FrostfsNamespaceHeader, namespace)
|
||||
router.ServeHTTP(w, r)
|
||||
return w
|
||||
}
|
||||
|
||||
func listObjectsV1(router *routerMock, namespace, bktName, prefix, delimiter, maxKeys string) handlerResult {
|
||||
w := listObjectsV1Base(router, namespace, bktName, prefix, delimiter, maxKeys)
|
||||
resp := readResponse(router.t, w)
|
||||
require.Equal(router.t, s3middleware.ListObjectsV1Operation, resp.Method)
|
||||
return resp
|
||||
}
|
||||
|
||||
func listObjectsV1Err(router *routerMock, namespace, bktName, prefix, delimiter, maxKeys string, errCode apiErrors.ErrorCode) {
|
||||
w := listObjectsV1Base(router, namespace, bktName, prefix, delimiter, maxKeys)
|
||||
assertAPIError(router.t, w, errCode)
|
||||
}
|
||||
|
||||
func listObjectsV1Base(router *routerMock, namespace, bktName, prefix, delimiter, maxKeys string) *httptest.ResponseRecorder {
|
||||
queries := url.Values{}
|
||||
if len(prefix) > 0 {
|
||||
queries.Add(s3middleware.QueryPrefix, prefix)
|
||||
}
|
||||
if len(delimiter) > 0 {
|
||||
queries.Add(s3middleware.QueryDelimiter, delimiter)
|
||||
}
|
||||
if len(maxKeys) > 0 {
|
||||
queries.Add(s3middleware.QueryMaxKeys, maxKeys)
|
||||
}
|
||||
encoded := queries.Encode()
|
||||
|
||||
w, r := httptest.NewRecorder(), httptest.NewRequest(http.MethodGet, "/"+bktName, nil)
|
||||
r.URL.RawQuery = encoded
|
||||
r.Header.Set(FrostfsNamespaceHeader, namespace)
|
||||
router.ServeHTTP(w, r)
|
||||
return w
|
||||
}
|
||||
|
||||
func TestOwnerIDRetrieving(t *testing.T) {
|
||||
chiRouter := prepareRouter(t)
|
||||
|
||||
ns, bktName, objName := "", "test-bucket", "test-object"
|
||||
|
||||
createBucket(chiRouter, ns, bktName)
|
||||
|
||||
resp := putObject(chiRouter, ns, bktName, objName)
|
||||
require.NotEqual(t, "anon", resp.ReqInfo.User)
|
||||
|
||||
chiRouter.cfg.Center.(*centerMock).anon = true
|
||||
resp = putObject(chiRouter, ns, bktName, objName)
|
||||
require.Equal(t, "anon", resp.ReqInfo.User)
|
||||
}
|
||||
|
||||
func TestBillingMetrics(t *testing.T) {
|
||||
chiRouter := prepareRouter(t)
|
||||
|
||||
ns, bktName, objName := "", "test-bucket", "test-object"
|
||||
|
||||
createBucket(chiRouter, ns, bktName)
|
||||
dump := chiRouter.cfg.Metrics.UsersAPIStats().DumpMetrics()
|
||||
require.Len(t, dump.Requests, 1)
|
||||
require.NotEqual(t, "anon", dump.Requests[0].User)
|
||||
require.Equal(t, metrics.PUTRequest, dump.Requests[0].Operation)
|
||||
require.Equal(t, bktName, dump.Requests[0].Bucket)
|
||||
require.Equal(t, 1, dump.Requests[0].Requests)
|
||||
|
||||
chiRouter.cfg.Center.(*centerMock).anon = true
|
||||
putObject(chiRouter, ns, bktName, objName)
|
||||
dump = chiRouter.cfg.Metrics.UsersAPIStats().DumpMetrics()
|
||||
require.Len(t, dump.Requests, 1)
|
||||
require.Equal(t, "anon", dump.Requests[0].User)
|
||||
handleMock := &handlerMock{t: t}
|
||||
cntrMock := ¢erMock{}
|
||||
log := zaptest.NewLogger(t)
|
||||
metric := &metrics.AppMetrics{}
|
||||
|
||||
chiRouter := chi.NewRouter()
|
||||
AttachChi(chiRouter, nil, throttleOps, handleMock, cntrMock, log, metric)
|
||||
return chiRouter
|
||||
}
|
||||
|
||||
func readResponse(t *testing.T, w *httptest.ResponseRecorder) handlerResult {
|
||||
|
@ -634,18 +132,3 @@ func readResponse(t *testing.T, w *httptest.ResponseRecorder) handlerResult {
|
|||
require.NoErrorf(t, err, "actual body: '%s'", string(resData))
|
||||
return res
|
||||
}
|
||||
|
||||
func assertAPIError(t *testing.T, w *httptest.ResponseRecorder, expectedErrorCode apiErrors.ErrorCode) {
|
||||
actualErrorResponse := &s3middleware.ErrorResponse{}
|
||||
err := xml.NewDecoder(w.Result().Body).Decode(actualErrorResponse)
|
||||
require.NoError(t, err)
|
||||
|
||||
expectedError := apiErrors.GetAPIError(expectedErrorCode)
|
||||
|
||||
require.Equal(t, expectedError.HTTPStatusCode, w.Code)
|
||||
require.Equal(t, expectedError.Code, actualErrorResponse.Code)
|
||||
|
||||
if expectedError.ErrCode != apiErrors.ErrInternalError {
|
||||
require.Contains(t, actualErrorResponse.Message, expectedError.Description)
|
||||
}
|
||||
}
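assertAPIError decodes the gateway's XML error body and compares its code and HTTP status with the expected API error. The snippet below sketches that round trip with a local struct; the field names are assumptions chosen for illustration, not the gateway's real ErrorResponse type.

```go
package main

import (
	"encoding/xml"
	"fmt"
	"strings"
)

// errorResponse is a simplified stand-in for an S3-style XML error body.
type errorResponse struct {
	XMLName xml.Name `xml:"Error"`
	Code    string   `xml:"Code"`
	Message string   `xml:"Message"`
}

func main() {
	body := `<Error><Code>AccessDenied</Code><Message>Access Denied.</Message></Error>`

	var resp errorResponse
	if err := xml.NewDecoder(strings.NewReader(body)).Decode(&resp); err != nil {
		panic(err)
	}

	// A test would compare these against the expected API error, as assertAPIError does.
	fmt.Println(resp.Code, "-", resp.Message)
}
```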
@ -22,7 +22,6 @@ import (
|
|||
frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
|
||||
|
@ -82,11 +81,6 @@ type FrostFS interface {
|
|||
TimeToEpoch(context.Context, time.Time) (uint64, uint64, error)
|
||||
}
|
||||
|
||||
// FrostFSID represents interface to interact with frostfsid contract.
|
||||
type FrostFSID interface {
|
||||
RegisterPublicKey(ns string, key *keys.PublicKey) error
|
||||
}
|
||||
|
||||
// Agent contains client communicating with FrostFS and logger.
|
||||
type Agent struct {
|
||||
frostFS FrostFS
|
||||
|
@ -114,16 +108,14 @@ type (
|
|||
Lifetime time.Duration
|
||||
AwsCliCredentialsFile string
|
||||
ContainerPolicies ContainerPolicies
|
||||
CustomAttributes []object.Attribute
|
||||
}
|
||||
|
||||
// UpdateSecretOptions contains options for passing to Agent.UpdateSecret method.
|
||||
UpdateSecretOptions struct {
|
||||
FrostFSKey *keys.PrivateKey
|
||||
GatesPublicKeys []*keys.PublicKey
|
||||
Address oid.Address
|
||||
GatePrivateKey *keys.PrivateKey
|
||||
CustomAttributes []object.Attribute
|
||||
FrostFSKey *keys.PrivateKey
|
||||
GatesPublicKeys []*keys.PublicKey
|
||||
Address oid.Address
|
||||
GatePrivateKey *keys.PrivateKey
|
||||
}
|
||||
|
||||
tokenUpdateOptions struct {
|
||||
|
@ -279,23 +271,9 @@ func (a *Agent) IssueSecret(ctx context.Context, w io.Writer, options *IssueSecr
	a.log.Info(logs.StoreBearerTokenIntoFrostFS,
		zap.Stringer("owner_tkn", idOwner))

	cfg := tokens.Config{
		FrostFS:     a.frostFS,
		Key:         secrets.EphemeralKey,
		CacheConfig: cache.DefaultAccessBoxConfig(a.log),
	}
	creds := tokens.New(a.frostFS, secrets.EphemeralKey, cache.DefaultAccessBoxConfig(a.log))

	creds := tokens.New(cfg)

	prm := tokens.CredentialsParam{
		OwnerID:          idOwner,
		AccessBox:        box,
		Expiration:       lifetime.Exp,
		Keys:             options.GatesPublicKeys,
		CustomAttributes: options.CustomAttributes,
	}

	addr, err := creds.Put(ctx, id, prm)
	addr, err := creds.Put(ctx, id, idOwner, box, lifetime.Exp, options.GatesPublicKeys...)
	if err != nil {
		return fmt.Errorf("failed to put creds: %w", err)
	}
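The hunk above swaps a positional constructor, `tokens.New(frostFS, key, cacheConfig)`, for one that takes a `tokens.Config` struct, and bundles the Put arguments into `tokens.CredentialsParam`. The sketch below only illustrates that refactoring pattern with hypothetical types (it is not the real `tokens` package): an options struct keeps call sites readable and lets later fields such as `CustomAttributes` be added without breaking existing callers.

```go
package main

import "fmt"

// Old style: positional arguments; adding a parameter breaks every caller.
func newClientPositional(endpoint string, timeoutSec int) string {
	return fmt.Sprintf("client(%s, %ds)", endpoint, timeoutSec)
}

// New style: a config struct; zero values act as defaults and new fields
// (like CustomAttributes in the diff) can be appended without signature changes.
type ClientConfig struct {
	Endpoint   string
	TimeoutSec int
	Attributes map[string]string // later, optional addition
}

func newClient(cfg ClientConfig) string {
	if cfg.TimeoutSec == 0 {
		cfg.TimeoutSec = 30
	}
	return fmt.Sprintf("client(%s, %ds, attrs=%d)", cfg.Endpoint, cfg.TimeoutSec, len(cfg.Attributes))
}

func main() {
	fmt.Println(newClientPositional("s01.frostfs.devenv:8080", 30))
	fmt.Println(newClient(ClientConfig{
		Endpoint:   "s01.frostfs.devenv:8080",
		Attributes: map[string]string{"LOGIN": "example"},
	}))
}
```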
@ -304,7 +282,7 @@ func (a *Agent) IssueSecret(ctx context.Context, w io.Writer, options *IssueSecr
|
|||
ir := &issuingResult{
|
||||
InitialAccessKeyID: accessKeyID,
|
||||
AccessKeyID: accessKeyID,
|
||||
SecretAccessKey: secrets.SecretKey,
|
||||
SecretAccessKey: secrets.AccessKey,
|
||||
OwnerPrivateKey: hex.EncodeToString(secrets.EphemeralKey.Bytes()),
|
||||
WalletPublicKey: hex.EncodeToString(options.FrostFSKey.PublicKey().Bytes()),
|
||||
ContainerID: id.EncodeToString(),
|
||||
|
@ -327,7 +305,7 @@ func (a *Agent) IssueSecret(ctx context.Context, w io.Writer, options *IssueSecr
|
|||
}
|
||||
defer file.Close()
|
||||
if _, err = file.WriteString(fmt.Sprintf("\n[%s]\naws_access_key_id = %s\naws_secret_access_key = %s\n",
|
||||
profileName, accessKeyID, secrets.SecretKey)); err != nil {
|
||||
profileName, accessKeyID, secrets.AccessKey)); err != nil {
|
||||
return fmt.Errorf("fails to write to file: %w", err)
|
||||
}
|
||||
}
|
||||
|
@ -336,20 +314,14 @@ func (a *Agent) IssueSecret(ctx context.Context, w io.Writer, options *IssueSecr

// UpdateSecret updates an auth token (change list of gates that can use credential), puts new cred version to the FrostFS network and writes to io.Writer a result.
func (a *Agent) UpdateSecret(ctx context.Context, w io.Writer, options *UpdateSecretOptions) error {
	cfg := tokens.Config{
		FrostFS:     a.frostFS,
		Key:         options.GatePrivateKey,
		CacheConfig: cache.DefaultAccessBoxConfig(a.log),
	}

	creds := tokens.New(cfg)
	creds := tokens.New(a.frostFS, options.GatePrivateKey, cache.DefaultAccessBoxConfig(a.log))

	box, err := creds.GetBox(ctx, options.Address)
	if err != nil {
		return fmt.Errorf("get accessbox: %w", err)
	}

	secret, err := hex.DecodeString(box.Gate.SecretKey)
	secret, err := hex.DecodeString(box.Gate.AccessKey)
	if err != nil {
		return fmt.Errorf("failed to decode secret key access box: %w", err)
	}
@ -377,16 +349,8 @@ func (a *Agent) UpdateSecret(ctx context.Context, w io.Writer, options *UpdateSe
|
|||
a.log.Info(logs.UpdateAccessCredObjectIntoFrostFS,
|
||||
zap.Stringer("owner_tkn", idOwner))
|
||||
|
||||
prm := tokens.CredentialsParam{
|
||||
OwnerID: idOwner,
|
||||
AccessBox: updatedBox,
|
||||
Expiration: lifetime.Exp,
|
||||
Keys: options.GatesPublicKeys,
|
||||
CustomAttributes: options.CustomAttributes,
|
||||
}
|
||||
|
||||
oldAddr := options.Address
|
||||
addr, err := creds.Update(ctx, oldAddr, prm)
|
||||
addr, err := creds.Update(ctx, oldAddr, idOwner, updatedBox, lifetime.Exp, options.GatesPublicKeys...)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to update creds: %w", err)
|
||||
}
|
||||
|
@ -394,7 +358,7 @@ func (a *Agent) UpdateSecret(ctx context.Context, w io.Writer, options *UpdateSe
|
|||
ir := &issuingResult{
|
||||
AccessKeyID: accessKeyIDFromAddr(addr),
|
||||
InitialAccessKeyID: accessKeyIDFromAddr(oldAddr),
|
||||
SecretAccessKey: secrets.SecretKey,
|
||||
SecretAccessKey: secrets.AccessKey,
|
||||
OwnerPrivateKey: hex.EncodeToString(secrets.EphemeralKey.Bytes()),
|
||||
WalletPublicKey: hex.EncodeToString(options.FrostFSKey.PublicKey().Bytes()),
|
||||
ContainerID: addr.Container().EncodeToString(),
|
||||
|
@ -418,13 +382,7 @@ func getLifetimeFromGateData(gateData *accessbox.GateData) lifetimeOptions {
// ObtainSecret receives an existing secret access key from FrostFS and
// writes to io.Writer the secret access key.
func (a *Agent) ObtainSecret(ctx context.Context, w io.Writer, options *ObtainSecretOptions) error {
	cfg := tokens.Config{
		FrostFS:     a.frostFS,
		Key:         options.GatePrivateKey,
		CacheConfig: cache.DefaultAccessBoxConfig(a.log),
	}

	bearerCreds := tokens.New(cfg)
	bearerCreds := tokens.New(a.frostFS, options.GatePrivateKey, cache.DefaultAccessBoxConfig(a.log))

	var addr oid.Address
	if err := addr.DecodeString(options.SecretAddress); err != nil {

@ -438,7 +396,7 @@ func (a *Agent) ObtainSecret(ctx context.Context, w io.Writer, options *ObtainSe

	or := &obtainingResult{
		BearerToken:     box.Gate.BearerToken,
		SecretAccessKey: box.Gate.SecretKey,
		SecretAccessKey: box.Gate.AccessKey,
	}

	enc := json.NewEncoder(w)
@ -12,10 +12,6 @@ type (
	businessLogicError struct {
		err error
	}

	frostFSIDInitError struct {
		err error
	}
)

func wrapPreparationError(e error) error {

@ -42,14 +38,6 @@ func (e businessLogicError) Error() string {
	return e.err.Error()
}

func wrapFrostFSIDInitError(e error) error {
	return frostFSIDInitError{e}
}

func (e frostFSIDInitError) Error() string {
	return e.err.Error()
}

// ExitCode picks corresponding error code depending on the type of error provided.
// Returns 1 if error type is unknown.
func ExitCode(e error) int {

@ -60,8 +48,6 @@ func ExitCode(e error) int {
		return 3
	case businessLogicError:
		return 4
	case frostFSIDInitError:
		return 4
	}
	return 1
}
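ExitCode maps the authmate wrapper error types above to process exit codes and returns 1 for anything it does not recognize. Below is a hedged sketch of how a command entry point could use such a mapping; the wrapper types and most of the specific codes here are local stand-ins for illustration, not the real cmd package values.

```go
package main

import (
	"errors"
	"fmt"
	"os"
)

// preparationError and businessLogicError are local stand-ins for the
// authmate command's wrapper types; the real ones live in the cmd package.
type preparationError struct{ err error }

func (e preparationError) Error() string { return e.err.Error() }

type businessLogicError struct{ err error }

func (e businessLogicError) Error() string { return e.err.Error() }

// exitCode mimics the type switch shown in the diff: known wrappers get
// dedicated codes, everything else exits with 1.
func exitCode(e error) int {
	switch e.(type) {
	case preparationError:
		return 2 // illustrative code, assumed
	case businessLogicError:
		return 4
	}
	return 1
}

func run() error {
	return businessLogicError{errors.New("failed to register key")}
}

func main() {
	if err := run(); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(exitCode(err))
	}
}
```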
@ -8,7 +8,6 @@ import (
|
|||
"time"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/authmate"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/frostfsid"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/wallet"
|
||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||
|
@ -17,12 +16,11 @@ import (
|
|||
)
|
||||
|
||||
var issueSecretCmd = &cobra.Command{
|
||||
Use: "issue-secret",
|
||||
Short: "Issue a secret in FrostFS network",
|
||||
Long: "Creates new s3 credentials to use with frostfs-s3-gw",
|
||||
Example: `frostfs-s3-authmate issue-secret --wallet wallet.json --peer s01.frostfs.devenv:8080 --gate-public-key 031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a
|
||||
frostfs-s3-authmate issue-secret --wallet wallet.json --peer s01.frostfs.devenv:8080 --gate-public-key 031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a --attributes LOGIN=NUUb82KR2JrVByHs2YSKgtK29gKnF5q6Vt`,
|
||||
RunE: runIssueSecretCmd,
|
||||
Use: "issue-secret",
|
||||
Short: "Issue a secret in FrostFS network",
|
||||
Long: "Creates new s3 credentials to use with frostfs-s3-gw",
|
||||
Example: `frostfs-s3-authmate issue-secret --wallet wallet.json --peer s01.frostfs.devenv:8080 --gate-public-key 031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a`,
|
||||
RunE: runIssueSecretCmd,
|
||||
}
|
||||
|
||||
const (
|
||||
|
@ -39,11 +37,6 @@ const (
|
|||
lifetimeFlag = "lifetime"
|
||||
containerPolicyFlag = "container-policy"
|
||||
awsCLICredentialFlag = "aws-cli-credentials"
|
||||
frostfsIDFlag = "frostfsid"
|
||||
frostfsIDProxyFlag = "frostfsid-proxy"
|
||||
frostfsIDNamespaceFlag = "frostfsid-namespace"
|
||||
rpcEndpointFlag = "rpc-endpoint"
|
||||
attributesFlag = "attributes"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -84,11 +77,6 @@ func initIssueSecretCmd() {
|
|||
issueSecretCmd.Flags().Duration(poolHealthcheckTimeoutFlag, defaultPoolHealthcheckTimeout, "Timeout for request to node to decide if it is alive")
|
||||
issueSecretCmd.Flags().Duration(poolRebalanceIntervalFlag, defaultPoolRebalanceInterval, "Interval for updating nodes health status")
|
||||
issueSecretCmd.Flags().Duration(poolStreamTimeoutFlag, defaultPoolStreamTimeout, "Timeout for individual operation in streaming RPC")
|
||||
issueSecretCmd.Flags().String(frostfsIDFlag, "", "FrostfsID contract hash (LE) or name in NNS to register public key in contract (rpc-endpoint flag also must be provided)")
|
||||
issueSecretCmd.Flags().String(frostfsIDProxyFlag, "", "Proxy contract hash (LE) or name in NNS to use when interact with frostfsid contract")
|
||||
issueSecretCmd.Flags().String(frostfsIDNamespaceFlag, "", "Namespace to register public key in frostfsid contract")
|
||||
issueSecretCmd.Flags().String(rpcEndpointFlag, "", "NEO node RPC address")
|
||||
issueSecretCmd.Flags().String(attributesFlag, "", "User attributes in form of Key1=Value1,Key2=Value2 (note: you cannot override system attributes)")
|
||||
|
||||
_ = issueSecretCmd.MarkFlagRequired(walletFlag)
|
||||
_ = issueSecretCmd.MarkFlagRequired(peerFlag)
|
||||
|
@ -164,34 +152,6 @@ func runIssueSecretCmd(cmd *cobra.Command, _ []string) error {
|
|||
return wrapFrostFSInitError(fmt.Errorf("failed to create FrostFS component: %s", err))
|
||||
}
|
||||
|
||||
frostFSID := viper.GetString(frostfsIDFlag)
|
||||
if frostFSID != "" {
|
||||
rpcAddress := viper.GetString(rpcEndpointFlag)
|
||||
if rpcAddress == "" {
|
||||
return wrapPreparationError(fmt.Errorf("you can use '%s' flag only along with '%s'", frostfsIDFlag, rpcEndpointFlag))
|
||||
}
|
||||
cfg := frostfsid.Config{
|
||||
RPCAddress: rpcAddress,
|
||||
Contract: frostFSID,
|
||||
ProxyContract: viper.GetString(frostfsIDProxyFlag),
|
||||
Key: key,
|
||||
}
|
||||
|
||||
frostfsIDClient, err := createFrostFSID(ctx, log, cfg)
|
||||
if err != nil {
|
||||
return wrapFrostFSIDInitError(err)
|
||||
}
|
||||
|
||||
if err = frostfsIDClient.RegisterPublicKey(viper.GetString(frostfsIDNamespaceFlag), key.PublicKey()); err != nil {
|
||||
return wrapBusinessLogicError(fmt.Errorf("failed to register key in frostfsid: %w", err))
|
||||
}
|
||||
}
|
||||
|
||||
customAttrs, err := parseObjectAttrs(viper.GetString(attributesFlag))
|
||||
if err != nil {
|
||||
return wrapPreparationError(fmt.Errorf("failed to parse attributes: %s", err))
|
||||
}
|
||||
|
||||
issueSecretOptions := &authmate.IssueSecretOptions{
|
||||
Container: authmate.ContainerOptions{
|
||||
ID: cnrID,
|
||||
|
@ -207,7 +167,6 @@ func runIssueSecretCmd(cmd *cobra.Command, _ []string) error {
|
|||
ContainerPolicies: policies,
|
||||
Lifetime: lifetime,
|
||||
AwsCliCredentialsFile: viper.GetString(awsCLICredentialFlag),
|
||||
CustomAttributes: customAttrs,
|
||||
}
|
||||
|
||||
if err = authmate.New(log, frostFS).IssueSecret(ctx, os.Stdout, issueSecretOptions); err != nil {
|
||||
|
|
|
@ -7,7 +7,6 @@ import (
|
|||
"strings"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/authmate"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/frostfsid"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/wallet"
|
||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||
|
@ -40,11 +39,6 @@ func initUpdateSecretCmd() {
|
|||
updateSecretCmd.Flags().Duration(poolHealthcheckTimeoutFlag, defaultPoolHealthcheckTimeout, "Timeout for request to node to decide if it is alive")
|
||||
updateSecretCmd.Flags().Duration(poolRebalanceIntervalFlag, defaultPoolRebalanceInterval, "Interval for updating nodes health status")
|
||||
updateSecretCmd.Flags().Duration(poolStreamTimeoutFlag, defaultPoolStreamTimeout, "Timeout for individual operation in streaming RPC")
|
||||
updateSecretCmd.Flags().String(frostfsIDFlag, "", "FrostfsID contract hash (LE) or name in NNS to register public key in contract (rpc-endpoint flag also must be provided)")
|
||||
updateSecretCmd.Flags().String(frostfsIDProxyFlag, "", "Proxy contract hash (LE) or name in NNS to use when interact with frostfsid contract")
|
||||
updateSecretCmd.Flags().String(frostfsIDNamespaceFlag, "", "Namespace to register public key in frostfsid contract")
|
||||
updateSecretCmd.Flags().String(rpcEndpointFlag, "", "NEO node RPC address")
|
||||
updateSecretCmd.Flags().String(attributesFlag, "", "User attributes in form of Key1=Value1,Key2=Value2 (note: you cannot override system attributes)")
|
||||
|
||||
_ = updateSecretCmd.MarkFlagRequired(walletFlag)
|
||||
_ = updateSecretCmd.MarkFlagRequired(peerFlag)
|
||||
|
@ -100,40 +94,11 @@ func runUpdateSecretCmd(cmd *cobra.Command, _ []string) error {
|
|||
return wrapFrostFSInitError(fmt.Errorf("failed to create FrostFS component: %s", err))
|
||||
}
|
||||
|
||||
frostFSID := viper.GetString(frostfsIDFlag)
|
||||
if frostFSID != "" {
|
||||
rpcAddress := viper.GetString(rpcEndpointFlag)
|
||||
if rpcAddress == "" {
|
||||
return wrapPreparationError(fmt.Errorf("you can use '%s' flag only along with '%s'", frostfsIDFlag, rpcEndpointFlag))
|
||||
}
|
||||
cfg := frostfsid.Config{
|
||||
RPCAddress: rpcAddress,
|
||||
Contract: frostFSID,
|
||||
ProxyContract: viper.GetString(frostfsIDProxyFlag),
|
||||
Key: key,
|
||||
}
|
||||
|
||||
frostfsIDClient, err := createFrostFSID(ctx, log, cfg)
|
||||
if err != nil {
|
||||
return wrapFrostFSIDInitError(err)
|
||||
}
|
||||
|
||||
if err = frostfsIDClient.RegisterPublicKey(viper.GetString(frostfsIDNamespaceFlag), key.PublicKey()); err != nil {
|
||||
return wrapBusinessLogicError(fmt.Errorf("failed to register key in frostfsid: %w", err))
|
||||
}
|
||||
}
|
||||
|
||||
customAttrs, err := parseObjectAttrs(viper.GetString(attributesFlag))
|
||||
if err != nil {
|
||||
return wrapPreparationError(fmt.Errorf("failed to parse attributes: %s", err))
|
||||
}
|
||||
|
||||
updateSecretOptions := &authmate.UpdateSecretOptions{
|
||||
Address: accessBoxAddress,
|
||||
FrostFSKey: key,
|
||||
GatesPublicKeys: gatesPublicKeys,
|
||||
GatePrivateKey: gateKey,
|
||||
CustomAttributes: customAttrs,
|
||||
Address: accessBoxAddress,
|
||||
FrostFSKey: key,
|
||||
GatesPublicKeys: gatesPublicKeys,
|
||||
GatePrivateKey: gateKey,
|
||||
}
|
||||
|
||||
if err = authmate.New(log, frostFS).UpdateSecret(ctx, os.Stdout, updateSecretOptions); err != nil {
|
||||
|
|
|
@ -5,15 +5,12 @@ import (
|
|||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/authmate"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/frostfsid"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
|
||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||
"github.com/spf13/viper"
|
||||
|
@ -144,34 +141,3 @@ func getLogger() *zap.Logger {

	return log
}

func createFrostFSID(ctx context.Context, log *zap.Logger, cfg frostfsid.Config) (authmate.FrostFSID, error) {
	log.Debug(logs.PrepareFrostfsIDClient)

	cli, err := frostfsid.New(ctx, cfg)
	if err != nil {
		return nil, fmt.Errorf("create frostfsid client: %w", err)
	}

	return cli, nil
}

func parseObjectAttrs(attributes string) ([]object.Attribute, error) {
	if len(attributes) == 0 {
		return nil, nil
	}

	rawAttrs := strings.Split(attributes, ",")

	attrs := make([]object.Attribute, len(rawAttrs))
	for i := range rawAttrs {
		k, v, found := strings.Cut(rawAttrs[i], "=")
		if !found {
			return nil, fmt.Errorf("invalid attribute format: %s", rawAttrs[i])
		}
		attrs[i].SetKey(k)
		attrs[i].SetValue(v)
	}

	return attrs, nil
}
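parseObjectAttrs turns the --attributes flag value into FrostFS object attributes by splitting on commas and cutting each pair at the first '='. A standalone sketch of the same parsing logic follows; it returns plain key/value pairs instead of the SDK's []object.Attribute, and the sample input values are illustrative only.

```go
package main

import (
	"fmt"
	"strings"
)

// parseAttrs mirrors parseObjectAttrs but returns ordered key/value pairs
// instead of FrostFS SDK object.Attribute values.
func parseAttrs(attributes string) ([][2]string, error) {
	if len(attributes) == 0 {
		return nil, nil
	}

	rawAttrs := strings.Split(attributes, ",")
	attrs := make([][2]string, len(rawAttrs))
	for i := range rawAttrs {
		k, v, found := strings.Cut(rawAttrs[i], "=")
		if !found {
			return nil, fmt.Errorf("invalid attribute format: %s", rawAttrs[i])
		}
		attrs[i] = [2]string{k, v}
	}
	return attrs, nil
}

func main() {
	// strings.Cut splits only at the first '=', so values may contain '=' themselves.
	attrs, err := parseAttrs("LOGIN=NUUb82KR2JrVByHs2YSKgtK29gKnF5q6Vt,Tier=gold")
	if err != nil {
		panic(err)
	}
	for _, a := range attrs {
		fmt.Printf("%s = %s\n", a[0], a[1])
	}
}
```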
639
cmd/s3-gw/app.go
|
@ -3,20 +3,16 @@ package main
|
|||
import (
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"encoding/xml"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/signal"
|
||||
"runtime/debug"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
v2container "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
|
||||
grpctracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
|
||||
|
@ -24,40 +20,32 @@ import (
|
|||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/cache"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/handler"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
|
||||
s3middleware "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/notifications"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/resolver"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/tokens"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/frostfsid"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/policy"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/policy/contract"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/services"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/version"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/wallet"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/xml"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/metrics"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/pkg/service/control"
|
||||
controlSvc "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/pkg/service/control/server"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/pkg/service/tree"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
|
||||
treepool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/go-chi/chi/v5/middleware"
|
||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||
"github.com/spf13/viper"
|
||||
"go.uber.org/zap"
|
||||
"golang.org/x/exp/slices"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
const awsDefaultNamespace = "http://s3.amazonaws.com/doc/2006-03-01/"
|
||||
|
||||
type (
|
||||
// App is the main application structure.
|
||||
App struct {
|
||||
ctr s3middleware.Center
|
||||
ctr auth.Center
|
||||
log *zap.Logger
|
||||
cfg *viper.Viper
|
||||
pool *pool.Pool
|
||||
|
@ -67,15 +55,7 @@ type (
|
|||
obj layer.Client
|
||||
api api.Handler
|
||||
|
||||
frostfsid *frostfsid.FrostFSID
|
||||
|
||||
policyStorage *policy.Storage
|
||||
|
||||
servers []Server
|
||||
unbindServers []ServerInfo
|
||||
mu sync.RWMutex
|
||||
|
||||
controlAPI *grpc.Server
|
||||
servers []Server
|
||||
|
||||
metrics *metrics.AppMetrics
|
||||
bucketResolver *resolver.BucketResolver
|
||||
|
@ -87,27 +67,13 @@ type (
|
|||
}
|
||||
|
||||
appSettings struct {
|
||||
logLevel zap.AtomicLevel
|
||||
maxClient maxClientsConfig
|
||||
defaultMaxAge int
|
||||
reconnectInterval time.Duration
|
||||
notificatorEnabled bool
|
||||
resolveZoneList []string
|
||||
isResolveListAllow bool // True if ResolveZoneList contains allowed zones
|
||||
frostfsidValidation bool
|
||||
|
||||
mu sync.RWMutex
|
||||
namespaces Namespaces
|
||||
defaultXMLNS bool
|
||||
bypassContentEncodingInChunks bool
|
||||
clientCut bool
|
||||
maxBufferSizeForPut uint64
|
||||
md5Enabled bool
|
||||
aclEnabled bool
|
||||
namespaceHeader string
|
||||
defaultNamespaces []string
|
||||
authorizedControlAPIKeys [][]byte
|
||||
policyDenyByDefault bool
|
||||
logLevel zap.AtomicLevel
|
||||
policies *placementPolicy
|
||||
xmlDecoder *xml.DecoderProvider
|
||||
maxClient maxClientsConfig
|
||||
bypassContentEncodingInChunks atomic.Bool
|
||||
clientCut atomic.Bool
|
||||
md5Enabled atomic.Bool
|
||||
}
|
||||
|
||||
maxClientsConfig struct {
|
||||
|
@ -119,20 +85,21 @@ type (
|
|||
logger *zap.Logger
|
||||
lvl zap.AtomicLevel
|
||||
}
|
||||
|
||||
placementPolicy struct {
|
||||
mu sync.RWMutex
|
||||
defaultPolicy netmap.PlacementPolicy
|
||||
regionMap map[string]netmap.PlacementPolicy
|
||||
copiesNumbers map[string][]uint32
|
||||
defaultCopiesNumbers []uint32
|
||||
}
|
||||
)
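The appSettings diff above shows two approaches to mutating runtime settings while requests are being served: one side stores individual flags in atomic.Bool values, the other groups more state under a sync.RWMutex. The sketch below uses hypothetical field names to illustrate the trade-off: atomics are cheap for independent flags, while a mutex keeps several related fields consistent as one unit.

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// atomicSettings: each flag is independent, so lock-free atomics are enough.
type atomicSettings struct {
	clientCut  atomic.Bool
	md5Enabled atomic.Bool
}

func (s *atomicSettings) setClientCut(v bool) { s.clientCut.Store(v) }
func (s *atomicSettings) ClientCut() bool     { return s.clientCut.Load() }

// lockedSettings: several related fields are read and updated together,
// so an RWMutex guards them as one consistent snapshot.
type lockedSettings struct {
	mu                sync.RWMutex
	namespaceHeader   string
	defaultNamespaces []string
}

func (s *lockedSettings) update(header string, namespaces []string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.namespaceHeader = header
	s.defaultNamespaces = namespaces
}

func (s *lockedSettings) NamespaceHeader() string {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return s.namespaceHeader
}

func main() {
	a := &atomicSettings{}
	a.setClientCut(true)

	l := &lockedSettings{}
	l.update("X-Frostfs-Namespace", []string{"", "root"})

	fmt.Println(a.ClientCut(), l.NamespaceHeader())
}
```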
func newApp(ctx context.Context, log *Logger, v *viper.Viper) *App {
|
||||
objPool, treePool, key := getPools(ctx, log.logger, v)
|
||||
|
||||
cfg := tokens.Config{
|
||||
FrostFS: frostfs.NewAuthmateFrostFS(objPool, key),
|
||||
Key: key,
|
||||
CacheConfig: getAccessBoxCacheConfig(v, log.logger),
|
||||
RemovingCheckAfterDurations: fetchRemovingCheckInterval(v, log.logger),
|
||||
}
|
||||
|
||||
// prepare auth center
|
||||
ctr := auth.New(tokens.New(cfg), v.GetStringSlice(cfgAllowedAccessKeyIDPrefixes))
|
||||
ctr := auth.New(frostfs.NewAuthmateFrostFS(objPool, key), key, v.GetStringSlice(cfgAllowedAccessKeyIDPrefixes), getAccessBoxCacheConfig(v, log.logger))
|
||||
|
||||
app := &App{
|
||||
ctr: ctr,
|
||||
|
@ -145,7 +112,7 @@ func newApp(ctx context.Context, log *Logger, v *viper.Viper) *App {
|
|||
webDone: make(chan struct{}, 1),
|
||||
wrkDone: make(chan struct{}, 1),
|
||||
|
||||
settings: newAppSettings(log, v, key),
|
||||
settings: newAppSettings(log, v),
|
||||
}
|
||||
|
||||
app.init(ctx)
|
||||
|
@ -155,11 +122,8 @@ func newApp(ctx context.Context, log *Logger, v *viper.Viper) *App {
|
|||
|
||||
func (a *App) init(ctx context.Context) {
|
||||
a.setRuntimeParameters()
|
||||
a.initFrostfsID(ctx)
|
||||
a.initPolicyStorage(ctx)
|
||||
a.initAPI(ctx)
|
||||
a.initMetrics()
|
||||
a.initControlAPI()
|
||||
a.initServers(ctx)
|
||||
a.initTracing(ctx)
|
||||
}
|
||||
|
@ -177,7 +141,7 @@ func (a *App) initLayer(ctx context.Context) {
|
|||
user.IDFromKey(&gateOwner, a.key.PrivateKey.PublicKey)
|
||||
|
||||
layerCfg := &layer.Config{
|
||||
Cache: layer.NewCache(getCacheOptions(a.cfg, a.log)),
|
||||
Caches: getCacheOptions(a.cfg, a.log),
|
||||
AnonKey: layer.AnonymousKey{
|
||||
Key: randomKey,
|
||||
},
|
||||
|
@ -203,229 +167,43 @@ func (a *App) initLayer(ctx context.Context) {
|
|||
}
|
||||
}
|
||||
|
||||
func newAppSettings(log *Logger, v *viper.Viper, key *keys.PrivateKey) *appSettings {
|
||||
func newAppSettings(log *Logger, v *viper.Viper) *appSettings {
|
||||
settings := &appSettings{
|
||||
logLevel: log.lvl,
|
||||
maxClient: newMaxClients(v),
|
||||
defaultMaxAge: fetchDefaultMaxAge(v, log.logger),
|
||||
reconnectInterval: fetchReconnectInterval(v),
|
||||
notificatorEnabled: v.GetBool(cfgEnableNATS),
|
||||
frostfsidValidation: v.GetBool(cfgFrostfsIDValidationEnabled),
|
||||
logLevel: log.lvl,
|
||||
policies: newPlacementPolicy(log.logger, v),
|
||||
xmlDecoder: xml.NewDecoderProvider(v.GetBool(cfgKludgeUseDefaultXMLNSForCompleteMultipartUpload)),
|
||||
maxClient: newMaxClients(v),
|
||||
}
|
||||
|
||||
settings.resolveZoneList = v.GetStringSlice(cfgResolveBucketAllow)
|
||||
settings.isResolveListAllow = len(settings.resolveZoneList) > 0
|
||||
if !settings.isResolveListAllow {
|
||||
settings.resolveZoneList = v.GetStringSlice(cfgResolveBucketDeny)
|
||||
}
|
||||
|
||||
settings.update(v, log.logger, key)
|
||||
settings.setBypassContentEncodingInChunks(v.GetBool(cfgKludgeBypassContentEncodingCheckInChunks))
|
||||
settings.setClientCut(v.GetBool(cfgClientCut))
|
||||
settings.setMD5Enabled(v.GetBool(cfgMD5Enabled))
|
||||
|
||||
return settings
|
||||
}
|
||||
|
||||
func (s *appSettings) update(v *viper.Viper, log *zap.Logger, key *keys.PrivateKey) {
|
||||
s.updateNamespacesSettings(v, log)
|
||||
s.useDefaultXMLNamespace(v.GetBool(cfgKludgeUseDefaultXMLNS))
|
||||
s.setACLEnabled(v.GetBool(cfgKludgeACLEnabled))
|
||||
s.setBypassContentEncodingInChunks(v.GetBool(cfgKludgeBypassContentEncodingCheckInChunks))
|
||||
s.setClientCut(v.GetBool(cfgClientCut))
|
||||
s.setBufferMaxSizeForPut(v.GetUint64(cfgBufferMaxSizeForPut))
|
||||
s.setMD5Enabled(v.GetBool(cfgMD5Enabled))
|
||||
s.setAuthorizedControlAPIKeys(append(fetchAuthorizedKeys(log, v), key.PublicKey()))
|
||||
s.setPolicyDenyByDefault(v.GetBool(cfgPolicyDenyByDefault))
|
||||
}
|
||||
|
||||
func (s *appSettings) updateNamespacesSettings(v *viper.Viper, log *zap.Logger) {
|
||||
nsHeader := v.GetString(cfgResolveNamespaceHeader)
|
||||
nsConfig, defaultNamespaces := fetchNamespacesConfig(log, v)
|
||||
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
s.namespaceHeader = nsHeader
|
||||
s.defaultNamespaces = defaultNamespaces
|
||||
s.namespaces = nsConfig.Namespaces
|
||||
}
|
||||
|
||||
func (s *appSettings) BypassContentEncodingInChunks() bool {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
return s.bypassContentEncodingInChunks
|
||||
return s.bypassContentEncodingInChunks.Load()
|
||||
}
|
||||
|
||||
func (s *appSettings) setBypassContentEncodingInChunks(bypass bool) {
|
||||
s.mu.Lock()
|
||||
s.bypassContentEncodingInChunks = bypass
|
||||
s.mu.Unlock()
|
||||
s.bypassContentEncodingInChunks.Store(bypass)
|
||||
}
|
||||
|
||||
func (s *appSettings) ClientCut() bool {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
return s.clientCut
|
||||
return s.clientCut.Load()
|
||||
}
|
||||
|
||||
func (s *appSettings) setClientCut(clientCut bool) {
|
||||
s.mu.Lock()
|
||||
s.clientCut = clientCut
|
||||
s.mu.Unlock()
|
||||
}
|
||||
|
||||
func (s *appSettings) BufferMaxSizeForPut() uint64 {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
return s.maxBufferSizeForPut
|
||||
}
|
||||
|
||||
func (s *appSettings) setBufferMaxSizeForPut(size uint64) {
|
||||
s.mu.Lock()
|
||||
s.maxBufferSizeForPut = size
|
||||
s.mu.Unlock()
|
||||
}
|
||||
|
||||
func (s *appSettings) DefaultPlacementPolicy(namespace string) netmap.PlacementPolicy {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
return s.namespaces[namespace].LocationConstraints[defaultConstraintName]
|
||||
}
|
||||
|
||||
func (s *appSettings) PlacementPolicy(namespace, constraint string) (netmap.PlacementPolicy, bool) {
|
||||
s.mu.RLock()
|
||||
placementPolicy, ok := s.namespaces[namespace].LocationConstraints[constraint]
|
||||
s.mu.RUnlock()
|
||||
|
||||
return placementPolicy, ok
|
||||
}
|
||||
|
||||
func (s *appSettings) CopiesNumbers(namespace, constraint string) ([]uint32, bool) {
|
||||
s.mu.RLock()
|
||||
copiesNumbers, ok := s.namespaces[namespace].CopiesNumbers[constraint]
|
||||
s.mu.RUnlock()
|
||||
|
||||
return copiesNumbers, ok
|
||||
}
|
||||
|
||||
func (s *appSettings) DefaultCopiesNumbers(namespace string) []uint32 {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
return s.namespaces[namespace].CopiesNumbers[defaultConstraintName]
|
||||
}
|
||||
|
||||
func (s *appSettings) NewXMLDecoder(r io.Reader) *xml.Decoder {
|
||||
dec := xml.NewDecoder(r)
|
||||
|
||||
s.mu.RLock()
|
||||
if s.defaultXMLNS {
|
||||
dec.DefaultSpace = awsDefaultNamespace
|
||||
}
|
||||
s.mu.RUnlock()
|
||||
|
||||
return dec
|
||||
}
|
||||
|
||||
func (s *appSettings) useDefaultXMLNamespace(useDefaultNamespace bool) {
|
||||
s.mu.Lock()
|
||||
s.defaultXMLNS = useDefaultNamespace
|
||||
s.mu.Unlock()
|
||||
}
|
||||
|
||||
func (s *appSettings) DefaultMaxAge() int {
|
||||
return s.defaultMaxAge
|
||||
}
|
||||
|
||||
func (s *appSettings) NotificatorEnabled() bool {
|
||||
return s.notificatorEnabled
|
||||
}
|
||||
|
||||
func (s *appSettings) ResolveZoneList() []string {
|
||||
return s.resolveZoneList
|
||||
}
|
||||
|
||||
func (s *appSettings) IsResolveListAllow() bool {
|
||||
return s.isResolveListAllow
|
||||
s.clientCut.Store(clientCut)
|
||||
}
|
||||
|
||||
func (s *appSettings) MD5Enabled() bool {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
return s.md5Enabled
|
||||
return s.md5Enabled.Load()
|
||||
}
|
||||
|
||||
func (s *appSettings) setMD5Enabled(md5Enabled bool) {
|
||||
s.mu.Lock()
|
||||
s.md5Enabled = md5Enabled
|
||||
s.mu.Unlock()
|
||||
}
|
||||
|
||||
func (s *appSettings) setACLEnabled(enableACL bool) {
|
||||
s.mu.Lock()
|
||||
s.aclEnabled = enableACL
|
||||
s.mu.Unlock()
|
||||
}
|
||||
|
||||
func (s *appSettings) ACLEnabled() bool {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
return s.aclEnabled
|
||||
}
|
||||
|
||||
func (s *appSettings) NamespaceHeader() string {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
return s.namespaceHeader
|
||||
}
|
||||
|
||||
func (s *appSettings) FormContainerZone(ns string) (zone string, isDefault bool) {
|
||||
if len(ns) == 0 {
|
||||
return v2container.SysAttributeZoneDefault, true
|
||||
}
|
||||
|
||||
return ns + ".ns", false
|
||||
}
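For example, with this mapping a bucket created in namespace "dev" is resolved in zone "dev.ns", while an empty namespace falls back to the system default container zone.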
|
||||
|
||||
func (s *appSettings) isDefaultNamespace(ns string) bool {
|
||||
s.mu.RLock()
|
||||
namespaces := s.defaultNamespaces
|
||||
s.mu.RUnlock()
|
||||
return slices.Contains(namespaces, ns)
|
||||
}
|
||||
|
||||
func (s *appSettings) FetchRawKeys() [][]byte {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
return s.authorizedControlAPIKeys
|
||||
}
|
||||
|
||||
func (s *appSettings) setAuthorizedControlAPIKeys(keys keys.PublicKeys) {
|
||||
rawPubs := make([][]byte, len(keys))
|
||||
for i := range keys {
|
||||
rawPubs[i] = keys[i].Bytes()
|
||||
}
|
||||
|
||||
s.mu.Lock()
|
||||
s.authorizedControlAPIKeys = rawPubs
|
||||
s.mu.Unlock()
|
||||
}
|
||||
|
||||
func (s *appSettings) ResolveNamespaceAlias(namespace string) string {
|
||||
if s.isDefaultNamespace(namespace) {
|
||||
return defaultNamespace
|
||||
}
|
||||
|
||||
return namespace
|
||||
}
|
||||
|
||||
func (s *appSettings) PolicyDenyByDefault() bool {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
return s.policyDenyByDefault
|
||||
}
|
||||
|
||||
func (s *appSettings) setPolicyDenyByDefault(policyDenyByDefault bool) {
|
||||
s.mu.Lock()
|
||||
s.policyDenyByDefault = policyDenyByDefault
|
||||
s.mu.Unlock()
|
||||
s.md5Enabled.Store(md5Enabled)
|
||||
}
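Both variants in this hunk guard flags that updateSettings rewrites on SIGHUP while request handlers read them concurrently; one side uses the shared RWMutex, the other keeps each flag as a sync/atomic.Bool. A minimal, self-contained sketch of the atomic variant (the type and field names below are illustrative, not taken from the gateway):

package main

import (
	"fmt"
	"sync/atomic"
)

// featureFlags is a hypothetical settings holder: getters run on the request
// path while a config reload stores new values from another goroutine.
type featureFlags struct {
	md5Enabled atomic.Bool
}

func (f *featureFlags) MD5Enabled() bool     { return f.md5Enabled.Load() }
func (f *featureFlags) SetMD5Enabled(v bool) { f.md5Enabled.Store(v) }

func main() {
	var flags featureFlags
	flags.SetMD5Enabled(true) // e.g. re-read from features.md5.enabled on SIGHUP
	fmt.Println(flags.MD5Enabled())
}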
|
||||
|
||||
func (a *App) initAPI(ctx context.Context) {
|
||||
|
@ -433,79 +211,27 @@ func (a *App) initAPI(ctx context.Context) {
|
|||
a.initHandler()
|
||||
}
|
||||
|
||||
func (a *App) initControlAPI() {
|
||||
svc := controlSvc.New(
|
||||
controlSvc.WithSettings(a.settings),
|
||||
controlSvc.WithLogger(a.log),
|
||||
controlSvc.WithChainStorage(a.policyStorage.LocalStorage()),
|
||||
)
|
||||
|
||||
a.controlAPI = grpc.NewServer()
|
||||
|
||||
control.RegisterControlServiceServer(a.controlAPI, svc)
|
||||
}
|
||||
|
||||
func (a *App) initMetrics() {
|
||||
cfg := metrics.AppMetricsConfig{
|
||||
Logger: a.log,
|
||||
PoolStatistics: frostfs.NewPoolStatistic(a.pool),
|
||||
Enabled: a.cfg.GetBool(cfgPrometheusEnabled),
|
||||
}
|
||||
|
||||
a.metrics = metrics.NewAppMetrics(cfg)
|
||||
a.metrics = metrics.NewAppMetrics(a.log, frostfs.NewPoolStatistic(a.pool), a.cfg.GetBool(cfgPrometheusEnabled))
|
||||
a.metrics.State().SetHealth(metrics.HealthStatusStarting)
|
||||
}
|
||||
|
||||
func (a *App) initFrostfsID(ctx context.Context) {
|
||||
var err error
|
||||
a.frostfsid, err = frostfsid.New(ctx, frostfsid.Config{
|
||||
RPCAddress: a.cfg.GetString(cfgRPCEndpoint),
|
||||
Contract: a.cfg.GetString(cfgFrostfsIDContract),
|
||||
ProxyContract: a.cfg.GetString(cfgProxyContract),
|
||||
Key: a.key,
|
||||
})
|
||||
if err != nil {
|
||||
a.log.Fatal(logs.InitFrostfsIDContractFailed, zap.Error(err))
|
||||
}
|
||||
}
|
||||
|
||||
func (a *App) initPolicyStorage(ctx context.Context) {
|
||||
policyContract, err := contract.New(ctx, contract.Config{
|
||||
RPCAddress: a.cfg.GetString(cfgRPCEndpoint),
|
||||
Contract: a.cfg.GetString(cfgPolicyContract),
|
||||
ProxyContract: a.cfg.GetString(cfgProxyContract),
|
||||
Key: a.key,
|
||||
})
|
||||
if err != nil {
|
||||
a.log.Fatal(logs.InitPolicyContractFailed, zap.Error(err))
|
||||
}
|
||||
|
||||
a.policyStorage = policy.NewStorage(policy.StorageConfig{
|
||||
Contract: policyContract,
|
||||
Cache: cache.NewMorphPolicyCache(getMorphPolicyCacheConfig(a.cfg, a.log)),
|
||||
Log: a.log,
|
||||
})
|
||||
}
|
||||
|
||||
func (a *App) initResolver() {
|
||||
var err error
|
||||
a.bucketResolver, err = resolver.NewBucketResolver(a.getResolverOrder(), a.getResolverConfig())
|
||||
a.bucketResolver, err = resolver.NewBucketResolver(a.getResolverConfig())
|
||||
if err != nil {
|
||||
a.log.Fatal(logs.FailedToCreateResolver, zap.Error(err))
|
||||
}
|
||||
}
|
||||
|
||||
func (a *App) getResolverConfig() *resolver.Config {
|
||||
return &resolver.Config{
|
||||
func (a *App) getResolverConfig() ([]string, *resolver.Config) {
|
||||
resolveCfg := &resolver.Config{
|
||||
FrostFS: frostfs.NewResolverFrostFS(a.pool),
|
||||
RPCAddress: a.cfg.GetString(cfgRPCEndpoint),
|
||||
Settings: a.settings,
|
||||
}
|
||||
}
|
||||
|
||||
func (a *App) getResolverOrder() []string {
|
||||
order := a.cfg.GetStringSlice(cfgResolveOrder)
|
||||
if a.cfg.GetString(cfgRPCEndpoint) == "" {
|
||||
if resolveCfg.RPCAddress == "" {
|
||||
order = remove(order, resolver.NNSResolver)
|
||||
a.log.Warn(logs.ResolverNNSWontBeUsedSinceRPCEndpointIsntProvided)
|
||||
}
|
||||
|
@ -514,7 +240,7 @@ func (a *App) getResolverOrder() []string {
|
|||
a.log.Info(logs.ContainerResolverWillBeDisabled)
|
||||
}
|
||||
|
||||
return order
|
||||
return order, resolveCfg
|
||||
}
|
||||
|
||||
func (a *App) initTracing(ctx context.Context) {
|
||||
|
@ -634,6 +360,55 @@ func getPools(ctx context.Context, logger *zap.Logger, cfg *viper.Viper) (*pool.
|
|||
return p, treePool, key
|
||||
}
|
||||
|
||||
func newPlacementPolicy(l *zap.Logger, v *viper.Viper) *placementPolicy {
|
||||
var policies placementPolicy
|
||||
policies.update(l, v)
|
||||
return &policies
|
||||
}
|
||||
|
||||
func (p *placementPolicy) DefaultPlacementPolicy() netmap.PlacementPolicy {
|
||||
p.mu.RLock()
|
||||
defer p.mu.RUnlock()
|
||||
return p.defaultPolicy
|
||||
}
|
||||
|
||||
func (p *placementPolicy) PlacementPolicy(name string) (netmap.PlacementPolicy, bool) {
|
||||
p.mu.RLock()
|
||||
policy, ok := p.regionMap[name]
|
||||
p.mu.RUnlock()
|
||||
|
||||
return policy, ok
|
||||
}
|
||||
|
||||
func (p *placementPolicy) CopiesNumbers(locationConstraint string) ([]uint32, bool) {
|
||||
p.mu.RLock()
|
||||
copiesNumbers, ok := p.copiesNumbers[locationConstraint]
|
||||
p.mu.RUnlock()
|
||||
|
||||
return copiesNumbers, ok
|
||||
}
|
||||
|
||||
func (p *placementPolicy) DefaultCopiesNumbers() []uint32 {
|
||||
p.mu.RLock()
|
||||
defer p.mu.RUnlock()
|
||||
return p.defaultCopiesNumbers
|
||||
}
|
||||
|
||||
func (p *placementPolicy) update(l *zap.Logger, v *viper.Viper) {
|
||||
defaultPolicy := fetchDefaultPolicy(l, v)
|
||||
regionMap := fetchRegionMappingPolicies(l, v)
|
||||
defaultCopies := fetchDefaultCopiesNumbers(l, v)
|
||||
copiesNumbers := fetchCopiesNumbers(l, v)
|
||||
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
|
||||
p.defaultPolicy = defaultPolicy
|
||||
p.regionMap = regionMap
|
||||
p.defaultCopiesNumbers = defaultCopies
|
||||
p.copiesNumbers = copiesNumbers
|
||||
}
|
||||
|
||||
func remove(list []string, element string) []string {
|
||||
for i, item := range list {
|
||||
if item == element {
|
||||
|
@ -672,64 +447,32 @@ func (a *App) Serve(ctx context.Context) {
|
|||
domains := a.cfg.GetStringSlice(cfgListenDomains)
|
||||
a.log.Info(logs.FetchDomainsPrepareToUseAPI, zap.Strings("domains", domains))
|
||||
|
||||
cfg := api.Config{
|
||||
Throttle: middleware.ThrottleOpts{
|
||||
Limit: a.settings.maxClient.count,
|
||||
BacklogTimeout: a.settings.maxClient.deadline,
|
||||
},
|
||||
Handler: a.api,
|
||||
Center: a.ctr,
|
||||
Log: a.log,
|
||||
Metrics: a.metrics,
|
||||
Domains: domains,
|
||||
|
||||
MiddlewareSettings: a.settings,
|
||||
PolicyChecker: a.policyStorage,
|
||||
|
||||
FrostfsID: a.frostfsid,
|
||||
FrostFSIDValidation: a.settings.frostfsidValidation,
|
||||
throttleOps := middleware.ThrottleOpts{
|
||||
Limit: a.settings.maxClient.count,
|
||||
BacklogTimeout: a.settings.maxClient.deadline,
|
||||
}
|
||||
|
||||
chiRouter := api.NewRouter(cfg)
|
||||
chiRouter := chi.NewRouter()
|
||||
api.AttachChi(chiRouter, domains, throttleOps, a.api, a.ctr, a.log, a.metrics)
|
||||
|
||||
// Use chi router as http.Handler
|
||||
srv := new(http.Server)
|
||||
srv.Handler = chiRouter
|
||||
srv.ErrorLog = zap.NewStdLog(a.log)
|
||||
srv.ReadTimeout = a.cfg.GetDuration(cfgWebReadTimeout)
|
||||
srv.ReadHeaderTimeout = a.cfg.GetDuration(cfgWebReadHeaderTimeout)
|
||||
srv.WriteTimeout = a.cfg.GetDuration(cfgWebWriteTimeout)
|
||||
srv.IdleTimeout = a.cfg.GetDuration(cfgWebIdleTimeout)
|
||||
|
||||
a.startServices()
|
||||
|
||||
servs := a.getServers()
|
||||
|
||||
for i := range servs {
|
||||
for i := range a.servers {
|
||||
go func(i int) {
|
||||
a.log.Info(logs.StartingServer, zap.String("address", servs[i].Address()))
|
||||
a.log.Info(logs.StartingServer, zap.String("address", a.servers[i].Address()))
|
||||
|
||||
if err := srv.Serve(servs[i].Listener()); err != nil && err != http.ErrServerClosed {
|
||||
a.metrics.MarkUnhealthy(servs[i].Address())
|
||||
if err := srv.Serve(a.servers[i].Listener()); err != nil && err != http.ErrServerClosed {
|
||||
a.metrics.MarkUnhealthy(a.servers[i].Address())
|
||||
a.log.Fatal(logs.ListenAndServe, zap.Error(err))
|
||||
}
|
||||
}(i)
|
||||
}
|
||||
|
||||
if len(a.unbindServers) != 0 {
|
||||
a.scheduleReconnect(ctx, srv)
|
||||
}
|
||||
|
||||
go func() {
|
||||
address := a.cfg.GetString(cfgControlGRPCEndpoint)
|
||||
a.log.Info(logs.StartingControlAPI, zap.String("address", address))
|
||||
if listener, err := net.Listen("tcp", address); err != nil {
|
||||
a.log.Fatal(logs.ListenAndServe, zap.Error(err))
|
||||
} else if err = a.controlAPI.Serve(listener); err != nil {
|
||||
a.log.Fatal(logs.ListenAndServe, zap.Error(err))
|
||||
}
|
||||
}()
|
||||
|
||||
sigs := make(chan os.Signal, 1)
|
||||
signal.Notify(sigs, syscall.SIGHUP)
|
||||
|
||||
|
@ -748,7 +491,6 @@ LOOP:
|
|||
|
||||
a.log.Info(logs.StoppingServer, zap.Error(srv.Shutdown(ctx)))
|
||||
|
||||
a.stopControlAPI()
|
||||
a.metrics.Shutdown()
|
||||
a.stopServices()
|
||||
a.shutdownTracing()
|
||||
|
@ -760,25 +502,6 @@ func shutdownContext() (context.Context, context.CancelFunc) {
|
|||
return context.WithTimeout(context.Background(), defaultShutdownTimeout)
|
||||
}
|
||||
|
||||
func (a *App) stopControlAPI() {
|
||||
ctx, cancel := shutdownContext()
|
||||
defer cancel()
|
||||
|
||||
go func() {
|
||||
a.controlAPI.GracefulStop()
|
||||
cancel()
|
||||
}()
|
||||
|
||||
<-ctx.Done()
|
||||
|
||||
if errors.Is(ctx.Err(), context.DeadlineExceeded) {
|
||||
a.log.Info(logs.ControlAPICannotShutdownGracefully)
|
||||
a.controlAPI.Stop()
|
||||
}
|
||||
|
||||
a.log.Info(logs.ControlAPIServiceStopped)
|
||||
}
|
||||
|
||||
func (a *App) configReload(ctx context.Context) {
|
||||
a.log.Info(logs.SIGHUPConfigReloadStarted)
|
||||
|
||||
|
@ -791,7 +514,7 @@ func (a *App) configReload(ctx context.Context) {
|
|||
return
|
||||
}
|
||||
|
||||
if err := a.bucketResolver.UpdateResolvers(a.getResolverOrder()); err != nil {
|
||||
if err := a.bucketResolver.UpdateResolvers(a.getResolverConfig()); err != nil {
|
||||
a.log.Warn(logs.FailedToReloadResolvers, zap.Error(err))
|
||||
}
|
||||
|
||||
|
@ -820,7 +543,12 @@ func (a *App) updateSettings() {
|
|||
a.settings.logLevel.SetLevel(lvl)
|
||||
}
|
||||
|
||||
a.settings.update(a.cfg, a.log, a.key)
|
||||
a.settings.policies.update(a.log, a.cfg)
|
||||
|
||||
a.settings.xmlDecoder.UseDefaultNamespaceForCompleteMultipart(a.cfg.GetBool(cfgKludgeUseDefaultXMLNSForCompleteMultipartUpload))
|
||||
a.settings.setBypassContentEncodingInChunks(a.cfg.GetBool(cfgKludgeBypassContentEncodingCheckInChunks))
|
||||
a.settings.setClientCut(a.cfg.GetBool(cfgClientCut))
|
||||
a.settings.setMD5Enabled(a.cfg.GetBool(cfgMD5Enabled))
|
||||
}
|
||||
|
||||
func (a *App) startServices() {
|
||||
|
@ -836,7 +564,7 @@ func (a *App) startServices() {
|
|||
}
|
||||
|
||||
func (a *App) initServers(ctx context.Context) {
|
||||
serversInfo := fetchServers(a.cfg, a.log)
|
||||
serversInfo := fetchServers(a.cfg)
|
||||
|
||||
a.servers = make([]Server, 0, len(serversInfo))
|
||||
for _, serverInfo := range serversInfo {
|
||||
|
@ -846,7 +574,6 @@ func (a *App) initServers(ctx context.Context) {
|
|||
}
|
||||
srv, err := newServer(ctx, serverInfo)
|
||||
if err != nil {
|
||||
a.unbindServers = append(a.unbindServers, serverInfo)
|
||||
a.metrics.MarkUnhealthy(serverInfo.Address)
|
||||
a.log.Warn(logs.FailedToAddServer, append(fields, zap.Error(err))...)
|
||||
continue
|
||||
|
@ -863,24 +590,21 @@ func (a *App) initServers(ctx context.Context) {
|
|||
}
|
||||
|
||||
func (a *App) updateServers() error {
|
||||
serversInfo := fetchServers(a.cfg, a.log)
|
||||
|
||||
a.mu.Lock()
|
||||
defer a.mu.Unlock()
|
||||
serversInfo := fetchServers(a.cfg)
|
||||
|
||||
var found bool
|
||||
for _, serverInfo := range serversInfo {
|
||||
ser := a.getServer(serverInfo.Address)
|
||||
if ser != nil {
|
||||
if serverInfo.TLS.Enabled {
|
||||
if err := ser.UpdateCert(serverInfo.TLS.CertFile, serverInfo.TLS.KeyFile); err != nil {
|
||||
return fmt.Errorf("failed to update tls certs: %w", err)
|
||||
}
|
||||
found = true
|
||||
}
|
||||
} else if unbind := a.updateUnbindServerInfo(serverInfo); unbind {
|
||||
found = true
|
||||
index := a.serverIndex(serverInfo.Address)
|
||||
if index == -1 {
|
||||
continue
|
||||
}
|
||||
|
||||
if serverInfo.TLS.Enabled {
|
||||
if err := a.servers[index].UpdateCert(serverInfo.TLS.CertFile, serverInfo.TLS.KeyFile); err != nil {
|
||||
return fmt.Errorf("failed to update tls certs: %w", err)
|
||||
}
|
||||
}
|
||||
found = true
|
||||
}
|
||||
|
||||
if !found {
|
||||
|
@ -890,6 +614,15 @@ func (a *App) updateServers() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (a *App) serverIndex(address string) int {
|
||||
for i := range a.servers {
|
||||
if a.servers[i].Address() == address {
|
||||
return i
|
||||
}
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
||||
func (a *App) stopServices() {
|
||||
ctx, cancel := shutdownContext()
|
||||
defer cancel()
|
||||
|
@ -919,9 +652,6 @@ func getCacheOptions(v *viper.Viper, l *zap.Logger) *layer.CachesConfig {
|
|||
cacheCfg.ObjectsList.Lifetime = fetchCacheLifetime(v, l, cfgListObjectsCacheLifetime, cacheCfg.ObjectsList.Lifetime)
|
||||
cacheCfg.ObjectsList.Size = fetchCacheSize(v, l, cfgListObjectsCacheSize, cacheCfg.ObjectsList.Size)
|
||||
|
||||
cacheCfg.SessionList.Lifetime = fetchCacheLifetime(v, l, cfgSessionListCacheLifetime, cacheCfg.SessionList.Lifetime)
|
||||
cacheCfg.SessionList.Size = fetchCacheSize(v, l, cfgSessionListCacheSize, cacheCfg.SessionList.Size)
|
||||
|
||||
cacheCfg.Buckets.Lifetime = fetchCacheLifetime(v, l, cfgBucketsCacheLifetime, cacheCfg.Buckets.Lifetime)
|
||||
cacheCfg.Buckets.Size = fetchCacheSize(v, l, cfgBucketsCacheSize, cacheCfg.Buckets.Size)
|
||||
|
||||
|
@ -946,49 +676,31 @@ func getAccessBoxCacheConfig(v *viper.Viper, l *zap.Logger) *cache.Config {
|
|||
return cacheCfg
|
||||
}
|
||||
|
||||
func getMorphPolicyCacheConfig(v *viper.Viper, l *zap.Logger) *cache.Config {
|
||||
cacheCfg := cache.DefaultMorphPolicyConfig(l)
|
||||
|
||||
cacheCfg.Lifetime = fetchCacheLifetime(v, l, cfgMorphPolicyCacheLifetime, cacheCfg.Lifetime)
|
||||
cacheCfg.Size = fetchCacheSize(v, l, cfgMorphPolicyCacheSize, cacheCfg.Size)
|
||||
|
||||
return cacheCfg
|
||||
}
|
||||
|
||||
func (a *App) initHandler() {
|
||||
var err error
|
||||
cfg := &handler.Config{
|
||||
Policy: a.settings.policies,
|
||||
DefaultMaxAge: fetchDefaultMaxAge(a.cfg, a.log),
|
||||
NotificatorEnabled: a.cfg.GetBool(cfgEnableNATS),
|
||||
XMLDecoder: a.settings.xmlDecoder,
|
||||
}
|
||||
|
||||
a.api, err = handler.New(a.log, a.obj, a.nc, a.settings, a.policyStorage, a.frostfsid)
|
||||
cfg.ResolveZoneList = a.cfg.GetStringSlice(cfgResolveBucketAllow)
|
||||
cfg.IsResolveListAllow = len(cfg.ResolveZoneList) > 0
|
||||
if !cfg.IsResolveListAllow {
|
||||
cfg.ResolveZoneList = a.cfg.GetStringSlice(cfgResolveBucketDeny)
|
||||
}
|
||||
|
||||
cfg.CompleteMultipartKeepalive = a.cfg.GetDuration(cfgKludgeCompleteMultipartUploadKeepalive)
|
||||
cfg.Kludge = a.settings
|
||||
cfg.Features = a.settings
|
||||
|
||||
var err error
|
||||
a.api, err = handler.New(a.log, a.obj, a.nc, cfg)
|
||||
if err != nil {
|
||||
a.log.Fatal(logs.CouldNotInitializeAPIHandler, zap.Error(err))
|
||||
}
|
||||
}
|
||||
|
||||
func (a *App) getServer(address string) Server {
|
||||
for i := range a.servers {
|
||||
if a.servers[i].Address() == address {
|
||||
return a.servers[i]
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *App) updateUnbindServerInfo(info ServerInfo) bool {
|
||||
for i := range a.unbindServers {
|
||||
if a.unbindServers[i].Address == info.Address {
|
||||
a.unbindServers[i] = info
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (a *App) getServers() []Server {
|
||||
a.mu.RLock()
|
||||
defer a.mu.RUnlock()
|
||||
return a.servers
|
||||
}
|
||||
|
||||
func (a *App) setRuntimeParameters() {
|
||||
if len(os.Getenv("GOMEMLIMIT")) != 0 {
|
||||
// default limit < yaml limit < app env limit < GOMEMLIMIT
|
||||
|
@ -1004,60 +716,3 @@ func (a *App) setRuntimeParameters() {
|
|||
zap.Int64("old_value", previous))
|
||||
}
|
||||
}
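The precedence comment above ("default limit < yaml limit < app env limit < GOMEMLIMIT") implies the soft memory limit ends up in the Go runtime. A hedged sketch of applying such a limit with runtime/debug, where SetMemoryLimit returns the previous value much like the `previous` logged above (the 1 GiB figure is just the env example value, not a recommendation):

package main

import (
	"fmt"
	"runtime/debug"
)

func main() {
	limit := int64(1 << 30) // e.g. runtime.soft_memory_limit = 1073741824
	previous := debug.SetMemoryLimit(limit)
	fmt.Printf("soft memory limit updated: new=%d old=%d\n", limit, previous)
}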
|
||||
|
||||
func (a *App) scheduleReconnect(ctx context.Context, srv *http.Server) {
|
||||
go func() {
|
||||
t := time.NewTicker(a.settings.reconnectInterval)
|
||||
defer t.Stop()
|
||||
for {
|
||||
select {
|
||||
case <-t.C:
|
||||
if a.tryReconnect(ctx, srv) {
|
||||
return
|
||||
}
|
||||
t.Reset(a.settings.reconnectInterval)
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
func (a *App) tryReconnect(ctx context.Context, sr *http.Server) bool {
|
||||
a.mu.Lock()
|
||||
defer a.mu.Unlock()
|
||||
|
||||
a.log.Info(logs.ServerReconnecting)
|
||||
var failedServers []ServerInfo
|
||||
|
||||
for _, serverInfo := range a.unbindServers {
|
||||
fields := []zap.Field{
|
||||
zap.String("address", serverInfo.Address), zap.Bool("tls enabled", serverInfo.TLS.Enabled),
|
||||
zap.String("tls cert", serverInfo.TLS.CertFile), zap.String("tls key", serverInfo.TLS.KeyFile),
|
||||
}
|
||||
|
||||
srv, err := newServer(ctx, serverInfo)
|
||||
if err != nil {
|
||||
a.log.Warn(logs.ServerReconnectFailed, zap.Error(err))
|
||||
failedServers = append(failedServers, serverInfo)
|
||||
a.metrics.MarkUnhealthy(serverInfo.Address)
|
||||
continue
|
||||
}
|
||||
|
||||
go func() {
|
||||
a.log.Info(logs.StartingServer, zap.String("address", srv.Address()))
|
||||
a.metrics.MarkHealthy(serverInfo.Address)
|
||||
if err = sr.Serve(srv.Listener()); err != nil && !errors.Is(err, http.ErrServerClosed) {
|
||||
a.log.Warn(logs.ListenAndServe, zap.Error(err))
|
||||
a.metrics.MarkUnhealthy(serverInfo.Address)
|
||||
}
|
||||
}()
|
||||
|
||||
a.servers = append(a.servers, srv)
|
||||
a.log.Info(logs.ServerReconnectedSuccessfully, fields...)
|
||||
}
|
||||
|
||||
a.unbindServers = failedServers
|
||||
|
||||
return len(a.unbindServers) == 0
|
||||
}
|
||||
|
|
|
@ -20,20 +20,12 @@ import (
|
|||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/version"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
|
||||
"git.frostfs.info/TrueCloudLab/zapjournald"
|
||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||
"github.com/spf13/pflag"
|
||||
"github.com/spf13/viper"
|
||||
"github.com/ssgreg/journald"
|
||||
"go.uber.org/zap"
|
||||
"go.uber.org/zap/zapcore"
|
||||
)
|
||||
|
||||
const (
|
||||
destinationStdout = "stdout"
|
||||
destinationJournald = "journald"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultRebalanceInterval = 60 * time.Second
|
||||
defaultHealthcheckTimeout = 15 * time.Second
|
||||
|
@ -48,30 +40,13 @@ const (
|
|||
defaultMaxClientsDeadline = time.Second * 30
|
||||
|
||||
defaultSoftMemoryLimit = math.MaxInt64
|
||||
|
||||
defaultReadHeaderTimeout = 30 * time.Second
|
||||
defaultIdleTimeout = 30 * time.Second
|
||||
|
||||
defaultAccessBoxCacheRemovingCheckInterval = 5 * time.Minute
|
||||
|
||||
defaultNamespaceHeader = "X-Frostfs-Namespace"
|
||||
|
||||
defaultConstraintName = "default"
|
||||
|
||||
defaultNamespace = ""
|
||||
|
||||
defaultReconnectInterval = time.Minute
|
||||
)
|
||||
|
||||
var (
|
||||
defaultCopiesNumbers = []uint32{0}
|
||||
defaultDefaultNamespaces = []string{"", "root"}
|
||||
)
|
||||
var defaultCopiesNumbers = []uint32{0}
|
||||
|
||||
const ( // Settings.
|
||||
// Logger.
|
||||
cfgLoggerLevel = "logger.level"
|
||||
cfgLoggerDestination = "logger.destination"
|
||||
cfgLoggerLevel = "logger.level"
|
||||
|
||||
// Wallet.
|
||||
cfgWalletPath = "wallet.path"
|
||||
|
@ -86,10 +61,6 @@ const ( // Settings.
|
|||
cfgTLSKeyFile = "tls.key_file"
|
||||
cfgTLSCertFile = "tls.cert_file"
|
||||
|
||||
// Control API.
|
||||
cfgControlAuthorizedKeys = "control.authorized_keys"
|
||||
cfgControlGRPCEndpoint = "control.grpc.endpoint"
|
||||
|
||||
// Pool config.
|
||||
cfgConnectTimeout = "connect_timeout"
|
||||
cfgStreamTimeout = "stream_timeout"
|
||||
|
@ -102,8 +73,6 @@ const ( // Settings.
|
|||
cfgObjectsCacheSize = "cache.objects.size"
|
||||
cfgListObjectsCacheLifetime = "cache.list.lifetime"
|
||||
cfgListObjectsCacheSize = "cache.list.size"
|
||||
cfgSessionListCacheLifetime = "cache.list_session.lifetime"
|
||||
cfgSessionListCacheSize = "cache.list_session.size"
|
||||
cfgBucketsCacheLifetime = "cache.buckets.lifetime"
|
||||
cfgBucketsCacheSize = "cache.buckets.size"
|
||||
cfgNamesCacheLifetime = "cache.names.lifetime"
|
||||
|
@ -114,10 +83,6 @@ const ( // Settings.
|
|||
cfgAccessBoxCacheSize = "cache.accessbox.size"
|
||||
cfgAccessControlCacheLifetime = "cache.accesscontrol.lifetime"
|
||||
cfgAccessControlCacheSize = "cache.accesscontrol.size"
|
||||
cfgMorphPolicyCacheLifetime = "cache.morph_policy.lifetime"
|
||||
cfgMorphPolicyCacheSize = "cache.morph_policy.size"
|
||||
|
||||
cfgAccessBoxCacheRemovingCheckInterval = "cache.accessbox.removing_check_interval"
|
||||
|
||||
// NATS.
|
||||
cfgEnableNATS = "nats.enabled"
|
||||
|
@ -165,19 +130,9 @@ const ( // Settings.
|
|||
cfgApplicationBuildTime = "app.build_time"
|
||||
|
||||
// Kludge.
|
||||
cfgKludgeUseDefaultXMLNS = "kludge.use_default_xmlns"
|
||||
cfgKludgeBypassContentEncodingCheckInChunks = "kludge.bypass_content_encoding_check_in_chunks"
|
||||
cfgKludgeDefaultNamespaces = "kludge.default_namespaces"
|
||||
cfgKludgeACLEnabled = "kludge.acl_enabled"
|
||||
|
||||
// Web.
|
||||
cfgWebReadTimeout = "web.read_timeout"
|
||||
cfgWebReadHeaderTimeout = "web.read_header_timeout"
|
||||
cfgWebWriteTimeout = "web.write_timeout"
|
||||
cfgWebIdleTimeout = "web.idle_timeout"
|
||||
|
||||
// Namespaces.
|
||||
cfgNamespacesConfig = "namespaces.config"
|
||||
cfgKludgeUseDefaultXMLNSForCompleteMultipartUpload = "kludge.use_default_xmlns_for_complete_multipart"
|
||||
cfgKludgeCompleteMultipartUploadKeepalive = "kludge.complete_multipart_keepalive"
|
||||
cfgKludgeBypassContentEncodingCheckInChunks = "kludge.bypass_content_encoding_check_in_chunks"
|
||||
|
||||
// Command line args.
|
||||
cmdHelp = "help"
|
||||
|
@ -194,8 +149,6 @@ const ( // Settings.
|
|||
cfgSetCopiesNumber = "frostfs.set_copies_number"
|
||||
// Enabling client side object preparing for PUT operations.
|
||||
cfgClientCut = "frostfs.client_cut"
|
||||
// Sets the max buffer size for reading payload in PUT operations.
|
||||
cfgBufferMaxSizeForPut = "frostfs.buffer_max_size_for_put"
|
||||
// Sets the max number of attempts to make a successful tree request.
|
||||
cfgTreePoolMaxAttempts = "frostfs.tree_pool_max_attempts"
|
||||
|
||||
|
@ -203,29 +156,14 @@ const ( // Settings.
|
|||
cfgAllowedAccessKeyIDPrefixes = "allowed_access_key_id_prefixes"
|
||||
|
||||
// Bucket resolving options.
|
||||
cfgResolveNamespaceHeader = "resolve_bucket.namespace_header"
|
||||
cfgResolveBucketAllow = "resolve_bucket.allow"
|
||||
cfgResolveBucketDeny = "resolve_bucket.deny"
|
||||
cfgResolveBucketAllow = "resolve_bucket.allow"
|
||||
cfgResolveBucketDeny = "resolve_bucket.deny"
|
||||
|
||||
// Runtime.
|
||||
cfgSoftMemoryLimit = "runtime.soft_memory_limit"
|
||||
|
||||
// Enable return MD5 checksum in ETag.
|
||||
cfgMD5Enabled = "features.md5.enabled"
|
||||
cfgPolicyDenyByDefault = "features.policy.deny_by_default"
|
||||
|
||||
// FrostfsID.
|
||||
cfgFrostfsIDContract = "frostfsid.contract"
|
||||
cfgFrostfsIDValidationEnabled = "frostfsid.validation.enabled"
|
||||
|
||||
// Policy.
|
||||
cfgPolicyContract = "policy.contract"
|
||||
|
||||
// Proxy.
|
||||
cfgProxyContract = "proxy.contract"
|
||||
|
||||
// Server.
|
||||
cfgReconnectInterval = "reconnect_interval"
|
||||
cfgMD5Enabled = "features.md5.enabled"
|
||||
|
||||
// envPrefix is an environment variables prefix used for configuration.
|
||||
envPrefix = "S3_GW"
|
||||
|
@ -249,15 +187,6 @@ func fetchConnectTimeout(cfg *viper.Viper) time.Duration {
|
|||
return connTimeout
|
||||
}
|
||||
|
||||
func fetchReconnectInterval(cfg *viper.Viper) time.Duration {
|
||||
reconnect := cfg.GetDuration(cfgReconnectInterval)
|
||||
if reconnect <= 0 {
|
||||
reconnect = defaultReconnectInterval
|
||||
}
|
||||
|
||||
return reconnect
|
||||
}
|
||||
|
||||
func fetchStreamTimeout(cfg *viper.Viper) time.Duration {
|
||||
streamTimeout := cfg.GetDuration(cfgStreamTimeout)
|
||||
if streamTimeout <= 0 {
|
||||
|
@ -386,24 +315,6 @@ func fetchCacheSize(v *viper.Viper, l *zap.Logger, cfgEntry string, defaultValue
|
|||
return defaultValue
|
||||
}
|
||||
|
||||
func fetchRemovingCheckInterval(v *viper.Viper, l *zap.Logger) time.Duration {
|
||||
if !v.IsSet(cfgAccessBoxCacheRemovingCheckInterval) {
|
||||
return defaultAccessBoxCacheRemovingCheckInterval
|
||||
}
|
||||
|
||||
duration := v.GetDuration(cfgAccessBoxCacheRemovingCheckInterval)
|
||||
if duration >= 0 {
|
||||
return duration
|
||||
}
|
||||
|
||||
l.Error(logs.InvalidAccessBoxCacheRemovingCheckInterval,
|
||||
zap.String("parameter", cfgAccessBoxCacheRemovingCheckInterval),
|
||||
zap.Duration("value in config", duration),
|
||||
zap.Duration("default", defaultAccessBoxCacheRemovingCheckInterval))
|
||||
|
||||
return defaultAccessBoxCacheRemovingCheckInterval
|
||||
}
|
||||
|
||||
func fetchDefaultMaxAge(cfg *viper.Viper, l *zap.Logger) int {
|
||||
defaultMaxAge := handler.DefaultMaxAge
|
||||
|
||||
|
@ -515,86 +426,6 @@ func fetchCopiesNumbers(l *zap.Logger, v *viper.Viper) map[string][]uint32 {
|
|||
return copiesNums
|
||||
}
|
||||
|
||||
func fetchDefaultNamespaces(l *zap.Logger, v *viper.Viper) []string {
|
||||
defaultNamespaces := v.GetStringSlice(cfgKludgeDefaultNamespaces)
|
||||
if len(defaultNamespaces) == 0 {
|
||||
defaultNamespaces = defaultDefaultNamespaces
|
||||
l.Warn(logs.DefaultNamespacesCannotBeEmpty, zap.Strings("namespaces", defaultNamespaces))
|
||||
}
|
||||
|
||||
for i := range defaultNamespaces { // allows setting namespaces via env variable, e.g. S3_GW_KLUDGE_DEFAULT_NAMESPACES="" 'root'
|
||||
defaultNamespaces[i] = strings.Trim(defaultNamespaces[i], "\"'")
|
||||
}
|
||||
|
||||
return defaultNamespaces
|
||||
}
|
||||
|
||||
func fetchNamespacesConfig(l *zap.Logger, v *viper.Viper) (NamespacesConfig, []string) {
|
||||
defaultNSRegionMap := fetchRegionMappingPolicies(l, v)
|
||||
defaultNSRegionMap[defaultConstraintName] = fetchDefaultPolicy(l, v)
|
||||
|
||||
defaultNSCopiesNumbers := fetchCopiesNumbers(l, v)
|
||||
defaultNSCopiesNumbers[defaultConstraintName] = fetchDefaultCopiesNumbers(l, v)
|
||||
|
||||
defaultNSValue := Namespace{
|
||||
LocationConstraints: defaultNSRegionMap,
|
||||
CopiesNumbers: defaultNSCopiesNumbers,
|
||||
}
|
||||
|
||||
nsConfig, err := readNamespacesConfig(v.GetString(cfgNamespacesConfig))
|
||||
if err != nil {
|
||||
l.Warn(logs.FailedToParseNamespacesConfig, zap.Error(err))
|
||||
}
|
||||
|
||||
defaultNamespacesNames := fetchDefaultNamespaces(l, v)
|
||||
|
||||
var overrideDefaults []Namespace
|
||||
for _, name := range defaultNamespacesNames {
|
||||
if ns, ok := nsConfig.Namespaces[name]; ok {
|
||||
overrideDefaults = append(overrideDefaults, ns)
|
||||
delete(nsConfig.Namespaces, name)
|
||||
}
|
||||
}
|
||||
|
||||
if len(overrideDefaults) > 0 {
|
||||
l.Warn(logs.DefaultNamespaceConfigValuesBeOverwritten)
|
||||
defaultNSValue.LocationConstraints = overrideDefaults[0].LocationConstraints
|
||||
defaultNSValue.CopiesNumbers = overrideDefaults[0].CopiesNumbers
|
||||
if len(overrideDefaults) > 1 {
|
||||
l.Warn(logs.MultipleDefaultOverridesFound, zap.String("name", overrideDefaults[0].Name))
|
||||
}
|
||||
}
|
||||
|
||||
nsConfig.Namespaces[defaultNamespace] = Namespace{
|
||||
Name: defaultNamespace,
|
||||
LocationConstraints: defaultNSValue.LocationConstraints,
|
||||
CopiesNumbers: defaultNSValue.CopiesNumbers,
|
||||
}
|
||||
|
||||
return nsConfig, defaultNamespacesNames
|
||||
}
|
||||
|
||||
func readNamespacesConfig(filepath string) (NamespacesConfig, error) {
|
||||
nsConfig := NamespacesConfig{
|
||||
Namespaces: make(Namespaces),
|
||||
}
|
||||
|
||||
if filepath == "" {
|
||||
return nsConfig, nil
|
||||
}
|
||||
|
||||
data, err := os.ReadFile(filepath)
|
||||
if err != nil {
|
||||
return nsConfig, fmt.Errorf("failed to read namespace config '%s': %w", filepath, err)
|
||||
}
|
||||
|
||||
if err = json.Unmarshal(data, &nsConfig); err != nil {
|
||||
return nsConfig, fmt.Errorf("failed to parse namespace config: %w", err)
|
||||
}
|
||||
|
||||
return nsConfig, nil
|
||||
}
|
||||
|
||||
func fetchPeers(l *zap.Logger, v *viper.Viper) []pool.NodeParam {
|
||||
var nodes []pool.NodeParam
|
||||
for i := 0; ; i++ {
|
||||
|
@ -625,9 +456,8 @@ func fetchPeers(l *zap.Logger, v *viper.Viper) []pool.NodeParam {
|
|||
return nodes
|
||||
}
|
||||
|
||||
func fetchServers(v *viper.Viper, log *zap.Logger) []ServerInfo {
|
||||
func fetchServers(v *viper.Viper) []ServerInfo {
|
||||
var servers []ServerInfo
|
||||
seen := make(map[string]struct{})
|
||||
|
||||
for i := 0; ; i++ {
|
||||
key := cfgServer + "." + strconv.Itoa(i) + "."
|
||||
|
@ -642,34 +472,12 @@ func fetchServers(v *viper.Viper, log *zap.Logger) []ServerInfo {
|
|||
break
|
||||
}
|
||||
|
||||
if _, ok := seen[serverInfo.Address]; ok {
|
||||
log.Warn(logs.WarnDuplicateAddress, zap.String("address", serverInfo.Address))
|
||||
continue
|
||||
}
|
||||
seen[serverInfo.Address] = struct{}{}
|
||||
servers = append(servers, serverInfo)
|
||||
}
|
||||
|
||||
return servers
|
||||
}
|
||||
|
||||
func fetchAuthorizedKeys(l *zap.Logger, v *viper.Viper) keys.PublicKeys {
|
||||
strKeys := v.GetStringSlice(cfgControlAuthorizedKeys)
|
||||
pubs := make(keys.PublicKeys, 0, len(strKeys))
|
||||
|
||||
for i := range strKeys {
|
||||
pub, err := keys.NewPublicKeyFromString(strKeys[i])
|
||||
if err != nil {
|
||||
l.Warn(logs.FailedToParsePublicKey, zap.String("key", strKeys[i]))
|
||||
continue
|
||||
}
|
||||
|
||||
pubs = append(pubs, pub)
|
||||
}
|
||||
|
||||
return pubs
|
||||
}
|
||||
|
||||
func newSettings() *viper.Viper {
|
||||
v := viper.New()
|
||||
|
||||
|
@ -715,11 +523,8 @@ func newSettings() *viper.Viper {
|
|||
|
||||
// set defaults:
|
||||
|
||||
v.SetDefault(cfgAccessBoxCacheRemovingCheckInterval, defaultAccessBoxCacheRemovingCheckInterval)
|
||||
|
||||
// logger:
|
||||
v.SetDefault(cfgLoggerLevel, "debug")
|
||||
v.SetDefault(cfgLoggerDestination, "stdout")
|
||||
|
||||
// pool:
|
||||
v.SetDefault(cfgPoolErrorThreshold, defaultPoolErrorThreshold)
|
||||
|
@ -728,32 +533,10 @@ func newSettings() *viper.Viper {
|
|||
v.SetDefault(cfgPProfAddress, "localhost:8085")
|
||||
v.SetDefault(cfgPrometheusAddress, "localhost:8086")
|
||||
|
||||
v.SetDefault(cfgControlGRPCEndpoint, "localhost:8083")
|
||||
|
||||
// frostfs
|
||||
v.SetDefault(cfgBufferMaxSizeForPut, 1024*1024) // 1mb
|
||||
|
||||
// kludge
|
||||
v.SetDefault(cfgKludgeUseDefaultXMLNS, false)
|
||||
v.SetDefault(cfgKludgeUseDefaultXMLNSForCompleteMultipartUpload, false)
|
||||
v.SetDefault(cfgKludgeCompleteMultipartUploadKeepalive, 10*time.Second)
|
||||
v.SetDefault(cfgKludgeBypassContentEncodingCheckInChunks, false)
|
||||
v.SetDefault(cfgKludgeDefaultNamespaces, defaultDefaultNamespaces)
|
||||
v.SetDefault(cfgKludgeACLEnabled, false)
|
||||
|
||||
// web
|
||||
v.SetDefault(cfgWebReadHeaderTimeout, defaultReadHeaderTimeout)
|
||||
v.SetDefault(cfgWebIdleTimeout, defaultIdleTimeout)
|
||||
|
||||
// frostfsid
|
||||
v.SetDefault(cfgFrostfsIDContract, "frostfsid.frostfs")
|
||||
|
||||
// policy
|
||||
v.SetDefault(cfgPolicyContract, "policy.frostfs")
|
||||
|
||||
// proxy
|
||||
v.SetDefault(cfgProxyContract, "proxy.frostfs")
|
||||
|
||||
// resolve
|
||||
v.SetDefault(cfgResolveNamespaceHeader, defaultNamespaceHeader)
|
||||
|
||||
// Bind flags
|
||||
if err := bindFlags(v, flags); err != nil {
|
||||
|
@ -940,25 +723,7 @@ func mergeConfig(v *viper.Viper, fileName string) error {
|
|||
return v.MergeConfig(cfgFile)
|
||||
}
|
||||
|
||||
func pickLogger(v *viper.Viper) *Logger {
|
||||
lvl, err := getLogLevel(v)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
dest := v.GetString(cfgLoggerDestination)
|
||||
|
||||
switch dest {
|
||||
case destinationStdout:
|
||||
return newStdoutLogger(lvl)
|
||||
case destinationJournald:
|
||||
return newJournaldLogger(lvl)
|
||||
default:
|
||||
panic(fmt.Sprintf("wrong destination for logger: %s", dest))
|
||||
}
|
||||
}
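For reference, a minimal sketch of the configuration that drives this switch on the side that still has pickLogger, using the logger.* keys and defaults declared earlier in this diff:

logger:
  level: debug          # default, parsed by getLogLevel
  destination: stdout   # or "journald"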
|
||||
|
||||
// newStdoutLogger constructs a Logger instance for the current application.
|
||||
// newLogger constructs a Logger instance for the current application.
|
||||
// Panics on failure.
|
||||
//
|
||||
// Logger contains a logger built from zap's production logging configuration with:
|
||||
|
@ -971,7 +736,12 @@ func pickLogger(v *viper.Viper) *Logger {
|
|||
// Logger records a stack trace for all messages at or above fatal level.
|
||||
//
|
||||
// See also zapcore.Level, zap.NewProductionConfig, zap.AddStacktrace.
|
||||
func newStdoutLogger(lvl zapcore.Level) *Logger {
|
||||
func newLogger(v *viper.Viper) *Logger {
|
||||
lvl, err := getLogLevel(v)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
c := zap.NewProductionConfig()
|
||||
c.Level = zap.NewAtomicLevelAt(lvl)
|
||||
c.Encoding = "console"
|
||||
|
@ -990,28 +760,6 @@ func newStdoutLogger(lvl zapcore.Level) *Logger {
|
|||
}
|
||||
}
|
||||
|
||||
func newJournaldLogger(lvl zapcore.Level) *Logger {
|
||||
c := zap.NewProductionConfig()
|
||||
c.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
|
||||
c.Level = zap.NewAtomicLevelAt(lvl)
|
||||
|
||||
encoder := zapjournald.NewPartialEncoder(zapcore.NewConsoleEncoder(c.EncoderConfig), zapjournald.SyslogFields)
|
||||
|
||||
core := zapjournald.NewCore(c.Level, encoder, &journald.Journal{}, zapjournald.SyslogFields)
|
||||
coreWithContext := core.With([]zapcore.Field{
|
||||
zapjournald.SyslogFacility(zapjournald.LogDaemon),
|
||||
zapjournald.SyslogIdentifier(),
|
||||
zapjournald.SyslogPid(),
|
||||
})
|
||||
|
||||
l := zap.New(coreWithContext, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)))
|
||||
|
||||
return &Logger{
|
||||
logger: l,
|
||||
lvl: c.Level,
|
||||
}
|
||||
}
|
||||
|
||||
func getLogLevel(v *viper.Viper) (zapcore.Level, error) {
|
||||
var lvl zapcore.Level
|
||||
lvlStr := v.GetString(cfgLoggerLevel)
|
||||
|
|
|
@ -1,146 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"testing"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/handler"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestDefaultNamespace(t *testing.T) {
|
||||
xmlBodyWithNamespace := `
|
||||
<CompleteMultipartUpload xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
|
||||
<Part>
|
||||
<ETag>string</ETag>
|
||||
<PartNumber>1</PartNumber>
|
||||
</Part>
|
||||
</CompleteMultipartUpload>
|
||||
`
|
||||
xmlBodyWithInvalidNamespace := `
|
||||
<CompleteMultipartUpload xmlns="http://bla.bla.bla/">
|
||||
<Part>
|
||||
<ETag>string</ETag>
|
||||
<PartNumber>1</PartNumber>
|
||||
</Part>
|
||||
</CompleteMultipartUpload>
|
||||
`
|
||||
xmlBody := `
|
||||
<CompleteMultipartUpload>
|
||||
<Part>
|
||||
<ETag>string</ETag>
|
||||
<PartNumber>1</PartNumber>
|
||||
</Part>
|
||||
</CompleteMultipartUpload>
|
||||
`
|
||||
|
||||
for _, tc := range []struct {
|
||||
settings *appSettings
|
||||
input string
|
||||
err bool
|
||||
}{
|
||||
{
|
||||
settings: &appSettings{
|
||||
defaultXMLNS: false,
|
||||
},
|
||||
input: xmlBodyWithNamespace,
|
||||
err: false,
|
||||
},
|
||||
{
|
||||
settings: &appSettings{
|
||||
defaultXMLNS: false,
|
||||
},
|
||||
input: xmlBody,
|
||||
err: true,
|
||||
},
|
||||
{
|
||||
settings: &appSettings{
|
||||
defaultXMLNS: false,
|
||||
},
|
||||
input: xmlBodyWithInvalidNamespace,
|
||||
err: true,
|
||||
},
|
||||
{
|
||||
settings: &appSettings{
|
||||
defaultXMLNS: true,
|
||||
},
|
||||
input: xmlBodyWithNamespace,
|
||||
err: false,
|
||||
},
|
||||
{
|
||||
settings: &appSettings{
|
||||
defaultXMLNS: true,
|
||||
},
|
||||
input: xmlBody,
|
||||
err: false,
|
||||
},
|
||||
{
|
||||
settings: &appSettings{
|
||||
defaultXMLNS: true,
|
||||
},
|
||||
input: xmlBodyWithInvalidNamespace,
|
||||
err: true,
|
||||
},
|
||||
} {
|
||||
t.Run("", func(t *testing.T) {
|
||||
model := new(handler.CompleteMultipartUpload)
|
||||
err := tc.settings.NewXMLDecoder(bytes.NewBufferString(tc.input)).Decode(model)
|
||||
if tc.err {
|
||||
require.Error(t, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestNamespacesMarshaling(t *testing.T) {
|
||||
dataJSON := `
|
||||
{
|
||||
"namespaces": {
|
||||
"kapusta": {
|
||||
"location_constraints": {
|
||||
"default": "REP 3",
|
||||
"load-1-1": "REP 1 CBF 1 SELECT 1 FROM *"
|
||||
},
|
||||
"copies_numbers": {
|
||||
"default": [
|
||||
0
|
||||
],
|
||||
"load-1-1": [
|
||||
1
|
||||
]
|
||||
}
|
||||
},
|
||||
"root": {
|
||||
"location_constraints": {
|
||||
"default": "REP 3",
|
||||
"test": "{\"replicas\":[{\"count\":1,\"selector\":\"\"}],\"containerBackupFactor\":1,\"selectors\":[{\"name\":\"\",\"count\":1,\"clause\":\"CLAUSE_UNSPECIFIED\",\"attribute\":\"\",\"filter\":\"Color\"}],\"filters\":[{\"name\":\"Color\",\"key\":\"Color\",\"op\":\"EQ\",\"value\":\"Red\",\"filters\":[]}],\"unique\":false}"
|
||||
},
|
||||
"copies_numbers": {
|
||||
"default": [
|
||||
0
|
||||
],
|
||||
"load-1-1": [
|
||||
1
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
`
|
||||
|
||||
var nsConfig NamespacesConfig
|
||||
err := json.Unmarshal([]byte(dataJSON), &nsConfig)
|
||||
require.NoError(t, err)
|
||||
|
||||
data, err := json.Marshal(nsConfig)
|
||||
require.NoError(t, err)
|
||||
|
||||
var nsConfig2 NamespacesConfig
|
||||
err = json.Unmarshal(data, &nsConfig2)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, nsConfig, nsConfig2)
|
||||
}
|
|
@ -10,7 +10,7 @@ func main() {
|
|||
g, _ := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
|
||||
|
||||
v := newSettings()
|
||||
l := pickLogger(v)
|
||||
l := newLogger(v)
|
||||
|
||||
a := newApp(g, l, v)
|
||||
|
||||
|
|
|
@ -1,79 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
|
||||
)
|
||||
|
||||
type NamespacesConfig struct {
|
||||
Namespaces Namespaces `json:"namespaces"`
|
||||
}
|
||||
|
||||
type Namespaces map[string]Namespace
|
||||
|
||||
type Namespace struct {
|
||||
Name string `json:"-"`
|
||||
LocationConstraints LocationConstraints `json:"location_constraints"`
|
||||
CopiesNumbers map[string][]uint32 `json:"copies_numbers"`
|
||||
}
|
||||
|
||||
type LocationConstraints map[string]netmap.PlacementPolicy
|
||||
|
||||
func (c *Namespaces) UnmarshalJSON(data []byte) error {
|
||||
namespaces := make(map[string]Namespace)
|
||||
if err := json.Unmarshal(data, &namespaces); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for name, namespace := range namespaces {
|
||||
namespace.Name = name
|
||||
namespaces[name] = namespace
|
||||
}
|
||||
|
||||
*c = namespaces
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *LocationConstraints) UnmarshalJSON(data []byte) error {
|
||||
m := make(map[string]string)
|
||||
if err := json.Unmarshal(data, &m); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*c = make(LocationConstraints, len(m))
|
||||
for region, policy := range m {
|
||||
var pp netmap.PlacementPolicy
|
||||
if err := pp.DecodeString(policy); err == nil {
|
||||
(*c)[region] = pp
|
||||
continue
|
||||
}
|
||||
|
||||
if err := pp.UnmarshalJSON([]byte(policy)); err == nil {
|
||||
(*c)[region] = pp
|
||||
continue
|
||||
}
|
||||
|
||||
return fmt.Errorf("failed to parse location contraint '%s': '%s'", region, policy)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c LocationConstraints) MarshalJSON() ([]byte, error) {
|
||||
m := make(map[string]string, len(c))
|
||||
|
||||
for region, policy := range c {
|
||||
var sb strings.Builder
|
||||
if err := policy.WriteStringTo(&sb); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
m[region] = sb.String()
|
||||
}
|
||||
|
||||
return json.Marshal(m)
|
||||
}
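The UnmarshalJSON above first tries the human-readable placement policy form and then falls back to its JSON encoding. A small standalone sketch exercising both forms with the same frostfs-sdk-go calls this file relies on (the JSON literal is abbreviated from the "root"/"test" example in the namespaces test above):

package main

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
)

func main() {
	var pp netmap.PlacementPolicy

	// Human-readable form, as in "default": "REP 3".
	if err := pp.DecodeString("REP 3"); err != nil {
		panic(err)
	}

	// JSON form, as in the "test" location constraint of the root namespace.
	raw := `{"replicas":[{"count":1,"selector":""}],"containerBackupFactor":1}`
	if err := pp.UnmarshalJSON([]byte(raw)); err != nil {
		fmt.Println("json form rejected:", err)
	}
}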
|
|
@ -68,13 +68,11 @@ func newServer(ctx context.Context, serverInfo ServerInfo) (*server, error) {
|
|||
|
||||
if serverInfo.TLS.Enabled {
|
||||
if err = tlsProvider.UpdateCert(serverInfo.TLS.CertFile, serverInfo.TLS.KeyFile); err != nil {
|
||||
lnErr := ln.Close()
|
||||
return nil, fmt.Errorf("failed to update cert (listener close: %v): %w", lnErr, err)
|
||||
return nil, fmt.Errorf("failed to update cert: %w", err)
|
||||
}
|
||||
|
||||
ln = tls.NewListener(ln, &tls.Config{
|
||||
GetCertificate: tlsProvider.GetCertificate,
|
||||
NextProtos: []string{"h2"}, // required to enable HTTP/2 requests in `http.Serve`
|
||||
})
|
||||
}
|
||||
|
||||
|
|
|
@ -1,119 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"crypto/x509/pkix"
|
||||
"encoding/pem"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"path"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
"golang.org/x/net/http2"
|
||||
)
|
||||
|
||||
const (
|
||||
expHeaderKey = "Foo"
|
||||
expHeaderValue = "Bar"
|
||||
)
|
||||
|
||||
func TestHTTP2TLS(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
certPath, keyPath := prepareTestCerts(t)
|
||||
|
||||
srv := &http.Server{
|
||||
Handler: http.HandlerFunc(testHandler),
|
||||
}
|
||||
|
||||
tlsListener, err := newServer(ctx, ServerInfo{
|
||||
Address: ":0",
|
||||
TLS: ServerTLSInfo{
|
||||
Enabled: true,
|
||||
CertFile: certPath,
|
||||
KeyFile: keyPath,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
port := tlsListener.Listener().Addr().(*net.TCPAddr).Port
|
||||
addr := fmt.Sprintf("https://localhost:%d", port)
|
||||
|
||||
go func() {
|
||||
_ = srv.Serve(tlsListener.Listener())
|
||||
}()
|
||||
|
||||
// Server is running, now send HTTP/2 request
|
||||
|
||||
tlsClientConfig := &tls.Config{
|
||||
InsecureSkipVerify: true,
|
||||
}
|
||||
|
||||
cliHTTP1 := http.Client{Transport: &http.Transport{TLSClientConfig: tlsClientConfig}}
|
||||
cliHTTP2 := http.Client{Transport: &http2.Transport{TLSClientConfig: tlsClientConfig}}
|
||||
|
||||
req, err := http.NewRequest("GET", addr, nil)
|
||||
require.NoError(t, err)
|
||||
req.Header[expHeaderKey] = []string{expHeaderValue}
|
||||
|
||||
resp, err := cliHTTP1.Do(req)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, http.StatusOK, resp.StatusCode)
|
||||
|
||||
resp, err = cliHTTP2.Do(req)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, http.StatusOK, resp.StatusCode)
|
||||
}
|
||||
|
||||
func testHandler(resp http.ResponseWriter, req *http.Request) {
|
||||
hdr, ok := req.Header[expHeaderKey]
|
||||
if !ok || len(hdr) != 1 || hdr[0] != expHeaderValue {
|
||||
resp.WriteHeader(http.StatusBadRequest)
|
||||
} else {
|
||||
resp.WriteHeader(http.StatusOK)
|
||||
}
|
||||
}
|
||||
|
||||
func prepareTestCerts(t *testing.T) (certPath, keyPath string) {
|
||||
privateKey, err := rsa.GenerateKey(rand.Reader, 2048)
|
||||
require.NoError(t, err)
|
||||
|
||||
template := x509.Certificate{
|
||||
SerialNumber: big.NewInt(1),
|
||||
Subject: pkix.Name{CommonName: "localhost"},
|
||||
NotBefore: time.Now(),
|
||||
NotAfter: time.Now().Add(time.Hour * 24 * 365),
|
||||
KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
|
||||
BasicConstraintsValid: true,
|
||||
}
|
||||
|
||||
derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &privateKey.PublicKey, privateKey)
|
||||
require.NoError(t, err)
|
||||
|
||||
dir := t.TempDir()
|
||||
certPath = path.Join(dir, "cert.pem")
|
||||
keyPath = path.Join(dir, "key.pem")
|
||||
|
||||
certFile, err := os.Create(certPath)
|
||||
require.NoError(t, err)
|
||||
defer certFile.Close()
|
||||
|
||||
keyFile, err := os.Create(keyPath)
|
||||
require.NoError(t, err)
|
||||
defer keyFile.Close()
|
||||
|
||||
err = pem.Encode(certFile, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes})
|
||||
require.NoError(t, err)
|
||||
|
||||
err = pem.Encode(keyFile, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(privateKey)})
|
||||
require.NoError(t, err)
|
||||
|
||||
return certPath, keyPath
|
||||
}
|
|
@ -34,9 +34,6 @@ func (ms *Service) ShutDown(ctx context.Context) {
|
|||
ms.log.Info(logs.ShuttingDownService, zap.String("endpoint", ms.Addr))
|
||||
err := ms.Shutdown(ctx)
|
||||
if err != nil {
|
||||
ms.log.Error(logs.CantGracefullyShutDownService, zap.Error(err))
|
||||
if err = ms.Close(); err != nil {
|
||||
ms.log.Panic(logs.CantShutDownService, zap.Error(err))
|
||||
}
|
||||
ms.log.Panic(logs.CantShutDownService)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -33,15 +33,6 @@ S3_GW_SERVER_1_TLS_ENABLED=true
|
|||
S3_GW_SERVER_1_TLS_CERT_FILE=/path/to/tls/cert
|
||||
S3_GW_SERVER_1_TLS_KEY_FILE=/path/to/tls/key
|
||||
|
||||
# How often to retry binding listen endpoints that failed at startup
|
||||
S3_GW_RECONNECT_INTERVAL=1m
|
||||
|
||||
# Control API
|
||||
# List of hex-encoded public keys that have rights to use the Control Service
|
||||
S3_GW_CONTROL_AUTHORIZED_KEYS=035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11 028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6
|
||||
# Endpoint that is listened by the Control Service
|
||||
S3_GW_CONTROL_GRPC_ENDPOINT=localhost:8083
|
||||
|
||||
# Domains to enable virtual-hosted-style access to buckets.
|
||||
S3_GW_LISTEN_DOMAINS=s3dev.frostfs.devenv
|
||||
|
||||
|
@ -85,9 +76,6 @@ S3_GW_CACHE_OBJECTS_SIZE=1000000
|
|||
# Cache which keeps lists of objects in buckets
|
||||
S3_GW_CACHE_LIST_LIFETIME=1m
|
||||
S3_GW_CACHE_LIST_SIZE=100000
|
||||
# Cache which keeps listing session
|
||||
S3_GW_CACHE_LIST_SESSION_LIFETIME=1m
|
||||
S3_GW_CACHE_LIST_SESSION_SIZE=100
|
||||
# Cache which contains mapping of bucket name to bucket info
|
||||
S3_GW_CACHE_BUCKETS_LIFETIME=1m
|
||||
S3_GW_CACHE_BUCKETS_SIZE=1000
|
||||
|
@ -98,15 +86,11 @@ S3_GW_CACHE_NAMES_SIZE=10000
|
|||
S3_GW_CACHE_SYSTEM_LIFETIME=5m
|
||||
S3_GW_CACHE_SYSTEM_SIZE=100000
|
||||
# Cache which stores access box with tokens by its address
|
||||
S3_GW_CACHE_ACCESSBOX_REMOVING_CHECK_INTERVAL=5m
|
||||
S3_GW_CACHE_ACCESSBOX_LIFETIME=10m
|
||||
S3_GW_CACHE_ACCESSBOX_SIZE=100
|
||||
# Cache which stores owner to cache operation mapping
|
||||
S3_GW_CACHE_ACCESSCONTROL_LIFETIME=1m
|
||||
S3_GW_CACHE_ACCESSCONTROL_SIZE=100000
|
||||
# Cache which stores list of policy chains
|
||||
S3_GW_CACHE_MORPH_POLICY_LIFETIME=1m
|
||||
S3_GW_CACHE_MORPH_POLICY_SIZE=10000
|
||||
|
||||
# NATS
|
||||
S3_GW_NATS_ENABLED=true
|
||||
|
@ -143,8 +127,6 @@ S3_GW_CORS_DEFAULT_MAX_AGE=600
|
|||
S3_GW_FROSTFS_SET_COPIES_NUMBER=0
|
||||
# This flag enables client-side object preparation.
|
||||
S3_GW_FROSTFS_CLIENT_CUT=false
|
||||
# Sets the max buffer size for reading payload in PUT operations.
|
||||
S3_GW_FROSTFS_BUFFER_MAX_SIZE_FOR_PUT=1048576
|
||||
# Max number of attempts to make a successful tree request.
|
||||
# The default value 0 means the number of attempts equals the number of nodes in the pool.
|
||||
S3_GW_FROSTFS_TREE_POOL_MAX_ATTEMPTS=0
|
||||
|
@ -153,20 +135,16 @@ S3_GW_FROSTFS_TREE_POOL_MAX_ATTEMPTS=0
|
|||
# If not set, S3 GW will accept all AccessKeyIDs
|
||||
S3_GW_ALLOWED_ACCESS_KEY_ID_PREFIXES=Ck9BHsgKcnwfCTUSFm6pxhoNS4cBqgN2NQ8zVgPjqZDX 3stjWenX15YwYzczMr88gy3CQr4NYFBQ8P7keGzH5QFn
|
||||
|
||||
# Header to determine zone to resolve bucket name
|
||||
S3_GW_RESOLVE_NAMESPACE_HEADER=X-Frostfs-Namespace
|
||||
# List of container NNS zones which are allowed or restricted to resolve with HEAD request
|
||||
S3_GW_RESOLVE_BUCKET_ALLOW=container
|
||||
# S3_GW_RESOLVE_BUCKET_DENY=
|
||||
|
||||
# Enable using the default xml namespace `http://s3.amazonaws.com/doc/2006-03-01/` when parsing xml bodies.
|
||||
S3_GW_KLUDGE_USE_DEFAULT_XMLNS=false
|
||||
# Enable using the default xml namespace `http://s3.amazonaws.com/doc/2006-03-01/` when parsing `CompleteMultipartUpload` xml body.
|
||||
S3_GW_KLUDGE_USE_DEFAULT_XMLNS_FOR_COMPLETE_MULTIPART=false
|
||||
# Set timeout between whitespace transmissions during CompleteMultipartUpload processing.
|
||||
S3_GW_KLUDGE_COMPLETE_MULTIPART_KEEPALIVE=10s
|
||||
# Use this flag to allow the chunked upload approach without the `aws-chunked` value in the `Content-Encoding` header.
|
||||
S3_GW_KLUDGE_BYPASS_CONTENT_ENCODING_CHECK_IN_CHUNKS=false
|
||||
# Namespaces that should be handled as default
|
||||
S3_GW_KLUDGE_DEFAULT_NAMESPACES="" "root"
|
||||
# Enable bucket/object ACL support for newly created buckets.
|
||||
S3_GW_KLUDGE_ACL_ENABLED=false
|
||||
S3_GW_BYPASS_CONTENT_ENCODING_CHECK_IN_CHUNKS=false
|
||||
|
||||
S3_GW_TRACING_ENABLED=false
|
||||
S3_GW_TRACING_ENDPOINT="localhost:4318"
|
||||
|
@ -175,45 +153,3 @@ S3_GW_TRACING_EXPORTER="otlp_grpc"
|
|||
S3_GW_RUNTIME_SOFT_MEMORY_LIMIT=1073741824
|
||||
|
||||
S3_GW_FEATURES_MD5_ENABLED=false
|
||||
# Enable denying access for request that doesn't match any policy chain rules.
|
||||
S3_GW_FEATURES_POLICY_DENY_BY_DEFAULT=false
|
||||
|
||||
# ReadTimeout is the maximum duration for reading the entire
|
||||
# request, including the body. A zero or negative value means
|
||||
# there will be no timeout.
|
||||
S3_GW_WEB_READ_TIMEOUT=0
|
||||
# ReadHeaderTimeout is the amount of time allowed to read
|
||||
# request headers. The connection's read deadline is reset
|
||||
# after reading the headers and the Handler can decide what
|
||||
# is considered too slow for the body. If ReadHeaderTimeout
|
||||
# is zero, the value of ReadTimeout is used. If both are
|
||||
# zero, there is no timeout.
|
||||
S3_GW_WEB_READ_HEADER_TIMEOUT=30s
|
||||
# WriteTimeout is the maximum duration before timing out
|
||||
# writes of the response. It is reset whenever a new
|
||||
# request's header is read. Like ReadTimeout, it does not
|
||||
# let Handlers make decisions on a per-request basis.
|
||||
# A zero or negative value means there will be no timeout.
|
||||
S3_GW_WEB_WRITE_TIMEOUT=0
|
||||
# IdleTimeout is the maximum amount of time to wait for the
|
||||
# next request when keep-alives are enabled. If IdleTimeout
|
||||
# is zero, the value of ReadTimeout is used. If both are
|
||||
# zero, there is no timeout.
|
||||
S3_GW_WEB_IDLE_TIMEOUT=30s
|
||||
|
||||
# FrostfsID contract configuration. To enable this functionality the `rpc_endpoint` param must be also set.
|
||||
# FrostfsID contract hash (LE) or name in NNS.
|
||||
S3_GW_FROSTFSID_CONTRACT=frostfsid.frostfs
|
||||
# Enables a check to only allow requests to users registered in the FrostfsID contract.
|
||||
S3_GW_FROSTFSID_VALIDATION_ENABLED=true
|
||||
|
||||
# Policy contract configuration. To enable this functionality the `rpc_endpoint` param must be also set.
|
||||
# Policy contract hash (LE) or name in NNS.
|
||||
S3_GW_POLICY_CONTRACT=policy.frostfs
|
||||
|
||||
# Proxy contract configuration. To enable this functionality the `rpc_endpoint` param must be also set.
|
||||
# Proxy contract hash (LE) or name in NNS.
|
||||
S3_GW_PROXY_CONTRACT=proxy.frostfs
|
||||
|
||||
# Namespaces configuration
|
||||
S3_GW_NAMESPACES_CONFIG=namespaces.json
|
||||
|
|
|
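The environment variables above mirror the YAML configuration that follows: each variable name appears to be the config path upper-cased, with dots replaced by underscores and an `S3_GW_` prefix (for example, `cache.morph_policy.size` corresponds to `S3_GW_CACHE_MORPH_POLICY_SIZE`). A minimal sketch of that naming rule; the helper below is purely illustrative and is not part of the gateway:

package main

import (
    "fmt"
    "strings"
)

// envName maps a YAML config path to the matching S3_GW_* environment variable,
// following the pattern visible in the two listings in this document.
func envName(path string) string {
    return "S3_GW_" + strings.ToUpper(strings.ReplaceAll(path, ".", "_"))
}

func main() {
    fmt.Println(envName("cache.morph_policy.size")) // S3_GW_CACHE_MORPH_POLICY_SIZE
    fmt.Println(envName("control.grpc.endpoint"))   // S3_GW_CONTROL_GRPC_ENDPOINT
}
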
@@ -25,8 +25,6 @@ peers:
    priority: 2
    weight: 0.9

reconnect_interval: 1m

server:
  - address: 0.0.0.0:8080
    tls:

@@ -39,22 +37,12 @@ server:
      cert_file: /path/to/cert
      key_file: /path/to/key

control:
  # List of hex-encoded public keys that have rights to use the Control Service
  authorized_keys:
    - 035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11
    - 028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6
  grpc:
    # Endpoint on which the Control Service listens
    endpoint: localhost:8083

# Domains that can be used for virtual-hosted-style access to buckets.
listen_domains:
  - s3dev.frostfs.devenv

logger:
  level: debug
  destination: stdout

# RPC endpoint and the order in which bucket names are resolved
rpc_endpoint: http://morph-chain.frostfs.devenv:30333

@@ -102,10 +90,6 @@ cache:
  list:
    lifetime: 1m
    size: 100
  # Cache which keeps listing sessions
  list_session:
    lifetime: 1m
    size: 100
  # Cache which contains mapping of nice name to object addresses
  names:
    lifetime: 1m

@@ -120,17 +104,12 @@ cache:
    size: 1000
  # Cache which stores access box with tokens by its address
  accessbox:
    removing_check_interval: 5m
    lifetime: 10m
    size: 100
    lifetime: 5m
    size: 10
  # Cache which stores owner to cache operation mapping
  accesscontrol:
    lifetime: 1m
    size: 100000
  # Cache which stores list of policy chains
  morph_policy:
    lifetime: 1m
    size: 10000

nats:
  enabled: true

@@ -170,11 +149,12 @@ cors:
frostfs:
  # Number of object copies (for each replica) to consider PUT to FrostFS successful.
  # `[0]` or empty list means that the object will be processed according to the container's placement policy
  set_copies_number: [ 0 ]
  set_copies_number: [0]
  # This flag enables client-side object preparing.
  client_cut: false
  # Sets max buffer size for read payload in put operations.
  buffer_max_size_for_put: 1048576
  # Max number of attempts to make a successful tree request.
  # The default value is 0, which means the number of attempts equals the number of nodes in the pool.
  tree_pool_max_attempts: 0

# List of allowed AccessKeyID prefixes
# If the parameter is omitted, S3 GW will accept all AccessKeyIDs

@@ -183,75 +163,21 @@ allowed_access_key_id_prefixes:
  - 3stjWenX15YwYzczMr88gy3CQr4NYFBQ8P7keGzH5QFn

resolve_bucket:
  namespace_header: X-Frostfs-Namespace
  allow:
    - container
  deny:

kludge:
  # Enable using the default xml namespace `http://s3.amazonaws.com/doc/2006-03-01/` when parsing xml bodies.
  use_default_xmlns: false
  # Enable using the default xml namespace `http://s3.amazonaws.com/doc/2006-03-01/` when parsing the `CompleteMultipartUpload` xml body.
  use_default_xmlns_for_complete_multipart: false
  # Set timeout between whitespace transmissions during CompleteMultipartUpload processing.
  complete_multipart_keepalive: 10s
  # Use this flag to allow the chunked upload approach without the `aws-chunked` value in the `Content-Encoding` header.
  bypass_content_encoding_check_in_chunks: false
  # Namespaces that should be handled as default
  default_namespaces: [ "", "root" ]
  # Enable bucket/object ACL support for newly created buckets.
  acl_enabled: false

runtime:
  soft_memory_limit: 1gb

features:
  policy:
    # Enable denying access for requests that don't match any policy chain rules.
    deny_by_default: false
  md5:
    enabled: false

web:
  # ReadTimeout is the maximum duration for reading the entire
  # request, including the body. A zero or negative value means
  # there will be no timeout.
  read_timeout: 0

  # ReadHeaderTimeout is the amount of time allowed to read
  # request headers. The connection's read deadline is reset
  # after reading the headers and the Handler can decide what
  # is considered too slow for the body. If ReadHeaderTimeout
  # is zero, the value of ReadTimeout is used. If both are
  # zero, there is no timeout.
  read_header_timeout: 30s

  # WriteTimeout is the maximum duration before timing out
  # writes of the response. It is reset whenever a new
  # request's header is read. Like ReadTimeout, it does not
  # let Handlers make decisions on a per-request basis.
  # A zero or negative value means there will be no timeout.
  write_timeout: 0

  # IdleTimeout is the maximum amount of time to wait for the
  # next request when keep-alives are enabled. If IdleTimeout
  # is zero, the value of ReadTimeout is used. If both are
  # zero, there is no timeout.
  idle_timeout: 30s

# FrostfsID contract configuration. To enable this functionality the `rpc_endpoint` param must also be set.
frostfsid:
  # FrostfsID contract hash (LE) or name in NNS.
  contract: frostfsid.frostfs
  validation:
    # Enables a check to allow requests only from users registered in the FrostfsID contract.
    enabled: true

# Policy contract configuration. To enable this functionality the `rpc_endpoint` param must also be set.
policy:
  # Policy contract hash (LE) or name in NNS.
  contract: policy.frostfs

# Proxy contract configuration. To enable this functionality the `rpc_endpoint` param must also be set.
proxy:
  # Proxy contract hash (LE) or name in NNS.
  contract: proxy.frostfs

namespaces:
  config: namespaces.json
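
The comments in the `web` section above are taken verbatim from Go's net/http documentation, so these four timeouts presumably map onto the corresponding http.Server fields. A minimal sketch of that correspondence, using the same values as the example; the server construction here is illustrative only and is not the gateway's actual wiring:

package main

import (
    "net/http"
    "time"
)

func main() {
    // Values mirror the example config above:
    // read_timeout: 0, read_header_timeout: 30s, write_timeout: 0, idle_timeout: 30s.
    srv := &http.Server{
        Addr:              ":8080",
        ReadTimeout:       0,                // no limit on reading the whole request
        ReadHeaderTimeout: 30 * time.Second, // limit on reading request headers only
        WriteTimeout:      0,                // no limit on writing the response
        IdleTimeout:       30 * time.Second, // keep-alive wait between requests
    }
    _ = srv.ListenAndServe()
}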
@@ -33,7 +33,7 @@ type ContainerPolicy struct {

// GateData represents gate tokens in AccessBox.
type GateData struct {
    SecretKey string
    AccessKey string
    BearerToken *bearer.Token
    SessionTokens []*session.Container
    GateKey *keys.PublicKey

@@ -59,14 +59,6 @@ func (g *GateData) SessionTokenForSetEACL() *session.Container {
    return g.containerSessionToken(session.VerbContainerSetEACL)
}

// SessionToken returns the first container session context.
func (g *GateData) SessionToken() *session.Container {
    if len(g.SessionTokens) != 0 {
        return g.SessionTokens[0]
    }
    return nil
}

func (g *GateData) containerSessionToken(verb session.ContainerVerb) *session.Container {
    for _, sessionToken := range g.SessionTokens {
        if isAppropriateContainerContext(sessionToken, verb) {

@@ -85,9 +77,9 @@ func isAppropriateContainerContext(tok *session.Container, verb session.Containe
    }
}

// Secrets represents SecretKey and the key to encrypt gate tokens.
// Secrets represents AccessKey and the key to encrypt gate tokens.
type Secrets struct {
    SecretKey string
    AccessKey string
    EphemeralKey *keys.PrivateKey
}

@@ -110,7 +102,7 @@ func PackTokens(gatesData []*GateData, secret []byte) (*AccessBox, *Secrets, err
    if err != nil {
        return nil, nil, fmt.Errorf("create ephemeral key: %w", err)
    }
    box.SeedKey = ephemeralKey.PublicKey().Bytes()
    box.OwnerPublicKey = ephemeralKey.PublicKey().Bytes()

    if secret == nil {
        secret, err = generateSecret()

@@ -128,9 +120,9 @@ func PackTokens(gatesData []*GateData, secret []byte) (*AccessBox, *Secrets, err

// GetTokens returns gate tokens from AccessBox.
func (x *AccessBox) GetTokens(owner *keys.PrivateKey) (*GateData, error) {
    seedKey, err := keys.NewPublicKeyFromBytes(x.SeedKey, elliptic.P256())
    sender, err := keys.NewPublicKeyFromBytes(x.OwnerPublicKey, elliptic.P256())
    if err != nil {
        return nil, fmt.Errorf("couldn't unmarshal SeedKey: %w", err)
        return nil, fmt.Errorf("couldn't unmarshal OwnerPublicKey: %w", err)
    }
    ownerKey := owner.PublicKey().Bytes()
    for _, gate := range x.Gates {

@@ -138,7 +130,7 @@ func (x *AccessBox) GetTokens(owner *keys.PrivateKey) (*GateData, error) {
            continue
        }

        gateData, err := decodeGate(gate, owner, seedKey)
        gateData, err := decodeGate(gate, owner, sender)
        if err != nil {
            return nil, fmt.Errorf("failed to decode gate: %w", err)
        }

@@ -192,7 +184,7 @@ func (x *AccessBox) addTokens(gatesData []*GateData, ephemeralKey *keys.PrivateK
        }

        tokens := new(Tokens)
        tokens.SecretKey = secret
        tokens.AccessKey = secret
        tokens.BearerToken = encBearer
        tokens.SessionTokens = encSessions

@@ -205,25 +197,25 @@ func (x *AccessBox) addTokens(gatesData []*GateData, ephemeralKey *keys.PrivateK
    return nil
}

func encodeGate(ephemeralKey *keys.PrivateKey, seedKey *keys.PublicKey, tokens *Tokens) (*AccessBox_Gate, error) {
func encodeGate(ephemeralKey *keys.PrivateKey, ownerKey *keys.PublicKey, tokens *Tokens) (*AccessBox_Gate, error) {
    data, err := proto.Marshal(tokens)
    if err != nil {
        return nil, fmt.Errorf("encode tokens: %w", err)
    }

    encrypted, err := encrypt(ephemeralKey, seedKey, data)
    encrypted, err := encrypt(ephemeralKey, ownerKey, data)
    if err != nil {
        return nil, fmt.Errorf("encrypt tokens: %w", err)
    }

    gate := new(AccessBox_Gate)
    gate.GatePublicKey = seedKey.Bytes()
    gate.GatePublicKey = ownerKey.Bytes()
    gate.Tokens = encrypted
    return gate, nil
}

func decodeGate(gate *AccessBox_Gate, owner *keys.PrivateKey, seedKey *keys.PublicKey) (*GateData, error) {
    data, err := decrypt(owner, seedKey, gate.Tokens)
func decodeGate(gate *AccessBox_Gate, owner *keys.PrivateKey, sender *keys.PublicKey) (*GateData, error) {
    data, err := decrypt(owner, sender, gate.Tokens)
    if err != nil {
        return nil, fmt.Errorf("decrypt tokens: %w", err)
    }

@@ -248,7 +240,7 @@ func decodeGate(gate *AccessBox_Gate, owner *keys.PrivateKey, seedKey *keys.Publ

    gateData := NewGateData(owner.PublicKey(), &bearerTkn)
    gateData.SessionTokens = sessionTkns
    gateData.SecretKey = hex.EncodeToString(tokens.SecretKey)
    gateData.AccessKey = hex.EncodeToString(tokens.AccessKey)
    return gateData, nil
}

@@ -276,8 +268,8 @@ func deriveKey(secret []byte) ([]byte, error) {
    return key, err
}

func encrypt(owner *keys.PrivateKey, seedKey *keys.PublicKey, data []byte) ([]byte, error) {
    enc, err := getCipher(owner, seedKey)
func encrypt(owner *keys.PrivateKey, sender *keys.PublicKey, data []byte) ([]byte, error) {
    enc, err := getCipher(owner, sender)
    if err != nil {
        return nil, fmt.Errorf("get cipher: %w", err)
    }

@@ -290,8 +282,8 @@ func encrypt(owner *keys.PrivateKey, seedKey *keys.PublicKey, data []byte) ([]by
    return enc.Seal(nonce, nonce, data, nil), nil
}

func decrypt(owner *keys.PrivateKey, seedKey *keys.PublicKey, data []byte) ([]byte, error) {
    dec, err := getCipher(owner, seedKey)
func decrypt(owner *keys.PrivateKey, sender *keys.PublicKey, data []byte) ([]byte, error) {
    dec, err := getCipher(owner, sender)
    if err != nil {
        return nil, fmt.Errorf("get cipher: %w", err)
    }

@@ -304,8 +296,8 @@ func decrypt(owner *keys.PrivateKey, seedKey *keys.PublicKey, data []byte) ([]by
    return dec.Open(nil, nonce, cypher, nil)
}

func getCipher(owner *keys.PrivateKey, seedKey *keys.PublicKey) (cipher.AEAD, error) {
    secret, err := generateShared256(owner, seedKey)
func getCipher(owner *keys.PrivateKey, sender *keys.PublicKey) (cipher.AEAD, error) {
    secret, err := generateShared256(owner, sender)
    if err != nil {
        return nil, fmt.Errorf("generate shared key: %w", err)
    }
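
The encrypt/decrypt/getCipher hunks above only rename the seedKey parameter to sender; the scheme itself is unchanged: a shared secret derived from one party's private key and the other's public key feeds an AEAD cipher, and the nonce is prepended to the ciphertext (enc.Seal(nonce, nonce, data, nil)). The standalone sketch below reproduces that pattern with the standard library only; the P-256 curve, SHA-256 derivation and AES-GCM are assumptions made for illustration and are not necessarily what generateShared256 and getCipher actually use:

package main

import (
    "crypto/aes"
    "crypto/cipher"
    "crypto/ecdh"
    "crypto/rand"
    "crypto/sha256"
    "fmt"
)

// aeadFor derives an AEAD cipher from the ECDH shared secret between our
// private key and the peer's public key; both sides derive the same cipher.
func aeadFor(priv *ecdh.PrivateKey, peer *ecdh.PublicKey) (cipher.AEAD, error) {
    shared, err := priv.ECDH(peer)
    if err != nil {
        return nil, fmt.Errorf("generate shared key: %w", err)
    }
    key := sha256.Sum256(shared) // assumed key derivation, for illustration only
    block, err := aes.NewCipher(key[:])
    if err != nil {
        return nil, err
    }
    return cipher.NewGCM(block)
}

// seal encrypts data for the peer and prepends the nonce to the ciphertext,
// mirroring enc.Seal(nonce, nonce, data, nil) in the code above.
func seal(priv *ecdh.PrivateKey, peer *ecdh.PublicKey, data []byte) ([]byte, error) {
    enc, err := aeadFor(priv, peer)
    if err != nil {
        return nil, err
    }
    nonce := make([]byte, enc.NonceSize())
    if _, err := rand.Read(nonce); err != nil {
        return nil, err
    }
    return enc.Seal(nonce, nonce, data, nil), nil
}

// open splits the nonce off the ciphertext and decrypts it.
func open(priv *ecdh.PrivateKey, peer *ecdh.PublicKey, box []byte) ([]byte, error) {
    dec, err := aeadFor(priv, peer)
    if err != nil {
        return nil, err
    }
    if len(box) < dec.NonceSize() {
        return nil, fmt.Errorf("ciphertext too short")
    }
    nonce, cypher := box[:dec.NonceSize()], box[dec.NonceSize():]
    return dec.Open(nil, nonce, cypher, nil)
}

func main() {
    curve := ecdh.P256()
    ephemeral, _ := curve.GenerateKey(rand.Reader) // plays the role of the renamed sender key
    gate, _ := curve.GenerateKey(rand.Reader)      // plays the role of a gate owner key

    box, _ := seal(ephemeral, gate.PublicKey(), []byte("gate tokens"))
    plain, err := open(gate, ephemeral.PublicKey(), box)
    if err != nil {
        panic(err)
    }
    fmt.Println(string(plain)) // gate tokens
}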
@@ -1,7 +1,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.30.0
// protoc v3.12.4
// protoc-gen-go v1.28.1
// protoc v3.21.12
// source: creds/accessbox/accessbox.proto

package accessbox

@@ -25,7 +25,7 @@ type AccessBox struct {
    sizeCache protoimpl.SizeCache
    unknownFields protoimpl.UnknownFields

    SeedKey []byte `protobuf:"bytes,1,opt,name=seedKey,proto3" json:"seedKey,omitempty"`
    OwnerPublicKey []byte `protobuf:"bytes,1,opt,name=ownerPublicKey,proto3" json:"ownerPublicKey,omitempty"`
    Gates []*AccessBox_Gate `protobuf:"bytes,2,rep,name=gates,proto3" json:"gates,omitempty"`
    ContainerPolicy []*AccessBox_ContainerPolicy `protobuf:"bytes,3,rep,name=containerPolicy,proto3" json:"containerPolicy,omitempty"`
}

@@ -62,9 +62,9 @@ func (*AccessBox) Descriptor() ([]byte, []int) {
    return file_creds_accessbox_accessbox_proto_rawDescGZIP(), []int{0}
}

func (x *AccessBox) GetSeedKey() []byte {
func (x *AccessBox) GetOwnerPublicKey() []byte {
    if x != nil {
        return x.SeedKey
        return x.OwnerPublicKey
    }
    return nil
}

@@ -88,7 +88,7 @@ type Tokens struct {
    sizeCache protoimpl.SizeCache
    unknownFields protoimpl.UnknownFields

    SecretKey []byte `protobuf:"bytes,1,opt,name=secretKey,proto3" json:"secretKey,omitempty"`
    AccessKey []byte `protobuf:"bytes,1,opt,name=accessKey,proto3" json:"accessKey,omitempty"`
    BearerToken []byte `protobuf:"bytes,2,opt,name=bearerToken,proto3" json:"bearerToken,omitempty"`
    SessionTokens [][]byte `protobuf:"bytes,3,rep,name=sessionTokens,proto3" json:"sessionTokens,omitempty"`
}

@@ -125,9 +125,9 @@ func (*Tokens) Descriptor() ([]byte, []int) {
    return file_creds_accessbox_accessbox_proto_rawDescGZIP(), []int{1}
}

func (x *Tokens) GetSecretKey() []byte {
func (x *Tokens) GetAccessKey() []byte {
    if x != nil {
        return x.SecretKey
        return x.AccessKey
    }
    return nil
}
@@ -261,40 +261,41 @@ var File_creds_accessbox_accessbox_proto protoreflect.FileDescriptor
var file_creds_accessbox_accessbox_proto_rawDesc = []byte{
    0x0a, 0x1f, 0x63, 0x72, 0x65, 0x64, 0x73, 0x2f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x62, 0x6f,
    0x78, 0x2f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x62, 0x6f, 0x78, 0x2e, 0x70, 0x72, 0x6f, 0x74,
    0x6f, 0x12, 0x09, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x62, 0x6f, 0x78, 0x22, 0xc7, 0x02, 0x0a,
    0x09, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x42, 0x6f, 0x78, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65,
    0x65, 0x64, 0x4b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x73, 0x65, 0x65,
    0x64, 0x4b, 0x65, 0x79, 0x12, 0x2f, 0x0a, 0x05, 0x67, 0x61, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20,
    0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x62, 0x6f, 0x78, 0x2e,
    0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x42, 0x6f, 0x78, 0x2e, 0x47, 0x61, 0x74, 0x65, 0x52, 0x05,
    0x67, 0x61, 0x74, 0x65, 0x73, 0x12, 0x4e, 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e,
    0x65, 0x72, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24,
    0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x62, 0x6f, 0x78, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73,
    0x73, 0x42, 0x6f, 0x78, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x50, 0x6f,
    0x6c, 0x69, 0x63, 0x79, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x50,
    0x6f, 0x6c, 0x69, 0x63, 0x79, 0x1a, 0x44, 0x0a, 0x04, 0x47, 0x61, 0x74, 0x65, 0x12, 0x16, 0x0a,
    0x06, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x74,
    0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x12, 0x24, 0x0a, 0x0d, 0x67, 0x61, 0x74, 0x65, 0x50, 0x75, 0x62,
    0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d, 0x67, 0x61,
    0x74, 0x65, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x1a, 0x59, 0x0a, 0x0f, 0x43,
    0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x2e,
    0x0a, 0x12, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72,
    0x61, 0x69, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x6c, 0x6f, 0x63, 0x61,
    0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x12, 0x16,
    0x0a, 0x06, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06,
    0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x6e, 0x0a, 0x06, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x73,
    0x12, 0x1c, 0x0a, 0x09, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x18, 0x01, 0x20,
    0x01, 0x28, 0x0c, 0x52, 0x09, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x12, 0x20,
    0x0a, 0x0b, 0x62, 0x65, 0x61, 0x72, 0x65, 0x72, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20,
    0x01, 0x28, 0x0c, 0x52, 0x0b, 0x62, 0x65, 0x61, 0x72, 0x65, 0x72, 0x54, 0x6f, 0x6b, 0x65, 0x6e,
    0x12, 0x24, 0x0a, 0x0d, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e,
    0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0d, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e,
    0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x42, 0x46, 0x5a, 0x44, 0x67, 0x69, 0x74, 0x2e, 0x66, 0x72,
    0x6f, 0x73, 0x74, 0x66, 0x73, 0x2e, 0x69, 0x6e, 0x66, 0x6f, 0x2f, 0x54, 0x72, 0x75, 0x65, 0x43,
    0x6c, 0x6f, 0x75, 0x64, 0x4c, 0x61, 0x62, 0x2f, 0x66, 0x72, 0x6f, 0x73, 0x74, 0x66, 0x73, 0x2d,
    0x73, 0x33, 0x2d, 0x67, 0x77, 0x2f, 0x63, 0x72, 0x65, 0x64, 0x73, 0x2f, 0x74, 0x6f, 0x6b, 0x65,
    0x6e, 0x62, 0x6f, 0x78, 0x3b, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x62, 0x6f, 0x78, 0x62, 0x06,
    0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
    0x6f, 0x12, 0x09, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x62, 0x6f, 0x78, 0x22, 0xd5, 0x02, 0x0a,
    0x09, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x42, 0x6f, 0x78, 0x12, 0x26, 0x0a, 0x0e, 0x6f, 0x77,
    0x6e, 0x65, 0x72, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01,
    0x28, 0x0c, 0x52, 0x0e, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b,
    0x65, 0x79, 0x12, 0x2f, 0x0a, 0x05, 0x67, 0x61, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28,
    0x0b, 0x32, 0x19, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x62, 0x6f, 0x78, 0x2e, 0x41, 0x63,
    0x63, 0x65, 0x73, 0x73, 0x42, 0x6f, 0x78, 0x2e, 0x47, 0x61, 0x74, 0x65, 0x52, 0x05, 0x67, 0x61,
    0x74, 0x65, 0x73, 0x12, 0x4e, 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72,
    0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x61,
    0x63, 0x63, 0x65, 0x73, 0x73, 0x62, 0x6f, 0x78, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x42,
    0x6f, 0x78, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x50, 0x6f, 0x6c, 0x69,
    0x63, 0x79, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x50, 0x6f, 0x6c,
    0x69, 0x63, 0x79, 0x1a, 0x44, 0x0a, 0x04, 0x47, 0x61, 0x74, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x74,
    0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x74, 0x6f, 0x6b,
    0x65, 0x6e, 0x73, 0x12, 0x24, 0x0a, 0x0d, 0x67, 0x61, 0x74, 0x65, 0x50, 0x75, 0x62, 0x6c, 0x69,
    0x63, 0x4b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d, 0x67, 0x61, 0x74, 0x65,
    0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x1a, 0x59, 0x0a, 0x0f, 0x43, 0x6f, 0x6e,
    0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x2e, 0x0a, 0x12,
    0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69,
    0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69,
    0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x12, 0x16, 0x0a, 0x06,
    0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x70, 0x6f,
    0x6c, 0x69, 0x63, 0x79, 0x22, 0x6e, 0x0a, 0x06, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x12, 0x1c,
    0x0a, 0x09, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28,
    0x0c, 0x52, 0x09, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x20, 0x0a, 0x0b,
    0x62, 0x65, 0x61, 0x72, 0x65, 0x72, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28,
    0x0c, 0x52, 0x0b, 0x62, 0x65, 0x61, 0x72, 0x65, 0x72, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x24,
    0x0a, 0x0d, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x18,
    0x03, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0d, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x6f,
    0x6b, 0x65, 0x6e, 0x73, 0x42, 0x46, 0x5a, 0x44, 0x67, 0x69, 0x74, 0x2e, 0x66, 0x72, 0x6f, 0x73,
    0x74, 0x66, 0x73, 0x2e, 0x69, 0x6e, 0x66, 0x6f, 0x2f, 0x54, 0x72, 0x75, 0x65, 0x43, 0x6c, 0x6f,
    0x75, 0x64, 0x4c, 0x61, 0x62, 0x2f, 0x66, 0x72, 0x6f, 0x73, 0x74, 0x66, 0x73, 0x2d, 0x73, 0x33,
    0x2d, 0x67, 0x77, 0x2f, 0x63, 0x72, 0x65, 0x64, 0x73, 0x2f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x62,
    0x6f, 0x78, 0x3b, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x62, 0x6f, 0x78, 0x62, 0x06, 0x70, 0x72,
    0x6f, 0x74, 0x6f, 0x33,
}

var (
Some files were not shown because too many files have changed in this diff.