[#306] Add flag to enable old ACL bucket creation
Some checks failed:
- DCO (pull_request): successful in 1m36s
- Vulncheck (pull_request): successful in 1m59s
- Builds (1.20) (pull_request): successful in 2m43s
- Builds (1.21) (pull_request): successful in 2m41s
- Lint (pull_request): failing after 2m51s
- Tests (1.20) (pull_request): successful in 2m58s
- Tests (1.21) (pull_request): successful in 1m40s

Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
Commit 4646f0f445 (parent 5c7fa2496b) by Denis Kirillov, 2024-02-14 17:04:35 +03:00
10 changed files with 166 additions and 91 deletions

View file

@@ -20,6 +20,7 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"

@@ -336,10 +337,15 @@ func (h *handler) bearerTokenIssuerKey(ctx context.Context) (*keys.PublicKey, error) {
 		return nil, err
 	}
 
-	var btoken v2acl.BearerToken
-	box.Gate.BearerToken.WriteToV2(&btoken)
-
-	key, err := keys.NewPublicKeyFromBytes(btoken.GetSignature().GetKey(), elliptic.P256())
+	return getTokenIssuerKey(box)
+}
+
+func getTokenIssuerKey(box *accessbox.Box) (*keys.PublicKey, error) {
+	if box.Gate.BearerToken == nil {
+		return nil, stderrors.New("bearer token is missing")
+	}
+
+	key, err := keys.NewPublicKeyFromBytes(box.Gate.BearerToken.SigningKeyBytes(), elliptic.P256())
 	if err != nil {
 		return nil, fmt.Errorf("public key from bytes: %w", err)
 	}
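The extracted helper also adds a guard the old code lacked: box.Gate.BearerToken used to be dereferenced unconditionally. A minimal illustrative test of the new behavior, not part of this commit; it assumes accessbox.GateData is the gate payload type behind box.Gate, as used elsewhere in the gateway:

```go
package handler

import (
	"testing"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
	"github.com/stretchr/testify/require"
)

// Illustrative only: a box without a bearer token should now yield an error
// from getTokenIssuerKey instead of a nil dereference.
func TestGetTokenIssuerKeyMissingBearerToken(t *testing.T) {
	// accessbox.GateData is assumed to be the type of box.Gate.
	box := &accessbox.Box{Gate: &accessbox.GateData{}}

	_, err := getTokenIssuerKey(box)
	require.Error(t, err)
}
```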

View file

@@ -16,11 +16,9 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
 	s3errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
-	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
 	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"

@@ -1303,17 +1301,17 @@ func TestBucketAclToAst(t *testing.T) {
 func TestPutBucketACL(t *testing.T) {
 	tc := prepareHandlerContext(t)
+	tc.config.aclEnabled = true
 	bktName := "bucket-for-acl"
 
-	box, _ := createAccessBox(t)
-	bktInfo := createBucketOldACL(tc, bktName, box)
+	info := createBucket(tc, bktName)
 
 	header := map[string]string{api.AmzACL: "public-read"}
-	putBucketACL(tc, bktName, box, header)
+	putBucketACL(tc, bktName, info.Box, header)
 
 	header = map[string]string{api.AmzACL: "private"}
-	putBucketACL(tc, bktName, box, header)
+	putBucketACL(tc, bktName, info.Box, header)
 
-	checkLastRecords(t, tc, bktInfo, eacl.ActionDeny)
+	checkLastRecords(t, tc, info.BktInfo, eacl.ActionDeny)
 }
 
 func TestPutBucketAPE(t *testing.T) {

@@ -1598,47 +1596,6 @@ func createBucket(hc *handlerContext, bktName string) *createBucketInfo {
 	}
 }
 
-func createBucketOldACL(hc *handlerContext, bktName string, box *accessbox.Box) *data.BucketInfo {
-	w := createBucketBase(hc, bktName, box)
-	assertStatus(hc.t, w, http.StatusOK)
-
-	cnrID, err := hc.tp.ContainerID(bktName)
-	require.NoError(hc.t, err)
-
-	cnr, err := hc.tp.Container(hc.Context(), cnrID)
-	require.NoError(hc.t, err)
-	cnr.SetBasicACL(acl.PublicRWExtended)
-	hc.tp.SetContainer(cnrID, cnr)
-
-	table := eacl.NewTable()
-	table.SetCID(cnrID)
-	key, err := hc.h.bearerTokenIssuerKey(hc.Context())
-	require.NoError(hc.t, err)
-
-	for _, op := range fullOps {
-		table.AddRecord(getAllowRecord(op, key))
-	}
-	for _, op := range fullOps {
-		table.AddRecord(getOthersRecord(op, eacl.ActionDeny))
-	}
-
-	err = hc.tp.SetContainerEACL(hc.Context(), *table, nil)
-	require.NoError(hc.t, err)
-
-	bktInfo, err := hc.Layer().GetBucketInfo(hc.Context(), bktName)
-	require.NoError(hc.t, err)
-
-	settings, err := hc.tree.GetSettingsNode(hc.Context(), bktInfo)
-	require.NoError(hc.t, err)
-	settings.CannedACL = ""
-	err = hc.Layer().PutBucketSettings(hc.Context(), &layer.PutSettingsParams{BktInfo: bktInfo, Settings: settings})
-	require.NoError(hc.t, err)
-
-	bktInfo, err = hc.Layer().GetBucketInfo(hc.Context(), bktName)
-	require.NoError(hc.t, err)
-
-	return bktInfo
-}
-
 func createBucketAssertS3Error(hc *handlerContext, bktName string, box *accessbox.Box, code s3errors.ErrorCode) {
 	w := createBucketBase(hc, bktName, box)
 	assertS3Error(hc.t, w, s3errors.GetAPIError(code))
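With the new aclEnabled knob on the config mock, both creation paths can be exercised by flipping a single flag. An illustrative sketch reusing the helpers from this test file (prepareHandlerContext, createBucket); the test name and loop are hypothetical, not part of the commit:

```go
package handler

import (
	"fmt"
	"testing"
)

// Illustrative only: cover both bucket creation paths by toggling the mock flag.
func TestCreateBucketACLAndPolicyPaths(t *testing.T) {
	for _, aclEnabled := range []bool{false, true} {
		hc := prepareHandlerContext(t)
		hc.config.aclEnabled = aclEnabled

		// prepareHandlerContext and createBucket are the existing helpers
		// from this test file.
		createBucket(hc, fmt.Sprintf("bucket-acl-enabled-%t", aclEnabled))
	}
}
```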

View file

@@ -46,6 +46,7 @@ type (
 		IsResolveListAllow() bool
 		BypassContentEncodingInChunks() bool
 		MD5Enabled() bool
+		ACLEnabled() bool
 	}
 
 	FrostFSID interface {

View file

@@ -70,6 +70,7 @@ type configMock struct {
 	defaultCopiesNumbers          []uint32
 	bypassContentEncodingInChunks bool
 	md5Enabled                    bool
+	aclEnabled                    bool
 }
 
 func (c *configMock) DefaultPlacementPolicy(_ string) netmap.PlacementPolicy {

@@ -121,6 +122,10 @@ func (c *configMock) MD5Enabled() bool {
 	return c.md5Enabled
 }
 
+func (c *configMock) ACLEnabled() bool {
+	return c.aclEnabled
+}
+
 func (c *configMock) ResolveNamespaceAlias(ns string) string {
 	return ns
 }

View file

@@ -765,22 +765,61 @@ func parseCannedACL(header http.Header) (string, error) {
 }
 
 func (h *handler) CreateBucketHandler(w http.ResponseWriter, r *http.Request) {
-	ctx := r.Context()
-	reqInfo := middleware.GetReqInfo(ctx)
-	p := &layer.CreateBucketParams{
-		Name:       reqInfo.BucketName,
-		Namespace:  reqInfo.Namespace,
-		APEEnabled: true,
-	}
-
-	if err := checkBucketName(reqInfo.BucketName); err != nil {
-		h.logAndSendError(w, "invalid bucket name", reqInfo, err)
+	if h.cfg.ACLEnabled() {
+		h.createBucketHandlerACL(w, r)
 		return
 	}
 
-	key, err := h.bearerTokenIssuerKey(ctx)
+	h.createBucketHandlerPolicy(w, r)
+}
+
+func (h *handler) parseCommonCreateBucketParams(reqInfo *middleware.ReqInfo, boxData *accessbox.Box, r *http.Request) (*keys.PublicKey, *layer.CreateBucketParams, error) {
+	p := &layer.CreateBucketParams{
+		Name:                     reqInfo.BucketName,
+		Namespace:                reqInfo.Namespace,
+		SessionContainerCreation: boxData.Gate.SessionTokenForPut(),
+	}
+
+	if p.SessionContainerCreation == nil {
+		return nil, nil, fmt.Errorf("%w: couldn't find session token for put", errors.GetAPIError(errors.ErrAccessDenied))
+	}
+
+	if err := checkBucketName(reqInfo.BucketName); err != nil {
+		return nil, nil, fmt.Errorf("invalid bucket name: %w", err)
+	}
+
+	key, err := getTokenIssuerKey(boxData)
 	if err != nil {
-		h.logAndSendError(w, "couldn't get bearer token signature key", reqInfo, err)
+		return nil, nil, fmt.Errorf("couldn't get bearer token signature key: %w", err)
+	}
+
+	createParams, err := h.parseLocationConstraint(r)
+	if err != nil {
+		return nil, nil, fmt.Errorf("could not parse location contraint: %w", err)
+	}
+
+	if err = h.setPlacementPolicy(p, reqInfo.Namespace, createParams.LocationConstraint, boxData.Policies); err != nil {
+		return nil, nil, fmt.Errorf("couldn't set placement policy: %w", err)
+	}
+
+	p.ObjectLockEnabled = isLockEnabled(r.Header)
+
+	return key, p, nil
+}
+
+func (h *handler) createBucketHandlerPolicy(w http.ResponseWriter, r *http.Request) {
+	ctx := r.Context()
+	reqInfo := middleware.GetReqInfo(ctx)
+	boxData, err := middleware.GetBoxData(ctx)
+	if err != nil {
+		h.logAndSendError(w, "get access box from request", reqInfo, err)
+		return
+	}
+
+	key, p, err := h.parseCommonCreateBucketParams(reqInfo, boxData, r)
+	if err != nil {
+		h.logAndSendError(w, "parse create bucket params", reqInfo, err)
 		return
 	}

@@ -790,31 +829,7 @@ func (h *handler) CreateBucketHandler(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 
-	createParams, err := h.parseLocationConstraint(r)
-	if err != nil {
-		h.logAndSendError(w, "could not parse body", reqInfo, err)
-		return
-	}
-
-	var policies []*accessbox.ContainerPolicy
-	boxData, err := middleware.GetBoxData(ctx)
-	if err == nil {
-		policies = boxData.Policies
-		p.SessionContainerCreation = boxData.Gate.SessionTokenForPut()
-	}
-
-	if p.SessionContainerCreation == nil {
-		h.logAndSendError(w, "couldn't find session token for put", reqInfo, errors.GetAPIError(errors.ErrAccessDenied))
-		return
-	}
-
-	if err = h.setPlacementPolicy(p, reqInfo.Namespace, createParams.LocationConstraint, policies); err != nil {
-		h.logAndSendError(w, "couldn't set placement policy", reqInfo, err)
-		return
-	}
-
-	p.ObjectLockEnabled = isLockEnabled(r.Header)
+	p.APEEnabled = true
 	bktInfo, err := h.obj.CreateBucket(ctx, p)
 	if err != nil {
 		h.logAndSendError(w, "could not create bucket", reqInfo, err)

@@ -850,6 +865,75 @@ func (h *handler) CreateBucketHandler(w http.ResponseWriter, r *http.Request) {
 	middleware.WriteSuccessResponseHeadersOnly(w)
 }
 
+func (h *handler) createBucketHandlerACL(w http.ResponseWriter, r *http.Request) {
+	ctx := r.Context()
+	reqInfo := middleware.GetReqInfo(ctx)
+	boxData, err := middleware.GetBoxData(ctx)
+	if err != nil {
+		h.logAndSendError(w, "get access box from request", reqInfo, err)
+		return
+	}
+
+	key, p, err := h.parseCommonCreateBucketParams(reqInfo, boxData, r)
+	if err != nil {
+		h.logAndSendError(w, "parse create bucket params", reqInfo, err)
+		return
+	}
+
+	aclPrm := &layer.PutBucketACLParams{SessionToken: boxData.Gate.SessionTokenForSetEACL()}
+	if aclPrm.SessionToken == nil {
+		h.logAndSendError(w, "couldn't find session token for setEACL", reqInfo, errors.GetAPIError(errors.ErrAccessDenied))
+		return
+	}
+
+	bktACL, err := parseACLHeaders(r.Header, key)
+	if err != nil {
+		h.logAndSendError(w, "could not parse bucket acl", reqInfo, err)
+		return
+	}
+
+	resInfo := &resourceInfo{Bucket: reqInfo.BucketName}
+	aclPrm.EACL, err = bucketACLToTable(bktACL, resInfo)
+	if err != nil {
+		h.logAndSendError(w, "could translate bucket acl to eacl", reqInfo, err)
+		return
+	}
+
+	bktInfo, err := h.obj.CreateBucket(ctx, p)
+	if err != nil {
+		h.logAndSendError(w, "could not create bucket", reqInfo, err)
+		return
+	}
+	h.reqLogger(ctx).Info(logs.BucketIsCreated, zap.Stringer("container_id", bktInfo.CID))
+
+	aclPrm.BktInfo = bktInfo
+	if err = h.obj.PutBucketACL(r.Context(), aclPrm); err != nil {
+		h.logAndSendError(w, "could not put bucket e/ACL", reqInfo, err)
+		return
+	}
+
+	sp := &layer.PutSettingsParams{
+		BktInfo: bktInfo,
+		Settings: &data.BucketSettings{
+			OwnerKey:   key,
+			Versioning: data.VersioningUnversioned,
+		},
+	}
+
+	if p.ObjectLockEnabled {
+		sp.Settings.Versioning = data.VersioningEnabled
+	}
+
+	if err = h.obj.PutBucketSettings(ctx, sp); err != nil {
+		h.logAndSendError(w, "couldn't save bucket settings", reqInfo, err,
+			zap.String("container_id", bktInfo.CID.EncodeToString()))
+		return
+	}
+
+	middleware.WriteSuccessResponseHeadersOnly(w)
+}
+
 const s3ActionPrefix = "s3:"
 
 var (
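Taken together, CreateBucketHandler is reduced to a dispatcher. The sketch below restates it with the intent of each branch spelled out in comments; the branch bodies are the functions shown in the hunks above:

```go
package handler

import "net/http"

// Restatement of the dispatch introduced above, with explanatory comments.
func (h *handler) CreateBucketHandler(w http.ResponseWriter, r *http.Request) {
	if h.cfg.ACLEnabled() {
		// Legacy path: create the container, translate the ACL request
		// headers into an eACL table, apply it with the setEACL session
		// token and store the initial bucket settings.
		h.createBucketHandlerACL(w, r)
		return
	}

	// Default path: create the container with APEEnabled set, relying on
	// policy-engine rules instead of an eACL table.
	h.createBucketHandlerPolicy(w, r)
}
```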

View file

@@ -100,6 +100,7 @@ type (
 		clientCut                bool
 		maxBufferSizeForPut      uint64
 		md5Enabled               bool
+		aclEnabled               bool
 		namespaceHeader          string
 		defaultNamespaces        []string
 		authorizedControlAPIKeys [][]byte

@@ -222,6 +223,7 @@ func newAppSettings(log *Logger, v *viper.Viper, key *keys.PrivateKey) *appSettings {
 func (s *appSettings) update(v *viper.Viper, log *zap.Logger, key *keys.PrivateKey) {
 	s.updateNamespacesSettings(v, log)
 	s.useDefaultXMLNamespace(v.GetBool(cfgKludgeUseDefaultXMLNS))
+	s.setACLEnabled(v.GetBool(cfgKludgeACLEnabled))
 	s.setBypassContentEncodingInChunks(v.GetBool(cfgKludgeBypassContentEncodingCheckInChunks))
 	s.setClientCut(v.GetBool(cfgClientCut))
 	s.setBufferMaxSizeForPut(v.GetUint64(cfgBufferMaxSizeForPut))

@@ -352,6 +354,18 @@ func (s *appSettings) setMD5Enabled(md5Enabled bool) {
 	s.mu.Unlock()
 }
 
+func (s *appSettings) setACLEnabled(enableACL bool) {
+	s.mu.Lock()
+	s.aclEnabled = enableACL
+	s.mu.Unlock()
+}
+
+func (s *appSettings) ACLEnabled() bool {
+	s.mu.RLock()
+	defer s.mu.RUnlock()
+	return s.aclEnabled
+}
+
 func (s *appSettings) NamespaceHeader() string {
 	s.mu.RLock()
 	defer s.mu.RUnlock()

View file

@@ -166,6 +166,7 @@ const ( // Settings.
 	cfgKludgeUseDefaultXMLNS                    = "kludge.use_default_xmlns"
 	cfgKludgeBypassContentEncodingCheckInChunks = "kludge.bypass_content_encoding_check_in_chunks"
 	cfgKludgeDefaultNamespaces                  = "kludge.default_namespaces"
+	cfgKludgeACLEnabled                         = "kludge.acl_enabled"
 
 	// Web.
 	cfgWebReadTimeout = "web.read_timeout"

@@ -717,6 +718,7 @@ func newSettings() *viper.Viper {
 	v.SetDefault(cfgKludgeUseDefaultXMLNS, false)
 	v.SetDefault(cfgKludgeBypassContentEncodingCheckInChunks, false)
 	v.SetDefault(cfgKludgeDefaultNamespaces, defaultDefaultNamespaces)
+	v.SetDefault(cfgKludgeACLEnabled, false)
 
 	// web
 	v.SetDefault(cfgWebReadHeaderTimeout, defaultReadHeaderTimeout)
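The new key follows the same viper conventions as the other kludge settings, so it can be set either in YAML or through the environment. A standalone sketch of that pattern follows; the gateway's actual wiring in app_cfg.go may differ in detail:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/spf13/viper"
)

func main() {
	v := viper.New()

	// Environment mapping: "kludge.acl_enabled" with prefix "S3_GW" and the
	// "." -> "_" replacer resolves to S3_GW_KLUDGE_ACL_ENABLED.
	v.SetEnvPrefix("S3_GW")
	v.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
	v.AutomaticEnv()

	// Mirrors v.SetDefault(cfgKludgeACLEnabled, false) above.
	v.SetDefault("kludge.acl_enabled", false)

	// false unless overridden via YAML or the environment variable.
	fmt.Println("acl_enabled:", v.GetBool("kludge.acl_enabled"))
}
```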

View file

@@ -162,6 +162,8 @@ S3_GW_KLUDGE_USE_DEFAULT_XMLNS=false
 S3_GW_KLUDGE_BYPASS_CONTENT_ENCODING_CHECK_IN_CHUNKS=false
 # Namespaces that should be handled as default
 S3_GW_KLUDGE_DEFAULT_NAMESPACES="" "root"
+# Enable bucket/object ACL support for newly created buckets.
+S3_GW_KLUDGE_ACL_ENABLED=false
 
 S3_GW_TRACING_ENABLED=false
 S3_GW_TRACING_ENDPOINT="localhost:4318"

View file

@@ -193,6 +193,8 @@ kludge:
   bypass_content_encoding_check_in_chunks: false
   # Namespaces that should be handled as default
   default_namespaces: [ "", "root" ]
+  # Enable bucket/object ACL support for newly created buckets.
+  acl_enabled: false
 
 runtime:
   soft_memory_limit: 1gb

View file

@@ -597,13 +597,15 @@ kludge:
   use_default_xmlns: false
   bypass_content_encoding_check_in_chunks: false
   default_namespaces: [ "", "root" ]
+  acl_enabled: false
 ```
 
 | Parameter                                 | Type       | SIGHUP reload | Default value | Description                                                                                                                                                                                      |
 |-------------------------------------------|------------|---------------|---------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| `use_default_xmlns`                       | `bool`     | yes           | false         | Enable using default xml namespace `http://s3.amazonaws.com/doc/2006-03-01/` when parse xml bodies.                                                                                             |
-| `bypass_content_encoding_check_in_chunks` | `bool`     | yes           | false         | Use this flag to be able to use [chunked upload approach](https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html) without having `aws-chunked` value in `Content-Encoding` header. |
-| `default_namespaces`                      | `[]string` | n/d           | ["","root"]   | Namespaces that should be handled as default.                                                                                                                                                    |
+| `use_default_xmlns`                       | `bool`     | yes           | `false`       | Enable using default xml namespace `http://s3.amazonaws.com/doc/2006-03-01/` when parse xml bodies.                                                                                             |
+| `bypass_content_encoding_check_in_chunks` | `bool`     | yes           | `false`       | Use this flag to be able to use [chunked upload approach](https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html) without having `aws-chunked` value in `Content-Encoding` header. |
+| `default_namespaces`                      | `[]string` | yes           | `["","root"]` | Namespaces that should be handled as default.                                                                                                                                                    |
+| `acl_enabled`                             | `bool`     | yes           | `false`       | Enable bucket/object ACL support for newly created buckets.                                                                                                                                      |
 
 # `runtime` section
 
 Contains runtime parameters.