forked from TrueCloudLab/frostfs-s3-gw
[#306] Add flag to enable old ACL bucket creation
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
parent bac1b3fb2d
commit c868af8a62
10 changed files with 166 additions and 91 deletions
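In short, bucket creation now branches on the new flag: when `kludge.acl_enabled` is set, the gateway keeps the old eACL-based creation path, otherwise it uses the APE-based path. A condensed sketch of that dispatch, assembled from the hunks below (not the literal file contents):

```go
func (h *handler) CreateBucketHandler(w http.ResponseWriter, r *http.Request) {
	if h.cfg.ACLEnabled() { // driven by kludge.acl_enabled / S3_GW_KLUDGE_ACL_ENABLED
		h.createBucketHandlerACL(w, r) // old behavior: bucket eACL plus bucket settings
		return
	}

	h.createBucketHandlerPolicy(w, r) // default: APE-enabled bucket creation
}
```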
@@ -20,6 +20,7 @@ import (
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"

@@ -336,10 +337,15 @@ func (h *handler) bearerTokenIssuerKey(ctx context.Context) (*keys.PublicKey, error) {
		return nil, err
	}

	var btoken v2acl.BearerToken
	box.Gate.BearerToken.WriteToV2(&btoken)
	return getTokenIssuerKey(box)
}

	key, err := keys.NewPublicKeyFromBytes(btoken.GetSignature().GetKey(), elliptic.P256())
func getTokenIssuerKey(box *accessbox.Box) (*keys.PublicKey, error) {
	if box.Gate.BearerToken == nil {
		return nil, stderrors.New("bearer token is missing")
	}

	key, err := keys.NewPublicKeyFromBytes(box.Gate.BearerToken.SigningKeyBytes(), elliptic.P256())
	if err != nil {
		return nil, fmt.Errorf("public key from bytes: %w", err)
	}
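The hunk above extracts getTokenIssuerKey, which now derives the issuer's public key straight from the bearer token's signing key bytes instead of going through the v2 bearer token structure. A minimal round-trip sketch of that conversion, assuming the neo-go keys package the gateway already imports (the key here is freshly generated, purely for illustration):

```go
package main

import (
	"bytes"
	"crypto/elliptic"
	"fmt"

	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
)

func main() {
	priv, err := keys.NewPrivateKey()
	if err != nil {
		panic(err)
	}

	// Compressed public key bytes for the generated key.
	raw := priv.PublicKey().Bytes()

	// What getTokenIssuerKey does with the token's signing key bytes.
	pub, err := keys.NewPublicKeyFromBytes(raw, elliptic.P256())
	if err != nil {
		panic(err)
	}

	fmt.Println(bytes.Equal(pub.Bytes(), raw)) // true
}
```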
@@ -16,11 +16,9 @@ import (
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
	s3errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"

@@ -1303,17 +1301,17 @@ func TestBucketAclToAst(t *testing.T) {

func TestPutBucketACL(t *testing.T) {
	tc := prepareHandlerContext(t)
	tc.config.aclEnabled = true
	bktName := "bucket-for-acl"

	box, _ := createAccessBox(t)
	bktInfo := createBucketOldACL(tc, bktName, box)
	info := createBucket(tc, bktName)

	header := map[string]string{api.AmzACL: "public-read"}
	putBucketACL(tc, bktName, box, header)
	putBucketACL(tc, bktName, info.Box, header)

	header = map[string]string{api.AmzACL: "private"}
	putBucketACL(tc, bktName, box, header)
	checkLastRecords(t, tc, bktInfo, eacl.ActionDeny)
	putBucketACL(tc, bktName, info.Box, header)
	checkLastRecords(t, tc, info.BktInfo, eacl.ActionDeny)
}

func TestPutBucketAPE(t *testing.T) {

@@ -1598,47 +1596,6 @@ func createBucket(hc *handlerContext, bktName string) *createBucketInfo {
	}
}

func createBucketOldACL(hc *handlerContext, bktName string, box *accessbox.Box) *data.BucketInfo {
	w := createBucketBase(hc, bktName, box)
	assertStatus(hc.t, w, http.StatusOK)

	cnrID, err := hc.tp.ContainerID(bktName)
	require.NoError(hc.t, err)

	cnr, err := hc.tp.Container(hc.Context(), cnrID)
	require.NoError(hc.t, err)
	cnr.SetBasicACL(acl.PublicRWExtended)
	hc.tp.SetContainer(cnrID, cnr)
	table := eacl.NewTable()
	table.SetCID(cnrID)

	key, err := hc.h.bearerTokenIssuerKey(hc.Context())
	require.NoError(hc.t, err)
	for _, op := range fullOps {
		table.AddRecord(getAllowRecord(op, key))
	}

	for _, op := range fullOps {
		table.AddRecord(getOthersRecord(op, eacl.ActionDeny))
	}
	err = hc.tp.SetContainerEACL(hc.Context(), *table, nil)
	require.NoError(hc.t, err)

	bktInfo, err := hc.Layer().GetBucketInfo(hc.Context(), bktName)
	require.NoError(hc.t, err)

	settings, err := hc.tree.GetSettingsNode(hc.Context(), bktInfo)
	require.NoError(hc.t, err)
	settings.CannedACL = ""
	err = hc.Layer().PutBucketSettings(hc.Context(), &layer.PutSettingsParams{BktInfo: bktInfo, Settings: settings})
	require.NoError(hc.t, err)

	bktInfo, err = hc.Layer().GetBucketInfo(hc.Context(), bktName)
	require.NoError(hc.t, err)

	return bktInfo
}

func createBucketAssertS3Error(hc *handlerContext, bktName string, box *accessbox.Box, code s3errors.ErrorCode) {
	w := createBucketBase(hc, bktName, box)
	assertS3Error(hc.t, w, s3errors.GetAPIError(code))
@@ -46,6 +46,7 @@ type (
		IsResolveListAllow() bool
		BypassContentEncodingInChunks() bool
		MD5Enabled() bool
		ACLEnabled() bool
	}

	FrostFSID interface {
@@ -70,6 +70,7 @@ type configMock struct {
	defaultCopiesNumbers          []uint32
	bypassContentEncodingInChunks bool
	md5Enabled                    bool
	aclEnabled                    bool
}

func (c *configMock) DefaultPlacementPolicy(_ string) netmap.PlacementPolicy {

@@ -121,6 +122,10 @@ func (c *configMock) MD5Enabled() bool {
	return c.md5Enabled
}

func (c *configMock) ACLEnabled() bool {
	return c.aclEnabled
}

func (c *configMock) ResolveNamespaceAlias(ns string) string {
	return ns
}
@@ -765,22 +765,61 @@ func parseCannedACL(header http.Header) (string, error) {
}

func (h *handler) CreateBucketHandler(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	reqInfo := middleware.GetReqInfo(ctx)
	p := &layer.CreateBucketParams{
		Name:       reqInfo.BucketName,
		Namespace:  reqInfo.Namespace,
		APEEnabled: true,
	}

	if err := checkBucketName(reqInfo.BucketName); err != nil {
		h.logAndSendError(w, "invalid bucket name", reqInfo, err)
	if h.cfg.ACLEnabled() {
		h.createBucketHandlerACL(w, r)
		return
	}

	key, err := h.bearerTokenIssuerKey(ctx)
	h.createBucketHandlerPolicy(w, r)
}

func (h *handler) parseCommonCreateBucketParams(reqInfo *middleware.ReqInfo, boxData *accessbox.Box, r *http.Request) (*keys.PublicKey, *layer.CreateBucketParams, error) {
	p := &layer.CreateBucketParams{
		Name:                     reqInfo.BucketName,
		Namespace:                reqInfo.Namespace,
		SessionContainerCreation: boxData.Gate.SessionTokenForPut(),
	}

	if p.SessionContainerCreation == nil {
		return nil, nil, fmt.Errorf("%w: couldn't find session token for put", errors.GetAPIError(errors.ErrAccessDenied))
	}

	if err := checkBucketName(reqInfo.BucketName); err != nil {
		return nil, nil, fmt.Errorf("invalid bucket name: %w", err)
	}

	key, err := getTokenIssuerKey(boxData)
	if err != nil {
		h.logAndSendError(w, "couldn't get bearer token signature key", reqInfo, err)
		return nil, nil, fmt.Errorf("couldn't get bearer token signature key: %w", err)
	}

	createParams, err := h.parseLocationConstraint(r)
	if err != nil {
		return nil, nil, fmt.Errorf("could not parse location contraint: %w", err)
	}

	if err = h.setPlacementPolicy(p, reqInfo.Namespace, createParams.LocationConstraint, boxData.Policies); err != nil {
		return nil, nil, fmt.Errorf("couldn't set placement policy: %w", err)
	}

	p.ObjectLockEnabled = isLockEnabled(r.Header)

	return key, p, nil
}

func (h *handler) createBucketHandlerPolicy(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	reqInfo := middleware.GetReqInfo(ctx)

	boxData, err := middleware.GetBoxData(ctx)
	if err != nil {
		h.logAndSendError(w, "get access box from request", reqInfo, err)
		return
	}

	key, p, err := h.parseCommonCreateBucketParams(reqInfo, boxData, r)
	if err != nil {
		h.logAndSendError(w, "parse create bucket params", reqInfo, err)
		return
	}

@@ -790,31 +829,7 @@ func (h *handler) CreateBucketHandler(w http.ResponseWriter, r *http.Request) {
		return
	}

	createParams, err := h.parseLocationConstraint(r)
	if err != nil {
		h.logAndSendError(w, "could not parse body", reqInfo, err)
		return
	}

	var policies []*accessbox.ContainerPolicy
	boxData, err := middleware.GetBoxData(ctx)
	if err == nil {
		policies = boxData.Policies
		p.SessionContainerCreation = boxData.Gate.SessionTokenForPut()
	}

	if p.SessionContainerCreation == nil {
		h.logAndSendError(w, "couldn't find session token for put", reqInfo, errors.GetAPIError(errors.ErrAccessDenied))
		return
	}

	if err = h.setPlacementPolicy(p, reqInfo.Namespace, createParams.LocationConstraint, policies); err != nil {
		h.logAndSendError(w, "couldn't set placement policy", reqInfo, err)
		return
	}

	p.ObjectLockEnabled = isLockEnabled(r.Header)

	p.APEEnabled = true
	bktInfo, err := h.obj.CreateBucket(ctx, p)
	if err != nil {
		h.logAndSendError(w, "could not create bucket", reqInfo, err)

@@ -850,6 +865,75 @@ func (h *handler) CreateBucketHandler(w http.ResponseWriter, r *http.Request) {
	middleware.WriteSuccessResponseHeadersOnly(w)
}

func (h *handler) createBucketHandlerACL(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	reqInfo := middleware.GetReqInfo(ctx)

	boxData, err := middleware.GetBoxData(ctx)
	if err != nil {
		h.logAndSendError(w, "get access box from request", reqInfo, err)
		return
	}

	key, p, err := h.parseCommonCreateBucketParams(reqInfo, boxData, r)
	if err != nil {
		h.logAndSendError(w, "parse create bucket params", reqInfo, err)
		return
	}

	aclPrm := &layer.PutBucketACLParams{SessionToken: boxData.Gate.SessionTokenForSetEACL()}
	if aclPrm.SessionToken == nil {
		h.logAndSendError(w, "couldn't find session token for setEACL", reqInfo, errors.GetAPIError(errors.ErrAccessDenied))
		return
	}

	bktACL, err := parseACLHeaders(r.Header, key)
	if err != nil {
		h.logAndSendError(w, "could not parse bucket acl", reqInfo, err)
		return
	}
	resInfo := &resourceInfo{Bucket: reqInfo.BucketName}

	aclPrm.EACL, err = bucketACLToTable(bktACL, resInfo)
	if err != nil {
		h.logAndSendError(w, "could translate bucket acl to eacl", reqInfo, err)
		return
	}

	bktInfo, err := h.obj.CreateBucket(ctx, p)
	if err != nil {
		h.logAndSendError(w, "could not create bucket", reqInfo, err)
		return
	}
	h.reqLogger(ctx).Info(logs.BucketIsCreated, zap.Stringer("container_id", bktInfo.CID))

	aclPrm.BktInfo = bktInfo
	if err = h.obj.PutBucketACL(r.Context(), aclPrm); err != nil {
		h.logAndSendError(w, "could not put bucket e/ACL", reqInfo, err)
		return
	}

	sp := &layer.PutSettingsParams{
		BktInfo: bktInfo,
		Settings: &data.BucketSettings{
			OwnerKey:   key,
			Versioning: data.VersioningUnversioned,
		},
	}

	if p.ObjectLockEnabled {
		sp.Settings.Versioning = data.VersioningEnabled
	}

	if err = h.obj.PutBucketSettings(ctx, sp); err != nil {
		h.logAndSendError(w, "couldn't save bucket settings", reqInfo, err,
			zap.String("container_id", bktInfo.CID.EncodeToString()))
		return
	}

	middleware.WriteSuccessResponseHeadersOnly(w)
}

const s3ActionPrefix = "s3:"

var (
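One side effect of the refactor above: parseCommonCreateBucketParams no longer writes error responses itself, it wraps the underlying error with %w and returns it, so the calling handler can still log it and map it to an S3 error (note the wrapped errors.GetAPIError(errors.ErrAccessDenied)). A small self-contained sketch of that wrapping pattern with a stand-in error type (names here are illustrative, not the gateway's api/errors package):

```go
package main

import (
	"errors"
	"fmt"
)

// apiError is a toy stand-in for the gateway's S3 API error type.
type apiError struct{ code string }

func (e apiError) Error() string { return e.code }

func main() {
	accessDenied := apiError{code: "AccessDenied"}

	// Wrap the API error and add context, as the refactored helper does,
	// instead of writing the HTTP response at the point of failure.
	err := fmt.Errorf("%w: couldn't find session token for put", accessDenied)

	// The caller can still recover the original API error (and its code)
	// before logging and sending the response.
	var target apiError
	fmt.Println(errors.As(err, &target), target.code) // true AccessDenied
}
```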
@@ -100,6 +100,7 @@ type (
	clientCut                 bool
	maxBufferSizeForPut       uint64
	md5Enabled                bool
	aclEnabled                bool
	namespaceHeader           string
	defaultNamespaces         []string
	authorizedControlAPIKeys  [][]byte

@@ -222,6 +223,7 @@ func newAppSettings(log *Logger, v *viper.Viper, key *keys.PrivateKey) *appSettings {
func (s *appSettings) update(v *viper.Viper, log *zap.Logger, key *keys.PrivateKey) {
	s.updateNamespacesSettings(v, log)
	s.useDefaultXMLNamespace(v.GetBool(cfgKludgeUseDefaultXMLNS))
	s.setACLEnabled(v.GetBool(cfgKludgeACLEnabled))
	s.setBypassContentEncodingInChunks(v.GetBool(cfgKludgeBypassContentEncodingCheckInChunks))
	s.setClientCut(v.GetBool(cfgClientCut))
	s.setBufferMaxSizeForPut(v.GetUint64(cfgBufferMaxSizeForPut))

@@ -352,6 +354,18 @@ func (s *appSettings) setMD5Enabled(md5Enabled bool) {
	s.mu.Unlock()
}

func (s *appSettings) setACLEnabled(enableACL bool) {
	s.mu.Lock()
	s.aclEnabled = enableACL
	s.mu.Unlock()
}

func (s *appSettings) ACLEnabled() bool {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return s.aclEnabled
}

func (s *appSettings) NamespaceHeader() string {
	s.mu.RLock()
	defer s.mu.RUnlock()
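The setter/getter pair above follows the same pattern as the other appSettings fields: the SIGHUP-driven update() flips the value under the write lock while request handlers read it through ACLEnabled(). A minimal, self-contained sketch of that pattern (illustrative names, not the gateway's actual types):

```go
package main

import (
	"fmt"
	"sync"
)

// settings mimics the mutex-guarded, hot-reloadable flag from the diff above.
type settings struct {
	mu         sync.RWMutex
	aclEnabled bool
}

func (s *settings) setACLEnabled(v bool) {
	s.mu.Lock()
	s.aclEnabled = v
	s.mu.Unlock()
}

func (s *settings) ACLEnabled() bool {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return s.aclEnabled
}

func main() {
	s := &settings{}
	s.setACLEnabled(true) // what update() does after re-reading kludge.acl_enabled
	fmt.Println(s.ACLEnabled())
}
```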
@@ -166,6 +166,7 @@ const ( // Settings.
	cfgKludgeUseDefaultXMLNS                    = "kludge.use_default_xmlns"
	cfgKludgeBypassContentEncodingCheckInChunks = "kludge.bypass_content_encoding_check_in_chunks"
	cfgKludgeDefaultNamespaces                  = "kludge.default_namespaces"
	cfgKludgeACLEnabled                         = "kludge.acl_enabled"

	// Web.
	cfgWebReadTimeout = "web.read_timeout"

@@ -717,6 +718,7 @@ func newSettings() *viper.Viper {
	v.SetDefault(cfgKludgeUseDefaultXMLNS, false)
	v.SetDefault(cfgKludgeBypassContentEncodingCheckInChunks, false)
	v.SetDefault(cfgKludgeDefaultNamespaces, defaultDefaultNamespaces)
	v.SetDefault(cfgKludgeACLEnabled, false)

	// web
	v.SetDefault(cfgWebReadHeaderTimeout, defaultReadHeaderTimeout)
@@ -162,6 +162,8 @@ S3_GW_KLUDGE_USE_DEFAULT_XMLNS=false
S3_GW_KLUDGE_BYPASS_CONTENT_ENCODING_CHECK_IN_CHUNKS=false
# Namespaces that should be handled as default
S3_GW_KLUDGE_DEFAULT_NAMESPACES="" "root"
# Enable bucket/object ACL support for newly created buckets.
S3_GW_KLUDGE_ACL_ENABLED=false

S3_GW_TRACING_ENABLED=false
S3_GW_TRACING_ENDPOINT="localhost:4318"
@@ -193,6 +193,8 @@ kludge:
  bypass_content_encoding_check_in_chunks: false
  # Namespaces that should be handled as default
  default_namespaces: [ "", "root" ]
  # Enable bucket/object ACL support for newly created buckets.
  acl_enabled: false

runtime:
  soft_memory_limit: 1gb
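The defaults above keep the flag off. To actually create buckets with the old ACL behavior, an operator would flip it on, either in the YAML config or via the corresponding environment variable; since the parameter supports SIGHUP reload (see the table below), it can be toggled without restarting the gateway:

```yaml
# config.yaml
kludge:
  acl_enabled: true
```

or, equivalently, `S3_GW_KLUDGE_ACL_ENABLED=true` in the environment.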
@@ -597,13 +597,15 @@ kludge:
  use_default_xmlns: false
  bypass_content_encoding_check_in_chunks: false
  default_namespaces: [ "", "root" ]
  acl_enabled: false
```

| Parameter                                  | Type       | SIGHUP reload | Default value | Description                                                                                                                                                                                       |
|--------------------------------------------|------------|---------------|---------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `use_default_xmlns`                        | `bool`     | yes           | false         | Enable using default xml namespace `http://s3.amazonaws.com/doc/2006-03-01/` when parse xml bodies.                                                                                              |
| `bypass_content_encoding_check_in_chunks` | `bool`     | yes           | false         | Use this flag to be able to use [chunked upload approach](https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html) without having `aws-chunked` value in `Content-Encoding` header. |
| `default_namespaces`                       | `[]string` | n/d           | ["","root"]   | Namespaces that should be handled as default.                                                                                                                                                     |
| `use_default_xmlns`                        | `bool`     | yes           | `false`       | Enable using default xml namespace `http://s3.amazonaws.com/doc/2006-03-01/` when parse xml bodies.                                                                                              |
| `bypass_content_encoding_check_in_chunks` | `bool`     | yes           | `false`       | Use this flag to be able to use [chunked upload approach](https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html) without having `aws-chunked` value in `Content-Encoding` header. |
| `default_namespaces`                       | `[]string` | yes           | `["","root"]` | Namespaces that should be handled as default.                                                                                                                                                     |
| `acl_enabled`                              | `bool`     | yes           | `false`       | Enable bucket/object ACL support for newly created buckets.                                                                                                                                       |

# `runtime` section

Contains runtime parameters.