[#147] Add Kludge profiles #568

Merged
alexvanin merged 1 commit from pogpp/frostfs-s3-gw:feature/147_kludge_profiles into master 2024-12-13 11:25:08 +00:00
19 changed files with 126 additions and 29 deletions

View file

@ -32,11 +32,11 @@ type (
PlacementPolicy(namespace, constraint string) (netmap.PlacementPolicy, bool)
CopiesNumbers(namespace, constraint string) ([]uint32, bool)
DefaultCopiesNumbers(namespace string) []uint32
NewXMLDecoder(io.Reader) *xml.Decoder
NewXMLDecoder(reader io.Reader, agent string) *xml.Decoder
DefaultMaxAge() int
ResolveZoneList() []string
IsResolveListAllow() bool
BypassContentEncodingInChunks() bool
BypassContentEncodingInChunks(agent string) bool
MD5Enabled() bool
RetryMaxAttempts() int
RetryMaxBackoff() time.Duration

View file

@ -55,6 +55,7 @@ func (h *handler) PutBucketCorsHandler(w http.ResponseWriter, r *http.Request) {
BktInfo: bktInfo,
Reader: r.Body,
NewDecoder: h.cfg.NewXMLDecoder,
UserAgent: r.UserAgent(),
}
p.CopiesNumbers, err = h.pickCopiesNumbers(parseMetadata(r), reqInfo.Namespace, bktInfo.LocationConstraint)

View file

@ -147,7 +147,7 @@ func (h *handler) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Re
// Unmarshal list of keys to be deleted.
requested := &DeleteObjectsRequest{}
if err := h.cfg.NewXMLDecoder(r.Body).Decode(requested); err != nil {
if err := h.cfg.NewXMLDecoder(r.Body, r.UserAgent()).Decode(requested); err != nil {
h.logAndSendError(ctx, w, "couldn't decode body", reqInfo, fmt.Errorf("%w: %s", errors.GetAPIError(errors.ErrMalformedXML), err.Error()))
return
}

View file

@ -97,11 +97,11 @@ func (c *configMock) DefaultCopiesNumbers(_ string) []uint32 {
return c.defaultCopiesNumbers
}
func (c *configMock) NewXMLDecoder(r io.Reader) *xml.Decoder {
func (c *configMock) NewXMLDecoder(r io.Reader, _ string) *xml.Decoder {
return xml.NewDecoder(r)
}
func (c *configMock) BypassContentEncodingInChunks() bool {
func (c *configMock) BypassContentEncodingInChunks(_ string) bool {
return c.bypassContentEncodingInChunks
}

View file

@ -69,7 +69,7 @@ func (h *handler) PutBucketLifecycleHandler(w http.ResponseWriter, r *http.Reque
}
cfg := new(data.LifecycleConfiguration)
if err = h.cfg.NewXMLDecoder(tee).Decode(cfg); err != nil {
if err = h.cfg.NewXMLDecoder(tee, r.UserAgent()).Decode(cfg); err != nil {
h.logAndSendError(ctx, w, "could not decode body", reqInfo, fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrMalformedXML), err.Error()))
return
}

View file

@ -42,7 +42,7 @@ func (h *handler) PutBucketObjectLockConfigHandler(w http.ResponseWriter, r *htt
}
lockingConf := &data.ObjectLockConfiguration{}
if err = h.cfg.NewXMLDecoder(r.Body).Decode(lockingConf); err != nil {
if err = h.cfg.NewXMLDecoder(r.Body, r.UserAgent()).Decode(lockingConf); err != nil {
h.logAndSendError(ctx, w, "couldn't parse locking configuration", reqInfo, err)
return
}
@ -124,7 +124,7 @@ func (h *handler) PutObjectLegalHoldHandler(w http.ResponseWriter, r *http.Reque
}
legalHold := &data.LegalHold{}
if err = h.cfg.NewXMLDecoder(r.Body).Decode(legalHold); err != nil {
if err = h.cfg.NewXMLDecoder(r.Body, r.UserAgent()).Decode(legalHold); err != nil {
h.logAndSendError(ctx, w, "couldn't parse legal hold configuration", reqInfo, err)
return
}
@ -214,7 +214,7 @@ func (h *handler) PutObjectRetentionHandler(w http.ResponseWriter, r *http.Reque
}
retention := &data.Retention{}
if err = h.cfg.NewXMLDecoder(r.Body).Decode(retention); err != nil {
if err = h.cfg.NewXMLDecoder(r.Body, r.UserAgent()).Decode(retention); err != nil {
h.logAndSendError(ctx, w, "couldn't parse object retention", reqInfo, err)
return
}

View file

@ -401,7 +401,7 @@ func (h *handler) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.
)
reqBody := new(CompleteMultipartUpload)
if err = h.cfg.NewXMLDecoder(r.Body).Decode(reqBody); err != nil {
if err = h.cfg.NewXMLDecoder(r.Body, r.UserAgent()).Decode(reqBody); err != nil {
h.logAndSendError(ctx, w, "could not read complete multipart upload xml", reqInfo,
fmt.Errorf("%w: %s", errors.GetAPIError(errors.ErrMalformedXML), err.Error()), additional...)
return

View file

@ -332,7 +332,9 @@ func (h *handler) getBodyReader(r *http.Request) (io.ReadCloser, error) {
}
r.Header.Set(api.ContentEncoding, strings.Join(resultContentEncoding, ","))
if !chunkedEncoding && !h.cfg.BypassContentEncodingInChunks() {
defBypass := h.cfg.BypassContentEncodingInChunks(r.UserAgent())
if !chunkedEncoding && !defBypass {
return nil, fmt.Errorf("%w: request is not chunk encoded, encodings '%s'",
apierr.GetAPIError(apierr.ErrInvalidEncodingMethod), strings.Join(encodings, ","))
}
@ -462,7 +464,7 @@ func (h *handler) PostObject(w http.ResponseWriter, r *http.Request) {
if tagging := auth.MultipartFormValue(r, "tagging"); tagging != "" {
buffer := bytes.NewBufferString(tagging)
tags := new(data.Tagging)
if err = h.cfg.NewXMLDecoder(buffer).Decode(tags); err != nil {
if err = h.cfg.NewXMLDecoder(buffer, r.UserAgent()).Decode(tags); err != nil {
h.logAndSendError(ctx, w, "could not decode tag set", reqInfo,
fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrMalformedXML), err.Error()))
return
@ -1053,7 +1055,7 @@ func (h *handler) parseLocationConstraint(r *http.Request) (*createBucketParams,
}
params := new(createBucketParams)
if err := h.cfg.NewXMLDecoder(r.Body).Decode(params); err != nil {
if err := h.cfg.NewXMLDecoder(r.Body, r.UserAgent()).Decode(params); err != nil {
return nil, fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrMalformedXML), err.Error())
}
return params, nil

View file

@ -14,7 +14,7 @@ func (h *handler) PutBucketVersioningHandler(w http.ResponseWriter, r *http.Requ
reqInfo := middleware.GetReqInfo(ctx)
configuration := new(VersioningConfiguration)
if err := h.cfg.NewXMLDecoder(r.Body).Decode(configuration); err != nil {
if err := h.cfg.NewXMLDecoder(r.Body, r.UserAgent()).Decode(configuration); err != nil {
h.logAndSendError(ctx, w, "couldn't decode versioning configuration", reqInfo, errors.GetAPIError(errors.ErrIllegalVersioningConfigurationException))
return
}

View file

@ -28,7 +28,7 @@ func (n *Layer) PutBucketCORS(ctx context.Context, p *PutCORSParams) error {
cors = &data.CORSConfiguration{}
)
if err := p.NewDecoder(tee).Decode(cors); err != nil {
if err := p.NewDecoder(tee, p.UserAgent).Decode(cors); err != nil {
return fmt.Errorf("xml decode cors: %w", err)
}

View file

@ -149,7 +149,8 @@ type (
BktInfo *data.BucketInfo
Reader io.Reader
CopiesNumbers []uint32
NewDecoder func(io.Reader) *xml.Decoder
NewDecoder func(io.Reader, string) *xml.Decoder
UserAgent string
}
// CopyObjectParams stores object copy request parameters.

View file

@ -61,7 +61,7 @@ type FrostFSIDInformer interface {
}
type XMLDecoder interface {
NewXMLDecoder(io.Reader) *xml.Decoder
NewXMLDecoder(io.Reader, string) *xml.Decoder
}
type ResourceTagging interface {
@ -476,7 +476,7 @@ func determineRequestTags(r *http.Request, decoder XMLDecoder, op string) (map[s
if strings.HasSuffix(op, PutObjectTaggingOperation) || strings.HasSuffix(op, PutBucketTaggingOperation) {
tagging := new(data.Tagging)
if err := decoder.NewXMLDecoder(r.Body).Decode(tagging); err != nil {
if err := decoder.NewXMLDecoder(r.Body, r.UserAgent()).Decode(tagging); err != nil {
return nil, fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrMalformedXML), err.Error())
}
GetReqInfo(r.Context()).Tagging = tagging

View file

@ -151,7 +151,7 @@ func (f *frostFSIDMock) GetUserGroupIDsAndClaims(util.Uint160) ([]string, map[st
type xmlMock struct {
}
func (m *xmlMock) NewXMLDecoder(r io.Reader) *xml.Decoder {
func (m *xmlMock) NewXMLDecoder(r io.Reader, _ string) *xml.Decoder {
return xml.NewDecoder(r)
}

View file

@ -112,6 +112,7 @@ type (
namespaces Namespaces
defaultXMLNS bool
bypassContentEncodingInChunks bool
kludgeProfiles map[string]*KludgeParams
clientCut bool
maxBufferSizeForPut uint64
md5Enabled bool
@ -298,6 +299,7 @@ func (s *appSettings) update(v *viper.Viper, log *zap.Logger) {
vhsNamespacesEnabled := s.prepareVHSNamespaces(v, log, defaultNamespaces)
defaultXMLNS := v.GetBool(cfgKludgeUseDefaultXMLNS)
bypassContentEncodingInChunks := v.GetBool(cfgKludgeBypassContentEncodingCheckInChunks)
kludgeProfiles := fetchKludgeProfiles(v)
clientCut := v.GetBool(cfgClientCut)
maxBufferSizeForPut := v.GetUint64(cfgBufferMaxSizeForPut)
md5Enabled := v.GetBool(cfgMD5Enabled)
@ -334,6 +336,7 @@ func (s *appSettings) update(v *viper.Viper, log *zap.Logger) {
s.namespaces = nsConfig.Namespaces
s.defaultXMLNS = defaultXMLNS
s.bypassContentEncodingInChunks = bypassContentEncodingInChunks
s.kludgeProfiles = kludgeProfiles
s.clientCut = clientCut
s.maxBufferSizeForPut = maxBufferSizeForPut
s.md5Enabled = md5Enabled
@ -395,9 +398,17 @@ func (s *appSettings) VHSNamespacesEnabled() map[string]bool {
return s.vhsNamespacesEnabled
}
func (s *appSettings) BypassContentEncodingInChunks() bool {
func (s *appSettings) BypassContentEncodingInChunks(agent string) bool {
s.mu.RLock()
defer s.mu.RUnlock()
profiles := s.kludgeProfiles
for p := range profiles {
if strings.Contains(agent, p) {
return profiles[p].BypassContentEncodingCheckInChunks
}
}
return s.bypassContentEncodingInChunks
}
@ -448,7 +459,7 @@ func (s *appSettings) LogHTTPConfig() s3middleware.LogHTTPConfig {
return s.httpLogging
}
func (s *appSettings) NewXMLDecoder(r io.Reader) *xml.Decoder {
func (s *appSettings) NewXMLDecoder(r io.Reader, agent string) *xml.Decoder {
dec := xml.NewDecoder(r)
dec.CharsetReader = func(charset string, reader io.Reader) (io.Reader, error) {
enc, err := ianaindex.IANA.Encoding(charset)
@ -459,10 +470,19 @@ func (s *appSettings) NewXMLDecoder(r io.Reader) *xml.Decoder {
}
s.mu.RLock()
defer s.mu.RUnlock()
for p := range s.kludgeProfiles {
if strings.Contains(agent, p) {
if s.kludgeProfiles[p].UseDefaultXMLNS {
dec.DefaultSpace = awsDefaultNamespace
}
return dec
}
}
if s.defaultXMLNS {
dec.DefaultSpace = awsDefaultNamespace
}
s.mu.RUnlock()
return dec
}
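
For reference, a minimal standalone sketch (not gateway code; names are illustrative) of the substring lookup that `BypassContentEncodingInChunks` and `NewXMLDecoder` now perform against the request `User-Agent`:

```go
package main

import (
	"fmt"
	"strings"
)

// profile is a trimmed stand-in for KludgeParams.
type profile struct{ UseDefaultXMLNS bool }

// lookupXMLNS mirrors the loops above: the first configured profile whose key
// is contained in the request User-Agent wins; otherwise the global default applies.
func lookupXMLNS(profiles map[string]profile, agent string, globalDefault bool) bool {
	for substr, p := range profiles {
		if strings.Contains(agent, substr) {
			return p.UseDefaultXMLNS
		}
	}
	return globalDefault
}

func main() {
	profiles := map[string]profile{"aws-cli": {UseDefaultXMLNS: true}}
	fmt.Println(lookupXMLNS(profiles, "aws-cli/2.15.30 Python/3.11", false)) // true: profile matched
	fmt.Println(lookupXMLNS(profiles, "aws-sdk-go/1.44.0", false))           // false: falls back to global setting
}
```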

View file

@ -79,6 +79,9 @@ const (
defaultTombstoneLifetime = 10
defaultTombstoneMembersSize = 100
defaultTombstoneWorkerPoolSize = 100
useDefaultXmlns = "use_default_xmlns"
bypassContentEncodingCheckInChunks = "bypass_content_encoding_check_in_chunks"
)
var (
@ -198,6 +201,7 @@ const ( // Settings.
cfgKludgeUseDefaultXMLNS = "kludge.use_default_xmlns"
cfgKludgeBypassContentEncodingCheckInChunks = "kludge.bypass_content_encoding_check_in_chunks"
cfgKludgeDefaultNamespaces = "kludge.default_namespaces"
cfgKludgeProfile = "kludge.profile"
// Web.
cfgWebReadTimeout = "web.read_timeout"
cfgWebReadHeaderTimeout = "web.read_header_timeout"
@ -567,6 +571,38 @@ func fetchDefaultCopiesNumbers(l *zap.Logger, v *viper.Viper) []uint32 {
return result
}
type KludgeParams struct {
UseDefaultXMLNS bool
BypassContentEncodingCheckInChunks bool
}
func fetchKludgeProfiles(v *viper.Viper) map[string]*KludgeParams {
kludgeProfiles := make(map[string]*KludgeParams)
for i := 0; ; i++ {
key := cfgKludgeProfile + "." + strconv.Itoa(i) + "."
userAgent := v.GetString(key + "user_agent")
if userAgent == "" {
break
}
kludgeParam := &KludgeParams{
UseDefaultXMLNS: v.GetBool(cfgKludgeUseDefaultXMLNS),
BypassContentEncodingCheckInChunks: v.GetBool(cfgKludgeBypassContentEncodingCheckInChunks),
}
if v.IsSet(key + useDefaultXmlns) {
kludgeParam.UseDefaultXMLNS = v.GetBool(key + useDefaultXmlns)
}
if v.IsSet(key + bypassContentEncodingCheckInChunks) {
kludgeParam.BypassContentEncodingCheckInChunks = v.GetBool(key + bypassContentEncodingCheckInChunks)
}
kludgeProfiles[userAgent] = kludgeParam
}
return kludgeProfiles
}
func fetchCopiesNumbers(l *zap.Logger, v *viper.Viper) map[string][]uint32 {
copiesNums := make(map[string][]uint32)
for i := 0; ; i++ {

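
Profiles are read at consecutive indices starting from 0; reading stops at the first index without a `user_agent`, and per-profile flags that are not set fall back to the top-level `kludge` values. A minimal standalone sketch of that key layout, assuming the keys are set directly on a `viper` instance (illustrative, not gateway code):

```go
package main

import (
	"fmt"
	"strconv"

	"github.com/spf13/viper"
)

func main() {
	v := viper.New()
	// Global kludge defaults plus a single profile, using the same dotted keys
	// that fetchKludgeProfiles resolves from YAML or environment config.
	v.Set("kludge.use_default_xmlns", false)
	v.Set("kludge.bypass_content_encoding_check_in_chunks", true)
	v.Set("kludge.profile.0.user_agent", "aws-cli")
	v.Set("kludge.profile.0.use_default_xmlns", true)

	for i := 0; ; i++ {
		key := "kludge.profile." + strconv.Itoa(i) + "."
		agent := v.GetString(key + "user_agent")
		if agent == "" {
			break // stop at the first index without a user_agent
		}
		useNS := v.GetBool("kludge.use_default_xmlns") // global default
		if v.IsSet(key + "use_default_xmlns") {
			useNS = v.GetBool(key + "use_default_xmlns") // per-profile override
		}
		bypass := v.GetBool("kludge.bypass_content_encoding_check_in_chunks")
		if v.IsSet(key + "bypass_content_encoding_check_in_chunks") {
			bypass = v.GetBool(key + "bypass_content_encoding_check_in_chunks")
		}
		fmt.Printf("profile %q: use_default_xmlns=%v bypass=%v\n", agent, useNS, bypass)
	}
	// Output: profile "aws-cli": use_default_xmlns=true bypass=true
}
```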
View file

@ -101,7 +101,7 @@ func TestDefaultNamespace(t *testing.T) {
} {
t.Run("", func(t *testing.T) {
model := new(handler.CompleteMultipartUpload)
err := tc.settings.NewXMLDecoder(bytes.NewBufferString(tc.input)).Decode(model)
err := tc.settings.NewXMLDecoder(bytes.NewBufferString(tc.input), "test").Decode(model)
if tc.err {
require.Error(t, err)
} else {

View file

@ -186,6 +186,10 @@ S3_GW_KLUDGE_USE_DEFAULT_XMLNS=false
S3_GW_KLUDGE_BYPASS_CONTENT_ENCODING_CHECK_IN_CHUNKS=false
# Namespaces that should be handled as default
S3_GW_KLUDGE_DEFAULT_NAMESPACES="" "root"
# Kludge profiles
S3_GW_KLUDGE_PROFILE_0_USER_AGENT=aws-cli
S3_GW_KLUDGE_PROFILE_0_USE_DEFAULT_XMLNS=true
S3_GW_KLUDGE_PROFILE_0_BYPASS_CONTENT_ENCODING_CHECK_IN_CHUNKS=true
S3_GW_TRACING_ENABLED=false
S3_GW_TRACING_ENDPOINT="localhost:4318"

View file

@ -226,6 +226,13 @@ kludge:
bypass_content_encoding_check_in_chunks: false
# Namespaces that should be handled as default
default_namespaces: [ "", "root" ]
# The profile section overrides the defaults above based on the request User-Agent
profile:
- user_agent: aws-cli
use_default_xmlns: false
- user_agent: aws-sdk-go
use_default_xmlns: true
bypass_content_encoding_check_in_chunks: false
runtime:
soft_memory_limit: 1gb

View file

@ -626,20 +626,46 @@ resolve_bucket:
# `kludge` section
Workarounds for non-standard use cases.
Workarounds for non-standard use cases. The `profile` subsection makes it possible to override these settings for specific user agents.
```yaml
kludge:
use_default_xmlns: false
bypass_content_encoding_check_in_chunks: false
default_namespaces: [ "", "root" ]
profile:
- user_agent: aws-cli
use_default_xmlns: false
- user_agent: aws-sdk-go
use_default_xmlns: true
bypass_content_encoding_check_in_chunks: false
```
| Parameter | Type | SIGHUP reload | Default value | Description |
|-------------------------------------------|------------|---------------|---------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `use_default_xmlns` | `bool` | yes | `false` | Enable using default xml namespace `http://s3.amazonaws.com/doc/2006-03-01/` when parse xml bodies. |
| `bypass_content_encoding_check_in_chunks` | `bool` | yes | `false` | Use this flag to be able to use [chunked upload approach](https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html) without having `aws-chunked` value in `Content-Encoding` header. |
| `default_namespaces` | `[]string` | yes | `["","root"]` | Namespaces that should be handled as default. |
| Parameter | Type | SIGHUP reload | Default value | Description |
|-------------------------------------------|----------------------------------|---------------|---------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `use_default_xmlns` | `bool` | yes | `false` | Enable using the default XML namespace `http://s3.amazonaws.com/doc/2006-03-01/` when parsing XML bodies. |
| `bypass_content_encoding_check_in_chunks` | `bool` | yes | `false` | Allow the [chunked upload approach](https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html) to be used without the `aws-chunked` value in the `Content-Encoding` header. |
| `default_namespaces` | `[]string` | yes | `["","root"]` | Namespaces that should be handled as default. |
| `profile` | [[]Profile](#profile-subsection) | yes | | An array of per-user-agent profiles that override the settings above. |
#### `profile` subsection
```yaml
profile:
- user_agent: aws-cli
use_default_xmlns: false
- user_agent: aws-sdk-go
use_default_xmlns: true
bypass_content_encoding_check_in_chunks: false
```
| Parameter | Type | SIGHUP reload | Default value | Description |
|-------------------------------------------|----------|---------------|---------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `user_agent` | `string` | yes | | Substring matched against the `User-Agent` request header; the profile applies to requests whose `User-Agent` contains it. |
| `use_default_xmlns` | `bool` | yes | | Enable using the default XML namespace for this profile. If unset, the top-level `kludge.use_default_xmlns` value is used. |
| `bypass_content_encoding_check_in_chunks` | `bool` | yes | | Allow the [chunked upload approach](https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html) without the `aws-chunked` value in the `Content-Encoding` header for this profile. If unset, the top-level `kludge.bypass_content_encoding_check_in_chunks` value is used. |
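
For example, with the `profile` config shown above: a request whose `User-Agent` contains `aws-sdk-go` is parsed with the default XML namespace enabled while the chunk-encoding check stays strict, a request whose `User-Agent` contains `aws-cli` keeps the default namespace disabled, and requests matching no profile use the top-level `kludge` settings. Matching is a plain substring check, so longer agent strings such as `aws-cli/2.15.30` still match the `aws-cli` profile.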
# `runtime` section
Contains runtime parameters.