[#221] Expand xmlns field ignore

Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
Marina Biryukova 2023-10-09 15:34:51 +03:00
parent 10a03faeb4
commit 298662df9d
22 changed files with 76 additions and 79 deletions

View file

@@ -45,6 +45,7 @@ This document outlines major changes between releases.
 - Complete multipart upload doesn't unnecessary copy now. Thus, the total time of multipart upload was reduced by 2 times (#63)
 - Use gate key to form object owner (#175)
 - Apply placement policies and copies if there is at least one valid value (#168)
+- Generalise config param `use_default_xmlns_for_complete_multipart` to `use_default_xmlns` so that the default xmlns is used for all requests (#221)
 
 ### Removed
 - Drop `tree.service` param (now endpoints from `peers` section are used) (#133)
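
For reference: the kludge works by setting `DefaultSpace` on the `encoding/xml` decoder, which is what the `NewXMLDecoder` method in this commit does for every handler. A minimal, self-contained sketch of that behaviour (not part of this commit; `VersioningConfiguration` here is a simplified stand-in for the gateway's request types):

```go
package main

import (
	"encoding/xml"
	"fmt"
	"strings"
)

const awsDefaultNamespace = "http://s3.amazonaws.com/doc/2006-03-01/"

// Simplified stand-in for one of the gateway's XML request types.
type VersioningConfiguration struct {
	XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ VersioningConfiguration"`
	Status  string   `xml:"Status"`
}

func main() {
	// Client sends the body without any xmlns attribute.
	body := `<VersioningConfiguration><Status>Enabled</Status></VersioningConfiguration>`

	dec := xml.NewDecoder(strings.NewReader(body))
	dec.DefaultSpace = awsDefaultNamespace // what use_default_xmlns enables

	var conf VersioningConfiguration
	err := dec.Decode(&conf)
	fmt.Println(err, conf.Status) // <nil> Enabled; without DefaultSpace, Decode fails with a namespace mismatch
}
```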

View file

@@ -1,7 +1,10 @@
 package data
 
+import "encoding/xml"
+
 type (
 	NotificationConfiguration struct {
+		XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ NotificationConfiguration" json:"-"`
 		QueueConfigurations []QueueConfiguration `xml:"QueueConfiguration" json:"QueueConfigurations"`
 		// Not supported topics
 		TopicConfigurations []TopicConfiguration `xml:"TopicConfiguration" json:"TopicConfigurations"`

View file

@@ -6,7 +6,6 @@ import (
 	"crypto/elliptic"
 	"encoding/hex"
 	"encoding/json"
-	"encoding/xml"
 	stderrors "errors"
 	"fmt"
 	"net/http"
@@ -304,7 +303,7 @@ func (h *handler) PutBucketACLHandler(w http.ResponseWriter, r *http.Request) {
 			h.logAndSendError(w, "could not parse bucket acl", reqInfo, err)
 			return
 		}
-	} else if err = xml.NewDecoder(r.Body).Decode(list); err != nil {
+	} else if err = h.cfg.NewXMLDecoder(r.Body).Decode(list); err != nil {
 		h.logAndSendError(w, "could not parse bucket acl", reqInfo, errors.GetAPIError(errors.ErrMalformedXML))
 		return
 	}
@@ -441,7 +440,7 @@ func (h *handler) PutObjectACLHandler(w http.ResponseWriter, r *http.Request) {
 			h.logAndSendError(w, "could not parse bucket acl", reqInfo, err)
 			return
 		}
-	} else if err = xml.NewDecoder(r.Body).Decode(list); err != nil {
+	} else if err = h.cfg.NewXMLDecoder(r.Body).Decode(list); err != nil {
 		h.logAndSendError(w, "could not parse bucket acl", reqInfo, errors.GetAPIError(errors.ErrMalformedXML))
 		return
 	}

View file

@@ -35,7 +35,7 @@ type (
 		PlacementPolicy(string) (netmap.PlacementPolicy, bool)
 		CopiesNumbers(string) ([]uint32, bool)
 		DefaultCopiesNumbers() []uint32
-		NewCompleteMultipartDecoder(io.Reader) *xml.Decoder
+		NewXMLDecoder(io.Reader) *xml.Decoder
 		DefaultMaxAge() int
 		NotificatorEnabled() bool
 		ResolveZoneList() []string

View file

@@ -50,8 +50,9 @@ func (h *handler) PutBucketCorsHandler(w http.ResponseWriter, r *http.Request) {
 	}
 
 	p := &layer.PutCORSParams{
 		BktInfo: bktInfo,
 		Reader: r.Body,
+		NewDecoder: h.cfg.NewXMLDecoder,
 	}
 
 	p.CopiesNumbers, err = h.pickCopiesNumbers(parseMetadata(r), bktInfo.LocationConstraint)

View file

@@ -24,8 +24,9 @@ const maxObjectsToDelete = 1000
 
 // DeleteObjectsRequest -- xml carrying the object key names which should be deleted.
 type DeleteObjectsRequest struct {
+	XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Delete" json:"-"`
 	// Element to enable quiet mode for the request
-	Quiet bool
+	Quiet bool `xml:"Quiet,omitempty"`
 	// List of objects to be deleted
 	Objects []ObjectIdentifier `xml:"Object"`
 }
@@ -177,7 +178,7 @@ func (h *handler) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Request) {
 
 	// Unmarshal list of keys to be deleted.
 	requested := &DeleteObjectsRequest{}
-	if err := xml.NewDecoder(r.Body).Decode(requested); err != nil {
+	if err := h.cfg.NewXMLDecoder(r.Body).Decode(requested); err != nil {
 		h.logAndSendError(w, "couldn't decode body", reqInfo, errors.GetAPIError(errors.ErrMalformedXML))
 		return
 	}
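
An illustrative sketch (not from the repository) of why the namespace-qualified `XMLName` and the configurable decoder go together: a body in a foreign namespace is still rejected, while a body with no `xmlns` at all is accepted once the default namespace is applied. The trimmed struct below mirrors the diff above:

```go
package main

import (
	"encoding/xml"
	"fmt"
	"strings"
)

// Trimmed copy of DeleteObjectsRequest, for illustration only.
type DeleteObjectsRequest struct {
	XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Delete"`
	Quiet   bool     `xml:"Quiet,omitempty"`
	Objects []struct {
		Key string `xml:"Key"`
	} `xml:"Object"`
}

// decode mimics a handler with use_default_xmlns enabled.
func decode(body string) error {
	dec := xml.NewDecoder(strings.NewReader(body))
	dec.DefaultSpace = "http://s3.amazonaws.com/doc/2006-03-01/"
	return dec.Decode(new(DeleteObjectsRequest))
}

func main() {
	bare := `<Delete><Object><Key>photo.jpg</Key></Object><Quiet>true</Quiet></Delete>`
	foreign := `<Delete xmlns="http://example.com/other"><Object><Key>photo.jpg</Key></Object></Delete>`

	fmt.Println(decode(bare))    // <nil>: missing xmlns is filled in by DefaultSpace
	fmt.Println(decode(foreign)) // error: namespace does not match the struct's XMLName
}
```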

View file

@@ -82,7 +82,7 @@ func (c *configMock) DefaultCopiesNumbers() []uint32 {
 	return c.defaultCopiesNumbers
 }
 
-func (c *configMock) NewCompleteMultipartDecoder(r io.Reader) *xml.Decoder {
+func (c *configMock) NewXMLDecoder(r io.Reader) *xml.Decoder {
 	return xml.NewDecoder(r)
 }

View file

@@ -2,7 +2,6 @@ package handler
 
 import (
 	"context"
-	"encoding/xml"
 	"fmt"
 	"net/http"
 	"strconv"
@@ -42,7 +41,7 @@ func (h *handler) PutBucketObjectLockConfigHandler(w http.ResponseWriter, r *http.Request) {
 	}
 
 	lockingConf := &data.ObjectLockConfiguration{}
-	if err = xml.NewDecoder(r.Body).Decode(lockingConf); err != nil {
+	if err = h.cfg.NewXMLDecoder(r.Body).Decode(lockingConf); err != nil {
 		h.logAndSendError(w, "couldn't parse locking configuration", reqInfo, err)
 		return
 	}
@@ -122,7 +121,7 @@ func (h *handler) PutObjectLegalHoldHandler(w http.ResponseWriter, r *http.Request) {
 	}
 
 	legalHold := &data.LegalHold{}
-	if err = xml.NewDecoder(r.Body).Decode(legalHold); err != nil {
+	if err = h.cfg.NewXMLDecoder(r.Body).Decode(legalHold); err != nil {
 		h.logAndSendError(w, "couldn't parse legal hold configuration", reqInfo, err)
 		return
 	}
@@ -210,7 +209,7 @@ func (h *handler) PutObjectRetentionHandler(w http.ResponseWriter, r *http.Request) {
 	}
 
 	retention := &data.Retention{}
-	if err = xml.NewDecoder(r.Body).Decode(retention); err != nil {
+	if err = h.cfg.NewXMLDecoder(r.Body).Decode(retention); err != nil {
 		h.logAndSendError(w, "couldn't parse object retention", reqInfo, err)
 		return
 	}

View file

@@ -406,7 +406,7 @@ func (h *handler) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
 	)
 
 	reqBody := new(CompleteMultipartUpload)
-	if err = h.cfg.NewCompleteMultipartDecoder(r.Body).Decode(reqBody); err != nil {
+	if err = h.cfg.NewXMLDecoder(r.Body).Decode(reqBody); err != nil {
 		h.logAndSendError(w, "could not read complete multipart upload xml", reqInfo,
 			errors.GetAPIError(errors.ErrMalformedXML), additional...)
 		return

View file

@@ -2,7 +2,6 @@ package handler
 
 import (
 	"context"
-	"encoding/xml"
 	"fmt"
 	"net/http"
 	"strings"
@@ -26,11 +25,6 @@
 		User string
 		Time time.Time
 	}
-
-	NotificationConfiguration struct {
-		XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ NotificationConfiguation"`
-		NotificationConfiguration data.NotificationConfiguration
-	}
 )
 
 const (
@@ -105,7 +99,7 @@ func (h *handler) PutBucketNotificationHandler(w http.ResponseWriter, r *http.Request) {
 	}
 
 	conf := &data.NotificationConfiguration{}
-	if err = xml.NewDecoder(r.Body).Decode(conf); err != nil {
+	if err = h.cfg.NewXMLDecoder(r.Body).Decode(conf); err != nil {
 		h.logAndSendError(w, "couldn't decode notification configuration", reqInfo, errors.GetAPIError(errors.ErrMalformedXML))
 		return
 	}

View file

@@ -433,7 +433,7 @@ func (h *handler) PostObject(w http.ResponseWriter, r *http.Request) {
 
 	if tagging := auth.MultipartFormValue(r, "tagging"); tagging != "" {
 		buffer := bytes.NewBufferString(tagging)
-		tagSet, err = readTagSet(buffer)
+		tagSet, err = h.readTagSet(buffer)
 		if err != nil {
 			h.logAndSendError(w, "could not read tag set", reqInfo, err)
 			return
@@ -742,7 +742,7 @@ func (h *handler) CreateBucketHandler(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 
-	createParams, err := parseLocationConstraint(r)
+	createParams, err := h.parseLocationConstraint(r)
 	if err != nil {
 		h.logAndSendError(w, "could not parse body", reqInfo, err)
 		return
@@ -859,13 +859,13 @@ func isAlphaNum(char int32) bool {
 	return 'a' <= char && char <= 'z' || '0' <= char && char <= '9'
 }
 
-func parseLocationConstraint(r *http.Request) (*createBucketParams, error) {
+func (h *handler) parseLocationConstraint(r *http.Request) (*createBucketParams, error) {
 	if r.ContentLength == 0 {
 		return new(createBucketParams), nil
 	}
 
 	params := new(createBucketParams)
-	if err := xml.NewDecoder(r.Body).Decode(params); err != nil {
+	if err := h.cfg.NewXMLDecoder(r.Body).Decode(params); err != nil {
 		return nil, errors.GetAPIError(errors.ErrMalformedXML)
 	}
 
 	return params, nil

View file

@@ -1,7 +1,6 @@
 package handler
 
 import (
-	"encoding/xml"
 	"io"
 	"net/http"
 	"sort"
@@ -29,7 +28,7 @@ func (h *handler) PutObjectTaggingHandler(w http.ResponseWriter, r *http.Request) {
 	ctx := r.Context()
 	reqInfo := middleware.GetReqInfo(ctx)
 
-	tagSet, err := readTagSet(r.Body)
+	tagSet, err := h.readTagSet(r.Body)
 	if err != nil {
 		h.logAndSendError(w, "could not read tag set", reqInfo, err)
 		return
@@ -153,7 +152,7 @@ func (h *handler) DeleteObjectTaggingHandler(w http.ResponseWriter, r *http.Request) {
 func (h *handler) PutBucketTaggingHandler(w http.ResponseWriter, r *http.Request) {
 	reqInfo := middleware.GetReqInfo(r.Context())
 
-	tagSet, err := readTagSet(r.Body)
+	tagSet, err := h.readTagSet(r.Body)
 	if err != nil {
 		h.logAndSendError(w, "could not read tag set", reqInfo, err)
 		return
@@ -208,9 +207,9 @@ func (h *handler) DeleteBucketTaggingHandler(w http.ResponseWriter, r *http.Request) {
 	w.WriteHeader(http.StatusNoContent)
 }
 
-func readTagSet(reader io.Reader) (map[string]string, error) {
+func (h *handler) readTagSet(reader io.Reader) (map[string]string, error) {
 	tagging := new(Tagging)
-	if err := xml.NewDecoder(reader).Decode(tagging); err != nil {
+	if err := h.cfg.NewXMLDecoder(reader).Decode(tagging); err != nil {
 		return nil, errors.GetAPIError(errors.ErrMalformedXML)
 	}

View file

@@ -1,7 +1,6 @@
 package handler
 
 import (
-	"encoding/xml"
 	"net/http"
 
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
@@ -14,7 +13,7 @@ func (h *handler) PutBucketVersioningHandler(w http.ResponseWriter, r *http.Request) {
 	reqInfo := middleware.GetReqInfo(r.Context())
 
 	configuration := new(VersioningConfiguration)
-	if err := xml.NewDecoder(r.Body).Decode(configuration); err != nil {
+	if err := h.cfg.NewXMLDecoder(r.Body).Decode(configuration); err != nil {
 		h.logAndSendError(w, "couldn't decode versioning configuration", reqInfo, errors.GetAPIError(errors.ErrIllegalVersioningConfigurationException))
 		return
 	}

View file

@@ -3,7 +3,6 @@ package layer
 import (
 	"bytes"
 	"context"
-	"encoding/xml"
 	errorsStd "errors"
 	"fmt"
 	"io"
@@ -25,7 +24,7 @@ func (n *layer) PutBucketCORS(ctx context.Context, p *PutCORSParams) error {
 		cors = &data.CORSConfiguration{}
 	)
 
-	if err := xml.NewDecoder(tee).Decode(cors); err != nil {
+	if err := p.NewDecoder(tee).Decode(cors); err != nil {
 		return fmt.Errorf("xml decode cors: %w", err)
 	}

View file

@@ -4,6 +4,7 @@ import (
 	"context"
 	"crypto/ecdsa"
 	"crypto/rand"
+	"encoding/xml"
 	"fmt"
 	"io"
 	"net/url"
@@ -146,6 +147,7 @@
 		BktInfo *data.BucketInfo
 		Reader io.Reader
 		CopiesNumbers []uint32
+		NewDecoder func(io.Reader) *xml.Decoder
 	}
 
 	// CopyObjectParams stores object copy request parameters.

View file

@@ -77,15 +77,15 @@ type (
 		isResolveListAllow bool // True if ResolveZoneList contains allowed zones
 		completeMultipartKeepalive time.Duration
 
 		mu sync.RWMutex
 		defaultPolicy netmap.PlacementPolicy
 		regionMap map[string]netmap.PlacementPolicy
 		copiesNumbers map[string][]uint32
 		defaultCopiesNumbers []uint32
-		defaultXMLNSForCompleteMultipart bool
+		defaultXMLNS bool
 		bypassContentEncodingInChunks bool
 		clientCut bool
 		maxBufferSizeForPut uint64
 	}
 
 	maxClientsConfig struct {
@@ -173,12 +173,12 @@ func (a *App) initLayer(ctx context.Context) {
 
 func newAppSettings(log *Logger, v *viper.Viper) *appSettings {
 	settings := &appSettings{
 		logLevel: log.lvl,
 		maxClient: newMaxClients(v),
-		defaultXMLNSForCompleteMultipart: v.GetBool(cfgKludgeUseDefaultXMLNSForCompleteMultipartUpload),
+		defaultXMLNS: v.GetBool(cfgKludgeUseDefaultXMLNS),
 		defaultMaxAge: fetchDefaultMaxAge(v, log.logger),
 		notificatorEnabled: v.GetBool(cfgEnableNATS),
 		completeMultipartKeepalive: v.GetDuration(cfgKludgeCompleteMultipartUploadKeepalive),
 	}
 
 	settings.resolveZoneList = v.GetStringSlice(cfgResolveBucketAllow)
@@ -274,11 +274,11 @@ func (s *appSettings) DefaultCopiesNumbers() []uint32 {
 	return s.defaultCopiesNumbers
 }
 
-func (s *appSettings) NewCompleteMultipartDecoder(r io.Reader) *xml.Decoder {
+func (s *appSettings) NewXMLDecoder(r io.Reader) *xml.Decoder {
 	dec := xml.NewDecoder(r)
 
 	s.mu.RLock()
-	if s.defaultXMLNSForCompleteMultipart {
+	if s.defaultXMLNS {
 		dec.DefaultSpace = awsDefaultNamespace
 	}
 	s.mu.RUnlock()
@@ -286,9 +286,9 @@ func (s *appSettings) NewCompleteMultipartDecoder(r io.Reader) *xml.Decoder {
 	return dec
 }
 
-func (s *appSettings) useDefaultNamespaceForCompleteMultipart(useDefaultNamespace bool) {
+func (s *appSettings) useDefaultXMLNamespace(useDefaultNamespace bool) {
 	s.mu.Lock()
-	s.defaultXMLNSForCompleteMultipart = useDefaultNamespace
+	s.defaultXMLNS = useDefaultNamespace
 	s.mu.Unlock()
 }
@@ -600,7 +600,7 @@ func (a *App) updateSettings() {
 
 	a.settings.initPlacementPolicy(a.log, a.cfg)
 
-	a.settings.useDefaultNamespaceForCompleteMultipart(a.cfg.GetBool(cfgKludgeUseDefaultXMLNSForCompleteMultipartUpload))
+	a.settings.useDefaultXMLNamespace(a.cfg.GetBool(cfgKludgeUseDefaultXMLNS))
 	a.settings.setBypassContentEncodingInChunks(a.cfg.GetBool(cfgKludgeBypassContentEncodingCheckInChunks))
 	a.settings.setClientCut(a.cfg.GetBool(cfgClientCut))
 	a.settings.setBufferMaxSizeForPut(a.cfg.GetUint64(cfgBufferMaxSizeForPut))

View file

@@ -130,9 +130,9 @@ const ( // Settings.
 	cfgApplicationBuildTime = "app.build_time"
 
 	// Kludge.
-	cfgKludgeUseDefaultXMLNSForCompleteMultipartUpload = "kludge.use_default_xmlns_for_complete_multipart"
+	cfgKludgeUseDefaultXMLNS = "kludge.use_default_xmlns"
 	cfgKludgeCompleteMultipartUploadKeepalive = "kludge.complete_multipart_keepalive"
 	cfgKludgeBypassContentEncodingCheckInChunks = "kludge.bypass_content_encoding_check_in_chunks"
 
 	// Command line args.
 	cmdHelp = "help"
@@ -534,7 +534,7 @@ func newSettings() *viper.Viper {
 	v.SetDefault(cfgBufferMaxSizeForPut, 1024*1024) // 1mb
 
 	// kludge
-	v.SetDefault(cfgKludgeUseDefaultXMLNSForCompleteMultipartUpload, false)
+	v.SetDefault(cfgKludgeUseDefaultXMLNS, false)
 	v.SetDefault(cfgKludgeCompleteMultipartUploadKeepalive, 10*time.Second)
 	v.SetDefault(cfgKludgeBypassContentEncodingCheckInChunks, false)

View file

@@ -41,42 +41,42 @@ func TestDefaultNamespace(t *testing.T) {
 	}{
 		{
 			settings: &appSettings{
-				defaultXMLNSForCompleteMultipart: false,
+				defaultXMLNS: false,
 			},
 			input: xmlBodyWithNamespace,
 			err:   false,
 		},
 		{
 			settings: &appSettings{
-				defaultXMLNSForCompleteMultipart: false,
+				defaultXMLNS: false,
 			},
 			input: xmlBody,
 			err:   true,
 		},
 		{
 			settings: &appSettings{
-				defaultXMLNSForCompleteMultipart: false,
+				defaultXMLNS: false,
 			},
 			input: xmlBodyWithInvalidNamespace,
 			err:   true,
 		},
 		{
 			settings: &appSettings{
-				defaultXMLNSForCompleteMultipart: true,
+				defaultXMLNS: true,
 			},
 			input: xmlBodyWithNamespace,
 			err:   false,
 		},
 		{
 			settings: &appSettings{
-				defaultXMLNSForCompleteMultipart: true,
+				defaultXMLNS: true,
 			},
 			input: xmlBody,
 			err:   false,
 		},
 		{
 			settings: &appSettings{
-				defaultXMLNSForCompleteMultipart: true,
+				defaultXMLNS: true,
 			},
 			input: xmlBodyWithInvalidNamespace,
 			err:   true,
@@ -84,7 +84,7 @@ func TestDefaultNamespace(t *testing.T) {
 	} {
 		t.Run("", func(t *testing.T) {
 			model := new(handler.CompleteMultipartUpload)
-			err := tc.settings.NewCompleteMultipartDecoder(bytes.NewBufferString(tc.input)).Decode(model)
+			err := tc.settings.NewXMLDecoder(bytes.NewBufferString(tc.input)).Decode(model)
 			if tc.err {
 				require.Error(t, err)
 			} else {
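
The fixture constants used in the test table are defined elsewhere in the test file and are not part of this diff; hypothetical equivalents (assumptions, shown only to make the three cases concrete) could look like this:

```go
// Hypothetical fixtures; the real constants live elsewhere in the test file.
const (
	// Body with the AWS namespace: accepted whether or not the kludge is on.
	xmlBodyWithNamespace = `<CompleteMultipartUpload xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Part><ETag>"etag"</ETag><PartNumber>1</PartNumber></Part></CompleteMultipartUpload>`
	// Body with no namespace: accepted only when defaultXMLNS is true.
	xmlBody = `<CompleteMultipartUpload><Part><ETag>"etag"</ETag><PartNumber>1</PartNumber></Part></CompleteMultipartUpload>`
	// Body with a foreign namespace: rejected in every case.
	xmlBodyWithInvalidNamespace = `<CompleteMultipartUpload xmlns="http://example.com/invalid"><Part><ETag>"etag"</ETag><PartNumber>1</PartNumber></Part></CompleteMultipartUpload>`
)
```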

View file

@@ -138,8 +138,8 @@ S3_GW_ALLOWED_ACCESS_KEY_ID_PREFIXES=Ck9BHsgKcnwfCTUSFm6pxhoNS4cBqgN2NQ8zVgPjqZD
 S3_GW_RESOLVE_BUCKET_ALLOW=container
 # S3_GW_RESOLVE_BUCKET_DENY=
 
-# Enable using default xml namespace `http://s3.amazonaws.com/doc/2006-03-01/` when parse`CompleteMultipartUpload` xml body.
-S3_GW_KLUDGE_USE_DEFAULT_XMLNS_FOR_COMPLETE_MULTIPART=false
+# Enable using default xml namespace `http://s3.amazonaws.com/doc/2006-03-01/` when parsing xml bodies.
+S3_GW_KLUDGE_USE_DEFAULT_XMLNS=false
 # Set timeout between whitespace transmissions during CompleteMultipartUpload processing.
 S3_GW_KLUDGE_COMPLETE_MULTIPART_KEEPALIVE=10s
 # Use this flag to be able to use chunked upload approach without having `aws-chunked` value in `Content-Encoding` header.

View file

@@ -167,8 +167,8 @@ resolve_bucket:
   deny:
 
 kludge:
-  # Enable using default xml namespace `http://s3.amazonaws.com/doc/2006-03-01/` when parse`CompleteMultipartUpload` xml body.
-  use_default_xmlns_for_complete_multipart: false
+  # Enable using default xml namespace `http://s3.amazonaws.com/doc/2006-03-01/` when parsing xml bodies.
+  use_default_xmlns: false
   # Set timeout between whitespace transmissions during CompleteMultipartUpload processing.
   complete_multipart_keepalive: 10s
   # Use this flag to be able to use chunked upload approach without having `aws-chunked` value in `Content-Encoding` header.

View file

@@ -540,16 +540,16 @@ Workarounds for non-standard use cases.
 ```yaml
 kludge:
-  use_default_xmlns_for_complete_multipart: false
+  use_default_xmlns: false
   complete_multipart_keepalive: 10s
   bypass_content_encoding_check_in_chunks: false
 ```
 
 | Parameter | Type | SIGHUP reload | Default value | Description |
 |---|---|---|---|---|
-| `use_default_xmlns_for_complete_multipart` | `bool` | yes | false | Enable using default xml namespace `http://s3.amazonaws.com/doc/2006-03-01/` when parse `CompleteMultipartUpload` xml body. |
+| `use_default_xmlns` | `bool` | yes | false | Enable using default xml namespace `http://s3.amazonaws.com/doc/2006-03-01/` when parsing xml bodies. |
 | `complete_multipart_keepalive` | `duration` | no | 10s | Set timeout between whitespace transmissions during CompleteMultipartUpload processing. |
 | `bypass_content_encoding_check_in_chunks` | `bool` | yes | false | Use this flag to be able to use [chunked upload approach](https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html) without having `aws-chunked` value in `Content-Encoding` header. |
 
 # `runtime` section
 
 Contains runtime parameters.