Denis Kirillov
80c7b73eb9
Some checks failed
/ DCO (pull_request) Successful in 2m50s
/ Vulncheck (pull_request) Failing after 3m15s
/ Builds (1.20) (pull_request) Successful in 3m39s
/ Builds (1.21) (pull_request) Successful in 3m41s
/ Lint (pull_request) Successful in 5m48s
/ Tests (1.20) (pull_request) Successful in 4m0s
/ Tests (1.21) (pull_request) Successful in 3m53s
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
1212 lines
34 KiB
Go
package handler

import (
    "bytes"
    "crypto/md5"
    "encoding/base64"
    "encoding/hex"
    "encoding/json"
    "encoding/xml"
    stderrors "errors"
    "fmt"
    "io"
    "net"
    "net/http"
    "net/url"
    "strconv"
    "strings"
    "time"

    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/encryption"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
    cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
    "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
    "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
    "git.frostfs.info/TrueCloudLab/policy-engine/schema/s3"
    "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
    "go.uber.org/zap"
)

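// postPolicy is the S3 POST policy decoded from the multipart "policy" form
// field: an expiration timestamp and a list of conditions that the uploaded
// form fields must satisfy. The empty flag marks requests without a policy.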
type postPolicy struct {
    Expiration time.Time          `json:"expiration"`
    Conditions []*policyCondition `json:"conditions"`
    empty      bool
}

func (p *postPolicy) condition(key string) *policyCondition {
    for _, condition := range p.Conditions {
        if condition.Key == key {
            return condition
        }
    }
    return nil
}

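// CheckContentLength verifies that size fits into the policy's
// content-length-range condition, if one is present. An empty policy accepts
// any size. Note that the bounds and the size are compared as decimal strings.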
func (p *postPolicy) CheckContentLength(size uint64) bool {
    if p.empty {
        return true
    }
    for _, condition := range p.Conditions {
        if condition.Matching == "content-length-range" {
            length := strconv.FormatUint(size, 10)
            return condition.Key <= length && length <= condition.Value
        }
    }
    return true
}

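// match reports whether value satisfies the condition and records the result
// in Matched. Supported matching types are "eq" (exact match) and
// "starts-with" (prefix match; Content-Type values are checked per
// comma-separated element).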
func (p *policyCondition) match(value string) bool {
    switch p.Matching {
    case "eq":
        p.Matched = p.Value == value
    case "starts-with":
        if p.Key == api.ContentType {
            p.Matched = true
            for _, contentType := range strings.Split(value, ",") {
                if !strings.HasPrefix(contentType, p.Value) {
                    p.Matched = false
                }
            }
        } else {
            p.Matched = strings.HasPrefix(value, p.Value)
        }
    }
    return p.Matched
}

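// CheckField validates a single form field against the policy. It fails if
// the policy has no condition for the key or if the value does not match.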
func (p *postPolicy) CheckField(key string, value string) error {
    if p.empty {
        return nil
    }
    cond := p.condition(key)
    if cond == nil {
        return errors.GetAPIError(errors.ErrPostPolicyConditionInvalidFormat)
    }

    if !cond.match(value) {
        return errors.GetAPIError(errors.ErrPostPolicyConditionInvalidFormat)
    }

    return nil
}

func (p *postPolicy) AllConditionMatched() bool {
    for _, condition := range p.Conditions {
        if !condition.Matched {
            return false
        }
    }
    return true
}

type policyCondition struct {
    Matching string
    Key      string
    Value    string
    Matched  bool
}

var errInvalidCondition = fmt.Errorf("invalid condition")

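// UnmarshalJSON parses a policy condition in either of the two forms allowed
// by the S3 POST policy: a three-element array ["matching", "$field", "value"]
// (with numeric bounds for content-length-range) or a {"field": "value"}
// object, which is treated as an "eq" condition.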
func (p *policyCondition) UnmarshalJSON(data []byte) error {
    var (
        ok bool
        v  interface{}
    )

    if err := json.Unmarshal(data, &v); err != nil {
        return fmt.Errorf("unmarshal policy condition: %w", err)
    }

    switch v := v.(type) {
    case []interface{}:
        if len(v) != 3 {
            return errInvalidCondition
        }
        if p.Matching, ok = v[0].(string); !ok {
            return errInvalidCondition
        }

        if p.Matching == "content-length-range" {
            min, ok := v[1].(float64)
            max, ok2 := v[2].(float64)
            if !ok || !ok2 {
                return errInvalidCondition
            }
            p.Key = strconv.FormatFloat(min, 'f', 0, 32)
            p.Value = strconv.FormatFloat(max, 'f', 0, 32)
        } else {
            key, ok2 := v[1].(string)
            p.Value, ok = v[2].(string)
            if !ok || !ok2 {
                return errInvalidCondition
            }
            p.Key = strings.ToLower(strings.TrimPrefix(key, "$"))
        }

    case map[string]interface{}:
        p.Matching = "eq"
        for key, val := range v {
            p.Key = strings.ToLower(key)
            if p.Value, ok = val.(string); !ok {
                return errInvalidCondition
            }
        }
    default:
        return fmt.Errorf("unknown condition type")
    }

    return nil
}

// keywords of predefined basic ACL values.
const (
    basicACLPrivate   = "private"
    basicACLReadOnly  = "public-read"
    basicACLPublic    = "public-read-write"
    cannedACLAuthRead = "authenticated-read"
)

type createBucketParams struct {
    XMLName            xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CreateBucketConfiguration" json:"-"`
    LocationConstraint string
}

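// PutObjectHandler handles the S3 PutObject request: it resolves the bucket,
// validates ACL and SSE-C headers, reads the (possibly chunked) body, stores
// the object, and then applies tagging, eACL and response headers.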
func (h *handler) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
    var (
        err              error
        newEaclTable     *eacl.Table
        sessionTokenEACL *session.Container
        cannedACLStatus  = aclHeadersStatus(r)
        ctx              = r.Context()
        reqInfo          = middleware.GetReqInfo(ctx)
    )

    bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
    if err != nil {
        h.logAndSendError(w, "could not get bucket objInfo", reqInfo, err)
        return
    }

    settings, err := h.obj.GetBucketSettings(ctx, bktInfo)
    if err != nil {
        h.logAndSendError(w, "could not get bucket settings", reqInfo, err)
        return
    }

    apeEnabled := bktInfo.APEEnabled || settings.CannedACL != ""
    if apeEnabled && cannedACLStatus == aclStatusYes {
        h.logAndSendError(w, "acl not supported for this bucket", reqInfo, errors.GetAPIError(errors.ErrAccessControlListNotSupported))
        return
    }

    needUpdateEACLTable := !(apeEnabled || cannedACLStatus == aclStatusNo)
    if needUpdateEACLTable {
        if sessionTokenEACL, err = getSessionTokenSetEACL(r.Context()); err != nil {
            h.logAndSendError(w, "could not get eacl session token from a box", reqInfo, err)
            return
        }
    }

    tagSet, err := parseTaggingHeader(r.Header)
    if err != nil {
        h.logAndSendError(w, "could not parse tagging header", reqInfo, err)
        return
    }

    metadata := parseMetadata(r)
    if contentType := r.Header.Get(api.ContentType); len(contentType) > 0 {
        metadata[api.ContentType] = contentType
    }
    if cacheControl := r.Header.Get(api.CacheControl); len(cacheControl) > 0 {
        metadata[api.CacheControl] = cacheControl
    }
    if expires := r.Header.Get(api.Expires); len(expires) > 0 {
        metadata[api.Expires] = expires
    }
    if contentLanguage := r.Header.Get(api.ContentLanguage); len(contentLanguage) > 0 {
        metadata[api.ContentLanguage] = contentLanguage
    }

    encryptionParams, err := formEncryptionParams(r)
    if err != nil {
        h.logAndSendError(w, "invalid sse headers", reqInfo, err)
        return
    }

    body, err := h.getBodyReader(r)
    if err != nil {
        h.logAndSendError(w, "failed to get body reader", reqInfo, err)
        return
    }
    if encodings := r.Header.Get(api.ContentEncoding); len(encodings) > 0 {
        metadata[api.ContentEncoding] = encodings
    }

    var size uint64
    if r.ContentLength > 0 {
        size = uint64(r.ContentLength)
    }

    params := &layer.PutObjectParams{
        BktInfo:           bktInfo,
        Object:            reqInfo.ObjectName,
        Reader:            body,
        Size:              size,
        Header:            metadata,
        Encryption:        encryptionParams,
        ContentMD5:        r.Header.Get(api.ContentMD5),
        ContentSHA256Hash: r.Header.Get(api.AmzContentSha256),
    }

    params.CopiesNumbers, err = h.pickCopiesNumbers(metadata, reqInfo.Namespace, bktInfo.LocationConstraint)
    if err != nil {
        h.logAndSendError(w, "invalid copies number", reqInfo, err)
        return
    }

    params.Lock, err = formObjectLock(ctx, bktInfo, settings.LockConfiguration, r.Header)
    if err != nil {
        h.logAndSendError(w, "could not form object lock", reqInfo, err)
        return
    }

    extendedObjInfo, err := h.obj.PutObject(ctx, params)
    if err != nil {
        _, err2 := io.Copy(io.Discard, body)
        err3 := body.Close()
        h.logAndSendError(w, "could not upload object", reqInfo, err, zap.Errors("body close errors", []error{err2, err3}))
        return
    }
    objInfo := extendedObjInfo.ObjectInfo

    s := &SendNotificationParams{
        Event:            EventObjectCreatedPut,
        NotificationInfo: data.NotificationInfoFromObject(objInfo, h.cfg.MD5Enabled()),
        BktInfo:          bktInfo,
        ReqInfo:          reqInfo,
    }
    if err = h.sendNotifications(ctx, s); err != nil {
        h.reqLogger(ctx).Error(logs.CouldntSendNotification, zap.Error(err))
    }

    if needUpdateEACLTable {
        if newEaclTable, err = h.getNewEAclTable(r, bktInfo, objInfo); err != nil {
            h.logAndSendError(w, "could not get new eacl table", reqInfo, err)
            return
        }
    }

    if tagSet != nil {
        tagPrm := &layer.PutObjectTaggingParams{
            ObjectVersion: &layer.ObjectVersion{
                BktInfo:    bktInfo,
                ObjectName: objInfo.Name,
                VersionID:  objInfo.VersionID(),
            },
            TagSet:      tagSet,
            NodeVersion: extendedObjInfo.NodeVersion,
        }
        if _, err = h.obj.PutObjectTagging(r.Context(), tagPrm); err != nil {
            h.logAndSendError(w, "could not upload object tagging", reqInfo, err)
            return
        }
    }

    if newEaclTable != nil {
        p := &layer.PutBucketACLParams{
            BktInfo:      bktInfo,
            EACL:         newEaclTable,
            SessionToken: sessionTokenEACL,
        }

        if err = h.obj.PutBucketACL(r.Context(), p); err != nil {
            h.logAndSendError(w, "could not put bucket acl", reqInfo, err)
            return
        }
    }

    if settings.VersioningEnabled() {
        w.Header().Set(api.AmzVersionID, objInfo.VersionID())
    }
    if encryptionParams.Enabled() {
        addSSECHeaders(w.Header(), r.Header)
    }

    w.Header().Set(api.ETag, data.Quote(objInfo.ETag(h.cfg.MD5Enabled())))

    if err = middleware.WriteSuccessResponseHeadersOnly(w); err != nil {
        h.logAndSendError(w, "write response", reqInfo, err)
        return
    }
}

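// getBodyReader returns the reader for the request payload. Plain requests use
// r.Body as-is; streaming SigV4 (aws-chunked) requests have the aws-chunked
// token stripped from Content-Encoding, require the decoded content length
// header (api.AmzDecodedContentLength), and are wrapped by the SigV4 chunked
// reader.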
func (h *handler) getBodyReader(r *http.Request) (io.ReadCloser, error) {
    if !api.IsSignedStreamingV4(r) {
        return r.Body, nil
    }

    encodings := r.Header.Values(api.ContentEncoding)
    var chunkedEncoding bool
    resultContentEncoding := make([]string, 0, len(encodings))
    for _, enc := range encodings {
        for _, e := range strings.Split(enc, ",") {
            e = strings.TrimSpace(e)
            if e == api.AwsChunked { // probably we should also check position of this header value
                chunkedEncoding = true
            } else {
                resultContentEncoding = append(resultContentEncoding, e)
            }
        }
    }
    r.Header.Set(api.ContentEncoding, strings.Join(resultContentEncoding, ","))

    if !chunkedEncoding && !h.cfg.BypassContentEncodingInChunks() {
        return nil, fmt.Errorf("%w: request is not chunk encoded, encodings '%s'",
            errors.GetAPIError(errors.ErrInvalidEncodingMethod), strings.Join(encodings, ","))
    }

    decodeContentSize := r.Header.Get(api.AmzDecodedContentLength)
    if len(decodeContentSize) == 0 {
        return nil, errors.GetAPIError(errors.ErrMissingContentLength)
    }

    if _, err := strconv.Atoi(decodeContentSize); err != nil {
        return nil, fmt.Errorf("%w: parse decoded content length: %s", errors.GetAPIError(errors.ErrMissingContentLength), err.Error())
    }

    chunkReader, err := newSignV4ChunkedReader(r)
    if err != nil {
        return nil, fmt.Errorf("initialize chunk reader: %w", err)
    }

    return chunkReader, nil
}

func formEncryptionParams(r *http.Request) (enc encryption.Params, err error) {
    return formEncryptionParamsBase(r, false)
}

func formCopySourceEncryptionParams(r *http.Request) (enc encryption.Params, err error) {
    return formEncryptionParamsBase(r, true)
}

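// formEncryptionParamsBase extracts SSE-C parameters from the request headers
// (copy-source or regular ones). It requires TLS, the supported algorithm
// (layer.AESEncryptionAlgorithm), a key of layer.AESKeySize bytes and a
// matching key MD5 checksum.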
func formEncryptionParamsBase(r *http.Request, isCopySource bool) (enc encryption.Params, err error) {
    var sseCustomerAlgorithm, sseCustomerKey, sseCustomerKeyMD5 string
    if isCopySource {
        sseCustomerAlgorithm = r.Header.Get(api.AmzCopySourceServerSideEncryptionCustomerAlgorithm)
        sseCustomerKey = r.Header.Get(api.AmzCopySourceServerSideEncryptionCustomerKey)
        sseCustomerKeyMD5 = r.Header.Get(api.AmzCopySourceServerSideEncryptionCustomerKeyMD5)
    } else {
        sseCustomerAlgorithm = r.Header.Get(api.AmzServerSideEncryptionCustomerAlgorithm)
        sseCustomerKey = r.Header.Get(api.AmzServerSideEncryptionCustomerKey)
        sseCustomerKeyMD5 = r.Header.Get(api.AmzServerSideEncryptionCustomerKeyMD5)
    }

    if len(sseCustomerAlgorithm) == 0 && len(sseCustomerKey) == 0 && len(sseCustomerKeyMD5) == 0 {
        return
    }

    if r.TLS == nil {
        return enc, errors.GetAPIError(errors.ErrInsecureSSECustomerRequest)
    }

    if len(sseCustomerKey) > 0 && len(sseCustomerAlgorithm) == 0 {
        return enc, errors.GetAPIError(errors.ErrMissingSSECustomerAlgorithm)
    }
    if len(sseCustomerAlgorithm) > 0 && len(sseCustomerKey) == 0 {
        return enc, errors.GetAPIError(errors.ErrMissingSSECustomerKey)
    }

    if sseCustomerAlgorithm != layer.AESEncryptionAlgorithm {
        return enc, errors.GetAPIError(errors.ErrInvalidEncryptionAlgorithm)
    }

    key, err := base64.StdEncoding.DecodeString(sseCustomerKey)
    if err != nil {
        if isCopySource {
            return enc, errors.GetAPIError(errors.ErrInvalidSSECustomerParameters)
        }
        return enc, errors.GetAPIError(errors.ErrInvalidSSECustomerKey)
    }

    if len(key) != layer.AESKeySize {
        if isCopySource {
            return enc, errors.GetAPIError(errors.ErrInvalidSSECustomerParameters)
        }
        return enc, errors.GetAPIError(errors.ErrInvalidSSECustomerKey)
    }

    keyMD5, err := base64.StdEncoding.DecodeString(sseCustomerKeyMD5)
    if err != nil {
        return enc, errors.GetAPIError(errors.ErrSSECustomerKeyMD5Mismatch)
    }

    md5Sum := md5.Sum(key)
    if !bytes.Equal(md5Sum[:], keyMD5) {
        return enc, errors.GetAPIError(errors.ErrSSECustomerKeyMD5Mismatch)
    }

    params, err := encryption.NewParams(key)
    if err == nil {
        enc = *params
    }

    return enc, err
}

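// PostObject handles browser-based uploads via the S3 POST Object API: it
// validates the POST policy, reads the file from the multipart form, stores
// the object and honours the tagging, acl, success_action_redirect and
// success_action_status form fields.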
func (h *handler) PostObject(w http.ResponseWriter, r *http.Request) {
    var (
        newEaclTable     *eacl.Table
        tagSet           map[string]string
        sessionTokenEACL *session.Container
        ctx              = r.Context()
        reqInfo          = middleware.GetReqInfo(ctx)
        metadata         = make(map[string]string)
        cannedACLStatus  = aclHeadersStatus(r)
    )

    policy, err := checkPostPolicy(r, reqInfo, metadata)
    if err != nil {
        h.logAndSendError(w, "failed check policy", reqInfo, err)
        return
    }

    if tagging := auth.MultipartFormValue(r, "tagging"); tagging != "" {
        buffer := bytes.NewBufferString(tagging)
        tagSet, err = h.readTagSet(buffer)
        if err != nil {
            h.logAndSendError(w, "could not read tag set", reqInfo, err)
            return
        }
    }

    bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
    if err != nil {
        h.logAndSendError(w, "could not get bucket objInfo", reqInfo, err)
        return
    }

    settings, err := h.obj.GetBucketSettings(ctx, bktInfo)
    if err != nil {
        h.logAndSendError(w, "could not get bucket settings", reqInfo, err)
        return
    }

    apeEnabled := bktInfo.APEEnabled || settings.CannedACL != ""
    if apeEnabled && cannedACLStatus == aclStatusYes {
        h.logAndSendError(w, "acl not supported for this bucket", reqInfo, errors.GetAPIError(errors.ErrAccessControlListNotSupported))
        return
    }

    needUpdateEACLTable := !(apeEnabled || cannedACLStatus == aclStatusNo)
    if needUpdateEACLTable {
        if sessionTokenEACL, err = getSessionTokenSetEACL(ctx); err != nil {
            h.logAndSendError(w, "could not get eacl session token from a box", reqInfo, err)
            return
        }
    }

    var contentReader io.Reader
    var size uint64
    if content, ok := r.MultipartForm.Value["file"]; ok {
        contentReader = bytes.NewBufferString(content[0])
        size = uint64(len(content[0]))
    } else {
        file, head, err := r.FormFile("file")
        if err != nil {
            h.logAndSendError(w, "could not get uploading file", reqInfo, err)
            return
        }
        contentReader = file
        size = uint64(head.Size)
        reqInfo.ObjectName = strings.ReplaceAll(reqInfo.ObjectName, "${filename}", head.Filename)
    }
    if !policy.CheckContentLength(size) {
        h.logAndSendError(w, "invalid content-length", reqInfo, errors.GetAPIError(errors.ErrInvalidArgument))
        return
    }

    params := &layer.PutObjectParams{
        BktInfo: bktInfo,
        Object:  reqInfo.ObjectName,
        Reader:  contentReader,
        Size:    size,
        Header:  metadata,
    }

    extendedObjInfo, err := h.obj.PutObject(ctx, params)
    if err != nil {
        h.logAndSendError(w, "could not upload object", reqInfo, err)
        return
    }
    objInfo := extendedObjInfo.ObjectInfo

    s := &SendNotificationParams{
        Event:            EventObjectCreatedPost,
        NotificationInfo: data.NotificationInfoFromObject(objInfo, h.cfg.MD5Enabled()),
        BktInfo:          bktInfo,
        ReqInfo:          reqInfo,
    }
    if err = h.sendNotifications(ctx, s); err != nil {
        h.reqLogger(ctx).Error(logs.CouldntSendNotification, zap.Error(err))
    }

    if acl := auth.MultipartFormValue(r, "acl"); acl != "" {
        r.Header.Set(api.AmzACL, acl)
        r.Header.Set(api.AmzGrantFullControl, "")
        r.Header.Set(api.AmzGrantWrite, "")
        r.Header.Set(api.AmzGrantRead, "")

        if newEaclTable, err = h.getNewEAclTable(r, bktInfo, objInfo); err != nil {
            h.logAndSendError(w, "could not get new eacl table", reqInfo, err)
            return
        }
    }

    if tagSet != nil {
        tagPrm := &layer.PutObjectTaggingParams{
            ObjectVersion: &layer.ObjectVersion{
                BktInfo:    bktInfo,
                ObjectName: objInfo.Name,
                VersionID:  objInfo.VersionID(),
            },
            NodeVersion: extendedObjInfo.NodeVersion,
        }

        if _, err = h.obj.PutObjectTagging(ctx, tagPrm); err != nil {
            h.logAndSendError(w, "could not upload object tagging", reqInfo, err)
            return
        }
    }

    if newEaclTable != nil {
        p := &layer.PutBucketACLParams{
            BktInfo:      bktInfo,
            EACL:         newEaclTable,
            SessionToken: sessionTokenEACL,
        }

        if err = h.obj.PutBucketACL(ctx, p); err != nil {
            h.logAndSendError(w, "could not put bucket acl", reqInfo, err)
            return
        }
    }

    if settings.VersioningEnabled() {
        w.Header().Set(api.AmzVersionID, objInfo.VersionID())
    }

    if redirectURL := auth.MultipartFormValue(r, "success_action_redirect"); redirectURL != "" {
        http.Redirect(w, r, redirectURL, http.StatusTemporaryRedirect)
        return
    }
    status := http.StatusNoContent
    if statusStr := auth.MultipartFormValue(r, "success_action_status"); statusStr != "" {
        switch statusStr {
        case "200":
            status = http.StatusOK
        case "201":
            status = http.StatusCreated
            resp := &PostResponse{
                Bucket: objInfo.Bucket,
                Key:    objInfo.Name,
                ETag:   data.Quote(objInfo.ETag(h.cfg.MD5Enabled())),
            }
            w.WriteHeader(status)
            respData, err := middleware.EncodeResponse(resp)
            if err != nil {
                h.logAndSendError(w, "encode response", reqInfo, err)
            }
            if _, err = w.Write(respData); err != nil {
                h.logAndSendError(w, "something went wrong", reqInfo, err)
            }
            return
        }
    }

    w.Header().Set(api.ETag, data.Quote(objInfo.ETag(h.cfg.MD5Enabled())))
    w.WriteHeader(status)
}

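// checkPostPolicy decodes and validates the base64-encoded POST policy from
// the multipart form, checks every form field against it, collects
// metadata-prefixed fields (api.MetadataPrefix) into metadata and picks up the
// object key from the "key" field.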
func checkPostPolicy(r *http.Request, reqInfo *middleware.ReqInfo, metadata map[string]string) (*postPolicy, error) {
    policy := &postPolicy{empty: true}
    if policyStr := auth.MultipartFormValue(r, "policy"); policyStr != "" {
        policyData, err := base64.StdEncoding.DecodeString(policyStr)
        if err != nil {
            return nil, fmt.Errorf("could not decode policy: %w", err)
        }
        if err = json.Unmarshal(policyData, policy); err != nil {
            return nil, fmt.Errorf("could not unmarshal policy: %w", err)
        }
        if policy.Expiration.Before(time.Now()) {
            return nil, fmt.Errorf("policy is expired: %w", errors.GetAPIError(errors.ErrInvalidArgument))
        }
        policy.empty = false
    }

    if r.MultipartForm == nil {
        return nil, stderrors.New("empty multipart form")
    }

    for key, v := range r.MultipartForm.Value {
        if key == "file" || key == "policy" || key == "x-amz-signature" || strings.HasPrefix(key, "x-ignore-") {
            continue
        }

        if len(v) != 1 {
            return nil, fmt.Errorf("empty multipart value for key '%s'", key)
        }

        value := v[0]

        if err := policy.CheckField(key, value); err != nil {
            return nil, fmt.Errorf("'%s' form field doesn't match the policy: %w", key, err)
        }

        prefix := strings.ToLower(api.MetadataPrefix)
        if strings.HasPrefix(key, prefix) {
            metadata[strings.TrimPrefix(key, prefix)] = value
        }

        if key == "content-type" {
            metadata[api.ContentType] = value
        }

        if key == "key" {
            reqInfo.ObjectName = value
        }
    }

    for _, cond := range policy.Conditions {
        if cond.Key == "bucket" {
            if !cond.match(reqInfo.BucketName) {
                return nil, errors.GetAPIError(errors.ErrPostPolicyConditionInvalidFormat)
            }
        }
    }

    return policy, nil
}

type aclStatus int

const (
    // aclStatusNo means no acl headers at all.
    aclStatusNo aclStatus = iota
    // aclStatusYesAPECompatible means that only X-Amz-Acl is present and equals private.
    aclStatusYesAPECompatible
    // aclStatusYes means any other acl headers configuration.
    aclStatusYes
)

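// aclHeadersStatus classifies the ACL-related headers of the request: no ACL
// headers at all, only X-Amz-Acl: private (compatible with APE), or any other
// combination.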
func aclHeadersStatus(r *http.Request) aclStatus {
    if r.Header.Get(api.AmzGrantRead) != "" ||
        r.Header.Get(api.AmzGrantFullControl) != "" ||
        r.Header.Get(api.AmzGrantWrite) != "" {
        return aclStatusYes
    }

    cannedACL := r.Header.Get(api.AmzACL)
    if cannedACL != "" {
        if cannedACL == basicACLPrivate {
            return aclStatusYesAPECompatible
        }
        return aclStatusYes
    }

    return aclStatusNo
}

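// getNewEAclTable converts the object ACL headers into an eACL table merged
// with the current bucket eACL; it returns nil if the merge does not change
// anything.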
func (h *handler) getNewEAclTable(r *http.Request, bktInfo *data.BucketInfo, objInfo *data.ObjectInfo) (*eacl.Table, error) {
    var newEaclTable *eacl.Table
    key, err := h.bearerTokenIssuerKey(r.Context())
    if err != nil {
        return nil, fmt.Errorf("get bearer token issuer: %w", err)
    }
    objectACL, err := parseACLHeaders(r.Header, key)
    if err != nil {
        return nil, fmt.Errorf("could not parse object acl: %w", err)
    }

    resInfo := &resourceInfo{
        Bucket:  objInfo.Bucket,
        Object:  objInfo.Name,
        Version: objInfo.VersionID(),
    }

    bktPolicy, err := aclToPolicy(objectACL, resInfo)
    if err != nil {
        return nil, fmt.Errorf("could not translate object acl to bucket policy: %w", err)
    }

    astChild, err := policyToAst(bktPolicy)
    if err != nil {
        return nil, fmt.Errorf("could not translate policy to ast: %w", err)
    }

    bacl, err := h.obj.GetBucketACL(r.Context(), bktInfo)
    if err != nil {
        return nil, fmt.Errorf("could not get bucket eacl: %w", err)
    }

    parentAst := tableToAst(bacl.EACL, objInfo.Bucket)
    strCID := bacl.Info.CID.EncodeToString()

    for _, resource := range parentAst.Resources {
        if resource.Bucket == strCID {
            resource.Bucket = objInfo.Bucket
        }
    }

    if resAst, updated := mergeAst(parentAst, astChild); updated {
        if newEaclTable, err = astToTable(resAst); err != nil {
            return nil, fmt.Errorf("could not translate ast to table: %w", err)
        }
    }

    return newEaclTable, nil
}

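// parseTaggingHeader parses the X-Amz-Tagging header (URL query encoding) into
// a tag set, enforcing the maximum tag count and per-tag validation.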
func parseTaggingHeader(header http.Header) (map[string]string, error) {
    var tagSet map[string]string
    if tagging := header.Get(api.AmzTagging); len(tagging) > 0 {
        queries, err := url.ParseQuery(tagging)
        if err != nil {
            return nil, errors.GetAPIError(errors.ErrInvalidArgument)
        }
        if len(queries) > maxTags {
            return nil, errors.GetAPIError(errors.ErrInvalidTagsSizeExceed)
        }
        tagSet = make(map[string]string, len(queries))
        for k, v := range queries {
            tag := Tag{Key: k, Value: v[0]}
            if err = checkTag(tag); err != nil {
                return nil, err
            }
            tagSet[tag.Key] = tag.Value
        }
    }
    return tagSet, nil
}

func parseMetadata(r *http.Request) map[string]string {
    res := make(map[string]string)
    for k, v := range r.Header {
        if strings.HasPrefix(k, api.MetadataPrefix) {
            key := strings.ToLower(strings.TrimPrefix(k, api.MetadataPrefix))
            res[key] = v[0]
        }
    }
    return res
}

func parseCannedACL(header http.Header) (string, error) {
    acl := header.Get(api.AmzACL)
    if len(acl) == 0 {
        return basicACLPrivate, nil
    }

    if acl == basicACLPrivate || acl == basicACLPublic ||
        acl == cannedACLAuthRead || acl == basicACLReadOnly {
        return acl, nil
    }

    return "", fmt.Errorf("unknown acl: %s", acl)
}

func (h *handler) CreateBucketHandler(w http.ResponseWriter, r *http.Request) {
    if h.cfg.ACLEnabled() {
        h.createBucketHandlerACL(w, r)
        return
    }

    h.createBucketHandlerPolicy(w, r)
}

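// parseCommonCreateBucketParams builds the layer.CreateBucketParams shared by
// both create-bucket flows: it requires a container-creation session token,
// checks the bucket name, resolves the token issuer key, parses the location
// constraint and picks the placement policy.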
func (h *handler) parseCommonCreateBucketParams(reqInfo *middleware.ReqInfo, boxData *accessbox.Box, r *http.Request) (*keys.PublicKey, *layer.CreateBucketParams, error) {
    p := &layer.CreateBucketParams{
        Name:                     reqInfo.BucketName,
        Namespace:                reqInfo.Namespace,
        SessionContainerCreation: boxData.Gate.SessionTokenForPut(),
    }

    if p.SessionContainerCreation == nil {
        return nil, nil, fmt.Errorf("%w: couldn't find session token for put", errors.GetAPIError(errors.ErrAccessDenied))
    }

    if err := checkBucketName(reqInfo.BucketName); err != nil {
        return nil, nil, fmt.Errorf("invalid bucket name: %w", err)
    }

    key, err := getTokenIssuerKey(boxData)
    if err != nil {
        return nil, nil, fmt.Errorf("couldn't get bearer token signature key: %w", err)
    }

    createParams, err := h.parseLocationConstraint(r)
    if err != nil {
        return nil, nil, fmt.Errorf("could not parse location constraint: %w", err)
    }

    if err = h.setPlacementPolicy(p, reqInfo.Namespace, createParams.LocationConstraint, boxData.Policies); err != nil {
        return nil, nil, fmt.Errorf("couldn't set placement policy: %w", err)
    }

    p.ObjectLockEnabled = isLockEnabled(h.reqLogger(r.Context()), r.Header)

    return key, p, nil
}

func (h *handler) createBucketHandlerPolicy(w http.ResponseWriter, r *http.Request) {
    ctx := r.Context()
    reqInfo := middleware.GetReqInfo(ctx)

    boxData, err := middleware.GetBoxData(ctx)
    if err != nil {
        h.logAndSendError(w, "get access box from request", reqInfo, err)
        return
    }

    key, p, err := h.parseCommonCreateBucketParams(reqInfo, boxData, r)
    if err != nil {
        h.logAndSendError(w, "parse create bucket params", reqInfo, err)
        return
    }

    cannedACL, err := parseCannedACL(r.Header)
    if err != nil {
        h.logAndSendError(w, "could not parse canned ACL", reqInfo, err)
        return
    }

    p.APEEnabled = true
    bktInfo, err := h.obj.CreateBucket(ctx, p)
    if err != nil {
        h.logAndSendError(w, "could not create bucket", reqInfo, err)
        return
    }
    h.reqLogger(ctx).Info(logs.BucketIsCreated, zap.Stringer("container_id", bktInfo.CID))

    chains := bucketCannedACLToAPERules(cannedACL, reqInfo, key, bktInfo.CID)
    if err = h.ape.SaveACLChains(reqInfo.Namespace, chains); err != nil {
        h.logAndSendError(w, "failed to add morph rule chain", reqInfo, err)
        return
    }

    sp := &layer.PutSettingsParams{
        BktInfo: bktInfo,
        Settings: &data.BucketSettings{
            CannedACL:  cannedACL,
            OwnerKey:   key,
            Versioning: data.VersioningUnversioned,
        },
    }

    if p.ObjectLockEnabled {
        sp.Settings.Versioning = data.VersioningEnabled
    }

    if err = h.obj.PutBucketSettings(ctx, sp); err != nil {
        h.logAndSendError(w, "couldn't save bucket settings", reqInfo, err,
            zap.String("container_id", bktInfo.CID.EncodeToString()))
        return
    }

    if err = middleware.WriteSuccessResponseHeadersOnly(w); err != nil {
        h.logAndSendError(w, "write response", reqInfo, err)
        return
    }
}

func (h *handler) createBucketHandlerACL(w http.ResponseWriter, r *http.Request) {
    ctx := r.Context()
    reqInfo := middleware.GetReqInfo(ctx)

    boxData, err := middleware.GetBoxData(ctx)
    if err != nil {
        h.logAndSendError(w, "get access box from request", reqInfo, err)
        return
    }

    key, p, err := h.parseCommonCreateBucketParams(reqInfo, boxData, r)
    if err != nil {
        h.logAndSendError(w, "parse create bucket params", reqInfo, err)
        return
    }

    aclPrm := &layer.PutBucketACLParams{SessionToken: boxData.Gate.SessionTokenForSetEACL()}
    if aclPrm.SessionToken == nil {
        h.logAndSendError(w, "couldn't find session token for setEACL", reqInfo, errors.GetAPIError(errors.ErrAccessDenied))
        return
    }

    bktACL, err := parseACLHeaders(r.Header, key)
    if err != nil {
        h.logAndSendError(w, "could not parse bucket acl", reqInfo, err)
        return
    }
    resInfo := &resourceInfo{Bucket: reqInfo.BucketName}

    aclPrm.EACL, err = bucketACLToTable(bktACL, resInfo)
    if err != nil {
        h.logAndSendError(w, "could not translate bucket acl to eacl", reqInfo, err)
        return
    }

    bktInfo, err := h.obj.CreateBucket(ctx, p)
    if err != nil {
        h.logAndSendError(w, "could not create bucket", reqInfo, err)
        return
    }
    h.reqLogger(ctx).Info(logs.BucketIsCreated, zap.Stringer("container_id", bktInfo.CID))

    aclPrm.BktInfo = bktInfo
    if err = h.obj.PutBucketACL(r.Context(), aclPrm); err != nil {
        h.logAndSendError(w, "could not put bucket e/ACL", reqInfo, err)
        return
    }

    sp := &layer.PutSettingsParams{
        BktInfo: bktInfo,
        Settings: &data.BucketSettings{
            OwnerKey:   key,
            Versioning: data.VersioningUnversioned,
        },
    }

    if p.ObjectLockEnabled {
        sp.Settings.Versioning = data.VersioningEnabled
    }

    if err = h.obj.PutBucketSettings(ctx, sp); err != nil {
        h.logAndSendError(w, "couldn't save bucket settings", reqInfo, err,
            zap.String("container_id", bktInfo.CID.EncodeToString()))
        return
    }

    if err = middleware.WriteSuccessResponseHeadersOnly(w); err != nil {
        h.logAndSendError(w, "write response", reqInfo, err)
        return
    }
}

const s3ActionPrefix = "s3:"

var (
    // https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html

    writeACLBucketS3Actions = []string{
        s3ActionPrefix + middleware.PutObjectOperation,
        s3ActionPrefix + middleware.PostObjectOperation,
        s3ActionPrefix + middleware.CopyObjectOperation,
        s3ActionPrefix + middleware.UploadPartOperation,
        s3ActionPrefix + middleware.UploadPartCopyOperation,
        s3ActionPrefix + middleware.CreateMultipartUploadOperation,
        s3ActionPrefix + middleware.CompleteMultipartUploadOperation,
    }

    readACLBucketS3Actions = []string{
        s3ActionPrefix + middleware.HeadBucketOperation,
        s3ActionPrefix + middleware.GetBucketLocationOperation,
        s3ActionPrefix + middleware.ListObjectsV1Operation,
        s3ActionPrefix + middleware.ListObjectsV2Operation,
        s3ActionPrefix + middleware.ListBucketObjectVersionsOperation,
        s3ActionPrefix + middleware.ListMultipartUploadsOperation,
    }

    writeACLBucketNativeActions = []string{
        native.MethodPutObject,
    }

    readACLBucketNativeActions = []string{
        native.MethodGetContainer,
        native.MethodGetObject,
        native.MethodHeadObject,
        native.MethodSearchObject,
        native.MethodRangeObject,
        native.MethodHashObject,
    }
)

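// bucketCannedACLToAPERules translates a canned bucket ACL into APE rule
// chains: the bucket owner always gets full access on both the S3 and Ingress
// chains, while public-read and authenticated-read (handled identically here)
// add unconditional read rules and public-read-write adds read and write
// rules.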
func bucketCannedACLToAPERules(cannedACL string, reqInfo *middleware.ReqInfo, key *keys.PublicKey, cnrID cid.ID) []*chain.Chain {
    cnrIDStr := cnrID.EncodeToString()

    chains := []*chain.Chain{
        {
            ID: getBucketCannedChainID(chain.S3, cnrID),
            Rules: []chain.Rule{{
                Status:  chain.Allow,
                Actions: chain.Actions{Names: []string{"s3:*"}},
                Resources: chain.Resources{Names: []string{
                    fmt.Sprintf(s3.ResourceFormatS3Bucket, reqInfo.BucketName),
                    fmt.Sprintf(s3.ResourceFormatS3BucketObjects, reqInfo.BucketName),
                }},
                Condition: []chain.Condition{{
                    Op:     chain.CondStringEquals,
                    Object: chain.ObjectRequest,
                    Key:    s3.PropertyKeyOwner,
                    Value:  key.Address(),
                }},
            }},
        },
        {
            ID: getBucketCannedChainID(chain.Ingress, cnrID),
            Rules: []chain.Rule{{
                Status:  chain.Allow,
                Actions: chain.Actions{Names: []string{"*"}},
                Resources: chain.Resources{Names: []string{
                    fmt.Sprintf(native.ResourceFormatNamespaceContainer, reqInfo.Namespace, cnrIDStr),
                    fmt.Sprintf(native.ResourceFormatNamespaceContainerObjects, reqInfo.Namespace, cnrIDStr),
                }},
                Condition: []chain.Condition{{
                    Op:     chain.CondStringEquals,
                    Object: chain.ObjectRequest,
                    Key:    native.PropertyKeyActorPublicKey,
                    Value:  hex.EncodeToString(key.Bytes()),
                }},
            }},
        },
    }

    switch cannedACL {
    case basicACLPrivate:
    case cannedACLAuthRead:
        fallthrough
    case basicACLReadOnly:
        chains[0].Rules = append(chains[0].Rules, chain.Rule{
            Status:  chain.Allow,
            Actions: chain.Actions{Names: readACLBucketS3Actions},
            Resources: chain.Resources{Names: []string{
                fmt.Sprintf(s3.ResourceFormatS3Bucket, reqInfo.BucketName),
                fmt.Sprintf(s3.ResourceFormatS3BucketObjects, reqInfo.BucketName),
            }},
        })

        chains[1].Rules = append(chains[1].Rules, chain.Rule{
            Status:  chain.Allow,
            Actions: chain.Actions{Names: readACLBucketNativeActions},
            Resources: chain.Resources{Names: []string{
                fmt.Sprintf(native.ResourceFormatNamespaceContainer, reqInfo.Namespace, cnrIDStr),
                fmt.Sprintf(native.ResourceFormatNamespaceContainerObjects, reqInfo.Namespace, cnrIDStr),
            }},
        })
    case basicACLPublic:
        chains[0].Rules = append(chains[0].Rules, chain.Rule{
            Status:  chain.Allow,
            Actions: chain.Actions{Names: append(readACLBucketS3Actions, writeACLBucketS3Actions...)},
            Resources: chain.Resources{Names: []string{
                fmt.Sprintf(s3.ResourceFormatS3Bucket, reqInfo.BucketName),
                fmt.Sprintf(s3.ResourceFormatS3BucketObjects, reqInfo.BucketName),
            }},
        })

        chains[1].Rules = append(chains[1].Rules, chain.Rule{
            Status:  chain.Allow,
            Actions: chain.Actions{Names: append(readACLBucketNativeActions, writeACLBucketNativeActions...)},
            Resources: chain.Resources{Names: []string{
                fmt.Sprintf(native.ResourceFormatNamespaceContainer, reqInfo.Namespace, cnrIDStr),
                fmt.Sprintf(native.ResourceFormatNamespaceContainerObjects, reqInfo.Namespace, cnrIDStr),
            }},
        })
    default:
        panic("unknown canned acl") // this should never happen
    }

    return chains
}

func getBucketCannedChainID(prefix chain.Name, cnrID cid.ID) chain.ID {
    return chain.ID(string(prefix) + ":bktCanned" + string(cnrID[:]))
}

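// setPlacementPolicy resolves the placement policy for a new bucket: the
// namespace default, then a policy from the user's access box matching the
// location constraint, then a gateway-configured policy; otherwise the
// location constraint is reported as invalid.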
func (h handler) setPlacementPolicy(prm *layer.CreateBucketParams, namespace, locationConstraint string, userPolicies []*accessbox.ContainerPolicy) error {
    prm.Policy = h.cfg.DefaultPlacementPolicy(namespace)
    prm.LocationConstraint = locationConstraint

    if locationConstraint == "" {
        return nil
    }

    for _, placementPolicy := range userPolicies {
        if placementPolicy.LocationConstraint == locationConstraint {
            prm.Policy = placementPolicy.Policy
            return nil
        }
    }

    if policy, ok := h.cfg.PlacementPolicy(namespace, locationConstraint); ok {
        prm.Policy = policy
        return nil
    }

    return errors.GetAPIError(errors.ErrInvalidLocationConstraint)
}

func isLockEnabled(log *zap.Logger, header http.Header) bool {
    lockEnabledStr := header.Get(api.AmzBucketObjectLockEnabled)
    if len(lockEnabledStr) == 0 {
        return false
    }

    lockEnabled, err := strconv.ParseBool(lockEnabledStr)
    if err != nil {
        log.Warn(logs.InvalidBucketObjectLockEnabledHeader, zap.String("header", lockEnabledStr), zap.Error(err))
    }

    return lockEnabled
}

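// checkBucketName enforces S3 bucket naming rules: 3-63 characters, no "xn--"
// prefix or "-s3alias" suffix, not an IP address, and dot-separated labels of
// lowercase letters, digits and hyphens that do not start or end with a hyphen.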
func checkBucketName(bucketName string) error {
    if len(bucketName) < 3 || len(bucketName) > 63 {
        return errors.GetAPIError(errors.ErrInvalidBucketName)
    }

    if strings.HasPrefix(bucketName, "xn--") || strings.HasSuffix(bucketName, "-s3alias") {
        return errors.GetAPIError(errors.ErrInvalidBucketName)
    }
    if net.ParseIP(bucketName) != nil {
        return errors.GetAPIError(errors.ErrInvalidBucketName)
    }

    labels := strings.Split(bucketName, ".")
    for _, label := range labels {
        if len(label) == 0 {
            return errors.GetAPIError(errors.ErrInvalidBucketName)
        }
        for i, r := range label {
            if !isAlphaNum(r) && r != '-' {
                return errors.GetAPIError(errors.ErrInvalidBucketName)
            }
            if (i == 0 || i == len(label)-1) && r == '-' {
                return errors.GetAPIError(errors.ErrInvalidBucketName)
            }
        }
    }

    return nil
}

func isAlphaNum(char int32) bool {
    return 'a' <= char && char <= 'z' || '0' <= char && char <= '9'
}

func (h *handler) parseLocationConstraint(r *http.Request) (*createBucketParams, error) {
    if r.ContentLength == 0 {
        return new(createBucketParams), nil
    }

    params := new(createBucketParams)
    if err := h.cfg.NewXMLDecoder(r.Body).Decode(params); err != nil {
        return nil, errors.GetAPIError(errors.ErrMalformedXML)
    }
    return params, nil
}