package handler

import (
	"bytes"
	"context"
	"crypto/md5"
	"encoding/base64"
	"encoding/json"
	"encoding/xml"
	"errors"
	"fmt"
	"io"
	"mime/multipart"
	"net"
	"net/http"
	"net/url"
	"strconv"
	"strings"
	"time"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
	apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/encryption"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/pkg/retryer"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/pkg/service/tree"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	"git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
	"git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
	"git.frostfs.info/TrueCloudLab/policy-engine/schema/s3"
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/retry"
	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
	"go.uber.org/zap"
)

type postPolicy struct {
	Expiration time.Time          `json:"expiration"`
	Conditions []*policyCondition `json:"conditions"`
	empty      bool
}

func (p *postPolicy) condition(key string) *policyCondition {
	for _, condition := range p.Conditions {
		if condition.Key == key {
			return condition
		}
	}
	return nil
}

func (p *postPolicy) CheckContentLength(size uint64) bool {
	if p.empty {
		return true
	}
	for _, condition := range p.Conditions {
		if condition.Matching == "content-length-range" {
			// Compare the bounds numerically: a lexicographic string comparison
			// gives wrong results when the numbers have different digit counts.
			minSize, err1 := strconv.ParseUint(condition.Key, 10, 64)
			maxSize, err2 := strconv.ParseUint(condition.Value, 10, 64)
			if err1 != nil || err2 != nil {
				return false
			}
			return minSize <= size && size <= maxSize
		}
	}
	return true
}

func (p *policyCondition) match(value string) bool {
	switch p.Matching {
	case "eq":
		p.Matched = p.Value == value
	case "starts-with":
		if p.Key == api.ContentType {
			p.Matched = true
			for _, contentType := range strings.Split(value, ",") {
				if !strings.HasPrefix(contentType, p.Value) {
					p.Matched = false
				}
			}
		} else {
			p.Matched = strings.HasPrefix(value, p.Value)
		}
	}
	return p.Matched
}

func (p *postPolicy) CheckField(key string, value string) error {
	if p.empty {
		return nil
	}
	cond := p.condition(key)
	if cond == nil {
		return apierr.GetAPIError(apierr.ErrPostPolicyConditionInvalidFormat)
	}
	if !cond.match(value) {
		return apierr.GetAPIError(apierr.ErrPostPolicyConditionInvalidFormat)
	}
	return nil
}

func (p *postPolicy) AllConditionMatched() bool {
	for _, condition := range p.Conditions {
		if !condition.Matched {
			return false
		}
	}
	return true
}

type policyCondition struct {
	Matching string
	Key      string
	Value    string
	Matched  bool
}

var errInvalidCondition = fmt.Errorf("invalid condition")

func (p *policyCondition) UnmarshalJSON(data []byte) error {
	var (
		ok bool
		v  interface{}
	)
	if err := json.Unmarshal(data, &v); err != nil {
		return fmt.Errorf("unmarshal policy condition: %w", err)
	}

	switch v := v.(type) {
	case []interface{}:
		if len(v) != 3 {
			return errInvalidCondition
		}
		if p.Matching, ok = v[0].(string); !ok {
			return errInvalidCondition
		}

		if p.Matching == "content-length-range" {
			min, ok := v[1].(float64)
			max, ok2 := v[2].(float64)
			if !ok || !ok2 {
				return errInvalidCondition
			}
			p.Key = strconv.FormatFloat(min, 'f', 0, 32)
			p.Value = strconv.FormatFloat(max, 'f', 0, 32)
		} else {
			key, ok2 := v[1].(string)
			p.Value, ok = v[2].(string)
			if !ok || !ok2 {
				return errInvalidCondition
			}
			p.Key = strings.ToLower(strings.TrimPrefix(key, "$"))
		}
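	// A condition may also be given in object form ({"field": "value"}),
	// which is shorthand for an exact-match ("eq") condition on that field.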
	case map[string]interface{}:
		p.Matching = "eq"
		for key, val := range v {
			p.Key = strings.ToLower(key)
			if p.Value, ok = val.(string); !ok {
				return errInvalidCondition
			}
		}
	default:
		return fmt.Errorf("unknown condition type")
	}

	return nil
}

// keywords of predefined basic ACL values.
const (
	basicACLPrivate  = "private"
	basicACLReadOnly = "public-read"
	basicACLPublic   = "public-read-write"
)

type createBucketParams struct {
	XMLName            xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CreateBucketConfiguration" json:"-"`
	LocationConstraint string
}

func (h *handler) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
	var (
		err             error
		cannedACLStatus = aclHeadersStatus(r)
		ctx             = r.Context()
		reqInfo         = middleware.GetReqInfo(ctx)
	)

	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
	if err != nil {
		h.logAndSendError(w, "could not get bucket objInfo", reqInfo, err)
		return
	}

	settings, err := h.obj.GetBucketSettings(ctx, bktInfo)
	if err != nil {
		h.logAndSendError(w, "could not get bucket settings", reqInfo, err)
		return
	}

	if cannedACLStatus == aclStatusYes {
		h.logAndSendError(w, "acl not supported for this bucket", reqInfo, apierr.GetAPIError(apierr.ErrAccessControlListNotSupported))
		return
	}

	tagSet, err := parseTaggingHeader(r.Header)
	if err != nil {
		h.logAndSendError(w, "could not parse tagging header", reqInfo, err)
		return
	}

	metadata := parseMetadata(r)
	if contentType := r.Header.Get(api.ContentType); len(contentType) > 0 {
		metadata[api.ContentType] = contentType
	}
	if cacheControl := r.Header.Get(api.CacheControl); len(cacheControl) > 0 {
		metadata[api.CacheControl] = cacheControl
	}
	if expires := r.Header.Get(api.Expires); len(expires) > 0 {
		metadata[api.Expires] = expires
	}
	if contentLanguage := r.Header.Get(api.ContentLanguage); len(contentLanguage) > 0 {
		metadata[api.ContentLanguage] = contentLanguage
	}

	encryptionParams, err := formEncryptionParams(r)
	if err != nil {
		h.logAndSendError(w, "invalid sse headers", reqInfo, err)
		return
	}

	body, err := h.getBodyReader(r)
	if err != nil {
		h.logAndSendError(w, "failed to get body reader", reqInfo, err)
		return
	}
	if encodings := r.Header.Get(api.ContentEncoding); len(encodings) > 0 {
		metadata[api.ContentEncoding] = encodings
	}

	size := h.getPutPayloadSize(r)

	params := &layer.PutObjectParams{
		BktInfo:           bktInfo,
		Object:            reqInfo.ObjectName,
		Reader:            body,
		Header:            metadata,
		Encryption:        encryptionParams,
		ContentMD5:        r.Header.Get(api.ContentMD5),
		ContentSHA256Hash: r.Header.Get(api.AmzContentSha256),
	}
	if size > 0 {
		params.Size = &size
	}

	params.CopiesNumbers, err = h.pickCopiesNumbers(metadata, reqInfo.Namespace, bktInfo.LocationConstraint)
	if err != nil {
		h.logAndSendError(w, "invalid copies number", reqInfo, err)
		return
	}

	params.Lock, err = formObjectLock(ctx, bktInfo, settings.LockConfiguration, r.Header)
	if err != nil {
		h.logAndSendError(w, "could not form object lock", reqInfo, err)
		return
	}

	extendedObjInfo, err := h.obj.PutObject(ctx, params)
	if err != nil {
		_, err2 := io.Copy(io.Discard, body)
		err3 := body.Close()
		h.logAndSendError(w, "could not upload object", reqInfo, err, zap.Errors("body close errors", []error{err2, err3}))
		return
	}
	objInfo := extendedObjInfo.ObjectInfo

	if tagSet != nil {
		tagPrm := &data.PutObjectTaggingParams{
			ObjectVersion: &data.ObjectVersion{
				BktInfo:    bktInfo,
				ObjectName: objInfo.Name,
				VersionID:  objInfo.VersionID(),
			},
			TagSet:      tagSet,
			NodeVersion: extendedObjInfo.NodeVersion,
		}
		if err = h.obj.PutObjectTagging(r.Context(), tagPrm); err != nil {
			h.logAndSendError(w, "could not upload object tagging", reqInfo, err)
			return
		}
	}

	if settings.VersioningEnabled() {
		w.Header().Set(api.AmzVersionID, objInfo.VersionID())
	}

	if encryptionParams.Enabled() {
		addSSECHeaders(w.Header(), r.Header)
	}

	w.Header().Set(api.ETag, data.Quote(objInfo.ETag(h.cfg.MD5Enabled())))

	if err = middleware.WriteSuccessResponseHeadersOnly(w); err != nil {
		h.logAndSendError(w, "write response", reqInfo, err)
		return
	}
}
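
// getBodyReader returns the raw request body or, for SigV4 signed streaming
// (aws-chunked) payloads, validates the related headers and wraps the body in
// a reader that decodes the aws-chunked framing.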
func (h *handler) getBodyReader(r *http.Request) (io.ReadCloser, error) {
	if !api.IsSignedStreamingV4(r) {
		return r.Body, nil
	}

	encodings := r.Header.Values(api.ContentEncoding)
	var chunkedEncoding bool
	resultContentEncoding := make([]string, 0, len(encodings))
	for _, enc := range encodings {
		for _, e := range strings.Split(enc, ",") {
			e = strings.TrimSpace(e)
			if e == api.AwsChunked { // probably we should also check position of this header value
				chunkedEncoding = true
			} else {
				resultContentEncoding = append(resultContentEncoding, e)
			}
		}
	}
	r.Header.Set(api.ContentEncoding, strings.Join(resultContentEncoding, ","))

	if !chunkedEncoding && !h.cfg.BypassContentEncodingInChunks() {
		return nil, fmt.Errorf("%w: request is not chunk encoded, encodings '%s'",
			apierr.GetAPIError(apierr.ErrInvalidEncodingMethod), strings.Join(encodings, ","))
	}

	decodeContentSize := r.Header.Get(api.AmzDecodedContentLength)
	if len(decodeContentSize) == 0 {
		return nil, apierr.GetAPIError(apierr.ErrMissingContentLength)
	}

	if _, err := strconv.Atoi(decodeContentSize); err != nil {
		return nil, fmt.Errorf("%w: parse decoded content length: %s", apierr.GetAPIError(apierr.ErrMissingContentLength), err.Error())
	}

	chunkReader, err := newSignV4ChunkedReader(r)
	if err != nil {
		return nil, fmt.Errorf("initialize chunk reader: %w", err)
	}

	return chunkReader, nil
}

func formEncryptionParams(r *http.Request) (enc encryption.Params, err error) {
	return formEncryptionParamsBase(r, false)
}

func formCopySourceEncryptionParams(r *http.Request) (enc encryption.Params, err error) {
	return formEncryptionParamsBase(r, true)
}

func formEncryptionParamsBase(r *http.Request, isCopySource bool) (enc encryption.Params, err error) {
	var sseCustomerAlgorithm, sseCustomerKey, sseCustomerKeyMD5 string
	if isCopySource {
		sseCustomerAlgorithm = r.Header.Get(api.AmzCopySourceServerSideEncryptionCustomerAlgorithm)
		sseCustomerKey = r.Header.Get(api.AmzCopySourceServerSideEncryptionCustomerKey)
		sseCustomerKeyMD5 = r.Header.Get(api.AmzCopySourceServerSideEncryptionCustomerKeyMD5)
	} else {
		sseCustomerAlgorithm = r.Header.Get(api.AmzServerSideEncryptionCustomerAlgorithm)
		sseCustomerKey = r.Header.Get(api.AmzServerSideEncryptionCustomerKey)
		sseCustomerKeyMD5 = r.Header.Get(api.AmzServerSideEncryptionCustomerKeyMD5)
	}

	if len(sseCustomerAlgorithm) == 0 && len(sseCustomerKey) == 0 && len(sseCustomerKeyMD5) == 0 {
		return
	}

	if r.TLS == nil {
		return enc, apierr.GetAPIError(apierr.ErrInsecureSSECustomerRequest)
	}

	if len(sseCustomerKey) > 0 && len(sseCustomerAlgorithm) == 0 {
		return enc, apierr.GetAPIError(apierr.ErrMissingSSECustomerAlgorithm)
	}
	if len(sseCustomerAlgorithm) > 0 && len(sseCustomerKey) == 0 {
		return enc, apierr.GetAPIError(apierr.ErrMissingSSECustomerKey)
	}

	if sseCustomerAlgorithm != layer.AESEncryptionAlgorithm {
		return enc, apierr.GetAPIError(apierr.ErrInvalidEncryptionAlgorithm)
	}

	key, err := base64.StdEncoding.DecodeString(sseCustomerKey)
	if err != nil {
		if isCopySource {
			return enc, apierr.GetAPIError(apierr.ErrInvalidSSECustomerParameters)
		}
		return enc, apierr.GetAPIError(apierr.ErrInvalidSSECustomerKey)
	}

	if len(key) != layer.AESKeySize {
		if isCopySource {
			return enc, apierr.GetAPIError(apierr.ErrInvalidSSECustomerParameters)
		}
		return enc, apierr.GetAPIError(apierr.ErrInvalidSSECustomerKey)
	}

	keyMD5, err := base64.StdEncoding.DecodeString(sseCustomerKeyMD5)
	if err != nil {
		return enc, apierr.GetAPIError(apierr.ErrSSECustomerKeyMD5Mismatch)
	}

	md5Sum := md5.Sum(key)
	if !bytes.Equal(md5Sum[:], keyMD5) {
		return enc, apierr.GetAPIError(apierr.ErrSSECustomerKeyMD5Mismatch)
	}

	params, err := encryption.NewParams(key)
	if err == nil {
		enc = *params
	}

	return enc, err
}

func (h *handler) PostObject(w http.ResponseWriter, r *http.Request) {
	var (
		tagSet   map[string]string
		ctx      = r.Context()
		reqInfo  = middleware.GetReqInfo(ctx)
		metadata = make(map[string]string)
	)

	policy, err := checkPostPolicy(r, reqInfo, metadata)
	if err != nil {
		h.logAndSendError(w, "failed check policy", reqInfo, err)
		return
	}

	if tagging := auth.MultipartFormValue(r, "tagging"); tagging != "" {
		buffer := bytes.NewBufferString(tagging)
		tags := new(data.Tagging)
		if err = h.cfg.NewXMLDecoder(buffer).Decode(tags); err != nil {
			h.logAndSendError(w, "could not decode tag set", reqInfo,
				fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrMalformedXML), err.Error()))
			return
		}
		tagSet, err = h.readTagSet(tags)
		if err != nil {
			h.logAndSendError(w, "could not read tag set", reqInfo, err)
			return
		}
	}

	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
	if err != nil {
		h.logAndSendError(w, "could not get bucket objInfo", reqInfo, err)
		return
	}

	settings, err := h.obj.GetBucketSettings(ctx, bktInfo)
	if err != nil {
		h.logAndSendError(w, "could not get bucket settings", reqInfo, err)
		return
	}

	if acl := auth.MultipartFormValue(r, "acl"); acl != "" && acl != basicACLPrivate {
		h.logAndSendError(w, "acl not supported for this bucket", reqInfo, apierr.GetAPIError(apierr.ErrAccessControlListNotSupported))
		return
	}

	reqInfo.ObjectName = auth.MultipartFormValue(r, "key")

	var contentReader io.Reader
	var size uint64
	var filename string

	if content, ok := r.MultipartForm.Value["file"]; ok {
		fullContent := strings.Join(content, "")
		contentReader = bytes.NewBufferString(fullContent)
		size = uint64(len(fullContent))

		if reqInfo.ObjectName == "" || strings.Contains(reqInfo.ObjectName, "${filename}") {
			_, head, err := r.FormFile("file")
			if err != nil {
				h.logAndSendError(w, "could not parse file field", reqInfo, err)
				return
			}
			filename = head.Filename
		}
	} else {
		var head *multipart.FileHeader
		contentReader, head, err = r.FormFile("file")
		if err != nil {
			h.logAndSendError(w, "could not parse file field", reqInfo, err)
			return
		}
		size = uint64(head.Size)
		filename = head.Filename
	}

	if reqInfo.ObjectName == "" {
		reqInfo.ObjectName = filename
	} else {
		reqInfo.ObjectName = strings.ReplaceAll(reqInfo.ObjectName, "${filename}", filename)
	}

	if reqInfo.ObjectName == "" {
		h.logAndSendError(w, "missing object name", reqInfo, apierr.GetAPIError(apierr.ErrInvalidArgument))
		return
	}

	if !policy.CheckContentLength(size) {
		h.logAndSendError(w, "invalid content-length", reqInfo, apierr.GetAPIError(apierr.ErrInvalidArgument))
		return
	}

	params := &layer.PutObjectParams{
		BktInfo: bktInfo,
		Object:  reqInfo.ObjectName,
		Reader:  contentReader,
		Size:    &size,
		Header:  metadata,
	}

	extendedObjInfo, err := h.obj.PutObject(ctx, params)
	if err != nil {
		h.logAndSendError(w, "could not upload object", reqInfo, err)
		return
	}
	objInfo := extendedObjInfo.ObjectInfo

	if tagSet != nil {
		tagPrm := &data.PutObjectTaggingParams{
			ObjectVersion: &data.ObjectVersion{
				BktInfo:    bktInfo,
				ObjectName: objInfo.Name,
				VersionID:  objInfo.VersionID(),
			},
			NodeVersion: extendedObjInfo.NodeVersion,
		}

		if err = h.obj.PutObjectTagging(ctx, tagPrm); err != nil {
			h.logAndSendError(w, "could not upload object tagging", reqInfo, err)
			return
		}
	}

	if settings.VersioningEnabled() {
		w.Header().Set(api.AmzVersionID, objInfo.VersionID())
	}

	if redirectURL := auth.MultipartFormValue(r, "success_action_redirect"); redirectURL != "" {
		http.Redirect(w, r, redirectURL, http.StatusTemporaryRedirect)
		return
	}

	status := http.StatusNoContent
	if statusStr := auth.MultipartFormValue(r, "success_action_status"); statusStr != "" {
		switch statusStr {
		case "200":
			status = http.StatusOK
		case "201":
			status = http.StatusCreated
			resp := &PostResponse{
				Bucket: objInfo.Bucket,
				Key:    objInfo.Name,
				ETag:   data.Quote(objInfo.ETag(h.cfg.MD5Enabled())),
			}
			w.WriteHeader(status)
			respData, err := middleware.EncodeResponse(resp)
			if err != nil {
				h.logAndSendError(w, "encode response", reqInfo, err)
			}
			if _, err = w.Write(respData); err != nil {
				h.logAndSendError(w, "something went wrong", reqInfo, err)
			}
			return
		}
	}

	w.Header().Set(api.ETag, data.Quote(objInfo.ETag(h.cfg.MD5Enabled())))
	w.WriteHeader(status)
}

func checkPostPolicy(r *http.Request, reqInfo *middleware.ReqInfo, metadata map[string]string) (*postPolicy, error) {
	policy := &postPolicy{empty: true}
	if policyStr := auth.MultipartFormValue(r, "policy"); policyStr != "" {
		policyData, err := base64.StdEncoding.DecodeString(policyStr)
		if err != nil {
			return nil, fmt.Errorf("could not decode policy: %w", err)
		}

		if err = json.Unmarshal(policyData, policy); err != nil {
			return nil, fmt.Errorf("could not unmarshal policy: %w", err)
		}

		if policy.Expiration.Before(time.Now()) {
			return nil, fmt.Errorf("policy is expired: %w", apierr.GetAPIError(apierr.ErrInvalidArgument))
		}
		policy.empty = false
	}

	if r.MultipartForm == nil {
		return nil, errors.New("empty multipart form")
	}

	for key, v := range r.MultipartForm.Value {
		if key == "file" || key == "policy" || key == "x-amz-signature" || strings.HasPrefix(key, "x-ignore-") {
			continue
		}

		if len(v) != 1 {
			return nil, fmt.Errorf("empty multipart value for key '%s'", key)
		}

		value := v[0]
		if err := policy.CheckField(key, value); err != nil {
			return nil, fmt.Errorf("'%s' form field doesn't match the policy: %w", key, err)
		}

		prefix := strings.ToLower(api.MetadataPrefix)
		if strings.HasPrefix(key, prefix) {
			metadata[strings.TrimPrefix(key, prefix)] = value
		}

		if key == "content-type" {
			metadata[api.ContentType] = value
		}
	}

	for _, cond := range policy.Conditions {
		if cond.Key == "bucket" {
			if !cond.match(reqInfo.BucketName) {
				return nil, apierr.GetAPIError(apierr.ErrPostPolicyConditionInvalidFormat)
			}
		}
	}

	return policy, nil
}

type aclStatus int

const (
	// aclStatusNo means no acl headers at all.
	aclStatusNo aclStatus = iota
	// aclStatusYesAPECompatible means that only X-Amz-Acl is present and equals private.
	aclStatusYesAPECompatible
	// aclStatusYes means any other acl headers configuration.
	aclStatusYes
)
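
// aclHeadersStatus reports whether the request carries no ACL headers, only
// the APE-compatible canned ACL (X-Amz-Acl: private), or any other ACL
// configuration.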
func aclHeadersStatus(r *http.Request) aclStatus {
	if r.Header.Get(api.AmzGrantRead) != "" || r.Header.Get(api.AmzGrantFullControl) != "" || r.Header.Get(api.AmzGrantWrite) != "" {
		return aclStatusYes
	}

	cannedACL := r.Header.Get(api.AmzACL)
	if cannedACL != "" {
		if cannedACL == basicACLPrivate {
			return aclStatusYesAPECompatible
		}
		return aclStatusYes
	}

	return aclStatusNo
}

func parseTaggingHeader(header http.Header) (map[string]string, error) {
	var tagSet map[string]string
	if tagging := header.Get(api.AmzTagging); len(tagging) > 0 {
		queries, err := url.ParseQuery(tagging)
		if err != nil {
			return nil, apierr.GetAPIError(apierr.ErrInvalidArgument)
		}
		if len(queries) > maxTags {
			return nil, apierr.GetAPIError(apierr.ErrInvalidTagsSizeExceed)
		}
		tagSet = make(map[string]string, len(queries))
		for k, v := range queries {
			tag := data.Tag{Key: k, Value: v[0]}
			if err = checkTag(tag); err != nil {
				return nil, err
			}
			tagSet[tag.Key] = tag.Value
		}
	}
	return tagSet, nil
}

func parseMetadata(r *http.Request) map[string]string {
	res := make(map[string]string)
	for k, v := range r.Header {
		if strings.HasPrefix(k, api.MetadataPrefix) {
			key := strings.ToLower(strings.TrimPrefix(k, api.MetadataPrefix))
			res[key] = v[0]
		}
	}
	return res
}

func parseCannedACL(header http.Header) (string, error) {
	acl := header.Get(api.AmzACL)
	if len(acl) == 0 {
		return basicACLPrivate, nil
	}

	if acl == basicACLPrivate || acl == basicACLPublic || acl == basicACLReadOnly {
		return acl, nil
	}

	return "", fmt.Errorf("unknown acl: %s", acl)
}

func (h *handler) CreateBucketHandler(w http.ResponseWriter, r *http.Request) {
	h.createBucketHandlerPolicy(w, r)
}

func (h *handler) parseCommonCreateBucketParams(reqInfo *middleware.ReqInfo, boxData *accessbox.Box, r *http.Request) (*keys.PublicKey, *layer.CreateBucketParams, error) {
	p := &layer.CreateBucketParams{
		Name:                     reqInfo.BucketName,
		Namespace:                reqInfo.Namespace,
		SessionContainerCreation: boxData.Gate.SessionTokenForPut(),
	}

	if p.SessionContainerCreation == nil {
		return nil, nil, fmt.Errorf("%w: couldn't find session token for put", apierr.GetAPIError(apierr.ErrAccessDenied))
	}

	if err := checkBucketName(reqInfo.BucketName); err != nil {
		return nil, nil, fmt.Errorf("invalid bucket name: %w", err)
	}

	key, err := getTokenIssuerKey(boxData)
	if err != nil {
		return nil, nil, fmt.Errorf("couldn't get bearer token signature key: %w", err)
	}

	createParams, err := h.parseLocationConstraint(r)
	if err != nil {
		return nil, nil, fmt.Errorf("could not parse location constraint: %w", err)
	}

	if err = h.setPlacementPolicy(p, reqInfo.Namespace, createParams.LocationConstraint, boxData.Policies); err != nil {
		return nil, nil, fmt.Errorf("couldn't set placement policy: %w", err)
	}

	p.ObjectLockEnabled = isLockEnabled(h.reqLogger(r.Context()), r.Header)

	return key, p, nil
}

func (h *handler) createBucketHandlerPolicy(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	reqInfo := middleware.GetReqInfo(ctx)

	boxData, err := middleware.GetBoxData(ctx)
	if err != nil {
		h.logAndSendError(w, "get access box from request", reqInfo, err)
		return
	}

	key, p, err := h.parseCommonCreateBucketParams(reqInfo, boxData, r)
	if err != nil {
		h.logAndSendError(w, "parse create bucket params", reqInfo, err)
		return
	}

	cannedACL, err := parseCannedACL(r.Header)
	if err != nil {
		h.logAndSendError(w, "could not parse canned ACL", reqInfo, err)
		return
	}

	bktInfo, err := h.obj.CreateBucket(ctx, p)
	if err != nil {
		h.logAndSendError(w, "could not create bucket", reqInfo, err)
		return
	}
	h.reqLogger(ctx).Info(logs.BucketIsCreated,
zap.Stringer("container_id", bktInfo.CID)) chains := bucketCannedACLToAPERules(cannedACL, reqInfo, bktInfo.CID) if err = h.ape.SaveACLChains(bktInfo.CID.EncodeToString(), chains); err != nil { cleanErr := h.cleanupBucketCreation(ctx, reqInfo, bktInfo, boxData, chains) h.logAndSendError(w, "failed to add morph rule chain", reqInfo, err, zap.NamedError("cleanup_error", cleanErr)) return } sp := &layer.PutSettingsParams{ BktInfo: bktInfo, Settings: &data.BucketSettings{ CannedACL: cannedACL, OwnerKey: key, Versioning: data.VersioningUnversioned, }, } if p.ObjectLockEnabled { sp.Settings.Versioning = data.VersioningEnabled } err = retryer.MakeWithRetry(ctx, func() error { return h.obj.PutBucketSettings(ctx, sp) }, h.putBucketSettingsRetryer()) if err != nil { cleanErr := h.cleanupBucketCreation(ctx, reqInfo, bktInfo, boxData, chains) h.logAndSendError(w, "couldn't save bucket settings", reqInfo, err, zap.String("container_id", bktInfo.CID.EncodeToString()), zap.NamedError("cleanup_error", cleanErr)) return } if err = middleware.WriteSuccessResponseHeadersOnly(w); err != nil { h.logAndSendError(w, "write response", reqInfo, err) return } } func (h *handler) cleanupBucketCreation(ctx context.Context, reqInfo *middleware.ReqInfo, bktInfo *data.BucketInfo, boxData *accessbox.Box, chains []*chain.Chain) error { prm := &layer.DeleteBucketParams{ BktInfo: bktInfo, SessionToken: boxData.Gate.SessionTokenForDelete(), } if err := h.obj.DeleteContainer(ctx, prm); err != nil { return err } chainIDs := make([]chain.ID, len(chains)) for i, c := range chains { chainIDs[i] = c.ID } if err := h.ape.DeleteBucketPolicy(reqInfo.Namespace, bktInfo.CID, chainIDs); err != nil { return fmt.Errorf("delete bucket acl policy: %w", err) } return nil } func (h *handler) putBucketSettingsRetryer() aws.RetryerV2 { return retry.NewStandard(func(options *retry.StandardOptions) { options.MaxAttempts = h.cfg.RetryMaxAttempts() options.MaxBackoff = h.cfg.RetryMaxBackoff() if h.cfg.RetryStrategy() == RetryStrategyExponential { options.Backoff = retry.NewExponentialJitterBackoff(options.MaxBackoff) } else { options.Backoff = retry.BackoffDelayerFunc(func(int, error) (time.Duration, error) { return options.MaxBackoff, nil }) } options.Retryables = []retry.IsErrorRetryable{retry.IsErrorRetryableFunc(func(err error) aws.Ternary { if errors.Is(err, tree.ErrNodeAccessDenied) { return aws.TrueTernary } return aws.FalseTernary })} }) } const s3ActionPrefix = "s3:" var ( // https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html writeACLBucketS3Actions = []string{ s3ActionPrefix + middleware.PutObjectOperation, s3ActionPrefix + middleware.PostObjectOperation, s3ActionPrefix + middleware.CopyObjectOperation, s3ActionPrefix + middleware.UploadPartOperation, s3ActionPrefix + middleware.UploadPartCopyOperation, s3ActionPrefix + middleware.CreateMultipartUploadOperation, s3ActionPrefix + middleware.CompleteMultipartUploadOperation, } readACLBucketS3Actions = []string{ s3ActionPrefix + middleware.HeadBucketOperation, s3ActionPrefix + middleware.GetBucketLocationOperation, s3ActionPrefix + middleware.ListObjectsV1Operation, s3ActionPrefix + middleware.ListObjectsV2Operation, s3ActionPrefix + middleware.ListBucketObjectVersionsOperation, s3ActionPrefix + middleware.ListMultipartUploadsOperation, } writeACLBucketNativeActions = []string{ native.MethodPutObject, } readACLBucketNativeActions = []string{ native.MethodGetContainer, native.MethodGetObject, native.MethodHeadObject, native.MethodSearchObject, native.MethodRangeObject, 
func bucketCannedACLToAPERules(cannedACL string, reqInfo *middleware.ReqInfo, cnrID cid.ID) []*chain.Chain {
	cnrIDStr := cnrID.EncodeToString()

	chains := []*chain.Chain{
		{
			ID:    getBucketCannedChainID(chain.S3, cnrID),
			Rules: []chain.Rule{},
		},
		{
			ID:    getBucketCannedChainID(chain.Ingress, cnrID),
			Rules: []chain.Rule{},
		},
	}

	switch cannedACL {
	case basicACLPrivate:
	case basicACLReadOnly:
		chains[0].Rules = append(chains[0].Rules, chain.Rule{
			Status:  chain.Allow,
			Actions: chain.Actions{Names: readACLBucketS3Actions},
			Resources: chain.Resources{Names: []string{
				fmt.Sprintf(s3.ResourceFormatS3Bucket, reqInfo.BucketName),
				fmt.Sprintf(s3.ResourceFormatS3BucketObjects, reqInfo.BucketName),
			}},
		})

		chains[1].Rules = append(chains[1].Rules, chain.Rule{
			Status:  chain.Allow,
			Actions: chain.Actions{Names: readACLBucketNativeActions},
			Resources: chain.Resources{Names: []string{
				fmt.Sprintf(native.ResourceFormatNamespaceContainer, reqInfo.Namespace, cnrIDStr),
				fmt.Sprintf(native.ResourceFormatNamespaceContainerObjects, reqInfo.Namespace, cnrIDStr),
			}},
		})
	case basicACLPublic:
		chains[0].Rules = append(chains[0].Rules, chain.Rule{
			Status:  chain.Allow,
			Actions: chain.Actions{Names: append(readACLBucketS3Actions, writeACLBucketS3Actions...)},
			Resources: chain.Resources{Names: []string{
				fmt.Sprintf(s3.ResourceFormatS3Bucket, reqInfo.BucketName),
				fmt.Sprintf(s3.ResourceFormatS3BucketObjects, reqInfo.BucketName),
			}},
		})

		chains[1].Rules = append(chains[1].Rules, chain.Rule{
			Status:  chain.Allow,
			Actions: chain.Actions{Names: append(readACLBucketNativeActions, writeACLBucketNativeActions...)},
			Resources: chain.Resources{Names: []string{
				fmt.Sprintf(native.ResourceFormatNamespaceContainer, reqInfo.Namespace, cnrIDStr),
				fmt.Sprintf(native.ResourceFormatNamespaceContainerObjects, reqInfo.Namespace, cnrIDStr),
			}},
		})
	default:
		panic("unknown canned acl") // this should never happen
	}

	return chains
}

func getBucketCannedChainID(prefix chain.Name, cnrID cid.ID) chain.ID {
	return chain.ID(string(prefix) + ":bktCanned" + string(cnrID[:]))
}

func (h handler) setPlacementPolicy(prm *layer.CreateBucketParams, namespace, locationConstraint string, userPolicies []*accessbox.ContainerPolicy) error {
	prm.Policy = h.cfg.DefaultPlacementPolicy(namespace)
	prm.LocationConstraint = locationConstraint

	if locationConstraint == "" {
		return nil
	}

	for _, placementPolicy := range userPolicies {
		if placementPolicy.LocationConstraint == locationConstraint {
			prm.Policy = placementPolicy.Policy
			return nil
		}
	}

	if policy, ok := h.cfg.PlacementPolicy(namespace, locationConstraint); ok {
		prm.Policy = policy
		return nil
	}

	return apierr.GetAPIError(apierr.ErrInvalidLocationConstraint)
}

func isLockEnabled(log *zap.Logger, header http.Header) bool {
	lockEnabledStr := header.Get(api.AmzBucketObjectLockEnabled)
	if len(lockEnabledStr) == 0 {
		return false
	}

	lockEnabled, err := strconv.ParseBool(lockEnabledStr)
	if err != nil {
		log.Warn(logs.InvalidBucketObjectLockEnabledHeader, zap.String("header", lockEnabledStr), zap.Error(err))
	}

	return lockEnabled
}

func checkBucketName(bucketName string) error {
	if len(bucketName) < 3 || len(bucketName) > 63 {
		return apierr.GetAPIError(apierr.ErrInvalidBucketName)
	}

	if strings.HasPrefix(bucketName, "xn--") || strings.HasSuffix(bucketName, "-s3alias") {
		return apierr.GetAPIError(apierr.ErrInvalidBucketName)
	}
	if net.ParseIP(bucketName) != nil {
		return apierr.GetAPIError(apierr.ErrInvalidBucketName)
	}

	labels := strings.Split(bucketName, ".")
	for _, label := range labels {
		if len(label) == 0 {
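			// an empty label means the name has a leading, trailing, or repeated dot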
			return apierr.GetAPIError(apierr.ErrInvalidBucketName)
		}
		for i, r := range label {
			if !isAlphaNum(r) && r != '-' {
				return apierr.GetAPIError(apierr.ErrInvalidBucketName)
			}
			if (i == 0 || i == len(label)-1) && r == '-' {
				return apierr.GetAPIError(apierr.ErrInvalidBucketName)
			}
		}
	}

	return nil
}

func isAlphaNum(char int32) bool {
	return 'a' <= char && char <= 'z' || '0' <= char && char <= '9'
}

func (h *handler) parseLocationConstraint(r *http.Request) (*createBucketParams, error) {
	if r.ContentLength == 0 {
		return new(createBucketParams), nil
	}

	params := new(createBucketParams)
	if err := h.cfg.NewXMLDecoder(r.Body).Decode(params); err != nil {
		return nil, fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrMalformedXML), err.Error())
	}

	return params, nil
}