[#42] Support expiration lifecycle
All checks were successful
Builds (1.20) (pull_request) Successful in 2m9s
Builds (1.21) (pull_request) Successful in 1m34s
Vulncheck (pull_request) Successful in 1m52s
Lint (pull_request) Successful in 4m11s
Tests (1.20) (pull_request) Successful in 2m37s
Tests (1.21) (pull_request) Successful in 2m17s
DCO (pull_request) Successful in 51s

Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
Marina Biryukova 2024-07-03 16:36:43 +03:00
parent f86b82351a
commit 0e1ab11a1b
25 changed files with 1260 additions and 61 deletions

api/cache/system.go

@ -88,6 +88,22 @@ func (o *SystemCache) GetCORS(key string) *data.CORSConfiguration {
return result
}
func (o *SystemCache) GetLifecycleConfiguration(key string) *data.LifecycleConfiguration {
entry, err := o.cache.Get(key)
if err != nil {
return nil
}
result, ok := entry.(*data.LifecycleConfiguration)
if !ok {
o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
zap.String("expected", fmt.Sprintf("%T", result)))
return nil
}
return result
}
func (o *SystemCache) GetSettings(key string) *data.BucketSettings {
entry, err := o.cache.Get(key)
if err != nil {
@ -133,6 +149,10 @@ func (o *SystemCache) PutCORS(key string, obj *data.CORSConfiguration) error {
return o.cache.Set(key, obj)
}
func (o *SystemCache) PutLifecycleConfiguration(key string, obj *data.LifecycleConfiguration) error {
return o.cache.Set(key, obj)
}
func (o *SystemCache) PutSettings(key string, settings *data.BucketSettings) error {
return o.cache.Set(key, settings)
}


@ -12,8 +12,9 @@ import (
)
const (
bktSettingsObject = ".s3-settings"
bktCORSConfigurationObject = ".s3-cors"
bktSettingsObject = ".s3-settings"
bktCORSConfigurationObject = ".s3-cors"
bktLifecycleConfigurationObject = ".s3-lifecycle"
VersioningUnversioned = "Unversioned"
VersioningEnabled = "Enabled"
@ -89,6 +90,10 @@ func (b *BucketInfo) SettingsObjectName() string { return bktSettingsObject }
// CORSObjectName returns a system name for a bucket CORS configuration file.
func (b *BucketInfo) CORSObjectName() string { return bktCORSConfigurationObject }
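// LifecycleConfigurationObjectName returns a system name for a bucket lifecycle configuration file.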
func (b *BucketInfo) LifecycleConfigurationObjectName() string {
return b.CID.EncodeToString() + bktLifecycleConfigurationObject
}
// VersionID returns object version from ObjectInfo.
func (o *ObjectInfo) VersionID() string { return o.ID.EncodeToString() }

api/data/lifecycle.go (new file)

@ -0,0 +1,54 @@
package data
import "encoding/xml"
const (
LifecycleStatusEnabled = "Enabled"
LifecycleStatusDisabled = "Disabled"
)
type (
LifecycleConfiguration struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LifecycleConfiguration" json:"-"`
Rules []LifecycleRule `xml:"Rule"`
}
LifecycleRule struct {
Status string `xml:"Status,omitempty"`
AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `xml:"AbortIncompleteMultipartUpload,omitempty"`
Expiration *LifecycleExpiration `xml:"Expiration,omitempty"`
Filter *LifecycleRuleFilter `xml:"Filter,omitempty"`
ID string `xml:"ID,omitempty"`
NonCurrentVersionExpiration *NonCurrentVersionExpiration `xml:"NoncurrentVersionExpiration,omitempty"`
}
AbortIncompleteMultipartUpload struct {
DaysAfterInitiation *int `xml:"DaysAfterInitiation,omitempty"`
}
LifecycleExpiration struct {
Date string `xml:"Date,omitempty"`
Days *int `xml:"Days,omitempty"`
ExpiredObjectDeleteMarker *bool `xml:"ExpiredObjectDeleteMarker,omitempty"`
}
LifecycleRuleFilter struct {
And *LifecycleRuleAndOperator `xml:"And,omitempty"`
ObjectSizeGreaterThan *uint64 `xml:"ObjectSizeGreaterThan,omitempty"`
ObjectSizeLessThan *uint64 `xml:"ObjectSizeLessThan,omitempty"`
Prefix string `xml:"Prefix,omitempty"`
Tag *Tag `xml:"Tag,omitempty"`
}
LifecycleRuleAndOperator struct {
ObjectSizeGreaterThan *uint64 `xml:"ObjectSizeGreaterThan,omitempty"`
ObjectSizeLessThan *uint64 `xml:"ObjectSizeLessThan,omitempty"`
Prefix string `xml:"Prefix,omitempty"`
Tags []Tag `xml:"Tag"`
}
NonCurrentVersionExpiration struct {
NewerNonCurrentVersions *int `xml:"NewerNoncurrentVersions,omitempty"`
NonCurrentDays *int `xml:"NoncurrentDays,omitempty"`
}
)
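
For reference, a minimal sketch (not part of this change) of how these types serialize into the S3 lifecycle XML; the import path and field names come from the diff above, while the rule contents are invented for illustration.

```go
package main

import (
	"encoding/xml"
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
)

func ptr[T any](t T) *T { return &t }

func main() {
	// One enabled rule: expire objects under "tmp/" after 30 days.
	cfg := data.LifecycleConfiguration{
		Rules: []data.LifecycleRule{{
			ID:         "expire-tmp",
			Status:     data.LifecycleStatusEnabled,
			Filter:     &data.LifecycleRuleFilter{Prefix: "tmp/"},
			Expiration: &data.LifecycleExpiration{Days: ptr(30)},
		}},
	}

	// Marshal into the XML body that PutBucketLifecycleConfiguration expects.
	out, err := xml.MarshalIndent(cfg, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```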


@ -165,13 +165,18 @@ func prepareHandlerContextBase(t *testing.T, cacheCfg *layer.CachesConfig) *hand
features := &layer.FeatureSettingsMock{}
res, err := tp.CreateContainer(context.Background(), layer.PrmContainerCreate{Name: ".bucket-lifecycles"})
require.NoError(t, err)
layerCfg := &layer.Config{
Cache: layer.NewCache(cacheCfg),
AnonKey: layer.AnonymousKey{Key: key},
Resolver: testResolver,
TreeService: treeMock,
Features: features,
GateOwner: owner,
Cache: layer.NewCache(cacheCfg),
AnonKey: layer.AnonymousKey{Key: key},
Resolver: testResolver,
TreeService: treeMock,
Features: features,
GateOwner: owner,
LifecycleCnrInfo: &data.BucketInfo{CID: res.ContainerID},
GateKey: key,
}
var pp netmap.PlacementPolicy

api/handler/lifecycle.go (new file)

@ -0,0 +1,235 @@
package handler
import (
"bytes"
"fmt"
"io"
"net/http"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
apiErr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
)
const (
maxRules = 1000
maxRuleIDLen = 255
maxNewerNoncurrentVersions = 100
)
func (h *handler) GetBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
reqInfo := middleware.GetReqInfo(ctx)
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil {
h.logAndSendError(w, "could not get bucket info", reqInfo, err)
return
}
cfg, err := h.obj.GetBucketLifecycleConfiguration(ctx, bktInfo)
if err != nil {
h.logAndSendError(w, "could not get bucket lifecycle configuration", reqInfo, err)
return
}
if err = middleware.EncodeToResponse(w, cfg); err != nil {
h.logAndSendError(w, "could not encode GetBucketLifecycle response", reqInfo, err)
return
}
}
func (h *handler) PutBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {
var buf bytes.Buffer
tee := io.TeeReader(r.Body, &buf)
ctx := r.Context()
reqInfo := middleware.GetReqInfo(ctx)
// Content-MD5 is required and must be set
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html
if _, ok := r.Header[api.ContentMD5]; !ok {
h.logAndSendError(w, "missing Content-MD5", reqInfo, apiErr.GetAPIError(apiErr.ErrMissingContentMD5))
return
}
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil {
h.logAndSendError(w, "could not get bucket info", reqInfo, err)
return
}
cfg := new(data.LifecycleConfiguration)
if err = h.cfg.NewXMLDecoder(tee).Decode(cfg); err != nil {
h.logAndSendError(w, "could not decode body", reqInfo, fmt.Errorf("%w: %s", apiErr.GetAPIError(apiErr.ErrMalformedXML), err.Error()))
return
}
if err = checkLifecycleConfiguration(cfg); err != nil {
h.logAndSendError(w, "invalid lifecycle configuration", reqInfo, fmt.Errorf("%w: %s", apiErr.GetAPIError(apiErr.ErrMalformedXML), err.Error()))
return
}
params := &layer.PutBucketLifecycleParams{
BktInfo: bktInfo,
LifecycleCfg: cfg,
LifecycleReader: &buf,
MD5Hash: r.Header.Get(api.ContentMD5),
}
params.CopiesNumbers, err = h.pickCopiesNumbers(parseMetadata(r), reqInfo.Namespace, bktInfo.LocationConstraint)
if err != nil {
h.logAndSendError(w, "invalid copies number", reqInfo, err)
return
}
if err = h.obj.PutBucketLifecycleConfiguration(ctx, params); err != nil {
h.logAndSendError(w, "could not put bucket lifecycle configuration", reqInfo, err)
return
}
}
func (h *handler) DeleteBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
reqInfo := middleware.GetReqInfo(ctx)
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil {
h.logAndSendError(w, "could not get bucket info", reqInfo, err)
return
}
if err = h.obj.DeleteBucketLifecycleConfiguration(ctx, bktInfo); err != nil {
h.logAndSendError(w, "could not delete bucket lifecycle configuration", reqInfo, err)
return
}
w.WriteHeader(http.StatusNoContent)
}
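// checkLifecycleConfiguration validates the configuration: at most maxRules rules, IDs no longer than maxRuleIDLen
// and unique when non-empty, a known status, at least one action per rule, positive day counts, a parsable
// expiration date and consistent combinations of actions, filters and tags.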
func checkLifecycleConfiguration(cfg *data.LifecycleConfiguration) error {
if len(cfg.Rules) > maxRules {
return fmt.Errorf("number of rules cannot be greater than %d", maxRules)
}
ids := make(map[string]struct{}, len(cfg.Rules))
for _, rule := range cfg.Rules {
if _, ok := ids[rule.ID]; ok && rule.ID != "" {
return fmt.Errorf("duplicate 'ID': %s", rule.ID)
}
ids[rule.ID] = struct{}{}
if len(rule.ID) > maxRuleIDLen {
return fmt.Errorf("'ID' value cannot be longer than %d characters", maxRuleIDLen)
}
if rule.Status != data.LifecycleStatusEnabled && rule.Status != data.LifecycleStatusDisabled {
return fmt.Errorf("invalid lifecycle status: %s", rule.Status)
}
if rule.AbortIncompleteMultipartUpload == nil && rule.Expiration == nil && rule.NonCurrentVersionExpiration == nil {
return fmt.Errorf("at least one action needs to be specified in a rule")
}
if rule.AbortIncompleteMultipartUpload != nil {
if rule.AbortIncompleteMultipartUpload.DaysAfterInitiation != nil &&
*rule.AbortIncompleteMultipartUpload.DaysAfterInitiation <= 0 {
return fmt.Errorf("days after initiation must be a positive integer: %d", *rule.AbortIncompleteMultipartUpload.DaysAfterInitiation)
}
if rule.Filter != nil && (rule.Filter.Tag != nil || (rule.Filter.And != nil && len(rule.Filter.And.Tags) > 0)) {
return fmt.Errorf("abort incomplete multipart upload cannot be specified with tags")
}
}
if rule.Expiration != nil {
if rule.Expiration.ExpiredObjectDeleteMarker != nil {
if rule.Expiration.Days != nil || rule.Expiration.Date != "" {
return fmt.Errorf("expired object delete marker cannot be specified with days or date")
}
if rule.Filter != nil && (rule.Filter.Tag != nil || (rule.Filter.And != nil && len(rule.Filter.And.Tags) > 0)) {
return fmt.Errorf("expired object delete marker cannot be specified with tags")
}
}
if rule.Expiration.Days != nil && *rule.Expiration.Days <= 0 {
return fmt.Errorf("expiration days must be a positive integer: %d", *rule.Expiration.Days)
}
if _, err := time.Parse("2006-01-02T15:04:05.000Z", rule.Expiration.Date); rule.Expiration.Date != "" && err != nil {
return fmt.Errorf("invalid value of expiration date: %s", rule.Expiration.Date)
}
}
if rule.NonCurrentVersionExpiration != nil {
if rule.NonCurrentVersionExpiration.NewerNonCurrentVersions != nil &&
(*rule.NonCurrentVersionExpiration.NewerNonCurrentVersions > maxNewerNoncurrentVersions ||
*rule.NonCurrentVersionExpiration.NewerNonCurrentVersions <= 0) {
return fmt.Errorf("invalid value of newer noncurrent versions: %d", *rule.NonCurrentVersionExpiration.NewerNonCurrentVersions)
}
if rule.NonCurrentVersionExpiration.NonCurrentDays != nil && *rule.NonCurrentVersionExpiration.NonCurrentDays <= 0 {
return fmt.Errorf("invalid value of noncurrent days: %d", *rule.NonCurrentVersionExpiration.NonCurrentDays)
}
}
if err := checkLifecycleRuleFilter(rule.Filter); err != nil {
return err
}
}
return nil
}
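// checkLifecycleRuleFilter ensures that at most one of the filter fields (And, ObjectSizeGreaterThan,
// ObjectSizeLessThan, Prefix, Tag) is set and that tags and the size range inside And are valid.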
func checkLifecycleRuleFilter(filter *data.LifecycleRuleFilter) error {
if filter == nil {
return nil
}
var fields int
if filter.And != nil {
fields++
for _, tag := range filter.And.Tags {
err := checkTag(tag)
if err != nil {
return err
}
}
if filter.And.ObjectSizeGreaterThan != nil && filter.And.ObjectSizeLessThan != nil &&
*filter.And.ObjectSizeLessThan <= *filter.And.ObjectSizeGreaterThan {
return fmt.Errorf("the maximum object size must be larger than the minimum object size")
}
}
if filter.ObjectSizeGreaterThan != nil {
fields++
}
if filter.ObjectSizeLessThan != nil {
fields++
}
if filter.Prefix != "" {
fields++
}
if filter.Tag != nil {
fields++
err := checkTag(*filter.Tag)
if err != nil {
return err
}
}
if fields > 1 {
return fmt.Errorf("filter cannot have more than one field")
}
return nil
}


@ -0,0 +1,457 @@
package handler
import (
"crypto/md5"
"crypto/rand"
"encoding/base64"
"encoding/xml"
"io"
"net/http"
"net/http/httptest"
"strconv"
"testing"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
apiErrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
"github.com/mr-tron/base58"
"github.com/stretchr/testify/require"
)
func TestPutBucketLifecycleConfiguration(t *testing.T) {
hc := prepareHandlerContextWithMinCache(t)
bktName := "bucket-lifecycle"
createBucket(hc, bktName)
for _, tc := range []struct {
name string
body *data.LifecycleConfiguration
error bool
}{
{
name: "correct configuration",
body: &data.LifecycleConfiguration{
XMLName: xml.Name{
Space: `http://s3.amazonaws.com/doc/2006-03-01/`,
Local: "LifecycleConfiguration",
},
Rules: []data.LifecycleRule{
{
Status: data.LifecycleStatusEnabled,
Expiration: &data.LifecycleExpiration{
Days: ptr(21),
Date: time.Now().Format("2006-01-02T15:04:05.000Z"),
},
Filter: &data.LifecycleRuleFilter{
And: &data.LifecycleRuleAndOperator{
Prefix: "prefix/",
Tags: []data.Tag{{Key: "key", Value: "value"}, {Key: "tag", Value: ""}},
ObjectSizeGreaterThan: ptr(uint64(100)),
},
},
},
{
Status: data.LifecycleStatusEnabled,
AbortIncompleteMultipartUpload: &data.AbortIncompleteMultipartUpload{
DaysAfterInitiation: ptr(14),
},
Expiration: &data.LifecycleExpiration{
ExpiredObjectDeleteMarker: ptr(true),
},
Filter: &data.LifecycleRuleFilter{
ObjectSizeLessThan: ptr(uint64(100)),
},
NonCurrentVersionExpiration: &data.NonCurrentVersionExpiration{
NewerNonCurrentVersions: ptr(1),
NonCurrentDays: ptr(21),
},
},
},
},
},
{
name: "too many rules",
body: func() *data.LifecycleConfiguration {
lifecycle := new(data.LifecycleConfiguration)
for i := 0; i <= maxRules; i++ {
lifecycle.Rules = append(lifecycle.Rules, data.LifecycleRule{
ID: "Rule" + strconv.Itoa(i),
})
}
return lifecycle
}(),
error: true,
},
{
name: "duplicate rule ID",
body: &data.LifecycleConfiguration{
Rules: []data.LifecycleRule{
{
ID: "Rule",
Status: data.LifecycleStatusEnabled,
Expiration: &data.LifecycleExpiration{
Days: ptr(21),
},
},
{
ID: "Rule",
Status: data.LifecycleStatusEnabled,
Expiration: &data.LifecycleExpiration{
Days: ptr(21),
},
},
},
},
error: true,
},
{
name: "too long rule ID",
body: func() *data.LifecycleConfiguration {
id := make([]byte, maxRuleIDLen+1)
_, err := io.ReadFull(rand.Reader, id)
require.NoError(t, err)
return &data.LifecycleConfiguration{
Rules: []data.LifecycleRule{
{
ID: base58.Encode(id)[:maxRuleIDLen+1],
},
},
}
}(),
error: true,
},
{
name: "invalid status",
body: &data.LifecycleConfiguration{
Rules: []data.LifecycleRule{
{
Status: "invalid",
},
},
},
error: true,
},
{
name: "no actions",
body: &data.LifecycleConfiguration{
Rules: []data.LifecycleRule{
{
Status: data.LifecycleStatusEnabled,
Filter: &data.LifecycleRuleFilter{
Prefix: "prefix/",
},
},
},
},
error: true,
},
{
name: "invalid days after initiation",
body: &data.LifecycleConfiguration{
Rules: []data.LifecycleRule{
{
Status: data.LifecycleStatusEnabled,
AbortIncompleteMultipartUpload: &data.AbortIncompleteMultipartUpload{
DaysAfterInitiation: ptr(0),
},
},
},
},
error: true,
},
{
name: "invalid expired object delete marker declaration",
body: &data.LifecycleConfiguration{
Rules: []data.LifecycleRule{
{
Status: data.LifecycleStatusEnabled,
Expiration: &data.LifecycleExpiration{
Days: ptr(21),
ExpiredObjectDeleteMarker: ptr(false),
},
},
},
},
error: true,
},
{
name: "invalid expiration days",
body: &data.LifecycleConfiguration{
Rules: []data.LifecycleRule{
{
Status: data.LifecycleStatusEnabled,
Expiration: &data.LifecycleExpiration{
Days: ptr(0),
},
},
},
},
error: true,
},
{
name: "invalid expiration date",
body: &data.LifecycleConfiguration{
Rules: []data.LifecycleRule{
{
Status: data.LifecycleStatusEnabled,
Expiration: &data.LifecycleExpiration{
Date: "invalid",
},
},
},
},
error: true,
},
{
name: "newer noncurrent versions is too small",
body: &data.LifecycleConfiguration{
Rules: []data.LifecycleRule{
{
Status: data.LifecycleStatusEnabled,
NonCurrentVersionExpiration: &data.NonCurrentVersionExpiration{
NewerNonCurrentVersions: ptr(0),
},
},
},
},
error: true,
},
{
name: "newer noncurrent versions is too large",
body: &data.LifecycleConfiguration{
Rules: []data.LifecycleRule{
{
Status: data.LifecycleStatusEnabled,
NonCurrentVersionExpiration: &data.NonCurrentVersionExpiration{
NewerNonCurrentVersions: ptr(101),
},
},
},
},
error: true,
},
{
name: "invalid noncurrent days",
body: &data.LifecycleConfiguration{
Rules: []data.LifecycleRule{
{
Status: data.LifecycleStatusEnabled,
NonCurrentVersionExpiration: &data.NonCurrentVersionExpiration{
NonCurrentDays: ptr(0),
},
},
},
},
error: true,
},
{
name: "more than one filter field",
body: &data.LifecycleConfiguration{
Rules: []data.LifecycleRule{
{
Status: data.LifecycleStatusEnabled,
Expiration: &data.LifecycleExpiration{
Days: ptr(21),
},
Filter: &data.LifecycleRuleFilter{
Prefix: "prefix/",
ObjectSizeGreaterThan: ptr(uint64(100)),
},
},
},
},
error: true,
},
{
name: "invalid tag in filter",
body: &data.LifecycleConfiguration{
Rules: []data.LifecycleRule{
{
Status: data.LifecycleStatusEnabled,
Expiration: &data.LifecycleExpiration{
Days: ptr(21),
},
Filter: &data.LifecycleRuleFilter{
Tag: &data.Tag{},
},
},
},
},
error: true,
},
{
name: "abort incomplete multipart upload with tag",
body: &data.LifecycleConfiguration{
Rules: []data.LifecycleRule{
{
Status: data.LifecycleStatusEnabled,
AbortIncompleteMultipartUpload: &data.AbortIncompleteMultipartUpload{
DaysAfterInitiation: ptr(14),
},
Filter: &data.LifecycleRuleFilter{
Tag: &data.Tag{Key: "key", Value: "value"},
},
},
},
},
error: true,
},
{
name: "expired object delete marker with tag",
body: &data.LifecycleConfiguration{
Rules: []data.LifecycleRule{
{
Status: data.LifecycleStatusEnabled,
Expiration: &data.LifecycleExpiration{
ExpiredObjectDeleteMarker: ptr(true),
},
Filter: &data.LifecycleRuleFilter{
And: &data.LifecycleRuleAndOperator{
Tags: []data.Tag{{Key: "key", Value: "value"}},
},
},
},
},
},
error: true,
},
{
name: "invalid size range",
body: &data.LifecycleConfiguration{
Rules: []data.LifecycleRule{
{
Status: data.LifecycleStatusEnabled,
Expiration: &data.LifecycleExpiration{
Days: ptr(21),
},
Filter: &data.LifecycleRuleFilter{
And: &data.LifecycleRuleAndOperator{
ObjectSizeGreaterThan: ptr(uint64(100)),
ObjectSizeLessThan: ptr(uint64(100)),
},
},
},
},
},
error: true,
},
} {
t.Run(tc.name, func(t *testing.T) {
if tc.error {
putBucketLifecycleConfigurationErr(hc, bktName, tc.body, apiErrors.GetAPIError(apiErrors.ErrMalformedXML))
return
}
putBucketLifecycleConfiguration(hc, bktName, tc.body)
cfg := getBucketLifecycleConfiguration(hc, bktName)
require.Equal(t, *tc.body, *cfg)
deleteBucketLifecycleConfiguration(hc, bktName)
getBucketLifecycleConfigurationErr(hc, bktName, apiErrors.GetAPIError(apiErrors.ErrNoSuchLifecycleConfiguration))
})
}
}
func TestPutBucketLifecycleInvalidMD5(t *testing.T) {
hc := prepareHandlerContext(t)
bktName := "bucket-lifecycle-md5"
createBucket(hc, bktName)
lifecycle := &data.LifecycleConfiguration{
Rules: []data.LifecycleRule{
{
Status: data.LifecycleStatusEnabled,
Expiration: &data.LifecycleExpiration{
Days: ptr(21),
},
},
},
}
w, r := prepareTestRequest(hc, bktName, "", lifecycle)
hc.Handler().PutBucketLifecycleHandler(w, r)
assertS3Error(hc.t, w, apiErrors.GetAPIError(apiErrors.ErrMissingContentMD5))
w, r = prepareTestRequest(hc, bktName, "", lifecycle)
r.Header.Set(api.ContentMD5, "")
hc.Handler().PutBucketLifecycleHandler(w, r)
assertS3Error(hc.t, w, apiErrors.GetAPIError(apiErrors.ErrInvalidDigest))
w, r = prepareTestRequest(hc, bktName, "", lifecycle)
r.Header.Set(api.ContentMD5, "some-hash")
hc.Handler().PutBucketLifecycleHandler(w, r)
assertS3Error(hc.t, w, apiErrors.GetAPIError(apiErrors.ErrInvalidDigest))
}
func TestPutBucketLifecycleInvalidXML(t *testing.T) {
hc := prepareHandlerContext(t)
bktName := "bucket-lifecycle-invalid-xml"
createBucket(hc, bktName)
w, r := prepareTestRequest(hc, bktName, "", &data.CORSConfiguration{})
r.Header.Set(api.ContentMD5, "")
hc.Handler().PutBucketLifecycleHandler(w, r)
assertS3Error(hc.t, w, apiErrors.GetAPIError(apiErrors.ErrMalformedXML))
}
func putBucketLifecycleConfiguration(hc *handlerContext, bktName string, cfg *data.LifecycleConfiguration) {
w := putBucketLifecycleConfigurationBase(hc, bktName, cfg)
assertStatus(hc.t, w, http.StatusOK)
}
func putBucketLifecycleConfigurationErr(hc *handlerContext, bktName string, cfg *data.LifecycleConfiguration, err apiErrors.Error) {
w := putBucketLifecycleConfigurationBase(hc, bktName, cfg)
assertS3Error(hc.t, w, err)
}
func putBucketLifecycleConfigurationBase(hc *handlerContext, bktName string, cfg *data.LifecycleConfiguration) *httptest.ResponseRecorder {
w, r := prepareTestRequest(hc, bktName, "", cfg)
rawBody, err := xml.Marshal(cfg)
require.NoError(hc.t, err)
hash := md5.New()
hash.Write(rawBody)
r.Header.Set(api.ContentMD5, base64.StdEncoding.EncodeToString(hash.Sum(nil)))
hc.Handler().PutBucketLifecycleHandler(w, r)
return w
}
func getBucketLifecycleConfiguration(hc *handlerContext, bktName string) *data.LifecycleConfiguration {
w := getBucketLifecycleConfigurationBase(hc, bktName)
assertStatus(hc.t, w, http.StatusOK)
res := &data.LifecycleConfiguration{}
parseTestResponse(hc.t, w, res)
return res
}
func getBucketLifecycleConfigurationErr(hc *handlerContext, bktName string, err apiErrors.Error) {
w := getBucketLifecycleConfigurationBase(hc, bktName)
assertS3Error(hc.t, w, err)
}
func getBucketLifecycleConfigurationBase(hc *handlerContext, bktName string) *httptest.ResponseRecorder {
w, r := prepareTestRequest(hc, bktName, "", nil)
hc.Handler().GetBucketLifecycleHandler(w, r)
return w
}
func deleteBucketLifecycleConfiguration(hc *handlerContext, bktName string) {
w := deleteBucketLifecycleConfigurationBase(hc, bktName)
assertStatus(hc.t, w, http.StatusNoContent)
}
func deleteBucketLifecycleConfigurationBase(hc *handlerContext, bktName string) *httptest.ResponseRecorder {
w, r := prepareTestRequest(hc, bktName, "", nil)
hc.Handler().DeleteBucketLifecycleHandler(w, r)
return w
}
func ptr[T any](t T) *T {
return &t
}


@ -7,10 +7,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
)
func (h *handler) DeleteBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {
h.logAndSendError(w, "not supported", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotSupported))
}
func (h *handler) DeleteBucketEncryptionHandler(w http.ResponseWriter, r *http.Request) {
h.logAndSendError(w, "not supported", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotSupported))
}


@ -11,10 +11,6 @@ func (h *handler) SelectObjectContentHandler(w http.ResponseWriter, r *http.Requ
h.logAndSendError(w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
}
func (h *handler) GetBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {
h.logAndSendError(w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
}
func (h *handler) GetBucketEncryptionHandler(w http.ResponseWriter, r *http.Request) {
h.logAndSendError(w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
}
@ -51,10 +47,6 @@ func (h *handler) ListObjectsV2MHandler(w http.ResponseWriter, r *http.Request)
h.logAndSendError(w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
}
func (h *handler) PutBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {
h.logAndSendError(w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
}
func (h *handler) PutBucketEncryptionHandler(w http.ResponseWriter, r *http.Request) {
h.logAndSendError(w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
}


@ -257,3 +257,29 @@ func (c *Cache) PutCORS(owner user.ID, bkt *data.BucketInfo, cors *data.CORSConf
func (c *Cache) DeleteCORS(bktInfo *data.BucketInfo) {
c.systemCache.Delete(bktInfo.Name + bktInfo.CORSObjectName())
}
func (c *Cache) GetLifecycleConfiguration(owner user.ID, bkt *data.BucketInfo) *data.LifecycleConfiguration {
key := bkt.LifecycleConfigurationObjectName()
if !c.accessCache.Get(owner, key) {
return nil
}
return c.systemCache.GetLifecycleConfiguration(key)
}
func (c *Cache) PutLifecycleConfiguration(owner user.ID, bkt *data.BucketInfo, cfg *data.LifecycleConfiguration) {
key := bkt.LifecycleConfigurationObjectName()
if err := c.systemCache.PutLifecycleConfiguration(key, cfg); err != nil {
c.logger.Warn(logs.CouldntCacheLifecycleConfiguration, zap.String("bucket", bkt.Name), zap.Error(err))
}
if err := c.accessCache.Put(owner, key); err != nil {
c.logger.Warn(logs.CouldntCacheAccessControlOperation, zap.Error(err))
}
}
func (c *Cache) DeleteLifecycleConfiguration(bktInfo *data.BucketInfo) {
c.systemCache.Delete(bktInfo.LifecycleConfigurationObjectName())
}


@ -43,24 +43,28 @@ type (
}
Layer struct {
frostFS FrostFS
gateOwner user.ID
log *zap.Logger
anonKey AnonymousKey
resolver BucketResolver
cache *Cache
treeService TreeService
features FeatureSettings
frostFS FrostFS
gateOwner user.ID
log *zap.Logger
anonKey AnonymousKey
resolver BucketResolver
cache *Cache
treeService TreeService
features FeatureSettings
lifecycleCnrInfo *data.BucketInfo
gateKey *keys.PrivateKey
}
Config struct {
GateOwner user.ID
ChainAddress string
Cache *Cache
AnonKey AnonymousKey
Resolver BucketResolver
TreeService TreeService
Features FeatureSettings
GateOwner user.ID
ChainAddress string
Cache *Cache
AnonKey AnonymousKey
Resolver BucketResolver
TreeService TreeService
Features FeatureSettings
LifecycleCnrInfo *data.BucketInfo
GateKey *keys.PrivateKey
}
// AnonymousKey contains data for anonymous requests.
@ -225,14 +229,16 @@ func (p HeadObjectParams) Versioned() bool {
// and establishes gRPC connection with the node.
func NewLayer(log *zap.Logger, frostFS FrostFS, config *Config) *Layer {
return &Layer{
frostFS: frostFS,
log: log,
gateOwner: config.GateOwner,
anonKey: config.AnonKey,
resolver: config.Resolver,
cache: config.Cache,
treeService: config.TreeService,
features: config.Features,
frostFS: frostFS,
log: log,
gateOwner: config.GateOwner,
anonKey: config.AnonKey,
resolver: config.Resolver,
cache: config.Cache,
treeService: config.TreeService,
features: config.Features,
lifecycleCnrInfo: config.LifecycleCnrInfo,
gateKey: config.GateKey,
}
}
@ -285,6 +291,10 @@ func (n *Layer) reqLogger(ctx context.Context) *zap.Logger {
}
func (n *Layer) prepareAuthParameters(ctx context.Context, prm *PrmAuth, bktOwner user.ID) {
if prm.BearerToken != nil || prm.PrivateKey != nil {
return
}
if bd, err := middleware.GetBoxData(ctx); err == nil && bd.Gate.BearerToken != nil {
if bd.Gate.BearerToken.Impersonate() || bktOwner.Equals(bearer.ResolveIssuer(*bd.Gate.BearerToken)) {
prm.BearerToken = bd.Gate.BearerToken

api/layer/lifecycle.go (new file)

@ -0,0 +1,152 @@
package layer
import (
"bytes"
"context"
"encoding/base64"
"encoding/xml"
"errors"
"fmt"
"io"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
apiErr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.uber.org/zap"
)
type PutBucketLifecycleParams struct {
BktInfo *data.BucketInfo
LifecycleCfg *data.LifecycleConfiguration
LifecycleReader io.Reader
CopiesNumbers []uint32
MD5Hash string
}
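// PutBucketLifecycleConfiguration stores the lifecycle configuration object in the bucket container or, if a
// dedicated lifecycle container is configured, in that container using the gate key. It verifies the provided
// Content-MD5 hash, updates the tree service and caches the parsed configuration.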
func (n *Layer) PutBucketLifecycleConfiguration(ctx context.Context, p *PutBucketLifecycleParams) error {
prm := PrmObjectCreate{
Payload: p.LifecycleReader,
Filepath: p.BktInfo.LifecycleConfigurationObjectName(),
CreationTime: TimeNow(ctx),
}
var lifecycleBkt *data.BucketInfo
if n.lifecycleCnrInfo == nil {
lifecycleBkt = p.BktInfo
prm.CopiesNumber = p.CopiesNumbers
} else {
lifecycleBkt = n.lifecycleCnrInfo
prm.PrmAuth.PrivateKey = &n.gateKey.PrivateKey
}
prm.Container = lifecycleBkt.CID
_, objID, _, md5, err := n.objectPutAndHash(ctx, prm, lifecycleBkt)
if err != nil {
return err
}
hashBytes, err := base64.StdEncoding.DecodeString(p.MD5Hash)
if err != nil {
return apiErr.GetAPIError(apiErr.ErrInvalidDigest)
}
if !bytes.Equal(hashBytes, md5) {
n.deleteLifecycleObject(ctx, lifecycleBkt, objID)
return apiErr.GetAPIError(apiErr.ErrInvalidDigest)
}
objIDToDelete, err := n.treeService.PutBucketLifecycleConfiguration(ctx, p.BktInfo, objID)
objIDToDeleteNotFound := errors.Is(err, ErrNoNodeToRemove)
if err != nil && !objIDToDeleteNotFound {
return err
}
if !objIDToDeleteNotFound {
n.deleteLifecycleObject(ctx, lifecycleBkt, objIDToDelete)
}
n.cache.PutLifecycleConfiguration(n.BearerOwner(ctx), p.BktInfo, p.LifecycleCfg)
return nil
}
// deleteLifecycleObject removes the lifecycle configuration object and logs the error if removal fails.
func (n *Layer) deleteLifecycleObject(ctx context.Context, lifecycleBkt *data.BucketInfo, objID oid.ID) {
var err error
if n.lifecycleCnrInfo == nil {
err = n.objectDelete(ctx, lifecycleBkt, objID)
} else {
err = n.objectDeleteWithAuth(ctx, lifecycleBkt, objID, PrmAuth{PrivateKey: &n.gateKey.PrivateKey})
}
if err != nil {
n.reqLogger(ctx).Error(logs.CouldntDeleteLifecycleObject, zap.Error(err),
zap.String("cid", lifecycleBkt.CID.EncodeToString()),
zap.String("oid", objID.EncodeToString()))
}
}
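// GetBucketLifecycleConfiguration returns the bucket lifecycle configuration from the cache or, on a miss,
// reads and unmarshals the stored configuration object.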
func (n *Layer) GetBucketLifecycleConfiguration(ctx context.Context, bktInfo *data.BucketInfo) (*data.LifecycleConfiguration, error) {
owner := n.BearerOwner(ctx)
if cfg := n.cache.GetLifecycleConfiguration(owner, bktInfo); cfg != nil {
return cfg, nil
}
objID, err := n.treeService.GetBucketLifecycleConfiguration(ctx, bktInfo)
objIDNotFound := errors.Is(err, ErrNodeNotFound)
if err != nil && !objIDNotFound {
return nil, err
}
if objIDNotFound {
return nil, fmt.Errorf("%w: %s", apiErr.GetAPIError(apiErr.ErrNoSuchLifecycleConfiguration), err.Error())
}
var obj *object.Object
if n.lifecycleCnrInfo == nil {
if obj, err = n.objectGet(ctx, bktInfo, objID); err != nil {
return nil, err
}
} else {
if obj, err = n.objectGetWithAuth(ctx, n.lifecycleCnrInfo, objID, PrmAuth{PrivateKey: &n.gateKey.PrivateKey}); err != nil {
return nil, err
}
}
lifecycleCfg := &data.LifecycleConfiguration{}
if err = xml.Unmarshal(obj.Payload(), &lifecycleCfg); err != nil {
return nil, fmt.Errorf("unmarshal lifecycle configuration: %w", err)
}
n.cache.PutLifecycleConfiguration(owner, bktInfo, lifecycleCfg)
return lifecycleCfg, nil
}
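// DeleteBucketLifecycleConfiguration removes the lifecycle configuration object and its tree node, if present,
// and invalidates the cached configuration.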
func (n *Layer) DeleteBucketLifecycleConfiguration(ctx context.Context, bktInfo *data.BucketInfo) error {
objID, err := n.treeService.DeleteBucketLifecycleConfiguration(ctx, bktInfo)
objIDNotFound := errors.Is(err, ErrNoNodeToRemove)
if err != nil && !objIDNotFound {
return err
}
if !objIDNotFound {
if n.lifecycleCnrInfo == nil {
if err = n.objectDelete(ctx, bktInfo, objID); err != nil {
return err
}
} else {
if err = n.objectDeleteWithAuth(ctx, n.lifecycleCnrInfo, objID, PrmAuth{PrivateKey: &n.gateKey.PrivateKey}); err != nil {
return err
}
}
}
n.cache.DeleteLifecycleConfiguration(bktInfo)
return nil
}


@ -0,0 +1,65 @@
package layer
import (
"bytes"
"crypto/md5"
"encoding/base64"
"encoding/xml"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
apiErr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
frostfsErrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/errors"
"github.com/stretchr/testify/require"
)
func TestBucketLifecycle(t *testing.T) {
tc := prepareContext(t)
lifecycle := &data.LifecycleConfiguration{
XMLName: xml.Name{
Space: `http://s3.amazonaws.com/doc/2006-03-01/`,
Local: "LifecycleConfiguration",
},
Rules: []data.LifecycleRule{
{
Status: data.LifecycleStatusEnabled,
Expiration: &data.LifecycleExpiration{
Days: ptr(21),
},
},
},
}
lifecycleBytes, err := xml.Marshal(lifecycle)
require.NoError(t, err)
hash := md5.New()
hash.Write(lifecycleBytes)
_, err = tc.layer.GetBucketLifecycleConfiguration(tc.ctx, tc.bktInfo)
require.Equal(t, apiErr.GetAPIError(apiErr.ErrNoSuchLifecycleConfiguration), frostfsErrors.UnwrapErr(err))
err = tc.layer.DeleteBucketLifecycleConfiguration(tc.ctx, tc.bktInfo)
require.NoError(t, err)
err = tc.layer.PutBucketLifecycleConfiguration(tc.ctx, &PutBucketLifecycleParams{
BktInfo: tc.bktInfo,
LifecycleCfg: lifecycle,
LifecycleReader: bytes.NewReader(lifecycleBytes),
MD5Hash: base64.StdEncoding.EncodeToString(hash.Sum(nil)),
})
require.NoError(t, err)
cfg, err := tc.layer.GetBucketLifecycleConfiguration(tc.ctx, tc.bktInfo)
require.NoError(t, err)
require.Equal(t, *lifecycle, *cfg)
err = tc.layer.DeleteBucketLifecycleConfiguration(tc.ctx, tc.bktInfo)
require.NoError(t, err)
_, err = tc.layer.GetBucketLifecycleConfiguration(tc.ctx, tc.bktInfo)
require.Equal(t, apiErr.GetAPIError(apiErr.ErrNoSuchLifecycleConfiguration), frostfsErrors.UnwrapErr(err))
}
func ptr[T any](t T) *T {
return &t
}


@ -151,7 +151,17 @@ func (n *Layer) initFrostFSObjectPayloadReader(ctx context.Context, p getFrostFS
// objectGet returns an object with payload in the object.
func (n *Layer) objectGet(ctx context.Context, bktInfo *data.BucketInfo, objID oid.ID) (*object.Object, error) {
return n.objectGetBase(ctx, bktInfo, objID, PrmAuth{})
}
// objectGetWithAuth returns an object with payload in the object. Uses provided PrmAuth.
func (n *Layer) objectGetWithAuth(ctx context.Context, bktInfo *data.BucketInfo, objID oid.ID, auth PrmAuth) (*object.Object, error) {
return n.objectGetBase(ctx, bktInfo, objID, auth)
}
func (n *Layer) objectGetBase(ctx context.Context, bktInfo *data.BucketInfo, objID oid.ID, auth PrmAuth) (*object.Object, error) {
prm := PrmObjectRead{
PrmAuth: auth,
Container: bktInfo.CID,
Object: objID,
WithHeader: true,
@ -460,7 +470,17 @@ func (n *Layer) headVersion(ctx context.Context, bkt *data.BucketInfo, p *HeadOb
// objectDelete puts tombstone object into frostfs.
func (n *Layer) objectDelete(ctx context.Context, bktInfo *data.BucketInfo, idObj oid.ID) error {
return n.objectDeleteBase(ctx, bktInfo, idObj, PrmAuth{})
}
// objectDeleteWithAuth puts tombstone object into frostfs. Uses provided PrmAuth.
func (n *Layer) objectDeleteWithAuth(ctx context.Context, bktInfo *data.BucketInfo, idObj oid.ID, auth PrmAuth) error {
return n.objectDeleteBase(ctx, bktInfo, idObj, auth)
}
func (n *Layer) objectDeleteBase(ctx context.Context, bktInfo *data.BucketInfo, idObj oid.ID, auth PrmAuth) error {
prm := PrmObjectDelete{
PrmAuth: auth,
Container: bktInfo.CID,
Object: idObj,
}


@ -392,6 +392,51 @@ LOOP:
return result, nil
}
func (t *TreeServiceMock) PutBucketLifecycleConfiguration(_ context.Context, bktInfo *data.BucketInfo, objID oid.ID) (oid.ID, error) {
systemMap, ok := t.system[bktInfo.CID.EncodeToString()]
if !ok {
systemMap = make(map[string]*data.BaseNodeVersion)
}
systemMap["lifecycle"] = &data.BaseNodeVersion{
OID: objID,
}
t.system[bktInfo.CID.EncodeToString()] = systemMap
return oid.ID{}, ErrNoNodeToRemove
}
func (t *TreeServiceMock) GetBucketLifecycleConfiguration(_ context.Context, bktInfo *data.BucketInfo) (oid.ID, error) {
systemMap, ok := t.system[bktInfo.CID.EncodeToString()]
if !ok {
return oid.ID{}, ErrNodeNotFound
}
node, ok := systemMap["lifecycle"]
if !ok {
return oid.ID{}, ErrNodeNotFound
}
return node.OID, nil
}
func (t *TreeServiceMock) DeleteBucketLifecycleConfiguration(_ context.Context, bktInfo *data.BucketInfo) (oid.ID, error) {
systemMap, ok := t.system[bktInfo.CID.EncodeToString()]
if !ok {
return oid.ID{}, ErrNoNodeToRemove
}
node, ok := systemMap["lifecycle"]
if !ok {
return oid.ID{}, ErrNoNodeToRemove
}
delete(systemMap, "lifecycle")
return node.OID, nil
}
func (t *TreeServiceMock) DeleteMultipartUpload(_ context.Context, bktInfo *data.BucketInfo, multipartInfo *data.MultipartInfo) error {
cnrMultipartsMap := t.multiparts[bktInfo.CID.EncodeToString()]


@ -63,6 +63,10 @@ type TreeService interface {
AddPart(ctx context.Context, bktInfo *data.BucketInfo, multipartNodeID uint64, info *data.PartInfo) (oldObjIDToDelete oid.ID, err error)
GetParts(ctx context.Context, bktInfo *data.BucketInfo, multipartNodeID uint64) ([]*data.PartInfo, error)
PutBucketLifecycleConfiguration(ctx context.Context, bktInfo *data.BucketInfo, objID oid.ID) (oldObjIDToDelete oid.ID, err error)
GetBucketLifecycleConfiguration(ctx context.Context, bktInfo *data.BucketInfo) (oid.ID, error)
DeleteBucketLifecycleConfiguration(ctx context.Context, bktInfo *data.BucketInfo) (oid.ID, error)
// Compound methods for optimizations
// GetObjectTaggingAndLock unifies GetObjectTagging and GetLock methods in single tree service invocation.


@ -168,11 +168,13 @@ func prepareContext(t *testing.T, cachesConfig ...*CachesConfig) *testContext {
user.IDFromKey(&owner, key.PrivateKey.PublicKey)
layerCfg := &Config{
Cache: NewCache(config),
AnonKey: AnonymousKey{Key: key},
TreeService: NewTreeService(),
Features: &FeatureSettingsMock{},
GateOwner: owner,
Cache: NewCache(config),
AnonKey: AnonymousKey{Key: key},
TreeService: NewTreeService(),
Features: &FeatureSettingsMock{},
GateOwner: owner,
LifecycleCnrInfo: &data.BucketInfo{CID: res.ContainerID},
GateKey: key,
}
return &testContext{


@ -356,7 +356,7 @@ func bucketRouter(h Handler, log *zap.Logger) chi.Router {
Handler(named(s3middleware.DeleteBucketPolicyOperation, h.DeleteBucketPolicyHandler))).
Add(NewFilter().
Queries(s3middleware.LifecycleQuery).
Handler(named(s3middleware.PutBucketLifecycleOperation, h.PutBucketLifecycleHandler))).
Handler(named(s3middleware.DeleteBucketLifecycleOperation, h.DeleteBucketLifecycleHandler))).
Add(NewFilter().
Queries(s3middleware.EncryptionQuery).
Handler(named(s3middleware.DeleteBucketEncryptionOperation, h.DeleteBucketEncryptionHandler))).


@ -21,6 +21,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/cache"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/handler"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
s3middleware "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
@ -37,6 +38,8 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/wallet"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/metrics"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/pkg/service/tree"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
treepool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree"
@ -153,13 +156,13 @@ func (a *App) init(ctx context.Context) {
a.setRuntimeParameters()
a.initFrostfsID(ctx)
a.initPolicyStorage(ctx)
a.initAPI()
a.initAPI(ctx)
a.initMetrics()
a.initServers(ctx)
a.initTracing(ctx)
}
func (a *App) initLayer() {
func (a *App) initLayer(ctx context.Context) {
a.initResolver()
// prepare random key for anonymous requests
@ -171,15 +174,25 @@ func (a *App) initLayer() {
var gateOwner user.ID
user.IDFromKey(&gateOwner, a.key.PrivateKey.PublicKey)
var lifecycleCnrInfo *data.BucketInfo
if a.cfg.IsSet(cfgContainersLifecycle) {
lifecycleCnrInfo, err = a.fetchContainerInfo(ctx, cfgContainersLifecycle)
if err != nil {
a.log.Fatal(logs.CouldNotFetchLifecycleContainerInfo, zap.Error(err))
}
}
layerCfg := &layer.Config{
Cache: layer.NewCache(getCacheOptions(a.cfg, a.log)),
AnonKey: layer.AnonymousKey{
Key: randomKey,
},
GateOwner: gateOwner,
Resolver: a.bucketResolver,
TreeService: tree.NewTree(services.NewPoolWrapper(a.treePool), a.log),
Features: a.settings,
GateOwner: gateOwner,
Resolver: a.bucketResolver,
TreeService: tree.NewTree(services.NewPoolWrapper(a.treePool), a.log),
Features: a.settings,
LifecycleCnrInfo: lifecycleCnrInfo,
GateKey: a.key,
}
// prepare object layer
@ -434,8 +447,8 @@ func (s *appSettings) RetryStrategy() handler.RetryStrategy {
return s.retryStrategy
}
func (a *App) initAPI() {
a.initLayer()
func (a *App) initAPI(ctx context.Context) {
a.initLayer(ctx)
a.initHandler()
}
@ -1034,3 +1047,32 @@ func (a *App) tryReconnect(ctx context.Context, sr *http.Server) bool {
return len(a.unbindServers) == 0
}
func (a *App) fetchContainerInfo(ctx context.Context, cfgKey string) (info *data.BucketInfo, err error) {
containerString := a.cfg.GetString(cfgKey)
var id cid.ID
if err = id.DecodeString(containerString); err != nil {
if id, err = a.bucketResolver.Resolve(ctx, containerString); err != nil {
return nil, fmt.Errorf("resolve container name %s: %w", containerString, err)
}
}
return getContainerInfo(ctx, id, a.pool)
}
func getContainerInfo(ctx context.Context, id cid.ID, frostFSPool *pool.Pool) (*data.BucketInfo, error) {
prm := pool.PrmContainerGet{
ContainerID: id,
}
res, err := frostFSPool.GetContainer(ctx, prm)
if err != nil {
return nil, err
}
return &data.BucketInfo{
CID: id,
HomomorphicHashDisabled: container.IsHomomorphicHashingDisabled(res),
}, nil
}


@ -176,6 +176,9 @@ const ( // Settings.
cfgSourceIPHeader = "source_ip_header"
// Containers.
cfgContainersLifecycle = "containers.lifecycle"
// Command line args.
cmdHelp = "help"
cmdVersion = "version"


@ -216,3 +216,5 @@ S3_GW_RETRY_MAX_BACKOFF=30s
# Backoff strategy. `exponential` and `constant` are allowed.
S3_GW_RETRY_STRATEGY=exponential
# Containers properties
S3_GW_CONTAINERS_LIFECYCLE=AZjLTXfK4vs4ovxMic2xEJKSymMNLqdwq9JT64ASFCRj


@ -252,3 +252,7 @@ retry:
max_backoff: 30s
# Backoff strategy. `exponential` and `constant` are allowed.
strategy: exponential
# Containers properties
containers:
lifecycle: AZjLTXfK4vs4ovxMic2xEJKSymMNLqdwq9JT64ASFCRj


@ -192,6 +192,7 @@ There are some custom types used for brevity:
| `proxy` | [Proxy contract configuration](#proxy-section) |
| `namespaces` | [Namespaces configuration](#namespaces-section) |
| `retry` | [Retry configuration](#retry-section) |
| `containers` | [Containers configuration](#containers-section) |
### General section
@ -708,3 +709,15 @@ retry:
| `max_backoff` | `duration` | yes | `30s` | Max delay before next attempt. |
| `strategy` | `string` | yes | `exponential` | Backoff strategy. `exponential` and `constant` are allowed. |
# `containers` section
Section for well-known containers used to store S3-related data and settings.
```yaml
containers:
lifecycle: AZjLTXfK4vs4ovxMic2xEJKSymMNLqdwq9JT64ASFCRj
```
| Parameter | Type | SIGHUP reload | Default value | Description |
|-------------|----------|---------------|---------------|-------------------------------------------------------------------------------------------|
| `lifecycle` | `string` | no | | Container ID (or a name that can be resolved) for storing lifecycle configurations. If not set, the bucket's own container is used. |

go.mod

@ -15,6 +15,7 @@ require (
github.com/go-chi/chi/v5 v5.0.8
github.com/google/uuid v1.3.1
github.com/minio/sio v0.3.0
github.com/mr-tron/base58 v1.2.0
github.com/nspcc-dev/neo-go v0.105.0
github.com/panjf2000/ants/v2 v2.5.0
github.com/prometheus/client_golang v1.15.1
@ -66,7 +67,6 @@ require (
github.com/mailru/easyjson v0.7.7 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/mr-tron/base58 v1.2.0 // indirect
github.com/nspcc-dev/go-ordered-json v0.0.0-20231123160306-3374ff1e7a3c // indirect
github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20231127165613-b35f351f0ba0 // indirect
github.com/nspcc-dev/rfc6979 v0.2.0 // indirect


@ -137,4 +137,7 @@ const (
CouldntCacheSubject = "couldn't cache subject info"
UserGroupsListIsEmpty = "user groups list is empty, subject not found"
CouldntCacheUserKey = "couldn't cache user key"
CouldntDeleteLifecycleObject = "couldn't delete lifecycle configuration object"
CouldntCacheLifecycleConfiguration = "couldn't cache lifecycle configuration"
CouldNotFetchLifecycleContainerInfo = "couldn't fetch lifecycle container info"
)


@ -107,9 +107,10 @@ const (
ownerKV = "Owner"
createdKV = "Created"
settingsFileName = "bucket-settings"
corsFilename = "bucket-cors"
bucketTaggingFilename = "bucket-tagging"
settingsFileName = "bucket-settings"
corsFilename = "bucket-cors"
bucketTaggingFilename = "bucket-tagging"
bucketLifecycleFilename = "bucket-lifecycle"
// versionTree -- ID of a tree with object versions.
versionTree = "version"
@ -1201,6 +1202,49 @@ func (c *Tree) GetParts(ctx context.Context, bktInfo *data.BucketInfo, multipart
return result, nil
}
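// PutBucketLifecycleConfiguration saves the OID of the new lifecycle configuration object in the system tree and
// returns the OID of the previously stored object, or ErrNoNodeToRemove if there was none.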
func (c *Tree) PutBucketLifecycleConfiguration(ctx context.Context, bktInfo *data.BucketInfo, objID oid.ID) (oid.ID, error) {
node, err := c.getSystemNode(ctx, bktInfo, []string{bucketLifecycleFilename}, []string{oidKV})
isErrNotFound := errors.Is(err, layer.ErrNodeNotFound)
if err != nil && !isErrNotFound {
return oid.ID{}, fmt.Errorf("couldn't get node: %w", err)
}
meta := make(map[string]string)
meta[FileNameKey] = bucketLifecycleFilename
meta[oidKV] = objID.EncodeToString()
if isErrNotFound {
if _, err = c.service.AddNode(ctx, bktInfo, systemTree, 0, meta); err != nil {
return oid.ID{}, err
}
return oid.ID{}, layer.ErrNoNodeToRemove
}
return node.ObjID, c.service.MoveNode(ctx, bktInfo, systemTree, node.ID, 0, meta)
}
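// GetBucketLifecycleConfiguration returns the OID of the lifecycle configuration object stored in the system tree.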
func (c *Tree) GetBucketLifecycleConfiguration(ctx context.Context, bktInfo *data.BucketInfo) (oid.ID, error) {
node, err := c.getSystemNode(ctx, bktInfo, []string{bucketLifecycleFilename}, []string{oidKV})
if err != nil {
return oid.ID{}, err
}
return node.ObjID, nil
}
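// DeleteBucketLifecycleConfiguration removes the lifecycle node from the system tree and returns the OID of the
// stored object, or ErrNoNodeToRemove if the configuration was not set.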
func (c *Tree) DeleteBucketLifecycleConfiguration(ctx context.Context, bktInfo *data.BucketInfo) (oid.ID, error) {
node, err := c.getSystemNode(ctx, bktInfo, []string{bucketLifecycleFilename}, []string{oidKV})
if err != nil && !errors.Is(err, layer.ErrNodeNotFound) {
return oid.ID{}, err
}
if node != nil {
return node.ObjID, c.service.RemoveNode(ctx, bktInfo, systemTree, node.ID)
}
return oid.ID{}, layer.ErrNoNodeToRemove
}
func (c *Tree) DeleteMultipartUpload(ctx context.Context, bktInfo *data.BucketInfo, multipartInfo *data.MultipartInfo) error {
err := c.service.RemoveNode(ctx, bktInfo, systemTree, multipartInfo.ID)
if err != nil {