feature/lifecycle_convert_date #516
9 changed files with 147 additions and 70 deletions
@@ -27,9 +27,10 @@ type (
 	}

 	LifecycleExpiration struct {
-		Date                      string `xml:"Date,omitempty"`
-		Days                      *int   `xml:"Days,omitempty"`
-		ExpiredObjectDeleteMarker *bool  `xml:"ExpiredObjectDeleteMarker,omitempty"`
+		Date                      string  `xml:"Date,omitempty"`
+		Days                      *int    `xml:"Days,omitempty"`
+		Epoch                     *uint64 `xml:"Epoch,omitempty"`
+		ExpiredObjectDeleteMarker *bool   `xml:"ExpiredObjectDeleteMarker,omitempty"`
 	}

 	LifecycleRuleFilter struct {
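A side note on how the new field behaves on the wire (a standalone sketch, not part of the diff; the struct below is a trimmed copy of the one above): encoding/xml drops a nil pointer tagged with omitempty entirely, so the converted epoch stays internal unless it is explicitly set, which is presumably why GetBucketLifecycleConfiguration (see the layer hunk further down) resets Epoch to nil before returning the configuration to S3 clients.

package main

import (
	"encoding/xml"
	"fmt"
)

// Trimmed copy of the LifecycleExpiration struct from the hunk above.
type LifecycleExpiration struct {
	Date  string  `xml:"Date,omitempty"`
	Epoch *uint64 `xml:"Epoch,omitempty"`
}

func main() {
	epoch := uint64(42)

	// Internally the gateway can keep the converted epoch alongside the date...
	withEpoch, _ := xml.Marshal(LifecycleExpiration{Date: "2024-01-01T00:00:00Z", Epoch: &epoch})
	fmt.Println(string(withEpoch)) // ...<Date>2024-01-01T00:00:00Z</Date><Epoch>42</Epoch>...

	// ...while a nil pointer is omitted, so a configuration with Epoch reset
	// to nil marshals to plain S3 lifecycle XML without the extra element.
	withoutEpoch, _ := xml.Marshal(LifecycleExpiration{Date: "2024-01-01T00:00:00Z"})
	fmt.Println(string(withoutEpoch)) // ...<Date>2024-01-01T00:00:00Z</Date>...
}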
@@ -1,9 +1,9 @@
 package handler

 import (
-	"bytes"
 	"context"
+	"encoding/base64"
 	"fmt"
 	"io"
 	"net/http"
 	"time"
@@ -12,6 +12,8 @@ import (
 	apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/util"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
 )

 const (
@@ -43,9 +45,6 @@ func (h *handler) GetBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {
 }

 func (h *handler) PutBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {
-	var buf bytes.Buffer
-
-	tee := io.TeeReader(r.Body, &buf)
 	ctx := r.Context()
 	reqInfo := middleware.GetReqInfo(ctx)

@@ -56,6 +55,11 @@ func (h *handler) PutBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {
 		return
 	}

+	if _, err := base64.StdEncoding.DecodeString(r.Header.Get(api.ContentMD5)); err != nil {
+		h.logAndSendError(w, "invalid Content-MD5", reqInfo, apierr.GetAPIError(apierr.ErrInvalidDigest))
+		return
+	}
+
 	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
 	if err != nil {
 		h.logAndSendError(w, "could not get bucket info", reqInfo, err)
@@ -63,21 +67,25 @@ func (h *handler) PutBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {
 	}

 	cfg := new(data.LifecycleConfiguration)
-	if err = h.cfg.NewXMLDecoder(tee).Decode(cfg); err != nil {
+	if err = h.cfg.NewXMLDecoder(r.Body).Decode(cfg); err != nil {
 		h.logAndSendError(w, "could not decode body", reqInfo, fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrMalformedXML), err.Error()))
 		return
 	}

-	if err = checkLifecycleConfiguration(cfg); err != nil {
+	networkInfo, err := h.obj.GetNetworkInfo(ctx)
+	if err != nil {
+		h.logAndSendError(w, "could not get network info", reqInfo, err)
+		return
+	}
+
+	if err = checkLifecycleConfiguration(ctx, cfg, &networkInfo); err != nil {
 		h.logAndSendError(w, "invalid lifecycle configuration", reqInfo, fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrMalformedXML), err.Error()))
 		return
 	}

 	params := &layer.PutBucketLifecycleParams{
-		BktInfo:         bktInfo,
-		LifecycleCfg:    cfg,
-		LifecycleReader: &buf,
-		MD5Hash:         r.Header.Get(api.ContentMD5),
+		BktInfo:      bktInfo,
+		LifecycleCfg: cfg,
 	}

 	params.CopiesNumbers, err = h.pickCopiesNumbers(parseMetadata(r), reqInfo.Namespace, bktInfo.LocationConstraint)
@@ -110,13 +118,15 @@ func (h *handler) DeleteBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {
 	w.WriteHeader(http.StatusNoContent)
 }

-func checkLifecycleConfiguration(cfg *data.LifecycleConfiguration) error {
+func checkLifecycleConfiguration(ctx context.Context, cfg *data.LifecycleConfiguration, ni *netmap.NetworkInfo) error {
+	now := layer.TimeNow(ctx)
+
 	if len(cfg.Rules) > maxRules {
 		return fmt.Errorf("number of rules cannot be greater than %d", maxRules)
 	}

 	ids := make(map[string]struct{}, len(cfg.Rules))
-	for _, rule := range cfg.Rules {
+	for i, rule := range cfg.Rules {
 		if _, ok := ids[rule.ID]; ok && rule.ID != "" {
 			return fmt.Errorf("duplicate 'ID': %s", rule.ID)
 		}
@@ -160,8 +170,18 @@ func checkLifecycleConfiguration(cfg *data.LifecycleConfiguration) error {
 				return fmt.Errorf("expiration days must be a positive integer: %d", *rule.Expiration.Days)
 			}

-			if _, err := time.Parse("2006-01-02T15:04:05Z", rule.Expiration.Date); rule.Expiration.Date != "" && err != nil {
-				return fmt.Errorf("invalid value of expiration date: %s", rule.Expiration.Date)
+			if rule.Expiration.Date != "" {
+				parsedTime, err := time.Parse("2006-01-02T15:04:05Z", rule.Expiration.Date)
+				if err != nil {
+					return fmt.Errorf("invalid value of expiration date: %s", rule.Expiration.Date)
+				}
+
+				epoch, err := util.TimeToEpoch(ni, now, parsedTime)
+				if err != nil {
+					return fmt.Errorf("convert time to epoch: %w", err)
+				}
+
+				cfg.Rules[i].Expiration.Epoch = &epoch
 			}
 		}

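Worth noting: the switch from for _, rule to for i, rule above is what lets the validator store the result via cfg.Rules[i].Expiration.Epoch = &epoch; assigning through the per-iteration rule value would only update a copy (assuming cfg.Rules holds values rather than pointers, which the indexed write suggests).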
@@ -376,11 +376,6 @@ func TestPutBucketLifecycleInvalidMD5(t *testing.T) {
 	hc.Handler().PutBucketLifecycleHandler(w, r)
 	assertS3Error(hc.t, w, apierr.GetAPIError(apierr.ErrMissingContentMD5))

-	w, r = prepareTestRequest(hc, bktName, "", lifecycle)
-	r.Header.Set(api.ContentMD5, "")
-	hc.Handler().PutBucketLifecycleHandler(w, r)
-	assertS3Error(hc.t, w, apierr.GetAPIError(apierr.ErrInvalidDigest))
-
 	w, r = prepareTestRequest(hc, bktName, "", lifecycle)
 	r.Header.Set(api.ContentMD5, "some-hash")
 	hc.Handler().PutBucketLifecycleHandler(w, r)
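A guess at why the empty-header case above was dropped (not stated in the PR, so treat it as an assumption): the Content-MD5 check now lives in the handler as a plain base64 validity check and the layer no longer compares the digest against the stored object, and since an empty string is valid base64, an empty header can no longer produce ErrInvalidDigest. A minimal sketch of the difference:

package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	// An empty Content-MD5 value decodes without error, so it passes the
	// handler's new validity check...
	_, err := base64.StdEncoding.DecodeString("")
	fmt.Println(err) // <nil>

	// ...while a value that is not valid base64 still fails it.
	_, err = base64.StdEncoding.DecodeString("some-hash")
	fmt.Println(err) // illegal base64 data at input byte 4
}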
@@ -413,6 +413,8 @@ func (t *TestFrostFS) SearchObjects(_ context.Context, prm frostfs.PrmObjectSearch)
 func (t *TestFrostFS) NetworkInfo(context.Context) (netmap.NetworkInfo, error) {
 	ni := netmap.NetworkInfo{}
 	ni.SetCurrentEpoch(t.currentEpoch)
+	ni.SetEpochDuration(60)
+	ni.SetMsPerBlock(1000)

 	return ni, nil
 }
@@ -3,11 +3,9 @@ package layer
 import (
 	"bytes"
 	"context"
-	"encoding/base64"
 	"encoding/xml"
 	"errors"
 	"fmt"
-	"io"

 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
 	apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
@@ -19,16 +17,19 @@ import (
 )

 type PutBucketLifecycleParams struct {
-	BktInfo         *data.BucketInfo
-	LifecycleCfg    *data.LifecycleConfiguration
-	LifecycleReader io.Reader
-	CopiesNumbers   []uint32
-	MD5Hash         string
+	BktInfo       *data.BucketInfo
+	LifecycleCfg  *data.LifecycleConfiguration
+	CopiesNumbers []uint32
 }

 func (n *Layer) PutBucketLifecycleConfiguration(ctx context.Context, p *PutBucketLifecycleParams) error {
+	cfgBytes, err := xml.Marshal(p.LifecycleCfg)
+	if err != nil {
+		return fmt.Errorf("marshal lifecycle configuration: %w", err)
+	}
+
 	prm := frostfs.PrmObjectCreate{
-		Payload:      p.LifecycleReader,
+		Payload:      bytes.NewReader(cfgBytes),
 		Filepath:     p.BktInfo.LifecycleConfigurationObjectName(),
 		CreationTime: TimeNow(ctx),
 	}
@@ -49,17 +50,6 @@ func (n *Layer) PutBucketLifecycleConfiguration(ctx context.Context, p *PutBucketLifecycleParams) error {
 		return fmt.Errorf("put lifecycle object: %w", err)
 	}

-	hashBytes, err := base64.StdEncoding.DecodeString(p.MD5Hash)
-	if err != nil {
-		return apierr.GetAPIError(apierr.ErrInvalidDigest)
-	}
-
-	if !bytes.Equal(hashBytes, createdObj.MD5Sum) {
-		n.deleteLifecycleObject(ctx, p.BktInfo, newAddress(lifecycleBkt.CID, createdObj.ID))
-
-		return apierr.GetAPIError(apierr.ErrInvalidDigest)
-	}
-
 	objsToDelete, err := n.treeService.PutBucketLifecycleConfiguration(ctx, p.BktInfo, newAddress(lifecycleBkt.CID, createdObj.ID))
 	objsToDeleteNotFound := errors.Is(err, tree.ErrNoNodeToRemove)
 	if err != nil && !objsToDeleteNotFound {
@@ -129,6 +119,12 @@ func (n *Layer) GetBucketLifecycleConfiguration(ctx context.Context, bktInfo *data.BucketInfo) (*data.LifecycleConfiguration, error) {

 	n.cache.PutLifecycleConfiguration(owner, bktInfo, lifecycleCfg)

+	for i := range lifecycleCfg.Rules {
+		if lifecycleCfg.Rules[i].Expiration != nil {
+			lifecycleCfg.Rules[i].Expiration.Epoch = nil
+		}
+	}
+
 	return lifecycleCfg, nil
 }

@@ -1,9 +1,7 @@
 package layer

 import (
-	"bytes"
 	"crypto/md5"
-	"encoding/base64"
 	"encoding/xml"
 	"testing"

@@ -42,10 +40,8 @@ func TestBucketLifecycle(t *testing.T) {
 	require.NoError(t, err)

 	err = tc.layer.PutBucketLifecycleConfiguration(tc.ctx, &PutBucketLifecycleParams{
-		BktInfo:         tc.bktInfo,
-		LifecycleCfg:    lifecycle,
-		LifecycleReader: bytes.NewReader(lifecycleBytes),
-		MD5Hash:         base64.StdEncoding.EncodeToString(hash.Sum(nil)),
+		BktInfo:      tc.bktInfo,
+		LifecycleCfg: lifecycle,
 	})
 	require.NoError(t, err)

@@ -5,13 +5,13 @@ import (
 	"errors"
 	"fmt"
 	"io"
-	"math"
 	"strconv"
 	"time"

 	objectv2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/frostfs"
 	frosterr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/errors"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/util"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
 	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
@@ -54,8 +54,7 @@ func NewFrostFS(p *pool.Pool, key *keys.PrivateKey) *FrostFS {

 // TimeToEpoch implements layer.FrostFS interface method.
 func (x *FrostFS) TimeToEpoch(ctx context.Context, now, futureTime time.Time) (uint64, uint64, error) {
-	dur := futureTime.Sub(now)
-	if dur < 0 {
+	if futureTime.Before(now) {
 		return 0, 0, fmt.Errorf("time '%s' must be in the future (after %s)",
 			futureTime.Format(time.RFC3339), now.Format(time.RFC3339))
 	}
@@ -65,27 +64,12 @@ func (x *FrostFS) TimeToEpoch(ctx context.Context, now, futureTime time.Time) (uint64, uint64, error) {
 		return 0, 0, handleObjectError("get network info via client", err)
 	}

-	durEpoch := networkInfo.EpochDuration()
-	if durEpoch == 0 {
-		return 0, 0, errors.New("epoch duration is missing or zero")
+	epoch, err := util.TimeToEpoch(&networkInfo, now, futureTime)
+	if err != nil {
+		return 0, 0, err
 	}

-	curr := networkInfo.CurrentEpoch()
-	msPerEpoch := durEpoch * uint64(networkInfo.MsPerBlock())
-
-	epochLifetime := uint64(dur.Milliseconds()) / msPerEpoch
-	if uint64(dur.Milliseconds())%msPerEpoch != 0 {
-		epochLifetime++
-	}
-
-	var epoch uint64
-	if epochLifetime >= math.MaxUint64-curr {
-		epoch = math.MaxUint64
-	} else {
-		epoch = curr + epochLifetime
-	}
-
-	return curr, epoch, nil
+	return networkInfo.CurrentEpoch(), epoch, nil
 }

 // Container implements layer.FrostFS interface method.
@@ -2,9 +2,12 @@ package util

 import (
 	"fmt"
+	"math"
 	"strings"
+	"time"

 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ns"
 	"github.com/nspcc-dev/neo-go/pkg/util"
 )
@@ -32,3 +35,39 @@ func ResolveContractHash(contractHash, rpcAddress string) (util.Uint160, error)

 	return nns.ResolveContractHash(domain)
 }
+
+func TimeToEpoch(ni *netmap.NetworkInfo, now, t time.Time) (uint64, error) {
+	duration := t.Sub(now)
+	durationAbs := duration.Abs()
+
+	durEpoch := ni.EpochDuration()
+	if durEpoch == 0 {
+		return 0, fmt.Errorf("epoch duration is missing or zero")
+	}
+
+	msPerEpoch := durEpoch * uint64(ni.MsPerBlock())
+	epochLifetime := uint64(durationAbs.Milliseconds()) / msPerEpoch
+
+	if uint64(durationAbs.Milliseconds())%msPerEpoch != 0 {
+		epochLifetime++
+	}
+
+	curr := ni.CurrentEpoch()
+
+	var epoch uint64
+	if duration > 0 {
+		if epochLifetime >= math.MaxUint64-curr {
+			epoch = math.MaxUint64
+		} else {
+			epoch = curr + epochLifetime
+		}
+	} else {
+		if epochLifetime >= curr {
+			epoch = 0
+		} else {
+			epoch = curr - epochLifetime
+		}
+	}
+
+	return epoch, nil
+}
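To make the rounding rules concrete, here is a hedged, standalone sketch (not part of the PR) that mirrors the arithmetic above with fixed inputs taken from the unit test below: current epoch 10, epoch duration 60 blocks, 1000 ms per block, so one epoch spans 60 seconds. Future times are rounded up to the next epoch boundary, past times are pushed back by at least one full epoch, and anything further back than the current epoch count clamps to epoch 0.

package main

import "fmt"

// Mirrors the TimeToEpoch arithmetic from the diff above with fixed inputs
// (assumed values matching the tests: epoch duration 60 blocks, 1000 ms per
// block, current epoch 10). The MaxUint64 overflow clamp is omitted here.
func main() {
	const (
		currentEpoch = uint64(10)
		msPerEpoch   = uint64(60 * 1000) // 60 blocks * 1000 ms/block
	)

	for _, offsetMs := range []int64{0, 30_000, 90_000, -30_000, -90_000, -600_000} {
		absMs := offsetMs
		if absMs < 0 {
			absMs = -absMs
		}

		// Ceil division: any partial epoch counts as a whole one.
		lifetime := uint64(absMs) / msPerEpoch
		if uint64(absMs)%msPerEpoch != 0 {
			lifetime++
		}

		var epoch uint64
		switch {
		case offsetMs > 0:
			epoch = currentEpoch + lifetime // future: move forward
		case lifetime >= currentEpoch:
			epoch = 0 // too far in the past: clamp to epoch 0
		default:
			epoch = currentEpoch - lifetime // past: move backward
		}

		fmt.Printf("offset %8d ms -> epoch %d\n", offsetMs, epoch)
	}
	// Prints epochs 10, 11, 12, 9, 8, 0 -- the same values the unit test below expects.
}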
internal/frostfs/util/util_test.go (new file, 44 lines)
@@ -0,0 +1,44 @@
+package util
+
+import (
+	"testing"
+	"time"
+
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
+	"github.com/stretchr/testify/require"
+)
+
+func TestTimeToEpoch(t *testing.T) {
+	ni := netmap.NetworkInfo{}
+	ni.SetCurrentEpoch(10)
+
+	_, err := TimeToEpoch(&ni, time.Now(), time.Now())
+	require.Error(t, err)
+
+	ni.SetEpochDuration(60)
+	ni.SetMsPerBlock(1000)
+
+	epoch, err := TimeToEpoch(&ni, time.Now(), time.Now())
+	require.NoError(t, err)
+	require.Equal(t, uint64(10), epoch)
+
+	epoch, err = TimeToEpoch(&ni, time.Now(), time.Now().Add(30*time.Second))
+	require.NoError(t, err)
+	require.Equal(t, uint64(11), epoch)
+
+	epoch, err = TimeToEpoch(&ni, time.Now(), time.Now().Add(90*time.Second))
+	require.NoError(t, err)
+	require.Equal(t, uint64(12), epoch)
+
+	epoch, err = TimeToEpoch(&ni, time.Now(), time.Now().Add(-30*time.Second))
+	require.NoError(t, err)
+	require.Equal(t, uint64(9), epoch)
+
+	epoch, err = TimeToEpoch(&ni, time.Now(), time.Now().Add(-90*time.Second))
+	require.NoError(t, err)
+	require.Equal(t, uint64(8), epoch)
+
+	epoch, err = TimeToEpoch(&ni, time.Now(), time.Now().Add(-10*time.Minute))
+	require.NoError(t, err)
+	require.Equal(t, uint64(0), epoch)
+}
Should we add a similar field (Epochs) to AbortIncompleteMultipartUpload and NonCurrentVersionExpiration?

I'm not sure this is necessary; the current date is not involved in the days conversion.

Probably it's better to use a duration in epochs rather than in days (abort a multipart not after 2 days but after N epochs from initiation). Otherwise, it seems we will have some inconsistency: for example, if we tick some epochs (e.g. in tests), we will expire objects that have a specific expiration date, but we won't expire objects that have a Days expiration. @alexvanin