feature/lifecycle_convert_date #516
9 changed files with 147 additions and 70 deletions
@ -29,6 +29,7 @@ type (
 	LifecycleExpiration struct {
 		Date string `xml:"Date,omitempty"`
 		Days *int `xml:"Days,omitempty"`
+		Epoch *uint64 `xml:"Epoch,omitempty"`
 		ExpiredObjectDeleteMarker *bool `xml:"ExpiredObjectDeleteMarker,omitempty"`
 	}
@ -1,9 +1,9 @@
 package handler

 import (
-	"bytes"
+	"context"
+	"encoding/base64"
 	"fmt"
-	"io"
 	"net/http"
 	"time"
@ -12,6 +12,8 @@ import (
 	apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/util"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
 )

 const (
@ -43,9 +45,6 @@ func (h *handler) GetBucketLifecycleHandler(w http.ResponseWriter, r *http.Reque
 }

 func (h *handler) PutBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {
-	var buf bytes.Buffer
-
-	tee := io.TeeReader(r.Body, &buf)
 	ctx := r.Context()
 	reqInfo := middleware.GetReqInfo(ctx)
@ -56,6 +55,11 @@ func (h *handler) PutBucketLifecycleHandler(w http.ResponseWriter, r *http.Reque
 		return
 	}

+	if _, err := base64.StdEncoding.DecodeString(r.Header.Get(api.ContentMD5)); err != nil {
+		h.logAndSendError(w, "invalid Content-MD5", reqInfo, apierr.GetAPIError(apierr.ErrInvalidDigest))
+		return
+	}
+
 	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
 	if err != nil {
 		h.logAndSendError(w, "could not get bucket info", reqInfo, err)
@ -63,12 +67,18 @@ func (h *handler) PutBucketLifecycleHandler(w http.ResponseWriter, r *http.Reque
 	}

 	cfg := new(data.LifecycleConfiguration)
-	if err = h.cfg.NewXMLDecoder(tee).Decode(cfg); err != nil {
+	if err = h.cfg.NewXMLDecoder(r.Body).Decode(cfg); err != nil {
 		h.logAndSendError(w, "could not decode body", reqInfo, fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrMalformedXML), err.Error()))
 		return
 	}

-	if err = checkLifecycleConfiguration(cfg); err != nil {
+	networkInfo, err := h.obj.GetNetworkInfo(ctx)
+	if err != nil {
+		h.logAndSendError(w, "could not get network info", reqInfo, err)
+		return
+	}
+
+	if err = checkLifecycleConfiguration(ctx, cfg, &networkInfo); err != nil {
 		h.logAndSendError(w, "invalid lifecycle configuration", reqInfo, fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrMalformedXML), err.Error()))
 		return
 	}
@ -76,8 +86,6 @@ func (h *handler) PutBucketLifecycleHandler(w http.ResponseWriter, r *http.Reque
 	params := &layer.PutBucketLifecycleParams{
 		BktInfo: bktInfo,
 		LifecycleCfg: cfg,
-		LifecycleReader: &buf,
-		MD5Hash: r.Header.Get(api.ContentMD5),
 	}

 	params.CopiesNumbers, err = h.pickCopiesNumbers(parseMetadata(r), reqInfo.Namespace, bktInfo.LocationConstraint)
@ -110,13 +118,15 @@ func (h *handler) DeleteBucketLifecycleHandler(w http.ResponseWriter, r *http.Re
 	w.WriteHeader(http.StatusNoContent)
 }

-func checkLifecycleConfiguration(cfg *data.LifecycleConfiguration) error {
+func checkLifecycleConfiguration(ctx context.Context, cfg *data.LifecycleConfiguration, ni *netmap.NetworkInfo) error {
+	now := layer.TimeNow(ctx)
+
 	if len(cfg.Rules) > maxRules {
 		return fmt.Errorf("number of rules cannot be greater than %d", maxRules)
 	}

 	ids := make(map[string]struct{}, len(cfg.Rules))
-	for _, rule := range cfg.Rules {
+	for i, rule := range cfg.Rules {
 		if _, ok := ids[rule.ID]; ok && rule.ID != "" {
 			return fmt.Errorf("duplicate 'ID': %s", rule.ID)
 		}
@ -160,9 +170,19 @@ func checkLifecycleConfiguration(cfg *data.LifecycleConfiguration) error {
 				return fmt.Errorf("expiration days must be a positive integer: %d", *rule.Expiration.Days)
 			}

-			if _, err := time.Parse("2006-01-02T15:04:05Z", rule.Expiration.Date); rule.Expiration.Date != "" && err != nil {
-				return fmt.Errorf("invalid value of expiration date: %s", rule.Expiration.Date)
-			}
+			if rule.Expiration.Date != "" {
+				parsedTime, err := time.Parse("2006-01-02T15:04:05Z", rule.Expiration.Date)
+				if err != nil {
+					return fmt.Errorf("invalid value of expiration date: %s", rule.Expiration.Date)
+				}
+
+				epoch, err := util.TimeToEpoch(ni, now, parsedTime)
+				if err != nil {
+					return fmt.Errorf("convert time to epoch: %w", err)
+				}
+
+				cfg.Rules[i].Expiration.Epoch = &epoch
+			}
 		}

 		if rule.NonCurrentVersionExpiration != nil {
@ -376,11 +376,6 @@ func TestPutBucketLifecycleInvalidMD5(t *testing.T) {
 	hc.Handler().PutBucketLifecycleHandler(w, r)
 	assertS3Error(hc.t, w, apierr.GetAPIError(apierr.ErrMissingContentMD5))

-	w, r = prepareTestRequest(hc, bktName, "", lifecycle)
-	r.Header.Set(api.ContentMD5, "")
-	hc.Handler().PutBucketLifecycleHandler(w, r)
-	assertS3Error(hc.t, w, apierr.GetAPIError(apierr.ErrInvalidDigest))
-
 	w, r = prepareTestRequest(hc, bktName, "", lifecycle)
 	r.Header.Set(api.ContentMD5, "some-hash")
 	hc.Handler().PutBucketLifecycleHandler(w, r)
@ -413,6 +413,8 @@ func (t *TestFrostFS) SearchObjects(_ context.Context, prm frostfs.PrmObjectSear
 func (t *TestFrostFS) NetworkInfo(context.Context) (netmap.NetworkInfo, error) {
 	ni := netmap.NetworkInfo{}
 	ni.SetCurrentEpoch(t.currentEpoch)
+	ni.SetEpochDuration(60)
+	ni.SetMsPerBlock(1000)

 	return ni, nil
 }
@ -3,11 +3,9 @@ package layer
 import (
 	"bytes"
 	"context"
-	"encoding/base64"
 	"encoding/xml"
 	"errors"
 	"fmt"
-	"io"

 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
 	apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
@ -21,14 +19,17 @@ import (
 type PutBucketLifecycleParams struct {
 	BktInfo *data.BucketInfo
 	LifecycleCfg *data.LifecycleConfiguration
-	LifecycleReader io.Reader
 	CopiesNumbers []uint32
-	MD5Hash string
 }

 func (n *Layer) PutBucketLifecycleConfiguration(ctx context.Context, p *PutBucketLifecycleParams) error {
+	cfgBytes, err := xml.Marshal(p.LifecycleCfg)
+	if err != nil {
+		return fmt.Errorf("marshal lifecycle configuration: %w", err)
+	}
+
 	prm := frostfs.PrmObjectCreate{
-		Payload: p.LifecycleReader,
+		Payload: bytes.NewReader(cfgBytes),
 		Filepath: p.BktInfo.LifecycleConfigurationObjectName(),
 		CreationTime: TimeNow(ctx),
 	}
@ -49,17 +50,6 @@ func (n *Layer) PutBucketLifecycleConfiguration(ctx context.Context, p *PutBucke
 		return fmt.Errorf("put lifecycle object: %w", err)
 	}

-	hashBytes, err := base64.StdEncoding.DecodeString(p.MD5Hash)
-	if err != nil {
-		return apierr.GetAPIError(apierr.ErrInvalidDigest)
-	}
-
-	if !bytes.Equal(hashBytes, createdObj.MD5Sum) {
-		n.deleteLifecycleObject(ctx, p.BktInfo, newAddress(lifecycleBkt.CID, createdObj.ID))
-
-		return apierr.GetAPIError(apierr.ErrInvalidDigest)
-	}
-
 	objsToDelete, err := n.treeService.PutBucketLifecycleConfiguration(ctx, p.BktInfo, newAddress(lifecycleBkt.CID, createdObj.ID))
 	objsToDeleteNotFound := errors.Is(err, tree.ErrNoNodeToRemove)
 	if err != nil && !objsToDeleteNotFound {
@ -129,6 +119,12 @@ func (n *Layer) GetBucketLifecycleConfiguration(ctx context.Context, bktInfo *da

 	n.cache.PutLifecycleConfiguration(owner, bktInfo, lifecycleCfg)

+	for i := range lifecycleCfg.Rules {
+		if lifecycleCfg.Rules[i].Expiration != nil {
+			lifecycleCfg.Rules[i].Expiration.Epoch = nil
+		}
+	}
+
 	return lifecycleCfg, nil
 }
@ -1,9 +1,7 @@
 package layer

 import (
-	"bytes"
 	"crypto/md5"
-	"encoding/base64"
 	"encoding/xml"
 	"testing"
@ -44,8 +42,6 @@ func TestBucketLifecycle(t *testing.T) {
 	err = tc.layer.PutBucketLifecycleConfiguration(tc.ctx, &PutBucketLifecycleParams{
 		BktInfo: tc.bktInfo,
 		LifecycleCfg: lifecycle,
-		LifecycleReader: bytes.NewReader(lifecycleBytes),
-		MD5Hash: base64.StdEncoding.EncodeToString(hash.Sum(nil)),
 	})
 	require.NoError(t, err)
@ -5,13 +5,13 @@ import (
 	"errors"
 	"fmt"
 	"io"
-	"math"
 	"strconv"
 	"time"

 	objectv2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/frostfs"
 	frosterr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/errors"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/util"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
 	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
@ -54,8 +54,7 @@ func NewFrostFS(p *pool.Pool, key *keys.PrivateKey) *FrostFS {

 // TimeToEpoch implements layer.FrostFS interface method.
 func (x *FrostFS) TimeToEpoch(ctx context.Context, now, futureTime time.Time) (uint64, uint64, error) {
-	dur := futureTime.Sub(now)
-	if dur < 0 {
+	if futureTime.Before(now) {
 		return 0, 0, fmt.Errorf("time '%s' must be in the future (after %s)",
 			futureTime.Format(time.RFC3339), now.Format(time.RFC3339))
 	}
@ -65,27 +64,12 @@ func (x *FrostFS) TimeToEpoch(ctx context.Context, now, futureTime time.Time) (u
 		return 0, 0, handleObjectError("get network info via client", err)
 	}

-	durEpoch := networkInfo.EpochDuration()
-	if durEpoch == 0 {
-		return 0, 0, errors.New("epoch duration is missing or zero")
+	epoch, err := util.TimeToEpoch(&networkInfo, now, futureTime)
+	if err != nil {
+		return 0, 0, err
 	}

-	curr := networkInfo.CurrentEpoch()
-	msPerEpoch := durEpoch * uint64(networkInfo.MsPerBlock())
-
-	epochLifetime := uint64(dur.Milliseconds()) / msPerEpoch
-	if uint64(dur.Milliseconds())%msPerEpoch != 0 {
-		epochLifetime++
-	}
-
-	var epoch uint64
-	if epochLifetime >= math.MaxUint64-curr {
-		epoch = math.MaxUint64
-	} else {
-		epoch = curr + epochLifetime
-	}
-
-	return curr, epoch, nil
+	return networkInfo.CurrentEpoch(), epoch, nil
 }

 // Container implements layer.FrostFS interface method.
@ -2,9 +2,12 @@ package util

 import (
 	"fmt"
+	"math"
 	"strings"
+	"time"

 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ns"
 	"github.com/nspcc-dev/neo-go/pkg/util"
 )
@ -32,3 +35,39 @@ func ResolveContractHash(contractHash, rpcAddress string) (util.Uint160, error)

 	return nns.ResolveContractHash(domain)
 }
+
+func TimeToEpoch(ni *netmap.NetworkInfo, now, t time.Time) (uint64, error) {
+	duration := t.Sub(now)
+	durationAbs := duration.Abs()
+
+	durEpoch := ni.EpochDuration()
+	if durEpoch == 0 {
+		return 0, fmt.Errorf("epoch duration is missing or zero")
+	}
+
+	msPerEpoch := durEpoch * uint64(ni.MsPerBlock())
+	epochLifetime := uint64(durationAbs.Milliseconds()) / msPerEpoch
+
+	if uint64(durationAbs.Milliseconds())%msPerEpoch != 0 {
+		epochLifetime++
+	}
+
+	curr := ni.CurrentEpoch()
+
+	var epoch uint64
+	if duration > 0 {
+		if epochLifetime >= math.MaxUint64-curr {
+			epoch = math.MaxUint64
+		} else {
+			epoch = curr + epochLifetime
+		}
+	} else {
+		if epochLifetime >= curr {
+			epoch = 0
+		} else {
+			epoch = curr - epochLifetime
+		}
+	}
+
+	return epoch, nil
+}
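As a rough illustration of the conversion added above (network values assumed from the tests in this PR: current epoch 10, EpochDuration 60 blocks, MsPerBlock 1000 ms, i.e. about 60 s per epoch), TimeToEpoch rounds any partial epoch away from the current one:

	// Illustration only, not part of the diff; values mirror the tests below.
	ni := netmap.NetworkInfo{}
	ni.SetCurrentEpoch(10)
	ni.SetEpochDuration(60) // blocks per epoch
	ni.SetMsPerBlock(1000)  // 1000 ms per block => ~60 s per epoch

	// 90 s ahead => ceil(90000/60000) = 2 extra epochs => 10 + 2 = 12.
	future, _ := TimeToEpoch(&ni, time.Now(), time.Now().Add(90*time.Second))

	// 90 s in the past => 10 - 2 = 8; a large negative offset clamps at epoch 0.
	past, _ := TimeToEpoch(&ni, time.Now(), time.Now().Add(-90*time.Second))
	_, _ = future, past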
internal/frostfs/util/util_test.go (new file, 44 lines)
@ -0,0 +1,44 @@
+package util
+
+import (
+	"testing"
+	"time"
+
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
+	"github.com/stretchr/testify/require"
+)
+
+func TestTimeToEpoch(t *testing.T) {
+	ni := netmap.NetworkInfo{}
+	ni.SetCurrentEpoch(10)
+
+	_, err := TimeToEpoch(&ni, time.Now(), time.Now())
+	require.Error(t, err)
+
+	ni.SetEpochDuration(60)
+	ni.SetMsPerBlock(1000)
+
+	epoch, err := TimeToEpoch(&ni, time.Now(), time.Now())
+	require.NoError(t, err)
+	require.Equal(t, uint64(10), epoch)
+
+	epoch, err = TimeToEpoch(&ni, time.Now(), time.Now().Add(30*time.Second))
+	require.NoError(t, err)
+	require.Equal(t, uint64(11), epoch)
+
+	epoch, err = TimeToEpoch(&ni, time.Now(), time.Now().Add(90*time.Second))
+	require.NoError(t, err)
+	require.Equal(t, uint64(12), epoch)
+
+	epoch, err = TimeToEpoch(&ni, time.Now(), time.Now().Add(-30*time.Second))
+	require.NoError(t, err)
+	require.Equal(t, uint64(9), epoch)
+
+	epoch, err = TimeToEpoch(&ni, time.Now(), time.Now().Add(-90*time.Second))
+	require.NoError(t, err)
+	require.Equal(t, uint64(8), epoch)
+
+	epoch, err = TimeToEpoch(&ni, time.Now(), time.Now().Add(-10*time.Minute))
+	require.NoError(t, err)
+	require.Equal(t, uint64(0), epoch)
+}
Should we add a similar field (Epochs) to AbortIncompleteMultipartUpload and NonCurrentVersionExpiration?
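A hypothetical sketch of what such fields could look like, by analogy with the Epoch field added to LifecycleExpiration in this change; the Epochs name, the *uint64 type, and the existing field names (taken from the S3 schema) are assumptions, not part of this PR:

	// Hypothetical sketch only; actual struct definitions in the repository may differ.
	type AbortIncompleteMultipartUpload struct {
		DaysAfterInitiation *int    `xml:"DaysAfterInitiation,omitempty"`
		Epochs              *uint64 `xml:"Epochs,omitempty"` // assumed name and type
	}

	type NonCurrentVersionExpiration struct {
		NewerNonCurrentVersions *int    `xml:"NewerNoncurrentVersions,omitempty"`
		NonCurrentDays          *int    `xml:"NoncurrentDays,omitempty"`
		Epochs                  *uint64 `xml:"Epochs,omitempty"` // assumed name and type
	}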
I'm not sure this is necessary; the current date is not involved in the days conversion.
It's probably better to express the duration in epochs rather than days (abort the multipart upload not after 2 days but after N epochs from initiation).
Otherwise, it seems we will have some inconsistency. For example, if we tick some epochs (e.g. in tests), we will expire objects that have a specific expiration date, but we won't expire objects that have a Days expiration. @alexvanin
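A minimal sketch of the epoch-based variant suggested above, reusing the util.TimeToEpoch helper added in this PR; the function name, its placement, and the daysAfterInitiation parameter are illustrative assumptions:

	package handler // illustrative placement only

	import (
		"time"

		"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/util"
		"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
	)

	// abortEpochForUpload converts a days-after-initiation setting into an absolute
	// epoch at the moment the multipart upload is initiated, so ticking epochs
	// (e.g. in tests) expires days-based and date-based rules consistently.
	func abortEpochForUpload(ni *netmap.NetworkInfo, initiated time.Time, daysAfterInitiation int) (uint64, error) {
		deadline := initiated.AddDate(0, 0, daysAfterInitiation)
		return util.TimeToEpoch(ni, initiated, deadline)
	}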