package engine

import (
	"context"
	"errors"
	"fmt"
	"path/filepath"
	"strconv"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	coreContainer "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
	objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
	"github.com/stretchr/testify/require"
	"golang.org/x/sync/errgroup"
)
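
// containerStorage is a simple in-memory container source used by the
// evacuation tests below; a non-zero latency delays every Get call to
// imitate a slow container service.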
type containerStorage struct {
	cntmap  map[cid.ID]*container.Container
	latency time.Duration
}

func (cs *containerStorage) Get(id cid.ID) (*coreContainer.Container, error) {
	time.Sleep(cs.latency)
	v, ok := cs.cntmap[id]
	if !ok {
		return nil, new(apistatus.ContainerNotFound)
	}
	coreCnt := coreContainer.Container{
		Value: *v,
	}
	return &coreCnt, nil
}

func (cs *containerStorage) DeletionInfo(cid.ID) (*coreContainer.DelInfo, error) {
	return nil, nil
}
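
// newEngineEvacuate creates a test engine with shardNum shards and fills every
// shard with objPerShard objects, each object in its own dummy container and
// accompanied by a small pilorama tree; the containers are registered in the
// engine via containerStorage.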
func newEngineEvacuate(t *testing.T, shardNum int, objPerShard int) (*StorageEngine, []*shard.ID, []*objectSDK.Object) {
	dir := t.TempDir()

	te := testNewEngine(t).
		setShardsNumOpts(t, shardNum, func(id int) []shard.Option {
			return []shard.Option{
				shard.WithLogger(test.NewLogger(t)),
				shard.WithBlobStorOptions(
					blobstor.WithStorages([]blobstor.SubStorage{{
						Storage: fstree.New(
							fstree.WithPath(filepath.Join(dir, strconv.Itoa(id))),
							fstree.WithDepth(1)),
					}})),
				shard.WithMetaBaseOptions(
					meta.WithPath(filepath.Join(dir, fmt.Sprintf("%d.metabase", id))),
					meta.WithPermissions(0o700),
					meta.WithEpochState(epochState{})),
				shard.WithPiloramaOptions(
					pilorama.WithPath(filepath.Join(dir, fmt.Sprintf("%d.pilorama", id))),
					pilorama.WithPerm(0o700),
				),
			}
		}).
		prepare(t)
	e, ids := te.engine, te.shardIDs

	objects := make([]*objectSDK.Object, 0, objPerShard*len(ids))
	treeID := "version"
	meta := []pilorama.KeyValue{
		{Key: pilorama.AttributeVersion, Value: []byte("XXX")},
		{Key: pilorama.AttributeFilename, Value: []byte("file.txt")},
	}
	cnrMap := make(map[cid.ID]*container.Container)
	for _, sh := range ids {
		for i := range objPerShard {
			// Create dummy container
			cnr1 := container.Container{}
			cnr1.SetAttribute("cnr", "cnr"+strconv.Itoa(i))
			contID := cidtest.ID()
			cnrMap[contID] = &cnr1

			obj := testutil.GenerateObjectWithCID(contID)
			objects = append(objects, obj)

			var putPrm shard.PutPrm
			putPrm.SetObject(obj)
			_, err := e.shards[sh.String()].Put(context.Background(), putPrm)
			require.NoError(t, err)

			_, err = e.shards[sh.String()].TreeAddByPath(context.Background(), pilorama.CIDDescriptor{CID: contID, Position: 0, Size: 1},
				treeID, pilorama.AttributeFilename, []string{"path", "to", "the", "file"}, meta)
			require.NoError(t, err)
		}
	}
	e.SetContainerSource(&containerStorage{cntmap: cnrMap})
	return e, ids, objects
}
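
// TestEvacuateShardObjects checks object evacuation to the remaining local shards:
// the evacuated shard must be read-only, all its objects must stay readable through
// the engine, remain on the shard's disk while becoming logically unavailable, and
// repeating the evacuation must move nothing new.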
func TestEvacuateShardObjects(t *testing.T) {
	t.Parallel()

	const objPerShard = 3

	e, ids, objects := newEngineEvacuate(t, 3, objPerShard)
	defer func() {
		require.NoError(t, e.Close(context.Background()))
	}()

	evacuateShardID := ids[2].String()

	checkHasObjects := func(t *testing.T) {
		for i := range objects {
			var prm GetPrm
			prm.WithAddress(objectCore.AddressOf(objects[i]))

			_, err := e.Get(context.Background(), prm)
			require.NoError(t, err)
		}
	}

	checkHasObjects(t)

	var prm EvacuateShardPrm
	prm.ShardID = ids[2:3]
	prm.Scope = EvacuateScopeObjects

	t.Run("must be read-only", func(t *testing.T) {
		res, err := e.Evacuate(context.Background(), prm)
		require.ErrorIs(t, err, ErrMustBeReadOnly)
		require.Equal(t, uint64(0), res.ObjectsEvacuated())
	})

	require.NoError(t, e.shards[evacuateShardID].SetMode(context.Background(), mode.ReadOnly))

	res, err := e.Evacuate(context.Background(), prm)
	require.NoError(t, err)
	require.Equal(t, uint64(objPerShard), res.ObjectsEvacuated())

	// We check that all objects are available both before and after shard removal.
	// First case is a real-world use-case. It ensures that an object can be put in presence
	// of all metabase checks/marks.
	// Second case ensures that all objects are indeed moved and available.
	checkHasObjects(t)

	// Objects on evacuated shards should be logically unavailable, but persisted on disk.
	// This is necessary to prevent the policer from removing them in case of a `REP 1` policy.
	for _, obj := range objects[len(objects)-objPerShard:] {
		var prmGet shard.GetPrm
		prmGet.SetAddress(objectCore.AddressOf(obj))
		_, err = e.shards[evacuateShardID].Get(context.Background(), prmGet)
		require.Error(t, err)

		prmGet.SkipEvacCheck(true)
		_, err = e.shards[evacuateShardID].Get(context.Background(), prmGet)
		require.NoError(t, err)

		var prmHead shard.HeadPrm
		prmHead.SetAddress(objectCore.AddressOf(obj))
		_, err = e.shards[evacuateShardID].Head(context.Background(), prmHead)
		require.Error(t, err)

		var existsPrm shard.ExistsPrm
		existsPrm.Address = objectCore.AddressOf(obj)
		_, err = e.shards[evacuateShardID].Exists(context.Background(), existsPrm)
		require.Error(t, err)

		var rngPrm shard.RngPrm
		rngPrm.SetAddress(objectCore.AddressOf(obj))
		_, err = e.shards[evacuateShardID].GetRange(context.Background(), rngPrm)
		require.Error(t, err)
	}

	// Calling it again is OK, but all objects are already moved, so no new PUTs should be done.
	res, err = e.Evacuate(context.Background(), prm)
	require.NoError(t, err)
	require.Equal(t, uint64(0), res.ObjectsEvacuated())

	checkHasObjects(t)

	e.mtx.Lock()
	delete(e.shards, evacuateShardID)
	delete(e.shardPools, evacuateShardID)
	e.mtx.Unlock()

	checkHasObjects(t)
}
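
// TestEvacuateObjectsNetwork checks object evacuation through the remote ObjectsHandler.
// The acceptOneOf helper accepts at most max objects and then fails with errReplication,
// which lets the test control how many objects leave the node.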
func TestEvacuateObjectsNetwork(t *testing.T) {
	t.Parallel()

	errReplication := errors.New("handler error")

	acceptOneOf := func(objects []*objectSDK.Object, max uint64) func(context.Context, oid.Address, *objectSDK.Object) (bool, error) {
		var n atomic.Uint64
		var mtx sync.Mutex
		return func(_ context.Context, addr oid.Address, obj *objectSDK.Object) (bool, error) {
			mtx.Lock()
			defer mtx.Unlock()
			if n.Load() == max {
				return false, errReplication
			}

			n.Add(1)
			for i := range objects {
				if addr == objectCore.AddressOf(objects[i]) {
					require.Equal(t, objects[i], obj)
					return true, nil
				}
			}
			require.FailNow(t, "handler was called with an unexpected object: %s", addr)
			panic("unreachable")
		}
	}

	t.Run("single shard", func(t *testing.T) {
		t.Parallel()
		e, ids, objects := newEngineEvacuate(t, 1, 3)
		defer func() {
			require.NoError(t, e.Close(context.Background()))
		}()

		evacuateShardID := ids[0].String()

		require.NoError(t, e.shards[evacuateShardID].SetMode(context.Background(), mode.ReadOnly))

		var prm EvacuateShardPrm
		prm.ShardID = ids[0:1]
		prm.Scope = EvacuateScopeObjects

		res, err := e.Evacuate(context.Background(), prm)
		require.ErrorIs(t, err, errMustHaveTwoShards)
		require.Equal(t, uint64(0), res.ObjectsEvacuated())

		prm.ObjectsHandler = acceptOneOf(objects, 2)

		res, err = e.Evacuate(context.Background(), prm)
		require.ErrorIs(t, err, errReplication)
		require.Equal(t, uint64(2), res.ObjectsEvacuated())
	})
	t.Run("multiple shards, evacuate one", func(t *testing.T) {
		t.Parallel()
		e, ids, objects := newEngineEvacuate(t, 2, 3)
		defer func() {
			require.NoError(t, e.Close(context.Background()))
		}()

		require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly))
		require.NoError(t, e.shards[ids[1].String()].SetMode(context.Background(), mode.ReadOnly))

		var prm EvacuateShardPrm
		prm.ShardID = ids[1:2]
		prm.ObjectsHandler = acceptOneOf(objects, 2)
		prm.Scope = EvacuateScopeObjects

		res, err := e.Evacuate(context.Background(), prm)
		require.ErrorIs(t, err, errReplication)
		require.Equal(t, uint64(2), res.ObjectsEvacuated())

		t.Run("no errors", func(t *testing.T) {
			prm.ObjectsHandler = acceptOneOf(objects, 3)

			res, err := e.Evacuate(context.Background(), prm)
			require.NoError(t, err)
			require.Equal(t, uint64(3), res.ObjectsEvacuated())
		})
	})
	t.Run("multiple shards, evacuate many", func(t *testing.T) {
		t.Parallel()
		e, ids, objects := newEngineEvacuate(t, 4, 5)
		defer func() {
			require.NoError(t, e.Close(context.Background()))
		}()

		evacuateIDs := ids[0:3]

		var totalCount uint64
		for i := range evacuateIDs {
			res, err := e.shards[ids[i].String()].List(context.Background())
			require.NoError(t, err)

			totalCount += uint64(len(res.AddressList()))
		}

		for i := range ids {
			require.NoError(t, e.shards[ids[i].String()].SetMode(context.Background(), mode.ReadOnly))
		}

		var prm EvacuateShardPrm
		prm.ShardID = evacuateIDs
		prm.ObjectsHandler = acceptOneOf(objects, totalCount-1)
		prm.Scope = EvacuateScopeObjects

		res, err := e.Evacuate(context.Background(), prm)
		require.ErrorIs(t, err, errReplication)
		require.Equal(t, totalCount-1, res.ObjectsEvacuated())

		t.Run("no errors", func(t *testing.T) {
			prm.ObjectsHandler = acceptOneOf(objects, totalCount)

			res, err := e.Evacuate(context.Background(), prm)
			require.NoError(t, err)
			require.Equal(t, totalCount, res.ObjectsEvacuated())
		})
	})
}
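
// TestEvacuateCancellation checks that evacuation is aborted when the context
// is cancelled before any object is processed.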
func TestEvacuateCancellation(t *testing.T) {
	t.Parallel()
	e, ids, _ := newEngineEvacuate(t, 2, 3)
	defer func() {
		require.NoError(t, e.Close(context.Background()))
	}()

	require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly))
	require.NoError(t, e.shards[ids[1].String()].SetMode(context.Background(), mode.ReadOnly))

	var prm EvacuateShardPrm
	prm.ShardID = ids[1:2]
	prm.ObjectsHandler = func(ctx context.Context, a oid.Address, o *objectSDK.Object) (bool, error) {
		select {
		case <-ctx.Done():
			return false, ctx.Err()
		default:
		}
		return true, nil
	}
	prm.Scope = EvacuateScopeObjects

	ctx, cancel := context.WithCancel(context.Background())
	cancel()

	res, err := e.Evacuate(ctx, prm)
	require.ErrorContains(t, err, "context canceled")
	require.Equal(t, uint64(0), res.ObjectsEvacuated())
}
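
// TestEvacuateCancellationByError checks that a single handler error aborts
// the whole evacuation even when several workers are used.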
func TestEvacuateCancellationByError(t *testing.T) {
	t.Parallel()
	e, ids, _ := newEngineEvacuate(t, 2, 10)
	defer func() {
		require.NoError(t, e.Close(context.Background()))
	}()

	require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly))
	require.NoError(t, e.shards[ids[1].String()].SetMode(context.Background(), mode.ReadOnly))

	var prm EvacuateShardPrm
	prm.ShardID = ids[1:2]
	var once atomic.Bool
	prm.ObjectsHandler = func(ctx context.Context, a oid.Address, o *objectSDK.Object) (bool, error) {
		var err error
		flag := true
		if once.CompareAndSwap(false, true) {
			err = errors.New("test error")
			flag = false
		}
		return flag, err
	}
	prm.Scope = EvacuateScopeObjects
	prm.ObjectWorkerCount = 2
	prm.ContainerWorkerCount = 2

	_, err := e.Evacuate(context.Background(), prm)
	require.ErrorContains(t, err, "test error")
}
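
// TestEvacuateSingleProcess checks that only one evacuation may run at a time:
// a second call fails while the first one is blocked inside the object handler.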
func TestEvacuateSingleProcess(t *testing.T) {
	e, ids, _ := newEngineEvacuate(t, 2, 3)
	defer func() {
		require.NoError(t, e.Close(context.Background()))
	}()

	require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly))
	require.NoError(t, e.shards[ids[1].String()].SetMode(context.Background(), mode.ReadOnly))

	blocker := make(chan interface{})
	running := make(chan interface{})

	var prm EvacuateShardPrm
	prm.ShardID = ids[1:2]
	prm.Scope = EvacuateScopeObjects
	prm.ObjectsHandler = func(ctx context.Context, a oid.Address, o *objectSDK.Object) (bool, error) {
		select {
		case <-running:
		default:
			close(running)
		}
		<-blocker
		return true, nil
	}

	eg, egCtx := errgroup.WithContext(context.Background())
	eg.Go(func() error {
		res, err := e.Evacuate(egCtx, prm)
		require.NoError(t, err, "first evacuation failed")
		require.Equal(t, uint64(3), res.ObjectsEvacuated())
		return nil
	})
	eg.Go(func() error {
		<-running
		res, err := e.Evacuate(egCtx, prm)
		require.ErrorContains(t, err, "evacuate is already running for shard ids", "second evacuation not failed")
		require.Equal(t, uint64(0), res.ObjectsEvacuated())
		close(blocker)
		return nil
	})
	require.NoError(t, eg.Wait())
}
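
// TestEvacuateObjectsAsync tracks the evacuation status reported by GetEvacuationState
// through the whole lifecycle (undefined -> running -> completed) and checks that the
// status can be reset only after the evacuation has finished.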
func TestEvacuateObjectsAsync(t *testing.T) {
	e, ids, _ := newEngineEvacuate(t, 2, 3)
	defer func() {
		require.NoError(t, e.Close(context.Background()))
	}()

	require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly))
	require.NoError(t, e.shards[ids[1].String()].SetMode(context.Background(), mode.ReadOnly))

	blocker := make(chan interface{})
	running := make(chan interface{})

	var prm EvacuateShardPrm
	prm.ShardID = ids[1:2]
	prm.Scope = EvacuateScopeObjects
	prm.ObjectsHandler = func(ctx context.Context, a oid.Address, o *objectSDK.Object) (bool, error) {
		select {
		case <-running:
		default:
			close(running)
		}
		<-blocker
		return true, nil
	}

	st, err := e.GetEvacuationState(context.Background())
	require.NoError(t, err, "get init state failed")
	require.Equal(t, EvacuateProcessStateUndefined, st.ProcessingStatus(), "invalid init state")
	require.Equal(t, uint64(0), st.ObjectsEvacuated(), "invalid init count")
	require.Nil(t, st.StartedAt(), "invalid init started at")
	require.Nil(t, st.FinishedAt(), "invalid init finished at")
	require.ElementsMatch(t, []string{}, st.ShardIDs(), "invalid init shard ids")
	require.Equal(t, "", st.ErrorMessage(), "invalid init error message")

	eg, egCtx := errgroup.WithContext(context.Background())
	eg.Go(func() error {
		res, err := e.Evacuate(egCtx, prm)
		require.NoError(t, err, "first evacuation failed")
		require.Equal(t, uint64(3), res.ObjectsEvacuated())
		return nil
	})

	<-running

	st, err = e.GetEvacuationState(context.Background())
	require.NoError(t, err, "get running state failed")
	require.Equal(t, EvacuateProcessStateRunning, st.ProcessingStatus(), "invalid running state")
	require.Equal(t, uint64(0), st.ObjectsEvacuated(), "invalid running count")
	require.NotNil(t, st.StartedAt(), "invalid running started at")
	require.Nil(t, st.FinishedAt(), "invalid running finished at")
	expectedShardIDs := make([]string, 0, 2)
	for _, id := range ids[1:2] {
		expectedShardIDs = append(expectedShardIDs, id.String())
	}
	require.ElementsMatch(t, expectedShardIDs, st.ShardIDs(), "invalid running shard ids")
	require.Equal(t, "", st.ErrorMessage(), "invalid running error message")

	require.Error(t, e.ResetEvacuationStatus(context.Background()))

	close(blocker)

	require.Eventually(t, func() bool {
		st, err = e.GetEvacuationState(context.Background())
		return st.ProcessingStatus() == EvacuateProcessStateCompleted
	}, 3*time.Second, 10*time.Millisecond, "invalid final state")

	require.NoError(t, err, "get final state failed")
	require.Equal(t, uint64(3), st.ObjectsEvacuated(), "invalid final count")
	require.NotNil(t, st.StartedAt(), "invalid final started at")
	require.NotNil(t, st.FinishedAt(), "invalid final finished at")
	require.ElementsMatch(t, expectedShardIDs, st.ShardIDs(), "invalid final shard ids")
	require.Equal(t, "", st.ErrorMessage(), "invalid final error message")

	require.NoError(t, eg.Wait())

	require.NoError(t, e.ResetEvacuationStatus(context.Background()))
	st, err = e.GetEvacuationState(context.Background())
	require.NoError(t, err, "get state after reset failed")
	require.Equal(t, EvacuateProcessStateUndefined, st.ProcessingStatus(), "invalid state after reset")
	require.Equal(t, uint64(0), st.ObjectsEvacuated(), "invalid count after reset")
	require.Nil(t, st.StartedAt(), "invalid started at after reset")
	require.Nil(t, st.FinishedAt(), "invalid finished at after reset")
	require.ElementsMatch(t, []string{}, st.ShardIDs(), "invalid shard ids after reset")
	require.Equal(t, "", st.ErrorMessage(), "invalid error message after reset")
}
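
// TestEvacuateTreesLocal checks tree evacuation to another local shard: every tree of
// the source shard must appear on the target shard with an identical operation log.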
func TestEvacuateTreesLocal(t *testing.T) {
	e, ids, _ := newEngineEvacuate(t, 2, 3)
	defer func() {
		require.NoError(t, e.Close(context.Background()))
	}()

	require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly))

	var prm EvacuateShardPrm
	prm.ShardID = ids[0:1]
	prm.Scope = EvacuateScopeTrees

	expectedShardIDs := make([]string, 0, 1)
	for _, id := range ids[0:1] {
		expectedShardIDs = append(expectedShardIDs, id.String())
	}

	st, err := e.GetEvacuationState(context.Background())
	require.NoError(t, err, "get init state failed")
	require.Equal(t, EvacuateProcessStateUndefined, st.ProcessingStatus(), "invalid init state")
	require.Equal(t, uint64(0), st.TreesEvacuated(), "invalid init count")
	require.Nil(t, st.StartedAt(), "invalid init started at")
	require.Nil(t, st.FinishedAt(), "invalid init finished at")
	require.ElementsMatch(t, []string{}, st.ShardIDs(), "invalid init shard ids")
	require.Equal(t, "", st.ErrorMessage(), "invalid init error message")

	res, err := e.Evacuate(context.Background(), prm)
	require.NotNil(t, res, "sync evacuation result must not be nil")
	require.NoError(t, err, "evacuation failed")

	st, err = e.GetEvacuationState(context.Background())
	require.NoError(t, err, "get evacuation state failed")
	require.Equal(t, EvacuateProcessStateCompleted, st.ProcessingStatus())

	require.Equal(t, uint64(3), st.TreesTotal(), "invalid trees total count")
	require.Equal(t, uint64(3), st.TreesEvacuated(), "invalid trees evacuated count")
	require.Equal(t, uint64(0), st.TreesFailed(), "invalid trees failed count")
	require.NotNil(t, st.StartedAt(), "invalid final started at")
	require.NotNil(t, st.FinishedAt(), "invalid final finished at")
	require.ElementsMatch(t, expectedShardIDs, st.ShardIDs(), "invalid final shard ids")
	require.Equal(t, "", st.ErrorMessage(), "invalid final error message")

	sourceTrees, err := pilorama.TreeListAll(context.Background(), e.shards[ids[0].String()])
	require.NoError(t, err, "list source trees failed")
	require.Len(t, sourceTrees, 3)

	for _, tr := range sourceTrees {
		exists, err := e.shards[ids[1].String()].TreeExists(context.Background(), tr.CID, tr.TreeID)
		require.NoError(t, err, "failed to check tree existence")
		require.True(t, exists, "tree doesn't exist on target shard")

		var height uint64
		var sourceOps []pilorama.Move
		for {
			op, err := e.shards[ids[0].String()].TreeGetOpLog(context.Background(), tr.CID, tr.TreeID, height)
			require.NoError(t, err)
			if op.Time == 0 {
				break
			}
			sourceOps = append(sourceOps, op)
			height = op.Time + 1
		}

		height = 0
		var targetOps []pilorama.Move
		for {
			op, err := e.shards[ids[1].String()].TreeGetOpLog(context.Background(), tr.CID, tr.TreeID, height)
			require.NoError(t, err)
			if op.Time == 0 {
				break
			}
			targetOps = append(targetOps, op)
			height = op.Time + 1
		}

		require.Equal(t, sourceOps, targetOps)
	}
}
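
// TestEvacuateTreesRemote checks tree evacuation through the remote TreeHandler:
// the handler must receive the full operation log of every tree stored on the
// evacuated shards.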
func TestEvacuateTreesRemote(t *testing.T) {
	e, ids, _ := newEngineEvacuate(t, 2, 3)
	defer func() {
		require.NoError(t, e.Close(context.Background()))
	}()

	require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly))
	require.NoError(t, e.shards[ids[1].String()].SetMode(context.Background(), mode.ReadOnly))

	mutex := sync.Mutex{}
	evacuatedTreeOps := make(map[string][]*pilorama.Move)
	var prm EvacuateShardPrm
	prm.ShardID = ids
	prm.Scope = EvacuateScopeTrees
	prm.TreeHandler = func(ctx context.Context, contID cid.ID, treeID string, f pilorama.Forest) (bool, string, error) {
		key := contID.String() + treeID
		var height uint64
		for {
			op, err := f.TreeGetOpLog(ctx, contID, treeID, height)
			require.NoError(t, err)

			if op.Time == 0 {
				return true, "", nil
			}
			mutex.Lock()
			evacuatedTreeOps[key] = append(evacuatedTreeOps[key], &op)
			mutex.Unlock()
			height = op.Time + 1
		}
	}

	expectedShardIDs := make([]string, 0, len(ids))
	for _, id := range ids {
		expectedShardIDs = append(expectedShardIDs, id.String())
	}

	st, err := e.GetEvacuationState(context.Background())
	require.NoError(t, err, "get init state failed")
	require.Equal(t, EvacuateProcessStateUndefined, st.ProcessingStatus(), "invalid init state")
	require.Equal(t, uint64(0), st.TreesEvacuated(), "invalid init count")
	require.Nil(t, st.StartedAt(), "invalid init started at")
	require.Nil(t, st.FinishedAt(), "invalid init finished at")
	require.ElementsMatch(t, []string{}, st.ShardIDs(), "invalid init shard ids")
	require.Equal(t, "", st.ErrorMessage(), "invalid init error message")

	res, err := e.Evacuate(context.Background(), prm)
	require.NotNil(t, res, "sync evacuation result must not be nil")
	require.NoError(t, err, "evacuation failed")

	st, err = e.GetEvacuationState(context.Background())
	require.NoError(t, err, "get evacuation state failed")
	require.Equal(t, EvacuateProcessStateCompleted, st.ProcessingStatus())

	require.NoError(t, err, "get final state failed")
	require.Equal(t, uint64(6), st.TreesTotal(), "invalid trees total count")
	require.Equal(t, uint64(6), st.TreesEvacuated(), "invalid trees evacuated count")
	require.Equal(t, uint64(0), st.TreesFailed(), "invalid trees failed count")
	require.NotNil(t, st.StartedAt(), "invalid final started at")
	require.NotNil(t, st.FinishedAt(), "invalid final finished at")
	require.ElementsMatch(t, expectedShardIDs, st.ShardIDs(), "invalid final shard ids")
	require.Equal(t, "", st.ErrorMessage(), "invalid final error message")

	expectedTreeOps := make(map[string][]*pilorama.Move)
	for i := range len(e.shards) {
		sourceTrees, err := pilorama.TreeListAll(context.Background(), e.shards[ids[i].String()])
		require.NoError(t, err, "list source trees failed")
		require.Len(t, sourceTrees, 3)

		for _, tr := range sourceTrees {
			key := tr.CID.String() + tr.TreeID
			var height uint64
			for {
				op, err := e.shards[ids[i].String()].TreeGetOpLog(context.Background(), tr.CID, tr.TreeID, height)
				require.NoError(t, err)

				if op.Time == 0 {
					break
				}
				expectedTreeOps[key] = append(expectedTreeOps[key], &op)
				height = op.Time + 1
			}
		}
	}

	require.Equal(t, expectedTreeOps, evacuatedTreeOps)
}
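
// TestEvacuateShardObjectsRepOneOnly checks the RepOneOnly flag: only objects of
// containers whose placement policy consists of REP 1 replicas are evacuated,
// while objects of other containers (including one missing from the container
// source) are counted as skipped.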
func TestEvacuateShardObjectsRepOneOnly(t *testing.T) {
	e, ids, _ := newEngineEvacuate(t, 2, 0)
	defer func() {
		require.NoError(t, e.Close(context.Background()))
	}()

	// Create container with policy REP 2
	cnr1 := container.Container{}
	p1 := netmap.PlacementPolicy{}
	p1.SetContainerBackupFactor(1)
	x1 := netmap.ReplicaDescriptor{}
	x1.SetNumberOfObjects(2)
	p1.AddReplicas(x1)
	x1 = netmap.ReplicaDescriptor{}
	x1.SetNumberOfObjects(1)
	p1.AddReplicas(x1)
	cnr1.SetPlacementPolicy(p1)
	cnr1.SetAttribute("cnr", "cnr1")

	var idCnr1 cid.ID
	container.CalculateID(&idCnr1, cnr1)

	cnrmap := make(map[cid.ID]*container.Container)
	var cids []cid.ID
	cnrmap[idCnr1] = &cnr1
	cids = append(cids, idCnr1)

	// Create container with policy REP 1
	cnr2 := container.Container{}
	p2 := netmap.PlacementPolicy{}
	p2.SetContainerBackupFactor(1)
	x2 := netmap.ReplicaDescriptor{}
	x2.SetNumberOfObjects(1)
	p2.AddReplicas(x2)
	x2 = netmap.ReplicaDescriptor{}
	x2.SetNumberOfObjects(1)
	p2.AddReplicas(x2)
	cnr2.SetPlacementPolicy(p2)
	cnr2.SetAttribute("cnr", "cnr2")

	var idCnr2 cid.ID
	container.CalculateID(&idCnr2, cnr2)
	cnrmap[idCnr2] = &cnr2
	cids = append(cids, idCnr2)

	// Create container to simulate removal
	cnr3 := container.Container{}
	p3 := netmap.PlacementPolicy{}
	p3.SetContainerBackupFactor(1)
	x3 := netmap.ReplicaDescriptor{}
	x3.SetNumberOfObjects(1)
	p3.AddReplicas(x3)
	cnr3.SetPlacementPolicy(p3)
	cnr3.SetAttribute("cnr", "cnr3")

	var idCnr3 cid.ID
	container.CalculateID(&idCnr3, cnr3)
	cids = append(cids, idCnr3)

	e.SetContainerSource(&containerStorage{cntmap: cnrmap})

	for _, sh := range ids {
		for j := range 3 {
			for range 4 {
				obj := testutil.GenerateObjectWithCID(cids[j])
				var putPrm shard.PutPrm
				putPrm.SetObject(obj)
				_, err := e.shards[sh.String()].Put(context.Background(), putPrm)
				require.NoError(t, err)
			}
		}
	}

	var prm EvacuateShardPrm
	prm.ShardID = ids[0:1]
	prm.Scope = EvacuateScopeObjects
	prm.RepOneOnly = true

	require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly))

	res, err := e.Evacuate(context.Background(), prm)
	require.NoError(t, err)
	require.Equal(t, uint64(4), res.ObjectsEvacuated())
	require.Equal(t, uint64(8), res.ObjectsSkipped())
	require.Equal(t, uint64(0), res.ObjectsFailed())
}
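
// TestEvacuateShardObjectsRepOneOnlyBench is a manual benchmark (skipped by default)
// that measures RepOneOnly evacuation over 10,000 containers with an artificially
// slow container source.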
func TestEvacuateShardObjectsRepOneOnlyBench(t *testing.T) {
	t.Skip()
	e, ids, _ := newEngineEvacuate(t, 2, 0)
	defer func() {
		require.NoError(t, e.Close(context.Background()))
	}()

	cnrmap := make(map[cid.ID]*container.Container)
	var cids []cid.ID
	// Create containers with policy REP 1
	for i := range 10_000 {
		cnr1 := container.Container{}
		p1 := netmap.PlacementPolicy{}
		p1.SetContainerBackupFactor(1)
		x1 := netmap.ReplicaDescriptor{}
		x1.SetNumberOfObjects(2)
		p1.AddReplicas(x1)
		cnr1.SetPlacementPolicy(p1)
		cnr1.SetAttribute("i", strconv.Itoa(i))

		var idCnr1 cid.ID
		container.CalculateID(&idCnr1, cnr1)

		cnrmap[idCnr1] = &cnr1
		cids = append(cids, idCnr1)
	}

	e.SetContainerSource(&containerStorage{
		cntmap:  cnrmap,
		latency: time.Millisecond * 100,
	})

	for _, cnt := range cids {
		for range 1 {
			obj := testutil.GenerateObjectWithCID(cnt)
			var putPrm shard.PutPrm
			putPrm.SetObject(obj)
			_, err := e.shards[ids[0].String()].Put(context.Background(), putPrm)
			require.NoError(t, err)
		}
	}

	var prm EvacuateShardPrm
	prm.ShardID = ids[0:1]
	prm.Scope = EvacuateScopeObjects
	prm.RepOneOnly = true
	prm.ContainerWorkerCount = 10

	require.NoError(t, e.shards[ids[0].String()].SetMode(context.Background(), mode.ReadOnly))

	start := time.Now()
	_, err := e.Evacuate(context.Background(), prm)
	t.Logf("evacuate took %v\n", time.Since(start))
	require.NoError(t, err)
}