forked from TrueCloudLab/frostfs-node

[#947] engine: Evacuate trees to local shards

Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>

parent e4064c4394
commit 728150d1d2

6 changed files with 463 additions and 35 deletions
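The point of the commit, per its title, is that tree evacuation can now target the node's own shards rather than requiring a remote replicator. Below is a minimal sketch of the call sequence the tests in this diff exercise, assuming it lives in the `engine` package; the helper name `evacuateTreesLocally` is hypothetical, while `EvacuateShardPrm`, `EvacuateScopeTrees`, `SetMode`, and `Evacuate` all appear in the diff itself:

```go
package engine

import (
	"context"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
)

// evacuateTreesLocally is a hypothetical helper: evacuation requires the
// source shards to be read-only, so switch them first, then ask the engine
// to move only their pilorama trees. With no remote handler configured,
// the trees land on the remaining local shards.
func evacuateTreesLocally(ctx context.Context, e *StorageEngine, ids []*shard.ID) error {
	for _, id := range ids {
		if err := e.shards[id.String()].SetMode(mode.ReadOnly); err != nil {
			return err
		}
	}

	var prm EvacuateShardPrm
	prm.ShardID = ids
	prm.Scope = EvacuateScopeTrees // trees only; objects stay where they are

	_, err := e.Evacuate(ctx, prm)
	return err
}
```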
```diff
@@ -14,6 +14,7 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
 	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
```
```diff
@@ -41,6 +42,10 @@ func newEngineEvacuate(t *testing.T, shardNum int, objPerShard int) (*StorageEng
 				meta.WithPath(filepath.Join(dir, fmt.Sprintf("%d.metabase", id))),
 				meta.WithPermissions(0o700),
 				meta.WithEpochState(epochState{})),
+			shard.WithPiloramaOptions(
+				pilorama.WithPath(filepath.Join(dir, fmt.Sprintf("%d.pilorama", id))),
+				pilorama.WithPerm(0o700),
+			),
 		}
 	})
 	e, ids := te.engine, te.shardIDs
```
```diff
@@ -48,36 +53,32 @@ func newEngineEvacuate(t *testing.T, shardNum int, objPerShard int) (*StorageEng
 	require.NoError(t, e.Init(context.Background()))
 
 	objects := make([]*objectSDK.Object, 0, objPerShard*len(ids))
-
-	for _, sh := range ids {
-		obj := testutil.GenerateObjectWithCID(cidtest.ID())
-		objects = append(objects, obj)
-
-		var putPrm shard.PutPrm
-		putPrm.SetObject(obj)
-		_, err := e.shards[sh.String()].Put(context.Background(), putPrm)
-		require.NoError(t, err)
+	treeID := "version"
+	meta := []pilorama.KeyValue{
+		{Key: pilorama.AttributeVersion, Value: []byte("XXX")},
+		{Key: pilorama.AttributeFilename, Value: []byte("file.txt")},
 	}
 
-	for i := 0; ; i++ {
-		objects = append(objects, testutil.GenerateObjectWithCID(cidtest.ID()))
-
-		var putPrm PutPrm
-		putPrm.WithObject(objects[len(objects)-1])
-
-		err := e.Put(context.Background(), putPrm)
-		require.NoError(t, err)
-
-		res, err := e.shards[ids[len(ids)-1].String()].List(context.Background())
-		require.NoError(t, err)
-		if len(res.AddressList()) == objPerShard {
-			break
+	for _, sh := range ids {
+		for i := 0; i < objPerShard; i++ {
+			contID := cidtest.ID()
+			obj := testutil.GenerateObjectWithCID(contID)
+			objects = append(objects, obj)
+
+			var putPrm shard.PutPrm
+			putPrm.SetObject(obj)
+			_, err := e.shards[sh.String()].Put(context.Background(), putPrm)
+			require.NoError(t, err)
+
+			_, err = e.shards[sh.String()].TreeAddByPath(context.Background(), pilorama.CIDDescriptor{CID: contID, Position: 0, Size: 1},
+				treeID, pilorama.AttributeFilename, []string{"path", "to", "the", "file"}, meta)
+			require.NoError(t, err)
 		}
 	}
 	return e, ids, objects
 }
 
-func TestEvacuateShard(t *testing.T) {
+func TestEvacuateShardObjects(t *testing.T) {
 	t.Parallel()
 
 	const objPerShard = 3
```
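Note what the hunk above changes in the fixture: instead of filling shards indirectly through engine-level `Put` calls and polling the last shard's `List` until it holds `objPerShard` objects, `newEngineEvacuate` now puts exactly `objPerShard` objects into every shard directly, one fresh container per object, and seeds a small "version" tree for each container via `TreeAddByPath`. Shard contents become deterministic, and the new tree-evacuation test gets known trees to move.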
```diff
@@ -103,6 +104,7 @@ func TestEvacuateShard(t *testing.T) {
 
 	var prm EvacuateShardPrm
 	prm.ShardID = ids[2:3]
+	prm.Scope = EvacuateScopeObjects
 
 	t.Run("must be read-only", func(t *testing.T) {
 		res, err := e.Evacuate(context.Background(), prm)
```
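From this hunk onward, every pre-existing object-evacuation test picks up the same one-line change: the `Scope` field of `EvacuateShardPrm` is set explicitly to `EvacuateScopeObjects`, keeping the old object-only behavior now that evacuation can also be scoped to trees (`EvacuateScopeTrees`, used by the new test at the end of the file).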
```diff
@@ -137,7 +139,7 @@ func TestEvacuateShard(t *testing.T) {
 	checkHasObjects(t)
 }
 
-func TestEvacuateNetwork(t *testing.T) {
+func TestEvacuateObjectsNetwork(t *testing.T) {
 	t.Parallel()
 
 	errReplication := errors.New("handler error")
```
```diff
@@ -174,6 +176,7 @@ func TestEvacuateNetwork(t *testing.T) {
 
 	var prm EvacuateShardPrm
 	prm.ShardID = ids[0:1]
+	prm.Scope = EvacuateScopeObjects
 
 	res, err := e.Evacuate(context.Background(), prm)
 	require.ErrorIs(t, err, errMustHaveTwoShards)
```
```diff
@@ -198,6 +201,7 @@ func TestEvacuateNetwork(t *testing.T) {
 	var prm EvacuateShardPrm
 	prm.ShardID = ids[1:2]
 	prm.ObjectsHandler = acceptOneOf(objects, 2)
+	prm.Scope = EvacuateScopeObjects
 
 	res, err := e.Evacuate(context.Background(), prm)
 	require.ErrorIs(t, err, errReplication)
```
```diff
@@ -235,6 +239,7 @@ func TestEvacuateNetwork(t *testing.T) {
 	var prm EvacuateShardPrm
 	prm.ShardID = evacuateIDs
 	prm.ObjectsHandler = acceptOneOf(objects, totalCount-1)
+	prm.Scope = EvacuateScopeObjects
 
 	res, err := e.Evacuate(context.Background(), prm)
 	require.ErrorIs(t, err, errReplication)
```
```diff
@@ -270,6 +275,7 @@ func TestEvacuateCancellation(t *testing.T) {
 		}
 		return nil
 	}
+	prm.Scope = EvacuateScopeObjects
 
 	ctx, cancel := context.WithCancel(context.Background())
 	cancel()
```
```diff
@@ -293,6 +299,7 @@ func TestEvacuateSingleProcess(t *testing.T) {
 
 	var prm EvacuateShardPrm
 	prm.ShardID = ids[1:2]
+	prm.Scope = EvacuateScopeObjects
 	prm.ObjectsHandler = func(ctx context.Context, a oid.Address, o *objectSDK.Object) error {
 		select {
 		case <-running:
```
```diff
@@ -321,7 +328,7 @@ func TestEvacuateSingleProcess(t *testing.T) {
 	require.NoError(t, eg.Wait())
 }
 
-func TestEvacuateAsync(t *testing.T) {
+func TestEvacuateObjectsAsync(t *testing.T) {
 	e, ids, _ := newEngineEvacuate(t, 2, 3)
 	defer func() {
 		require.NoError(t, e.Close(context.Background()))
```
```diff
@@ -335,6 +342,7 @@ func TestEvacuateAsync(t *testing.T) {
 
 	var prm EvacuateShardPrm
 	prm.ShardID = ids[1:2]
+	prm.Scope = EvacuateScopeObjects
 	prm.ObjectsHandler = func(ctx context.Context, a oid.Address, o *objectSDK.Object) error {
 		select {
 		case <-running:
```
```diff
@@ -393,3 +401,82 @@ func TestEvacuateAsync(t *testing.T) {
 
 	require.NoError(t, eg.Wait())
 }
+
+func TestEvacuateTreesLocal(t *testing.T) {
+	e, ids, _ := newEngineEvacuate(t, 2, 3)
+	defer func() {
+		require.NoError(t, e.Close(context.Background()))
+	}()
+
+	require.NoError(t, e.shards[ids[0].String()].SetMode(mode.ReadOnly))
+
+	var prm EvacuateShardPrm
+	prm.ShardID = ids[0:1]
+	prm.Scope = EvacuateScopeTrees
+
+	expectedShardIDs := make([]string, 0, 1)
+	for _, id := range ids[0:1] {
+		expectedShardIDs = append(expectedShardIDs, id.String())
+	}
+
+	st, err := e.GetEvacuationState(context.Background())
+	require.NoError(t, err, "get init state failed")
+	require.Equal(t, EvacuateProcessStateUndefined, st.ProcessingStatus(), "invalid init state")
+	require.Equal(t, uint64(0), st.TreesEvacuated(), "invalid init count")
+	require.Nil(t, st.StartedAt(), "invalid init started at")
+	require.Nil(t, st.FinishedAt(), "invalid init finished at")
+	require.ElementsMatch(t, []string{}, st.ShardIDs(), "invalid init shard ids")
+	require.Equal(t, "", st.ErrorMessage(), "invalid init error message")
+
+	res, err := e.Evacuate(context.Background(), prm)
+	require.NotNil(t, res, "sync evacuation result must be not nil")
+	require.NoError(t, err, "evacuation failed")
+
+	st, err = e.GetEvacuationState(context.Background())
+	require.NoError(t, err, "get evacuation state failed")
+	require.Equal(t, EvacuateProcessStateCompleted, st.ProcessingStatus())
+
+	require.Equal(t, uint64(3), st.TreesTotal(), "invalid trees total count")
+	require.Equal(t, uint64(3), st.TreesEvacuated(), "invalid trees evacuated count")
+	require.Equal(t, uint64(0), st.TreesFailed(), "invalid trees failed count")
+	require.NotNil(t, st.StartedAt(), "invalid final started at")
+	require.NotNil(t, st.FinishedAt(), "invalid final finished at")
+	require.ElementsMatch(t, expectedShardIDs, st.ShardIDs(), "invalid final shard ids")
+	require.Equal(t, "", st.ErrorMessage(), "invalid final error message")
+
+	sourceTrees, err := pilorama.TreeListAll(context.Background(), e.shards[ids[0].String()])
+	require.NoError(t, err, "list source trees failed")
+	require.Len(t, sourceTrees, 3)
+
+	for _, tr := range sourceTrees {
+		exists, err := e.shards[ids[1].String()].TreeExists(context.Background(), tr.CID, tr.TreeID)
+		require.NoError(t, err, "failed to check tree existence")
+		require.True(t, exists, "tree doesn't exist on target shard")
+
+		var height uint64
+		var sourceOps []pilorama.Move
+		for {
+			op, err := e.shards[ids[0].String()].TreeGetOpLog(context.Background(), tr.CID, tr.TreeID, height)
+			require.NoError(t, err)
+			if op.Time == 0 {
+				break
+			}
+			sourceOps = append(sourceOps, op)
+			height = op.Time + 1
+		}
+
+		height = 0
+		var targetOps []pilorama.Move
+		for {
+			op, err := e.shards[ids[1].String()].TreeGetOpLog(context.Background(), tr.CID, tr.TreeID, height)
+			require.NoError(t, err)
+			if op.Time == 0 {
+				break
+			}
+			targetOps = append(targetOps, op)
+			height = op.Time + 1
+		}
+
+		require.Equal(t, sourceOps, targetOps)
+	}
+}
```
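The two identical drain loops in `TestEvacuateTreesLocal` follow the pilorama op-log convention visible in the test: fetch the next operation at or after `height`, advance `height` past the returned operation's logical `Time`, and stop once a zero-`Time` `Move` comes back. Factored out, the pattern might look like this sketch; `opLogger` and `drainOpLog` are hypothetical names, and the `TreeGetOpLog` signature is copied from the calls in this diff:

```go
import (
	"context"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
	cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
)

// opLogger matches the op-log method the shards expose in this diff.
type opLogger interface {
	TreeGetOpLog(ctx context.Context, cid cidSDK.ID, treeID string, height uint64) (pilorama.Move, error)
}

// drainOpLog collects a tree's full operation log; a zero Time on the
// returned Move marks the end, mirroring the test's break condition.
func drainOpLog(ctx context.Context, src opLogger, cid cidSDK.ID, treeID string) ([]pilorama.Move, error) {
	var (
		ops    []pilorama.Move
		height uint64
	)
	for {
		op, err := src.TreeGetOpLog(ctx, cid, treeID, height)
		if err != nil {
			return nil, err
		}
		if op.Time == 0 {
			return ops, nil
		}
		ops = append(ops, op)
		height = op.Time + 1 // resume just past the last seen operation
	}
}
```

With such a helper, the tail of the test reduces to draining the source and target shards and comparing the two slices with `require.Equal`.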