engine/test: Rework StorageEngine's test utils #1494
11 changed files with 138 additions and 175 deletions
@@ -164,7 +164,7 @@ func testEngineFailInitAndReload(t *testing.T, degradedMode bool, opts []shard.O
 }
 
 func TestExecBlocks(t *testing.T) {
-	e := testNewEngine(t).setShardsNum(t, 2).engine // number doesn't matter in this test, 2 is several but not many
+	e := testNewEngine(t).setShardsNum(t, 2).prepare(t).engine // number doesn't matter in this test, 2 is several but not many
 
 	// put some object
 	obj := testutil.GenerateObjectWithCID(cidtest.ID())
@@ -302,7 +302,8 @@ func engineWithShards(t *testing.T, path string, num int) (*StorageEngine, []str
 				meta.WithEpochState(epochState{}),
 			),
 		}
-	})
+	}).
+		prepare(t)
 	e, ids := te.engine, te.shardIDs
 
 	for _, id := range ids {
@@ -312,8 +313,5 @@ func engineWithShards(t *testing.T, path string, num int) (*StorageEngine, []str
 	require.Equal(t, num, len(e.shards))
 	require.Equal(t, num, len(e.shardPools))
 
-	require.NoError(t, e.Open(context.Background()))
-	require.NoError(t, e.Init(context.Background()))
-
 	return e, currShards
 }
@@ -7,7 +7,6 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
 	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
 	cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
 	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -49,13 +48,8 @@ func TestDeleteBigObject(t *testing.T) {
 	link.SetSplitID(splitID)
 	link.SetChildren(childIDs...)
 
-	s1 := testNewShard(t)
-	s2 := testNewShard(t)
-	s3 := testNewShard(t)
-
-	e := testNewEngine(t).setInitializedShards(t, s1, s2, s3).engine
-	e.log = test.NewLogger(t)
-	defer e.Close(context.Background())
+	e := testNewEngine(t).setShardsNum(t, 3).prepare(t).engine
+	defer func() { require.NoError(t, e.Close(context.Background())) }()
 
 	for i := range children {
 		require.NoError(t, Put(context.Background(), e, children[i], false))
@@ -119,11 +113,13 @@ func TestDeleteBigObjectWithoutGC(t *testing.T) {
 	link.SetSplitID(splitID)
 	link.SetChildren(childIDs...)
 
-	s1 := testNewShard(t, shard.WithDisabledGC())
+	te := testNewEngine(t).setShardsNumAdditionalOpts(t, 1, func(_ int) []shard.Option {
+		return []shard.Option{shard.WithDisabledGC()}
+	}).prepare(t)
+	e := te.engine
+	defer func() { require.NoError(t, e.Close(context.Background())) }()
 
-	e := testNewEngine(t).setInitializedShards(t, s1).engine
-	e.log = test.NewLogger(t)
-	defer e.Close(context.Background())
+	s1 := te.shards[0]
 
 	for i := range children {
 		require.NoError(t, Put(context.Background(), e, children[i], false))
@@ -3,24 +3,17 @@ package engine
 import (
 	"context"
 	"path/filepath"
-	"sync/atomic"
 	"testing"
 
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/teststore"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
 	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
-	cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
 	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
-	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-	oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
-	"git.frostfs.info/TrueCloudLab/hrw"
-	"github.com/panjf2000/ants/v2"
-	"github.com/stretchr/testify/require"
 )
 
@@ -30,113 +23,79 @@ func (s epochState) CurrentEpoch() uint64 {
 	return 0
 }
 
-func BenchmarkExists(b *testing.B) {
-	b.Run("2 shards", func(b *testing.B) {
-		benchmarkExists(b, 2)
-	})
-	b.Run("4 shards", func(b *testing.B) {
-		benchmarkExists(b, 4)
-	})
-	b.Run("8 shards", func(b *testing.B) {
-		benchmarkExists(b, 8)
-	})
-}
-
-func benchmarkExists(b *testing.B, shardNum int) {
-	shards := make([]*shard.Shard, shardNum)
-	for i := range shardNum {
-		shards[i] = testNewShard(b)
-	}
-
-	e := testNewEngine(b).setInitializedShards(b, shards...).engine
-	defer func() { require.NoError(b, e.Close(context.Background())) }()
-
-	addr := oidtest.Address()
-	for range 100 {
-		obj := testutil.GenerateObjectWithCID(cidtest.ID())
-		err := Put(context.Background(), e, obj, false)
-		if err != nil {
-			b.Fatal(err)
-		}
-	}
-
-	b.ReportAllocs()
-	b.ResetTimer()
-	for range b.N {
-		var shPrm shard.ExistsPrm
-		shPrm.Address = addr
-		shPrm.ParentAddress = oid.Address{}
-		ok, _, err := e.exists(context.Background(), shPrm)
-		if err != nil || ok {
-			b.Fatalf("%t %v", ok, err)
-		}
-	}
-}
-
 type testEngineWrapper struct {
 	engine   *StorageEngine
+	shards   []*shard.Shard
 	shardIDs []*shard.ID
 }
 
 func testNewEngine(t testing.TB, opts ...Option) *testEngineWrapper {
-	engine := New(WithLogger(test.NewLogger(t)))
-	for _, opt := range opts {
-		opt(engine.cfg)
-	}
-	return &testEngineWrapper{
-		engine: engine,
-	}
-}
-
-func (te *testEngineWrapper) setInitializedShards(t testing.TB, shards ...*shard.Shard) *testEngineWrapper {
-	for _, s := range shards {
-		pool, err := ants.NewPool(10, ants.WithNonblocking(true))
-		require.NoError(t, err)
-
-		te.engine.shards[s.ID().String()] = hashedShard{
-			shardWrapper: shardWrapper{
-				errorCount: new(atomic.Uint32),
-				Shard:      s,
-			},
-			hash: hrw.StringHash(s.ID().String()),
-		}
-		te.engine.shardPools[s.ID().String()] = pool
-		te.shardIDs = append(te.shardIDs, s.ID())
-	}
-	return te
+	opts = append(testGetDefaultEngineOptions(t), opts...)
+	return &testEngineWrapper{engine: New(opts...)}
 }
 
 func (te *testEngineWrapper) setShardsNum(t testing.TB, num int) *testEngineWrapper {
-	shards := make([]*shard.Shard, 0, num)
-
-	for range num {
-		shards = append(shards, testNewShard(t))
-	}
-
-	return te.setInitializedShards(t, shards...)
+	return te.setShardsNumOpts(t, num, func(_ int) []shard.Option {
+		return testGetDefaultShardOptions(t)
+	})
 }
 
-func (te *testEngineWrapper) setShardsNumOpts(t testing.TB, num int, shardOpts func(id int) []shard.Option) *testEngineWrapper {
+func (te *testEngineWrapper) setShardsNumOpts(
+	t testing.TB, num int, shardOpts func(id int) []shard.Option,
+) *testEngineWrapper {
+	te.shards = make([]*shard.Shard, num)
+	te.shardIDs = make([]*shard.ID, num)
 	for i := range num {
-		opts := shardOpts(i)
-		id, err := te.engine.AddShard(context.Background(), opts...)
+		shard, err := te.engine.createShard(context.Background(), shardOpts(i))
 		require.NoError(t, err)
-		te.shardIDs = append(te.shardIDs, id)
+		require.NoError(t, te.engine.addShard(shard))
+		te.shards[i] = shard
+		te.shardIDs[i] = shard.ID()
 	}
+	require.Len(t, te.engine.shards, num)
+	require.Len(t, te.engine.shardPools, num)
 	return te
 }
 
-func (te *testEngineWrapper) setShardsNumAdditionalOpts(t testing.TB, num int, shardOpts func(id int) []shard.Option) *testEngineWrapper {
-	for i := range num {
-		defaultOpts := testDefaultShardOptions(t)
-		opts := append(defaultOpts, shardOpts(i)...)
-		id, err := te.engine.AddShard(context.Background(), opts...)
-		require.NoError(t, err)
-		te.shardIDs = append(te.shardIDs, id)
-	}
-	return te
+func (te *testEngineWrapper) setShardsNumAdditionalOpts(
+	t testing.TB, num int, shardOpts func(id int) []shard.Option,
+) *testEngineWrapper {
+	return te.setShardsNumOpts(t, num, func(id int) []shard.Option {
+		return append(testGetDefaultShardOptions(t), shardOpts(id)...)
+	})
+}
+
+// prepare calls Open and Init on the created engine.
+func (te *testEngineWrapper) prepare(t testing.TB) *testEngineWrapper {
+	require.NoError(t, te.engine.Open(context.Background()))
+	require.NoError(t, te.engine.Init(context.Background()))
+	return te
+}
+
+func testGetDefaultEngineOptions(t testing.TB) []Option {
+	return []Option{
+		WithLogger(test.NewLogger(t)),
+	}
+}
+
+func testGetDefaultShardOptions(t testing.TB) []shard.Option {
+	return []shard.Option{
+		shard.WithLogger(test.NewLogger(t)),
+		shard.WithBlobStorOptions(
+			blobstor.WithStorages(
+				newStorages(t, t.TempDir(), 1<<20)),
+			blobstor.WithLogger(test.NewLogger(t)),
+		),
+		shard.WithPiloramaOptions(pilorama.WithPath(filepath.Join(t.TempDir(), "pilorama"))),
+		shard.WithMetaBaseOptions(
+			meta.WithPath(filepath.Join(t.TempDir(), "metabase")),
+			meta.WithPermissions(0o700),
+			meta.WithEpochState(epochState{}),
+			meta.WithLogger(test.NewLogger(t)),
+		),
+	}
+}
 
 func newStorages(t testing.TB, root string, smallSize uint64) []blobstor.SubStorage {
 	return []blobstor.SubStorage{
 		{
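For orientation, here is a minimal sketch of how a test composes the reworked helpers (the test name and assertions are illustrative, not part of this diff; the helper names are taken from the hunk above):

func TestEngineHelpersSketch(t *testing.T) {
	// Hypothetical test: build an engine with two shards, appending
	// per-shard options (here: disabled GC) to the defaults, then
	// Open and Init it via prepare.
	te := testNewEngine(t).
		setShardsNumAdditionalOpts(t, 2, func(_ int) []shard.Option {
			return []shard.Option{shard.WithDisabledGC()}
		}).
		prepare(t)
	e := te.engine
	defer func() { require.NoError(t, e.Close(context.Background())) }()

	// The wrapper now records the created shards and their IDs,
	// so tests no longer need to construct shards by hand.
	require.Len(t, te.shards, 2)
	require.Len(t, te.shardIDs, 2)
}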
@@ -186,34 +145,3 @@ func newTestStorages(root string, smallSize uint64) ([]blobstor.SubStorage, *tes
 		},
 	}, smallFileStorage, largeFileStorage
 }
-
-func testNewShard(t testing.TB, opts ...shard.Option) *shard.Shard {
-	sid, err := generateShardID()
-	require.NoError(t, err)
-
-	shardOpts := append([]shard.Option{shard.WithID(sid)}, testDefaultShardOptions(t)...)
-	s := shard.New(append(shardOpts, opts...)...)
-
-	require.NoError(t, s.Open(context.Background()))
-	require.NoError(t, s.Init(context.Background()))
-
-	return s
-}
-
-func testDefaultShardOptions(t testing.TB) []shard.Option {
-	return []shard.Option{
-		shard.WithLogger(test.NewLogger(t)),
-		shard.WithBlobStorOptions(
-			blobstor.WithStorages(
-				newStorages(t, t.TempDir(), 1<<20)),
-			blobstor.WithLogger(test.NewLogger(t)),
-		),
-		shard.WithPiloramaOptions(pilorama.WithPath(filepath.Join(t.TempDir(), "pilorama"))),
-		shard.WithMetaBaseOptions(
-			meta.WithPath(filepath.Join(t.TempDir(), "metabase")),
-			meta.WithPermissions(0o700),
-			meta.WithEpochState(epochState{}),
-			meta.WithLogger(test.NewLogger(t)),
-		),
-	}
-}
@@ -67,10 +67,8 @@ func newEngineWithErrorThreshold(t testing.TB, dir string, errThreshold uint32)
 			pilorama.WithPath(filepath.Join(dir, fmt.Sprintf("%d.pilorama", id))),
 			pilorama.WithPerm(0o700)),
 		}
-	})
+	}).prepare(t)
 	e := te.engine
-	require.NoError(t, e.Open(context.Background()))
-	require.NoError(t, e.Init(context.Background()))
 
 	for i, id := range te.shardIDs {
 		testShards[i].id = id
@@ -75,10 +75,9 @@ func newEngineEvacuate(t *testing.T, shardNum int, objPerShard int) (*StorageEng
 				pilorama.WithPerm(0o700),
 			),
 		}
-	})
+	}).
+		prepare(t)
 	e, ids := te.engine, te.shardIDs
-	require.NoError(t, e.Open(context.Background()))
-	require.NoError(t, e.Init(context.Background()))
 
 	objects := make([]*objectSDK.Object, 0, objPerShard*len(ids))
 	treeID := "version"
pkg/local_object_storage/engine/exists_test.go (new file, 51 lines)
@@ -0,0 +1,51 @@
+package engine
+
+import (
+	"context"
+	"testing"
+
+	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
+	cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
+	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+	oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
+	"github.com/stretchr/testify/require"
+)
+
+func BenchmarkExists(b *testing.B) {
+	b.Run("2 shards", func(b *testing.B) {
+		benchmarkExists(b, 2)
+	})
+	b.Run("4 shards", func(b *testing.B) {
+		benchmarkExists(b, 4)
+	})
+	b.Run("8 shards", func(b *testing.B) {
+		benchmarkExists(b, 8)
+	})
+}
+
+func benchmarkExists(b *testing.B, shardNum int) {
+	e := testNewEngine(b).setShardsNum(b, shardNum).prepare(b).engine
+	defer func() { require.NoError(b, e.Close(context.Background())) }()
+
+	addr := oidtest.Address()
+	for range 100 {
+		obj := testutil.GenerateObjectWithCID(cidtest.ID())
+		err := Put(context.Background(), e, obj, false)
+		if err != nil {
+			b.Fatal(err)
+		}
+	}
+
+	b.ReportAllocs()
+	b.ResetTimer()
+	for range b.N {
+		var shPrm shard.ExistsPrm
+		shPrm.Address = addr
+		shPrm.ParentAddress = oid.Address{}
+		ok, _, err := e.exists(context.Background(), shPrm)
+		if err != nil || ok {
+			b.Fatalf("%t %v", ok, err)
+		}
+	}
+}
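With the benchmark extracted into its own file, it can be run in isolation using standard Go tooling, e.g. go test -run '^$' -bench BenchmarkExists ./pkg/local_object_storage/engine (package path as in this repository).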
@@ -39,11 +39,11 @@ func TestHeadRaw(t *testing.T) {
 	link.SetSplitID(splitID)
 
 	t.Run("virtual object split in different shards", func(t *testing.T) {
-		s1 := testNewShard(t)
-		s2 := testNewShard(t)
+		te := testNewEngine(t).setShardsNum(t, 2).prepare(t)
+		e := te.engine
+		defer func() { require.NoError(t, e.Close(context.Background())) }()
 
-		e := testNewEngine(t).setInitializedShards(t, s1, s2).engine
-		defer e.Close(context.Background())
+		s1, s2 := te.shards[0], te.shards[1]
 
 		var putPrmLeft shard.PutPrm
 		putPrmLeft.SetObject(child)
@@ -37,8 +37,8 @@ func TestStorageEngine_Inhume(t *testing.T) {
 
 	t.Run("delete small object", func(t *testing.T) {
 		t.Parallel()
-		e := testNewEngine(t).setShardsNum(t, 1).engine
-		defer e.Close(context.Background())
+		e := testNewEngine(t).setShardsNum(t, 1).prepare(t).engine
+		defer func() { require.NoError(t, e.Close(context.Background())) }()
 
 		err := Put(context.Background(), e, parent, false)
 		require.NoError(t, err)
@@ -56,11 +56,12 @@ func TestStorageEngine_Inhume(t *testing.T) {
 
 	t.Run("delete big object", func(t *testing.T) {
 		t.Parallel()
-		s1 := testNewShard(t)
-		s2 := testNewShard(t)
-
-		e := testNewEngine(t).setInitializedShards(t, s1, s2).engine
-		defer e.Close(context.Background())
+		te := testNewEngine(t).setShardsNum(t, 2).prepare(t)
+		e := te.engine
+		defer func() { require.NoError(t, e.Close(context.Background())) }()
+
+		s1, s2 := te.shards[0], te.shards[1]
 
 		var putChild shard.PutPrm
 		putChild.SetObject(child)
@@ -68,10 +68,7 @@ func TestListWithCursor(t *testing.T) {
 				meta.WithEpochState(epochState{}),
 			),
 		}
-	}).engine
-	require.NoError(t, e.Open(context.Background()))
-	require.NoError(t, e.Init(context.Background()))
-
+	}).prepare(t).engine
 	defer func() {
 		require.NoError(t, e.Close(context.Background()))
 	}()
@@ -57,11 +57,9 @@ func TestLockUserScenario(t *testing.T) {
 			}),
 			shard.WithTombstoneSource(tss{lockerExpiresAfter}),
 		}
-	})
+	}).
+		prepare(t)
 	e := testEngine.engine
-	require.NoError(t, e.Open(context.Background()))
-	require.NoError(t, e.Init(context.Background()))
-
 	defer func() { require.NoError(t, e.Close(context.Background())) }()
 
 	lockerID := oidtest.ID()
@@ -162,11 +160,9 @@ func TestLockExpiration(t *testing.T) {
 				return pool
 			}),
 		}
-	})
+	}).
+		prepare(t)
 	e := testEngine.engine
-	require.NoError(t, e.Open(context.Background()))
-	require.NoError(t, e.Init(context.Background()))
-
 	defer func() { require.NoError(t, e.Close(context.Background())) }()
 
 	const lockerExpiresAfter = 13
@@ -243,9 +239,8 @@ func TestLockForceRemoval(t *testing.T) {
 			}),
 			shard.WithDeletedLockCallback(e.processDeletedLocks),
 		}
-	}).engine
-	require.NoError(t, e.Open(context.Background()))
-	require.NoError(t, e.Init(context.Background()))
+	}).
+		prepare(t).engine
 	defer func() { require.NoError(t, e.Close(context.Background())) }()
 
 	cnr := cidtest.ID()
@@ -13,7 +13,7 @@ import (
 func TestRemoveShard(t *testing.T) {
 	const numOfShards = 6
 
-	te := testNewEngine(t).setShardsNum(t, numOfShards)
+	te := testNewEngine(t).setShardsNum(t, numOfShards).prepare(t)
 	e, ids := te.engine, te.shardIDs
 	defer func() { require.NoError(t, e.Close(context.Background())) }()
 
@@ -51,7 +51,7 @@ func TestDisableShards(t *testing.T) {
 
 	const numOfShards = 2
 
-	te := testNewEngine(t).setShardsNum(t, numOfShards)
+	te := testNewEngine(t).setShardsNum(t, numOfShards).prepare(t)
 	e, ids := te.engine, te.shardIDs
 	defer func() { require.NoError(t, e.Close(context.Background())) }()
 