diff --git a/pkg/local_object_storage/engine/control_test.go b/pkg/local_object_storage/engine/control_test.go
index f954d906..2c44eb16 100644
--- a/pkg/local_object_storage/engine/control_test.go
+++ b/pkg/local_object_storage/engine/control_test.go
@@ -192,7 +192,8 @@ func testEngineFailInitAndReload(t *testing.T, errOnAdd bool, opts []shard.Optio
 }
 
 func TestExecBlocks(t *testing.T) {
-	e := testNewEngineWithShardNum(t, 2) // number doesn't matter in this test, 2 is several but not many
+	e := testNewEngine(t).setShardsNum(t, 2).engine // number doesn't matter in this test, 2 is several but not many
+
 	t.Cleanup(func() {
 		os.RemoveAll(t.Name())
 	})
@@ -314,25 +315,26 @@ func TestReload(t *testing.T) {
 
 // engineWithShards creates engine with specified number of shards. Returns
 // slice of paths to their metabase and the engine.
-// TODO: #1776 unify engine construction in tests
 func engineWithShards(t *testing.T, path string, num int) (*StorageEngine, []string) {
 	addPath := filepath.Join(path, "add")
 	currShards := make([]string, 0, num)
 
-	e := New()
-	for i := 0; i < num; i++ {
-		id, err := e.AddShard(
-			shard.WithBlobStorOptions(
-				blobstor.WithStorages(newStorages(filepath.Join(addPath, strconv.Itoa(i)), errSmallSize))),
-			shard.WithMetaBaseOptions(
-				meta.WithPath(filepath.Join(addPath, fmt.Sprintf("%d.metabase", i))),
-				meta.WithPermissions(0700),
-				meta.WithEpochState(epochState{}),
-			),
-		)
-		require.NoError(t, err)
+	te := testNewEngine(t).
+		setShardsNumAdditionalOpts(t, num, func(id int) []shard.Option {
+			return []shard.Option{
+				shard.WithBlobStorOptions(
+					blobstor.WithStorages(newStorages(filepath.Join(addPath, strconv.Itoa(id)), errSmallSize))),
+				shard.WithMetaBaseOptions(
+					meta.WithPath(filepath.Join(addPath, fmt.Sprintf("%d.metabase", id))),
+					meta.WithPermissions(0700),
+					meta.WithEpochState(epochState{}),
+				),
+			}
+		})
+	e, ids := te.engine, te.shardIDs
 
+	for _, id := range ids {
 		currShards = append(currShards, calculateShardID(e.shards[id.String()].DumpInfo()))
 	}
diff --git a/pkg/local_object_storage/engine/delete_test.go b/pkg/local_object_storage/engine/delete_test.go
index 8a4e6a7f..54d73cee 100644
--- a/pkg/local_object_storage/engine/delete_test.go
+++ b/pkg/local_object_storage/engine/delete_test.go
@@ -54,7 +54,7 @@ func TestDeleteBigObject(t *testing.T) {
 	s2 := testNewShard(t, 2)
 	s3 := testNewShard(t, 3)
 
-	e := testNewEngineWithShards(t, s1, s2, s3)
+	e := testNewEngine(t).setInitializedShards(t, s1, s2, s3).engine
 	e.log = &logger.Logger{Logger: zaptest.NewLogger(t)}
 	defer e.Close()
 
diff --git a/pkg/local_object_storage/engine/engine_test.go b/pkg/local_object_storage/engine/engine_test.go
index 83dbcd09..ddaf88d1 100644
--- a/pkg/local_object_storage/engine/engine_test.go
+++ b/pkg/local_object_storage/engine/engine_test.go
@@ -24,6 +24,7 @@ import (
 	"github.com/stretchr/testify/require"
 	"go.uber.org/atomic"
 	"go.uber.org/zap"
+	"go.uber.org/zap/zaptest"
 )
 
 type epochState struct{}
@@ -50,7 +51,7 @@ func benchmarkExists(b *testing.B, shardNum int) {
 		shards[i] = testNewShard(b, i)
 	}
 
-	e := testNewEngineWithShards(b, shards...)
+	e := testNewEngine(b).setInitializedShards(b, shards...).engine
 	b.Cleanup(func() {
 		_ = e.Close()
 		_ = os.RemoveAll(b.Name())
@@ -75,24 +76,68 @@ func benchmarkExists(b *testing.B, shardNum int) {
 	}
 }
 
-func testNewEngineWithShards(t testing.TB, shards ...*shard.Shard) *StorageEngine {
-	engine := New()
+type testEngineWrapper struct {
+	engine   *StorageEngine
+	shardIDs []*shard.ID
+}
+
+func testNewEngine(t testing.TB, opts ...Option) *testEngineWrapper {
+	engine := New(WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}))
+	for _, opt := range opts {
+		opt(engine.cfg)
+	}
+	return &testEngineWrapper{
+		engine: engine,
+	}
+}
+
+func (te *testEngineWrapper) setInitializedShards(t testing.TB, shards ...*shard.Shard) *testEngineWrapper {
 	for _, s := range shards {
 		pool, err := ants.NewPool(10, ants.WithNonblocking(true))
 		require.NoError(t, err)
 
-		engine.shards[s.ID().String()] = hashedShard{
+		te.engine.shards[s.ID().String()] = hashedShard{
 			shardWrapper: shardWrapper{
 				errorCount: atomic.NewUint32(0),
 				Shard:      s,
 			},
 			hash: hrw.Hash([]byte(s.ID().String())),
 		}
-		engine.shardPools[s.ID().String()] = pool
+		te.engine.shardPools[s.ID().String()] = pool
+		te.shardIDs = append(te.shardIDs, s.ID())
+	}
+	return te
+}
+
+func (te *testEngineWrapper) setShardsNum(t testing.TB, num int) *testEngineWrapper {
+	shards := make([]*shard.Shard, 0, num)
+
+	for i := 0; i < num; i++ {
+		shards = append(shards, testNewShard(t, i))
 	}
-	return engine
+	return te.setInitializedShards(t, shards...)
+}
+
+func (te *testEngineWrapper) setShardsNumOpts(t testing.TB, num int, shardOpts func(id int) []shard.Option) *testEngineWrapper {
+	for i := 0; i < num; i++ {
+		opts := shardOpts(i)
+		id, err := te.engine.AddShard(opts...)
+		require.NoError(t, err)
+		te.shardIDs = append(te.shardIDs, id)
+	}
+	return te
+}
+
+func (te *testEngineWrapper) setShardsNumAdditionalOpts(t testing.TB, num int, shardOpts func(id int) []shard.Option) *testEngineWrapper {
+	for i := 0; i < num; i++ {
+		defaultOpts := testDefaultShardOptions(t, i)
+		opts := append(defaultOpts, shardOpts(i)...)
+		id, err := te.engine.AddShard(opts...)
+		require.NoError(t, err)
+		te.shardIDs = append(te.shardIDs, id)
+	}
+	return te
 }
 
 func newStorages(root string, smallSize uint64) []blobstor.SubStorage {
@@ -145,8 +190,17 @@ func testNewShard(t testing.TB, id int) *shard.Shard {
 	sid, err := generateShardID()
 	require.NoError(t, err)
 
-	s := shard.New(
-		shard.WithID(sid),
+	shardOpts := append([]shard.Option{shard.WithID(sid)}, testDefaultShardOptions(t, id)...)
+	s := shard.New(shardOpts...)
+
+	require.NoError(t, s.Open())
+	require.NoError(t, s.Init(context.Background()))
+
+	return s
+}
+
+func testDefaultShardOptions(t testing.TB, id int) []shard.Option {
+	return []shard.Option{
 		shard.WithLogger(&logger.Logger{Logger: zap.L()}),
 		shard.WithBlobStorOptions(
 			blobstor.WithStorages(
@@ -157,46 +211,5 @@ func testNewShard(t testing.TB, id int) *shard.Shard {
 			meta.WithPath(filepath.Join(t.Name(), fmt.Sprintf("%d.metabase", id))),
 			meta.WithPermissions(0700),
 			meta.WithEpochState(epochState{}),
-		))
-
-	require.NoError(t, s.Open())
-	require.NoError(t, s.Init(context.Background()))
-
-	return s
-}
-
-func testEngineFromShardOpts(t *testing.T, num int, extraOpts []shard.Option) *StorageEngine {
-	engine := New()
-	for i := 0; i < num; i++ {
-		_, err := engine.AddShard(append([]shard.Option{
-			shard.WithBlobStorOptions(
-				blobstor.WithStorages(
-					newStorages(filepath.Join(t.Name(), fmt.Sprintf("blobstor%d", i)),
-						1<<20)),
-			),
-			shard.WithMetaBaseOptions(
-				meta.WithPath(filepath.Join(t.Name(), fmt.Sprintf("metabase%d", i))),
-				meta.WithPermissions(0700),
-				meta.WithEpochState(epochState{}),
-			),
-			shard.WithPiloramaOptions(
-				pilorama.WithPath(filepath.Join(t.Name(), fmt.Sprintf("pilorama%d", i)))),
-		}, extraOpts...)...)
-		require.NoError(t, err)
-	}
-
-	require.NoError(t, engine.Open())
-	require.NoError(t, engine.Init(context.Background()))
-
-	return engine
-}
-
-func testNewEngineWithShardNum(t *testing.T, num int) *StorageEngine {
-	shards := make([]*shard.Shard, 0, num)
-
-	for i := 0; i < num; i++ {
-		shards = append(shards, testNewShard(t, i))
-	}
-
-	return testNewEngineWithShards(t, shards...)
+	)}
 }
diff --git a/pkg/local_object_storage/engine/error_test.go b/pkg/local_object_storage/engine/error_test.go
index 8a32c8b6..c9b194f6 100644
--- a/pkg/local_object_storage/engine/error_test.go
+++ b/pkg/local_object_storage/engine/error_test.go
@@ -48,37 +48,39 @@ func newEngineWithErrorThreshold(t testing.TB, dir string, errThreshold uint32)
 		t.Cleanup(func() { _ = os.RemoveAll(dir) })
 	}
 
-	e := New(
-		WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
-		WithShardPoolSize(1),
-		WithErrorThreshold(errThreshold))
-
 	var testShards [2]*testShard
-	for i := range testShards {
-		storages, smallFileStorage, largeFileStorage := newTestStorages(filepath.Join(dir, strconv.Itoa(i)), errSmallSize)
-		id, err := e.AddShard(
-			shard.WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
-			shard.WithBlobStorOptions(blobstor.WithStorages(storages)),
-			shard.WithMetaBaseOptions(
-				meta.WithPath(filepath.Join(dir, fmt.Sprintf("%d.metabase", i))),
-				meta.WithPermissions(0700),
-				meta.WithEpochState(epochState{}),
-			),
-			shard.WithPiloramaOptions(
-				pilorama.WithPath(filepath.Join(dir, fmt.Sprintf("%d.pilorama", i))),
-				pilorama.WithPerm(0700)))
-		require.NoError(t, err)
-
-		testShards[i] = &testShard{
-			id:               id,
-			smallFileStorage: smallFileStorage,
-			largeFileStorage: largeFileStorage,
-		}
-	}
+	te := testNewEngine(t,
+		WithShardPoolSize(1),
+		WithErrorThreshold(errThreshold),
+	).
+		setShardsNumOpts(t, 2, func(id int) []shard.Option {
+			storages, smallFileStorage, largeFileStorage := newTestStorages(filepath.Join(dir, strconv.Itoa(id)), errSmallSize)
+			testShards[id] = &testShard{
+				smallFileStorage: smallFileStorage,
+				largeFileStorage: largeFileStorage,
+			}
+			return []shard.Option{
+				shard.WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
+				shard.WithBlobStorOptions(blobstor.WithStorages(storages)),
+				shard.WithMetaBaseOptions(
+					meta.WithPath(filepath.Join(dir, fmt.Sprintf("%d.metabase", id))),
+					meta.WithPermissions(0700),
+					meta.WithEpochState(epochState{}),
+				),
+				shard.WithPiloramaOptions(
+					pilorama.WithPath(filepath.Join(dir, fmt.Sprintf("%d.pilorama", id))),
+					pilorama.WithPerm(0700)),
+			}
+		})
+	e := te.engine
 
 	require.NoError(t, e.Open())
 	require.NoError(t, e.Init(context.Background()))
 
+	for i, id := range te.shardIDs {
+		testShards[i].id = id
+	}
+
 	return &testEngine{
 		ng:  e,
 		dir: dir,
diff --git a/pkg/local_object_storage/engine/evacuate_test.go b/pkg/local_object_storage/engine/evacuate_test.go
index 04d68d2e..51abc4b1 100644
--- a/pkg/local_object_storage/engine/evacuate_test.go
+++ b/pkg/local_object_storage/engine/evacuate_test.go
@@ -29,28 +29,23 @@ func newEngineEvacuate(t *testing.T, shardNum int, objPerShard int) (*StorageEng
 	require.NoError(t, err)
 	t.Cleanup(func() { _ = os.RemoveAll(dir) })
 
-	e := New(
-		WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
-		WithShardPoolSize(1))
-
-	ids := make([]*shard.ID, shardNum)
-
-	for i := range ids {
-		ids[i], err = e.AddShard(
-			shard.WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
-			shard.WithBlobStorOptions(
-				blobstor.WithStorages([]blobstor.SubStorage{{
-					Storage: fstree.New(
-						fstree.WithPath(filepath.Join(dir, strconv.Itoa(i))),
-						fstree.WithDepth(1)),
-				}})),
-			shard.WithMetaBaseOptions(
-				meta.WithPath(filepath.Join(dir, fmt.Sprintf("%d.metabase", i))),
-				meta.WithPermissions(0700),
-				meta.WithEpochState(epochState{}),
-			))
-		require.NoError(t, err)
-	}
+	te := testNewEngine(t, WithShardPoolSize(1)).
+		setShardsNumOpts(t, shardNum, func(id int) []shard.Option {
+			return []shard.Option{
+				shard.WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
+				shard.WithBlobStorOptions(
+					blobstor.WithStorages([]blobstor.SubStorage{{
+						Storage: fstree.New(
+							fstree.WithPath(filepath.Join(dir, strconv.Itoa(id))),
+							fstree.WithDepth(1)),
+					}})),
+				shard.WithMetaBaseOptions(
+					meta.WithPath(filepath.Join(dir, fmt.Sprintf("%d.metabase", id))),
+					meta.WithPermissions(0700),
+					meta.WithEpochState(epochState{})),
+			}
+		})
+	e, ids := te.engine, te.shardIDs
 
 	require.NoError(t, e.Open())
 	require.NoError(t, e.Init(context.Background()))
diff --git a/pkg/local_object_storage/engine/head_test.go b/pkg/local_object_storage/engine/head_test.go
index 1ddfedc5..e2a1edc9 100644
--- a/pkg/local_object_storage/engine/head_test.go
+++ b/pkg/local_object_storage/engine/head_test.go
@@ -44,7 +44,7 @@ func TestHeadRaw(t *testing.T) {
 		s1 := testNewShard(t, 1)
 		s2 := testNewShard(t, 2)
 
-		e := testNewEngineWithShards(t, s1, s2)
+		e := testNewEngine(t).setInitializedShards(t, s1, s2).engine
 		defer e.Close()
 
 		var putPrmLeft shard.PutPrm
diff --git a/pkg/local_object_storage/engine/inhume_test.go b/pkg/local_object_storage/engine/inhume_test.go
index e3150c17..4f8c96b9 100644
--- a/pkg/local_object_storage/engine/inhume_test.go
+++ b/pkg/local_object_storage/engine/inhume_test.go
@@ -39,7 +39,7 @@ func TestStorageEngine_Inhume(t *testing.T) {
 	link.SetSplitID(splitID)
 
 	t.Run("delete small object", func(t *testing.T) {
-		e := testNewEngineWithShardNum(t, 1)
+		e := testNewEngine(t).setShardsNum(t, 1).engine
 		defer e.Close()
 
 		err := Put(e, parent)
@@ -60,7 +60,7 @@ func TestStorageEngine_Inhume(t *testing.T) {
 		s1 := testNewShard(t, 1)
 		s2 := testNewShard(t, 2)
 
-		e := testNewEngineWithShards(t, s1, s2)
+		e := testNewEngine(t).setInitializedShards(t, s1, s2).engine
 		defer e.Close()
 
 		var putChild shard.PutPrm
diff --git a/pkg/local_object_storage/engine/list_test.go b/pkg/local_object_storage/engine/list_test.go
index ad0eb191..1261de9d 100644
--- a/pkg/local_object_storage/engine/list_test.go
+++ b/pkg/local_object_storage/engine/list_test.go
@@ -16,7 +16,7 @@ import (
 func TestListWithCursor(t *testing.T) {
 	s1 := testNewShard(t, 1)
 	s2 := testNewShard(t, 2)
-	e := testNewEngineWithShards(t, s1, s2)
+	e := testNewEngine(t).setInitializedShards(t, s1, s2).engine
 
 	t.Cleanup(func() {
 		e.Close()
diff --git a/pkg/local_object_storage/engine/lock_test.go b/pkg/local_object_storage/engine/lock_test.go
index 1014b214..fd3b04ef 100644
--- a/pkg/local_object_storage/engine/lock_test.go
+++ b/pkg/local_object_storage/engine/lock_test.go
@@ -45,15 +45,21 @@ func TestLockUserScenario(t *testing.T) {
 	tombForLockID := oidtest.ID()
 	tombObj.SetID(tombForLockID)
 
-	e := testEngineFromShardOpts(t, 2, []shard.Option{
-		shard.WithGCWorkerPoolInitializer(func(sz int) util.WorkerPool {
-			pool, err := ants.NewPool(sz)
-			require.NoError(t, err)
+	testEngine := testNewEngine(t).
+		setShardsNumAdditionalOpts(t, 2, func(id int) []shard.Option {
+			return []shard.Option{
+				shard.WithGCWorkerPoolInitializer(func(sz int) util.WorkerPool {
+					pool, err := ants.NewPool(sz)
+					require.NoError(t, err)
 
-			return pool
-		}),
-		shard.WithTombstoneSource(tss{lockerExpiresAfter}),
-	})
+					return pool
+				}),
+				shard.WithTombstoneSource(tss{lockerExpiresAfter}),
+			}
+		})
+	e := testEngine.engine
+	require.NoError(t, e.Open())
+	require.NoError(t, e.Init(context.Background()))
 
 	t.Cleanup(func() {
 		_ = e.Close()
@@ -146,14 +152,20 @@ func TestLockExpiration(t *testing.T) {
 	// 3. lock expiration epoch is coming
 	// 4. after some delay the object is not locked anymore
 
-	e := testEngineFromShardOpts(t, 2, []shard.Option{
-		shard.WithGCWorkerPoolInitializer(func(sz int) util.WorkerPool {
-			pool, err := ants.NewPool(sz)
-			require.NoError(t, err)
+	testEngine := testNewEngine(t).
+		setShardsNumAdditionalOpts(t, 2, func(id int) []shard.Option {
+			return []shard.Option{
+				shard.WithGCWorkerPoolInitializer(func(sz int) util.WorkerPool {
+					pool, err := ants.NewPool(sz)
+					require.NoError(t, err)
 
-			return pool
-		}),
-	})
+					return pool
+				}),
+			}
+		})
+	e := testEngine.engine
+	require.NoError(t, e.Open())
+	require.NoError(t, e.Init(context.Background()))
 
 	t.Cleanup(func() {
 		_ = e.Close()
@@ -218,16 +230,20 @@ func TestLockForceRemoval(t *testing.T) {
 	// 5. the object is not locked anymore
 
 	var e *StorageEngine
-	e = testEngineFromShardOpts(t, 2, []shard.Option{
-		shard.WithGCWorkerPoolInitializer(func(sz int) util.WorkerPool {
-			pool, err := ants.NewPool(sz)
-			require.NoError(t, err)
-
-			return pool
-		}),
-		shard.WithDeletedLockCallback(e.processDeletedLocks),
-	})
+	e = testNewEngine(t).
+		setShardsNumAdditionalOpts(t, 2, func(id int) []shard.Option {
+			return []shard.Option{
+				shard.WithGCWorkerPoolInitializer(func(sz int) util.WorkerPool {
+					pool, err := ants.NewPool(sz)
+					require.NoError(t, err)
+					return pool
+				}),
+				shard.WithDeletedLockCallback(e.processDeletedLocks),
+			}
+		}).engine
+	require.NoError(t, e.Open())
+	require.NoError(t, e.Init(context.Background()))
 
 	t.Cleanup(func() {
 		_ = e.Close()
 		_ = os.RemoveAll(t.Name())
diff --git a/pkg/local_object_storage/engine/shards_test.go b/pkg/local_object_storage/engine/shards_test.go
index 67a006b5..1bc0b880 100644
--- a/pkg/local_object_storage/engine/shards_test.go
+++ b/pkg/local_object_storage/engine/shards_test.go
@@ -10,7 +10,8 @@ import (
 func TestRemoveShard(t *testing.T) {
 	const numOfShards = 6
 
-	e := testNewEngineWithShardNum(t, numOfShards)
+	te := testNewEngine(t).setShardsNum(t, numOfShards)
+	e, ids := te.engine, te.shardIDs
 	t.Cleanup(func() {
 		e.Close()
 		os.RemoveAll(t.Name())
@@ -22,12 +23,12 @@ func TestRemoveShard(t *testing.T) {
 	removedNum := numOfShards / 2
 
 	mSh := make(map[string]bool, numOfShards)
-	for i, sh := range e.DumpInfo().Shards {
+	for i, id := range ids {
 		if i == removedNum {
 			break
 		}
 
-		mSh[sh.ID.String()] = true
+		mSh[id.String()] = true
 	}
 
 	for id, remove := range mSh {
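
For reference, below is a minimal usage sketch of the testEngineWrapper builder introduced in engine_test.go above. It is not part of the patch: the test name, the shard count of 2, and the pilorama option passed through setShardsNumAdditionalOpts are illustrative assumptions; the rest (testNewEngine, WithShardPoolSize, te.engine, te.shardIDs, and the explicit Open/Init plus cleanup) mirrors how the tests changed in this diff use the helper.

func TestEngineWrapperUsageSketch(t *testing.T) {
	// Hypothetical test: two shards built from testDefaultShardOptions, with an
	// extra pilorama option layered on top via setShardsNumAdditionalOpts.
	te := testNewEngine(t, WithShardPoolSize(1)).
		setShardsNumAdditionalOpts(t, 2, func(id int) []shard.Option {
			return []shard.Option{
				shard.WithPiloramaOptions(
					pilorama.WithPath(filepath.Join(t.Name(), fmt.Sprintf("%d.pilorama", id)))),
			}
		})
	e := te.engine

	// Shards added through AddShard are not opened by the wrapper, so the caller
	// still opens and initializes the engine and owns the cleanup, exactly as in
	// the error_test.go and lock_test.go changes above.
	require.NoError(t, e.Open())
	require.NoError(t, e.Init(context.Background()))
	t.Cleanup(func() {
		_ = e.Close()
		_ = os.RemoveAll(t.Name())
	})

	// The wrapper records the generated shard IDs for later assertions.
	require.Len(t, te.shardIDs, 2)
}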