[#1491] engine/test: Rework engine test utils

- Remove `testNewShard` and `setInitializedShards` because they bypassed
  the default engine workflow: `New()`, then `Open()`, then `Init()`.
  Because of that shortcut, adding new logic to `(*StorageEngine).Init`
  made several tests panic on access to uninitialized resources. Now
  every engine created with the test utils must be initialized
  explicitly; the new helper method `prepare` does exactly that (see the
  sketch after this list).
- Additionally, `setInitializedShards` hardcoded the shard worker
pool size, which prevented it from being configured in tests and
benchmarks. This has been fixed as well.
- Ensure engine initialization is done wherever it was missing.
- Refactor `setShardsNum`, `setShardsNumAdditionalOpts`, and
  `setShardsNumOpts` so that the first two delegate to
  `setShardsNumOpts`.
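
A minimal sketch of the reworked setup (hypothetical test;
`WithShardPoolSize` is assumed here as an example engine option, the
rest mirrors the helpers in the diff below):

    func TestExample(t *testing.T) {
        // WithShardPoolSize is a hypothetical option shown for illustration:
        // engine options can now be passed through testNewEngine.
        te := testNewEngine(t, WithShardPoolSize(32)).
            setShardsNumAdditionalOpts(t, 2, func(_ int) []shard.Option {
                return []shard.Option{shard.WithDisabledGC()} // per-shard extras
            }).
            prepare(t) // Open() + Init(), previously hidden in setInitializedShards
        e := te.engine
        defer func() { require.NoError(t, e.Close(context.Background())) }()

        s1 := te.shards[0] // created shards are available on the wrapper
        _ = s1
    }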

Signed-off-by: Aleksey Savchuk <a.savchuk@yadro.com>

@@ -164,7 +164,7 @@ func testEngineFailInitAndReload(t *testing.T, degradedMode bool, opts []shard.O
 }
 
 func TestExecBlocks(t *testing.T) {
-    e := testNewEngine(t).setShardsNum(t, 2).engine // number doesn't matter in this test, 2 is several but not many
+    e := testNewEngine(t).setShardsNum(t, 2).prepare(t).engine // number doesn't matter in this test, 2 is several but not many
 
     // put some object
     obj := testutil.GenerateObjectWithCID(cidtest.ID())
@@ -302,7 +302,8 @@ func engineWithShards(t *testing.T, path string, num int) (*StorageEngine, []str
                meta.WithEpochState(epochState{}),
            ),
        }
-    })
+    }).
+        prepare(t)
 
    e, ids := te.engine, te.shardIDs
    for _, id := range ids {
@@ -312,8 +313,5 @@ func engineWithShards(t *testing.T, path string, num int) (*StorageEngine, []str
    require.Equal(t, num, len(e.shards))
    require.Equal(t, num, len(e.shardPools))
 
-    require.NoError(t, e.Open(context.Background()))
-    require.NoError(t, e.Init(context.Background()))
-
    return e, currShards
 }

@@ -7,7 +7,6 @@ import (
    "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
    "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
    "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
-    "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
    apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
    cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
    objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -49,9 +48,8 @@ func TestDeleteBigObject(t *testing.T) {
    link.SetSplitID(splitID)
    link.SetChildren(childIDs...)
 
-    e := testNewEngine(t).setShardsNum(t, 3).engine
-    e.log = test.NewLogger(t)
-    defer e.Close(context.Background())
+    e := testNewEngine(t).setShardsNum(t, 3).prepare(t).engine
+    defer func() { require.NoError(t, e.Close(context.Background())) }()
 
    for i := range children {
        require.NoError(t, Put(context.Background(), e, children[i], false))
@@ -115,11 +113,13 @@ func TestDeleteBigObjectWithoutGC(t *testing.T) {
    link.SetSplitID(splitID)
    link.SetChildren(childIDs...)
 
-    s1 := testNewShard(t, shard.WithDisabledGC())
+    te := testNewEngine(t).setShardsNumAdditionalOpts(t, 1, func(_ int) []shard.Option {
+        return []shard.Option{shard.WithDisabledGC()}
+    }).prepare(t)
+    e := te.engine
+    defer func() { require.NoError(t, e.Close(context.Background())) }()
 
-    e := testNewEngine(t).setInitializedShards(t, s1).engine
-    e.log = test.NewLogger(t)
-    defer e.Close(context.Background())
+    s1 := te.shards[0]
 
    for i := range children {
        require.NoError(t, Put(context.Background(), e, children[i], false))

@@ -3,7 +3,6 @@ package engine
 import (
    "context"
    "path/filepath"
-    "sync/atomic"
    "testing"
 
    "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
@@ -26,68 +25,77 @@ func (s epochState) CurrentEpoch() uint64 {
 type testEngineWrapper struct {
    engine   *StorageEngine
+    shards   []*shard.Shard
    shardIDs []*shard.ID
 }
 
 func testNewEngine(t testing.TB, opts ...Option) *testEngineWrapper {
-    engine := New(WithLogger(test.NewLogger(t)))
-    for _, opt := range opts {
-        opt(engine.cfg)
-    }
-    return &testEngineWrapper{
-        engine: engine,
-    }
-}
-
-func (te *testEngineWrapper) setInitializedShards(t testing.TB, shards ...*shard.Shard) *testEngineWrapper {
-    for _, s := range shards {
-        pool, err := ants.NewPool(10, ants.WithNonblocking(true))
-        require.NoError(t, err)
-
-        te.engine.shards[s.ID().String()] = hashedShard{
-            shardWrapper: shardWrapper{
-                errorCount: new(atomic.Uint32),
-                Shard:      s,
-            },
-            hash: hrw.StringHash(s.ID().String()),
-        }
-        te.engine.shardPools[s.ID().String()] = pool
-        te.shardIDs = append(te.shardIDs, s.ID())
-    }
-    return te
+    opts = append(testGetDefaultEngineOptions(t), opts...)
+    return &testEngineWrapper{engine: New(opts...)}
 }
 
 func (te *testEngineWrapper) setShardsNum(t testing.TB, num int) *testEngineWrapper {
-    shards := make([]*shard.Shard, 0, num)
-
-    for range num {
-        shards = append(shards, testNewShard(t))
-    }
-
-    return te.setInitializedShards(t, shards...)
+    return te.setShardsNumOpts(t, num, func(_ int) []shard.Option {
+        return testGetDefaultShardOptions(t)
+    })
 }
 
-func (te *testEngineWrapper) setShardsNumOpts(t testing.TB, num int, shardOpts func(id int) []shard.Option) *testEngineWrapper {
+func (te *testEngineWrapper) setShardsNumOpts(
+    t testing.TB, num int, shardOpts func(id int) []shard.Option,
+) *testEngineWrapper {
+    te.shards = make([]*shard.Shard, num)
+    te.shardIDs = make([]*shard.ID, num)
    for i := range num {
-        opts := shardOpts(i)
-        id, err := te.engine.AddShard(context.Background(), opts...)
+        shard, err := te.engine.createShard(context.Background(), shardOpts(i))
        require.NoError(t, err)
-        te.shardIDs = append(te.shardIDs, id)
+        require.NoError(t, te.engine.addShard(shard))
+        te.shards[i] = shard
+        te.shardIDs[i] = shard.ID()
    }
+    require.Len(t, te.engine.shards, num)
+    require.Len(t, te.engine.shardPools, num)
    return te
 }
 
-func (te *testEngineWrapper) setShardsNumAdditionalOpts(t testing.TB, num int, shardOpts func(id int) []shard.Option) *testEngineWrapper {
-    for i := range num {
-        defaultOpts := testDefaultShardOptions(t)
-        opts := append(defaultOpts, shardOpts(i)...)
-        id, err := te.engine.AddShard(context.Background(), opts...)
-        require.NoError(t, err)
-        te.shardIDs = append(te.shardIDs, id)
-    }
+func (te *testEngineWrapper) setShardsNumAdditionalOpts(
+    t testing.TB, num int, shardOpts func(id int) []shard.Option,
+) *testEngineWrapper {
+    return te.setShardsNumOpts(t, num, func(id int) []shard.Option {
+        return append(testGetDefaultShardOptions(t), shardOpts(id)...)
+    })
+}
+
+// prepare calls Open and Init on the created engine.
+func (te *testEngineWrapper) prepare(t testing.TB) *testEngineWrapper {
+    require.NoError(t, te.engine.Open(context.Background()))
+    require.NoError(t, te.engine.Init(context.Background()))
+    return te
+}
+
+func testGetDefaultEngineOptions(t testing.TB) []Option {
+    return []Option{
+        WithLogger(test.NewLogger(t)),
+    }
+}
+
+func testGetDefaultShardOptions(t testing.TB) []shard.Option {
+    return []shard.Option{
+        shard.WithLogger(test.NewLogger(t)),
+        shard.WithBlobStorOptions(
+            blobstor.WithStorages(
+                newStorages(t, t.TempDir(), 1<<20)),
+            blobstor.WithLogger(test.NewLogger(t)),
+        ),
+        shard.WithPiloramaOptions(pilorama.WithPath(filepath.Join(t.TempDir(), "pilorama"))),
+        shard.WithMetaBaseOptions(
+            meta.WithPath(filepath.Join(t.TempDir(), "metabase")),
+            meta.WithPermissions(0o700),
+            meta.WithEpochState(epochState{}),
+            meta.WithLogger(test.NewLogger(t)),
+        ),
+    }
+}
 
 func newStorages(t testing.TB, root string, smallSize uint64) []blobstor.SubStorage {
    return []blobstor.SubStorage{
        {
@@ -137,34 +145,3 @@ func newTestStorages(root string, smallSize uint64) ([]blobstor.SubStorage, *tes
        },
    }, smallFileStorage, largeFileStorage
 }
-
-func testNewShard(t testing.TB, opts ...shard.Option) *shard.Shard {
-    sid, err := generateShardID()
-    require.NoError(t, err)
-
-    shardOpts := append([]shard.Option{shard.WithID(sid)}, testDefaultShardOptions(t)...)
-    s := shard.New(append(shardOpts, opts...)...)
-
-    require.NoError(t, s.Open(context.Background()))
-    require.NoError(t, s.Init(context.Background()))
-
-    return s
-}
-
-func testDefaultShardOptions(t testing.TB) []shard.Option {
-    return []shard.Option{
-        shard.WithLogger(test.NewLogger(t)),
-        shard.WithBlobStorOptions(
-            blobstor.WithStorages(
-                newStorages(t, t.TempDir(), 1<<20)),
-            blobstor.WithLogger(test.NewLogger(t)),
-        ),
-        shard.WithPiloramaOptions(pilorama.WithPath(filepath.Join(t.TempDir(), "pilorama"))),
-        shard.WithMetaBaseOptions(
-            meta.WithPath(filepath.Join(t.TempDir(), "metabase")),
-            meta.WithPermissions(0o700),
-            meta.WithEpochState(epochState{}),
-            meta.WithLogger(test.NewLogger(t)),
-        ),
-    }
-}

@@ -67,10 +67,8 @@ func newEngineWithErrorThreshold(t testing.TB, dir string, errThreshold uint32)
            pilorama.WithPath(filepath.Join(dir, fmt.Sprintf("%d.pilorama", id))),
            pilorama.WithPerm(0o700)),
        }
-    })
+    }).prepare(t)
    e := te.engine
-    require.NoError(t, e.Open(context.Background()))
-    require.NoError(t, e.Init(context.Background()))
 
    for i, id := range te.shardIDs {
        testShards[i].id = id
@@ -75,10 +75,9 @@ func newEngineEvacuate(t *testing.T, shardNum int, objPerShard int) (*StorageEng
                pilorama.WithPerm(0o700),
            ),
        }
-    })
+    }).
+        prepare(t)
    e, ids := te.engine, te.shardIDs
-    require.NoError(t, e.Open(context.Background()))
-    require.NoError(t, e.Init(context.Background()))
 
    objects := make([]*objectSDK.Object, 0, objPerShard*len(ids))
    treeID := "version"
@@ -25,7 +25,7 @@ func BenchmarkExists(b *testing.B) {
 }
 
 func benchmarkExists(b *testing.B, shardNum int) {
-    e := testNewEngine(b).setShardsNum(b, shardNum).engine
+    e := testNewEngine(b).setShardsNum(b, shardNum).prepare(b).engine
    defer func() { require.NoError(b, e.Close(context.Background())) }()
 
    addr := oidtest.Address()
@@ -39,11 +39,11 @@ func TestHeadRaw(t *testing.T) {
    link.SetSplitID(splitID)
 
    t.Run("virtual object split in different shards", func(t *testing.T) {
-        s1 := testNewShard(t)
-        s2 := testNewShard(t)
+        te := testNewEngine(t).setShardsNum(t, 2).prepare(t)
+        e := te.engine
+        defer func() { require.NoError(t, e.Close(context.Background())) }()
 
-        e := testNewEngine(t).setInitializedShards(t, s1, s2).engine
-        defer e.Close(context.Background())
+        s1, s2 := te.shards[0], te.shards[1]
 
        var putPrmLeft shard.PutPrm
        putPrmLeft.SetObject(child)
@@ -37,8 +37,8 @@ func TestStorageEngine_Inhume(t *testing.T) {
    t.Run("delete small object", func(t *testing.T) {
        t.Parallel()
 
-        e := testNewEngine(t).setShardsNum(t, 1).engine
-        defer e.Close(context.Background())
+        e := testNewEngine(t).setShardsNum(t, 1).prepare(t).engine
+        defer func() { require.NoError(t, e.Close(context.Background())) }()
 
        err := Put(context.Background(), e, parent, false)
        require.NoError(t, err)
@@ -56,11 +56,12 @@ func TestStorageEngine_Inhume(t *testing.T) {
    t.Run("delete big object", func(t *testing.T) {
        t.Parallel()
 
-        s1 := testNewShard(t)
-        s2 := testNewShard(t)
-        e := testNewEngine(t).setInitializedShards(t, s1, s2).engine
-        defer e.Close(context.Background())
+        te := testNewEngine(t).setShardsNum(t, 2).prepare(t)
+        e := te.engine
+        defer func() { require.NoError(t, e.Close(context.Background())) }()
+        s1, s2 := te.shards[0], te.shards[1]
 
        var putChild shard.PutPrm
        putChild.SetObject(child)
@@ -68,10 +68,7 @@ func TestListWithCursor(t *testing.T) {
                meta.WithEpochState(epochState{}),
            ),
        }
-    }).engine
-    require.NoError(t, e.Open(context.Background()))
-    require.NoError(t, e.Init(context.Background()))
-
+    }).prepare(t).engine
    defer func() {
        require.NoError(t, e.Close(context.Background()))
    }()

@@ -57,11 +57,9 @@ func TestLockUserScenario(t *testing.T) {
            }),
            shard.WithTombstoneSource(tss{lockerExpiresAfter}),
        }
-    })
+    }).
+        prepare(t)
    e := testEngine.engine
-    require.NoError(t, e.Open(context.Background()))
-    require.NoError(t, e.Init(context.Background()))
-
    defer func() { require.NoError(t, e.Close(context.Background())) }()
 
    lockerID := oidtest.ID()
@@ -162,11 +160,9 @@ func TestLockExpiration(t *testing.T) {
                return pool
            }),
        }
-    })
+    }).
+        prepare(t)
    e := testEngine.engine
-    require.NoError(t, e.Open(context.Background()))
-    require.NoError(t, e.Init(context.Background()))
-
    defer func() { require.NoError(t, e.Close(context.Background())) }()
 
    const lockerExpiresAfter = 13
@@ -243,9 +239,8 @@ func TestLockForceRemoval(t *testing.T) {
            }),
            shard.WithDeletedLockCallback(e.processDeletedLocks),
        }
-    }).engine
-    require.NoError(t, e.Open(context.Background()))
-    require.NoError(t, e.Init(context.Background()))
+    }).
+        prepare(t).engine
    defer func() { require.NoError(t, e.Close(context.Background())) }()
 
    cnr := cidtest.ID()
@@ -13,7 +13,7 @@ import (
 func TestRemoveShard(t *testing.T) {
    const numOfShards = 6
 
-    te := testNewEngine(t).setShardsNum(t, numOfShards)
+    te := testNewEngine(t).setShardsNum(t, numOfShards).prepare(t)
    e, ids := te.engine, te.shardIDs
    defer func() { require.NoError(t, e.Close(context.Background())) }()
 
@@ -51,7 +51,7 @@ func TestDisableShards(t *testing.T) {
    const numOfShards = 2
 
-    te := testNewEngine(t).setShardsNum(t, numOfShards)
+    te := testNewEngine(t).setShardsNum(t, numOfShards).prepare(t)
    e, ids := te.engine, te.shardIDs
    defer func() { require.NoError(t, e.Close(context.Background())) }()