[#1491] engine/test: Rework engine test utils
All checks were successful
Tests and linters / Run gofumpt (pull_request) Successful in 2m45s
DCO action / DCO (pull_request) Successful in 3m43s
Pre-commit hooks / Pre-commit (pull_request) Successful in 3m40s
Tests and linters / Tests (pull_request) Successful in 4m4s
Vulncheck / Vulncheck (pull_request) Successful in 4m11s
Tests and linters / gopls check (pull_request) Successful in 4m42s
Build / Build Components (pull_request) Successful in 4m52s
Tests and linters / Staticcheck (pull_request) Successful in 5m20s
Tests and linters / Lint (pull_request) Successful in 6m10s
Tests and linters / Tests with -race (pull_request) Successful in 7m1s
All checks were successful
Tests and linters / Run gofumpt (pull_request) Successful in 2m45s
DCO action / DCO (pull_request) Successful in 3m43s
Pre-commit hooks / Pre-commit (pull_request) Successful in 3m40s
Tests and linters / Tests (pull_request) Successful in 4m4s
Vulncheck / Vulncheck (pull_request) Successful in 4m11s
Tests and linters / gopls check (pull_request) Successful in 4m42s
Build / Build Components (pull_request) Successful in 4m52s
Tests and linters / Staticcheck (pull_request) Successful in 5m20s
Tests and linters / Lint (pull_request) Successful in 6m10s
Tests and linters / Tests with -race (pull_request) Successful in 7m1s
- Remove `testNewShard` and `setInitializedShards`. They violated the default engine workflow, which should be: first use `New()`, followed by `Open()`, and then `Init()`. As a result, adding new logic to `(*StorageEngine).Init` caused several tests to fail with a panic when attempting to access uninitialized resources. Now all engines created with test utils must be initialized manually. - Also, `setInitializedShards` hardcoded the shard worker pool size, preventing it from being configured in tests/benchmarks. Fix it too. - Ensure engine initialization is done wherever it's missing. - Refactor `setShardsNumOpts`, `setShardsNumAdditionalOpts`, and `setShardsNum`. Make them all depend on `setShardsNumOpts`. Signed-off-by: Aleksey Savchuk <a.savchuk@yadro.com>
This commit is contained in:
parent
7ef36749d0
commit
bd772c4da3
7 changed files with 84 additions and 95 deletions
|
@ -165,6 +165,8 @@ func testEngineFailInitAndReload(t *testing.T, degradedMode bool, opts []shard.O
|
||||||
|
|
||||||
func TestExecBlocks(t *testing.T) {
|
func TestExecBlocks(t *testing.T) {
|
||||||
e := testNewEngine(t).setShardsNum(t, 2).engine // number doesn't matter in this test, 2 is several but not many
|
e := testNewEngine(t).setShardsNum(t, 2).engine // number doesn't matter in this test, 2 is several but not many
|
||||||
|
require.NoError(t, e.Open(context.Background()))
|
||||||
|
require.NoError(t, e.Init(context.Background()))
|
||||||
|
|
||||||
// put some object
|
// put some object
|
||||||
obj := testutil.GenerateObjectWithCID(cidtest.ID())
|
obj := testutil.GenerateObjectWithCID(cidtest.ID())
|
||||||
|
@ -304,6 +306,8 @@ func engineWithShards(t *testing.T, path string, num int) (*StorageEngine, []str
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
e, ids := te.engine, te.shardIDs
|
e, ids := te.engine, te.shardIDs
|
||||||
|
require.NoError(t, e.Open(context.Background()))
|
||||||
|
require.NoError(t, e.Init(context.Background()))
|
||||||
|
|
||||||
for _, id := range ids {
|
for _, id := range ids {
|
||||||
currShards = append(currShards, calculateShardID(e.shards[id.String()].DumpInfo()))
|
currShards = append(currShards, calculateShardID(e.shards[id.String()].DumpInfo()))
|
||||||
|
@ -312,8 +316,5 @@ func engineWithShards(t *testing.T, path string, num int) (*StorageEngine, []str
|
||||||
require.Equal(t, num, len(e.shards))
|
require.Equal(t, num, len(e.shards))
|
||||||
require.Equal(t, num, len(e.shardPools))
|
require.Equal(t, num, len(e.shardPools))
|
||||||
|
|
||||||
require.NoError(t, e.Open(context.Background()))
|
|
||||||
require.NoError(t, e.Init(context.Background()))
|
|
||||||
|
|
||||||
return e, currShards
|
return e, currShards
|
||||||
}
|
}
|
||||||
|
|
|
@ -7,7 +7,6 @@ import (
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
|
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
|
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
|
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
|
|
||||||
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
|
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
|
||||||
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
|
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
|
||||||
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
||||||
|
@ -50,8 +49,9 @@ func TestDeleteBigObject(t *testing.T) {
|
||||||
link.SetChildren(childIDs...)
|
link.SetChildren(childIDs...)
|
||||||
|
|
||||||
e := testNewEngine(t).setShardsNum(t, 3).engine
|
e := testNewEngine(t).setShardsNum(t, 3).engine
|
||||||
e.log = test.NewLogger(t)
|
require.NoError(t, e.Open(context.Background()))
|
||||||
defer e.Close(context.Background())
|
require.NoError(t, e.Init(context.Background()))
|
||||||
|
defer func() { require.NoError(t, e.Close(context.Background())) }()
|
||||||
|
|
||||||
for i := range children {
|
for i := range children {
|
||||||
require.NoError(t, Put(context.Background(), e, children[i], false))
|
require.NoError(t, Put(context.Background(), e, children[i], false))
|
||||||
|
@ -115,12 +115,16 @@ func TestDeleteBigObjectWithoutGC(t *testing.T) {
|
||||||
link.SetSplitID(splitID)
|
link.SetSplitID(splitID)
|
||||||
link.SetChildren(childIDs...)
|
link.SetChildren(childIDs...)
|
||||||
|
|
||||||
s1 := testNewShard(t, shard.WithDisabledGC())
|
te := testNewEngine(t).setShardsNumAdditionalOpts(t, 1, func(_ int) []shard.Option {
|
||||||
|
return []shard.Option{shard.WithDisabledGC()}
|
||||||
e := testNewEngine(t).setInitializedShards(t, s1).engine
|
})
|
||||||
e.log = test.NewLogger(t)
|
e := te.engine
|
||||||
|
require.NoError(t, e.Open(context.Background()))
|
||||||
|
require.NoError(t, e.Init(context.Background()))
|
||||||
defer e.Close(context.Background())
|
defer e.Close(context.Background())
|
||||||
|
|
||||||
|
s1 := te.shards[0]
|
||||||
|
|
||||||
for i := range children {
|
for i := range children {
|
||||||
require.NoError(t, Put(context.Background(), e, children[i], false))
|
require.NoError(t, Put(context.Background(), e, children[i], false))
|
||||||
}
|
}
|
||||||
|
|
|
@ -3,7 +3,6 @@ package engine
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"sync/atomic"
|
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
|
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
|
||||||
|
@ -26,66 +25,68 @@ func (s epochState) CurrentEpoch() uint64 {
|
||||||
|
|
||||||
type testEngineWrapper struct {
|
type testEngineWrapper struct {
|
||||||
engine *StorageEngine
|
engine *StorageEngine
|
||||||
|
shards []*shard.Shard
|
||||||
shardIDs []*shard.ID
|
shardIDs []*shard.ID
|
||||||
}
|
}
|
||||||
|
|
||||||
func testNewEngine(t testing.TB, opts ...Option) *testEngineWrapper {
|
func testNewEngine(t testing.TB, opts ...Option) *testEngineWrapper {
|
||||||
engine := New(WithLogger(test.NewLogger(t)))
|
opts = append(testGetDefaultEngineOptions(t), opts...)
|
||||||
for _, opt := range opts {
|
return &testEngineWrapper{engine: New(opts...)}
|
||||||
opt(engine.cfg)
|
|
||||||
}
|
|
||||||
return &testEngineWrapper{
|
|
||||||
engine: engine,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (te *testEngineWrapper) setInitializedShards(t testing.TB, shards ...*shard.Shard) *testEngineWrapper {
|
|
||||||
for _, s := range shards {
|
|
||||||
pool, err := ants.NewPool(10, ants.WithNonblocking(true))
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
te.engine.shards[s.ID().String()] = hashedShard{
|
|
||||||
shardWrapper: shardWrapper{
|
|
||||||
errorCount: new(atomic.Uint32),
|
|
||||||
Shard: s,
|
|
||||||
},
|
|
||||||
hash: hrw.StringHash(s.ID().String()),
|
|
||||||
}
|
|
||||||
te.engine.shardPools[s.ID().String()] = pool
|
|
||||||
te.shardIDs = append(te.shardIDs, s.ID())
|
|
||||||
}
|
|
||||||
return te
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (te *testEngineWrapper) setShardsNum(t testing.TB, num int) *testEngineWrapper {
|
func (te *testEngineWrapper) setShardsNum(t testing.TB, num int) *testEngineWrapper {
|
||||||
shards := make([]*shard.Shard, 0, num)
|
return te.setShardsNumOpts(t, num, func(_ int) []shard.Option {
|
||||||
|
return testGetDefaultShardOptions(t)
|
||||||
for range num {
|
})
|
||||||
shards = append(shards, testNewShard(t))
|
|
||||||
}
|
|
||||||
|
|
||||||
return te.setInitializedShards(t, shards...)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (te *testEngineWrapper) setShardsNumOpts(t testing.TB, num int, shardOpts func(id int) []shard.Option) *testEngineWrapper {
|
func (te *testEngineWrapper) setShardsNumOpts(
|
||||||
|
t testing.TB, num int, shardOpts func(id int) []shard.Option,
|
||||||
|
) *testEngineWrapper {
|
||||||
|
te.shards = make([]*shard.Shard, num)
|
||||||
|
te.shardIDs = make([]*shard.ID, num)
|
||||||
for i := range num {
|
for i := range num {
|
||||||
opts := shardOpts(i)
|
shard, err := te.engine.createShard(context.Background(), shardOpts(i))
|
||||||
id, err := te.engine.AddShard(context.Background(), opts...)
|
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
te.shardIDs = append(te.shardIDs, id)
|
require.NoError(t, te.engine.addShard(shard))
|
||||||
|
te.shards[i] = shard
|
||||||
|
te.shardIDs[i] = shard.ID()
|
||||||
}
|
}
|
||||||
|
require.Len(t, te.engine.shards, num)
|
||||||
|
require.Len(t, te.engine.shardPools, num)
|
||||||
return te
|
return te
|
||||||
}
|
}
|
||||||
|
|
||||||
func (te *testEngineWrapper) setShardsNumAdditionalOpts(t testing.TB, num int, shardOpts func(id int) []shard.Option) *testEngineWrapper {
|
func (te *testEngineWrapper) setShardsNumAdditionalOpts(
|
||||||
for i := range num {
|
t testing.TB, num int, shardOpts func(id int) []shard.Option,
|
||||||
defaultOpts := testDefaultShardOptions(t)
|
) *testEngineWrapper {
|
||||||
opts := append(defaultOpts, shardOpts(i)...)
|
return te.setShardsNumOpts(t, num, func(id int) []shard.Option {
|
||||||
id, err := te.engine.AddShard(context.Background(), opts...)
|
return append(testGetDefaultShardOptions(t), shardOpts(id)...)
|
||||||
require.NoError(t, err)
|
})
|
||||||
te.shardIDs = append(te.shardIDs, id)
|
}
|
||||||
|
|
||||||
|
func testGetDefaultEngineOptions(t testing.TB) []Option {
|
||||||
|
return []Option{
|
||||||
|
WithLogger(test.NewLogger(t)),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func testGetDefaultShardOptions(t testing.TB) []shard.Option {
|
||||||
|
return []shard.Option{
|
||||||
|
shard.WithLogger(test.NewLogger(t)),
|
||||||
|
shard.WithBlobStorOptions(
|
||||||
|
blobstor.WithStorages(
|
||||||
|
newStorages(t, t.TempDir(), 1<<20)),
|
||||||
|
blobstor.WithLogger(test.NewLogger(t)),
|
||||||
|
),
|
||||||
|
shard.WithPiloramaOptions(pilorama.WithPath(filepath.Join(t.TempDir(), "pilorama"))),
|
||||||
|
shard.WithMetaBaseOptions(
|
||||||
|
meta.WithPath(filepath.Join(t.TempDir(), "metabase")),
|
||||||
|
meta.WithPermissions(0o700),
|
||||||
|
meta.WithEpochState(epochState{}),
|
||||||
|
meta.WithLogger(test.NewLogger(t)),
|
||||||
|
),
|
||||||
}
|
}
|
||||||
return te
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func newStorages(t testing.TB, root string, smallSize uint64) []blobstor.SubStorage {
|
func newStorages(t testing.TB, root string, smallSize uint64) []blobstor.SubStorage {
|
||||||
|
@ -137,34 +138,3 @@ func newTestStorages(root string, smallSize uint64) ([]blobstor.SubStorage, *tes
|
||||||
},
|
},
|
||||||
}, smallFileStorage, largeFileStorage
|
}, smallFileStorage, largeFileStorage
|
||||||
}
|
}
|
||||||
|
|
||||||
func testNewShard(t testing.TB, opts ...shard.Option) *shard.Shard {
|
|
||||||
sid, err := generateShardID()
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
shardOpts := append([]shard.Option{shard.WithID(sid)}, testDefaultShardOptions(t)...)
|
|
||||||
s := shard.New(append(shardOpts, opts...)...)
|
|
||||||
|
|
||||||
require.NoError(t, s.Open(context.Background()))
|
|
||||||
require.NoError(t, s.Init(context.Background()))
|
|
||||||
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
func testDefaultShardOptions(t testing.TB) []shard.Option {
|
|
||||||
return []shard.Option{
|
|
||||||
shard.WithLogger(test.NewLogger(t)),
|
|
||||||
shard.WithBlobStorOptions(
|
|
||||||
blobstor.WithStorages(
|
|
||||||
newStorages(t, t.TempDir(), 1<<20)),
|
|
||||||
blobstor.WithLogger(test.NewLogger(t)),
|
|
||||||
),
|
|
||||||
shard.WithPiloramaOptions(pilorama.WithPath(filepath.Join(t.TempDir(), "pilorama"))),
|
|
||||||
shard.WithMetaBaseOptions(
|
|
||||||
meta.WithPath(filepath.Join(t.TempDir(), "metabase")),
|
|
||||||
meta.WithPermissions(0o700),
|
|
||||||
meta.WithEpochState(epochState{}),
|
|
||||||
meta.WithLogger(test.NewLogger(t)),
|
|
||||||
),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
|
@ -26,6 +26,8 @@ func BenchmarkExists(b *testing.B) {
|
||||||
|
|
||||||
func benchmarkExists(b *testing.B, shardNum int) {
|
func benchmarkExists(b *testing.B, shardNum int) {
|
||||||
e := testNewEngine(b).setShardsNum(b, shardNum).engine
|
e := testNewEngine(b).setShardsNum(b, shardNum).engine
|
||||||
|
require.NoError(b, e.Open(context.Background()))
|
||||||
|
require.NoError(b, e.Init(context.Background()))
|
||||||
defer func() { require.NoError(b, e.Close(context.Background())) }()
|
defer func() { require.NoError(b, e.Close(context.Background())) }()
|
||||||
|
|
||||||
addr := oidtest.Address()
|
addr := oidtest.Address()
|
||||||
|
|
|
@ -39,11 +39,13 @@ func TestHeadRaw(t *testing.T) {
|
||||||
link.SetSplitID(splitID)
|
link.SetSplitID(splitID)
|
||||||
|
|
||||||
t.Run("virtual object split in different shards", func(t *testing.T) {
|
t.Run("virtual object split in different shards", func(t *testing.T) {
|
||||||
s1 := testNewShard(t)
|
te := testNewEngine(t).setShardsNum(t, 2)
|
||||||
s2 := testNewShard(t)
|
e := te.engine
|
||||||
|
require.NoError(t, e.Open(context.Background()))
|
||||||
|
require.NoError(t, e.Init(context.Background()))
|
||||||
|
defer func() { require.NoError(t, e.Close(context.Background())) }()
|
||||||
|
|
||||||
e := testNewEngine(t).setInitializedShards(t, s1, s2).engine
|
s1, s2 := te.shards[0], te.shards[1]
|
||||||
defer e.Close(context.Background())
|
|
||||||
|
|
||||||
var putPrmLeft shard.PutPrm
|
var putPrmLeft shard.PutPrm
|
||||||
putPrmLeft.SetObject(child)
|
putPrmLeft.SetObject(child)
|
||||||
|
|
|
@ -38,7 +38,9 @@ func TestStorageEngine_Inhume(t *testing.T) {
|
||||||
t.Run("delete small object", func(t *testing.T) {
|
t.Run("delete small object", func(t *testing.T) {
|
||||||
t.Parallel()
|
t.Parallel()
|
||||||
e := testNewEngine(t).setShardsNum(t, 1).engine
|
e := testNewEngine(t).setShardsNum(t, 1).engine
|
||||||
defer e.Close(context.Background())
|
require.NoError(t, e.Open(context.Background()))
|
||||||
|
require.NoError(t, e.Init(context.Background()))
|
||||||
|
defer func() { require.NoError(t, e.Close(context.Background())) }()
|
||||||
|
|
||||||
err := Put(context.Background(), e, parent, false)
|
err := Put(context.Background(), e, parent, false)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
@ -56,11 +58,15 @@ func TestStorageEngine_Inhume(t *testing.T) {
|
||||||
|
|
||||||
t.Run("delete big object", func(t *testing.T) {
|
t.Run("delete big object", func(t *testing.T) {
|
||||||
t.Parallel()
|
t.Parallel()
|
||||||
s1 := testNewShard(t)
|
|
||||||
s2 := testNewShard(t)
|
|
||||||
|
|
||||||
e := testNewEngine(t).setInitializedShards(t, s1, s2).engine
|
te := testNewEngine(t).setShardsNum(t, 2)
|
||||||
defer e.Close(context.Background())
|
|
||||||
|
e := te.engine
|
||||||
|
require.NoError(t, e.Open(context.Background()))
|
||||||
|
require.NoError(t, e.Init(context.Background()))
|
||||||
|
defer func() { require.NoError(t, e.Close(context.Background())) }()
|
||||||
|
|
||||||
|
s1, s2 := te.shards[0], te.shards[1]
|
||||||
|
|
||||||
var putChild shard.PutPrm
|
var putChild shard.PutPrm
|
||||||
putChild.SetObject(child)
|
putChild.SetObject(child)
|
||||||
|
|
|
@ -15,6 +15,8 @@ func TestRemoveShard(t *testing.T) {
|
||||||
|
|
||||||
te := testNewEngine(t).setShardsNum(t, numOfShards)
|
te := testNewEngine(t).setShardsNum(t, numOfShards)
|
||||||
e, ids := te.engine, te.shardIDs
|
e, ids := te.engine, te.shardIDs
|
||||||
|
require.NoError(t, e.Open(context.Background()))
|
||||||
|
require.NoError(t, e.Init(context.Background()))
|
||||||
defer func() { require.NoError(t, e.Close(context.Background())) }()
|
defer func() { require.NoError(t, e.Close(context.Background())) }()
|
||||||
|
|
||||||
require.Equal(t, numOfShards, len(e.shardPools))
|
require.Equal(t, numOfShards, len(e.shardPools))
|
||||||
|
@ -53,6 +55,8 @@ func TestDisableShards(t *testing.T) {
|
||||||
|
|
||||||
te := testNewEngine(t).setShardsNum(t, numOfShards)
|
te := testNewEngine(t).setShardsNum(t, numOfShards)
|
||||||
e, ids := te.engine, te.shardIDs
|
e, ids := te.engine, te.shardIDs
|
||||||
|
require.NoError(t, e.Open(context.Background()))
|
||||||
|
require.NoError(t, e.Init(context.Background()))
|
||||||
defer func() { require.NoError(t, e.Close(context.Background())) }()
|
defer func() { require.NoError(t, e.Close(context.Background())) }()
|
||||||
|
|
||||||
require.ErrorAs(t, e.DetachShards(context.Background(), ids), new(logicerr.Logical))
|
require.ErrorAs(t, e.DetachShards(context.Background(), ids), new(logicerr.Logical))
|
||||||
|
|
Loading…
Reference in a new issue