Aleksey Savchuk
7fc6101bec
All checks were successful
Tests and linters / Run gofumpt (pull_request) Successful in 2m24s
DCO action / DCO (pull_request) Successful in 2m37s
Pre-commit hooks / Pre-commit (pull_request) Successful in 3m46s
Vulncheck / Vulncheck (pull_request) Successful in 3m46s
Build / Build Components (pull_request) Successful in 4m4s
Tests and linters / Staticcheck (pull_request) Successful in 4m1s
Tests and linters / gopls check (pull_request) Successful in 4m14s
Tests and linters / Lint (pull_request) Successful in 4m49s
Tests and linters / Tests with -race (pull_request) Successful in 5m25s
Tests and linters / Tests (pull_request) Successful in 5m46s
- Remove `testNewShard` and `setInitializedShards` because they violated the default engine workflow. The correct workflow is: first use `New()`, followed by `Open()`, and then `Init()`. As a result, adding new logic to `(*StorageEngine).Init` caused several tests to fail with a panic when attempting to access uninitialized resources. Now, all engines created with the test utils must be initialized manually. The new helper method `prepare` can be used for that purpose (see the sketch below).
- Additionally, `setInitializedShards` hardcoded the shard worker pool size, which prevented it from being configured in tests and benchmarks. This has been fixed as well.
- Ensure engine initialization is done wherever it was missing.
- Refactor `setShardsNumOpts`, `setShardsNumAdditionalOpts`, and `setShardsNum`. Make them all depend on `setShardsNumOpts`.

Signed-off-by: Aleksey Savchuk <a.savchuk@yadro.com>
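For illustration, a minimal sketch of what a `prepare` helper like the one described above could look like. Only `testNewEngine`, `setShardsNum`, `prepare`, `engine`, and `shardIDs` are taken from the tests in this file; the wrapper type name `testEngineWrapper`, its field types, and the context-taking `Open`/`Init` signatures are assumptions made for this sketch and may differ from the actual test utilities.

package engine

import (
	"context"
	"testing"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
	"github.com/stretchr/testify/require"
)

// testEngineWrapper mirrors the builder returned by testNewEngine in the tests
// below; the type and field names here are assumptions for this sketch.
type testEngineWrapper struct {
	engine   *StorageEngine
	shardIDs []*shard.ID
}

// prepare completes the default engine workflow: the engine was already
// constructed with New() inside testNewEngine, so the remaining steps are
// Open() and Init(). The context-taking signatures are assumptions.
func (te *testEngineWrapper) prepare(t testing.TB) *testEngineWrapper {
	require.NoError(t, te.engine.Open(context.Background()))
	require.NoError(t, te.engine.Init(context.Background()))
	return te
}

With such a helper, tests can build and initialize an engine in one chain, e.g. `te := testNewEngine(t).setShardsNum(t, 2).prepare(t)`, which is exactly how the tests in this file use it.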
89 lines
2.2 KiB
Go
package engine

import (
	"context"
	"testing"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
	"git.frostfs.info/TrueCloudLab/hrw"
	"github.com/stretchr/testify/require"
)

func TestRemoveShard(t *testing.T) {
	const numOfShards = 6

	te := testNewEngine(t).setShardsNum(t, numOfShards).prepare(t)
	e, ids := te.engine, te.shardIDs
	defer func() { require.NoError(t, e.Close(context.Background())) }()

	require.Equal(t, numOfShards, len(e.shardPools))
	require.Equal(t, numOfShards, len(e.shards))

	removedNum := numOfShards / 2

	mSh := make(map[string]bool, numOfShards)
	for i, id := range ids {
		if i == removedNum {
			break
		}

		mSh[id.String()] = true
	}

	for id, remove := range mSh {
		if remove {
			e.removeShards(context.Background(), id)
		}
	}

	require.Equal(t, numOfShards-removedNum, len(e.shardPools))
	require.Equal(t, numOfShards-removedNum, len(e.shards))

	for id, removed := range mSh {
		_, ok := e.shards[id]
		require.True(t, ok != removed)
	}
}

func TestDisableShards(t *testing.T) {
	t.Parallel()

	const numOfShards = 2

	te := testNewEngine(t).setShardsNum(t, numOfShards).prepare(t)
	e, ids := te.engine, te.shardIDs
	defer func() { require.NoError(t, e.Close(context.Background())) }()

	require.ErrorAs(t, e.DetachShards(context.Background(), ids), new(logicerr.Logical))
	require.ErrorAs(t, e.DetachShards(context.Background(), nil), new(logicerr.Logical))
	require.ErrorAs(t, e.DetachShards(context.Background(), []*shard.ID{}), new(logicerr.Logical))

	require.NoError(t, e.DetachShards(context.Background(), []*shard.ID{ids[0]}))

	require.Equal(t, 1, len(e.shards))
}

func TestSortShardsByWeight(t *testing.T) {
	t.Parallel()

	const numOfShards = 500

	var shards1 []hashedShard
	var weights1 []float64
	var shards2 []hashedShard
	for i := range numOfShards {
		shards1 = append(shards1, hashedShard{
			hash: uint64(i),
		})
		weights1 = append(weights1, 0)
		shards2 = append(shards2, hashedShard{
			hash: uint64(i),
		})
	}

	hrw.SortHasherSliceByWeightValue(shards1, weights1, 0)
	hrw.SortHasherSliceByValue(shards2, 0)

	require.Equal(t, shards1, shards2)
}