After adding an ops limiter, the shard's `put` pool is redundant.

Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
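For context, a minimal sketch of the idea behind the change, assuming a semaphore-style limiter; the names (opsLimiter, acquire, put) are hypothetical and not taken from the frostfs-node codebase:

package limiter

import "context"

// opsLimiter is an assumed counting-semaphore shape for a per-shard
// operations limiter.
type opsLimiter chan struct{}

func newOpsLimiter(n int) opsLimiter { return make(chan struct{}, n) }

// acquire blocks until a slot is free or the context is cancelled, and
// returns a release callback for the taken slot.
func (l opsLimiter) acquire(ctx context.Context) (func(), error) {
	select {
	case l <- struct{}{}:
		return func() { <-l }, nil
	case <-ctx.Done():
		return nil, ctx.Err()
	}
}

// put runs the write under the limiter alone: once the limiter bounds
// concurrent operations, funneling writes through a separate bounded
// worker pool would throttle them twice, which is why the shard's put
// pool becomes redundant.
func put(ctx context.Context, l opsLimiter, store func(context.Context) error) error {
	release, err := l.acquire(ctx)
	if err != nil {
		return err
	}
	defer release()
	return store(ctx)
}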
87 lines
2.1 KiB
Go
package engine

import (
	"context"
	"testing"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
	"git.frostfs.info/TrueCloudLab/hrw"
	"github.com/stretchr/testify/require"
)

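// TestRemoveShard removes half of the engine's shards and verifies that
// exactly the remaining half stays registered.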
func TestRemoveShard(t *testing.T) {
	const numOfShards = 6

	te := testNewEngine(t).setShardsNum(t, numOfShards).prepare(t)
	e, ids := te.engine, te.shardIDs
	defer func() { require.NoError(t, e.Close(context.Background())) }()

	require.Equal(t, numOfShards, len(e.shards))

	removedNum := numOfShards / 2

	// Mark the first half of the shards for removal.
	mSh := make(map[string]bool, numOfShards)
	for i, id := range ids {
		if i == removedNum {
			break
		}

		mSh[id.String()] = true
	}

	for id, remove := range mSh {
		if remove {
			e.removeShards(context.Background(), id)
		}
	}

	require.Equal(t, numOfShards-removedNum, len(e.shards))

	// Every shard marked for removal must no longer be registered in the engine.
	for id, removed := range mSh {
		_, ok := e.shards[id]
		require.True(t, ok != removed)
	}
}

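// TestDisableShards verifies that DetachShards rejects detaching all shards,
// a nil list and an empty list, while detaching a single shard succeeds.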
func TestDisableShards(t *testing.T) {
	t.Parallel()

	const numOfShards = 2

	te := testNewEngine(t).setShardsNum(t, numOfShards).prepare(t)
	e, ids := te.engine, te.shardIDs
	defer func() { require.NoError(t, e.Close(context.Background())) }()

	require.ErrorAs(t, e.DetachShards(context.Background(), ids), new(logicerr.Logical))
	require.ErrorAs(t, e.DetachShards(context.Background(), nil), new(logicerr.Logical))
	require.ErrorAs(t, e.DetachShards(context.Background(), []*shard.ID{}), new(logicerr.Logical))

	require.NoError(t, e.DetachShards(context.Background(), []*shard.ID{ids[0]}))

	require.Equal(t, 1, len(e.shards))
}

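// TestSortShardsByWeight checks that the HRW weighted sort with all-zero
// weights produces the same order as the value-only sort.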
func TestSortShardsByWeight(t *testing.T) {
	t.Parallel()

	const numOfShards = 500

	var shards1 []hashedShard
	var weights1 []float64
	var shards2 []hashedShard
	for i := range numOfShards {
		shards1 = append(shards1, hashedShard{
			hash: uint64(i),
		})
		weights1 = append(weights1, 0)
		shards2 = append(shards2, hashedShard{
			hash: uint64(i),
		})
	}

	// With all weights zero, the weighted sort must degenerate to the
	// plain value-based sort.
	hrw.SortHasherSliceByWeightValue(shards1, weights1, 0)
	hrw.SortHasherSliceByValue(shards2, 0)

	require.Equal(t, shards1, shards2)
}