Some checks failed
Vulncheck / Vulncheck (push) Successful in 1m0s
Pre-commit hooks / Pre-commit (push) Successful in 1m24s
Build / Build Components (push) Successful in 2m4s
OCI image / Build container images (push) Has been cancelled
Tests and linters / Run gofumpt (push) Successful in 3m37s
Tests and linters / Tests with -race (push) Successful in 4m9s
Tests and linters / gopls check (push) Successful in 4m15s
Tests and linters / Tests (push) Successful in 4m46s
Tests and linters / Staticcheck (push) Successful in 4m49s
Tests and linters / Lint (push) Successful in 5m30s
Change-Id: I4952769ca431d1049955823b41b99b0984b385fc Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
233 lines
6.3 KiB
Go
233 lines
6.3 KiB
Go
package engine
|
|
|
|
import (
|
|
"context"
|
|
"fmt"
|
|
"path/filepath"
|
|
"runtime/debug"
|
|
"strings"
|
|
"sync"
|
|
"testing"
|
|
|
|
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
|
|
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
|
|
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree"
|
|
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
|
|
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/teststore"
|
|
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
|
|
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
|
|
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
|
|
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
|
|
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
|
"github.com/stretchr/testify/require"
|
|
)
|
|
|
|
// epochState is a stub epoch source for tests; it is handed to the
// metabase via meta.WithEpochState and always reports a fixed epoch.
type epochState struct {
	// currEpoch is the value returned by CurrentEpoch.
	currEpoch uint64
}
|
|
|
|
// CurrentEpoch returns the fixed epoch number stored in the state.
func (s epochState) CurrentEpoch() uint64 {
	return s.currEpoch
}
|
|
|
|
// testEngineWrapper bundles a StorageEngine under test together with the
// shards created for it, so tests can reach individual shards and their
// IDs directly.
type testEngineWrapper struct {
	engine   *StorageEngine
	shards   []*shard.Shard
	shardIDs []*shard.ID
}
|
|
|
|
func testNewEngine(t testing.TB, opts ...Option) *testEngineWrapper {
|
|
opts = append(testGetDefaultEngineOptions(t), opts...)
|
|
return &testEngineWrapper{engine: New(opts...)}
|
|
}
|
|
|
|
func (te *testEngineWrapper) setShardsNum(t testing.TB, num int) *testEngineWrapper {
|
|
return te.setShardsNumOpts(t, num, func(_ int) []shard.Option {
|
|
return testGetDefaultShardOptions(t)
|
|
})
|
|
}
|
|
|
|
func (te *testEngineWrapper) setShardsNumOpts(
|
|
t testing.TB, num int, shardOpts func(id int) []shard.Option,
|
|
) *testEngineWrapper {
|
|
te.shards = make([]*shard.Shard, num)
|
|
te.shardIDs = make([]*shard.ID, num)
|
|
for i := range num {
|
|
shard, err := te.engine.createShard(context.Background(), shardOpts(i))
|
|
require.NoError(t, err)
|
|
require.NoError(t, te.engine.addShard(shard))
|
|
te.shards[i] = shard
|
|
te.shardIDs[i] = shard.ID()
|
|
}
|
|
require.Len(t, te.engine.shards, num)
|
|
return te
|
|
}
|
|
|
|
func (te *testEngineWrapper) setShardsNumAdditionalOpts(
|
|
t testing.TB, num int, shardOpts func(id int) []shard.Option,
|
|
) *testEngineWrapper {
|
|
return te.setShardsNumOpts(t, num, func(id int) []shard.Option {
|
|
return append(testGetDefaultShardOptions(t), shardOpts(id)...)
|
|
})
|
|
}
|
|
|
|
// prepare calls Open and Init on the created engine.
|
|
func (te *testEngineWrapper) prepare(t testing.TB) *testEngineWrapper {
|
|
require.NoError(t, te.engine.Open(context.Background()))
|
|
require.NoError(t, te.engine.Init(context.Background()))
|
|
return te
|
|
}
|
|
|
|
// testGetDefaultEngineOptions returns the baseline engine options applied
// to every engine created by testNewEngine: currently just a per-test logger.
func testGetDefaultEngineOptions(t testing.TB) []Option {
	return []Option{
		WithLogger(test.NewLogger(t)),
	}
}
|
|
|
|
// testGetDefaultShardOptions returns the baseline shard configuration used
// by test shards: a per-test logger, a two-tier blobstor (see newStorages)
// with a 1 MiB small-object threshold, pilorama and metabase backed by
// per-test temp directories, and a leak-checking QoS limiter.
func testGetDefaultShardOptions(t testing.TB) []shard.Option {
	return []shard.Option{
		shard.WithLogger(test.NewLogger(t)),
		shard.WithBlobStorOptions(
			blobstor.WithStorages(
				// 1<<20: objects under 1 MiB go to the blobovnicza tier.
				newStorages(t, t.TempDir(), 1<<20)),
			blobstor.WithLogger(test.NewLogger(t)),
		),
		shard.WithPiloramaOptions(pilorama.WithPath(filepath.Join(t.TempDir(), "pilorama"))),
		shard.WithMetaBaseOptions(testGetDefaultMetabaseOptions(t)...),
		// testQoSLimiter fails the test if any read/write request is not
		// released before the limiter is closed.
		shard.WithLimiter(&testQoSLimiter{t: t}),
	}
}
|
|
|
|
// testGetDefaultMetabaseOptions returns the baseline metabase configuration
// for test shards: a per-test temp path, 0700 permissions, a stub epoch
// state (fixed epoch 0), and a per-test logger.
func testGetDefaultMetabaseOptions(t testing.TB) []meta.Option {
	return []meta.Option{
		meta.WithPath(filepath.Join(t.TempDir(), "metabase")),
		meta.WithPermissions(0o700),
		meta.WithEpochState(epochState{}),
		meta.WithLogger(test.NewLogger(t)),
	}
}
|
|
|
|
func newStorages(t testing.TB, root string, smallSize uint64) []blobstor.SubStorage {
|
|
return []blobstor.SubStorage{
|
|
{
|
|
Storage: blobovniczatree.NewBlobovniczaTree(
|
|
context.Background(),
|
|
blobovniczatree.WithRootPath(filepath.Join(root, "blobovnicza")),
|
|
blobovniczatree.WithBlobovniczaShallowDepth(1),
|
|
blobovniczatree.WithBlobovniczaShallowWidth(1),
|
|
blobovniczatree.WithPermissions(0o700),
|
|
blobovniczatree.WithLogger(test.NewLogger(t))),
|
|
Policy: func(_ *objectSDK.Object, data []byte) bool {
|
|
return uint64(len(data)) < smallSize
|
|
},
|
|
},
|
|
{
|
|
Storage: fstree.New(
|
|
fstree.WithPath(root),
|
|
fstree.WithDepth(1),
|
|
fstree.WithLogger(test.NewLogger(t))),
|
|
},
|
|
}
|
|
}
|
|
|
|
func newTestStorages(root string, smallSize uint64) ([]blobstor.SubStorage, *teststore.TestStore, *teststore.TestStore) {
|
|
smallFileStorage := teststore.New(
|
|
teststore.WithSubstorage(blobovniczatree.NewBlobovniczaTree(
|
|
context.Background(),
|
|
blobovniczatree.WithRootPath(filepath.Join(root, "blobovnicza")),
|
|
blobovniczatree.WithBlobovniczaShallowDepth(1),
|
|
blobovniczatree.WithBlobovniczaShallowWidth(1),
|
|
blobovniczatree.WithPermissions(0o700)),
|
|
))
|
|
largeFileStorage := teststore.New(
|
|
teststore.WithSubstorage(fstree.New(
|
|
fstree.WithPath(root),
|
|
fstree.WithDepth(1)),
|
|
))
|
|
return []blobstor.SubStorage{
|
|
{
|
|
Storage: smallFileStorage,
|
|
Policy: func(_ *objectSDK.Object, data []byte) bool {
|
|
return uint64(len(data)) < smallSize
|
|
},
|
|
},
|
|
{
|
|
Storage: largeFileStorage,
|
|
},
|
|
}, smallFileStorage, largeFileStorage
|
|
}
|
|
|
|
// Compile-time check that testQoSLimiter satisfies qos.Limiter.
var _ qos.Limiter = (*testQoSLimiter)(nil)

// testQoSLimiter is a qos.Limiter for tests that records the stack trace
// of every outstanding read/write request, so Close can fail the test and
// report any request that was acquired but never released.
type testQoSLimiter struct {
	t testing.TB
	// quard guards all fields below. NOTE(review): the name looks like a
	// typo for "guard" — renaming would touch every method of the type.
	quard sync.Mutex
	// id is a monotonically increasing counter used to key both stack maps.
	id int64
	// readStacks maps request id -> captured stack for unreleased reads.
	readStacks map[int64][]byte
	// writeStacks maps request id -> captured stack for unreleased writes.
	writeStacks map[int64][]byte
}
|
|
|
|
// SetMetrics is a no-op: the test limiter collects no metrics.
func (t *testQoSLimiter) SetMetrics(qos.Metrics) {}
|
|
|
func (t *testQoSLimiter) Close() {
|
|
t.quard.Lock()
|
|
defer t.quard.Unlock()
|
|
|
|
var sb strings.Builder
|
|
var seqN int
|
|
for _, stack := range t.readStacks {
|
|
seqN++
|
|
sb.WriteString(fmt.Sprintf("%d\n read request stack after limiter close: %s\n", seqN, string(stack)))
|
|
}
|
|
for _, stack := range t.writeStacks {
|
|
seqN++
|
|
sb.WriteString(fmt.Sprintf("%d\n write request stack after limiter close: %s\n", seqN, string(stack)))
|
|
}
|
|
require.True(t.t, seqN == 0, sb.String())
|
|
}
|
|
|
|
func (t *testQoSLimiter) ReadRequest(context.Context) (qos.ReleaseFunc, error) {
|
|
t.quard.Lock()
|
|
defer t.quard.Unlock()
|
|
|
|
stack := debug.Stack()
|
|
|
|
t.id++
|
|
id := t.id
|
|
|
|
if t.readStacks == nil {
|
|
t.readStacks = make(map[int64][]byte)
|
|
}
|
|
t.readStacks[id] = stack
|
|
|
|
return func() {
|
|
t.quard.Lock()
|
|
defer t.quard.Unlock()
|
|
|
|
delete(t.readStacks, id)
|
|
}, nil
|
|
}
|
|
|
|
func (t *testQoSLimiter) WriteRequest(context.Context) (qos.ReleaseFunc, error) {
|
|
t.quard.Lock()
|
|
defer t.quard.Unlock()
|
|
|
|
stack := debug.Stack()
|
|
|
|
t.id++
|
|
id := t.id
|
|
|
|
if t.writeStacks == nil {
|
|
t.writeStacks = make(map[int64][]byte)
|
|
}
|
|
t.writeStacks[id] = stack
|
|
|
|
return func() {
|
|
t.quard.Lock()
|
|
defer t.quard.Unlock()
|
|
|
|
delete(t.writeStacks, id)
|
|
}, nil
|
|
}
|
|
|
|
// SetParentID is a no-op: the test limiter ignores parent identifiers.
func (t *testQoSLimiter) SetParentID(string) {}