package engine

import (
	"context"
	"errors"
	"fmt"
	"io/fs"
	"os"
	"path/filepath"
	"strconv"
	"sync/atomic"
	"testing"
	"time"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/teststore"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
	cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
	"github.com/stretchr/testify/require"
	"go.etcd.io/bbolt"
)

// TestInitializationFailure checks that shard is initialized and closed even if media
// under any single component is absent.
func TestInitializationFailure(t *testing.T) {
	type openFileFunc func(string, int, fs.FileMode) (*os.File, error)

	type testShardOpts struct {
		openFileMetabase openFileFunc
		openFilePilorama openFileFunc
	}

	testShard := func(opts testShardOpts) ([]shard.Option, *teststore.TestStore, *teststore.TestStore) {
		sid, err := generateShardID()
		require.NoError(t, err)

		storages, smallFileStorage, largeFileStorage := newTestStorages(t.TempDir(), 1<<20)

		wcOpts := []writecache.Option{
			writecache.WithPath(t.TempDir()),
		}

		return []shard.Option{
			shard.WithID(sid),
			shard.WithLogger(test.NewLogger(t)),
			shard.WithBlobStorOptions(
				blobstor.WithStorages(storages)),
			shard.WithMetaBaseOptions(
				meta.WithBoltDBOptions(&bbolt.Options{
					Timeout:  100 * time.Millisecond,
					OpenFile: opts.openFileMetabase,
				}),
				meta.WithPath(filepath.Join(t.TempDir(), "metabase")),
				meta.WithPermissions(0o700),
				meta.WithEpochState(epochState{})),
			shard.WithWriteCache(true),
			shard.WithWriteCacheOptions(wcOpts),
			shard.WithPiloramaOptions(
				pilorama.WithPath(filepath.Join(t.TempDir(), "pilorama")),
				pilorama.WithOpenFile(opts.openFilePilorama),
			),
		}, smallFileStorage, largeFileStorage
	}

	t.Run("blobstor", func(t *testing.T) {
		shardOpts, _, largeFileStorage := testShard(testShardOpts{
			openFileMetabase: os.OpenFile,
			openFilePilorama: os.OpenFile,
		})
		largeFileStorage.SetOption(teststore.WithOpen(func(ro bool) error {
			return teststore.ErrDiskExploded
		}))
		beforeReload := func() {
			largeFileStorage.SetOption(teststore.WithOpen(nil))
		}
		testEngineFailInitAndReload(t, false, shardOpts, beforeReload)
	})
	t.Run("metabase", func(t *testing.T) {
		var openFileMetabaseSucceed atomic.Bool
		openFileMetabase := func(p string, f int, mode fs.FileMode) (*os.File, error) {
			if openFileMetabaseSucceed.Load() {
				return os.OpenFile(p, f, mode)
			}
			return nil, teststore.ErrDiskExploded
		}
		beforeReload := func() {
			openFileMetabaseSucceed.Store(true)
		}
		shardOpts, _, _ := testShard(testShardOpts{
			openFileMetabase: openFileMetabase,
			openFilePilorama: os.OpenFile,
		})
		testEngineFailInitAndReload(t, true, shardOpts, beforeReload)
	})
	t.Run("pilorama", func(t *testing.T) {
		var openFilePiloramaSucceed atomic.Bool
		openFilePilorama := func(p string, f int, mode fs.FileMode) (*os.File, error) {
			if openFilePiloramaSucceed.Load() {
				return os.OpenFile(p, f, mode)
			}
			return nil, teststore.ErrDiskExploded
		}
		beforeReload := func() {
			openFilePiloramaSucceed.Store(true)
		}
		shardOpts, _, _ := testShard(testShardOpts{
			openFileMetabase: os.OpenFile,
			openFilePilorama: openFilePilorama,
		})
		testEngineFailInitAndReload(t, false, shardOpts, beforeReload)
	})
}

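// testEngineFailInitAndReload builds an engine with a single shard from opts and
// expects initialization to fail (or AddShard itself, when errOnAdd is set), leaving
// the engine with zero shards. It then calls beforeReload to fix the simulated fault
// and checks that Reload brings the shard back.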
func testEngineFailInitAndReload(t *testing.T, errOnAdd bool, opts []shard.Option, beforeReload func()) {
	var configID string

	e := New()
	defer func() {
		require.NoError(t, e.Close(context.Background()))
	}()
	_, err := e.AddShard(context.Background(), opts...)
	if errOnAdd {
		require.Error(t, err)
		// This branch is only taken when we cannot update shard ID in the metabase.
		// The id cannot be encountered during normal operation, but it is ok for tests:
		// it is only compared for equality with other ids and we have 0 shards here.
		configID = "id"
	} else {
		require.NoError(t, err)

		e.mtx.RLock()
		var id string
		for id = range e.shards {
			break
		}
		configID = calculateShardID(e.shards[id].Shard.DumpInfo())
		e.mtx.RUnlock()

		err = e.Open(context.Background())
		if err == nil {
			require.Error(t, e.Init(context.Background()))
		}
	}

	e.mtx.RLock()
	shardCount := len(e.shards)
	e.mtx.RUnlock()
	require.Equal(t, 0, shardCount)

	beforeReload()

	require.NoError(t, e.Reload(context.Background(), ReConfiguration{
		shards: map[string][]shard.Option{configID: opts},
	}))

	e.mtx.RLock()
	shardCount = len(e.shards)
	e.mtx.RUnlock()
	require.Equal(t, 1, shardCount)
}

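// TestExecBlocks checks that BlockExecution makes data-related operations fail with
// the provided error, that ResumeExecution lifts the block, and that neither
// operations nor resuming are possible after the engine is closed.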
func TestExecBlocks(t *testing.T) {
	e := testNewEngine(t).setShardsNum(t, 2).engine // number doesn't matter in this test, 2 is several but not many

	// put some object
	obj := testutil.GenerateObjectWithCID(cidtest.ID())

	addr := object.AddressOf(obj)

	require.NoError(t, Put(context.Background(), e, obj))

	// block executions
	errBlock := errors.New("block exec err")

	require.NoError(t, e.BlockExecution(errBlock))

	// try to exec some op
	_, err := Head(context.Background(), e, addr)
	require.ErrorIs(t, err, errBlock)

	// resume executions
	require.NoError(t, e.ResumeExecution())

	_, err = Head(context.Background(), e, addr) // can be any data-related op
	require.NoError(t, err)

	// close
	require.NoError(t, e.Close(context.Background()))

	// try exec after close
	_, err = Head(context.Background(), e, addr)
	require.Error(t, err)

	// try to resume
	require.Error(t, e.ResumeExecution())
}

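// TestPersistentShardID checks that shard IDs are stored in the metabase and survive
// an engine restart, and that swapping the metabase files of two shards swaps their IDs.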
|
2022-03-01 08:59:05 +00:00
|
|
|
|
|
|
|
func TestPersistentShardID(t *testing.T) {
|
2023-05-05 07:38:59 +00:00
|
|
|
dir := t.TempDir()
|
2022-03-01 08:59:05 +00:00
|
|
|
|
2023-03-21 10:38:44 +00:00
|
|
|
te := newEngineWithErrorThreshold(t, dir, 1)
|
2022-03-01 08:59:05 +00:00
|
|
|
|
2023-03-21 10:38:44 +00:00
|
|
|
checkShardState(t, te.ng, te.shards[0].id, 0, mode.ReadWrite)
|
2023-08-31 16:26:47 +00:00
|
|
|
require.NoError(t, te.ng.Close(context.Background()))
|
2022-03-01 08:59:05 +00:00
|
|
|
|
2023-03-21 10:38:44 +00:00
|
|
|
newTe := newEngineWithErrorThreshold(t, dir, 1)
|
|
|
|
for i := 0; i < len(newTe.shards); i++ {
|
|
|
|
require.Equal(t, te.shards[i].id, newTe.shards[i].id)
|
|
|
|
}
|
2023-08-31 16:26:47 +00:00
|
|
|
require.NoError(t, newTe.ng.Close(context.Background()))
|
2022-03-01 08:59:05 +00:00
|
|
|
|
2023-03-21 10:38:44 +00:00
|
|
|
p1 := newTe.ng.shards[te.shards[0].id.String()].Shard.DumpInfo().MetaBaseInfo.Path
|
|
|
|
p2 := newTe.ng.shards[te.shards[1].id.String()].Shard.DumpInfo().MetaBaseInfo.Path
|
2022-03-01 08:59:05 +00:00
|
|
|
tmp := filepath.Join(dir, "tmp")
|
|
|
|
require.NoError(t, os.Rename(p1, tmp))
|
|
|
|
require.NoError(t, os.Rename(p2, p1))
|
|
|
|
require.NoError(t, os.Rename(tmp, p2))
|
|
|
|
|
2023-03-21 10:38:44 +00:00
|
|
|
newTe = newEngineWithErrorThreshold(t, dir, 1)
|
|
|
|
require.Equal(t, te.shards[1].id, newTe.shards[0].id)
|
|
|
|
require.Equal(t, te.shards[0].id, newTe.shards[1].id)
|
2023-08-31 16:26:47 +00:00
|
|
|
require.NoError(t, newTe.ng.Close(context.Background()))
|
2022-03-01 08:59:05 +00:00
|
|
|
}
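// TestReload checks that Reload adds shards for paths that are new in the
// configuration and removes shards whose paths are no longer listed, leaving the
// remaining shards untouched.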
|
2022-09-26 21:39:34 +00:00
|
|
|
|
|
|
|
func TestReload(t *testing.T) {
|
|
|
|
path := t.TempDir()
|
|
|
|
|
|
|
|
t.Run("add shards", func(t *testing.T) {
|
|
|
|
const shardNum = 4
|
|
|
|
addPath := filepath.Join(path, "add")
|
|
|
|
|
|
|
|
e, currShards := engineWithShards(t, addPath, shardNum)
|
|
|
|
|
|
|
|
var rcfg ReConfiguration
|
|
|
|
for _, p := range currShards {
|
|
|
|
rcfg.AddShard(p, nil)
|
|
|
|
}
|
|
|
|
|
|
|
|
rcfg.AddShard(currShards[0], nil) // same path
|
2023-03-23 14:59:14 +00:00
|
|
|
require.NoError(t, e.Reload(context.Background(), rcfg))
|
2022-09-26 21:39:34 +00:00
|
|
|
|
|
|
|
// no new paths => no new shards
|
|
|
|
require.Equal(t, shardNum, len(e.shards))
|
|
|
|
require.Equal(t, shardNum, len(e.shardPools))
|
|
|
|
|
|
|
|
newMeta := filepath.Join(addPath, fmt.Sprintf("%d.metabase", shardNum))
|
|
|
|
|
|
|
|
// add new shard
|
|
|
|
rcfg.AddShard(newMeta, []shard.Option{shard.WithMetaBaseOptions(
|
|
|
|
meta.WithPath(newMeta),
|
|
|
|
meta.WithEpochState(epochState{}),
|
|
|
|
)})
|
2023-03-23 14:59:14 +00:00
|
|
|
require.NoError(t, e.Reload(context.Background(), rcfg))
|
2022-09-26 21:39:34 +00:00
|
|
|
|
|
|
|
require.Equal(t, shardNum+1, len(e.shards))
|
|
|
|
require.Equal(t, shardNum+1, len(e.shardPools))
|
2024-01-09 13:26:43 +00:00
|
|
|
|
|
|
|
require.NoError(t, e.Close(context.Background()))
|
2022-09-26 21:39:34 +00:00
|
|
|
})
|
|
|
|
|
|
|
|
t.Run("remove shards", func(t *testing.T) {
|
|
|
|
const shardNum = 4
|
|
|
|
removePath := filepath.Join(path, "remove")
|
|
|
|
|
|
|
|
e, currShards := engineWithShards(t, removePath, shardNum)
|
|
|
|
|
|
|
|
var rcfg ReConfiguration
|
|
|
|
for i := 0; i < len(currShards)-1; i++ { // without one of the shards
|
|
|
|
rcfg.AddShard(currShards[i], nil)
|
|
|
|
}
|
|
|
|
|
2023-03-23 14:59:14 +00:00
|
|
|
require.NoError(t, e.Reload(context.Background(), rcfg))
|
2022-09-26 21:39:34 +00:00
|
|
|
|
|
|
|
// removed one
|
|
|
|
require.Equal(t, shardNum-1, len(e.shards))
|
|
|
|
require.Equal(t, shardNum-1, len(e.shardPools))
|
2024-01-09 13:26:43 +00:00
|
|
|
|
|
|
|
require.NoError(t, e.Close(context.Background()))
|
2022-09-26 21:39:34 +00:00
|
|
|
})
|
|
|
|
}
// engineWithShards creates an engine with the specified number of shards. Returns
// the engine and a slice of paths to the shards' metabases.
func engineWithShards(t *testing.T, path string, num int) (*StorageEngine, []string) {
	addPath := filepath.Join(path, "add")

	currShards := make([]string, 0, num)

	te := testNewEngine(t).
		setShardsNumOpts(t, num, func(id int) []shard.Option {
			return []shard.Option{
				shard.WithLogger(test.NewLogger(t)),
				shard.WithBlobStorOptions(
					blobstor.WithStorages(newStorages(filepath.Join(addPath, strconv.Itoa(id)), errSmallSize))),
				shard.WithMetaBaseOptions(
					meta.WithPath(filepath.Join(addPath, fmt.Sprintf("%d.metabase", id))),
					meta.WithPermissions(0o700),
					meta.WithEpochState(epochState{}),
				),
			}
		})
	e, ids := te.engine, te.shardIDs

	for _, id := range ids {
		currShards = append(currShards, calculateShardID(e.shards[id.String()].DumpInfo()))
	}

	require.Equal(t, num, len(e.shards))
	require.Equal(t, num, len(e.shardPools))

	require.NoError(t, e.Open(context.Background()))
	require.NoError(t, e.Init(context.Background()))

	return e, currShards
}