frostfs-node/cmd/frostfs-adm/internal/modules/maintenance/zombie/storage_engine.go

package zombie

import (
"context"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
engineconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine"
shardconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard"
blobovniczaconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/blobovnicza"
fstreeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/fstree"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/qos"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"github.com/panjf2000/ants/v2"
"github.com/spf13/cobra"
"go.etcd.io/bbolt"
"go.uber.org/zap"
)
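
// newEngine constructs a storage engine from the node configuration:
// it adds every shard described in the config, then opens and
// initializes the engine, terminating the command on any error.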
func newEngine(cmd *cobra.Command, c *config.Config) *engine.StorageEngine {
ngOpts := storageEngineOptions(c)
shardOpts := shardOptions(cmd, c)
e := engine.New(ngOpts...)
for _, opts := range shardOpts {
_, err := e.AddShard(cmd.Context(), opts...)
commonCmd.ExitOnErr(cmd, "iterate shards from config: %w", err)
}
commonCmd.ExitOnErr(cmd, "open storage engine: %w", e.Open(cmd.Context()))
commonCmd.ExitOnErr(cmd, "init storage engine: %w", e.Init(cmd.Context()))
return e
}
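
// storageEngineOptions maps engine-level config values to engine options;
// a no-op logger keeps the command output clean.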
func storageEngineOptions(c *config.Config) []engine.Option {
return []engine.Option{
engine.WithErrorThreshold(engineconfig.ShardErrorThreshold(c)),
engine.WithLogger(logger.NewLoggerWrapper(zap.NewNop())),
engine.WithLowMemoryConsumption(engineconfig.EngineLowMemoryConsumption(c)),
}
}
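
// shardOptions builds an option set for every shard found in the config.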
func shardOptions(cmd *cobra.Command, c *config.Config) [][]shard.Option {
var result [][]shard.Option
err := engineconfig.IterateShards(c, false, func(sh *shardconfig.Config) error {
result = append(result, getShardOpts(cmd, c, sh))
return nil
})
commonCmd.ExitOnErr(cmd, "iterate shards from config: %w", err)
return result
}
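
// getShardOpts assembles the options for a single shard: blobstor,
// metabase, pilorama, write-cache, GC settings and a no-op QoS limiter.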
func getShardOpts(cmd *cobra.Command, c *config.Config, sh *shardconfig.Config) []shard.Option {
wc, wcEnabled := getWriteCacheOpts(sh)
return []shard.Option{
shard.WithLogger(logger.NewLoggerWrapper(zap.NewNop())),
shard.WithRefillMetabase(sh.RefillMetabase()),
shard.WithRefillMetabaseWorkersCount(sh.RefillMetabaseWorkersCount()),
shard.WithMode(sh.Mode()),
shard.WithBlobStorOptions(getBlobstorOpts(cmd.Context(), sh)...),
shard.WithMetaBaseOptions(getMetabaseOpts(sh)...),
shard.WithPiloramaOptions(getPiloramaOpts(c, sh)...),
shard.WithWriteCache(wcEnabled),
shard.WithWriteCacheOptions(wc),
shard.WithRemoverBatchSize(sh.GC().RemoverBatchSize()),
shard.WithGCRemoverSleepInterval(sh.GC().RemoverSleepInterval()),
shard.WithExpiredCollectorBatchSize(sh.GC().ExpiredCollectorBatchSize()),
shard.WithExpiredCollectorWorkerCount(sh.GC().ExpiredCollectorWorkerCount()),
shard.WithGCWorkerPoolInitializer(func(sz int) util.WorkerPool {
pool, err := ants.NewPool(sz)
commonCmd.ExitOnErr(cmd, "init GC pool: %w", err)
return pool
}),
shard.WithLimiter(qos.NewNoopLimiter()),
}
}
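
// getWriteCacheOpts returns the write-cache options for the shard and
// reports whether the write-cache is enabled.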
func getWriteCacheOpts(sh *shardconfig.Config) ([]writecache.Option, bool) {
if wc := sh.WriteCache(); wc != nil && wc.Enabled() {
var result []writecache.Option
result = append(result,
writecache.WithPath(wc.Path()),
writecache.WithFlushSizeLimit(wc.MaxFlushingObjectsSize()),
writecache.WithMaxObjectSize(wc.MaxObjectSize()),
writecache.WithFlushWorkersCount(wc.WorkerCount()),
writecache.WithMaxCacheSize(wc.SizeLimit()),
writecache.WithMaxCacheCount(wc.CountLimit()),
writecache.WithNoSync(wc.NoSync()),
writecache.WithLogger(logger.NewLoggerWrapper(zap.NewNop())),
writecache.WithQoSLimiter(qos.NewNoopLimiter()),
)
return result, true
}
return nil, false
}
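
// getPiloramaOpts returns pilorama options if the tree service is enabled
// in the config, and nil otherwise.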
func getPiloramaOpts(c *config.Config, sh *shardconfig.Config) []pilorama.Option {
var piloramaOpts []pilorama.Option
if config.BoolSafe(c.Sub("tree"), "enabled") {
pr := sh.Pilorama()
piloramaOpts = append(piloramaOpts,
pilorama.WithPath(pr.Path()),
pilorama.WithPerm(pr.Perm()),
pilorama.WithNoSync(pr.NoSync()),
pilorama.WithMaxBatchSize(pr.MaxBatchSize()),
pilorama.WithMaxBatchDelay(pr.MaxBatchDelay()),
)
}
return piloramaOpts
}
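
// getMetabaseOpts builds metabase options for the shard; the bbolt timeout
// bounds how long opening a locked database file may block.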
func getMetabaseOpts(sh *shardconfig.Config) []meta.Option {
return []meta.Option{
meta.WithPath(sh.Metabase().Path()),
meta.WithPermissions(sh.Metabase().BoltDB().Perm()),
meta.WithMaxBatchSize(sh.Metabase().BoltDB().MaxBatchSize()),
meta.WithMaxBatchDelay(sh.Metabase().BoltDB().MaxBatchDelay()),
meta.WithBoltDBOptions(&bbolt.Options{
Timeout: 100 * time.Millisecond,
}),
meta.WithLogger(logger.NewLoggerWrapper(zap.NewNop())),
meta.WithEpochState(&epochState{}),
}
}
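
// getBlobstorOpts builds blobstor options: compression settings,
// the configured substorages and a no-op logger.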
func getBlobstorOpts(ctx context.Context, sh *shardconfig.Config) []blobstor.Option {
result := []blobstor.Option{
blobstor.WithCompressObjects(sh.Compress()),
blobstor.WithUncompressableContentTypes(sh.UncompressableContentTypes()),
blobstor.WithCompressibilityEstimate(sh.EstimateCompressibility()),
blobstor.WithCompressibilityEstimateThreshold(sh.EstimateCompressibilityThreshold()),
blobstor.WithStorages(getSubStorages(ctx, sh)),
blobstor.WithLogger(logger.NewLoggerWrapper(zap.NewNop())),
}
return result
}
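
// getSubStorages constructs the shard's substorages: objects smaller than
// the small-size limit are stored in the blobovnicza tree, everything else
// falls through to the FSTree.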
func getSubStorages(ctx context.Context, sh *shardconfig.Config) []blobstor.SubStorage {
var ss []blobstor.SubStorage
for _, storage := range sh.BlobStor().Storages() {
switch storage.Type() {
case blobovniczatree.Type:
sub := blobovniczaconfig.From((*config.Config)(storage))
blobTreeOpts := []blobovniczatree.Option{
blobovniczatree.WithRootPath(storage.Path()),
blobovniczatree.WithPermissions(storage.Perm()),
blobovniczatree.WithBlobovniczaSize(sub.Size()),
blobovniczatree.WithBlobovniczaShallowDepth(sub.ShallowDepth()),
blobovniczatree.WithBlobovniczaShallowWidth(sub.ShallowWidth()),
blobovniczatree.WithOpenedCacheSize(sub.OpenedCacheSize()),
blobovniczatree.WithOpenedCacheTTL(sub.OpenedCacheTTL()),
blobovniczatree.WithOpenedCacheExpInterval(sub.OpenedCacheExpInterval()),
blobovniczatree.WithInitWorkerCount(sub.InitWorkerCount()),
blobovniczatree.WithWaitBeforeDropDB(sub.RebuildDropTimeout()),
blobovniczatree.WithLogger(logger.NewLoggerWrapper(zap.NewNop())),
blobovniczatree.WithObjectSizeLimit(sh.SmallSizeLimit()),
}
ss = append(ss, blobstor.SubStorage{
Storage: blobovniczatree.NewBlobovniczaTree(ctx, blobTreeOpts...),
Policy: func(_ *objectSDK.Object, data []byte) bool {
return uint64(len(data)) < sh.SmallSizeLimit()
},
})
case fstree.Type:
sub := fstreeconfig.From((*config.Config)(storage))
fstreeOpts := []fstree.Option{
fstree.WithPath(storage.Path()),
fstree.WithPerm(storage.Perm()),
fstree.WithDepth(sub.Depth()),
fstree.WithNoSync(sub.NoSync()),
fstree.WithLogger(logger.NewLoggerWrapper(zap.NewNop())),
}
ss = append(ss, blobstor.SubStorage{
Storage: fstree.New(fstreeOpts...),
Policy: func(_ *objectSDK.Object, _ []byte) bool {
return true
},
})
default:
			// should never happen: unknown storage types are rejected
			// when the config is read
}
}
return ss
}
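
// epochState is a stub epoch source required by the metabase.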
type epochState struct{}
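
// CurrentEpoch always returns zero.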
func (epochState) CurrentEpoch() uint64 {
return 0
}