forked from TrueCloudLab/frostfs-node
[#1523] neofs-node: Refactor configuration
1. Move compression parameters to the `shard` section.
2. Allow using multiple sub-storage components in the blobstor.

Signed-off-by: Evgenii Stratonikov <evgeniy@nspcc.ru>
Parent: 13cdbde2e2
Commit: 26b4a258e0
37 changed files with 595 additions and 419 deletions
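For orientation, here is a sketch of what a shard section looks like after this refactoring. It is assembled from the YAML/ENV/JSON examples updated below; the exact nesting and the values shown (paths, sizes) are illustrative, not normative. Compression settings and the small-object threshold move up to the shard level, and `blobstor` becomes an ordered list of typed sub-storages:

shard:
  0:
    compress: true                      # was blobstor.compress before this change
    compression_exclude_content_types:
      - audio/*
      - video/*
    small_object_size: 102400           # objects below this size go to the blobovnicza sub-storage
    blobstor:                           # now a list of sub-storages, tried in order
      - type: blobovnicza
        path: tmp/0/blob/blobovnicza
        depth: 1
        width: 4
        opened_cache_capacity: 50
      - type: fstree
        path: tmp/0/blob
        depth: 5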
@@ -15,6 +15,8 @@ import (
 	contractsconfig "github.com/nspcc-dev/neofs-node/cmd/neofs-node/config/contracts"
 	engineconfig "github.com/nspcc-dev/neofs-node/cmd/neofs-node/config/engine"
 	shardconfig "github.com/nspcc-dev/neofs-node/cmd/neofs-node/config/engine/shard"
+	blobovniczaconfig "github.com/nspcc-dev/neofs-node/cmd/neofs-node/config/engine/shard/blobstor/blobovnicza"
+	fstreeconfig "github.com/nspcc-dev/neofs-node/cmd/neofs-node/config/engine/shard/blobstor/fstree"
 	loggerconfig "github.com/nspcc-dev/neofs-node/cmd/neofs-node/config/logger"
 	metricsconfig "github.com/nspcc-dev/neofs-node/cmd/neofs-node/config/metrics"
 	nodeconfig "github.com/nspcc-dev/neofs-node/cmd/neofs-node/config/node"
@@ -22,6 +24,8 @@ import (
 	"github.com/nspcc-dev/neofs-node/pkg/core/container"
 	netmapCore "github.com/nspcc-dev/neofs-node/pkg/core/netmap"
 	"github.com/nspcc-dev/neofs-node/pkg/local_object_storage/blobstor"
+	"github.com/nspcc-dev/neofs-node/pkg/local_object_storage/blobstor/blobovniczatree"
+	"github.com/nspcc-dev/neofs-node/pkg/local_object_storage/blobstor/fstree"
 	"github.com/nspcc-dev/neofs-node/pkg/local_object_storage/engine"
 	meta "github.com/nspcc-dev/neofs-node/pkg/local_object_storage/metabase"
 	"github.com/nspcc-dev/neofs-node/pkg/local_object_storage/pilorama"
@@ -47,6 +51,7 @@ import (
 	"github.com/nspcc-dev/neofs-node/pkg/util/logger"
 	"github.com/nspcc-dev/neofs-node/pkg/util/state"
 	"github.com/nspcc-dev/neofs-sdk-go/netmap"
+	objectSDK "github.com/nspcc-dev/neofs-sdk-go/object"
 	"github.com/nspcc-dev/neofs-sdk-go/user"
 	"github.com/nspcc-dev/neofs-sdk-go/version"
 	"github.com/panjf2000/ants/v2"
@@ -418,7 +423,7 @@ func initShardOptions(c *cfg) {
 		}

 		blobStorCfg := sc.BlobStor()
-		blobovniczaCfg := blobStorCfg.Blobovnicza()
+		storages := blobStorCfg.Storages()
 		metabaseCfg := sc.Metabase()
 		gcCfg := sc.GC()

@@ -426,19 +431,47 @@ func initShardOptions(c *cfg) {

 		piloramaCfg := sc.Pilorama()
 		if config.BoolSafe(c.appCfg.Sub("tree"), "enabled") {
-			piloramaPath := piloramaCfg.Path()
-			if piloramaPath == "" {
-				piloramaPath = filepath.Join(blobStorCfg.Path(), "pilorama.db")
-			}
-
 			piloramaOpts = []pilorama.Option{
-				pilorama.WithPath(piloramaPath),
+				pilorama.WithPath(piloramaCfg.Path()),
 				pilorama.WithPerm(piloramaCfg.Perm()),
 				pilorama.WithNoSync(piloramaCfg.NoSync()),
 				pilorama.WithMaxBatchSize(piloramaCfg.MaxBatchSize()),
 				pilorama.WithMaxBatchDelay(piloramaCfg.MaxBatchDelay())}
 		}

+		var st []blobstor.SubStorage
+		for i := range storages {
+			switch storages[i].Type() {
+			case "blobovniczas":
+				sub := blobovniczaconfig.From((*config.Config)(storages[i]))
+				lim := sc.SmallSizeLimit()
+				st = append(st, blobstor.SubStorage{
+					Storage: blobovniczatree.NewBlobovniczaTree(
+						blobovniczatree.WithLogger(c.log),
+						blobovniczatree.WithRootPath(storages[i].Path()),
+						blobovniczatree.WithPermissions(storages[i].Perm()),
+						blobovniczatree.WithBlobovniczaSize(sub.Size()),
+						blobovniczatree.WithBlobovniczaShallowDepth(sub.ShallowDepth()),
+						blobovniczatree.WithBlobovniczaShallowWidth(sub.ShallowWidth()),
+						blobovniczatree.WithOpenedCacheSize(sub.OpenedCacheSize())),
+					Policy: func(_ *objectSDK.Object, data []byte) bool {
+						return uint64(len(data)) < lim
+					},
+				})
+			case "fstree":
+				sub := fstreeconfig.From((*config.Config)(storages[i]))
+				st = append(st, blobstor.SubStorage{
+					Storage: fstree.New(
+						fstree.WithPath(storages[i].Path()),
+						fstree.WithPerm(storages[i].Perm()),
+						fstree.WithDepth(sub.Depth())),
+					Policy: func(_ *objectSDK.Object, data []byte) bool {
+						return true
+					},
+				})
+			}
+		}
+
 		metaPath := metabaseCfg.Path()
 		metaPerm := metabaseCfg.BoltDB().Perm()
 		fatalOnErr(util.MkdirAllX(filepath.Dir(metaPath), metaPerm))
@@ -453,15 +486,9 @@ func initShardOptions(c *cfg) {
 			shard.WithRefillMetabase(sc.RefillMetabase()),
 			shard.WithMode(sc.Mode()),
 			shard.WithBlobStorOptions(
-				blobstor.WithRootPath(blobStorCfg.Path()),
-				blobstor.WithCompressObjects(blobStorCfg.Compress()),
-				blobstor.WithRootPerm(blobStorCfg.Perm()),
-				blobstor.WithShallowDepth(blobStorCfg.ShallowDepth()),
-				blobstor.WithSmallSizeLimit(blobStorCfg.SmallSizeLimit()),
-				blobstor.WithBlobovniczaSize(blobovniczaCfg.Size()),
-				blobstor.WithBlobovniczaShallowDepth(blobovniczaCfg.ShallowDepth()),
-				blobstor.WithBlobovniczaShallowWidth(blobovniczaCfg.ShallowWidth()),
-				blobstor.WithBlobovniczaOpenedCacheSize(blobovniczaCfg.OpenedCacheSize()),
+				blobstor.WithCompressObjects(sc.Compress()),
+				blobstor.WithUncompressableContentTypes(sc.UncompressableContentTypes()),
+				blobstor.WithStorages(st),
 				blobstor.WithLogger(c.log),
 			),
 			shard.WithMetaBaseOptions(
@@ -39,7 +39,7 @@ func IterateShards(c *config.Config, required bool, f func(*shardconfig.Config))
 		// must have different paths, so if it is missing, the shard is not here.
 		// At the same time checking for "blobstor" section doesn't work proper
 		// with configuration via the environment.
-		if (*config.Config)(sc).Value("blobstor.path") == nil {
+		if (*config.Config)(sc).Value("metabase.path") == nil {
 			break
 		}
 		(*config.Config)(sc).SetDefault(def)
@@ -8,6 +8,8 @@ import (
 	"github.com/nspcc-dev/neofs-node/cmd/neofs-node/config"
 	engineconfig "github.com/nspcc-dev/neofs-node/cmd/neofs-node/config/engine"
 	shardconfig "github.com/nspcc-dev/neofs-node/cmd/neofs-node/config/engine/shard"
+	blobovniczaconfig "github.com/nspcc-dev/neofs-node/cmd/neofs-node/config/engine/shard/blobstor/blobovnicza"
+	fstreeconfig "github.com/nspcc-dev/neofs-node/cmd/neofs-node/config/engine/shard/blobstor/fstree"
 	piloramaconfig "github.com/nspcc-dev/neofs-node/cmd/neofs-node/config/engine/shard/pilorama"
 	configtest "github.com/nspcc-dev/neofs-node/cmd/neofs-node/config/test"
 	"github.com/nspcc-dev/neofs-node/pkg/local_object_storage/shard/mode"
@@ -53,7 +55,7 @@ func TestEngineSection(t *testing.T) {
 			wc := sc.WriteCache()
 			meta := sc.Metabase()
 			blob := sc.BlobStor()
-			blz := blob.Blobovnicza()
+			ss := blob.Storages()
 			pl := sc.Pilorama()
 			gc := sc.GC()

@@ -79,18 +81,23 @@ func TestEngineSection(t *testing.T) {
 				require.Equal(t, 100, meta.BoltDB().MaxBatchSize())
 				require.Equal(t, 10*time.Millisecond, meta.BoltDB().MaxBatchDelay())

-				require.Equal(t, "tmp/0/blob", blob.Path())
-				require.EqualValues(t, 0644, blob.Perm())
-				require.Equal(t, true, blob.Compress())
-				require.Equal(t, []string{"audio/*", "video/*"}, blob.UncompressableContentTypes())
-				require.EqualValues(t, 5, blob.ShallowDepth())
-				require.EqualValues(t, 102400, blob.SmallSizeLimit())
+				require.Equal(t, true, sc.Compress())
+				require.Equal(t, []string{"audio/*", "video/*"}, sc.UncompressableContentTypes())
+				require.EqualValues(t, 102400, sc.SmallSizeLimit())

+				require.Equal(t, 2, len(ss))
+				blz := blobovniczaconfig.From((*config.Config)(ss[0]))
+				require.Equal(t, "tmp/0/blob/blobovnicza", ss[0].Path())
+				require.EqualValues(t, 0644, blz.BoltDB().Perm())
 				require.EqualValues(t, 4194304, blz.Size())
 				require.EqualValues(t, 1, blz.ShallowDepth())
 				require.EqualValues(t, 4, blz.ShallowWidth())
 				require.EqualValues(t, 50, blz.OpenedCacheSize())

+				require.Equal(t, "tmp/0/blob", ss[1].Path())
+				require.EqualValues(t, 0644, ss[1].Perm())
+				require.EqualValues(t, 5, fstreeconfig.From((*config.Config)(ss[1])).Depth())
+
 				require.EqualValues(t, 150, gc.RemoverBatchSize())
 				require.Equal(t, 2*time.Minute, gc.RemoverSleepInterval())
@@ -117,18 +124,23 @@ func TestEngineSection(t *testing.T) {
 				require.Equal(t, 200, meta.BoltDB().MaxBatchSize())
 				require.Equal(t, 20*time.Millisecond, meta.BoltDB().MaxBatchDelay())

-				require.Equal(t, "tmp/1/blob", blob.Path())
-				require.EqualValues(t, 0644, blob.Perm())
-				require.Equal(t, false, blob.Compress())
-				require.Equal(t, []string(nil), blob.UncompressableContentTypes())
-				require.EqualValues(t, 5, blob.ShallowDepth())
-				require.EqualValues(t, 102400, blob.SmallSizeLimit())
+				require.Equal(t, false, sc.Compress())
+				require.Equal(t, []string(nil), sc.UncompressableContentTypes())
+				require.EqualValues(t, 102400, sc.SmallSizeLimit())

+				require.Equal(t, 2, len(ss))
+
+				blz := blobovniczaconfig.From((*config.Config)(ss[0]))
+				require.Equal(t, "tmp/1/blob/blobovnicza", ss[0].Path())
 				require.EqualValues(t, 4194304, blz.Size())
 				require.EqualValues(t, 1, blz.ShallowDepth())
 				require.EqualValues(t, 4, blz.ShallowWidth())
 				require.EqualValues(t, 50, blz.OpenedCacheSize())

+				require.Equal(t, "tmp/1/blob", ss[1].Path())
+				require.EqualValues(t, 0644, ss[1].Perm())
+				require.EqualValues(t, 5, fstreeconfig.From((*config.Config)(ss[1])).Depth())
+
 				require.EqualValues(t, 200, gc.RemoverBatchSize())
 				require.Equal(t, 5*time.Minute, gc.RemoverSleepInterval())
@@ -29,6 +29,11 @@ func From(c *config.Config) *Config {
 	return (*Config)(c)
 }

+// Type returns the storage type.
+func (x *Config) Type() string {
+	return "blobovnicza"
+}
+
 // Size returns the value of "size" config parameter.
 //
 // Returns SizeDefault if the value is not a positive number.
@@ -1,122 +1,36 @@
 package blobstorconfig

 import (
-	"io/fs"
+	"strconv"

 	"github.com/nspcc-dev/neofs-node/cmd/neofs-node/config"
-	blobovniczaconfig "github.com/nspcc-dev/neofs-node/cmd/neofs-node/config/engine/shard/blobstor/blobovnicza"
-	"github.com/nspcc-dev/neofs-node/pkg/local_object_storage/blobstor/fstree"
+	"github.com/nspcc-dev/neofs-node/cmd/neofs-node/config/engine/shard/blobstor/storage"
 )

 // Config is a wrapper over the config section
 // which provides access to BlobStor configurations.
 type Config config.Config

-// config defaults
-const (
-	// PermDefault are default permission bits for BlobStor data.
-	PermDefault = 0660
-
-	// ShallowDepthDefault is a default shallow dir depth.
-	ShallowDepthDefault = 4
-
-	// SmallSizeLimitDefault is a default limit of small objects payload in bytes.
-	SmallSizeLimitDefault = 1 << 20
-)
-
 // From wraps config section into Config.
 func From(c *config.Config) *Config {
 	return (*Config)(c)
 }

-// Path returns the value of "path" config parameter.
-//
-// Panics if the value is not a non-empty string.
-func (x *Config) Path() string {
-	p := config.String(
-		(*config.Config)(x),
-		"path",
-	)
-
-	if p == "" {
-		panic("blobstor path not set")
-	}
-
-	return p
-}
-
-// Perm returns the value of "perm" config parameter as a fs.FileMode.
-//
-// Returns PermDefault if the value is not a non-zero number.
-func (x *Config) Perm() fs.FileMode {
-	p := config.UintSafe(
-		(*config.Config)(x),
-		"perm",
-	)
-
-	if p == 0 {
-		p = PermDefault
-	}
-
-	return fs.FileMode(p)
-}
-
-// ShallowDepth returns the value of "depth" config parameter.
-//
-// Returns ShallowDepthDefault if the value is out of
-// [1:fstree.MaxDepth] range.
-func (x *Config) ShallowDepth() int {
-	d := config.IntSafe(
-		(*config.Config)(x),
-		"depth",
-	)
-
-	if d >= 1 && d <= fstree.MaxDepth {
-		return int(d)
-	}
-
-	return ShallowDepthDefault
-}
-
-// Compress returns the value of "compress" config parameter.
-//
-// Returns false if the value is not a valid bool.
-func (x *Config) Compress() bool {
-	return config.BoolSafe(
-		(*config.Config)(x),
-		"compress",
-	)
-}
-
-// UncompressableContentTypes returns the value of "compress_skip_content_types" config parameter.
-//
-// Returns nil if a the value is missing or is invalid.
-func (x *Config) UncompressableContentTypes() []string {
-	return config.StringSliceSafe(
-		(*config.Config)(x),
-		"compression_exclude_content_types")
-}
-
-// SmallSizeLimit returns the value of "small_object_size" config parameter.
-//
-// Returns SmallSizeLimitDefault if the value is not a positive number.
-func (x *Config) SmallSizeLimit() uint64 {
-	l := config.SizeInBytesSafe(
-		(*config.Config)(x),
-		"small_object_size",
-	)
-
-	if l > 0 {
-		return l
-	}
-
-	return SmallSizeLimitDefault
-}
-
-// Blobovnicza returns "blobovnicza" subsection as a blobovniczaconfig.Config.
-func (x *Config) Blobovnicza() *blobovniczaconfig.Config {
-	return blobovniczaconfig.From(
-		(*config.Config)(x).
-			Sub("blobovnicza"),
-	)
-}
+// Storages returns the value of storage subcomponents.
+func (x *Config) Storages() []*storage.Config {
+	var ss []*storage.Config
+	for i := 0; ; i++ {
+		typ := config.String(
+			(*config.Config)(x),
+			strconv.Itoa(i)+".type")
+		switch typ {
+		case "":
+			return ss
+		case "fstree", "blobovnicza":
+			sub := storage.From((*config.Config)(x).Sub(strconv.Itoa(i)))
+			ss = append(ss, sub)
+		default:
+			panic("invalid type")
+		}
+	}
+}
cmd/neofs-node/config/engine/shard/blobstor/fstree/config.go
Normal file
40
cmd/neofs-node/config/engine/shard/blobstor/fstree/config.go
Normal file
|
@ -0,0 +1,40 @@
|
||||||
|
package fstree
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/nspcc-dev/neofs-node/cmd/neofs-node/config"
|
||||||
|
"github.com/nspcc-dev/neofs-node/pkg/local_object_storage/blobstor/fstree"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Config is a wrapper over the config section
|
||||||
|
// which provides access to Blobovnicza configurations.
|
||||||
|
type Config config.Config
|
||||||
|
|
||||||
|
// DepthDefault is a default shallow dir depth.
|
||||||
|
const DepthDefault = 4
|
||||||
|
|
||||||
|
// From wraps config section into Config.
|
||||||
|
func From(c *config.Config) *Config {
|
||||||
|
return (*Config)(c)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Type returns the storage type.
|
||||||
|
func (x *Config) Type() string {
|
||||||
|
return "fstree"
|
||||||
|
}
|
||||||
|
|
||||||
|
// Depth returns the value of "depth" config parameter.
|
||||||
|
//
|
||||||
|
// Returns DepthDefault if the value is out of
|
||||||
|
// [1:fstree.MaxDepth] range.
|
||||||
|
func (x *Config) Depth() int {
|
||||||
|
d := config.IntSafe(
|
||||||
|
(*config.Config)(x),
|
||||||
|
"depth",
|
||||||
|
)
|
||||||
|
|
||||||
|
if d >= 1 && d <= fstree.MaxDepth {
|
||||||
|
return int(d)
|
||||||
|
}
|
||||||
|
|
||||||
|
return DepthDefault
|
||||||
|
}
|
|
@@ -0,0 +1,55 @@
+package storage
+
+import (
+	"io/fs"
+
+	"github.com/nspcc-dev/neofs-node/cmd/neofs-node/config"
+)
+
+type Config config.Config
+
+// PermDefault are default permission bits for BlobStor data.
+const PermDefault = 0660
+
+func From(x *config.Config) *Config {
+	return (*Config)(x)
+}
+
+// Type returns storage type.
+func (x *Config) Type() string {
+	return config.String(
+		(*config.Config)(x),
+		"type")
+}
+
+// Path returns the value of "path" config parameter.
+//
+// Panics if the value is not a non-empty string.
+func (x *Config) Path() string {
+	p := config.String(
+		(*config.Config)(x),
+		"path",
+	)
+
+	if p == "" {
+		panic("blobstor path not set")
+	}
+
+	return p
+}
+
+// Perm returns the value of "perm" config parameter as a fs.FileMode.
+//
+// Returns PermDefault if the value is not a non-zero number.
+func (x *Config) Perm() fs.FileMode {
+	p := config.UintSafe(
+		(*config.Config)(x),
+		"perm",
+	)
+
+	if p == 0 {
+		p = PermDefault
+	}
+
+	return fs.FileMode(p)
+}
@@ -16,11 +16,49 @@ import (
 // which provides access to Shard configurations.
 type Config config.Config

+// SmallSizeLimitDefault is a default limit of small objects payload in bytes.
+const SmallSizeLimitDefault = 1 << 20
+
 // From wraps config section into Config.
 func From(c *config.Config) *Config {
 	return (*Config)(c)
 }

+// Compress returns the value of "compress" config parameter.
+//
+// Returns false if the value is not a valid bool.
+func (x *Config) Compress() bool {
+	return config.BoolSafe(
+		(*config.Config)(x),
+		"compress",
+	)
+}
+
+// UncompressableContentTypes returns the value of "compress_skip_content_types" config parameter.
+//
+// Returns nil if a the value is missing or is invalid.
+func (x *Config) UncompressableContentTypes() []string {
+	return config.StringSliceSafe(
+		(*config.Config)(x),
+		"compression_exclude_content_types")
+}
+
+// SmallSizeLimit returns the value of "small_object_size" config parameter.
+//
+// Returns SmallSizeLimitDefault if the value is not a positive number.
+func (x *Config) SmallSizeLimit() uint64 {
+	l := config.SizeInBytesSafe(
+		(*config.Config)(x),
+		"small_object_size",
+	)
+
+	if l > 0 {
+		return l
+	}
+
+	return SmallSizeLimitDefault
+}
+
 // BlobStor returns "blobstor" subsection as a blobstorconfig.Config.
 func (x *Config) BlobStor() *blobstorconfig.Config {
 	return blobstorconfig.From(
@@ -101,17 +101,22 @@ NEOFS_STORAGE_SHARD_0_METABASE_PERM=0644
 NEOFS_STORAGE_SHARD_0_METABASE_MAX_BATCH_SIZE=100
 NEOFS_STORAGE_SHARD_0_METABASE_MAX_BATCH_DELAY=10ms
 ### Blobstor config
-NEOFS_STORAGE_SHARD_0_BLOBSTOR_PATH=tmp/0/blob
-NEOFS_STORAGE_SHARD_0_BLOBSTOR_PERM=0644
-NEOFS_STORAGE_SHARD_0_BLOBSTOR_COMPRESS=true
-NEOFS_STORAGE_SHARD_0_BLOBSTOR_COMPRESSION_EXCLUDE_CONTENT_TYPES="audio/* video/*"
-NEOFS_STORAGE_SHARD_0_BLOBSTOR_DEPTH=5
-NEOFS_STORAGE_SHARD_0_BLOBSTOR_SMALL_OBJECT_SIZE=102400
+NEOFS_STORAGE_SHARD_0_COMPRESS=true
+NEOFS_STORAGE_SHARD_0_COMPRESSION_EXCLUDE_CONTENT_TYPES="audio/* video/*"
+NEOFS_STORAGE_SHARD_0_SMALL_OBJECT_SIZE=102400
 ### Blobovnicza config
-NEOFS_STORAGE_SHARD_0_BLOBSTOR_BLOBOVNICZA_SIZE=4194304
-NEOFS_STORAGE_SHARD_0_BLOBSTOR_BLOBOVNICZA_DEPTH=1
-NEOFS_STORAGE_SHARD_0_BLOBSTOR_BLOBOVNICZA_WIDTH=4
-NEOFS_STORAGE_SHARD_0_BLOBSTOR_BLOBOVNICZA_OPENED_CACHE_CAPACITY=50
+NEOFS_STORAGE_SHARD_0_BLOBSTOR_0_PATH=tmp/0/blob/blobovnicza
+NEOFS_STORAGE_SHARD_0_BLOBSTOR_0_PERM=0644
+NEOFS_STORAGE_SHARD_0_BLOBSTOR_0_TYPE=blobovnicza
+NEOFS_STORAGE_SHARD_0_BLOBSTOR_0_SIZE=4194304
+NEOFS_STORAGE_SHARD_0_BLOBSTOR_0_DEPTH=1
+NEOFS_STORAGE_SHARD_0_BLOBSTOR_0_WIDTH=4
+NEOFS_STORAGE_SHARD_0_BLOBSTOR_0_OPENED_CACHE_CAPACITY=50
+### FSTree config
+NEOFS_STORAGE_SHARD_0_BLOBSTOR_1_TYPE=fstree
+NEOFS_STORAGE_SHARD_0_BLOBSTOR_1_PATH=tmp/0/blob
+NEOFS_STORAGE_SHARD_0_BLOBSTOR_1_PERM=0644
+NEOFS_STORAGE_SHARD_0_BLOBSTOR_1_DEPTH=5
 ### Pilorama config
 NEOFS_STORAGE_SHARD_0_PILORAMA_PATH="tmp/0/blob/pilorama.db"
 NEOFS_STORAGE_SHARD_0_PILORAMA_MAX_BATCH_DELAY=10ms
@@ -141,16 +146,20 @@ NEOFS_STORAGE_SHARD_1_METABASE_PERM=0644
 NEOFS_STORAGE_SHARD_1_METABASE_MAX_BATCH_SIZE=200
 NEOFS_STORAGE_SHARD_1_METABASE_MAX_BATCH_DELAY=20ms
 ### Blobstor config
-NEOFS_STORAGE_SHARD_1_BLOBSTOR_PATH=tmp/1/blob
-NEOFS_STORAGE_SHARD_1_BLOBSTOR_PERM=0644
-NEOFS_STORAGE_SHARD_1_BLOBSTOR_COMPRESS=false
-NEOFS_STORAGE_SHARD_1_BLOBSTOR_DEPTH=5
-NEOFS_STORAGE_SHARD_1_BLOBSTOR_SMALL_OBJECT_SIZE=102400
+NEOFS_STORAGE_SHARD_1_COMPRESS=false
+NEOFS_STORAGE_SHARD_1_SMALL_OBJECT_SIZE=102400
 ### Blobovnicza config
-NEOFS_STORAGE_SHARD_1_BLOBSTOR_BLOBOVNICZA_SIZE=4194304
-NEOFS_STORAGE_SHARD_1_BLOBSTOR_BLOBOVNICZA_DEPTH=1
-NEOFS_STORAGE_SHARD_1_BLOBSTOR_BLOBOVNICZA_WIDTH=4
-NEOFS_STORAGE_SHARD_1_BLOBSTOR_BLOBOVNICZA_OPENED_CACHE_CAPACITY=50
+NEOFS_STORAGE_SHARD_1_BLOBSTOR_0_TYPE=blobovnicza
+NEOFS_STORAGE_SHARD_1_BLOBSTOR_0_PATH=tmp/1/blob/blobovnicza
+NEOFS_STORAGE_SHARD_1_BLOBSTOR_0_SIZE=4194304
+NEOFS_STORAGE_SHARD_1_BLOBSTOR_0_DEPTH=1
+NEOFS_STORAGE_SHARD_1_BLOBSTOR_0_WIDTH=4
+NEOFS_STORAGE_SHARD_1_BLOBSTOR_0_OPENED_CACHE_CAPACITY=50
+### FSTree config
+NEOFS_STORAGE_SHARD_1_BLOBSTOR_1_TYPE=fstree
+NEOFS_STORAGE_SHARD_1_BLOBSTOR_1_PATH=tmp/1/blob
+NEOFS_STORAGE_SHARD_1_BLOBSTOR_1_PERM=0644
+NEOFS_STORAGE_SHARD_1_BLOBSTOR_1_DEPTH=5
 ### Pilorama config
 NEOFS_STORAGE_SHARD_1_PILORAMA_PATH="tmp/1/blob/pilorama.db"
 NEOFS_STORAGE_SHARD_1_PILORAMA_PERM=0644
@@ -147,22 +147,28 @@
         "max_batch_size": 100,
         "max_batch_delay": "10ms"
       },
-      "blobstor": {
-        "path": "tmp/0/blob",
-        "perm": "0644",
       "compress": true,
       "compression_exclude_content_types": [
         "audio/*", "video/*"
       ],
-      "depth": 5,
       "small_object_size": 102400,
-      "blobovnicza": {
+      "blobstor": [
+        {
+          "type": "blobovnicza",
+          "path": "tmp/0/blob/blobovnicza",
+          "perm": "0644",
           "size": 4194304,
           "depth": 1,
           "width": 4,
           "opened_cache_capacity": 50
-        }
         },
+        {
+          "type": "fstree",
+          "path": "tmp/0/blob",
+          "perm": "0644",
+          "depth": 5
+        }
+      ],
       "pilorama": {
         "path": "tmp/0/blob/pilorama.db",
         "max_batch_delay": "10ms",
@@ -191,19 +197,25 @@
         "max_batch_size": 200,
         "max_batch_delay": "20ms"
       },
-      "blobstor": {
-        "path": "tmp/1/blob",
-        "perm": "0644",
       "compress": false,
-      "depth": 5,
       "small_object_size": 102400,
-      "blobovnicza": {
+      "blobstor": [
+        {
+          "type": "blobovnicza",
+          "path": "tmp/1/blob/blobovnicza",
+          "perm": "0644",
           "size": 4194304,
           "depth": 1,
           "width": 4,
           "opened_cache_capacity": 50
-        }
         },
+        {
+          "type": "fstree",
+          "path": "tmp/1/blob",
+          "perm": "0644",
+          "depth": 5
+        }
+      ],
       "pilorama": {
         "path": "tmp/1/blob/pilorama.db",
         "perm": "0644",
@@ -126,17 +126,17 @@ storage:
     max_batch_delay: 5ms # maximum delay for a batch of operations to be executed
     max_batch_size: 100 # maximum amount of operations in a single batch

-    blobstor:
     compress: false # turn on/off zstd(level 3) compression of stored objects
-      perm: 0644 # permissions for blobstor files(directories: +x for current user and group)
-      depth: 5 # max depth of object tree storage in FS
     small_object_size: 100 kb # size threshold for "small" objects which are cached in key-value DB, not in FS, bytes

-      blobovnicza:
-        size: 4m # approximate size limit of single blobovnicza instance, total size will be: size*width^(depth+1), bytes
+    blobstor:
+      - size: 4m # approximate size limit of single blobovnicza instance, total size will be: size*width^(depth+1), bytes
+        perm: 0644 # permissions for blobstor files(directories: +x for current user and group)
         depth: 1 # max depth of object tree storage in key-value DB
         width: 4 # max width of object tree storage in key-value DB
         opened_cache_capacity: 50 # maximum number of opened database files
+      - perm: 0644 # permissions for blobstor files(directories: +x for current user and group)
+        depth: 5 # max depth of object tree storage in FS

     gc:
       remover_batch_size: 200 # number of objects to be removed by the garbage collector
@@ -156,13 +156,17 @@ storage:
       max_batch_size: 100
       max_batch_delay: 10ms

-    blobstor:
-      path: tmp/0/blob # blobstor path
     compress: true # turn on/off zstd(level 3) compression of stored objects
     compression_exclude_content_types:
       - audio/*
       - video/*

+    blobstor:
+      - type: blobovnicza
+        path: tmp/0/blob/blobovnicza
+      - type: fstree
+        path: tmp/0/blob # blobstor path
+
     pilorama:
       path: tmp/0/blob/pilorama.db # path to the pilorama database. If omitted, `pilorama.db` file is created blobstor.path
       max_batch_delay: 10ms
@@ -181,9 +185,11 @@ storage:
       path: tmp/1/meta # metabase path

     blobstor:
+      - type: blobovnicza
+        path: tmp/1/blob/blobovnicza
+      - type: fstree
         path: tmp/1/blob # blobstor path

     pilorama:
       path: tmp/1/blob/pilorama.db
       no_sync: true # USE WITH CAUTION. Return to user before pages have been persisted.
@@ -11,6 +11,7 @@ import (
 	"github.com/nspcc-dev/hrw"
 	"github.com/nspcc-dev/neofs-node/pkg/local_object_storage/blobovnicza"
 	"github.com/nspcc-dev/neofs-node/pkg/local_object_storage/blobstor/common"
+	"github.com/nspcc-dev/neofs-node/pkg/local_object_storage/blobstor/compression"
 	apistatus "github.com/nspcc-dev/neofs-sdk-go/client/status"
 	objectSDK "github.com/nspcc-dev/neofs-sdk-go/object"
 	oid "github.com/nspcc-dev/neofs-sdk-go/object/id"
@@ -76,8 +77,6 @@ type Blobovniczas struct {
 	// list of active (opened, non-filled) Blobovniczas
 	activeMtx sync.RWMutex
 	active    map[string]blobovniczaWithIndex
-
-	onClose []func()
 }

 type blobovniczaWithIndex struct {
@@ -887,3 +886,8 @@ func u64FromHexString(str string) uint64 {
 func (b *Blobovniczas) Type() string {
 	return "blobovniczas"
 }
+
+// SetCompressor implements common.Storage.
+func (b *Blobovniczas) SetCompressor(cc *compression.CConfig) {
+	b.CConfig = cc
+}
@@ -19,16 +19,6 @@ func (b *Blobovniczas) Open(readOnly bool) error {
 func (b *Blobovniczas) Init() error {
 	b.log.Debug("initializing Blobovnicza's")

-	err := b.CConfig.Init()
-	if err != nil {
-		return err
-	}
-	b.onClose = append(b.onClose, func() {
-		if err := b.CConfig.Close(); err != nil {
-			b.log.Debug("can't close zstd compressor", zap.String("err", err.Error()))
-		}
-	})
-
 	if b.readOnly {
 		b.log.Debug("read-only mode, skip blobovniczas initialization...")
 		return nil
@@ -78,9 +68,5 @@ func (b *Blobovniczas) Close() error {

 	b.activeMtx.Unlock()

-	for i := range b.onClose {
-		b.onClose[i]()
-	}
-
 	return nil
 }
@@ -36,7 +36,6 @@ func initConfig(c *cfg) {
 		openedCacheSize: defaultOpenedCacheSize,
 		blzShallowDepth: defaultBlzShallowDepth,
 		blzShallowWidth: defaultBlzShallowWidth,
-		CConfig:         new(compression.CConfig),
 	}
 }

@@ -53,12 +52,6 @@ func WithPermissions(perm fs.FileMode) Option {
 	}
 }

-func WithCompressionConfig(cc *compression.CConfig) Option {
-	return func(c *cfg) {
-		c.CConfig = cc
-	}
-}
-
 func WithBlobovniczaShallowWidth(width uint64) Option {
 	return func(c *cfg) {
 		c.blzShallowWidth = width
@@ -1,12 +1,8 @@
 package blobstor

 import (
-	"encoding/hex"
-	"io/fs"
-	"path/filepath"
 	"sync"

-	"github.com/nspcc-dev/neofs-node/pkg/local_object_storage/blobstor/blobovniczatree"
 	"github.com/nspcc-dev/neofs-node/pkg/local_object_storage/blobstor/common"
 	"github.com/nspcc-dev/neofs-node/pkg/local_object_storage/blobstor/compression"
 	"github.com/nspcc-dev/neofs-node/pkg/local_object_storage/blobstor/fstree"
@@ -28,8 +24,6 @@ type BlobStor struct {

 	modeMtx sync.RWMutex
 	mode    mode.Mode
-
-	storage [2]SubStorage
 }

 type Info = fstree.Info
@@ -39,37 +33,12 @@ type Option func(*cfg)

 type cfg struct {
 	compression.CConfig
-
-	fsTreeDepth int
-	fsTreeInfo  fstree.Info
-
-	smallSizeLimit uint64
-
 	log *logger.Logger
-	blzOpts []blobovniczatree.Option
+	storage []SubStorage
 }

-const (
-	defaultShallowDepth = 4
-	defaultPerm         = 0700
-
-	defaultSmallSizeLimit = 1 << 20 // 1MB
-)
-
-const blobovniczaDir = "blobovnicza"
-
 func initConfig(c *cfg) {
-	*c = cfg{
-		fsTreeDepth: defaultShallowDepth,
-		fsTreeInfo: Info{
-			Permissions: defaultPerm,
-			RootPath:    "./",
-		},
-		smallSizeLimit: defaultSmallSizeLimit,
-		log:            zap.L(),
-	}
-	c.blzOpts = []blobovniczatree.Option{blobovniczatree.WithCompressionConfig(&c.CConfig)}
+	c.log = zap.L()
 }

 // New creates, initializes and returns new BlobStor instance.
@@ -81,23 +50,10 @@ func New(opts ...Option) *BlobStor {
 		opts[i](&bs.cfg)
 	}

-	bs.storage[0].Storage = blobovniczatree.NewBlobovniczaTree(bs.blzOpts...)
-	bs.storage[0].Policy = func(_ *objectSDK.Object, data []byte) bool {
-		return uint64(len(data)) <= bs.cfg.smallSizeLimit
-	}
-
-	bs.storage[1].Storage = &fstree.FSTree{
-		Info:       bs.cfg.fsTreeInfo,
-		Depth:      bs.cfg.fsTreeDepth,
-		DirNameLen: hex.EncodedLen(fstree.DirNameLen),
-		CConfig:    &bs.cfg.CConfig,
-	}
-	bs.storage[1].Policy = func(*objectSDK.Object, []byte) bool {
-		return true
-	}
-
-	bs.blzOpts = nil
+	for i := range bs.storage {
+		bs.storage[i].Storage.SetCompressor(&bs.CConfig)
+	}

 	return bs
 }
@@ -106,17 +62,17 @@ func (b *BlobStor) SetLogger(l *zap.Logger) {
 	b.log = l
 }

-// WithShallowDepth returns option to set the
-// depth of the object file subdirectory tree.
-//
-// Depth is reduced to maximum value in case of overflow.
-func WithShallowDepth(depth int) Option {
+// WithStorages provides sub-blobstors.
+func WithStorages(st []SubStorage) Option {
 	return func(c *cfg) {
-		if depth > fstree.MaxDepth {
-			depth = fstree.MaxDepth
-		}
-
-		c.fsTreeDepth = depth
+		c.storage = st
+	}
+}
+
+// WithLogger returns option to specify BlobStor's logger.
+func WithLogger(l *logger.Logger) Option {
+	return func(c *cfg) {
+		c.log = l.With(zap.String("component", "BlobStor"))
 	}
 }
@@ -141,70 +97,3 @@ func WithUncompressableContentTypes(values []string) Option {
 		c.UncompressableContentTypes = values
 	}
 }
-
-// WithRootPath returns option to set path to root directory
-// of the fs tree to write the objects.
-func WithRootPath(rootDir string) Option {
-	return func(c *cfg) {
-		c.fsTreeInfo.RootPath = rootDir
-		c.blzOpts = append(c.blzOpts, blobovniczatree.WithRootPath(filepath.Join(rootDir, blobovniczaDir)))
-	}
-}
-
-// WithRootPerm returns option to set permission
-// bits of the fs tree.
-func WithRootPerm(perm fs.FileMode) Option {
-	return func(c *cfg) {
-		c.fsTreeInfo.Permissions = perm
-		c.blzOpts = append(c.blzOpts, blobovniczatree.WithPermissions(perm))
-	}
-}
-
-// WithSmallSizeLimit returns option to set maximum size of
-// "small" object.
-func WithSmallSizeLimit(lim uint64) Option {
-	return func(c *cfg) {
-		c.smallSizeLimit = lim
-		c.blzOpts = append(c.blzOpts, blobovniczatree.WithObjectSizeLimit(lim))
-	}
-}
-
-// WithLogger returns option to specify BlobStor's logger.
-func WithLogger(l *logger.Logger) Option {
-	return func(c *cfg) {
-		c.log = l.With(zap.String("component", "BlobStor"))
-		c.blzOpts = append(c.blzOpts, blobovniczatree.WithLogger(c.log))
-	}
-}
-
-// WithBlobovniczaShallowDepth returns option to specify
-// depth of blobovnicza directories.
-func WithBlobovniczaShallowDepth(d uint64) Option {
-	return func(c *cfg) {
-		c.blzOpts = append(c.blzOpts, blobovniczatree.WithBlobovniczaShallowDepth(d))
-	}
-}
-
-// WithBlobovniczaShallowWidth returns option to specify
-// width of blobovnicza directories.
-func WithBlobovniczaShallowWidth(w uint64) Option {
-	return func(c *cfg) {
-		c.blzOpts = append(c.blzOpts, blobovniczatree.WithBlobovniczaShallowWidth(w))
-	}
-}
-
-// WithBlobovniczaOpenedCacheSize return option to specify
-// maximum number of opened non-active blobovnicza's.
-func WithBlobovniczaOpenedCacheSize(sz int) Option {
-	return func(c *cfg) {
-		c.blzOpts = append(c.blzOpts, blobovniczatree.WithOpenedCacheSize(sz))
-	}
-}
-
-// WithBlobovniczaSize returns option to specify maximum volume
-// of each blobovnicza.
-func WithBlobovniczaSize(sz uint64) Option {
-	return func(c *cfg) {
-		c.blzOpts = append(c.blzOpts, blobovniczatree.WithBlobovniczaSize(sz))
-	}
-}
@@ -2,14 +2,35 @@ package blobstor

 import (
 	"os"
+	"path/filepath"
 	"testing"

 	"github.com/nspcc-dev/neofs-node/pkg/core/object"
+	"github.com/nspcc-dev/neofs-node/pkg/local_object_storage/blobstor/blobovniczatree"
 	"github.com/nspcc-dev/neofs-node/pkg/local_object_storage/blobstor/common"
+	"github.com/nspcc-dev/neofs-node/pkg/local_object_storage/blobstor/fstree"
 	objectSDK "github.com/nspcc-dev/neofs-sdk-go/object"
 	"github.com/stretchr/testify/require"
 )

+const blobovniczaDir = "blobovniczas"
+
+func defaultStorages(p string, smallSizeLimit uint64) []SubStorage {
+	return []SubStorage{
+		{
+			Storage: blobovniczatree.NewBlobovniczaTree(
+				blobovniczatree.WithRootPath(filepath.Join(p, "blobovniczas")),
+				blobovniczatree.WithBlobovniczaShallowWidth(1)), // default width is 16, slow init
+			Policy: func(_ *objectSDK.Object, data []byte) bool {
+				return uint64(len(data)) <= smallSizeLimit
+			},
+		},
+		{
+			Storage: fstree.New(fstree.WithPath(p)),
+		},
+	}
+}
+
 func TestCompression(t *testing.T) {
 	dir, err := os.MkdirTemp("", "neofs*")
 	require.NoError(t, err)
@@ -21,10 +42,9 @@ func TestCompression(t *testing.T) {
 	)

 	newBlobStor := func(t *testing.T, compress bool) *BlobStor {
-		bs := New(WithCompressObjects(compress),
-			WithRootPath(dir),
-			WithSmallSizeLimit(smallSizeLimit),
-			WithBlobovniczaShallowWidth(1)) // default width is 16, slow init
+		bs := New(
+			WithCompressObjects(compress),
+			WithStorages(defaultStorages(dir, smallSizeLimit)))
 		require.NoError(t, bs.Open(false))
 		require.NoError(t, bs.Init())
 		return bs
@@ -86,11 +106,22 @@ func TestBlobstor_needsCompression(t *testing.T) {
 		require.NoError(t, err)
 		t.Cleanup(func() { _ = os.RemoveAll(dir) })

-		bs := New(WithCompressObjects(compress),
-			WithRootPath(dir),
-			WithSmallSizeLimit(smallSizeLimit),
-			WithBlobovniczaShallowWidth(1),
-			WithUncompressableContentTypes(ct))
+		bs := New(
+			WithCompressObjects(compress),
+			WithUncompressableContentTypes(ct),
+			WithStorages([]SubStorage{
+				{
+					Storage: blobovniczatree.NewBlobovniczaTree(
+						blobovniczatree.WithRootPath(filepath.Join(dir, "blobovnicza")),
+						blobovniczatree.WithBlobovniczaShallowWidth(1)), // default width is 16, slow init
+					Policy: func(_ *objectSDK.Object, data []byte) bool {
+						return uint64(len(data)) < smallSizeLimit
+					},
+				},
+				{
+					Storage: fstree.New(fstree.WithPath(dir)),
+				},
+			}))
 		require.NoError(t, bs.Open(false))
 		require.NoError(t, bs.Init())
 		return bs
@@ -1,5 +1,7 @@
 package common

+import "github.com/nspcc-dev/neofs-node/pkg/local_object_storage/blobstor/compression"
+
 // Storage represents key-value object storage.
 // It is used as a building block for a blobstor of a shard.
 type Storage interface {
@@ -8,6 +10,7 @@ type Storage interface {
 	Close() error

 	Type() string
+	SetCompressor(cc *compression.CConfig)

 	Get(GetPrm) (GetRes, error)
 	GetRange(GetRangePrm) (GetRangeRes, error)
@@ -31,6 +31,10 @@ var ErrInitBlobovniczas = errors.New("failure on blobovnicza initialization stage")
 func (b *BlobStor) Init() error {
 	b.log.Debug("initializing...")

+	if err := b.CConfig.Init(); err != nil {
+		return err
+	}
+
 	for i := range b.storage {
 		err := b.storage[i].Storage.Init()
 		if err != nil {
@@ -55,5 +59,10 @@ func (b *BlobStor) Close() error {
 			continue
 		}
 	}
+
+	err := b.CConfig.Close()
+	if firstErr == nil {
+		firstErr = err
+	}
 	return firstErr
 }
@@ -26,7 +26,7 @@ func (b *BlobStor) Delete(prm common.DeletePrm) (common.DeleteRes, error) {
 		}
 	}
 	if len(prm.StorageID) == 0 {
-		return b.storage[1].Storage.Delete(prm)
+		return b.storage[len(b.storage)-1].Storage.Delete(prm)
 	}
 	return b.storage[0].Storage.Delete(prm)
 }
@@ -20,9 +20,8 @@ func TestExists(t *testing.T) {

 	const smallSizeLimit = 512

-	b := New(WithRootPath(dir),
-		WithSmallSizeLimit(smallSizeLimit),
-		WithBlobovniczaShallowWidth(1)) // default width is 16, slow init
+	b := New(
+		WithStorages(defaultStorages(dir, smallSizeLimit)))
 	require.NoError(t, b.Open(false))
 	require.NoError(t, b.Init())

@@ -65,7 +64,7 @@ func TestExists(t *testing.T) {
 	require.NotEmpty(t, bigDir)

 	require.NoError(t, os.Chmod(dir, 0))
-	t.Cleanup(func() { require.NoError(t, os.Chmod(dir, b.fsTreeInfo.Permissions)) })
+	t.Cleanup(func() { require.NoError(t, os.Chmod(dir, 0777)) })

 	// Object exists, first error is logged.
 	prm.Address = objectCore.AddressOf(objects[0])
@@ -1,10 +1,16 @@
 package fstree

+import (
+	"github.com/nspcc-dev/neofs-node/pkg/util"
+)
+
 // Open implements common.Storage.
 func (*FSTree) Open(bool) error { return nil }

 // Init implements common.Storage.
-func (*FSTree) Init() error { return nil }
+func (t *FSTree) Init() error {
+	return util.MkdirAllX(t.RootPath, t.Permissions)
+}

 // Close implements common.Storage.
 func (*FSTree) Close() error { return nil }
@@ -45,6 +45,23 @@ const (

 var _ common.Storage = (*FSTree)(nil)

+func New(opts ...Option) *FSTree {
+	f := &FSTree{
+		Info: Info{
+			Permissions: 0700,
+			RootPath:    "./",
+		},
+		CConfig:    nil,
+		Depth:      4,
+		DirNameLen: DirNameLen,
+	}
+	for i := range opts {
+		opts[i](f)
+	}
+
+	return f
+}
+
 func stringifyAddress(addr oid.Address) string {
 	return addr.Object().EncodeToString() + "." + addr.Container().EncodeToString()
 }
@@ -299,3 +316,8 @@ func (t *FSTree) NumberOfObjects() (uint64, error) {
 func (*FSTree) Type() string {
 	return "fstree"
 }
+
+// SetCompressor implements common.Storage.
+func (t *FSTree) SetCompressor(cc *compression.CConfig) {
+	t.CConfig = cc
+}
pkg/local_object_storage/blobstor/fstree/option.go (new file, 31 lines)
@@ -0,0 +1,31 @@
+package fstree
+
+import (
+	"io/fs"
+)
+
+type Option func(*FSTree)
+
+func WithDepth(d int) Option {
+	return func(f *FSTree) {
+		f.Depth = d
+	}
+}
+
+func WithDirNameLen(l int) Option {
+	return func(f *FSTree) {
+		f.DirNameLen = l
+	}
+}
+
+func WithPerm(p fs.FileMode) Option {
+	return func(f *FSTree) {
+		f.Permissions = p
+	}
+}
+
+func WithPath(p string) Option {
+	return func(f *FSTree) {
+		f.RootPath = p
+	}
+}
@@ -23,7 +23,7 @@ func (b *BlobStor) Get(prm common.GetPrm) (common.GetRes, error) {
 		return common.GetRes{}, errNotFound
 	}
 	if len(prm.StorageID) == 0 {
-		return b.storage[1].Storage.Get(prm)
+		return b.storage[len(b.storage)-1].Storage.Get(prm)
 	}
 	return b.storage[0].Storage.Get(prm)
 }
@@ -23,7 +23,7 @@ func (b *BlobStor) GetRange(prm common.GetRangePrm) (common.GetRangeRes, error) {
 		return common.GetRangeRes{}, errNotFound
 	}
 	if len(prm.StorageID) == 0 {
-		return b.storage[1].Storage.GetRange(prm)
+		return b.storage[len(b.storage)-1].Storage.GetRange(prm)
 	}
 	return b.storage[0].Storage.GetRange(prm)
 }
@@ -4,5 +4,10 @@ import "github.com/nspcc-dev/neofs-node/pkg/local_object_storage/blobstor/fstree"

 // DumpInfo returns information about blob stor.
 func (b *BlobStor) DumpInfo() fstree.Info {
-	return b.cfg.fsTreeInfo
+	for i := range b.storage {
+		if b.storage[i].Storage.Type() == "fstree" {
+			return b.storage[i].Storage.(*fstree.FSTree).Info
+		}
+	}
+	return fstree.Info{}
 }
@@ -19,7 +19,7 @@ func (b *BlobStor) Iterate(prm common.IteratePrm) (common.IterateRes, error) {
 	for i := range b.storage {
 		_, err := b.storage[i].Storage.Iterate(prm)
 		if err != nil && !prm.IgnoreErrors {
-			return common.IterateRes{}, fmt.Errorf("blobovnizas iterator failure: %w", err)
+			return common.IterateRes{}, fmt.Errorf("blobstor iterator failure: %w", err)
 		}
 	}
 	return common.IterateRes{}, nil
@@ -18,11 +18,8 @@ func TestIterateObjects(t *testing.T) {

 	// create BlobStor instance
 	blobStor := New(
+		WithStorages(defaultStorages(p, smalSz)),
 		WithCompressObjects(true),
-		WithRootPath(p),
-		WithSmallSizeLimit(smalSz),
-		WithBlobovniczaShallowWidth(1),
-		WithBlobovniczaShallowDepth(1),
 	)

 	defer os.RemoveAll(p)
@@ -33,7 +33,7 @@ func (b *BlobStor) Put(prm common.PutPrm) (common.PutRes, error) {
 	}

 	for i := range b.storage {
-		if b.storage[i].Policy(prm.Object, prm.RawData) {
+		if b.storage[i].Policy == nil || b.storage[i].Policy(prm.Object, prm.RawData) {
 			res, err := b.storage[i].Storage.Put(prm)
 			if err == nil {
 				storagelog.Write(b.log,
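With the nil-Policy fallback in Put, a typical configuration registers the blobovnicza tree first with a size-based policy and leaves the final fstree entry without a policy, so it accepts everything that falls through. A sketch assembled from the option names used in this commit, assuming blobstor.New keeps the constructor shape seen in its tests; the path, widths, and smallSize threshold are placeholders:

package main

import (
	"path/filepath"

	"github.com/nspcc-dev/neofs-node/pkg/local_object_storage/blobstor"
	"github.com/nspcc-dev/neofs-node/pkg/local_object_storage/blobstor/blobovniczatree"
	"github.com/nspcc-dev/neofs-node/pkg/local_object_storage/blobstor/fstree"
	objectSDK "github.com/nspcc-dev/neofs-sdk-go/object"
)

func newBlobStor(root string, smallSize uint64) *blobstor.BlobStor {
	return blobstor.New(
		blobstor.WithCompressObjects(true),
		blobstor.WithStorages([]blobstor.SubStorage{
			{
				// Small objects are routed to the blobovnicza tree by the Policy.
				Storage: blobovniczatree.NewBlobovniczaTree(
					blobovniczatree.WithRootPath(filepath.Join(root, "blobovnicza")),
					blobovniczatree.WithBlobovniczaShallowDepth(1),
					blobovniczatree.WithBlobovniczaShallowWidth(1)),
				Policy: func(_ *objectSDK.Object, data []byte) bool {
					return uint64(len(data)) < smallSize
				},
			},
			{
				// No Policy: with the nil check above, fstree accepts
				// everything that did not match an earlier sub-storage.
				Storage: fstree.New(fstree.WithPath(root)),
			},
		}),
	)
}

func main() {
	_ = newBlobStor("/tmp/blob", 1<<20) // illustrative values
}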
@@ -8,6 +8,8 @@ import (
 	"testing"

 	"github.com/nspcc-dev/neofs-node/pkg/local_object_storage/blobstor"
+	"github.com/nspcc-dev/neofs-node/pkg/local_object_storage/blobstor/blobovniczatree"
+	"github.com/nspcc-dev/neofs-node/pkg/local_object_storage/blobstor/fstree"
 	meta "github.com/nspcc-dev/neofs-node/pkg/local_object_storage/metabase"
 	"github.com/nspcc-dev/neofs-node/pkg/local_object_storage/pilorama"
 	"github.com/nspcc-dev/neofs-node/pkg/local_object_storage/shard"
@@ -94,6 +96,26 @@ func testNewEngineWithShards(shards ...*shard.Shard) *StorageEngine {
 	return engine
 }

+func newStorages(root string, smallSize uint64) []blobstor.SubStorage {
+	return []blobstor.SubStorage{
+		{
+			Storage: blobovniczatree.NewBlobovniczaTree(
+				blobovniczatree.WithRootPath(filepath.Join(root, "blobovnicza")),
+				blobovniczatree.WithBlobovniczaShallowDepth(1),
+				blobovniczatree.WithBlobovniczaShallowWidth(1),
+				blobovniczatree.WithPermissions(0700)),
+			Policy: func(_ *object.Object, data []byte) bool {
+				return uint64(len(data)) < smallSize
+			},
+		},
+		{
+			Storage: fstree.New(
+				fstree.WithPath(root),
+				fstree.WithDepth(1)),
+		},
+	}
+}
+
 func testNewShard(t testing.TB, id int) *shard.Shard {
 	sid, err := generateShardID()
 	require.NoError(t, err)
@@ -102,11 +124,9 @@ func testNewShard(t testing.TB, id int) *shard.Shard {
 		shard.WithID(sid),
 		shard.WithLogger(zap.L()),
 		shard.WithBlobStorOptions(
-			blobstor.WithRootPath(filepath.Join(t.Name(), fmt.Sprintf("%d.blobstor", id))),
-			blobstor.WithBlobovniczaShallowWidth(2),
-			blobstor.WithBlobovniczaShallowDepth(2),
-			blobstor.WithRootPerm(0700),
-		),
+			blobstor.WithStorages(
+				newStorages(filepath.Join(t.Name(), fmt.Sprintf("%d.blobstor", id)),
+					1<<20))),
 		shard.WithPiloramaOptions(pilorama.WithPath(filepath.Join(t.Name(), fmt.Sprintf("%d.pilorama", id)))),
 		shard.WithMetaBaseOptions(
 			meta.WithPath(filepath.Join(t.Name(), fmt.Sprintf("%d.metabase", id))),
@@ -125,10 +145,9 @@ func testEngineFromShardOpts(t *testing.T, num int, extraOpts func(int) []shard.
 	for i := 0; i < num; i++ {
 		_, err := engine.AddShard(append([]shard.Option{
 			shard.WithBlobStorOptions(
-				blobstor.WithRootPath(filepath.Join(t.Name(), fmt.Sprintf("blobstor%d", i))),
-				blobstor.WithBlobovniczaShallowWidth(1),
-				blobstor.WithBlobovniczaShallowDepth(1),
-				blobstor.WithRootPerm(0700),
+				blobstor.WithStorages(
+					newStorages(filepath.Join(t.Name(), fmt.Sprintf("blobstor%d", i)),
+						1<<20)),
 			),
 			shard.WithMetaBaseOptions(
 				meta.WithPath(filepath.Join(t.Name(), fmt.Sprintf("metabase%d", i))),
@@ -43,12 +43,7 @@ func newEngineWithErrorThreshold(t testing.TB, dir string, errThreshold uint32)
 		ids[i], err = e.AddShard(
 			shard.WithLogger(zaptest.NewLogger(t)),
 			shard.WithBlobStorOptions(
-				blobstor.WithRootPath(filepath.Join(dir, strconv.Itoa(i))),
-				blobstor.WithShallowDepth(1),
-				blobstor.WithBlobovniczaShallowWidth(1),
-				blobstor.WithBlobovniczaShallowDepth(1),
-				blobstor.WithSmallSizeLimit(errSmallSize),
-				blobstor.WithRootPerm(0700)),
+				blobstor.WithStorages(newStorages(filepath.Join(dir, strconv.Itoa(i)), errSmallSize))),
 			shard.WithMetaBaseOptions(
 				meta.WithPath(filepath.Join(dir, fmt.Sprintf("%d.metabase", i))),
 				meta.WithPermissions(0700),
@@ -39,11 +39,14 @@ func TestShardOpen(t *testing.T) {
 		return New(
 			WithLogger(zaptest.NewLogger(t)),
 			WithBlobStorOptions(
-				blobstor.WithRootPath(filepath.Join(dir, "blob")),
-				blobstor.WithShallowDepth(1),
-				blobstor.WithSmallSizeLimit(1),
-				blobstor.WithBlobovniczaShallowWidth(1),
-				blobstor.WithBlobovniczaShallowDepth(1)),
+				blobstor.WithStorages([]blobstor.SubStorage{
+					{
+						Storage: fstree.New(
+							fstree.WithDirNameLen(2),
+							fstree.WithPath(filepath.Join(dir, "blob")),
+							fstree.WithDepth(1)),
+					},
+				})),
 			WithMetaBaseOptions(meta.WithPath(metaPath), meta.WithEpochState(epochState{})),
 			WithPiloramaOptions(
 				pilorama.WithPath(filepath.Join(dir, "pilorama"))),
@@ -81,11 +84,15 @@ func TestRefillMetabaseCorrupted(t *testing.T) {
 	dir := t.TempDir()

 	blobOpts := []blobstor.Option{
-		blobstor.WithRootPath(filepath.Join(dir, "blob")),
-		blobstor.WithShallowDepth(1),
-		blobstor.WithSmallSizeLimit(1),
-		blobstor.WithBlobovniczaShallowWidth(1),
-		blobstor.WithBlobovniczaShallowDepth(1)}
+		blobstor.WithStorages([]blobstor.SubStorage{
+			{
+				Storage: fstree.New(
+					fstree.WithDirNameLen(2),
+					fstree.WithPath(filepath.Join(dir, "blob")),
+					fstree.WithDepth(1)),
+			},
+		}),
+	}

 	sh := New(
 		WithBlobStorOptions(blobOpts...),
@@ -134,9 +141,13 @@ func TestRefillMetabase(t *testing.T) {
 	defer os.RemoveAll(p)

 	blobOpts := []blobstor.Option{
-		blobstor.WithRootPath(filepath.Join(p, "blob")),
-		blobstor.WithBlobovniczaShallowWidth(1),
-		blobstor.WithBlobovniczaShallowDepth(1),
+		blobstor.WithStorages([]blobstor.SubStorage{
+			{
+				Storage: fstree.New(
+					fstree.WithPath(filepath.Join(p, "blob")),
+					fstree.WithDepth(1)),
+			},
+		}),
 	}

 	sh := New(
@@ -13,6 +13,8 @@ import (
 	"github.com/nspcc-dev/neofs-node/pkg/core/object"
 	"github.com/nspcc-dev/neofs-node/pkg/local_object_storage/blobovnicza"
 	"github.com/nspcc-dev/neofs-node/pkg/local_object_storage/blobstor"
+	"github.com/nspcc-dev/neofs-node/pkg/local_object_storage/blobstor/blobovniczatree"
+	"github.com/nspcc-dev/neofs-node/pkg/local_object_storage/blobstor/fstree"
 	"github.com/nspcc-dev/neofs-node/pkg/local_object_storage/shard"
 	"github.com/nspcc-dev/neofs-node/pkg/local_object_storage/shard/mode"
 	"github.com/nspcc-dev/neofs-node/pkg/local_object_storage/writecache"
@@ -52,9 +54,7 @@ func testDump(t *testing.T, objCount int, hasWriteCache bool) {
 				writecache.WithMaxObjectSize(wcBigObjectSize),
 				writecache.WithLogger(zaptest.NewLogger(t)),
 			},
-			[]blobstor.Option{
-				blobstor.WithLogger(zaptest.NewLogger(t)),
-			})
+			nil)
 	}
 	defer releaseShard(sh, t)

@@ -186,7 +186,7 @@ func testDump(t *testing.T, objCount int, hasWriteCache bool) {

 	t.Run("skip errors", func(t *testing.T) {
 		sh := newCustomShard(t, filepath.Join(t.TempDir(), "ignore"), false, nil, nil)
-		defer releaseShard(sh, t)
+		t.Cleanup(func() { require.NoError(t, sh.Close()) })

 		var restorePrm shard.RestorePrm
 		restorePrm.WithPath(out)
@@ -292,14 +292,27 @@ func TestDumpIgnoreErrors(t *testing.T) {

 	dir := t.TempDir()
 	bsPath := filepath.Join(dir, "blob")
-	bsOpts := []blobstor.Option{
-		blobstor.WithSmallSizeLimit(bsSmallObjectSize),
-		blobstor.WithRootPath(bsPath),
+	bsOpts := func(sw uint64) []blobstor.Option {
+		return []blobstor.Option{
 			blobstor.WithCompressObjects(true),
-		blobstor.WithShallowDepth(1),
-		blobstor.WithBlobovniczaShallowDepth(1),
-		blobstor.WithBlobovniczaShallowWidth(2),
-		blobstor.WithBlobovniczaOpenedCacheSize(1),
+			blobstor.WithStorages([]blobstor.SubStorage{
+				{
+					Storage: blobovniczatree.NewBlobovniczaTree(
+						blobovniczatree.WithRootPath(filepath.Join(bsPath, "blobovnicza")),
+						blobovniczatree.WithBlobovniczaShallowDepth(1),
+						blobovniczatree.WithBlobovniczaShallowWidth(sw),
+						blobovniczatree.WithOpenedCacheSize(1)),
+					Policy: func(_ *objectSDK.Object, data []byte) bool {
+						return len(data) < bsSmallObjectSize
+					},
+				},
+				{
+					Storage: fstree.New(
+						fstree.WithPath(bsPath),
+						fstree.WithDepth(1)),
+				},
+			}),
+		}
 	}
 	wcPath := filepath.Join(dir, "writecache")
 	wcOpts := []writecache.Option{
|
||||||
writecache.WithSmallObjectSize(wcSmallObjectSize),
|
writecache.WithSmallObjectSize(wcSmallObjectSize),
|
||||||
writecache.WithMaxObjectSize(wcBigObjectSize),
|
writecache.WithMaxObjectSize(wcBigObjectSize),
|
||||||
}
|
}
|
||||||
sh := newCustomShard(t, dir, true, wcOpts, bsOpts)
|
sh := newCustomShard(t, dir, true, wcOpts, bsOpts(2))
|
||||||
|
|
||||||
objects := make([]*objectSDK.Object, objCount)
|
objects := make([]*objectSDK.Object, objCount)
|
||||||
for i := 0; i < objCount; i++ {
|
for i := 0; i < objCount; i++ {
|
||||||
|
@ -355,8 +368,7 @@ func TestDumpIgnoreErrors(t *testing.T) {
|
||||||
require.NoError(t, os.MkdirAll(filepath.Join(bsPath, "ZZ"), 0))
|
require.NoError(t, os.MkdirAll(filepath.Join(bsPath, "ZZ"), 0))
|
||||||
}
|
}
|
||||||
|
|
||||||
bsOpts = append(bsOpts, blobstor.WithBlobovniczaShallowWidth(3))
|
sh = newCustomShard(t, dir, true, wcOpts, bsOpts(3))
|
||||||
sh = newCustomShard(t, dir, true, wcOpts, bsOpts)
|
|
||||||
require.NoError(t, sh.SetMode(mode.ReadOnly))
|
require.NoError(t, sh.SetMode(mode.ReadOnly))
|
||||||
|
|
||||||
{
|
{
|
||||||
|
|
|
@@ -7,6 +7,8 @@ import (

 	objectcore "github.com/nspcc-dev/neofs-node/pkg/core/object"
 	"github.com/nspcc-dev/neofs-node/pkg/local_object_storage/blobstor"
+	"github.com/nspcc-dev/neofs-node/pkg/local_object_storage/blobstor/blobovniczatree"
+	"github.com/nspcc-dev/neofs-node/pkg/local_object_storage/blobstor/fstree"
 	meta "github.com/nspcc-dev/neofs-node/pkg/local_object_storage/metabase"
 	"github.com/nspcc-dev/neofs-node/pkg/local_object_storage/shard"
 	apistatus "github.com/nspcc-dev/neofs-sdk-go/client/status"
@@ -24,11 +26,21 @@ func TestShard_Lock(t *testing.T) {
 	opts := []shard.Option{
 		shard.WithLogger(zap.NewNop()),
 		shard.WithBlobStorOptions(
-			[]blobstor.Option{
-				blobstor.WithRootPath(filepath.Join(rootPath, "blob")),
-				blobstor.WithBlobovniczaShallowWidth(2),
-				blobstor.WithBlobovniczaShallowDepth(2),
-			}...,
+			blobstor.WithStorages([]blobstor.SubStorage{
+				{
+					Storage: blobovniczatree.NewBlobovniczaTree(
+						blobovniczatree.WithRootPath(filepath.Join(rootPath, "blob", "blobovnicza")),
+						blobovniczatree.WithBlobovniczaShallowDepth(2),
+						blobovniczatree.WithBlobovniczaShallowWidth(2)),
+					Policy: func(_ *object.Object, data []byte) bool {
+						return len(data) <= 1<<20
+					},
+				},
+				{
+					Storage: fstree.New(
+						fstree.WithPath(filepath.Join(rootPath, "blob"))),
+				},
+			}),
 		),
 		shard.WithMetaBaseOptions(
 			meta.WithPath(filepath.Join(rootPath, "meta")),
@@ -2,16 +2,20 @@ package shard_test

 import (
 	"math"
+	"path/filepath"
 	"testing"

 	"github.com/nspcc-dev/neo-go/pkg/util/slice"
 	"github.com/nspcc-dev/neofs-node/pkg/core/object"
 	"github.com/nspcc-dev/neofs-node/pkg/local_object_storage/blobstor"
+	"github.com/nspcc-dev/neofs-node/pkg/local_object_storage/blobstor/blobovniczatree"
+	"github.com/nspcc-dev/neofs-node/pkg/local_object_storage/blobstor/fstree"
 	"github.com/nspcc-dev/neofs-node/pkg/local_object_storage/shard"
 	"github.com/nspcc-dev/neofs-node/pkg/local_object_storage/writecache"
 	apistatus "github.com/nspcc-dev/neofs-sdk-go/client/status"
 	objectSDK "github.com/nspcc-dev/neofs-sdk-go/object"
 	"github.com/stretchr/testify/require"
+	"go.uber.org/zap/zaptest"
 )

 func TestShard_GetRange(t *testing.T) {
@@ -62,7 +66,22 @@ func testShardGetRange(t *testing.T, hasWriteCache bool) {

 	sh := newCustomShard(t, t.TempDir(), hasWriteCache,
 		[]writecache.Option{writecache.WithMaxMemSize(0), writecache.WithMaxObjectSize(writeCacheMaxSize)},
-		[]blobstor.Option{blobstor.WithSmallSizeLimit(smallObjectSize)})
+		[]blobstor.Option{blobstor.WithStorages([]blobstor.SubStorage{
+			{
+				Storage: blobovniczatree.NewBlobovniczaTree(
+					blobovniczatree.WithLogger(zaptest.NewLogger(t)),
+					blobovniczatree.WithRootPath(filepath.Join(t.TempDir(), "blob", "blobovnicza")),
+					blobovniczatree.WithBlobovniczaShallowDepth(1),
+					blobovniczatree.WithBlobovniczaShallowWidth(1)),
+				Policy: func(_ *objectSDK.Object, data []byte) bool {
+					return len(data) <= smallObjectSize
+				},
+			},
+			{
+				Storage: fstree.New(
+					fstree.WithPath(filepath.Join(t.TempDir(), "blob"))),
+			},
+		})})
 	defer releaseShard(sh, t)

 	for _, tc := range testCases {
@@ -4,12 +4,12 @@ import (
 	"crypto/sha256"
 	"math"
 	"math/rand"
-	"os"
 	"path/filepath"
-	"strings"
 	"testing"

 	"github.com/nspcc-dev/neofs-node/pkg/local_object_storage/blobstor"
+	"github.com/nspcc-dev/neofs-node/pkg/local_object_storage/blobstor/blobovniczatree"
+	"github.com/nspcc-dev/neofs-node/pkg/local_object_storage/blobstor/fstree"
 	meta "github.com/nspcc-dev/neofs-node/pkg/local_object_storage/metabase"
 	"github.com/nspcc-dev/neofs-node/pkg/local_object_storage/pilorama"
 	"github.com/nspcc-dev/neofs-node/pkg/local_object_storage/shard"
@@ -24,6 +24,7 @@ import (
 	"github.com/nspcc-dev/tzhash/tz"
 	"github.com/stretchr/testify/require"
 	"go.uber.org/zap"
+	"go.uber.org/zap/zaptest"
 )

 type epochState struct{}
@@ -45,15 +46,31 @@ func newCustomShard(t testing.TB, rootPath string, enableWriteCache bool, wcOpts
 		rootPath = filepath.Join(rootPath, "nowc")
 	}

+	if bsOpts == nil {
+		bsOpts = []blobstor.Option{
+			blobstor.WithLogger(zaptest.NewLogger(t)),
+			blobstor.WithStorages([]blobstor.SubStorage{
+				{
+					Storage: blobovniczatree.NewBlobovniczaTree(
+						blobovniczatree.WithLogger(zaptest.NewLogger(t)),
+						blobovniczatree.WithRootPath(filepath.Join(rootPath, "blob", "blobovnicza")),
+						blobovniczatree.WithBlobovniczaShallowDepth(1),
+						blobovniczatree.WithBlobovniczaShallowWidth(1)),
+					Policy: func(_ *object.Object, data []byte) bool {
+						return len(data) <= 1<<20
+					},
+				},
+				{
+					Storage: fstree.New(
+						fstree.WithPath(filepath.Join(rootPath, "blob"))),
+				},
+			}),
+		}
+	}
+
 	opts := []shard.Option{
 		shard.WithLogger(zap.L()),
-		shard.WithBlobStorOptions(
-			append([]blobstor.Option{
-				blobstor.WithRootPath(filepath.Join(rootPath, "blob")),
-				blobstor.WithBlobovniczaShallowWidth(2),
-				blobstor.WithBlobovniczaShallowDepth(2),
-			}, bsOpts...)...,
-		),
+		shard.WithBlobStorOptions(bsOpts...),
 		shard.WithMetaBaseOptions(
 			meta.WithPath(filepath.Join(rootPath, "meta")),
 			meta.WithEpochState(epochState{}),
@@ -76,8 +93,7 @@ func newCustomShard(t testing.TB, rootPath string, enableWriteCache bool, wcOpts
 }

 func releaseShard(s *shard.Shard, t testing.TB) {
-	s.Close()
-	os.RemoveAll(strings.Split(t.Name(), string(os.PathSeparator))[0])
+	require.NoError(t, s.Close())
 }

 func generateObject(t *testing.T) *object.Object {
@@ -27,7 +27,6 @@ func (c *cache) SetMode(m mode.Mode) error {
 		if err := c.db.Close(); err != nil {
 			return fmt.Errorf("can't close write-cache database: %w", err)
 		}
-		c.db = nil
 	}

 	// Suspend producers to ensure there are channel send operations in fly.