[#645] config: Add storage_engine parameter for blobovnicza
Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
Parent: f0d72870de
Commit: 22f3b51f69
4 changed files with 98 additions and 52 deletions
@@ -90,6 +90,15 @@ const maxMsgSize = 4 << 20 // transport msg limit 4 MiB
 // for each contract listener.
 const notificationHandlerPoolSize = 10
 
+const (
+	storageTypeBlobovnicza = "blobovnicza"
+	storageTypeFStree      = "fstree"
+
+	storageEngineUnspecified = ""
+	storageEngineBBolt       = "bbolt"
+	storageEngineBadger      = "badger"
+)
+
 // applicationConfiguration reads and stores component-specific configuration
 // values. It should not store any application helpers structs (pointers to shared
 // structs).
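The new constants centralize the substorage type names and enumerate the three accepted values of the new `storage_engine` parameter; the switches below use them in place of the per-package `Type` identifiers. As the later hunks show, an unset engine behaves like `bbolt`. A minimal sketch of that fall-back rule (the helper is illustrative, not part of the diff):

```go
// effectiveEngine makes the default explicit: an unset storage_engine ("")
// selects the bbolt-backed blobovnicza tree, matching the parsing code below.
func effectiveEngine(engine string) string {
	if engine == storageEngineUnspecified {
		return storageEngineBBolt
	}
	return engine
}
```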
@@ -183,6 +192,7 @@ type subStorageCfg struct {
 	perm   fs.FileMode
 	depth  uint64
 	noSync bool
+	engine string
 
 	// blobovnicza-specific
 	size uint64
@@ -310,31 +320,36 @@ func (a *applicationConfiguration) setShardStorageConfig(newConfig *shardCfg, ol
 		sCfg.typ = storagesCfg[i].Type()
 		sCfg.path = storagesCfg[i].Path()
 		sCfg.perm = storagesCfg[i].Perm()
+		sCfg.engine = storagesCfg[i].StorageEngine()
 
 		switch storagesCfg[i].Type() {
-		case blobovniczatree.Type:
-			sub := blobovniczaconfig.From((*config.Config)(storagesCfg[i]))
+		case storageTypeBlobovnicza:
+			if sCfg.engine == storageEngineUnspecified || sCfg.engine == storageEngineBBolt {
+				sub := blobovniczaconfig.From((*config.Config)(storagesCfg[i]))
 				sCfg.size = sub.Size()
 				sCfg.depth = sub.ShallowDepth()
 				sCfg.width = sub.ShallowWidth()
 				sCfg.leafWidth = sub.LeafWidth()
 				sCfg.openedCacheSize = sub.OpenedCacheSize()
 				sCfg.initWorkerCount = sub.InitWorkerCount()
 				sCfg.initInAdvance = sub.InitInAdvance()
 				sCfg.rebuildDropTimeout = sub.RebuildDropTimeout()
-		case fstree.Type:
+			} else if sCfg.engine == storageEngineBadger {
+				sub := badgerstoreconfig.From((*config.Config)(storagesCfg[i]))
+				sCfg.indexCacheSize = sub.IndexCacheSize()
+				sCfg.memTablesCount = sub.MemTablesCount()
+				sCfg.compactorsCount = sub.CompactorsCount()
+				sCfg.gcInterval = sub.GCInterval()
+				sCfg.gcDiscardRatio = sub.GCDiscardRatio()
+				sCfg.valueLogFileSize = sub.ValueLogFileSize()
+			} else {
+				return fmt.Errorf("invalid storage engine: %s", sCfg.engine)
+			}
+		case storageTypeFStree:
 			sub := fstreeconfig.From((*config.Config)(storagesCfg[i]))
 			sCfg.depth = sub.Depth()
 			sCfg.noSync = sub.NoSync()
-		case badgerstore.Type:
-			sub := badgerstoreconfig.From((*config.Config)(storagesCfg[i]))
-			sCfg.indexCacheSize = sub.IndexCacheSize()
-			sCfg.memTablesCount = sub.MemTablesCount()
-			sCfg.compactorsCount = sub.CompactorsCount()
-			sCfg.gcInterval = sub.GCInterval()
-			sCfg.gcDiscardRatio = sub.GCDiscardRatio()
-			sCfg.valueLogFileSize = sub.ValueLogFileSize()
 		default:
 			return fmt.Errorf("invalid storage type: %s", storagesCfg[i].Type())
 		}
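The `blobovnicza` case now branches on the configured engine instead of adding a separate substorage type: `bbolt` (or an unset engine) keeps reading the blobovnicza-tree settings, `badger` reads the badger-store settings that previously lived under the removed `badgerstore.Type` case, and anything else is rejected. A compact sketch of that decision table, assuming the constants from the first hunk and an imported `fmt` (the helper name is hypothetical):

```go
// engineSubConfig mirrors the dispatch above: it names which sub-configuration
// section is parsed for a "blobovnicza" substorage with the given engine.
func engineSubConfig(engine string) (string, error) {
	switch engine {
	case storageEngineUnspecified, storageEngineBBolt:
		return "blobovnicza tree settings", nil
	case storageEngineBadger:
		return "badger store settings", nil
	default:
		return "", fmt.Errorf("invalid storage engine: %s", engine)
	}
}
```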
@@ -908,28 +923,30 @@ func (c *cfg) getSubstorageOpts(shCfg shardCfg) []blobstor.SubStorage {
 	var ss []blobstor.SubStorage
 	for _, sRead := range shCfg.subStorages {
 		switch sRead.typ {
-		case blobovniczatree.Type:
-			blobovniczaTreeOpts := c.getBlobovniczaTreeOpts(sRead)
-			ss = append(ss, blobstor.SubStorage{
-				Storage: blobovniczatree.NewBlobovniczaTree(blobovniczaTreeOpts...),
-				Policy: func(_ *objectSDK.Object, data []byte) bool {
-					return uint64(len(data)) < shCfg.smallSizeObjectLimit
-				},
-			})
-		case fstree.Type:
+		case storageTypeBlobovnicza:
+			if sRead.engine == storageEngineUnspecified || sRead.engine == storageEngineBBolt {
+				blobovniczaTreeOpts := c.getBlobovniczaTreeOpts(sRead)
+				ss = append(ss, blobstor.SubStorage{
+					Storage: blobovniczatree.NewBlobovniczaTree(blobovniczaTreeOpts...),
+					Policy: func(_ *objectSDK.Object, data []byte) bool {
+						return uint64(len(data)) < shCfg.smallSizeObjectLimit
+					},
+				})
+			} else if sRead.engine == storageEngineBadger {
+				badgerStoreOpts := c.getBadgerStoreOpts(sRead)
+				ss = append(ss, blobstor.SubStorage{
+					Storage: badgerstore.New(badgerStoreOpts...),
+					Policy: func(_ *objectSDK.Object, data []byte) bool {
+						return uint64(len(data)) < shCfg.smallSizeObjectLimit
+					},
+				})
+			}
+		case storageTypeFStree:
 			fstreeOpts := c.getFSTreeOpts(sRead)
 			ss = append(ss, blobstor.SubStorage{
 				Storage: fstree.New(fstreeOpts...),
-				Policy: func(_ *objectSDK.Object, _ []byte) bool {
-					return true
-				},
-			})
-		case badgerstore.Type:
-			badgerStoreOpts := c.getBadgerStoreOpts(sRead)
-			ss = append(ss, blobstor.SubStorage{
-				Storage: badgerstore.New(badgerStoreOpts...),
 				Policy: func(_ *objectSDK.Object, data []byte) bool {
-					return uint64(len(data)) < shCfg.smallSizeObjectLimit
+					return true
 				},
 			})
 		default:
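Both engines keep the same placement policy: objects smaller than the shard's small-object limit go to the `blobovnicza` substorage (whichever engine backs it), and everything else falls through to `fstree`, which accepts any object unconditionally. The shared predicate as a standalone sketch (names are illustrative):

```go
// smallObjectPolicy reproduces the size check used by both engine branches:
// an object belongs to the substorage only if it is below the limit.
func smallObjectPolicy(limit uint64) func(data []byte) bool {
	return func(data []byte) bool {
		return uint64(len(data)) < limit
	}
}
```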
@@ -53,3 +53,10 @@ func (x *Config) Perm() fs.FileMode {
 
 	return fs.FileMode(p)
 }
+
+// StorageEngine returns storage engine.
+func (x *Config) StorageEngine() string {
+	return config.String(
+		(*config.Config)(x),
+		"storage_engine")
+}
@@ -9,9 +9,6 @@ import (
 	shardconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard"
 	loggerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/logger"
 	treeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/tree"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/badgerstore"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree"
-	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
 )
@@ -61,7 +58,12 @@ func validateConfig(c *config.Config) error {
 		}
 		for i := range blobstor {
 			switch blobstor[i].Type() {
-			case fstree.Type, blobovniczatree.Type, badgerstore.Type:
+			case storageTypeBlobovnicza:
+				storageEngine := blobstor[i].StorageEngine()
+				if storageEngine != storageEngineUnspecified && storageEngine != storageEngineBBolt && storageEngine != storageEngineBadger {
+					return fmt.Errorf("unexpected storage engine: %s (shard %d)", storageEngine, shardNum)
+				}
+			case storageTypeFStree:
 			default:
 				return fmt.Errorf("unexpected storage type: %s (shard %d)", blobstor[i].Type(), shardNum)
 			}
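Validation accepts exactly three engine values for `blobovnicza` substorages, including the empty string for backward compatibility, and still rejects unknown substorage types. A table-driven sketch of that rule, assuming the constants from the first hunk and the standard `testing` package (this test is not part of the diff):

```go
func TestStorageEngineValues(t *testing.T) {
	for _, tc := range []struct {
		engine string
		valid  bool
	}{
		{storageEngineUnspecified, true}, // unset: falls back to bbolt
		{storageEngineBBolt, true},
		{storageEngineBadger, true},
		{"lmdb", false}, // any other engine must fail validation
	} {
		ok := tc.engine == storageEngineUnspecified ||
			tc.engine == storageEngineBBolt ||
			tc.engine == storageEngineBadger
		if ok != tc.valid {
			t.Errorf("engine %q: got %v, want %v", tc.engine, ok, tc.valid)
		}
	}
}
```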
@@ -227,17 +227,37 @@ blobstor:
 | `depth`             | `int`      | `4`           | File-system tree depth. |
 
 #### `blobovnicza` type options
-| Parameter               | Type       | Default value | Description                                                            |
-| ----------------------- | ---------- | ------------- | ---------------------------------------------------------------------- |
-| `path`                  | `string`   |               | Path to the root of the blobstor.                                      |
-| `perm`                  | file mode  | `0660`        | Default permission for created files and directories.                  |
-| `size`                  | `size`     | `1 G`         | Maximum size of a single blobovnicza.                                  |
-| `depth`                 | `int`      | `2`           | Blobovnicza tree depth.                                                |
-| `width`                 | `int`      | `16`          | Blobovnicza tree width.                                                |
-| `opened_cache_capacity` | `int`      | `16`          | Maximum number of simultaneously opened blobovniczas.                  |
-| `init_worker_count`     | `int`      | `5`           | Maximum number of concurrent initialization workers.                   |
-| `init_in_advance`       | `bool`     | `false`       | If `true`, then all the blobovnicza files will be created on startup.  |
-| `rebuild_drop_timeout`  | `duration` | `10s`         | Timeout before dropping an empty blobovnicza file during rebuild.      |
+| Parameter        | Type      | Default value | Description                                            |
+| ---------------- | --------- | ------------- | ------------------------------------------------------ |
+| `path`           | `string`  |               | Path to the root of the blobstor.                      |
+| `perm`           | file mode | `0660`        | Default permission for created files and directories.  |
+| `storage_engine` | `string`  |               | Storage engine for blobovnicza: `bbolt` or `badger`.   |
+
+##### `bbolt` engine options
+
+| Parameter               | Type       | Default value | Description |
+| ----------------------- | ---------- | ------------- | ----------- |
+| `size`                  | `size`     | `1 G`         | Maximum size of a single blobovnicza. |
+| `depth`                 | `int`      | `2`           | Blobovnicza tree depth. |
+| `width`                 | `int`      | `16`          | Blobovnicza tree width. |
+| `opened_cache_capacity` | `int`      | `16`          | Maximum number of simultaneously opened blobovniczas. |
+| `opened_cache_ttl`      | `duration` | `0`           | TTL in cache for opened blobovniczas (disabled by default). Under heavy random reads, with 10 shards of 10,000 databases each and 400 object accesses per second, each database is accessed roughly once per (10 * 10,000 / 400) = 250 seconds <= 300 seconds = 5 min. Also keep in mind that in this scenario databases will likely be closed earlier by the cache capacity limit, so larger values are unlikely to be of use. |
+| `init_worker_count`     | `int`      | `5`           | Maximum number of concurrent initialization workers. |
+| `init_in_advance`       | `bool`     | `false`       | If `true`, then all the blobovnicza files will be created on startup. |
+| `rebuild_drop_timeout`  | `duration` | `10s`         | Timeout before dropping an empty blobovnicza file during rebuild. |
+
+##### `badger` engine options
+
+| Parameter             | Type       | Default value | Description |
+| --------------------- | ---------- | ------------- | ----------- |
+| `index_cache_size`    | `size`     | `256 MB`      | How much memory should be used by badger table indices. |
+| `mem_tables_count`    | `int`      | `32`          | Maximum number of tables to keep in memory before stalling. |
+| `compactors_count`    | `int`      | `64`          | Number of compaction workers to run concurrently. |
+| `gc_interval`         | `duration` | `10 m`        | Delay between garbage collector runs. |
+| `gc_discard_percent`  | `uint`     | `20`          | The garbage collector rewrites a value log file if it can discard at least `gc_discard_percent` of that file's space. |
+| `value_log_file_size` | `size`     | `1 GB`        | Maximum size of a single value log file. |
 
 ### `gc` subsection
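Putting the pieces together, a shard that stores small objects in a Badger-backed blobovnicza could be configured as below. This YAML sketch follows the parameter tables above; the paths and option values are illustrative, and the surrounding shard layout is assumed from the existing configuration reference rather than this diff:

```yaml
blobstor:
  - type: blobovnicza
    path: /srv/frostfs/blob      # illustrative path
    perm: 0644
    storage_engine: badger       # new parameter; omit it (or set "bbolt") for the old engine
    mem_tables_count: 32         # badger engine options live in the same section
    gc_interval: 10m
  - type: fstree                 # larger objects fall through here
    path: /srv/frostfs/fstree
    depth: 4
```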