forked from TrueCloudLab/frostfs-node
[#168] node: Refactor node config read
Resolve funlen linter for readConfig method. Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
This commit is contained in:
parent
a7c79c773a
commit
3bbb516528
1 changed file with 101 additions and 86 deletions
|
@ -173,8 +173,6 @@ type subStorageCfg struct {
|
||||||
|
|
||||||
// readConfig fills applicationConfiguration with raw configuration values
|
// readConfig fills applicationConfiguration with raw configuration values
|
||||||
// not modifying them.
|
// not modifying them.
|
||||||
//
|
|
||||||
// nolint: funlen
|
|
||||||
func (a *applicationConfiguration) readConfig(c *config.Config) error {
|
func (a *applicationConfiguration) readConfig(c *config.Config) error {
|
||||||
if a._read {
|
if a._read {
|
||||||
err := c.Reload()
|
err := c.Reload()
|
||||||
|
@ -202,100 +200,117 @@ func (a *applicationConfiguration) readConfig(c *config.Config) error {
|
||||||
a.EngineCfg.errorThreshold = engineconfig.ShardErrorThreshold(c)
|
a.EngineCfg.errorThreshold = engineconfig.ShardErrorThreshold(c)
|
||||||
a.EngineCfg.shardPoolSize = engineconfig.ShardPoolSize(c)
|
a.EngineCfg.shardPoolSize = engineconfig.ShardPoolSize(c)
|
||||||
|
|
||||||
return engineconfig.IterateShards(c, false, func(sc *shardconfig.Config) error {
|
return engineconfig.IterateShards(c, false, func(sc *shardconfig.Config) error { return a.updateShardConfig(c, sc) })
|
||||||
var sh shardCfg
|
}
|
||||||
|
|
||||||
sh.refillMetabase = sc.RefillMetabase()
|
func (a *applicationConfiguration) updateShardConfig(c *config.Config, oldConfig *shardconfig.Config) error {
|
||||||
sh.mode = sc.Mode()
|
var newConfig shardCfg
|
||||||
sh.compress = sc.Compress()
|
|
||||||
sh.uncompressableContentType = sc.UncompressableContentTypes()
|
|
||||||
sh.smallSizeObjectLimit = sc.SmallSizeLimit()
|
|
||||||
|
|
||||||
// write-cache
|
newConfig.refillMetabase = oldConfig.RefillMetabase()
|
||||||
|
newConfig.mode = oldConfig.Mode()
|
||||||
|
newConfig.compress = oldConfig.Compress()
|
||||||
|
newConfig.uncompressableContentType = oldConfig.UncompressableContentTypes()
|
||||||
|
newConfig.smallSizeObjectLimit = oldConfig.SmallSizeLimit()
|
||||||
|
|
||||||
writeCacheCfg := sc.WriteCache()
|
a.setShardWriteCacheConfig(&newConfig, oldConfig)
|
||||||
if writeCacheCfg.Enabled() {
|
|
||||||
wc := &sh.writecacheCfg
|
|
||||||
|
|
||||||
wc.enabled = true
|
a.setShardPiloramaConfig(c, &newConfig, oldConfig)
|
||||||
wc.path = writeCacheCfg.Path()
|
|
||||||
wc.maxBatchSize = writeCacheCfg.BoltDB().MaxBatchSize()
|
if err := a.setShardStorageConfig(&newConfig, oldConfig); err != nil {
|
||||||
wc.maxBatchDelay = writeCacheCfg.BoltDB().MaxBatchDelay()
|
return err
|
||||||
wc.maxObjSize = writeCacheCfg.MaxObjectSize()
|
}
|
||||||
wc.smallObjectSize = writeCacheCfg.SmallObjectSize()
|
|
||||||
wc.flushWorkerCount = writeCacheCfg.WorkersNumber()
|
a.setMetabaseConfig(&newConfig, oldConfig)
|
||||||
wc.sizeLimit = writeCacheCfg.SizeLimit()
|
|
||||||
wc.noSync = writeCacheCfg.NoSync()
|
a.setGCConfig(&newConfig, oldConfig)
|
||||||
|
|
||||||
|
a.EngineCfg.shards = append(a.EngineCfg.shards, newConfig)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *applicationConfiguration) setShardWriteCacheConfig(newConfig *shardCfg, oldConfig *shardconfig.Config) {
|
||||||
|
writeCacheCfg := oldConfig.WriteCache()
|
||||||
|
if writeCacheCfg.Enabled() {
|
||||||
|
wc := &newConfig.writecacheCfg
|
||||||
|
|
||||||
|
wc.enabled = true
|
||||||
|
wc.path = writeCacheCfg.Path()
|
||||||
|
wc.maxBatchSize = writeCacheCfg.BoltDB().MaxBatchSize()
|
||||||
|
wc.maxBatchDelay = writeCacheCfg.BoltDB().MaxBatchDelay()
|
||||||
|
wc.maxObjSize = writeCacheCfg.MaxObjectSize()
|
||||||
|
wc.smallObjectSize = writeCacheCfg.SmallObjectSize()
|
||||||
|
wc.flushWorkerCount = writeCacheCfg.WorkersNumber()
|
||||||
|
wc.sizeLimit = writeCacheCfg.SizeLimit()
|
||||||
|
wc.noSync = writeCacheCfg.NoSync()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *applicationConfiguration) setShardPiloramaConfig(c *config.Config, newConfig *shardCfg, oldConfig *shardconfig.Config) {
|
||||||
|
if config.BoolSafe(c.Sub("tree"), "enabled") {
|
||||||
|
piloramaCfg := oldConfig.Pilorama()
|
||||||
|
pr := &newConfig.piloramaCfg
|
||||||
|
|
||||||
|
pr.enabled = true
|
||||||
|
pr.path = piloramaCfg.Path()
|
||||||
|
pr.perm = piloramaCfg.Perm()
|
||||||
|
pr.noSync = piloramaCfg.NoSync()
|
||||||
|
pr.maxBatchSize = piloramaCfg.MaxBatchSize()
|
||||||
|
pr.maxBatchDelay = piloramaCfg.MaxBatchDelay()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *applicationConfiguration) setShardStorageConfig(newConfig *shardCfg, oldConfig *shardconfig.Config) error {
|
||||||
|
blobStorCfg := oldConfig.BlobStor()
|
||||||
|
storagesCfg := blobStorCfg.Storages()
|
||||||
|
|
||||||
|
ss := make([]subStorageCfg, 0, len(storagesCfg))
|
||||||
|
for i := range storagesCfg {
|
||||||
|
var sCfg subStorageCfg
|
||||||
|
|
||||||
|
sCfg.typ = storagesCfg[i].Type()
|
||||||
|
sCfg.path = storagesCfg[i].Path()
|
||||||
|
sCfg.perm = storagesCfg[i].Perm()
|
||||||
|
|
||||||
|
switch storagesCfg[i].Type() {
|
||||||
|
case blobovniczatree.Type:
|
||||||
|
sub := blobovniczaconfig.From((*config.Config)(storagesCfg[i]))
|
||||||
|
|
||||||
|
sCfg.size = sub.Size()
|
||||||
|
sCfg.depth = sub.ShallowDepth()
|
||||||
|
sCfg.width = sub.ShallowWidth()
|
||||||
|
sCfg.openedCacheSize = sub.OpenedCacheSize()
|
||||||
|
case fstree.Type:
|
||||||
|
sub := fstreeconfig.From((*config.Config)(storagesCfg[i]))
|
||||||
|
sCfg.depth = sub.Depth()
|
||||||
|
sCfg.noSync = sub.NoSync()
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("invalid storage type: %s", storagesCfg[i].Type())
|
||||||
}
|
}
|
||||||
|
|
||||||
// blobstor with substorages
|
ss = append(ss, sCfg)
|
||||||
|
}
|
||||||
|
|
||||||
blobStorCfg := sc.BlobStor()
|
newConfig.subStorages = ss
|
||||||
storagesCfg := blobStorCfg.Storages()
|
return nil
|
||||||
metabaseCfg := sc.Metabase()
|
}
|
||||||
gcCfg := sc.GC()
|
|
||||||
|
|
||||||
if config.BoolSafe(c.Sub("tree"), "enabled") {
|
func (a *applicationConfiguration) setMetabaseConfig(newConfig *shardCfg, oldConfig *shardconfig.Config) {
|
||||||
piloramaCfg := sc.Pilorama()
|
metabaseCfg := oldConfig.Metabase()
|
||||||
pr := &sh.piloramaCfg
|
m := &newConfig.metaCfg
|
||||||
|
|
||||||
pr.enabled = true
|
m.path = metabaseCfg.Path()
|
||||||
pr.path = piloramaCfg.Path()
|
m.perm = metabaseCfg.BoltDB().Perm()
|
||||||
pr.perm = piloramaCfg.Perm()
|
m.maxBatchDelay = metabaseCfg.BoltDB().MaxBatchDelay()
|
||||||
pr.noSync = piloramaCfg.NoSync()
|
m.maxBatchSize = metabaseCfg.BoltDB().MaxBatchSize()
|
||||||
pr.maxBatchSize = piloramaCfg.MaxBatchSize()
|
}
|
||||||
pr.maxBatchDelay = piloramaCfg.MaxBatchDelay()
|
|
||||||
}
|
|
||||||
|
|
||||||
ss := make([]subStorageCfg, 0, len(storagesCfg))
|
func (a *applicationConfiguration) setGCConfig(newConfig *shardCfg, oldConfig *shardconfig.Config) {
|
||||||
for i := range storagesCfg {
|
gcCfg := oldConfig.GC()
|
||||||
var sCfg subStorageCfg
|
newConfig.gcCfg.removerBatchSize = gcCfg.RemoverBatchSize()
|
||||||
|
newConfig.gcCfg.removerSleepInterval = gcCfg.RemoverSleepInterval()
|
||||||
sCfg.typ = storagesCfg[i].Type()
|
newConfig.gcCfg.expiredCollectorBatchSize = gcCfg.ExpiredCollectorBatchSize()
|
||||||
sCfg.path = storagesCfg[i].Path()
|
newConfig.gcCfg.expiredCollectorWorkersCount = gcCfg.ExpiredCollectorWorkersCount()
|
||||||
sCfg.perm = storagesCfg[i].Perm()
|
|
||||||
|
|
||||||
switch storagesCfg[i].Type() {
|
|
||||||
case blobovniczatree.Type:
|
|
||||||
sub := blobovniczaconfig.From((*config.Config)(storagesCfg[i]))
|
|
||||||
|
|
||||||
sCfg.size = sub.Size()
|
|
||||||
sCfg.depth = sub.ShallowDepth()
|
|
||||||
sCfg.width = sub.ShallowWidth()
|
|
||||||
sCfg.openedCacheSize = sub.OpenedCacheSize()
|
|
||||||
case fstree.Type:
|
|
||||||
sub := fstreeconfig.From((*config.Config)(storagesCfg[i]))
|
|
||||||
sCfg.depth = sub.Depth()
|
|
||||||
sCfg.noSync = sub.NoSync()
|
|
||||||
default:
|
|
||||||
return fmt.Errorf("invalid storage type: %s", storagesCfg[i].Type())
|
|
||||||
}
|
|
||||||
|
|
||||||
ss = append(ss, sCfg)
|
|
||||||
}
|
|
||||||
|
|
||||||
sh.subStorages = ss
|
|
||||||
|
|
||||||
// meta
|
|
||||||
|
|
||||||
m := &sh.metaCfg
|
|
||||||
|
|
||||||
m.path = metabaseCfg.Path()
|
|
||||||
m.perm = metabaseCfg.BoltDB().Perm()
|
|
||||||
m.maxBatchDelay = metabaseCfg.BoltDB().MaxBatchDelay()
|
|
||||||
m.maxBatchSize = metabaseCfg.BoltDB().MaxBatchSize()
|
|
||||||
|
|
||||||
// GC
|
|
||||||
|
|
||||||
sh.gcCfg.removerBatchSize = gcCfg.RemoverBatchSize()
|
|
||||||
sh.gcCfg.removerSleepInterval = gcCfg.RemoverSleepInterval()
|
|
||||||
sh.gcCfg.expiredCollectorBatchSize = gcCfg.ExpiredCollectorBatchSize()
|
|
||||||
sh.gcCfg.expiredCollectorWorkersCount = gcCfg.ExpiredCollectorWorkersCount()
|
|
||||||
|
|
||||||
a.EngineCfg.shards = append(a.EngineCfg.shards, sh)
|
|
||||||
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// internals contains application-specific internals that are created
|
// internals contains application-specific internals that are created
|
||||||
|
|
Loading…
Reference in a new issue