[#168] node: Refactor node config read
Resolve funlen linter for readConfig method.

Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
parent a7c79c773a
commit 3bbb516528
1 changed file with 101 additions and 86 deletions
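Context for the change: funlen is a linter (as shipped with golangci-lint) that flags functions exceeding a configured length, and the old readConfig silenced it with a "// nolint: funlen" directive. The commit removes that suppression by splitting the shard-reading body into per-subsystem helpers. The sketch below is illustrative only and is not frostfs-node code; every name in it (appSettings, shardSettings, rawConfig, readAll) is hypothetical. It only demonstrates the same pattern of a short orchestrator delegating to focused setters.

// Illustrative sketch only, not code from this commit. All names are
// hypothetical. It mirrors readConfig -> updateShardConfig -> set*Config:
// the long "read everything" function delegates each subsystem to a small
// helper instead of suppressing funlen with a nolint directive.
package main

import "fmt"

// rawConfig stands in for the node's parsed configuration tree.
type rawConfig map[string]string

type shardSettings struct {
	writeCachePath string
	gcRemoverBatch string
}

type appSettings struct {
	shards []shardSettings
}

// readAll stays well under typical funlen limits because each concern lives
// in its own helper rather than in one monolithic body. The error return
// mirrors the orchestrator's signature in the real change.
func (a *appSettings) readAll(raw rawConfig) error {
	var s shardSettings

	a.setWriteCache(&s, raw)
	a.setGC(&s, raw)

	a.shards = append(a.shards, s)
	return nil
}

func (a *appSettings) setWriteCache(s *shardSettings, raw rawConfig) {
	s.writeCachePath = raw["writecache.path"]
}

func (a *appSettings) setGC(s *shardSettings, raw rawConfig) {
	s.gcRemoverBatch = raw["gc.remover_batch_size"]
}

func main() {
	var a appSettings
	_ = a.readAll(rawConfig{
		"writecache.path":       "/srv/writecache",
		"gc.remover_batch_size": "100",
	})
	fmt.Printf("%+v\n", a.shards)
}

The one-line closure passed to IterateShards in the actual diff plays the same role as readAll here: it only forwards to updateShardConfig, so the length check is satisfied without any suppression.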
@@ -173,8 +173,6 @@ type subStorageCfg struct {
 // readConfig fills applicationConfiguration with raw configuration values
 // not modifying them.
-//
-// nolint: funlen
 func (a *applicationConfiguration) readConfig(c *config.Config) error {
 	if a._read {
 		err := c.Reload()
@@ -202,20 +200,39 @@ func (a *applicationConfiguration) readConfig(c *config.Config) error {
 	a.EngineCfg.errorThreshold = engineconfig.ShardErrorThreshold(c)
 	a.EngineCfg.shardPoolSize = engineconfig.ShardPoolSize(c)
 
-	return engineconfig.IterateShards(c, false, func(sc *shardconfig.Config) error {
-		var sh shardCfg
-
-		sh.refillMetabase = sc.RefillMetabase()
-		sh.mode = sc.Mode()
-		sh.compress = sc.Compress()
-		sh.uncompressableContentType = sc.UncompressableContentTypes()
-		sh.smallSizeObjectLimit = sc.SmallSizeLimit()
-
-		// write-cache
-
-		writeCacheCfg := sc.WriteCache()
+	return engineconfig.IterateShards(c, false, func(sc *shardconfig.Config) error { return a.updateShardConfig(c, sc) })
+}
+
+func (a *applicationConfiguration) updateShardConfig(c *config.Config, oldConfig *shardconfig.Config) error {
+	var newConfig shardCfg
+
+	newConfig.refillMetabase = oldConfig.RefillMetabase()
+	newConfig.mode = oldConfig.Mode()
+	newConfig.compress = oldConfig.Compress()
+	newConfig.uncompressableContentType = oldConfig.UncompressableContentTypes()
+	newConfig.smallSizeObjectLimit = oldConfig.SmallSizeLimit()
+
+	a.setShardWriteCacheConfig(&newConfig, oldConfig)
+
+	a.setShardPiloramaConfig(c, &newConfig, oldConfig)
+
+	if err := a.setShardStorageConfig(&newConfig, oldConfig); err != nil {
+		return err
+	}
+
+	a.setMetabaseConfig(&newConfig, oldConfig)
+
+	a.setGCConfig(&newConfig, oldConfig)
+
+	a.EngineCfg.shards = append(a.EngineCfg.shards, newConfig)
+
+	return nil
+}
+
+func (a *applicationConfiguration) setShardWriteCacheConfig(newConfig *shardCfg, oldConfig *shardconfig.Config) {
+	writeCacheCfg := oldConfig.WriteCache()
 	if writeCacheCfg.Enabled() {
-		wc := &sh.writecacheCfg
+		wc := &newConfig.writecacheCfg
 
 		wc.enabled = true
 		wc.path = writeCacheCfg.Path()
@@ -227,17 +244,12 @@ func (a *applicationConfiguration) readConfig(c *config.Config) error {
 		wc.sizeLimit = writeCacheCfg.SizeLimit()
 		wc.noSync = writeCacheCfg.NoSync()
 	}
-
-	// blobstor with substorages
-
-	blobStorCfg := sc.BlobStor()
-	storagesCfg := blobStorCfg.Storages()
-	metabaseCfg := sc.Metabase()
-	gcCfg := sc.GC()
-
+}
+
+func (a *applicationConfiguration) setShardPiloramaConfig(c *config.Config, newConfig *shardCfg, oldConfig *shardconfig.Config) {
 	if config.BoolSafe(c.Sub("tree"), "enabled") {
-		piloramaCfg := sc.Pilorama()
-		pr := &sh.piloramaCfg
+		piloramaCfg := oldConfig.Pilorama()
+		pr := &newConfig.piloramaCfg
 
 		pr.enabled = true
 		pr.path = piloramaCfg.Path()
@@ -246,6 +258,11 @@ func (a *applicationConfiguration) readConfig(c *config.Config) error {
 		pr.maxBatchSize = piloramaCfg.MaxBatchSize()
 		pr.maxBatchDelay = piloramaCfg.MaxBatchDelay()
 	}
+}
+
+func (a *applicationConfiguration) setShardStorageConfig(newConfig *shardCfg, oldConfig *shardconfig.Config) error {
+	blobStorCfg := oldConfig.BlobStor()
+	storagesCfg := blobStorCfg.Storages()
 
 	ss := make([]subStorageCfg, 0, len(storagesCfg))
 	for i := range storagesCfg {
@@ -274,28 +291,26 @@ func (a *applicationConfiguration) readConfig(c *config.Config) error {
 		ss = append(ss, sCfg)
 	}
 
-	sh.subStorages = ss
-
-	// meta
-
-	m := &sh.metaCfg
-
+	newConfig.subStorages = ss
+	return nil
+}
+
+func (a *applicationConfiguration) setMetabaseConfig(newConfig *shardCfg, oldConfig *shardconfig.Config) {
+	metabaseCfg := oldConfig.Metabase()
+	m := &newConfig.metaCfg
+
 	m.path = metabaseCfg.Path()
 	m.perm = metabaseCfg.BoltDB().Perm()
 	m.maxBatchDelay = metabaseCfg.BoltDB().MaxBatchDelay()
 	m.maxBatchSize = metabaseCfg.BoltDB().MaxBatchSize()
-
-	// GC
-
-	sh.gcCfg.removerBatchSize = gcCfg.RemoverBatchSize()
-	sh.gcCfg.removerSleepInterval = gcCfg.RemoverSleepInterval()
-	sh.gcCfg.expiredCollectorBatchSize = gcCfg.ExpiredCollectorBatchSize()
-	sh.gcCfg.expiredCollectorWorkersCount = gcCfg.ExpiredCollectorWorkersCount()
-
-	a.EngineCfg.shards = append(a.EngineCfg.shards, sh)
-
-	return nil
-	})
+}
+
+func (a *applicationConfiguration) setGCConfig(newConfig *shardCfg, oldConfig *shardconfig.Config) {
+	gcCfg := oldConfig.GC()
+	newConfig.gcCfg.removerBatchSize = gcCfg.RemoverBatchSize()
+	newConfig.gcCfg.removerSleepInterval = gcCfg.RemoverSleepInterval()
+	newConfig.gcCfg.expiredCollectorBatchSize = gcCfg.ExpiredCollectorBatchSize()
+	newConfig.gcCfg.expiredCollectorWorkersCount = gcCfg.ExpiredCollectorWorkersCount()
 }
 
 // internals contains application-specific internals that are created