From 3bbb5165281c8283487a25b47ef041990fd2080d Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Thu, 23 Mar 2023 18:35:21 +0300
Subject: [PATCH] [#168] node: Refactor node config read

Resolve funlen linter for readConfig method

Signed-off-by: Dmitrii Stepanov
---
 cmd/frostfs-node/config.go | 187 ++++++++++++++++++++-----------------
 1 file changed, 101 insertions(+), 86 deletions(-)

diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go
index 1575ce07..cea75045 100644
--- a/cmd/frostfs-node/config.go
+++ b/cmd/frostfs-node/config.go
@@ -173,8 +173,6 @@ type subStorageCfg struct {
 
 // readConfig fills applicationConfiguration with raw configuration values
 // not modifying them.
-//
-// nolint: funlen
 func (a *applicationConfiguration) readConfig(c *config.Config) error {
 	if a._read {
 		err := c.Reload()
@@ -202,100 +200,117 @@ func (a *applicationConfiguration) readConfig(c *config.Config) error {
 	a.EngineCfg.errorThreshold = engineconfig.ShardErrorThreshold(c)
 	a.EngineCfg.shardPoolSize = engineconfig.ShardPoolSize(c)
 
-	return engineconfig.IterateShards(c, false, func(sc *shardconfig.Config) error {
-		var sh shardCfg
+	return engineconfig.IterateShards(c, false, func(sc *shardconfig.Config) error { return a.updateShardConfig(c, sc) })
+}
 
-		sh.refillMetabase = sc.RefillMetabase()
-		sh.mode = sc.Mode()
-		sh.compress = sc.Compress()
-		sh.uncompressableContentType = sc.UncompressableContentTypes()
-		sh.smallSizeObjectLimit = sc.SmallSizeLimit()
+func (a *applicationConfiguration) updateShardConfig(c *config.Config, oldConfig *shardconfig.Config) error {
+	var newConfig shardCfg
 
-		// write-cache
+	newConfig.refillMetabase = oldConfig.RefillMetabase()
+	newConfig.mode = oldConfig.Mode()
+	newConfig.compress = oldConfig.Compress()
+	newConfig.uncompressableContentType = oldConfig.UncompressableContentTypes()
+	newConfig.smallSizeObjectLimit = oldConfig.SmallSizeLimit()
 
-		writeCacheCfg := sc.WriteCache()
-		if writeCacheCfg.Enabled() {
-			wc := &sh.writecacheCfg
+	a.setShardWriteCacheConfig(&newConfig, oldConfig)
 
-			wc.enabled = true
-			wc.path = writeCacheCfg.Path()
-			wc.maxBatchSize = writeCacheCfg.BoltDB().MaxBatchSize()
-			wc.maxBatchDelay = writeCacheCfg.BoltDB().MaxBatchDelay()
-			wc.maxObjSize = writeCacheCfg.MaxObjectSize()
-			wc.smallObjectSize = writeCacheCfg.SmallObjectSize()
-			wc.flushWorkerCount = writeCacheCfg.WorkersNumber()
-			wc.sizeLimit = writeCacheCfg.SizeLimit()
-			wc.noSync = writeCacheCfg.NoSync()
+	a.setShardPiloramaConfig(c, &newConfig, oldConfig)
+
+	if err := a.setShardStorageConfig(&newConfig, oldConfig); err != nil {
+		return err
+	}
+
+	a.setMetabaseConfig(&newConfig, oldConfig)
+
+	a.setGCConfig(&newConfig, oldConfig)
+
+	a.EngineCfg.shards = append(a.EngineCfg.shards, newConfig)
+
+	return nil
+}
+
+func (a *applicationConfiguration) setShardWriteCacheConfig(newConfig *shardCfg, oldConfig *shardconfig.Config) {
+	writeCacheCfg := oldConfig.WriteCache()
+	if writeCacheCfg.Enabled() {
+		wc := &newConfig.writecacheCfg
+
+		wc.enabled = true
+		wc.path = writeCacheCfg.Path()
+		wc.maxBatchSize = writeCacheCfg.BoltDB().MaxBatchSize()
+		wc.maxBatchDelay = writeCacheCfg.BoltDB().MaxBatchDelay()
+		wc.maxObjSize = writeCacheCfg.MaxObjectSize()
+		wc.smallObjectSize = writeCacheCfg.SmallObjectSize()
+		wc.flushWorkerCount = writeCacheCfg.WorkersNumber()
+		wc.sizeLimit = writeCacheCfg.SizeLimit()
+		wc.noSync = writeCacheCfg.NoSync()
+	}
+}
+
+func (a *applicationConfiguration) setShardPiloramaConfig(c *config.Config, newConfig *shardCfg, oldConfig *shardconfig.Config) {
+	if config.BoolSafe(c.Sub("tree"), "enabled") {
+		piloramaCfg := oldConfig.Pilorama()
+		pr := &newConfig.piloramaCfg
+
+		pr.enabled = true
+		pr.path = piloramaCfg.Path()
+		pr.perm = piloramaCfg.Perm()
+		pr.noSync = piloramaCfg.NoSync()
+		pr.maxBatchSize = piloramaCfg.MaxBatchSize()
+		pr.maxBatchDelay = piloramaCfg.MaxBatchDelay()
+	}
+}
+
+func (a *applicationConfiguration) setShardStorageConfig(newConfig *shardCfg, oldConfig *shardconfig.Config) error {
+	blobStorCfg := oldConfig.BlobStor()
+	storagesCfg := blobStorCfg.Storages()
+
+	ss := make([]subStorageCfg, 0, len(storagesCfg))
+	for i := range storagesCfg {
+		var sCfg subStorageCfg
+
+		sCfg.typ = storagesCfg[i].Type()
+		sCfg.path = storagesCfg[i].Path()
+		sCfg.perm = storagesCfg[i].Perm()
+
+		switch storagesCfg[i].Type() {
+		case blobovniczatree.Type:
+			sub := blobovniczaconfig.From((*config.Config)(storagesCfg[i]))
+
+			sCfg.size = sub.Size()
+			sCfg.depth = sub.ShallowDepth()
+			sCfg.width = sub.ShallowWidth()
+			sCfg.openedCacheSize = sub.OpenedCacheSize()
+		case fstree.Type:
+			sub := fstreeconfig.From((*config.Config)(storagesCfg[i]))
+			sCfg.depth = sub.Depth()
+			sCfg.noSync = sub.NoSync()
+		default:
+			return fmt.Errorf("invalid storage type: %s", storagesCfg[i].Type())
 		}
 
-		// blobstor with substorages
+		ss = append(ss, sCfg)
+	}
 
-		blobStorCfg := sc.BlobStor()
-		storagesCfg := blobStorCfg.Storages()
-		metabaseCfg := sc.Metabase()
-		gcCfg := sc.GC()
+	newConfig.subStorages = ss
+	return nil
+}
 
-		if config.BoolSafe(c.Sub("tree"), "enabled") {
-			piloramaCfg := sc.Pilorama()
-			pr := &sh.piloramaCfg
+func (a *applicationConfiguration) setMetabaseConfig(newConfig *shardCfg, oldConfig *shardconfig.Config) {
+	metabaseCfg := oldConfig.Metabase()
+	m := &newConfig.metaCfg
 
-			pr.enabled = true
-			pr.path = piloramaCfg.Path()
-			pr.perm = piloramaCfg.Perm()
-			pr.noSync = piloramaCfg.NoSync()
-			pr.maxBatchSize = piloramaCfg.MaxBatchSize()
-			pr.maxBatchDelay = piloramaCfg.MaxBatchDelay()
-		}
+	m.path = metabaseCfg.Path()
+	m.perm = metabaseCfg.BoltDB().Perm()
+	m.maxBatchDelay = metabaseCfg.BoltDB().MaxBatchDelay()
+	m.maxBatchSize = metabaseCfg.BoltDB().MaxBatchSize()
+}
 
-		ss := make([]subStorageCfg, 0, len(storagesCfg))
-		for i := range storagesCfg {
-			var sCfg subStorageCfg
-
-			sCfg.typ = storagesCfg[i].Type()
-			sCfg.path = storagesCfg[i].Path()
-			sCfg.perm = storagesCfg[i].Perm()
-
-			switch storagesCfg[i].Type() {
-			case blobovniczatree.Type:
-				sub := blobovniczaconfig.From((*config.Config)(storagesCfg[i]))
-
-				sCfg.size = sub.Size()
-				sCfg.depth = sub.ShallowDepth()
-				sCfg.width = sub.ShallowWidth()
-				sCfg.openedCacheSize = sub.OpenedCacheSize()
-			case fstree.Type:
-				sub := fstreeconfig.From((*config.Config)(storagesCfg[i]))
-				sCfg.depth = sub.Depth()
-				sCfg.noSync = sub.NoSync()
-			default:
-				return fmt.Errorf("invalid storage type: %s", storagesCfg[i].Type())
-			}
-
-			ss = append(ss, sCfg)
-		}
-
-		sh.subStorages = ss
-
-		// meta
-
-		m := &sh.metaCfg
-
-		m.path = metabaseCfg.Path()
-		m.perm = metabaseCfg.BoltDB().Perm()
-		m.maxBatchDelay = metabaseCfg.BoltDB().MaxBatchDelay()
-		m.maxBatchSize = metabaseCfg.BoltDB().MaxBatchSize()
-
-		// GC
-
-		sh.gcCfg.removerBatchSize = gcCfg.RemoverBatchSize()
-		sh.gcCfg.removerSleepInterval = gcCfg.RemoverSleepInterval()
-		sh.gcCfg.expiredCollectorBatchSize = gcCfg.ExpiredCollectorBatchSize()
-		sh.gcCfg.expiredCollectorWorkersCount = gcCfg.ExpiredCollectorWorkersCount()
-
-		a.EngineCfg.shards = append(a.EngineCfg.shards, sh)
-
-		return nil
-	})
+func (a *applicationConfiguration) setGCConfig(newConfig *shardCfg, oldConfig *shardconfig.Config) {
+	gcCfg := oldConfig.GC()
+	newConfig.gcCfg.removerBatchSize = gcCfg.RemoverBatchSize()
+	newConfig.gcCfg.removerSleepInterval = gcCfg.RemoverSleepInterval()
+	newConfig.gcCfg.expiredCollectorBatchSize = gcCfg.ExpiredCollectorBatchSize()
+	newConfig.gcCfg.expiredCollectorWorkersCount = gcCfg.ExpiredCollectorWorkersCount()
 }
 
 // internals contains application-specific internals that are created