[#645] config: Resolve funlen linter
Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
parent 13cda5de8e
commit 9c10844eb0

1 changed file with 80 additions and 67 deletions
@@ -185,21 +185,21 @@ type subStorageCfg struct {
     noSync bool

     // blobovnicza-specific
     size uint64
     width uint64
     leafWidth uint64
     openedCacheSize int
     initWorkerCount int
     initInAdvance bool
-
-    // badgerstore-specific
-    indexCacheSize int64
-    memTablesCount int
-    compactorsCount int
-    gcInterval time.Duration
-    gcDiscardRatio float64
-    valueLogFileSize int64
     rebuildDropTimeout time.Duration
+
+    // badgerstore-specific
+    indexCacheSize int64
+    memTablesCount int
+    compactorsCount int
+    gcInterval time.Duration
+    gcDiscardRatio float64
+    valueLogFileSize int64
 }

 // readConfig fills applicationConfiguration with raw configuration values
@@ -909,50 +909,15 @@ func (c *cfg) getSubstorageOpts(shCfg shardCfg) []blobstor.SubStorage {
     for _, sRead := range shCfg.subStorages {
         switch sRead.typ {
         case blobovniczatree.Type:
-            blobTreeOpts := []blobovniczatree.Option{
-                blobovniczatree.WithRootPath(sRead.path),
-                blobovniczatree.WithPermissions(sRead.perm),
-                blobovniczatree.WithBlobovniczaSize(sRead.size),
-                blobovniczatree.WithBlobovniczaShallowDepth(sRead.depth),
-                blobovniczatree.WithBlobovniczaShallowWidth(sRead.width),
-                blobovniczatree.WithBlobovniczaLeafWidth(sRead.leafWidth),
-                blobovniczatree.WithOpenedCacheSize(sRead.openedCacheSize),
-                blobovniczatree.WithInitWorkerCount(sRead.initWorkerCount),
-                blobovniczatree.WithInitInAdvance(sRead.initInAdvance),
-                blobovniczatree.WithWaitBeforeDropDB(sRead.rebuildDropTimeout),
-                blobovniczatree.WithLogger(c.log),
-                blobovniczatree.WithObjectSizeLimit(shCfg.smallSizeObjectLimit),
-            }
-
-            if c.metricsCollector != nil {
-                blobTreeOpts = append(blobTreeOpts,
-                    blobovniczatree.WithMetrics(
-                        lsmetrics.NewBlobovniczaTreeMetrics(sRead.path, c.metricsCollector.BlobobvnizcaTreeMetrics()),
-                    ),
-                )
-            }
+            blobovniczaTreeOpts := c.getBlobovniczaTreeOpts(sRead)
             ss = append(ss, blobstor.SubStorage{
-                Storage: blobovniczatree.NewBlobovniczaTree(blobTreeOpts...),
+                Storage: blobovniczatree.NewBlobovniczaTree(blobovniczaTreeOpts...),
                 Policy: func(_ *objectSDK.Object, data []byte) bool {
                     return uint64(len(data)) < shCfg.smallSizeObjectLimit
                 },
             })
         case fstree.Type:
-            fstreeOpts := []fstree.Option{
-                fstree.WithPath(sRead.path),
-                fstree.WithPerm(sRead.perm),
-                fstree.WithDepth(sRead.depth),
-                fstree.WithNoSync(sRead.noSync),
-                fstree.WithLogger(c.log),
-            }
-            if c.metricsCollector != nil {
-                fstreeOpts = append(fstreeOpts,
-                    fstree.WithMetrics(
-                        lsmetrics.NewFSTreeMetricsWithoutShardID(sRead.path, c.metricsCollector.FSTree()),
-                    ),
-                )
-            }
-
+            fstreeOpts := c.getFSTreeOpts(sRead)
             ss = append(ss, blobstor.SubStorage{
                 Storage: fstree.New(fstreeOpts...),
                 Policy: func(_ *objectSDK.Object, _ []byte) bool {
@@ -960,21 +925,7 @@ func (c *cfg) getSubstorageOpts(shCfg shardCfg) []blobstor.SubStorage {
                 },
             })
         case badgerstore.Type:
-            badgerStoreOpts := []badgerstore.Option{
-                badgerstore.WithPath(sRead.path),
-                badgerstore.WithPermissions(sRead.perm),
-                badgerstore.WithCompactorsCount(sRead.compactorsCount),
-                badgerstore.WithGCDiscardRatio(sRead.gcDiscardRatio),
-                badgerstore.WithGCInterval(sRead.gcInterval),
-                badgerstore.WithIndexCacheSize(sRead.indexCacheSize),
-                badgerstore.WithMemTablesCount(sRead.memTablesCount),
-                badgerstore.WithValueLogSize(sRead.valueLogFileSize),
-            }
-            if c.metricsCollector != nil {
-                badgerStoreOpts = append(badgerStoreOpts,
-                    badgerstore.WithMetrics(
-                        lsmetrics.NewBadgerStoreMetrics(sRead.path, c.metricsCollector.BadgerStoreMetrics())))
-            }
+            badgerStoreOpts := c.getBadgerStoreOpts(sRead)
             ss = append(ss, blobstor.SubStorage{
                 Storage: badgerstore.New(badgerStoreOpts...),
                 Policy: func(_ *objectSDK.Object, data []byte) bool {
@@ -989,6 +940,68 @@ func (c *cfg) getSubstorageOpts(shCfg shardCfg) []blobstor.SubStorage {
     return ss
 }

+func (c *cfg) getBadgerStoreOpts(sRead subStorageCfg) []badgerstore.Option {
+    badgerStoreOpts := []badgerstore.Option{
+        badgerstore.WithPath(sRead.path),
+        badgerstore.WithPermissions(sRead.perm),
+        badgerstore.WithCompactorsCount(sRead.compactorsCount),
+        badgerstore.WithGCDiscardRatio(sRead.gcDiscardRatio),
+        badgerstore.WithGCInterval(sRead.gcInterval),
+        badgerstore.WithIndexCacheSize(sRead.indexCacheSize),
+        badgerstore.WithMemTablesCount(sRead.memTablesCount),
+        badgerstore.WithValueLogSize(sRead.valueLogFileSize),
+    }
+    if c.metricsCollector != nil {
+        badgerStoreOpts = append(badgerStoreOpts,
+            badgerstore.WithMetrics(
+                lsmetrics.NewBadgerStoreMetrics(sRead.path, c.metricsCollector.BadgerStoreMetrics())))
+    }
+    return badgerStoreOpts
+}
+
+func (c *cfg) getFSTreeOpts(sRead subStorageCfg) []fstree.Option {
+    fstreeOpts := []fstree.Option{
+        fstree.WithPath(sRead.path),
+        fstree.WithPerm(sRead.perm),
+        fstree.WithDepth(sRead.depth),
+        fstree.WithNoSync(sRead.noSync),
+        fstree.WithLogger(c.log),
+    }
+    if c.metricsCollector != nil {
+        fstreeOpts = append(fstreeOpts,
+            fstree.WithMetrics(
+                lsmetrics.NewFSTreeMetricsWithoutShardID(sRead.path, c.metricsCollector.FSTree()),
+            ),
+        )
+    }
+    return fstreeOpts
+}
+
+func (c *cfg) getBlobovniczaTreeOpts(sRead subStorageCfg) []blobovniczatree.Option {
+    blobTreeOpts := []blobovniczatree.Option{
+        blobovniczatree.WithRootPath(sRead.path),
+        blobovniczatree.WithPermissions(sRead.perm),
+        blobovniczatree.WithBlobovniczaSize(sRead.size),
+        blobovniczatree.WithBlobovniczaShallowDepth(sRead.depth),
+        blobovniczatree.WithBlobovniczaShallowWidth(sRead.width),
+        blobovniczatree.WithBlobovniczaLeafWidth(sRead.leafWidth),
+        blobovniczatree.WithOpenedCacheSize(sRead.openedCacheSize),
+        blobovniczatree.WithInitWorkerCount(sRead.initWorkerCount),
+        blobovniczatree.WithInitInAdvance(sRead.initInAdvance),
+        blobovniczatree.WithLogger(c.log),
+        blobovniczatree.WithWaitBeforeDropDB(sRead.rebuildDropTimeout),
+    }
+
+    if c.metricsCollector != nil {
+        blobTreeOpts = append(blobTreeOpts,
+            blobovniczatree.WithMetrics(
+                lsmetrics.NewBlobovniczaTreeMetrics(sRead.path, c.metricsCollector.BlobobvnizcaTreeMetrics()),
+            ),
+        )
+    }
+    return blobTreeOpts
+}
+
 func (c *cfg) getShardOpts(shCfg shardCfg) shardOptsWithID {
     writeCacheOpts := c.getWriteCacheOpts(shCfg)
     piloramaOpts := c.getPiloramaOpts(shCfg)
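The extracted helpers, like the code they replace, use Go's functional options pattern: each With... constructor returns a closure over the target configuration, and the substorage constructor applies the collected slice, which is what lets getSubstorageOpts stay under the funlen limit. Below is a minimal, self-contained sketch of that pattern; the names storeCfg, option, WithPath, WithGCInterval, and newStore are hypothetical and not identifiers from this repository.

```go
package main

import "fmt"

// storeCfg collects the tunables that the options below mutate.
type storeCfg struct {
	path       string
	gcInterval int
}

// option is a functional option over storeCfg.
type option func(*storeCfg)

// WithPath and WithGCInterval are hypothetical counterparts of the
// With... constructors seen in the diff above.
func WithPath(p string) option {
	return func(c *storeCfg) { c.path = p }
}

func WithGCInterval(seconds int) option {
	return func(c *storeCfg) { c.gcInterval = seconds }
}

// newStore applies the collected options over defaults, the same way
// badgerstore.New(badgerStoreOpts...) consumes its option slice.
func newStore(opts ...option) *storeCfg {
	c := &storeCfg{gcInterval: 600} // defaults
	for _, o := range opts {
		o(c)
	}
	return c
}

func main() {
	// Building the option slice in a small helper, and appending
	// conditional options (e.g. metrics) only when needed, keeps the
	// calling function short enough to satisfy funlen.
	opts := []option{WithPath("/srv/data"), WithGCInterval(300)}
	fmt.Printf("%+v\n", newStore(opts...))
}
```

Collecting the options into a slice first also makes it easy to append conditional options such as metrics without growing the calling switch statement, which is exactly how the three new helpers are structured.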