[#645] config: Resolve funlen linter
Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
Parent: 588113b7d6
Commit: e7c379044f
1 changed file with 82 additions and 63 deletions
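For context: funlen is a golangci-lint check that flags functions exceeding a configured number of lines and statements. This commit resolves the warning for getSubstorageOpts by moving each substorage's option construction into its own helper (getBlobovniczaTreeOpts, getFSTreeOpts, getBlobTreeOpts, getBadgerStoreOpts) without changing behavior. A minimal sketch of how funlen is typically enabled in .golangci.yml follows; the thresholds shown are the linter's common defaults, not necessarily this repository's settings:

    linters:
      enable:
        - funlen            # flag overly long functions
    linters-settings:
      funlen:
        lines: 60           # maximum lines per function (default)
        statements: 40      # maximum statements per function (default)

With such a configuration, the refactored code can be re-checked locally by running golangci-lint run.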
@@ -186,7 +186,7 @@ type subStorageCfg struct {
 	leafWidth       uint64
 	openedCacheSize int
 
-	//badgerstore-specific
+	// badgerstore-specific
 	indexCacheSize  int64
 	memTablesCount  int
 	compactorsCount int
@@ -812,46 +812,15 @@ func (c *cfg) getSubstorageOpts(shCfg shardCfg) []blobstor.SubStorage {
 	for _, sRead := range shCfg.subStorages {
 		switch sRead.typ {
 		case blobovniczatree.Type:
-			blobTreeOpts := []blobovniczatree.Option{
-				blobovniczatree.WithRootPath(sRead.path),
-				blobovniczatree.WithPermissions(sRead.perm),
-				blobovniczatree.WithBlobovniczaSize(sRead.size),
-				blobovniczatree.WithBlobovniczaShallowDepth(sRead.depth),
-				blobovniczatree.WithBlobovniczaShallowWidth(sRead.width),
-				blobovniczatree.WithBlobovniczaLeafWidth(sRead.leafWidth),
-				blobovniczatree.WithOpenedCacheSize(sRead.openedCacheSize),
-				blobovniczatree.WithLogger(c.log),
-			}
-
-			if c.metricsCollector != nil {
-				blobTreeOpts = append(blobTreeOpts,
-					blobovniczatree.WithMetrics(
-						lsmetrics.NewBlobovniczaTreeMetrics(sRead.path, c.metricsCollector.BlobovniczaTreeMetrics()),
-					),
-				)
-			}
+			blobovniczaTreeOpts := c.getBlobovniczaTreeOpts(sRead)
 			ss = append(ss, blobstor.SubStorage{
-				Storage: blobovniczatree.NewBlobovniczaTree(blobTreeOpts...),
+				Storage: blobovniczatree.NewBlobovniczaTree(blobovniczaTreeOpts...),
 				Policy: func(_ *objectSDK.Object, data []byte) bool {
 					return uint64(len(data)) < shCfg.smallSizeObjectLimit
 				},
 			})
 		case fstree.Type:
-			fstreeOpts := []fstree.Option{
-				fstree.WithPath(sRead.path),
-				fstree.WithPerm(sRead.perm),
-				fstree.WithDepth(sRead.depth),
-				fstree.WithNoSync(sRead.noSync),
-				fstree.WithLogger(c.log),
-			}
-			if c.metricsCollector != nil {
-				fstreeOpts = append(fstreeOpts,
-					fstree.WithMetrics(
-						lsmetrics.NewFSTreeMetricsWithoutShardID(sRead.path, c.metricsCollector.FSTree()),
-					),
-				)
-			}
-
+			fstreeOpts := c.getFSTreeOpts(sRead)
 			ss = append(ss, blobstor.SubStorage{
 				Storage: fstree.New(fstreeOpts...),
 				Policy: func(_ *objectSDK.Object, data []byte) bool {
@@ -859,19 +828,7 @@ func (c *cfg) getSubstorageOpts(shCfg shardCfg) []blobstor.SubStorage {
 				},
 			})
 		case blobtree.Type:
-			blobTreeOpts := []blobtree.Option{
-				blobtree.WithPath(sRead.path),
-				blobtree.WithPerm(sRead.perm),
-				blobtree.WithDepth(sRead.depth),
-				blobtree.WithTargetSize(sRead.size),
-			}
-			if c.metricsCollector != nil {
-				blobTreeOpts = append(blobTreeOpts,
-					blobtree.WithMetrics(
-						lsmetrics.NewBlobTreeMetrics(sRead.path, c.metricsCollector.BlobTreeMetrics()),
-					),
-				)
-			}
+			blobTreeOpts := c.getBlobTreeOpts(sRead)
 			ss = append(ss, blobstor.SubStorage{
 				Storage: blobtree.New(blobTreeOpts...),
 				Policy: func(_ *objectSDK.Object, data []byte) bool {
@@ -879,21 +836,7 @@ func (c *cfg) getSubstorageOpts(shCfg shardCfg) []blobstor.SubStorage {
 				},
 			})
 		case badgerstore.Type:
-			badgerStoreOpts := []badgerstore.Option{
-				badgerstore.WithPath(sRead.path),
-				badgerstore.WithPermissions(sRead.perm),
-				badgerstore.WithCompactorsCount(sRead.compactorsCount),
-				badgerstore.WithGCDiscardRatio(sRead.gcDiscardRatio),
-				badgerstore.WithGCInterval(sRead.gcInterval),
-				badgerstore.WithIndexCacheSize(sRead.indexCacheSize),
-				badgerstore.WithMemTablesCount(sRead.memTablesCount),
-				badgerstore.WithValueLogSize(sRead.valueLogFileSize),
-			}
-			if c.metricsCollector != nil {
-				badgerStoreOpts = append(badgerStoreOpts,
-					badgerstore.WithMetrics(
-						lsmetrics.NewBadgerStoreMetrics(sRead.path, c.metricsCollector.BadgerStoreMetrics())))
-			}
+			badgerStoreOpts := c.getBadgerStoreOpts(sRead)
 			ss = append(ss, blobstor.SubStorage{
 				Storage: badgerstore.New(badgerStoreOpts...),
 				Policy: func(_ *objectSDK.Object, data []byte) bool {
@@ -908,6 +851,82 @@ func (c *cfg) getSubstorageOpts(shCfg shardCfg) []blobstor.SubStorage {
 	return ss
 }
 
+func (c *cfg) getBadgerStoreOpts(sRead subStorageCfg) []badgerstore.Option {
+	badgerStoreOpts := []badgerstore.Option{
+		badgerstore.WithPath(sRead.path),
+		badgerstore.WithPermissions(sRead.perm),
+		badgerstore.WithCompactorsCount(sRead.compactorsCount),
+		badgerstore.WithGCDiscardRatio(sRead.gcDiscardRatio),
+		badgerstore.WithGCInterval(sRead.gcInterval),
+		badgerstore.WithIndexCacheSize(sRead.indexCacheSize),
+		badgerstore.WithMemTablesCount(sRead.memTablesCount),
+		badgerstore.WithValueLogSize(sRead.valueLogFileSize),
+	}
+	if c.metricsCollector != nil {
+		badgerStoreOpts = append(badgerStoreOpts,
+			badgerstore.WithMetrics(
+				lsmetrics.NewBadgerStoreMetrics(sRead.path, c.metricsCollector.BadgerStoreMetrics())))
+	}
+	return badgerStoreOpts
+}
+
+func (c *cfg) getBlobTreeOpts(sRead subStorageCfg) []blobtree.Option {
+	blobTreeOpts := []blobtree.Option{
+		blobtree.WithPath(sRead.path),
+		blobtree.WithPerm(sRead.perm),
+		blobtree.WithDepth(sRead.depth),
+		blobtree.WithTargetSize(sRead.size),
+	}
+	if c.metricsCollector != nil {
+		blobTreeOpts = append(blobTreeOpts,
+			blobtree.WithMetrics(
+				lsmetrics.NewBlobTreeMetrics(sRead.path, c.metricsCollector.BlobTreeMetrics()),
+			),
+		)
+	}
+	return blobTreeOpts
+}
+
+func (c *cfg) getFSTreeOpts(sRead subStorageCfg) []fstree.Option {
+	fstreeOpts := []fstree.Option{
+		fstree.WithPath(sRead.path),
+		fstree.WithPerm(sRead.perm),
+		fstree.WithDepth(sRead.depth),
+		fstree.WithNoSync(sRead.noSync),
+		fstree.WithLogger(c.log),
+	}
+	if c.metricsCollector != nil {
+		fstreeOpts = append(fstreeOpts,
+			fstree.WithMetrics(
+				lsmetrics.NewFSTreeMetricsWithoutShardID(sRead.path, c.metricsCollector.FSTree()),
+			),
+		)
+	}
+	return fstreeOpts
+}
+
+func (c *cfg) getBlobovniczaTreeOpts(sRead subStorageCfg) []blobovniczatree.Option {
+	blobTreeOpts := []blobovniczatree.Option{
+		blobovniczatree.WithRootPath(sRead.path),
+		blobovniczatree.WithPermissions(sRead.perm),
+		blobovniczatree.WithBlobovniczaSize(sRead.size),
+		blobovniczatree.WithBlobovniczaShallowDepth(sRead.depth),
+		blobovniczatree.WithBlobovniczaShallowWidth(sRead.width),
+		blobovniczatree.WithBlobovniczaLeafWidth(sRead.leafWidth),
+		blobovniczatree.WithOpenedCacheSize(sRead.openedCacheSize),
+		blobovniczatree.WithLogger(c.log),
+	}
+
+	if c.metricsCollector != nil {
+		blobTreeOpts = append(blobTreeOpts,
+			blobovniczatree.WithMetrics(
+				lsmetrics.NewBlobovniczaTreeMetrics(sRead.path, c.metricsCollector.BlobovniczaTreeMetrics()),
+			),
+		)
+	}
+	return blobTreeOpts
+}
+
 func (c *cfg) getShardOpts(shCfg shardCfg) shardOptsWithID {
 	writeCacheOpts := c.getWriteCacheOpts(shCfg)
 	piloramaOpts := c.getPiloramaOpts(shCfg)