[#1226] blobovniczatree: Drop leaf width limitation

Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
Dmitrii Stepanov 2024-07-04 09:18:17 +03:00
parent 40c9ddb6ba
commit 78b1d9b18d
10 changed files with 50 additions and 55 deletions

@@ -188,7 +188,6 @@ type subStorageCfg struct {
 	// blobovnicza-specific
 	size uint64
 	width uint64
-	leafWidth uint64
 	openedCacheSize int
 	initWorkerCount int
 	rebuildDropTimeout time.Duration
@@ -313,7 +312,6 @@ func (a *applicationConfiguration) setShardStorageConfig(newConfig *shardCfg, ol
 			sCfg.size = sub.Size()
 			sCfg.depth = sub.ShallowDepth()
 			sCfg.width = sub.ShallowWidth()
-			sCfg.leafWidth = sub.LeafWidth()
 			sCfg.openedCacheSize = sub.OpenedCacheSize()
 			sCfg.openedCacheTTL = sub.OpenedCacheTTL()
 			sCfg.openedCacheExpInterval = sub.OpenedCacheExpInterval()
@@ -904,7 +902,6 @@ func (c *cfg) getSubstorageOpts(ctx context.Context, shCfg shardCfg) []blobstor.
 				blobovniczatree.WithBlobovniczaSize(sRead.size),
 				blobovniczatree.WithBlobovniczaShallowDepth(sRead.depth),
 				blobovniczatree.WithBlobovniczaShallowWidth(sRead.width),
-				blobovniczatree.WithBlobovniczaLeafWidth(sRead.leafWidth),
 				blobovniczatree.WithOpenedCacheSize(sRead.openedCacheSize),
 				blobovniczatree.WithOpenedCacheTTL(sRead.openedCacheTTL),
 				blobovniczatree.WithOpenedCacheExpInterval(sRead.openedCacheExpInterval),
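With this hunk, a blobovniczatree substorage is configured with no leaf-width option at all; the leaf fan-out is simply whatever the tree finds (or creates) on disk. A minimal sketch of building the tree with the remaining options (the import path, the WithRootPath option, and all concrete values are illustrative assumptions, not taken from this commit):

package example

import (
	"context"
	"time"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree"
)

func newTree(ctx context.Context) *blobovniczatree.Blobovniczas {
	// Only the set of options mirrors the hunk above; values are made up.
	return blobovniczatree.NewBlobovniczaTree(ctx,
		blobovniczatree.WithRootPath("/srv/frostfs/blob/blobovnicza"), // assumed option name
		blobovniczatree.WithBlobovniczaSize(4<<20),
		blobovniczatree.WithBlobovniczaShallowDepth(1),
		blobovniczatree.WithBlobovniczaShallowWidth(4),
		blobovniczatree.WithOpenedCacheSize(50),
		blobovniczatree.WithOpenedCacheTTL(time.Minute),
		blobovniczatree.WithOpenedCacheExpInterval(30*time.Second),
	)
}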

@@ -100,7 +100,6 @@ func TestEngineSection(t *testing.T) {
 				require.EqualValues(t, 50, blz.OpenedCacheSize())
 				require.EqualValues(t, time.Minute, blz.OpenedCacheTTL())
 				require.EqualValues(t, 30*time.Second, blz.OpenedCacheExpInterval())
-				require.EqualValues(t, 10, blz.LeafWidth())
 				require.EqualValues(t, 10, blz.InitWorkerCount())
 				require.EqualValues(t, 30*time.Second, blz.RebuildDropTimeout())
@@ -154,7 +153,6 @@ func TestEngineSection(t *testing.T) {
 				require.EqualValues(t, 50, blz.OpenedCacheSize())
 				require.EqualValues(t, 5*time.Minute, blz.OpenedCacheTTL())
 				require.EqualValues(t, 15*time.Second, blz.OpenedCacheExpInterval())
-				require.EqualValues(t, 10, blz.LeafWidth())
 				require.EqualValues(t, blobovniczaconfig.InitWorkerCountDefault, blz.InitWorkerCount())
 				require.EqualValues(t, blobovniczaconfig.RebuildDropTimeoutDefault, blz.RebuildDropTimeout())

@@ -149,16 +149,6 @@ func (x *Config) BoltDB() *boltdbconfig.Config {
 	return (*boltdbconfig.Config)(x)
 }
 
-// LeafWidth returns the value of "leaf_width" config parameter.
-//
-// Returns 0 if the value is not a positive number.
-func (x *Config) LeafWidth() uint64 {
-	return config.UintSafe(
-		(*config.Config)(x),
-		"leaf_width",
-	)
-}
-
 // InitWorkerCount returns the value of "init_worker_count" config parameter.
 //
 // Returns InitWorkerCountDefault if the value is not a positive number.

@@ -126,7 +126,6 @@ FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_WIDTH=4
 FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_OPENED_CACHE_CAPACITY=50
 FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_OPENED_CACHE_TTL=1m
 FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_OPENED_CACHE_EXP_INTERVAL=30s
-FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_LEAF_WIDTH=10
 FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_INIT_WORKER_COUNT=10
 FROSTFS_STORAGE_SHARD_0_BLOBSTOR_0_REBUILD_DROP_TIMEOUT=30s
 ### FSTree config
@@ -177,7 +176,6 @@ FROSTFS_STORAGE_SHARD_1_BLOBSTOR_0_WIDTH=4
 FROSTFS_STORAGE_SHARD_1_BLOBSTOR_0_OPENED_CACHE_CAPACITY=50
 FROSTFS_STORAGE_SHARD_1_BLOBSTOR_0_OPENED_CACHE_TTL=5m
 FROSTFS_STORAGE_SHARD_1_BLOBSTOR_0_OPENED_CACHE_EXP_INTERVAL=15s
-FROSTFS_STORAGE_SHARD_1_BLOBSTOR_0_LEAF_WIDTH=10
 ### FSTree config
 FROSTFS_STORAGE_SHARD_1_BLOBSTOR_1_TYPE=fstree
 FROSTFS_STORAGE_SHARD_1_BLOBSTOR_1_PATH=tmp/1/blob

@@ -174,7 +174,6 @@
           "opened_cache_capacity": 50,
           "opened_cache_ttl": "1m",
           "opened_cache_exp_interval": "30s",
-          "leaf_width": 10,
           "init_worker_count": 10,
           "rebuild_drop_timeout": "30s"
         },
@@ -227,8 +226,7 @@
           "width": 4,
           "opened_cache_capacity": 50,
           "opened_cache_ttl": "5m",
-          "opened_cache_exp_interval": "15s",
-          "leaf_width": 10
+          "opened_cache_exp_interval": "15s"
         },
         {
           "type": "fstree",

@@ -149,7 +149,6 @@ storage:
         opened_cache_capacity: 50 # maximum number of opened database files
         opened_cache_ttl: 5m # ttl for opened database file
         opened_cache_exp_interval: 15s # cache cleanup interval for expired blobovnicza's
-        leaf_width: 10 # max count of key-value DB on leafs of object tree storage
       - perm: 0644 # permissions for blobstor files(directories: +x for current user and group)
         depth: 5 # max depth of object tree storage in FS

@@ -37,17 +37,17 @@ type activeDBManager struct {
 	closed bool
 
 	dbManager *dbManager
-	leafWidth uint64
+	rootPath  string
 }
 
-func newActiveDBManager(dbManager *dbManager, leafWidth uint64) *activeDBManager {
+func newActiveDBManager(dbManager *dbManager, rootPath string) *activeDBManager {
 	return &activeDBManager{
 		levelToActiveDBGuard: &sync.RWMutex{},
 		levelToActiveDB: make(map[string]*sharedDB),
 		levelLock: utilSync.NewKeyLocker[string](),
 		dbManager: dbManager,
-		leafWidth: leafWidth,
+		rootPath: rootPath,
 	}
 }
@@ -144,30 +144,25 @@ func (m *activeDBManager) updateAndGetActive(lvlPath string) (*activeDB, error)
 }
 
 func (m *activeDBManager) getNextSharedDB(lvlPath string) (*sharedDB, error) {
-	var idx uint64
-	var iterCount uint64
+	var nextActiveDBIdx uint64
 	hasActive, currentIdx := m.hasActiveDB(lvlPath)
 	if hasActive {
-		idx = (currentIdx + 1) % m.leafWidth
-	}
-
-	var next *sharedDB
-	for iterCount < m.leafWidth {
-		path := filepath.Join(lvlPath, u64ToHexStringExt(idx))
-		shDB := m.dbManager.GetByPath(path)
-		db, err := shDB.Open() // open db to hold active DB open, will be closed if db is full, after m.replace or by activeDBManager.Close()
+		nextActiveDBIdx = currentIdx + 1
+	} else {
+		hasDBs, maxIdx, err := getBlobovniczaMaxIndex(filepath.Join(m.rootPath, lvlPath))
 		if err != nil {
 			return nil, err
 		}
-		if db.IsFull() {
-			shDB.Close()
-		} else {
-			next = shDB
-			break
+		if hasDBs {
+			nextActiveDBIdx = maxIdx
 		}
-		idx = (idx + 1) % m.leafWidth
-		iterCount++
 	}
 
+	path := filepath.Join(lvlPath, u64ToHexStringExt(nextActiveDBIdx))
+	next := m.dbManager.GetByPath(path)
+	_, err := next.Open() // open db to hold active DB open, will be closed if db is full, after m.replace or by activeDBManager.Close()
+	if err != nil {
+		return nil, err
+	}
 	previous, updated := m.replace(lvlPath, next)
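Taken together, the rewritten selection logic drops the modulo-by-leaf-width cycling: the next active index either follows the currently active one or resumes from the largest index already present on disk. The contrast in a sketch, using illustrative helpers that do not exist in the codebase:

// Old behaviour: the index cycled inside a fixed, configured leaf width.
func nextIndexOld(currentIdx, leafWidth uint64) uint64 {
	return (currentIdx + 1) % leafWidth // wraps back to 0
}

// New behaviour: the index is unbounded and derived from the filesystem.
func nextIndexNew(hasActive bool, currentIdx uint64, hasDBs bool, maxIdx uint64) uint64 {
	if hasActive {
		return currentIdx + 1 // keeps growing past any former leaf_width
	}
	if hasDBs {
		return maxIdx // resume with the newest DB on disk; it may not be full yet
	}
	return 0 // empty level: start from the first index
}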

@@ -3,6 +3,7 @@ package blobovniczatree
 import (
 	"context"
 	"errors"
+	"os"
 	"strconv"
 	"strings"
 	"sync"
@@ -81,12 +82,8 @@ func NewBlobovniczaTree(ctx context.Context, opts ...Option) (blz *Blobovniczas)
 		opts[i](&blz.cfg)
 	}
 
-	if blz.blzLeafWidth == 0 {
-		blz.blzLeafWidth = blz.blzShallowWidth
-	}
-
 	blz.commondbManager = newDBManager(blz.rootPath, blz.blzOpts, blz.readOnly, blz.metrics.Blobovnicza(), blz.log)
-	blz.activeDBManager = newActiveDBManager(blz.commondbManager, blz.blzLeafWidth)
+	blz.activeDBManager = newActiveDBManager(blz.commondbManager, blz.rootPath)
 	blz.dbCache = newDBCache(ctx, blz.openedCacheSize,
 		blz.openedCacheTTL, blz.openedCacheExpInterval, blz.commondbManager)
 	blz.deleteProtectedObjects = newAddressMap()
@@ -124,6 +121,29 @@ func u64FromHexString(str string) uint64 {
 	return v
 }
 
+func getBlobovniczaMaxIndex(directory string) (bool, uint64, error) {
+	entries, err := os.ReadDir(directory)
+	if os.IsNotExist(err) { // non initialized tree
+		return false, 0, nil
+	}
+	if err != nil {
+		return false, 0, err
+	}
+	if len(entries) == 0 {
+		return false, 0, nil
+	}
+	var hasDBs bool
+	var maxIdx uint64
+	for _, e := range entries {
+		if e.IsDir() {
+			continue
+		}
+		hasDBs = true
+		maxIdx = max(u64FromHexString(e.Name()), maxIdx)
+	}
+	return hasDBs, maxIdx, nil
+}
+
 // Type is blobovniczatree storage type used in logs and configuration.
 const Type = "blobovnicza"
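A test-style sketch of the new helper, not part of the commit: it assumes placement in the blobovniczatree package and that testify is available, as in the repository's other tests, and it names the files with the same u64ToHexStringExt helper the active DB manager uses.

package blobovniczatree

import (
	"os"
	"path/filepath"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestGetBlobovniczaMaxIndex(t *testing.T) {
	dir := t.TempDir()

	// A level directory that does not exist yet reads as "no DBs".
	hasDBs, _, err := getBlobovniczaMaxIndex(filepath.Join(dir, "missing"))
	require.NoError(t, err)
	require.False(t, hasDBs)

	// Create a few DB files named the way the manager names them; gaps are allowed.
	for _, idx := range []uint64{0, 2, 10} {
		require.NoError(t, os.WriteFile(filepath.Join(dir, u64ToHexStringExt(idx)), nil, 0o644))
	}

	hasDBs, maxIdx, err := getBlobovniczaMaxIndex(dir)
	require.NoError(t, err)
	require.True(t, hasDBs)
	require.EqualValues(t, 10, maxIdx) // the largest parsed index wins
}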

@@ -130,7 +130,14 @@ func (b *Blobovniczas) iterateSorted(ctx context.Context, addr *oid.Address, cur
 	isLeafLevel := uint64(len(curPath)) == b.blzShallowDepth
 	levelWidth := b.blzShallowWidth
 	if isLeafLevel {
-		levelWidth = b.blzLeafWidth
+		hasDBs, maxIdx, err := getBlobovniczaMaxIndex(filepath.Join(append([]string{b.rootPath}, curPath...)...))
+		if err != nil {
+			return false, err
+		}
+		levelWidth = 0
+		if hasDBs {
+			levelWidth = maxIdx + 1
+		}
 	}
 
 	indices := indexSlice(levelWidth)
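At the leaf level, sorted iteration now sizes itself from the directory contents instead of configuration: an empty or missing level yields width 0, otherwise the width is the largest existing index plus one, so indices with gaps in between are still visited. A small illustrative helper (not in the codebase) that captures the same rule:

package blobovniczatree

import "path/filepath"

// leafLevelWidth mirrors the added lines above: 0 for an empty level,
// otherwise maxIdx+1 so that indices 0..maxIdx are all iterated.
func leafLevelWidth(rootPath string, curPath []string) (uint64, error) {
	hasDBs, maxIdx, err := getBlobovniczaMaxIndex(filepath.Join(append([]string{rootPath}, curPath...)...))
	if err != nil {
		return 0, err
	}
	if !hasDBs {
		return 0, nil
	}
	return maxIdx + 1, nil
}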

@@ -18,7 +18,6 @@ type cfg struct {
 	openedCacheSize int
 	blzShallowDepth uint64
 	blzShallowWidth uint64
-	blzLeafWidth uint64
 	compression *compression.Config
 	blzOpts []blobovnicza.Option
 	reportError func(string, error) // reportError is the function called when encountering disk errors.
@@ -82,12 +81,6 @@ func WithBlobovniczaShallowWidth(width uint64) Option {
 	}
 }
 
-func WithBlobovniczaLeafWidth(w uint64) Option {
-	return func(c *cfg) {
-		c.blzLeafWidth = w
-	}
-}
-
 func WithBlobovniczaShallowDepth(depth uint64) Option {
 	return func(c *cfg) {
 		c.blzShallowDepth = depth