Add page_size config parameter for writecache #1302

Merged
fyrchik merged 1 commit from dstepanov-yadro/frostfs-node:fix/writecache_page_size into master 2024-09-04 19:51:11 +00:00
12 changed files with 35 additions and 4 deletions

@@ -25,7 +25,7 @@ func init() {
 func inspectFunc(cmd *cobra.Command, _ []string) {
 	var data []byte

-	db, err := writecache.OpenDB(vPath, true, os.OpenFile)
+	db, err := writecache.OpenDB(vPath, true, os.OpenFile, 0)
 	common.ExitOnErr(cmd, common.Errf("could not open write-cache db: %w", err))
 	defer db.Close()

@@ -31,7 +31,7 @@ func listFunc(cmd *cobra.Command, _ []string) {
 		return err
 	}

-	db, err := writecache.OpenDB(vPath, true, os.OpenFile)
+	db, err := writecache.OpenDB(vPath, true, os.OpenFile, 0)
 	common.ExitOnErr(cmd, common.Errf("could not open write-cache db: %w", err))
 	defer db.Close()

@@ -154,6 +154,7 @@ type shardCfg struct {
 		sizeLimit  uint64
 		countLimit uint64
 		noSync     bool
+		pageSize   int
 	}

 	piloramaCfg struct {
@@ -272,6 +273,7 @@ func (a *applicationConfiguration) setShardWriteCacheConfig(newConfig *shardCfg,
 	wc.path = writeCacheCfg.Path()
 	wc.maxBatchSize = writeCacheCfg.BoltDB().MaxBatchSize()
 	wc.maxBatchDelay = writeCacheCfg.BoltDB().MaxBatchDelay()
+	wc.pageSize = writeCacheCfg.BoltDB().PageSize()
 	wc.maxObjSize = writeCacheCfg.MaxObjectSize()
 	wc.smallObjectSize = writeCacheCfg.SmallObjectSize()
 	wc.flushWorkerCount = writeCacheCfg.WorkerCount()
@@ -865,6 +867,7 @@ func (c *cfg) getWriteCacheOpts(shCfg shardCfg) []writecache.Option {
 		writecache.WithPath(wcRead.path),
 		writecache.WithMaxBatchSize(wcRead.maxBatchSize),
 		writecache.WithMaxBatchDelay(wcRead.maxBatchDelay),
+		writecache.WithPageSize(wcRead.pageSize),
 		writecache.WithMaxObjectSize(wcRead.maxObjSize),
 		writecache.WithSmallObjectSize(wcRead.smallObjectSize),
 		writecache.WithFlushWorkersCount(wcRead.flushWorkerCount),

@@ -79,6 +79,7 @@ func TestEngineSection(t *testing.T) {
 			require.EqualValues(t, 134217728, wc.MaxObjectSize())
 			require.EqualValues(t, 30, wc.WorkerCount())
 			require.EqualValues(t, 3221225472, wc.SizeLimit())
+			require.EqualValues(t, 4096, wc.BoltDB().PageSize())
 			require.EqualValues(t, 49, wc.CountLimit())

 			require.Equal(t, "tmp/0/meta", meta.Path())
@@ -135,6 +136,7 @@ func TestEngineSection(t *testing.T) {
 			require.EqualValues(t, 134217728, wc.MaxObjectSize())
 			require.EqualValues(t, 30, wc.WorkerCount())
 			require.EqualValues(t, 4294967296, wc.SizeLimit())
+			require.EqualValues(t, 0, wc.BoltDB().PageSize())
 			require.EqualValues(t, writecacheconfig.CountLimitDefault, wc.CountLimit())

 			require.Equal(t, "tmp/1/meta", meta.Path())

@@ -60,3 +60,14 @@ func (x *Config) MaxBatchSize() int {
 func (x *Config) NoSync() bool {
 	return config.BoolSafe((*config.Config)(x), "no_sync")
 }
+
+// PageSize returns the value of "page_size" config parameter.
+//
+// Returns 0 if the value is not a positive number.
+func (x *Config) PageSize() int {
+	s := int(config.SizeInBytesSafe((*config.Config)(x), "page_size"))
+	if s < 0 {
+		s = 0
+	}
+	return s
+}
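
Since the getter goes through config.SizeInBytesSafe, the suffixed form in the YAML examples below ("4k") and the plain byte count in the env/JSON examples (4096) resolve to the same value, and anything non-positive collapses to 0, which downstream means "let bbolt pick its default page size". The sketch below only illustrates that resolution; parseSize is a hypothetical stand-in, not the repo's actual parser:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseSize is a hypothetical stand-in for config.SizeInBytesSafe: it accepts
// plain byte counts and the binary suffixes seen in the config files of this
// diff, and returns 0 for anything it cannot parse.
func parseSize(s string) int64 {
	mult := int64(1)
	switch {
	case strings.HasSuffix(s, "k"):
		mult, s = 1<<10, strings.TrimSuffix(s, "k")
	case strings.HasSuffix(s, "m"):
		mult, s = 1<<20, strings.TrimSuffix(s, "m")
	case strings.HasSuffix(s, "g"):
		mult, s = 1<<30, strings.TrimSuffix(s, "g")
	}
	n, err := strconv.ParseInt(strings.TrimSpace(s), 10, 64)
	if err != nil {
		return 0
	}
	return n * mult
}

// pageSize mirrors the clamping in (*Config).PageSize above: non-positive
// results collapse to 0.
func pageSize(raw string) int {
	s := int(parseSize(raw))
	if s < 0 {
		s = 0
	}
	return s
}

func main() {
	for _, raw := range []string{"4k", "4096", "-1", ""} {
		fmt.Printf("%q -> %d\n", raw, pageSize(raw)) // "4k" and "4096" both yield 4096
	}
}
```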

@@ -105,6 +105,7 @@ FROSTFS_STORAGE_SHARD_0_WRITECACHE_SMALL_OBJECT_SIZE=16384
 FROSTFS_STORAGE_SHARD_0_WRITECACHE_MAX_OBJECT_SIZE=134217728
 FROSTFS_STORAGE_SHARD_0_WRITECACHE_FLUSH_WORKER_COUNT=30
 FROSTFS_STORAGE_SHARD_0_WRITECACHE_CAPACITY=3221225472
+FROSTFS_STORAGE_SHARD_0_WRITECACHE_PAGE_SIZE=4096
 FROSTFS_STORAGE_SHARD_0_WRITECACHE_MAX_OBJECT_COUNT=49
 ### Metabase config
 FROSTFS_STORAGE_SHARD_0_METABASE_PATH=tmp/0/meta

@@ -149,6 +149,7 @@
   "max_object_size": 134217728,
   "flush_worker_count": 30,
   "capacity": 3221225472,
+  "page_size": 4096,
   "max_object_count": 49
 },
 "metabase": {

@@ -172,6 +172,7 @@ storage:
       path: tmp/0/cache # write-cache root directory
       capacity: 3221225472 # approximate write-cache total size, bytes
      max_object_count: 49
+      page_size: 4k
    metabase:
      path: tmp/0/meta # metabase path

@@ -290,6 +290,7 @@ writecache:
   small_object_size: 16384
   max_object_size: 134217728
   flush_worker_count: 30
+  page_size: '4k'
 ```

 | Parameter | Type | Default value | Description |
@@ -302,6 +303,7 @@ writecache:
 | `flush_worker_count` | `int` | `20` | Amount of background workers that move data from the writecache to the blobstor. |
 | `max_batch_size` | `int` | `1000` | Maximum amount of small object `PUT` operations to perform in a single transaction. |
 | `max_batch_delay` | `duration` | `10ms` | Maximum delay before a batch starts. |
+| `page_size` | `size` | `0` | Page size overrides the default OS page size for small objects storage. Does not affect the existing storage. |

 # `node` section

@@ -48,6 +48,8 @@ type options struct {
 	metrics Metrics
 	// disableBackgroundFlush is for testing purposes only.
 	disableBackgroundFlush bool
+	// pageSize is bbolt's page size config value
+	pageSize int
 }

 // WithLogger sets logger.
@@ -173,3 +175,10 @@ func WithDisableBackgroundFlush() Option {
 		o.disableBackgroundFlush = true
 	}
 }
+
+// WithPageSize sets bbolt's page size.
+func WithPageSize(s int) Option {
+	return func(o *options) {
+		o.pageSize = s
+	}
+}
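
For context, the option above ends up in the list built by getWriteCacheOpts earlier in this change. A minimal wiring sketch; writecache.New is assumed to be the package constructor used by the shard code, and the path and option set below are illustrative, not a verbatim excerpt:

```go
package main

import (
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/writecache"
)

func main() {
	// Build a write-cache with an explicit bbolt page size.
	// WithPath and WithPageSize are both visible in this diff; the path is illustrative.
	wc := writecache.New(
		writecache.WithPath("/srv/frostfs/shard0/writecache"),
		writecache.WithPageSize(4096), // 0 keeps bbolt's default, i.e. the OS page size
	)
	_ = wc
}
```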

@@ -32,7 +32,7 @@ func (c *cache) openStore(mod mode.ComponentMode) error {
 		return err
 	}

-	c.db, err = OpenDB(c.path, mod.ReadOnly(), c.openFile)
+	c.db, err = OpenDB(c.path, mod.ReadOnly(), c.openFile, c.pageSize)
 	if err != nil {
 		return fmt.Errorf("could not open database: %w", err)
 	}

@@ -10,11 +10,12 @@ import (
 )

 // OpenDB opens BoltDB instance for write-cache. Opens in read-only mode if ro is true.
-func OpenDB(p string, ro bool, openFile func(string, int, fs.FileMode) (*os.File, error)) (*bbolt.DB, error) {
+func OpenDB(p string, ro bool, openFile func(string, int, fs.FileMode) (*os.File, error), pageSize int) (*bbolt.DB, error) {
fyrchik marked this conversation as resolved (outdated):

It should be noted that this parameter does nothing if the DB already exists.

Done in storage-node-configuration.md
 	return bbolt.Open(filepath.Join(p, dbName), os.ModePerm, &bbolt.Options{
 		NoFreelistSync: true,
 		ReadOnly:       ro,
 		Timeout:        100 * time.Millisecond,
 		OpenFile:       openFile,
+		PageSize:       pageSize,
 	})
 }
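
The resolved conversation above is why the docs table says page_size "does not affect the existing storage": bbolt persists the page size in the database's meta page and reads it back on every subsequent open, so Options.PageSize only matters when the file is first created. A standalone sketch against go.etcd.io/bbolt showing that behaviour (the path and sizes are illustrative):

```go
package main

import (
	"fmt"
	"log"

	"go.etcd.io/bbolt"
)

func main() {
	const path = "/tmp/wc-pagesize-demo.db" // illustrative path

	// Create a fresh database with a non-default page size.
	db, err := bbolt.Open(path, 0o600, &bbolt.Options{PageSize: 16384})
	if err != nil {
		log.Fatal(err)
	}
	db.Close()

	// Reopen it without specifying a page size: bbolt takes the page size
	// from the existing file's meta page, so 16384 is still in effect and
	// any PageSize passed here would be ignored.
	db, err = bbolt.Open(path, 0o600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	fmt.Println(db.Info().PageSize) // 16384, not the OS page size
}
```

Consequently, changing page_size for an already populated shard only takes effect if the write-cache database is recreated.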