[#715] node: Unify config parameter names
All checks were successful
DCO action / DCO (pull_request) Successful in 2m54s
Vulncheck / Vulncheck (pull_request) Successful in 3m11s
Tests and linters / Staticcheck (pull_request) Successful in 3m55s
Build / Build Components (1.21) (pull_request) Successful in 3m51s
Build / Build Components (1.20) (pull_request) Successful in 4m6s
Tests and linters / Tests (1.21) (pull_request) Successful in 5m10s
Tests and linters / Lint (pull_request) Successful in 5m24s
Tests and linters / Tests (1.20) (pull_request) Successful in 6m56s
Tests and linters / Tests with -race (pull_request) Successful in 7m46s
Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
Parent: 8d18fa159e
Commit: 07390ad4e3
12 changed files with 54 additions and 54 deletions
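In short, this commit unifies worker-count and pool-size parameter names across the node configuration:

- write-cache: `workers_number` → `flush_worker_count` (Go getter `WorkersNumber()` → `WorkerCount()`);
- shard GC: `expired_collector_workers_count` → `expired_collector_worker_count` (getter, shard option and internal fields renamed to match);
- object service: `object.put.pool_size_remote` → `object.put.remote_pool_size` and `object.put.pool_size_local` → `object.put.local_pool_size`.

The example configurations (ENV, JSON, YAML) and the documentation tables are updated accordingly; short sketches of how the renamed values are read and applied follow the relevant hunks below.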
@@ -126,10 +126,10 @@ type shardCfg struct {
 	subStorages []subStorageCfg
 
 	gcCfg struct {
 		removerBatchSize             int
 		removerSleepInterval         time.Duration
 		expiredCollectorBatchSize    int
-		expiredCollectorWorkersCount int
+		expiredCollectorWorkerCount  int
 	}
 
 	writecacheCfg struct {
@@ -256,7 +256,7 @@ func (a *applicationConfiguration) setShardWriteCacheConfig(newConfig *shardCfg,
 	wc.maxBatchDelay = writeCacheCfg.BoltDB().MaxBatchDelay()
 	wc.maxObjSize = writeCacheCfg.MaxObjectSize()
 	wc.smallObjectSize = writeCacheCfg.SmallObjectSize()
-	wc.flushWorkerCount = writeCacheCfg.WorkersNumber()
+	wc.flushWorkerCount = writeCacheCfg.WorkerCount()
 	wc.sizeLimit = writeCacheCfg.SizeLimit()
 	wc.noSync = writeCacheCfg.NoSync()
 	wc.gcInterval = writeCacheCfg.GCInterval()
@@ -328,7 +328,7 @@ func (a *applicationConfiguration) setGCConfig(newConfig *shardCfg, oldConfig *s
 	newConfig.gcCfg.removerBatchSize = gcCfg.RemoverBatchSize()
 	newConfig.gcCfg.removerSleepInterval = gcCfg.RemoverSleepInterval()
 	newConfig.gcCfg.expiredCollectorBatchSize = gcCfg.ExpiredCollectorBatchSize()
-	newConfig.gcCfg.expiredCollectorWorkersCount = gcCfg.ExpiredCollectorWorkersCount()
+	newConfig.gcCfg.expiredCollectorWorkerCount = gcCfg.ExpiredCollectorWorkerCount()
 }
 
 // internals contains application-specific internals that are created
@@ -888,7 +888,7 @@ func (c *cfg) getShardOpts(shCfg shardCfg) shardOptsWithID {
 		shard.WithRemoverBatchSize(shCfg.gcCfg.removerBatchSize),
 		shard.WithGCRemoverSleepInterval(shCfg.gcCfg.removerSleepInterval),
 		shard.WithExpiredCollectorBatchSize(shCfg.gcCfg.expiredCollectorBatchSize),
-		shard.WithExpiredCollectorWorkersCount(shCfg.gcCfg.expiredCollectorWorkersCount),
+		shard.WithExpiredCollectorWorkerCount(shCfg.gcCfg.expiredCollectorWorkerCount),
 		shard.WithGCWorkerPoolInitializer(func(sz int) util.WorkerPool {
 			pool, err := ants.NewPool(sz)
 			fatalOnErr(err)
@@ -74,7 +74,7 @@ func TestEngineSection(t *testing.T) {
 			require.Equal(t, "tmp/0/cache", wc.Path())
 			require.EqualValues(t, 16384, wc.SmallObjectSize())
 			require.EqualValues(t, 134217728, wc.MaxObjectSize())
-			require.EqualValues(t, 30, wc.WorkersNumber())
+			require.EqualValues(t, 30, wc.WorkerCount())
 			require.EqualValues(t, 3221225472, wc.SizeLimit())
 
 			require.Equal(t, "tmp/0/meta", meta.Path())
@@ -108,7 +108,7 @@ func TestEngineSection(t *testing.T) {
 			require.EqualValues(t, 150, gc.RemoverBatchSize())
 			require.Equal(t, 2*time.Minute, gc.RemoverSleepInterval())
 			require.Equal(t, 1500, gc.ExpiredCollectorBatchSize())
-			require.Equal(t, 15, gc.ExpiredCollectorWorkersCount())
+			require.Equal(t, 15, gc.ExpiredCollectorWorkerCount())
 
 			require.Equal(t, false, sc.RefillMetabase())
 			require.Equal(t, mode.ReadOnly, sc.Mode())
@@ -125,7 +125,7 @@ func TestEngineSection(t *testing.T) {
 			require.Equal(t, "tmp/1/cache", wc.Path())
 			require.EqualValues(t, 16384, wc.SmallObjectSize())
 			require.EqualValues(t, 134217728, wc.MaxObjectSize())
-			require.EqualValues(t, 30, wc.WorkersNumber())
+			require.EqualValues(t, 30, wc.WorkerCount())
 			require.EqualValues(t, 4294967296, wc.SizeLimit())
 
 			require.Equal(t, "tmp/1/meta", meta.Path())
@@ -157,7 +157,7 @@ func TestEngineSection(t *testing.T) {
 			require.EqualValues(t, 200, gc.RemoverBatchSize())
 			require.Equal(t, 5*time.Minute, gc.RemoverSleepInterval())
 			require.Equal(t, gcconfig.ExpiredCollectorBatchSizeDefault, gc.ExpiredCollectorBatchSize())
-			require.Equal(t, gcconfig.ExpiredCollectorWorkersCountDefault, gc.ExpiredCollectorWorkersCount())
+			require.Equal(t, gcconfig.ExpiredCollectorWorkersCountDefault, gc.ExpiredCollectorWorkerCount())
 
 			require.Equal(t, true, sc.RefillMetabase())
 			require.Equal(t, mode.ReadWrite, sc.Mode())
@@ -63,14 +63,14 @@ func (x *Config) RemoverSleepInterval() time.Duration {
 	return RemoverSleepIntervalDefault
 }
 
-// ExpiredCollectorWorkersCount returns the value of "expired_collector_workers_count"
+// ExpiredCollectorWorkerCount returns the value of "expired_collector_worker_count"
 // config parameter.
 //
 // Returns ExpiredCollectorWorkersCountDefault if the value is not a positive number.
-func (x *Config) ExpiredCollectorWorkersCount() int {
+func (x *Config) ExpiredCollectorWorkerCount() int {
 	s := config.IntSafe(
 		(*config.Config)(x),
-		"expired_collector_workers_count",
+		"expired_collector_worker_count",
 	)
 
 	if s > 0 {
@@ -106,13 +106,13 @@ func (x *Config) MaxObjectSize() uint64 {
 	return MaxSizeDefault
 }
 
-// WorkersNumber returns the value of "workers_number" config parameter.
+// WorkerCount returns the value of "flush_worker_count" config parameter.
 //
 // Returns WorkersNumberDefault if the value is not a positive number.
-func (x *Config) WorkersNumber() int {
+func (x *Config) WorkerCount() int {
 	c := config.IntSafe(
 		(*config.Config)(x),
-		"workers_number",
+		"flush_worker_count",
 	)
 
 	if c > 0 {
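Both getters above follow the same convention: the raw value is read with `config.IntSafe` and the package default is returned whenever the value is not a positive number. A minimal, self-contained sketch of that fallback rule (the names and the default below are illustrative stand-ins, not the repository's actual identifiers):

```go
package main

import "fmt"

// defaultWorkerCount is an illustrative stand-in for package defaults such as
// WorkersNumberDefault or ExpiredCollectorWorkersCountDefault.
const defaultWorkerCount = 20

// workerCount mirrors the fallback convention of WorkerCount() and
// ExpiredCollectorWorkerCount(): a non-positive configured value is
// ignored in favour of the default.
func workerCount(configured int) int {
	if configured > 0 {
		return configured
	}
	return defaultWorkerCount
}

func main() {
	fmt.Println(workerCount(30)) // 30: a positive value is used as-is
	fmt.Println(workerCount(0))  // 20: zero (unset) falls back to the default
	fmt.Println(workerCount(-5)) // 20: negative values are also rejected
}
```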
@@ -28,11 +28,11 @@ func Put(c *config.Config) PutConfig {
 	}
 }
 
-// PoolSizeRemote returns the value of "pool_size_remote" config parameter.
+// PoolSizeRemote returns the value of "remote_pool_size" config parameter.
 //
 // Returns PutPoolSizeDefault if the value is not a positive number.
 func (g PutConfig) PoolSizeRemote() int {
-	v := config.Int(g.cfg, "pool_size_remote")
+	v := config.Int(g.cfg, "remote_pool_size")
 	if v > 0 {
 		return int(v)
 	}
@@ -40,11 +40,11 @@ func (g PutConfig) PoolSizeRemote() int {
 	return PutPoolSizeDefault
 }
 
-// PoolSizeLocal returns the value of "pool_size_local" config parameter.
+// PoolSizeLocal returns the value of "local_pool_size" config parameter.
 //
 // Returns PutPoolSizeDefault if the value is not a positive number.
 func (g PutConfig) PoolSizeLocal() int {
-	v := config.Int(g.cfg, "pool_size_local")
+	v := config.Int(g.cfg, "local_pool_size")
 	if v > 0 {
 		return int(v)
 	}
@@ -84,8 +84,8 @@ FROSTFS_REPLICATOR_PUT_TIMEOUT=15s
 FROSTFS_REPLICATOR_POOL_SIZE=10
 
 # Object service section
-FROSTFS_OBJECT_PUT_POOL_SIZE_REMOTE=100
-FROSTFS_OBJECT_PUT_POOL_SIZE_LOCAL=200
+FROSTFS_OBJECT_PUT_REMOTE_POOL_SIZE=100
+FROSTFS_OBJECT_PUT_LOCAL_POOL_SIZE=200
 FROSTFS_OBJECT_PUT_SKIP_SESSION_TOKEN_ISSUER_VERIFICATION=true
 FROSTFS_OBJECT_DELETE_TOMBSTONE_LIFETIME=10
 
@@ -103,7 +103,7 @@ FROSTFS_STORAGE_SHARD_0_WRITECACHE_NO_SYNC=true
 FROSTFS_STORAGE_SHARD_0_WRITECACHE_PATH=tmp/0/cache
 FROSTFS_STORAGE_SHARD_0_WRITECACHE_SMALL_OBJECT_SIZE=16384
 FROSTFS_STORAGE_SHARD_0_WRITECACHE_MAX_OBJECT_SIZE=134217728
-FROSTFS_STORAGE_SHARD_0_WRITECACHE_WORKERS_NUMBER=30
+FROSTFS_STORAGE_SHARD_0_WRITECACHE_FLUSH_WORKER_COUNT=30
 FROSTFS_STORAGE_SHARD_0_WRITECACHE_CAPACITY=3221225472
 ### Metabase config
 FROSTFS_STORAGE_SHARD_0_METABASE_PATH=tmp/0/meta
@@ -142,7 +142,7 @@ FROSTFS_STORAGE_SHARD_0_GC_REMOVER_SLEEP_INTERVAL=2m
 #### Limit of objects to be marked expired by the garbage collector
 FROSTFS_STORAGE_SHARD_0_GC_EXPIRED_COLLECTOR_BATCH_SIZE=1500
 #### Limit of concurrent workers collecting expired objects by the garbage collector
-FROSTFS_STORAGE_SHARD_0_GC_EXPIRED_COLLECTOR_WORKERS_COUNT=15
+FROSTFS_STORAGE_SHARD_0_GC_EXPIRED_COLLECTOR_WORKER_COUNT=15
 
 ## 1 shard
 ### Flag to refill Metabase from BlobStor
@@ -154,7 +154,7 @@ FROSTFS_STORAGE_SHARD_1_WRITECACHE_ENABLED=true
 FROSTFS_STORAGE_SHARD_1_WRITECACHE_PATH=tmp/1/cache
 FROSTFS_STORAGE_SHARD_1_WRITECACHE_SMALL_OBJECT_SIZE=16384
 FROSTFS_STORAGE_SHARD_1_WRITECACHE_MAX_OBJECT_SIZE=134217728
-FROSTFS_STORAGE_SHARD_1_WRITECACHE_WORKERS_NUMBER=30
+FROSTFS_STORAGE_SHARD_1_WRITECACHE_FLUSH_WORKER_COUNT=30
 FROSTFS_STORAGE_SHARD_1_WRITECACHE_CAPACITY=4294967296
 ### Metabase config
 FROSTFS_STORAGE_SHARD_1_METABASE_PATH=tmp/1/meta
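For environment-based configuration the renames are mechanical, as the hunks above show: `FROSTFS_OBJECT_PUT_POOL_SIZE_REMOTE`/`..._POOL_SIZE_LOCAL` become `FROSTFS_OBJECT_PUT_REMOTE_POOL_SIZE`/`..._LOCAL_POOL_SIZE`, `FROSTFS_STORAGE_SHARD_0_WRITECACHE_WORKERS_NUMBER` (and the shard `1` counterpart) becomes `..._WRITECACHE_FLUSH_WORKER_COUNT`, and `FROSTFS_STORAGE_SHARD_0_GC_EXPIRED_COLLECTOR_WORKERS_COUNT` becomes `..._GC_EXPIRED_COLLECTOR_WORKER_COUNT`.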
@@ -129,8 +129,8 @@
       "tombstone_lifetime": 10
     },
     "put": {
-      "pool_size_remote": 100,
-      "pool_size_local": 200,
+      "remote_pool_size": 100,
+      "local_pool_size": 200,
       "skip_session_token_issuer_verification": true
     }
   },
@@ -147,7 +147,7 @@
           "path": "tmp/0/cache",
           "small_object_size": 16384,
           "max_object_size": 134217728,
-          "workers_number": 30,
+          "flush_worker_count": 30,
           "capacity": 3221225472
         },
         "metabase": {
@@ -190,7 +190,7 @@
           "remover_batch_size": 150,
           "remover_sleep_interval": "2m",
           "expired_collector_batch_size": 1500,
-          "expired_collector_workers_count": 15
+          "expired_collector_worker_count": 15
         }
       },
       "1": {
@@ -203,7 +203,7 @@
           "memcache_capacity": 2147483648,
           "small_object_size": 16384,
           "max_object_size": 134217728,
-          "workers_number": 30,
+          "flush_worker_count": 30,
           "capacity": 4294967296
         },
         "metabase": {
@@ -108,8 +108,8 @@ object:
   delete:
     tombstone_lifetime: 10 # tombstone "local" lifetime in epochs
   put:
-    pool_size_remote: 100 # number of async workers for remote PUT operations
-    pool_size_local: 200 # number of async workers for local PUT operations
+    remote_pool_size: 100 # number of async workers for remote PUT operations
+    local_pool_size: 200 # number of async workers for local PUT operations
     skip_session_token_issuer_verification: true # session token issuer verification will be skipped if true
 
 storage:
@@ -126,7 +126,7 @@ storage:
        type: bbolt
        small_object_size: 16k # size threshold for "small" objects which are cached in key-value DB, not in FS, bytes
        max_object_size: 134217728 # size threshold for "big" objects which bypass write-cache and go to the storage directly, bytes
-       workers_number: 30 # number of write-cache flusher threads
+       flush_worker_count: 30 # number of write-cache flusher threads
 
      metabase:
        perm: 0644 # permissions for metabase files(directories: +x for current user and group)
|
@ -196,7 +196,7 @@ storage:
|
||||||
remover_batch_size: 150 # number of objects to be removed by the garbage collector
|
remover_batch_size: 150 # number of objects to be removed by the garbage collector
|
||||||
remover_sleep_interval: 2m # frequency of the garbage collector invocation
|
remover_sleep_interval: 2m # frequency of the garbage collector invocation
|
||||||
expired_collector_batch_size: 1500 # number of objects to be marked expired by the garbage collector
|
expired_collector_batch_size: 1500 # number of objects to be marked expired by the garbage collector
|
||||||
expired_collector_workers_count: 15 # number of concurrent workers collecting expired objects by the garbage collector
|
expired_collector_worker_count: 15 # number of concurrent workers collecting expired objects by the garbage collector
|
||||||
|
|
||||||
1:
|
1:
|
||||||
writecache:
|
writecache:
|
||||||
|
|
|
@@ -50,8 +50,8 @@ prometheus:
 
 object:
   put:
-    pool_size_remote: 100
-    pool_size_local: 100
+    remote_pool_size: 100
+    local_pool_size: 100
 
 morph:
   rpc_endpoint:
@@ -243,7 +243,7 @@ gc:
   remover_batch_size: 200
   remover_sleep_interval: 5m
   expired_collector_batch_size: 500
-  expired_collector_workers_count: 5
+  expired_collector_worker_count: 5
 ```
 
 | Parameter | Type | Default value | Description |
@@ -251,7 +251,7 @@ gc:
 | `remover_batch_size` | `int` | `100` | Amount of objects to grab in a single batch. |
 | `remover_sleep_interval` | `duration` | `1m` | Time to sleep between iterations. |
 | `expired_collector_batch_size` | `int` | `500` | Max amount of expired objects to grab in a single batch. |
-| `expired_collector_workers_count` | `int` | `5` | Max amount of concurrent expired objects workers. |
+| `expired_collector_worker_count` | `int` | `5` | Max amount of concurrent expired objects workers. |
 
 ### `metabase` subsection
 
@@ -280,7 +280,7 @@ writecache:
   capacity: 4294967296
   small_object_size: 16384
   max_object_size: 134217728
-  workers_number: 30
+  flush_worker_count: 30
 ```
 
 | Parameter | Type | Default value | Description |
@@ -290,7 +290,7 @@ writecache:
 | `capacity` | `size` | unrestricted | Approximate maximum size of the writecache. If the writecache is full, objects are written to the blobstor directly. |
 | `small_object_size` | `size` | `32K` | Maximum object size for "small" objects. This objects are stored in a key-value database instead of a file-system. |
 | `max_object_size` | `size` | `64M` | Maximum object size allowed to be stored in the writecache. |
-| `workers_number` | `int` | `20` | Amount of background workers that move data from the writecache to the blobstor. |
+| `flush_worker_count` | `int` | `20` | Amount of background workers that move data from the writecache to the blobstor. |
 | `max_batch_size` | `int` | `1000` | Maximum amount of small object `PUT` operations to perform in a single transaction. |
 | `max_batch_delay` | `duration` | `10ms` | Maximum delay before a batch starts. |
 
@@ -415,7 +415,7 @@ replicator:
 | Parameter | Type | Default value | Description |
 |---------------|------------|----------------------------------------|---------------------------------------------|
 | `put_timeout` | `duration` | `5s` | Timeout for performing the `PUT` operation. |
-| `pool_size` | `int` | Equal to `object.put.pool_size_remote` | Maximum amount of concurrent replications. |
+| `pool_size` | `int` | Equal to `object.put.remote_pool_size` | Maximum amount of concurrent replications. |
 
 # `object` section
 Contains object-service related parameters.
@@ -423,14 +423,14 @@ Contains object-service related parameters.
 ```yaml
 object:
   put:
-    pool_size_remote: 100
+    remote_pool_size: 100
 ```
 
 | Parameter | Type | Default value | Description |
 |-----------------------------|-------|---------------|------------------------------------------------------------------------------------------------|
 | `delete.tombstone_lifetime` | `int` | `5` | Tombstone lifetime for removed objects in epochs. |
-| `put.pool_size_remote` | `int` | `10` | Max pool size for performing remote `PUT` operations. Used by Policer and Replicator services. |
-| `put.pool_size_local` | `int` | `10` | Max pool size for performing local `PUT` operations. Used by Policer and Replicator services. |
+| `put.remote_pool_size` | `int` | `10` | Max pool size for performing remote `PUT` operations. Used by Policer and Replicator services. |
+| `put.local_pool_size` | `int` | `10` | Max pool size for performing local `PUT` operations. Used by Policer and Replicator services. |
 
 # `runtime` section
 Contains runtime parameters.
@@ -115,8 +115,8 @@ type gcCfg struct {
 
 	workerPoolInit func(int) util.WorkerPool
 
-	expiredCollectorWorkersCount int
+	expiredCollectorWorkerCount int
 	expiredCollectorBatchSize   int
 
 	metrics GCMectrics
 
@@ -313,16 +313,16 @@ func (s *Shard) removeGarbage(pctx context.Context) (result gcRunResult) {
 	return
 }
 
-func (s *Shard) getExpiredObjectsParameters() (workersCount, batchSize int) {
-	workersCount = minExpiredWorkers
+func (s *Shard) getExpiredObjectsParameters() (workerCount, batchSize int) {
+	workerCount = minExpiredWorkers
 	batchSize = minExpiredBatchSize
 
 	if s.gc.gcCfg.expiredCollectorBatchSize > batchSize {
 		batchSize = s.gc.gcCfg.expiredCollectorBatchSize
 	}
 
-	if s.gc.gcCfg.expiredCollectorWorkersCount > workersCount {
-		workersCount = s.gc.gcCfg.expiredCollectorWorkersCount
+	if s.gc.gcCfg.expiredCollectorWorkerCount > workerCount {
+		workerCount = s.gc.gcCfg.expiredCollectorWorkerCount
 	}
 	return
 }
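The renamed `getExpiredObjectsParameters` keeps `minExpiredWorkers` and `minExpiredBatchSize` as lower bounds: a configured value only takes effect when it exceeds the corresponding minimum. A standalone sketch of that clamping rule (the constant values below are illustrative; the shard package defines its own):

```go
package main

import "fmt"

// Illustrative floors; the shard package defines its own minExpiredWorkers
// and minExpiredBatchSize constants.
const (
	minExpiredWorkers   = 1
	minExpiredBatchSize = 100
)

// expiredObjectsParameters mirrors the logic of getExpiredObjectsParameters:
// configured values below the floor are ignored.
func expiredObjectsParameters(cfgWorkers, cfgBatch int) (workerCount, batchSize int) {
	workerCount = minExpiredWorkers
	batchSize = minExpiredBatchSize
	if cfgBatch > batchSize {
		batchSize = cfgBatch
	}
	if cfgWorkers > workerCount {
		workerCount = cfgWorkers
	}
	return
}

func main() {
	w, b := expiredObjectsParameters(15, 1500)
	fmt.Println(w, b) // 15 1500: both configured values exceed the floors
	w, b = expiredObjectsParameters(0, 50)
	fmt.Println(w, b) // 1 100: values at or below the floors are clamped
}
```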
@@ -365,11 +365,11 @@ func WithExpiredCollectorBatchSize(size int) Option {
 	}
 }
 
-// WithExpiredCollectorWorkersCount returns option to set concurrent
+// WithExpiredCollectorWorkerCount returns option to set concurrent
 // workers count of expired object collection operation.
-func WithExpiredCollectorWorkersCount(count int) Option {
+func WithExpiredCollectorWorkerCount(count int) Option {
 	return func(c *cfg) {
-		c.gcCfg.expiredCollectorWorkersCount = count
+		c.gcCfg.expiredCollectorWorkerCount = count
 	}
 }
 
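For callers, the rename only changes the option name: shard construction passes the GC limits through functional options exactly as in the `getShardOpts` hunk above. A hedged usage sketch — the import path is assumed from the repository layout, the values are taken from the example configs in this diff, and real setup wires many more options:

```go
package main

import (
	"time"

	// Assumed import path for this repository's shard package.
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
)

// gcOptions collects the GC-related shard options shown in this diff.
func gcOptions() []shard.Option {
	return []shard.Option{
		shard.WithRemoverBatchSize(150),
		shard.WithGCRemoverSleepInterval(2 * time.Minute),
		shard.WithExpiredCollectorBatchSize(1500),
		// Renamed from WithExpiredCollectorWorkersCount in this commit.
		shard.WithExpiredCollectorWorkerCount(15),
	}
}

func main() {
	_ = gcOptions()
}
```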