logger:
  level: debug # logger level: one of "debug", "info" (default), "warn", "error", "dpanic", "panic", "fatal"
  destination: journald # logger destination: one of "stdout" (default), "journald"

systemdnotify:
  enabled: true

pprof:
  enabled: true
  address: localhost:6060 # endpoint for Node profiling
  shutdown_timeout: 15s # timeout for profiling HTTP server graceful shutdown
  block_rate: 10000 # sampling rate: an average of one blocking event per rate nanoseconds spent blocked is reported; "1" reports every blocking event; "0" disables the profiler
  mutex_rate: 10000 # sampling rate: on average 1/rate events are reported; "0" disables the profiler

prometheus:
  enabled: true
  address: localhost:9090 # endpoint for Node metrics
  shutdown_timeout: 15s # timeout for metrics HTTP server graceful shutdown

node:
  key: ./wallet.key # path to a binary private key
  wallet:
    path: "./wallet.json" # path to a NEO wallet; ignored if key is present
    address: "NcpJzXcSDrh5CCizf4K9Ro6w4t59J5LKzz" # address of a NEO account in the wallet; ignored if key is present
    password: "password" # password for a NEO account in the wallet; ignored if key is present
  addresses: # list of addresses announced by the Storage node in the Network map
    - s01.frostfs.devenv:8080
    - /dns4/s02.frostfs.devenv/tcp/8081
    - grpc://127.0.0.1:8082
    - grpcs://localhost:8083
  attribute_0: "Price:11"
  attribute_1: UN-LOCODE:RU MSK # node attributes follow the `attribute_N` scheme; see the note below
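  # Note on node attributes: as the two entries above suggest, attributes are
  # declared as sequentially numbered `attribute_N` keys, each holding a
  # "Key:Value" pair. A sketch of adding one more attribute is shown below;
  # `attribute_2` and its key/value are hypothetical, not part of this config:
  # attribute_2: "SomeKey:SomeValue"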
  relay: true # start the Storage node in relay mode without bootstrapping into the Network map
  persistent_sessions:
    path: /sessions # path to the persistent session tokens file of the Storage node (default: in-memory sessions)
  persistent_state:
    path: /state # path to the persistent state file of the Storage node

grpc:
  - endpoint: s01.frostfs.devenv:8080 # endpoint for gRPC server
    tls:
      enabled: true # use TLS for a gRPC connection (min version is TLS 1.2)
      certificate: /path/to/cert # path to TLS certificate
      key: /path/to/key # path to TLS key
  - endpoint: s02.frostfs.devenv:8080 # endpoint for gRPC server
    tls:
      enabled: false # use TLS for a gRPC connection
  - endpoint: s03.frostfs.devenv:8080
    tls:
      enabled: true
      use_insecure_crypto: true # allow using insecure ciphers with TLS 1.2

tree:
  enabled: true
  cache_size: 15
  replication_worker_count: 32
  replication_channel_capacity: 32
  replication_timeout: 5s
  sync_interval: 1h
  authorized_keys: # list of hex-encoded public keys that have rights to use the Tree Service with frostfs-cli
    - 0397d207ea77909f7d66fa6f36d08daae22ace672be7ea4f53513484dde8a142a0
    - 02053819235c20d784132deba10bb3061629e3a5c819a039ef091841d9d35dad56

control:
  authorized_keys: # list of hex-encoded public keys that have rights to use the Control Service
    - 035839e45d472a3b7769a2a1bd7d54c4ccd4943c3b40f547870e83a8fcbfb3ce11
    - 028f42cfcb74499d7b15b35d9bff260a1c8d27de4f446a627406a382d8961486d6
  grpc:
    endpoint: localhost:8090 # endpoint the Control Service listens on

contracts: # side chain NEOFS contract script hashes; optional, override values retrieved from the NNS contract
  balance: 5263abba1abedbf79bb57f3e40b50b4425d2d6cd
  container: 5d084790d7aa36cea7b53fe897380dab11d2cd3c
  netmap: 0cce9e948dca43a6b592efe59ddb4ecb89bdd9ca
  proxy: ad7c6b55b737b696e5c82c85445040964a03e97f

morph:
  dial_timeout: 30s # timeout for side chain NEO RPC client connection
  cache_ttl: 15s # side chain cache TTL (minimum interval between similar calls); a negative value disables caching.
                 # Default value: block time. It is recommended to keep this value less than or equal to block time.
                 # Cached entities: containers, container lists, eACL tables.
  switch_interval: 3m # interval between RPC switch attempts if the node is not connected to the highest-priority node
  rpc_endpoint: # side chain NEO RPC endpoints; they are shuffled and used one by one until the first success
    - address: wss://rpc1.morph.frostfs.info:40341/ws
      priority: 0
      trusted_ca_list:
        - "/path/to/ca.pem"
      certificate: "/path/to/cert"
      key: "/path/to/key"
    - address: wss://rpc2.morph.frostfs.info:40341/ws
      priority: 2
  ape_chain_cache_size: 100000

apiclient:
  dial_timeout: 15s # timeout for FrostFS API client connection
  stream_timeout: 20s # timeout for individual operations in a streaming RPC
  allow_external: true # allow falling back to addresses in the `ExternalAddr` attribute
  reconnect_timeout: 30s # time to wait before reconnecting to a failed node

policer:
  head_timeout: 15s # timeout for the Policer HEAD remote operation

replicator:
  put_timeout: 15s # timeout for the Replicator PUT remote operation
  pool_size: 10 # maximum number of concurrent replications

object:
  delete:
    tombstone_lifetime: 10 # tombstone "local" lifetime in epochs
  put:
    remote_pool_size: 100 # number of async workers for remote PUT operations
    local_pool_size: 200 # number of async workers for local PUT operations
    skip_session_token_issuer_verification: true # session token issuer verification will be skipped if true

storage:
  # note: shard configuration can be omitted for a relay node (see `node.relay`)
  shard_pool_size: 15 # size of per-shard worker pools used for PUT operations
  shard_ro_error_threshold: 100 # number of errors that must occur before the shard is switched to read-only (default: 0, ignore errors)
  shard:
    default: # section with the default shard parameters
      resync_metabase: true # sync metabase with blobstor on start; expensive, leave false unless you fully understand the consequences
      writecache:
        enabled: true
        small_object_size: 16k # size threshold for "small" objects which are cached in the key-value DB rather than in the FS, bytes
        max_object_size: 134217728 # size threshold (128 MiB) for "big" objects which bypass the write-cache and go to the storage directly, bytes
        flush_worker_count: 30 # number of write-cache flusher threads
      metabase:
        perm: 0644 # permissions for metabase files (directories: +x for current user and group)
        max_batch_size: 200
        max_batch_delay: 20ms
      pilorama:
        max_batch_delay: 5ms # maximum delay before a batch of operations is executed
        max_batch_size: 100 # maximum number of operations in a single batch
      compress: false # turn on/off zstd (level 3) compression of stored objects
      small_object_size: 100 kb # size threshold for "small" objects which are cached in the key-value DB rather than in the FS, bytes
      blobstor:
        - size: 4m # approximate size limit of a single blobovnicza instance; total size will be: size * width^(depth+1), bytes (see the note after this section)
          perm: 0644 # permissions for blobstor files (directories: +x for current user and group)
          depth: 1 # max depth of the object tree storage in the key-value DB
          width: 4 # max width of the object tree storage in the key-value DB
          opened_cache_capacity: 50 # maximum number of opened database files
          opened_cache_ttl: 5m # TTL for an opened database file
          opened_cache_exp_interval: 15s # cache cleanup interval for expired blobovniczas
        - perm: 0644 # permissions for blobstor files (directories: +x for current user and group)
          depth: 5 # max depth of the object tree storage in the FS
      gc:
        remover_batch_size: 200 # number of objects to be removed by the garbage collector
        remover_sleep_interval: 5m # frequency of garbage collector invocation
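    # A worked example of the blobovnicza sizing formula quoted above
    # (size * width^(depth+1)): with the default-shard values size=4m,
    # width=4 and depth=1, one blobovnicza tree holds at most about
    # 4m * 4^(1+1) = 4m * 16 = 64m. This is only an estimate derived
    # from that comment, not an exact accounting of on-disk usage.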
    0:
      mode: read-only # mode of the shard; must be one of:
                      #   read-write (default)
                      #   read-only
                      #   degraded
                      #   degraded-read-only
                      #   disabled (do not work with the shard, but keep it in the config)
      resync_metabase: false # sync metabase with blobstor on start; expensive, leave false unless you fully understand the consequences
      resync_metabase_worker_count: 100
      rebuild_worker_count: 1000 # number of concurrent storage rebuild workers
      writecache:
        enabled: false
        no_sync: true
        path: tmp/0/cache # write-cache root directory
        capacity: 3221225472 # approximate write-cache total size, bytes
        max_object_count: 49
        page_size: 4k
      metabase:
        path: tmp/0/meta # metabase path
        max_batch_size: 100
        max_batch_delay: 10ms
      compress: true # turn on/off zstd (level 3) compression of stored objects
      compression_exclude_content_types:
        - audio/*
        - video/*
      compression_estimate_compressibility: true
      compression_estimate_compressibility_threshold: 0.7
      blobstor:
        - type: blobovnicza
          path: tmp/0/blob/blobovnicza
          init_worker_count: 10 # number of workers to initialize blobovniczas
          rebuild_drop_timeout: 30s # timeout before dropping a single blobovnicza
          opened_cache_ttl: 1m
          opened_cache_exp_interval: 30s
        - type: fstree
          path: tmp/0/blob # blobstor path
      pilorama:
        path: tmp/0/blob/pilorama.db # path to the pilorama database; if omitted, a `pilorama.db` file is created in `blobstor.path`
        max_batch_delay: 10ms
        max_batch_size: 200
      gc:
        remover_batch_size: 150 # number of objects to be removed by the garbage collector
        remover_sleep_interval: 2m # frequency of garbage collector invocation
        expired_collector_batch_size: 1500 # number of objects to be marked expired by the garbage collector
        expired_collector_worker_count: 15 # number of concurrent workers collecting expired objects for the garbage collector
    1:
      writecache:
        path: tmp/1/cache # write-cache root directory
        capacity: 4 G # approximate write-cache total size, bytes
      metabase:
        path: tmp/1/meta # metabase path
      blobstor:
        - type: blobovnicza
          path: tmp/1/blob/blobovnicza
        - type: fstree
          path: tmp/1/blob # blobstor path
          no_sync: true
      pilorama:
        path: tmp/1/blob/pilorama.db
        no_sync: true # USE WITH CAUTION. Returns to the user before pages have been persisted.
        perm: 0644 # permission to use for the database file and intermediate directories

tracing:
  enabled: true
  exporter: "otlp_grpc"
  endpoint: "localhost"

runtime:
  soft_memory_limit: 1gb

audit:
  enabled: true
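# For orientation, a minimal storage section might look like the commented-out
# sketch below. This is only a sketch: the paths are hypothetical, and every
# omitted parameter is assumed to fall back to the `shard.default` values
# defined above or to built-in defaults.
#
# storage:
#   shard:
#     0:
#       metabase:
#         path: /srv/frostfs/meta # hypothetical path
#       blobstor:
#         - type: fstree
#           path: /srv/frostfs/blob # hypothetical path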