[#1375] node: Configure the container cache size
All checks were successful
Tests and linters / Run gofumpt (pull_request) Successful in 2m27s
Build / Build Components (pull_request) Successful in 3m50s
Tests and linters / Staticcheck (pull_request) Successful in 4m19s
DCO action / DCO (pull_request) Successful in 4m57s
Vulncheck / Vulncheck (pull_request) Successful in 6m20s
Pre-commit hooks / Pre-commit (pull_request) Successful in 6m33s
Tests and linters / Lint (pull_request) Successful in 9m57s
Tests and linters / Tests (pull_request) Successful in 11m32s
Tests and linters / Tests with -race (pull_request) Successful in 11m36s
Tests and linters / gopls check (pull_request) Successful in 2m50s
Signed-off-by: Alexander Chuprov <a.chuprov@yadro.com>
This commit is contained in:
parent
b33559754d
commit
3052d113e8
8 changed files with 63 additions and 14 deletions
@@ -165,13 +165,11 @@ type ttlContainerStorage struct {
 	delInfoCache *ttlNetCache[cid.ID, *container.DelInfo]
 }
 
-func newCachedContainerStorage(v container.Source, ttl time.Duration) ttlContainerStorage {
-	const containerCacheSize = 100
-
-	lruCnrCache := newNetworkTTLCache(containerCacheSize, ttl, func(id cid.ID) (*container.Container, error) {
+func newCachedContainerStorage(v container.Source, ttl time.Duration, containerCacheSize uint32) ttlContainerStorage {
+	lruCnrCache := newNetworkTTLCache(int(containerCacheSize), ttl, func(id cid.ID) (*container.Container, error) {
 		return v.Get(id)
 	}, metrics.NewCacheMetrics("container"))
-	lruDelInfoCache := newNetworkTTLCache(containerCacheSize, ttl, func(id cid.ID) (*container.DelInfo, error) {
+	lruDelInfoCache := newNetworkTTLCache(int(containerCacheSize), ttl, func(id cid.ID) (*container.DelInfo, error) {
 		return v.DeletionInfo(id)
 	}, metrics.NewCacheMetrics("container_deletion_info"))
 
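For context, the two caches constructed here are TTL caches with a fixed maximum number of entries, and this change turns that maximum from a hard-coded constant into a parameter. Below is a minimal, self-contained sketch of a size-capped TTL cache; the types and names (ttlCache, newTTLCache) are toy stand-ins, not the project's newNetworkTTLCache, and are only meant to show what the size bound controls.

package main

import (
	"fmt"
	"time"
)

type entry[V any] struct {
	val     V
	addedAt time.Time
}

// ttlCache is a toy fixed-capacity cache: entries expire after ttl, and when
// the capacity is reached the oldest entry is dropped to make room.
type ttlCache[K comparable, V any] struct {
	capacity int
	ttl      time.Duration
	items    map[K]entry[V]
	fetch    func(K) (V, error)
}

func newTTLCache[K comparable, V any](capacity int, ttl time.Duration, fetch func(K) (V, error)) *ttlCache[K, V] {
	return &ttlCache[K, V]{capacity: capacity, ttl: ttl, items: make(map[K]entry[V], capacity), fetch: fetch}
}

func (c *ttlCache[K, V]) Get(k K) (V, error) {
	if e, ok := c.items[k]; ok && time.Since(e.addedAt) < c.ttl {
		return e.val, nil
	}
	v, err := c.fetch(k)
	if err != nil {
		var zero V
		return zero, err
	}
	if len(c.items) >= c.capacity { // the capacity is what container_cache_size now configures
		var oldest K
		first := true
		for k2, e := range c.items {
			if first || e.addedAt.Before(c.items[oldest].addedAt) {
				oldest, first = k2, false
			}
		}
		delete(c.items, oldest)
	}
	c.items[k] = entry[V]{val: v, addedAt: time.Now()}
	return v, nil
}

func main() {
	cache := newTTLCache(2, time.Minute, func(id string) (string, error) { return "container-" + id, nil })
	fmt.Println(cache.Get("a"))
	fmt.Println(cache.Get("b"))
	fmt.Println(cache.Get("c")) // exceeds capacity 2; the oldest entry is evicted
}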
@@ -568,6 +568,8 @@ type cfgMorph struct {
 	// TTL of Sidechain cached values. Non-positive value disables caching.
 	cacheTTL time.Duration
 
+	containerCacheSize uint32
+
 	proxyScriptHash neogoutil.Uint160
 }
 
@@ -30,6 +30,9 @@ const (
 	// FrostfsIDCacheSizeDefault is a default value of APE chain cache.
 	FrostfsIDCacheSizeDefault = 10_000
+
+	// ContainerCacheSizeDefault represents the default size for the container cache.
+	ContainerCacheSizeDefault = 100
 )
 
 var errNoMorphEndpoints = errors.New("no morph chain RPC endpoints, see `morph.rpc_endpoint` section")
@@ -103,6 +106,18 @@ func CacheTTL(c *config.Config) time.Duration {
 	return CacheTTLDefault
 }
 
+// ContainerCacheSize returns the value of "container_cache_size" config parameter
+// from "morph" section.
+//
+// Returns 0 if the value is not a positive integer.
+// Returns ContainerCacheSizeDefault if the value is missing.
+func ContainerCacheSize(c *config.Config) uint32 {
+	if c.Sub(subsection).Value("container_cache_size") == nil {
+		return ContainerCacheSizeDefault
+	}
+	return config.Uint32Safe(c.Sub(subsection), "container_cache_size")
+}
+
 // SwitchInterval returns the value of "switch_interval" config parameter
 // from "morph" section.
 //
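The accessor follows the contract its doc comment describes: the default is returned only when the key is absent, while a present but invalid or non-positive value yields 0 from the safe parser. A self-contained sketch of that lookup-with-default pattern (toy config type and helper names, not the node's internal config package):

package main

import (
	"fmt"
	"strconv"
)

const containerCacheSizeDefault = 100

// toyConfig stands in for the node's config subsection ("morph" here).
type toyConfig map[string]string

// containerCacheSize mirrors the accessor's contract: default when the key is
// missing, otherwise a safe parse that yields 0 for invalid or negative input.
func containerCacheSize(c toyConfig) uint32 {
	raw, ok := c["container_cache_size"]
	if !ok {
		return containerCacheSizeDefault
	}
	v, err := strconv.ParseUint(raw, 10, 32)
	if err != nil {
		return 0
	}
	return uint32(v)
}

func main() {
	fmt.Println(containerCacheSize(toyConfig{}))                              // 100 (default)
	fmt.Println(containerCacheSize(toyConfig{"container_cache_size": "500"})) // 500
	fmt.Println(containerCacheSize(toyConfig{"container_cache_size": "-5"}))  // 0 (not a positive integer)
}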
@@ -87,7 +87,7 @@ func configureEACLAndContainerSources(c *cfg, client *cntClient.Client, cnrSrc c
 		cnrRdr.lister = client
 	} else {
 		// use RPC node as source of Container contract items (with caching)
-		cachedContainerStorage := newCachedContainerStorage(cnrSrc, c.cfgMorph.cacheTTL)
+		cachedContainerStorage := newCachedContainerStorage(cnrSrc, c.cfgMorph.cacheTTL, c.cfgMorph.containerCacheSize)
 		cachedEACLStorage := newCachedEACLStorage(eACLFetcher, c.cfgMorph.cacheTTL)
 
 		subscribeToContainerCreation(c, func(e event.Event) {
@@ -90,6 +90,7 @@ func initMorphComponents(ctx context.Context, c *cfg) {
 
 	var netmapSource netmap.Source
 
+	c.cfgMorph.containerCacheSize = morphconfig.ContainerCacheSize(c.appCfg)
 	c.cfgMorph.cacheTTL = morphconfig.CacheTTL(c.appCfg)
 
 	if c.cfgMorph.cacheTTL == 0 {
@@ -80,6 +80,7 @@ morph:
   cache_ttl: 15s # Sidechain cache TTL value (min interval between similar calls). Negative value disables caching.
   # Default value: block time. It is recommended to have this value less or equal to block time.
   # Cached entities: containers, container lists, eACL tables.
+  container_cache_size: 100 # container_cache_size is the maximum number of containers in the cache.
   switch_interval: 3m # interval b/w RPC switch attempts if the node is connected not to the highest priority node
   rpc_endpoint: # side chain NEO RPC endpoints; are shuffled and used one by one until the first success
     - address: wss://rpc1.morph.frostfs.info:40341/ws
@@ -150,7 +150,8 @@ func (db *DB) selectObjects(tx *bbolt.Tx, cnr cid.ID, fs objectSDK.SearchFilters
 			continue // ignore removed objects
 		}
 
-		if !db.matchSlowFilters(tx, addr, group.slowFilters, currEpoch) {
+		addr, match := db.matchSlowFilters(tx, addr, group.slowFilters, currEpoch)
+		if !match {
 			continue // ignore objects with unmatched slow filters
 		}
 
@@ -382,15 +383,16 @@ func (db *DB) selectObjectID(
 }
 
 // matchSlowFilters return true if object header is matched by all slow filters.
-func (db *DB) matchSlowFilters(tx *bbolt.Tx, addr oid.Address, f objectSDK.SearchFilters, currEpoch uint64) bool {
+func (db *DB) matchSlowFilters(tx *bbolt.Tx, addr oid.Address, f objectSDK.SearchFilters, currEpoch uint64) (oid.Address, bool) {
+	result := addr
 	if len(f) == 0 {
-		return true
+		return result, true
 	}
 
 	buf := make([]byte, addressKeySize)
 	obj, err := db.get(tx, addr, buf, true, false, currEpoch)
 	if err != nil {
-		return false
+		return result, false
 	}
 
 	for i := range f {
@@ -415,23 +417,26 @@ func (db *DB) matchSlowFilters(tx *bbolt.Tx, addr oid.Address, f objectSDK.Searc
 		default: // user attribute
 			v, ok := attributeValue(obj, f[i].Header())
 			if ok {
+				if ech := obj.ECHeader(); ech != nil {
+					result.SetObject(ech.Parent())
+				}
 				data = []byte(v)
 			} else {
-				return f[i].Operation() == objectSDK.MatchNotPresent
+				return result, f[i].Operation() == objectSDK.MatchNotPresent
 			}
 		}
 
 		matchFunc, ok := db.matchers[f[i].Operation()]
 		if !ok {
-			return false
+			return result, false
 		}
 
 		if !matchFunc.matchSlow(f[i].Header(), data, f[i].Value()) {
-			return false
+			return result, false
 		}
 	}
 
-	return true
+	return result, true
 }
 
 func attributeValue(obj *objectSDK.Object, attribute string) (string, bool) {
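The signature change lets matchSlowFilters hand back a possibly rewritten address: when a user-attribute filter matches an erasure-coded chunk, the chunk's address is replaced with its EC parent, so the caller in selectObjects collects the parent object rather than the chunk. A rough, self-contained sketch of that shape, using toy types (address, ecHeader, header) rather than the metabase API:

package main

import "fmt"

// toy stand-ins for oid.Address, the EC header and an object header
type address struct{ container, object string }

type ecHeader struct{ parentID string }

type header struct {
	addr  address
	ec    *ecHeader
	attrs map[string]string
}

// matchUserAttribute reports whether the header carries key=value and returns
// the address to collect: for EC chunks it is rewritten to the EC parent.
func matchUserAttribute(h header, key, value string) (address, bool) {
	result := h.addr
	v, ok := h.attrs[key]
	if !ok {
		return result, false
	}
	if h.ec != nil {
		result.object = h.ec.parentID // surface the parent, not the chunk
	}
	return result, v == value
}

func main() {
	chunk := header{
		addr:  address{container: "cnr", object: "chunk-1"},
		ec:    &ecHeader{parentID: "parent-1"},
		attrs: map[string]string{"path": "test/3/4"},
	}
	if addr, ok := matchUserAttribute(chunk, "path", "test/3/4"); ok {
		fmt.Println(addr.object) // parent-1
	}
}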
@@ -70,6 +70,22 @@ func TestDB_SelectUserAttributes(t *testing.T) {
 	err = putBig(db, raw6)
 	require.NoError(t, err)
 
+	raw7 := testutil.GenerateObjectWithCID(cnr)
+	var attr objectSDK.Attribute
+	attr.SetKey("path")
+	attr.SetValue("test/3/4")
+	attrs := raw7.Attributes()
+	attrs = append(attrs, attr)
+	ech := objectSDK.NewECHeader(objectSDK.ECParentInfo{
+		ID:         oidtest.ID(),
+		Attributes: attrs,
+	}, 0, 3, []byte{}, 0)
+	raw7.SetECHeader(ech)
+	require.NoError(t, putBig(db, raw7))
+	var raw7Parent oid.Address
+	raw7Parent.SetContainer(cnr)
+	raw7Parent.SetObject(ech.Parent())
+
 	fs := objectSDK.SearchFilters{}
 	fs.AddFilter("foo", "bar", objectSDK.MatchStringEqual)
 	testSelect(t, db, cnr, fs,
@@ -100,6 +116,7 @@ func TestDB_SelectUserAttributes(t *testing.T) {
 		object.AddressOf(raw4),
 		object.AddressOf(raw5),
 		object.AddressOf(raw6),
+		object.AddressOf(raw7),
 	)
 
 	fs = objectSDK.SearchFilters{}
@@ -110,6 +127,7 @@ func TestDB_SelectUserAttributes(t *testing.T) {
 		object.AddressOf(raw4),
 		object.AddressOf(raw5),
 		object.AddressOf(raw6),
+		object.AddressOf(raw7),
 	)
 
 	fs = objectSDK.SearchFilters{}
@@ -120,6 +138,7 @@ func TestDB_SelectUserAttributes(t *testing.T) {
 		object.AddressOf(raw4),
 		object.AddressOf(raw5),
 		object.AddressOf(raw6),
+		object.AddressOf(raw7),
 	)
 
 	fs = objectSDK.SearchFilters{}
@@ -131,6 +150,7 @@ func TestDB_SelectUserAttributes(t *testing.T) {
 		object.AddressOf(raw4),
 		object.AddressOf(raw5),
 		object.AddressOf(raw6),
+		object.AddressOf(raw7),
 	)
 
 	fs = objectSDK.SearchFilters{}
@@ -139,6 +159,7 @@ func TestDB_SelectUserAttributes(t *testing.T) {
 		object.AddressOf(raw4),
 		object.AddressOf(raw5),
 		object.AddressOf(raw6),
+		raw7Parent,
 	)
 
 	fs = objectSDK.SearchFilters{}
@@ -147,6 +168,12 @@ func TestDB_SelectUserAttributes(t *testing.T) {
 		object.AddressOf(raw4),
 		object.AddressOf(raw5),
 	)
+
+	fs = objectSDK.SearchFilters{}
+	fs.AddFilter("path", "test/3/4", objectSDK.MatchStringEqual)
+	testSelect(t, db, cnr, fs,
+		raw7Parent,
+	)
 }
 
 func TestDB_SelectRootPhyParent(t *testing.T) {