[#1248] node: Do not update cache twice #30

Merged
carpawell merged 1 commit from carpawell/fix/multiple-cache-update-requests-FROST into master 2023-04-21 09:45:55 +00:00
2 changed files with 56 additions and 9 deletions


@@ -69,6 +69,7 @@ Changelog for FrostFS Node
- Non-alphabet nodes do not try to handle alphabet events (#181)
- Failing SN and IR transactions because of incorrect scopes (#2230, #2263)
- Global scope used for some transactions (#2230, #2263)
- Concurrent morph cache misses (#30)
### Removed
### Updated


@@ -24,9 +24,19 @@ type valueWithTime[V any] struct {
e error
}
// valueInProgress is a struct that contains
// values that are being fetched/updated.
type valueInProgress[V any] struct {
m sync.RWMutex
v V
e error
}
// entity that provides TTL cache interface.
type ttlNetCache[K comparable, V any] struct {
ttl time.Duration
m sync.RWMutex // protects progMap
progMap map[K]*valueInProgress[V] // contains fetch-in-progress keys
ttl time.Duration
sz int
@@ -41,32 +51,68 @@ func newNetworkTTLCache[K comparable, V any](sz int, ttl time.Duration, netRdr n
fatalOnErr(err)
return &ttlNetCache[K, V]{
ttl: ttl,
sz: sz,
cache: cache,
netRdr: netRdr,
ttl: ttl,
sz: sz,
cache: cache,
netRdr: netRdr,
progMap: make(map[K]*valueInProgress[V]),
}
}
func waitForUpdate[V any](vip *valueInProgress[V]) (V, error) {
vip.m.RLock()
defer vip.m.RUnlock()
return vip.v, vip.e
}
// reads value by the key.
//
// updates the value from the network on cache miss or by TTL.
//
// returned value should not be modified.
func (c *ttlNetCache[K, V]) get(key K) (V, error) {
val, ok := c.cache.Peek(key)
valWithTime, ok := c.cache.Peek(key)
if ok {
if time.Since(val.t) < c.ttl {
return val.v, val.e
if time.Since(valWithTime.t) < c.ttl {
return valWithTime.v, valWithTime.e
}
c.cache.Remove(key)
}
v, err := c.netRdr(key)
c.m.RLock()
valInProg, ok := c.progMap[key]
c.m.RUnlock()
if ok {
return waitForUpdate(valInProg)
}
c.m.Lock()
valInProg, ok = c.progMap[key]
if ok {
c.m.Unlock()
return waitForUpdate(valInProg)
}
valInProg = &valueInProgress[V]{}
valInProg.m.Lock()
c.progMap[key] = valInProg
c.m.Unlock()
v, err := c.netRdr(key)
c.set(key, v, err)
Review

Do we still need this line?

Review

yes, that's still a cache. why did you doubt? got the value (not concurrently with this PR) -- updated the cache

Review

Because below we explicitly set `valInProg` values

Review

> below we explicitly set `valInProg` values

it is only for concurrent routines (the ones that wait while the network job is done). all the others will take the value directly from the cache, because no cache miss will happen after an update (well, for 30s only, but anyway)
valInProg.v = v
valInProg.e = err
valInProg.m.Unlock()
c.m.Lock()
delete(c.progMap, key)
c.m.Unlock()
return v, err
}
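
For reference, below is a minimal, self-contained sketch of the pattern this PR introduces: double-checked locking over a per-key in-progress entry whose write-locked mutex releases the waiters. It is not the frostfs-node code; a plain map stands in for the LRU cache, and the names `dedupCache`, `entry`, `inProgress` and `fetcher` are illustrative stand-ins for `ttlNetCache`, `valueWithTime`, `valueInProgress` and `netRdr`.

```go
// cache_sketch.go -- a minimal sketch of the deduplication pattern from this PR.
// A plain map stands in for the LRU cache used in frostfs-node; all names here
// are illustrative, not the real ones.
package main

import (
	"sync"
	"time"
)

// entry mirrors valueWithTime: a cached value, its error and fetch time.
type entry[V any] struct {
	v V
	e error
	t time.Time
}

// inProgress mirrors valueInProgress: the fetching goroutine holds m.Lock()
// until the network call finishes; waiters block on m.RLock() and then read
// the published value and error.
type inProgress[V any] struct {
	m sync.RWMutex
	v V
	e error
}

type dedupCache[K comparable, V any] struct {
	ttl     time.Duration
	fetcher func(K) (V, error) // stands in for netRdr

	mu      sync.RWMutex         // protects values and progMap
	values  map[K]*entry[V]      // stands in for the LRU cache
	progMap map[K]*inProgress[V] // keys with a fetch in flight
}

func newDedupCache[K comparable, V any](ttl time.Duration, f func(K) (V, error)) *dedupCache[K, V] {
	return &dedupCache[K, V]{
		ttl:     ttl,
		fetcher: f,
		values:  make(map[K]*entry[V]),
		progMap: make(map[K]*inProgress[V]),
	}
}

func (c *dedupCache[K, V]) get(key K) (V, error) {
	// Fast path: a fresh cached value, or a fetch that is already in flight.
	c.mu.RLock()
	if e, ok := c.values[key]; ok && time.Since(e.t) < c.ttl {
		c.mu.RUnlock()
		return e.v, e.e
	}
	ip, fetching := c.progMap[key]
	c.mu.RUnlock()

	if fetching {
		ip.m.RLock() // blocks until the fetching goroutine unlocks
		defer ip.m.RUnlock()
		return ip.v, ip.e
	}

	// Double-checked locking: another goroutine may have registered the key
	// between the RUnlock above and the Lock below.
	c.mu.Lock()
	if ip, fetching = c.progMap[key]; fetching {
		c.mu.Unlock()
		ip.m.RLock()
		defer ip.m.RUnlock()
		return ip.v, ip.e
	}
	ip = &inProgress[V]{}
	ip.m.Lock() // held for the whole fetch, so waiters block on RLock
	c.progMap[key] = ip
	c.mu.Unlock()

	// Only this goroutine performs the network request for the key.
	v, err := c.fetcher(key)

	// Update the cache, release the waiters, then drop the in-progress marker.
	c.mu.Lock()
	c.values[key] = &entry[V]{v: v, e: err, t: time.Now()}
	c.mu.Unlock()

	ip.v, ip.e = v, err
	ip.m.Unlock()

	c.mu.Lock()
	delete(c.progMap, key)
	c.mu.Unlock()

	return v, err
}
```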
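
A small driver for the sketch above (a separate file in the same hypothetical package) shows the intended effect of the change: many concurrent `get` calls for a missing key should result in a single network fetch.

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	var (
		callsMu sync.Mutex
		calls   int
	)

	// Hypothetical slow "network" reader; counts how many times it is invoked.
	c := newDedupCache(30*time.Second, func(key string) (int, error) {
		callsMu.Lock()
		calls++
		callsMu.Unlock()
		time.Sleep(100 * time.Millisecond) // simulate a slow morph request
		return len(key), nil
	})

	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			if _, err := c.get("container-id"); err != nil {
				panic(err)
			}
		}()
	}
	wg.Wait()

	// Before the fix every concurrent cache miss triggered its own request;
	// with deduplication this normally prints: network calls: 1
	fmt.Println("network calls:", calls)
}
```

As the review thread notes, the in-progress value only serves the goroutines that are already waiting on the fetch; once the cache is updated, later callers hit the refreshed entry until the TTL expires again.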