[#506] node: Invalidate list cache after container add/removal
All checks were successful
DCO action / DCO (pull_request) Successful in 1m32s
Build / Build Components (1.21) (pull_request) Successful in 3m54s
Vulncheck / Vulncheck (pull_request) Successful in 3m31s
Build / Build Components (1.20) (pull_request) Successful in 4m14s
Tests and linters / Staticcheck (pull_request) Successful in 5m18s
Tests and linters / Lint (pull_request) Successful in 5m54s
Tests and linters / Tests (1.21) (pull_request) Successful in 6m55s
Tests and linters / Tests (1.20) (pull_request) Successful in 7m7s
Tests and linters / Tests with -race (pull_request) Successful in 7m14s

`update` already has the problems mentioned in its doc-comment, and the code
itself is not straightforward. Invalidating the cache entry altogether seems like
a better option: we no longer construct cache output ourselves, so there can be no
"impossible" results.

Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
Evgenii Stratonikov 2023-11-14 14:35:23 +03:00
parent 3f61b3a677
commit 6fc0d791d1
2 changed files with 4 additions and 46 deletions
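
For illustration, here is a minimal, self-contained sketch of the pattern this commit switches to. The names (`ownerListCache`, `listSource`, `cnr-1`, ...) are hypothetical, not the repository's actual types: instead of patching a cached per-owner container list in place when a container is added or removed, the event handler simply drops the owner's cache entry, and the next `List` call repopulates it from the authoritative source (the Sidechain).

```go
package main

import (
	"fmt"
	"sync"
)

// listSource stands in for the authoritative source of per-owner
// container lists (the Sidechain in the real node).
type listSource func(owner string) []string

// ownerListCache caches per-owner container lists. Entries are assumed
// to expire elsewhere (TTL), so correctness never depends on this cache
// being up to date.
type ownerListCache struct {
	mtx   sync.Mutex
	cache map[string][]string
	fetch listSource
}

func newOwnerListCache(fetch listSource) *ownerListCache {
	return &ownerListCache{cache: make(map[string][]string), fetch: fetch}
}

// List returns the cached list for the owner, fetching it from the
// source on a cache miss.
func (c *ownerListCache) List(owner string) []string {
	c.mtx.Lock()
	defer c.mtx.Unlock()

	if list, ok := c.cache[owner]; ok {
		return list
	}
	list := c.fetch(owner)
	c.cache[owner] = list
	return list
}

// invalidate drops the owner's entry instead of editing it in place:
// the next List call re-reads the whole list from the source, so the
// cache only ever holds lists the source actually returned.
func (c *ownerListCache) invalidate(owner string) {
	c.mtx.Lock()
	defer c.mtx.Unlock()
	delete(c.cache, owner)
}

func main() {
	source := func(owner string) []string { return []string{"cnr-1", "cnr-2"} }
	c := newOwnerListCache(source)

	fmt.Println(c.List("owner-A")) // populates the cache from the source
	c.invalidate("owner-A")        // e.g. after a container add/removal event
	fmt.Println(c.List("owner-A")) // cache miss, re-fetched from the source
}
```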


@@ -308,51 +308,9 @@ func (s ttlContainerLister) List(id *user.ID) ([]cid.ID, error) {
 	return res, nil
 }
-// updates cached list of owner's containers: cnr is added if flag is true, otherwise it's removed.
-// Concurrent calls can lead to some races:
-// - two parallel additions to missing owner's cache can lead to only one container to be cached
-// - async cache value eviction can lead to idle addition
-//
-// All described race cases aren't critical since cache values expire anyway, we just try
-// to increase cache actuality w/o huge overhead on synchronization.
-func (s *ttlContainerLister) update(owner user.ID, cnr cid.ID, add bool) {
+func (s *ttlContainerLister) invalidate(owner user.ID) {
 	strOwner := owner.EncodeToString()
-	val, ok := s.inner.cache.Peek(strOwner)
-	if !ok {
-		// we could cache the single cnr but in this case we will disperse
-		// with the Sidechain a lot
-		return
-	}
-	if s.inner.ttl <= time.Since(val.t) {
-		return
-	}
-	item := val.v
-	item.mtx.Lock()
-	{
-		found := false
-		for i := range item.list {
-			if found = item.list[i].Equals(cnr); found {
-				if !add {
-					item.list = append(item.list[:i], item.list[i+1:]...)
-					// if list became empty we don't remove the value from the cache
-					// since empty list is a correct value, and we don't want to insta
-					// re-request it from the Sidechain
-				}
-				break
-			}
-		}
-		if add && !found {
-			item.list = append(item.list, cnr)
-		}
-	}
-	item.mtx.Unlock()
+	s.inner.remove(strOwner)
 }
 type cachedIRFetcher struct {
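
Worth noting the likely trade-off: the removed `update` deliberately kept even an empty list in the cache to avoid immediately re-requesting it from the Sidechain, whereas `invalidate` always drops the owner's entry, so the next `List` for that owner pays one extra Sidechain round trip. In exchange, the races described in the removed doc-comment disappear, since cached values are no longer mutated in place.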


@@ -74,7 +74,7 @@ func configureEACLAndContainerSources(c *cfg, client *cntClient.Client, cnrSrc c
 		// creation success are most commonly tracked by polling GET op.
 		cnr, err := cnrSrc.Get(ev.ID)
 		if err == nil {
-			cachedContainerLister.update(cnr.Value.Owner(), ev.ID, true)
+			cachedContainerLister.invalidate(cnr.Value.Owner())
 			cachedContainerStorage.containerCache.set(ev.ID, cnr, nil)
 		} else {
 			// unlike removal, we expect successful receive of the container
@@ -96,7 +96,7 @@ func configureEACLAndContainerSources(c *cfg, client *cntClient.Client, cnrSrc c
 		cachedContainerStorage.handleRemoval(ev.ID)
 		info, err := cachedContainerStorage.DeletionInfo(ev.ID)
 		if err == nil {
-			cachedContainerLister.update(info.Owner, ev.ID, false)
+			cachedContainerLister.invalidate(info.Owner)
 		}
 		c.log.Debug(logs.FrostFSNodeContainerRemovalEventsReceipt,