[#206] Refactor ListObjectsCache

Replaced conflicting names

Signed-off-by: Angira Kekteeva <kira@nspcc.ru>
Angira Kekteeva 2021-08-10 15:23:55 +03:00 committed by Kirillov Denis
parent 4ca56d3d6e
commit ae19eb4ad4
4 changed files with 15 additions and 14 deletions

View file

@@ -23,9 +23,10 @@ import (
 type (
     layer struct {
-        pool  pool.Pool
-        log   *zap.Logger
-        cache ObjectsListCache
+        pool         pool.Pool
+        log          *zap.Logger
+        listObjCache ObjectsListCache
     }

     // Params stores basic API parameters.
@@ -130,9 +131,9 @@ const (
 // and establishes gRPC connection with node.
 func NewLayer(log *zap.Logger, conns pool.Pool) Client {
     return &layer{
-        pool:  conns,
-        log:   log,
-        cache: newListObjectsCache(defaultCacheLifetime),
+        pool:         conns,
+        log:          log,
+        listObjCache: newListObjectsCache(defaultObjectsListCacheLifetime),
     }
 }

View file

@@ -374,7 +374,7 @@ func (n *layer) listAllObjects(ctx context.Context, p ListObjectsParamsCommon) (
         return nil, err
     }

-    allObjects = n.cache.Get(cacheKey)
+    allObjects = n.listObjCache.Get(cacheKey)

     if allObjects == nil {
         allObjects, err = n.listSortedObjectsFromNeoFS(ctx, allObjectParams{
@@ -387,7 +387,7 @@ func (n *layer) listAllObjects(ctx context.Context, p ListObjectsParamsCommon) (
         }

         // putting to cache a copy of allObjects because allObjects can be modified further
-        n.cache.Put(cacheKey, append([]*ObjectInfo(nil), allObjects...))
+        n.listObjCache.Put(cacheKey, append([]*ObjectInfo(nil), allObjects...))
     }

     return allObjects, nil
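
The in-code comment above is the reason for the append copy: the cache has to hold its own slice so that later changes to allObjects cannot reach the cached data. A minimal, self-contained illustration of the slice aliasing this avoids, using generic names rather than the gateway's types:

package main

import "fmt"

func main() {
    original := []int{1, 2, 3}

    aliased := original                       // shares the same backing array
    copied := append([]int(nil), original...) // independent backing array

    original[0] = 99

    fmt.Println(aliased[0]) // 99 - the alias sees the later mutation
    fmt.Println(copied[0])  // 1  - the copied slice stays intact
}

Storing the copy, as the Put call above does with append([]*ObjectInfo(nil), allObjects...), is what keeps the cached listing stable.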

View file

@@ -13,7 +13,7 @@ import (
 request.

 The cache is a map which has a key: cacheOptions struct and a value: list of objects. After putting a record we
-start a timer (via time.AfterFunc) that removes the record after defaultCacheLifetime value.
+start a timer (via time.AfterFunc) that removes the record after defaultObjectsListCacheLifetime value.

 When we get a request from the user we just try to find the suitable and non-expired cache and then we return
 the list of objects. Otherwise we send the request to NeoFS.
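
The package comment above describes the whole mechanism: a mutex-guarded map keyed by a cacheOptions struct, where every stored record is scheduled for removal by time.AfterFunc once the cache lifetime elapses. A self-contained sketch of that pattern, with simplified hypothetical names (demoCache, entry, string keys) instead of the gateway's actual types:

package main

import (
    "fmt"
    "sync"
    "time"
)

// entry plays the role of the cached record: just the list of objects.
type entry struct {
    list []string
}

// demoCache is a hypothetical stand-in for listObjectsCache: an RWMutex-guarded
// map whose records expire after a fixed lifetime.
type demoCache struct {
    lifetime time.Duration
    mtx      sync.RWMutex
    entries  map[string]entry
}

func newDemoCache(lifetime time.Duration) *demoCache {
    return &demoCache{
        lifetime: lifetime,
        entries:  make(map[string]entry),
    }
}

// Put stores a record and schedules its removal via time.AfterFunc,
// as the package comment describes.
func (c *demoCache) Put(key string, list []string) {
    if len(list) == 0 {
        return
    }
    c.mtx.Lock()
    c.entries[key] = entry{list: list}
    c.mtx.Unlock()

    time.AfterFunc(c.lifetime, func() {
        c.mtx.Lock()
        delete(c.entries, key)
        c.mtx.Unlock()
    })
}

// Get returns the cached list, or nil if the record was never stored
// or has already been evicted by its timer.
func (c *demoCache) Get(key string) []string {
    c.mtx.RLock()
    defer c.mtx.RUnlock()
    return c.entries[key].list
}

func main() {
    c := newDemoCache(60 * time.Second)
    c.Put("bucket/prefix", []string{"obj1", "obj2"})
    fmt.Println(c.Get("bucket/prefix")) // [obj1 obj2] until the timer fires
}

In the real cache the key is the cacheOptions struct and the values are ObjectInfo pointers, as the hunks below show.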
@@ -27,15 +27,15 @@ type (
     }
 )

-const defaultCacheLifetime = time.Second * 60
+const defaultObjectsListCacheLifetime = time.Second * 60

 type (
     listObjectsCache struct {
         cacheLifetime time.Duration
-        caches        map[cacheOptions]cache
+        caches        map[cacheOptions]cacheEntry
         mtx           sync.RWMutex
     }
-    cache struct {
+    cacheEntry struct {
         list []*ObjectInfo
     }
     cacheOptions struct {
@@ -47,7 +47,7 @@ type (
 func newListObjectsCache(lifetime time.Duration) *listObjectsCache {
     return &listObjectsCache{
-        caches:        make(map[cacheOptions]cache),
+        caches:        make(map[cacheOptions]cacheEntry),
         cacheLifetime: lifetime,
     }
 }
@@ -65,7 +65,7 @@ func (l *listObjectsCache) Put(key cacheOptions, objects []*ObjectInfo) {
     if len(objects) == 0 {
         return
     }
-    var c cache
+    var c cacheEntry
     l.mtx.Lock()
     defer l.mtx.Unlock()
     c.list = objects