Bump Go version, update hashicorp/golang-lru to v2 #5

fyrchik merged 9 commits from update-go into master 2022-12-31 20:04:07 +00:00
26 changed files with 131 additions and 160 deletions
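For context: golang-lru v2 is a generic rewrite of the v1 `interface{}`-based API, which is what drives the minimum Go version up to 1.18. A minimal sketch of the v2 shape (illustrative, not code from this diff):

```
package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru/v2"
)

func main() {
	// v2 caches are parameterized by key and value types, so call sites
	// no longer need interface{} boxing or value.(T) assertions.
	cache, err := lru.New[string, uint64](128)
	if err != nil {
		panic(err) // lru.New fails only for a non-positive size
	}
	cache.Add("epoch", 42)
	if v, ok := cache.Get("epoch"); ok {
		fmt.Println(v + 1) // v is already a uint64, no assertion needed
	}
}
```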

View file

@ -19,7 +19,7 @@ jobs:
runs-on: ubuntu-20.04
strategy:
matrix:
go: [ '1.17.x', '1.18.x', '1.19.x' ]
go: [ '1.18.x', '1.19.x' ]
steps:
- name: Setup go
uses: actions/setup-go@v3

View file

@ -14,6 +14,7 @@ Changelog for NeoFS Node
- `golang.org/x/term` to `v0.3.0`
- `google.golang.org/grpc` to `v1.51.0`
- `github.com/nats-io/nats.go` to `v1.22.1`
- Minimum go version to v1.18
### Updating from v0.35.0

View file

@ -7,7 +7,7 @@ VERSION ?= $(shell git describe --tags --dirty --match "v*" --always --abbrev=8
HUB_IMAGE ?= truecloudlab/frostfs
HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')"
GO_VERSION ?= 1.17
GO_VERSION ?= 1.19
LINT_VERSION ?= 1.50.0
ARCH = amd64

View file

@ -52,7 +52,7 @@ The latest version of neofs-node works with neofs-contract
# Building
To make all binaries you need Go 1.17+ and `make`:
To make all binaries you need Go 1.18+ and `make`:
```
make all
```

View file

@ -12,35 +12,35 @@ import (
cid "github.com/TrueCloudLab/frostfs-sdk-go/container/id"
netmapSDK "github.com/TrueCloudLab/frostfs-sdk-go/netmap"
"github.com/TrueCloudLab/frostfs-sdk-go/user"
lru "github.com/hashicorp/golang-lru"
lru "github.com/hashicorp/golang-lru/v2"
)
type netValueReader func(interface{}) (interface{}, error)
type netValueReader[K any, V any] func(K) (V, error)
type valueWithTime struct {
v interface{}
type valueWithTime[V any] struct {
v V
t time.Time
// cached error in order to not repeat failed request for some time
e error
}
// entity that provides TTL cache interface.
type ttlNetCache struct {
type ttlNetCache[K comparable, V any] struct {
ttl time.Duration
sz int
cache *lru.Cache
cache *lru.Cache[K, *valueWithTime[V]]
netRdr netValueReader
netRdr netValueReader[K, V]
}
// complicates netValueReader with TTL caching mechanism.
func newNetworkTTLCache(sz int, ttl time.Duration, netRdr netValueReader) *ttlNetCache {
cache, err := lru.New(sz)
func newNetworkTTLCache[K comparable, V any](sz int, ttl time.Duration, netRdr netValueReader[K, V]) *ttlNetCache[K, V] {
cache, err := lru.New[K, *valueWithTime[V]](sz)
fatalOnErr(err)
return &ttlNetCache{
return &ttlNetCache[K, V]{
ttl: ttl,
sz: sz,
cache: cache,
@ -53,47 +53,45 @@ func newNetworkTTLCache(sz int, ttl time.Duration, netRdr netValueReader) *ttlNe
// updates the value from the network on cache miss or by TTL.
//
// returned value should not be modified.
func (c *ttlNetCache) get(key interface{}) (interface{}, error) {
func (c *ttlNetCache[K, V]) get(key K) (V, error) {
val, ok := c.cache.Peek(key)
if ok {
valWithTime := val.(*valueWithTime)
if time.Since(valWithTime.t) < c.ttl {
return valWithTime.v, valWithTime.e
if time.Since(val.t) < c.ttl {
return val.v, val.e
}
c.cache.Remove(key)
}
val, err := c.netRdr(key)
v, err := c.netRdr(key)
c.set(key, val, err)
c.set(key, v, err)
return val, err
return v, err
}
func (c *ttlNetCache) set(k, v interface{}, e error) {
c.cache.Add(k, &valueWithTime{
func (c *ttlNetCache[K, V]) set(k K, v V, e error) {
c.cache.Add(k, &valueWithTime[V]{
v: v,
t: time.Now(),
e: e,
})
}
func (c *ttlNetCache) remove(key interface{}) {
func (c *ttlNetCache[K, V]) remove(key K) {
c.cache.Remove(key)
}
// entity that provides LRU cache interface.
type lruNetCache struct {
cache *lru.Cache
cache *lru.Cache[uint64, *netmapSDK.NetMap]
netRdr netValueReader
netRdr netValueReader[uint64, *netmapSDK.NetMap]
}
// newNetworkLRUCache returns wrapper over netValueReader with LRU cache.
func newNetworkLRUCache(sz int, netRdr netValueReader) *lruNetCache {
cache, err := lru.New(sz)
func newNetworkLRUCache(sz int, netRdr netValueReader[uint64, *netmapSDK.NetMap]) *lruNetCache {
cache, err := lru.New[uint64, *netmapSDK.NetMap](sz)
fatalOnErr(err)
return &lruNetCache{
@ -107,7 +105,7 @@ func newNetworkLRUCache(sz int, netRdr netValueReader) *lruNetCache {
// updates the value from the network on cache miss.
//
// returned value should not be modified.
func (c *lruNetCache) get(key interface{}) (interface{}, error) {
func (c *lruNetCache) get(key uint64) (*netmapSDK.NetMap, error) {
val, ok := c.cache.Get(key)
if ok {
return val, nil
@ -125,73 +123,53 @@ func (c *lruNetCache) get(key interface{}) (interface{}, error) {
// wrapper over TTL cache of values read from the network
// that implements container storage.
type ttlContainerStorage ttlNetCache
func newCachedContainerStorage(v container.Source, ttl time.Duration) *ttlContainerStorage {
const containerCacheSize = 100
lruCnrCache := newNetworkTTLCache(containerCacheSize, ttl, func(key interface{}) (interface{}, error) {
var id cid.ID
err := id.DecodeString(key.(string))
if err != nil {
return nil, err
type ttlContainerStorage struct {
*ttlNetCache[cid.ID, *container.Container]
}
func newCachedContainerStorage(v container.Source, ttl time.Duration) ttlContainerStorage {
const containerCacheSize = 100
lruCnrCache := newNetworkTTLCache[cid.ID, *container.Container](containerCacheSize, ttl, func(id cid.ID) (*container.Container, error) {
return v.Get(id)
})
return (*ttlContainerStorage)(lruCnrCache)
return ttlContainerStorage{lruCnrCache}
}
func (s *ttlContainerStorage) handleRemoval(cnr cid.ID) {
(*ttlNetCache)(s).set(cnr.EncodeToString(), nil, apistatus.ContainerNotFound{})
func (s ttlContainerStorage) handleRemoval(cnr cid.ID) {
s.set(cnr, nil, apistatus.ContainerNotFound{})
}
// Get returns container value from the cache. If value is missing in the cache
// or expired, then it returns value from side chain and updates the cache.
func (s *ttlContainerStorage) Get(cnr cid.ID) (*container.Container, error) {
val, err := (*ttlNetCache)(s).get(cnr.EncodeToString())
if err != nil {
return nil, err
func (s ttlContainerStorage) Get(cnr cid.ID) (*container.Container, error) {
return s.get(cnr)
}
return val.(*container.Container), nil
type ttlEACLStorage struct {
*ttlNetCache[cid.ID, *container.EACL]
}
type ttlEACLStorage ttlNetCache
func newCachedEACLStorage(v container.EACLSource, ttl time.Duration) *ttlEACLStorage {
func newCachedEACLStorage(v container.EACLSource, ttl time.Duration) ttlEACLStorage {
const eaclCacheSize = 100
lruCnrCache := newNetworkTTLCache(eaclCacheSize, ttl, func(key interface{}) (interface{}, error) {
var id cid.ID
err := id.DecodeString(key.(string))
if err != nil {
return nil, err
}
lruCnrCache := newNetworkTTLCache(eaclCacheSize, ttl, func(id cid.ID) (*container.EACL, error) {
return v.GetEACL(id)
})
return (*ttlEACLStorage)(lruCnrCache)
return ttlEACLStorage{lruCnrCache}
}
// GetEACL returns eACL value from the cache. If value is missing in the cache
// or expired, then it returns value from side chain and updates cache.
func (s *ttlEACLStorage) GetEACL(cnr cid.ID) (*container.EACL, error) {
val, err := (*ttlNetCache)(s).get(cnr.EncodeToString())
if err != nil {
return nil, err
}
return val.(*container.EACL), nil
func (s ttlEACLStorage) GetEACL(cnr cid.ID) (*container.EACL, error) {
return s.get(cnr)
}
// InvalidateEACL removes cached eACL value.
func (s *ttlEACLStorage) InvalidateEACL(cnr cid.ID) {
(*ttlNetCache)(s).remove(cnr.EncodeToString())
func (s ttlEACLStorage) InvalidateEACL(cnr cid.ID) {
s.remove(cnr)
}
type lruNetmapSource struct {
@ -203,8 +181,8 @@ type lruNetmapSource struct {
func newCachedNetmapStorage(s netmap.State, v netmap.Source) netmap.Source {
const netmapCacheSize = 10
lruNetmapCache := newNetworkLRUCache(netmapCacheSize, func(key interface{}) (interface{}, error) {
return v.GetNetMapByEpoch(key.(uint64))
lruNetmapCache := newNetworkLRUCache(netmapCacheSize, func(key uint64) (*netmapSDK.NetMap, error) {
return v.GetNetMapByEpoch(key)
})
return &lruNetmapSource{
@ -227,7 +205,7 @@ func (s *lruNetmapSource) getNetMapByEpoch(epoch uint64) (*netmapSDK.NetMap, err
return nil, err
}
return val.(*netmapSDK.NetMap), nil
return val, nil
}
func (s *lruNetmapSource) Epoch() (uint64, error) {
@ -236,7 +214,9 @@ func (s *lruNetmapSource) Epoch() (uint64, error) {
// wrapper over TTL cache of values read from the network
// that implements container lister.
type ttlContainerLister ttlNetCache
type ttlContainerLister struct {
*ttlNetCache[string, *cacheItemContainerList]
}
// value type for ttlNetCache used by ttlContainerLister.
type cacheItemContainerList struct {
@ -246,14 +226,11 @@ type cacheItemContainerList struct {
list []cid.ID
}
func newCachedContainerLister(c *cntClient.Client, ttl time.Duration) *ttlContainerLister {
func newCachedContainerLister(c *cntClient.Client, ttl time.Duration) ttlContainerLister {
const containerListerCacheSize = 100
lruCnrListerCache := newNetworkTTLCache(containerListerCacheSize, ttl, func(key interface{}) (interface{}, error) {
var (
id *user.ID
strID = key.(string)
)
lruCnrListerCache := newNetworkTTLCache(containerListerCacheSize, ttl, func(strID string) (*cacheItemContainerList, error) {
var id *user.ID
if strID != "" {
id = new(user.ID)
@ -274,28 +251,24 @@ func newCachedContainerLister(c *cntClient.Client, ttl time.Duration) *ttlContai
}, nil
})
return (*ttlContainerLister)(lruCnrListerCache)
return ttlContainerLister{lruCnrListerCache}
}
// List returns list of container IDs from the cache. If list is missing in the
// cache or expired, then it returns container IDs from side chain and updates
// the cache.
func (s *ttlContainerLister) List(id *user.ID) ([]cid.ID, error) {
func (s ttlContainerLister) List(id *user.ID) ([]cid.ID, error) {
var str string
if id != nil {
str = id.EncodeToString()
}
val, err := (*ttlNetCache)(s).get(str)
item, err := s.get(str)
if err != nil {
return nil, err
}
// panic on typecast below is OK since developer must be careful,
// runtime can do nothing with wrong type occurrence
item := val.(*cacheItemContainerList)
item.mtx.RLock()
res := make([]cid.ID, len(item.list))
copy(res, item.list)
@ -314,16 +287,14 @@ func (s *ttlContainerLister) List(id *user.ID) ([]cid.ID, error) {
func (s *ttlContainerLister) update(owner user.ID, cnr cid.ID, add bool) {
strOwner := owner.EncodeToString()
val, ok := (*ttlNetCache)(s).cache.Get(strOwner)
val, ok := s.cache.Get(strOwner)
if !ok {
// we could cache the single cnr but in this case we will disperse
// with the Sidechain a lot
return
}
// panic on typecast below is OK since developer must be careful,
// runtime can do nothing with wrong type occurrence
item := val.(*valueWithTime).v.(*cacheItemContainerList)
item := val.v
item.mtx.Lock()
{
@ -349,9 +320,11 @@ func (s *ttlContainerLister) update(owner user.ID, cnr cid.ID, add bool) {
item.mtx.Unlock()
}
type cachedIRFetcher ttlNetCache
type cachedIRFetcher struct {
*ttlNetCache[struct{}, [][]byte]
}
func newCachedIRFetcher(f interface{ InnerRingKeys() ([][]byte, error) }) *cachedIRFetcher {
func newCachedIRFetcher(f interface{ InnerRingKeys() ([][]byte, error) }) cachedIRFetcher {
const (
irFetcherCacheSize = 1 // we intend to store only one value
@ -364,25 +337,25 @@ func newCachedIRFetcher(f interface{ InnerRingKeys() ([][]byte, error) }) *cache
irFetcherCacheTTL = 30 * time.Second
)
irFetcherCache := newNetworkTTLCache(irFetcherCacheSize, irFetcherCacheTTL,
func(key interface{}) (interface{}, error) {
irFetcherCache := newNetworkTTLCache[struct{}, [][]byte](irFetcherCacheSize, irFetcherCacheTTL,
func(_ struct{}) ([][]byte, error) {
return f.InnerRingKeys()
},
)
return (*cachedIRFetcher)(irFetcherCache)
return cachedIRFetcher{irFetcherCache}
}
// InnerRingKeys returns cached list of Inner Ring keys. If keys are missing in
// the cache or expired, then it returns keys from side chain and updates
// the cache.
func (f *cachedIRFetcher) InnerRingKeys() ([][]byte, error) {
val, err := (*ttlNetCache)(f).get("")
func (f cachedIRFetcher) InnerRingKeys() ([][]byte, error) {
val, err := f.get(struct{}{})
if err != nil {
return nil, err
}
return val.([][]byte), nil
return val, nil
}
type ttlMaxObjectSizeCache struct {
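Distilled from the hunks above, the read-through-with-TTL pattern of ttlNetCache as a self-contained sketch; ttlEntry and readThrough are illustrative stand-ins for valueWithTime and ttlNetCache.get, and, as in the diff, the fetch error is cached so a failed request is not repeated until the TTL expires:

```
package main

import (
	"fmt"
	"time"

	lru "github.com/hashicorp/golang-lru/v2"
)

// ttlEntry mirrors valueWithTime: the fetched value, its fetch time,
// and the fetch error, cached together.
type ttlEntry[V any] struct {
	v V
	t time.Time
	e error
}

// readThrough serves from the LRU while the entry is fresh, otherwise
// hits the reader and caches whatever it returns, error included.
func readThrough[K comparable, V any](
	c *lru.Cache[K, *ttlEntry[V]],
	ttl time.Duration,
	rdr func(K) (V, error),
	key K,
) (V, error) {
	if e, ok := c.Peek(key); ok && time.Since(e.t) < ttl {
		return e.v, e.e
	}
	v, err := rdr(key)
	c.Add(key, &ttlEntry[V]{v: v, t: time.Now(), e: err})
	return v, err
}

func main() {
	c, _ := lru.New[string, *ttlEntry[int]](10) // error only for size <= 0
	rdr := func(k string) (int, error) { return len(k), nil }
	v, _ := readThrough(c, time.Second, rdr, "hello")
	fmt.Println(v) // 5
}
```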

View file

@ -657,8 +657,8 @@ type morphContainerWriter struct {
neoClient *cntClient.Client
cacheEnabled bool
eacls *ttlEACLStorage
lists *ttlContainerLister
eacls ttlEACLStorage
lists ttlContainerLister
}
func (m morphContainerWriter) Put(cnr containerCore.Container) (*cid.ID, error) {

go.mod
View file

@ -1,6 +1,6 @@
module github.com/TrueCloudLab/frostfs-node
go 1.17
go 1.18
require (
github.com/TrueCloudLab/frostfs-api-go/v2 v2.0.0-20221212144048-1351b6656d68
@ -13,7 +13,7 @@ require (
github.com/flynn-archive/go-shlex v0.0.0-20150515145356-3f9db97f8568
github.com/google/go-github/v39 v39.2.0
github.com/google/uuid v1.3.0
github.com/hashicorp/golang-lru v0.5.4
github.com/hashicorp/golang-lru/v2 v2.0.1
github.com/klauspost/compress v1.15.13
github.com/mitchellh/go-homedir v1.1.0
github.com/mr-tron/base58 v1.2.0
@ -54,6 +54,7 @@ require (
github.com/golang/snappy v0.0.3 // indirect
github.com/google/go-querystring v1.1.0 // indirect
github.com/gorilla/websocket v1.4.2 // indirect
github.com/hashicorp/golang-lru v0.5.4 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/holiman/uint256 v1.2.0 // indirect
github.com/inconshreveable/mousetrap v1.0.1 // indirect

go.sum

Diff not shown.

View file

@ -42,10 +42,10 @@ func (np *Processor) processDeposit(deposit *frostfsEvent.Deposit) {
defer np.mintEmitLock.Unlock()
val, ok := np.mintEmitCache.Get(receiver.String())
if ok && val.(uint64)+np.mintEmitThreshold >= curEpoch {
if ok && val+np.mintEmitThreshold >= curEpoch {
np.log.Warn("double mint emission declined",
zap.String("receiver", receiver.String()),
zap.Uint64("last_emission", val.(uint64)),
zap.Uint64("last_emission", val),
zap.Uint64("current_epoch", curEpoch))
return

View file

@ -12,7 +12,7 @@ import (
"github.com/TrueCloudLab/frostfs-node/pkg/morph/event"
frostfsEvent "github.com/TrueCloudLab/frostfs-node/pkg/morph/event/neofs"
"github.com/TrueCloudLab/frostfs-node/pkg/util/logger"
lru "github.com/hashicorp/golang-lru"
lru "github.com/hashicorp/golang-lru/v2"
"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/panjf2000/ants/v2"
@ -47,7 +47,7 @@ type (
alphabetState AlphabetState
converter PrecisionConverter
mintEmitLock *sync.Mutex
mintEmitCache *lru.Cache
mintEmitCache *lru.Cache[string, uint64]
mintEmitThreshold uint64
mintEmitValue fixedn.Fixed8
gasBalanceThreshold int64
@ -105,7 +105,7 @@ func New(p *Params) (*Processor, error) {
return nil, fmt.Errorf("ir/frostfs: can't create worker pool: %w", err)
}
lruCache, err := lru.New(p.MintEmitCacheSize)
lruCache, err := lru.New[string, uint64](p.MintEmitCacheSize)
if err != nil {
return nil, fmt.Errorf("ir/frostfs: can't create LRU cache for gas emission: %w", err)
}
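With the typed cache, the double-emission guard above compares uint64 epochs directly; a sketch of that check with made-up numbers:

```
package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru/v2"
)

func main() {
	// Remember the last emission epoch per receiver and refuse to mint
	// again while still within the threshold, as in processDeposit.
	const threshold = uint64(3)
	cache, _ := lru.New[string, uint64](16)

	curEpoch := uint64(10)
	cache.Add("receiver-1", uint64(8))

	if last, ok := cache.Get("receiver-1"); ok && last+threshold >= curEpoch {
		fmt.Println("double mint emission declined; last_emission:", last)
		return
	}
	fmt.Println("emitting gas")
}
```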

View file

@ -13,7 +13,7 @@ import (
"github.com/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
oid "github.com/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/TrueCloudLab/hrw"
"github.com/hashicorp/golang-lru/simplelru"
"github.com/hashicorp/golang-lru/v2/simplelru"
"go.uber.org/zap"
)
@ -61,7 +61,7 @@ type Blobovniczas struct {
cfg
// cache of opened filled Blobovniczas
opened *simplelru.LRU
opened *simplelru.LRU[string, *blobovnicza.Blobovnicza]
// lruMtx protects opened cache.
// It isn't RWMutex because `Get` calls must
// lock this mutex on write, as LRU info is updated.
@ -97,21 +97,20 @@ func NewBlobovniczaTree(opts ...Option) (blz *Blobovniczas) {
opts[i](&blz.cfg)
}
cache, err := simplelru.NewLRU(blz.openedCacheSize, func(key interface{}, value interface{}) {
p := key.(string)
cache, err := simplelru.NewLRU[string, *blobovnicza.Blobovnicza](blz.openedCacheSize, func(p string, value *blobovnicza.Blobovnicza) {
lvlPath := filepath.Dir(p)
if b, ok := blz.active[lvlPath]; ok && b.ind == u64FromHexString(filepath.Base(p)) {
// This branch is taken if we have recently updated active blobovnicza and remove
// it from opened cache.
return
} else if err := value.(*blobovnicza.Blobovnicza).Close(); err != nil {
} else if err := value.Close(); err != nil {
blz.log.Error("could not close Blobovnicza",
zap.String("id", key.(string)),
zap.String("id", p),
zap.String("error", err.Error()),
)
} else {
blz.log.Debug("blobovnicza successfully closed on evict",
zap.String("id", key.(string)),
zap.String("id", p),
)
}
})
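The typed eviction callback above closes a Blobovnicza as it falls out of the opened cache; the same pattern in miniature, with *os.File standing in for the Blobovnicza type:

```
package main

import (
	"fmt"
	"os"

	"github.com/hashicorp/golang-lru/v2/simplelru"
)

func main() {
	// The v2 callback is typed, so the old key.(string) and
	// value.(*blobovnicza.Blobovnicza) assertions vanish.
	cache, _ := simplelru.NewLRU[string, *os.File](2, func(p string, f *os.File) {
		if err := f.Close(); err != nil {
			fmt.Println("could not close", p, err)
		}
	})

	for _, name := range []string{"a", "b", "c"} {
		f, err := os.CreateTemp("", name)
		if err != nil {
			panic(err)
		}
		// Adding the third file evicts the first and closes it via the callback.
		cache.Add(name, f)
	}
}
```

Note that simplelru is not thread-safe, which is why accesses to `opened` in this file stay behind `lruMtx`.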

View file

@ -57,11 +57,10 @@ func (b *Blobovniczas) Close() error {
b.opened.Remove(p)
}
for _, k := range b.opened.Keys() {
v, _ := b.opened.Get(k)
blz := v.(*blobovnicza.Blobovnicza)
blz, _ := b.opened.Get(k)
if err := blz.Close(); err != nil {
b.log.Debug("could not close active blobovnicza",
zap.String("path", k.(string)),
zap.String("path", k),
zap.String("error", err.Error()),
)
}
@ -86,7 +85,7 @@ func (b *Blobovniczas) openBlobovnicza(p string) (*blobovnicza.Blobovnicza, erro
b.lruMtx.Unlock()
if ok {
// blobovnicza should be opened in cache
return v.(*blobovnicza.Blobovnicza), nil
return v, nil
}
lvlPath := filepath.Dir(p)
@ -105,7 +104,7 @@ func (b *Blobovniczas) openBlobovnicza(p string) (*blobovnicza.Blobovnicza, erro
v, ok = b.opened.Get(p)
if ok {
return v.(*blobovnicza.Blobovnicza), nil
return v, nil
}
blz, err := b.openBlobovniczaNoCache(p)

View file

@ -80,7 +80,7 @@ func (b *Blobovniczas) deleteObjectFromLevel(prm blobovnicza.DeletePrm, blzPath
v, ok := b.opened.Get(blzPath)
b.lruMtx.Unlock()
if ok {
if res, err := b.deleteObject(v.(*blobovnicza.Blobovnicza), prm, dp); err == nil {
if res, err := b.deleteObject(v, prm, dp); err == nil {
return res, err
} else if !blobovnicza.IsErrNotFound(err) {
b.log.Debug("could not remove object from opened blobovnicza",

View file

@ -72,7 +72,7 @@ func (b *Blobovniczas) getObjectFromLevel(prm blobovnicza.GetPrm, blzPath string
v, ok := b.opened.Get(blzPath)
b.lruMtx.Unlock()
if ok {
if res, err := b.getObject(v.(*blobovnicza.Blobovnicza), prm); err == nil {
if res, err := b.getObject(v, prm); err == nil {
return res, err
} else if !blobovnicza.IsErrNotFound(err) {
b.log.Debug("could not read object from opened blobovnicza",

View file

@ -76,7 +76,7 @@ func (b *Blobovniczas) getRangeFromLevel(prm common.GetRangePrm, blzPath string,
v, ok := b.opened.Get(blzPath)
b.lruMtx.Unlock()
if ok {
res, err := b.getObjectRange(v.(*blobovnicza.Blobovnicza), prm)
res, err := b.getObjectRange(v, prm)
switch {
case err == nil,
isErrOutOfRange(err):

View file

@ -11,8 +11,8 @@ import (
"github.com/TrueCloudLab/frostfs-node/pkg/util"
apistatus "github.com/TrueCloudLab/frostfs-sdk-go/client/status"
oid "github.com/TrueCloudLab/frostfs-sdk-go/object/id"
lru "github.com/hashicorp/golang-lru"
"github.com/hashicorp/golang-lru/simplelru"
lru "github.com/hashicorp/golang-lru/v2"
"github.com/hashicorp/golang-lru/v2/simplelru"
"go.etcd.io/bbolt"
"go.uber.org/zap"
)
@ -23,7 +23,7 @@ type store struct {
maxFlushedMarksCount int
maxRemoveBatchSize int
flushed simplelru.LRUCache
flushed simplelru.LRUCache[string, bool]
db *bbolt.DB
dbKeysToRemove []string
@ -69,7 +69,7 @@ func (c *cache) openStore(readOnly bool) error {
// Write-cache can be opened multiple times during `SetMode`.
// flushed map must not be re-created in this case.
if c.flushed == nil {
c.flushed, _ = lru.NewWithEvict(c.maxFlushedMarksCount, c.removeFlushed)
c.flushed, _ = lru.NewWithEvict[string, bool](c.maxFlushedMarksCount, c.removeFlushed)
}
return nil
}
@ -78,12 +78,12 @@ func (c *cache) openStore(readOnly bool) error {
// To minimize interference with the client operations, the actual removal
// is done in batches.
// It is not thread-safe and is used only as an evict callback to LRU cache.
func (c *cache) removeFlushed(key, value interface{}) {
fromDatabase := value.(bool)
func (c *cache) removeFlushed(key string, value bool) {
fromDatabase := value
if fromDatabase {
c.dbKeysToRemove = append(c.dbKeysToRemove, key.(string))
c.dbKeysToRemove = append(c.dbKeysToRemove, key)
} else {
c.fsKeysToRemove = append(c.fsKeysToRemove, key.(string))
c.fsKeysToRemove = append(c.fsKeysToRemove, key)
}
if len(c.dbKeysToRemove)+len(c.fsKeysToRemove) >= c.maxRemoveBatchSize {
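The evict callback here only queues keys, deferring the actual removal until a batch is full; a sketch of that batching-on-evict idea (sizes are illustrative):

```
package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru/v2"
)

func main() {
	const batchSize = 3
	var pending []string
	flush := func() {
		fmt.Println("removing batch:", pending)
		pending = pending[:0]
	}

	// Evicted keys are queued; real work happens once per batch, which is
	// the same shape as removeFlushed feeding dbKeysToRemove/fsKeysToRemove.
	cache, _ := lru.NewWithEvict[string, bool](4, func(key string, fromDB bool) {
		pending = append(pending, key)
		if len(pending) >= batchSize {
			flush()
		}
	})

	for i := 0; i < 10; i++ {
		cache.Add(fmt.Sprintf("obj-%d", i), i%2 == 0)
	}
}
```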

View file

@ -9,7 +9,7 @@ import (
"time"
"github.com/TrueCloudLab/frostfs-node/pkg/util/logger"
lru "github.com/hashicorp/golang-lru"
lru "github.com/hashicorp/golang-lru/v2"
"github.com/nspcc-dev/neo-go/pkg/core/native/noderoles"
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
@ -96,7 +96,7 @@ type cache struct {
nnsHash *util.Uint160
gKey *keys.PublicKey
txHeights *lru.Cache
txHeights *lru.Cache[util.Uint256, uint32]
}
func (c cache) nns() *util.Uint160 {

View file

@ -8,7 +8,7 @@ import (
"time"
"github.com/TrueCloudLab/frostfs-node/pkg/util/logger"
lru "github.com/hashicorp/golang-lru"
lru "github.com/hashicorp/golang-lru/v2"
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/rpcclient"
@ -185,7 +185,7 @@ func newActor(ws *rpcclient.WSClient, acc *wallet.Account, cfg cfg) (*actor.Acto
}
func newClientCache() cache {
c, _ := lru.New(100) // returns error only if size is negative
c, _ := lru.New[util.Uint256, uint32](100) // returns error only if size is negative
return cache{
m: &sync.RWMutex{},
txHeights: c,

View file

@ -892,7 +892,7 @@ func (c *Client) CalculateNonceAndVUB(hash util.Uint256) (nonce uint32, vub uint
func (c *Client) getTransactionHeight(h util.Uint256) (uint32, error) {
if rh, ok := c.cache.txHeights.Get(h); ok {
return rh.(uint32), nil
return rh, nil
}
height, err := c.client.GetTransactionHeight(h)
if err != nil {

View file

@ -9,7 +9,7 @@ import (
cid "github.com/TrueCloudLab/frostfs-sdk-go/container/id"
netmapSDK "github.com/TrueCloudLab/frostfs-sdk-go/netmap"
oid "github.com/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/hashicorp/golang-lru/simplelru"
"github.com/hashicorp/golang-lru/v2/simplelru"
)
type netMapBuilder struct {
@ -19,7 +19,7 @@ type netMapBuilder struct {
lastNm *netmapSDK.NetMap
// containerCache caches container nodes by ID. It is used to skip `GetContainerNodes` invocation if
// neither netmap nor container has changed.
containerCache simplelru.LRUCache
containerCache simplelru.LRUCache[string, [][]netmapSDK.NodeInfo]
}
type netMapSrc struct {
@ -32,7 +32,7 @@ type netMapSrc struct {
const defaultContainerCacheSize = 10
func NewNetworkMapBuilder(nm *netmapSDK.NetMap) Builder {
cache, _ := simplelru.NewLRU(defaultContainerCacheSize, nil) // no error
cache, _ := simplelru.NewLRU[string, [][]netmapSDK.NodeInfo](defaultContainerCacheSize, nil) // no error
return &netMapBuilder{
nmSrc: &netMapSrc{nm: nm},
containerCache: cache,
@ -40,7 +40,7 @@ func NewNetworkMapBuilder(nm *netmapSDK.NetMap) Builder {
}
func NewNetworkMapSourceBuilder(nmSrc netmap.Source) Builder {
cache, _ := simplelru.NewLRU(defaultContainerCacheSize, nil) // no error
cache, _ := simplelru.NewLRU[string, [][]netmapSDK.NodeInfo](defaultContainerCacheSize, nil) // no error
return &netMapBuilder{
nmSrc: nmSrc,
containerCache: cache,
@ -65,8 +65,7 @@ func (b *netMapBuilder) BuildPlacement(cnr cid.ID, obj *oid.ID, p netmapSDK.Plac
raw, ok := b.containerCache.Get(string(binCnr))
b.mtx.Unlock()
if ok {
cn := raw.([][]netmapSDK.NodeInfo)
return BuildObjectPlacement(nm, cn, obj)
return BuildObjectPlacement(nm, raw, obj)
}
} else {
b.containerCache.Purge()
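containerCache is declared through the simplelru.LRUCache interface rather than the concrete *simplelru.LRU; a sketch of that shape, with [][]string standing in for [][]netmapSDK.NodeInfo:

```
package main

import (
	"fmt"

	"github.com/hashicorp/golang-lru/v2/simplelru"
)

// builder keeps the cache behind the generic LRUCache interface,
// the same shape as netMapBuilder.containerCache in the diff.
type builder struct {
	containerCache simplelru.LRUCache[string, [][]string]
}

func main() {
	cache, _ := simplelru.NewLRU[string, [][]string](10, nil) // no error, size is positive
	b := builder{containerCache: cache}

	b.containerCache.Add("cnr-1", [][]string{{"node-a"}, {"node-b"}})
	if nodes, ok := b.containerCache.Get("cnr-1"); ok {
		fmt.Println(nodes) // typed result, no raw.([][]...) assertion
	}
}
```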

View file

@ -8,7 +8,7 @@ import (
"github.com/TrueCloudLab/frostfs-node/pkg/util/logger"
"github.com/TrueCloudLab/frostfs-sdk-go/object"
oid "github.com/TrueCloudLab/frostfs-sdk-go/object/id"
lru "github.com/hashicorp/golang-lru"
lru "github.com/hashicorp/golang-lru/v2"
"go.uber.org/zap"
)
@ -31,7 +31,7 @@ type Source interface {
// `ExpirationChecker{}` declarations leads to undefined behaviour
// and may lead to panics.
type ExpirationChecker struct {
cache *lru.Cache
cache *lru.Cache[string, uint64]
log *logger.Logger
@ -51,7 +51,7 @@ func (g *ExpirationChecker) IsTombstoneAvailable(ctx context.Context, a oid.Addr
expEpoch, ok := g.cache.Get(addrStr)
if ok {
return expEpoch.(uint64) > epoch
return expEpoch > epoch
}
ts, err := g.tsSource.Tombstone(ctx, a, epoch)

View file

@ -4,7 +4,7 @@ import (
"fmt"
"github.com/TrueCloudLab/frostfs-node/pkg/util/logger"
lru "github.com/hashicorp/golang-lru"
lru "github.com/hashicorp/golang-lru/v2"
"go.uber.org/zap"
)
@ -48,7 +48,7 @@ func NewChecker(oo ...Option) *ExpirationChecker {
panicOnNil(cfg.tsSource, "Tombstone source")
cache, err := lru.New(cfg.cacheSize)
cache, err := lru.New[string, uint64](cfg.cacheSize)
if err != nil {
panic(fmt.Errorf("could not create LRU cache with %d size: %w", cfg.cacheSize, err))
}

View file

@ -12,7 +12,7 @@ import (
"github.com/TrueCloudLab/frostfs-node/pkg/services/replicator"
"github.com/TrueCloudLab/frostfs-node/pkg/util/logger"
oid "github.com/TrueCloudLab/frostfs-sdk-go/object/id"
lru "github.com/hashicorp/golang-lru"
lru "github.com/hashicorp/golang-lru/v2"
"github.com/panjf2000/ants/v2"
"go.uber.org/zap"
)
@ -53,7 +53,7 @@ func (oiw *objectsInWork) add(addr oid.Address) {
type Policer struct {
*cfg
cache *lru.Cache
cache *lru.Cache[oid.Address, time.Time]
objsInWork *objectsInWork
}
@ -115,7 +115,7 @@ func New(opts ...Option) *Policer {
c.log = &logger.Logger{Logger: c.log.With(zap.String("component", "Object Policer"))}
cache, err := lru.New(int(c.cacheSize))
cache, err := lru.New[oid.Address, time.Time](int(c.cacheSize))
if err != nil {
panic(err)
}

View file

@ -56,7 +56,7 @@ func (p *Policer) shardPolicyWorker(ctx context.Context) {
err = p.taskPool.Submit(func() {
v, ok := p.cache.Get(addr.Address)
if ok && time.Since(v.(time.Time)) < p.evictDuration {
if ok && time.Since(v) < p.evictDuration {
return
}
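The Policer check above skips addresses handled within evictDuration; the same recency-dedup pattern as a standalone sketch (string keys stand in for oid.Address):

```
package main

import (
	"fmt"
	"time"

	lru "github.com/hashicorp/golang-lru/v2"
)

func main() {
	// Remember when an address was last processed and skip it while it is
	// still inside the evict window, as shardPolicyWorker does.
	const evictDuration = 30 * time.Second
	cache, _ := lru.New[string, time.Time](1024)

	addr := "container/object"
	if seen, ok := cache.Get(addr); ok && time.Since(seen) < evictDuration {
		fmt.Println("recently handled, skipping")
		return
	}
	cache.Add(addr, time.Now())
	fmt.Println("processing", addr)
}
```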

View file

@ -9,7 +9,7 @@ import (
"time"
"github.com/TrueCloudLab/frostfs-node/pkg/network"
"github.com/hashicorp/golang-lru/simplelru"
"github.com/hashicorp/golang-lru/v2/simplelru"
"google.golang.org/grpc"
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/credentials/insecure"
@ -17,7 +17,7 @@ import (
type clientCache struct {
sync.Mutex
simplelru.LRU
simplelru.LRU[string, cacheItem]
}
type cacheItem struct {
@ -34,8 +34,8 @@ const (
var errRecentlyFailed = errors.New("client has recently failed")
func (c *clientCache) init() {
l, _ := simplelru.NewLRU(defaultClientCacheSize, func(key, value interface{}) {
_ = value.(*grpc.ClientConn).Close()
l, _ := simplelru.NewLRU[string, cacheItem](defaultClientCacheSize, func(_ string, value cacheItem) {
_ = value.cc.Close()
})
c.LRU = *l
}
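As in the hunk above, the generic simplelru.LRU can be embedded directly in the cache struct; a sketch of that layout, with a trivial item type in place of cacheItem:

```
package main

import (
	"sync"

	"github.com/hashicorp/golang-lru/v2/simplelru"
)

// item stands in for cacheItem (a connection plus bookkeeping).
type item struct{ closed bool }

// clientCache embeds the typed LRU; the mutex guards every access,
// including Get, since Get mutates the LRU recency order.
type clientCache struct {
	sync.Mutex
	simplelru.LRU[string, *item]
}

func (c *clientCache) init() {
	l, _ := simplelru.NewLRU[string, *item](8, func(_ string, v *item) {
		v.closed = true // stands in for value.cc.Close() in the diff
	})
	c.LRU = *l
}

func main() {
	var c clientCache
	c.init()
	c.Lock()
	c.Add("node-1", &item{})
	c.Unlock()
}
```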
@ -46,7 +46,7 @@ func (c *clientCache) get(ctx context.Context, netmapAddr string) (TreeServiceCl
c.Unlock()
if ok {
item := ccInt.(cacheItem)
item := ccInt
if item.cc == nil {
if d := time.Since(item.lastTry); d < defaultReconnectInterval {
return nil, fmt.Errorf("%w: %s till the next reconnection to %s",

View file

@ -10,17 +10,17 @@ import (
"github.com/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
cidSDK "github.com/TrueCloudLab/frostfs-sdk-go/container/id"
netmapSDK "github.com/TrueCloudLab/frostfs-sdk-go/netmap"
"github.com/hashicorp/golang-lru/simplelru"
"github.com/hashicorp/golang-lru/v2/simplelru"
)
type containerCache struct {
sync.Mutex
nm *netmapSDK.NetMap
lru *simplelru.LRU
lru *simplelru.LRU[string, containerCacheItem]
}
func (c *containerCache) init(size int) {
c.lru, _ = simplelru.NewLRU(size, nil) // no error, size is positive
c.lru, _ = simplelru.NewLRU[string, containerCacheItem](size, nil) // no error, size is positive
}
type containerCacheItem struct {
@ -48,8 +48,7 @@ func (s *Service) getContainerNodes(cid cidSDK.ID) ([]netmapSDK.NodeInfo, int, e
s.containerCache.Lock()
if s.containerCache.nm != nm {
s.containerCache.lru.Purge()
} else if v, ok := s.containerCache.lru.Get(cidStr); ok {
item := v.(containerCacheItem)
} else if item, ok := s.containerCache.lru.Get(cidStr); ok {
if item.cnr == cnr {
s.containerCache.Unlock()
return item.nodes, item.local, nil