package writecache

import (
	"sort"
	"time"

	"github.com/nspcc-dev/neofs-node/pkg/core/object"
	"github.com/nspcc-dev/neofs-node/pkg/local_object_storage/blobstor/common"
	storagelog "github.com/nspcc-dev/neofs-node/pkg/local_object_storage/internal/log"
	"go.etcd.io/bbolt"
	"go.uber.org/zap"
)
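
// defaultPersistInterval is how often persistLoop persists objects accumulated in memory.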
const defaultPersistInterval = time.Second

// persistLoop persists objects accumulated in memory to the database.
func (c *cache) persistLoop() {
	tick := time.NewTicker(defaultPersistInterval)
	defer tick.Stop()

	for {
		select {
		case <-tick.C:
			c.modeMtx.RLock()
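			// Nothing is persisted while the cache is in read-only mode.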
			if c.readOnly() {
				c.modeMtx.RUnlock()
				continue
			}
			c.persistMemoryCache()
			c.modeMtx.RUnlock()
		case <-c.closeCh:
			return
		}
	}
}

func (c *cache) persistMemoryCache() {
	c.mtx.RLock()
	m := c.mem
	c.mtx.RUnlock()

	if len(m) == 0 {
		return
	}

	sort.Slice(m, func(i, j int) bool { return m[i].addr < m[j].addr })

	start := time.Now()
	c.persistSmallObjects(m)
	c.log.Debug("persisted items to disk",
		zap.Duration("took", time.Since(start)),
		zap.Int("total", len(m)))

	for i := range m {
		storagelog.Write(c.log,
			storagelog.AddressField(m[i].addr),
			storagelog.OpField("in-mem DELETE persist"),
		)
	}
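
	// Drop the persisted objects from the head of the in-memory buffer and
	// recalculate the size of whatever was appended while persisting.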
	c.mtx.Lock()
	c.curMemSize = 0
	n := copy(c.mem, c.mem[len(m):])
	c.mem = c.mem[:n]
	for i := range c.mem {
		c.curMemSize += uint64(len(c.mem[i].data))
	}
	c.mtx.Unlock()
}

// persistSmallObjects persists small objects to the write-cache database and
// pushes them to the flush workers queue.
func (c *cache) persistSmallObjects(objs []objectInfo) {
	cacheSize := c.estimateCacheSize()
	overflowIndex := len(objs)
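	// Find the longest prefix of objs that fits into the database without
	// exceeding maxCacheSize; objects past overflowIndex bypass the database.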
	for i := range objs {
		newSize := c.incSizeDB(cacheSize)
		if c.maxCacheSize < newSize {
			overflowIndex = i
			break
		}
		cacheSize = newSize
	}
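
	// Store the fitting objects in a single bbolt batch; if the batch fails,
	// overflowIndex is reset so that none of them are counted as persisted.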
	err := c.db.Batch(func(tx *bbolt.Tx) error {
		b := tx.Bucket(defaultBucket)
		for i := 0; i < overflowIndex; i++ {
			err := b.Put([]byte(objs[i].addr), objs[i].data)
			if err != nil {
				return err
			}
		}
		return nil
	})
	if err != nil {
		overflowIndex = 0
	} else {
		c.evictObjects(len(objs) - overflowIndex)
	}
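
	// Objects written to the database are logged and counted; the overflow
	// tail is only recorded in c.flushed.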
	for i := 0; i < overflowIndex; i++ {
		storagelog.Write(c.log, storagelog.AddressField(objs[i].addr), storagelog.OpField("db PUT"))
		c.objCounters.IncDB()
	}
	for i := overflowIndex; i < len(objs); i++ {
		c.flushed.Add(objs[i].addr, true)
	}

	c.addToFlushQueue(objs, overflowIndex)
}

// persistBigObject writes the object to FSTree and pushes it to the flush workers queue.
func (c *cache) persistBigObject(objInfo objectInfo) {
	cacheSz := c.estimateCacheSize()
	metaIndex := 0
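	// Write the object to FSTree only while the cache fits into its size limit.
	// On success metaIndex becomes 1, so only meta information is queued for flushing.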
	if c.incSizeFS(cacheSz) <= c.maxCacheSize {
		var prm common.PutPrm
		prm.Address = object.AddressOf(objInfo.obj)
		prm.RawData = objInfo.data

		_, err := c.fsTree.Put(prm)
		if err == nil {
			metaIndex = 1
			if c.blobstor.NeedsCompression(objInfo.obj) {
				c.mtx.Lock()
				c.compressFlags[objInfo.addr] = struct{}{}
				c.mtx.Unlock()
			}
			c.objCounters.IncFS()
			storagelog.Write(c.log, storagelog.AddressField(objInfo.addr), storagelog.OpField("fstree PUT"))
		}
	}
	c.addToFlushQueue([]objectInfo{objInfo}, metaIndex)
}

// addToFlushQueue pushes objects to the flush workers queue.
// For objects below metaIndex, only meta information will be flushed.
func (c *cache) addToFlushQueue(objs []objectInfo, metaIndex int) {
	for i := 0; i < metaIndex; i++ {
		select {
		case c.metaCh <- objs[i].obj:
		case <-c.closeCh:
			return
		}
	}
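
	// The remaining objects are pushed to the direct flush channel.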
	for i := metaIndex; i < len(objs); i++ {
		select {
		case c.directCh <- objs[i].obj:
		case <-c.closeCh:
			return
		}
	}
}