[#2074] write-cache: Do not flush same object twice

Signed-off-by: Pavel Karpy <carpawell@nspcc.ru>
Pavel Karpy 2022-11-17 19:44:58 +03:00 committed by fyrchik
parent dd225906a0
commit ed4351aab0
2 changed files with 22 additions and 2 deletions


@@ -35,6 +35,7 @@ Changelog for NeoFS Node
 - Removing all trees by container ID if tree ID is empty in `pilorama.Forest.TreeDrop` (#1940)
 - Concurrent mode changes in the metabase and blobstor (#2057)
 - Panic in IR when performing HEAD requests (#2069)
+- Write-cache flush duplication (#2074)
 ### Removed
 ### Updated


@@ -1,6 +1,7 @@
 package writecache
 import (
+	"bytes"
 	"errors"
 	"time"
@@ -57,7 +58,7 @@ func (c *cache) runFlushLoop() {
 }
 func (c *cache) flushDB() {
-	lastKey := []byte{}
+	var lastKey []byte
 	var m []objectInfo
 	for {
 		select {
@@ -79,7 +80,25 @@ func (c *cache) flushDB() {
 		_ = c.db.View(func(tx *bbolt.Tx) error {
 			b := tx.Bucket(defaultBucket)
 			cs := b.Cursor()
-			for k, v := cs.Seek(lastKey); k != nil && len(m) < flushBatchSize; k, v = cs.Next() {
+			var k, v []byte
+			if len(lastKey) == 0 {
+				k, v = cs.First()
+			} else {
+				k, v = cs.Seek(lastKey)
+				if bytes.Equal(k, lastKey) {
+					k, v = cs.Next()
+				}
+			}
+			for ; k != nil && len(m) < flushBatchSize; k, v = cs.Next() {
+				if len(lastKey) == len(k) {
+					copy(lastKey, k)
+				} else {
+					lastKey = slice.Copy(k)
+				}
 				if _, ok := c.flushed.Peek(string(k)); ok {
 					continue
 				}
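
For context, here is a minimal, self-contained sketch (not the NeoFS code itself) of the cursor-resume pattern this commit introduces: each batch remembers the last key it visited, and the next pass seeks to that key and steps past it, so the object the previous batch ended on is not flushed a second time. The bucket name, seed keys, and batch size below are invented for the example; the real code uses `defaultBucket` and `flushBatchSize` inside the write-cache's `flushDB`.

```go
// A sketch of resuming a bbolt cursor between batches without
// re-reading the key the previous batch ended on.
package main

import (
	"bytes"
	"fmt"
	"log"
	"os"
	"path/filepath"

	bolt "go.etcd.io/bbolt"
)

func main() {
	path := filepath.Join(os.TempDir(), "flush-demo.db")
	defer os.Remove(path)

	db, err := bolt.Open(path, 0o600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	bucket := []byte("objects") // hypothetical bucket name, not defaultBucket

	// Seed a few keys so there is something to iterate over in batches.
	if err := db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists(bucket)
		if err != nil {
			return err
		}
		for _, k := range []string{"a", "b", "c", "d", "e"} {
			if err := b.Put([]byte(k), []byte("payload")); err != nil {
				return err
			}
		}
		return nil
	}); err != nil {
		log.Fatal(err)
	}

	const batchSize = 2 // stands in for flushBatchSize
	var lastKey []byte

	for {
		var batch []string

		_ = db.View(func(tx *bolt.Tx) error {
			cs := tx.Bucket(bucket).Cursor()

			var k []byte
			if len(lastKey) == 0 {
				// First pass: start at the beginning of the bucket.
				k, _ = cs.First()
			} else {
				// Later passes: Seek lands on the remembered key if it still
				// exists, so step past it instead of handling it again.
				k, _ = cs.Seek(lastKey)
				if bytes.Equal(k, lastKey) {
					k, _ = cs.Next()
				}
			}

			for ; k != nil && len(batch) < batchSize; k, _ = cs.Next() {
				// Remember where this batch stopped before doing any work.
				lastKey = append([]byte(nil), k...)
				batch = append(batch, string(k))
			}
			return nil
		})

		if len(batch) == 0 {
			break // bucket exhausted
		}
		fmt.Println("flushing batch:", batch)
	}
}
```

Keeping `lastKey` outside the `View` closure, as the patch does, lets every batch run in its own short read transaction while still making forward progress across batches instead of repeatedly re-seeking to (and re-flushing) the same key.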