forked from TrueCloudLab/frostfs-node
[#1825] writecache: Flush cache when moving to the DEGRADED mode
Degraded mode allows us to operate without an SSD, thus writecache should be unavailable in this mode. Signed-off-by: Evgenii Stratonikov <evgeniy@morphbits.ru>
This commit is contained in:
parent
236414df49
commit
8b3b16fe62
5 changed files with 50 additions and 11 deletions
|
@ -16,6 +16,7 @@ Changelog for NeoFS Node
|
||||||
### Changed
|
### Changed
|
||||||
|
|
||||||
- Allow to evacuate shard data with `EvacuateShard` control RPC (#1800)
|
- Allow to evacuate shard data with `EvacuateShard` control RPC (#1800)
|
||||||
|
- Flush write-cache when moving shard to DEGRADED mode (#1825)
|
||||||
|
|
||||||
### Fixed
|
### Fixed
|
||||||
- Description of command `netmap nodeinfo` (#1821)
|
- Description of command `netmap nodeinfo` (#1821)
|
||||||
|
|
|
@ -49,10 +49,11 @@ func (s *Shard) Put(prm PutPrm) (PutRes, error) {
|
||||||
|
|
||||||
// existence checks are not performed there; these checks should be executed
|
// existence checks are not performed there; these checks should be executed
|
||||||
// ahead of `Put` by storage engine
|
// ahead of `Put` by storage engine
|
||||||
if s.hasWriteCache() {
|
tryCache := s.hasWriteCache() && !m.NoMetabase()
|
||||||
|
if tryCache {
|
||||||
res, err = s.writeCache.Put(putPrm)
|
res, err = s.writeCache.Put(putPrm)
|
||||||
}
|
}
|
||||||
if err != nil || !s.hasWriteCache() {
|
if err != nil || !tryCache {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
s.log.Debug("can't put object to the write-cache, trying blobstor",
|
s.log.Debug("can't put object to the write-cache, trying blobstor",
|
||||||
zap.String("err", err.Error()))
|
zap.String("err", err.Error()))
|
||||||
|
|
|
@ -50,7 +50,7 @@ func (c *cache) runFlushLoop() {
|
||||||
for {
|
for {
|
||||||
select {
|
select {
|
||||||
case <-tt.C:
|
case <-tt.C:
|
||||||
c.flush()
|
c.flushDB()
|
||||||
tt.Reset(defaultFlushInterval)
|
tt.Reset(defaultFlushInterval)
|
||||||
case <-c.closeCh:
|
case <-c.closeCh:
|
||||||
return
|
return
|
||||||
|
@ -59,7 +59,7 @@ func (c *cache) runFlushLoop() {
|
||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *cache) flush() {
|
func (c *cache) flushDB() {
|
||||||
lastKey := []byte{}
|
lastKey := []byte{}
|
||||||
var m []objectInfo
|
var m []objectInfo
|
||||||
for {
|
for {
|
||||||
|
@ -241,6 +241,10 @@ func (c *cache) Flush(ignoreErrors bool) error {
|
||||||
return errMustBeReadOnly
|
return errMustBeReadOnly
|
||||||
}
|
}
|
||||||
|
|
||||||
|
return c.flush(ignoreErrors)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *cache) flush(ignoreErrors bool) error {
|
||||||
var prm common.IteratePrm
|
var prm common.IteratePrm
|
||||||
prm.IgnoreErrors = ignoreErrors
|
prm.IgnoreErrors = ignoreErrors
|
||||||
prm.LazyHandler = func(addr oid.Address, f func() ([]byte, error)) error {
|
prm.LazyHandler = func(addr oid.Address, f func() ([]byte, error)) error {
|
||||||
|
|
|
@ -109,10 +109,6 @@ func TestFlush(t *testing.T) {
|
||||||
wc, bs, mb := newCache(t)
|
wc, bs, mb := newCache(t)
|
||||||
objects := putObjects(t, wc)
|
objects := putObjects(t, wc)
|
||||||
|
|
||||||
t.Run("must be read-only", func(t *testing.T) {
|
|
||||||
require.ErrorIs(t, wc.Flush(false), errMustBeReadOnly)
|
|
||||||
})
|
|
||||||
|
|
||||||
require.NoError(t, wc.SetMode(mode.ReadOnly))
|
require.NoError(t, wc.SetMode(mode.ReadOnly))
|
||||||
require.NoError(t, bs.SetMode(mode.ReadWrite))
|
require.NoError(t, bs.SetMode(mode.ReadWrite))
|
||||||
require.NoError(t, mb.SetMode(mode.ReadWrite))
|
require.NoError(t, mb.SetMode(mode.ReadWrite))
|
||||||
|
@ -135,6 +131,36 @@ func TestFlush(t *testing.T) {
|
||||||
check(t, mb, bs, objects[2:])
|
check(t, mb, bs, objects[2:])
|
||||||
})
|
})
|
||||||
|
|
||||||
|
t.Run("flush on moving to degraded mode", func(t *testing.T) {
|
||||||
|
wc, bs, mb := newCache(t)
|
||||||
|
objects := putObjects(t, wc)
|
||||||
|
|
||||||
|
// Blobstor is read-only, so we expect an error from `flush` here.
|
||||||
|
require.Error(t, wc.SetMode(mode.Degraded))
|
||||||
|
|
||||||
|
// First move to read-only mode to close background workers.
|
||||||
|
require.NoError(t, wc.SetMode(mode.ReadOnly))
|
||||||
|
require.NoError(t, bs.SetMode(mode.ReadWrite))
|
||||||
|
require.NoError(t, mb.SetMode(mode.ReadWrite))
|
||||||
|
|
||||||
|
wc.(*cache).flushed.Add(objects[0].addr.EncodeToString(), true)
|
||||||
|
wc.(*cache).flushed.Add(objects[1].addr.EncodeToString(), false)
|
||||||
|
|
||||||
|
require.NoError(t, wc.SetMode(mode.Degraded))
|
||||||
|
|
||||||
|
for i := 0; i < 2; i++ {
|
||||||
|
var mPrm meta.GetPrm
|
||||||
|
mPrm.SetAddress(objects[i].addr)
|
||||||
|
_, err := mb.Get(mPrm)
|
||||||
|
require.Error(t, err)
|
||||||
|
|
||||||
|
_, err = bs.Get(common.GetPrm{Address: objects[i].addr})
|
||||||
|
require.Error(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
check(t, mb, bs, objects[2:])
|
||||||
|
})
|
||||||
|
|
||||||
t.Run("ignore errors", func(t *testing.T) {
|
t.Run("ignore errors", func(t *testing.T) {
|
||||||
testIgnoreErrors := func(t *testing.T, f func(*cache)) {
|
testIgnoreErrors := func(t *testing.T, f func(*cache)) {
|
||||||
wc, bs, mb := newCache(t)
|
wc, bs, mb := newCache(t)
|
||||||
|
|
|
@ -18,9 +18,11 @@ func (c *cache) SetMode(m mode.Mode) error {
|
||||||
c.modeMtx.Lock()
|
c.modeMtx.Lock()
|
||||||
defer c.modeMtx.Unlock()
|
defer c.modeMtx.Unlock()
|
||||||
|
|
||||||
if m.ReadOnly() == c.readOnly() {
|
if m.NoMetabase() && !c.mode.NoMetabase() {
|
||||||
c.mode = m
|
err := c.flush(true)
|
||||||
return nil
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if c.db != nil {
|
if c.db != nil {
|
||||||
|
@ -37,6 +39,11 @@ func (c *cache) SetMode(m mode.Mode) error {
|
||||||
time.Sleep(time.Second)
|
time.Sleep(time.Second)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if m.NoMetabase() {
|
||||||
|
c.mode = m
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
if err := c.openStore(m.ReadOnly()); err != nil {
|
if err := c.openStore(m.ReadOnly()); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
Loading…
Reference in a new issue