neo-go/pkg/core/storage/memcached_store_test.go
Roman Khimov b9be892bf9 storage: allow accessing MemCachedStore during Persist
Persist by its definition doesn't change MemCachedStore's visible state: all KV
pairs that were accessible via it before Persist remain accessible after
Persist. The only thing it does is flush the current set of KV pairs from
memory to the persistent store. To do that it needs read-only access to the
current KV pair set, but technically it then replaces the maps, so we had to
use a full write lock, which made MemCachedStore inaccessible for the duration
of Persist. And Persist can take a lot of time, since it means disk access for
regular DBs.
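
For illustration, the pre-change Persist had roughly the following shape (a
sketch only; the mut/mem/del/ps names are assumptions about the MemoryStore
and MemCachedStore internals, not necessarily the exact ones):

    func (s *MemCachedStore) Persist() (int, error) {
        s.mut.Lock()
        defer s.mut.Unlock() // Held for the entire flush.
        batch := s.ps.Batch()
        for k, v := range s.mem {
            batch.Put([]byte(k), v)
        }
        for k := range s.del {
            batch.Delete([]byte(k))
        }
        keys := len(s.mem) + len(s.del)
        if err := s.ps.PutBatch(batch); err != nil {
            return 0, err // Disk I/O happens under the write lock.
        }
        s.mem = make(map[string][]byte)
        s.del = make(map[string]bool)
        return keys, nil
    }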

What we do here is create new in-memory maps for MemCachedStore before
flushing the old ones to the persistent store. Then a fake persistent store is
created, which actually is a MemCachedStore with the old maps, so it has
exactly the same visible state. This Store is never accessed for writes, so we
can read it without taking any internal locks, and at the same time we no
longer need write locks on the original MemCachedStore, since we're not using
it. All of this makes it possible to use MemCachedStore as usual: reads are
handled by going down to whatever level is needed and writes are handled by
the new maps. So while Persist for (*Blockchain).dao does its most
time-consuming work, we can process other blocks (reading data for
transactions and persisting storeBlock caches to (*Blockchain).dao).
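
Condensed to code, the new Persist looks roughly like this (again a sketch:
field names are assumed as above and flushToReal is a hypothetical stand-in
for the batch-building loop shown earlier):

    func (s *MemCachedStore) Persist() (int, error) {
        s.mut.Lock()
        // The old maps become a read-only fake persistent store.
        old := &MemCachedStore{
            MemoryStore: MemoryStore{mem: s.mem, del: s.del},
            ps:          s.ps,
        }
        s.ps = old
        s.mem = make(map[string][]byte)
        s.del = make(map[string]bool)
        s.mut.Unlock() // MemCachedStore is fully usable from here on.

        // Flush the old maps to the real store without any locks: nothing
        // writes to `old` any more, so reads need no synchronization.
        keys, err := old.flushToReal()

        s.mut.Lock()
        s.ps = old.ps // Unhook the fake store.
        if err != nil {
            // Merge the old maps back, taking care not to overwrite
            // keys that were changed while we were flushing.
        }
        s.mut.Unlock()
        return keys, err
    }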

The change was tested for performance with neo-bench (single node, 10 workers,
LevelDB) on two machines, and with block dump processing (RC4 testnet up to
62800 with VerifyBlocks set to false) on an i7-8565U.

Reference results (bbe4e9cd7b):

Ryzen 9 5950X:
RPS     23616.969 22817.086 23222.378  ≈ 23218   ± 1.72%
TPS     23047.316 22608.578 22735.540  ≈ 22797   ± 0.99%
CPU %      23.434    25.553    23.848  ≈    24.3 ± 4.63%
Mem MB    600.636   503.060   582.043  ≈   562   ± 9.22%

Core i7-8565U:
RPS     6594.007 6499.501 6572.902  ≈ 6555   ± 0.76%
TPS     6561.680 6444.545 6510.120  ≈ 6505   ± 0.90%
CPU %     58.452   60.568   62.474    ≈ 60.5 ± 3.33%
Mem MB   234.893  285.067  269.081   ≈ 263   ± 9.75%

DB restore:
real    0m22.237s 0m23.471s 0m23.409s  ≈ 23.04 ± 3.02%
user    0m35.435s 0m38.943s 0m39.247s  ≈ 37.88 ± 5.59%
sys      0m3.085s  0m3.360s  0m3.144s  ≈  3.20 ± 4.53%

After the change:

Ryzen 9 5950X:
RPS     27747.349 27407.726 27520.210  ≈ 27558   ± 0.63%  ↑ 18.69%
TPS     26992.010 26993.468 27010.966  ≈ 26999   ± 0.04%  ↑ 18.43%
CPU %      28.928    28.096    29.105  ≈    28.7 ± 1.88%  ↑ 18.1%
Mem MB    760.385   726.320   756.118  ≈   748   ± 2.48%  ↑ 33.10%

Core i7-8565U:
RPS     7783.229 7628.409 7542.340  ≈ 7651   ± 1.60%  ↑ 16.72%
TPS     7708.436 7607.397 7489.459  ≈ 7602   ± 1.44%  ↑ 16.85%
CPU %     74.899   71.020   72.697  ≈   72.9 ± 2.67%  ↑ 20.50%
Mem MB   438.047  436.967  416.350  ≈  430   ± 2.84%  ↑ 63.50%

DB restore:
real    0m20.838s 0m21.895s 0m21.794s  ≈ 21.51 ± 2.71%  ↓ 6.64%
user    0m39.091s 0m40.565s 0m41.493s  ≈ 40.38 ± 3.00%  ↑ 6.60%
sys      0m3.184s  0m2.923s  0m3.062s  ≈  3.06 ± 4.27%  ↓ 4.38%

It obviously uses more memory now and utilizes CPU more aggressively, but at
the same time it improves all relevant metrics and finally gets us to the
point where we process 50K transactions in less than a second on the Ryzen 9
5950X (going higher than 25K TPS). The other observation is a much more stable
block time: on the Ryzen 9 it's as close to 1 second as it can be.
2021-08-02 16:33:00 +03:00

package storage

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func testMemCachedStorePersist(t *testing.T, ps Store) {
	// cached Store
	ts := NewMemCachedStore(ps)
	// persisting nothing should do nothing
	c, err := ts.Persist()
	assert.Equal(t, nil, err)
	assert.Equal(t, 0, c)
	// persisting one key should result in one key in ps and nothing in ts
	assert.NoError(t, ts.Put([]byte("key"), []byte("value")))
	checkBatch(t, ts, []KeyValue{{Key: []byte("key"), Value: []byte("value")}}, nil)
	c, err = ts.Persist()
	checkBatch(t, ts, nil, nil)
	assert.Equal(t, nil, err)
	assert.Equal(t, 1, c)
	v, err := ps.Get([]byte("key"))
	assert.Equal(t, nil, err)
	assert.Equal(t, []byte("value"), v)
	v, err = ts.MemoryStore.Get([]byte("key"))
	assert.Equal(t, ErrKeyNotFound, err)
	assert.Equal(t, []byte(nil), v)
	// now we overwrite the previous `key` contents and also add `key2`,
	assert.NoError(t, ts.Put([]byte("key"), []byte("newvalue")))
	assert.NoError(t, ts.Put([]byte("key2"), []byte("value2")))
	// this is to check that no key is written into the ps before we do persist
	v, err = ps.Get([]byte("key2"))
	assert.Equal(t, ErrKeyNotFound, err)
	assert.Equal(t, []byte(nil), v)
	checkBatch(t, ts, []KeyValue{
		{Key: []byte("key"), Value: []byte("newvalue"), Exists: true},
		{Key: []byte("key2"), Value: []byte("value2")},
	}, nil)
	// two keys should be persisted (one overwritten and one new) and
	// available in the ps
	c, err = ts.Persist()
	checkBatch(t, ts, nil, nil)
	assert.Equal(t, nil, err)
	assert.Equal(t, 2, c)
	v, err = ts.MemoryStore.Get([]byte("key"))
	assert.Equal(t, ErrKeyNotFound, err)
	assert.Equal(t, []byte(nil), v)
	v, err = ts.MemoryStore.Get([]byte("key2"))
	assert.Equal(t, ErrKeyNotFound, err)
	assert.Equal(t, []byte(nil), v)
	v, err = ps.Get([]byte("key"))
	assert.Equal(t, nil, err)
	assert.Equal(t, []byte("newvalue"), v)
	v, err = ps.Get([]byte("key2"))
	assert.Equal(t, nil, err)
	assert.Equal(t, []byte("value2"), v)
	checkBatch(t, ts, nil, nil)
	// we've persisted some values, make sure a subsequent persist is a no-op
	c, err = ts.Persist()
	assert.Equal(t, nil, err)
	assert.Equal(t, 0, c)
	// test persisting deletions
	err = ts.Delete([]byte("key"))
	assert.Equal(t, nil, err)
	checkBatch(t, ts, nil, []KeyValue{{Key: []byte("key"), Exists: true}})
	c, err = ts.Persist()
	checkBatch(t, ts, nil, nil)
	assert.Equal(t, nil, err)
	assert.Equal(t, 0, c)
	v, err = ps.Get([]byte("key"))
	assert.Equal(t, ErrKeyNotFound, err)
	assert.Equal(t, []byte(nil), v)
	v, err = ps.Get([]byte("key2"))
	assert.Equal(t, nil, err)
	assert.Equal(t, []byte("value2"), v)
}

func checkBatch(t *testing.T, ts *MemCachedStore, put []KeyValue, del []KeyValue) {
	b := ts.GetBatch()
	assert.Equal(t, len(put), len(b.Put), "wrong number of put elements in a batch")
	assert.Equal(t, len(del), len(b.Deleted), "wrong number of deleted elements in a batch")
	for i := range put {
		assert.Contains(t, b.Put, put[i])
	}
	for i := range del {
		assert.Contains(t, b.Deleted, del[i])
	}
}

func TestMemCachedPersist(t *testing.T) {
	t.Run("MemoryStore", func(t *testing.T) {
		ps := NewMemoryStore()
		testMemCachedStorePersist(t, ps)
	})
	t.Run("MemoryCachedStore", func(t *testing.T) {
		ps1 := NewMemoryStore()
		ps2 := NewMemCachedStore(ps1)
		testMemCachedStorePersist(t, ps2)
	})
	t.Run("BoltDBStore", func(t *testing.T) {
		ps := newBoltStoreForTesting(t)
		testMemCachedStorePersist(t, ps)
	})
}

func TestCachedGetFromPersistent(t *testing.T) {
	key := []byte("key")
	value := []byte("value")
	ps := NewMemoryStore()
	ts := NewMemCachedStore(ps)

	assert.NoError(t, ps.Put(key, value))
	val, err := ts.Get(key)
	assert.Nil(t, err)
	assert.Equal(t, value, val)
	assert.NoError(t, ts.Delete(key))
	val, err = ts.Get(key)
	assert.Equal(t, err, ErrKeyNotFound)
	assert.Nil(t, val)
}

func TestCachedSeek(t *testing.T) {
	var (
		// Given this prefix...
		goodPrefix = []byte{'f'}
		// ...these pairs should be found...
		lowerKVs = []kvSeen{
			{[]byte("foo"), []byte("bar"), false},
			{[]byte("faa"), []byte("bra"), false},
		}
		// ...these should not be...
		deletedKVs = []kvSeen{
			{[]byte("fee"), []byte("pow"), false},
			{[]byte("fii"), []byte("qaz"), false},
		}
		// ...and these should be found with their updated values.
		updatedKVs = []kvSeen{
			{[]byte("fuu"), []byte("wop"), false},
			{[]byte("fyy"), []byte("zaq"), false},
		}
		ps = NewMemoryStore()
		ts = NewMemCachedStore(ps)
	)
	for _, v := range lowerKVs {
		require.NoError(t, ps.Put(v.key, v.val))
	}
	for _, v := range deletedKVs {
		require.NoError(t, ps.Put(v.key, v.val))
		require.NoError(t, ts.Delete(v.key))
	}
	for _, v := range updatedKVs {
		require.NoError(t, ps.Put(v.key, []byte("stub")))
		require.NoError(t, ts.Put(v.key, v.val))
	}
	foundKVs := make(map[string][]byte)
	ts.Seek(goodPrefix, func(k, v []byte) {
		foundKVs[string(k)] = v
	})
	assert.Equal(t, len(foundKVs), len(lowerKVs)+len(updatedKVs))
	for _, kv := range lowerKVs {
		assert.Equal(t, kv.val, foundKVs[string(kv.key)])
	}
	for _, kv := range deletedKVs {
		_, ok := foundKVs[string(kv.key)]
		assert.Equal(t, false, ok)
	}
	for _, kv := range updatedKVs {
		assert.Equal(t, kv.val, foundKVs[string(kv.key)])
	}
}

func newMemCachedStoreForTesting(t *testing.T) Store {
	return NewMemCachedStore(NewMemoryStore())
}

// BadBatch is a Batch implementation that silently discards everything.
type BadBatch struct{}

func (b BadBatch) Delete(k []byte) {}
func (b BadBatch) Put(k, v []byte) {}

// BadStore is a Store that never stores anything and always fails PutBatch,
// invoking the onPutBatch hook before returning the error.
type BadStore struct {
	onPutBatch func()
}

func (b *BadStore) Batch() Batch {
	return BadBatch{}
}

func (b *BadStore) Delete(k []byte) error {
	return nil
}

func (b *BadStore) Get([]byte) ([]byte, error) {
	return nil, ErrKeyNotFound
}

func (b *BadStore) Put(k, v []byte) error {
	return nil
}

func (b *BadStore) PutBatch(Batch) error {
	b.onPutBatch()
	return ErrKeyNotFound
}

func (b *BadStore) Seek(k []byte, f func(k, v []byte)) {
}

func (b *BadStore) Close() error {
	return nil
}

func TestMemCachedPersistFailing(t *testing.T) {
	var (
		bs BadStore
		t1 = []byte("t1")
		t2 = []byte("t2")
		b1 = []byte("b1")
	)
	// cached Store
	ts := NewMemCachedStore(&bs)
	// Set a pair of keys.
	require.NoError(t, ts.Put(t1, t1))
	require.NoError(t, ts.Put(t2, t2))
	// This will be called during Persist().
	bs.onPutBatch = func() {
		// Drop one, add one.
		require.NoError(t, ts.Put(b1, b1))
		require.NoError(t, ts.Delete(t1))
	}
	_, err := ts.Persist()
	require.Error(t, err)
	// PutBatch() failed in Persist, but we still should have proper state.
	_, err = ts.Get(t1)
	require.Error(t, err)
	res, err := ts.Get(t2)
	require.NoError(t, err)
	require.Equal(t, t2, res)
	res, err = ts.Get(b1)
	require.NoError(t, err)
	require.Equal(t, b1, res)
}
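
// The test below is not part of the original file; it's a sketch
// illustrating the behaviour the commit above enables: the store stays
// readable and writable while Persist() is busy flushing to the backing
// store. The blockingStore type and the test name are hypothetical.
type blockingStore struct {
	BadStore
	started chan struct{} // Closed once Persist() reaches PutBatch().
	unblock chan struct{} // Closed by the test to let Persist() finish.
}

func (b *blockingStore) PutBatch(Batch) error {
	close(b.started)
	<-b.unblock
	return nil
}

func TestAccessDuringPersistSketch(t *testing.T) {
	bs := &blockingStore{
		started: make(chan struct{}),
		unblock: make(chan struct{}),
	}
	ts := NewMemCachedStore(bs)
	require.NoError(t, ts.Put([]byte("key"), []byte("value")))

	errCh := make(chan error)
	go func() {
		_, err := ts.Persist()
		errCh <- err
	}()
	<-bs.started // Persist() is now blocked inside PutBatch().

	// Reads still see the pre-Persist state via the fake persistent store.
	v, err := ts.Get([]byte("key"))
	require.NoError(t, err)
	require.Equal(t, []byte("value"), v)
	// Writes land in the fresh in-memory maps.
	require.NoError(t, ts.Put([]byte("key2"), []byte("value2")))

	close(bs.unblock)
	require.NoError(t, <-errCh)
}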