2019-10-16 13:41:50 +00:00
|
|
|
package storage
|
|
|
|
|
2021-09-22 15:58:48 +00:00
|
|
|
import (
|
|
|
|
"bytes"
|
2021-10-06 12:54:44 +00:00
|
|
|
"context"
|
2022-04-15 14:48:58 +00:00
|
|
|
"fmt"
|
2021-09-22 15:58:48 +00:00
|
|
|
"sort"
|
|
|
|
"strings"
|
|
|
|
"sync"
|
|
|
|
|
|
|
|
"github.com/nspcc-dev/neo-go/pkg/util/slice"
|
|
|
|
)
|
storage: allow accessing MemCachedStore during Persist
Persist by its definition doesn't change MemCachedStore visible state, all KV
pairs that were acessible via it before Persist remain accessible after
Persist. The only thing it does is flushing of the current set of KV pairs
from memory to peristent store. To do that it needs read-only access to the
current KV pair set, but technically it then replaces maps, so we have to use
full write lock which makes MemCachedStore inaccessible for the duration of
Persist. And Persist can take a lot of time, it's about disk access for
regular DBs.
What we do here is we create new in-memory maps for MemCachedStore before
flushing old ones to the persistent store. Then a fake persistent store is
created which actually is a MemCachedStore with old maps, so it has exactly
the same visible state. This Store is never accessed for writes, so we can
read it without taking any internal locks and at the same time we no longer
need write locks for original MemCachedStore, we're not using it. All of this
makes it possible to use MemCachedStore as normally reads are handled going
down to whatever level is needed and writes are handled by new maps. So while
Persist for (*Blockchain).dao does its most time-consuming work we can process
other blocks (reading data for transactions and persisting storeBlock caches
to (*Blockchain).dao).
The change was tested for performance with neo-bench (single node, 10 workers,
LevelDB) on two machines and block dump processing (RC4 testnet up to 62800
with VerifyBlocks set to false) on i7-8565U.
Reference results (bbe4e9cd7bb33428633586f080f64494cd6ac9cf):
Ryzen 9 5950X:
RPS 23616.969 22817.086 23222.378 ≈ 23218 ± 1.72%
TPS 23047.316 22608.578 22735.540 ≈ 22797 ± 0.99%
CPU % 23.434 25.553 23.848 ≈ 24.3 ± 4.63%
Mem MB 600.636 503.060 582.043 ≈ 562 ± 9.22%
Core i7-8565U:
RPS 6594.007 6499.501 6572.902 ≈ 6555 ± 0.76%
TPS 6561.680 6444.545 6510.120 ≈ 6505 ± 0.90%
CPU % 58.452 60.568 62.474 ≈ 60.5 ± 3.33%
Mem MB 234.893 285.067 269.081 ≈ 263 ± 9.75%
DB restore:
real 0m22.237s 0m23.471s 0m23.409s ≈ 23.04 ± 3.02%
user 0m35.435s 0m38.943s 0m39.247s ≈ 37.88 ± 5.59%
sys 0m3.085s 0m3.360s 0m3.144s ≈ 3.20 ± 4.53%
After the change:
Ryzen 9 5950X:
RPS 27747.349 27407.726 27520.210 ≈ 27558 ± 0.63% ↑ 18.69%
TPS 26992.010 26993.468 27010.966 ≈ 26999 ± 0.04% ↑ 18.43%
CPU % 28.928 28.096 29.105 ≈ 28.7 ± 1.88% ↑ 18.1%
Mem MB 760.385 726.320 756.118 ≈ 748 ± 2.48% ↑ 33.10%
Core i7-8565U:
RPS 7783.229 7628.409 7542.340 ≈ 7651 ± 1.60% ↑ 16.72%
TPS 7708.436 7607.397 7489.459 ≈ 7602 ± 1.44% ↑ 16.85%
CPU % 74.899 71.020 72.697 ≈ 72.9 ± 2.67% ↑ 20.50%
Mem MB 438.047 436.967 416.350 ≈ 430 ± 2.84% ↑ 63.50%
DB restore:
real 0m20.838s 0m21.895s 0m21.794s ≈ 21.51 ± 2.71% ↓ 6.64%
user 0m39.091s 0m40.565s 0m41.493s ≈ 40.38 ± 3.00% ↑ 6.60%
sys 0m3.184s 0m2.923s 0m3.062s ≈ 3.06 ± 4.27% ↓ 4.38%
It obviously uses more memory now and utilizes CPU more aggressively, but at
the same time it allows to improve all relevant metrics and finally reach a
situation where we process 50K transactions in less than second on Ryzen 9
5950X (going higher than 25K TPS). The other observation is much more stable
block time, on Ryzen 9 it's as close to 1 second as it could be.
2021-07-30 20:35:03 +00:00
|
|
|
|
2019-10-16 13:41:50 +00:00
|
|
|
// MemCachedStore is a wrapper around persistent store that caches all changes
// being made for them to be later flushed in one batch.
type MemCachedStore struct {
	MemoryStore

	// nativeCacheLock presumably guards nativeCache — TODO confirm against
	// the accessors (GetROCache/GetRWCache) that live outside this view.
	nativeCacheLock sync.RWMutex
	// nativeCache holds per-contract cache items keyed by an int32 ID
	// (presumably the native contract ID; verify against callers).
	nativeCache map[int32]NativeContractCache
	// private, when true, turns lock/unlock/rlock/runlock into no-ops, so
	// the store performs no synchronization (single-owner use).
	private bool

	// plock protects Persist from double entrance.
	plock sync.Mutex
	// Persistent Store.
	ps Store
}
|
|
|
|
|
2022-04-15 14:48:58 +00:00
|
|
|
// NativeContractCache is an interface representing cache for a native contract.
// Cache can be copied to create a wrapper around current DAO layer. Wrapped cache
// can be persisted to the underlying DAO native cache.
type NativeContractCache interface {
	// Copy returns a copy of native cache item that can safely be changed within
	// the subsequent DAO operations.
	Copy() NativeContractCache
	// Persist persists changes from upper native cache wrapper to the underlying
	// native cache `ps`. The resulting up-to-date cache and an error are returned.
	Persist(ps NativeContractCache) (NativeContractCache, error)
}
|
|
|
|
|
2020-02-06 13:11:32 +00:00
|
|
|
type (
	// KeyValue represents key-value pair.
	KeyValue struct {
		Key   []byte
		Value []byte
	}

	// KeyValueExists represents key-value pair with indicator whether the item
	// exists in the persistent storage.
	KeyValueExists struct {
		KeyValue

		Exists bool
	}

	// MemBatch represents a changeset to be persisted.
	MemBatch struct {
		// Put holds added and updated pairs (non-nil cached values).
		Put []KeyValueExists
		// Deleted holds removed keys (cached as nil values).
		Deleted []KeyValueExists
	}
)
|
|
|
|
|
2019-10-16 13:41:50 +00:00
|
|
|
// NewMemCachedStore creates a new MemCachedStore object.
|
|
|
|
func NewMemCachedStore(lower Store) *MemCachedStore {
|
2022-04-15 14:48:58 +00:00
|
|
|
// Do not copy cache from ps; instead should create clear map: GetRWCache and
|
|
|
|
// GetROCache will retrieve cache from the underlying nativeCache if requested.
|
|
|
|
cache := make(map[int32]NativeContractCache)
|
2019-10-16 13:41:50 +00:00
|
|
|
return &MemCachedStore{
|
|
|
|
MemoryStore: *NewMemoryStore(),
|
2022-04-12 14:29:11 +00:00
|
|
|
nativeCache: cache,
|
2019-10-16 13:41:50 +00:00
|
|
|
ps: lower,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-02-16 16:13:06 +00:00
|
|
|
// NewPrivateMemCachedStore creates a new private (unlocked) MemCachedStore object.
|
|
|
|
// Private cached stores are closed after Persist.
|
|
|
|
func NewPrivateMemCachedStore(lower Store) *MemCachedStore {
|
2022-04-15 14:48:58 +00:00
|
|
|
// Do not copy cache from ps; instead should create clear map: GetRWCache and
|
|
|
|
// GetROCache will retrieve cache from the underlying nativeCache if requested.
|
|
|
|
// The lowest underlying store MUST have its native cache initialized, otherwise
|
|
|
|
// GetROCache and GetRWCache won't work properly.
|
|
|
|
cache := make(map[int32]NativeContractCache)
|
2022-02-16 16:13:06 +00:00
|
|
|
return &MemCachedStore{
|
|
|
|
MemoryStore: *NewMemoryStore(),
|
2022-04-12 14:29:11 +00:00
|
|
|
nativeCache: cache,
|
2022-02-16 16:13:06 +00:00
|
|
|
private: true,
|
|
|
|
ps: lower,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// lock write-locks non-private store.
|
|
|
|
func (s *MemCachedStore) lock() {
|
|
|
|
if !s.private {
|
|
|
|
s.mut.Lock()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// unlock unlocks non-private store.
|
|
|
|
func (s *MemCachedStore) unlock() {
|
|
|
|
if !s.private {
|
|
|
|
s.mut.Unlock()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// rlock read-locks non-private store.
|
|
|
|
func (s *MemCachedStore) rlock() {
|
|
|
|
if !s.private {
|
|
|
|
s.mut.RLock()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// runlock drops read lock for non-private stores.
|
|
|
|
func (s *MemCachedStore) runlock() {
|
|
|
|
if !s.private {
|
|
|
|
s.mut.RUnlock()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-10-16 13:41:50 +00:00
|
|
|
// Get implements the Store interface.
|
|
|
|
func (s *MemCachedStore) Get(key []byte) ([]byte, error) {
|
2022-02-16 16:13:06 +00:00
|
|
|
s.rlock()
|
|
|
|
defer s.runlock()
|
storage: use two maps for MemoryStore
Simple and dumb as it is, this allows to separate contract storage from other
things and dramatically improve Seek() time over storage (even though it's
still unordered!) which in turn improves block processing speed.
LevelDB LevelDB (KeepOnlyLatest) BoltDB BoltDB (KeepOnlyLatest)
Master real 16m27,936s real 10m9,440s real 16m39,369s real 8m1,227s
user 20m12,619s user 26m13,925s user 18m9,162s user 18m5,846s
sys 2m56,377s sys 1m32,051s sys 9m52,576s sys 2m9,455s
2 maps real 10m49,495s real 8m53,342s real 11m46,204s real 5m56,043s
user 14m19,922s user 24m6,225s user 13m25,691s user 15m4,694s
sys 1m53,021s sys 1m23,006s sys 4m31,735s sys 2m8,714s
neo-bench performance is mostly unaffected, ~0.5% for 1-1 test and 4% for
10K-10K test both fall within regular test error range.
2022-02-15 16:07:59 +00:00
|
|
|
m := s.chooseMap(key)
|
|
|
|
if val, ok := m[string(key)]; ok {
|
2022-01-29 08:54:25 +00:00
|
|
|
if val == nil {
|
|
|
|
return nil, ErrKeyNotFound
|
|
|
|
}
|
2019-10-16 13:41:50 +00:00
|
|
|
return val, nil
|
|
|
|
}
|
|
|
|
return s.ps.Get(key)
|
|
|
|
}
|
|
|
|
|
2022-02-16 14:48:15 +00:00
|
|
|
// Put puts new KV pair into the store.
|
|
|
|
func (s *MemCachedStore) Put(key, value []byte) {
|
2022-02-16 13:48:47 +00:00
|
|
|
newKey := string(key)
|
|
|
|
vcopy := slice.Copy(value)
|
2022-02-16 16:13:06 +00:00
|
|
|
s.lock()
|
2022-02-16 13:48:47 +00:00
|
|
|
put(s.chooseMap(key), newKey, vcopy)
|
2022-02-16 16:13:06 +00:00
|
|
|
s.unlock()
|
2022-02-16 13:48:47 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Delete drops KV pair from the store. Never returns an error.
|
2022-02-16 14:48:15 +00:00
|
|
|
func (s *MemCachedStore) Delete(key []byte) {
|
2022-02-16 13:48:47 +00:00
|
|
|
newKey := string(key)
|
2022-02-16 16:13:06 +00:00
|
|
|
s.lock()
|
2022-02-16 13:48:47 +00:00
|
|
|
put(s.chooseMap(key), newKey, nil)
|
2022-02-16 16:13:06 +00:00
|
|
|
s.unlock()
|
2022-02-16 13:48:47 +00:00
|
|
|
}
|
|
|
|
|
2020-02-06 13:11:32 +00:00
|
|
|
// GetBatch returns currently accumulated changeset.
|
|
|
|
func (s *MemCachedStore) GetBatch() *MemBatch {
|
2022-02-16 16:13:06 +00:00
|
|
|
s.rlock()
|
|
|
|
defer s.runlock()
|
2020-02-06 13:11:32 +00:00
|
|
|
var b MemBatch
|
|
|
|
|
storage: use two maps for MemoryStore
Simple and dumb as it is, this allows to separate contract storage from other
things and dramatically improve Seek() time over storage (even though it's
still unordered!) which in turn improves block processing speed.
LevelDB LevelDB (KeepOnlyLatest) BoltDB BoltDB (KeepOnlyLatest)
Master real 16m27,936s real 10m9,440s real 16m39,369s real 8m1,227s
user 20m12,619s user 26m13,925s user 18m9,162s user 18m5,846s
sys 2m56,377s sys 1m32,051s sys 9m52,576s sys 2m9,455s
2 maps real 10m49,495s real 8m53,342s real 11m46,204s real 5m56,043s
user 14m19,922s user 24m6,225s user 13m25,691s user 15m4,694s
sys 1m53,021s sys 1m23,006s sys 4m31,735s sys 2m8,714s
neo-bench performance is mostly unaffected, ~0.5% for 1-1 test and 4% for
10K-10K test both fall within regular test error range.
2022-02-15 16:07:59 +00:00
|
|
|
b.Put = make([]KeyValueExists, 0, len(s.mem)+len(s.stor))
|
2022-01-29 08:54:25 +00:00
|
|
|
b.Deleted = make([]KeyValueExists, 0)
|
storage: use two maps for MemoryStore
Simple and dumb as it is, this allows to separate contract storage from other
things and dramatically improve Seek() time over storage (even though it's
still unordered!) which in turn improves block processing speed.
LevelDB LevelDB (KeepOnlyLatest) BoltDB BoltDB (KeepOnlyLatest)
Master real 16m27,936s real 10m9,440s real 16m39,369s real 8m1,227s
user 20m12,619s user 26m13,925s user 18m9,162s user 18m5,846s
sys 2m56,377s sys 1m32,051s sys 9m52,576s sys 2m9,455s
2 maps real 10m49,495s real 8m53,342s real 11m46,204s real 5m56,043s
user 14m19,922s user 24m6,225s user 13m25,691s user 15m4,694s
sys 1m53,021s sys 1m23,006s sys 4m31,735s sys 2m8,714s
neo-bench performance is mostly unaffected, ~0.5% for 1-1 test and 4% for
10K-10K test both fall within regular test error range.
2022-02-15 16:07:59 +00:00
|
|
|
for _, m := range []map[string][]byte{s.mem, s.stor} {
|
|
|
|
for k, v := range m {
|
|
|
|
key := []byte(k)
|
|
|
|
_, err := s.ps.Get(key)
|
|
|
|
if v == nil {
|
|
|
|
b.Deleted = append(b.Deleted, KeyValueExists{KeyValue: KeyValue{Key: key}, Exists: err == nil})
|
|
|
|
} else {
|
|
|
|
b.Put = append(b.Put, KeyValueExists{KeyValue: KeyValue{Key: key, Value: v}, Exists: err == nil})
|
|
|
|
}
|
2022-01-29 08:54:25 +00:00
|
|
|
}
|
2020-02-06 13:11:32 +00:00
|
|
|
}
|
|
|
|
return &b
|
|
|
|
}
|
|
|
|
|
2022-02-16 16:13:06 +00:00
|
|
|
// PutChangeSet implements the Store interface. Never returns an error.
|
|
|
|
func (s *MemCachedStore) PutChangeSet(puts map[string][]byte, stores map[string][]byte) error {
|
|
|
|
s.lock()
|
|
|
|
s.MemoryStore.putChangeSet(puts, stores)
|
|
|
|
s.unlock()
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2019-10-16 13:41:50 +00:00
|
|
|
// Seek implements the Store interface.
func (s *MemCachedStore) Seek(rng SeekRange, f func(k, v []byte) bool) {
	// Synchronous, non-cancellable variant: background context never fires Done.
	s.seek(context.Background(), rng, false, f)
}
|
|
|
|
|
2022-02-17 09:11:59 +00:00
|
|
|
// GetStorageChanges returns all current storage changes. It can only be done for private
|
|
|
|
// MemCachedStore.
|
|
|
|
func (s *MemCachedStore) GetStorageChanges() map[string][]byte {
|
2022-02-16 16:13:06 +00:00
|
|
|
if !s.private {
|
2022-02-17 09:11:59 +00:00
|
|
|
panic("GetStorageChanges called on shared MemCachedStore")
|
2022-02-16 16:13:06 +00:00
|
|
|
}
|
2022-02-17 09:11:59 +00:00
|
|
|
return s.stor
|
2022-02-16 16:13:06 +00:00
|
|
|
}
|
|
|
|
|
2021-10-04 14:01:42 +00:00
|
|
|
// SeekAsync returns non-buffered channel with matching KeyValue pairs. Key and
|
|
|
|
// value slices may not be copied and may be modified. SeekAsync can guarantee
|
|
|
|
// that key-value items are sorted by key in ascending way.
|
2021-12-16 13:55:50 +00:00
|
|
|
func (s *MemCachedStore) SeekAsync(ctx context.Context, rng SeekRange, cutPrefix bool) chan KeyValue {
|
2021-10-19 15:03:47 +00:00
|
|
|
res := make(chan KeyValue)
|
|
|
|
go func() {
|
2022-01-17 17:41:51 +00:00
|
|
|
s.seek(ctx, rng, cutPrefix, func(k, v []byte) bool {
|
storage: provide a way to escape from SeekAsync goroutine
A routine blocked on channel send here can't really exit, so avoid goroutine
leak:
goroutine 2813725 [chan send, 6 minutes]:
github.com/nspcc-dev/neo-go/pkg/core/storage.(*MemCachedStore).SeekAsync.func1.1(0xc01a7118f7, 0x2, 0x25, 0xc01a7118f9, 0x23, 0x23, 0xc0366c7c01)
github.com/nspcc-dev/neo-go/pkg/core/storage/memcached_store.go:120 +0x86
github.com/nspcc-dev/neo-go/pkg/core/storage.(*MemCachedStore).seek.func2(0xc0079e7920, 0xa, 0x30, 0xc0079e792a, 0x26, 0x26, 0x1)
github.com/nspcc-dev/neo-go/pkg/core/storage/memcached_store.go:183 +0x347
github.com/nspcc-dev/neo-go/pkg/core/storage.(*MemCachedStore).seek(0xc000458480, 0x135c028, 0xc0000445d0, 0xc00f1721d0, 0x7, 0x7, 0x0, 0x0, 0x0, 0x0, ...)
github.com/nspcc-dev/neo-go/pkg/core/storage/memcached_store.go:224 +0x4f4
github.com/nspcc-dev/neo-go/pkg/core/storage.(*MemCachedStore).Seek(0xc000458480, 0xc00f1721d0, 0x7, 0x7, 0x0, 0x0, 0x0, 0x0, 0xc0357c6620)
github.com/nspcc-dev/neo-go/pkg/core/storage/memcached_store.go:110 +0x8a
github.com/nspcc-dev/neo-go/pkg/core/storage.(*MemCachedStore).seek(0xc0331a4f00, 0x135bff0, 0xc00ae26ec0, 0xc00f1721d0, 0x7, 0x7, 0x0, 0x0, 0x0, 0x0, ...)
github.com/nspcc-dev/neo-go/pkg/core/storage/memcached_store.go:210 +0x379
github.com/nspcc-dev/neo-go/pkg/core/storage.(*MemCachedStore).SeekAsync.func1(0xc0331a4f00, 0x135bff0, 0xc00ae26ec0, 0xc00f1721d0, 0x7, 0x7, 0x0, 0x0, 0x0, 0x0, ...)
github.com/nspcc-dev/neo-go/pkg/core/storage/memcached_store.go:119 +0xc5
created by github.com/nspcc-dev/neo-go/pkg/core/storage.(*MemCachedStore).SeekAsync
github.com/nspcc-dev/neo-go/pkg/core/storage/memcached_store.go:118 +0xc8
goroutine 2822823 [chan send, 6 minutes]:
github.com/nspcc-dev/neo-go/pkg/core/storage.(*MemCachedStore).SeekAsync.func1.1(0xc011859b77, 0x3, 0x3, 0xc017bea8d0, 0x26, 0x26, 0xc00f1afc00)
github.com/nspcc-dev/neo-go/pkg/core/storage/memcached_store.go:120 +0x86
github.com/nspcc-dev/neo-go/pkg/core/storage.(*MemCachedStore).seek.func2(0xc011859b60, 0xa, 0xa, 0xc017bea8a0, 0x26, 0x26, 0xc00ad9fb00)
github.com/nspcc-dev/neo-go/pkg/core/storage/memcached_store.go:200 +0x47e
github.com/nspcc-dev/neo-go/pkg/core/storage.(*MemCachedStore).seek.func2(0xc01d5d8c90, 0xa, 0x30, 0xc01d5d8c9a, 0x26, 0x26, 0x1)
github.com/nspcc-dev/neo-go/pkg/core/storage/memcached_store.go:200 +0x47e
github.com/nspcc-dev/neo-go/pkg/core/storage.(*MemCachedStore).seek(0xc035e12900, 0x135c028, 0xc0000445d0, 0xc01773bf60, 0x7, 0x7, 0x0, 0x0, 0x0, 0x0, ...)
github.com/nspcc-dev/neo-go/pkg/core/storage/memcached_store.go:224 +0x4f4
github.com/nspcc-dev/neo-go/pkg/core/storage.(*MemCachedStore).Seek(0xc035e12900, 0xc01773bf60, 0x7, 0x7, 0x0, 0x0, 0x0, 0x0, 0xc030c9e0e0)
github.com/nspcc-dev/neo-go/pkg/core/storage/memcached_store.go:110 +0x8a
github.com/nspcc-dev/neo-go/pkg/core/storage.(*MemCachedStore).seek(0xc000458480, 0x135c028, 0xc0000445d0, 0xc01773bf60, 0x7, 0x7, 0x0, 0x0, 0x0, 0x0, ...)
github.com/nspcc-dev/neo-go/pkg/core/storage/memcached_store.go:210 +0x379
github.com/nspcc-dev/neo-go/pkg/core/storage.(*MemCachedStore).Seek(0xc000458480, 0xc01773bf60, 0x7, 0x7, 0x0, 0x0, 0x0, 0x0, 0xc030c9e070)
github.com/nspcc-dev/neo-go/pkg/core/storage/memcached_store.go:110 +0x8a
github.com/nspcc-dev/neo-go/pkg/core/storage.(*MemCachedStore).seek(0xc00b340c60, 0x135bff0, 0xc00f1afbc0, 0xc01773bf60, 0x7, 0x7, 0x0, 0x0, 0x0, 0x0, ...)
github.com/nspcc-dev/neo-go/pkg/core/storage/memcached_store.go:210 +0x379
github.com/nspcc-dev/neo-go/pkg/core/storage.(*MemCachedStore).SeekAsync.func1(0xc00b340c60, 0x135bff0, 0xc00f1afbc0, 0xc01773bf60, 0x7, 0x7, 0x0, 0x0, 0x0, 0x0, ...)
github.com/nspcc-dev/neo-go/pkg/core/storage/memcached_store.go:119 +0xc5
created by github.com/nspcc-dev/neo-go/pkg/core/storage.(*MemCachedStore).SeekAsync
github.com/nspcc-dev/neo-go/pkg/core/storage/memcached_store.go:118 +0xc8
...
2022-02-05 07:53:45 +00:00
|
|
|
select {
|
|
|
|
case <-ctx.Done():
|
|
|
|
return false
|
|
|
|
case res <- KeyValue{Key: k, Value: v}:
|
|
|
|
return true
|
2021-10-19 15:03:47 +00:00
|
|
|
}
|
|
|
|
})
|
|
|
|
close(res)
|
|
|
|
}()
|
|
|
|
|
|
|
|
return res
|
|
|
|
}
|
|
|
|
|
2021-12-16 13:55:50 +00:00
|
|
|
// seek is internal representations of Seek* capable of seeking for the given key
// and supporting early stop using provided context. `cutPrefix` denotes whether provided
// key needs to be cut off the resulting keys. `rng` specifies prefix items must match
// and point to start seeking from. Backwards seeking from some point is supported
// with corresponding `rng` field set. Results from the in-memory maps and the
// persistent store are merged in key order; cached nil values (deletions)
// shadow matching persistent-store entries.
func (s *MemCachedStore) seek(ctx context.Context, rng SeekRange, cutPrefix bool, f func(k, v []byte) bool) {
	// Create memory store `mem` and `del` snapshot not to hold the lock.
	var memRes []KeyValueExists
	sPrefix := string(rng.Prefix)
	lPrefix := len(sPrefix)
	sStart := string(rng.Start)
	lStart := len(sStart)
	// isKeyOK checks the prefix and the start position; the comparison
	// direction depends on rng.Backwards (>= forward, <= backward).
	isKeyOK := func(key string) bool {
		return strings.HasPrefix(key, sPrefix) && (lStart == 0 || strings.Compare(key[lPrefix:], sStart) >= 0)
	}
	if rng.Backwards {
		isKeyOK = func(key string) bool {
			return strings.HasPrefix(key, sPrefix) && (lStart == 0 || strings.Compare(key[lPrefix:], sStart) <= 0)
		}
	}
	s.rlock()
	m := s.MemoryStore.chooseMap(rng.Prefix)
	for k, v := range m {
		if isKeyOK(k) {
			memRes = append(memRes, KeyValueExists{
				KeyValue: KeyValue{
					Key:   []byte(k),
					Value: v,
				},
				// Exists is false for cached deletions (nil values); such
				// entries are kept so they can shadow ps items during merge.
				Exists: v != nil,
			})
		}
	}
	// Snapshot ps under the lock, then drop the lock for the (potentially
	// slow) persistent-store iteration below.
	ps := s.ps
	s.runlock()
	// less orders keys in the direction of the seek (ascending by default,
	// descending when rng.Backwards).
	less := func(k1, k2 []byte) bool {
		res := bytes.Compare(k1, k2)
		return res != 0 && rng.Backwards == (res > 0)
	}
	// Sort memRes items for further comparison with ps items.
	sort.Slice(memRes, func(i, j int) bool {
		return less(memRes[i].Key, memRes[j].Key)
	})

	var (
		done    bool             // set once f returned false or ctx was cancelled
		iMem    int              // index of the NEXT memRes item to load into kvMem
		kvMem   KeyValueExists   // current in-memory candidate
		haveMem bool             // kvMem holds a valid, not-yet-consumed item
	)
	if iMem < len(memRes) {
		kvMem = memRes[iMem]
		haveMem = true
		iMem++
	}
	// Merge results of seek operations in ascending order. It returns whether iterating
	// should be continued.
	mergeFunc := func(k, v []byte) bool {
		if done {
			return false
		}
		// Copy ps-owned slices: they may be reused by the underlying store.
		kvPs := KeyValue{
			Key:   slice.Copy(k),
			Value: slice.Copy(v),
		}
		// Drain all in-memory items preceding kvPs, then handle kvPs itself.
		for {
			select {
			case <-ctx.Done():
				done = true
				return false
			default:
				var isMem = haveMem && less(kvMem.Key, kvPs.Key)
				if isMem {
					// Emit the in-memory item unless it's a deletion marker.
					if kvMem.Exists {
						if cutPrefix {
							kvMem.Key = kvMem.Key[lPrefix:]
						}
						if !f(kvMem.Key, kvMem.Value) {
							done = true
							return false
						}
					}
					// Advance to the next in-memory candidate.
					if iMem < len(memRes) {
						kvMem = memRes[iMem]
						haveMem = true
						iMem++
					} else {
						haveMem = false
					}
				} else {
					// Equal keys mean the in-memory item (possibly a deletion)
					// shadows the ps one, so the ps item is skipped.
					if !bytes.Equal(kvMem.Key, kvPs.Key) {
						if cutPrefix {
							kvPs.Key = kvPs.Key[lPrefix:]
						}
						if !f(kvPs.Key, kvPs.Value) {
							done = true
							return false
						}
					}
					return true
				}
			}
		}
	}
	ps.Seek(rng, mergeFunc)

	// Flush remaining in-memory items (those past the last ps key). iMem-1 is
	// the index of the still-unconsumed kvMem candidate.
	if !done && haveMem {
	loop:
		for i := iMem - 1; i < len(memRes); i++ {
			select {
			case <-ctx.Done():
				break loop
			default:
				kvMem = memRes[i]
				if kvMem.Exists {
					if cutPrefix {
						kvMem.Key = kvMem.Key[lPrefix:]
					}
					if !f(kvMem.Key, kvMem.Value) {
						break loop
					}
				}
			}
		}
	}
}
|
|
|
|
|
|
|
|
// Persist flushes all the MemoryStore contents into the (supposedly) persistent
// store ps. MemCachedStore remains accessible for the most part of this action
// (any new changes will be cached in memory). It returns the number of keys
// flushed (presumably — confirm against persist's non-private path, which is
// outside this view).
func (s *MemCachedStore) Persist() (int, error) {
	return s.persist(false)
}
|
|
|
|
|
|
|
|
// PersistSync flushes all the MemoryStore contents into the (supposedly) persistent
// store ps. It's different from Persist in that it blocks MemCachedStore completely
// while flushing things from memory to persistent store.
func (s *MemCachedStore) PersistSync() (int, error) {
	return s.persist(true)
}
|
|
|
|
|
|
|
|
func (s *MemCachedStore) persist(isSync bool) (int, error) {
|
2020-03-27 12:40:23 +00:00
|
|
|
var err error
|
2022-01-29 08:54:25 +00:00
|
|
|
var keys int
|
2020-03-27 12:40:23 +00:00
|
|
|
|
2022-02-16 16:13:06 +00:00
|
|
|
if s.private {
|
|
|
|
keys = len(s.mem) + len(s.stor)
|
|
|
|
if keys == 0 {
|
|
|
|
return 0, nil
|
|
|
|
}
|
|
|
|
err = s.ps.PutChangeSet(s.mem, s.stor)
|
|
|
|
if err != nil {
|
|
|
|
return 0, err
|
|
|
|
}
|
|
|
|
s.mem = nil
|
|
|
|
s.stor = nil
|
2022-04-15 14:48:58 +00:00
|
|
|
if cached, ok := s.ps.(*MemCachedStore); ok {
|
|
|
|
for id, nativeCache := range s.nativeCache {
|
|
|
|
updatedCache, err := nativeCache.Persist(cached.nativeCache[id])
|
|
|
|
if err != nil {
|
|
|
|
return 0, fmt.Errorf("failed to persist native cache changes for private MemCachedStore: %w", err)
|
|
|
|
}
|
|
|
|
cached.nativeCache[id] = updatedCache
|
|
|
|
}
|
|
|
|
s.nativeCache = nil
|
|
|
|
}
|
2022-02-16 16:13:06 +00:00
|
|
|
return keys, nil
|
|
|
|
}
|
|
|
|
|
storage: allow accessing MemCachedStore during Persist
Persist by its definition doesn't change MemCachedStore visible state, all KV
pairs that were acessible via it before Persist remain accessible after
Persist. The only thing it does is flushing of the current set of KV pairs
from memory to persistent store. To do that it needs read-only access to the
current KV pair set, but technically it then replaces maps, so we have to use
full write lock which makes MemCachedStore inaccessible for the duration of
Persist. And Persist can take a lot of time, it's about disk access for
regular DBs.
What we do here is we create new in-memory maps for MemCachedStore before
flushing old ones to the persistent store. Then a fake persistent store is
created which actually is a MemCachedStore with old maps, so it has exactly
the same visible state. This Store is never accessed for writes, so we can
read it without taking any internal locks and at the same time we no longer
need write locks for original MemCachedStore, we're not using it. All of this
makes it possible to use MemCachedStore as normally reads are handled going
down to whatever level is needed and writes are handled by new maps. So while
Persist for (*Blockchain).dao does its most time-consuming work we can process
other blocks (reading data for transactions and persisting storeBlock caches
to (*Blockchain).dao).
The change was tested for performance with neo-bench (single node, 10 workers,
LevelDB) on two machines and block dump processing (RC4 testnet up to 62800
with VerifyBlocks set to false) on i7-8565U.
Reference results (bbe4e9cd7bb33428633586f080f64494cd6ac9cf):
Ryzen 9 5950X:
RPS 23616.969 22817.086 23222.378 ≈ 23218 ± 1.72%
TPS 23047.316 22608.578 22735.540 ≈ 22797 ± 0.99%
CPU % 23.434 25.553 23.848 ≈ 24.3 ± 4.63%
Mem MB 600.636 503.060 582.043 ≈ 562 ± 9.22%
Core i7-8565U:
RPS 6594.007 6499.501 6572.902 ≈ 6555 ± 0.76%
TPS 6561.680 6444.545 6510.120 ≈ 6505 ± 0.90%
CPU % 58.452 60.568 62.474 ≈ 60.5 ± 3.33%
Mem MB 234.893 285.067 269.081 ≈ 263 ± 9.75%
DB restore:
real 0m22.237s 0m23.471s 0m23.409s ≈ 23.04 ± 3.02%
user 0m35.435s 0m38.943s 0m39.247s ≈ 37.88 ± 5.59%
sys 0m3.085s 0m3.360s 0m3.144s ≈ 3.20 ± 4.53%
After the change:
Ryzen 9 5950X:
RPS 27747.349 27407.726 27520.210 ≈ 27558 ± 0.63% ↑ 18.69%
TPS 26992.010 26993.468 27010.966 ≈ 26999 ± 0.04% ↑ 18.43%
CPU % 28.928 28.096 29.105 ≈ 28.7 ± 1.88% ↑ 18.1%
Mem MB 760.385 726.320 756.118 ≈ 748 ± 2.48% ↑ 33.10%
Core i7-8565U:
RPS 7783.229 7628.409 7542.340 ≈ 7651 ± 1.60% ↑ 16.72%
TPS 7708.436 7607.397 7489.459 ≈ 7602 ± 1.44% ↑ 16.85%
CPU % 74.899 71.020 72.697 ≈ 72.9 ± 2.67% ↑ 20.50%
Mem MB 438.047 436.967 416.350 ≈ 430 ± 2.84% ↑ 63.50%
DB restore:
real 0m20.838s 0m21.895s 0m21.794s ≈ 21.51 ± 2.71% ↓ 6.64%
user 0m39.091s 0m40.565s 0m41.493s ≈ 40.38 ± 3.00% ↑ 6.60%
sys 0m3.184s 0m2.923s 0m3.062s ≈ 3.06 ± 4.27% ↓ 4.38%
It obviously uses more memory now and utilizes CPU more aggressively, but at
the same time it allows to improve all relevant metrics and finally reach a
situation where we process 50K transactions in less than a second on Ryzen 9
5950X (going higher than 25K TPS). The other observation is much more stable
block time, on Ryzen 9 it's as close to 1 second as it could be.
2021-07-30 20:35:03 +00:00
|
|
|
s.plock.Lock()
|
|
|
|
defer s.plock.Unlock()
|
2019-10-16 13:41:50 +00:00
|
|
|
s.mut.Lock()
|
2022-04-15 14:48:58 +00:00
|
|
|
s.nativeCacheLock.Lock()
|
2020-03-27 12:40:23 +00:00
|
|
|
|
storage: use two maps for MemoryStore
Simple and dumb as it is, this allows to separate contract storage from other
things and dramatically improve Seek() time over storage (even though it's
still unordered!) which in turn improves block processing speed.
LevelDB LevelDB (KeepOnlyLatest) BoltDB BoltDB (KeepOnlyLatest)
Master real 16m27,936s real 10m9,440s real 16m39,369s real 8m1,227s
user 20m12,619s user 26m13,925s user 18m9,162s user 18m5,846s
sys 2m56,377s sys 1m32,051s sys 9m52,576s sys 2m9,455s
2 maps real 10m49,495s real 8m53,342s real 11m46,204s real 5m56,043s
user 14m19,922s user 24m6,225s user 13m25,691s user 15m4,694s
sys 1m53,021s sys 1m23,006s sys 4m31,735s sys 2m8,714s
neo-bench performance is mostly unaffected, ~0.5% for 1-1 test and 4% for
10K-10K test both fall within regular test error range.
2022-02-15 16:07:59 +00:00
|
|
|
keys = len(s.mem) + len(s.stor)
|
2022-01-29 08:54:25 +00:00
|
|
|
if keys == 0 {
|
2022-04-15 14:48:58 +00:00
|
|
|
s.nativeCacheLock.Unlock()
|
storage: allow accessing MemCachedStore during Persist
Persist by its definition doesn't change MemCachedStore visible state, all KV
pairs that were accessible via it before Persist remain accessible after
Persist. The only thing it does is flushing of the current set of KV pairs
from memory to persistent store. To do that it needs read-only access to the
current KV pair set, but technically it then replaces maps, so we have to use
full write lock which makes MemCachedStore inaccessible for the duration of
Persist. And Persist can take a lot of time, it's about disk access for
regular DBs.
What we do here is we create new in-memory maps for MemCachedStore before
flushing old ones to the persistent store. Then a fake persistent store is
created which actually is a MemCachedStore with old maps, so it has exactly
the same visible state. This Store is never accessed for writes, so we can
read it without taking any internal locks and at the same time we no longer
need write locks for original MemCachedStore, we're not using it. All of this
makes it possible to use MemCachedStore as normally reads are handled going
down to whatever level is needed and writes are handled by new maps. So while
Persist for (*Blockchain).dao does its most time-consuming work we can process
other blocks (reading data for transactions and persisting storeBlock caches
to (*Blockchain).dao).
The change was tested for performance with neo-bench (single node, 10 workers,
LevelDB) on two machines and block dump processing (RC4 testnet up to 62800
with VerifyBlocks set to false) on i7-8565U.
Reference results (bbe4e9cd7bb33428633586f080f64494cd6ac9cf):
Ryzen 9 5950X:
RPS 23616.969 22817.086 23222.378 ≈ 23218 ± 1.72%
TPS 23047.316 22608.578 22735.540 ≈ 22797 ± 0.99%
CPU % 23.434 25.553 23.848 ≈ 24.3 ± 4.63%
Mem MB 600.636 503.060 582.043 ≈ 562 ± 9.22%
Core i7-8565U:
RPS 6594.007 6499.501 6572.902 ≈ 6555 ± 0.76%
TPS 6561.680 6444.545 6510.120 ≈ 6505 ± 0.90%
CPU % 58.452 60.568 62.474 ≈ 60.5 ± 3.33%
Mem MB 234.893 285.067 269.081 ≈ 263 ± 9.75%
DB restore:
real 0m22.237s 0m23.471s 0m23.409s ≈ 23.04 ± 3.02%
user 0m35.435s 0m38.943s 0m39.247s ≈ 37.88 ± 5.59%
sys 0m3.085s 0m3.360s 0m3.144s ≈ 3.20 ± 4.53%
After the change:
Ryzen 9 5950X:
RPS 27747.349 27407.726 27520.210 ≈ 27558 ± 0.63% ↑ 18.69%
TPS 26992.010 26993.468 27010.966 ≈ 26999 ± 0.04% ↑ 18.43%
CPU % 28.928 28.096 29.105 ≈ 28.7 ± 1.88% ↑ 18.1%
Mem MB 760.385 726.320 756.118 ≈ 748 ± 2.48% ↑ 33.10%
Core i7-8565U:
RPS 7783.229 7628.409 7542.340 ≈ 7651 ± 1.60% ↑ 16.72%
TPS 7708.436 7607.397 7489.459 ≈ 7602 ± 1.44% ↑ 16.85%
CPU % 74.899 71.020 72.697 ≈ 72.9 ± 2.67% ↑ 20.50%
Mem MB 438.047 436.967 416.350 ≈ 430 ± 2.84% ↑ 63.50%
DB restore:
real 0m20.838s 0m21.895s 0m21.794s ≈ 21.51 ± 2.71% ↓ 6.64%
user 0m39.091s 0m40.565s 0m41.493s ≈ 40.38 ± 3.00% ↑ 6.60%
sys 0m3.184s 0m2.923s 0m3.062s ≈ 3.06 ± 4.27% ↓ 4.38%
It obviously uses more memory now and utilizes CPU more aggressively, but at
the same time it allows to improve all relevant metrics and finally reach a
situation where we process 50K transactions in less than a second on Ryzen 9
5950X (going higher than 25K TPS). The other observation is much more stable
block time, on Ryzen 9 it's as close to 1 second as it could be.
2021-07-30 20:35:03 +00:00
|
|
|
s.mut.Unlock()
|
2020-03-27 12:40:23 +00:00
|
|
|
return 0, nil
|
2019-10-16 13:41:50 +00:00
|
|
|
}
|
2020-03-27 12:40:23 +00:00
|
|
|
|
storage: allow accessing MemCachedStore during Persist
Persist by its definition doesn't change MemCachedStore visible state, all KV
pairs that were accessible via it before Persist remain accessible after
Persist. The only thing it does is flushing of the current set of KV pairs
from memory to persistent store. To do that it needs read-only access to the
current KV pair set, but technically it then replaces maps, so we have to use
full write lock which makes MemCachedStore inaccessible for the duration of
Persist. And Persist can take a lot of time, it's about disk access for
regular DBs.
What we do here is we create new in-memory maps for MemCachedStore before
flushing old ones to the persistent store. Then a fake persistent store is
created which actually is a MemCachedStore with old maps, so it has exactly
the same visible state. This Store is never accessed for writes, so we can
read it without taking any internal locks and at the same time we no longer
need write locks for original MemCachedStore, we're not using it. All of this
makes it possible to use MemCachedStore as normally reads are handled going
down to whatever level is needed and writes are handled by new maps. So while
Persist for (*Blockchain).dao does its most time-consuming work we can process
other blocks (reading data for transactions and persisting storeBlock caches
to (*Blockchain).dao).
The change was tested for performance with neo-bench (single node, 10 workers,
LevelDB) on two machines and block dump processing (RC4 testnet up to 62800
with VerifyBlocks set to false) on i7-8565U.
Reference results (bbe4e9cd7bb33428633586f080f64494cd6ac9cf):
Ryzen 9 5950X:
RPS 23616.969 22817.086 23222.378 ≈ 23218 ± 1.72%
TPS 23047.316 22608.578 22735.540 ≈ 22797 ± 0.99%
CPU % 23.434 25.553 23.848 ≈ 24.3 ± 4.63%
Mem MB 600.636 503.060 582.043 ≈ 562 ± 9.22%
Core i7-8565U:
RPS 6594.007 6499.501 6572.902 ≈ 6555 ± 0.76%
TPS 6561.680 6444.545 6510.120 ≈ 6505 ± 0.90%
CPU % 58.452 60.568 62.474 ≈ 60.5 ± 3.33%
Mem MB 234.893 285.067 269.081 ≈ 263 ± 9.75%
DB restore:
real 0m22.237s 0m23.471s 0m23.409s ≈ 23.04 ± 3.02%
user 0m35.435s 0m38.943s 0m39.247s ≈ 37.88 ± 5.59%
sys 0m3.085s 0m3.360s 0m3.144s ≈ 3.20 ± 4.53%
After the change:
Ryzen 9 5950X:
RPS 27747.349 27407.726 27520.210 ≈ 27558 ± 0.63% ↑ 18.69%
TPS 26992.010 26993.468 27010.966 ≈ 26999 ± 0.04% ↑ 18.43%
CPU % 28.928 28.096 29.105 ≈ 28.7 ± 1.88% ↑ 18.1%
Mem MB 760.385 726.320 756.118 ≈ 748 ± 2.48% ↑ 33.10%
Core i7-8565U:
RPS 7783.229 7628.409 7542.340 ≈ 7651 ± 1.60% ↑ 16.72%
TPS 7708.436 7607.397 7489.459 ≈ 7602 ± 1.44% ↑ 16.85%
CPU % 74.899 71.020 72.697 ≈ 72.9 ± 2.67% ↑ 20.50%
Mem MB 438.047 436.967 416.350 ≈ 430 ± 2.84% ↑ 63.50%
DB restore:
real 0m20.838s 0m21.895s 0m21.794s ≈ 21.51 ± 2.71% ↓ 6.64%
user 0m39.091s 0m40.565s 0m41.493s ≈ 40.38 ± 3.00% ↑ 6.60%
sys 0m3.184s 0m2.923s 0m3.062s ≈ 3.06 ± 4.27% ↓ 4.38%
It obviously uses more memory now and utilizes CPU more aggressively, but at
the same time it allows to improve all relevant metrics and finally reach a
situation where we process 50K transactions in less than a second on Ryzen 9
5950X (going higher than 25K TPS). The other observation is much more stable
block time, on Ryzen 9 it's as close to 1 second as it could be.
2021-07-30 20:35:03 +00:00
|
|
|
// tempstore technically copies current s in lower layer while real s
|
|
|
|
// starts using fresh new maps. This tempstore is only known here and
|
|
|
|
// nothing ever changes it, therefore accesses to it (reads) can go
|
|
|
|
// unprotected while writes are handled by s proper.
|
2022-04-15 14:48:58 +00:00
|
|
|
var tempstore = &MemCachedStore{MemoryStore: MemoryStore{mem: s.mem, stor: s.stor}, ps: s.ps, nativeCache: s.nativeCache}
|
storage: allow accessing MemCachedStore during Persist
Persist by its definition doesn't change MemCachedStore visible state, all KV
pairs that were accessible via it before Persist remain accessible after
Persist. The only thing it does is flushing of the current set of KV pairs
from memory to persistent store. To do that it needs read-only access to the
current KV pair set, but technically it then replaces maps, so we have to use
full write lock which makes MemCachedStore inaccessible for the duration of
Persist. And Persist can take a lot of time, it's about disk access for
regular DBs.
What we do here is we create new in-memory maps for MemCachedStore before
flushing old ones to the persistent store. Then a fake persistent store is
created which actually is a MemCachedStore with old maps, so it has exactly
the same visible state. This Store is never accessed for writes, so we can
read it without taking any internal locks and at the same time we no longer
need write locks for original MemCachedStore, we're not using it. All of this
makes it possible to use MemCachedStore as normally reads are handled going
down to whatever level is needed and writes are handled by new maps. So while
Persist for (*Blockchain).dao does its most time-consuming work we can process
other blocks (reading data for transactions and persisting storeBlock caches
to (*Blockchain).dao).
The change was tested for performance with neo-bench (single node, 10 workers,
LevelDB) on two machines and block dump processing (RC4 testnet up to 62800
with VerifyBlocks set to false) on i7-8565U.
Reference results (bbe4e9cd7bb33428633586f080f64494cd6ac9cf):
Ryzen 9 5950X:
RPS 23616.969 22817.086 23222.378 ≈ 23218 ± 1.72%
TPS 23047.316 22608.578 22735.540 ≈ 22797 ± 0.99%
CPU % 23.434 25.553 23.848 ≈ 24.3 ± 4.63%
Mem MB 600.636 503.060 582.043 ≈ 562 ± 9.22%
Core i7-8565U:
RPS 6594.007 6499.501 6572.902 ≈ 6555 ± 0.76%
TPS 6561.680 6444.545 6510.120 ≈ 6505 ± 0.90%
CPU % 58.452 60.568 62.474 ≈ 60.5 ± 3.33%
Mem MB 234.893 285.067 269.081 ≈ 263 ± 9.75%
DB restore:
real 0m22.237s 0m23.471s 0m23.409s ≈ 23.04 ± 3.02%
user 0m35.435s 0m38.943s 0m39.247s ≈ 37.88 ± 5.59%
sys 0m3.085s 0m3.360s 0m3.144s ≈ 3.20 ± 4.53%
After the change:
Ryzen 9 5950X:
RPS 27747.349 27407.726 27520.210 ≈ 27558 ± 0.63% ↑ 18.69%
TPS 26992.010 26993.468 27010.966 ≈ 26999 ± 0.04% ↑ 18.43%
CPU % 28.928 28.096 29.105 ≈ 28.7 ± 1.88% ↑ 18.1%
Mem MB 760.385 726.320 756.118 ≈ 748 ± 2.48% ↑ 33.10%
Core i7-8565U:
RPS 7783.229 7628.409 7542.340 ≈ 7651 ± 1.60% ↑ 16.72%
TPS 7708.436 7607.397 7489.459 ≈ 7602 ± 1.44% ↑ 16.85%
CPU % 74.899 71.020 72.697 ≈ 72.9 ± 2.67% ↑ 20.50%
Mem MB 438.047 436.967 416.350 ≈ 430 ± 2.84% ↑ 63.50%
DB restore:
real 0m20.838s 0m21.895s 0m21.794s ≈ 21.51 ± 2.71% ↓ 6.64%
user 0m39.091s 0m40.565s 0m41.493s ≈ 40.38 ± 3.00% ↑ 6.60%
sys 0m3.184s 0m2.923s 0m3.062s ≈ 3.06 ± 4.27% ↓ 4.38%
It obviously uses more memory now and utilizes CPU more aggressively, but at
the same time it allows to improve all relevant metrics and finally reach a
situation where we process 50K transactions in less than a second on Ryzen 9
5950X (going higher than 25K TPS). The other observation is much more stable
block time, on Ryzen 9 it's as close to 1 second as it could be.
2021-07-30 20:35:03 +00:00
|
|
|
s.ps = tempstore
|
2021-11-30 16:26:20 +00:00
|
|
|
s.mem = make(map[string][]byte, len(s.mem))
|
storage: use two maps for MemoryStore
Simple and dumb as it is, this allows to separate contract storage from other
things and dramatically improve Seek() time over storage (even though it's
still unordered!) which in turn improves block processing speed.
LevelDB LevelDB (KeepOnlyLatest) BoltDB BoltDB (KeepOnlyLatest)
Master real 16m27,936s real 10m9,440s real 16m39,369s real 8m1,227s
user 20m12,619s user 26m13,925s user 18m9,162s user 18m5,846s
sys 2m56,377s sys 1m32,051s sys 9m52,576s sys 2m9,455s
2 maps real 10m49,495s real 8m53,342s real 11m46,204s real 5m56,043s
user 14m19,922s user 24m6,225s user 13m25,691s user 15m4,694s
sys 1m53,021s sys 1m23,006s sys 4m31,735s sys 2m8,714s
neo-bench performance is mostly unaffected, ~0.5% for 1-1 test and 4% for
10K-10K test both fall within regular test error range.
2022-02-15 16:07:59 +00:00
|
|
|
s.stor = make(map[string][]byte, len(s.stor))
|
2022-04-15 14:48:58 +00:00
|
|
|
cached, isPSCached := tempstore.ps.(*MemCachedStore)
|
|
|
|
if isPSCached {
|
|
|
|
s.nativeCache = make(map[int32]NativeContractCache)
|
|
|
|
}
|
2021-11-22 07:41:40 +00:00
|
|
|
if !isSync {
|
2022-04-15 14:48:58 +00:00
|
|
|
s.nativeCacheLock.Unlock()
|
2021-11-22 07:41:40 +00:00
|
|
|
s.mut.Unlock()
|
|
|
|
}
|
2022-04-15 14:48:58 +00:00
|
|
|
if isPSCached {
|
|
|
|
cached.nativeCacheLock.Lock()
|
|
|
|
for id, nativeCache := range tempstore.nativeCache {
|
|
|
|
updatedCache, err := nativeCache.Persist(cached.nativeCache[id])
|
|
|
|
if err != nil {
|
|
|
|
cached.nativeCacheLock.Unlock()
|
|
|
|
return 0, fmt.Errorf("failed to persist native cache changes: %w", err)
|
|
|
|
}
|
|
|
|
cached.nativeCache[id] = updatedCache
|
|
|
|
}
|
|
|
|
cached.nativeCacheLock.Unlock()
|
|
|
|
}
|
storage: use two maps for MemoryStore
Simple and dumb as it is, this allows to separate contract storage from other
things and dramatically improve Seek() time over storage (even though it's
still unordered!) which in turn improves block processing speed.
LevelDB LevelDB (KeepOnlyLatest) BoltDB BoltDB (KeepOnlyLatest)
Master real 16m27,936s real 10m9,440s real 16m39,369s real 8m1,227s
user 20m12,619s user 26m13,925s user 18m9,162s user 18m5,846s
sys 2m56,377s sys 1m32,051s sys 9m52,576s sys 2m9,455s
2 maps real 10m49,495s real 8m53,342s real 11m46,204s real 5m56,043s
user 14m19,922s user 24m6,225s user 13m25,691s user 15m4,694s
sys 1m53,021s sys 1m23,006s sys 4m31,735s sys 2m8,714s
neo-bench performance is mostly unaffected, ~0.5% for 1-1 test and 4% for
10K-10K test both fall within regular test error range.
2022-02-15 16:07:59 +00:00
|
|
|
err = tempstore.ps.PutChangeSet(tempstore.mem, tempstore.stor)
|
2021-08-12 10:35:09 +00:00
|
|
|
|
2021-11-22 07:41:40 +00:00
|
|
|
if !isSync {
|
|
|
|
s.mut.Lock()
|
2022-04-15 14:48:58 +00:00
|
|
|
s.nativeCacheLock.Lock()
|
2021-11-22 07:41:40 +00:00
|
|
|
}
|
2019-10-16 13:41:50 +00:00
|
|
|
if err == nil {
|
storage: allow accessing MemCachedStore during Persist
Persist by its definition doesn't change MemCachedStore visible state, all KV
pairs that were accessible via it before Persist remain accessible after
Persist. The only thing it does is flushing of the current set of KV pairs
from memory to persistent store. To do that it needs read-only access to the
current KV pair set, but technically it then replaces maps, so we have to use
full write lock which makes MemCachedStore inaccessible for the duration of
Persist. And Persist can take a lot of time, it's about disk access for
regular DBs.
What we do here is we create new in-memory maps for MemCachedStore before
flushing old ones to the persistent store. Then a fake persistent store is
created which actually is a MemCachedStore with old maps, so it has exactly
the same visible state. This Store is never accessed for writes, so we can
read it without taking any internal locks and at the same time we no longer
need write locks for original MemCachedStore, we're not using it. All of this
makes it possible to use MemCachedStore as normally reads are handled going
down to whatever level is needed and writes are handled by new maps. So while
Persist for (*Blockchain).dao does its most time-consuming work we can process
other blocks (reading data for transactions and persisting storeBlock caches
to (*Blockchain).dao).
The change was tested for performance with neo-bench (single node, 10 workers,
LevelDB) on two machines and block dump processing (RC4 testnet up to 62800
with VerifyBlocks set to false) on i7-8565U.
Reference results (bbe4e9cd7bb33428633586f080f64494cd6ac9cf):
Ryzen 9 5950X:
RPS 23616.969 22817.086 23222.378 ≈ 23218 ± 1.72%
TPS 23047.316 22608.578 22735.540 ≈ 22797 ± 0.99%
CPU % 23.434 25.553 23.848 ≈ 24.3 ± 4.63%
Mem MB 600.636 503.060 582.043 ≈ 562 ± 9.22%
Core i7-8565U:
RPS 6594.007 6499.501 6572.902 ≈ 6555 ± 0.76%
TPS 6561.680 6444.545 6510.120 ≈ 6505 ± 0.90%
CPU % 58.452 60.568 62.474 ≈ 60.5 ± 3.33%
Mem MB 234.893 285.067 269.081 ≈ 263 ± 9.75%
DB restore:
real 0m22.237s 0m23.471s 0m23.409s ≈ 23.04 ± 3.02%
user 0m35.435s 0m38.943s 0m39.247s ≈ 37.88 ± 5.59%
sys 0m3.085s 0m3.360s 0m3.144s ≈ 3.20 ± 4.53%
After the change:
Ryzen 9 5950X:
RPS 27747.349 27407.726 27520.210 ≈ 27558 ± 0.63% ↑ 18.69%
TPS 26992.010 26993.468 27010.966 ≈ 26999 ± 0.04% ↑ 18.43%
CPU % 28.928 28.096 29.105 ≈ 28.7 ± 1.88% ↑ 18.1%
Mem MB 760.385 726.320 756.118 ≈ 748 ± 2.48% ↑ 33.10%
Core i7-8565U:
RPS 7783.229 7628.409 7542.340 ≈ 7651 ± 1.60% ↑ 16.72%
TPS 7708.436 7607.397 7489.459 ≈ 7602 ± 1.44% ↑ 16.85%
CPU % 74.899 71.020 72.697 ≈ 72.9 ± 2.67% ↑ 20.50%
Mem MB 438.047 436.967 416.350 ≈ 430 ± 2.84% ↑ 63.50%
DB restore:
real 0m20.838s 0m21.895s 0m21.794s ≈ 21.51 ± 2.71% ↓ 6.64%
user 0m39.091s 0m40.565s 0m41.493s ≈ 40.38 ± 3.00% ↑ 6.60%
sys 0m3.184s 0m2.923s 0m3.062s ≈ 3.06 ± 4.27% ↓ 4.38%
It obviously uses more memory now and utilizes CPU more aggressively, but at
the same time it allows to improve all relevant metrics and finally reach a
situation where we process 50K transactions in less than a second on Ryzen 9
5950X (going higher than 25K TPS). The other observation is much more stable
block time, on Ryzen 9 it's as close to 1 second as it could be.
2021-07-30 20:35:03 +00:00
|
|
|
// tempstore.mem and tempstore.del are completely flushed now
|
|
|
|
// to tempstore.ps, so all KV pairs are the same and this
|
|
|
|
// substitution has no visible effects.
|
|
|
|
s.ps = tempstore.ps
|
|
|
|
} else {
|
|
|
|
// We're toast. We'll try to still keep proper state, but OOM
|
|
|
|
// killer will get to us eventually.
|
|
|
|
for k := range s.mem {
|
storage: use two maps for MemoryStore
Simple and dumb as it is, this allows to separate contract storage from other
things and dramatically improve Seek() time over storage (even though it's
still unordered!) which in turn improves block processing speed.
LevelDB LevelDB (KeepOnlyLatest) BoltDB BoltDB (KeepOnlyLatest)
Master real 16m27,936s real 10m9,440s real 16m39,369s real 8m1,227s
user 20m12,619s user 26m13,925s user 18m9,162s user 18m5,846s
sys 2m56,377s sys 1m32,051s sys 9m52,576s sys 2m9,455s
2 maps real 10m49,495s real 8m53,342s real 11m46,204s real 5m56,043s
user 14m19,922s user 24m6,225s user 13m25,691s user 15m4,694s
sys 1m53,021s sys 1m23,006s sys 4m31,735s sys 2m8,714s
neo-bench performance is mostly unaffected, ~0.5% for 1-1 test and 4% for
10K-10K test both fall within regular test error range.
2022-02-15 16:07:59 +00:00
|
|
|
put(tempstore.mem, k, s.mem[k])
|
|
|
|
}
|
|
|
|
for k := range s.stor {
|
|
|
|
put(tempstore.stor, k, s.stor[k])
|
storage: allow accessing MemCachedStore during Persist
Persist by its definition doesn't change MemCachedStore visible state, all KV
pairs that were accessible via it before Persist remain accessible after
Persist. The only thing it does is flushing of the current set of KV pairs
from memory to persistent store. To do that it needs read-only access to the
current KV pair set, but technically it then replaces maps, so we have to use
full write lock which makes MemCachedStore inaccessible for the duration of
Persist. And Persist can take a lot of time, it's about disk access for
regular DBs.
What we do here is we create new in-memory maps for MemCachedStore before
flushing old ones to the persistent store. Then a fake persistent store is
created which actually is a MemCachedStore with old maps, so it has exactly
the same visible state. This Store is never accessed for writes, so we can
read it without taking any internal locks and at the same time we no longer
need write locks for original MemCachedStore, we're not using it. All of this
makes it possible to use MemCachedStore as normally reads are handled going
down to whatever level is needed and writes are handled by new maps. So while
Persist for (*Blockchain).dao does its most time-consuming work we can process
other blocks (reading data for transactions and persisting storeBlock caches
to (*Blockchain).dao).
The change was tested for performance with neo-bench (single node, 10 workers,
LevelDB) on two machines and block dump processing (RC4 testnet up to 62800
with VerifyBlocks set to false) on i7-8565U.
Reference results (bbe4e9cd7bb33428633586f080f64494cd6ac9cf):
Ryzen 9 5950X:
RPS 23616.969 22817.086 23222.378 ≈ 23218 ± 1.72%
TPS 23047.316 22608.578 22735.540 ≈ 22797 ± 0.99%
CPU % 23.434 25.553 23.848 ≈ 24.3 ± 4.63%
Mem MB 600.636 503.060 582.043 ≈ 562 ± 9.22%
Core i7-8565U:
RPS 6594.007 6499.501 6572.902 ≈ 6555 ± 0.76%
TPS 6561.680 6444.545 6510.120 ≈ 6505 ± 0.90%
CPU % 58.452 60.568 62.474 ≈ 60.5 ± 3.33%
Mem MB 234.893 285.067 269.081 ≈ 263 ± 9.75%
DB restore:
real 0m22.237s 0m23.471s 0m23.409s ≈ 23.04 ± 3.02%
user 0m35.435s 0m38.943s 0m39.247s ≈ 37.88 ± 5.59%
sys 0m3.085s 0m3.360s 0m3.144s ≈ 3.20 ± 4.53%
After the change:
Ryzen 9 5950X:
RPS 27747.349 27407.726 27520.210 ≈ 27558 ± 0.63% ↑ 18.69%
TPS 26992.010 26993.468 27010.966 ≈ 26999 ± 0.04% ↑ 18.43%
CPU % 28.928 28.096 29.105 ≈ 28.7 ± 1.88% ↑ 18.1%
Mem MB 760.385 726.320 756.118 ≈ 748 ± 2.48% ↑ 33.10%
Core i7-8565U:
RPS 7783.229 7628.409 7542.340 ≈ 7651 ± 1.60% ↑ 16.72%
TPS 7708.436 7607.397 7489.459 ≈ 7602 ± 1.44% ↑ 16.85%
CPU % 74.899 71.020 72.697 ≈ 72.9 ± 2.67% ↑ 20.50%
Mem MB 438.047 436.967 416.350 ≈ 430 ± 2.84% ↑ 63.50%
DB restore:
real 0m20.838s 0m21.895s 0m21.794s ≈ 21.51 ± 2.71% ↓ 6.64%
user 0m39.091s 0m40.565s 0m41.493s ≈ 40.38 ± 3.00% ↑ 6.60%
sys 0m3.184s 0m2.923s 0m3.062s ≈ 3.06 ± 4.27% ↓ 4.38%
It obviously uses more memory now and utilizes CPU more aggressively, but at
the same time it allows to improve all relevant metrics and finally reach a
situation where we process 50K transactions in less than a second on Ryzen 9
5950X (going higher than 25K TPS). The other observation is much more stable
block time, on Ryzen 9 it's as close to 1 second as it could be.
2021-07-30 20:35:03 +00:00
|
|
|
}
|
2022-04-15 14:48:58 +00:00
|
|
|
if isPSCached {
|
|
|
|
for id, nativeCache := range s.nativeCache {
|
|
|
|
updatedCache, err := nativeCache.Persist(tempstore.nativeCache[id])
|
|
|
|
if err != nil {
|
|
|
|
return 0, fmt.Errorf("failed to persist native cache changes: %w", err)
|
|
|
|
}
|
|
|
|
tempstore.nativeCache[id] = updatedCache
|
|
|
|
}
|
|
|
|
s.nativeCache = tempstore.nativeCache
|
|
|
|
}
|
storage: allow accessing MemCachedStore during Persist
Persist by its definition doesn't change MemCachedStore visible state, all KV
pairs that were accessible via it before Persist remain accessible after
Persist. The only thing it does is flushing of the current set of KV pairs
from memory to persistent store. To do that it needs read-only access to the
current KV pair set, but technically it then replaces maps, so we have to use
full write lock which makes MemCachedStore inaccessible for the duration of
Persist. And Persist can take a lot of time, it's about disk access for
regular DBs.
What we do here is we create new in-memory maps for MemCachedStore before
flushing old ones to the persistent store. Then a fake persistent store is
created which actually is a MemCachedStore with old maps, so it has exactly
the same visible state. This Store is never accessed for writes, so we can
read it without taking any internal locks and at the same time we no longer
need write locks for original MemCachedStore, we're not using it. All of this
makes it possible to use MemCachedStore as normally reads are handled going
down to whatever level is needed and writes are handled by new maps. So while
Persist for (*Blockchain).dao does its most time-consuming work we can process
other blocks (reading data for transactions and persisting storeBlock caches
to (*Blockchain).dao).
The change was tested for performance with neo-bench (single node, 10 workers,
LevelDB) on two machines and block dump processing (RC4 testnet up to 62800
with VerifyBlocks set to false) on i7-8565U.
Reference results (bbe4e9cd7bb33428633586f080f64494cd6ac9cf):
Ryzen 9 5950X:
RPS 23616.969 22817.086 23222.378 ≈ 23218 ± 1.72%
TPS 23047.316 22608.578 22735.540 ≈ 22797 ± 0.99%
CPU % 23.434 25.553 23.848 ≈ 24.3 ± 4.63%
Mem MB 600.636 503.060 582.043 ≈ 562 ± 9.22%
Core i7-8565U:
RPS 6594.007 6499.501 6572.902 ≈ 6555 ± 0.76%
TPS 6561.680 6444.545 6510.120 ≈ 6505 ± 0.90%
CPU % 58.452 60.568 62.474 ≈ 60.5 ± 3.33%
Mem MB 234.893 285.067 269.081 ≈ 263 ± 9.75%
DB restore:
real 0m22.237s 0m23.471s 0m23.409s ≈ 23.04 ± 3.02%
user 0m35.435s 0m38.943s 0m39.247s ≈ 37.88 ± 5.59%
sys 0m3.085s 0m3.360s 0m3.144s ≈ 3.20 ± 4.53%
After the change:
Ryzen 9 5950X:
RPS 27747.349 27407.726 27520.210 ≈ 27558 ± 0.63% ↑ 18.69%
TPS 26992.010 26993.468 27010.966 ≈ 26999 ± 0.04% ↑ 18.43%
CPU % 28.928 28.096 29.105 ≈ 28.7 ± 1.88% ↑ 18.1%
Mem MB 760.385 726.320 756.118 ≈ 748 ± 2.48% ↑ 33.10%
Core i7-8565U:
RPS 7783.229 7628.409 7542.340 ≈ 7651 ± 1.60% ↑ 16.72%
TPS 7708.436 7607.397 7489.459 ≈ 7602 ± 1.44% ↑ 16.85%
CPU % 74.899 71.020 72.697 ≈ 72.9 ± 2.67% ↑ 20.50%
Mem MB 438.047 436.967 416.350 ≈ 430 ± 2.84% ↑ 63.50%
DB restore:
real 0m20.838s 0m21.895s 0m21.794s ≈ 21.51 ± 2.71% ↓ 6.64%
user 0m39.091s 0m40.565s 0m41.493s ≈ 40.38 ± 3.00% ↑ 6.60%
sys 0m3.184s 0m2.923s 0m3.062s ≈ 3.06 ± 4.27% ↓ 4.38%
It obviously uses more memory now and utilizes CPU more aggressively, but at
the same time it allows to improve all relevant metrics and finally reach a
situation where we process 50K transactions in less than a second on Ryzen 9
5950X (going higher than 25K TPS). The other observation is much more stable
block time, on Ryzen 9 it's as close to 1 second as it could be.
2021-07-30 20:35:03 +00:00
|
|
|
s.ps = tempstore.ps
|
|
|
|
s.mem = tempstore.mem
|
storage: use two maps for MemoryStore
Simple and dumb as it is, this allows to separate contract storage from other
things and dramatically improve Seek() time over storage (even though it's
still unordered!) which in turn improves block processing speed.
LevelDB LevelDB (KeepOnlyLatest) BoltDB BoltDB (KeepOnlyLatest)
Master real 16m27,936s real 10m9,440s real 16m39,369s real 8m1,227s
user 20m12,619s user 26m13,925s user 18m9,162s user 18m5,846s
sys 2m56,377s sys 1m32,051s sys 9m52,576s sys 2m9,455s
2 maps real 10m49,495s real 8m53,342s real 11m46,204s real 5m56,043s
user 14m19,922s user 24m6,225s user 13m25,691s user 15m4,694s
sys 1m53,021s sys 1m23,006s sys 4m31,735s sys 2m8,714s
neo-bench performance is mostly unaffected, ~0.5% for 1-1 test and 4% for
10K-10K test both fall within regular test error range.
2022-02-15 16:07:59 +00:00
|
|
|
s.stor = tempstore.stor
|
2019-10-16 13:41:50 +00:00
|
|
|
}
|
2022-04-15 14:48:58 +00:00
|
|
|
s.nativeCacheLock.Unlock()
|
storage: allow accessing MemCachedStore during Persist
Persist by its definition doesn't change MemCachedStore visible state, all KV
pairs that were accessible via it before Persist remain accessible after
Persist. The only thing it does is flushing of the current set of KV pairs
from memory to persistent store. To do that it needs read-only access to the
current KV pair set, but technically it then replaces maps, so we have to use
full write lock which makes MemCachedStore inaccessible for the duration of
Persist. And Persist can take a lot of time, it's about disk access for
regular DBs.
What we do here is we create new in-memory maps for MemCachedStore before
flushing old ones to the persistent store. Then a fake persistent store is
created which actually is a MemCachedStore with old maps, so it has exactly
the same visible state. This Store is never accessed for writes, so we can
read it without taking any internal locks and at the same time we no longer
need write locks for original MemCachedStore, we're not using it. All of this
makes it possible to use MemCachedStore as normally reads are handled going
down to whatever level is needed and writes are handled by new maps. So while
Persist for (*Blockchain).dao does its most time-consuming work we can process
other blocks (reading data for transactions and persisting storeBlock caches
to (*Blockchain).dao).
The change was tested for performance with neo-bench (single node, 10 workers,
LevelDB) on two machines and block dump processing (RC4 testnet up to 62800
with VerifyBlocks set to false) on i7-8565U.
Reference results (bbe4e9cd7bb33428633586f080f64494cd6ac9cf):
Ryzen 9 5950X:
RPS 23616.969 22817.086 23222.378 ≈ 23218 ± 1.72%
TPS 23047.316 22608.578 22735.540 ≈ 22797 ± 0.99%
CPU % 23.434 25.553 23.848 ≈ 24.3 ± 4.63%
Mem MB 600.636 503.060 582.043 ≈ 562 ± 9.22%
Core i7-8565U:
RPS 6594.007 6499.501 6572.902 ≈ 6555 ± 0.76%
TPS 6561.680 6444.545 6510.120 ≈ 6505 ± 0.90%
CPU % 58.452 60.568 62.474 ≈ 60.5 ± 3.33%
Mem MB 234.893 285.067 269.081 ≈ 263 ± 9.75%
DB restore:
real 0m22.237s 0m23.471s 0m23.409s ≈ 23.04 ± 3.02%
user 0m35.435s 0m38.943s 0m39.247s ≈ 37.88 ± 5.59%
sys 0m3.085s 0m3.360s 0m3.144s ≈ 3.20 ± 4.53%
After the change:
Ryzen 9 5950X:
RPS 27747.349 27407.726 27520.210 ≈ 27558 ± 0.63% ↑ 18.69%
TPS 26992.010 26993.468 27010.966 ≈ 26999 ± 0.04% ↑ 18.43%
CPU % 28.928 28.096 29.105 ≈ 28.7 ± 1.88% ↑ 18.1%
Mem MB 760.385 726.320 756.118 ≈ 748 ± 2.48% ↑ 33.10%
Core i7-8565U:
RPS 7783.229 7628.409 7542.340 ≈ 7651 ± 1.60% ↑ 16.72%
TPS 7708.436 7607.397 7489.459 ≈ 7602 ± 1.44% ↑ 16.85%
CPU % 74.899 71.020 72.697 ≈ 72.9 ± 2.67% ↑ 20.50%
Mem MB 438.047 436.967 416.350 ≈ 430 ± 2.84% ↑ 63.50%
DB restore:
real 0m20.838s 0m21.895s 0m21.794s ≈ 21.51 ± 2.71% ↓ 6.64%
user 0m39.091s 0m40.565s 0m41.493s ≈ 40.38 ± 3.00% ↑ 6.60%
sys 0m3.184s 0m2.923s 0m3.062s ≈ 3.06 ± 4.27% ↓ 4.38%
It obviously uses more memory now and utilizes CPU more aggressively, but at
the same time it allows to improve all relevant metrics and finally reach a
situation where we process 50K transactions in less than a second on Ryzen 9
5950X (going higher than 25K TPS). The other observation is much more stable
block time, on Ryzen 9 it's as close to 1 second as it could be.
2021-07-30 20:35:03 +00:00
|
|
|
s.mut.Unlock()
|
2019-10-16 13:41:50 +00:00
|
|
|
return keys, err
|
|
|
|
}
|
|
|
|
|
2022-04-15 14:48:58 +00:00
|
|
|
// GetROCache returns native contract cache. The cache CAN NOT be modified by
// the caller. It's the caller's duty to keep it unmodified.
func (s *MemCachedStore) GetROCache(id int32) NativeContractCache {
	// Read lock is sufficient: the ro lookup path never writes s.nativeCache.
	s.nativeCacheLock.RLock()
	defer s.nativeCacheLock.RUnlock()

	return s.getCache(id, true)
}
|
|
|
|
|
|
|
|
// GetRWCache returns native contact cache. The cache CAN BE safely modified
|
|
|
|
// by the caller.
|
|
|
|
func (s *MemCachedStore) GetRWCache(k int32) NativeContractCache {
|
|
|
|
s.nativeCacheLock.Lock()
|
|
|
|
defer s.nativeCacheLock.Unlock()
|
|
|
|
|
|
|
|
return s.getCache(k, false)
|
|
|
|
}
|
|
|
|
|
|
|
|
// getCache is the common lookup used by GetROCache and GetRWCache. The caller
// must hold nativeCacheLock (read lock suffices when ro is true, since that
// path never writes s.nativeCache; write lock is required otherwise). When the
// cache misses locally it falls through to the lower MemCachedStore, if any,
// and returns nil when nothing is found at any level.
func (s *MemCachedStore) getCache(k int32, ro bool) NativeContractCache {
	if itm, ok := s.nativeCache[k]; ok {
		// Don't need to create itm copy, because its value was already copied
		// the first time it was retrieved from lower ps.
		return itm
	}

	// Only a MemCachedStore lower layer can hold native caches; any other
	// Store kind is skipped.
	if cached, ok := s.ps.(*MemCachedStore); ok {
		if ro {
			return cached.GetROCache(k)
		}
		v := cached.GetRWCache(k)
		if v != nil {
			// Create a copy here in order not to modify the existing cache.
			cp := v.Copy()
			s.nativeCache[k] = cp
			return cp
		}
	}

	return nil
}
|
|
|
|
|
2022-04-15 14:48:58 +00:00
|
|
|
func (s *MemCachedStore) SetCache(k int32, v NativeContractCache) {
|
2022-04-12 14:29:11 +00:00
|
|
|
s.nativeCacheLock.Lock()
|
|
|
|
defer s.nativeCacheLock.Unlock()
|
|
|
|
|
2022-04-15 14:48:58 +00:00
|
|
|
s.nativeCache[k] = v
|
2022-04-12 14:29:11 +00:00
|
|
|
}
|
|
|
|
|
2019-10-16 13:41:50 +00:00
|
|
|
// Close implements Store interface, clears up memory and closes the lower layer
|
|
|
|
// Store.
|
|
|
|
func (s *MemCachedStore) Close() error {
|
|
|
|
// It's always successful.
|
|
|
|
_ = s.MemoryStore.Close()
|
|
|
|
return s.ps.Close()
|
|
|
|
}
|