forked from TrueCloudLab/neoneo-go
ae071d4542
We're using batches in wrong way during persist, we already have all changes accumulated in two maps and then we move them to batch and then this is applied. For some DBs like BoltDB this batch is just another MemoryStore, so we essentially just shuffle the changeset from one map to another, for others like LevelDB batch is just a serialized set of KV pairs, it doesn't help much on subsequent PutBatch, we just duplicate the changeset again. So introduce PutChangeSet that allows to take two maps with sets and deletes directly. It also allows to simplify MemCachedStore logic. neo-bench for single node with 10 workers, LevelDB: Reference: RPS 30189.132 30556.448 30390.482 ≈ 30379 ± 0.61% TPS 29427.344 29418.687 29434.273 ≈ 29427 ± 0.03% CPU % 33.304 27.179 33.860 ≈ 31.45 ± 11.79% Mem MB 800.677 798.389 715.042 ≈ 771 ± 6.33% Patched: RPS 30264.326 30386.364 30166.231 ≈ 30272 ± 0.36% ⇅ TPS 29444.673 29407.440 29452.478 ≈ 29435 ± 0.08% ⇅ CPU % 34.012 32.597 33.467 ≈ 33.36 ± 2.14% ⇅ Mem MB 549.126 523.656 517.684 ≈ 530 ± 3.15% ↓ 31.26% BoltDB: Reference: RPS 31937.647 31551.684 31850.408 ≈ 31780 ± 0.64% TPS 31292.049 30368.368 31307.724 ≈ 30989 ± 1.74% CPU % 33.792 22.339 35.887 ≈ 30.67 ± 23.78% Mem MB 1271.687 1254.472 1215.639 ≈ 1247 ± 2.30% Patched: RPS 31746.818 30859.485 31689.761 ≈ 31432 ± 1.58% ⇅ TPS 31271.499 30340.726 30342.568 ≈ 30652 ± 1.75% ⇅ CPU % 34.611 34.414 31.553 ≈ 33.53 ± 5.11% ⇅ Mem MB 1262.960 1231.389 1335.569 ≈ 1277 ± 4.18% ⇅
155 lines
3.6 KiB
Go
package storage
|
|
|
|
import "sync"
|
|
|
|
// MemCachedStore is a wrapper around persistent store that caches all changes
// being made for them to be later flushed in one batch.
type MemCachedStore struct {
	// Embedded MemoryStore holds the accumulated changeset (its mem map
	// for puts and del map for deletions) along with the mut lock that
	// guards them.
	MemoryStore

	// plock protects Persist from double entrance.
	plock sync.Mutex
	// Persistent Store. Reads fall through to it for keys not present in
	// the cached changeset.
	ps Store
}
|
|
|
|
type (
	// KeyValue represents key-value pair.
	KeyValue struct {
		Key   []byte
		Value []byte

		// Exists is true when the key was found in the underlying
		// persistent store at the time the batch was built (see
		// GetBatch, which probes ps.Get for every key).
		Exists bool
	}

	// MemBatch represents a changeset to be persisted.
	MemBatch struct {
		// Put holds pairs to be stored.
		Put []KeyValue
		// Deleted holds keys to be removed (Value is left nil).
		Deleted []KeyValue
	}
)
|
|
|
|
// NewMemCachedStore creates a new MemCachedStore object.
|
|
func NewMemCachedStore(lower Store) *MemCachedStore {
|
|
return &MemCachedStore{
|
|
MemoryStore: *NewMemoryStore(),
|
|
ps: lower,
|
|
}
|
|
}
|
|
|
|
// Get implements the Store interface.
|
|
func (s *MemCachedStore) Get(key []byte) ([]byte, error) {
|
|
s.mut.RLock()
|
|
defer s.mut.RUnlock()
|
|
k := string(key)
|
|
if val, ok := s.mem[k]; ok {
|
|
return val, nil
|
|
}
|
|
if _, ok := s.del[k]; ok {
|
|
return nil, ErrKeyNotFound
|
|
}
|
|
return s.ps.Get(key)
|
|
}
|
|
|
|
// GetBatch returns currently accumulated changeset.
|
|
func (s *MemCachedStore) GetBatch() *MemBatch {
|
|
s.mut.RLock()
|
|
defer s.mut.RUnlock()
|
|
|
|
var b MemBatch
|
|
|
|
b.Put = make([]KeyValue, 0, len(s.mem))
|
|
for k, v := range s.mem {
|
|
key := []byte(k)
|
|
_, err := s.ps.Get(key)
|
|
b.Put = append(b.Put, KeyValue{Key: key, Value: v, Exists: err == nil})
|
|
}
|
|
|
|
b.Deleted = make([]KeyValue, 0, len(s.del))
|
|
for k := range s.del {
|
|
key := []byte(k)
|
|
_, err := s.ps.Get(key)
|
|
b.Deleted = append(b.Deleted, KeyValue{Key: key, Exists: err == nil})
|
|
}
|
|
|
|
return &b
|
|
}
|
|
|
|
// Seek implements the Store interface.
|
|
func (s *MemCachedStore) Seek(key []byte, f func(k, v []byte)) {
|
|
s.mut.RLock()
|
|
defer s.mut.RUnlock()
|
|
s.MemoryStore.seek(key, f)
|
|
s.ps.Seek(key, func(k, v []byte) {
|
|
elem := string(k)
|
|
// If it's in mem, we already called f() for it in MemoryStore.Seek().
|
|
_, present := s.mem[elem]
|
|
if !present {
|
|
// If it's in del, we shouldn't be calling f() anyway.
|
|
_, present = s.del[elem]
|
|
}
|
|
if !present {
|
|
f(k, v)
|
|
}
|
|
})
|
|
}
|
|
|
|
// Persist flushes all the MemoryStore contents into the (supposedly) persistent
// store ps. It returns the number of keys that were put (deletions are counted
// separately and not returned) and any error from the lower store. Concurrent
// reads and writes to s remain possible while the flush is in flight.
func (s *MemCachedStore) Persist() (int, error) {
	var err error
	var keys, dkeys int

	// plock serializes Persist calls themselves; mut is taken/released in
	// short sections so that other operations aren't blocked for the whole
	// duration of the lower-store flush.
	s.plock.Lock()
	defer s.plock.Unlock()
	s.mut.Lock()

	keys = len(s.mem)
	dkeys = len(s.del)
	if keys == 0 && dkeys == 0 {
		// Nothing accumulated, nothing to flush.
		s.mut.Unlock()
		return 0, nil
	}

	// tempstore technically copies current s in lower layer while real s
	// starts using fresh new maps. This tempstore is only known here and
	// nothing ever changes it, therefore accesses to it (reads) can go
	// unprotected while writes are handled by s proper.
	var tempstore = &MemCachedStore{MemoryStore: MemoryStore{mem: s.mem, del: s.del}, ps: s.ps}
	s.ps = tempstore
	s.mem = make(map[string][]byte)
	s.del = make(map[string]bool)
	s.mut.Unlock()

	// Flush the captured changeset without holding mut; readers going
	// through s.ps still see these pairs via tempstore.
	err = tempstore.ps.PutChangeSet(tempstore.mem, tempstore.del)

	s.mut.Lock()
	if err == nil {
		// tempstore.mem and tempstore.del are completely flushed now
		// to tempstore.ps, so all KV pairs are the same and this
		// substitution has no visible effects.
		s.ps = tempstore.ps
	} else {
		// We're toast. We'll try to still keep proper state, but OOM
		// killer will get to us eventually.
		// Merge the changes accumulated in s (while the failed flush was
		// running) on top of the captured changeset, then take the merged
		// maps back so nothing is lost.
		for k := range s.mem {
			tempstore.put(k, s.mem[k])
		}
		for k := range s.del {
			tempstore.drop(k)
		}
		s.ps = tempstore.ps
		s.mem = tempstore.mem
		s.del = tempstore.del
	}
	s.mut.Unlock()
	return keys, err
}
|
|
|
|
// Close implements Store interface, clears up memory and closes the lower layer
|
|
// Store.
|
|
func (s *MemCachedStore) Close() error {
|
|
// It's always successful.
|
|
_ = s.MemoryStore.Close()
|
|
return s.ps.Close()
|
|
}
|