core: don't always store all hashes in memory
We're paging these hashes, so we only need the previous full page, the current one and some cache for various requests. Storing 1M hashes takes 32M of memory, and this grows quickly with the chain. It also seriously affects node startup time: most of it is spent reading these hashes, and the longer the chain, the more time that takes. Note that this doesn't change the underlying DB scheme in any way.
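The paging scheme described above can be sketched roughly as follows. This is a minimal Go illustration, not the actual neo-go code: the hashPages type, the pageSize constant and the loadPage stub are assumptions made here for illustration; only the lastHeaderIndex() name mirrors the call visible in the diff below. The idea is to keep the partially filled current page and the last full page in memory, and fall back to a small DB-backed cache for anything older.

package main

import "fmt"

// pageSize is a hypothetical number of hashes kept per page.
const pageSize = 2000

type Uint256 [32]byte

type hashPages struct {
	storedCount int                   // hashes already persisted in full pages
	previous    []Uint256             // last full page kept in memory
	current     []Uint256             // partial page still being filled
	cache       map[int][]Uint256     // small cache of older pages loaded on demand
	loadPage    func(n int) []Uint256 // reads page n from the DB (stubbed out here)
}

// lastHeaderIndex returns the index of the most recent header,
// mirroring the bcSpout.lastHeaderIndex() call seen in the diff.
func (h *hashPages) lastHeaderIndex() uint32 {
	return uint32(h.storedCount+len(h.current)) - 1
}

// add appends a hash; once the current page is full it becomes the
// "previous" page and (in a real node) would be flushed to the DB.
func (h *hashPages) add(hash Uint256) {
	h.current = append(h.current, hash)
	if len(h.current) == pageSize {
		h.previous = h.current
		h.storedCount += pageSize
		h.current = nil
	}
}

// get returns the hash at index i, hitting memory for recent pages and
// the cache (or the DB) for older ones.
func (h *hashPages) get(i int) Uint256 {
	page, offset := i/pageSize, i%pageSize
	switch {
	case i >= h.storedCount: // still in the current partial page
		return h.current[i-h.storedCount]
	case page == h.storedCount/pageSize-1: // last full page, kept in memory
		return h.previous[offset]
	default: // older page: check the cache, otherwise load from the DB
		p, ok := h.cache[page]
		if !ok {
			p = h.loadPage(page)
			h.cache[page] = p
		}
		return p[offset]
	}
}

func main() {
	h := &hashPages{
		cache:    make(map[int][]Uint256),
		loadPage: func(n int) []Uint256 { return make([]Uint256, pageSize) },
	}
	for i := 0; i < 3*pageSize+5; i++ {
		h.add(Uint256{byte(i)})
	}
	fmt.Println(h.lastHeaderIndex()) // 6004 with the hypothetical pageSize above
}

With this layout only two pages plus a bounded cache sit in memory at any time, instead of the full hash list, which is what keeps memory use and startup time from growing linearly with chain length.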
parent 0ad6e295ea
commit 1c38b45074
5 changed files with 255 additions and 160 deletions
@@ -225,7 +225,7 @@ func TestBlockchain_InitWithIncompleteStateJump(t *testing.T) {
 	t.Run("invalid state sync point", func(t *testing.T) {
 		bcSpout.dao.Store.Put(bPrefix, []byte{byte(stateJumpStarted)})
 		point := make([]byte, 4)
-		binary.LittleEndian.PutUint32(point, uint32(len(bcSpout.headerHashes)))
+		binary.LittleEndian.PutUint32(point, bcSpout.lastHeaderIndex()+1)
 		bcSpout.dao.Store.Put([]byte{byte(storage.SYSStateSyncPoint)}, point)
 		checkNewBlockchainErr(t, boltCfg, bcSpout.dao.Store, "invalid state sync point")
 	})