diff --git a/internal/fakechain/fakechain.go b/internal/fakechain/fakechain.go
index 3573d1286..584861d70 100644
--- a/internal/fakechain/fakechain.go
+++ b/internal/fakechain/fakechain.go
@@ -286,12 +286,12 @@ func (chain *FakeChain) GetTokenLastUpdated(acc util.Uint160) (map[int32]uint32,
 }
 
-// ForEachNEP17Transfer implements Blockchainer interface.
-func (chain *FakeChain) ForEachNEP11Transfer(util.Uint160, func(*state.NEP11Transfer) (bool, error)) error {
+// ForEachNEP11Transfer implements Blockchainer interface.
+func (chain *FakeChain) ForEachNEP11Transfer(util.Uint160, uint64, func(*state.NEP11Transfer) (bool, error)) error {
 	panic("TODO")
 }
 
 // ForEachNEP17Transfer implements Blockchainer interface.
-func (chain *FakeChain) ForEachNEP17Transfer(util.Uint160, func(*state.NEP17Transfer) (bool, error)) error {
+func (chain *FakeChain) ForEachNEP17Transfer(util.Uint160, uint64, func(*state.NEP17Transfer) (bool, error)) error {
 	panic("TODO")
 }
 
diff --git a/pkg/core/bench_test.go b/pkg/core/bench_test.go
index bf4ae9126..8bbb6b772 100644
--- a/pkg/core/bench_test.go
+++ b/pkg/core/bench_test.go
@@ -1,9 +1,18 @@
 package core
 
 import (
+	"fmt"
 	"testing"
 
+	"github.com/nspcc-dev/neo-go/internal/random"
+	"github.com/nspcc-dev/neo-go/internal/testchain"
 	"github.com/nspcc-dev/neo-go/pkg/config/netmode"
+	"github.com/nspcc-dev/neo-go/pkg/core/state"
+	"github.com/nspcc-dev/neo-go/pkg/core/storage"
+	"github.com/nspcc-dev/neo-go/pkg/core/transaction"
+	"github.com/nspcc-dev/neo-go/pkg/io"
+	"github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"
+	"github.com/nspcc-dev/neo-go/pkg/vm/emit"
 	"github.com/nspcc-dev/neo-go/pkg/vm/opcode"
 	"github.com/nspcc-dev/neo-go/pkg/wallet"
 	"github.com/stretchr/testify/require"
@@ -22,3 +31,84 @@ func BenchmarkVerifyWitness(t *testing.B) {
 		_, _ = bc.VerifyWitness(tx.Signers[0].Account, tx, &tx.Scripts[0], 100000000)
 	}
 }
+
+func BenchmarkBlockchain_ForEachNEP17Transfer(t *testing.B) {
+	var stores = map[string]func(testing.TB) storage.Store{
+		"MemPS": func(t testing.TB) storage.Store {
+			return storage.NewMemoryStore()
+		},
+		"BoltPS":  newBoltStoreForTesting,
+		"LevelPS": newLevelDBForTesting,
+	}
+	startFrom := []int{1, 100, 1000}
+	blocksToTake := []int{100, 1000}
+	for psName, newPS := range stores {
+		for _, startFromBlock := range startFrom {
+			for _, nBlocksToTake := range blocksToTake {
+				t.Run(fmt.Sprintf("%s_StartFromBlockN-%d_Take%dBlocks", psName, startFromBlock, nBlocksToTake), func(t *testing.B) {
+					ps := newPS(t)
+					t.Cleanup(func() { ps.Close() })
+					benchmarkForEachNEP17Transfer(t, ps, startFromBlock, nBlocksToTake)
+				})
+			}
+		}
+	}
+}
+
+func benchmarkForEachNEP17Transfer(t *testing.B, ps storage.Store, startFromBlock, nBlocksToTake int) {
+	var (
+		nonce             uint32 = 1
+		chainHeight              = 2_100 // constant chain height to be able to compare paging results
+		transfersPerBlock        = state.TokenTransferBatchSize/4 + // 4 blocks per batch
+			state.TokenTransferBatchSize/32 // shift
+	)
+
+	bc := newTestChainWithCustomCfgAndStore(t, ps, nil)
+	gasHash := bc.contracts.GAS.Hash
+	acc := random.Uint160()
+
+	for j := 0; j < chainHeight; j++ {
+		w := io.NewBufBinWriter()
+		for i := 0; i < transfersPerBlock; i++ {
+			emit.AppCall(w.BinWriter, gasHash, "transfer", callflag.All, testchain.MultisigScriptHash(), acc, 1, nil)
+			emit.Opcodes(w.BinWriter, opcode.ASSERT)
+			require.NoError(t, w.Err)
+		}
+		script := w.Bytes()
+		tx := transaction.New(script, int64(1100_0000*transfersPerBlock))
+		tx.ValidUntilBlock = bc.BlockHeight() + 1
+		tx.Nonce = nonce
+		nonce++
+		tx.Signers = []transaction.Signer{{
+			Account:          testchain.MultisigScriptHash(),
+			Scopes:           transaction.CalledByEntry,
+			AllowedContracts: nil,
+			AllowedGroups:    nil,
+		}}
+		require.NoError(t, testchain.SignTx(bc, tx))
+		b := bc.newBlock(tx)
+		require.NoError(t, bc.AddBlock(b))
+		checkTxHalt(t, bc, tx.Hash())
+	}
+
+	newestB, err := bc.GetBlock(bc.GetHeaderHash(int(bc.BlockHeight()) - startFromBlock + 1))
+	require.NoError(t, err)
+	newestTimestamp := newestB.Timestamp
+	oldestB, err := bc.GetBlock(bc.GetHeaderHash(int(newestB.Index) - nBlocksToTake))
+	require.NoError(t, err)
+	oldestTimestamp := oldestB.Timestamp
+
+	t.ResetTimer()
+	t.ReportAllocs()
+	t.StartTimer()
+	for i := 0; i < t.N; i++ {
+		require.NoError(t, bc.ForEachNEP17Transfer(acc, newestTimestamp, func(tr *state.NEP17Transfer) (bool, error) {
+			if tr.Timestamp < oldestTimestamp {
+				// We iterate from the newest to the oldest transfer, so the needed range has already been covered.
+				return false, nil
+			}
+			return true, nil
+		}))
+	}
+	t.StopTimer()
+}
diff --git a/pkg/core/blockchain.go b/pkg/core/blockchain.go
index ad382a49a..ac772cab5 100644
--- a/pkg/core/blockchain.go
+++ b/pkg/core/blockchain.go
@@ -486,9 +486,10 @@ func (bc *Blockchain) removeOldStorageItems() {
 	b := bc.dao.Store.Batch()
 
 	prefix := statesync.TemporaryPrefix(bc.dao.Version.StoragePrefix)
-	bc.dao.Store.Seek(storage.SeekRange{Prefix: []byte{byte(prefix)}}, func(k, _ []byte) {
+	bc.dao.Store.Seek(storage.SeekRange{Prefix: []byte{byte(prefix)}}, func(k, _ []byte) bool {
 		// #1468, but don't need to copy here, because it is done by Store.
 		b.Delete(k)
+		return true
 	})
 	b.Delete(storage.SYSCleanStorage.Bytes())
 
@@ -1022,14 +1023,14 @@ func (bc *Blockchain) storeBlock(block *block.Block, txpool *mempool.Pool) error
 					return
 				}
 				if !trData.Info.NewNEP11Batch {
-					err = kvcache.PutTokenTransferLog(acc, trData.Info.NextNEP11Batch, true, &trData.Log11)
+					err = kvcache.PutTokenTransferLog(acc, trData.Info.NextNEP11NewestTimestamp, trData.Info.NextNEP11Batch, true, &trData.Log11)
 					if err != nil {
 						aerdone <- err
 						return
 					}
 				}
 				if !trData.Info.NewNEP17Batch {
-					err = kvcache.PutTokenTransferLog(acc, trData.Info.NextNEP17Batch, false, &trData.Log17)
+					err = kvcache.PutTokenTransferLog(acc, trData.Info.NextNEP17NewestTimestamp, trData.Info.NextNEP17Batch, false, &trData.Log17)
 					if err != nil {
 						aerdone <- err
 						return
@@ -1333,18 +1334,18 @@ func (bc *Blockchain) processTokenTransfer(cache dao.DAO, transCache map[util.Ui
 	}
 	if !from.Equals(util.Uint160{}) {
 		_ = nep17xfer.Amount.Neg(amount) // We already have the Int.
-		if appendTokenTransfer(cache, transCache, from, transfer, id, b.Index, isNEP11) != nil {
+		if appendTokenTransfer(cache, transCache, from, transfer, id, b.Index, b.Timestamp, isNEP11) != nil {
 			return
 		}
 	}
 	if !to.Equals(util.Uint160{}) {
-		_ = nep17xfer.Amount.Set(amount)                                               // We already have the Int.
-		_ = appendTokenTransfer(cache, transCache, to, transfer, id, b.Index, isNEP11) // Nothing useful we can do.
+		_ = nep17xfer.Amount.Set(amount)                                                            // We already have the Int.
+		_ = appendTokenTransfer(cache, transCache, to, transfer, id, b.Index, b.Timestamp, isNEP11) // Nothing useful we can do.
 	}
 }
 
 func appendTokenTransfer(cache dao.DAO, transCache map[util.Uint160]transferData, addr util.Uint160, transfer io.Serializable,
-	token int32, bIndex uint32, isNEP11 bool) error {
+	token int32, bIndex uint32, bTimestamp uint64, isNEP11 bool) error {
 	transferData, ok := transCache[addr]
 	if !ok {
 		balances, err := cache.GetTokenTransferInfo(addr)
@@ -1352,14 +1353,14 @@ func appendTokenTransfer(cache dao.DAO, transCache map[util.Uint160]transferData
 			return err
 		}
 		if !balances.NewNEP11Batch {
-			trLog, err := cache.GetTokenTransferLog(addr, balances.NextNEP11Batch, true)
+			trLog, err := cache.GetTokenTransferLog(addr, balances.NextNEP11NewestTimestamp, balances.NextNEP11Batch, true)
 			if err != nil {
 				return err
 			}
 			transferData.Log11 = *trLog
 		}
 		if !balances.NewNEP17Batch {
-			trLog, err := cache.GetTokenTransferLog(addr, balances.NextNEP17Batch, false)
+			trLog, err := cache.GetTokenTransferLog(addr, balances.NextNEP17NewestTimestamp, balances.NextNEP17Batch, false)
 			if err != nil {
 				return err
 			}
@@ -1368,18 +1369,21 @@ func appendTokenTransfer(cache dao.DAO, transCache map[util.Uint160]transferData
 		transferData.Info = *balances
 	}
 	var (
-		log       *state.TokenTransferLog
-		newBatch  *bool
-		nextBatch *uint32
+		log           *state.TokenTransferLog
+		newBatch      *bool
+		nextBatch     *uint32
+		currTimestamp *uint64
 	)
 	if !isNEP11 {
 		log = &transferData.Log17
 		newBatch = &transferData.Info.NewNEP17Batch
 		nextBatch = &transferData.Info.NextNEP17Batch
+		currTimestamp = &transferData.Info.NextNEP17NewestTimestamp
 	} else {
 		log = &transferData.Log11
 		newBatch = &transferData.Info.NewNEP11Batch
 		nextBatch = &transferData.Info.NextNEP11Batch
+		currTimestamp = &transferData.Info.NextNEP11NewestTimestamp
 	}
 	err := log.Append(transfer)
 	if err != nil {
@@ -1388,11 +1392,12 @@ func appendTokenTransfer(cache dao.DAO, transCache map[util.Uint160]transferData
 	transferData.Info.LastUpdated[token] = bIndex
 	*newBatch = log.Size() >= state.TokenTransferBatchSize
 	if *newBatch {
-		err = cache.PutTokenTransferLog(addr, *nextBatch, isNEP11, log)
+		err = cache.PutTokenTransferLog(addr, *currTimestamp, *nextBatch, isNEP11, log)
 		if err != nil {
 			return err
 		}
 		*nextBatch++
+		*currTimestamp = bTimestamp
 		// Put makes a copy of it anyway.
 		log.Raw = log.Raw[:0]
 	}
@@ -1400,48 +1405,18 @@ func appendTokenTransfer(cache dao.DAO, transCache map[util.Uint160]transferData
 	return nil
 }
 
-// ForEachNEP17Transfer executes f for each NEP-17 transfer in log.
-func (bc *Blockchain) ForEachNEP17Transfer(acc util.Uint160, f func(*state.NEP17Transfer) (bool, error)) error {
-	balances, err := bc.dao.GetTokenTransferInfo(acc)
-	if err != nil {
-		return nil
-	}
-	for i := int(balances.NextNEP17Batch); i >= 0; i-- {
-		lg, err := bc.dao.GetTokenTransferLog(acc, uint32(i), false)
-		if err != nil {
-			return nil
-		}
-		cont, err := lg.ForEachNEP17(f)
-		if err != nil {
-			return err
-		}
-		if !cont {
-			break
-		}
-	}
-	return nil
+// ForEachNEP17Transfer executes f for each NEP-17 transfer in log starting from
+// the transfer with the newest timestamp up to the oldest transfer. It continues
+// iteration until false is returned from f. The last non-nil error is returned.
+func (bc *Blockchain) ForEachNEP17Transfer(acc util.Uint160, newestTimestamp uint64, f func(*state.NEP17Transfer) (bool, error)) error {
+	return bc.dao.SeekNEP17TransferLog(acc, newestTimestamp, f)
 }
 
-// ForEachNEP11Transfer executes f for each NEP-11 transfer in log.
-func (bc *Blockchain) ForEachNEP11Transfer(acc util.Uint160, f func(*state.NEP11Transfer) (bool, error)) error {
-	balances, err := bc.dao.GetTokenTransferInfo(acc)
-	if err != nil {
-		return nil
-	}
-	for i := int(balances.NextNEP11Batch); i >= 0; i-- {
-		lg, err := bc.dao.GetTokenTransferLog(acc, uint32(i), true)
-		if err != nil {
-			return nil
-		}
-		cont, err := lg.ForEachNEP11(f)
-		if err != nil {
-			return err
-		}
-		if !cont {
-			break
-		}
-	}
-	return nil
+// ForEachNEP11Transfer executes f for each NEP-11 transfer in log starting from
+// the transfer with the newest timestamp up to the oldest transfer. It continues
+// iteration until false is returned from f. The last non-nil error is returned.
+func (bc *Blockchain) ForEachNEP11Transfer(acc util.Uint160, newestTimestamp uint64, f func(*state.NEP11Transfer) (bool, error)) error {
+	return bc.dao.SeekNEP11TransferLog(acc, newestTimestamp, f)
 }
 
 // GetNEP17Contracts returns the list of deployed NEP-17 contracts.
diff --git a/pkg/core/blockchain_test.go b/pkg/core/blockchain_test.go
index fee36ac0d..89f13c864 100644
--- a/pkg/core/blockchain_test.go
+++ b/pkg/core/blockchain_test.go
@@ -1768,11 +1768,12 @@ func TestBlockchain_InitWithIncompleteStateJump(t *testing.T) {
 	if bcSpout.dao.Version.StoragePrefix == tempPrefix {
 		tempPrefix = storage.STStorage
 	}
-	bcSpout.dao.Store.Seek(storage.SeekRange{Prefix: bcSpout.dao.Version.StoragePrefix.Bytes()}, func(k, v []byte) {
+	bcSpout.dao.Store.Seek(storage.SeekRange{Prefix: bcSpout.dao.Version.StoragePrefix.Bytes()}, func(k, v []byte) bool {
 		key := slice.Copy(k)
 		key[0] = byte(tempPrefix)
 		value := slice.Copy(v)
 		batch.Put(key, value)
+		return true
 	})
 	require.NoError(t, bcSpout.dao.Store.PutBatch(batch))
 
diff --git a/pkg/core/blockchainer/blockchainer.go b/pkg/core/blockchainer/blockchainer.go
index 710d5dfb1..d2320730a 100644
--- a/pkg/core/blockchainer/blockchainer.go
+++ b/pkg/core/blockchainer/blockchainer.go
@@ -36,8 +36,8 @@ type Blockchainer interface {
 	GetContractScriptHash(id int32) (util.Uint160, error)
 	GetEnrollments() ([]state.Validator, error)
 	GetGoverningTokenBalance(acc util.Uint160) (*big.Int, uint32)
-	ForEachNEP11Transfer(util.Uint160, func(*state.NEP11Transfer) (bool, error)) error
-	ForEachNEP17Transfer(util.Uint160, func(*state.NEP17Transfer) (bool, error)) error
+	ForEachNEP11Transfer(acc util.Uint160, newestTimestamp uint64, f func(*state.NEP11Transfer) (bool, error)) error
+	ForEachNEP17Transfer(acc util.Uint160, newestTimestamp uint64, f func(*state.NEP17Transfer) (bool, error)) error
 	GetHeaderHash(int) util.Uint256
 	GetHeader(hash util.Uint256) (*block.Header, error)
 	CurrentHeaderHash() util.Uint256
diff --git a/pkg/core/dao/dao.go b/pkg/core/dao/dao.go
index c137dd434..aff495107 100644
--- a/pkg/core/dao/dao.go
+++ b/pkg/core/dao/dao.go
@@ -42,7 +42,7 @@ type DAO interface {
 	GetCurrentHeaderHeight() (i uint32, h util.Uint256, err error)
 	GetHeaderHashes() ([]util.Uint256, error)
 	GetTokenTransferInfo(acc util.Uint160) (*state.TokenTransferInfo, error)
-	GetTokenTransferLog(acc util.Uint160, index uint32, isNEP11 bool) (*state.TokenTransferLog, error)
+	GetTokenTransferLog(acc util.Uint160, start uint64, index uint32, isNEP11 bool) (*state.TokenTransferLog, error)
 	GetStateSyncPoint() (uint32, error)
 	GetStateSyncCurrentBlockHeight() (uint32, error)
 	GetStorageItem(id int32, key []byte) state.StorageItem
@@ -57,12 +57,12 @@ type DAO interface {
 	PutContractID(id int32, hash util.Uint160) error
 	PutCurrentHeader(hashAndIndex []byte) error
 	PutTokenTransferInfo(acc util.Uint160, bs *state.TokenTransferInfo) error
-	PutTokenTransferLog(acc util.Uint160, index uint32, isNEP11 bool, lg *state.TokenTransferLog) error
+	PutTokenTransferLog(acc util.Uint160, start uint64, index uint32, isNEP11 bool, lg *state.TokenTransferLog) error
 	PutStateSyncPoint(p uint32) error
 	PutStateSyncCurrentBlockHeight(h uint32) error
 	PutStorageItem(id int32, key []byte, si state.StorageItem) error
 	PutVersion(v Version) error
-	Seek(id int32, rng storage.SeekRange, f func(k, v []byte))
+	Seek(id int32, rng storage.SeekRange, f func(k, v []byte) bool)
 	SeekAsync(ctx context.Context, id int32, rng storage.SeekRange) chan storage.KeyValue
 	StoreAsBlock(block *block.Block, aer1 *state.AppExecResult, aer2 *state.AppExecResult, buf *io.BufBinWriter) error
 	StoreAsCurrentBlock(block *block.Block, buf *io.BufBinWriter) error
@@ -180,21 +180,66 @@ func (dao *Simple) putTokenTransferInfo(acc util.Uint160, bs *state.TokenTransfe
 
 // -- start transfer log.
 
-func getTokenTransferLogKey(acc util.Uint160, index uint32, isNEP11 bool) []byte {
-	key := make([]byte, 1+util.Uint160Size+4)
+func getTokenTransferLogKey(acc util.Uint160, newestTimestamp uint64, index uint32, isNEP11 bool) []byte {
+	key := make([]byte, 1+util.Uint160Size+8+4)
 	if isNEP11 {
 		key[0] = byte(storage.STNEP11Transfers)
 	} else {
 		key[0] = byte(storage.STNEP17Transfers)
 	}
 	copy(key[1:], acc.BytesBE())
-	binary.LittleEndian.PutUint32(key[util.Uint160Size:], index)
+	binary.BigEndian.PutUint64(key[1+util.Uint160Size:], newestTimestamp)
+	binary.BigEndian.PutUint32(key[1+util.Uint160Size+8:], index)
 	return key
 }
 
+// SeekNEP17TransferLog executes f for each NEP-17 transfer in log starting from
+// the transfer with the newest timestamp up to the oldest transfer. It continues
+// iteration until false is returned from f. The last non-nil error is returned.
+func (dao *Simple) SeekNEP17TransferLog(acc util.Uint160, newestTimestamp uint64, f func(*state.NEP17Transfer) (bool, error)) error {
+	key := getTokenTransferLogKey(acc, newestTimestamp, 0, false)
+	prefixLen := 1 + util.Uint160Size
+	var seekErr error
+	dao.Store.Seek(storage.SeekRange{
+		Prefix:    key[:prefixLen],
+		Start:     key[prefixLen : prefixLen+8],
+		Backwards: true,
+	}, func(k, v []byte) bool {
+		lg := &state.TokenTransferLog{Raw: v}
+		cont, err := lg.ForEachNEP17(f)
+		if err != nil {
+			seekErr = err
+		}
+		return cont
+	})
+	return seekErr
+}
+
+// SeekNEP11TransferLog executes f for each NEP-11 transfer in log starting from
+// the transfer with the newest timestamp up to the oldest transfer. It continues
+// iteration until false is returned from f. The last non-nil error is returned.
+func (dao *Simple) SeekNEP11TransferLog(acc util.Uint160, newestTimestamp uint64, f func(*state.NEP11Transfer) (bool, error)) error {
+	key := getTokenTransferLogKey(acc, newestTimestamp, 0, true)
+	prefixLen := 1 + util.Uint160Size
+	var seekErr error
+	dao.Store.Seek(storage.SeekRange{
+		Prefix:    key[:prefixLen],
+		Start:     key[prefixLen : prefixLen+8],
+		Backwards: true,
+	}, func(k, v []byte) bool {
+		lg := &state.TokenTransferLog{Raw: v}
+		cont, err := lg.ForEachNEP11(f)
+		if err != nil {
+			seekErr = err
+		}
+		return cont
+	})
+	return seekErr
+}
+
 // GetTokenTransferLog retrieves transfer log from the cache.
-func (dao *Simple) GetTokenTransferLog(acc util.Uint160, index uint32, isNEP11 bool) (*state.TokenTransferLog, error) {
-	key := getTokenTransferLogKey(acc, index, isNEP11)
+func (dao *Simple) GetTokenTransferLog(acc util.Uint160, newestTimestamp uint64, index uint32, isNEP11 bool) (*state.TokenTransferLog, error) {
+	key := getTokenTransferLogKey(acc, newestTimestamp, index, isNEP11)
 	value, err := dao.Store.Get(key)
 	if err != nil {
 		if err == storage.ErrKeyNotFound {
@@ -206,8 +251,8 @@ func (dao *Simple) GetTokenTransferLog(acc util.Uint160, index uint32, isNEP11 b
 }
 
 // PutTokenTransferLog saves given transfer log in the cache.
-func (dao *Simple) PutTokenTransferLog(acc util.Uint160, index uint32, isNEP11 bool, lg *state.TokenTransferLog) error {
-	key := getTokenTransferLogKey(acc, index, isNEP11)
+func (dao *Simple) PutTokenTransferLog(acc util.Uint160, start uint64, index uint32, isNEP11 bool, lg *state.TokenTransferLog) error {
+	key := getTokenTransferLogKey(acc, start, index, isNEP11)
 	return dao.Store.Put(key, lg.Raw)
 }
 
@@ -292,13 +337,14 @@ func (dao *Simple) GetStorageItems(id int32) ([]state.StorageItemWithKey, error)
 func (dao *Simple) GetStorageItemsWithPrefix(id int32, prefix []byte) ([]state.StorageItemWithKey, error) {
 	var siArr []state.StorageItemWithKey
 
-	saveToArr := func(k, v []byte) {
+	saveToArr := func(k, v []byte) bool {
 		// Cut prefix and hash.
 		// #1468, but don't need to copy here, because it is done by Store.
 		siArr = append(siArr, state.StorageItemWithKey{
 			Key:  k,
 			Item: state.StorageItem(v),
 		})
+		return true
 	}
 	dao.Seek(id, storage.SeekRange{Prefix: prefix}, saveToArr)
 	return siArr, nil
@@ -306,11 +352,11 @@ func (dao *Simple) GetStorageItemsWithPrefix(id int32, prefix []byte) ([]state.S
 
 // Seek executes f for all storage items matching a given `rng` (matching given prefix and
 // starting from the point specified). If key or value is to be used outside of f, they
-// may not be copied.
-func (dao *Simple) Seek(id int32, rng storage.SeekRange, f func(k, v []byte)) {
+// may not be copied. Seek continues iterating until false is returned from f.
+func (dao *Simple) Seek(id int32, rng storage.SeekRange, f func(k, v []byte) bool) {
 	rng.Prefix = makeStorageItemKey(dao.Version.StoragePrefix, id, rng.Prefix)
-	dao.Store.Seek(rng, func(k, v []byte) {
-		f(k[len(rng.Prefix):], v)
+	dao.Store.Seek(rng, func(k, v []byte) bool {
+		return f(k[len(rng.Prefix):], v)
 	})
 }
 
@@ -477,13 +523,14 @@ func (dao *Simple) GetHeaderHashes() ([]util.Uint256, error) {
 	hashMap := make(map[uint32][]util.Uint256)
 	dao.Store.Seek(storage.SeekRange{
 		Prefix: storage.IXHeaderHashList.Bytes(),
-	}, func(k, v []byte) {
+	}, func(k, v []byte) bool {
 		storedCount := binary.LittleEndian.Uint32(k[1:])
 		hashes, err := read2000Uint256Hashes(v)
 		if err != nil {
 			panic(err)
 		}
 		hashMap[storedCount] = hashes
+		return true
 	})
 
 	var (
diff --git a/pkg/core/native/management.go b/pkg/core/native/management.go
index 0213fcb06..2696e573e 100644
--- a/pkg/core/native/management.go
+++ b/pkg/core/native/management.go
@@ -503,13 +503,14 @@ func (m *Management) InitializeCache(d dao.DAO) error {
 	defer m.mtx.Unlock()
 
 	var initErr error
-	d.Seek(m.ID, storage.SeekRange{Prefix: []byte{prefixContract}}, func(_, v []byte) {
+	d.Seek(m.ID, storage.SeekRange{Prefix: []byte{prefixContract}}, func(_, v []byte) bool {
 		var cs = new(state.Contract)
 		initErr = stackitem.DeserializeConvertible(v, cs)
 		if initErr != nil {
-			return
+			return false
 		}
 		m.updateContractCache(cs)
+		return true
 	})
 	return initErr
 }
diff --git a/pkg/core/native/native_neo.go b/pkg/core/native/native_neo.go
index 200911ccc..977f60b90 100644
--- a/pkg/core/native/native_neo.go
+++ b/pkg/core/native/native_neo.go
@@ -396,8 +396,8 @@ func (n *NEO) getGASPerVote(d dao.DAO, key []byte, indexes []uint32) []big.Int {
 		Prefix:    key,
 		Start:     start,
 		Backwards: true,
-	}, func(k, v []byte) {
-		if collected < need && len(k) == 4 {
+	}, func(k, v []byte) bool {
+		if len(k) == 4 {
 			num := binary.BigEndian.Uint32(k)
 			for i, ind := range indexes {
 				if reward[i].Sign() == 0 && num <= ind {
@@ -406,6 +406,7 @@ func (n *NEO) getGASPerVote(d dao.DAO, key []byte, indexes []uint32) []big.Int {
 				}
 			}
 		}
+		return collected < need
 	})
 	return reward
 }
@@ -601,8 +602,9 @@ func (n *NEO) dropCandidateIfZero(d dao.DAO, pub *keys.PublicKey, c *candidate)
 	var toRemove []string
 	voterKey := makeVoterKey(pub.Bytes())
-	d.Seek(n.ID, storage.SeekRange{Prefix: voterKey}, func(k, v []byte) {
+	d.Seek(n.ID, storage.SeekRange{Prefix: voterKey}, func(k, v []byte) bool {
 		toRemove = append(toRemove, string(k))
+		return true
 	})
 	for i := range toRemove {
 		if err := d.DeleteStorageItem(n.ID, []byte(toRemove[i])); err != nil {
diff --git a/pkg/core/state/tokens.go b/pkg/core/state/tokens.go
index 2d1dc8f51..9470fa845 100644
--- a/pkg/core/state/tokens.go
+++ b/pkg/core/state/tokens.go
@@ -53,6 +53,10 @@ type TokenTransferInfo struct {
 	NextNEP11Batch uint32
 	// NextNEP17Batch stores the index of the next NEP-17 transfer batch.
 	NextNEP17Batch uint32
+	// NextNEP11NewestTimestamp stores the block timestamp of the first NEP-11 transfer in raw.
+	NextNEP11NewestTimestamp uint64
+	// NextNEP17NewestTimestamp stores the block timestamp of the first NEP-17 transfer in raw.
+	NextNEP17NewestTimestamp uint64
 	// NewNEP11Batch is true if batch with the `NextNEP11Batch` index should be created.
 	NewNEP11Batch bool
 	// NewNEP17Batch is true if batch with the `NextNEP17Batch` index should be created.
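The fields above pair with the new on-disk key layout from getTokenTransferLogKey: one storage-prefix byte, the 20-byte account, the 8-byte big-endian newest timestamp and the 4-byte big-endian batch index. Big-endian encoding is what makes lexicographic key order coincide with numeric timestamp order, so the backwards Seek in SeekNEP17TransferLog walks batches newest-first. Below is a minimal standalone sketch of that layout; decodeTokenTransferLogKey is a hypothetical helper written for illustration, and the 0x72 prefix byte is an assumed placeholder for storage.STNEP17Transfers, not a value taken from this patch.

package main

import (
	"encoding/binary"
	"fmt"
)

const uint160Size = 20 // mirrors util.Uint160Size

// decodeTokenTransferLogKey splits a key built the same way as
// getTokenTransferLogKey back into its parts; offsets mirror that function.
func decodeTokenTransferLogKey(key []byte) (prefix byte, acc []byte, ts uint64, index uint32) {
	prefix = key[0]
	acc = key[1 : 1+uint160Size]
	// Big-endian values keep lexicographic order equal to numeric order.
	ts = binary.BigEndian.Uint64(key[1+uint160Size:])
	index = binary.BigEndian.Uint32(key[1+uint160Size+8:])
	return
}

func main() {
	key := make([]byte, 1+uint160Size+8+4)
	key[0] = 0x72 // assumed byte value for the NEP-17 transfers storage prefix
	binary.BigEndian.PutUint64(key[1+uint160Size:], 1_600_000_000_000)
	binary.BigEndian.PutUint32(key[1+uint160Size+8:], 5)
	p, _, ts, i := decodeTokenTransferLogKey(key)
	fmt.Printf("prefix=%#x timestamp=%d index=%d\n", p, ts, i)
}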
@@ -72,6 +76,8 @@ func NewTokenTransferInfo() *TokenTransferInfo {
 func (bs *TokenTransferInfo) DecodeBinary(r *io.BinReader) {
 	bs.NextNEP11Batch = r.ReadU32LE()
 	bs.NextNEP17Batch = r.ReadU32LE()
+	bs.NextNEP11NewestTimestamp = r.ReadU64LE()
+	bs.NextNEP17NewestTimestamp = r.ReadU64LE()
 	bs.NewNEP11Batch = r.ReadBool()
 	bs.NewNEP17Batch = r.ReadBool()
 	lenBalances := r.ReadVarUint()
@@ -87,6 +93,8 @@ func (bs *TokenTransferInfo) DecodeBinary(r *io.BinReader) {
 func (bs *TokenTransferInfo) EncodeBinary(w *io.BinWriter) {
 	w.WriteU32LE(bs.NextNEP11Batch)
 	w.WriteU32LE(bs.NextNEP17Batch)
+	w.WriteU64LE(bs.NextNEP11NewestTimestamp)
+	w.WriteU64LE(bs.NextNEP17NewestTimestamp)
 	w.WriteBool(bs.NewNEP11Batch)
 	w.WriteBool(bs.NewNEP17Batch)
 	w.WriteVarUint(uint64(len(bs.LastUpdated)))
diff --git a/pkg/core/stateroot/module.go b/pkg/core/stateroot/module.go
index fc6e68baf..2af4665fb 100644
--- a/pkg/core/stateroot/module.go
+++ b/pkg/core/stateroot/module.go
@@ -134,9 +134,10 @@ func (s *Module) CleanStorage() error {
 		return fmt.Errorf("can't clean MPT data for non-genesis block: expected local stateroot height 0, got %d", s.localHeight.Load())
 	}
 	b := s.Store.Batch()
-	s.Store.Seek(storage.SeekRange{Prefix: []byte{byte(storage.DataMPT)}}, func(k, _ []byte) {
+	s.Store.Seek(storage.SeekRange{Prefix: []byte{byte(storage.DataMPT)}}, func(k, _ []byte) bool {
 		// #1468, but don't need to copy here, because it is done by Store.
 		b.Delete(k)
+		return true
 	})
 	err := s.Store.PutBatch(b)
 	if err != nil {
diff --git a/pkg/core/statesync/module_test.go b/pkg/core/statesync/module_test.go
index ab4ba650e..a4a6a253a 100644
--- a/pkg/core/statesync/module_test.go
+++ b/pkg/core/statesync/module_test.go
@@ -32,7 +32,7 @@ func TestModule_PR2019_discussion_r689629704(t *testing.T) {
 		nodes         = make(map[util.Uint256][]byte)
 		expectedItems []storage.KeyValue
 	)
-	expectedStorage.Seek(storage.SeekRange{Prefix: storage.DataMPT.Bytes()}, func(k, v []byte) {
+	expectedStorage.Seek(storage.SeekRange{Prefix: storage.DataMPT.Bytes()}, func(k, v []byte) bool {
 		key := slice.Copy(k)
 		value := slice.Copy(v)
 		expectedItems = append(expectedItems, storage.KeyValue{
@@ -43,6 +43,7 @@ func TestModule_PR2019_discussion_r689629704(t *testing.T) {
 		require.NoError(t, err)
 		nodeBytes := value[:len(value)-4]
 		nodes[hash] = nodeBytes
+		return true
 	})
 
 	actualStorage := storage.NewMemCachedStore(storage.NewMemoryStore())
@@ -95,13 +96,14 @@ func TestModule_PR2019_discussion_r689629704(t *testing.T) {
 
 	// Compare resulting storage items and refcounts.
 	var actualItems []storage.KeyValue
-	expectedStorage.Seek(storage.SeekRange{Prefix: storage.DataMPT.Bytes()}, func(k, v []byte) {
+	expectedStorage.Seek(storage.SeekRange{Prefix: storage.DataMPT.Bytes()}, func(k, v []byte) bool {
 		key := slice.Copy(k)
 		value := slice.Copy(v)
 		actualItems = append(actualItems, storage.KeyValue{
 			Key:   key,
 			Value: value,
 		})
+		return true
 	})
 	require.ElementsMatch(t, expectedItems, actualItems)
 }
diff --git a/pkg/core/statesync_test.go b/pkg/core/statesync_test.go
index 9996579f2..9d62088a0 100644
--- a/pkg/core/statesync_test.go
+++ b/pkg/core/statesync_test.go
@@ -424,7 +424,7 @@ func TestStateSyncModule_RestoreBasicChain(t *testing.T) {
 	// compare storage states
 	fetchStorage := func(bc *Blockchain) []storage.KeyValue {
 		var kv []storage.KeyValue
-		bc.dao.Store.Seek(storage.SeekRange{Prefix: bc.dao.Version.StoragePrefix.Bytes()}, func(k, v []byte) {
+		bc.dao.Store.Seek(storage.SeekRange{Prefix: bc.dao.Version.StoragePrefix.Bytes()}, func(k, v []byte) bool {
 			key := slice.Copy(k)
 			value := slice.Copy(v)
 			if key[0] == byte(storage.STTempStorage) {
@@ -434,6 +434,7 @@ func TestStateSyncModule_RestoreBasicChain(t *testing.T) {
 				Key:   key,
 				Value: value,
 			})
+			return true
 		})
 		return kv
 	}
@@ -444,8 +445,9 @@ func TestStateSyncModule_RestoreBasicChain(t *testing.T) {
 	// no temp items should be left
 	require.Eventually(t, func() bool {
 		var haveItems bool
-		bcBolt.dao.Store.Seek(storage.SeekRange{Prefix: storage.STStorage.Bytes()}, func(_, _ []byte) {
+		bcBolt.dao.Store.Seek(storage.SeekRange{Prefix: storage.STStorage.Bytes()}, func(_, _ []byte) bool {
 			haveItems = true
+			return false
 		})
 		return !haveItems
 	}, time.Second*5, time.Millisecond*100)
diff --git a/pkg/core/storage/boltdb_store.go b/pkg/core/storage/boltdb_store.go
index c1ac2ed5e..83f2f3144 100644
--- a/pkg/core/storage/boltdb_store.go
+++ b/pkg/core/storage/boltdb_store.go
@@ -109,7 +109,7 @@ func (s *BoltDBStore) PutChangeSet(puts map[string][]byte, dels map[string]bool)
 }
 
 // Seek implements the Store interface.
-func (s *BoltDBStore) Seek(rng SeekRange, f func(k, v []byte)) {
+func (s *BoltDBStore) Seek(rng SeekRange, f func(k, v []byte) bool) {
 	start := make([]byte, len(rng.Prefix)+len(rng.Start))
 	copy(start, rng.Prefix)
 	copy(start[len(rng.Prefix):], rng.Start)
@@ -120,13 +120,15 @@ func (s *BoltDBStore) Seek(rng SeekRange, f func(k, v []byte)) {
 	}
 }
 
-func (s *BoltDBStore) seek(key []byte, start []byte, f func(k, v []byte)) {
+func (s *BoltDBStore) seek(key []byte, start []byte, f func(k, v []byte) bool) {
 	prefix := util.BytesPrefix(key)
 	prefix.Start = start
 	err := s.db.View(func(tx *bbolt.Tx) error {
 		c := tx.Bucket(Bucket).Cursor()
 		for k, v := c.Seek(prefix.Start); k != nil && (len(prefix.Limit) == 0 || bytes.Compare(k, prefix.Limit) <= 0); k, v = c.Next() {
-			f(k, v)
+			if !f(k, v) {
+				break
+			}
 		}
 		return nil
 	})
@@ -135,7 +137,7 @@ func (s *BoltDBStore) seek(key []byte, start []byte, f func(k, v []byte)) {
 	}
 }
 
-func (s *BoltDBStore) seekBackwards(key []byte, start []byte, f func(k, v []byte)) {
+func (s *BoltDBStore) seekBackwards(key []byte, start []byte, f func(k, v []byte) bool) {
 	err := s.db.View(func(tx *bbolt.Tx) error {
 		c := tx.Bucket(Bucket).Cursor()
 		// Move cursor to the first kv pair which is followed by the pair matching the specified prefix.
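All Store implementations in this patch honor the same contract: the callback returns true to continue and false to stop the underlying cursor or iterator. A minimal usage sketch against the in-memory store follows; firstN is a hypothetical helper written for illustration, not part of the patch.

package main

import (
	"fmt"

	"github.com/nspcc-dev/neo-go/pkg/core/storage"
)

// firstN collects at most max keys under the given prefix, relying on the
// new bool return value to stop the Seek early.
func firstN(s storage.Store, prefix []byte, max int) [][]byte {
	var keys [][]byte
	s.Seek(storage.SeekRange{Prefix: prefix}, func(k, v []byte) bool {
		// k is only valid until the next call to f, so copy it before keeping.
		keys = append(keys, append([]byte(nil), k...))
		return len(keys) < max // false stops the iteration
	})
	return keys
}

func main() {
	ms := storage.NewMemoryStore()
	for i := byte(0); i < 10; i++ {
		_ = ms.Put([]byte{0x01, i}, []byte{i})
	}
	fmt.Println(len(firstN(ms, []byte{0x01}, 3))) // prints 3
}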
@@ -146,7 +148,9 @@ func (s *BoltDBStore) seekBackwards(key []byte, start []byte, f func(k, v []byte
 		rng := util.BytesPrefix(start) // in fact, we only need limit based on start slice to iterate backwards starting from this limit
 		c.Seek(rng.Limit)
 		for k, v := c.Prev(); k != nil && bytes.HasPrefix(k, key); k, v = c.Prev() {
-			f(k, v)
+			if !f(k, v) {
+				break
+			}
 		}
 		return nil
 	})
diff --git a/pkg/core/storage/leveldb_store.go b/pkg/core/storage/leveldb_store.go
index 87ab09923..410b93e82 100644
--- a/pkg/core/storage/leveldb_store.go
+++ b/pkg/core/storage/leveldb_store.go
@@ -85,7 +85,7 @@ func (s *LevelDBStore) PutChangeSet(puts map[string][]byte, dels map[string]bool
 }
 
 // Seek implements the Store interface.
-func (s *LevelDBStore) Seek(rng SeekRange, f func(k, v []byte)) {
+func (s *LevelDBStore) Seek(rng SeekRange, f func(k, v []byte) bool) {
 	start := make([]byte, len(rng.Prefix)+len(rng.Start))
 	copy(start, rng.Prefix)
 	copy(start[len(rng.Prefix):], rng.Start)
@@ -96,23 +96,27 @@ func (s *LevelDBStore) Seek(rng SeekRange, f func(k, v []byte)) {
 	}
 }
 
-func (s *LevelDBStore) seek(key []byte, start []byte, f func(k, v []byte)) {
+func (s *LevelDBStore) seek(key []byte, start []byte, f func(k, v []byte) bool) {
 	prefix := util.BytesPrefix(key)
 	prefix.Start = start
 	iter := s.db.NewIterator(prefix, nil)
 	for iter.Next() {
-		f(iter.Key(), iter.Value())
+		if !f(iter.Key(), iter.Value()) {
+			break
+		}
 	}
 	iter.Release()
 }
 
-func (s *LevelDBStore) seekBackwards(key []byte, start []byte, f func(k, v []byte)) {
+func (s *LevelDBStore) seekBackwards(key []byte, start []byte, f func(k, v []byte) bool) {
 	iRange := util.BytesPrefix(start)
 	iRange.Start = key
 	iter := s.db.NewIterator(iRange, nil)
 	for ok := iter.Last(); ok; ok = iter.Prev() {
-		f(iter.Key(), iter.Value())
+		if !f(iter.Key(), iter.Value()) {
+			break
+		}
 	}
 	iter.Release()
 }
diff --git a/pkg/core/storage/memcached_store.go b/pkg/core/storage/memcached_store.go
index 7289e5f3c..a6e5278a1 100644
--- a/pkg/core/storage/memcached_store.go
+++ b/pkg/core/storage/memcached_store.go
@@ -90,7 +90,7 @@ func (s *MemCachedStore) GetBatch() *MemBatch {
 }
 
 // Seek implements the Store interface.
-func (s *MemCachedStore) Seek(rng SeekRange, f func(k, v []byte)) {
+func (s *MemCachedStore) Seek(rng SeekRange, f func(k, v []byte) bool) {
 	s.seek(context.Background(), rng, false, f)
 }
 
@@ -100,11 +100,12 @@ func (s *MemCachedStore) Seek(rng SeekRange, f func(k, v []byte)) {
 func (s *MemCachedStore) SeekAsync(ctx context.Context, rng SeekRange, cutPrefix bool) chan KeyValue {
 	res := make(chan KeyValue)
 	go func() {
-		s.seek(ctx, rng, cutPrefix, func(k, v []byte) {
+		s.seek(ctx, rng, cutPrefix, func(k, v []byte) bool {
 			res <- KeyValue{
 				Key:   k,
 				Value: v,
 			}
+			return true // always continue, we have context for early stop.
 		})
 		close(res)
 	}()
@@ -117,7 +118,7 @@ func (s *MemCachedStore) SeekAsync(ctx context.Context, rng SeekRange, cutPrefix
 // key needs to be cut off the resulting keys. `rng` specifies prefix items must match
 // and point to start seeking from. Backwards seeking from some point is supported
 // with corresponding `rng` field set.
-func (s *MemCachedStore) seek(ctx context.Context, rng SeekRange, cutPrefix bool, f func(k, v []byte)) {
+func (s *MemCachedStore) seek(ctx context.Context, rng SeekRange, cutPrefix bool, f func(k, v []byte) bool) {
 	// Create memory store `mem` and `del` snapshot not to hold the lock.
 	var memRes []KeyValueExists
 	sPrefix := string(rng.Prefix)
@@ -176,21 +177,21 @@ func (s *MemCachedStore) seek(ctx context.Context, rng SeekRange, cutPrefix bool
 		haveMem = true
 		iMem++
 	}
-	// Merge results of seek operations in ascending order.
-	mergeFunc := func(k, v []byte) {
+	// Merge results of seek operations in ascending order. It returns whether iterating
+	// should be continued.
+	mergeFunc := func(k, v []byte) bool {
 		if done {
-			return
+			return false
 		}
 		kvPs := KeyValue{
 			Key:   slice.Copy(k),
 			Value: slice.Copy(v),
 		}
-	loop:
 		for {
 			select {
 			case <-ctx.Done():
 				done = true
-				break loop
+				return false
 			default:
 				var isMem = haveMem && less(kvMem.Key, kvPs.Key)
 				if isMem {
@@ -198,7 +199,10 @@ func (s *MemCachedStore) seek(ctx context.Context, rng SeekRange, cutPrefix bool
 					if cutPrefix {
 						kvMem.Key = kvMem.Key[lPrefix:]
 					}
-					f(kvMem.Key, kvMem.Value)
+					if !f(kvMem.Key, kvMem.Value) {
+						done = true
+						return false
+					}
 				}
 				if iMem < len(memRes) {
 					kvMem = memRes[iMem]
@@ -212,9 +216,12 @@ func (s *MemCachedStore) seek(ctx context.Context, rng SeekRange, cutPrefix bool
 					if cutPrefix {
 						kvPs.Key = kvPs.Key[lPrefix:]
 					}
-					f(kvPs.Key, kvPs.Value)
+					if !f(kvPs.Key, kvPs.Value) {
+						done = true
+						return false
+					}
 				}
-				break loop
+				return true
 			}
 		}
 	}
@@ -233,7 +240,9 @@ func (s *MemCachedStore) seek(ctx context.Context, rng SeekRange, cutPrefix bool
 			if cutPrefix {
 				kvMem.Key = kvMem.Key[lPrefix:]
 			}
-			f(kvMem.Key, kvMem.Value)
+			if !f(kvMem.Key, kvMem.Value) {
+				break loop
+			}
 		}
 	}
 }
diff --git a/pkg/core/storage/memcached_store_test.go b/pkg/core/storage/memcached_store_test.go
index 44582e507..d46af8370 100644
--- a/pkg/core/storage/memcached_store_test.go
+++ b/pkg/core/storage/memcached_store_test.go
@@ -167,8 +167,9 @@ func TestCachedSeek(t *testing.T) {
 		require.NoError(t, ts.Put(v.Key, v.Value))
 	}
 	foundKVs := make(map[string][]byte)
-	ts.Seek(SeekRange{Prefix: goodPrefix}, func(k, v []byte) {
+	ts.Seek(SeekRange{Prefix: goodPrefix}, func(k, v []byte) bool {
 		foundKVs[string(k)] = v
+		return true
 	})
 	assert.Equal(t, len(foundKVs), len(lowerKVs)+len(updatedKVs))
 	for _, kv := range lowerKVs {
@@ -232,7 +233,7 @@ func benchmarkCachedSeek(t *testing.B, ps Store, psElementsCount, tsElementsCoun
 	t.ReportAllocs()
 	t.ResetTimer()
 	for n := 0; n < t.N; n++ {
-		ts.Seek(SeekRange{Prefix: searchPrefix}, func(k, v []byte) {})
+		ts.Seek(SeekRange{Prefix: searchPrefix}, func(k, v []byte) bool { return true })
 	}
 	t.StopTimer()
 }
@@ -290,7 +291,7 @@ func (b *BadStore) PutChangeSet(_ map[string][]byte, _ map[string]bool) error {
 	b.onPutBatch()
 	return ErrKeyNotFound
 }
-func (b *BadStore) Seek(rng SeekRange, f func(k, v []byte)) {
+func (b *BadStore) Seek(rng SeekRange, f func(k, v []byte) bool) {
 }
 func (b *BadStore) Close() error {
 	return nil
@@ -365,8 +366,9 @@ func TestCachedSeekSorting(t *testing.T) {
 		require.NoError(t, ts.Put(v.Key, v.Value))
 	}
 	var foundKVs []KeyValue
-	ts.Seek(SeekRange{Prefix: goodPrefix}, func(k, v []byte) {
+	ts.Seek(SeekRange{Prefix: goodPrefix}, func(k, v []byte) bool {
 		foundKVs = append(foundKVs, KeyValue{Key: slice.Copy(k), Value: slice.Copy(v)})
+		return true
 	})
 	assert.Equal(t, len(foundKVs), len(lowerKVs)+len(updatedKVs))
 	expected := append(lowerKVs, updatedKVs...)
diff --git a/pkg/core/storage/memory_store.go b/pkg/core/storage/memory_store.go
index edcb6eb30..506f76224 100644
--- a/pkg/core/storage/memory_store.go
+++ b/pkg/core/storage/memory_store.go
@@ -104,7 +104,7 @@ func (s *MemoryStore) PutChangeSet(puts map[string][]byte, dels map[string]bool)
 }
 
 // Seek implements the Store interface.
-func (s *MemoryStore) Seek(rng SeekRange, f func(k, v []byte)) {
+func (s *MemoryStore) Seek(rng SeekRange, f func(k, v []byte) bool) {
 	s.mut.RLock()
 	s.seek(rng, f)
 	s.mut.RUnlock()
@@ -130,7 +130,7 @@ func (s *MemoryStore) SeekAll(key []byte, f func(k, v []byte)) {
 // seek is an internal unlocked implementation of Seek. `start` denotes whether
 // seeking starting from the provided prefix should be performed. Backwards
 // seeking from some point is supported with corresponding SeekRange field set.
-func (s *MemoryStore) seek(rng SeekRange, f func(k, v []byte)) {
+func (s *MemoryStore) seek(rng SeekRange, f func(k, v []byte) bool) {
 	sPrefix := string(rng.Prefix)
 	lPrefix := len(sPrefix)
 	sStart := string(rng.Start)
@@ -162,7 +162,9 @@ func (s *MemoryStore) seek(rng SeekRange, f func(k, v []byte)) {
 		return less(memList[i].Key, memList[j].Key)
 	})
 	for _, kv := range memList {
-		f(kv.Key, kv.Value)
+		if !f(kv.Key, kv.Value) {
+			break
+		}
 	}
 }
diff --git a/pkg/core/storage/memory_store_test.go b/pkg/core/storage/memory_store_test.go
index 4069cd37d..a8ae360be 100644
--- a/pkg/core/storage/memory_store_test.go
+++ b/pkg/core/storage/memory_store_test.go
@@ -28,7 +28,7 @@ func BenchmarkMemorySeek(t *testing.B) {
 		t.ReportAllocs()
 		t.ResetTimer()
 		for n := 0; n < t.N; n++ {
-			ms.Seek(SeekRange{Prefix: searchPrefix}, func(k, v []byte) {})
+			ms.Seek(SeekRange{Prefix: searchPrefix}, func(k, v []byte) bool { return true })
 		}
 	})
}
diff --git a/pkg/core/storage/store.go b/pkg/core/storage/store.go
index 9e2772eca..8409e7a1f 100644
--- a/pkg/core/storage/store.go
+++ b/pkg/core/storage/store.go
@@ -60,7 +60,7 @@ type SeekRange struct {
 	// Empty Prefix means seeking through all keys in the DB starting from
 	// the Start if specified.
 	Prefix []byte
-	// Start denotes value upended to the Prefix to start Seek from.
+	// Start denotes value appended to the Prefix to start Seek from.
 	// Seeking starting from some key includes this key to the result;
 	// if no matching key was found then next suitable key is picked up.
 	// Start may be empty. Empty Start means seeking through all keys in
@@ -90,9 +90,10 @@ type (
 		// PutChangeSet allows to push prepared changeset to the Store.
 		PutChangeSet(puts map[string][]byte, dels map[string]bool) error
-		// Seek can guarantee that provided key (k) and value (v) are the only valid until the next call to f.
-		// Key and value slices should not be modified. Seek can guarantee that key-value items are sorted by
-		// key in ascending way.
-		Seek(rng SeekRange, f func(k, v []byte))
+		// Seek can guarantee that provided key (k) and value (v) are only valid until the next call to f.
+		// Seek continues iteration until false is returned from f.
+		// Key and value slices should not be modified.
+		// Seek can guarantee that key-value items are sorted by key in ascending way.
+		Seek(rng SeekRange, f func(k, v []byte) bool)
 		Close() error
 	}
diff --git a/pkg/core/storage/storeandbatch_test.go b/pkg/core/storage/storeandbatch_test.go
index 273408ea7..355bcafdf 100644
--- a/pkg/core/storage/storeandbatch_test.go
+++ b/pkg/core/storage/storeandbatch_test.go
@@ -81,7 +81,7 @@ func testStoreSeek(t *testing.T, s Store) {
 		require.NoError(t, s.Put(v.Key, v.Value))
 	}
 
-	check := func(t *testing.T, goodprefix, start []byte, goodkvs []KeyValue, backwards bool) {
+	check := func(t *testing.T, goodprefix, start []byte, goodkvs []KeyValue, backwards bool, cont func(k, v []byte) bool) {
 		// Seek result expected to be sorted in an ascending (for forwards seeking) or descending (for backwards seeking) way.
 		cmpFunc := func(i, j int) bool {
 			return bytes.Compare(goodkvs[i].Key, goodkvs[j].Key) < 0
@@ -101,11 +101,15 @@ func testStoreSeek(t *testing.T, s Store) {
 			rng.Backwards = true
 		}
 		actual := make([]KeyValue, 0, len(goodkvs))
-		s.Seek(rng, func(k, v []byte) {
+		s.Seek(rng, func(k, v []byte) bool {
 			actual = append(actual, KeyValue{
 				Key:   slice.Copy(k),
 				Value: slice.Copy(v),
 			})
+			if cont == nil {
+				return true
+			}
+			return cont(k, v)
 		})
 		assert.Equal(t, goodkvs, actual)
 	}
@@ -123,12 +127,26 @@ func testStoreSeek(t *testing.T, s Store) {
 				kvs[3], // key = "21"
 				kvs[4], // key = "22"
 			}
-			check(t, goodprefix, start, goodkvs, false)
+			check(t, goodprefix, start, goodkvs, false, nil)
 		})
 		t.Run("no matching items", func(t *testing.T) {
 			goodprefix := []byte("0")
 			start := []byte{}
-			check(t, goodprefix, start, []KeyValue{}, false)
+			check(t, goodprefix, start, []KeyValue{}, false, nil)
+		})
+		t.Run("early stop", func(t *testing.T) {
+			// Given this prefix...
+			goodprefix := []byte("2")
+			// and empty start range...
+			start := []byte{}
+			// these pairs should be found.
+			goodkvs := []KeyValue{
+				kvs[2], // key = "20"
+				kvs[3], // key = "21"
+			}
+			check(t, goodprefix, start, goodkvs, false, func(k, v []byte) bool {
+				return string(k) < "21"
+			})
 		})
 	})
 
 	t.Run("backwards", func(t *testing.T) {
@@ -141,12 +159,23 @@ func testStoreSeek(t *testing.T, s Store) {
 				kvs[3], // key = "21"
 				kvs[2], // key = "20"
 			}
-			check(t, goodprefix, start, goodkvs, true)
+			check(t, goodprefix, start, goodkvs, true, nil)
 		})
 		t.Run("no matching items", func(t *testing.T) {
 			goodprefix := []byte("0")
 			start := []byte{}
-			check(t, goodprefix, start, []KeyValue{}, true)
+			check(t, goodprefix, start, []KeyValue{}, true, nil)
+		})
+		t.Run("early stop", func(t *testing.T) {
+			goodprefix := []byte("2")
+			start := []byte{}
+			goodkvs := []KeyValue{
+				kvs[4], // key = "22"
+				kvs[3], // key = "21"
+			}
+			check(t, goodprefix, start, goodkvs, true, func(k, v []byte) bool {
+				return string(k) > "21"
+			})
 		})
 	})
 })
@@ -155,33 +184,55 @@ func testStoreSeek(t *testing.T, s Store) {
 	t.Run("forwards", func(t *testing.T) {
 		t.Run("good", func(t *testing.T) {
 			goodprefix := []byte("2")
-			start := []byte("1") // start will be upended to goodprefix to start seek from
+			start := []byte("1") // start will be appended to goodprefix to start seek from
 			goodkvs := []KeyValue{
 				kvs[3], // key = "21"
 				kvs[4], // key = "22"
 			}
-			check(t, goodprefix, start, goodkvs, false)
+			check(t, goodprefix, start, goodkvs, false, nil)
 		})
 		t.Run("no matching items", func(t *testing.T) {
 			goodprefix := []byte("2")
 			start := []byte("3") // start is more than all keys prefixed by '2'.
-			check(t, goodprefix, start, []KeyValue{}, false)
+			check(t, goodprefix, start, []KeyValue{}, false, nil)
+		})
+		t.Run("early stop", func(t *testing.T) {
+			goodprefix := []byte("2")
+			start := []byte("0") // start will be appended to goodprefix to start seek from
+			goodkvs := []KeyValue{
+				kvs[2], // key = "20"
+				kvs[3], // key = "21"
+			}
+			check(t, goodprefix, start, goodkvs, false, func(k, v []byte) bool {
+				return string(k) < "21"
+			})
 		})
 	})
 	t.Run("backwards", func(t *testing.T) {
 		t.Run("good", func(t *testing.T) {
 			goodprefix := []byte("2")
-			start := []byte("1") // start will be upended to goodprefix to start seek from
+			start := []byte("1") // start will be appended to goodprefix to start seek from
 			goodkvs := []KeyValue{
 				kvs[3], // key = "21"
 				kvs[2], // key = "20"
 			}
-			check(t, goodprefix, start, goodkvs, true)
+			check(t, goodprefix, start, goodkvs, true, nil)
 		})
 		t.Run("no matching items", func(t *testing.T) {
 			goodprefix := []byte("2")
 			start := []byte(".") // start is less than all keys prefixed by '2'.
-			check(t, goodprefix, start, []KeyValue{}, true)
+			check(t, goodprefix, start, []KeyValue{}, true, nil)
+		})
+		t.Run("early stop", func(t *testing.T) {
+			goodprefix := []byte("2")
+			start := []byte("2") // start will be appended to goodprefix to start seek from
+			goodkvs := []KeyValue{
+				kvs[4], // key = "22"
+				kvs[3], // key = "21"
+			}
+			check(t, goodprefix, start, goodkvs, true, func(k, v []byte) bool {
+				return string(k) > "21"
+			})
 		})
 	})
 })
@@ -197,12 +248,24 @@ func testStoreSeek(t *testing.T, s Store) {
 				kvs[5], // key = "30"
 				kvs[6], // key = "31"
 			}
-			check(t, goodprefix, start, goodkvs, false)
+			check(t, goodprefix, start, goodkvs, false, nil)
 		})
 		t.Run("no matching items", func(t *testing.T) {
 			goodprefix := []byte{}
 			start := []byte("32") // start is more than all keys.
-			check(t, goodprefix, start, []KeyValue{}, false)
+			check(t, goodprefix, start, []KeyValue{}, false, nil)
+		})
+		t.Run("early stop", func(t *testing.T) {
+			goodprefix := []byte{}
+			start := []byte("21")
+			goodkvs := []KeyValue{
+				kvs[3], // key = "21"
+				kvs[4], // key = "22"
+				kvs[5], // key = "30"
+			}
+			check(t, goodprefix, start, goodkvs, false, func(k, v []byte) bool {
+				return string(k) < "30"
+			})
 		})
 	})
 	t.Run("backwards", func(t *testing.T) {
@@ -215,12 +278,24 @@ func testStoreSeek(t *testing.T, s Store) {
 				kvs[1], // key = "11"
 				kvs[0], // key = "10"
 			}
-			check(t, goodprefix, start, goodkvs, true)
+			check(t, goodprefix, start, goodkvs, true, nil)
 		})
 		t.Run("no matching items", func(t *testing.T) {
 			goodprefix := []byte{}
 			start := []byte("0") // start is less than all keys.
-			check(t, goodprefix, start, []KeyValue{}, true)
+			check(t, goodprefix, start, []KeyValue{}, true, nil)
+		})
+		t.Run("early stop", func(t *testing.T) {
+			goodprefix := []byte{}
+			start := []byte("21")
+			goodkvs := []KeyValue{
+				kvs[3], // key = "21"
+				kvs[2], // key = "20"
+				kvs[1], // key = "11"
+			}
+			check(t, goodprefix, start, goodkvs, true, func(k, v []byte) bool {
+				return string(k) > "11"
+			})
 		})
 	})
 })
@@ -231,10 +306,36 @@ func testStoreSeek(t *testing.T, s Store) {
 		goodkvs := make([]KeyValue, len(kvs))
 		copy(goodkvs, kvs)
 		t.Run("forwards", func(t *testing.T) {
-			check(t, goodprefix, start, goodkvs, false)
+			t.Run("good", func(t *testing.T) {
+				check(t, goodprefix, start, goodkvs, false, nil)
+			})
+			t.Run("early stop", func(t *testing.T) {
+				goodkvs := []KeyValue{
+					kvs[0], // key = "10"
+					kvs[1], // key = "11"
+					kvs[2], // key = "20"
+					kvs[3], // key = "21"
+				}
+				check(t, goodprefix, start, goodkvs, false, func(k, v []byte) bool {
+					return string(k) < "21"
+				})
+			})
 		})
 		t.Run("backwards", func(t *testing.T) {
-			check(t, goodprefix, start, goodkvs, true)
+			t.Run("good", func(t *testing.T) {
+				check(t, goodprefix, start, goodkvs, true, nil)
+			})
+			t.Run("early stop", func(t *testing.T) {
+				goodkvs := []KeyValue{
+					kvs[6], // key = "31"
+					kvs[5], // key = "30"
+					kvs[4], // key = "22"
+					kvs[3], // key = "21"
+				}
+				check(t, goodprefix, start, goodkvs, true, func(k, v []byte) bool {
+					return string(k) > "21"
+				})
+			})
 		})
 	})
diff --git a/pkg/rpc/server/server.go b/pkg/rpc/server/server.go
index f138d346b..2efebd2b8 100644
--- a/pkg/rpc/server/server.go
+++ b/pkg/rpc/server/server.go
@@ -1019,7 +1019,7 @@ func (s *Server) getTokenTransfers(ps request.Params, isNEP11 bool) (interface{}
 		return received, sent, !(limit != 0 && resCount >= limit), nil
 	}
 	if !isNEP11 {
-		err = s.chain.ForEachNEP17Transfer(u, func(tr *state.NEP17Transfer) (bool, error) {
+		err = s.chain.ForEachNEP17Transfer(u, end, func(tr *state.NEP17Transfer) (bool, error) {
 			r, s, res, err := handleTransfer(tr)
 			if err == nil {
 				if r != nil {
@@ -1032,7 +1032,7 @@ func (s *Server) getTokenTransfers(ps request.Params, isNEP11 bool) (interface{}
 			return res, err
 		})
 	} else {
-		err = s.chain.ForEachNEP11Transfer(u, func(tr *state.NEP11Transfer) (bool, error) {
+		err = s.chain.ForEachNEP11Transfer(u, end, func(tr *state.NEP11Transfer) (bool, error) {
 			r, s, res, err := handleTransfer(&tr.NEP17Transfer)
 			if err == nil {
 				id := hex.EncodeToString(tr.ID)
@@ -1047,7 +1047,7 @@ func (s *Server) getTokenTransfers(ps request.Params, isNEP11 bool) (interface{}
 		})
 	}
 	if err != nil {
-		return nil, response.NewInternalServerError("invalid transfer log", err)
+		return nil, response.NewInternalServerError(fmt.Sprintf("invalid transfer log: %v", err), err)
 	}
 	return bs, nil
 }
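Taken together, the benchmark and the RPC server above follow one caller pattern: pass the upper timestamp bound to ForEachNEP17Transfer, walk transfers newest-to-oldest and return false once the lower bound is crossed. A sketch of that pattern under stated assumptions: transfersInRange is a hypothetical helper, not part of this patch; start and end are block timestamps, and batch granularity means entries newer than end may still be visited, so they are filtered in the callback (as the RPC handler filters on its own range).

package example

import (
	"github.com/nspcc-dev/neo-go/pkg/core/blockchainer"
	"github.com/nspcc-dev/neo-go/pkg/core/state"
	"github.com/nspcc-dev/neo-go/pkg/util"
)

// transfersInRange walks the account's NEP-17 transfer log newest-to-oldest
// starting from the end bound and keeps entries until one older than start
// is seen, at which point iteration stops early.
func transfersInRange(bc blockchainer.Blockchainer, acc util.Uint160, start, end uint64) ([]state.NEP17Transfer, error) {
	var res []state.NEP17Transfer
	err := bc.ForEachNEP17Transfer(acc, end, func(tr *state.NEP17Transfer) (bool, error) {
		if tr.Timestamp > end {
			return true, nil // the containing batch may hold newer entries, skip them
		}
		if tr.Timestamp < start {
			return false, nil // older than requested, stop iterating
		}
		res = append(res, *tr)
		return true, nil
	})
	return res, err
}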