package core

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"strings"
	"sync/atomic"
	"testing"
	"time"

	"github.com/nspcc-dev/neo-go/internal/testchain"
	"github.com/nspcc-dev/neo-go/pkg/config"
	"github.com/nspcc-dev/neo-go/pkg/core/block"
	"github.com/nspcc-dev/neo-go/pkg/core/chaindump"
	"github.com/nspcc-dev/neo-go/pkg/core/state"
	"github.com/nspcc-dev/neo-go/pkg/core/storage"
	"github.com/nspcc-dev/neo-go/pkg/core/transaction"
	"github.com/nspcc-dev/neo-go/pkg/io"
	"github.com/nspcc-dev/neo-go/pkg/smartcontract"
	"github.com/nspcc-dev/neo-go/pkg/smartcontract/trigger"
	"github.com/nspcc-dev/neo-go/pkg/util"
	"github.com/nspcc-dev/neo-go/pkg/vm/opcode"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"go.uber.org/zap/zaptest"
)

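// TestVerifyHeader checks that verifyHeader rejects headers with a wrong
// previous hash, index or timestamp and accepts a well-formed successor header.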
func TestVerifyHeader(t *testing.T) {
	bc := newTestChain(t)
	prev := bc.topBlock.Load().(*block.Block).Header
	t.Run("Invalid", func(t *testing.T) {
		t.Run("Hash", func(t *testing.T) {
			h := prev.Hash()
			h[0] = ^h[0]
			hdr := newBlock(bc.config.ProtocolConfiguration, 1, h).Header
			require.ErrorIs(t, bc.verifyHeader(&hdr, &prev), ErrHdrHashMismatch)
		})
		t.Run("Index", func(t *testing.T) {
			hdr := newBlock(bc.config.ProtocolConfiguration, 3, prev.Hash()).Header
			require.ErrorIs(t, bc.verifyHeader(&hdr, &prev), ErrHdrIndexMismatch)
		})
		t.Run("Timestamp", func(t *testing.T) {
			hdr := newBlock(bc.config.ProtocolConfiguration, 1, prev.Hash()).Header
			hdr.Timestamp = 0
			require.ErrorIs(t, bc.verifyHeader(&hdr, &prev), ErrHdrInvalidTimestamp)
		})
	})
	t.Run("Valid", func(t *testing.T) {
		hdr := newBlock(bc.config.ProtocolConfiguration, 1, prev.Hash()).Header
		require.NoError(t, bc.verifyHeader(&hdr, &prev))
	})
}

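// TestAddBlock checks that generated blocks are accepted, persisted to the
// underlying store and reflected in the chain's block and header heights.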
func TestAddBlock(t *testing.T) {
	const size = 3
	bc := newTestChain(t)
	blocks, err := bc.genBlocks(size)
	require.NoError(t, err)

	lastBlock := blocks[len(blocks)-1]
	assert.Equal(t, lastBlock.Index, bc.HeaderHeight())
	assert.Equal(t, lastBlock.Hash(), bc.CurrentHeaderHash())

	// This one tests persisting blocks, so it does need to persist()
	_, err = bc.persist(false)
	require.NoError(t, err)

	key := make([]byte, 1+util.Uint256Size)
	key[0] = byte(storage.DataExecutable)
	for _, block := range blocks {
		copy(key[1:], block.Hash().BytesBE())
		_, err := bc.dao.Store.Get(key)
		require.NoErrorf(t, err, "block %s not persisted", block.Hash())
	}

	assert.Equal(t, lastBlock.Index, bc.BlockHeight())
	assert.Equal(t, lastBlock.Hash(), bc.CurrentHeaderHash())
}

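// TestRemoveOldTransfers checks removeOldTransfers behavior on a mix of stale
// and fresh token transfer log entries stored in the DAO.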
func TestRemoveOldTransfers(t *testing.T) {
	// Creating the proper number of transfers/blocks takes unnecessary time, so
	// emulate a DB with stale entries.
	bc := newTestChain(t)
	h, err := bc.GetHeader(bc.GetHeaderHash(0))
	require.NoError(t, err)
	older := h.Timestamp - 1000
	newer := h.Timestamp + 1000
	acc1 := util.Uint160{1}
	acc2 := util.Uint160{2}
	acc3 := util.Uint160{3}
	ttl := state.TokenTransferLog{Raw: []byte{1}} // It's incorrect, but who cares.

	for i := range uint32(3) {
		bc.dao.PutTokenTransferLog(acc1, older, i, false, &ttl)
	}
	for i := range uint32(3) {
		bc.dao.PutTokenTransferLog(acc2, newer, i, false, &ttl)
	}
	for i := range uint32(2) {
		bc.dao.PutTokenTransferLog(acc3, older, i, true, &ttl)
	}
	for i := range uint32(2) {
		bc.dao.PutTokenTransferLog(acc3, newer, i, true, &ttl)
	}

	_, err = bc.dao.Persist()
	require.NoError(t, err)
	_ = bc.removeOldTransfers(0)

	for i := range uint32(2) {
		log, err := bc.dao.GetTokenTransferLog(acc1, older, i, false)
		require.NoError(t, err)
		require.Equal(t, 0, len(log.Raw))
	}

	log, err := bc.dao.GetTokenTransferLog(acc1, older, 2, false)
	require.NoError(t, err)
	require.NotEqual(t, 0, len(log.Raw))

	for i := range uint32(3) {
		log, err = bc.dao.GetTokenTransferLog(acc2, newer, i, false)
		require.NoError(t, err)
		require.NotEqual(t, 0, len(log.Raw))
	}

	log, err = bc.dao.GetTokenTransferLog(acc3, older, 0, true)
	require.NoError(t, err)
	require.Equal(t, 0, len(log.Raw))

	log, err = bc.dao.GetTokenTransferLog(acc3, older, 1, true)
	require.NoError(t, err)
	require.NotEqual(t, 0, len(log.Raw))

	for i := range uint32(2) {
		log, err = bc.dao.GetTokenTransferLog(acc3, newer, i, true)
		require.NoError(t, err)
		require.NotEqual(t, 0, len(log.Raw))
	}
}

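// TestBlockchain_InitWithIncompleteStateJump checks how Blockchain
// initialization handles the various intermediate state-jump stages that can
// be left in the storage by an interrupted P2P state exchange.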
func TestBlockchain_InitWithIncompleteStateJump(t *testing.T) {
	var (
		stateSyncInterval        = 4
		maxTraceable      uint32 = 6
	)
	spountCfg := func(c *config.Config) {
		c.ApplicationConfiguration.RemoveUntraceableBlocks = true
		c.ProtocolConfiguration.StateRootInHeader = true
		c.ProtocolConfiguration.P2PStateExchangeExtensions = true
		c.ProtocolConfiguration.StateSyncInterval = stateSyncInterval
		c.ProtocolConfiguration.MaxTraceableBlocks = maxTraceable
		c.ApplicationConfiguration.KeepOnlyLatestState = true
	}
	bcSpout := newTestChainWithCustomCfg(t, spountCfg)

	// Generate some content.
	for range bcSpout.GetConfig().StandbyCommittee {
		require.NoError(t, bcSpout.AddBlock(bcSpout.newBlock()))
	}

	// Reach the next state sync point and pretend that we've just restored.
	stateSyncPoint := (int(bcSpout.BlockHeight())/stateSyncInterval + 1) * stateSyncInterval
	for i := bcSpout.BlockHeight() + 1; i <= uint32(stateSyncPoint); i++ {
		require.NoError(t, bcSpout.AddBlock(bcSpout.newBlock()))
	}
	require.Equal(t, uint32(stateSyncPoint), bcSpout.BlockHeight())
	b := bcSpout.newBlock()
	require.NoError(t, bcSpout.AddHeaders(&b.Header))

	// Put storage items with the STTemp prefix.
	batch := storage.NewMemCachedStore(bcSpout.dao.Store)
	tempPrefix := storage.STTempStorage
	if bcSpout.dao.Version.StoragePrefix == tempPrefix {
		tempPrefix = storage.STStorage
	}
	bPrefix := make([]byte, 1)
	bPrefix[0] = byte(bcSpout.dao.Version.StoragePrefix)
	bcSpout.dao.Store.Seek(storage.SeekRange{Prefix: bPrefix}, func(k, v []byte) bool {
		key := bytes.Clone(k)
		key[0] = byte(tempPrefix)
		value := bytes.Clone(v)
		batch.Put(key, value)
		return true
	})
	_, err := batch.Persist()
	require.NoError(t, err)

	checkNewBlockchainErr := func(t *testing.T, cfg func(c *config.Config), store storage.Store, errText string) {
		unitTestNetCfg, err := config.Load("../../config", testchain.Network())
		require.NoError(t, err)
		cfg(&unitTestNetCfg)
		log := zaptest.NewLogger(t)
		_, err = NewBlockchain(store, unitTestNetCfg.Blockchain(), log)
		if len(errText) != 0 {
			require.Error(t, err)
			require.True(t, strings.Contains(err.Error(), errText))
		} else {
			require.NoError(t, err)
		}
	}
	boltCfg := func(c *config.Config) {
		spountCfg(c)
		c.ApplicationConfiguration.KeepOnlyLatestState = true
	}
	// Manually store the state jump stage to check the state jump recovery process.
	bPrefix[0] = byte(storage.SYSStateChangeStage)
	t.Run("invalid state jump stage format", func(t *testing.T) {
		bcSpout.dao.Store.Put(bPrefix, []byte{0x01, 0x02})
		checkNewBlockchainErr(t, boltCfg, bcSpout.dao.Store, "invalid state jump stage format")
	})
	t.Run("missing state sync point", func(t *testing.T) {
		bcSpout.dao.Store.Put(bPrefix, []byte{byte(stateJumpStarted)})
		checkNewBlockchainErr(t, boltCfg, bcSpout.dao.Store, "failed to get state sync point from the storage")
	})
	t.Run("invalid RemoveUntraceableBlocks setting", func(t *testing.T) {
		bcSpout.dao.Store.Put(bPrefix, []byte{byte(stateJumpStarted)})
		point := make([]byte, 4)
		binary.LittleEndian.PutUint32(point, uint32(stateSyncPoint))
		bcSpout.dao.Store.Put([]byte{byte(storage.SYSStateSyncPoint)}, point)
		checkNewBlockchainErr(t, func(c *config.Config) {
			boltCfg(c)
			c.ApplicationConfiguration.RemoveUntraceableBlocks = false
		}, bcSpout.dao.Store, "P2PStateExchangeExtensions can be enabled either on MPT-complete node")
	})
	t.Run("invalid state sync point", func(t *testing.T) {
		bcSpout.dao.Store.Put(bPrefix, []byte{byte(stateJumpStarted)})
		point := make([]byte, 4)
		binary.LittleEndian.PutUint32(point, bcSpout.lastHeaderIndex()+1)
		bcSpout.dao.Store.Put([]byte{byte(storage.SYSStateSyncPoint)}, point)
		checkNewBlockchainErr(t, boltCfg, bcSpout.dao.Store, "invalid state sync point")
	})
	for _, stage := range []stateChangeStage{stateJumpStarted, newStorageItemsAdded, staleBlocksRemoved, 0x03} {
		t.Run(fmt.Sprintf("state jump stage %d", stage), func(t *testing.T) {
			bcSpout.dao.Store.Put(bPrefix, []byte{byte(stage)})
			point := make([]byte, 4)
			binary.LittleEndian.PutUint32(point, uint32(stateSyncPoint))
			bcSpout.dao.Store.Put([]byte{byte(storage.SYSStateSyncPoint)}, point)
			var errText string
			if stage == 0x03 {
				errText = "unknown state jump stage"
			}
			checkNewBlockchainErr(t, spountCfg, bcSpout.dao.Store, errText)
		})
	}
}

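// TestChainWithVolatileNumOfValidators checks that blocks are accepted while
// the committee and validators sets change according to CommitteeHistory and
// ValidatorsHistory.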
func TestChainWithVolatileNumOfValidators(t *testing.T) {
	bc := newTestChainWithCustomCfg(t, func(c *config.Config) {
		c.ProtocolConfiguration.ValidatorsCount = 0
		c.ProtocolConfiguration.CommitteeHistory = map[uint32]uint32{
			0:  1,
			4:  4,
			24: 6,
		}
		c.ProtocolConfiguration.ValidatorsHistory = map[uint32]uint32{
			0: 1,
			4: 4,
		}
		require.NoError(t, c.ProtocolConfiguration.Validate())
	})
	require.Equal(t, uint32(0), bc.BlockHeight())

	priv0 := testchain.PrivateKeyByID(0)

	vals := bc.ComputeNextBlockValidators()
	script, err := smartcontract.CreateDefaultMultiSigRedeemScript(vals)
	require.NoError(t, err)
	curWit := transaction.Witness{
		VerificationScript: script,
	}
	for i := 1; i < 26; i++ {
		comm, err := bc.GetCommittee()
		require.NoError(t, err)
		if i < 5 {
			require.Equal(t, 1, len(comm))
		} else if i < 25 {
			require.Equal(t, 4, len(comm))
		} else {
			require.Equal(t, 6, len(comm))
		}
		// Mimic consensus.
		if bc.config.ShouldUpdateCommitteeAt(uint32(i)) {
			vals = bc.ComputeNextBlockValidators()
		} else {
			vals, err = bc.GetNextBlockValidators()
		}
		require.NoError(t, err)
		if i < 4 {
			require.Equalf(t, 1, len(vals), "at %d", i)
		} else {
			require.Equalf(t, 4, len(vals), "at %d", i)
		}
		require.NoError(t, err)
		script, err := smartcontract.CreateDefaultMultiSigRedeemScript(vals)
		require.NoError(t, err)
		nextWit := transaction.Witness{
			VerificationScript: script,
		}
		b := &block.Block{
			Header: block.Header{
				NextConsensus: nextWit.ScriptHash(),
				Script:        curWit,
			},
		}
		curWit = nextWit
		b.PrevHash = bc.GetHeaderHash(uint32(i) - 1)
		b.Timestamp = uint64(time.Now().UTC().Unix())*1000 + uint64(i)
		b.Index = uint32(i)
		b.RebuildMerkleRoot()
		if i < 5 {
			signa := priv0.SignHashable(uint32(bc.config.Magic), b)
			b.Script.InvocationScript = append([]byte{byte(opcode.PUSHDATA1), byte(len(signa))}, signa...)
		} else {
			b.Script.InvocationScript = testchain.Sign(b)
		}
		err = bc.AddBlock(b)
		require.NoErrorf(t, err, "at %d", i)
	}
}

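// setSigner sets a single Global-scoped signer with the given account on tx.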
func setSigner(tx *transaction.Transaction, h util.Uint160) {
	tx.Signers = []transaction.Signer{{
		Account: h,
		Scopes:  transaction.Global,
	}}
}

// This test checks that the BaseExecFee value returned from the corresponding
// Blockchain method matches the one provided to the constructor of a new
// interop context.
func TestBlockchain_BaseExecFeeBaseStoragePrice_Compat(t *testing.T) {
	bc := newTestChain(t)

	check := func(t *testing.T) {
		ic := bc.newInteropContext(trigger.Application, bc.dao, bc.topBlock.Load().(*block.Block), nil)
		require.Equal(t, bc.GetBaseExecFee(), ic.BaseExecFee())
		require.Equal(t, bc.GetStoragePrice(), ic.BaseStorageFee())
	}
	t.Run("zero block", func(t *testing.T) {
		check(t)
	})
	t.Run("non-zero block", func(t *testing.T) {
		require.NoError(t, bc.AddBlock(bc.newBlock()))
		check(t)
	})
}

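// TestBlockchain_IsRunning checks that the running flag is false before Run,
// true while the chain is running and false again after Close.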
func TestBlockchain_IsRunning(t *testing.T) {
	chain := initTestChain(t, nil, nil)
	require.False(t, chain.isRunning.Load().(bool))
	oldPersisted := atomic.LoadUint32(&chain.persistedHeight)

	go chain.Run()
	require.NoError(t, chain.AddBlock(chain.newBlock()))
	require.Eventually(t, func() bool {
		persisted := atomic.LoadUint32(&chain.persistedHeight)
		return persisted > oldPersisted
	}, 2*persistInterval, 100*time.Millisecond)
	require.True(t, chain.isRunning.Load().(bool))

	chain.Close()
	require.False(t, chain.isRunning.Load().(bool))
}

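// TestNewBlockchain_InitHardforks checks how the hardfork configuration is
// initialized from nil, empty and partially or fully filled Hardforks sets.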
func TestNewBlockchain_InitHardforks(t *testing.T) {
	t.Run("nil set", func(t *testing.T) {
		bc := newTestChainWithCustomCfg(t, func(c *config.Config) {
			c.ProtocolConfiguration.Hardforks = nil
			require.NoError(t, c.ProtocolConfiguration.Validate())
		})
		require.Equal(t, map[string]uint32{
			config.HFAspidochelone.String(): 0,
			config.HFBasilisk.String():      0,
			config.HFCockatrice.String():    0,
			config.HFDomovoi.String():       0,
			config.HFEchidna.String():       0,
		}, bc.GetConfig().Hardforks)
	})
	t.Run("empty set", func(t *testing.T) {
		bc := newTestChainWithCustomCfg(t, func(c *config.Config) {
			c.ProtocolConfiguration.Hardforks = map[string]uint32{}
			require.NoError(t, c.ProtocolConfiguration.Validate())
		})
		require.Equal(t, map[string]uint32{}, bc.GetConfig().Hardforks)
	})
	t.Run("missing old", func(t *testing.T) {
		bc := newTestChainWithCustomCfg(t, func(c *config.Config) {
			c.ProtocolConfiguration.Hardforks = map[string]uint32{config.HFBasilisk.String(): 5}
			require.NoError(t, c.ProtocolConfiguration.Validate())
		})
		require.Equal(t, map[string]uint32{
			config.HFAspidochelone.String(): 0,
			config.HFBasilisk.String():      5,
		}, bc.GetConfig().Hardforks)
	})
	t.Run("missing new", func(t *testing.T) {
		bc := newTestChainWithCustomCfg(t, func(c *config.Config) {
			c.ProtocolConfiguration.Hardforks = map[string]uint32{config.HFAspidochelone.String(): 5}
			require.NoError(t, c.ProtocolConfiguration.Validate())
		})
		require.Equal(t, map[string]uint32{
			config.HFAspidochelone.String(): 5,
		}, bc.GetConfig().Hardforks)
	})
	t.Run("all present", func(t *testing.T) {
		bc := newTestChainWithCustomCfg(t, func(c *config.Config) {
			c.ProtocolConfiguration.Hardforks = map[string]uint32{config.HFAspidochelone.String(): 5, config.HFBasilisk.String(): 10, config.HFCockatrice.String(): 15, config.HFDomovoi.String(): 20, config.HFEchidna.String(): 25}
			require.NoError(t, c.ProtocolConfiguration.Validate())
		})
		require.Equal(t, map[string]uint32{
			config.HFAspidochelone.String(): 5,
			config.HFBasilisk.String():      10,
			config.HFCockatrice.String():    15,
			config.HFDomovoi.String():       20,
			config.HFEchidna.String():       25,
		}, bc.GetConfig().Hardforks)
	})
}

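// nopCloserStorage is a storage.Store wrapper with a no-op Close, letting
// tests reuse the same underlying store across Blockchain restarts.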
type nopCloserStorage struct {
	storage.Store
}

func (nopCloserStorage) Close() error {
	return nil
}

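// TestBlockchainRestoreStateRootInHeader checks that a dump of a chain with
// StateRootInHeader enabled can be restored into both empty and non-empty
// stores, with the local state root surviving a chain restart.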
func TestBlockchainRestoreStateRootInHeader(t *testing.T) {
	bc := newTestChainWithCustomCfg(t, func(c *config.Config) {
		c.ProtocolConfiguration.StateRootInHeader = true
	})
	require.NoError(t, bc.AddBlock(bc.newBlock()))
	require.NoError(t, bc.AddBlock(bc.newBlock()))
	require.NoError(t, bc.AddBlock(bc.newBlock()))

	w := io.NewBufBinWriter()
	require.NoError(t, chaindump.Dump(bc, w.BinWriter, 0, 4))
	require.NoError(t, w.Err)

	data := w.Bytes()
	fcfg := func(c *config.Config) { c.ProtocolConfiguration.StateRootInHeader = true }
	initChain := func(st storage.Store) *Blockchain {
		chain := initTestChain(t, st, fcfg)
		go chain.Run()
		return chain
	}

	t.Run("empty MemoryStore", func(t *testing.T) {
		bc := initChain(storage.NewMemoryStore())
		r := io.NewBinReaderFromBuf(data)
		require.NoError(t, chaindump.Restore(bc, r, 0, 4, nil))
		bc.Close()
	})
	t.Run("not empty MemoryStore, one block", func(t *testing.T) {
		st := nopCloserStorage{Store: storage.NewMemoryStore()}

		bc := initChain(st)
		r := io.NewBinReaderFromBuf(data)
		require.NoError(t, chaindump.Restore(bc, r, 0, 1, nil))
		expected := bc.stateRoot.CurrentLocalStateRoot()
		bc.Close()

		bc = initChain(st)
		actual := bc.stateRoot.CurrentLocalStateRoot()
		require.Equal(t, expected, actual)
		r = io.NewBinReaderFromBuf(data)
		require.NoError(t, chaindump.Restore(bc, r, 1, 1, nil))
		bc.Close()
	})
	t.Run("not empty MemoryStore, multiple blocks", func(t *testing.T) {
		st := nopCloserStorage{Store: storage.NewMemoryStore()}
		{
			bc := initChain(st)
			r := io.NewBinReaderFromBuf(data)
			require.NoError(t, chaindump.Restore(bc, r, 0, 2, nil))
			expected := bc.stateRoot.CurrentLocalStateRoot()
			bc.Close()

			bc = initChain(st)
			actual := bc.stateRoot.CurrentLocalStateRoot()
			require.Equal(t, expected, actual)
			r = io.NewBinReaderFromBuf(data)
			require.NoError(t, chaindump.Restore(bc, r, 2, 1, nil))
			bc.Close()
		}
	})
}