forked from TrueCloudLab/neoneo-go
parent 3b45325319
commit 4a74c117ee
12 changed files with 752 additions and 815 deletions
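Most of this diff moves the basic-chain tests off the internal core helpers (newTestChain/initBasicChain working on *Blockchain) and onto the external neotest framework. For orientation, here is a minimal sketch of the setup pattern the migrated tests use, distilled only from calls visible below; the test name and body are illustrative, not part of the commit:

package core_test

import (
    "testing"

    "github.com/nspcc-dev/neo-go/pkg/config"
    "github.com/nspcc-dev/neo-go/pkg/neotest"
    "github.com/nspcc-dev/neo-go/pkg/neotest/chain"
)

func TestNeotestSetupSketch(t *testing.T) {
    // Multi-validator chain with a customised protocol configuration.
    bc, validators, committee := chain.NewMultiWithCustomConfig(t, func(cfg *config.ProtocolConfiguration) {
        cfg.P2PSigExtensions = true // required by initBasicChain below
    })
    // The executor signs transactions and adds blocks on top of bc.
    e := neotest.NewExecutor(t, bc, validators, committee)
    initBasicChain(t, e) // helper introduced in basic_chain_test.go below
}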
pkg/core/basic_chain_test.go  (new file, 275 lines)
@@ -0,0 +1,275 @@
package core_test

import (
    "encoding/base64"
    "encoding/hex"
    "math/big"
    "os"
    "path"
    "path/filepath"
    "testing"

    "github.com/nspcc-dev/neo-go/pkg/config"
    "github.com/nspcc-dev/neo-go/pkg/core/chaindump"
    "github.com/nspcc-dev/neo-go/pkg/core/native"
    "github.com/nspcc-dev/neo-go/pkg/core/native/nativenames"
    "github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
    "github.com/nspcc-dev/neo-go/pkg/interop/native/roles"
    "github.com/nspcc-dev/neo-go/pkg/io"
    "github.com/nspcc-dev/neo-go/pkg/neotest"
    "github.com/nspcc-dev/neo-go/pkg/neotest/chain"
    "github.com/nspcc-dev/neo-go/pkg/rpc/client/nns"
    "github.com/nspcc-dev/neo-go/pkg/util"
    "github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
    "github.com/nspcc-dev/neo-go/pkg/wallet"
    "github.com/stretchr/testify/require"
)

const (
    // examplesPrefix is a prefix of the example smart-contracts.
    examplesPrefix = "../../examples/"
    // basicChainPrefix is a prefix used to store Basic chain .acc file for tests.
    // It is also used to retrieve smart contracts that should be deployed to
    // Basic chain.
    basicChainPrefix = "../rpc/server/testdata/"
)

var notaryModulePath = filepath.Join("..", "services", "notary")

// TestCreateBasicChain generates "../rpc/testdata/testblocks.acc" file which
// contains data for RPC unit tests. It also is a nice integration test.
// To generate new "../rpc/testdata/testblocks.acc", follow the steps:
// 1. Set saveChain down below to true
// 2. Run tests with `$ make test`
func TestCreateBasicChain(t *testing.T) {
    const saveChain = false

    bc, validators, committee := chain.NewMultiWithCustomConfig(t, func(cfg *config.ProtocolConfiguration) {
        cfg.P2PSigExtensions = true
    })
    e := neotest.NewExecutor(t, bc, validators, committee)

    initBasicChain(t, e)

    if saveChain {
        outStream, err := os.Create(basicChainPrefix + "testblocks.acc")
        require.NoError(t, err)
        t.Cleanup(func() {
            outStream.Close()
        })

        writer := io.NewBinWriterFromIO(outStream)
        writer.WriteU32LE(bc.BlockHeight())
        err = chaindump.Dump(bc, writer, 1, bc.BlockHeight())
        require.NoError(t, err)
    }

    require.False(t, saveChain)
}

func initBasicChain(t *testing.T, e *neotest.Executor) {
    if !e.Chain.GetConfig().P2PSigExtensions {
        t.Fatal("P2PSitExtensions should be enabled to init basic chain")
    }

    const neoAmount = 99999000

    gasHash := e.NativeHash(t, nativenames.Gas)
    neoHash := e.NativeHash(t, nativenames.Neo)
    policyHash := e.NativeHash(t, nativenames.Policy)
    notaryHash := e.NativeHash(t, nativenames.Notary)
    designationHash := e.NativeHash(t, nativenames.Designation)
    t.Logf("native GAS hash: %v", gasHash)
    t.Logf("native NEO hash: %v", neoHash)
    t.Logf("native Policy hash: %v", policyHash)
    t.Logf("native Notary hash: %v", notaryHash)
    t.Logf("Block0 hash: %s", e.Chain.GetHeaderHash(0).StringLE())

    acc0 := e.Validator.(neotest.MultiSigner).Single(2) // priv0 index->order and order->index conversion
    priv0ScriptHash := acc0.ScriptHash()
    acc1 := e.Validator.(neotest.MultiSigner).Single(0) // priv1 index->order and order->index conversion
    priv1ScriptHash := acc1.ScriptHash()
    neoValidatorInvoker := e.ValidatorInvoker(neoHash)
    gasValidatorInvoker := e.ValidatorInvoker(gasHash)
    neoPriv0Invoker := e.NewInvoker(neoHash, acc0)
    gasPriv0Invoker := e.NewInvoker(gasHash, acc0)
    designateSuperInvoker := e.NewInvoker(designationHash, e.Validator, e.Committee)

    deployContractFromPriv0 := func(t *testing.T, path, contractName string, configPath string, expectedID int32) (util.Uint256, util.Uint256, util.Uint160) {
        txDeployHash, cH := newDeployTx(t, e, acc0, path, configPath, true)
        b := e.TopBlock(t)
        return b.Hash(), txDeployHash, cH
    }

    e.CheckGASBalance(t, priv0ScriptHash, big.NewInt(5000_0000)) // gas bounty

    // Block #1: move 1000 GAS and neoAmount NEO to priv0.
    txMoveNeo := neoValidatorInvoker.PrepareInvoke(t, "transfer", e.Validator.ScriptHash(), priv0ScriptHash, neoAmount, nil)
    txMoveGas := gasValidatorInvoker.PrepareInvoke(t, "transfer", e.Validator.ScriptHash(), priv0ScriptHash, int64(fixedn.Fixed8FromInt64(1000)), nil)
    b := e.AddNewBlock(t, txMoveNeo, txMoveGas)
    e.CheckHalt(t, txMoveNeo.Hash(), stackitem.Make(true))
    e.CheckHalt(t, txMoveGas.Hash(), stackitem.Make(true))
    t.Logf("Block1 hash: %s", b.Hash().StringLE())
    bw := io.NewBufBinWriter()
    b.EncodeBinary(bw.BinWriter)
    require.NoError(t, bw.Err)
    jsonB, err := b.MarshalJSON()
    require.NoError(t, err)
    t.Logf("Block1 base64: %s", base64.StdEncoding.EncodeToString(bw.Bytes()))
    t.Logf("Block1 JSON: %s", string(jsonB))
    bw.Reset()
    b.Header.EncodeBinary(bw.BinWriter)
    require.NoError(t, bw.Err)
    jsonH, err := b.Header.MarshalJSON()
    require.NoError(t, err)
    t.Logf("Header1 base64: %s", base64.StdEncoding.EncodeToString(bw.Bytes()))
    t.Logf("Header1 JSON: %s", string(jsonH))
    jsonTxMoveNeo, err := txMoveNeo.MarshalJSON()
    require.NoError(t, err)
    t.Logf("txMoveNeo hash: %s", txMoveNeo.Hash().StringLE())
    t.Logf("txMoveNeo JSON: %s", string(jsonTxMoveNeo))
    t.Logf("txMoveNeo base64: %s", base64.StdEncoding.EncodeToString(txMoveNeo.Bytes()))
    t.Logf("txMoveGas hash: %s", txMoveGas.Hash().StringLE())

    e.EnsureGASBalance(t, priv0ScriptHash, func(balance *big.Int) bool { return balance.Cmp(big.NewInt(1000*native.GASFactor)) >= 0 })
    // info for getblockheader rpc tests
    t.Logf("header hash: %s", b.Hash().StringLE())
    buf := io.NewBufBinWriter()
    b.Header.EncodeBinary(buf.BinWriter)
    t.Logf("header: %s", hex.EncodeToString(buf.Bytes()))

    // Block #2: deploy test_contract (Rubles contract).
    cfgPath := basicChainPrefix + "test_contract.yml"
    block2H, txDeployH, cHash := deployContractFromPriv0(t, basicChainPrefix+"test_contract.go", "Rubl", cfgPath, 1)
    t.Logf("txDeploy: %s", txDeployH.StringLE())
    t.Logf("Block2 hash: %s", block2H.StringLE())

    // Block #3: invoke `putValue` method on the test_contract.
    rublPriv0Invoker := e.NewInvoker(cHash, acc0)
    txInvH := rublPriv0Invoker.Invoke(t, true, "putValue", "testkey", "testvalue")
    t.Logf("txInv: %s", txInvH.StringLE())

    // Block #4: transfer 1000 NEO from priv0 to priv1.
    neoPriv0Invoker.Invoke(t, true, "transfer", priv0ScriptHash, priv1ScriptHash, 1000, nil)

    // Block #5: initialize rubles contract and transfer 1000 rubles from the contract to priv0.
    initTx := rublPriv0Invoker.PrepareInvoke(t, "init")
    transferTx := e.NewUnsignedTx(t, rublPriv0Invoker.Hash, "transfer", cHash, priv0ScriptHash, 1000, nil)
    e.SignTx(t, transferTx, 1500_0000, acc0) // Set system fee manually to avoid verification failure.
    e.AddNewBlock(t, initTx, transferTx)
    e.CheckHalt(t, initTx.Hash(), stackitem.NewBool(true))
    e.CheckHalt(t, transferTx.Hash(), stackitem.Make(true))
    t.Logf("receiveRublesTx: %v", transferTx.Hash().StringLE())

    // Block #6: transfer 123 rubles from priv0 to priv1
    transferTxH := rublPriv0Invoker.Invoke(t, true, "transfer", priv0ScriptHash, priv1ScriptHash, 123, nil)
    t.Logf("sendRublesTx: %v", transferTxH.StringLE())

    // Block #7: push verification contract into the chain.
    verifyPath := filepath.Join(basicChainPrefix, "verify", "verification_contract.go")
    verifyCfg := filepath.Join(basicChainPrefix, "verify", "verification_contract.yml")
    _, _, _ = deployContractFromPriv0(t, verifyPath, "Verify", verifyCfg, 2)

    // Block #8: deposit some GAS to notary contract for priv0.
    transferTxH = gasPriv0Invoker.Invoke(t, true, "transfer", priv0ScriptHash, notaryHash, 10_0000_0000, []interface{}{priv0ScriptHash, int64(e.Chain.BlockHeight() + 1000)})
    t.Logf("notaryDepositTxPriv0: %v", transferTxH.StringLE())

    // Block #9: designate new Notary node.
    ntr, err := wallet.NewWalletFromFile(path.Join(notaryModulePath, "./testdata/notary1.json"))
    require.NoError(t, err)
    require.NoError(t, ntr.Accounts[0].Decrypt("one", ntr.Scrypt))
    designateSuperInvoker.Invoke(t, stackitem.Null{}, "designateAsRole",
        int64(roles.P2PNotary), []interface{}{ntr.Accounts[0].PrivateKey().PublicKey().Bytes()})
    t.Logf("Designated Notary node: %s", hex.EncodeToString(ntr.Accounts[0].PrivateKey().PublicKey().Bytes()))

    // Block #10: push verification contract with arguments into the chain.
    verifyPath = filepath.Join(basicChainPrefix, "verify_args", "verification_with_args_contract.go")
    verifyCfg = filepath.Join(basicChainPrefix, "verify_args", "verification_with_args_contract.yml")
    _, _, _ = deployContractFromPriv0(t, verifyPath, "VerifyWithArgs", verifyCfg, 3) // block #10

    // Block #11: push NameService contract into the chain.
    nsPath := examplesPrefix + "nft-nd-nns/"
    nsConfigPath := nsPath + "nns.yml"
    _, _, nsHash := deployContractFromPriv0(t, nsPath, nsPath, nsConfigPath, 4) // block #11
    nsCommitteeInvoker := e.CommitteeInvoker(nsHash)
    nsPriv0Invoker := e.NewInvoker(nsHash, acc0)

    // Block #12: transfer funds to committee for further NS record registration.
    gasValidatorInvoker.Invoke(t, true, "transfer",
        e.Validator.ScriptHash(), e.Committee.ScriptHash(), 1000_00000000, nil) // block #12

    // Block #13: add `.com` root to NNS.
    nsCommitteeInvoker.Invoke(t, stackitem.Null{}, "addRoot", "com") // block #13

    // Block #14: register `neo.com` via NNS.
    registerTxH := nsPriv0Invoker.Invoke(t, true, "register",
        "neo.com", priv0ScriptHash) // block #14
    res := e.GetTxExecResult(t, registerTxH)
    require.Equal(t, 1, len(res.Events)) // transfer
    tokenID, err := res.Events[0].Item.Value().([]stackitem.Item)[3].TryBytes()
    require.NoError(t, err)
    t.Logf("NNS token #1 ID (hex): %s", hex.EncodeToString(tokenID))

    // Block #15: set A record type with priv0 owner via NNS.
    nsPriv0Invoker.Invoke(t, stackitem.Null{}, "setRecord", "neo.com", int64(nns.A), "1.2.3.4") // block #15

    // Block #16: invoke `test_contract.go`: put new value with the same key to check `getstate` RPC call
    txPutNewValue := rublPriv0Invoker.PrepareInvoke(t, "putValue", "testkey", "newtestvalue")
    // Invoke `test_contract.go`: put values to check `findstates` RPC call.
    txPut1 := rublPriv0Invoker.PrepareInvoke(t, "putValue", "aa", "v1")
    txPut2 := rublPriv0Invoker.PrepareInvoke(t, "putValue", "aa10", "v2")
    txPut3 := rublPriv0Invoker.PrepareInvoke(t, "putValue", "aa50", "v3")
    e.AddNewBlock(t, txPutNewValue, txPut1, txPut2, txPut3) // block #16
    e.CheckHalt(t, txPutNewValue.Hash(), stackitem.NewBool(true))
    e.CheckHalt(t, txPut1.Hash(), stackitem.NewBool(true))
    e.CheckHalt(t, txPut2.Hash(), stackitem.NewBool(true))
    e.CheckHalt(t, txPut3.Hash(), stackitem.NewBool(true))

    // Block #17: deploy NeoFS Object contract (NEP11-Divisible).
    nfsPath := examplesPrefix + "nft-d/"
    nfsConfigPath := nfsPath + "nft.yml"
    _, _, nfsHash := deployContractFromPriv0(t, nfsPath, nfsPath, nfsConfigPath, 5) // block #17
    nfsPriv0Invoker := e.NewInvoker(nfsHash, acc0)
    nfsPriv1Invoker := e.NewInvoker(nfsHash, acc1)

    // Block #18: mint 1.00 NFSO token by transferring 10 GAS to NFSO contract.
    containerID := util.Uint256{1, 2, 3}
    objectID := util.Uint256{4, 5, 6}
    txGas0toNFSH := gasPriv0Invoker.Invoke(t, true, "transfer",
        priv0ScriptHash, nfsHash, 10_0000_0000, []interface{}{containerID.BytesBE(), objectID.BytesBE()}) // block #18
    res = e.GetTxExecResult(t, txGas0toNFSH)
    require.Equal(t, 2, len(res.Events)) // GAS transfer + NFSO transfer
    tokenID, err = res.Events[1].Item.Value().([]stackitem.Item)[3].TryBytes()
    require.NoError(t, err)
    t.Logf("NFSO token #1 ID (hex): %s", hex.EncodeToString(tokenID))

    // Block #19: transfer 0.25 NFSO from priv0 to priv1.
    nfsPriv0Invoker.Invoke(t, true, "transfer", priv0ScriptHash, priv1ScriptHash, 25, tokenID, nil) // block #19

    // Block #20: transfer 1000 GAS to priv1.
    gasValidatorInvoker.Invoke(t, true, "transfer", e.Validator.ScriptHash(),
        priv1ScriptHash, int64(fixedn.Fixed8FromInt64(1000)), nil) // block #20

    // Block #21: transfer 0.05 NFSO from priv1 back to priv0.
    nfsPriv1Invoker.Invoke(t, true, "transfer", priv1ScriptHash, priv0ScriptHash, 5, tokenID, nil) // block #21

    // Compile contract to test `invokescript` RPC call
    invokePath := filepath.Join(basicChainPrefix, "invoke", "invokescript_contract.go")
    invokeCfg := filepath.Join(basicChainPrefix, "invoke", "invoke.yml")
    _, _ = newDeployTx(t, e, acc0, invokePath, invokeCfg, false)

    // Prepare some transaction for future submission.
    txSendRaw := neoPriv0Invoker.PrepareInvoke(t, "transfer", priv0ScriptHash, priv1ScriptHash, int64(fixedn.Fixed8FromInt64(1000)), nil)
    bw.Reset()
    txSendRaw.EncodeBinary(bw.BinWriter)
    t.Logf("sendrawtransaction: \n\tbase64: %s\n\tHash LE: %s", base64.StdEncoding.EncodeToString(bw.Bytes()), txSendRaw.Hash().StringLE())
}

func newDeployTx(t *testing.T, e *neotest.Executor, sender neotest.Signer, sourcePath, configPath string, deploy bool) (util.Uint256, util.Uint160) {
    c := neotest.CompileFile(t, sender.ScriptHash(), sourcePath, configPath)
    t.Logf("contract (%s): \n\tHash: %s\n\tAVM: %s", sourcePath, c.Hash.StringLE(), base64.StdEncoding.EncodeToString(c.NEF.Script))
    if deploy {
        return e.DeployContractBy(t, sender, c, nil), c.Hash
    }
    return util.Uint256{}, c.Hash
}
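A note on the saveChain branch above: the generated .acc file is a little-endian block count (WriteU32LE) followed by the serialized blocks (chaindump.Dump), and the RPC server tests read it back with chaindump.Restore. A rough sketch of that reverse path, using only the chaindump/io calls that appear in this diff (the file handling and the target chain bc2 are assumptions, not part of the commit):

// Sketch: load testblocks.acc back into a freshly created chain bc2.
f, err := os.Open(basicChainPrefix + "testblocks.acc")
require.NoError(t, err)
defer f.Close()

br := io.NewBinReaderFromIO(f)
count := br.ReadU32LE() // block count written by WriteU32LE above
require.NoError(t, chaindump.Restore(bc2, br, 0, count, nil))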
@@ -16,13 +16,10 @@ import (
 	"github.com/nspcc-dev/neo-go/pkg/config"
 	"github.com/nspcc-dev/neo-go/pkg/config/netmode"
 	"github.com/nspcc-dev/neo-go/pkg/core/block"
-	"github.com/nspcc-dev/neo-go/pkg/core/chaindump"
-	"github.com/nspcc-dev/neo-go/pkg/core/dao"
 	"github.com/nspcc-dev/neo-go/pkg/core/fee"
 	"github.com/nspcc-dev/neo-go/pkg/core/interop/interopnames"
 	"github.com/nspcc-dev/neo-go/pkg/core/mempool"
 	"github.com/nspcc-dev/neo-go/pkg/core/native"
-	"github.com/nspcc-dev/neo-go/pkg/core/native/nativenames"
 	"github.com/nspcc-dev/neo-go/pkg/core/native/nativeprices"
 	"github.com/nspcc-dev/neo-go/pkg/core/native/noderoles"
 	"github.com/nspcc-dev/neo-go/pkg/core/state"
@@ -1523,77 +1520,6 @@ func TestSubscriptions(t *testing.T) {
 		require.NoError(t, err)
 }
-
-func testDumpAndRestore(t *testing.T, dumpF, restoreF func(c *config.Config)) {
-	if restoreF == nil {
-		restoreF = dumpF
-	}
-
-	bc := newTestChainWithCustomCfg(t, dumpF)
-
-	initBasicChain(t, bc)
-	require.True(t, bc.BlockHeight() > 5) // ensure that test is valid
-
-	w := io.NewBufBinWriter()
-	require.NoError(t, chaindump.Dump(bc, w.BinWriter, 0, bc.BlockHeight()+1))
-	require.NoError(t, w.Err)
-
-	buf := w.Bytes()
-	t.Run("invalid start", func(t *testing.T) {
-		bc2 := newTestChainWithCustomCfg(t, restoreF)
-
-		r := io.NewBinReaderFromBuf(buf)
-		require.Error(t, chaindump.Restore(bc2, r, 2, 1, nil))
-	})
-	t.Run("good", func(t *testing.T) {
-		bc2 := newTestChainWithCustomCfg(t, restoreF)
-
-		r := io.NewBinReaderFromBuf(buf)
-		require.NoError(t, chaindump.Restore(bc2, r, 0, 2, nil))
-		require.Equal(t, uint32(1), bc2.BlockHeight())
-
-		r = io.NewBinReaderFromBuf(buf) // new reader because start is relative to dump
-		require.NoError(t, chaindump.Restore(bc2, r, 2, 1, nil))
-		t.Run("check handler", func(t *testing.T) {
-			lastIndex := uint32(0)
-			errStopped := errors.New("stopped")
-			f := func(b *block.Block) error {
-				lastIndex = b.Index
-				if b.Index >= bc.BlockHeight()-1 {
-					return errStopped
-				}
-				return nil
-			}
-			require.NoError(t, chaindump.Restore(bc2, r, 0, 1, f))
-			require.Equal(t, bc2.BlockHeight(), lastIndex)
-
-			r = io.NewBinReaderFromBuf(buf)
-			err := chaindump.Restore(bc2, r, 4, bc.BlockHeight()-bc2.BlockHeight(), f)
-			require.True(t, errors.Is(err, errStopped))
-			require.Equal(t, bc.BlockHeight()-1, lastIndex)
-		})
-	})
-}
-
-func TestDumpAndRestore(t *testing.T) {
-	t.Run("no state root", func(t *testing.T) {
-		testDumpAndRestore(t, func(c *config.Config) {
-			c.ProtocolConfiguration.StateRootInHeader = false
-		}, nil)
-	})
-	t.Run("with state root", func(t *testing.T) {
-		testDumpAndRestore(t, func(c *config.Config) {
-			c.ProtocolConfiguration.StateRootInHeader = true
-		}, nil)
-	})
-	t.Run("remove untraceable", func(t *testing.T) {
-		// Dump can only be created if all blocks and transactions are present.
-		testDumpAndRestore(t, nil, func(c *config.Config) {
-			c.ProtocolConfiguration.MaxTraceableBlocks = 2
-			c.ProtocolConfiguration.RemoveUntraceableBlocks = true
-		})
-	})
-}
-
 func TestRemoveOldTransfers(t *testing.T) {
 	// Creating proper number of transfers/blocks takes unneccessary time, so emulate
 	// some DB with stale entries.
@@ -1841,7 +1767,11 @@ func TestBlockchain_InitWithIncompleteStateJump(t *testing.T) {
 		c.ProtocolConfiguration.KeepOnlyLatestState = true
 	}
 	bcSpout := newTestChainWithCustomCfg(t, spountCfg)
-	initBasicChain(t, bcSpout)
+
+	// Generate some content.
+	for i := 0; i < len(bcSpout.GetConfig().StandbyCommittee); i++ {
+		require.NoError(t, bcSpout.AddBlock(bcSpout.newBlock()))
+	}
 
 	// reach next to the latest state sync point and pretend that we've just restored
 	stateSyncPoint := (int(bcSpout.BlockHeight())/stateSyncInterval + 1) * stateSyncInterval
@@ -2007,227 +1937,3 @@ func setSigner(tx *transaction.Transaction, h util.Uint160) {
 		Scopes: transaction.Global,
 	}}
 }
-
-func TestBlockchain_StartFromExistingDB(t *testing.T) {
-	ps, path := newLevelDBForTestingWithPath(t, "")
-	customConfig := func(c *config.Config) {
-		c.ProtocolConfiguration.StateRootInHeader = true // Need for P2PStateExchangeExtensions check.
-	}
-	bc := initTestChain(t, ps, customConfig)
-	go bc.Run()
-	initBasicChain(t, bc)
-	require.True(t, bc.BlockHeight() > 5, "ensure that basic chain is correctly initialised")
-
-	// Information for further tests.
-	h := bc.BlockHeight()
-	cryptoLibHash, err := bc.GetNativeContractScriptHash(nativenames.CryptoLib)
-	require.NoError(t, err)
-	cryptoLibState := bc.GetContractState(cryptoLibHash)
-	require.NotNil(t, cryptoLibState)
-	var (
-		managementID = -1
-		managementContractPrefix = 8
-	)
-
-	bc.Close() // Ensure persist is done and persistent store is properly closed.
-
-	newPS := func(t *testing.T) storage.Store {
-		ps, _ = newLevelDBForTestingWithPath(t, path)
-		t.Cleanup(func() { require.NoError(t, ps.Close()) })
-		return ps
-	}
-	t.Run("mismatch storage version", func(t *testing.T) {
-		ps = newPS(t)
-		cache := storage.NewMemCachedStore(ps) // Extra wrapper to avoid good DB corruption.
-		d := dao.NewSimple(cache, bc.config.StateRootInHeader, bc.config.P2PStateExchangeExtensions)
-		d.PutVersion(dao.Version{
-			Value: "0.0.0",
-		})
-		_, err := d.Persist() // Persist to `cache` wrapper.
-		require.NoError(t, err)
-		_, err = initTestChainNoCheck(t, cache, customConfig)
-		require.Error(t, err)
-		require.True(t, strings.Contains(err.Error(), "storage version mismatch"))
-	})
-	t.Run("mismatch StateRootInHeader", func(t *testing.T) {
-		ps = newPS(t)
-		_, err := initTestChainNoCheck(t, ps, func(c *config.Config) {
-			customConfig(c)
-			c.ProtocolConfiguration.StateRootInHeader = false
-		})
-		require.Error(t, err)
-		require.True(t, strings.Contains(err.Error(), "StateRootInHeader setting mismatch"))
-	})
-	t.Run("mismatch P2PSigExtensions", func(t *testing.T) {
-		ps = newPS(t)
-		_, err := initTestChainNoCheck(t, ps, func(c *config.Config) {
-			customConfig(c)
-			c.ProtocolConfiguration.P2PSigExtensions = false
-		})
-		require.Error(t, err)
-		require.True(t, strings.Contains(err.Error(), "P2PSigExtensions setting mismatch"))
-	})
-	t.Run("mismatch P2PStateExchangeExtensions", func(t *testing.T) {
-		ps = newPS(t)
-		_, err := initTestChainNoCheck(t, ps, func(c *config.Config) {
-			customConfig(c)
-			c.ProtocolConfiguration.StateRootInHeader = true
-			c.ProtocolConfiguration.P2PStateExchangeExtensions = true
-		})
-		require.Error(t, err)
-		require.True(t, strings.Contains(err.Error(), "P2PStateExchangeExtensions setting mismatch"))
-	})
-	t.Run("mismatch KeepOnlyLatestState", func(t *testing.T) {
-		ps = newPS(t)
-		_, err := initTestChainNoCheck(t, ps, func(c *config.Config) {
-			customConfig(c)
-			c.ProtocolConfiguration.KeepOnlyLatestState = true
-		})
-		require.Error(t, err)
-		require.True(t, strings.Contains(err.Error(), "KeepOnlyLatestState setting mismatch"))
-	})
-	t.Run("corrupted headers", func(t *testing.T) {
-		ps = newPS(t)
-
-		// Corrupt headers hashes batch.
-		cache := storage.NewMemCachedStore(ps) // Extra wrapper to avoid good DB corruption.
-		key := make([]byte, 5)
-		key[0] = byte(storage.IXHeaderHashList)
-		binary.BigEndian.PutUint32(key[1:], 1)
-		cache.Put(key, []byte{1, 2, 3})
-
-		_, err := initTestChainNoCheck(t, cache, customConfig)
-		require.Error(t, err)
-		require.True(t, strings.Contains(err.Error(), "failed to read batch of 2000"))
-	})
-	t.Run("corrupted current header height", func(t *testing.T) {
-		ps = newPS(t)
-
-		// Remove current header.
-		cache := storage.NewMemCachedStore(ps) // Extra wrapper to avoid good DB corruption.
-		cache.Delete([]byte{byte(storage.SYSCurrentHeader)})
-
-		_, err := initTestChainNoCheck(t, cache, customConfig)
-		require.Error(t, err)
-		require.True(t, strings.Contains(err.Error(), "failed to retrieve current header"))
-	})
-	t.Run("missing last batch of 2000 headers and missing last header", func(t *testing.T) {
-		ps = newPS(t)
-
-		// Remove latest headers hashes batch and current header.
-		cache := storage.NewMemCachedStore(ps) // Extra wrapper to avoid good DB corruption.
-		cache.Delete([]byte{byte(storage.IXHeaderHashList)})
-		currHeaderInfo, err := cache.Get([]byte{byte(storage.SYSCurrentHeader)})
-		require.NoError(t, err)
-		currHeaderHash, err := util.Uint256DecodeBytesLE(currHeaderInfo[:32])
-		require.NoError(t, err)
-		cache.Delete(append([]byte{byte(storage.DataExecutable)}, currHeaderHash.BytesBE()...))
-
-		_, err = initTestChainNoCheck(t, cache, customConfig)
-		require.Error(t, err)
-		require.True(t, strings.Contains(err.Error(), "could not get header"))
-	})
-	t.Run("missing last block", func(t *testing.T) {
-		ps = newPS(t)
-
-		// Remove current block from storage.
-		cache := storage.NewMemCachedStore(ps) // Extra wrapper to avoid good DB corruption.
-		cache.Delete([]byte{byte(storage.SYSCurrentBlock)})
-
-		_, err := initTestChainNoCheck(t, cache, customConfig)
-		require.Error(t, err)
-		require.True(t, strings.Contains(err.Error(), "failed to retrieve current block height"))
-	})
-	t.Run("missing last stateroot", func(t *testing.T) {
-		ps = newPS(t)
-
-		// Remove latest stateroot from storage.
-		cache := storage.NewMemCachedStore(ps) // Extra wrapper to avoid good DB corruption.
-		key := make([]byte, 5)
-		key[0] = byte(storage.DataMPTAux)
-		binary.BigEndian.PutUint32(key, h)
-		cache.Delete(key)
-
-		_, err := initTestChainNoCheck(t, cache, customConfig)
-		require.Error(t, err)
-		require.True(t, strings.Contains(err.Error(), "can't init MPT at height"))
-	})
-	t.Run("failed native Management initialisation", func(t *testing.T) {
-		ps = newPS(t)
-
-		// Corrupt serialised CryptoLib state.
-		cache := storage.NewMemCachedStore(ps) // Extra wrapper to avoid good DB corruption.
-		key := make([]byte, 1+4+1+20)
-		key[0] = byte(storage.STStorage)
-		binary.LittleEndian.PutUint32(key[1:], uint32(managementID))
-		key[5] = byte(managementContractPrefix)
-		copy(key[6:], cryptoLibHash.BytesBE())
-		cache.Put(key, []byte{1, 2, 3})
-
-		_, err := initTestChainNoCheck(t, cache, customConfig)
-		require.Error(t, err)
-		require.True(t, strings.Contains(err.Error(), "can't init cache for Management native contract"))
-	})
-	t.Run("invalid native contract deactivation", func(t *testing.T) {
-		ps = newPS(t)
-		_, err := initTestChainNoCheck(t, ps, func(c *config.Config) {
-			customConfig(c)
-			c.ProtocolConfiguration.NativeUpdateHistories = map[string][]uint32{
-				nativenames.Policy: {0},
-				nativenames.Neo: {0},
-				nativenames.Gas: {0},
-				nativenames.Designation: {0},
-				nativenames.StdLib: {0},
-				nativenames.Management: {0},
-				nativenames.Oracle: {0},
-				nativenames.Ledger: {0},
-				nativenames.Notary: {0},
-				nativenames.CryptoLib: {h + 10},
-			}
-		})
-		require.Error(t, err)
-		require.True(t, strings.Contains(err.Error(), fmt.Sprintf("native contract %s is already stored, but marked as inactive for height %d in config", nativenames.CryptoLib, h)))
-	})
-	t.Run("invalid native contract activation", func(t *testing.T) {
-		ps = newPS(t)
-
-		// Remove CryptoLib from storage.
-		cache := storage.NewMemCachedStore(ps) // Extra wrapper to avoid good DB corruption.
-		key := make([]byte, 1+4+1+20)
-		key[0] = byte(storage.STStorage)
-		binary.LittleEndian.PutUint32(key[1:], uint32(managementID))
-		key[5] = byte(managementContractPrefix)
-		copy(key[6:], cryptoLibHash.BytesBE())
-		cache.Delete(key)
-
-		_, err := initTestChainNoCheck(t, cache, customConfig)
-		require.Error(t, err)
-		require.True(t, strings.Contains(err.Error(), fmt.Sprintf("native contract %s is not stored, but should be active at height %d according to config", nativenames.CryptoLib, h)))
-	})
-	t.Run("stored and autogenerated native contract's states mismatch", func(t *testing.T) {
-		ps = newPS(t)
-
-		// Change stored CryptoLib state.
-		cache := storage.NewMemCachedStore(ps) // Extra wrapper to avoid good DB corruption.
-		key := make([]byte, 1+4+1+20)
-		key[0] = byte(storage.STStorage)
-		binary.LittleEndian.PutUint32(key[1:], uint32(managementID))
-		key[5] = byte(managementContractPrefix)
-		copy(key[6:], cryptoLibHash.BytesBE())
-		cs := *cryptoLibState
-		cs.ID = -123
-		csBytes, err := stackitem.SerializeConvertible(&cs)
-		require.NoError(t, err)
-		cache.Put(key, csBytes)
-
-		_, err = initTestChainNoCheck(t, cache, customConfig)
-		require.Error(t, err)
-		require.True(t, strings.Contains(err.Error(), fmt.Sprintf("native %s: version mismatch (stored contract state differs from autogenerated one)", nativenames.CryptoLib)))
-	})
-
-	t.Run("good", func(t *testing.T) {
-		ps = newPS(t)
-		_, err := initTestChainNoCheck(t, ps, customConfig)
-		require.NoError(t, err)
-	})
-}
pkg/core/blockchain_neotest_test.go  (new file, 338 lines)
@@ -0,0 +1,338 @@
package core_test

import (
    "encoding/binary"
    "errors"
    "fmt"
    "strings"
    "testing"

    "github.com/nspcc-dev/neo-go/pkg/config"
    "github.com/nspcc-dev/neo-go/pkg/core/block"
    "github.com/nspcc-dev/neo-go/pkg/core/chaindump"
    "github.com/nspcc-dev/neo-go/pkg/core/dao"
    "github.com/nspcc-dev/neo-go/pkg/core/native/nativenames"
    "github.com/nspcc-dev/neo-go/pkg/core/storage"
    "github.com/nspcc-dev/neo-go/pkg/io"
    "github.com/nspcc-dev/neo-go/pkg/neotest"
    "github.com/nspcc-dev/neo-go/pkg/neotest/chain"
    "github.com/nspcc-dev/neo-go/pkg/util"
    "github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
    "github.com/stretchr/testify/require"
)

func TestBlockchain_DumpAndRestore(t *testing.T) {
    t.Run("no state root", func(t *testing.T) {
        testDumpAndRestore(t, func(c *config.ProtocolConfiguration) {
            c.StateRootInHeader = false
            c.P2PSigExtensions = true
        }, nil)
    })
    t.Run("with state root", func(t *testing.T) {
        testDumpAndRestore(t, func(c *config.ProtocolConfiguration) {
            c.StateRootInHeader = true
            c.P2PSigExtensions = true
        }, nil)
    })
    t.Run("remove untraceable", func(t *testing.T) {
        // Dump can only be created if all blocks and transactions are present.
        testDumpAndRestore(t, func(c *config.ProtocolConfiguration) {
            c.P2PSigExtensions = true
        }, func(c *config.ProtocolConfiguration) {
            c.MaxTraceableBlocks = 2
            c.RemoveUntraceableBlocks = true
            c.P2PSigExtensions = true
        })
    })
}

func testDumpAndRestore(t *testing.T, dumpF, restoreF func(c *config.ProtocolConfiguration)) {
    if restoreF == nil {
        restoreF = dumpF
    }

    bc, validators, committee := chain.NewMultiWithCustomConfig(t, dumpF)
    e := neotest.NewExecutor(t, bc, validators, committee)

    initBasicChain(t, e)
    require.True(t, bc.BlockHeight() > 5) // ensure that test is valid

    w := io.NewBufBinWriter()
    require.NoError(t, chaindump.Dump(bc, w.BinWriter, 0, bc.BlockHeight()+1))
    require.NoError(t, w.Err)

    buf := w.Bytes()
    t.Run("invalid start", func(t *testing.T) {
        bc2, _, _ := chain.NewMultiWithCustomConfig(t, restoreF)

        r := io.NewBinReaderFromBuf(buf)
        require.Error(t, chaindump.Restore(bc2, r, 2, 1, nil))
    })
    t.Run("good", func(t *testing.T) {
        bc2, _, _ := chain.NewMultiWithCustomConfig(t, dumpF)

        r := io.NewBinReaderFromBuf(buf)
        require.NoError(t, chaindump.Restore(bc2, r, 0, 2, nil))
        require.Equal(t, uint32(1), bc2.BlockHeight())

        r = io.NewBinReaderFromBuf(buf) // new reader because start is relative to dump
        require.NoError(t, chaindump.Restore(bc2, r, 2, 1, nil))
        t.Run("check handler", func(t *testing.T) {
            lastIndex := uint32(0)
            errStopped := errors.New("stopped")
            f := func(b *block.Block) error {
                lastIndex = b.Index
                if b.Index >= bc.BlockHeight()-1 {
                    return errStopped
                }
                return nil
            }
            require.NoError(t, chaindump.Restore(bc2, r, 0, 1, f))
            require.Equal(t, bc2.BlockHeight(), lastIndex)

            r = io.NewBinReaderFromBuf(buf)
            err := chaindump.Restore(bc2, r, 4, bc.BlockHeight()-bc2.BlockHeight(), f)
            require.True(t, errors.Is(err, errStopped))
            require.Equal(t, bc.BlockHeight()-1, lastIndex)
        })
    })
}

func newLevelDBForTestingWithPath(t testing.TB, dbPath string) (storage.Store, string) {
    if dbPath == "" {
        dbPath = t.TempDir()
    }
    dbOptions := storage.LevelDBOptions{
        DataDirectoryPath: dbPath,
    }
    newLevelStore, err := storage.NewLevelDBStore(dbOptions)
    require.Nil(t, err, "NewLevelDBStore error")
    return newLevelStore, dbPath
}

func TestBlockchain_StartFromExistingDB(t *testing.T) {
    ps, path := newLevelDBForTestingWithPath(t, "")
    customConfig := func(c *config.ProtocolConfiguration) {
        c.StateRootInHeader = true // Need for P2PStateExchangeExtensions check.
        c.P2PSigExtensions = true  // Need for basic chain initializer.
    }
    bc, validators, committee, err := chain.NewMultiWithCustomConfigAndStoreNoCheck(t, customConfig, ps)
    require.NoError(t, err)
    go bc.Run()
    e := neotest.NewExecutor(t, bc, validators, committee)
    initBasicChain(t, e)
    require.True(t, bc.BlockHeight() > 5, "ensure that basic chain is correctly initialised")

    // Information for further tests.
    h := bc.BlockHeight()
    cryptoLibHash, err := bc.GetNativeContractScriptHash(nativenames.CryptoLib)
    require.NoError(t, err)
    cryptoLibState := bc.GetContractState(cryptoLibHash)
    require.NotNil(t, cryptoLibState)
    var (
        managementID = -1
        managementContractPrefix = 8
    )

    bc.Close() // Ensure persist is done and persistent store is properly closed.

    newPS := func(t *testing.T) storage.Store {
        ps, _ = newLevelDBForTestingWithPath(t, path)
        t.Cleanup(func() { require.NoError(t, ps.Close()) })
        return ps
    }
    t.Run("mismatch storage version", func(t *testing.T) {
        ps = newPS(t)
        cache := storage.NewMemCachedStore(ps) // Extra wrapper to avoid good DB corruption.
        d := dao.NewSimple(cache, bc.GetConfig().StateRootInHeader, bc.GetConfig().P2PStateExchangeExtensions)
        d.PutVersion(dao.Version{
            Value: "0.0.0",
        })
        _, err := d.Persist() // Persist to `cache` wrapper.
        require.NoError(t, err)
        _, _, _, err = chain.NewMultiWithCustomConfigAndStoreNoCheck(t, customConfig, cache)
        require.Error(t, err)
        require.True(t, strings.Contains(err.Error(), "storage version mismatch"), err)
    })
    t.Run("mismatch StateRootInHeader", func(t *testing.T) {
        ps = newPS(t)
        _, _, _, err := chain.NewMultiWithCustomConfigAndStoreNoCheck(t, func(c *config.ProtocolConfiguration) {
            customConfig(c)
            c.StateRootInHeader = false
        }, ps)
        require.Error(t, err)
        require.True(t, strings.Contains(err.Error(), "StateRootInHeader setting mismatch"), err)
    })
    t.Run("mismatch P2PSigExtensions", func(t *testing.T) {
        ps = newPS(t)
        _, _, _, err := chain.NewMultiWithCustomConfigAndStoreNoCheck(t, func(c *config.ProtocolConfiguration) {
            customConfig(c)
            c.P2PSigExtensions = false
        }, ps)
        require.Error(t, err)
        require.True(t, strings.Contains(err.Error(), "P2PSigExtensions setting mismatch"), err)
    })
    t.Run("mismatch P2PStateExchangeExtensions", func(t *testing.T) {
        ps = newPS(t)
        _, _, _, err := chain.NewMultiWithCustomConfigAndStoreNoCheck(t, func(c *config.ProtocolConfiguration) {
            customConfig(c)
            c.StateRootInHeader = true
            c.P2PStateExchangeExtensions = true
        }, ps)
        require.Error(t, err)
        require.True(t, strings.Contains(err.Error(), "P2PStateExchangeExtensions setting mismatch"), err)
    })
    t.Run("mismatch KeepOnlyLatestState", func(t *testing.T) {
        ps = newPS(t)
        _, _, _, err := chain.NewMultiWithCustomConfigAndStoreNoCheck(t, func(c *config.ProtocolConfiguration) {
            customConfig(c)
            c.KeepOnlyLatestState = true
        }, ps)
        require.Error(t, err)
        require.True(t, strings.Contains(err.Error(), "KeepOnlyLatestState setting mismatch"), err)
    })
    t.Run("corrupted headers", func(t *testing.T) {
        ps = newPS(t)

        // Corrupt headers hashes batch.
        cache := storage.NewMemCachedStore(ps) // Extra wrapper to avoid good DB corruption.
        key := make([]byte, 5)
        key[0] = byte(storage.IXHeaderHashList)
        binary.BigEndian.PutUint32(key[1:], 1)
        cache.Put(key, []byte{1, 2, 3})

        _, _, _, err := chain.NewMultiWithCustomConfigAndStoreNoCheck(t, customConfig, cache)
        require.Error(t, err)
        require.True(t, strings.Contains(err.Error(), "failed to read batch of 2000"), err)
    })
    t.Run("corrupted current header height", func(t *testing.T) {
        ps = newPS(t)

        // Remove current header.
        cache := storage.NewMemCachedStore(ps) // Extra wrapper to avoid good DB corruption.
        cache.Delete([]byte{byte(storage.SYSCurrentHeader)})

        _, _, _, err := chain.NewMultiWithCustomConfigAndStoreNoCheck(t, customConfig, cache)
        require.Error(t, err)
        require.True(t, strings.Contains(err.Error(), "failed to retrieve current header"), err)
    })
    t.Run("missing last batch of 2000 headers and missing last header", func(t *testing.T) {
        ps = newPS(t)

        // Remove latest headers hashes batch and current header.
        cache := storage.NewMemCachedStore(ps) // Extra wrapper to avoid good DB corruption.
        cache.Delete([]byte{byte(storage.IXHeaderHashList)})
        currHeaderInfo, err := cache.Get([]byte{byte(storage.SYSCurrentHeader)})
        require.NoError(t, err)
        currHeaderHash, err := util.Uint256DecodeBytesLE(currHeaderInfo[:32])
        require.NoError(t, err)
        cache.Delete(append([]byte{byte(storage.DataExecutable)}, currHeaderHash.BytesBE()...))

        _, _, _, err = chain.NewMultiWithCustomConfigAndStoreNoCheck(t, customConfig, cache)
        require.Error(t, err)
        require.True(t, strings.Contains(err.Error(), "could not get header"), err)
    })
    t.Run("missing last block", func(t *testing.T) {
        ps = newPS(t)

        // Remove current block from storage.
        cache := storage.NewMemCachedStore(ps) // Extra wrapper to avoid good DB corruption.
        cache.Delete([]byte{byte(storage.SYSCurrentBlock)})

        _, _, _, err := chain.NewMultiWithCustomConfigAndStoreNoCheck(t, customConfig, cache)
        require.Error(t, err)
        require.True(t, strings.Contains(err.Error(), "failed to retrieve current block height"), err)
    })
    t.Run("missing last stateroot", func(t *testing.T) {
        ps = newPS(t)

        // Remove latest stateroot from storage.
        cache := storage.NewMemCachedStore(ps) // Extra wrapper to avoid good DB corruption.
        key := make([]byte, 5)
        key[0] = byte(storage.DataMPTAux)
        binary.BigEndian.PutUint32(key, h)
        cache.Delete(key)

        _, _, _, err := chain.NewMultiWithCustomConfigAndStoreNoCheck(t, customConfig, cache)
        require.Error(t, err)
        require.True(t, strings.Contains(err.Error(), "can't init MPT at height"), err)
    })
    t.Run("failed native Management initialisation", func(t *testing.T) {
        ps = newPS(t)

        // Corrupt serialised CryptoLib state.
        cache := storage.NewMemCachedStore(ps) // Extra wrapper to avoid good DB corruption.
        key := make([]byte, 1+4+1+20)
        key[0] = byte(storage.STStorage)
        binary.LittleEndian.PutUint32(key[1:], uint32(managementID))
        key[5] = byte(managementContractPrefix)
        copy(key[6:], cryptoLibHash.BytesBE())
        cache.Put(key, []byte{1, 2, 3})

        _, _, _, err := chain.NewMultiWithCustomConfigAndStoreNoCheck(t, customConfig, cache)
        require.Error(t, err)
        require.True(t, strings.Contains(err.Error(), "can't init cache for Management native contract"), err)
    })
    t.Run("invalid native contract deactivation", func(t *testing.T) {
        ps = newPS(t)
        _, _, _, err := chain.NewMultiWithCustomConfigAndStoreNoCheck(t, func(c *config.ProtocolConfiguration) {
            customConfig(c)
            c.NativeUpdateHistories = map[string][]uint32{
                nativenames.Policy: {0},
                nativenames.Neo: {0},
                nativenames.Gas: {0},
                nativenames.Designation: {0},
                nativenames.StdLib: {0},
                nativenames.Management: {0},
                nativenames.Oracle: {0},
                nativenames.Ledger: {0},
                nativenames.Notary: {0},
                nativenames.CryptoLib: {h + 10},
            }
        }, ps)
        require.Error(t, err)
        require.True(t, strings.Contains(err.Error(), fmt.Sprintf("native contract %s is already stored, but marked as inactive for height %d in config", nativenames.CryptoLib, h)), err)
    })
    t.Run("invalid native contract activation", func(t *testing.T) {
        ps = newPS(t)

        // Remove CryptoLib from storage.
        cache := storage.NewMemCachedStore(ps) // Extra wrapper to avoid good DB corruption.
        key := make([]byte, 1+4+1+20)
        key[0] = byte(storage.STStorage)
        binary.LittleEndian.PutUint32(key[1:], uint32(managementID))
        key[5] = byte(managementContractPrefix)
        copy(key[6:], cryptoLibHash.BytesBE())
        cache.Delete(key)

        _, _, _, err := chain.NewMultiWithCustomConfigAndStoreNoCheck(t, customConfig, cache)
        require.Error(t, err)
        require.True(t, strings.Contains(err.Error(), fmt.Sprintf("native contract %s is not stored, but should be active at height %d according to config", nativenames.CryptoLib, h)), err)
    })
    t.Run("stored and autogenerated native contract's states mismatch", func(t *testing.T) {
        ps = newPS(t)

        // Change stored CryptoLib state.
        cache := storage.NewMemCachedStore(ps) // Extra wrapper to avoid good DB corruption.
        key := make([]byte, 1+4+1+20)
        key[0] = byte(storage.STStorage)
        binary.LittleEndian.PutUint32(key[1:], uint32(managementID))
        key[5] = byte(managementContractPrefix)
        copy(key[6:], cryptoLibHash.BytesBE())
        cs := *cryptoLibState
        cs.ID = -123
        csBytes, err := stackitem.SerializeConvertible(&cs)
        require.NoError(t, err)
        cache.Put(key, csBytes)

        _, _, _, err = chain.NewMultiWithCustomConfigAndStoreNoCheck(t, customConfig, cache)
        require.Error(t, err)
        require.True(t, strings.Contains(err.Error(), fmt.Sprintf("native %s: version mismatch (stored contract state differs from autogenerated one)", nativenames.CryptoLib)), err)
    })

    t.Run("good", func(t *testing.T) {
        ps = newPS(t)
        _, _, _, err := chain.NewMultiWithCustomConfigAndStoreNoCheck(t, customConfig, ps)
        require.NoError(t, err)
    })
}
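TestBlockchain_StartFromExistingDB above builds its sub-tests around reopening the same LevelDB directory, which is what newLevelDBForTestingWithPath plus the NoCheck chain constructor make possible. Distilled to its core, the restart pattern looks roughly like this (a sketch assembled from the calls in the file above; error-path details omitted):

// Sketch: simulate a node restart on the same persistent store.
ps, path := newLevelDBForTestingWithPath(t, "") // fresh DB in a temp dir
bc, _, _, err := chain.NewMultiWithCustomConfigAndStoreNoCheck(t, customConfig, ps)
require.NoError(t, err)
go bc.Run()
// ... produce some chain state ...
bc.Close() // flush and release the store

ps, _ = newLevelDBForTestingWithPath(t, path) // reopen the same directory
_, _, _, err = chain.NewMultiWithCustomConfigAndStoreNoCheck(t, customConfig, ps)
require.NoError(t, err) // the new chain instance starts from the persisted state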
@@ -1,13 +1,9 @@
 package core
 
 import (
-	"encoding/base64"
-	"encoding/hex"
 	"encoding/json"
 	"fmt"
 	"math/big"
-	"os"
-	"path"
 	"path/filepath"
 	"strings"
 	"testing"
@@ -18,18 +14,12 @@ import (
 	"github.com/nspcc-dev/neo-go/pkg/config"
 	"github.com/nspcc-dev/neo-go/pkg/core/block"
 	"github.com/nspcc-dev/neo-go/pkg/core/blockchainer"
-	"github.com/nspcc-dev/neo-go/pkg/core/chaindump"
 	"github.com/nspcc-dev/neo-go/pkg/core/fee"
-	"github.com/nspcc-dev/neo-go/pkg/core/native"
-	"github.com/nspcc-dev/neo-go/pkg/core/native/noderoles"
 	"github.com/nspcc-dev/neo-go/pkg/core/state"
 	"github.com/nspcc-dev/neo-go/pkg/core/storage"
 	"github.com/nspcc-dev/neo-go/pkg/core/transaction"
-	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
 	"github.com/nspcc-dev/neo-go/pkg/encoding/address"
-	"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
 	"github.com/nspcc-dev/neo-go/pkg/io"
-	"github.com/nspcc-dev/neo-go/pkg/rpc/client/nns"
 	"github.com/nspcc-dev/neo-go/pkg/smartcontract"
 	"github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"
 	"github.com/nspcc-dev/neo-go/pkg/smartcontract/trigger"
@@ -47,9 +37,6 @@ import (
 // multisig address which possess all NEO.
 var neoOwner = testchain.MultisigScriptHash()
 
-// examplesPrefix is a prefix of the example smart-contracts.
-const examplesPrefix = "../../examples/"
-
 // newTestChain should be called before newBlock invocation to properly setup
 // global state.
 func newTestChain(t testing.TB) *Blockchain {
@@ -68,35 +55,21 @@ func newTestChainWithCustomCfgAndStore(t testing.TB, st storage.Store, f func(*c
 }
 
 func newLevelDBForTesting(t testing.TB) storage.Store {
-	newLevelStore, _ := newLevelDBForTestingWithPath(t, "")
-	return newLevelStore
-}
-
-func newLevelDBForTestingWithPath(t testing.TB, dbPath string) (storage.Store, string) {
-	if dbPath == "" {
-		dbPath = t.TempDir()
-	}
+	dbPath := t.TempDir()
 	dbOptions := storage.LevelDBOptions{
 		DataDirectoryPath: dbPath,
 	}
 	newLevelStore, err := storage.NewLevelDBStore(dbOptions)
 	require.Nil(t, err, "NewLevelDBStore error")
-	return newLevelStore, dbPath
+	return newLevelStore
 }
 
 func newBoltStoreForTesting(t testing.TB) storage.Store {
-	boltDBStore, _ := newBoltStoreForTestingWithPath(t, "")
-	return boltDBStore
-}
-
-func newBoltStoreForTestingWithPath(t testing.TB, dbPath string) (storage.Store, string) {
-	if dbPath == "" {
-		d := t.TempDir()
-		dbPath = filepath.Join(d, "test_bolt_db")
-	}
+	d := t.TempDir()
+	dbPath := filepath.Join(d, "test_bolt_db")
 	boltDBStore, err := storage.NewBoltDBStore(storage.BoltDBOptions{FilePath: dbPath})
 	require.NoError(t, err)
-	return boltDBStore, dbPath
+	return boltDBStore
 }
 
 func initTestChain(t testing.TB, st storage.Store, f func(*config.Config)) *Blockchain {
@ -218,382 +191,6 @@ func TestBug1728(t *testing.T) {
|
||||||
require.Equal(t, aer.VMState, vm.HaltState)
|
require.Equal(t, aer.VMState, vm.HaltState)
|
||||||
}
|
}
|
||||||
|
|
||||||
// This function generates "../rpc/testdata/testblocks.acc" file which contains data
|
|
||||||
// for RPC unit tests. It also is a nice integration test.
|
|
||||||
// To generate new "../rpc/testdata/testblocks.acc", follow the steps:
|
|
||||||
// 1. Set saveChain down below to true
|
|
||||||
// 2. Run tests with `$ make test`
|
|
||||||
func TestCreateBasicChain(t *testing.T) {
|
|
||||||
const saveChain = false
|
|
||||||
const prefix = "../rpc/server/testdata/"
|
|
||||||
|
|
||||||
bc := newTestChain(t)
|
|
||||||
initBasicChain(t, bc)
|
|
||||||
|
|
||||||
if saveChain {
|
|
||||||
outStream, err := os.Create(prefix + "testblocks.acc")
|
|
||||||
require.NoError(t, err)
|
|
||||||
t.Cleanup(func() {
|
|
||||||
outStream.Close()
|
|
||||||
})
|
|
||||||
|
|
||||||
writer := io.NewBinWriterFromIO(outStream)
|
|
||||||
writer.WriteU32LE(bc.BlockHeight())
|
|
||||||
err = chaindump.Dump(bc, writer, 1, bc.BlockHeight())
|
|
||||||
require.NoError(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
priv0 := testchain.PrivateKeyByID(0)
|
|
||||||
priv1 := testchain.PrivateKeyByID(1)
|
|
||||||
priv0ScriptHash := priv0.GetScriptHash()
|
|
||||||
acc0 := wallet.NewAccountFromPrivateKey(priv0)
|
|
||||||
|
|
||||||
// Prepare some transaction for future submission.
|
|
||||||
txSendRaw := newNEP17Transfer(bc.contracts.NEO.Hash, priv0ScriptHash, priv1.GetScriptHash(), int64(fixedn.Fixed8FromInt64(1000)))
|
|
||||||
txSendRaw.ValidUntilBlock = bc.config.MaxValidUntilBlockIncrement
|
|
||||||
txSendRaw.Nonce = 0x1234
|
|
||||||
txSendRaw.Signers = []transaction.Signer{{
|
|
||||||
Account: priv0ScriptHash,
|
|
||||||
Scopes: transaction.CalledByEntry,
|
|
||||||
AllowedContracts: nil,
|
|
||||||
AllowedGroups: nil,
|
|
||||||
}}
|
|
||||||
require.NoError(t, addNetworkFee(bc, txSendRaw, acc0))
|
|
||||||
require.NoError(t, acc0.SignTx(testchain.Network(), txSendRaw))
|
|
||||||
bw := io.NewBufBinWriter()
|
|
||||||
txSendRaw.EncodeBinary(bw.BinWriter)
|
|
||||||
t.Logf("sendrawtransaction: \n\tbase64: %s\n\tHash LE: %s", base64.StdEncoding.EncodeToString(bw.Bytes()), txSendRaw.Hash().StringLE())
|
|
||||||
require.False(t, saveChain)
|
|
||||||
}
|
|
||||||
|
|
||||||
-func initBasicChain(t *testing.T, bc *Blockchain) {
-	const prefix = "../rpc/server/testdata/"
-	// Increase in case if you need more blocks
-	const validUntilBlock = 1200
-
-	// To be incremented after each created transaction to keep chain constant.
-	var testNonce uint32 = 1
-
-	// Use as nonce when new transaction is created to avoid random data in tests.
-	getNextNonce := func() uint32 {
-		testNonce++
-		return testNonce
-	}
-
-	const neoAmount = 99999000
-
-	gasHash := bc.contracts.GAS.Hash
-	neoHash := bc.contracts.NEO.Hash
-	policyHash := bc.contracts.Policy.Hash
-	notaryHash := bc.contracts.Notary.Hash
-	t.Logf("native GAS hash: %v", gasHash)
-	t.Logf("native NEO hash: %v", neoHash)
-	t.Logf("native Policy hash: %v", policyHash)
-	t.Logf("native Notary hash: %v", notaryHash)
-	t.Logf("Block0 hash: %s", bc.GetHeaderHash(0).StringLE())
-
-	priv0 := testchain.PrivateKeyByID(0)
-	priv0ScriptHash := priv0.GetScriptHash()
-	priv1 := testchain.PrivateKeyByID(1)
-	priv1ScriptHash := priv1.GetScriptHash()
-	acc0 := wallet.NewAccountFromPrivateKey(priv0)
-	acc1 := wallet.NewAccountFromPrivateKey(priv1)
-
-	deployContractFromPriv0 := func(t *testing.T, path, contractName string, configPath *string, expectedID int32) (util.Uint256, util.Uint256, util.Uint160) {
-		txDeploy, _ := newDeployTx(t, bc, priv0ScriptHash, path, contractName, configPath)
-		txDeploy.Nonce = getNextNonce()
-		txDeploy.ValidUntilBlock = validUntilBlock
-		require.NoError(t, addNetworkFee(bc, txDeploy, acc0))
-		require.NoError(t, acc0.SignTx(testchain.Network(), txDeploy))
-		b := bc.newBlock(txDeploy)
-		require.NoError(t, bc.AddBlock(b)) // block #11
-		checkTxHalt(t, bc, txDeploy.Hash())
-		sh, err := bc.GetContractScriptHash(expectedID)
-		require.NoError(t, err)
-		return b.Hash(), txDeploy.Hash(), sh
-	}
-
-	require.Equal(t, big.NewInt(5000_0000), bc.GetUtilityTokenBalance(priv0ScriptHash)) // gas bounty
-
-	// Block #1: move 1000 GAS and neoAmount NEO to priv0.
-	txMoveNeo, err := testchain.NewTransferFromOwner(bc, neoHash, priv0ScriptHash, neoAmount, getNextNonce(), validUntilBlock)
-	require.NoError(t, err)
-	// Move some GAS to one simple account.
-	txMoveGas, err := testchain.NewTransferFromOwner(bc, gasHash, priv0ScriptHash, int64(fixedn.Fixed8FromInt64(1000)),
-		getNextNonce(), validUntilBlock)
-	require.NoError(t, err)
-	b := bc.newBlock(txMoveNeo, txMoveGas)
-	require.NoError(t, bc.AddBlock(b))
-	checkTxHalt(t, bc, txMoveGas.Hash())
-	checkTxHalt(t, bc, txMoveNeo.Hash())
-	t.Logf("Block1 hash: %s", b.Hash().StringLE())
-	bw := io.NewBufBinWriter()
-	b.EncodeBinary(bw.BinWriter)
-	require.NoError(t, bw.Err)
-	jsonB, err := b.MarshalJSON()
-	require.NoError(t, err)
-	t.Logf("Block1 base64: %s", base64.StdEncoding.EncodeToString(bw.Bytes()))
-	t.Logf("Block1 JSON: %s", string(jsonB))
-	bw.Reset()
-	b.Header.EncodeBinary(bw.BinWriter)
-	require.NoError(t, bw.Err)
-	jsonH, err := b.Header.MarshalJSON()
-	require.NoError(t, err)
-	t.Logf("Header1 base64: %s", base64.StdEncoding.EncodeToString(bw.Bytes()))
-	t.Logf("Header1 JSON: %s", string(jsonH))
-	jsonTxMoveNeo, err := txMoveNeo.MarshalJSON()
-	require.NoError(t, err)
-	t.Logf("txMoveNeo hash: %s", txMoveNeo.Hash().StringLE())
-	t.Logf("txMoveNeo JSON: %s", string(jsonTxMoveNeo))
-	t.Logf("txMoveNeo base64: %s", base64.StdEncoding.EncodeToString(txMoveNeo.Bytes()))
-	t.Logf("txMoveGas hash: %s", txMoveGas.Hash().StringLE())
-
-	require.True(t, bc.GetUtilityTokenBalance(priv0ScriptHash).Cmp(big.NewInt(1000*native.GASFactor)) >= 0)
-	// info for getblockheader rpc tests
-	t.Logf("header hash: %s", b.Hash().StringLE())
-	buf := io.NewBufBinWriter()
-	b.Header.EncodeBinary(buf.BinWriter)
-	t.Logf("header: %s", hex.EncodeToString(buf.Bytes()))
-
-	// Block #2: deploy test_contract.
-	cfgPath := prefix + "test_contract.yml"
-	block2H, txDeployH, cHash := deployContractFromPriv0(t, prefix+"test_contract.go", "Rubl", &cfgPath, 1)
-	t.Logf("txDeploy: %s", txDeployH.StringLE())
-	t.Logf("Block2 hash: %s", block2H.StringLE())
-
-	// Block #3: invoke `putValue` method on the test_contract.
-	script := io.NewBufBinWriter()
-	emit.AppCall(script.BinWriter, cHash, "putValue", callflag.All, "testkey", "testvalue")
-	txInv := transaction.New(script.Bytes(), 1*native.GASFactor)
-	txInv.Nonce = getNextNonce()
-	txInv.ValidUntilBlock = validUntilBlock
-	txInv.Signers = []transaction.Signer{{Account: priv0ScriptHash}}
-	require.NoError(t, addNetworkFee(bc, txInv, acc0))
-	require.NoError(t, acc0.SignTx(testchain.Network(), txInv))
-	b = bc.newBlock(txInv)
-	require.NoError(t, bc.AddBlock(b))
-	checkTxHalt(t, bc, txInv.Hash())
-	t.Logf("txInv: %s", txInv.Hash().StringLE())
-
-	// Block #4: transfer 0.0000_1 NEO from priv0 to priv1.
-	txNeo0to1 := newNEP17Transfer(neoHash, priv0ScriptHash, priv1ScriptHash, 1000)
-	txNeo0to1.Nonce = getNextNonce()
-	txNeo0to1.ValidUntilBlock = validUntilBlock
-	txNeo0to1.Signers = []transaction.Signer{
-		{
-			Account: priv0ScriptHash,
-			Scopes: transaction.CalledByEntry,
-			AllowedContracts: nil,
-			AllowedGroups: nil,
-		},
-	}
-	require.NoError(t, addNetworkFee(bc, txNeo0to1, acc0))
-	require.NoError(t, acc0.SignTx(testchain.Network(), txNeo0to1))
-	b = bc.newBlock(txNeo0to1)
-	require.NoError(t, bc.AddBlock(b))
-	checkTxHalt(t, bc, txNeo0to1.Hash())
-
-	// Block #5: initialize rubles contract and transfer 1000 rubles from the contract to priv0.
-	w := io.NewBufBinWriter()
-	emit.AppCall(w.BinWriter, cHash, "init", callflag.All)
-	initTx := transaction.New(w.Bytes(), 1*native.GASFactor)
-	initTx.Nonce = getNextNonce()
-	initTx.ValidUntilBlock = validUntilBlock
-	initTx.Signers = []transaction.Signer{{Account: priv0ScriptHash}}
-	require.NoError(t, addNetworkFee(bc, initTx, acc0))
-	require.NoError(t, acc0.SignTx(testchain.Network(), initTx))
-	transferTx := newNEP17Transfer(cHash, cHash, priv0ScriptHash, 1000)
-	transferTx.Nonce = getNextNonce()
-	transferTx.ValidUntilBlock = validUntilBlock
-	transferTx.Signers = []transaction.Signer{
-		{
-			Account: priv0ScriptHash,
-			Scopes: transaction.CalledByEntry,
-			AllowedContracts: nil,
-			AllowedGroups: nil,
-		},
-	}
-	require.NoError(t, addNetworkFee(bc, transferTx, acc0))
-	transferTx.SystemFee += 1000000
-	require.NoError(t, acc0.SignTx(testchain.Network(), transferTx))
-	b = bc.newBlock(initTx, transferTx)
-	require.NoError(t, bc.AddBlock(b))
-	checkTxHalt(t, bc, initTx.Hash())
-	checkTxHalt(t, bc, transferTx.Hash())
-	t.Logf("recieveRublesTx: %v", transferTx.Hash().StringLE())
-
-	// Block #6: transfer 123 rubles from priv0 to priv1
-	transferTx = newNEP17Transfer(cHash, priv0.GetScriptHash(), priv1ScriptHash, 123)
-	transferTx.Nonce = getNextNonce()
-	transferTx.ValidUntilBlock = validUntilBlock
-	transferTx.Signers = []transaction.Signer{
-		{
-			Account: priv0ScriptHash,
-			Scopes: transaction.CalledByEntry,
-			AllowedContracts: nil,
-			AllowedGroups: nil,
-		},
-	}
-	require.NoError(t, addNetworkFee(bc, transferTx, acc0))
-	transferTx.SystemFee += 1000000
-	require.NoError(t, acc0.SignTx(testchain.Network(), transferTx))
-	b = bc.newBlock(transferTx)
-	require.NoError(t, bc.AddBlock(b))
-	checkTxHalt(t, bc, transferTx.Hash())
-	t.Logf("sendRublesTx: %v", transferTx.Hash().StringLE())
-
-	// Block #7: push verification contract into the chain.
-	verifyPath := filepath.Join(prefix, "verify", "verification_contract.go")
-	_, _, _ = deployContractFromPriv0(t, verifyPath, "Verify", nil, 2)
-
-	// Block #8: deposit some GAS to notary contract for priv0.
-	transferTx = newNEP17Transfer(gasHash, priv0.GetScriptHash(), notaryHash, 10_0000_0000, priv0.GetScriptHash(), int64(bc.BlockHeight()+1000))
-	transferTx.Nonce = getNextNonce()
-	transferTx.ValidUntilBlock = validUntilBlock
-	transferTx.Signers = []transaction.Signer{
-		{
-			Account: priv0ScriptHash,
-			Scopes: transaction.CalledByEntry,
-		},
-	}
-	require.NoError(t, addNetworkFee(bc, transferTx, acc0))
-	transferTx.SystemFee += 10_0000
-	require.NoError(t, acc0.SignTx(testchain.Network(), transferTx))
-	b = bc.newBlock(transferTx)
-	require.NoError(t, bc.AddBlock(b))
-	checkTxHalt(t, bc, transferTx.Hash())
-	t.Logf("notaryDepositTxPriv0: %v", transferTx.Hash().StringLE())
-
-	// Block #9: designate new Notary node.
-	ntr, err := wallet.NewWalletFromFile(path.Join(notaryModulePath, "./testdata/notary1.json"))
-	require.NoError(t, err)
-	require.NoError(t, ntr.Accounts[0].Decrypt("one", ntr.Scrypt))
-	bc.setNodesByRole(t, true, noderoles.P2PNotary, keys.PublicKeys{ntr.Accounts[0].PrivateKey().PublicKey()})
-	t.Logf("Designated Notary node: %s", hex.EncodeToString(ntr.Accounts[0].PrivateKey().PublicKey().Bytes()))
-
-	// Block #10: push verification contract with arguments into the chain.
-	verifyPath = filepath.Join(prefix, "verify_args", "verification_with_args_contract.go")
-	_, _, _ = deployContractFromPriv0(t, verifyPath, "VerifyWithArgs", nil, 3) // block #10
-
-	// Block #11: push NameService contract into the chain.
-	nsPath := examplesPrefix + "nft-nd-nns/"
-	nsConfigPath := nsPath + "nns.yml"
-	_, _, nsHash := deployContractFromPriv0(t, nsPath, nsPath, &nsConfigPath, 4) // block #11
-
-	// Block #12: transfer funds to committee for futher NS record registration.
-	transferFundsToCommittee(t, bc) // block #12
-
-	// Block #13: add `.com` root to NNS.
-	res, err := invokeContractMethodGeneric(bc, -1,
-		nsHash, "addRoot", true, "com") // block #13
-	require.NoError(t, err)
-	checkResult(t, res, stackitem.Null{})
-
-	// Block #14: register `neo.com` via NNS.
-	res, err = invokeContractMethodGeneric(bc, -1,
-		nsHash, "register", acc0, "neo.com", priv0ScriptHash) // block #14
-	require.NoError(t, err)
-	checkResult(t, res, stackitem.NewBool(true))
-	require.Equal(t, 1, len(res.Events)) // transfer
-	tokenID, err := res.Events[0].Item.Value().([]stackitem.Item)[3].TryBytes()
-	require.NoError(t, err)
-	t.Logf("NNS token #1 ID (hex): %s", hex.EncodeToString(tokenID))
-
-	// Block #15: set A record type with priv0 owner via NNS.
-	res, err = invokeContractMethodGeneric(bc, -1, nsHash,
-		"setRecord", acc0, "neo.com", int64(nns.A), "1.2.3.4") // block #15
-	require.NoError(t, err)
-	checkResult(t, res, stackitem.Null{})
-
-	// Block #16: invoke `test_contract.go`: put new value with the same key to check `getstate` RPC call
-	script.Reset()
-	emit.AppCall(script.BinWriter, cHash, "putValue", callflag.All, "testkey", "newtestvalue")
-	// Invoke `test_contract.go`: put values to check `findstates` RPC call
-	emit.AppCall(script.BinWriter, cHash, "putValue", callflag.All, "aa", "v1")
-	emit.AppCall(script.BinWriter, cHash, "putValue", callflag.All, "aa10", "v2")
-	emit.AppCall(script.BinWriter, cHash, "putValue", callflag.All, "aa50", "v3")
-	txInv = transaction.New(script.Bytes(), 1*native.GASFactor)
-	txInv.Nonce = getNextNonce()
-	txInv.ValidUntilBlock = validUntilBlock
-	txInv.Signers = []transaction.Signer{{Account: priv0ScriptHash}}
-	require.NoError(t, addNetworkFee(bc, txInv, acc0))
-	require.NoError(t, acc0.SignTx(testchain.Network(), txInv))
-	b = bc.newBlock(txInv)
-	require.NoError(t, bc.AddBlock(b)) // block #16
-	checkTxHalt(t, bc, txInv.Hash())
-
-	// Block #17: deploy NeoFS Object contract (NEP11-Divisible).
-	nfsPath := examplesPrefix + "nft-d/"
-	nfsConfigPath := nfsPath + "nft.yml"
-	_, _, nfsHash := deployContractFromPriv0(t, nfsPath, nfsPath, &nfsConfigPath, 5) // block #17
-
-	// Block #18: mint 1.00 NFSO token by transferring 10 GAS to NFSO contract.
-	containerID := util.Uint256{1, 2, 3}
-	objectID := util.Uint256{4, 5, 6}
-	txGas0toNFS := newNEP17Transfer(gasHash, priv0ScriptHash, nfsHash, 10_0000_0000, containerID.BytesBE(), objectID.BytesBE())
-	txGas0toNFS.SystemFee += 4000_0000
-	txGas0toNFS.Nonce = getNextNonce()
-	txGas0toNFS.ValidUntilBlock = validUntilBlock
-	txGas0toNFS.Signers = []transaction.Signer{
-		{
-			Account: priv0ScriptHash,
-			Scopes: transaction.CalledByEntry,
-		},
-	}
-	require.NoError(t, addNetworkFee(bc, txGas0toNFS, acc0))
-	require.NoError(t, acc0.SignTx(testchain.Network(), txGas0toNFS))
-	b = bc.newBlock(txGas0toNFS)
-	require.NoError(t, bc.AddBlock(b)) // block #18
-	checkTxHalt(t, bc, txGas0toNFS.Hash())
-	aer, _ := bc.GetAppExecResults(txGas0toNFS.Hash(), trigger.Application)
-	require.Equal(t, 2, len(aer[0].Events)) // GAS transfer + NFSO transfer
-	tokenID, err = aer[0].Events[1].Item.Value().([]stackitem.Item)[3].TryBytes()
-	require.NoError(t, err)
-	t.Logf("NFSO token #1 ID (hex): %s", hex.EncodeToString(tokenID))
-
-	// Block #19: transfer 0.25 NFSO from priv0 to priv1.
-	script.Reset()
-	emit.AppCall(script.BinWriter, nfsHash, "transfer", callflag.All, priv0ScriptHash, priv1ScriptHash, 25, tokenID, nil)
-	emit.Opcodes(script.BinWriter, opcode.ASSERT)
-	require.NoError(t, script.Err)
-	txNFS0to1 := transaction.New(script.Bytes(), 1*native.GASFactor)
-	txNFS0to1.Nonce = getNextNonce()
-	txNFS0to1.ValidUntilBlock = validUntilBlock
-	txNFS0to1.Signers = []transaction.Signer{{Account: priv0ScriptHash, Scopes: transaction.CalledByEntry}}
-	require.NoError(t, addNetworkFee(bc, txNFS0to1, acc0))
-	require.NoError(t, acc0.SignTx(testchain.Network(), txNFS0to1))
-	b = bc.newBlock(txNFS0to1)
-	require.NoError(t, bc.AddBlock(b)) // block #19
-	checkTxHalt(t, bc, txNFS0to1.Hash())
-
-	// Block #20: transfer 1000 GAS to priv1.
-	txMoveGas, err = testchain.NewTransferFromOwner(bc, gasHash, priv1ScriptHash, int64(fixedn.Fixed8FromInt64(1000)),
-		getNextNonce(), validUntilBlock)
-	require.NoError(t, err)
-	require.NoError(t, bc.AddBlock(bc.newBlock(txMoveGas)))
-	checkTxHalt(t, bc, txMoveGas.Hash()) // block #20
-
-	// Block #21: transfer 0.05 NFSO from priv1 back to priv0.
-	script.Reset()
-	emit.AppCall(script.BinWriter, nfsHash, "transfer", callflag.All, priv1ScriptHash, priv0.GetScriptHash(), 5, tokenID, nil)
-	emit.Opcodes(script.BinWriter, opcode.ASSERT)
-	require.NoError(t, script.Err)
-	txNFS1to0 := transaction.New(script.Bytes(), 1*native.GASFactor)
-	txNFS1to0.Nonce = getNextNonce()
-	txNFS1to0.ValidUntilBlock = validUntilBlock
-	txNFS1to0.Signers = []transaction.Signer{{Account: priv1ScriptHash, Scopes: transaction.CalledByEntry}}
-	require.NoError(t, addNetworkFee(bc, txNFS1to0, acc0))
-	require.NoError(t, acc1.SignTx(testchain.Network(), txNFS1to0))
-	b = bc.newBlock(txNFS1to0)
-	require.NoError(t, bc.AddBlock(b)) // block #21
-	checkTxHalt(t, bc, txNFS1to0.Hash())
-
-	// Compile contract to test `invokescript` RPC call
-	invokePath := filepath.Join(prefix, "invoke", "invokescript_contract.go")
-	invokeCfg := filepath.Join(prefix, "invoke", "invoke.yml")
-	_, _ = newDeployTx(t, bc, priv0ScriptHash, invokePath, "ContractForInvokescriptTest", &invokeCfg)
-}
 
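One pattern worth noting in the removed initBasicChain above: the variadic tail of newNEP17Transfer presumably ends up as the data argument of the NEP-17 "transfer" call, which the receiving contract sees in onNEP17Payment. That reading is inferred from the call sites above, not from the helper's body.

// Reference only, taken from the calls above:
//   Notary deposit (Block #8): data = [depositor, till]
//     newNEP17Transfer(gasHash, from, notaryHash, 10_0000_0000, priv0.GetScriptHash(), int64(bc.BlockHeight()+1000))
//   NFSO mint (Block #18): data = [containerID, objectID]
//     newNEP17Transfer(gasHash, from, nfsHash, 10_0000_0000, containerID.BytesBE(), objectID.BytesBE())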
 func newNEP17Transfer(sc, from, to util.Uint160, amount int64, additionalArgs ...interface{}) *transaction.Transaction {
 	return newNEP17TransferWithAssert(sc, from, to, amount, true, additionalArgs...)
 }
@@ -612,13 +209,6 @@ func newNEP17TransferWithAssert(sc, from, to util.Uint160, amount int64, needAss
 	return transaction.New(script, 11000000)
 }
 
-func newDeployTx(t *testing.T, bc *Blockchain, sender util.Uint160, name, ctrName string, cfgName *string) (*transaction.Transaction, util.Uint160) {
-	tx, h, avm, err := testchain.NewDeployTx(bc, name, sender, nil, cfgName)
-	require.NoError(t, err)
-	t.Logf("contract (%s): \n\tHash: %s\n\tAVM: %s", name, h.StringLE(), base64.StdEncoding.EncodeToString(avm))
-	return tx, h
-}
-
 func addSigners(sender util.Uint160, txs ...*transaction.Transaction) {
 	for _, tx := range txs {
 		tx.Signers = []transaction.Signer{{
@@ -630,23 +220,6 @@ func addSigners(sender util.Uint160, txs ...*transaction.Transaction) {
 	}
 }
 
-func addNetworkFee(bc *Blockchain, tx *transaction.Transaction, sender *wallet.Account) error {
-	size := io.GetVarSize(tx)
-	netFee, sizeDelta := fee.Calculate(bc.GetBaseExecFee(), sender.Contract.Script)
-	tx.NetworkFee += netFee
-	size += sizeDelta
-	for _, cosigner := range tx.Signers {
-		contract := bc.GetContractState(cosigner.Account)
-		if contract != nil {
-			netFee, sizeDelta = fee.Calculate(bc.GetBaseExecFee(), contract.NEF.Script)
-			tx.NetworkFee += netFee
-			size += sizeDelta
-		}
-	}
-	tx.NetworkFee += int64(size) * bc.FeePerByte()
-	return nil
-}
-
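For orientation, the removed addNetworkFee prices a transaction as the verification cost of the sender's script, plus the verification cost of every contract cosigner, plus a per-byte charge. A purely illustrative calculation with assumed numbers (not chain constants):

// Illustrative only; the concrete values are assumptions.
// One standard single-signature sender:
//   verification cost = fee.Calculate(bc.GetBaseExecFee(), sender.Contract.Script) -> e.g. 1_000_000
//   sizeDelta         = witness bytes added by that signer                         -> e.g. 109
// Resulting fee:
//   tx.NetworkFee = 1_000_000 + int64(io.GetVarSize(tx)+109)*bc.FeePerByte()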
 // Signer can be either bool or *wallet.Account.
 // In the first case `true` means sign by committee, `false` means sign by validators.
 func prepareContractMethodInvokeGeneric(chain *Blockchain, sysfee int64,
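Both accepted signer forms are already exercised by the removed initBasicChain above; for quick reference:

// Committee-signed invocation (signer is `true`):
res, err := invokeContractMethodGeneric(bc, -1, nsHash, "addRoot", true, "com")
// Account-signed invocation (signer is a *wallet.Account, acc0 here):
res, err = invokeContractMethodGeneric(bc, -1, nsHash, "register", acc0, "neo.com", priv0ScriptHash)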

@@ -1,30 +1,33 @@
-package core
+package core_test
 
 import (
 	"testing"
 
-	"github.com/nspcc-dev/neo-go/pkg/core/storage"
+	"github.com/nspcc-dev/neo-go/pkg/config"
+	"github.com/nspcc-dev/neo-go/pkg/core/native/nativenames"
+	"github.com/nspcc-dev/neo-go/pkg/neotest"
+	"github.com/nspcc-dev/neo-go/pkg/neotest/chain"
 	"github.com/nspcc-dev/neo-go/pkg/util"
 	"github.com/stretchr/testify/require"
 )
 
-type memoryStore struct {
-	*storage.MemoryStore
-}
-
-func (memoryStore) Close() error { return nil }
-
 func TestManagement_GetNEP17Contracts(t *testing.T) {
 	t.Run("empty chain", func(t *testing.T) {
-		chain := newTestChain(t)
-		require.ElementsMatch(t, []util.Uint160{chain.contracts.NEO.Hash, chain.contracts.GAS.Hash}, chain.contracts.Management.GetNEP17Contracts())
+		bc, validators, committee := chain.NewMulti(t)
+		e := neotest.NewExecutor(t, bc, validators, committee)
+
+		require.ElementsMatch(t, []util.Uint160{e.NativeHash(t, nativenames.Neo),
+			e.NativeHash(t, nativenames.Gas)}, bc.GetNEP17Contracts())
 	})
 
-	t.Run("test chain", func(t *testing.T) {
-		chain := newTestChain(t)
-		initBasicChain(t, chain)
-		rublesHash, err := chain.GetContractScriptHash(1)
-		require.NoError(t, err)
-		require.ElementsMatch(t, []util.Uint160{chain.contracts.NEO.Hash, chain.contracts.GAS.Hash, rublesHash}, chain.contracts.Management.GetNEP17Contracts())
+	t.Run("basic chain", func(t *testing.T) {
+		bc, validators, committee := chain.NewMultiWithCustomConfig(t, func(c *config.ProtocolConfiguration) {
+			c.P2PSigExtensions = true // `initBasicChain` requires Notary enabled
+		})
+		e := neotest.NewExecutor(t, bc, validators, committee)
+		initBasicChain(t, e)
+
+		require.ElementsMatch(t, []util.Uint160{e.NativeHash(t, nativenames.Neo),
+			e.NativeHash(t, nativenames.Gas), e.ContractHash(t, 1)}, bc.GetNEP17Contracts())
 	})
 }

@@ -10,11 +10,6 @@ import (
 	"github.com/stretchr/testify/require"
 )
 
-func transferFundsToCommittee(t *testing.T, chain *Blockchain) {
-	transferTokenFromMultisigAccount(t, chain, testchain.CommitteeScriptHash(),
-		chain.contracts.GAS.Hash, 1000_00000000)
-}
-
 func TestFeePerByte(t *testing.T) {
 	chain := newTestChain(t)
 
@@ -133,6 +133,12 @@ func TestStateRoot(t *testing.T) {
 	require.Equal(t, h, r.Witness[0].ScriptHash())
 }
 
+type memoryStore struct {
+	*storage.MemoryStore
+}
+
+func (memoryStore) Close() error { return nil }
+
 func TestStateRootInitNonZeroHeight(t *testing.T) {
 	st := memoryStore{storage.NewMemoryStore()}
 	h, pubs, accs := newMajorityMultisigWithGAS(t, 2)

@@ -1,13 +1,14 @@
-package core
+package core_test
 
 import (
 	"testing"
-	"time"
 
 	"github.com/nspcc-dev/neo-go/pkg/config"
 	"github.com/nspcc-dev/neo-go/pkg/core/block"
 	"github.com/nspcc-dev/neo-go/pkg/core/mpt"
 	"github.com/nspcc-dev/neo-go/pkg/core/storage"
+	"github.com/nspcc-dev/neo-go/pkg/neotest"
+	"github.com/nspcc-dev/neo-go/pkg/neotest/chain"
 	"github.com/nspcc-dev/neo-go/pkg/util"
 	"github.com/nspcc-dev/neo-go/pkg/util/slice"
 	"github.com/stretchr/testify/require"
@@ -18,40 +19,41 @@ func TestStateSyncModule_Init(t *testing.T) {
 		stateSyncInterval = 2
 		maxTraceable uint32 = 3
 	)
-	spoutCfg := func(c *config.Config) {
-		c.ProtocolConfiguration.StateRootInHeader = true
-		c.ProtocolConfiguration.P2PStateExchangeExtensions = true
-		c.ProtocolConfiguration.StateSyncInterval = stateSyncInterval
-		c.ProtocolConfiguration.MaxTraceableBlocks = maxTraceable
+	spoutCfg := func(c *config.ProtocolConfiguration) {
+		c.StateRootInHeader = true
+		c.P2PStateExchangeExtensions = true
+		c.StateSyncInterval = stateSyncInterval
+		c.MaxTraceableBlocks = maxTraceable
 	}
-	bcSpout := newTestChainWithCustomCfg(t, spoutCfg)
+	bcSpout, validators, committee := chain.NewMultiWithCustomConfig(t, spoutCfg)
+	e := neotest.NewExecutor(t, bcSpout, validators, committee)
 	for i := 0; i <= 2*stateSyncInterval+int(maxTraceable)+1; i++ {
-		require.NoError(t, bcSpout.AddBlock(bcSpout.newBlock()))
+		e.AddNewBlock(t)
 	}
 
-	boltCfg := func(c *config.Config) {
+	boltCfg := func(c *config.ProtocolConfiguration) {
 		spoutCfg(c)
-		c.ProtocolConfiguration.KeepOnlyLatestState = true
-		c.ProtocolConfiguration.RemoveUntraceableBlocks = true
+		c.KeepOnlyLatestState = true
+		c.RemoveUntraceableBlocks = true
 	}
 	t.Run("error: module disabled by config", func(t *testing.T) {
-		bcBolt := newTestChainWithCustomCfg(t, func(c *config.Config) {
+		bcBolt, _, _ := chain.NewMultiWithCustomConfig(t, func(c *config.ProtocolConfiguration) {
 			boltCfg(c)
-			c.ProtocolConfiguration.RemoveUntraceableBlocks = false
+			c.RemoveUntraceableBlocks = false
 		})
 		module := bcBolt.GetStateSyncModule()
 		require.Error(t, module.Init(bcSpout.BlockHeight())) // module inactive (non-archival node)
 	})
 
 	t.Run("inactive: spout chain is too low to start state sync process", func(t *testing.T) {
-		bcBolt := newTestChainWithCustomCfg(t, boltCfg)
+		bcBolt, _, _ := chain.NewMultiWithCustomConfig(t, boltCfg)
 		module := bcBolt.GetStateSyncModule()
 		require.NoError(t, module.Init(uint32(2*stateSyncInterval-1)))
 		require.False(t, module.IsActive())
 	})
 
 	t.Run("inactive: bolt chain height is close enough to spout chain height", func(t *testing.T) {
-		bcBolt := newTestChainWithCustomCfg(t, boltCfg)
+		bcBolt, _, _ := chain.NewMultiWithCustomConfig(t, boltCfg)
 		for i := 1; i < int(bcSpout.BlockHeight())-stateSyncInterval; i++ {
 			b, err := bcSpout.GetBlock(bcSpout.GetHeaderHash(i))
 			require.NoError(t, err)
@@ -63,15 +65,16 @@ func TestStateSyncModule_Init(t *testing.T) {
 	})
 
 	t.Run("error: bolt chain is too low to start state sync process", func(t *testing.T) {
-		bcBolt := newTestChainWithCustomCfg(t, boltCfg)
-		require.NoError(t, bcBolt.AddBlock(bcBolt.newBlock()))
+		bcBolt, validatorsBolt, committeeBolt := chain.NewMultiWithCustomConfig(t, boltCfg)
+		eBolt := neotest.NewExecutor(t, bcBolt, validatorsBolt, committeeBolt)
+		eBolt.AddNewBlock(t)
 
 		module := bcBolt.GetStateSyncModule()
 		require.Error(t, module.Init(uint32(3*stateSyncInterval)))
 	})
 
 	t.Run("initialized: no previous state sync point", func(t *testing.T) {
-		bcBolt := newTestChainWithCustomCfg(t, boltCfg)
+		bcBolt, _, _ := chain.NewMultiWithCustomConfig(t, boltCfg)
 
 		module := bcBolt.GetStateSyncModule()
 		require.NoError(t, module.Init(bcSpout.BlockHeight()))
@@ -82,7 +85,7 @@ func TestStateSyncModule_Init(t *testing.T) {
 	})
 
 	t.Run("error: outdated state sync point in the storage", func(t *testing.T) {
-		bcBolt := newTestChainWithCustomCfg(t, boltCfg)
+		bcBolt, _, _ := chain.NewMultiWithCustomConfig(t, boltCfg)
 		module := bcBolt.GetStateSyncModule()
 		require.NoError(t, module.Init(bcSpout.BlockHeight()))
 
@@ -91,7 +94,7 @@ func TestStateSyncModule_Init(t *testing.T) {
 	})
 
 	t.Run("initialized: valid previous state sync point in the storage", func(t *testing.T) {
-		bcBolt := newTestChainWithCustomCfg(t, boltCfg)
+		bcBolt, _, _ := chain.NewMultiWithCustomConfig(t, boltCfg)
 		module := bcBolt.GetStateSyncModule()
 		require.NoError(t, module.Init(bcSpout.BlockHeight()))
 
@@ -104,7 +107,8 @@ func TestStateSyncModule_Init(t *testing.T) {
 	})
 
 	t.Run("initialization from headers/blocks/mpt synced stages", func(t *testing.T) {
-		bcBolt := newTestChainWithCustomCfg(t, boltCfg)
+		bcBolt, validatorsBolt, committeeBolt := chain.NewMultiWithCustomConfig(t, boltCfg)
+		eBolt := neotest.NewExecutor(t, bcBolt, validatorsBolt, committeeBolt)
 		module := bcBolt.GetStateSyncModule()
 		require.NoError(t, module.Init(bcSpout.BlockHeight()))

@@ -263,7 +267,7 @@ func TestStateSyncModule_Init(t *testing.T) {
 
 	// add one more block to the restored chain and start new module: the module should recognise state sync is completed
 	// and regular blocks processing was started
-	require.NoError(t, bcBolt.AddBlock(bcBolt.newBlock()))
+	eBolt.AddNewBlock(t)
 	module = bcBolt.GetStateSyncModule()
 	require.NoError(t, module.Init(bcSpout.BlockHeight()))
 	require.False(t, module.IsActive())
@@ -282,27 +286,31 @@ func TestStateSyncModule_RestoreBasicChain(t *testing.T) {
 		maxTraceable uint32 = 6
 		stateSyncPoint = 20
 	)
-	spoutCfg := func(c *config.Config) {
-		c.ProtocolConfiguration.StateRootInHeader = true
-		c.ProtocolConfiguration.P2PStateExchangeExtensions = true
-		c.ProtocolConfiguration.StateSyncInterval = stateSyncInterval
-		c.ProtocolConfiguration.MaxTraceableBlocks = maxTraceable
+	spoutCfg := func(c *config.ProtocolConfiguration) {
+		c.StateRootInHeader = true
+		c.P2PStateExchangeExtensions = true
+		c.StateSyncInterval = stateSyncInterval
+		c.MaxTraceableBlocks = maxTraceable
+		c.P2PSigExtensions = true // `initBasicChain` assumes Notary is enabled.
 	}
-	bcSpout := newTestChainWithCustomCfg(t, spoutCfg)
-	initBasicChain(t, bcSpout)
+	bcSpoutStore := storage.NewMemoryStore()
+	bcSpout, validators, committee := chain.NewMultiWithCustomConfigAndStore(t, spoutCfg, bcSpoutStore, false)
+	go bcSpout.Run() // Will close it manually at the end.
+	e := neotest.NewExecutor(t, bcSpout, validators, committee)
+	initBasicChain(t, e)
 
 	// make spout chain higher that latest state sync point (add several blocks up to stateSyncPoint+2)
-	require.NoError(t, bcSpout.AddBlock(bcSpout.newBlock()))
+	e.AddNewBlock(t)
 	require.Equal(t, stateSyncPoint+2, int(bcSpout.BlockHeight()))
 
-	boltCfg := func(c *config.Config) {
+	boltCfg := func(c *config.ProtocolConfiguration) {
 		spoutCfg(c)
-		c.ProtocolConfiguration.KeepOnlyLatestState = true
-		c.ProtocolConfiguration.RemoveUntraceableBlocks = true
+		c.KeepOnlyLatestState = true
+		c.RemoveUntraceableBlocks = true
 	}
-	bcBoltStore := memoryStore{storage.NewMemoryStore()}
-	bcBolt := initTestChain(t, bcBoltStore, boltCfg)
-	go bcBolt.Run()
+	bcBoltStore := storage.NewMemoryStore()
+	bcBolt, _, _ := chain.NewMultiWithCustomConfigAndStore(t, boltCfg, bcBoltStore, false)
+	go bcBolt.Run() // Will close it manually at the end.
 	module := bcBolt.GetStateSyncModule()
 
 	t.Run("error: add headers before initialisation", func(t *testing.T) {
@@ -421,9 +429,9 @@ func TestStateSyncModule_RestoreBasicChain(t *testing.T) {
 	require.Equal(t, bcSpout.BlockHeight(), bcBolt.BlockHeight())
 
 	// compare storage states
-	fetchStorage := func(bc *Blockchain) []storage.KeyValue {
+	fetchStorage := func(ps storage.Store, storagePrefix byte) []storage.KeyValue {
 		var kv []storage.KeyValue
-		bc.dao.Store.Seek(storage.SeekRange{Prefix: []byte{byte(bc.dao.Version.StoragePrefix)}}, func(k, v []byte) bool {
+		ps.Seek(storage.SeekRange{Prefix: []byte{storagePrefix}}, func(k, v []byte) bool {
 			key := slice.Copy(k)
 			value := slice.Copy(v)
 			if key[0] == byte(storage.STTempStorage) {
@@ -437,25 +445,19 @@ func TestStateSyncModule_RestoreBasicChain(t *testing.T) {
 		})
 		return kv
 	}
-	expected := fetchStorage(bcSpout)
-	actual := fetchStorage(bcBolt)
+	// Both blockchains are running, so we need to wait until recent changes will be persisted
+	// to the underlying backend store. Close blockchains to ensure persist was completed.
+	bcSpout.Close()
+	bcBolt.Close()
+	expected := fetchStorage(bcSpoutStore, byte(storage.STStorage))
+	actual := fetchStorage(bcBoltStore, byte(storage.STTempStorage))
 	require.ElementsMatch(t, expected, actual)
 
 	// no temp items should be left
-	require.Eventually(t, func() bool {
 	var haveItems bool
-	bcBolt.dao.Store.Seek(storage.SeekRange{Prefix: []byte{byte(storage.STStorage)}}, func(_, _ []byte) bool {
+	bcBoltStore.Seek(storage.SeekRange{Prefix: []byte{byte(storage.STStorage)}}, func(_, _ []byte) bool {
 		haveItems = true
 		return false
 	})
-	return !haveItems
-	}, time.Second*5, time.Millisecond*100)
-	bcBolt.Close()
-
-	// Check restoring with new prefix.
-	bcBolt = initTestChain(t, bcBoltStore, boltCfg)
-	go bcBolt.Run()
-	defer bcBolt.Close()
-	require.Equal(t, storage.STTempStorage, bcBolt.dao.Version.StoragePrefix)
-	require.Equal(t, storage.STTempStorage, bcBolt.persistent.Version.StoragePrefix)
+	require.False(t, haveItems)
 }
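The comparison above works because, once state sync completes, the restored data lives under the swapped storage prefix (STTempStorage for the bolt chain here). A minimal sketch of enumerating items under a prefix, using only the storage.Store API already shown in this hunk:

// countItems is a sketch built on the Seek/SeekRange usage visible above.
func countItems(ps storage.Store, prefix byte) int {
	var n int
	ps.Seek(storage.SeekRange{Prefix: []byte{prefix}}, func(_, _ []byte) bool {
		n++
		return true // keep iterating; return false stops early, as in the test above
	})
	return n
}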

@@ -63,6 +63,13 @@ func (e *Executor) NativeHash(t *testing.T, name string) util.Uint160 {
 	return h
 }
 
+// ContractHash returns contract hash by ID.
+func (e *Executor) ContractHash(t *testing.T, id int32) util.Uint160 {
+	h, err := e.Chain.GetContractScriptHash(id)
+	require.NoError(t, err)
+	return h
+}
+
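Usage mirrors the management test earlier in this diff: contract IDs follow deployment order, so the first non-native contract deployed by initBasicChain has ID 1.

// Example (illustrative, as in the management test above):
rublesHash := e.ContractHash(t, 1)          // first deployed contract
neoHash := e.NativeHash(t, nativenames.Neo) // native contracts are looked up by name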
 // NativeID returns native contract ID by name.
 func (e *Executor) NativeID(t *testing.T, name string) int32 {
 	h := e.NativeHash(t, name)
@@ -132,7 +139,15 @@ func (e *Executor) NewAccount(t *testing.T, expectedGASBalance ...int64) Signer
 // data is an optional argument to `_deploy`.
 // Returns hash of the deploy transaction.
 func (e *Executor) DeployContract(t *testing.T, c *Contract, data interface{}) util.Uint256 {
-	tx := e.NewDeployTx(t, e.Chain, c, data)
+	return e.DeployContractBy(t, e.Validator, c, data)
+}
+
+// DeployContractBy compiles and deploys contract to bc using provided signer.
+// It also checks that precalculated contract hash matches the actual one.
+// data is an optional argument to `_deploy`.
+// Returns hash of the deploy transaction.
+func (e *Executor) DeployContractBy(t *testing.T, signer Signer, c *Contract, data interface{}) util.Uint256 {
+	tx := NewDeployTxBy(t, e.Chain, signer, c, data)
 	e.AddNewBlock(t, tx)
 	e.CheckHalt(t, tx.Hash())
 
@@ -148,8 +163,8 @@ func (e *Executor) DeployContract(t *testing.T, c *Contract, data interface{}) u
 	return tx.Hash()
 }
 
-// DeployContractCheckFAULT compiles and deploys contract to bc. It checks that deploy
-// transaction FAULTed with the specified error.
+// DeployContractCheckFAULT compiles and deploys contract to bc using validator
+// account. It checks that deploy transaction FAULTed with the specified error.
 func (e *Executor) DeployContractCheckFAULT(t *testing.T, c *Contract, data interface{}, errMessage string) {
 	tx := e.NewDeployTx(t, e.Chain, c, data)
 	e.AddNewBlock(t, tx)
@@ -221,8 +236,19 @@ func (e *Executor) CheckGASBalance(t *testing.T, acc util.Uint160, expected *big
 	require.Equal(t, expected, actual, fmt.Errorf("invalid GAS balance: expected %s, got %s", expected.String(), actual.String()))
 }
 
+// EnsureGASBalance ensures that provided account owns amount of GAS that satisfies provided condition.
+func (e *Executor) EnsureGASBalance(t *testing.T, acc util.Uint160, isOk func(balance *big.Int) bool) {
+	actual := e.Chain.GetUtilityTokenBalance(acc)
+	require.True(t, isOk(actual), fmt.Errorf("invalid GAS balance: got %s, condition is not satisfied", actual.String()))
+}
+
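A usage example following directly from the signature above (acc stands for any neotest Signer and is a placeholder):

// Assert the account ended up with a positive GAS balance.
e.EnsureGASBalance(t, acc.ScriptHash(), func(b *big.Int) bool { return b.Sign() > 0 })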
 // NewDeployTx returns new deployment tx for contract signed by committee.
 func (e *Executor) NewDeployTx(t *testing.T, bc blockchainer.Blockchainer, c *Contract, data interface{}) *transaction.Transaction {
+	return NewDeployTxBy(t, bc, e.Validator, c, data)
+}
+
+// NewDeployTxBy returns new deployment tx for contract signed by the specified signer.
+func NewDeployTxBy(t *testing.T, bc blockchainer.Blockchainer, signer Signer, c *Contract, data interface{}) *transaction.Transaction {
 	rawManifest, err := json.Marshal(c.Manifest)
 	require.NoError(t, err)
 
@@ -237,11 +263,11 @@ func (e *Executor) NewDeployTx(t *testing.T, bc blockchainer.Blockchainer, c *Co
 	tx.Nonce = Nonce()
 	tx.ValidUntilBlock = bc.BlockHeight() + 1
 	tx.Signers = []transaction.Signer{{
-		Account: e.Validator.ScriptHash(),
+		Account: signer.ScriptHash(),
 		Scopes: transaction.Global,
 	}}
-	addNetworkFee(bc, tx, e.Validator)
-	require.NoError(t, e.Validator.SignTx(netmode.UnitTestNet, tx))
+	addNetworkFee(bc, tx, signer)
+	require.NoError(t, signer.SignTx(netmode.UnitTestNet, tx))
 	return tx
 }

@@ -19,6 +19,15 @@ type ContractInvoker struct {
 	Signers []Signer
 }
 
+// NewInvoker creates new ContractInvoker for contract with hash h and specified signers.
+func (e *Executor) NewInvoker(h util.Uint160, signers ...Signer) *ContractInvoker {
+	return &ContractInvoker{
+		Executor: e,
+		Hash: h,
+		Signers: signers,
+	}
+}
+
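A small sketch contrasting the two constructors (cHash and acc are placeholders, not values from this diff):

commInv := e.CommitteeInvoker(cHash) // committee multisignature signer
accInv := e.NewInvoker(cHash, acc)   // explicitly provided signer(s)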
 // CommitteeInvoker creates new ContractInvoker for contract with hash h and committee multisignature signer.
 func (e *Executor) CommitteeInvoker(h util.Uint160) *ContractInvoker {
 	return &ContractInvoker{

pkg/rpc/server/testdata/verify/verification_contract.yml (vendored, new file, 2 lines)
@@ -0,0 +1,2 @@
+name: "Verify"
+sourceurl: https://github.com/nspcc-dev/neo-go/

pkg/rpc/server/testdata/verify_args/verification_with_args_contract.yml (vendored, new file, 2 lines)
@@ -0,0 +1,2 @@
+name: "Verify with args"
+sourceurl: https://github.com/nspcc-dev/neo-go/