mirror of https://github.com/nspcc-dev/neo-go.git (synced 2024-11-25 13:47:19 +00:00)
*: improve for loop syntax
Mostly it's about Go 1.22+ syntax with ranging over integers, but it also prefers ranging over slices where possible (it makes the code a little easier to read). Notice that we have a number of dangerous loops where slices are mutated during loop execution; many of these can't be converted since we need proper length evaluation at every iteration.

Signed-off-by: Roman Khimov <roman@nspcc.ru>
This commit is contained in:
parent 133cd1dcf8
commit 1b83dc2476
113 changed files with 267 additions and 267 deletions
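To make the conversion concrete, here is a small self-contained sketch (hypothetical data, not code from this diff; requires Go 1.22+) of the old counted form, the new range forms, and the kind of mutating loop the message says cannot be converted:

    package main

    import "fmt"

    func main() {
        items := []string{"a", "b", "c"}

        // Pre-1.22 counted loop: still valid, just more verbose.
        for i := 0; i < len(items); i++ {
            fmt.Println(items[i])
        }

        // Go 1.22+: range over the slice itself or over an integer.
        for i := range items {
            fmt.Println(items[i])
        }
        for range 3 {
            fmt.Println("tick")
        }

        // Deliberately not converted in such commits: the slice is
        // mutated inside the loop, so len(queue) must be re-evaluated
        // on every iteration, which a range over an integer would not do.
        queue := []int{1, 2, 3}
        for i := 0; i < len(queue); i++ {
            if queue[i]%2 == 1 {
                queue = append(queue, queue[i]*2)
            }
        }
        fmt.Println(queue)
    }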
@ -81,7 +81,7 @@ func TestNEP17Balance(t *testing.T) {
  e.CheckNextLine(t, "^Account "+testcli.TestWalletMultiAccount1)
  // The order of assets is undefined.
- for i := 0; i < 2; i++ {
+ for range 2 {
  line := e.GetNextLine(t)
  if strings.Contains(line, "GAS") {
  e.CheckLine(t, line, "^\\s*GAS:\\s+GasToken \\("+e.Chain.UtilityTokenHash().StringLE()+"\\)")
@ -362,7 +362,7 @@ func getMatchingTokenRPC(ctx *cli.Context, c *rpcclient.Client, addr util.Uint16
  func getMatchingTokenAux(ctx *cli.Context, get func(i int) *wallet.Token, n int, name string, standard string) (*wallet.Token, error) {
  var token *wallet.Token
  var count int
- for i := 0; i < n; i++ {
+ for i := range n {
  t := get(i)
  if t != nil && (t.Hash.StringLE() == name || t.Address() == name || t.Symbol == name || t.Name == name) && t.Standard == standard {
  if count == 1 {
@ -540,7 +540,7 @@ func multiTransferNEP17(ctx *cli.Context) error {
  recipients []transferTarget
  cosignersSepPos = ctx.NArg() // `--` position.
  )
- for i := 0; i < ctx.NArg(); i++ {
+ for i := range ctx.NArg() {
  arg := ctx.Args().Get(i)
  if arg == cmdargs.CosignersSeparator {
  cosignersSepPos = i
@ -561,7 +561,7 @@ func multiTransferNEP17(ctx *cli.Context) error {
  }

  cache := make(map[string]*wallet.Token)
- for i := 0; i < cosignersSepPos; i++ {
+ for i := range cosignersSepPos {
  arg := ctx.Args().Get(i)
  ss := strings.SplitN(arg, ":", 3)
  if len(ss) != 3 {
@ -1062,7 +1062,7 @@ func TestWalletDumpKeys(t *testing.T) {
  e.CheckNextLine(t, pubRegex)
  e.CheckNextLine(t, "^\\s*$")
  e.CheckNextLine(t, "NVTiAjNgagDkTr5HTzDmQP9kPwPHN5BgVq")
- for i := 0; i < 4; i++ {
+ for range 4 {
  e.CheckNextLine(t, pubRegex)
  }
  e.CheckNextLine(t, "^\\s*$")
@ -1085,7 +1085,7 @@ func TestWalletDumpKeys(t *testing.T) {
  cmd := append(cmd, "-a", "NVTiAjNgagDkTr5HTzDmQP9kPwPHN5BgVq")
  e.Run(t, cmd...)
  e.CheckNextLine(t, "3 out of 4 multisig contract")
- for i := 0; i < 4; i++ {
+ for range 4 {
  e.CheckNextLine(t, pubRegex)
  }
  e.CheckEOF(t)
@ -158,7 +158,7 @@ func TestRegisterAndRenew(t *testing.T) {
  c.InvokeWithFeeFail(t, "GAS limit exceeded", defaultNameServiceSysfee, "register", "neo.org", e.CommitteeHash)
  c.InvokeWithFeeFail(t, "GAS limit exceeded", defaultNameServiceDomainPrice, "register", "neo.com", e.CommitteeHash)
  var maxLenFragment string
- for i := 0; i < maxDomainNameFragmentLength; i++ {
+ for i := range maxDomainNameFragmentLength {
  maxLenFragment += "q"
  }
  c.Invoke(t, true, "isAvailable", maxLenFragment+".com")
@ -315,7 +315,7 @@ func setup(t *testing.T, ccs constraint.ConstraintSystem, phase1ResponsePath str
  // receive a []byte, deserialize it, add his contribution and send back to
  // coordinator, like it is done in https://github.com/bnb-chain/zkbnb-setup
  // for BN254 elliptic curve.
- for i := 0; i < nContributionsPhase2; i++ {
+ for i := range nContributionsPhase2 {
  srs2.Contribute()
  }
@ -19,7 +19,7 @@ var valuesPrefix = []byte{0x01}
  func _deploy(data any, isUpdate bool) {
  if !isUpdate {
  ctx := storage.GetContext()
- for i := 0; i < valuesCount; i++ {
+ for i := range valuesCount {
  key := append(valuesPrefix, byte(i))
  storage.Put(ctx, key, i)
  }
@ -128,7 +128,7 @@ func CommitteeAddress() string {
  // Sign signs data by all consensus nodes and returns invocation script.
  func Sign(h hash.Hashable) []byte {
  buf := io.NewBufBinWriter()
- for i := 0; i < 3; i++ {
+ for i := range 3 {
  pKey := PrivateKey(i)
  sig := pKey.SignHashable(uint32(Network()), h)
  if len(sig) != 64 {
@ -377,7 +377,7 @@ func (e *Executor) CheckTxTestInvokeOutput(t *testing.T, scriptSize int) {

  func (e *Executor) CheckScriptDump(t *testing.T, scriptSize int) {
  e.CheckNextLine(t, `INDEX\s+`)
- for i := 0; i < scriptSize; i++ {
+ for range scriptSize {
  e.CheckNextLine(t, `\d+\s+\w+`)
  }
  }
@ -229,7 +229,7 @@ func isExprNil(e ast.Expr) bool {
  // indexOfStruct returns the index of the given field inside that struct.
  // If the struct does not contain that field, it will return -1.
  func indexOfStruct(strct *types.Struct, fldName string) int {
- for i := 0; i < strct.NumFields(); i++ {
+ for i := range strct.NumFields() {
  if strct.Field(i).Name() == fldName {
  return i
  }
@ -383,7 +383,7 @@ func (c *codegen) isVerifyFunc(decl *ast.FuncDecl) bool {
  }

  func (c *codegen) clearSlots(n int) {
- for i := 0; i < n; i++ {
+ for i := range n {
  emit.Opcodes(c.prog.BinWriter, opcode.PUSHNULL)
  c.emitStoreByIndex(varLocal, i)
  }
@ -680,7 +680,7 @@ func (c *codegen) Visit(node ast.Node) ast.Visitor {
  ast.Walk(c, n.Rhs[0])
  c.emitToken(n.Tok, c.typeOf(n.Rhs[0]))
  }
- for i := 0; i < len(n.Lhs); i++ {
+ for i := range n.Lhs {
  switch t := n.Lhs[i].(type) {
  case *ast.Ident:
  if n.Tok == token.DEFINE {
@ -1099,7 +1099,7 @@ func (c *codegen) Visit(node ast.Node) ast.Visitor {
  f := c.typeOf(n.Fun).Underlying().(*types.Signature)
  sz = f.Results().Len()
  }
- for i := 0; i < sz; i++ {
+ for range sz {
  emit.Opcodes(c.prog.BinWriter, opcode.DROP)
  }
  }
@ -1662,7 +1662,7 @@ func (c *codegen) dropStackLabel() {

  func (c *codegen) dropItems(n int) {
  if n < 4 {
- for i := 0; i < n; i++ {
+ for range n {
  emit.Opcodes(c.prog.BinWriter, opcode.DROP)
  }
  return
@ -1930,7 +1930,7 @@ func (c *codegen) convertBuiltin(expr *ast.CallExpr) {
  opcode.INC) // x y cnt+1
  emit.Jmp(c.prog.BinWriter, opcode.JMPL, start)
  c.setLabel(after)
- for i := 0; i < 4; i++ { // leave x on stack
+ for range 4 { // leave x on stack
  emit.Opcodes(c.prog.BinWriter, opcode.DROP)
  }
  } else {
@ -2012,7 +2012,7 @@ func (c *codegen) emitConvert(typ stackitem.Type) {
  func (c *codegen) convertByteArray(elems []ast.Expr) {
  buf := make([]byte, len(elems))
  varIndices := []int{}
- for i := 0; i < len(elems); i++ {
+ for i := range elems {
  t := c.typeAndValueOf(elems[i])
  if t.Value != nil {
  val, _ := constant.Int64Val(t.Value)
@ -2508,7 +2508,7 @@ func removeNOPs(b []byte, nopOffsets []int, sequencePoints map[string][]DebugSeq
  // 2. Convert instructions.
  copyOffset := 0
  l := len(nopOffsets)
- for i := 0; i < l; i++ {
+ for i := range l {
  start := nopOffsets[i]
  end := len(b)
  if i != l-1 {
@ -883,11 +883,11 @@ func TestMultipleFuncSameName(t *testing.T) {
  func TestConstDontUseSlots(t *testing.T) {
  const count = 256
  buf := bytes.NewBufferString("package foo\n")
- for i := 0; i < count; i++ {
+ for i := range count {
  buf.WriteString(fmt.Sprintf("const n%d = 1\n", i))
  }
  buf.WriteString("func Main() int { sum := 0\n")
- for i := 0; i < count; i++ {
+ for i := range count {
  buf.WriteString(fmt.Sprintf("sum += n%d\n", i))
  }
  buf.WriteString("return sum }")
@ -899,11 +899,11 @@ func TestConstDontUseSlots(t *testing.T) {
  func TestUnderscoreVarsDontUseSlots(t *testing.T) {
  const count = 128
  buf := bytes.NewBufferString("package foo\n")
- for i := 0; i < count; i++ {
+ for i := range count {
  buf.WriteString(fmt.Sprintf("var _, n%d = 1, 1\n", i))
  }
  buf.WriteString("func Main() int { sum := 0\n")
- for i := 0; i < count; i++ {
+ for i := range count {
  buf.WriteString(fmt.Sprintf("sum += n%d\n", i))
  }
  buf.WriteString("return sum }")
@ -124,7 +124,7 @@ func (c *codegen) inlineCall(f *funcScope, n *ast.CallExpr) {
  ast.Walk(c, f.decl.Body)
  c.setLabel(c.inlineContext[offSz].returnLabel)
  if c.scope.voidCalls[n] {
- for i := 0; i < f.decl.Type.Results.NumFields(); i++ {
+ for range f.decl.Type.Results.NumFields() {
  emit.Opcodes(c.prog.BinWriter, opcode.DROP)
  }
  }
@ -15,7 +15,7 @@ func TestManyVariables(t *testing.T) {
  const count = 155

  buf := bytes.NewBufferString("package main\n")
- for i := 0; i < count; i++ {
+ for i := range count {
  buf.WriteString(fmt.Sprintf("var a%d = %d\n", i, i))
  }
  buf.WriteString("func Main() int {\nreturn 7\n}\n")
@ -361,7 +361,7 @@ func runNativeTestCase(t *testing.T, b *nef.File, di *compiler.DebugInfo, ctr in
  if t.CallFlag != md.RequiredFlags {
  return fmt.Errorf("wrong flags %v", t.CallFlag)
  }
- for i := 0; i < int(t.ParamCount); i++ {
+ for range t.ParamCount {
  _ = v.Estack().Pop()
  }
  if v.Estack().Len() != 0 {
@ -151,7 +151,7 @@ func runSyscallTestCase(t *testing.T, ic *interop.Context, realName string,
  if ic.VM.Estack().Len() < f.ParamCount {
  return errors.New("not enough parameters")
  }
- for i := 0; i < f.ParamCount; i++ {
+ for range f.ParamCount {
  ic.VM.Estack().Pop()
  }
  if !tc.isVoid {
@ -385,7 +385,7 @@ func TestInteropTypesComparison(t *testing.T) {
  typeCheck := func(t *testing.T, typeName string, typeLen int) {
  t.Run(typeName, func(t *testing.T) {
  var ha, hb string
- for i := 0; i < typeLen; i++ {
+ for i := range typeLen {
  if i == typeLen-1 {
  ha += "2"
  hb += "3"
@ -195,7 +195,7 @@ func TestService_GetVerified(t *testing.T) {
  srv := newTestService(t)
  srv.dbft.Start(0)
  var txs []*transaction.Transaction
- for i := 0; i < 4; i++ {
+ for i := range 4 {
  tx := transaction.New([]byte{byte(opcode.PUSH1)}, 100000)
  tx.Nonce = 123 + uint32(i)
  tx.ValidUntilBlock = 1
@ -208,7 +208,7 @@ func TestService_GetVerified(t *testing.T) {
  hashes := []util.Uint256{txs[0].Hash(), txs[1].Hash(), txs[2].Hash()}

  // Everyone sends a message.
- for i := 0; i < 4; i++ {
+ for i := range 4 {
  p := new(Payload)
  // One PrepareRequest and three ChangeViews.
  if i == 1 {
@ -578,7 +578,7 @@ func addSender(t *testing.T, txs ...*transaction.Transaction) {
  func signTx(t *testing.T, bc Ledger, txs ...*transaction.Transaction) {
  validators := make([]*keys.PublicKey, 4)
  privNetKeys := make([]*keys.PrivateKey, 4)
- for i := 0; i < 4; i++ {
+ for i := range 4 {
  privNetKeys[i] = testchain.PrivateKey(i)
  validators[i] = privNetKeys[i].PublicKey()
  }
@ -200,7 +200,7 @@ func randomPrepareRequest(t *testing.T) *prepareRequest {
  transactionHashes: make([]util.Uint256, txCount),
  }

- for i := 0; i < txCount; i++ {
+ for i := range txCount {
  req.transactionHashes[i] = random.Uint256()
  }
@ -23,7 +23,7 @@ func testRecoveryMessageSetters(t *testing.T, enableStateRoot bool) {
  srv := newTestServiceWithState(t, enableStateRoot)
  privs := make([]*privateKey, testchain.Size())
  pubs := make([]dbft.PublicKey, testchain.Size())
- for i := 0; i < testchain.Size(); i++ {
+ for i := range testchain.Size() {
  privs[i], pubs[i] = getTestValidator(i)
  }
@ -27,7 +27,7 @@ func BenchmarkBlockchain_VerifyWitness(t *testing.B) {
  tx := e.NewTx(t, []neotest.Signer{acc}, e.NativeHash(t, nativenames.Gas), "transfer", acc.ScriptHash(), acc.Script(), 1, nil)

  t.ResetTimer()
- for n := 0; n < t.N; n++ {
+ for range t.N {
  _, err := bc.VerifyWitness(tx.Signers[0].Account, tx, &tx.Scripts[0], 100000000)
  require.NoError(t, err)
  }
@ -92,9 +92,9 @@ func benchmarkForEachNEP17Transfer(t *testing.B, ps storage.Store, startFromBloc
  acc := random.Uint160()
  from := e.Validator.ScriptHash()

- for j := 0; j < chainHeight; j++ {
+ for range chainHeight {
  b := smartcontract.NewBuilder()
- for i := 0; i < transfersPerBlock; i++ {
+ for range transfersPerBlock {
  b.InvokeWithAssert(gasHash, "transfer", from, acc, 1, nil)
  }
  script, err := b.Script()
@ -119,7 +119,7 @@ func benchmarkForEachNEP17Transfer(t *testing.B, ps storage.Store, startFromBloc
  t.ResetTimer()
  t.ReportAllocs()
  t.StartTimer()
- for i := 0; i < t.N; i++ {
+ for range t.N {
  require.NoError(t, bc.ForEachNEP17Transfer(acc, newestTimestamp, func(t *state.NEP17Transfer) (bool, error) {
  if t.Timestamp < oldestTimestamp {
  // iterating from newest to oldest, already have reached the needed height
@ -165,7 +165,7 @@ func benchmarkGasPerVote(t *testing.B, ps storage.Store, nRewardRecords int, rew
  voters := make([]*wallet.Account, sz)
  candidates := make(keys.PublicKeys, sz)
  txs := make([]*transaction.Transaction, 0, len(voters)*3)
- for i := 0; i < sz; i++ {
+ for i := range sz {
  priv, err := keys.NewPrivateKey()
  require.NoError(t, err)
  candidates[i] = priv.PublicKey()
@ -186,7 +186,7 @@ func benchmarkGasPerVote(t *testing.B, ps storage.Store, nRewardRecords int, rew
  e.CheckHalt(t, tx.Hash())
  }
  voteTxs := make([]*transaction.Transaction, 0, sz)
- for i := 0; i < sz; i++ {
+ for i := range sz {
  priv := voters[i].PrivateKey()
  h := priv.GetScriptHash()
  voteTx := e.NewTx(t, []neotest.Signer{neotest.NewSingleSigner(voters[i])}, neoHash, "vote", h, candidates[i].Bytes())
@ -211,7 +211,7 @@ func benchmarkGasPerVote(t *testing.B, ps storage.Store, nRewardRecords int, rew
  t.ResetTimer()
  t.ReportAllocs()
  t.StartTimer()
- for i := 0; i < t.N; i++ {
+ for range t.N {
  _, err := bc.CalculateClaimable(to, end)
  require.NoError(t, err)
  }
@ -83,7 +83,7 @@ func NewTrimmedFromReader(stateRootEnabled bool, br *io.BinReader) (*Block, erro
  }
  if lenHashes > 0 {
  block.Transactions = make([]*transaction.Transaction, lenHashes)
- for i := 0; i < int(lenHashes); i++ {
+ for i := range lenHashes {
  var hash util.Uint256
  hash.DecodeBinary(br)
  block.Transactions[i] = transaction.NewTrimmedTX(hash)
@ -124,7 +124,7 @@ func (b *Block) DecodeBinary(br *io.BinReader) {
  return
  }
  txes := make([]*transaction.Transaction, contentsCount)
- for i := 0; i < int(contentsCount); i++ {
+ for i := range txes {
  tx := &transaction.Transaction{}
  tx.DecodeBinary(br)
  txes[i] = tx
@ -140,7 +140,7 @@ func (b *Block) DecodeBinary(br *io.BinReader) {
  func (b *Block) EncodeBinary(bw *io.BinWriter) {
  b.Header.EncodeBinary(bw)
  bw.WriteVarUint(uint64(len(b.Transactions)))
- for i := 0; i < len(b.Transactions); i++ {
+ for i := range b.Transactions {
  b.Transactions[i].EncodeBinary(bw)
  }
  }
@ -73,7 +73,7 @@ func TestTrimmedBlock(t *testing.T) {

  assert.Equal(t, block.Script, trimmedBlock.Script)
  assert.Equal(t, len(block.Transactions), len(trimmedBlock.Transactions))
- for i := 0; i < len(block.Transactions); i++ {
+ for i := range block.Transactions {
  assert.Equal(t, block.Transactions[i].Hash(), trimmedBlock.Transactions[i].Hash())
  assert.True(t, trimmedBlock.Transactions[i].Trimmed)
  }
@ -1203,7 +1203,7 @@ func (bc *Blockchain) resetTransfers(cache *dao.Simple, height uint32) error {
  oldBatchSize = v[0]
  newBatchSize byte
  )
- for i := byte(0); i < v[0]; i++ { // From oldest to newest transfer of the batch.
+ for range v[0] { // From oldest to newest transfer of the batch.
  var t *state.NEP17Transfer
  if k[0] == byte(storage.STNEP11Transfers) {
  tr := new(state.NEP11Transfer)
@ -89,16 +89,16 @@ func TestRemoveOldTransfers(t *testing.T) {
  acc3 := util.Uint160{3}
  ttl := state.TokenTransferLog{Raw: []byte{1}} // It's incorrect, but who cares.

- for i := uint32(0); i < 3; i++ {
+ for i := range uint32(3) {
  bc.dao.PutTokenTransferLog(acc1, older, i, false, &ttl)
  }
- for i := uint32(0); i < 3; i++ {
+ for i := range uint32(3) {
  bc.dao.PutTokenTransferLog(acc2, newer, i, false, &ttl)
  }
- for i := uint32(0); i < 2; i++ {
+ for i := range uint32(2) {
  bc.dao.PutTokenTransferLog(acc3, older, i, true, &ttl)
  }
- for i := uint32(0); i < 2; i++ {
+ for i := range uint32(2) {
  bc.dao.PutTokenTransferLog(acc3, newer, i, true, &ttl)
  }
@ -106,7 +106,7 @@ func TestRemoveOldTransfers(t *testing.T) {
  require.NoError(t, err)
  _ = bc.removeOldTransfers(0)

- for i := uint32(0); i < 2; i++ {
+ for i := range uint32(2) {
  log, err := bc.dao.GetTokenTransferLog(acc1, older, i, false)
  require.NoError(t, err)
  require.Equal(t, 0, len(log.Raw))
@ -153,7 +153,7 @@ func TestBlockchain_InitWithIncompleteStateJump(t *testing.T) {
  bcSpout := newTestChainWithCustomCfg(t, spountCfg)

  // Generate some content.
- for i := 0; i < len(bcSpout.GetConfig().StandbyCommittee); i++ {
+ for range bcSpout.GetConfig().StandbyCommittee {
  require.NoError(t, bcSpout.AddBlock(bcSpout.newBlock()))
  }
@ -299,7 +299,7 @@ func TestBlockchain_InitializeNeoCache_Bug3181(t *testing.T) {
  // Put some empty blocks to reach N-1 block height, so that newEpoch cached
  // values of native Neo contract require an update on the subsequent cache
  // initialization.
- for i := 0; i < len(bc.GetConfig().StandbyCommittee)-1-2; i++ {
+ for range len(bc.GetConfig().StandbyCommittee) - 1 - 2 {
  e.AddNewBlock(t)
  }
  bc.Close() // Ensure persist is done and persistent store is properly closed.
@ -358,12 +358,12 @@ func TestBlockchain_InitializeNeoCache_Bug3424(t *testing.T) {
  // voters vote for candidates.
  voters := make([]neotest.Signer, committeeSize+1)
  candidates := make([]neotest.Signer, committeeSize+1)
- for i := 0; i < committeeSize+1; i++ {
+ for i := range committeeSize + 1 {
  voters[i] = e.NewAccount(t, 10_0000_0000)
  candidates[i] = e.NewAccount(t, 2000_0000_0000) // enough for one registration
  }
  txes := make([]*transaction.Transaction, 0, committeeSize*3)
- for i := 0; i < committeeSize+1; i++ {
+ for i := range committeeSize + 1 {
  transferTx := neoValidatorsInvoker.PrepareInvoke(t, "transfer", e.Validator.ScriptHash(), voters[i].(neotest.SingleSigner).Account().PrivateKey().GetScriptHash(), int64(committeeSize+1-i)*1000000, nil)
  txes = append(txes, transferTx)
  registerTx := neoValidatorsInvoker.WithSigners(candidates[i]).PrepareInvoke(t, "registerCandidate", candidates[i].(neotest.SingleSigner).Account().PublicKey().Bytes())
@ -458,7 +458,7 @@ func TestBlockchain_InitializeNativeCacheWrtNativeActivations(t *testing.T) {

  // Ensure Notary will be properly initialized and accessing Notary cache works
  // as expected.
- for i := 0; i < notaryEnabledHeight; i++ {
+ for i := range notaryEnabledHeight {
  require.NotPanics(t, func() {
  e.AddNewBlock(t)
  }, h+uint32(i)+1)
@ -641,7 +641,7 @@ func TestBlockchain_GetBlock(t *testing.T) {
  blocks := e.GenerateNewBlocks(t, 10)
  neoValidatorInvoker := e.ValidatorInvoker(e.NativeHash(t, nativenames.Neo))

- for i := 0; i < len(blocks); i++ {
+ for i := range blocks {
  block, err := bc.GetBlock(blocks[i].Hash())
  require.NoErrorf(t, err, "can't get block %d: %s", i, err)
  assert.Equal(t, blocks[i].Index, block.Index)
@ -881,7 +881,7 @@ func TestBlockchain_HasBlock(t *testing.T) {

  blocks := e.GenerateNewBlocks(t, 10)

- for i := 0; i < len(blocks); i++ {
+ for i := range blocks {
  assert.True(t, bc.HasBlock(blocks[i].Hash()))
  }
  newBlock := e.NewUnsignedBlock(t)
@ -1008,7 +1008,7 @@ func TestBlockchain_Subscriptions(t *testing.T) {

  // 3 burn events for every tx and 1 mint for primary node
  require.True(t, len(notificationCh) >= 4)
- for i := 0; i < 4; i++ {
+ for range 4 {
  notif := <-notificationCh
  require.Equal(t, nativeGASHash, notif.ScriptHash)
  }
@ -2327,7 +2327,7 @@ func TestBlockchain_ResetStateErrors(t *testing.T) {
  bc, validators, committee := chain.NewMultiWithCustomConfigAndStore(t, cfg, db, false)
  e := neotest.NewExecutor(t, bc, validators, committee)
  go bc.Run()
- for i := 0; i < chainHeight; i++ {
+ for range chainHeight {
  e.AddNewBlock(t) // get some height
  }
  bc.Close()
@ -388,7 +388,7 @@ func BenchmarkStoreAsTransaction(b *testing.B) {

  b.ResetTimer()
  b.ReportAllocs()
- for n := 0; n < b.N; n++ {
+ for range b.N {
  err := dao.StoreAsTransaction(tx, 1, aer)
  if err != nil {
  b.FailNow()
@ -13,7 +13,7 @@ func BenchmarkOpcode1(t *testing.B) {
  // Just so that we don't always test the same opcode.
  script := []opcode.Opcode{opcode.NOP, opcode.ADD, opcode.SYSCALL, opcode.APPEND}
  l := len(script)
- for n := 0; n < t.N; n++ {
+ for n := range t.N {
  _ = Opcode(feeFactor, script[n%l])
  }
  }
@ -117,7 +117,7 @@ func (bc *Blockchain) genBlocks(n int) ([]*block.Block, error) {
  blocks := make([]*block.Block, n)
  lastHash := bc.topBlock.Load().(*block.Block).Hash()
  lastIndex := bc.topBlock.Load().(*block.Block).Index
- for i := 0; i < n; i++ {
+ for i := range n {
  blocks[i] = newBlock(bc.config.ProtocolConfiguration, uint32(i)+lastIndex+1, lastHash)
  if err := bc.AddBlock(blocks[i]); err != nil {
  return blocks, err
@ -192,7 +192,7 @@ func TestSystemContractCall_Permissions(t *testing.T) {
  e.DeployContract(t, ctrA, nil)

  var hashAStr string
- for i := 0; i < util.Uint160Size; i++ {
+ for i := range util.Uint160Size {
  hashAStr += fmt.Sprintf("%#x", ctrA.Hash[i])
  if i != util.Uint160Size-1 {
  hashAStr += ", "
@ -367,7 +367,7 @@ func TestSnapshotIsolation_Exceptions(t *testing.T) {
  e.DeployContract(t, ctrA, nil)

  var hashAStr string
- for i := 0; i < util.Uint160Size; i++ {
+ for i := range util.Uint160Size {
  hashAStr += fmt.Sprintf("%#x", ctrA.Hash[i])
  if i != util.Uint160Size-1 {
  hashAStr += ", "
@ -599,7 +599,7 @@ func TestRET_after_FINALLY_PanicInsideVoidMethod(t *testing.T) {
  e.DeployContract(t, ctrA, nil)

  var hashAStr string
- for i := 0; i < util.Uint160Size; i++ {
+ for i := range util.Uint160Size {
  hashAStr += fmt.Sprintf("%#x", ctrA.Hash[i])
  if i != util.Uint160Size-1 {
  hashAStr += ", "
@ -659,7 +659,7 @@ func TestRET_after_FINALLY_CallNonVoidAfterVoidMethod(t *testing.T) {
  e.DeployContract(t, ctrA, nil)

  var hashAStr string
- for i := 0; i < util.Uint160Size; i++ {
+ for i := range util.Uint160Size {
  hashAStr += fmt.Sprintf("%#x", ctrA.Hash[i])
  if i != util.Uint160Size-1 {
  hashAStr += ", "
@ -84,7 +84,7 @@ func TestRuntimeGetNotifications(t *testing.T) {
  require.Error(t, GetNotifications(ic))
  })
  t.Run("too many notifications", func(t *testing.T) {
- for i := 0; i <= vm.MaxStackSize; i++ {
+ for range vm.MaxStackSize + 1 {
  ic.Notifications = append(ic.Notifications, state.NotificationEvent{
  ScriptHash: util.Uint160{3},
  Name: "Event3",
@ -20,7 +20,7 @@ func BenchmarkStorageFind(b *testing.B) {
  require.NoError(b, native.PutContractState(context.DAO, contractState))

  items := make(map[string]state.StorageItem)
- for i := 0; i < count; i++ {
+ for range count {
  items["abc"+random.String(10)] = random.Bytes(10)
  }
  for k, v := range items {
@ -33,7 +33,7 @@ func BenchmarkStorageFind(b *testing.B) {

  b.ResetTimer()
  b.ReportAllocs()
- for i := 0; i < b.N; i++ {
+ for range b.N {
  b.StopTimer()
  v.Estack().PushVal(istorage.FindDefault)
  v.Estack().PushVal("abc")
@ -64,7 +64,7 @@ func BenchmarkStorageFindIteratorNext(b *testing.B) {
  require.NoError(b, native.PutContractState(context.DAO, contractState))

  items := make(map[string]state.StorageItem)
- for i := 0; i < count; i++ {
+ for range count {
  items["abc"+random.String(10)] = random.Bytes(10)
  }
  for k, v := range items {
@ -76,7 +76,7 @@ func BenchmarkStorageFindIteratorNext(b *testing.B) {
  require.NotEqual(b, 0, changes)
  b.ReportAllocs()
  b.ResetTimer()
- for i := 0; i < b.N; i++ {
+ for range b.N {
  b.StopTimer()
  v.Estack().PushVal(istorage.FindDefault)
  v.Estack().PushVal("abc")
@ -88,7 +88,7 @@ func BenchmarkStorageFindIteratorNext(b *testing.B) {
  b.FailNow()
  }
  res := context.VM.Estack().Pop().Item()
- for i := 0; i < last; i++ {
+ for range last {
  context.VM.Estack().PushVal(res)
  b.StartTimer()
  require.NoError(b, iterator.Next(context))
@ -49,7 +49,7 @@ func BenchmarkPool(b *testing.B) {
  b.Run(name, func(b *testing.B) {
  p := New(poolSize, 0, false, nil)
  b.ResetTimer()
- for i := 0; i < b.N; i++ {
+ for range b.N {
  for j := range txes {
  if p.Add(txes[j], fe) != nil {
  b.Fail()
@ -120,7 +120,7 @@ func TestOverCapacity(t *testing.T) {
  require.True(t, slices.IsSortedFunc(mp.verifiedTxes, func(a, b item) int { return -a.Compare(b) }))
  }

- for i := 0; i < mempoolSize; i++ {
+ for i := range mempoolSize {
  tx := transaction.New([]byte{byte(opcode.PUSH1)}, 0)
  tx.Nonce = uint32(i)
  tx.Signers = []transaction.Signer{{Account: acc}}
@ -135,7 +135,7 @@ func TestOverCapacity(t *testing.T) {
  bigScript[0] = byte(opcode.PUSH1)
  bigScript[1] = byte(opcode.RET)
  // Fees are also prioritized.
- for i := 0; i < mempoolSize; i++ {
+ for range mempoolSize {
  tx := transaction.New(bigScript, 0)
  tx.NetworkFee = 10000
  tx.Nonce = txcnt
@ -176,7 +176,7 @@ func TestOverCapacity(t *testing.T) {
  require.Equal(t, *uint256.NewInt(9*10000 + 7000), mp.fees[acc].feeSum)

  // High priority always wins over low priority.
- for i := 0; i < mempoolSize; i++ {
+ for range mempoolSize {
  tx := transaction.New([]byte{byte(opcode.PUSH1)}, 0)
  tx.NetworkFee = 8000
  tx.Nonce = txcnt
@ -203,7 +203,7 @@ func TestGetVerified(t *testing.T) {
  mp := New(mempoolSize, 0, false, nil)

  txes := make([]*transaction.Transaction, 0, mempoolSize)
- for i := 0; i < mempoolSize; i++ {
+ for i := range mempoolSize {
  tx := transaction.New([]byte{byte(opcode.PUSH1)}, 0)
  tx.Nonce = uint32(i)
  tx.Signers = []transaction.Signer{{Account: util.Uint160{1, 2, 3}}}
@ -228,7 +228,7 @@ func TestRemoveStale(t *testing.T) {

  txes1 := make([]*transaction.Transaction, 0, mempoolSize/2)
  txes2 := make([]*transaction.Transaction, 0, mempoolSize/2)
- for i := 0; i < mempoolSize; i++ {
+ for i := range mempoolSize {
  tx := transaction.New([]byte{byte(opcode.PUSH1)}, 0)
  tx.Nonce = uint32(i)
  tx.Signers = []transaction.Signer{{Account: util.Uint160{1, 2, 3}}}
@ -10,7 +10,7 @@ func benchmarkBytes(b *testing.B, n Node) {
  inv := n.(interface{ invalidateCache() })
  b.ReportAllocs()
  b.ResetTimer()
- for i := 0; i < b.N; i++ {
+ for range b.N {
  inv.invalidateCache()
  _ = n.Bytes()
  }
@ -318,7 +318,7 @@ func (b *Billet) tryCollapseExtension(curr *ExtensionNode) Node {

  func (b *Billet) tryCollapseBranch(curr *BranchNode) Node {
  canCollapse := true
- for i := 0; i < childrenCount; i++ {
+ for i := range childrenCount {
  if curr.Children[i].Type() == EmptyT {
  continue
  }
@ -26,7 +26,7 @@ var _ Node = (*BranchNode)(nil)
  // NewBranchNode returns a new branch node.
  func NewBranchNode() *BranchNode {
  b := new(BranchNode)
- for i := 0; i < childrenCount; i++ {
+ for i := range childrenCount {
  b.Children[i] = EmptyNode{}
  }
  return b
@ -58,14 +58,14 @@ func (b *BranchNode) Size() int {

  // EncodeBinary implements io.Serializable.
  func (b *BranchNode) EncodeBinary(w *io.BinWriter) {
- for i := 0; i < childrenCount; i++ {
+ for i := range childrenCount {
  encodeBinaryAsChild(b.Children[i], w)
  }
  }

  // DecodeBinary implements io.Serializable.
  func (b *BranchNode) DecodeBinary(r *io.BinReader) {
- for i := 0; i < childrenCount; i++ {
+ for i := range childrenCount {
  no := new(NodeObject)
  no.DecodeBinary(r)
  b.Children[i] = no.Node
@ -51,7 +51,7 @@ func toNibbles(path []byte) []byte {
  // ignoring the first byte (prefix).
  func strToNibbles(path string) []byte {
  result := make([]byte, (len(path)-1)*2)
- for i := 0; i < len(path)-1; i++ {
+ for i := range len(path) - 1 {
  result[i*2] = path[i+1] >> 4
  result[i*2+1] = path[i+1] & 0x0F
  }
@ -50,7 +50,7 @@ func TestTrieStore_TestTrieOperations(t *testing.T) {
  return true
  })
  require.Equal(t, 4, len(res))
- for i := 0; i < len(res); i++ {
+ for i := range res {
  require.Equal(t, byte(storage.STStorage), res[i][0])
  if i < len(res)-1 {
  cmp := bytes.Compare(res[i], res[i+1])
@ -27,7 +27,7 @@ func TestNamesASCII(t *testing.T) {

  func isASCII(s string) bool {
  ok := true
- for i := 0; i < len(s); i++ {
+ for i := range s {
  ok = ok && s[i] <= unicode.MaxASCII
  }
  return ok
@ -442,7 +442,7 @@ func (n *NEO) OnPersist(ic *interop.Context) error {
  // during the last epoch block handling or by initialization code).

  var oldCommittee, newCommittee stackitem.Item
- for i := 0; i < len(cache.committee); i++ {
+ for i := range cache.committee {
  if cache.newEpochCommittee[i].Key != cache.committee[i].Key ||
  (i == 0 && len(cache.newEpochCommittee) != len(cache.committee)) {
  oldCommittee, newCommittee = cache.committee.toNotificationItem(), cache.newEpochCommittee.toNotificationItem()
@ -238,13 +238,13 @@ func TestLedger_GetTransactionSignersInteropAPI(t *testing.T) {
  txHash = tx.Hash().BytesBE()
  acc = c.Committee.ScriptHash().BytesBE()
  )
- for i := 0; i < util.Uint256Size; i++ {
+ for i := range util.Uint256Size {
  hashStr += fmt.Sprintf("%#x", txHash[i])
  if i != util.Uint256Size-1 {
  hashStr += ", "
  }
  }
- for i := 0; i < util.Uint160Size; i++ {
+ for i := range util.Uint160Size {
  accStr += fmt.Sprintf("%#x", acc[i])
  if i != util.Uint160Size-1 {
  accStr += ", "
@ -231,7 +231,7 @@ func TestManagement_NativeUpdate(t *testing.T) {
  })

  // Add some blocks up to the Cockatrice enabling height and check the default natives state.
- for i := 0; i < cockatriceHeight-1; i++ {
+ for range cockatriceHeight - 1 {
  c.AddNewBlock(t)
  for _, name := range nativenames.All {
  h := state.CreateNativeContractHash(name)
@ -275,7 +275,7 @@ func TestManagement_NativeUpdate_Call(t *testing.T) {
  })

  // Invoke Cockatrice-dependant method before Cockatrice should fail.
- for i := 0; i < cockatriceHeight-1; i++ {
+ for range cockatriceHeight - 1 {
  c.InvokeFail(t, "at instruction 45 (SYSCALL): System.Contract.Call failed: method not found: getCommitteeAddress/0", method)
  }
@ -131,12 +131,12 @@ func TestNEO_CommitteeEvents(t *testing.T) {

  voters := make([]neotest.Signer, committeeSize)
  candidates := make([]neotest.Signer, committeeSize)
- for i := 0; i < committeeSize; i++ {
+ for i := range committeeSize {
  voters[i] = e.NewAccount(t, 10_0000_0000)
  candidates[i] = e.NewAccount(t, 2000_0000_0000) // enough for one registration
  }
  txes := make([]*transaction.Transaction, 0, committeeSize*3)
- for i := 0; i < committeeSize; i++ {
+ for i := range committeeSize {
  transferTx := neoValidatorsInvoker.PrepareInvoke(t, "transfer", e.Validator.ScriptHash(), voters[i].(neotest.SingleSigner).Account().PrivateKey().GetScriptHash(), int64(committeeSize-i)*1000000, nil)
  txes = append(txes, transferTx)
@ -196,7 +196,7 @@ func TestNEO_Vote(t *testing.T) {
  validatorsCount := cfg.GetNumOfCNs(0)
  freq := validatorsCount + committeeSize
  advanceChain := func(t *testing.T) {
- for i := 0; i < freq; i++ {
+ for range freq {
  neoCommitteeInvoker.AddNewBlock(t)
  }
  }
@ -217,13 +217,13 @@ func TestNEO_Vote(t *testing.T) {
  // how much GAS voters receive for NEO ownership.
  referenceAccounts := make([]neotest.Signer, committeeSize+1)
  candidates := make([]neotest.Signer, committeeSize+1)
- for i := 0; i < committeeSize+1; i++ {
+ for i := range committeeSize + 1 {
  voters[i] = e.NewAccount(t, 10_0000_0000)
  referenceAccounts[i] = e.NewAccount(t, 10_0000_0000)
  candidates[i] = e.NewAccount(t, 2000_0000_0000) // enough for one registration
  }
  txes := make([]*transaction.Transaction, 0, committeeSize*4-2)
- for i := 0; i < committeeSize+1; i++ {
+ for i := range committeeSize + 1 {
  transferTx := neoValidatorsInvoker.PrepareInvoke(t, "transfer", e.Validator.ScriptHash(), voters[i].(neotest.SingleSigner).Account().PrivateKey().GetScriptHash(), int64(committeeSize+1-i)*1000000, nil)
  txes = append(txes, transferTx)
  transferTx = neoValidatorsInvoker.PrepareInvoke(t, "transfer", e.Validator.ScriptHash(), referenceAccounts[i].(neotest.SingleSigner).Account().PrivateKey().GetScriptHash(), int64(committeeSize+1-i)*1000000, nil)
@ -419,7 +419,7 @@ func TestNEO_GetAccountState(t *testing.T) {
  committeeSize := cfg.GetCommitteeSize(0)
  validatorSize := cfg.GetNumOfCNs(0)
  advanceChain := func(t *testing.T) {
- for i := 0; i < committeeSize; i++ {
+ for range committeeSize {
  neoValidatorInvoker.AddNewBlock(t)
  }
  }
@ -485,7 +485,7 @@ func TestNEO_GetAccountStateInteropAPI(t *testing.T) {
  committeeSize := cfg.GetCommitteeSize(0)
  validatorSize := cfg.GetNumOfCNs(0)
  advanceChain := func(t *testing.T) {
- for i := 0; i < committeeSize; i++ {
+ for range committeeSize {
  neoValidatorInvoker.AddNewBlock(t)
  }
  }
@ -500,7 +500,7 @@ func TestNEO_GetAccountStateInteropAPI(t *testing.T) {
  neoValidatorInvoker.WithSigners(acc).Invoke(t, true, "transfer", acc.ScriptHash(), acc.ScriptHash(), amount, nil)

  var hashAStr string
- for i := 0; i < util.Uint160Size; i++ {
+ for i := range util.Uint160Size {
  hashAStr += fmt.Sprintf("%#x", acc.ScriptHash()[i])
  if i != util.Uint160Size-1 {
  hashAStr += ", "
@ -544,11 +544,11 @@ func TestNEO_CommitteeBountyOnPersist(t *testing.T) {
|
||||||
const singleBounty = 50000000
|
const singleBounty = 50000000
|
||||||
bs := map[int]int64{0: singleBounty}
|
bs := map[int]int64{0: singleBounty}
|
||||||
checkBalances := func() {
|
checkBalances := func() {
|
||||||
for i := 0; i < committeeSize; i++ {
|
for i := range committeeSize {
|
||||||
require.EqualValues(t, bs[i], e.Chain.GetUtilityTokenBalance(hs[i].GetScriptHash()).Int64(), i)
|
require.EqualValues(t, bs[i], e.Chain.GetUtilityTokenBalance(hs[i].GetScriptHash()).Int64(), i)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
for i := 0; i < committeeSize*2; i++ {
|
for i := range committeeSize * 2 {
|
||||||
e.AddNewBlock(t)
|
e.AddNewBlock(t)
|
||||||
bs[(i+1)%committeeSize] += singleBounty
|
bs[(i+1)%committeeSize] += singleBounty
|
||||||
checkBalances()
|
checkBalances()
|
||||||
|
@@ -731,7 +731,7 @@ func TestNEO_CalculateBonus(t *testing.T) {

 t.Run("Zero", func(t *testing.T) {
 initialGASBalance := e.Chain.GetUtilityTokenBalance(accH)
-for i := 0; i < rewardDistance; i++ {
+for range rewardDistance {
 e.AddNewBlock(t)
 }
 // Claim GAS, but there's no NEO on the account, so no GAS should be earned.
@@ -750,13 +750,13 @@ func TestNEO_CalculateBonus(t *testing.T) {

 // Five blocks of NEO owning with default GasPerBlockValue.
 neoValidatorsInvoker.Invoke(t, true, "transfer", e.Validator.ScriptHash(), accH, amount, nil)
-for i := 0; i < rewardDistance/2-2; i++ {
+for range rewardDistance/2 - 2 {
 e.AddNewBlock(t)
 }
 neoCommitteeInvoker.Invoke(t, stackitem.Null{}, "setGasPerBlock", newGASPerBlock*native.GASFactor)

 // Five blocks more with modified GasPerBlock value.
-for i := 0; i < rewardDistance/2; i++ {
+for range rewardDistance / 2 {
 e.AddNewBlock(t)
 }

@@ -783,12 +783,12 @@ func TestNEO_GetCandidates(t *testing.T) {
 // Register a set of candidates and vote for them.
 voters := make([]neotest.Signer, candidatesCount)
 candidates := make([]neotest.Signer, candidatesCount)
-for i := 0; i < candidatesCount; i++ {
+for i := range candidatesCount {
 voters[i] = e.NewAccount(t, 10_0000_0000)
 candidates[i] = e.NewAccount(t, 2000_0000_0000) // enough for one registration
 }
 txes := make([]*transaction.Transaction, 0, candidatesCount*3)
-for i := 0; i < candidatesCount; i++ {
+for i := range candidatesCount {
 transferTx := neoValidatorsInvoker.PrepareInvoke(t, "transfer", e.Validator.ScriptHash(), voters[i].(neotest.SingleSigner).Account().PrivateKey().GetScriptHash(), int64(candidatesCount+1-i)*1000000, nil)
 txes = append(txes, transferTx)
 registerTx := neoValidatorsInvoker.WithSigners(candidates[i]).PrepareInvoke(t, "registerCandidate", candidates[i].(neotest.SingleSigner).Account().PublicKey().Bytes())
@@ -818,7 +818,7 @@ func TestNEO_GetCandidates(t *testing.T) {

 // Check that GetAllCandidates works the same way as GetCandidates.
 checkGetAllCandidates := func(t *testing.T, expected []stackitem.Item) {
-for i := 0; i < len(expected)+1; i++ {
+for i := range len(expected) + 1 {
 w := io.NewBufBinWriter()
 emit.AppCall(w.BinWriter, neoCommitteeInvoker.Hash, "getAllCandidates", callflag.All)
 for j := 0; j < i+1; j++ {
@@ -183,7 +183,7 @@ func TestNotary_MaliciousWithdrawal(t *testing.T) {

 // Perform several deposits to a set of different accounts.
 count := 3
-for i := 0; i < count; i++ {
+for range count {
 h := random.Uint160()
 gasCommitteeInvoker.Invoke(t, true, "transfer", multisigHash, notaryHash, 2*feePerKey, &notary.OnNEP17PaymentData{Account: &h, Till: e.Chain.BlockHeight() + 2})
 }
@@ -85,13 +85,13 @@ func BenchmarkNEP17BalanceBytes(b *testing.B) {

 b.Run("stackitem", func(b *testing.B) {
 b.ReportAllocs()
-for i := 0; i < b.N; i++ {
+for range b.N {
 _, _ = stackitem.SerializeConvertible(&bl)
 }
 })
 b.Run("bytes", func(b *testing.B) {
 b.ReportAllocs()
-for i := 0; i < b.N; i++ {
+for range b.N {
 _ = bl.Bytes(nil)
 }
 })
@@ -100,7 +100,7 @@ func BenchmarkNEP17BalanceBytes(b *testing.B) {

 b.ResetTimer()
 b.ReportAllocs()
-for i := 0; i < b.N; i++ {
+for range b.N {
 _ = bl.Bytes(bs[:0])
 }
 })
@@ -114,13 +114,13 @@ func BenchmarkNEP17BalanceFromBytes(b *testing.B) {

 b.Run("stackitem", func(b *testing.B) {
 b.ReportAllocs()
-for i := 0; i < b.N; i++ {
+for range b.N {
 _ = stackitem.DeserializeConvertible(buf, new(NEP17Balance))
 }
 })
 b.Run("from bytes", func(b *testing.B) {
 b.ReportAllocs()
-for i := 0; i < b.N; i++ {
+for range b.N {
 _, _ = NEP17BalanceFromBytes(buf)
 }
 })
@@ -111,7 +111,7 @@ func (aer *AppExecResult) DecodeBinary(r *io.BinReader) {
 return
 }
 arr := make([]stackitem.Item, sz)
-for i := 0; i < int(sz); i++ {
+for i := range arr {
 arr[i] = stackitem.DecodeBinaryProtected(r)
 if r.Err != nil {
 return
@@ -33,7 +33,7 @@ func BenchmarkAppExecResult_EncodeBinary(b *testing.B) {
 w := io.NewBufBinWriter()
 b.ReportAllocs()
 b.ResetTimer()
-for i := 0; i < b.N; i++ {
+for range b.N {
 w.Reset()
 aer.EncodeBinary(w.BinWriter)
 }
@@ -82,7 +82,7 @@ func (bs *TokenTransferInfo) DecodeBinary(r *io.BinReader) {
 bs.NewNEP17Batch = r.ReadBool()
 lenBalances := r.ReadVarUint()
 m := make(map[int32]uint32, lenBalances)
-for i := 0; i < int(lenBalances); i++ {
+for range lenBalances {
 key := int32(r.ReadU32LE())
 m[key] = r.ReadU32LE()
 }
@@ -142,7 +142,7 @@ func (lg *TokenTransferLog) ForEachNEP11(f func(*NEP11Transfer) (bool, error)) (
 }
 transfers := make([]NEP11Transfer, lg.Size())
 r := io.NewBinReaderFromBuf(lg.Raw[1:])
-for i := 0; i < lg.Size(); i++ {
+for i := range transfers {
 transfers[i].DecodeBinary(r)
 }
 if r.Err != nil {
@@ -164,7 +164,7 @@ func (lg *TokenTransferLog) ForEachNEP17(f func(*NEP17Transfer) (bool, error)) (
 }
 transfers := make([]NEP17Transfer, lg.Size())
 r := io.NewBinReaderFromBuf(lg.Raw[1:])
-for i := 0; i < lg.Size(); i++ {
+for i := range transfers {
 transfers[i].DecodeBinary(r)
 }
 if r.Err != nil {
@@ -73,7 +73,7 @@ func BenchmarkTokenTransferLog_Append(b *testing.B) {
 lg := new(TokenTransferLog)
 b.ResetTimer()
 b.ReportAllocs()
-for i := 0; i < b.N; i++ {
+for range b.N {
 for _, tr := range ts {
 err := lg.Append(tr)
 if err != nil {
@@ -82,7 +82,7 @@ func TestPool_AddRemoveUpdate(t *testing.T) {
 func TestPool_GetBatch(t *testing.T) {
 check := func(t *testing.T, limit int, itemsCount int) {
 mp := NewPool()
-for i := 0; i < itemsCount; i++ {
+for range itemsCount {
 mp.Add(random.Uint256(), []byte{0x01})
 }
 batch := mp.GetBatch(limit)
@@ -28,7 +28,7 @@ func TestStateSyncModule_Init(t *testing.T) {
 }
 bcSpout, validators, committee := chain.NewMultiWithCustomConfig(t, spoutCfg)
 e := neotest.NewExecutor(t, bcSpout, validators, committee)
-for i := 0; i <= 2*stateSyncInterval+int(maxTraceable)+1; i++ {
+for range 2*stateSyncInterval + int(maxTraceable) + 2 {
 e.AddNewBlock(t)
 }

@@ -229,7 +229,7 @@ func benchmarkCachedSeek(t *testing.B, ps Store, psElementsCount, tsElementsCoun

 ts = NewMemCachedStore(ps)
 )
-for i := 0; i < psElementsCount; i++ {
+for i := range psElementsCount {
 // lower KVs with matching prefix that should be found
 ts.Put(append(lowerPrefixGood, random.Bytes(10)...), []byte("value"))
 // lower KVs with non-matching prefix that shouldn't be found
@@ -266,7 +266,7 @@ func benchmarkCachedSeek(t *testing.B, ps Store, psElementsCount, tsElementsCoun

 t.ReportAllocs()
 t.ResetTimer()
-for n := 0; n < t.N; n++ {
+for range t.N {
 ts.Seek(SeekRange{Prefix: searchPrefix}, func(k, v []byte) bool { return true })
 }
 t.StopTimer()
@@ -21,7 +21,7 @@ func BenchmarkMemorySeek(t *testing.B) {
 badPrefix = []byte{2}
 )
 ts := NewMemCachedStore(ms)
-for i := 0; i < count; i++ {
+for range count {
 ts.Put(append(searchPrefix, random.Bytes(10)...), random.Bytes(10))
 ts.Put(append(badPrefix, random.Bytes(10)...), random.Bytes(10))
 }
@@ -30,7 +30,7 @@ func BenchmarkMemorySeek(t *testing.B) {

 t.ReportAllocs()
 t.ResetTimer()
-for n := 0; n < t.N; n++ {
+for range t.N {
 ms.Seek(SeekRange{Prefix: searchPrefix}, func(k, v []byte) bool { return false })
 }
 })
@@ -32,7 +32,7 @@ func init() {
 }

 func BenchmarkDecodeBinary(t *testing.B) {
-for n := 0; n < t.N; n++ {
+for range t.N {
 r := io.NewBinReaderFromBuf(benchTx)
 tx := &Transaction{}
 tx.DecodeBinary(r)
@@ -41,14 +41,14 @@ func BenchmarkDecodeBinary(t *testing.B) {
 }

 func BenchmarkDecodeJSON(t *testing.B) {
-for n := 0; n < t.N; n++ {
+for range t.N {
 tx := &Transaction{}
 require.NoError(t, tx.UnmarshalJSON(benchTxJSON))
 }
 }

 func BenchmarkDecodeFromBytes(t *testing.B) {
-for n := 0; n < t.N; n++ {
+for range t.N {
 _, err := NewTransactionFromBytes(benchTx)
 require.NoError(t, err)
 }
@@ -60,7 +60,7 @@ func BenchmarkTransaction_Bytes(b *testing.B) {

 b.ReportAllocs()
 b.ResetTimer()
-for i := 0; i < b.N; i++ {
+for range b.N {
 _ = tx.Bytes()
 }
 }
@@ -71,7 +71,7 @@ func BenchmarkGetVarSize(b *testing.B) {

 b.ResetTimer()
 b.ReportAllocs()
-for i := 0; i < b.N; i++ {
+for range b.N {
 _ = io.GetVarSize(tx)
 }
 }
@@ -160,7 +160,7 @@ func (t *Transaction) decodeHashableFields(br *io.BinReader, buf []byte) {
 return
 }
 t.Signers = make([]Signer, nsigners)
-for i := 0; i < int(nsigners); i++ {
+for i := range t.Signers {
 t.Signers[i].DecodeBinary(br)
 }
 nattrs := br.ReadVarUint()
@@ -169,7 +169,7 @@ func (t *Transaction) decodeHashableFields(br *io.BinReader, buf []byte) {
 return
 }
 t.Attributes = make([]Attribute, nattrs)
-for i := 0; i < int(nattrs); i++ {
+for i := range t.Attributes {
 t.Attributes[i].DecodeBinary(br)
 }
 t.Script = br.ReadVarBytes(MaxScriptLength)
@@ -197,7 +197,7 @@ func (t *Transaction) decodeBinaryNoSize(br *io.BinReader, buf []byte) {
 return
 }
 t.Scripts = make([]Witness, nscripts)
-for i := 0; i < int(nscripts); i++ {
+for i := range t.Scripts {
 t.Scripts[i].DecodeBinary(br)
 }

@@ -422,7 +422,7 @@ func (t *Transaction) isValid() error {
 if len(t.Signers) == 0 {
 return ErrEmptySigners
 }
-for i := 0; i < len(t.Signers); i++ {
+for i := range t.Signers {
 for j := i + 1; j < len(t.Signers); j++ {
 if t.Signers[i].Account.Equals(t.Signers[j].Account) {
 return ErrNonUniqueSigners
@@ -318,7 +318,7 @@ func BenchmarkTxHash(b *testing.B) {

 // Prime cache.
 tx.Hash()
-for i := 0; i < b.N; i++ {
+for range b.N {
 _ = tx.Hash()
 }
 }
@@ -233,7 +233,7 @@ func readArrayOfConditions(r *io.BinReader, maxDepth int) []WitnessCondition {
 return nil
 }
 a := make([]WitnessCondition, l)
-for i := 0; i < int(l); i++ {
+for i := range a {
 a[i] = decodeBinaryCondition(r, maxDepth-1)
 }
 if r.Err != nil {
@@ -253,7 +253,7 @@ func (c *ConditionAnd) DecodeBinarySpecific(r *io.BinReader, maxDepth int) {

 func arrayToJSON(c WitnessCondition, a []WitnessCondition) ([]byte, error) {
 exprs := make([]json.RawMessage, len(a))
-for i := 0; i < len(a); i++ {
+for i := range a {
 b, err := a[i].MarshalJSON()
 if err != nil {
 return nil, err
@@ -74,7 +74,7 @@ func TestWitnessConditionSerDes(t *testing.T) {
 }
 var maxSubCondAnd = &ConditionAnd{}
 var maxSubCondOr = &ConditionAnd{}
-for i := 0; i < maxSubitems+1; i++ {
+for range maxSubitems + 1 {
 *maxSubCondAnd = append(*maxSubCondAnd, (*ConditionBoolean)(&someBool))
 *maxSubCondOr = append(*maxSubCondOr, (*ConditionBoolean)(&someBool))
 }
@@ -17,7 +17,7 @@ func BenchmarkMerkle(t *testing.B) {

 t.Run("NewMerkleTree", func(t *testing.B) {
 t.ResetTimer()
-for n := 0; n < t.N; n++ {
+for range t.N {
 tr, err := hash.NewMerkleTree(hashes)
 require.NoError(t, err)
 _ = tr.Root()
@@ -25,7 +25,7 @@ func BenchmarkMerkle(t *testing.B) {
 })
 t.Run("CalcMerkleRoot", func(t *testing.B) {
 t.ResetTimer()
-for n := 0; n < t.N; n++ {
+for range t.N {
 _ = hash.CalcMerkleRoot(hashes)
 }
 })
@@ -19,7 +19,7 @@ func NewMerkleTree(hashes []util.Uint256) (*MerkleTree, error) {
 }

 nodes := make([]*MerkleTreeNode, len(hashes))
-for i := 0; i < len(hashes); i++ {
+for i := range hashes {
 nodes[i] = &MerkleTreeNode{
 hash: hashes[i],
 }
@@ -45,7 +45,7 @@ func buildMerkleTree(leaves []*MerkleTreeNode) *MerkleTreeNode {
 }

 parents := make([]*MerkleTreeNode, (len(leaves)+1)/2)
-for i := 0; i < len(parents); i++ {
+for i := range parents {
 parents[i] = &MerkleTreeNode{}
 parents[i].leftChild = leaves[i*2]
 leaves[i*2].parent = parents[i]
@@ -81,7 +81,7 @@ func CalcMerkleRoot(hashes []util.Uint256) util.Uint256 {

 scratch := make([]byte, 64)
 parents := hashes[:(len(hashes)+1)/2]
-for i := 0; i < len(parents); i++ {
+for i := range parents {
 copy(scratch, hashes[i*2].BytesBE())

 if i*2+1 == len(hashes) {
@@ -149,7 +149,7 @@ func xor(a, b []byte) []byte {
 panic("cannot XOR non equal length arrays")
 }
 dst := make([]byte, len(a))
-for i := 0; i < len(dst); i++ {
+for i := range dst {
 dst[i] = a[i] ^ b[i]
 }
 return dst
@@ -26,7 +26,7 @@ func TestEncodeDecodeInfinity(t *testing.T) {
 }

 func TestEncodeDecodePublicKey(t *testing.T) {
-for i := 0; i < 4; i++ {
+for range 4 {
 k, err := NewPrivateKey()
 require.NoError(t, err)
 p := k.PublicKey()
@@ -225,7 +225,7 @@ func BenchmarkPublicEqual(t *testing.B) {
 k12 := getPubKey(t)
 k2, err := NewPublicKeyFromString("03b209fd4f53a7170ea4444e0cb0a6bb6a53c2bd016926989cf85f9b0fba17a70c")
 require.NoError(t, err)
-for n := 0; n < t.N; n++ {
+for range t.N {
 _ = k11.Equal(k12)
 _ = k11.Equal(k2)
 }
@@ -233,14 +233,14 @@ func BenchmarkPublicEqual(t *testing.B) {

 func BenchmarkPublicBytes(t *testing.B) {
 k := getPubKey(t)
-for n := 0; n < t.N; n++ {
+for range t.N {
 _ = k.Bytes()
 }
 }

 func BenchmarkPublicUncompressedBytes(t *testing.B) {
 k := getPubKey(t)
-for n := 0; n < t.N; n++ {
+for range t.N {
 _ = k.Bytes()
 }
 }
@@ -249,7 +249,7 @@ func BenchmarkPublicDecodeBytes(t *testing.B) {
 keyBytes, err := hex.DecodeString("03b209fd4f53a7170ea4444e0cb0a6bb6a53c2bd016926989cf85f9b0fba17a70c")
 require.NoError(t, err)
 k := new(PublicKey)
-for n := 0; n < t.N; n++ {
+for range t.N {
 require.NoError(t, k.DecodeBytes(keyBytes))
 }
 }
@@ -10,7 +10,7 @@ func BenchmarkToPreallocatedBytes(b *testing.B) {
 vn := big.NewInt(-100500)
 buf := make([]byte, 4)

-for i := 0; i < b.N; i++ {
+for range b.N {
 _ = ToPreallocatedBytes(v, buf[:0])
 _ = ToPreallocatedBytes(vn, buf[:0])
 }
@@ -41,7 +41,7 @@ func FromBytes(data []byte) *big.Int {

 lw := size / wordSizeBytes
 ws := make([]big.Word, lw+1)
-for i := 0; i < lw; i++ {
+for i := range lw {
 base := i * wordSizeBytes
 for j := base + 7; j >= base; j-- {
 ws[i] <<= 8
@@ -13,7 +13,7 @@ func FuzzFromBytes(f *testing.F) {
 for _, tc := range testCases {
 f.Add(tc.buf)
 }
-for i := 0; i < 50; i++ {
+for range 50 {
 for j := 1; j < MaxBytesLen; j++ {
 b := make([]byte, j)
 _, err := rand.Read(b)
@@ -17,7 +17,7 @@ var _pow10 []*big.Int

 func init() {
 var p = int64(1)
-for i := 0; i <= maxAllowedPrecision; i++ {
+for range maxAllowedPrecision + 1 {
 _pow10 = append(_pow10, big.NewInt(p))
 p *= 10
 }
@@ -128,7 +128,7 @@ func (r *BinReader) ReadArray(t any, maxSize ...int) {
 l := int(lu)
 arr := reflect.MakeSlice(sliceType, l, l)

-for i := 0; i < l; i++ {
+for i := range l {
 var elem reflect.Value
 if isPtr {
 elem = reflect.New(elemType.Elem())
@@ -78,7 +78,7 @@ func (w *BinWriter) WriteArray(arr any) {
 typ := val.Type().Elem()

 w.WriteVarUint(uint64(val.Len()))
-for i := 0; i < val.Len(); i++ {
+for i := range val.Len() {
 el, ok := val.Index(i).Interface().(encodable)
 if !ok {
 el, ok = val.Index(i).Addr().Interface().(encodable)
@@ -218,7 +218,7 @@ func TestBufBinWriterErr(t *testing.T) {

 func TestBufBinWriterReset(t *testing.T) {
 bw := NewBufBinWriter()
-for i := 0; i < 3; i++ {
+for i := range 3 {
 bw.WriteU32LE(uint32(i))
 assert.Nil(t, bw.Err)
 _ = bw.Bytes()
@@ -75,7 +75,7 @@ func GetVarSize(value any) int {
 if valueLength != 0 {
 switch reflect.ValueOf(value).Index(0).Interface().(type) {
 case Serializable:
-for i := 0; i < valueLength; i++ {
+for i := range valueLength {
 valueSize += GetVarSize(v.Index(i).Interface())
 }
 case uint8, int8:
@@ -49,7 +49,7 @@ func (p *ProofWithKey) EncodeBinary(w *io.BinWriter) {
 func (p *ProofWithKey) DecodeBinary(r *io.BinReader) {
 p.Key = r.ReadVarBytes()
 sz := r.ReadVarUint()
-for i := uint64(0); i < sz; i++ {
+for range sz {
 p.Proof = append(p.Proof, r.ReadVarBytes())
 }
 }
@@ -376,7 +376,7 @@ func (e *Executor) AddNewBlock(t testing.TB, txs ...*transaction.Transaction) *b
 // GenerateNewBlocks adds the specified number of empty blocks to the chain.
 func (e *Executor) GenerateNewBlocks(t testing.TB, count int) []*block.Block {
 blocks := make([]*block.Block, count)
-for i := 0; i < count; i++ {
+for i := range count {
 blocks[i] = e.AddNewBlock(t)
 }
 return blocks
@@ -133,7 +133,7 @@ func (m multiSigner) Script() []byte {
 // SignHashable implements Signer interface.
 func (m multiSigner) SignHashable(magic uint32, item hash.Hashable) []byte {
 var script []byte
-for i := 0; i < m.m; i++ {
+for i := range m.m {
 sign := m.accounts[i].SignHashable(netmode.Magic(magic), item)
 script = append(script, byte(opcode.PUSHDATA1), keys.SignatureLen)
 script = append(script, sign...)
@@ -96,7 +96,7 @@ func TestDefaultDiscoverer(t *testing.T) {
 // Added addresses should end up in the pool and in the unconnected set.
 // Done twice to check re-adding unconnected addresses, which should be
 // a no-op.
-for i := 0; i < 2; i++ {
+for range 2 {
 d.BackFill(set1...)
 assert.Equal(t, len(set1), d.PoolCount())
 set1D := d.UnconnectedPeers()
@@ -110,7 +110,7 @@ func TestDefaultDiscoverer(t *testing.T) {
 // Request should make goroutines dial our addresses draining the pool.
 d.RequestRemote(len(set1))
 dialled := make([]string, 0)
-for i := 0; i < len(set1); i++ {
+for range set1 {
 select {
 case a := <-ts.dialCh:
 dialled = append(dialled, a)
@@ -182,8 +182,8 @@ func TestDefaultDiscoverer(t *testing.T) {

 dialledBad := make([]string, 0)
 d.RequestRemote(len(set1))
-for i := 0; i < connRetries; i++ {
-for j := 0; j < len(set1); j++ {
+for i := range connRetries {
+for j := range set1 {
 select {
 case a := <-ts.dialCh:
 dialledBad = append(dialledBad, a)
@@ -194,8 +194,8 @@ func TestDefaultDiscoverer(t *testing.T) {
 }
 require.Eventually(t, func() bool { return d.PoolCount() == 0 }, 2*time.Second, 50*time.Millisecond)
 slices.Sort(dialledBad)
-for i := 0; i < len(set1); i++ {
-for j := 0; j < connRetries; j++ {
+for i := range set1 {
+for j := range connRetries {
 assert.Equal(t, set1[i], dialledBad[i*connRetries+j])
 }
 }
@@ -222,7 +222,7 @@ func TestSeedDiscovery(t *testing.T) {
 tryMaxWait = 1 // Don't waste time.

 d.RequestRemote(len(seeds))
-for i := 0; i < connRetries*2; i++ {
+for range connRetries * 2 {
 for range seeds {
 select {
 case <-ts.dialCh:
@@ -10,7 +10,7 @@ import (
 )

 func FuzzMessageDecode(f *testing.F) {
-for i := 0; i < 100; i++ {
+for range 100 {
 seed := make([]byte, rand.IntN(1000))
 random.Fill(seed)
 f.Add(seed)
@@ -79,7 +79,7 @@ func BenchmarkMessageBytes(b *testing.B) {

 b.ReportAllocs()
 b.ResetTimer()
-for i := 0; i < b.N; i++ {
+for range b.N {
 _, err := msg.Bytes()
 if err != nil {
 b.FailNow()
@@ -44,7 +44,7 @@ func TestEncodeDecodeAddress(t *testing.T) {
 }

 func fillAddressList(al *AddressList) {
-for i := 0; i < len(al.Addrs); i++ {
+for i := range al.Addrs {
 e, _ := net.ResolveTCPAddr("tcp", fmt.Sprintf("127.0.0.1:20%d", i))
 al.Addrs[i] = NewAddressAndTime(e, time.Now(), capability.Capabilities{
 {
@@ -44,7 +44,7 @@ func (p *Headers) DecodeBinary(br *io.BinReader) {

 p.Hdrs = make([]*block.Header, lenHeaders)

-for i := 0; i < int(lenHeaders); i++ {
+for i := range p.Hdrs {
 header := &block.Header{}
 header.StateRootEnabled = p.StateRootInHeader
 header.DecodeBinary(br)
@@ -56,7 +56,7 @@ func testHeadersEncodeDecode(t *testing.T, headers *Headers, expected int, retEr
 assert.Equal(t, retErr, rErr)
 assert.Equal(t, expected, len(headersDecode.Hdrs))

-for i := 0; i < len(headersDecode.Hdrs); i++ {
+for i := range headersDecode.Hdrs {
 assert.Equal(t, headers.Hdrs[i].Version, headersDecode.Hdrs[i].Version)
 assert.Equal(t, headers.Hdrs[i].Index, headersDecode.Hdrs[i].Index)
 assert.Equal(t, headers.Hdrs[i].Script, headersDecode.Hdrs[i].Script)
@@ -26,7 +26,7 @@ func (d *MPTData) DecodeBinary(r *io.BinReader) {
 r.Err = errors.New("empty MPT nodes list")
 return
 }
-for i := uint64(0); i < sz; i++ {
+for range sz {
 d.Nodes = append(d.Nodes, r.ReadVarBytes())
 if r.Err != nil {
 return
@@ -288,7 +288,7 @@ func (s *Server) Start() {

 var txThreads = optimalNumOfThreads()
 s.txHandlerLoopWG.Add(txThreads)
-for i := 0; i < txThreads; i++ {
+for range txThreads {
 go s.txHandlerLoop()
 }
 go s.broadcastTxLoop()
@@ -1487,7 +1487,7 @@ func (s *Server) RequestTx(hashes ...util.Uint256) {
 slices.SortFunc(sorted, util.Uint256.Compare)
 s.txCbList.Store(sorted)

-for i := 0; i <= len(hashes)/payload.MaxHashesCount; i++ {
+for i := range len(hashes)/payload.MaxHashesCount + 1 {
 start := i * payload.MaxHashesCount
 stop := (i + 1) * payload.MaxHashesCount
 stop = min(stop, len(hashes))
@@ -356,7 +356,7 @@ func TestWSClientNonBlockingEvents(t *testing.T) {
 wsc.subscriptionsLock.RUnlock()

 // Check that receiver was closed after overflow.
-for i := 0; i < chCap; i++ {
+for range chCap {
 _, ok := <-bCh
 require.True(t, ok)
 }
@@ -836,7 +836,7 @@ func TestWSConcurrentAccess(t *testing.T) {
 require.NoError(t, err)
 batchCount := 100
 completed := &atomic.Int32{}
-for i := 0; i < batchCount; i++ {
+for range batchCount {
 go func() {
 _, err := wsc.GetBlockCount()
 require.NoError(t, err)
@@ -305,7 +305,7 @@ func TestNotary(t *testing.T) {
 // check that tx size was updated
 require.Equal(t, io.GetVarSize(completedTx), completedTx.Size())

-for i := 0; i < len(completedTx.Scripts)-1; i++ {
+for i := range len(completedTx.Scripts) - 1 {
 _, err := bc.VerifyWitness(completedTx.Signers[i].Account, completedTx, &completedTx.Scripts[i], -1)
 require.NoError(t, err)
 }
@@ -393,7 +393,7 @@ func TestNotary(t *testing.T) {

 var submittedRequests []*payload.P2PNotaryRequest
 // sent only nSigs (m out of n) requests - it should be enough to complete min tx
-for i := 0; i < nSigs; i++ {
+for i := range nSigs {
 submittedRequests = append(submittedRequests, requests[sendOrder[i]])

 ntr1.OnNewRequest(requests[sendOrder[i]])
@@ -54,7 +54,7 @@ func TestFilter(t *testing.T) {
 func TestFilterOOM(t *testing.T) {
 construct := func(depth, width int) string {
 data := `$`
-for i := 0; i < depth; i++ {
+for range depth {
 data = data + `[0`
 for j := 1; j < width; j++ {
 data = data + `,0`
@@ -218,7 +218,7 @@ func (o *Oracle) start() {
 o.running = true
 o.respMtx.Unlock()

-for i := 0; i < o.MainCfg.MaxConcurrentRequests; i++ {
+for range o.MainCfg.MaxConcurrentRequests {
 go o.runRequestWorker()
 }
 go o.ResponseHandler.Run()
@@ -419,7 +419,7 @@ func TestNotYetRunningOracle(t *testing.T) {

 var req state.OracleRequest
 var reqs = make(map[uint64]*state.OracleRequest)
-for i := uint64(0); i < 3; i++ {
+for i := range uint64(3) {
 reqs[i] = &req
 }
 orc.AddRequests(reqs) // 0, 1, 2 added to pending.
@@ -1455,7 +1455,7 @@ func TestClient_IteratorSessions(t *testing.T) {
 // storageItemsCount is the amount of storage items stored in Storage contract, it's hard-coded in the contract code.
 const storageItemsCount = 255
 expected := make([][]byte, storageItemsCount)
-for i := 0; i < storageItemsCount; i++ {
+for i := range storageItemsCount {
 expected[i] = stackitem.NewBigInteger(big.NewInt(int64(i))).Bytes()
 }
 slices.SortFunc(expected, func(a, b []byte) int {
@@ -1483,7 +1483,7 @@ func TestClient_IteratorSessions(t *testing.T) {
 set, err := c.TraverseIterator(sID, iID, maxNum)
 require.NoError(t, err)
 require.Equal(t, maxNum, len(set))
-for i := 0; i < maxNum; i++ {
+for i := range maxNum {
 // According to the Storage contract code.
 require.Equal(t, expected[start+i], set[i].Value().([]byte), start+i)
 }
@@ -1503,7 +1503,7 @@ func TestClient_IteratorSessions(t *testing.T) {

 t.Run("traverse, request more than exists", func(t *testing.T) {
 sID, iID := prepareSession(t)
-for i := 0; i < storageItemsCount/config.DefaultMaxIteratorResultItems; i++ {
+for range storageItemsCount / config.DefaultMaxIteratorResultItems {
 set, err := c.TraverseIterator(sID, iID, config.DefaultMaxIteratorResultItems)
 require.NoError(t, err)
 require.Equal(t, config.DefaultMaxIteratorResultItems, len(set))
@@ -1533,7 +1533,7 @@ func TestClient_IteratorSessions(t *testing.T) {
 assert.Equal(t, 1, len(set))
 wg.Done()
 }
-for i := 0; i < storageItemsCount; i++ {
+for range storageItemsCount {
 go check(t)
 }
 wg.Wait()
@@ -1677,7 +1677,7 @@ func TestClient_Iterator_SessionConfigVariations(t *testing.T) {
 actual, err := c.TraverseIterator(res.Session, *iterator.ID, maxNum)
 require.NoError(t, err)
 require.Equal(t, maxNum, len(actual))
-for i := 0; i < maxNum; i++ {
+for i := range maxNum {
 // According to the Storage contract code.
 require.Equal(t, expected[i], actual[i].Value().([]byte), i)
 }
@@ -1696,7 +1696,7 @@ func TestClient_Iterator_SessionConfigVariations(t *testing.T) {

 // Fill in expected stackitems set during the first test.
 expected = make([][]byte, storageItemsCount)
-for i := 0; i < storageItemsCount; i++ {
+for i := range storageItemsCount {
 expected[i] = stackitem.NewBigInteger(big.NewInt(int64(i))).Bytes()
 }
 slices.SortFunc(expected, func(a, b []byte) int {
@@ -1761,7 +1761,7 @@ func TestClient_Iterator_SessionConfigVariations(t *testing.T) {
 require.NotEmpty(t, iterator.Values)
 require.True(t, iterator.Truncated)
 require.Equal(t, rpcSrv.config.MaxIteratorResultItems, len(iterator.Values))
-for i := 0; i < rpcSrv.config.MaxIteratorResultItems; i++ {
+for i := range rpcSrv.config.MaxIteratorResultItems {
 // According to the Storage contract code.
 require.Equal(t, expected[i], iterator.Values[i].Value().([]byte), i)
 }
@@ -20,7 +20,7 @@ func BenchmarkUnmarshal(b *testing.B) {
 b.Run("unmarshal", func(b *testing.B) {
 b.ReportAllocs()
 b.ResetTimer()
-for i := 0; i < b.N; i++ {
+for range b.N {
 b.StopTimer()
 in := new(In)
 b.StartTimer()
@@ -33,7 +33,7 @@ func BenchmarkUnmarshal(b *testing.B) {
 b.Run("decode data", func(b *testing.B) {
 b.ReportAllocs()
 b.ResetTimer()
-for i := 0; i < b.N; i++ {
+for range b.N {
 b.StopTimer()
 r := NewRequest()
 r.In = new(In)
@@ -978,7 +978,7 @@ func (s *Server) calculateNetworkFee(reqParams params.Params) (any, *neorpc.Erro
 paramz = []manifest.Parameter{{Type: smartcontract.SignatureType}}
 } else if nSigs, _, ok := vm.ParseMultiSigContract(w.VerificationScript); ok {
 paramz = make([]manifest.Parameter, nSigs)
-for j := 0; j < nSigs; j++ {
+for j := range paramz {
 paramz[j] = manifest.Parameter{Type: smartcontract.SignatureType}
 }
 }
@@ -101,7 +101,7 @@ func getTestBlocks(t *testing.T) []*block.Block {
 nBlocks := br.ReadU32LE()
 require.Nil(t, br.Err)
 blocks := make([]*block.Block, 0, int(nBlocks))
-for i := 0; i < int(nBlocks); i++ {
+for range nBlocks {
 _ = br.ReadU32LE()
 b := block.New(false)
 b.DecodeBinary(br)
@@ -2974,7 +2974,7 @@ func testRPCProtocol(t *testing.T, doRPCCall func(string, string, *testing.T) []
 for _, tx := range mp.GetVerifiedTransactions() {
 expected = append(expected, tx.Hash())
 }
-for i := 0; i < 5; i++ {
+for range 5 {
 tx := transaction.New([]byte{byte(opcode.PUSH1)}, 0)
 tx.Signers = []transaction.Signer{{Account: util.Uint160{1, 2, 3}}}
 assert.NoError(t, mp.Add(tx, &FeerStub{}))
@@ -4138,7 +4138,7 @@ func BenchmarkHandleIn(b *testing.B) {
 do := func(b *testing.B, req []byte) {
 b.ReportAllocs()
 b.ResetTimer()
-for i := 0; i < b.N; i++ {
+for range b.N {
 b.StopTimer()
 in := new(params.In)
 b.StartTimer()
@@ -148,7 +148,7 @@ func TestSubscriptions(t *testing.T) {
 break
 }
 }
-for i := 0; i < len(b.Transactions); i++ {
+for i := range b.Transactions {
 if i > 0 {
 resp = getNotification(t, respMsgs)
 }
@@ -460,7 +460,7 @@ func TestFilteredBlockSubscriptions(t *testing.T) {
 blockSubID := callSubscribe(t, c, respMsgs, `["block_added", {"primary":3}]`)

 var expectedCnt int
-for i := 0; i < numBlocks; i++ {
+for i := range numBlocks {
 primary := uint32(i % 4)
 if primary == 3 {
 expectedCnt++
@@ -469,7 +469,7 @@ func TestFilteredBlockSubscriptions(t *testing.T) {
 require.NoError(t, chain.AddBlock(b))
 }

-for i := 0; i < expectedCnt; i++ {
+for range expectedCnt {
 var resp = new(neorpc.Notification)
 select {
 case body := <-respMsgs:
@@ -493,7 +493,7 @@ func TestHeaderOfAddedBlockSubscriptions(t *testing.T) {
 headerSubID := callSubscribe(t, c, respMsgs, `["header_of_added_block", {"primary":3}]`)

 var expectedCnt int
-for i := 0; i < numBlocks; i++ {
+for i := range numBlocks {
 primary := uint32(i % 4)
 if primary == 3 {
 expectedCnt++
@@ -502,7 +502,7 @@ func TestHeaderOfAddedBlockSubscriptions(t *testing.T) {
 require.NoError(t, chain.AddBlock(b))
 }

-for i := 0; i < expectedCnt; i++ {
+for range expectedCnt {
 var resp = new(neorpc.Notification)
 select {
 case body := <-respMsgs:
@@ -523,7 +523,7 @@ func TestMaxSubscriptions(t *testing.T) {
 var subIDs = make([]string, 0)
 _, _, c, respMsgs := initCleanServerAndWSClient(t)

-for i := 0; i < maxFeeds+1; i++ {
+for i := range maxFeeds + 1 {
 var s string
 resp := callWSGetRaw(t, c, `{"jsonrpc": "2.0", "method": "subscribe", "params": ["block_added"], "id": 1}`, respMsgs)
 if i < maxFeeds {
@@ -606,7 +606,7 @@ func TestWSClientsLimit(t *testing.T) {
 var wg sync.WaitGroup

 // Dial effectiveClients connections in parallel
-for i := 0; i < effectiveClients; i++ {
+for i := range effectiveClients {
 wg.Add(1)
 j := i
 go func() {
@@ -658,11 +658,11 @@ func TestSubscriptionOverflow(t *testing.T) {
 require.NotNil(t, resp.Result)

 // Push a lot of new blocks, but don't read events for them.
-for i := 0; i < blockCnt; i++ {
+for range blockCnt {
 b := testchain.NewBlock(t, chain, 1, 0)
 require.NoError(t, chain.AddBlock(b))
 }
-for i := 0; i < blockCnt; i++ {
+for range blockCnt {
 resp := getNotification(t, respMsgs)
 if resp.Event != neorpc.BlockEventID {
 require.Equal(t, neorpc.MissedEventID, resp.Event)
@@ -39,7 +39,7 @@ import (
 func testSignStateRoot(t *testing.T, r *state.MPTRoot, pubs keys.PublicKeys, accs ...*wallet.Account) []byte {
 n := smartcontract.GetMajorityHonestNodeCount(len(accs))
 w := io.NewBufBinWriter()
-for i := 0; i < n; i++ {
+for i := range n {
 sig := accs[i].PrivateKey().SignHashable(uint32(netmode.UnitTestNet), r)
 emit.Bytes(w.BinWriter, sig)
 }
@@ -323,7 +323,7 @@ func TestStateroot_GetLatestStateHeight(t *testing.T) {
 basicchain.Init(t, "../../../", e)

 m := bc.GetStateModule()
-for i := uint32(0); i < bc.BlockHeight(); i++ {
+for i := range bc.BlockHeight() {
 r, err := m.GetStateRoot(i)
 require.NoError(t, err)
 h, err := bc.GetStateModule().GetLatestStateHeight(r.Root)
@@ -24,7 +24,7 @@ func TestCreateMultiSigRedeemScript(t *testing.T) {
 br := io.NewBinReaderFromBuf(out)
 assert.Equal(t, opcode.PUSH3, opcode.Opcode(br.ReadB()))

-for i := 0; i < len(validators); i++ {
+for i := range validators {
 assert.EqualValues(t, opcode.PUSHDATA1, br.ReadB())
 bb := br.ReadVarBytes()
 require.NoError(t, br.Err)
@@ -61,13 +61,13 @@ func TestCreateDefaultMultiSigRedeemScript(t *testing.T) {
 checkM(2)

 // 3 out of 4
-for i := 0; i < 2; i++ {
+for range 2 {
 addKey()
 }
 checkM(3)

 // 5 out of 6
-for i := 0; i < 2; i++ {
+for range 2 {
 addKey()
 }
 checkM(5)
@@ -77,7 +77,7 @@ func TestCreateDefaultMultiSigRedeemScript(t *testing.T) {
 checkM(5)

 // 7 out of 10
-for i := 0; i < 3; i++ {
+for range 3 {
 addKey()
 }
 checkM(7)
@@ -273,7 +273,7 @@ func TestIsValid(t *testing.T) {
     m.Groups = m.Groups[:0]

     t.Run("invalid, unserializable", func(t *testing.T) {
-        for i := 0; i < stackitem.MaxSerialized; i++ {
+        for i := range stackitem.MaxSerialized {
             m.ABI.Events = append(m.ABI.Events, Event{
                 Name:       fmt.Sprintf("Event%d", i),
                 Parameters: []Parameter{},

@@ -133,11 +133,11 @@ func VerifyProof(a []byte, b []byte, c []byte, publicInput [][]byte) bool {
         panic("error: inputlen or iclen")
     }
     icPoints := make([]crypto.Bls12381Point, iclen)
-    for i := 0; i < iclen; i++ {
+    for i := range iclen {
         icPoints[i] = crypto.Bls12381Deserialize(ic[i])
     }
     acc := icPoints[0]
-    for i := 0; i < inputlen; i++ {
+    for i := range inputlen {
         scalar := publicInput[i] // 32-bytes LE field element.
         temp := crypto.Bls12381Mul(icPoints[i+1], scalar, false)
         acc = crypto.Bls12381Add(acc, temp)

@@ -7,7 +7,7 @@ import (
 func BenchmarkUint256MarshalJSON(b *testing.B) {
     v := Uint256{0x01, 0x02, 0x03}

-    for i := 0; i < b.N; i++ {
+    for range b.N {
         _, _ = v.MarshalJSON()
     }
 }

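The benchmark hunks apply the same rewrite to b.N, which is a plain int and can therefore be ranged over directly. A small self-contained benchmark sketch assuming Go 1.22+ (the function being measured is just a placeholder, not from this repository):

package bench

import (
    "strconv"
    "testing"
)

// BenchmarkItoa shows the range-over-int loop form in a testing.B benchmark.
func BenchmarkItoa(b *testing.B) {
    for range b.N {
        _ = strconv.Itoa(42) // placeholder workload
    }
}
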
@@ -96,7 +96,7 @@ func (u Uint160) StringLE() string {

 // Reverse returns a reversed representation of u.
 func (u Uint160) Reverse() (r Uint160) {
-    for i := 0; i < Uint160Size; i++ {
+    for i := range Uint160Size {
         r[i] = u[Uint160Size-i-1]
     }

@@ -101,7 +101,7 @@ func BenchmarkUint256DecodeStringLE(b *testing.B) {

     b.ResetTimer()
     b.ReportAllocs()
-    for i := 0; i < b.N; i++ {
+    for range b.N {
         _, err := util.Uint256DecodeStringLE(a)
         if err != nil {
             b.FailNow()

@@ -10,7 +10,7 @@ import (
 )

 func benchScript(t *testing.B, script []byte) {
-    for n := 0; n < t.N; n++ {
+    for range t.N {
         t.StopTimer()
         vm := load(script)
         t.StartTimer()

@@ -40,7 +40,7 @@ func BenchmarkScriptPushPop(t *testing.B) {
     for _, i := range []int{4, 16, 128, 1024} {
         t.Run(strconv.Itoa(i), func(t *testing.B) {
             var script = make([]byte, i*2)
-            for p := 0; p < i; p++ {
+            for p := range i {
                 script[p] = byte(opcode.PUSH1)
                 script[i+p] = byte(opcode.DROP)
             }

@@ -53,7 +53,7 @@ func BenchmarkIsSignatureContract(t *testing.B) {
     b64script := "DCED2eixa9myLTNF1tTN4xvhw+HRYVMuPQzOy5Xs4utYM25BVuezJw=="
     script, err := base64.StdEncoding.DecodeString(b64script)
     require.NoError(t, err)
-    for n := 0; n < t.N; n++ {
+    for range t.N {
         _ = IsSignatureContract(script)
     }
 }

@@ -65,7 +65,7 @@ func TestIsSignatureContract(t *testing.T) {

 func testMultisigContract(t *testing.T, n, m int) []byte {
     pubs := make(keys.PublicKeys, n)
-    for i := 0; i < n; i++ {
+    for i := range n {
         priv, err := keys.NewPrivateKey()
         require.NoError(t, err)
         pubs[i] = priv.PublicKey()

Some files were not shown because too many files have changed in this diff.