[#265] go.mod: Use range over int
All checks were successful
DCO / DCO (pull_request) Successful in 47s
Tests and linters / Tests (1.22) (pull_request) Successful in 1m7s
Tests and linters / Tests (1.23) (pull_request) Successful in 1m7s
Tests and linters / Lint (pull_request) Successful in 1m42s

Since Go 1.22, a `for` statement with a `range` clause can iterate
over integer values from zero up to (but not including) an upper limit.
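
For illustration, a minimal, self-contained sketch of the equivalence
(the names and output here are illustrative, and go.mod must require
go >= 1.22):

package main

import "fmt"

func main() {
	const n = 3

	// Pre-1.22 form with an explicit counter.
	for i := 0; i < n; i++ {
		fmt.Println("classic", i)
	}

	// Go 1.22+: range over an integer yields 0, 1, ..., n-1.
	for i := range n {
		fmt.Println("range", i)
	}

	// When the index itself is unused (as in the benchmark loops that
	// become `for range b.N`), it can be dropped entirely.
	for range n {
		fmt.Println("tick")
	}
}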

gopatch script:
@@
var i, e expression
@@
-for i := 0; i <= e - 1; i++ {
+for i := range e {
    ...
}

@@
var i, e expression
@@
-for i := 0; i <= e; i++ {
+for i := range e + 1 {
    ...
}

@@
var i, e expression
@@
-for i := 0; i < e; i++ {
+for i := range e {
    ...
}
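
Assuming the script above is saved as range.patch (the file name is
illustrative), it can be applied across the module with an invocation
along the lines of `gopatch -p range.patch ./...`.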

Signed-off-by: Ekaterina Lebedeva <ekaterina.lebedeva@yadro.com>
This commit is contained in:
Ekaterina Lebedeva 2024-09-03 13:20:30 +03:00
parent 8f751d9dd0
commit 46ee543899
17 changed files with 41 additions and 41 deletions

View file

@@ -308,7 +308,7 @@ func (x *Container) SetAttribute(key, value string) {
 	attrs := x.v2.GetAttributes()
 	ln := len(attrs)
-	for i := 0; i < ln; i++ {
+	for i := range ln {
 		if attrs[i].GetKey() == key {
 			attrs[i].SetValue(value)
 			return

View file

@@ -286,13 +286,13 @@ func equalRecords(r1, r2 Record) bool {
 		return false
 	}
-	for i := 0; i < len(fs1); i++ {
+	for i := range len(fs1) {
 		if !equalFilters(fs1[i], fs2[i]) {
 			return false
 		}
 	}
-	for i := 0; i < len(ts1); i++ {
+	for i := range len(ts1) {
 		if !equalTargets(ts1[i], ts2[i]) {
 			return false
 		}

View file

@@ -212,7 +212,7 @@ func EqualTables(t1, t2 Table) bool {
 		return false
 	}
-	for i := 0; i < len(rs1); i++ {
+	for i := range len(rs1) {
 		if !equalRecords(rs1[i], rs2[i]) {
 			return false
 		}

View file

@@ -51,7 +51,7 @@ func SetTargetECDSAKeys(t *Target, pubs ...*ecdsa.PublicKey) {
 		binKeys = make([][]byte, 0, ln)
 	}
-	for i := 0; i < ln; i++ {
+	for i := range ln {
 		binKeys = append(binKeys, (*keys.PublicKey)(pubs[i]).Bytes())
 	}
@@ -67,7 +67,7 @@ func TargetECDSAKeys(t *Target) []*ecdsa.PublicKey {
 	pubs := make([]*ecdsa.PublicKey, ln)
-	for i := 0; i < ln; i++ {
+	for i := range ln {
 		p := new(keys.PublicKey)
 		if p.DecodeBytes(binKeys[i]) == nil {
 			pubs[i] = (*ecdsa.PublicKey)(p)
@@ -169,7 +169,7 @@ func equalTargets(t1, t2 Target) bool {
 		return false
 	}
-	for i := 0; i < len(keys1); i++ {
+	for i := range len(keys1) {
 		if !bytes.Equal(keys1[i], keys2[i]) {
 			return false
 		}

View file

@@ -19,7 +19,7 @@ func baseBenchmarkTableBinaryComparison(b *testing.B, factor int) {
 	b.StopTimer()
 	b.ResetTimer()
 	b.StartTimer()
-	for i := 0; i < b.N; i++ {
+	for range b.N {
 		got, _ := t.Marshal()
 		if !bytes.Equal(exp, got) {
 			b.Fail()
@@ -38,7 +38,7 @@ func baseBenchmarkTableEqualsComparison(b *testing.B, factor int) {
 	b.StopTimer()
 	b.ResetTimer()
 	b.StartTimer()
-	for i := 0; i < b.N; i++ {
+	for range b.N {
 		if !eacl.EqualTables(*t, *t2) {
 			b.Fail()
 		}
@@ -76,7 +76,7 @@ func TargetN(n int) *eacl.Target {
 	x.SetRole(eacl.RoleSystem)
 	keys := make([][]byte, n)
-	for i := 0; i < n; i++ {
+	for i := range n {
 		keys[i] = make([]byte, 32)
 		rand.Read(keys[i])
 	}
@@ -94,7 +94,7 @@ func RecordN(n int) *eacl.Record {
 	x.SetOperation(eacl.OperationRangeHash)
 	x.SetTargets(*TargetN(n))
-	for i := 0; i < n; i++ {
+	for range n {
 		x.AddFilter(eacl.HeaderFromObject, eacl.MatchStringEqual, "", cidtest.ID().EncodeToString())
 	}
@@ -106,7 +106,7 @@ func TableN(n int) *eacl.Table {
 	x.SetCID(cidtest.ID())
-	for i := 0; i < n; i++ {
+	for range n {
 		x.AddRecord(RecordN(n))
 	}

View file

@@ -47,7 +47,7 @@ func BenchmarkNetmap_ContainerNodes(b *testing.B) {
 	b.ResetTimer()
 	b.ReportAllocs()
-	for i := 0; i < b.N; i++ {
+	for range b.N {
 		_, err := nm.ContainerNodes(p, pivot)
 		if err != nil {
 			b.Fatal(err)

View file

@@ -38,7 +38,7 @@ func BenchmarkHRWSort(b *testing.B) {
 	b.Run("sort by index, no weight", func(b *testing.B) {
 		realNodes := make([]nodes, netmapSize)
 		b.ResetTimer()
-		for i := 0; i < b.N; i++ {
+		for range b.N {
 			b.StopTimer()
 			copy(realNodes, vectors)
 			b.StartTimer()
@@ -49,7 +49,7 @@ func BenchmarkHRWSort(b *testing.B) {
 	b.Run("sort by value, no weight", func(b *testing.B) {
 		realNodes := make([]nodes, netmapSize)
 		b.ResetTimer()
-		for i := 0; i < b.N; i++ {
+		for range b.N {
 			b.StopTimer()
 			copy(realNodes, vectors)
 			b.StartTimer()
@@ -60,7 +60,7 @@ func BenchmarkHRWSort(b *testing.B) {
 	b.Run("only sort by index", func(b *testing.B) {
 		realNodes := make([]nodes, netmapSize)
 		b.ResetTimer()
-		for i := 0; i < b.N; i++ {
+		for range b.N {
 			b.StopTimer()
 			copy(realNodes, vectors)
 			b.StartTimer()
@@ -71,7 +71,7 @@ func BenchmarkHRWSort(b *testing.B) {
 	b.Run("sort by value", func(b *testing.B) {
 		realNodes := make([]nodes, netmapSize)
 		b.ResetTimer()
-		for i := 0; i < b.N; i++ {
+		for range b.N {
 			b.StopTimer()
 			copy(realNodes, vectors)
 			b.StartTimer()
@@ -82,7 +82,7 @@ func BenchmarkHRWSort(b *testing.B) {
 	b.Run("sort by ID, then by index (deterministic)", func(b *testing.B) {
 		realNodes := make([]nodes, netmapSize)
 		b.ResetTimer()
-		for i := 0; i < b.N; i++ {
+		for range b.N {
 			b.StopTimer()
 			copy(realNodes, vectors)
 			b.StartTimer()
@@ -134,7 +134,7 @@ func BenchmarkPolicyHRWType(b *testing.B) {
 	nm.SetNodes(nodes)
 	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
+	for range b.N {
 		_, err := nm.ContainerNodes(p, []byte{1})
 		if err != nil {
 			b.Fatal()
@@ -195,7 +195,7 @@ func TestPlacementPolicy_DeterministicOrder(t *testing.T) {
 	}
 	a, b := getIndices(t)
-	for i := 0; i < 10; i++ {
+	for range 10 {
 		x, y := getIndices(t)
 		require.Equal(t, a, x)
 		require.Equal(t, b, y)
@@ -352,7 +352,7 @@ func TestPlacementPolicy_Unique(t *testing.T) {
 	var nodes []NodeInfo
 	for i, city := range []string{"Moscow", "Berlin", "Shenzhen"} {
-		for j := 0; j < 3; j++ {
+		for j := range 3 {
 			node := nodeInfoFromAttributes("City", city)
 			node.SetPublicKey(binary.BigEndian.AppendUint16(nil, uint16(i*4+j)))
 			nodes = append(nodes, node)
@@ -366,7 +366,7 @@ func TestPlacementPolicy_Unique(t *testing.T) {
 	require.NoError(t, err)
 	for i, vi := range v {
 		for _, ni := range vi {
-			for j := 0; j < i; j++ {
+			for j := range i {
 				for _, nj := range v[j] {
 					require.NotEqual(t, ni.hash, nj.hash)
 				}
@@ -455,7 +455,7 @@ func TestPlacementPolicy_MultiREP(t *testing.T) {
 	for _, additional := range []int{0, 1, 2} {
 		t.Run(fmt.Sprintf("unique=%t, additional=%d", unique, additional), func(t *testing.T) {
 			rs := []ReplicaDescriptor{newReplica(1, "SameRU")}
-			for i := 0; i < additional; i++ {
+			for range additional {
 				rs = append(rs, newReplica(1, ""))
 			}

View file

@@ -130,7 +130,7 @@ func BenchmarkPlacementPolicyInteropability(b *testing.B) {
 	b.Run(name, func(b *testing.B) {
 		b.ReportAllocs()
 		b.ResetTimer()
-		for i := 0; i < b.N; i++ {
+		for range b.N {
 			b.StartTimer()
 			v, err := nm.ContainerNodes(tt.Policy, tt.Pivot)
 			b.StopTimer()
@@ -173,7 +173,7 @@ func BenchmarkManySelects(b *testing.B) {
 	b.ResetTimer()
 	b.ReportAllocs()
-	for i := 0; i < b.N; i++ {
+	for range b.N {
 		_, err = nm.ContainerNodes(tt.Policy, tt.Pivot)
 		if err != nil {
 			b.FailNow()

View file

@@ -53,7 +53,7 @@ func TestErasureCodeReconstruct(t *testing.T) {
 	})
 	t.Run("from parity", func(t *testing.T) {
 		parts := cloneSlice(parts)
-		for i := 0; i < parityCount; i++ {
+		for i := range parityCount {
 			parts[i] = nil
 		}
 		reconstructed, err := c.ReconstructHeader(parts)
@@ -138,7 +138,7 @@ func TestErasureCodeReconstruct(t *testing.T) {
 	})
 	t.Run("from parity", func(t *testing.T) {
 		parts := cloneSlice(parts)
-		for i := 0; i < parityCount; i++ {
+		for i := range parityCount {
 			parts[i] = nil
 		}
 		reconstructed, err := c.Reconstruct(parts)
@@ -180,7 +180,7 @@ func TestErasureCodeReconstruct(t *testing.T) {
 	t.Run("from parity", func(t *testing.T) {
 		oldParts := parts
 		parts := cloneSlice(parts)
-		for i := 0; i < parityCount; i++ {
+		for i := range parityCount {
 			parts[i] = nil
 		}

View file

@@ -61,7 +61,7 @@ func TestID_Equal(t *testing.T) {
 func TestID_Parse(t *testing.T) {
 	t.Run("should parse successful", func(t *testing.T) {
-		for i := 0; i < 10; i++ {
+		for i := range 10 {
 			t.Run(strconv.Itoa(i), func(t *testing.T) {
 				cs := randSHA256Checksum(t)
 				str := base58.Encode(cs[:])
@@ -78,7 +78,7 @@ func TestID_Parse(t *testing.T) {
 	})
 	t.Run("should failure on parse", func(t *testing.T) {
-		for i := 0; i < 10; i++ {
+		for i := range 10 {
 			j := i
 			t.Run(strconv.Itoa(j), func(t *testing.T) {
 				cs := []byte{1, 2, 3, 4, 5, byte(j)}
@@ -98,7 +98,7 @@ func TestID_String(t *testing.T) {
 	})
 	t.Run("should be equal", func(t *testing.T) {
-		for i := 0; i < 10; i++ {
+		for i := range 10 {
 			t.Run(strconv.Itoa(i), func(t *testing.T) {
 				cs := randSHA256Checksum(t)
 				str := base58.Encode(cs[:])

View file

@@ -14,7 +14,7 @@ func generateIDList(sz int) []oid.ID {
 	res := make([]oid.ID, sz)
 	cs := [sha256.Size]byte{}
-	for i := 0; i < sz; i++ {
+	for i := range sz {
 		var oID oid.ID
 		res[i] = oID

View file

@@ -72,7 +72,7 @@ func TestTransformer(t *testing.T) {
 		require.Equal(t, ids.ParentID, &parID)
 		children := tt.objects[i].Children()
-		for j := 0; j < i; j++ {
+		for j := range i {
 			id, ok := tt.objects[j].ID()
 			require.True(t, ok)
 			require.Equal(t, id, children[j])
@@ -152,7 +152,7 @@ func benchmarkTransformer(b *testing.B, header *objectSDK.Object, payloadSize, s
 	b.ReportAllocs()
 	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
+	for range b.N {
 		f, _ := newPayloadSizeLimiter(maxSize, uint64(sizeHint), func() ObjectWriter { return benchTarget{} })
 		if err := f.WriteHeader(ctx, header); err != nil {
 			b.Fatalf("write header: %v", err)

View file

@@ -2256,7 +2256,7 @@ func (p *innerPool) connection() (client, error) {
 		return nil, errors.New("no healthy client")
 	}
 	attempts := 3 * len(p.clients)
-	for k := 0; k < attempts; k++ {
+	for range attempts {
 		i := p.sampler.Next()
 		if cp := p.clients[i]; cp.isHealthy() {
 			return cp, nil

View file

@@ -222,7 +222,7 @@ func TestOneOfTwoFailed(t *testing.T) {
 	time.Sleep(2 * time.Second)
-	for i := 0; i < 5; i++ {
+	for range 5 {
 		cp, err := pool.connection()
 		require.NoError(t, err)
 		st, _ := pool.cache.Get(formCacheKey(cp.address(), pool.key, false))
@@ -514,7 +514,7 @@ func TestStatusMonitor(t *testing.T) {
 	monitor.errorThreshold = 3
 	count := 10
-	for i := 0; i < count; i++ {
+	for range count {
 		monitor.incErrorRate()
 	}
@@ -724,7 +724,7 @@ func TestSwitchAfterErrorThreshold(t *testing.T) {
 	require.NoError(t, err)
 	t.Cleanup(pool.Close)
-	for i := 0; i < errorThreshold; i++ {
+	for range errorThreshold {
 		conn, err := pool.connection()
 		require.NoError(t, err)
 		require.Equal(t, nodes[0].address, conn.address())

View file

@@ -30,7 +30,7 @@ func newSampler(probabilities []float64, source rand.Source) *sampler {
 	sampler.alias = make([]int, n)
 	// Compute scaled probabilities.
 	p := make([]float64, n)
-	for i := 0; i < n; i++ {
+	for i := range n {
 		p[i] = probabilities[i] * float64(n)
 	}
 	for i, pi := range p {

View file

@@ -32,7 +32,7 @@ func TestSamplerStability(t *testing.T) {
 	for _, tc := range cases {
 		sampler := newSampler(tc.probabilities, rand.NewSource(0))
 		res := make([]int, len(tc.probabilities))
-		for i := 0; i < COUNT; i++ {
+		for range COUNT {
 			res[sampler.Next()]++
 		}

View file

@@ -382,7 +382,7 @@ type SubTreeReader struct {
 // Read reads another list of the subtree nodes.
 func (x *SubTreeReader) Read(buf []*grpcService.GetSubTreeResponse_Body) (int, error) {
-	for i := 0; i < len(buf); i++ {
+	for i := range len(buf) {
 		resp, err := x.cli.Recv()
 		if err == io.EOF {
 			return i, io.EOF