From a685fcdc963b0f58003059bb2dae2d21c925e25a Mon Sep 17 00:00:00 2001 From: Ekaterina Lebedeva Date: Fri, 30 Aug 2024 19:20:55 +0300 Subject: [PATCH] [#1317] go.mod: Use range over int Since Go 1.22 a "for" statement with a "range" clause is able to iterate through integer values from zero to an upper limit. gopatch script: @@ var i, e expression @@ -for i := 0; i <= e - 1; i++ { +for i := range e { ... } @@ var i, e expression @@ -for i := 0; i <= e; i++ { +for i := range e + 1 { ... } @@ var i, e expression @@ -for i := 0; i < e; i++ { +for i := range e { ... } Signed-off-by: Ekaterina Lebedeva --- .../modules/morph/contract/dump_hashes.go | 4 ++-- .../modules/morph/helper/local_client.go | 2 +- .../internal/modules/morph/helper/util.go | 2 +- .../morph/initialize/initialize_test.go | 4 ++-- cmd/frostfs-cli/internal/client/client.go | 2 +- cmd/frostfs-cli/modules/container/create.go | 2 +- cmd/frostfs-cli/modules/container/delete.go | 2 +- cmd/frostfs-cli/modules/object/nodes.go | 2 +- cmd/frostfs-cli/modules/util/acl.go | 2 +- cmd/frostfs-node/config/node/config.go | 2 +- cmd/frostfs-node/morph.go | 2 +- pkg/innerring/indexer_test.go | 2 +- pkg/innerring/notary.go | 2 +- .../processors/alphabet/handlers_test.go | 6 ++--- .../processors/governance/handlers_test.go | 2 +- .../processors/governance/list_test.go | 4 ++-- .../blobovnicza/sizes_test.go | 2 +- .../blobovniczatree/concurrency_test.go | 2 +- .../blobstor/blobovniczatree/rebuild_test.go | 2 +- .../blobstor/blobstor_test.go | 12 +++++----- .../blobstor/compression/bench_test.go | 2 +- .../blobstor/fstree/fstree_test.go | 6 ++--- .../blobstor/perf_test.go | 4 ++-- .../engine/control_test.go | 4 ++-- .../engine/engine_test.go | 12 +++++----- .../engine/evacuate_test.go | 4 ++-- pkg/local_object_storage/engine/list_test.go | 2 +- .../engine/remove_copies.go | 2 +- .../engine/remove_copies_test.go | 4 ++-- .../engine/shards_test.go | 2 +- pkg/local_object_storage/engine/tree_test.go | 6 ++--- .../internal/testutil/generators_test.go | 6 ++--- .../metabase/containers_test.go | 8 +++---- .../metabase/counter_test.go | 8 +++---- .../metabase/delete_test.go | 2 +- pkg/local_object_storage/metabase/get_test.go | 4 ++-- .../metabase/list_test.go | 12 +++++----- .../metabase/lock_test.go | 4 ++-- pkg/local_object_storage/metabase/put_test.go | 2 +- .../metabase/reset_test.go | 2 +- .../metabase/select_test.go | 6 ++--- pkg/local_object_storage/metabase/upgrade.go | 2 +- .../metabase/upgrade_test.go | 10 ++++---- pkg/local_object_storage/pilorama/boltdb.go | 2 +- .../pilorama/forest_test.go | 24 +++++++++---------- pkg/local_object_storage/shard/list_test.go | 4 ++-- .../shard/metrics_test.go | 10 ++++---- pkg/local_object_storage/shard/refill_test.go | 4 ++-- .../writecache/benchmark/writecache_test.go | 2 +- pkg/local_object_storage/writecache/flush.go | 2 +- pkg/morph/event/notary_preparator_test.go | 2 +- pkg/morph/timer/block_test.go | 2 +- pkg/network/tls_test.go | 2 +- pkg/services/control/server/evacuate.go | 2 +- pkg/services/object/acl/v2/util_test.go | 2 +- pkg/services/object/get/get_test.go | 8 +++---- pkg/services/object/get/getrangeec_test.go | 2 +- pkg/services/object/put/ec.go | 2 +- pkg/services/object/search/search_test.go | 4 ++-- .../object_manager/placement/cache_test.go | 4 ++-- .../object_manager/placement/traverser.go | 4 ++-- .../placement/traverser_test.go | 6 ++--- .../storage/persistent/executor_test.go | 2 +- pkg/services/tree/getsubtree_test.go | 2 +- pkg/services/tree/replicator.go | 2 +- 
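For illustration only (not part of the patch): a minimal, self-contained sketch of the rewrite the gopatch rules above describe, assuming Go 1.22 or later. The names used here (n, attempts) are hypothetical and exist only to show the equivalence of the two loop forms.

package main

import "fmt"

func main() {
	const n = 3

	// Before Go 1.22: counting loop with an explicit index.
	for i := 0; i < n; i++ {
		fmt.Println("classic:", i)
	}

	// Since Go 1.22: "range over int" yields 0, 1, ..., n-1.
	for i := range n {
		fmt.Println("range:", i)
	}

	// When the index is unused, it can be dropped entirely,
	// as many hunks in this patch do (e.g. `for range b.N`).
	const attempts = 2
	for range attempts {
		fmt.Println("retry attempt")
	}
}

Note that range over an integer e iterates e times, from 0 to e-1, which is why the inclusive form `i <= e` in the second gopatch rule maps to `range e + 1`.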
pkg/util/sync/key_locker_test.go | 2 +- 66 files changed, 135 insertions(+), 135 deletions(-) diff --git a/cmd/frostfs-adm/internal/modules/morph/contract/dump_hashes.go b/cmd/frostfs-adm/internal/modules/morph/contract/dump_hashes.go index 5a0d29550..be2134b77 100644 --- a/cmd/frostfs-adm/internal/modules/morph/contract/dump_hashes.go +++ b/cmd/frostfs-adm/internal/modules/morph/contract/dump_hashes.go @@ -68,7 +68,7 @@ func dumpContractHashes(cmd *cobra.Command, _ []string) error { if irSize != 0 { bw.Reset() - for i := 0; i < irSize; i++ { + for i := range irSize { emit.AppCall(bw.BinWriter, cs.Hash, "resolve", callflag.ReadOnly, helper.GetAlphabetNNSDomain(i), int64(nns.TXT)) @@ -79,7 +79,7 @@ func dumpContractHashes(cmd *cobra.Command, _ []string) error { return fmt.Errorf("can't fetch info from NNS: %w", err) } - for i := 0; i < irSize; i++ { + for i := range irSize { info := contractDumpInfo{name: fmt.Sprintf("alphabet %d", i)} if h, err := helper.ParseNNSResolveResult(alphaRes.Stack[i]); err == nil { info.hash = h diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/local_client.go b/cmd/frostfs-adm/internal/modules/morph/helper/local_client.go index 44d1b4ecf..375fa84d7 100644 --- a/cmd/frostfs-adm/internal/modules/morph/helper/local_client.go +++ b/cmd/frostfs-adm/internal/modules/morph/helper/local_client.go @@ -224,7 +224,7 @@ func (l *LocalClient) CalculateNetworkFee(tx *transaction.Transaction) (int64, e paramz = []manifest.Parameter{{Type: smartcontract.SignatureType}} } else if nSigs, _, ok := vm.ParseMultiSigContract(w.VerificationScript); ok { paramz = make([]manifest.Parameter, nSigs) - for j := 0; j < nSigs; j++ { + for j := range nSigs { paramz[j] = manifest.Parameter{Type: smartcontract.SignatureType} } } diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/util.go b/cmd/frostfs-adm/internal/modules/morph/helper/util.go index 2d9281c24..8c6b90539 100644 --- a/cmd/frostfs-adm/internal/modules/morph/helper/util.go +++ b/cmd/frostfs-adm/internal/modules/morph/helper/util.go @@ -44,7 +44,7 @@ func openAlphabetWallets(v *viper.Viper, walletDir string) ([]*wallet.Wallet, er var wallets []*wallet.Wallet var letter string - for i := 0; i < constants.MaxAlphabetNodes; i++ { + for i := range constants.MaxAlphabetNodes { letter = innerring.GlagoliticLetter(i).String() p := filepath.Join(walletDir, letter+".json") var w *wallet.Wallet diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_test.go b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_test.go index 6c52aa2ab..74f5d3e88 100644 --- a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_test.go +++ b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_test.go @@ -113,7 +113,7 @@ func generateTestData(dir string, size int) error { } var pubs []string - for i := 0; i < size; i++ { + for i := range size { p := filepath.Join(dir, innerring.GlagoliticLetter(i).String()+".json") w, err := wallet.NewWalletFromFile(p) if err != nil { @@ -148,7 +148,7 @@ func generateTestData(dir string, size int) error { } func setTestCredentials(v *viper.Viper, size int) { - for i := 0; i < size; i++ { + for i := range size { v.Set("credentials."+innerring.GlagoliticLetter(i).String(), strconv.FormatUint(uint64(i), 10)) } v.Set("credentials.contract", constants.TestContractPassword) diff --git a/cmd/frostfs-cli/internal/client/client.go b/cmd/frostfs-cli/internal/client/client.go index a0fa22410..57bcf5620 100644 --- a/cmd/frostfs-cli/internal/client/client.go +++ 
b/cmd/frostfs-cli/internal/client/client.go @@ -692,7 +692,7 @@ func SearchObjects(ctx context.Context, prm SearchObjectsPrm) (*SearchObjectsRes for { n, ok = rdr.Read(buf) - for i := 0; i < n; i++ { + for i := range n { list = append(list, buf[i]) } if !ok { diff --git a/cmd/frostfs-cli/modules/container/create.go b/cmd/frostfs-cli/modules/container/create.go index c6f576908..f37b169ce 100644 --- a/cmd/frostfs-cli/modules/container/create.go +++ b/cmd/frostfs-cli/modules/container/create.go @@ -139,7 +139,7 @@ It will be stored in sidechain when inner ring will accepts it.`, }, } - for i := 0; i < awaitTimeout; i++ { + for range awaitTimeout { time.Sleep(1 * time.Second) _, err := internalclient.GetContainer(cmd.Context(), getPrm) diff --git a/cmd/frostfs-cli/modules/container/delete.go b/cmd/frostfs-cli/modules/container/delete.go index e5425bf25..c20188884 100644 --- a/cmd/frostfs-cli/modules/container/delete.go +++ b/cmd/frostfs-cli/modules/container/delete.go @@ -110,7 +110,7 @@ Only owner of the container has a permission to remove container.`, }, } - for i := 0; i < awaitTimeout; i++ { + for range awaitTimeout { time.Sleep(1 * time.Second) _, err := internalclient.GetContainer(cmd.Context(), getPrm) diff --git a/cmd/frostfs-cli/modules/object/nodes.go b/cmd/frostfs-cli/modules/object/nodes.go index 896f6f17f..0eac4e6d2 100644 --- a/cmd/frostfs-cli/modules/object/nodes.go +++ b/cmd/frostfs-cli/modules/object/nodes.go @@ -195,7 +195,7 @@ func flattenComplexMembersIfECContainer(cmd *cobra.Command, cnrID cid.ID, member prmHead.SetRawFlag(true) // to get an error instead of whole object eg, egCtx := errgroup.WithContext(cmd.Context()) - for idx := 0; idx < len(members); idx++ { + for idx := range len(members) { partObjID := members[idx] eg.Go(func() error { diff --git a/cmd/frostfs-cli/modules/util/acl.go b/cmd/frostfs-cli/modules/util/acl.go index 4c2e324b3..145dcc756 100644 --- a/cmd/frostfs-cli/modules/util/acl.go +++ b/cmd/frostfs-cli/modules/util/acl.go @@ -33,7 +33,7 @@ func PrettyPrintTableBACL(cmd *cobra.Command, bacl *acl.Basic) { fmt.Fprintln(w, strings.Join(bits, "\t")) // Footer footer := []string{"X F"} - for i := 0; i < 7; i++ { + for range 7 { footer = append(footer, "U S O B") } fmt.Fprintln(w, strings.Join(footer, "\t")) diff --git a/cmd/frostfs-node/config/node/config.go b/cmd/frostfs-node/config/node/config.go index 97aca274a..4d063245b 100644 --- a/cmd/frostfs-node/config/node/config.go +++ b/cmd/frostfs-node/config/node/config.go @@ -121,7 +121,7 @@ func BootstrapAddresses(c *config.Config) (addr network.AddressGroup) { func Attributes(c *config.Config) (attrs []string) { const maxAttributes = 100 - for i := 0; i < maxAttributes; i++ { + for i := range maxAttributes { attr := config.StringSafe(c.Sub(subsection), attributePrefix+"_"+strconv.Itoa(i)) if attr == "" { return diff --git a/cmd/frostfs-node/morph.go b/cmd/frostfs-node/morph.go index 1b148095b..7178cd97d 100644 --- a/cmd/frostfs-node/morph.go +++ b/cmd/frostfs-node/morph.go @@ -157,7 +157,7 @@ var ( ) func waitNotaryDeposit(ctx context.Context, c *cfg, tx util.Uint256) error { - for i := 0; i < notaryDepositRetriesAmount; i++ { + for range notaryDepositRetriesAmount { c.log.Debug(logs.ClientAttemptToWaitForNotaryDepositTransactionToGetPersisted) select { case <-ctx.Done(): diff --git a/pkg/innerring/indexer_test.go b/pkg/innerring/indexer_test.go index 5bc2cc988..c8a819b5b 100644 --- a/pkg/innerring/indexer_test.go +++ b/pkg/innerring/indexer_test.go @@ -237,7 +237,7 @@ func BenchmarkKeyPosition(b 
*testing.B) { b.ResetTimer() b.ReportAllocs() - for i := 0; i < b.N; i++ { + for range b.N { if keyPosition(key, list) != 5 { b.FailNow() } diff --git a/pkg/innerring/notary.go b/pkg/innerring/notary.go index c601f5587..e6f2b1de4 100644 --- a/pkg/innerring/notary.go +++ b/pkg/innerring/notary.go @@ -90,7 +90,7 @@ func (s *Server) initNotary(ctx context.Context, deposit depositor, await awaite } func awaitNotaryDepositInClient(ctx context.Context, cli *client.Client, txHash util.Uint256) error { - for i := 0; i < notaryDepositTimeout; i++ { + for range notaryDepositTimeout { select { case <-ctx.Done(): return ctx.Err() diff --git a/pkg/innerring/processors/alphabet/handlers_test.go b/pkg/innerring/processors/alphabet/handlers_test.go index 346901949..dfda37472 100644 --- a/pkg/innerring/processors/alphabet/handlers_test.go +++ b/pkg/innerring/processors/alphabet/handlers_test.go @@ -21,7 +21,7 @@ func TestProcessorEmitsGasToNetmapAndAlphabet(t *testing.T) { var parsedWallets []util.Uint160 = []util.Uint160{{20}, {25}} alphabetContracts := innerring.NewAlphabetContracts() - for i := 0; i <= index; i++ { + for i := range index + 1 { alphabetContracts[innerring.GlagoliticLetter(i)] = util.Uint160{uint8(i)} } @@ -98,7 +98,7 @@ func TestProcessorEmitsGasToNetmapIfNoParsedWallets(t *testing.T) { var parsedWallets []util.Uint160 = []util.Uint160{} alphabetContracts := innerring.NewAlphabetContracts() - for i := 0; i <= index; i++ { + for i := range index + 1 { alphabetContracts[innerring.GlagoliticLetter(i)] = util.Uint160{uint8(i)} } @@ -170,7 +170,7 @@ func TestProcessorDoesntEmitGasIfNoNetmapOrParsedWallets(t *testing.T) { var parsedWallets []util.Uint160 = []util.Uint160{} alphabetContracts := innerring.NewAlphabetContracts() - for i := 0; i <= index; i++ { + for i := range index + 1 { alphabetContracts[innerring.GlagoliticLetter(i)] = util.Uint160{uint8(i)} } diff --git a/pkg/innerring/processors/governance/handlers_test.go b/pkg/innerring/processors/governance/handlers_test.go index 2a505f8d1..b73e24318 100644 --- a/pkg/innerring/processors/governance/handlers_test.go +++ b/pkg/innerring/processors/governance/handlers_test.go @@ -190,7 +190,7 @@ func generateTestKeys(t *testing.T) testKeys { for { var result testKeys - for i := 0; i < 4; i++ { + for range 4 { pk, err := keys.NewPrivateKey() require.NoError(t, err, "failed to create private key") result.sidechainKeys = append(result.sidechainKeys, pk.PublicKey()) diff --git a/pkg/innerring/processors/governance/list_test.go b/pkg/innerring/processors/governance/list_test.go index d099ec837..4ecebf05b 100644 --- a/pkg/innerring/processors/governance/list_test.go +++ b/pkg/innerring/processors/governance/list_test.go @@ -49,7 +49,7 @@ func TestNewAlphabetList(t *testing.T) { } ln := len(rounds) - for i := 0; i < ln; i++ { + for i := range ln { list, err = newAlphabetList(list, exp) require.NoError(t, err) require.True(t, equalPublicKeyLists(list, rounds[i])) @@ -131,7 +131,7 @@ func TestUpdateInnerRing(t *testing.T) { func generateKeys(n int) (keys.PublicKeys, error) { pubKeys := make(keys.PublicKeys, 0, n) - for i := 0; i < n; i++ { + for range n { privKey, err := keys.NewPrivateKey() if err != nil { return nil, err diff --git a/pkg/local_object_storage/blobovnicza/sizes_test.go b/pkg/local_object_storage/blobovnicza/sizes_test.go index 01093b8d7..d582fc5e4 100644 --- a/pkg/local_object_storage/blobovnicza/sizes_test.go +++ b/pkg/local_object_storage/blobovnicza/sizes_test.go @@ -42,7 +42,7 @@ func TestSizes(t *testing.T) { func 
BenchmarkUpperBound(b *testing.B) { for _, size := range []uint64{1, 1023, 65 * 1024, 40 * 1024 * 1024} { b.Run(fmt.Sprintf("size=%d", size), func(b *testing.B) { - for i := 0; i < b.N; i++ { + for range b.N { _ = upperPowerOfTwo(size) } }) diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/concurrency_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/concurrency_test.go index 5bed86142..cc8a52d03 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/concurrency_test.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/concurrency_test.go @@ -34,7 +34,7 @@ func TestBlobovniczaTree_Concurrency(t *testing.T) { var cnt atomic.Int64 var wg sync.WaitGroup - for i := 0; i < 1000; i++ { + for range 1000 { wg.Add(1) go func() { defer wg.Done() diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go index 7a1de4c13..4a51fd86a 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go @@ -127,7 +127,7 @@ func testBlobovniczaTreeRebuildHelper(t *testing.T, sourceDepth, sourceWidth, ta eg, egCtx := errgroup.WithContext(context.Background()) storageIDs := make(map[oid.Address][]byte) storageIDsGuard := &sync.Mutex{} - for i := 0; i < 100; i++ { + for range 100 { eg.Go(func() error { obj := blobstortest.NewObject(1024) data, err := obj.Marshal() diff --git a/pkg/local_object_storage/blobstor/blobstor_test.go b/pkg/local_object_storage/blobstor/blobstor_test.go index c7d80dc84..bed5e0eb9 100644 --- a/pkg/local_object_storage/blobstor/blobstor_test.go +++ b/pkg/local_object_storage/blobstor/blobstor_test.go @@ -60,7 +60,7 @@ func TestCompression(t *testing.T) { bigObj := make([]*objectSDK.Object, objCount) smallObj := make([]*objectSDK.Object, objCount) - for i := 0; i < objCount; i++ { + for i := range objCount { bigObj[i] = testObject(smallSizeLimit * 2) smallObj[i] = testObject(smallSizeLimit / 2) } @@ -219,7 +219,7 @@ func TestConcurrentPut(t *testing.T) { bigObj := testObject(smallSizeLimit * 2) var wg sync.WaitGroup - for i := 0; i < concurrentPutCount; i++ { + for range concurrentPutCount { wg.Add(1) go func() { testPut(t, blobStor, bigObj) @@ -235,7 +235,7 @@ func TestConcurrentPut(t *testing.T) { bigObj := testObject(smallSizeLimit * 2) var wg sync.WaitGroup - for i := 0; i < concurrentPutCount+1; i++ { + for range concurrentPutCount + 1 { wg.Add(1) go func() { testPutFileExistsError(t, blobStor, bigObj) @@ -251,7 +251,7 @@ func TestConcurrentPut(t *testing.T) { smallObj := testObject(smallSizeLimit / 2) var wg sync.WaitGroup - for i := 0; i < concurrentPutCount; i++ { + for range concurrentPutCount { wg.Add(1) go func() { testPut(t, blobStor, smallObj) @@ -302,7 +302,7 @@ func TestConcurrentDelete(t *testing.T) { testPut(t, blobStor, bigObj) var wg sync.WaitGroup - for i := 0; i < 2; i++ { + for range 2 { wg.Add(1) go func() { testDelete(t, blobStor, bigObj) @@ -319,7 +319,7 @@ func TestConcurrentDelete(t *testing.T) { testPut(t, blobStor, smallObj) var wg sync.WaitGroup - for i := 0; i < 2; i++ { + for range 2 { wg.Add(1) go func() { testDelete(t, blobStor, smallObj) diff --git a/pkg/local_object_storage/blobstor/compression/bench_test.go b/pkg/local_object_storage/blobstor/compression/bench_test.go index 986912985..9f70f8ec2 100644 --- a/pkg/local_object_storage/blobstor/compression/bench_test.go +++ b/pkg/local_object_storage/blobstor/compression/bench_test.go @@ -36,7 +36,7 
@@ func BenchmarkCompression(b *testing.B) { func benchWith(b *testing.B, c Config, data []byte) { b.ResetTimer() b.ReportAllocs() - for i := 0; i < b.N; i++ { + for range b.N { _ = c.Compress(data) } } diff --git a/pkg/local_object_storage/blobstor/fstree/fstree_test.go b/pkg/local_object_storage/blobstor/fstree/fstree_test.go index d633cbac3..5786dfd3b 100644 --- a/pkg/local_object_storage/blobstor/fstree/fstree_test.go +++ b/pkg/local_object_storage/blobstor/fstree/fstree_test.go @@ -28,7 +28,7 @@ func Benchmark_addressFromString(b *testing.B) { b.ReportAllocs() b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { _, err := addressFromString(s) if err != nil { b.Fatalf("benchmark error: %v", err) @@ -73,7 +73,7 @@ func TestObjectCounter(t *testing.T) { eg, egCtx := errgroup.WithContext(context.Background()) eg.Go(func() error { - for j := 0; j < 1_000; j++ { + for range 1_000 { _, err := fst.Put(egCtx, putPrm) if err != nil { return err @@ -84,7 +84,7 @@ func TestObjectCounter(t *testing.T) { eg.Go(func() error { var le logicerr.Logical - for j := 0; j < 1_000; j++ { + for range 1_000 { _, err := fst.Delete(egCtx, delPrm) if err != nil && !errors.As(err, &le) { return err diff --git a/pkg/local_object_storage/blobstor/perf_test.go b/pkg/local_object_storage/blobstor/perf_test.go index 501c95a1d..1ac769e36 100644 --- a/pkg/local_object_storage/blobstor/perf_test.go +++ b/pkg/local_object_storage/blobstor/perf_test.go @@ -110,7 +110,7 @@ func BenchmarkSubstorageReadPerf(b *testing.B) { // Fill database var errG errgroup.Group - for i := 0; i < tt.size; i++ { + for range tt.size { obj := objGen.Next() addr := testutil.AddressFromObject(b, obj) errG.Go(func() error { @@ -203,7 +203,7 @@ func BenchmarkSubstorageIteratePerf(b *testing.B) { defer func() { require.NoError(b, st.Close()) }() // Fill database - for i := 0; i < tt.size; i++ { + for range tt.size { obj := objGen.Next() addr := testutil.AddressFromObject(b, obj) raw, err := obj.Marshal() diff --git a/pkg/local_object_storage/engine/control_test.go b/pkg/local_object_storage/engine/control_test.go index f0809883c..2de92ae84 100644 --- a/pkg/local_object_storage/engine/control_test.go +++ b/pkg/local_object_storage/engine/control_test.go @@ -208,7 +208,7 @@ func TestPersistentShardID(t *testing.T) { require.NoError(t, te.ng.Close(context.Background())) newTe := newEngineWithErrorThreshold(t, dir, 1) - for i := 0; i < len(newTe.shards); i++ { + for i := range len(newTe.shards) { require.Equal(t, te.shards[i].id, newTe.shards[i].id) } require.NoError(t, newTe.ng.Close(context.Background())) @@ -269,7 +269,7 @@ func TestReload(t *testing.T) { e, currShards := engineWithShards(t, removePath, shardNum) var rcfg ReConfiguration - for i := 0; i < len(currShards)-1; i++ { // without one of the shards + for i := range len(currShards) - 1 { // without one of the shards rcfg.AddShard(currShards[i], nil) } diff --git a/pkg/local_object_storage/engine/engine_test.go b/pkg/local_object_storage/engine/engine_test.go index 49976abbb..525e17f34 100644 --- a/pkg/local_object_storage/engine/engine_test.go +++ b/pkg/local_object_storage/engine/engine_test.go @@ -44,7 +44,7 @@ func BenchmarkExists(b *testing.B) { func benchmarkExists(b *testing.B, shardNum int) { shards := make([]*shard.Shard, shardNum) - for i := 0; i < shardNum; i++ { + for i := range shardNum { shards[i] = testNewShard(b) } @@ -52,7 +52,7 @@ func benchmarkExists(b *testing.B, shardNum int) { defer func() { require.NoError(b, e.Close(context.Background())) }() addr := 
oidtest.Address() - for i := 0; i < 100; i++ { + for range 100 { obj := testutil.GenerateObjectWithCID(cidtest.ID()) err := Put(context.Background(), e, obj) if err != nil { @@ -62,7 +62,7 @@ func benchmarkExists(b *testing.B, shardNum int) { b.ReportAllocs() b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { var shPrm shard.ExistsPrm shPrm.Address = addr shPrm.ParentAddress = oid.Address{} @@ -109,7 +109,7 @@ func (te *testEngineWrapper) setInitializedShards(t testing.TB, shards ...*shard func (te *testEngineWrapper) setShardsNum(t testing.TB, num int) *testEngineWrapper { shards := make([]*shard.Shard, 0, num) - for i := 0; i < num; i++ { + for range num { shards = append(shards, testNewShard(t)) } @@ -117,7 +117,7 @@ func (te *testEngineWrapper) setShardsNum(t testing.TB, num int) *testEngineWrap } func (te *testEngineWrapper) setShardsNumOpts(t testing.TB, num int, shardOpts func(id int) []shard.Option) *testEngineWrapper { - for i := 0; i < num; i++ { + for i := range num { opts := shardOpts(i) id, err := te.engine.AddShard(context.Background(), opts...) require.NoError(t, err) @@ -127,7 +127,7 @@ func (te *testEngineWrapper) setShardsNumOpts(t testing.TB, num int, shardOpts f } func (te *testEngineWrapper) setShardsNumAdditionalOpts(t testing.TB, num int, shardOpts func(id int) []shard.Option) *testEngineWrapper { - for i := 0; i < num; i++ { + for i := range num { defaultOpts := testDefaultShardOptions(t) opts := append(defaultOpts, shardOpts(i)...) id, err := te.engine.AddShard(context.Background(), opts...) diff --git a/pkg/local_object_storage/engine/evacuate_test.go b/pkg/local_object_storage/engine/evacuate_test.go index 55268b549..8d25dad4a 100644 --- a/pkg/local_object_storage/engine/evacuate_test.go +++ b/pkg/local_object_storage/engine/evacuate_test.go @@ -61,7 +61,7 @@ func newEngineEvacuate(t *testing.T, shardNum int, objPerShard int) (*StorageEng } for _, sh := range ids { - for i := 0; i < objPerShard; i++ { + for range objPerShard { contID := cidtest.ID() obj := testutil.GenerateObjectWithCID(contID) objects = append(objects, obj) @@ -554,7 +554,7 @@ func TestEvacuateTreesRemote(t *testing.T) { require.Equal(t, "", st.ErrorMessage(), "invalid final error message") expectedTreeOps := make(map[string][]*pilorama.Move) - for i := 0; i < len(e.shards); i++ { + for i := range len(e.shards) { sourceTrees, err := pilorama.TreeListAll(context.Background(), e.shards[ids[i].String()]) require.NoError(t, err, "list source trees failed") require.Len(t, sourceTrees, 3) diff --git a/pkg/local_object_storage/engine/list_test.go b/pkg/local_object_storage/engine/list_test.go index dd8a2e8a0..11a6c7841 100644 --- a/pkg/local_object_storage/engine/list_test.go +++ b/pkg/local_object_storage/engine/list_test.go @@ -79,7 +79,7 @@ func TestListWithCursor(t *testing.T) { expected := make([]object.Info, 0, tt.objectNum) got := make([]object.Info, 0, tt.objectNum) - for i := 0; i < tt.objectNum; i++ { + for range tt.objectNum { containerID := cidtest.ID() obj := testutil.GenerateObjectWithCIDWithPayload(containerID, []byte{'a'}) diff --git a/pkg/local_object_storage/engine/remove_copies.go b/pkg/local_object_storage/engine/remove_copies.go index b99cf4f44..5e1ced56a 100644 --- a/pkg/local_object_storage/engine/remove_copies.go +++ b/pkg/local_object_storage/engine/remove_copies.go @@ -87,7 +87,7 @@ func (e *StorageEngine) RemoveDuplicates(ctx context.Context, prm RemoveDuplicat } }) - for i := 0; i < prm.Concurrency; i++ { + for range prm.Concurrency { errG.Go(func() error { return 
e.removeObjects(ctx, ch) }) diff --git a/pkg/local_object_storage/engine/remove_copies_test.go b/pkg/local_object_storage/engine/remove_copies_test.go index 99963576c..6d2291c74 100644 --- a/pkg/local_object_storage/engine/remove_copies_test.go +++ b/pkg/local_object_storage/engine/remove_copies_test.go @@ -96,7 +96,7 @@ loop: require.FailNow(t, "unexpected object was removed", removed[i].addr) } - for i := 0; i < copyCount; i++ { + for i := range copyCount { if i%3 == 0 { require.True(t, removedMask[i], "object %d was expected to be removed", i) } else { @@ -207,7 +207,7 @@ func TestRebalanceExitByContext(t *testing.T) { }() const removeCount = 3 - for i := 0; i < removeCount-1; i++ { + for range removeCount - 1 { <-deleteCh signal <- struct{}{} } diff --git a/pkg/local_object_storage/engine/shards_test.go b/pkg/local_object_storage/engine/shards_test.go index f4c7a4309..3347d58f1 100644 --- a/pkg/local_object_storage/engine/shards_test.go +++ b/pkg/local_object_storage/engine/shards_test.go @@ -72,7 +72,7 @@ func TestSortShardsByWeight(t *testing.T) { var shards1 []hashedShard var weights1 []float64 var shards2 []hashedShard - for i := 0; i < numOfShards; i++ { + for i := range numOfShards { shards1 = append(shards1, hashedShard{ hash: uint64(i), }) diff --git a/pkg/local_object_storage/engine/tree_test.go b/pkg/local_object_storage/engine/tree_test.go index 2739058e9..6f694f082 100644 --- a/pkg/local_object_storage/engine/tree_test.go +++ b/pkg/local_object_storage/engine/tree_test.go @@ -34,7 +34,7 @@ func benchmarkTreeVsSearch(b *testing.B, objCount int) { d := pilorama.CIDDescriptor{CID: cid, Position: 0, Size: 1} treeID := "someTree" - for i := 0; i < objCount; i++ { + for i := range objCount { obj := testutil.GenerateObjectWithCID(cid) testutil.AddAttribute(obj, pilorama.AttributeFilename, strconv.Itoa(i)) err := Put(context.Background(), te.ng, obj) @@ -56,7 +56,7 @@ func benchmarkTreeVsSearch(b *testing.B, objCount int) { fs.AddFilter(pilorama.AttributeFilename, strconv.Itoa(objCount/2), objectSDK.MatchStringEqual) prm.WithFilters(fs) - for i := 0; i < b.N; i++ { + for range b.N { res, err := te.ng.Select(context.Background(), prm) if err != nil { b.Fatal(err) @@ -67,7 +67,7 @@ func benchmarkTreeVsSearch(b *testing.B, objCount int) { } }) b.Run("TreeGetByPath", func(b *testing.B) { - for i := 0; i < b.N; i++ { + for range b.N { nodes, err := te.ng.TreeGetByPath(context.Background(), cid, treeID, pilorama.AttributeFilename, []string{strconv.Itoa(objCount / 2)}, true) if err != nil { b.Fatal(err) diff --git a/pkg/local_object_storage/internal/testutil/generators_test.go b/pkg/local_object_storage/internal/testutil/generators_test.go index f7be6014d..cc6f726a4 100644 --- a/pkg/local_object_storage/internal/testutil/generators_test.go +++ b/pkg/local_object_storage/internal/testutil/generators_test.go @@ -13,7 +13,7 @@ func TestOverwriteObjGenerator(t *testing.T) { ObjSize: 10, MaxObjects: 4, } - for i := 0; i < 40; i++ { + for range 40 { obj := gen.Next() id, isSet := obj.ID() i := binary.LittleEndian.Uint64(id[:]) @@ -26,7 +26,7 @@ func TestOverwriteObjGenerator(t *testing.T) { func TestRandObjGenerator(t *testing.T) { gen := &RandObjGenerator{ObjSize: 10} - for i := 0; i < 10; i++ { + for range 10 { obj := gen.Next() require.Equal(t, gen.ObjSize, uint64(len(obj.Payload()))) @@ -50,7 +50,7 @@ func TestSeqObjGenerator(t *testing.T) { func TestRandAddrGenerator(t *testing.T) { gen := RandAddrGenerator(5) - for i := 0; i < 50; i++ { + for range 50 { addr := gen.Next() id := 
addr.Object() k := binary.LittleEndian.Uint64(id[:]) diff --git a/pkg/local_object_storage/metabase/containers_test.go b/pkg/local_object_storage/metabase/containers_test.go index 5d6788d7e..8b1874458 100644 --- a/pkg/local_object_storage/metabase/containers_test.go +++ b/pkg/local_object_storage/metabase/containers_test.go @@ -24,7 +24,7 @@ func TestDB_Containers(t *testing.T) { cids := make(map[string]int, N) - for i := 0; i < N; i++ { + for range N { obj := testutil.GenerateObject() cnr, _ := obj.ContainerID() @@ -95,7 +95,7 @@ func TestDB_ContainersCount(t *testing.T) { expected := make([]cid.ID, 0, R+T+SG+L) for _, upload := range uploadObjects { - for i := 0; i < upload.amount; i++ { + for range upload.amount { obj := testutil.GenerateObject() obj.SetType(upload.typ) @@ -126,11 +126,11 @@ func TestDB_ContainerSize(t *testing.T) { cids := make(map[cid.ID]int, C) objs := make(map[cid.ID][]*objectSDK.Object, C*N) - for i := 0; i < C; i++ { + for range C { cnr := cidtest.ID() cids[cnr] = 0 - for j := 0; j < N; j++ { + for range N { size := rand.Intn(1024) parent := testutil.GenerateObjectWithCID(cnr) diff --git a/pkg/local_object_storage/metabase/counter_test.go b/pkg/local_object_storage/metabase/counter_test.go index 1797fc0aa..d1f808a63 100644 --- a/pkg/local_object_storage/metabase/counter_test.go +++ b/pkg/local_object_storage/metabase/counter_test.go @@ -39,14 +39,14 @@ func TestCounters(t *testing.T) { db := newDB(t) defer func() { require.NoError(t, db.Close()) }() oo := make([]*objectSDK.Object, 0, objCount) - for i := 0; i < objCount; i++ { + for range objCount { oo = append(oo, testutil.GenerateObject()) } var prm meta.PutPrm exp := make(map[cid.ID]meta.ObjectCounters) - for i := 0; i < objCount; i++ { + for i := range objCount { prm.SetObject(oo[i]) cnrID, _ := oo[i].ContainerID() c := meta.ObjectCounters{} @@ -187,7 +187,7 @@ func TestCounters(t *testing.T) { // put objects and check that parent info // does not affect the counter - for i := 0; i < objCount; i++ { + for i := range objCount { o := testutil.GenerateObject() if i < objCount/2 { // half of the objs will have the parent o.SetParent(parObj) @@ -535,7 +535,7 @@ func putObjs(t *testing.T, db *meta.DB, count int, withParent bool) []*objectSDK parent := testutil.GenerateObject() oo := make([]*objectSDK.Object, 0, count) - for i := 0; i < count; i++ { + for i := range count { o := testutil.GenerateObject() if withParent { o.SetParent(parent) diff --git a/pkg/local_object_storage/metabase/delete_test.go b/pkg/local_object_storage/metabase/delete_test.go index 2053874d0..cb85157e7 100644 --- a/pkg/local_object_storage/metabase/delete_test.go +++ b/pkg/local_object_storage/metabase/delete_test.go @@ -131,7 +131,7 @@ func TestDelete(t *testing.T) { defer func() { require.NoError(t, db.Close()) }() cnr := cidtest.ID() - for i := 0; i < 10; i++ { + for range 10 { obj := testutil.GenerateObjectWithCID(cnr) var prm meta.PutPrm diff --git a/pkg/local_object_storage/metabase/get_test.go b/pkg/local_object_storage/metabase/get_test.go index 247ddf9cd..7654d2cd8 100644 --- a/pkg/local_object_storage/metabase/get_test.go +++ b/pkg/local_object_storage/metabase/get_test.go @@ -223,7 +223,7 @@ func benchmarkGet(b *testing.B, numOfObj int) { defer func() { require.NoError(b, db.Close()) }() addrs := make([]oid.Address, 0, numOfObj) - for i := 0; i < numOfObj; i++ { + for range numOfObj { raw := testutil.GenerateObject() addrs = append(addrs, object.AddressOf(raw)) @@ -261,7 +261,7 @@ func benchmarkGet(b *testing.B, numOfObj int) { 
b.Run("serial", func(b *testing.B) { b.ReportAllocs() - for i := 0; i < b.N; i++ { + for i := range b.N { var getPrm meta.GetPrm getPrm.SetAddress(addrs[i%len(addrs)]) diff --git a/pkg/local_object_storage/metabase/list_test.go b/pkg/local_object_storage/metabase/list_test.go index a92e2eff4..6207497b1 100644 --- a/pkg/local_object_storage/metabase/list_test.go +++ b/pkg/local_object_storage/metabase/list_test.go @@ -35,7 +35,7 @@ func listWithCursorPrepareDB(b *testing.B) *meta.DB { defer func() { require.NoError(b, db.Close()) }() obj := testutil.GenerateObject() - for i := 0; i < 100_000; i++ { // should be a multiple of all batch sizes + for i := range 100_000 { // should be a multiple of all batch sizes obj.SetID(oidtest.ID()) if i%9 == 0 { // let's have 9 objects per container obj.SetContainerID(cidtest.ID()) @@ -51,7 +51,7 @@ func benchmarkListWithCursor(b *testing.B, db *meta.DB, batchSize int) { b.ResetTimer() b.ReportAllocs() - for i := 0; i < b.N; i++ { + for range b.N { res, err := db.ListWithCursor(context.Background(), prm) if err != nil { if err != meta.ErrEndOfListing { @@ -80,7 +80,7 @@ func TestLisObjectsWithCursor(t *testing.T) { expected := make([]object.Info, 0, total) // fill metabase with objects - for i := 0; i < containers; i++ { + for range containers { containerID := cidtest.ID() // add one regular object @@ -140,7 +140,7 @@ func TestLisObjectsWithCursor(t *testing.T) { expectedIterations-- } - for i := 0; i < expectedIterations; i++ { + for range expectedIterations { res, cursor, err = metaListWithCursor(db, uint32(countPerReq), cursor) require.NoError(t, err, "count:%d", countPerReq) got = append(got, res...) @@ -169,7 +169,7 @@ func TestAddObjectDuringListingWithCursor(t *testing.T) { expected := make(map[string]int, total) // fill metabase with objects - for i := 0; i < total; i++ { + for range total { obj := testutil.GenerateObject() err := putBig(db, obj) require.NoError(t, err) @@ -186,7 +186,7 @@ func TestAddObjectDuringListingWithCursor(t *testing.T) { } // add new objects - for i := 0; i < total; i++ { + for range total { obj := testutil.GenerateObject() err = putBig(db, obj) require.NoError(t, err) diff --git a/pkg/local_object_storage/metabase/lock_test.go b/pkg/local_object_storage/metabase/lock_test.go index 2d7bfc1cc..62a109b02 100644 --- a/pkg/local_object_storage/metabase/lock_test.go +++ b/pkg/local_object_storage/metabase/lock_test.go @@ -155,7 +155,7 @@ func TestDB_Lock(t *testing.T) { inhumePrm.SetGCMark() - for i := 0; i < objsNum; i++ { + for i := range objsNum { inhumePrm.SetAddresses(objectcore.AddressOf(objs[i])) res, err = db.Inhume(context.Background(), inhumePrm) @@ -255,7 +255,7 @@ func putAndLockObj(t *testing.T, db *meta.DB, numOfLockedObjs int) ([]*objectSDK lockedObjs := make([]*objectSDK.Object, 0, numOfLockedObjs) lockedObjIDs := make([]oid.ID, 0, numOfLockedObjs) - for i := 0; i < numOfLockedObjs; i++ { + for range numOfLockedObjs { obj := testutil.GenerateObjectWithCID(cnr) err := putBig(db, obj) require.NoError(t, err) diff --git a/pkg/local_object_storage/metabase/put_test.go b/pkg/local_object_storage/metabase/put_test.go index 84e4029f2..914f5ef06 100644 --- a/pkg/local_object_storage/metabase/put_test.go +++ b/pkg/local_object_storage/metabase/put_test.go @@ -74,7 +74,7 @@ func BenchmarkPut(b *testing.B) { objs := prepareObjects(b.N) b.ResetTimer() b.ReportAllocs() - for i := 0; i < b.N; i++ { + for range b.N { if err := metaPut(db, objs[index.Add(1)], nil); err != nil { b.Fatal(err) } diff --git 
a/pkg/local_object_storage/metabase/reset_test.go b/pkg/local_object_storage/metabase/reset_test.go index 66f5eefc6..993079dce 100644 --- a/pkg/local_object_storage/metabase/reset_test.go +++ b/pkg/local_object_storage/metabase/reset_test.go @@ -34,7 +34,7 @@ func TestResetDropsContainerBuckets(t *testing.T) { defer func() { require.NoError(t, db.Close()) }() - for idx := 0; idx < 100; idx++ { + for idx := range 100 { var putPrm PutPrm putPrm.SetObject(testutil.GenerateObject()) putPrm.SetStorageID([]byte(fmt.Sprintf("0/%d", idx))) diff --git a/pkg/local_object_storage/metabase/select_test.go b/pkg/local_object_storage/metabase/select_test.go index 4fbc5910e..0fab3a108 100644 --- a/pkg/local_object_storage/metabase/select_test.go +++ b/pkg/local_object_storage/metabase/select_test.go @@ -920,7 +920,7 @@ func TestDB_SelectSplitID_EC(t *testing.T) { ec, err := erasurecode.NewConstructor(dataCount, parityCount) require.NoError(t, err) - for i := 0; i < partCount; i++ { + for i := range partCount { cs, err := ec.Split(tt.objects[i], &pk.PrivateKey) require.NoError(t, err) @@ -1070,7 +1070,7 @@ func BenchmarkSelect(b *testing.B) { cid := cidtest.ID() - for i := 0; i < objCount; i++ { + for i := range objCount { var attr objectSDK.Attribute attr.SetKey("myHeader") attr.SetValue(strconv.Itoa(i)) @@ -1129,7 +1129,7 @@ func benchmarkSelect(b *testing.B, db *meta.DB, cid cidSDK.ID, fs objectSDK.Sear prm.SetContainerID(cid) prm.SetFilters(fs) - for i := 0; i < b.N; i++ { + for range b.N { res, err := db.Select(context.Background(), prm) if err != nil { b.Fatal(err) diff --git a/pkg/local_object_storage/metabase/upgrade.go b/pkg/local_object_storage/metabase/upgrade.go index a4c7707b4..e9abd746c 100644 --- a/pkg/local_object_storage/metabase/upgrade.go +++ b/pkg/local_object_storage/metabase/upgrade.go @@ -143,7 +143,7 @@ func createExpirationEpochBuckets(ctx context.Context, db *bbolt.DB, log func(a return selectObjectsWithExpirationEpoch(ctx, db, objects) }) var count atomic.Uint64 - for i := 0; i < upgradeWorkersCount; i++ { + for range upgradeWorkersCount { eg.Go(func() error { for { select { diff --git a/pkg/local_object_storage/metabase/upgrade_test.go b/pkg/local_object_storage/metabase/upgrade_test.go index dc3d7d07d..3797de0a4 100644 --- a/pkg/local_object_storage/metabase/upgrade_test.go +++ b/pkg/local_object_storage/metabase/upgrade_test.go @@ -91,7 +91,7 @@ func TestGenerateMetabaseFile(t *testing.T) { eg, ctx := errgroup.WithContext(context.Background()) eg.SetLimit(generateWorkersCount) // simple objects - for i := 0; i < simpleObjectsCount; i++ { + for i := range simpleObjectsCount { i := i eg.Go(func() error { obj := testutil.GenerateObjectWithCID(containers[i%len(containers)]) @@ -110,7 +110,7 @@ func TestGenerateMetabaseFile(t *testing.T) { eg, ctx = errgroup.WithContext(context.Background()) eg.SetLimit(generateWorkersCount) // complex objects - for i := 0; i < complexObjectsCount; i++ { + for i := range complexObjectsCount { i := i eg.Go(func() error { parent := testutil.GenerateObjectWithCID(containers[i%len(containers)]) @@ -134,7 +134,7 @@ func TestGenerateMetabaseFile(t *testing.T) { eg, ctx = errgroup.WithContext(context.Background()) eg.SetLimit(generateWorkersCount) // simple objects deleted by gc marks - for i := 0; i < deletedByGCMarksCount; i++ { + for i := range deletedByGCMarksCount { i := i eg.Go(func() error { obj := testutil.GenerateObjectWithCID(containers[i%len(containers)]) @@ -156,7 +156,7 @@ func TestGenerateMetabaseFile(t *testing.T) { eg, ctx = 
errgroup.WithContext(context.Background()) eg.SetLimit(10000) // simple objects deleted by tombstones - for i := 0; i < deletedByTombstoneCount; i++ { + for i := range deletedByTombstoneCount { i := i eg.Go(func() error { obj := testutil.GenerateObjectWithCID(containers[i%len(containers)]) @@ -186,7 +186,7 @@ func TestGenerateMetabaseFile(t *testing.T) { eg, ctx = errgroup.WithContext(context.Background()) eg.SetLimit(generateWorkersCount) // simple objects locked by locks - for i := 0; i < lockedCount; i++ { + for i := range lockedCount { i := i eg.Go(func() error { obj := testutil.GenerateObjectWithCID(containers[i%len(containers)]) diff --git a/pkg/local_object_storage/pilorama/boltdb.go b/pkg/local_object_storage/pilorama/boltdb.go index 29941be83..e2d69cafa 100644 --- a/pkg/local_object_storage/pilorama/boltdb.go +++ b/pkg/local_object_storage/pilorama/boltdb.go @@ -705,7 +705,7 @@ func (t *boltForest) applyOperation(logBucket, treeBucket *bbolt.Bucket, ms []*M key, value = c.Prev() } - for i := 0; i < len(ms); i++ { + for i := range len(ms) { // Loop invariant: key represents the next stored timestamp after ms[i].Time. // 2. Insert the operation. diff --git a/pkg/local_object_storage/pilorama/forest_test.go b/pkg/local_object_storage/pilorama/forest_test.go index 41d7a567c..854fe0aad 100644 --- a/pkg/local_object_storage/pilorama/forest_test.go +++ b/pkg/local_object_storage/pilorama/forest_test.go @@ -194,7 +194,7 @@ func BenchmarkForestSortedIteration(b *testing.B) { const total = 100_000 d := CIDDescriptor{cnr, 0, 1} - for i := 0; i < total; i++ { + for i := range total { u, err := uuid.NewRandom() if err != nil { b.FailNow() @@ -216,7 +216,7 @@ func BenchmarkForestSortedIteration(b *testing.B) { } b.Run(providers[i].name+",root", func(b *testing.B) { - for i := 0; i < b.N; i++ { + for range b.N { res, _, err := f.TreeSortedByFilename(context.Background(), cnr, treeID, MultiNode{RootID}, nil, 100) if err != nil || len(res) != 100 { b.Fatalf("err %v, count %d", err, len(res)) @@ -224,7 +224,7 @@ func BenchmarkForestSortedIteration(b *testing.B) { } }) b.Run(providers[i].name+",leaf", func(b *testing.B) { - for i := 0; i < b.N; i++ { + for range b.N { res, _, err := f.TreeSortedByFilename(context.Background(), cnr, treeID, MultiNode{1}, nil, 100) if err != nil || len(res) != 0 { b.FailNow() @@ -804,7 +804,7 @@ func testForestApplySameOperation(t *testing.T, constructor func(t testing.TB, _ defer func() { require.NoError(t, s.Close()) }() require.NoError(t, s.TreeApply(ctx, cid, treeID, &logs[0], false)) - for i := 0; i < batchSize; i++ { + for range batchSize { errG.Go(func() error { return s.TreeApply(ctx, cid, treeID, &logs[2], false) }) @@ -1043,7 +1043,7 @@ func TestForest_ParallelApply(t *testing.T) { // The operations are guaranteed to be applied and returned sorted by `Time`. func prepareRandomTree(nodeCount, opCount int) []Move { ops := make([]Move, nodeCount+opCount) - for i := 0; i < nodeCount; i++ { + for i := range nodeCount { ops[i] = Move{ Parent: 0, Meta: Meta{ @@ -1121,14 +1121,14 @@ func testForestTreeParallelApply(t *testing.T, constructor func(t testing.TB, _ require.NoError(t, expected.TreeApply(context.Background(), cid, treeID, &ops[i], false)) } - for i := 0; i < iterCount; i++ { + for range iterCount { // Shuffle random operations, leave initialization in place. 
r.Shuffle(len(ops), func(i, j int) { ops[i], ops[j] = ops[j], ops[i] }) actual := constructor(t, WithMaxBatchSize(batchSize), WithNoSync(true)) wg := new(sync.WaitGroup) ch := make(chan *Move) - for i := 0; i < batchSize; i++ { + for range batchSize { wg.Add(1) go func() { defer wg.Done() @@ -1170,7 +1170,7 @@ func testForestTreeApplyRandom(t *testing.T, constructor func(t testing.TB, _ .. } const iterCount = 200 - for i := 0; i < iterCount; i++ { + for range iterCount { // Shuffle random operations, leave initialization in place. r.Shuffle(len(ops), func(i, j int) { ops[i], ops[j] = ops[j], ops[i] }) @@ -1247,7 +1247,7 @@ func BenchmarkApplyReorderLast(b *testing.B) { Child: uint64(r.Intn(benchNodeCount)), } if i != 0 && i%blockSize == 0 { - for j := 0; j < blockSize/2; j++ { + for j := range blockSize / 2 { ops[i-j], ops[i+j-blockSize] = ops[i+j-blockSize], ops[i-j] } } @@ -1265,7 +1265,7 @@ func benchmarkApply(b *testing.B, s Forest, genFunc func(int) []Move) { cid := cidtest.ID() treeID := "version" ch := make(chan int, b.N) - for i := 0; i < b.N; i++ { + for i := range b.N { ch <- i } @@ -1311,7 +1311,7 @@ func testTreeGetByPath(t *testing.T, s ForestStorage) { if mf, ok := s.(*memoryForest); ok { single := mf.treeMap[cid.String()+"/"+treeID] t.Run("test meta", func(t *testing.T) { - for i := 0; i < 6; i++ { + for i := range 6 { require.Equal(t, uint64(i), single.infoMap[Node(i+1)].Meta.Time) } }) @@ -1492,7 +1492,7 @@ func testTreeListTreesCount(t *testing.T, constructor func(t testing.TB, _ ...Op var expected []ContainerIDTreeID treeIDs := []string{"version", "system", "s", "avada kedavra"} - for i := 0; i < count; i++ { + for i := range count { cid := cidtest.ID() treeID := treeIDs[i%len(treeIDs)] expected = append(expected, ContainerIDTreeID{ diff --git a/pkg/local_object_storage/shard/list_test.go b/pkg/local_object_storage/shard/list_test.go index 8a49a36fd..3414dc76a 100644 --- a/pkg/local_object_storage/shard/list_test.go +++ b/pkg/local_object_storage/shard/list_test.go @@ -39,11 +39,11 @@ func testShardList(t *testing.T, sh *Shard) { var errG errgroup.Group errG.SetLimit(C * N) - for i := 0; i < C; i++ { + for range C { errG.Go(func() error { cnr := cidtest.ID() - for j := 0; j < N; j++ { + for range N { errG.Go(func() error { obj := testutil.GenerateObjectWithCID(cnr) testutil.AddPayload(obj, 1<<2) diff --git a/pkg/local_object_storage/shard/metrics_test.go b/pkg/local_object_storage/shard/metrics_test.go index 38d465f31..1ef849c02 100644 --- a/pkg/local_object_storage/shard/metrics_test.go +++ b/pkg/local_object_storage/shard/metrics_test.go @@ -206,7 +206,7 @@ func TestCounters(t *testing.T) { const objNumber = 10 oo := make([]*objectSDK.Object, objNumber) - for i := 0; i < objNumber; i++ { + for i := range objNumber { oo[i] = testutil.GenerateObject() } @@ -248,7 +248,7 @@ func TestCounters(t *testing.T) { var prm PutPrm - for i := 0; i < objNumber; i++ { + for i := range objNumber { prm.SetObject(oo[i]) _, err := sh.Put(context.Background(), prm) @@ -269,7 +269,7 @@ func TestCounters(t *testing.T) { var prm InhumePrm inhumedNumber := objNumber / 4 - for i := 0; i < inhumedNumber; i++ { + for i := range inhumedNumber { prm.MarkAsGarbage(objectcore.AddressOf(oo[i])) _, err := sh.Inhume(context.Background(), prm) @@ -317,7 +317,7 @@ func TestCounters(t *testing.T) { _, err := sh.Inhume(context.Background(), prm) require.NoError(t, err) - for i := 0; i < inhumedNumber; i++ { + for i := range inhumedNumber { cid, ok := oo[i].ContainerID() require.True(t, ok) 
expectedLogicalSizes[cid.EncodeToString()] -= int64(oo[i].PayloadSize()) @@ -419,7 +419,7 @@ func shardWithMetrics(t *testing.T, path string) (*Shard, *metricsStore) { func addrFromObjs(oo []*objectSDK.Object) []oid.Address { aa := make([]oid.Address, len(oo)) - for i := 0; i < len(oo); i++ { + for i := range len(oo) { aa[i] = objectcore.AddressOf(oo[i]) } diff --git a/pkg/local_object_storage/shard/refill_test.go b/pkg/local_object_storage/shard/refill_test.go index 509ccaaa6..0025bb45a 100644 --- a/pkg/local_object_storage/shard/refill_test.go +++ b/pkg/local_object_storage/shard/refill_test.go @@ -38,7 +38,7 @@ func benchRefillMetabase(b *testing.B, objectsCount int) { var putPrm PutPrm - for i := 0; i < objectsCount/2; i++ { + for range objectsCount / 2 { obj := testutil.GenerateObject() testutil.AddAttribute(obj, "foo", "bar") testutil.AddPayload(obj, 1<<5) // blobvnicza tree obj @@ -49,7 +49,7 @@ func benchRefillMetabase(b *testing.B, objectsCount int) { require.NoError(b, err) } - for i := 0; i < objectsCount/2; i++ { + for range objectsCount / 2 { obj := testutil.GenerateObject() testutil.AddAttribute(obj, "foo", "bar") obj.SetID(oidtest.ID()) diff --git a/pkg/local_object_storage/writecache/benchmark/writecache_test.go b/pkg/local_object_storage/writecache/benchmark/writecache_test.go index 4f4398452..4da9a26d7 100644 --- a/pkg/local_object_storage/writecache/benchmark/writecache_test.go +++ b/pkg/local_object_storage/writecache/benchmark/writecache_test.go @@ -54,7 +54,7 @@ func benchmarkPutSeq(b *testing.B, cache writecache.Cache, size uint64) { objGen := testutil.RandObjGenerator{ObjSize: size} b.ResetTimer() - for n := 0; n < b.N; n++ { + for range b.N { obj := objGen.Next() rawData, err := obj.Marshal() require.NoError(b, err, "marshaling object") diff --git a/pkg/local_object_storage/writecache/flush.go b/pkg/local_object_storage/writecache/flush.go index e34f5a76b..930ac8431 100644 --- a/pkg/local_object_storage/writecache/flush.go +++ b/pkg/local_object_storage/writecache/flush.go @@ -41,7 +41,7 @@ func (c *cache) runFlushLoop(ctx context.Context) { if c.disableBackgroundFlush { return } - for i := 0; i < c.workersCount; i++ { + for range c.workersCount { c.wg.Add(1) go c.workerFlushSmall(ctx) } diff --git a/pkg/morph/event/notary_preparator_test.go b/pkg/morph/event/notary_preparator_test.go index 8da9d868a..4c269bcbd 100644 --- a/pkg/morph/event/notary_preparator_test.go +++ b/pkg/morph/event/notary_preparator_test.go @@ -439,7 +439,7 @@ func TestPrepare_CorrectNR(t *testing.T) { ) for _, test := range tests { - for i := 0; i < 1; i++ { // run tests against 3 and 4 witness NR + for i := range 1 { // run tests against 3 and 4 witness NR for _, dummyMultisig := range []bool{true, false} { // run tests against empty and dummy multisig/Notary witness additionalWitness := i == 0 nr := correctNR(script(test.hash, test.method, test.args...), dummyMultisig, additionalWitness) diff --git a/pkg/morph/timer/block_test.go b/pkg/morph/timer/block_test.go index 93bb04de5..7929754c1 100644 --- a/pkg/morph/timer/block_test.go +++ b/pkg/morph/timer/block_test.go @@ -208,7 +208,7 @@ func TestBlockTimer_TickSameHeight(t *testing.T) { require.NoError(t, bt.Reset()) check := func(t *testing.T, h uint32, base, delta int) { - for i := 0; i < 2*int(blockDur); i++ { + for range 2 * int(blockDur) { bt.Tick(h) require.Equal(t, base, baseCounter) require.Equal(t, delta, deltaCounter) diff --git a/pkg/network/tls_test.go b/pkg/network/tls_test.go index 6c352484b..14729f4c2 100644 --- 
a/pkg/network/tls_test.go +++ b/pkg/network/tls_test.go @@ -37,7 +37,7 @@ func BenchmarkAddressTLSEnabled(b *testing.B) { b.ReportAllocs() var enabled bool - for i := 0; i < b.N; i++ { + for range b.N { enabled = addr.IsTLSEnabled() } require.True(b, enabled) diff --git a/pkg/services/control/server/evacuate.go b/pkg/services/control/server/evacuate.go index 0ba8be765..300cb9dc9 100644 --- a/pkg/services/control/server/evacuate.go +++ b/pkg/services/control/server/evacuate.go @@ -169,7 +169,7 @@ func (s *Server) getContainerNodes(contID cid.ID) ([]netmap.NodeInfo, error) { nodes := placement.FlattenNodes(ns) bs := (*keys.PublicKey)(&s.key.PublicKey).Bytes() - for i := 0; i < len(nodes); i++ { + for i := range len(nodes) { if bytes.Equal(nodes[i].PublicKey(), bs) { copy(nodes[i:], nodes[i+1:]) nodes = nodes[:len(nodes)-1] diff --git a/pkg/services/object/acl/v2/util_test.go b/pkg/services/object/acl/v2/util_test.go index 394feef4e..435339683 100644 --- a/pkg/services/object/acl/v2/util_test.go +++ b/pkg/services/object/acl/v2/util_test.go @@ -33,7 +33,7 @@ func TestOriginalTokens(t *testing.T) { var sTokenV2 session.Token sToken.WriteToV2(&sTokenV2) - for i := 0; i < 10; i++ { + for i := range 10 { metaHeaders := testGenerateMetaHeader(uint32(i), &bTokenV2, &sTokenV2) res, err := originalSessionToken(metaHeaders) require.NoError(t, err) diff --git a/pkg/services/object/get/get_test.go b/pkg/services/object/get/get_test.go index 1fc6b7b20..6827018dc 100644 --- a/pkg/services/object/get/get_test.go +++ b/pkg/services/object/get/get_test.go @@ -470,7 +470,7 @@ func testNodeMatrix(t testing.TB, dim []int) ([][]netmap.NodeInfo, [][]string) { ns := make([]netmap.NodeInfo, dim[i]) as := make([]string, dim[i]) - for j := 0; j < dim[i]; j++ { + for j := range dim[i] { a := fmt.Sprintf("/ip4/192.168.0.%s/tcp/%s", strconv.Itoa(i), strconv.Itoa(60000+j), @@ -508,7 +508,7 @@ func generateChain(ln int, cnr cid.ID) ([]*objectSDK.Object, []oid.ID, []byte) { ids := make([]oid.ID, 0, ln) payload := make([]byte, 0, ln*10) - for i := 0; i < ln; i++ { + for i := range ln { ids = append(ids, curID) addr.SetObject(curID) @@ -1750,7 +1750,7 @@ func TestGetRange(t *testing.T) { }, }) - for from := 0; from < totalSize-1; from++ { + for from := range totalSize - 1 { for to := from; to < totalSize; to++ { t.Run(fmt.Sprintf("from=%d,to=%d", from, to), func(t *testing.T) { testGetRange(t, svc, addr, uint64(from), uint64(to), payload) @@ -1811,7 +1811,7 @@ func TestGetRange(t *testing.T) { }, }) - for from := 0; from < totalSize-1; from++ { + for from := range totalSize - 1 { for to := from; to < totalSize; to++ { t.Run(fmt.Sprintf("from=%d,to=%d", from, to), func(t *testing.T) { testGetRange(t, svc, addr, uint64(from), uint64(to), payload) diff --git a/pkg/services/object/get/getrangeec_test.go b/pkg/services/object/get/getrangeec_test.go index b8497d7d1..a6882d4a8 100644 --- a/pkg/services/object/get/getrangeec_test.go +++ b/pkg/services/object/get/getrangeec_test.go @@ -131,7 +131,7 @@ func TestGetRangeEC(t *testing.T) { clients: clients, }) - for from := 0; from < totalSize-1; from++ { + for from := range totalSize - 1 { for to := from; to < totalSize; to++ { t.Run(fmt.Sprintf("from=%d,to=%d", from, to), func(t *testing.T) { testGetRange(t, svc, addr, uint64(from), uint64(to), obj.Payload()) diff --git a/pkg/services/object/put/ec.go b/pkg/services/object/put/ec.go index 1fadf65fe..9980f6d61 100644 --- a/pkg/services/object/put/ec.go +++ b/pkg/services/object/put/ec.go @@ -276,7 +276,7 @@ func (e *ecWriter) 
writePart(ctx context.Context, obj *objectSDK.Object, partIdx } // try to save to any node not visited by current part - for i := 0; i < len(nodes); i++ { + for i := range len(nodes) { select { case <-ctx.Done(): return ctx.Err() diff --git a/pkg/services/object/search/search_test.go b/pkg/services/object/search/search_test.go index 679380402..44abcfe5b 100644 --- a/pkg/services/object/search/search_test.go +++ b/pkg/services/object/search/search_test.go @@ -151,7 +151,7 @@ func testSHA256() (cs [sha256.Size]byte) { func generateIDs(num int) []oid.ID { res := make([]oid.ID, num) - for i := 0; i < num; i++ { + for i := range num { res[i].SetSHA256(testSHA256()) } @@ -232,7 +232,7 @@ func testNodeMatrix(t testing.TB, dim []int) ([][]netmap.NodeInfo, [][]string) { ns := make([]netmap.NodeInfo, dim[i]) as := make([]string, dim[i]) - for j := 0; j < dim[i]; j++ { + for j := range dim[i] { a := fmt.Sprintf("/ip4/192.168.0.%s/tcp/%s", strconv.Itoa(i), strconv.Itoa(60000+j), diff --git a/pkg/services/object_manager/placement/cache_test.go b/pkg/services/object_manager/placement/cache_test.go index 07e9340dc..a890d5357 100644 --- a/pkg/services/object_manager/placement/cache_test.go +++ b/pkg/services/object_manager/placement/cache_test.go @@ -64,7 +64,7 @@ func TestContainerNodesCache(t *testing.T) { nm2 := nm(1, nodes[1:2]) cnr := [size * 2]cid.ID{} res := [size * 2][][]netmapSDK.NodeInfo{} - for i := 0; i < size*2; i++ { + for i := range size * 2 { cnr[i] = cidtest.ID() var err error @@ -77,7 +77,7 @@ func TestContainerNodesCache(t *testing.T) { require.NoError(t, err) require.Equal(t, res[i], r) } - for i := 0; i < size; i++ { + for i := range size { r, err := c.ContainerNodes(nm2, cnr[i], pp) require.NoError(t, err) require.NotEqual(t, res[i], r) diff --git a/pkg/services/object_manager/placement/traverser.go b/pkg/services/object_manager/placement/traverser.go index 9a5877c52..4e790628f 100644 --- a/pkg/services/object_manager/placement/traverser.go +++ b/pkg/services/object_manager/placement/traverser.go @@ -136,7 +136,7 @@ func defaultCopiesVector(policy netmap.PlacementPolicy) []int { replNum := policy.NumberOfReplicas() copyVector := make([]int, 0, replNum) - for i := 0; i < replNum; i++ { + for i := range replNum { copyVector = append(copyVector, int(policy.ReplicaDescriptor(i).NumberOfObjects()+policy.ReplicaDescriptor(i).GetECDataCount()+policy.ReplicaDescriptor(i).GetECParityCount())) } @@ -212,7 +212,7 @@ func (t *Traverser) Next() []Node { nodes := make([]Node, count) - for i := 0; i < count; i++ { + for i := range count { err := nodes[i].addresses.FromIterator(network.NodeEndpointsIterator(t.vectors[0][i])) if err != nil { return nil diff --git a/pkg/services/object_manager/placement/traverser_test.go b/pkg/services/object_manager/placement/traverser_test.go index f5731c81e..b3b57677d 100644 --- a/pkg/services/object_manager/placement/traverser_test.go +++ b/pkg/services/object_manager/placement/traverser_test.go @@ -48,7 +48,7 @@ func testPlacement(ss, rs []int) ([][]netmap.NodeInfo, container.Container) { for i := range ss { ns := make([]netmap.NodeInfo, 0, ss[i]) - for j := 0; j < ss[i]; j++ { + for range ss[i] { ns = append(ns, testNode(num)) num++ } @@ -125,7 +125,7 @@ func TestTraverserObjectScenarios(t *testing.T) { ) require.NoError(t, err) - for i := 0; i < len(nodes[0]); i++ { + for range len(nodes[0]) { require.NotNil(t, tr.Next()) } @@ -164,7 +164,7 @@ func TestTraverserObjectScenarios(t *testing.T) { require.Empty(t, tr.Next()) require.False(t, tr.Success()) - for i := 
0; i < replicas[curVector]; i++ { + for range replicas[curVector] { tr.SubmitSuccess() } } diff --git a/pkg/services/session/storage/persistent/executor_test.go b/pkg/services/session/storage/persistent/executor_test.go index 39cb14359..124d36930 100644 --- a/pkg/services/session/storage/persistent/executor_test.go +++ b/pkg/services/session/storage/persistent/executor_test.go @@ -39,7 +39,7 @@ func TestTokenStore(t *testing.T) { tokens := make([]tok, 0, tokenNumber) - for i := 0; i < tokenNumber; i++ { + for i := range tokenNumber { req.SetExpiration(uint64(i)) res, err := ts.Create(context.Background(), req) diff --git a/pkg/services/tree/getsubtree_test.go b/pkg/services/tree/getsubtree_test.go index 677431889..95bdda34b 100644 --- a/pkg/services/tree/getsubtree_test.go +++ b/pkg/services/tree/getsubtree_test.go @@ -62,7 +62,7 @@ func TestGetSubTree(t *testing.T) { loop: for i := 1; i < len(acc.seen); i++ { parent := acc.seen[i].Body.ParentId - for j := 0; j < i; j++ { + for j := range i { if acc.seen[j].Body.NodeId[0] == parent[0] { continue loop } diff --git a/pkg/services/tree/replicator.go b/pkg/services/tree/replicator.go index 346198b3c..95c8f8013 100644 --- a/pkg/services/tree/replicator.go +++ b/pkg/services/tree/replicator.go @@ -133,7 +133,7 @@ func (s *Service) ReplicateTreeOp(ctx context.Context, n netmapSDK.NodeInfo, req } func (s *Service) replicateLoop(ctx context.Context) { - for i := 0; i < s.replicatorWorkerCount; i++ { + for range s.replicatorWorkerCount { go s.replicationWorker(ctx) go s.localReplicationWorker(ctx) } diff --git a/pkg/util/sync/key_locker_test.go b/pkg/util/sync/key_locker_test.go index 3b3e6a694..259064ecf 100644 --- a/pkg/util/sync/key_locker_test.go +++ b/pkg/util/sync/key_locker_test.go @@ -13,7 +13,7 @@ func TestKeyLocker(t *testing.T) { taken := false eg, _ := errgroup.WithContext(context.Background()) keyLocker := NewKeyLocker[int]() - for i := 0; i < 100; i++ { + for range 100 { eg.Go(func() error { keyLocker.Lock(0) defer keyLocker.Unlock(0)