diff --git a/cmd/frostfs-adm/internal/modules/morph/contract/dump_hashes.go b/cmd/frostfs-adm/internal/modules/morph/contract/dump_hashes.go index 5a0d2955..be2134b7 100644 --- a/cmd/frostfs-adm/internal/modules/morph/contract/dump_hashes.go +++ b/cmd/frostfs-adm/internal/modules/morph/contract/dump_hashes.go @@ -68,7 +68,7 @@ func dumpContractHashes(cmd *cobra.Command, _ []string) error { if irSize != 0 { bw.Reset() - for i := 0; i < irSize; i++ { + for i := range irSize { emit.AppCall(bw.BinWriter, cs.Hash, "resolve", callflag.ReadOnly, helper.GetAlphabetNNSDomain(i), int64(nns.TXT)) @@ -79,7 +79,7 @@ func dumpContractHashes(cmd *cobra.Command, _ []string) error { return fmt.Errorf("can't fetch info from NNS: %w", err) } - for i := 0; i < irSize; i++ { + for i := range irSize { info := contractDumpInfo{name: fmt.Sprintf("alphabet %d", i)} if h, err := helper.ParseNNSResolveResult(alphaRes.Stack[i]); err == nil { info.hash = h diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/local_client.go b/cmd/frostfs-adm/internal/modules/morph/helper/local_client.go index 44d1b4ec..375fa84d 100644 --- a/cmd/frostfs-adm/internal/modules/morph/helper/local_client.go +++ b/cmd/frostfs-adm/internal/modules/morph/helper/local_client.go @@ -224,7 +224,7 @@ func (l *LocalClient) CalculateNetworkFee(tx *transaction.Transaction) (int64, e paramz = []manifest.Parameter{{Type: smartcontract.SignatureType}} } else if nSigs, _, ok := vm.ParseMultiSigContract(w.VerificationScript); ok { paramz = make([]manifest.Parameter, nSigs) - for j := 0; j < nSigs; j++ { + for j := range nSigs { paramz[j] = manifest.Parameter{Type: smartcontract.SignatureType} } } diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/util.go b/cmd/frostfs-adm/internal/modules/morph/helper/util.go index 2d9281c2..8c6b9053 100644 --- a/cmd/frostfs-adm/internal/modules/morph/helper/util.go +++ b/cmd/frostfs-adm/internal/modules/morph/helper/util.go @@ -44,7 +44,7 @@ func openAlphabetWallets(v *viper.Viper, walletDir string) ([]*wallet.Wallet, er var wallets []*wallet.Wallet var letter string - for i := 0; i < constants.MaxAlphabetNodes; i++ { + for i := range constants.MaxAlphabetNodes { letter = innerring.GlagoliticLetter(i).String() p := filepath.Join(walletDir, letter+".json") var w *wallet.Wallet diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_test.go b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_test.go index 6c52aa2a..74f5d3e8 100644 --- a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_test.go +++ b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_test.go @@ -113,7 +113,7 @@ func generateTestData(dir string, size int) error { } var pubs []string - for i := 0; i < size; i++ { + for i := range size { p := filepath.Join(dir, innerring.GlagoliticLetter(i).String()+".json") w, err := wallet.NewWalletFromFile(p) if err != nil { @@ -148,7 +148,7 @@ func generateTestData(dir string, size int) error { } func setTestCredentials(v *viper.Viper, size int) { - for i := 0; i < size; i++ { + for i := range size { v.Set("credentials."+innerring.GlagoliticLetter(i).String(), strconv.FormatUint(uint64(i), 10)) } v.Set("credentials.contract", constants.TestContractPassword) diff --git a/cmd/frostfs-cli/internal/client/client.go b/cmd/frostfs-cli/internal/client/client.go index a0fa2241..57bcf562 100644 --- a/cmd/frostfs-cli/internal/client/client.go +++ b/cmd/frostfs-cli/internal/client/client.go @@ -692,7 +692,7 @@ func SearchObjects(ctx context.Context, prm SearchObjectsPrm) 
(*SearchObjectsRes for { n, ok = rdr.Read(buf) - for i := 0; i < n; i++ { + for i := range n { list = append(list, buf[i]) } if !ok { diff --git a/cmd/frostfs-cli/modules/container/create.go b/cmd/frostfs-cli/modules/container/create.go index c6f57690..f37b169c 100644 --- a/cmd/frostfs-cli/modules/container/create.go +++ b/cmd/frostfs-cli/modules/container/create.go @@ -139,7 +139,7 @@ It will be stored in sidechain when inner ring will accepts it.`, }, } - for i := 0; i < awaitTimeout; i++ { + for range awaitTimeout { time.Sleep(1 * time.Second) _, err := internalclient.GetContainer(cmd.Context(), getPrm) diff --git a/cmd/frostfs-cli/modules/container/delete.go b/cmd/frostfs-cli/modules/container/delete.go index e5425bf2..c2018888 100644 --- a/cmd/frostfs-cli/modules/container/delete.go +++ b/cmd/frostfs-cli/modules/container/delete.go @@ -110,7 +110,7 @@ Only owner of the container has a permission to remove container.`, }, } - for i := 0; i < awaitTimeout; i++ { + for range awaitTimeout { time.Sleep(1 * time.Second) _, err := internalclient.GetContainer(cmd.Context(), getPrm) diff --git a/cmd/frostfs-cli/modules/object/nodes.go b/cmd/frostfs-cli/modules/object/nodes.go index 896f6f17..0eac4e6d 100644 --- a/cmd/frostfs-cli/modules/object/nodes.go +++ b/cmd/frostfs-cli/modules/object/nodes.go @@ -195,7 +195,7 @@ func flattenComplexMembersIfECContainer(cmd *cobra.Command, cnrID cid.ID, member prmHead.SetRawFlag(true) // to get an error instead of whole object eg, egCtx := errgroup.WithContext(cmd.Context()) - for idx := 0; idx < len(members); idx++ { + for idx := range len(members) { partObjID := members[idx] eg.Go(func() error { diff --git a/cmd/frostfs-cli/modules/util/acl.go b/cmd/frostfs-cli/modules/util/acl.go index 4c2e324b..145dcc75 100644 --- a/cmd/frostfs-cli/modules/util/acl.go +++ b/cmd/frostfs-cli/modules/util/acl.go @@ -33,7 +33,7 @@ func PrettyPrintTableBACL(cmd *cobra.Command, bacl *acl.Basic) { fmt.Fprintln(w, strings.Join(bits, "\t")) // Footer footer := []string{"X F"} - for i := 0; i < 7; i++ { + for range 7 { footer = append(footer, "U S O B") } fmt.Fprintln(w, strings.Join(footer, "\t")) diff --git a/cmd/frostfs-node/config/node/config.go b/cmd/frostfs-node/config/node/config.go index 97aca274..4d063245 100644 --- a/cmd/frostfs-node/config/node/config.go +++ b/cmd/frostfs-node/config/node/config.go @@ -121,7 +121,7 @@ func BootstrapAddresses(c *config.Config) (addr network.AddressGroup) { func Attributes(c *config.Config) (attrs []string) { const maxAttributes = 100 - for i := 0; i < maxAttributes; i++ { + for i := range maxAttributes { attr := config.StringSafe(c.Sub(subsection), attributePrefix+"_"+strconv.Itoa(i)) if attr == "" { return diff --git a/cmd/frostfs-node/morph.go b/cmd/frostfs-node/morph.go index 1b148095..7178cd97 100644 --- a/cmd/frostfs-node/morph.go +++ b/cmd/frostfs-node/morph.go @@ -157,7 +157,7 @@ var ( ) func waitNotaryDeposit(ctx context.Context, c *cfg, tx util.Uint256) error { - for i := 0; i < notaryDepositRetriesAmount; i++ { + for range notaryDepositRetriesAmount { c.log.Debug(logs.ClientAttemptToWaitForNotaryDepositTransactionToGetPersisted) select { case <-ctx.Done(): diff --git a/pkg/innerring/indexer_test.go b/pkg/innerring/indexer_test.go index 5bc2cc98..c8a819b5 100644 --- a/pkg/innerring/indexer_test.go +++ b/pkg/innerring/indexer_test.go @@ -237,7 +237,7 @@ func BenchmarkKeyPosition(b *testing.B) { b.ResetTimer() b.ReportAllocs() - for i := 0; i < b.N; i++ { + for range b.N { if keyPosition(key, list) != 5 { b.FailNow() } diff --git 
a/pkg/innerring/notary.go b/pkg/innerring/notary.go index c601f558..e6f2b1de 100644 --- a/pkg/innerring/notary.go +++ b/pkg/innerring/notary.go @@ -90,7 +90,7 @@ func (s *Server) initNotary(ctx context.Context, deposit depositor, await awaite } func awaitNotaryDepositInClient(ctx context.Context, cli *client.Client, txHash util.Uint256) error { - for i := 0; i < notaryDepositTimeout; i++ { + for range notaryDepositTimeout { select { case <-ctx.Done(): return ctx.Err() diff --git a/pkg/innerring/processors/alphabet/handlers_test.go b/pkg/innerring/processors/alphabet/handlers_test.go index 34690194..dfda3747 100644 --- a/pkg/innerring/processors/alphabet/handlers_test.go +++ b/pkg/innerring/processors/alphabet/handlers_test.go @@ -21,7 +21,7 @@ func TestProcessorEmitsGasToNetmapAndAlphabet(t *testing.T) { var parsedWallets []util.Uint160 = []util.Uint160{{20}, {25}} alphabetContracts := innerring.NewAlphabetContracts() - for i := 0; i <= index; i++ { + for i := range index + 1 { alphabetContracts[innerring.GlagoliticLetter(i)] = util.Uint160{uint8(i)} } @@ -98,7 +98,7 @@ func TestProcessorEmitsGasToNetmapIfNoParsedWallets(t *testing.T) { var parsedWallets []util.Uint160 = []util.Uint160{} alphabetContracts := innerring.NewAlphabetContracts() - for i := 0; i <= index; i++ { + for i := range index + 1 { alphabetContracts[innerring.GlagoliticLetter(i)] = util.Uint160{uint8(i)} } @@ -170,7 +170,7 @@ func TestProcessorDoesntEmitGasIfNoNetmapOrParsedWallets(t *testing.T) { var parsedWallets []util.Uint160 = []util.Uint160{} alphabetContracts := innerring.NewAlphabetContracts() - for i := 0; i <= index; i++ { + for i := range index + 1 { alphabetContracts[innerring.GlagoliticLetter(i)] = util.Uint160{uint8(i)} } diff --git a/pkg/innerring/processors/governance/handlers_test.go b/pkg/innerring/processors/governance/handlers_test.go index 2a505f8d..b73e2431 100644 --- a/pkg/innerring/processors/governance/handlers_test.go +++ b/pkg/innerring/processors/governance/handlers_test.go @@ -190,7 +190,7 @@ func generateTestKeys(t *testing.T) testKeys { for { var result testKeys - for i := 0; i < 4; i++ { + for range 4 { pk, err := keys.NewPrivateKey() require.NoError(t, err, "failed to create private key") result.sidechainKeys = append(result.sidechainKeys, pk.PublicKey()) diff --git a/pkg/innerring/processors/governance/list_test.go b/pkg/innerring/processors/governance/list_test.go index d099ec83..4ecebf05 100644 --- a/pkg/innerring/processors/governance/list_test.go +++ b/pkg/innerring/processors/governance/list_test.go @@ -49,7 +49,7 @@ func TestNewAlphabetList(t *testing.T) { } ln := len(rounds) - for i := 0; i < ln; i++ { + for i := range ln { list, err = newAlphabetList(list, exp) require.NoError(t, err) require.True(t, equalPublicKeyLists(list, rounds[i])) @@ -131,7 +131,7 @@ func TestUpdateInnerRing(t *testing.T) { func generateKeys(n int) (keys.PublicKeys, error) { pubKeys := make(keys.PublicKeys, 0, n) - for i := 0; i < n; i++ { + for range n { privKey, err := keys.NewPrivateKey() if err != nil { return nil, err diff --git a/pkg/local_object_storage/blobovnicza/sizes_test.go b/pkg/local_object_storage/blobovnicza/sizes_test.go index 01093b8d..d582fc5e 100644 --- a/pkg/local_object_storage/blobovnicza/sizes_test.go +++ b/pkg/local_object_storage/blobovnicza/sizes_test.go @@ -42,7 +42,7 @@ func TestSizes(t *testing.T) { func BenchmarkUpperBound(b *testing.B) { for _, size := range []uint64{1, 1023, 65 * 1024, 40 * 1024 * 1024} { b.Run(fmt.Sprintf("size=%d", size), func(b *testing.B) { - for i := 
0; i < b.N; i++ { + for range b.N { _ = upperPowerOfTwo(size) } }) diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/concurrency_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/concurrency_test.go index 5bed8614..cc8a52d0 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/concurrency_test.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/concurrency_test.go @@ -34,7 +34,7 @@ func TestBlobovniczaTree_Concurrency(t *testing.T) { var cnt atomic.Int64 var wg sync.WaitGroup - for i := 0; i < 1000; i++ { + for range 1000 { wg.Add(1) go func() { defer wg.Done() diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go index 7a1de4c1..4a51fd86 100644 --- a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go +++ b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go @@ -127,7 +127,7 @@ func testBlobovniczaTreeRebuildHelper(t *testing.T, sourceDepth, sourceWidth, ta eg, egCtx := errgroup.WithContext(context.Background()) storageIDs := make(map[oid.Address][]byte) storageIDsGuard := &sync.Mutex{} - for i := 0; i < 100; i++ { + for range 100 { eg.Go(func() error { obj := blobstortest.NewObject(1024) data, err := obj.Marshal() diff --git a/pkg/local_object_storage/blobstor/blobstor_test.go b/pkg/local_object_storage/blobstor/blobstor_test.go index c7d80dc8..bed5e0eb 100644 --- a/pkg/local_object_storage/blobstor/blobstor_test.go +++ b/pkg/local_object_storage/blobstor/blobstor_test.go @@ -60,7 +60,7 @@ func TestCompression(t *testing.T) { bigObj := make([]*objectSDK.Object, objCount) smallObj := make([]*objectSDK.Object, objCount) - for i := 0; i < objCount; i++ { + for i := range objCount { bigObj[i] = testObject(smallSizeLimit * 2) smallObj[i] = testObject(smallSizeLimit / 2) } @@ -219,7 +219,7 @@ func TestConcurrentPut(t *testing.T) { bigObj := testObject(smallSizeLimit * 2) var wg sync.WaitGroup - for i := 0; i < concurrentPutCount; i++ { + for range concurrentPutCount { wg.Add(1) go func() { testPut(t, blobStor, bigObj) @@ -235,7 +235,7 @@ func TestConcurrentPut(t *testing.T) { bigObj := testObject(smallSizeLimit * 2) var wg sync.WaitGroup - for i := 0; i < concurrentPutCount+1; i++ { + for range concurrentPutCount + 1 { wg.Add(1) go func() { testPutFileExistsError(t, blobStor, bigObj) @@ -251,7 +251,7 @@ func TestConcurrentPut(t *testing.T) { smallObj := testObject(smallSizeLimit / 2) var wg sync.WaitGroup - for i := 0; i < concurrentPutCount; i++ { + for range concurrentPutCount { wg.Add(1) go func() { testPut(t, blobStor, smallObj) @@ -302,7 +302,7 @@ func TestConcurrentDelete(t *testing.T) { testPut(t, blobStor, bigObj) var wg sync.WaitGroup - for i := 0; i < 2; i++ { + for range 2 { wg.Add(1) go func() { testDelete(t, blobStor, bigObj) @@ -319,7 +319,7 @@ func TestConcurrentDelete(t *testing.T) { testPut(t, blobStor, smallObj) var wg sync.WaitGroup - for i := 0; i < 2; i++ { + for range 2 { wg.Add(1) go func() { testDelete(t, blobStor, smallObj) diff --git a/pkg/local_object_storage/blobstor/compression/bench_test.go b/pkg/local_object_storage/blobstor/compression/bench_test.go index 98691298..9f70f8ec 100644 --- a/pkg/local_object_storage/blobstor/compression/bench_test.go +++ b/pkg/local_object_storage/blobstor/compression/bench_test.go @@ -36,7 +36,7 @@ func BenchmarkCompression(b *testing.B) { func benchWith(b *testing.B, c Config, data []byte) { b.ResetTimer() b.ReportAllocs() - for i := 0; i < b.N; i++ { + for range b.N { _ = 
c.Compress(data) } } diff --git a/pkg/local_object_storage/blobstor/fstree/fstree_test.go b/pkg/local_object_storage/blobstor/fstree/fstree_test.go index d633cbac..5786dfd3 100644 --- a/pkg/local_object_storage/blobstor/fstree/fstree_test.go +++ b/pkg/local_object_storage/blobstor/fstree/fstree_test.go @@ -28,7 +28,7 @@ func Benchmark_addressFromString(b *testing.B) { b.ReportAllocs() b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { _, err := addressFromString(s) if err != nil { b.Fatalf("benchmark error: %v", err) @@ -73,7 +73,7 @@ func TestObjectCounter(t *testing.T) { eg, egCtx := errgroup.WithContext(context.Background()) eg.Go(func() error { - for j := 0; j < 1_000; j++ { + for range 1_000 { _, err := fst.Put(egCtx, putPrm) if err != nil { return err @@ -84,7 +84,7 @@ func TestObjectCounter(t *testing.T) { eg.Go(func() error { var le logicerr.Logical - for j := 0; j < 1_000; j++ { + for range 1_000 { _, err := fst.Delete(egCtx, delPrm) if err != nil && !errors.As(err, &le) { return err diff --git a/pkg/local_object_storage/blobstor/perf_test.go b/pkg/local_object_storage/blobstor/perf_test.go index 501c95a1..1ac769e3 100644 --- a/pkg/local_object_storage/blobstor/perf_test.go +++ b/pkg/local_object_storage/blobstor/perf_test.go @@ -110,7 +110,7 @@ func BenchmarkSubstorageReadPerf(b *testing.B) { // Fill database var errG errgroup.Group - for i := 0; i < tt.size; i++ { + for range tt.size { obj := objGen.Next() addr := testutil.AddressFromObject(b, obj) errG.Go(func() error { @@ -203,7 +203,7 @@ func BenchmarkSubstorageIteratePerf(b *testing.B) { defer func() { require.NoError(b, st.Close()) }() // Fill database - for i := 0; i < tt.size; i++ { + for range tt.size { obj := objGen.Next() addr := testutil.AddressFromObject(b, obj) raw, err := obj.Marshal() diff --git a/pkg/local_object_storage/engine/control_test.go b/pkg/local_object_storage/engine/control_test.go index f0809883..2de92ae8 100644 --- a/pkg/local_object_storage/engine/control_test.go +++ b/pkg/local_object_storage/engine/control_test.go @@ -208,7 +208,7 @@ func TestPersistentShardID(t *testing.T) { require.NoError(t, te.ng.Close(context.Background())) newTe := newEngineWithErrorThreshold(t, dir, 1) - for i := 0; i < len(newTe.shards); i++ { + for i := range len(newTe.shards) { require.Equal(t, te.shards[i].id, newTe.shards[i].id) } require.NoError(t, newTe.ng.Close(context.Background())) @@ -269,7 +269,7 @@ func TestReload(t *testing.T) { e, currShards := engineWithShards(t, removePath, shardNum) var rcfg ReConfiguration - for i := 0; i < len(currShards)-1; i++ { // without one of the shards + for i := range len(currShards) - 1 { // without one of the shards rcfg.AddShard(currShards[i], nil) } diff --git a/pkg/local_object_storage/engine/engine_test.go b/pkg/local_object_storage/engine/engine_test.go index 49976abb..525e17f3 100644 --- a/pkg/local_object_storage/engine/engine_test.go +++ b/pkg/local_object_storage/engine/engine_test.go @@ -44,7 +44,7 @@ func BenchmarkExists(b *testing.B) { func benchmarkExists(b *testing.B, shardNum int) { shards := make([]*shard.Shard, shardNum) - for i := 0; i < shardNum; i++ { + for i := range shardNum { shards[i] = testNewShard(b) } @@ -52,7 +52,7 @@ func benchmarkExists(b *testing.B, shardNum int) { defer func() { require.NoError(b, e.Close(context.Background())) }() addr := oidtest.Address() - for i := 0; i < 100; i++ { + for range 100 { obj := testutil.GenerateObjectWithCID(cidtest.ID()) err := Put(context.Background(), e, obj) if err != nil { @@ -62,7 +62,7 @@ func 
benchmarkExists(b *testing.B, shardNum int) { b.ReportAllocs() b.ResetTimer() - for i := 0; i < b.N; i++ { + for range b.N { var shPrm shard.ExistsPrm shPrm.Address = addr shPrm.ParentAddress = oid.Address{} @@ -109,7 +109,7 @@ func (te *testEngineWrapper) setInitializedShards(t testing.TB, shards ...*shard func (te *testEngineWrapper) setShardsNum(t testing.TB, num int) *testEngineWrapper { shards := make([]*shard.Shard, 0, num) - for i := 0; i < num; i++ { + for range num { shards = append(shards, testNewShard(t)) } @@ -117,7 +117,7 @@ func (te *testEngineWrapper) setShardsNum(t testing.TB, num int) *testEngineWrap } func (te *testEngineWrapper) setShardsNumOpts(t testing.TB, num int, shardOpts func(id int) []shard.Option) *testEngineWrapper { - for i := 0; i < num; i++ { + for i := range num { opts := shardOpts(i) id, err := te.engine.AddShard(context.Background(), opts...) require.NoError(t, err) @@ -127,7 +127,7 @@ func (te *testEngineWrapper) setShardsNumOpts(t testing.TB, num int, shardOpts f } func (te *testEngineWrapper) setShardsNumAdditionalOpts(t testing.TB, num int, shardOpts func(id int) []shard.Option) *testEngineWrapper { - for i := 0; i < num; i++ { + for i := range num { defaultOpts := testDefaultShardOptions(t) opts := append(defaultOpts, shardOpts(i)...) id, err := te.engine.AddShard(context.Background(), opts...) diff --git a/pkg/local_object_storage/engine/evacuate_test.go b/pkg/local_object_storage/engine/evacuate_test.go index 55268b54..8d25dad4 100644 --- a/pkg/local_object_storage/engine/evacuate_test.go +++ b/pkg/local_object_storage/engine/evacuate_test.go @@ -61,7 +61,7 @@ func newEngineEvacuate(t *testing.T, shardNum int, objPerShard int) (*StorageEng } for _, sh := range ids { - for i := 0; i < objPerShard; i++ { + for range objPerShard { contID := cidtest.ID() obj := testutil.GenerateObjectWithCID(contID) objects = append(objects, obj) @@ -554,7 +554,7 @@ func TestEvacuateTreesRemote(t *testing.T) { require.Equal(t, "", st.ErrorMessage(), "invalid final error message") expectedTreeOps := make(map[string][]*pilorama.Move) - for i := 0; i < len(e.shards); i++ { + for i := range len(e.shards) { sourceTrees, err := pilorama.TreeListAll(context.Background(), e.shards[ids[i].String()]) require.NoError(t, err, "list source trees failed") require.Len(t, sourceTrees, 3) diff --git a/pkg/local_object_storage/engine/list_test.go b/pkg/local_object_storage/engine/list_test.go index dd8a2e8a..11a6c784 100644 --- a/pkg/local_object_storage/engine/list_test.go +++ b/pkg/local_object_storage/engine/list_test.go @@ -79,7 +79,7 @@ func TestListWithCursor(t *testing.T) { expected := make([]object.Info, 0, tt.objectNum) got := make([]object.Info, 0, tt.objectNum) - for i := 0; i < tt.objectNum; i++ { + for range tt.objectNum { containerID := cidtest.ID() obj := testutil.GenerateObjectWithCIDWithPayload(containerID, []byte{'a'}) diff --git a/pkg/local_object_storage/engine/remove_copies.go b/pkg/local_object_storage/engine/remove_copies.go index b99cf4f4..5e1ced56 100644 --- a/pkg/local_object_storage/engine/remove_copies.go +++ b/pkg/local_object_storage/engine/remove_copies.go @@ -87,7 +87,7 @@ func (e *StorageEngine) RemoveDuplicates(ctx context.Context, prm RemoveDuplicat } }) - for i := 0; i < prm.Concurrency; i++ { + for range prm.Concurrency { errG.Go(func() error { return e.removeObjects(ctx, ch) }) diff --git a/pkg/local_object_storage/engine/remove_copies_test.go b/pkg/local_object_storage/engine/remove_copies_test.go index 99963576..6d2291c7 100644 --- 
a/pkg/local_object_storage/engine/remove_copies_test.go +++ b/pkg/local_object_storage/engine/remove_copies_test.go @@ -96,7 +96,7 @@ loop: require.FailNow(t, "unexpected object was removed", removed[i].addr) } - for i := 0; i < copyCount; i++ { + for i := range copyCount { if i%3 == 0 { require.True(t, removedMask[i], "object %d was expected to be removed", i) } else { @@ -207,7 +207,7 @@ func TestRebalanceExitByContext(t *testing.T) { }() const removeCount = 3 - for i := 0; i < removeCount-1; i++ { + for range removeCount - 1 { <-deleteCh signal <- struct{}{} } diff --git a/pkg/local_object_storage/engine/shards_test.go b/pkg/local_object_storage/engine/shards_test.go index f4c7a430..3347d58f 100644 --- a/pkg/local_object_storage/engine/shards_test.go +++ b/pkg/local_object_storage/engine/shards_test.go @@ -72,7 +72,7 @@ func TestSortShardsByWeight(t *testing.T) { var shards1 []hashedShard var weights1 []float64 var shards2 []hashedShard - for i := 0; i < numOfShards; i++ { + for i := range numOfShards { shards1 = append(shards1, hashedShard{ hash: uint64(i), }) diff --git a/pkg/local_object_storage/engine/tree_test.go b/pkg/local_object_storage/engine/tree_test.go index 2739058e..6f694f08 100644 --- a/pkg/local_object_storage/engine/tree_test.go +++ b/pkg/local_object_storage/engine/tree_test.go @@ -34,7 +34,7 @@ func benchmarkTreeVsSearch(b *testing.B, objCount int) { d := pilorama.CIDDescriptor{CID: cid, Position: 0, Size: 1} treeID := "someTree" - for i := 0; i < objCount; i++ { + for i := range objCount { obj := testutil.GenerateObjectWithCID(cid) testutil.AddAttribute(obj, pilorama.AttributeFilename, strconv.Itoa(i)) err := Put(context.Background(), te.ng, obj) @@ -56,7 +56,7 @@ func benchmarkTreeVsSearch(b *testing.B, objCount int) { fs.AddFilter(pilorama.AttributeFilename, strconv.Itoa(objCount/2), objectSDK.MatchStringEqual) prm.WithFilters(fs) - for i := 0; i < b.N; i++ { + for range b.N { res, err := te.ng.Select(context.Background(), prm) if err != nil { b.Fatal(err) @@ -67,7 +67,7 @@ func benchmarkTreeVsSearch(b *testing.B, objCount int) { } }) b.Run("TreeGetByPath", func(b *testing.B) { - for i := 0; i < b.N; i++ { + for range b.N { nodes, err := te.ng.TreeGetByPath(context.Background(), cid, treeID, pilorama.AttributeFilename, []string{strconv.Itoa(objCount / 2)}, true) if err != nil { b.Fatal(err) diff --git a/pkg/local_object_storage/internal/testutil/generators_test.go b/pkg/local_object_storage/internal/testutil/generators_test.go index f7be6014..cc6f726a 100644 --- a/pkg/local_object_storage/internal/testutil/generators_test.go +++ b/pkg/local_object_storage/internal/testutil/generators_test.go @@ -13,7 +13,7 @@ func TestOverwriteObjGenerator(t *testing.T) { ObjSize: 10, MaxObjects: 4, } - for i := 0; i < 40; i++ { + for range 40 { obj := gen.Next() id, isSet := obj.ID() i := binary.LittleEndian.Uint64(id[:]) @@ -26,7 +26,7 @@ func TestOverwriteObjGenerator(t *testing.T) { func TestRandObjGenerator(t *testing.T) { gen := &RandObjGenerator{ObjSize: 10} - for i := 0; i < 10; i++ { + for range 10 { obj := gen.Next() require.Equal(t, gen.ObjSize, uint64(len(obj.Payload()))) @@ -50,7 +50,7 @@ func TestSeqObjGenerator(t *testing.T) { func TestRandAddrGenerator(t *testing.T) { gen := RandAddrGenerator(5) - for i := 0; i < 50; i++ { + for range 50 { addr := gen.Next() id := addr.Object() k := binary.LittleEndian.Uint64(id[:]) diff --git a/pkg/local_object_storage/metabase/containers_test.go b/pkg/local_object_storage/metabase/containers_test.go index 5d6788d7..8b187445 100644 
--- a/pkg/local_object_storage/metabase/containers_test.go +++ b/pkg/local_object_storage/metabase/containers_test.go @@ -24,7 +24,7 @@ func TestDB_Containers(t *testing.T) { cids := make(map[string]int, N) - for i := 0; i < N; i++ { + for range N { obj := testutil.GenerateObject() cnr, _ := obj.ContainerID() @@ -95,7 +95,7 @@ func TestDB_ContainersCount(t *testing.T) { expected := make([]cid.ID, 0, R+T+SG+L) for _, upload := range uploadObjects { - for i := 0; i < upload.amount; i++ { + for range upload.amount { obj := testutil.GenerateObject() obj.SetType(upload.typ) @@ -126,11 +126,11 @@ func TestDB_ContainerSize(t *testing.T) { cids := make(map[cid.ID]int, C) objs := make(map[cid.ID][]*objectSDK.Object, C*N) - for i := 0; i < C; i++ { + for range C { cnr := cidtest.ID() cids[cnr] = 0 - for j := 0; j < N; j++ { + for range N { size := rand.Intn(1024) parent := testutil.GenerateObjectWithCID(cnr) diff --git a/pkg/local_object_storage/metabase/counter_test.go b/pkg/local_object_storage/metabase/counter_test.go index 1797fc0a..d1f808a6 100644 --- a/pkg/local_object_storage/metabase/counter_test.go +++ b/pkg/local_object_storage/metabase/counter_test.go @@ -39,14 +39,14 @@ func TestCounters(t *testing.T) { db := newDB(t) defer func() { require.NoError(t, db.Close()) }() oo := make([]*objectSDK.Object, 0, objCount) - for i := 0; i < objCount; i++ { + for range objCount { oo = append(oo, testutil.GenerateObject()) } var prm meta.PutPrm exp := make(map[cid.ID]meta.ObjectCounters) - for i := 0; i < objCount; i++ { + for i := range objCount { prm.SetObject(oo[i]) cnrID, _ := oo[i].ContainerID() c := meta.ObjectCounters{} @@ -187,7 +187,7 @@ func TestCounters(t *testing.T) { // put objects and check that parent info // does not affect the counter - for i := 0; i < objCount; i++ { + for i := range objCount { o := testutil.GenerateObject() if i < objCount/2 { // half of the objs will have the parent o.SetParent(parObj) @@ -535,7 +535,7 @@ func putObjs(t *testing.T, db *meta.DB, count int, withParent bool) []*objectSDK parent := testutil.GenerateObject() oo := make([]*objectSDK.Object, 0, count) - for i := 0; i < count; i++ { + for i := range count { o := testutil.GenerateObject() if withParent { o.SetParent(parent) diff --git a/pkg/local_object_storage/metabase/delete_test.go b/pkg/local_object_storage/metabase/delete_test.go index 2053874d..cb85157e 100644 --- a/pkg/local_object_storage/metabase/delete_test.go +++ b/pkg/local_object_storage/metabase/delete_test.go @@ -131,7 +131,7 @@ func TestDelete(t *testing.T) { defer func() { require.NoError(t, db.Close()) }() cnr := cidtest.ID() - for i := 0; i < 10; i++ { + for range 10 { obj := testutil.GenerateObjectWithCID(cnr) var prm meta.PutPrm diff --git a/pkg/local_object_storage/metabase/get_test.go b/pkg/local_object_storage/metabase/get_test.go index 247ddf9c..7654d2cd 100644 --- a/pkg/local_object_storage/metabase/get_test.go +++ b/pkg/local_object_storage/metabase/get_test.go @@ -223,7 +223,7 @@ func benchmarkGet(b *testing.B, numOfObj int) { defer func() { require.NoError(b, db.Close()) }() addrs := make([]oid.Address, 0, numOfObj) - for i := 0; i < numOfObj; i++ { + for range numOfObj { raw := testutil.GenerateObject() addrs = append(addrs, object.AddressOf(raw)) @@ -261,7 +261,7 @@ func benchmarkGet(b *testing.B, numOfObj int) { b.Run("serial", func(b *testing.B) { b.ReportAllocs() - for i := 0; i < b.N; i++ { + for i := range b.N { var getPrm meta.GetPrm getPrm.SetAddress(addrs[i%len(addrs)]) diff --git 
a/pkg/local_object_storage/metabase/list_test.go b/pkg/local_object_storage/metabase/list_test.go index a92e2eff..6207497b 100644 --- a/pkg/local_object_storage/metabase/list_test.go +++ b/pkg/local_object_storage/metabase/list_test.go @@ -35,7 +35,7 @@ func listWithCursorPrepareDB(b *testing.B) *meta.DB { defer func() { require.NoError(b, db.Close()) }() obj := testutil.GenerateObject() - for i := 0; i < 100_000; i++ { // should be a multiple of all batch sizes + for i := range 100_000 { // should be a multiple of all batch sizes obj.SetID(oidtest.ID()) if i%9 == 0 { // let's have 9 objects per container obj.SetContainerID(cidtest.ID()) @@ -51,7 +51,7 @@ func benchmarkListWithCursor(b *testing.B, db *meta.DB, batchSize int) { b.ResetTimer() b.ReportAllocs() - for i := 0; i < b.N; i++ { + for range b.N { res, err := db.ListWithCursor(context.Background(), prm) if err != nil { if err != meta.ErrEndOfListing { @@ -80,7 +80,7 @@ func TestLisObjectsWithCursor(t *testing.T) { expected := make([]object.Info, 0, total) // fill metabase with objects - for i := 0; i < containers; i++ { + for range containers { containerID := cidtest.ID() // add one regular object @@ -140,7 +140,7 @@ func TestLisObjectsWithCursor(t *testing.T) { expectedIterations-- } - for i := 0; i < expectedIterations; i++ { + for range expectedIterations { res, cursor, err = metaListWithCursor(db, uint32(countPerReq), cursor) require.NoError(t, err, "count:%d", countPerReq) got = append(got, res...) @@ -169,7 +169,7 @@ func TestAddObjectDuringListingWithCursor(t *testing.T) { expected := make(map[string]int, total) // fill metabase with objects - for i := 0; i < total; i++ { + for range total { obj := testutil.GenerateObject() err := putBig(db, obj) require.NoError(t, err) @@ -186,7 +186,7 @@ func TestAddObjectDuringListingWithCursor(t *testing.T) { } // add new objects - for i := 0; i < total; i++ { + for range total { obj := testutil.GenerateObject() err = putBig(db, obj) require.NoError(t, err) diff --git a/pkg/local_object_storage/metabase/lock_test.go b/pkg/local_object_storage/metabase/lock_test.go index 2d7bfc1c..62a109b0 100644 --- a/pkg/local_object_storage/metabase/lock_test.go +++ b/pkg/local_object_storage/metabase/lock_test.go @@ -155,7 +155,7 @@ func TestDB_Lock(t *testing.T) { inhumePrm.SetGCMark() - for i := 0; i < objsNum; i++ { + for i := range objsNum { inhumePrm.SetAddresses(objectcore.AddressOf(objs[i])) res, err = db.Inhume(context.Background(), inhumePrm) @@ -255,7 +255,7 @@ func putAndLockObj(t *testing.T, db *meta.DB, numOfLockedObjs int) ([]*objectSDK lockedObjs := make([]*objectSDK.Object, 0, numOfLockedObjs) lockedObjIDs := make([]oid.ID, 0, numOfLockedObjs) - for i := 0; i < numOfLockedObjs; i++ { + for range numOfLockedObjs { obj := testutil.GenerateObjectWithCID(cnr) err := putBig(db, obj) require.NoError(t, err) diff --git a/pkg/local_object_storage/metabase/put_test.go b/pkg/local_object_storage/metabase/put_test.go index 84e4029f..914f5ef0 100644 --- a/pkg/local_object_storage/metabase/put_test.go +++ b/pkg/local_object_storage/metabase/put_test.go @@ -74,7 +74,7 @@ func BenchmarkPut(b *testing.B) { objs := prepareObjects(b.N) b.ResetTimer() b.ReportAllocs() - for i := 0; i < b.N; i++ { + for range b.N { if err := metaPut(db, objs[index.Add(1)], nil); err != nil { b.Fatal(err) } diff --git a/pkg/local_object_storage/metabase/reset_test.go b/pkg/local_object_storage/metabase/reset_test.go index 66f5eefc..993079dc 100644 --- a/pkg/local_object_storage/metabase/reset_test.go +++ 
b/pkg/local_object_storage/metabase/reset_test.go @@ -34,7 +34,7 @@ func TestResetDropsContainerBuckets(t *testing.T) { defer func() { require.NoError(t, db.Close()) }() - for idx := 0; idx < 100; idx++ { + for idx := range 100 { var putPrm PutPrm putPrm.SetObject(testutil.GenerateObject()) putPrm.SetStorageID([]byte(fmt.Sprintf("0/%d", idx))) diff --git a/pkg/local_object_storage/metabase/select_test.go b/pkg/local_object_storage/metabase/select_test.go index 4fbc5910..0fab3a10 100644 --- a/pkg/local_object_storage/metabase/select_test.go +++ b/pkg/local_object_storage/metabase/select_test.go @@ -920,7 +920,7 @@ func TestDB_SelectSplitID_EC(t *testing.T) { ec, err := erasurecode.NewConstructor(dataCount, parityCount) require.NoError(t, err) - for i := 0; i < partCount; i++ { + for i := range partCount { cs, err := ec.Split(tt.objects[i], &pk.PrivateKey) require.NoError(t, err) @@ -1070,7 +1070,7 @@ func BenchmarkSelect(b *testing.B) { cid := cidtest.ID() - for i := 0; i < objCount; i++ { + for i := range objCount { var attr objectSDK.Attribute attr.SetKey("myHeader") attr.SetValue(strconv.Itoa(i)) @@ -1129,7 +1129,7 @@ func benchmarkSelect(b *testing.B, db *meta.DB, cid cidSDK.ID, fs objectSDK.Sear prm.SetContainerID(cid) prm.SetFilters(fs) - for i := 0; i < b.N; i++ { + for range b.N { res, err := db.Select(context.Background(), prm) if err != nil { b.Fatal(err) diff --git a/pkg/local_object_storage/metabase/upgrade.go b/pkg/local_object_storage/metabase/upgrade.go index a4c7707b..e9abd746 100644 --- a/pkg/local_object_storage/metabase/upgrade.go +++ b/pkg/local_object_storage/metabase/upgrade.go @@ -143,7 +143,7 @@ func createExpirationEpochBuckets(ctx context.Context, db *bbolt.DB, log func(a return selectObjectsWithExpirationEpoch(ctx, db, objects) }) var count atomic.Uint64 - for i := 0; i < upgradeWorkersCount; i++ { + for range upgradeWorkersCount { eg.Go(func() error { for { select { diff --git a/pkg/local_object_storage/metabase/upgrade_test.go b/pkg/local_object_storage/metabase/upgrade_test.go index dc3d7d07..3797de0a 100644 --- a/pkg/local_object_storage/metabase/upgrade_test.go +++ b/pkg/local_object_storage/metabase/upgrade_test.go @@ -91,7 +91,7 @@ func TestGenerateMetabaseFile(t *testing.T) { eg, ctx := errgroup.WithContext(context.Background()) eg.SetLimit(generateWorkersCount) // simple objects - for i := 0; i < simpleObjectsCount; i++ { + for i := range simpleObjectsCount { i := i eg.Go(func() error { obj := testutil.GenerateObjectWithCID(containers[i%len(containers)]) @@ -110,7 +110,7 @@ func TestGenerateMetabaseFile(t *testing.T) { eg, ctx = errgroup.WithContext(context.Background()) eg.SetLimit(generateWorkersCount) // complex objects - for i := 0; i < complexObjectsCount; i++ { + for i := range complexObjectsCount { i := i eg.Go(func() error { parent := testutil.GenerateObjectWithCID(containers[i%len(containers)]) @@ -134,7 +134,7 @@ func TestGenerateMetabaseFile(t *testing.T) { eg, ctx = errgroup.WithContext(context.Background()) eg.SetLimit(generateWorkersCount) // simple objects deleted by gc marks - for i := 0; i < deletedByGCMarksCount; i++ { + for i := range deletedByGCMarksCount { i := i eg.Go(func() error { obj := testutil.GenerateObjectWithCID(containers[i%len(containers)]) @@ -156,7 +156,7 @@ func TestGenerateMetabaseFile(t *testing.T) { eg, ctx = errgroup.WithContext(context.Background()) eg.SetLimit(10000) // simple objects deleted by tombstones - for i := 0; i < deletedByTombstoneCount; i++ { + for i := range deletedByTombstoneCount { i := i 
eg.Go(func() error { obj := testutil.GenerateObjectWithCID(containers[i%len(containers)]) @@ -186,7 +186,7 @@ func TestGenerateMetabaseFile(t *testing.T) { eg, ctx = errgroup.WithContext(context.Background()) eg.SetLimit(generateWorkersCount) // simple objects locked by locks - for i := 0; i < lockedCount; i++ { + for i := range lockedCount { i := i eg.Go(func() error { obj := testutil.GenerateObjectWithCID(containers[i%len(containers)]) diff --git a/pkg/local_object_storage/pilorama/boltdb.go b/pkg/local_object_storage/pilorama/boltdb.go index 29941be8..e2d69caf 100644 --- a/pkg/local_object_storage/pilorama/boltdb.go +++ b/pkg/local_object_storage/pilorama/boltdb.go @@ -705,7 +705,7 @@ func (t *boltForest) applyOperation(logBucket, treeBucket *bbolt.Bucket, ms []*M key, value = c.Prev() } - for i := 0; i < len(ms); i++ { + for i := range len(ms) { // Loop invariant: key represents the next stored timestamp after ms[i].Time. // 2. Insert the operation. diff --git a/pkg/local_object_storage/pilorama/forest_test.go b/pkg/local_object_storage/pilorama/forest_test.go index 41d7a567..854fe0aa 100644 --- a/pkg/local_object_storage/pilorama/forest_test.go +++ b/pkg/local_object_storage/pilorama/forest_test.go @@ -194,7 +194,7 @@ func BenchmarkForestSortedIteration(b *testing.B) { const total = 100_000 d := CIDDescriptor{cnr, 0, 1} - for i := 0; i < total; i++ { + for i := range total { u, err := uuid.NewRandom() if err != nil { b.FailNow() @@ -216,7 +216,7 @@ func BenchmarkForestSortedIteration(b *testing.B) { } b.Run(providers[i].name+",root", func(b *testing.B) { - for i := 0; i < b.N; i++ { + for range b.N { res, _, err := f.TreeSortedByFilename(context.Background(), cnr, treeID, MultiNode{RootID}, nil, 100) if err != nil || len(res) != 100 { b.Fatalf("err %v, count %d", err, len(res)) @@ -224,7 +224,7 @@ func BenchmarkForestSortedIteration(b *testing.B) { } }) b.Run(providers[i].name+",leaf", func(b *testing.B) { - for i := 0; i < b.N; i++ { + for range b.N { res, _, err := f.TreeSortedByFilename(context.Background(), cnr, treeID, MultiNode{1}, nil, 100) if err != nil || len(res) != 0 { b.FailNow() @@ -804,7 +804,7 @@ func testForestApplySameOperation(t *testing.T, constructor func(t testing.TB, _ defer func() { require.NoError(t, s.Close()) }() require.NoError(t, s.TreeApply(ctx, cid, treeID, &logs[0], false)) - for i := 0; i < batchSize; i++ { + for range batchSize { errG.Go(func() error { return s.TreeApply(ctx, cid, treeID, &logs[2], false) }) @@ -1043,7 +1043,7 @@ func TestForest_ParallelApply(t *testing.T) { // The operations are guaranteed to be applied and returned sorted by `Time`. func prepareRandomTree(nodeCount, opCount int) []Move { ops := make([]Move, nodeCount+opCount) - for i := 0; i < nodeCount; i++ { + for i := range nodeCount { ops[i] = Move{ Parent: 0, Meta: Meta{ @@ -1121,14 +1121,14 @@ func testForestTreeParallelApply(t *testing.T, constructor func(t testing.TB, _ require.NoError(t, expected.TreeApply(context.Background(), cid, treeID, &ops[i], false)) } - for i := 0; i < iterCount; i++ { + for range iterCount { // Shuffle random operations, leave initialization in place. r.Shuffle(len(ops), func(i, j int) { ops[i], ops[j] = ops[j], ops[i] }) actual := constructor(t, WithMaxBatchSize(batchSize), WithNoSync(true)) wg := new(sync.WaitGroup) ch := make(chan *Move) - for i := 0; i < batchSize; i++ { + for range batchSize { wg.Add(1) go func() { defer wg.Done() @@ -1170,7 +1170,7 @@ func testForestTreeApplyRandom(t *testing.T, constructor func(t testing.TB, _ .. 
} const iterCount = 200 - for i := 0; i < iterCount; i++ { + for range iterCount { // Shuffle random operations, leave initialization in place. r.Shuffle(len(ops), func(i, j int) { ops[i], ops[j] = ops[j], ops[i] }) @@ -1247,7 +1247,7 @@ func BenchmarkApplyReorderLast(b *testing.B) { Child: uint64(r.Intn(benchNodeCount)), } if i != 0 && i%blockSize == 0 { - for j := 0; j < blockSize/2; j++ { + for j := range blockSize / 2 { ops[i-j], ops[i+j-blockSize] = ops[i+j-blockSize], ops[i-j] } } @@ -1265,7 +1265,7 @@ func benchmarkApply(b *testing.B, s Forest, genFunc func(int) []Move) { cid := cidtest.ID() treeID := "version" ch := make(chan int, b.N) - for i := 0; i < b.N; i++ { + for i := range b.N { ch <- i } @@ -1311,7 +1311,7 @@ func testTreeGetByPath(t *testing.T, s ForestStorage) { if mf, ok := s.(*memoryForest); ok { single := mf.treeMap[cid.String()+"/"+treeID] t.Run("test meta", func(t *testing.T) { - for i := 0; i < 6; i++ { + for i := range 6 { require.Equal(t, uint64(i), single.infoMap[Node(i+1)].Meta.Time) } }) @@ -1492,7 +1492,7 @@ func testTreeListTreesCount(t *testing.T, constructor func(t testing.TB, _ ...Op var expected []ContainerIDTreeID treeIDs := []string{"version", "system", "s", "avada kedavra"} - for i := 0; i < count; i++ { + for i := range count { cid := cidtest.ID() treeID := treeIDs[i%len(treeIDs)] expected = append(expected, ContainerIDTreeID{ diff --git a/pkg/local_object_storage/shard/list_test.go b/pkg/local_object_storage/shard/list_test.go index 8a49a36f..3414dc76 100644 --- a/pkg/local_object_storage/shard/list_test.go +++ b/pkg/local_object_storage/shard/list_test.go @@ -39,11 +39,11 @@ func testShardList(t *testing.T, sh *Shard) { var errG errgroup.Group errG.SetLimit(C * N) - for i := 0; i < C; i++ { + for range C { errG.Go(func() error { cnr := cidtest.ID() - for j := 0; j < N; j++ { + for range N { errG.Go(func() error { obj := testutil.GenerateObjectWithCID(cnr) testutil.AddPayload(obj, 1<<2) diff --git a/pkg/local_object_storage/shard/metrics_test.go b/pkg/local_object_storage/shard/metrics_test.go index 38d465f3..1ef849c0 100644 --- a/pkg/local_object_storage/shard/metrics_test.go +++ b/pkg/local_object_storage/shard/metrics_test.go @@ -206,7 +206,7 @@ func TestCounters(t *testing.T) { const objNumber = 10 oo := make([]*objectSDK.Object, objNumber) - for i := 0; i < objNumber; i++ { + for i := range objNumber { oo[i] = testutil.GenerateObject() } @@ -248,7 +248,7 @@ func TestCounters(t *testing.T) { var prm PutPrm - for i := 0; i < objNumber; i++ { + for i := range objNumber { prm.SetObject(oo[i]) _, err := sh.Put(context.Background(), prm) @@ -269,7 +269,7 @@ func TestCounters(t *testing.T) { var prm InhumePrm inhumedNumber := objNumber / 4 - for i := 0; i < inhumedNumber; i++ { + for i := range inhumedNumber { prm.MarkAsGarbage(objectcore.AddressOf(oo[i])) _, err := sh.Inhume(context.Background(), prm) @@ -317,7 +317,7 @@ func TestCounters(t *testing.T) { _, err := sh.Inhume(context.Background(), prm) require.NoError(t, err) - for i := 0; i < inhumedNumber; i++ { + for i := range inhumedNumber { cid, ok := oo[i].ContainerID() require.True(t, ok) expectedLogicalSizes[cid.EncodeToString()] -= int64(oo[i].PayloadSize()) @@ -419,7 +419,7 @@ func shardWithMetrics(t *testing.T, path string) (*Shard, *metricsStore) { func addrFromObjs(oo []*objectSDK.Object) []oid.Address { aa := make([]oid.Address, len(oo)) - for i := 0; i < len(oo); i++ { + for i := range len(oo) { aa[i] = objectcore.AddressOf(oo[i]) } diff --git 
a/pkg/local_object_storage/shard/refill_test.go b/pkg/local_object_storage/shard/refill_test.go index 509ccaaa..0025bb45 100644 --- a/pkg/local_object_storage/shard/refill_test.go +++ b/pkg/local_object_storage/shard/refill_test.go @@ -38,7 +38,7 @@ func benchRefillMetabase(b *testing.B, objectsCount int) { var putPrm PutPrm - for i := 0; i < objectsCount/2; i++ { + for range objectsCount / 2 { obj := testutil.GenerateObject() testutil.AddAttribute(obj, "foo", "bar") testutil.AddPayload(obj, 1<<5) // blobvnicza tree obj @@ -49,7 +49,7 @@ func benchRefillMetabase(b *testing.B, objectsCount int) { require.NoError(b, err) } - for i := 0; i < objectsCount/2; i++ { + for range objectsCount / 2 { obj := testutil.GenerateObject() testutil.AddAttribute(obj, "foo", "bar") obj.SetID(oidtest.ID()) diff --git a/pkg/local_object_storage/writecache/benchmark/writecache_test.go b/pkg/local_object_storage/writecache/benchmark/writecache_test.go index 4f439845..4da9a26d 100644 --- a/pkg/local_object_storage/writecache/benchmark/writecache_test.go +++ b/pkg/local_object_storage/writecache/benchmark/writecache_test.go @@ -54,7 +54,7 @@ func benchmarkPutSeq(b *testing.B, cache writecache.Cache, size uint64) { objGen := testutil.RandObjGenerator{ObjSize: size} b.ResetTimer() - for n := 0; n < b.N; n++ { + for range b.N { obj := objGen.Next() rawData, err := obj.Marshal() require.NoError(b, err, "marshaling object") diff --git a/pkg/local_object_storage/writecache/flush.go b/pkg/local_object_storage/writecache/flush.go index e34f5a76..930ac843 100644 --- a/pkg/local_object_storage/writecache/flush.go +++ b/pkg/local_object_storage/writecache/flush.go @@ -41,7 +41,7 @@ func (c *cache) runFlushLoop(ctx context.Context) { if c.disableBackgroundFlush { return } - for i := 0; i < c.workersCount; i++ { + for range c.workersCount { c.wg.Add(1) go c.workerFlushSmall(ctx) } diff --git a/pkg/morph/event/notary_preparator_test.go b/pkg/morph/event/notary_preparator_test.go index 8da9d868..4c269bcb 100644 --- a/pkg/morph/event/notary_preparator_test.go +++ b/pkg/morph/event/notary_preparator_test.go @@ -439,7 +439,7 @@ func TestPrepare_CorrectNR(t *testing.T) { ) for _, test := range tests { - for i := 0; i < 1; i++ { // run tests against 3 and 4 witness NR + for i := range 1 { // run tests against 3 and 4 witness NR for _, dummyMultisig := range []bool{true, false} { // run tests against empty and dummy multisig/Notary witness additionalWitness := i == 0 nr := correctNR(script(test.hash, test.method, test.args...), dummyMultisig, additionalWitness) diff --git a/pkg/morph/timer/block_test.go b/pkg/morph/timer/block_test.go index 93bb04de..7929754c 100644 --- a/pkg/morph/timer/block_test.go +++ b/pkg/morph/timer/block_test.go @@ -208,7 +208,7 @@ func TestBlockTimer_TickSameHeight(t *testing.T) { require.NoError(t, bt.Reset()) check := func(t *testing.T, h uint32, base, delta int) { - for i := 0; i < 2*int(blockDur); i++ { + for range 2 * int(blockDur) { bt.Tick(h) require.Equal(t, base, baseCounter) require.Equal(t, delta, deltaCounter) diff --git a/pkg/network/tls_test.go b/pkg/network/tls_test.go index 6c352484..14729f4c 100644 --- a/pkg/network/tls_test.go +++ b/pkg/network/tls_test.go @@ -37,7 +37,7 @@ func BenchmarkAddressTLSEnabled(b *testing.B) { b.ReportAllocs() var enabled bool - for i := 0; i < b.N; i++ { + for range b.N { enabled = addr.IsTLSEnabled() } require.True(b, enabled) diff --git a/pkg/services/control/server/evacuate.go b/pkg/services/control/server/evacuate.go index 0ba8be76..300cb9dc 100644 --- 
a/pkg/services/control/server/evacuate.go +++ b/pkg/services/control/server/evacuate.go @@ -169,7 +169,7 @@ func (s *Server) getContainerNodes(contID cid.ID) ([]netmap.NodeInfo, error) { nodes := placement.FlattenNodes(ns) bs := (*keys.PublicKey)(&s.key.PublicKey).Bytes() - for i := 0; i < len(nodes); i++ { + for i := range len(nodes) { if bytes.Equal(nodes[i].PublicKey(), bs) { copy(nodes[i:], nodes[i+1:]) nodes = nodes[:len(nodes)-1] diff --git a/pkg/services/object/acl/v2/util_test.go b/pkg/services/object/acl/v2/util_test.go index 394feef4..43533968 100644 --- a/pkg/services/object/acl/v2/util_test.go +++ b/pkg/services/object/acl/v2/util_test.go @@ -33,7 +33,7 @@ func TestOriginalTokens(t *testing.T) { var sTokenV2 session.Token sToken.WriteToV2(&sTokenV2) - for i := 0; i < 10; i++ { + for i := range 10 { metaHeaders := testGenerateMetaHeader(uint32(i), &bTokenV2, &sTokenV2) res, err := originalSessionToken(metaHeaders) require.NoError(t, err) diff --git a/pkg/services/object/get/get_test.go b/pkg/services/object/get/get_test.go index 1fc6b7b2..6827018d 100644 --- a/pkg/services/object/get/get_test.go +++ b/pkg/services/object/get/get_test.go @@ -470,7 +470,7 @@ func testNodeMatrix(t testing.TB, dim []int) ([][]netmap.NodeInfo, [][]string) { ns := make([]netmap.NodeInfo, dim[i]) as := make([]string, dim[i]) - for j := 0; j < dim[i]; j++ { + for j := range dim[i] { a := fmt.Sprintf("/ip4/192.168.0.%s/tcp/%s", strconv.Itoa(i), strconv.Itoa(60000+j), @@ -508,7 +508,7 @@ func generateChain(ln int, cnr cid.ID) ([]*objectSDK.Object, []oid.ID, []byte) { ids := make([]oid.ID, 0, ln) payload := make([]byte, 0, ln*10) - for i := 0; i < ln; i++ { + for i := range ln { ids = append(ids, curID) addr.SetObject(curID) @@ -1750,7 +1750,7 @@ func TestGetRange(t *testing.T) { }, }) - for from := 0; from < totalSize-1; from++ { + for from := range totalSize - 1 { for to := from; to < totalSize; to++ { t.Run(fmt.Sprintf("from=%d,to=%d", from, to), func(t *testing.T) { testGetRange(t, svc, addr, uint64(from), uint64(to), payload) @@ -1811,7 +1811,7 @@ func TestGetRange(t *testing.T) { }, }) - for from := 0; from < totalSize-1; from++ { + for from := range totalSize - 1 { for to := from; to < totalSize; to++ { t.Run(fmt.Sprintf("from=%d,to=%d", from, to), func(t *testing.T) { testGetRange(t, svc, addr, uint64(from), uint64(to), payload) diff --git a/pkg/services/object/get/getrangeec_test.go b/pkg/services/object/get/getrangeec_test.go index b8497d7d..a6882d4a 100644 --- a/pkg/services/object/get/getrangeec_test.go +++ b/pkg/services/object/get/getrangeec_test.go @@ -131,7 +131,7 @@ func TestGetRangeEC(t *testing.T) { clients: clients, }) - for from := 0; from < totalSize-1; from++ { + for from := range totalSize - 1 { for to := from; to < totalSize; to++ { t.Run(fmt.Sprintf("from=%d,to=%d", from, to), func(t *testing.T) { testGetRange(t, svc, addr, uint64(from), uint64(to), obj.Payload()) diff --git a/pkg/services/object/put/ec.go b/pkg/services/object/put/ec.go index 1fadf65f..9980f6d6 100644 --- a/pkg/services/object/put/ec.go +++ b/pkg/services/object/put/ec.go @@ -276,7 +276,7 @@ func (e *ecWriter) writePart(ctx context.Context, obj *objectSDK.Object, partIdx } // try to save to any node not visited by current part - for i := 0; i < len(nodes); i++ { + for i := range len(nodes) { select { case <-ctx.Done(): return ctx.Err() diff --git a/pkg/services/object/search/search_test.go b/pkg/services/object/search/search_test.go index 67938040..44abcfe5 100644 --- a/pkg/services/object/search/search_test.go 
+++ b/pkg/services/object/search/search_test.go @@ -151,7 +151,7 @@ func testSHA256() (cs [sha256.Size]byte) { func generateIDs(num int) []oid.ID { res := make([]oid.ID, num) - for i := 0; i < num; i++ { + for i := range num { res[i].SetSHA256(testSHA256()) } @@ -232,7 +232,7 @@ func testNodeMatrix(t testing.TB, dim []int) ([][]netmap.NodeInfo, [][]string) { ns := make([]netmap.NodeInfo, dim[i]) as := make([]string, dim[i]) - for j := 0; j < dim[i]; j++ { + for j := range dim[i] { a := fmt.Sprintf("/ip4/192.168.0.%s/tcp/%s", strconv.Itoa(i), strconv.Itoa(60000+j), diff --git a/pkg/services/object_manager/placement/cache_test.go b/pkg/services/object_manager/placement/cache_test.go index 07e9340d..a890d535 100644 --- a/pkg/services/object_manager/placement/cache_test.go +++ b/pkg/services/object_manager/placement/cache_test.go @@ -64,7 +64,7 @@ func TestContainerNodesCache(t *testing.T) { nm2 := nm(1, nodes[1:2]) cnr := [size * 2]cid.ID{} res := [size * 2][][]netmapSDK.NodeInfo{} - for i := 0; i < size*2; i++ { + for i := range size * 2 { cnr[i] = cidtest.ID() var err error @@ -77,7 +77,7 @@ func TestContainerNodesCache(t *testing.T) { require.NoError(t, err) require.Equal(t, res[i], r) } - for i := 0; i < size; i++ { + for i := range size { r, err := c.ContainerNodes(nm2, cnr[i], pp) require.NoError(t, err) require.NotEqual(t, res[i], r) diff --git a/pkg/services/object_manager/placement/traverser.go b/pkg/services/object_manager/placement/traverser.go index 9a5877c5..4e790628 100644 --- a/pkg/services/object_manager/placement/traverser.go +++ b/pkg/services/object_manager/placement/traverser.go @@ -136,7 +136,7 @@ func defaultCopiesVector(policy netmap.PlacementPolicy) []int { replNum := policy.NumberOfReplicas() copyVector := make([]int, 0, replNum) - for i := 0; i < replNum; i++ { + for i := range replNum { copyVector = append(copyVector, int(policy.ReplicaDescriptor(i).NumberOfObjects()+policy.ReplicaDescriptor(i).GetECDataCount()+policy.ReplicaDescriptor(i).GetECParityCount())) } @@ -212,7 +212,7 @@ func (t *Traverser) Next() []Node { nodes := make([]Node, count) - for i := 0; i < count; i++ { + for i := range count { err := nodes[i].addresses.FromIterator(network.NodeEndpointsIterator(t.vectors[0][i])) if err != nil { return nil diff --git a/pkg/services/object_manager/placement/traverser_test.go b/pkg/services/object_manager/placement/traverser_test.go index f5731c81..b3b57677 100644 --- a/pkg/services/object_manager/placement/traverser_test.go +++ b/pkg/services/object_manager/placement/traverser_test.go @@ -48,7 +48,7 @@ func testPlacement(ss, rs []int) ([][]netmap.NodeInfo, container.Container) { for i := range ss { ns := make([]netmap.NodeInfo, 0, ss[i]) - for j := 0; j < ss[i]; j++ { + for range ss[i] { ns = append(ns, testNode(num)) num++ } @@ -125,7 +125,7 @@ func TestTraverserObjectScenarios(t *testing.T) { ) require.NoError(t, err) - for i := 0; i < len(nodes[0]); i++ { + for range len(nodes[0]) { require.NotNil(t, tr.Next()) } @@ -164,7 +164,7 @@ func TestTraverserObjectScenarios(t *testing.T) { require.Empty(t, tr.Next()) require.False(t, tr.Success()) - for i := 0; i < replicas[curVector]; i++ { + for range replicas[curVector] { tr.SubmitSuccess() } } diff --git a/pkg/services/session/storage/persistent/executor_test.go b/pkg/services/session/storage/persistent/executor_test.go index 39cb1435..124d3693 100644 --- a/pkg/services/session/storage/persistent/executor_test.go +++ b/pkg/services/session/storage/persistent/executor_test.go @@ -39,7 +39,7 @@ func 
TestTokenStore(t *testing.T) { tokens := make([]tok, 0, tokenNumber) - for i := 0; i < tokenNumber; i++ { + for i := range tokenNumber { req.SetExpiration(uint64(i)) res, err := ts.Create(context.Background(), req) diff --git a/pkg/services/tree/getsubtree_test.go b/pkg/services/tree/getsubtree_test.go index 67743188..95bdda34 100644 --- a/pkg/services/tree/getsubtree_test.go +++ b/pkg/services/tree/getsubtree_test.go @@ -62,7 +62,7 @@ func TestGetSubTree(t *testing.T) { loop: for i := 1; i < len(acc.seen); i++ { parent := acc.seen[i].Body.ParentId - for j := 0; j < i; j++ { + for j := range i { if acc.seen[j].Body.NodeId[0] == parent[0] { continue loop } diff --git a/pkg/services/tree/replicator.go b/pkg/services/tree/replicator.go index 346198b3..95c8f801 100644 --- a/pkg/services/tree/replicator.go +++ b/pkg/services/tree/replicator.go @@ -133,7 +133,7 @@ func (s *Service) ReplicateTreeOp(ctx context.Context, n netmapSDK.NodeInfo, req } func (s *Service) replicateLoop(ctx context.Context) { - for i := 0; i < s.replicatorWorkerCount; i++ { + for range s.replicatorWorkerCount { go s.replicationWorker(ctx) go s.localReplicationWorker(ctx) } diff --git a/pkg/util/sync/key_locker_test.go b/pkg/util/sync/key_locker_test.go index 3b3e6a69..259064ec 100644 --- a/pkg/util/sync/key_locker_test.go +++ b/pkg/util/sync/key_locker_test.go @@ -13,7 +13,7 @@ func TestKeyLocker(t *testing.T) { taken := false eg, _ := errgroup.WithContext(context.Background()) keyLocker := NewKeyLocker[int]() - for i := 0; i < 100; i++ { + for range 100 { eg.Go(func() error { keyLocker.Lock(0) defer keyLocker.Unlock(0)
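
Every hunk in this patch applies the same mechanical modernization: the classic three-clause counter loop `for i := 0; i < n; i++` is rewritten to Go 1.22's range-over-integer form, and the loop variable is dropped entirely where the body never reads it. The following is a minimal standalone sketch, not taken from the repository, that illustrates the three variants used above; the names `n` and `index` and the printed strings are illustrative only.

// Sketch of the loop rewrite applied throughout this patch.
// Requires Go 1.22+, where ranging over an integer n yields i = 0, 1, ..., n-1.
package main

import "fmt"

func main() {
	const n = 3

	// 1. Counter is used in the body:
	//    `for i := 0; i < n; i++`  ->  `for i := range n`
	for i := range n {
		fmt.Println("index", i)
	}

	// 2. Counter is unused (retry loops, worker pools, b.N benchmark bodies):
	//    `for i := 0; i < n; i++`  ->  `for range n`
	for range n {
		fmt.Println("tick")
	}

	// 3. Inclusive upper bound, as in the alphabet processor tests:
	//    `for i := 0; i <= index; i++`  ->  `for i := range index + 1`
	index := 2
	for i := range index + 1 {
		fmt.Println("letter", i)
	}
}

One general caveat on this rewrite: the range expression is evaluated once when the loop starts, whereas the three-clause form re-evaluates its condition on every pass, so loops whose bodies mutate the counter or shrink/grow the bound mid-iteration must keep the three-clause form.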