diff --git a/.golangci.yml b/.golangci.yml
index 2e9e78fc..971f0d0e 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -67,7 +67,7 @@ linters:
     - bidichk
     - durationcheck
     - exhaustive
-    - exportloopref
+    - copyloopvar
     - gofmt
     - goimports
     - misspell
diff --git a/cmd/frostfs-adm/internal/modules/morph/generate/generate.go b/cmd/frostfs-adm/internal/modules/morph/generate/generate.go
index c7de599e..7af77679 100644
--- a/cmd/frostfs-adm/internal/modules/morph/generate/generate.go
+++ b/cmd/frostfs-adm/internal/modules/morph/generate/generate.go
@@ -73,7 +73,6 @@ func initializeWallets(v *viper.Viper, walletDir string, size int) ([]string, er
 			return nil, fmt.Errorf("can't fetch password: %w", err)
 		}
 
-		i := i
 		errG.Go(func() error {
 			p := filepath.Join(walletDir, innerring.GlagoliticLetter(i).String()+".json")
 			f, err := os.OpenFile(p, os.O_CREATE, 0o644)
@@ -107,7 +106,6 @@ func initializeWallets(v *viper.Viper, walletDir string, size int) ([]string, er
 	// Create consensus account with 2*N/3+1 multi-signature.
 	bftCount := smartcontract.GetDefaultHonestNodeCount(size)
 	for i := range wallets {
-		i := i
 		ps := pubs.Copy()
 		errG.Go(func() error {
 			if err := addMultisigAccount(wallets[i], majCount, constants.CommitteeAccountName, passwords[i], ps); err != nil {
diff --git a/cmd/frostfs-cli/modules/container/list.go b/cmd/frostfs-cli/modules/container/list.go
index a1410d7a..6d0019ec 100644
--- a/cmd/frostfs-cli/modules/container/list.go
+++ b/cmd/frostfs-cli/modules/container/list.go
@@ -70,7 +70,6 @@ var listContainersCmd = &cobra.Command{
 				continue
 			}
 
-			cnrID := cnrID
 			prmGet.ClientParams.ContainerID = &cnrID
 			res, err := internalclient.GetContainer(cmd.Context(), prmGet)
 			if err != nil {
diff --git a/cmd/frostfs-cli/modules/object/nodes.go b/cmd/frostfs-cli/modules/object/nodes.go
index 4efe04d1..896f6f17 100644
--- a/cmd/frostfs-cli/modules/object/nodes.go
+++ b/cmd/frostfs-cli/modules/object/nodes.go
@@ -393,8 +393,6 @@ func getActualPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, pk *ecdsa.
 
 	eg, egCtx := errgroup.WithContext(cmd.Context())
 	for _, cand := range candidates {
-		cand := cand
-
 		eg.Go(func() error {
 			cli, err := createClient(egCtx, cmd, cand, pk)
 			if err != nil {
@@ -405,7 +403,6 @@
 			}
 
 			for _, object := range objects {
-				object := object
 				eg.Go(func() error {
 					stored, err := isObjectStoredOnNode(egCtx, cmd, object.containerID, object.objectID, cli, pk)
 					resultMtx.Lock()
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go
index 93ef8ba2..cfc17cfa 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go
@@ -160,9 +160,6 @@ func (b *Blobovniczas) moveObjects(ctx context.Context, blz *blobovnicza.Blobovn
 
 	eg, egCtx := errgroup.WithContext(ctx)
 	for addr, data := range batch {
-		addr := addr
-		data := data
-
 		if err := limiter.AcquireWorkSlot(egCtx); err != nil {
 			_ = eg.Wait()
 			return result.Load(), err
diff --git a/pkg/local_object_storage/blobstor/info.go b/pkg/local_object_storage/blobstor/info.go
index 8a5bb870..c1c47f3b 100644
--- a/pkg/local_object_storage/blobstor/info.go
+++ b/pkg/local_object_storage/blobstor/info.go
@@ -43,7 +43,6 @@ func (b *BlobStor) ObjectsCount(ctx context.Context) (uint64, error) {
 
 	eg, egCtx := errgroup.WithContext(ctx)
 	for i := range b.storage {
-		i := i
 		eg.Go(func() error {
 			v, e := b.storage[i].Storage.ObjectsCount(egCtx)
 			if e != nil {
diff --git a/pkg/local_object_storage/engine/control.go b/pkg/local_object_storage/engine/control.go
index 03196400..4778cf53 100644
--- a/pkg/local_object_storage/engine/control.go
+++ b/pkg/local_object_storage/engine/control.go
@@ -82,8 +82,6 @@ func (e *StorageEngine) Init(ctx context.Context) error {
 	}
 
 	for id, sh := range e.shards {
-		id := id
-		sh := sh
 		eg.Go(func() error {
 			if err := sh.Init(ctx); err != nil {
 				errCh <- shardInitError{
diff --git a/pkg/local_object_storage/engine/shards.go b/pkg/local_object_storage/engine/shards.go
index 56d4fcd4..980b38a6 100644
--- a/pkg/local_object_storage/engine/shards.go
+++ b/pkg/local_object_storage/engine/shards.go
@@ -367,7 +367,6 @@ func (e *StorageEngine) closeShards(deletedShards []hashedShard) error {
 	var multiErrGuard sync.Mutex
 	var eg errgroup.Group
 	for _, sh := range deletedShards {
-		sh := sh
 		eg.Go(func() error {
 			err := sh.SetMode(mode.Disabled)
 			if err != nil {
diff --git a/pkg/local_object_storage/engine/writecache.go b/pkg/local_object_storage/engine/writecache.go
index 3e8f387e..7710bc7f 100644
--- a/pkg/local_object_storage/engine/writecache.go
+++ b/pkg/local_object_storage/engine/writecache.go
@@ -102,7 +102,6 @@ func (e *StorageEngine) SealWriteCache(ctx context.Context, prm SealWriteCachePr
 
 	eg, egCtx := errgroup.WithContext(ctx)
 	for _, shardID := range prm.ShardIDs {
-		shardID := shardID
 		eg.Go(func() error {
 			e.mtx.RLock()
 			sh, ok := e.shards[shardID.String()]
diff --git a/pkg/local_object_storage/pilorama/forest_test.go b/pkg/local_object_storage/pilorama/forest_test.go
index ecca9842..41d7a567 100644
--- a/pkg/local_object_storage/pilorama/forest_test.go
+++ b/pkg/local_object_storage/pilorama/forest_test.go
@@ -1459,7 +1459,6 @@ func testTreeLastSyncHeight(t *testing.T, f ForestStorage) {
 
 func TestForest_ListTrees(t *testing.T) {
 	for i := range providers {
-		i := i
 		t.Run(providers[i].name, func(t *testing.T) {
 			testTreeListTrees(t, providers[i].construct)
 		})
diff --git a/pkg/local_object_storage/writecache/flush_test.go b/pkg/local_object_storage/writecache/flush_test.go
index 3c951beb..a637da45 100644
--- a/pkg/local_object_storage/writecache/flush_test.go
+++ b/pkg/local_object_storage/writecache/flush_test.go
@@ -170,7 +170,6 @@ func runFlushTest[Option any](
 
 	t.Run("ignore errors", func(t *testing.T) {
 		for _, f := range failures {
-			f := f
 			t.Run(f.Desc, func(t *testing.T) {
 				errCountOpt, errCount := errCountOption()
 				wc, bs, mb := newCache(t, createCacheFn, smallSize, errCountOpt)
diff --git a/pkg/services/object/get/assemblerec.go b/pkg/services/object/get/assemblerec.go
index 6a02673c..dde0d7da 100644
--- a/pkg/services/object/get/assemblerec.go
+++ b/pkg/services/object/get/assemblerec.go
@@ -170,7 +170,6 @@ func (a *assemblerec) processECNodesRequests(ctx context.Context, nodes []placem
 	eg.SetLimit(dataCount)
 
 	for _, ch := range a.ecInfo.localChunks {
-		ch := ch
 		eg.Go(func() error {
 			select {
 			case <-ctx.Done():
diff --git a/pkg/services/object/put/common.go b/pkg/services/object/put/common.go
index 6696a192..cbb7f5f3 100644
--- a/pkg/services/object/put/common.go
+++ b/pkg/services/object/put/common.go
@@ -71,7 +71,6 @@ func (n *nodeIterator) forEachAddress(ctx context.Context, traverser *placement.
 	var wg sync.WaitGroup
 
 	for _, addr := range addrs {
-		addr := addr
 		if ok := n.mExclude[string(addr.PublicKey())]; ok != nil {
 			if *ok {
 				traverser.SubmitSuccess()
diff --git a/pkg/services/object/put/ec.go b/pkg/services/object/put/ec.go
index fbb51912..1fadf65f 100644
--- a/pkg/services/object/put/ec.go
+++ b/pkg/services/object/put/ec.go
@@ -216,7 +216,6 @@ func (e *ecWriter) writeRawObject(ctx context.Context, obj *objectSDK.Object) er
 	}
 
 	for idx := range parts {
-		idx := idx
 		eg.Go(func() error {
 			return e.writePart(egCtx, parts[idx], idx, nodes, visited)
 		})
diff --git a/pkg/services/policer/ec.go b/pkg/services/policer/ec.go
index 0a118797..61a65fc2 100644
--- a/pkg/services/policer/ec.go
+++ b/pkg/services/policer/ec.go
@@ -357,8 +357,6 @@ func (p *Policer) collectExistedChunks(ctx context.Context, objInfo objectcore.I
 	parts := make([]*objectSDK.Object, objInfo.ECInfo.Total)
 	errGroup, egCtx := errgroup.WithContext(ctx)
 	for idx, nodes := range existedChunks {
-		idx := idx
-		nodes := nodes
 		errGroup.Go(func() error {
 			var objID oid.Address
 			objID.SetContainer(parentAddress.Container())
diff --git a/pkg/services/tree/sync.go b/pkg/services/tree/sync.go
index 0f85f50b..be22074a 100644
--- a/pkg/services/tree/sync.go
+++ b/pkg/services/tree/sync.go
@@ -190,8 +190,6 @@ func (s *Service) applyOperationStream(ctx context.Context, cid cid.ID, treeID s
 
 	var prev *pilorama.Move
 	for m := range operationStream {
-		m := m
-
 		// skip already applied op
 		if prev != nil && prev.Time == m.Time {
 			continue
@@ -287,8 +285,6 @@ func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64,
 	allNodesSynced.Store(true)
 
 	for i, n := range nodes {
-		i := i
-		n := n
 		errGroup.Go(func() error {
 			var nodeSynced bool
 			n.IterateNetworkEndpoints(func(addr string) bool {
@@ -421,7 +417,7 @@ func (s *Service) syncContainers(ctx context.Context, cnrs []cid.ID) {
 	var wg sync.WaitGroup
 	for _, cnr := range cnrs {
 		wg.Add(1)
-		cnr := cnr
+
 		err := s.syncPool.Submit(func() {
 			defer wg.Done()
 			s.log.Debug(logs.TreeSyncingContainerTrees, zap.Stringer("cid", cnr))
diff --git a/pkg/services/tree/sync_test.go b/pkg/services/tree/sync_test.go
index 190b4ccb..497d9055 100644
--- a/pkg/services/tree/sync_test.go
+++ b/pkg/services/tree/sync_test.go
@@ -51,8 +51,6 @@ func Test_mergeOperationStreams(t *testing.T) {
 
 			// generate and put values to all chans
 			for i, ch := range nodeOpChans {
-				i := i
-				ch := ch
 				go func() {
 					for _, tm := range tt.opTimes[i] {
 						op := &pilorama.Move{}
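
Note (reviewer addition, not part of the patch): the `x := x` copies above are removed because Go 1.22 gives every `for` iteration its own instance of the loop variable, so closures and goroutines no longer share a single variable across iterations; the linter list is switched from the deprecated exportloopref to copyloopvar, which flags such now-redundant copies. The standalone sketch below, assuming the module's `go` directive is 1.22 or newer, illustrates the semantics this cleanup relies on; it uses only hypothetical identifiers, none taken from the repository.

package main

import (
	"fmt"
	"sort"
	"sync"
)

func main() {
	var (
		mu   sync.Mutex
		wg   sync.WaitGroup
		seen []int
	)

	for _, v := range []int{1, 2, 3} {
		wg.Add(1)
		// Before Go 1.22 a `v := v` copy was needed here so each goroutine
		// captured its own value; with per-iteration loop variables the
		// closure already captures this iteration's v.
		go func() {
			defer wg.Done()
			mu.Lock()
			seen = append(seen, v)
			mu.Unlock()
		}()
	}
	wg.Wait()

	sort.Ints(seen)
	fmt.Println(seen) // always [1 2 3] under Go 1.22+ loop semantics
}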