[#1341] .golangci.yml: Replace exportloopref with copyloopvar

exportloopref is deprecated; copyloopvar is its replacement now that Go 1.22
gives each loop iteration its own variables. The now-redundant copies were
removed with the following gopatch:
```
@@
var index, value identifier
var slice expression
@@
for index, value := range slice {
...
-value := value
...
}

@@
var index, value identifier
var slice expression
@@
for index, value := range slice {
...
-index := index
...
}

@@
var value identifier
var channel expression
@@
for value := range channel {
...
-value := value
...
}
```
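
For context: Go 1.22 re-scoped `for` loop variables to be per-iteration, which is exactly what makes each `x := x` copy below dead code. A minimal sketch (not part of this commit) of the behavioral difference:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	for _, v := range []int{1, 2, 3} {
		wg.Add(1)
		// Go >= 1.22: v is a new variable on every iteration, so the closure
		// captures a per-iteration copy and "v := v" is redundant (this is
		// what copyloopvar flags). Before 1.22 all iterations shared one v,
		// and without the copy the goroutines would typically all print 3.
		go func() {
			defer wg.Done()
			fmt.Println(v)
		}()
	}
	wg.Wait() // with Go >= 1.22 prints 1, 2, 3 in some order
}
```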

Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
Author: Evgenii Stratonikov <e.stratonikov@yadro.com>
Date:   2024-08-28 14:45:57 +03:00
Commit: 03976c6ed5 (parent: 7e97df4878)

17 changed files with 2 additions and 29 deletions

```diff
@@ -67,7 +67,7 @@ linters:
     - bidichk
     - durationcheck
     - exhaustive
-    - exportloopref
+    - copyloopvar
     - gofmt
     - goimports
     - misspell
```

```diff
@@ -73,7 +73,6 @@ func initializeWallets(v *viper.Viper, walletDir string, size int) ([]string, er
 			return nil, fmt.Errorf("can't fetch password: %w", err)
 		}
 
-		i := i
 		errG.Go(func() error {
 			p := filepath.Join(walletDir, innerring.GlagoliticLetter(i).String()+".json")
 			f, err := os.OpenFile(p, os.O_CREATE, 0o644)
@@ -107,7 +106,6 @@ func initializeWallets(v *viper.Viper, walletDir string, size int) ([]string, er
 	// Create consensus account with 2*N/3+1 multi-signature.
 	bftCount := smartcontract.GetDefaultHonestNodeCount(size)
 	for i := range wallets {
-		i := i
 		ps := pubs.Copy()
 		errG.Go(func() error {
 			if err := addMultisigAccount(wallets[i], majCount, constants.CommitteeAccountName, passwords[i], ps); err != nil {
```
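
The remaining hunks all delete this same pattern around `errgroup`. A self-contained sketch of the before/after, using hypothetical names rather than the project's code:

```go
package main

import (
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	items := []string{"a", "b", "c"}
	var eg errgroup.Group
	for i, item := range items {
		// Before Go 1.22 this loop needed "i := i" and "item := item" here,
		// exactly the lines this commit deletes; with per-iteration loop
		// variables each closure already captures its own copies.
		eg.Go(func() error {
			fmt.Println(i, item)
			return nil
		})
	}
	if err := eg.Wait(); err != nil {
		fmt.Println("error:", err)
	}
}
```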

```diff
@@ -70,7 +70,6 @@ var listContainersCmd = &cobra.Command{
 				continue
 			}
 
-			cnrID := cnrID
 			prmGet.ClientParams.ContainerID = &cnrID
 			res, err := internalclient.GetContainer(cmd.Context(), prmGet)
 			if err != nil {
```

```diff
@@ -393,8 +393,6 @@ func getActualPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, pk *ecdsa.
 	eg, egCtx := errgroup.WithContext(cmd.Context())
 	for _, cand := range candidates {
-		cand := cand
-
 		eg.Go(func() error {
 			cli, err := createClient(egCtx, cmd, cand, pk)
 			if err != nil {
@@ -405,7 +403,6 @@
 	}
 
 	for _, object := range objects {
-		object := object
 		eg.Go(func() error {
 			stored, err := isObjectStoredOnNode(egCtx, cmd, object.containerID, object.objectID, cli, pk)
 			resultMtx.Lock()
```

```diff
@@ -160,9 +160,6 @@ func (b *Blobovniczas) moveObjects(ctx context.Context, blz *blobovnicza.Blobovn
 
 	eg, egCtx := errgroup.WithContext(ctx)
 	for addr, data := range batch {
-		addr := addr
-		data := data
-
 		if err := limiter.AcquireWorkSlot(egCtx); err != nil {
 			_ = eg.Wait()
 			return result.Load(), err
```

```diff
@@ -43,7 +43,6 @@ func (b *BlobStor) ObjectsCount(ctx context.Context) (uint64, error) {
 
 	eg, egCtx := errgroup.WithContext(ctx)
 	for i := range b.storage {
-		i := i
 		eg.Go(func() error {
 			v, e := b.storage[i].Storage.ObjectsCount(egCtx)
 			if e != nil {
```

```diff
@@ -82,8 +82,6 @@ func (e *StorageEngine) Init(ctx context.Context) error {
 	}
 
 	for id, sh := range e.shards {
-		id := id
-		sh := sh
 		eg.Go(func() error {
 			if err := sh.Init(ctx); err != nil {
 				errCh <- shardInitError{
```

```diff
@@ -367,7 +367,6 @@ func (e *StorageEngine) closeShards(deletedShards []hashedShard) error {
 	var multiErrGuard sync.Mutex
 	var eg errgroup.Group
 	for _, sh := range deletedShards {
-		sh := sh
 		eg.Go(func() error {
 			err := sh.SetMode(mode.Disabled)
 			if err != nil {
```

```diff
@@ -102,7 +102,6 @@ func (e *StorageEngine) SealWriteCache(ctx context.Context, prm SealWriteCachePr
 
 	eg, egCtx := errgroup.WithContext(ctx)
 	for _, shardID := range prm.ShardIDs {
-		shardID := shardID
 		eg.Go(func() error {
 			e.mtx.RLock()
 			sh, ok := e.shards[shardID.String()]
```

```diff
@@ -1459,7 +1459,6 @@ func testTreeLastSyncHeight(t *testing.T, f ForestStorage) {
 
 func TestForest_ListTrees(t *testing.T) {
 	for i := range providers {
-		i := i
 		t.Run(providers[i].name, func(t *testing.T) {
 			testTreeListTrees(t, providers[i].construct)
 		})
```

```diff
@@ -170,7 +170,6 @@ func runFlushTest[Option any](
 
 	t.Run("ignore errors", func(t *testing.T) {
 		for _, f := range failures {
-			f := f
 			t.Run(f.Desc, func(t *testing.T) {
 				errCountOpt, errCount := errCountOption()
 				wc, bs, mb := newCache(t, createCacheFn, smallSize, errCountOpt)
```

```diff
@@ -170,7 +170,6 @@ func (a *assemblerec) processECNodesRequests(ctx context.Context, nodes []placem
 
 	eg.SetLimit(dataCount)
 	for _, ch := range a.ecInfo.localChunks {
-		ch := ch
 		eg.Go(func() error {
 			select {
 			case <-ctx.Done():
```

```diff
@@ -71,7 +71,6 @@ func (n *nodeIterator) forEachAddress(ctx context.Context, traverser *placement.
 
 	var wg sync.WaitGroup
 	for _, addr := range addrs {
-		addr := addr
 		if ok := n.mExclude[string(addr.PublicKey())]; ok != nil {
 			if *ok {
 				traverser.SubmitSuccess()
```

```diff
@@ -216,7 +216,6 @@ func (e *ecWriter) writeRawObject(ctx context.Context, obj *objectSDK.Object) er
 	}
 
 	for idx := range parts {
-		idx := idx
 		eg.Go(func() error {
 			return e.writePart(egCtx, parts[idx], idx, nodes, visited)
 		})
```

```diff
@@ -357,8 +357,6 @@ func (p *Policer) collectExistedChunks(ctx context.Context, objInfo objectcore.I
 	parts := make([]*objectSDK.Object, objInfo.ECInfo.Total)
 	errGroup, egCtx := errgroup.WithContext(ctx)
 	for idx, nodes := range existedChunks {
-		idx := idx
-		nodes := nodes
 		errGroup.Go(func() error {
 			var objID oid.Address
 			objID.SetContainer(parentAddress.Container())
```

```diff
@@ -190,8 +190,6 @@ func (s *Service) applyOperationStream(ctx context.Context, cid cid.ID, treeID s
 
 	var prev *pilorama.Move
 	for m := range operationStream {
-		m := m
-
 		// skip already applied op
 		if prev != nil && prev.Time == m.Time {
 			continue
@@ -287,8 +285,6 @@ func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64,
 	allNodesSynced.Store(true)
 
 	for i, n := range nodes {
-		i := i
-		n := n
 		errGroup.Go(func() error {
 			var nodeSynced bool
 			n.IterateNetworkEndpoints(func(addr string) bool {
@@ -421,7 +417,7 @@ func (s *Service) syncContainers(ctx context.Context, cnrs []cid.ID) {
 	var wg sync.WaitGroup
 	for _, cnr := range cnrs {
 		wg.Add(1)
-		cnr := cnr
+
 		err := s.syncPool.Submit(func() {
 			defer wg.Done()
 			s.log.Debug(logs.TreeSyncingContainerTrees, zap.Stringer("cid", cnr))
```

```diff
@@ -51,8 +51,6 @@ func Test_mergeOperationStreams(t *testing.T) {
 
 			// generate and put values to all chans
 			for i, ch := range nodeOpChans {
-				i := i
-				ch := ch
 				go func() {
 					for _, tm := range tt.opTimes[i] {
 						op := &pilorama.Move{}
```