Fix lint errors, update loopvar-related linters #1341
18 changed files with 5 additions and 32 deletions
@@ -67,7 +67,7 @@ linters:
 - bidichk
 - durationcheck
 - exhaustive
-- exportloopref
+- copyloopvar
 - gofmt
 - goimports
 - misspell

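For context: exportloopref warned when a closure or goroutine kept a reference to the single loop variable that Go reused across iterations before 1.22; Go 1.22 gives every iteration its own variable, so that hazard is gone, and copyloopvar instead reports the now-redundant "x := x" copies that the rest of this PR deletes. A minimal sketch of the difference, with illustrative names that are not taken from this repository:

package main

import (
	"fmt"
	"sync"
)

func main() {
	items := []string{"a", "b", "c"}

	var wg sync.WaitGroup
	for _, item := range items {
		// item := item // pre-1.22 workaround; copyloopvar now flags it as redundant
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Println(item) // Go 1.22+: captures this iteration's own variable
		}()
	}
	wg.Wait()
}

The per-iteration semantics apply only to code built with a go directive of 1.22 or later, which lines up with GO_VERSION ?= 1.22 already set in the Makefile below.
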
Makefile

@@ -8,8 +8,8 @@ HUB_IMAGE ?= truecloudlab/frostfs
 HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')"
 
 GO_VERSION ?= 1.22
-LINT_VERSION ?= 1.60.1
-TRUECLOUDLAB_LINT_VERSION ?= 0.0.6
+LINT_VERSION ?= 1.60.3
+TRUECLOUDLAB_LINT_VERSION ?= 0.0.7
 PROTOC_VERSION ?= 25.0
 PROTOGEN_FROSTFS_VERSION ?= $(shell go list -f '{{.Version}}' -m git.frostfs.info/TrueCloudLab/frostfs-api-go/v2)
 PROTOC_OS_VERSION=osx-x86_64

@@ -197,7 +197,7 @@ lint-install:
 	@@make -C $(TMP_DIR)/linters lib CGO_ENABLED=1 OUT_DIR=$(OUTPUT_LINT_DIR)
 	@rm -rf $(TMP_DIR)/linters
 	@rmdir $(TMP_DIR) 2>/dev/null || true
-	@CGO_ENABLED=1 GOBIN=$(LINT_DIR) go install github.com/golangci/golangci-lint/cmd/golangci-lint@v$(LINT_VERSION)
+	@CGO_ENABLED=1 GOBIN=$(LINT_DIR) go install -trimpath github.com/golangci/golangci-lint/cmd/golangci-lint@v$(LINT_VERSION)
 
 # Run linters
 lint:

@@ -73,7 +73,6 @@ func initializeWallets(v *viper.Viper, walletDir string, size int) ([]string, er
 			return nil, fmt.Errorf("can't fetch password: %w", err)
 		}
 
-		i := i
 		errG.Go(func() error {
			p := filepath.Join(walletDir, innerring.GlagoliticLetter(i).String()+".json")
			f, err := os.OpenFile(p, os.O_CREATE, 0o644)

@@ -107,7 +106,6 @@ func initializeWallets(v *viper.Viper, walletDir string, size int) ([]string, er
 	// Create consensus account with 2*N/3+1 multi-signature.
 	bftCount := smartcontract.GetDefaultHonestNodeCount(size)
 	for i := range wallets {
-		i := i
 		ps := pubs.Copy()
 		errG.Go(func() error {
			if err := addMultisigAccount(wallets[i], majCount, constants.CommitteeAccountName, passwords[i], ps); err != nil {

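Most of the remaining hunks repeat this same cleanup inside errgroup-based fan-out loops: the closure passed to Go can now capture the loop variable directly. A small self-contained sketch of that pattern under Go 1.22 semantics; the names and the work done are illustrative, not taken from frostfs-node:

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func process(ctx context.Context, items []string) error {
	eg, egCtx := errgroup.WithContext(ctx)
	for _, item := range items {
		// No "item := item" copy needed: each iteration has its own variable.
		eg.Go(func() error {
			select {
			case <-egCtx.Done():
				return egCtx.Err()
			default:
				fmt.Println("processing", item)
				return nil
			}
		})
	}
	return eg.Wait()
}

func main() {
	if err := process(context.Background(), []string{"a", "b", "c"}); err != nil {
		fmt.Println("error:", err)
	}
}
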
@@ -70,7 +70,6 @@ var listContainersCmd = &cobra.Command{
 				continue
 			}
 
-			cnrID := cnrID
 			prmGet.ClientParams.ContainerID = &cnrID
 			res, err := internalclient.GetContainer(cmd.Context(), prmGet)
 			if err != nil {

@@ -393,8 +393,6 @@ func getActualPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, pk *ecdsa.
 
 	eg, egCtx := errgroup.WithContext(cmd.Context())
 	for _, cand := range candidates {
-		cand := cand
-
 		eg.Go(func() error {
			cli, err := createClient(egCtx, cmd, cand, pk)
			if err != nil {

@@ -405,7 +403,6 @@ func getActualPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, pk *ecdsa.
 			}
 
 			for _, object := range objects {
-				object := object
 				eg.Go(func() error {
					stored, err := isObjectStoredOnNode(egCtx, cmd, object.containerID, object.objectID, cli, pk)
					resultMtx.Lock()

@@ -160,9 +160,6 @@ func (b *Blobovniczas) moveObjects(ctx context.Context, blz *blobovnicza.Blobovn
 	eg, egCtx := errgroup.WithContext(ctx)
 
 	for addr, data := range batch {
-		addr := addr
-		data := data
-
 		if err := limiter.AcquireWorkSlot(egCtx); err != nil {
			_ = eg.Wait()
			return result.Load(), err

@@ -43,7 +43,6 @@ func (b *BlobStor) ObjectsCount(ctx context.Context) (uint64, error) {
 
 	eg, egCtx := errgroup.WithContext(ctx)
 	for i := range b.storage {
-		i := i
 		eg.Go(func() error {
			v, e := b.storage[i].Storage.ObjectsCount(egCtx)
			if e != nil {

@@ -82,8 +82,6 @@ func (e *StorageEngine) Init(ctx context.Context) error {
 	}
 
 	for id, sh := range e.shards {
-		id := id
-		sh := sh
 		eg.Go(func() error {
			if err := sh.Init(ctx); err != nil {
				errCh <- shardInitError{

@@ -367,7 +367,6 @@ func (e *StorageEngine) closeShards(deletedShards []hashedShard) error {
 	var multiErrGuard sync.Mutex
 	var eg errgroup.Group
 	for _, sh := range deletedShards {
-		sh := sh
 		eg.Go(func() error {
			err := sh.SetMode(mode.Disabled)
			if err != nil {

@@ -102,7 +102,6 @@ func (e *StorageEngine) SealWriteCache(ctx context.Context, prm SealWriteCachePr
 
 	eg, egCtx := errgroup.WithContext(ctx)
 	for _, shardID := range prm.ShardIDs {
-		shardID := shardID
 		eg.Go(func() error {
			e.mtx.RLock()
			sh, ok := e.shards[shardID.String()]

@@ -1459,7 +1459,6 @@ func testTreeLastSyncHeight(t *testing.T, f ForestStorage) {
 
 func TestForest_ListTrees(t *testing.T) {
 	for i := range providers {
-		i := i
 		t.Run(providers[i].name, func(t *testing.T) {
			testTreeListTrees(t, providers[i].construct)
		})

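The test hunks follow the common table-driven pattern where each case runs as a t.Run subtest. A hedged sketch of why the copy used to matter there, assuming the subtest calls t.Parallel (names are illustrative, not from these test files); under Go 1.22 the copy is redundant and copyloopvar reports it:

package demo

import "testing"

func TestCases(t *testing.T) {
	cases := []struct{ name, in string }{
		{"lower", "abc"},
		{"upper", "ABC"},
	}
	for _, tc := range cases {
		// tc := tc // pre-1.22: needed because the parallel body runs after the loop advances
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			if tc.in == "" {
				t.Fatalf("empty input for case %s", tc.name)
			}
		})
	}
}
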
@@ -170,7 +170,6 @@ func runFlushTest[Option any](
 
 	t.Run("ignore errors", func(t *testing.T) {
 		for _, f := range failures {
-			f := f
 			t.Run(f.Desc, func(t *testing.T) {
				errCountOpt, errCount := errCountOption()
				wc, bs, mb := newCache(t, createCacheFn, smallSize, errCountOpt)

@@ -170,7 +170,6 @@ func (a *assemblerec) processECNodesRequests(ctx context.Context, nodes []placem
 	eg.SetLimit(dataCount)
 
 	for _, ch := range a.ecInfo.localChunks {
-		ch := ch
 		eg.Go(func() error {
			select {
			case <-ctx.Done():

@@ -71,7 +71,6 @@ func (n *nodeIterator) forEachAddress(ctx context.Context, traverser *placement.
 	var wg sync.WaitGroup
 
 	for _, addr := range addrs {
-		addr := addr
 		if ok := n.mExclude[string(addr.PublicKey())]; ok != nil {
			if *ok {
				traverser.SubmitSuccess()

@@ -216,7 +216,6 @@ func (e *ecWriter) writeRawObject(ctx context.Context, obj *objectSDK.Object) er
 	}
 
 	for idx := range parts {
-		idx := idx
 		eg.Go(func() error {
			return e.writePart(egCtx, parts[idx], idx, nodes, visited)
		})

@@ -357,8 +357,6 @@ func (p *Policer) collectExistedChunks(ctx context.Context, objInfo objectcore.I
 	parts := make([]*objectSDK.Object, objInfo.ECInfo.Total)
 	errGroup, egCtx := errgroup.WithContext(ctx)
 	for idx, nodes := range existedChunks {
-		idx := idx
-		nodes := nodes
 		errGroup.Go(func() error {
			var objID oid.Address
			objID.SetContainer(parentAddress.Container())

@@ -190,8 +190,6 @@ func (s *Service) applyOperationStream(ctx context.Context, cid cid.ID, treeID s
 
 	var prev *pilorama.Move
 	for m := range operationStream {
-		m := m
-
 		// skip already applied op
 		if prev != nil && prev.Time == m.Time {
			continue

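The hunk above shows the other classic reason for the copy: the loop keeps a pointer to the previous element (var prev *pilorama.Move, compared via prev.Time == m.Time). Before Go 1.22, a pointer taken from the loop variable would have aliased the single shared variable, so after the first iteration the comparison would always hold and valid entries would be skipped; the m := m copy avoided that. Since Go 1.22 each iteration's variable has its own address, so the copy can go. A simplified illustration with hypothetical types, not code from this service:

package main

import "fmt"

type move struct{ Time uint64 }

func main() {
	stream := []move{{1}, {2}, {2}, {3}}

	var prev *move
	for _, m := range stream {
		// skip an operation with the same time as the previous one
		if prev != nil && prev.Time == m.Time {
			continue
		}
		prev = &m // Go 1.22+: points at this iteration's own variable
		fmt.Println("apply", m.Time)
	}
}
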
@@ -287,8 +285,6 @@ func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64,
 	allNodesSynced.Store(true)
 
 	for i, n := range nodes {
-		i := i
-		n := n
 		errGroup.Go(func() error {
			var nodeSynced bool
			n.IterateNetworkEndpoints(func(addr string) bool {

@@ -421,7 +417,7 @@ func (s *Service) syncContainers(ctx context.Context, cnrs []cid.ID) {
 	var wg sync.WaitGroup
 	for _, cnr := range cnrs {
 		wg.Add(1)
-		cnr := cnr
+
 		err := s.syncPool.Submit(func() {
			defer wg.Done()
			s.log.Debug(logs.TreeSyncingContainerTrees, zap.Stringer("cid", cnr))

@@ -51,8 +51,6 @@ func Test_mergeOperationStreams(t *testing.T) {
 
 	// generate and put values to all chans
 	for i, ch := range nodeOpChans {
-		i := i
-		ch := ch
 		go func() {
			for _, tm := range tt.opTimes[i] {
				op := &pilorama.Move{}