.golangci.yml: Add intrange linter, fix issues #1459

Merged
fyrchik merged 3 commits from fyrchik/frostfs-node:golangci-intrange into master 2024-10-30 15:18:23 +00:00
18 changed files with 28 additions and 42 deletions
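For readers unfamiliar with the two linters enabled here: intrange flags three-clause counter loops that can use Go 1.22's range-over-int form, and tenv flags os.Setenv calls in tests that can be replaced with t.Setenv. A minimal before/after sketch of the rewrites intrange suggests (illustrative code, not taken from this repository; requires Go 1.22+):

// Before/after sketch of the intrange rewrite.
package main

import "fmt"

func main() {
	const n = 3

	// Before: classic three-clause counter loop.
	for i := 0; i < n; i++ {
		fmt.Println("old style", i)
	}

	// After: range over an integer, which intrange suggests.
	for i := range n {
		fmt.Println("new style", i)
	}

	// When the index is unused, the loop variable can be dropped entirely.
	for range n {
		fmt.Println("tick")
	}
}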

View file

@@ -87,5 +87,7 @@ linters:
     - perfsprint
     - testifylint
     - protogetter
+    - intrange
+    - tenv
   disable-all: true
   fast: false

View file

@@ -128,7 +128,7 @@ func generateConfigExample(appDir string, credSize int) (string, error) {
 	tmpl.AlphabetDir = filepath.Join(appDir, "alphabet-wallets")
 	var i innerring.GlagoliticLetter
-	for i = 0; i < innerring.GlagoliticLetter(credSize); i++ {
+	for i = range innerring.GlagoliticLetter(credSize) {
 		tmpl.Glagolitics = append(tmpl.Glagolitics, i.String())
 	}

View file

@@ -63,7 +63,7 @@ func TestGenerateAlphabet(t *testing.T) {
 	buf.Reset()
 	v.Set(commonflags.AlphabetWalletsFlag, walletDir)
 	require.NoError(t, GenerateAlphabetCmd.Flags().Set(commonflags.AlphabetSizeFlag, strconv.FormatUint(size, 10)))
-	for i := uint64(0); i < size; i++ {
+	for i := range uint64(size) {
 		buf.WriteString(strconv.FormatUint(i, 10) + "\r")
 	}

View file

@@ -659,9 +659,7 @@ func SearchObjects(ctx context.Context, prm SearchObjectsPrm) (*SearchObjectsRes
 	for {
 		n, ok = rdr.Read(buf)
-		for i := range n {
-			list = append(list, buf[i])
-		}
+		list = append(list, buf[:n]...)
 		if !ok {
 			break
 		}
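The SearchObjects hunk above is the one change in this PR that is not a pure loop-header rewrite: the element-wise copy is replaced by a single append with a slice spread, which yields the same result. A small illustrative sketch with toy data (not repository code):

// Equivalence behind the SearchObjects change: appending buf[:n]... copies
// the first n elements in one call, same as the removed index loop.
package main

import "fmt"

func main() {
	buf := []int{10, 20, 30, 40}
	n := 3 // pretend a reader filled only the first n entries

	var byLoop, bySpread []int
	for i := range n {
		byLoop = append(byLoop, buf[i])
	}
	bySpread = append(bySpread, buf[:n]...)

	fmt.Println(byLoop, bySpread) // [10 20 30] [10 20 30]
}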

View file

@@ -11,7 +11,7 @@ func DecodeOIDs(data []byte) ([]oid.ID, error) {
 	size := r.ReadVarUint()
 	oids := make([]oid.ID, size)
-	for i := uint64(0); i < size; i++ {
+	for i := range size {
 		if err := oids[i].Decode(r.ReadVarBytes()); err != nil {
 			return nil, err
 		}

View file

@@ -1,7 +1,6 @@
 package config_test
 import (
-	"os"
 	"strings"
 	"testing"
@@ -38,8 +37,7 @@ func TestConfigEnv(t *testing.T) {
 	envName := strings.ToUpper(
 		strings.Join([]string{config.EnvPrefix, section, name}, configViper.EnvSeparator))
-	err := os.Setenv(envName, value)
-	require.NoError(t, err)
+	t.Setenv(envName, value)
 	c := configtest.EmptyConfig()

View file

@@ -11,8 +11,6 @@ import (
 )
 func fromFile(path string) *config.Config {
-	os.Clearenv() // ENVs have priority over config files, so we do this in tests
 	return config.New(path, "", "")
 }
@@ -40,15 +38,6 @@ func ForEachFileType(pref string, f func(*config.Config)) {
 // ForEnvFileType creates config from `<pref>.env` file.
 func ForEnvFileType(t testing.TB, pref string, f func(*config.Config)) {
-	envs := os.Environ()
-	t.Cleanup(func() {
-		os.Clearenv()
-		for _, env := range envs {
-			keyValue := strings.Split(env, "=")
-			os.Setenv(keyValue[0], keyValue[1])
-		}
-	})
 	f(fromEnvFile(t, pref+".env"))
 }
@@ -73,7 +62,6 @@ func loadEnv(t testing.TB, path string) {
 		v = strings.Trim(v, `"`)
-		err = os.Setenv(k, v)
-		require.NoError(t, err, "can't set environment variable")
+		t.Setenv(k, v)
 	}
 }
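The deletions in the configtest helpers above rely on the behavior of testing's Setenv: it restores the previous value of the variable via a registered cleanup when the test finishes (and fails the test if combined with t.Parallel), so the manual os.Environ snapshot-and-restore block is no longer needed. A small illustrative sketch, not repository code (package and variable names are hypothetical):

// Why the manual environment save/restore could be dropped: t.Setenv
// registers a cleanup that restores the prior value by itself.
package envtest

import (
	"os"
	"testing"
)

func TestSetenvRestores(t *testing.T) {
	const key = "EXAMPLE_VAR" // hypothetical variable name
	t.Setenv(key, "temporary value")

	if got := os.Getenv(key); got != "temporary value" {
		t.Fatalf("unexpected value: %q", got)
	}
	// After this test returns, the testing package restores the prior
	// value of EXAMPLE_VAR automatically; no os.Environ snapshot needed.
}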

View file

@@ -61,7 +61,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
 	require.NoError(t, b.Init())
 	storageIDs := make(map[oid.Address][]byte)
-	for i := 0; i < 100; i++ {
+	for range 100 {
 		obj := blobstortest.NewObject(64 * 1024) // 64KB object
 		data, err := obj.Marshal()
 		require.NoError(t, err)
@@ -168,7 +168,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
 	storageIDs := make(map[oid.Address][]byte)
 	toDelete := make(map[oid.Address][]byte)
-	for i := 0; i < 100; i++ { // 2 objects for one blobovnicza, so 50 DBs total will be created
+	for i := range 100 { // 2 objects for one blobovnicza, so 50 DBs total will be created
 		obj := blobstortest.NewObject(64 * 1024)
 		data, err := obj.Marshal()
 		require.NoError(t, err)
@@ -236,7 +236,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
 	require.NoError(t, b.Init())
 	storageIDs := make(map[oid.Address][]byte)
-	for i := 0; i < 100; i++ { // 2 objects for one blobovnicza, so 50 DBs total will be created
+	for range 100 { // 2 objects for one blobovnicza, so 50 DBs total will be created
 		obj := blobstortest.NewObject(64 * 1024)
 		data, err := obj.Marshal()
 		require.NoError(t, err)

View file

@@ -47,7 +47,7 @@ func TestIterateObjects(t *testing.T) {
 	mObjs := make(map[string]addrData)
-	for i := uint64(0); i < objNum; i++ {
+	for i := range uint64(objNum) {
 		sz := smalSz
 		big := i < objNum/2

View file

@@ -151,7 +151,7 @@ func TestErrorReporting(t *testing.T) {
 		checkShardState(t, te.ng, te.shards[1].id, 0, mode.ReadWrite)
 	}
-	for i := uint32(0); i < 2; i++ {
+	for i := range uint32(2) {
 		_, err = te.ng.Get(context.Background(), GetPrm{addr: object.AddressOf(obj)})
 		require.Error(t, err)
 		checkShardState(t, te.ng, te.shards[0].id, errThreshold+i, mode.ReadOnly)

View file

@@ -1081,7 +1081,7 @@ func prepareRandomTree(nodeCount, opCount int) []Move {
 }
 func compareForests(t *testing.T, expected, actual Forest, cid cidSDK.ID, treeID string, nodeCount int) {
-	for i := uint64(0); i < uint64(nodeCount); i++ {
+	for i := range uint64(nodeCount) {
 		expectedMeta, expectedParent, err := expected.TreeGetMeta(context.Background(), cid, treeID, i)
 		require.NoError(t, err)
 		actualMeta, actualParent, err := actual.TreeGetMeta(context.Background(), cid, treeID, i)

View file

@@ -216,7 +216,7 @@ func TestRefillMetabase(t *testing.T) {
 	locked := make([]oid.ID, 1, 2)
 	locked[0] = oidtest.ID()
 	cnrLocked := cidtest.ID()
-	for i := uint64(0); i < objNum; i++ {
+	for range objNum {
 		obj := objecttest.Object()
 		obj.SetType(objectSDK.TypeRegular)

View file

@@ -14,7 +14,7 @@ func TestLimiter(t *testing.T) {
 	l := newFlushLimiter(uint64(maxSize))
 	var currSize atomic.Int64
 	var eg errgroup.Group
-	for i := 0; i < 10_000; i++ {
+	for range 10_000 {
 		eg.Go(func() error {
 			defer l.release(single)
 			defer currSize.Add(-1)

View file

@@ -8,7 +8,7 @@ import (
 )
 func tickN(t *timer.BlockTimer, n uint32) {
-	for i := uint32(0); i < n; i++ {
+	for range n {
 		t.Tick(0)
 	}
 }

View file

@@ -50,7 +50,7 @@ func testGenerateMetaHeader(depth uint32, b *acl.BearerToken, s *session.Token)
 	metaHeader.SetBearerToken(b)
 	metaHeader.SetSessionToken(s)
-	for i := uint32(0); i < depth; i++ {
+	for range depth {
 		link := metaHeader
 		metaHeader = new(session.RequestMetaHeader)
 		metaHeader.SetOrigin(link)

View file

@@ -19,7 +19,7 @@ import (
 func GeneratePayloadPool(count uint, size uint) [][]byte {
 	var pool [][]byte
-	for i := uint(0); i < count; i++ {
+	for range count {
 		payload := make([]byte, size)
 		_, _ = rand.Read(payload)
@@ -30,8 +30,8 @@ func GeneratePayloadPool(count uint, size uint) [][]byte {
 func GenerateAttributePool(count uint) []objectSDK.Attribute {
 	var pool []objectSDK.Attribute
-	for i := uint(0); i < count; i++ {
-		for j := uint(0); j < count; j++ {
+	for i := range count {
+		for j := range count {
 			attr := *objectSDK.NewAttribute()
 			attr.SetKey(fmt.Sprintf("key%d", i))
 			attr.SetValue(fmt.Sprintf("value%d", j))
@@ -43,7 +43,7 @@ func GenerateAttributePool(count uint) []objectSDK.Attribute {
 func GenerateOwnerPool(count uint) []user.ID {
 	var pool []user.ID
-	for i := uint(0); i < count; i++ {
+	for range count {
 		pool = append(pool, usertest.ID())
 	}
 	return pool
@@ -118,7 +118,7 @@ func WithPayloadFromPool(pool [][]byte) ObjectOption {
 func WithAttributesFromPool(pool []objectSDK.Attribute, count uint) ObjectOption {
 	return func(obj *objectSDK.Object) {
 		var attrs []objectSDK.Attribute
-		for i := uint(0); i < count; i++ {
+		for range count {
 			attrs = append(attrs, pool[rand.Intn(len(pool))])
 		}
 		obj.SetAttributes(attrs...)

View file

@@ -29,7 +29,7 @@ func PopulateWithObjects(
 ) {
 	digits := "0123456789"
-	for i := uint(0); i < count; i++ {
+	for range count {
 		obj := factory()
 		id := []byte(fmt.Sprintf(
@@ -59,7 +59,7 @@ func PopulateWithBigObjects(
 	count uint,
 	factory func() *objectSDK.Object,
 ) {
-	for i := uint(0); i < count; i++ {
+	for range count {
 		group.Go(func() error {
 			if err := populateWithBigObject(ctx, db, factory); err != nil {
 				return fmt.Errorf("couldn't put a big object: %w", err)
@@ -154,7 +154,7 @@ func PopulateGraveyard(
 	wg := &sync.WaitGroup{}
 	wg.Add(int(count))
-	for i := uint(0); i < count; i++ {
+	for range count {
 		obj := factory()
 		prm := meta.PutPrm{}
@@ -226,7 +226,7 @@ func PopulateLocked(
 	wg := &sync.WaitGroup{}
 	wg.Add(int(count))
-	for i := uint(0); i < count; i++ {
+	for range count {
 		defer wg.Done()
 		obj := factory()

View file

@@ -116,7 +116,7 @@ func populate() (err error) {
 	eg, ctx := errgroup.WithContext(ctx)
 	eg.SetLimit(int(jobs))
-	for i := uint(0); i < numContainers; i++ {
+	for range numContainers {
 		cid := cidtest.ID()
 		for _, typ := range types {