.golangci.yml: Add intrange linter, fix issues #1459
18 changed files with 28 additions and 42 deletions
@@ -87,5 +87,7 @@ linters:
     - perfsprint
     - testifylint
     - protogetter
+    - intrange
+    - tenv
   disable-all: true
   fast: false
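
Note: intrange (enabled above) reports classic counted loops that only iterate an integer number of times and suggests Go 1.22's range-over-int form; that is the mechanical rewrite applied in the hunks below. The newly enabled tenv linter similarly reports os.Setenv in tests in favour of t.Setenv, which explains the config test changes further down. A minimal standalone sketch of the intrange pattern (not taken from this diff):

package main

import "fmt"

func main() {
    const n = 3

    // Pre-Go-1.22 counting loop; this is the shape intrange reports.
    for i := 0; i < n; i++ {
        fmt.Println("classic:", i)
    }

    // Equivalent Go 1.22+ form suggested by the linter: i takes the values 0..n-1.
    for i := range n {
        fmt.Println("range:", i)
    }

    // When the counter itself is unused, the loop variable can be dropped entirely.
    for range n {
        fmt.Println("tick")
    }
}
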
@@ -128,7 +128,7 @@ func generateConfigExample(appDir string, credSize int) (string, error) {
     tmpl.AlphabetDir = filepath.Join(appDir, "alphabet-wallets")

     var i innerring.GlagoliticLetter
-    for i = 0; i < innerring.GlagoliticLetter(credSize); i++ {
+    for i = range innerring.GlagoliticLetter(credSize) {
         tmpl.Glagolitics = append(tmpl.Glagolitics, i.String())
     }

@@ -63,7 +63,7 @@ func TestGenerateAlphabet(t *testing.T) {
         buf.Reset()
         v.Set(commonflags.AlphabetWalletsFlag, walletDir)
         require.NoError(t, GenerateAlphabetCmd.Flags().Set(commonflags.AlphabetSizeFlag, strconv.FormatUint(size, 10)))
-        for i := uint64(0); i < size; i++ {
+        for i := range uint64(size) {
             buf.WriteString(strconv.FormatUint(i, 10) + "\r")
         }

@@ -659,9 +659,7 @@ func SearchObjects(ctx context.Context, prm SearchObjectsPrm) (*SearchObjectsRes

     for {
         n, ok = rdr.Read(buf)
-        for i := range n {
-            list = append(list, buf[i])
-        }
+        list = append(list, buf[:n]...)
         if !ok {
             break
         }
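
Note: rather than converting this loop to the range form, the hunk above drops it entirely; appending the first n elements one at a time is equivalent to appending the sub-slice in a single call. A minimal standalone illustration with byte slices (hypothetical variable names, not the element types used in SearchObjects):

package main

import "fmt"

func main() {
    buf := []byte("abcdef")
    n := 3

    // Element-by-element copy of the first n bytes.
    var a []byte
    for i := range n {
        a = append(a, buf[i])
    }

    // Single append of the same prefix; both results are identical.
    b := append([]byte(nil), buf[:n]...)

    fmt.Println(string(a), string(b)) // abc abc
}
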
@@ -11,7 +11,7 @@ func DecodeOIDs(data []byte) ([]oid.ID, error) {
     size := r.ReadVarUint()
     oids := make([]oid.ID, size)

-    for i := uint64(0); i < size; i++ {
+    for i := range size {
         if err := oids[i].Decode(r.ReadVarBytes()); err != nil {
             return nil, err
         }
@@ -1,7 +1,6 @@
 package config_test

 import (
-    "os"
     "strings"
     "testing"

@@ -38,8 +37,7 @@ func TestConfigEnv(t *testing.T) {

     envName := strings.ToUpper(
         strings.Join([]string{config.EnvPrefix, section, name}, configViper.EnvSeparator))
-    err := os.Setenv(envName, value)
-    require.NoError(t, err)
+    t.Setenv(envName, value)

     c := configtest.EmptyConfig()

@@ -11,8 +11,6 @@ import (
 )

 func fromFile(path string) *config.Config {
-    os.Clearenv() // ENVs have priority over config files, so we do this in tests
-
     return config.New(path, "", "")
 }

@@ -40,15 +38,6 @@ func ForEachFileType(pref string, f func(*config.Config)) {

 // ForEnvFileType creates config from `<pref>.env` file.
 func ForEnvFileType(t testing.TB, pref string, f func(*config.Config)) {
-    envs := os.Environ()
-    t.Cleanup(func() {
-        os.Clearenv()
-        for _, env := range envs {
-            keyValue := strings.Split(env, "=")
-            os.Setenv(keyValue[0], keyValue[1])
-        }
-    })
-
     f(fromEnvFile(t, pref+".env"))
 }

@@ -73,7 +62,6 @@ func loadEnv(t testing.TB, path string) {

         v = strings.Trim(v, `"`)

-        err = os.Setenv(k, v)
-        require.NoError(t, err, "can't set environment variable")
+        t.Setenv(k, v)
     }
 }
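
Note: the config test changes above are driven by the newly enabled tenv linter rather than intrange. testing.TB's Setenv (Go 1.17+) sets the variable for the duration of the test and automatically restores the previous value when the test finishes, which is why the manual os.Environ snapshot and Cleanup in ForEnvFileType could be removed along with the os.Setenv calls. A minimal standalone sketch (hypothetical test name):

package example_test

import (
    "os"
    "testing"
)

func TestSetenvIsScopedToTheTest(t *testing.T) {
    // t.Setenv registers a cleanup that restores the previous value
    // (or unsets the variable) when the test finishes.
    // It must not be combined with t.Parallel.
    t.Setenv("EXAMPLE_KEY", "value")

    if got := os.Getenv("EXAMPLE_KEY"); got != "value" {
        t.Fatalf("unexpected value: %q", got)
    }
    // No manual os.Unsetenv or t.Cleanup is needed here.
}
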
@@ -61,7 +61,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
     require.NoError(t, b.Init())

     storageIDs := make(map[oid.Address][]byte)
-    for i := 0; i < 100; i++ {
+    for range 100 {
         obj := blobstortest.NewObject(64 * 1024) // 64KB object
         data, err := obj.Marshal()
         require.NoError(t, err)
@@ -168,7 +168,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {

     storageIDs := make(map[oid.Address][]byte)
     toDelete := make(map[oid.Address][]byte)
-    for i := 0; i < 100; i++ { // 2 objects for one blobovnicza, so 50 DBs total will be created
+    for i := range 100 { // 2 objects for one blobovnicza, so 50 DBs total will be created
         obj := blobstortest.NewObject(64 * 1024)
         data, err := obj.Marshal()
         require.NoError(t, err)
@@ -236,7 +236,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
     require.NoError(t, b.Init())

     storageIDs := make(map[oid.Address][]byte)
-    for i := 0; i < 100; i++ { // 2 objects for one blobovnicza, so 50 DBs total will be created
+    for range 100 { // 2 objects for one blobovnicza, so 50 DBs total will be created
         obj := blobstortest.NewObject(64 * 1024)
         data, err := obj.Marshal()
         require.NoError(t, err)
@@ -47,7 +47,7 @@ func TestIterateObjects(t *testing.T) {

     mObjs := make(map[string]addrData)

-    for i := uint64(0); i < objNum; i++ {
+    for i := range uint64(objNum) {
         sz := smalSz

         big := i < objNum/2
@@ -151,7 +151,7 @@ func TestErrorReporting(t *testing.T) {
         checkShardState(t, te.ng, te.shards[1].id, 0, mode.ReadWrite)
     }

-    for i := uint32(0); i < 2; i++ {
+    for i := range uint32(2) {
         _, err = te.ng.Get(context.Background(), GetPrm{addr: object.AddressOf(obj)})
         require.Error(t, err)
         checkShardState(t, te.ng, te.shards[0].id, errThreshold+i, mode.ReadOnly)
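
Note on the typed bounds used in several of these hunks: with range-over-int, the loop variable takes the type of the range expression, so a counter that feeds typed arithmetic (errThreshold+i above) keeps compiling without extra conversions. A minimal standalone sketch (hypothetical values):

package main

import "fmt"

func main() {
    var errThreshold uint32 = 5

    // i is uint32 because the range expression is uint32,
    // so mixing it with other uint32 values needs no conversion.
    for i := range uint32(2) {
        fmt.Println(errThreshold + i)
    }
}
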
@@ -1081,7 +1081,7 @@ func prepareRandomTree(nodeCount, opCount int) []Move {
 }

 func compareForests(t *testing.T, expected, actual Forest, cid cidSDK.ID, treeID string, nodeCount int) {
-    for i := uint64(0); i < uint64(nodeCount); i++ {
+    for i := range uint64(nodeCount) {
         expectedMeta, expectedParent, err := expected.TreeGetMeta(context.Background(), cid, treeID, i)
         require.NoError(t, err)
         actualMeta, actualParent, err := actual.TreeGetMeta(context.Background(), cid, treeID, i)
@@ -216,7 +216,7 @@ func TestRefillMetabase(t *testing.T) {
     locked := make([]oid.ID, 1, 2)
     locked[0] = oidtest.ID()
     cnrLocked := cidtest.ID()
-    for i := uint64(0); i < objNum; i++ {
+    for range objNum {
         obj := objecttest.Object()
         obj.SetType(objectSDK.TypeRegular)

@@ -14,7 +14,7 @@ func TestLimiter(t *testing.T) {
     l := newFlushLimiter(uint64(maxSize))
     var currSize atomic.Int64
     var eg errgroup.Group
-    for i := 0; i < 10_000; i++ {
+    for range 10_000 {
         eg.Go(func() error {
             defer l.release(single)
             defer currSize.Add(-1)
@@ -8,7 +8,7 @@ import (
 )

 func tickN(t *timer.BlockTimer, n uint32) {
-    for i := uint32(0); i < n; i++ {
+    for range n {
         t.Tick(0)
     }
 }
@@ -50,7 +50,7 @@ func testGenerateMetaHeader(depth uint32, b *acl.BearerToken, s *session.Token)
     metaHeader.SetBearerToken(b)
     metaHeader.SetSessionToken(s)

-    for i := uint32(0); i < depth; i++ {
+    for range depth {
         link := metaHeader
         metaHeader = new(session.RequestMetaHeader)
         metaHeader.SetOrigin(link)
@@ -19,7 +19,7 @@ import (

 func GeneratePayloadPool(count uint, size uint) [][]byte {
     var pool [][]byte
-    for i := uint(0); i < count; i++ {
+    for range count {
         payload := make([]byte, size)
         _, _ = rand.Read(payload)

@@ -30,8 +30,8 @@ func GeneratePayloadPool(count uint, size uint) [][]byte {

 func GenerateAttributePool(count uint) []objectSDK.Attribute {
     var pool []objectSDK.Attribute
-    for i := uint(0); i < count; i++ {
-        for j := uint(0); j < count; j++ {
+    for i := range count {
+        for j := range count {
             attr := *objectSDK.NewAttribute()
             attr.SetKey(fmt.Sprintf("key%d", i))
             attr.SetValue(fmt.Sprintf("value%d", j))
@@ -43,7 +43,7 @@ func GenerateAttributePool(count uint) []objectSDK.Attribute {

 func GenerateOwnerPool(count uint) []user.ID {
     var pool []user.ID
-    for i := uint(0); i < count; i++ {
+    for range count {
         pool = append(pool, usertest.ID())
     }
     return pool
@@ -118,7 +118,7 @@ func WithPayloadFromPool(pool [][]byte) ObjectOption {
 func WithAttributesFromPool(pool []objectSDK.Attribute, count uint) ObjectOption {
     return func(obj *objectSDK.Object) {
         var attrs []objectSDK.Attribute
-        for i := uint(0); i < count; i++ {
+        for range count {
             attrs = append(attrs, pool[rand.Intn(len(pool))])
         }
         obj.SetAttributes(attrs...)
@@ -29,7 +29,7 @@ func PopulateWithObjects(
 ) {
     digits := "0123456789"

-    for i := uint(0); i < count; i++ {
+    for range count {
         obj := factory()

         id := []byte(fmt.Sprintf(
@@ -59,7 +59,7 @@ func PopulateWithBigObjects(
     count uint,
     factory func() *objectSDK.Object,
 ) {
-    for i := uint(0); i < count; i++ {
+    for range count {
         group.Go(func() error {
             if err := populateWithBigObject(ctx, db, factory); err != nil {
                 return fmt.Errorf("couldn't put a big object: %w", err)
@@ -154,7 +154,7 @@ func PopulateGraveyard(
     wg := &sync.WaitGroup{}
     wg.Add(int(count))

-    for i := uint(0); i < count; i++ {
+    for range count {
         obj := factory()

         prm := meta.PutPrm{}
@@ -226,7 +226,7 @@ func PopulateLocked(
     wg := &sync.WaitGroup{}
     wg.Add(int(count))

-    for i := uint(0); i < count; i++ {
+    for range count {
         defer wg.Done()

         obj := factory()
@@ -116,7 +116,7 @@ func populate() (err error) {
     eg, ctx := errgroup.WithContext(ctx)
     eg.SetLimit(int(jobs))

-    for i := uint(0); i < numContainers; i++ {
+    for range numContainers {
         cid := cidtest.ID()

         for _, typ := range types {