[#1317] go.mod: Use range over int
Since Go 1.22, a "for" statement with a "range" clause is able to iterate
through integer values from zero to an upper limit.

gopatch script:

@@
var i, e expression
@@
-for i := 0; i <= e - 1; i++ {
+for i := range e {
  ...
}

@@
var i, e expression
@@
-for i := 0; i <= e; i++ {
+for i := range e + 1 {
  ...
}

@@
var i, e expression
@@
-for i := 0; i < e; i++ {
+for i := range e {
  ...
}

Signed-off-by: Ekaterina Lebedeva <ekaterina.lebedeva@yadro.com>
parent 2b3fc50681, commit a685fcdc96
66 changed files with 135 additions and 135 deletions
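For readers unfamiliar with the feature, here is a minimal, self-contained Go 1.22+ sketch of the rewrite applied throughout the diff below. The bound n and the printed labels are illustrative placeholders only, not taken from the patch: ranging over an integer visits 0 through n-1, an old inclusive bound i <= n becomes range n + 1, and the loop variable can be dropped entirely when it is unused.

package main

import "fmt"

func main() {
	n := 3 // hypothetical upper bound, only for illustration

	// Classic three-clause loop (the form the patch removes).
	for i := 0; i < n; i++ {
		fmt.Println("classic:", i)
	}

	// Go 1.22+: range over an int yields 0, 1, ..., n-1.
	for i := range n {
		fmt.Println("range:", i)
	}

	// An inclusive bound `i <= n` becomes `range n + 1`.
	for i := range n + 1 {
		fmt.Println("inclusive:", i)
	}

	// When the index is unused, the variable is dropped entirely,
	// matching the `for range e` rewrites in the diff.
	for range n {
		fmt.Println("tick")
	}
}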
|
@ -68,7 +68,7 @@ func dumpContractHashes(cmd *cobra.Command, _ []string) error {
|
||||||
|
|
||||||
if irSize != 0 {
|
if irSize != 0 {
|
||||||
bw.Reset()
|
bw.Reset()
|
||||||
for i := 0; i < irSize; i++ {
|
for i := range irSize {
|
||||||
emit.AppCall(bw.BinWriter, cs.Hash, "resolve", callflag.ReadOnly,
|
emit.AppCall(bw.BinWriter, cs.Hash, "resolve", callflag.ReadOnly,
|
||||||
helper.GetAlphabetNNSDomain(i),
|
helper.GetAlphabetNNSDomain(i),
|
||||||
int64(nns.TXT))
|
int64(nns.TXT))
|
||||||
|
@ -79,7 +79,7 @@ func dumpContractHashes(cmd *cobra.Command, _ []string) error {
|
||||||
return fmt.Errorf("can't fetch info from NNS: %w", err)
|
return fmt.Errorf("can't fetch info from NNS: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
for i := 0; i < irSize; i++ {
|
for i := range irSize {
|
||||||
info := contractDumpInfo{name: fmt.Sprintf("alphabet %d", i)}
|
info := contractDumpInfo{name: fmt.Sprintf("alphabet %d", i)}
|
||||||
if h, err := helper.ParseNNSResolveResult(alphaRes.Stack[i]); err == nil {
|
if h, err := helper.ParseNNSResolveResult(alphaRes.Stack[i]); err == nil {
|
||||||
info.hash = h
|
info.hash = h
|
||||||
|
|
|
@ -224,7 +224,7 @@ func (l *LocalClient) CalculateNetworkFee(tx *transaction.Transaction) (int64, e
|
||||||
paramz = []manifest.Parameter{{Type: smartcontract.SignatureType}}
|
paramz = []manifest.Parameter{{Type: smartcontract.SignatureType}}
|
||||||
} else if nSigs, _, ok := vm.ParseMultiSigContract(w.VerificationScript); ok {
|
} else if nSigs, _, ok := vm.ParseMultiSigContract(w.VerificationScript); ok {
|
||||||
paramz = make([]manifest.Parameter, nSigs)
|
paramz = make([]manifest.Parameter, nSigs)
|
||||||
for j := 0; j < nSigs; j++ {
|
for j := range nSigs {
|
||||||
paramz[j] = manifest.Parameter{Type: smartcontract.SignatureType}
|
paramz[j] = manifest.Parameter{Type: smartcontract.SignatureType}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -44,7 +44,7 @@ func openAlphabetWallets(v *viper.Viper, walletDir string) ([]*wallet.Wallet, er
|
||||||
|
|
||||||
var wallets []*wallet.Wallet
|
var wallets []*wallet.Wallet
|
||||||
var letter string
|
var letter string
|
||||||
for i := 0; i < constants.MaxAlphabetNodes; i++ {
|
for i := range constants.MaxAlphabetNodes {
|
||||||
letter = innerring.GlagoliticLetter(i).String()
|
letter = innerring.GlagoliticLetter(i).String()
|
||||||
p := filepath.Join(walletDir, letter+".json")
|
p := filepath.Join(walletDir, letter+".json")
|
||||||
var w *wallet.Wallet
|
var w *wallet.Wallet
|
||||||
|
|
|
@ -113,7 +113,7 @@ func generateTestData(dir string, size int) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
var pubs []string
|
var pubs []string
|
||||||
for i := 0; i < size; i++ {
|
for i := range size {
|
||||||
p := filepath.Join(dir, innerring.GlagoliticLetter(i).String()+".json")
|
p := filepath.Join(dir, innerring.GlagoliticLetter(i).String()+".json")
|
||||||
w, err := wallet.NewWalletFromFile(p)
|
w, err := wallet.NewWalletFromFile(p)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -148,7 +148,7 @@ func generateTestData(dir string, size int) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
func setTestCredentials(v *viper.Viper, size int) {
|
func setTestCredentials(v *viper.Viper, size int) {
|
||||||
for i := 0; i < size; i++ {
|
for i := range size {
|
||||||
v.Set("credentials."+innerring.GlagoliticLetter(i).String(), strconv.FormatUint(uint64(i), 10))
|
v.Set("credentials."+innerring.GlagoliticLetter(i).String(), strconv.FormatUint(uint64(i), 10))
|
||||||
}
|
}
|
||||||
v.Set("credentials.contract", constants.TestContractPassword)
|
v.Set("credentials.contract", constants.TestContractPassword)
|
||||||
|
|
|
@ -692,7 +692,7 @@ func SearchObjects(ctx context.Context, prm SearchObjectsPrm) (*SearchObjectsRes
|
||||||
|
|
||||||
for {
|
for {
|
||||||
n, ok = rdr.Read(buf)
|
n, ok = rdr.Read(buf)
|
||||||
for i := 0; i < n; i++ {
|
for i := range n {
|
||||||
list = append(list, buf[i])
|
list = append(list, buf[i])
|
||||||
}
|
}
|
||||||
if !ok {
|
if !ok {
|
||||||
|
|
|
@ -139,7 +139,7 @@ It will be stored in sidechain when inner ring will accepts it.`,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
for i := 0; i < awaitTimeout; i++ {
|
for range awaitTimeout {
|
||||||
time.Sleep(1 * time.Second)
|
time.Sleep(1 * time.Second)
|
||||||
|
|
||||||
_, err := internalclient.GetContainer(cmd.Context(), getPrm)
|
_, err := internalclient.GetContainer(cmd.Context(), getPrm)
|
||||||
|
|
|
@ -110,7 +110,7 @@ Only owner of the container has a permission to remove container.`,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
for i := 0; i < awaitTimeout; i++ {
|
for range awaitTimeout {
|
||||||
time.Sleep(1 * time.Second)
|
time.Sleep(1 * time.Second)
|
||||||
|
|
||||||
_, err := internalclient.GetContainer(cmd.Context(), getPrm)
|
_, err := internalclient.GetContainer(cmd.Context(), getPrm)
|
||||||
|
|
|
@ -195,7 +195,7 @@ func flattenComplexMembersIfECContainer(cmd *cobra.Command, cnrID cid.ID, member
|
||||||
prmHead.SetRawFlag(true) // to get an error instead of whole object
|
prmHead.SetRawFlag(true) // to get an error instead of whole object
|
||||||
|
|
||||||
eg, egCtx := errgroup.WithContext(cmd.Context())
|
eg, egCtx := errgroup.WithContext(cmd.Context())
|
||||||
for idx := 0; idx < len(members); idx++ {
|
for idx := range len(members) {
|
||||||
partObjID := members[idx]
|
partObjID := members[idx]
|
||||||
|
|
||||||
eg.Go(func() error {
|
eg.Go(func() error {
|
||||||
|
|
|
@ -33,7 +33,7 @@ func PrettyPrintTableBACL(cmd *cobra.Command, bacl *acl.Basic) {
|
||||||
fmt.Fprintln(w, strings.Join(bits, "\t"))
|
fmt.Fprintln(w, strings.Join(bits, "\t"))
|
||||||
// Footer
|
// Footer
|
||||||
footer := []string{"X F"}
|
footer := []string{"X F"}
|
||||||
for i := 0; i < 7; i++ {
|
for range 7 {
|
||||||
footer = append(footer, "U S O B")
|
footer = append(footer, "U S O B")
|
||||||
}
|
}
|
||||||
fmt.Fprintln(w, strings.Join(footer, "\t"))
|
fmt.Fprintln(w, strings.Join(footer, "\t"))
|
||||||
|
|
|
@ -121,7 +121,7 @@ func BootstrapAddresses(c *config.Config) (addr network.AddressGroup) {
|
||||||
func Attributes(c *config.Config) (attrs []string) {
|
func Attributes(c *config.Config) (attrs []string) {
|
||||||
const maxAttributes = 100
|
const maxAttributes = 100
|
||||||
|
|
||||||
for i := 0; i < maxAttributes; i++ {
|
for i := range maxAttributes {
|
||||||
attr := config.StringSafe(c.Sub(subsection), attributePrefix+"_"+strconv.Itoa(i))
|
attr := config.StringSafe(c.Sub(subsection), attributePrefix+"_"+strconv.Itoa(i))
|
||||||
if attr == "" {
|
if attr == "" {
|
||||||
return
|
return
|
||||||
|
|
|
@ -157,7 +157,7 @@ var (
|
||||||
)
|
)
|
||||||
|
|
||||||
func waitNotaryDeposit(ctx context.Context, c *cfg, tx util.Uint256) error {
|
func waitNotaryDeposit(ctx context.Context, c *cfg, tx util.Uint256) error {
|
||||||
for i := 0; i < notaryDepositRetriesAmount; i++ {
|
for range notaryDepositRetriesAmount {
|
||||||
c.log.Debug(logs.ClientAttemptToWaitForNotaryDepositTransactionToGetPersisted)
|
c.log.Debug(logs.ClientAttemptToWaitForNotaryDepositTransactionToGetPersisted)
|
||||||
select {
|
select {
|
||||||
case <-ctx.Done():
|
case <-ctx.Done():
|
||||||
|
|
|
@ -237,7 +237,7 @@ func BenchmarkKeyPosition(b *testing.B) {
|
||||||
|
|
||||||
b.ResetTimer()
|
b.ResetTimer()
|
||||||
b.ReportAllocs()
|
b.ReportAllocs()
|
||||||
for i := 0; i < b.N; i++ {
|
for range b.N {
|
||||||
if keyPosition(key, list) != 5 {
|
if keyPosition(key, list) != 5 {
|
||||||
b.FailNow()
|
b.FailNow()
|
||||||
}
|
}
|
||||||
|
|
|
@ -90,7 +90,7 @@ func (s *Server) initNotary(ctx context.Context, deposit depositor, await awaite
|
||||||
}
|
}
|
||||||
|
|
||||||
func awaitNotaryDepositInClient(ctx context.Context, cli *client.Client, txHash util.Uint256) error {
|
func awaitNotaryDepositInClient(ctx context.Context, cli *client.Client, txHash util.Uint256) error {
|
||||||
for i := 0; i < notaryDepositTimeout; i++ {
|
for range notaryDepositTimeout {
|
||||||
select {
|
select {
|
||||||
case <-ctx.Done():
|
case <-ctx.Done():
|
||||||
return ctx.Err()
|
return ctx.Err()
|
||||||
|
|
|
@ -21,7 +21,7 @@ func TestProcessorEmitsGasToNetmapAndAlphabet(t *testing.T) {
|
||||||
var parsedWallets []util.Uint160 = []util.Uint160{{20}, {25}}
|
var parsedWallets []util.Uint160 = []util.Uint160{{20}, {25}}
|
||||||
|
|
||||||
alphabetContracts := innerring.NewAlphabetContracts()
|
alphabetContracts := innerring.NewAlphabetContracts()
|
||||||
for i := 0; i <= index; i++ {
|
for i := range index + 1 {
|
||||||
alphabetContracts[innerring.GlagoliticLetter(i)] = util.Uint160{uint8(i)}
|
alphabetContracts[innerring.GlagoliticLetter(i)] = util.Uint160{uint8(i)}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -98,7 +98,7 @@ func TestProcessorEmitsGasToNetmapIfNoParsedWallets(t *testing.T) {
|
||||||
var parsedWallets []util.Uint160 = []util.Uint160{}
|
var parsedWallets []util.Uint160 = []util.Uint160{}
|
||||||
|
|
||||||
alphabetContracts := innerring.NewAlphabetContracts()
|
alphabetContracts := innerring.NewAlphabetContracts()
|
||||||
for i := 0; i <= index; i++ {
|
for i := range index + 1 {
|
||||||
alphabetContracts[innerring.GlagoliticLetter(i)] = util.Uint160{uint8(i)}
|
alphabetContracts[innerring.GlagoliticLetter(i)] = util.Uint160{uint8(i)}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -170,7 +170,7 @@ func TestProcessorDoesntEmitGasIfNoNetmapOrParsedWallets(t *testing.T) {
|
||||||
var parsedWallets []util.Uint160 = []util.Uint160{}
|
var parsedWallets []util.Uint160 = []util.Uint160{}
|
||||||
|
|
||||||
alphabetContracts := innerring.NewAlphabetContracts()
|
alphabetContracts := innerring.NewAlphabetContracts()
|
||||||
for i := 0; i <= index; i++ {
|
for i := range index + 1 {
|
||||||
alphabetContracts[innerring.GlagoliticLetter(i)] = util.Uint160{uint8(i)}
|
alphabetContracts[innerring.GlagoliticLetter(i)] = util.Uint160{uint8(i)}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -190,7 +190,7 @@ func generateTestKeys(t *testing.T) testKeys {
|
||||||
for {
|
for {
|
||||||
var result testKeys
|
var result testKeys
|
||||||
|
|
||||||
for i := 0; i < 4; i++ {
|
for range 4 {
|
||||||
pk, err := keys.NewPrivateKey()
|
pk, err := keys.NewPrivateKey()
|
||||||
require.NoError(t, err, "failed to create private key")
|
require.NoError(t, err, "failed to create private key")
|
||||||
result.sidechainKeys = append(result.sidechainKeys, pk.PublicKey())
|
result.sidechainKeys = append(result.sidechainKeys, pk.PublicKey())
|
||||||
|
|
|
@ -49,7 +49,7 @@ func TestNewAlphabetList(t *testing.T) {
|
||||||
}
|
}
|
||||||
ln := len(rounds)
|
ln := len(rounds)
|
||||||
|
|
||||||
for i := 0; i < ln; i++ {
|
for i := range ln {
|
||||||
list, err = newAlphabetList(list, exp)
|
list, err = newAlphabetList(list, exp)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.True(t, equalPublicKeyLists(list, rounds[i]))
|
require.True(t, equalPublicKeyLists(list, rounds[i]))
|
||||||
|
@ -131,7 +131,7 @@ func TestUpdateInnerRing(t *testing.T) {
|
||||||
func generateKeys(n int) (keys.PublicKeys, error) {
|
func generateKeys(n int) (keys.PublicKeys, error) {
|
||||||
pubKeys := make(keys.PublicKeys, 0, n)
|
pubKeys := make(keys.PublicKeys, 0, n)
|
||||||
|
|
||||||
for i := 0; i < n; i++ {
|
for range n {
|
||||||
privKey, err := keys.NewPrivateKey()
|
privKey, err := keys.NewPrivateKey()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
|
|
|
@ -42,7 +42,7 @@ func TestSizes(t *testing.T) {
|
||||||
func BenchmarkUpperBound(b *testing.B) {
|
func BenchmarkUpperBound(b *testing.B) {
|
||||||
for _, size := range []uint64{1, 1023, 65 * 1024, 40 * 1024 * 1024} {
|
for _, size := range []uint64{1, 1023, 65 * 1024, 40 * 1024 * 1024} {
|
||||||
b.Run(fmt.Sprintf("size=%d", size), func(b *testing.B) {
|
b.Run(fmt.Sprintf("size=%d", size), func(b *testing.B) {
|
||||||
for i := 0; i < b.N; i++ {
|
for range b.N {
|
||||||
_ = upperPowerOfTwo(size)
|
_ = upperPowerOfTwo(size)
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
|
@ -34,7 +34,7 @@ func TestBlobovniczaTree_Concurrency(t *testing.T) {
|
||||||
|
|
||||||
var cnt atomic.Int64
|
var cnt atomic.Int64
|
||||||
var wg sync.WaitGroup
|
var wg sync.WaitGroup
|
||||||
for i := 0; i < 1000; i++ {
|
for range 1000 {
|
||||||
wg.Add(1)
|
wg.Add(1)
|
||||||
go func() {
|
go func() {
|
||||||
defer wg.Done()
|
defer wg.Done()
|
||||||
|
|
|
@ -127,7 +127,7 @@ func testBlobovniczaTreeRebuildHelper(t *testing.T, sourceDepth, sourceWidth, ta
|
||||||
eg, egCtx := errgroup.WithContext(context.Background())
|
eg, egCtx := errgroup.WithContext(context.Background())
|
||||||
storageIDs := make(map[oid.Address][]byte)
|
storageIDs := make(map[oid.Address][]byte)
|
||||||
storageIDsGuard := &sync.Mutex{}
|
storageIDsGuard := &sync.Mutex{}
|
||||||
for i := 0; i < 100; i++ {
|
for range 100 {
|
||||||
eg.Go(func() error {
|
eg.Go(func() error {
|
||||||
obj := blobstortest.NewObject(1024)
|
obj := blobstortest.NewObject(1024)
|
||||||
data, err := obj.Marshal()
|
data, err := obj.Marshal()
|
||||||
|
|
|
@ -60,7 +60,7 @@ func TestCompression(t *testing.T) {
|
||||||
|
|
||||||
bigObj := make([]*objectSDK.Object, objCount)
|
bigObj := make([]*objectSDK.Object, objCount)
|
||||||
smallObj := make([]*objectSDK.Object, objCount)
|
smallObj := make([]*objectSDK.Object, objCount)
|
||||||
for i := 0; i < objCount; i++ {
|
for i := range objCount {
|
||||||
bigObj[i] = testObject(smallSizeLimit * 2)
|
bigObj[i] = testObject(smallSizeLimit * 2)
|
||||||
smallObj[i] = testObject(smallSizeLimit / 2)
|
smallObj[i] = testObject(smallSizeLimit / 2)
|
||||||
}
|
}
|
||||||
|
@ -219,7 +219,7 @@ func TestConcurrentPut(t *testing.T) {
|
||||||
bigObj := testObject(smallSizeLimit * 2)
|
bigObj := testObject(smallSizeLimit * 2)
|
||||||
|
|
||||||
var wg sync.WaitGroup
|
var wg sync.WaitGroup
|
||||||
for i := 0; i < concurrentPutCount; i++ {
|
for range concurrentPutCount {
|
||||||
wg.Add(1)
|
wg.Add(1)
|
||||||
go func() {
|
go func() {
|
||||||
testPut(t, blobStor, bigObj)
|
testPut(t, blobStor, bigObj)
|
||||||
|
@ -235,7 +235,7 @@ func TestConcurrentPut(t *testing.T) {
|
||||||
bigObj := testObject(smallSizeLimit * 2)
|
bigObj := testObject(smallSizeLimit * 2)
|
||||||
|
|
||||||
var wg sync.WaitGroup
|
var wg sync.WaitGroup
|
||||||
for i := 0; i < concurrentPutCount+1; i++ {
|
for range concurrentPutCount + 1 {
|
||||||
wg.Add(1)
|
wg.Add(1)
|
||||||
go func() {
|
go func() {
|
||||||
testPutFileExistsError(t, blobStor, bigObj)
|
testPutFileExistsError(t, blobStor, bigObj)
|
||||||
|
@ -251,7 +251,7 @@ func TestConcurrentPut(t *testing.T) {
|
||||||
smallObj := testObject(smallSizeLimit / 2)
|
smallObj := testObject(smallSizeLimit / 2)
|
||||||
|
|
||||||
var wg sync.WaitGroup
|
var wg sync.WaitGroup
|
||||||
for i := 0; i < concurrentPutCount; i++ {
|
for range concurrentPutCount {
|
||||||
wg.Add(1)
|
wg.Add(1)
|
||||||
go func() {
|
go func() {
|
||||||
testPut(t, blobStor, smallObj)
|
testPut(t, blobStor, smallObj)
|
||||||
|
@ -302,7 +302,7 @@ func TestConcurrentDelete(t *testing.T) {
|
||||||
testPut(t, blobStor, bigObj)
|
testPut(t, blobStor, bigObj)
|
||||||
|
|
||||||
var wg sync.WaitGroup
|
var wg sync.WaitGroup
|
||||||
for i := 0; i < 2; i++ {
|
for range 2 {
|
||||||
wg.Add(1)
|
wg.Add(1)
|
||||||
go func() {
|
go func() {
|
||||||
testDelete(t, blobStor, bigObj)
|
testDelete(t, blobStor, bigObj)
|
||||||
|
@ -319,7 +319,7 @@ func TestConcurrentDelete(t *testing.T) {
|
||||||
testPut(t, blobStor, smallObj)
|
testPut(t, blobStor, smallObj)
|
||||||
|
|
||||||
var wg sync.WaitGroup
|
var wg sync.WaitGroup
|
||||||
for i := 0; i < 2; i++ {
|
for range 2 {
|
||||||
wg.Add(1)
|
wg.Add(1)
|
||||||
go func() {
|
go func() {
|
||||||
testDelete(t, blobStor, smallObj)
|
testDelete(t, blobStor, smallObj)
|
||||||
|
|
|
@ -36,7 +36,7 @@ func BenchmarkCompression(b *testing.B) {
|
||||||
func benchWith(b *testing.B, c Config, data []byte) {
|
func benchWith(b *testing.B, c Config, data []byte) {
|
||||||
b.ResetTimer()
|
b.ResetTimer()
|
||||||
b.ReportAllocs()
|
b.ReportAllocs()
|
||||||
for i := 0; i < b.N; i++ {
|
for range b.N {
|
||||||
_ = c.Compress(data)
|
_ = c.Compress(data)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -28,7 +28,7 @@ func Benchmark_addressFromString(b *testing.B) {
|
||||||
|
|
||||||
b.ReportAllocs()
|
b.ReportAllocs()
|
||||||
b.ResetTimer()
|
b.ResetTimer()
|
||||||
for i := 0; i < b.N; i++ {
|
for range b.N {
|
||||||
_, err := addressFromString(s)
|
_, err := addressFromString(s)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
b.Fatalf("benchmark error: %v", err)
|
b.Fatalf("benchmark error: %v", err)
|
||||||
|
@ -73,7 +73,7 @@ func TestObjectCounter(t *testing.T) {
|
||||||
eg, egCtx := errgroup.WithContext(context.Background())
|
eg, egCtx := errgroup.WithContext(context.Background())
|
||||||
|
|
||||||
eg.Go(func() error {
|
eg.Go(func() error {
|
||||||
for j := 0; j < 1_000; j++ {
|
for range 1_000 {
|
||||||
_, err := fst.Put(egCtx, putPrm)
|
_, err := fst.Put(egCtx, putPrm)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
|
@ -84,7 +84,7 @@ func TestObjectCounter(t *testing.T) {
|
||||||
|
|
||||||
eg.Go(func() error {
|
eg.Go(func() error {
|
||||||
var le logicerr.Logical
|
var le logicerr.Logical
|
||||||
for j := 0; j < 1_000; j++ {
|
for range 1_000 {
|
||||||
_, err := fst.Delete(egCtx, delPrm)
|
_, err := fst.Delete(egCtx, delPrm)
|
||||||
if err != nil && !errors.As(err, &le) {
|
if err != nil && !errors.As(err, &le) {
|
||||||
return err
|
return err
|
||||||
|
|
|
@ -110,7 +110,7 @@ func BenchmarkSubstorageReadPerf(b *testing.B) {
|
||||||
|
|
||||||
// Fill database
|
// Fill database
|
||||||
var errG errgroup.Group
|
var errG errgroup.Group
|
||||||
for i := 0; i < tt.size; i++ {
|
for range tt.size {
|
||||||
obj := objGen.Next()
|
obj := objGen.Next()
|
||||||
addr := testutil.AddressFromObject(b, obj)
|
addr := testutil.AddressFromObject(b, obj)
|
||||||
errG.Go(func() error {
|
errG.Go(func() error {
|
||||||
|
@ -203,7 +203,7 @@ func BenchmarkSubstorageIteratePerf(b *testing.B) {
|
||||||
defer func() { require.NoError(b, st.Close()) }()
|
defer func() { require.NoError(b, st.Close()) }()
|
||||||
|
|
||||||
// Fill database
|
// Fill database
|
||||||
for i := 0; i < tt.size; i++ {
|
for range tt.size {
|
||||||
obj := objGen.Next()
|
obj := objGen.Next()
|
||||||
addr := testutil.AddressFromObject(b, obj)
|
addr := testutil.AddressFromObject(b, obj)
|
||||||
raw, err := obj.Marshal()
|
raw, err := obj.Marshal()
|
||||||
|
|
|
@ -208,7 +208,7 @@ func TestPersistentShardID(t *testing.T) {
|
||||||
require.NoError(t, te.ng.Close(context.Background()))
|
require.NoError(t, te.ng.Close(context.Background()))
|
||||||
|
|
||||||
newTe := newEngineWithErrorThreshold(t, dir, 1)
|
newTe := newEngineWithErrorThreshold(t, dir, 1)
|
||||||
for i := 0; i < len(newTe.shards); i++ {
|
for i := range len(newTe.shards) {
|
||||||
require.Equal(t, te.shards[i].id, newTe.shards[i].id)
|
require.Equal(t, te.shards[i].id, newTe.shards[i].id)
|
||||||
}
|
}
|
||||||
require.NoError(t, newTe.ng.Close(context.Background()))
|
require.NoError(t, newTe.ng.Close(context.Background()))
|
||||||
|
@ -269,7 +269,7 @@ func TestReload(t *testing.T) {
|
||||||
e, currShards := engineWithShards(t, removePath, shardNum)
|
e, currShards := engineWithShards(t, removePath, shardNum)
|
||||||
|
|
||||||
var rcfg ReConfiguration
|
var rcfg ReConfiguration
|
||||||
for i := 0; i < len(currShards)-1; i++ { // without one of the shards
|
for i := range len(currShards) - 1 { // without one of the shards
|
||||||
rcfg.AddShard(currShards[i], nil)
|
rcfg.AddShard(currShards[i], nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -44,7 +44,7 @@ func BenchmarkExists(b *testing.B) {
|
||||||
|
|
||||||
func benchmarkExists(b *testing.B, shardNum int) {
|
func benchmarkExists(b *testing.B, shardNum int) {
|
||||||
shards := make([]*shard.Shard, shardNum)
|
shards := make([]*shard.Shard, shardNum)
|
||||||
for i := 0; i < shardNum; i++ {
|
for i := range shardNum {
|
||||||
shards[i] = testNewShard(b)
|
shards[i] = testNewShard(b)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -52,7 +52,7 @@ func benchmarkExists(b *testing.B, shardNum int) {
|
||||||
defer func() { require.NoError(b, e.Close(context.Background())) }()
|
defer func() { require.NoError(b, e.Close(context.Background())) }()
|
||||||
|
|
||||||
addr := oidtest.Address()
|
addr := oidtest.Address()
|
||||||
for i := 0; i < 100; i++ {
|
for range 100 {
|
||||||
obj := testutil.GenerateObjectWithCID(cidtest.ID())
|
obj := testutil.GenerateObjectWithCID(cidtest.ID())
|
||||||
err := Put(context.Background(), e, obj)
|
err := Put(context.Background(), e, obj)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -62,7 +62,7 @@ func benchmarkExists(b *testing.B, shardNum int) {
|
||||||
|
|
||||||
b.ReportAllocs()
|
b.ReportAllocs()
|
||||||
b.ResetTimer()
|
b.ResetTimer()
|
||||||
for i := 0; i < b.N; i++ {
|
for range b.N {
|
||||||
var shPrm shard.ExistsPrm
|
var shPrm shard.ExistsPrm
|
||||||
shPrm.Address = addr
|
shPrm.Address = addr
|
||||||
shPrm.ParentAddress = oid.Address{}
|
shPrm.ParentAddress = oid.Address{}
|
||||||
|
@ -109,7 +109,7 @@ func (te *testEngineWrapper) setInitializedShards(t testing.TB, shards ...*shard
|
||||||
func (te *testEngineWrapper) setShardsNum(t testing.TB, num int) *testEngineWrapper {
|
func (te *testEngineWrapper) setShardsNum(t testing.TB, num int) *testEngineWrapper {
|
||||||
shards := make([]*shard.Shard, 0, num)
|
shards := make([]*shard.Shard, 0, num)
|
||||||
|
|
||||||
for i := 0; i < num; i++ {
|
for range num {
|
||||||
shards = append(shards, testNewShard(t))
|
shards = append(shards, testNewShard(t))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -117,7 +117,7 @@ func (te *testEngineWrapper) setShardsNum(t testing.TB, num int) *testEngineWrap
|
||||||
}
|
}
|
||||||
|
|
||||||
func (te *testEngineWrapper) setShardsNumOpts(t testing.TB, num int, shardOpts func(id int) []shard.Option) *testEngineWrapper {
|
func (te *testEngineWrapper) setShardsNumOpts(t testing.TB, num int, shardOpts func(id int) []shard.Option) *testEngineWrapper {
|
||||||
for i := 0; i < num; i++ {
|
for i := range num {
|
||||||
opts := shardOpts(i)
|
opts := shardOpts(i)
|
||||||
id, err := te.engine.AddShard(context.Background(), opts...)
|
id, err := te.engine.AddShard(context.Background(), opts...)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
@ -127,7 +127,7 @@ func (te *testEngineWrapper) setShardsNumOpts(t testing.TB, num int, shardOpts f
|
||||||
}
|
}
|
||||||
|
|
||||||
func (te *testEngineWrapper) setShardsNumAdditionalOpts(t testing.TB, num int, shardOpts func(id int) []shard.Option) *testEngineWrapper {
|
func (te *testEngineWrapper) setShardsNumAdditionalOpts(t testing.TB, num int, shardOpts func(id int) []shard.Option) *testEngineWrapper {
|
||||||
for i := 0; i < num; i++ {
|
for i := range num {
|
||||||
defaultOpts := testDefaultShardOptions(t)
|
defaultOpts := testDefaultShardOptions(t)
|
||||||
opts := append(defaultOpts, shardOpts(i)...)
|
opts := append(defaultOpts, shardOpts(i)...)
|
||||||
id, err := te.engine.AddShard(context.Background(), opts...)
|
id, err := te.engine.AddShard(context.Background(), opts...)
|
||||||
|
|
|
@ -61,7 +61,7 @@ func newEngineEvacuate(t *testing.T, shardNum int, objPerShard int) (*StorageEng
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, sh := range ids {
|
for _, sh := range ids {
|
||||||
for i := 0; i < objPerShard; i++ {
|
for range objPerShard {
|
||||||
contID := cidtest.ID()
|
contID := cidtest.ID()
|
||||||
obj := testutil.GenerateObjectWithCID(contID)
|
obj := testutil.GenerateObjectWithCID(contID)
|
||||||
objects = append(objects, obj)
|
objects = append(objects, obj)
|
||||||
|
@ -554,7 +554,7 @@ func TestEvacuateTreesRemote(t *testing.T) {
|
||||||
require.Equal(t, "", st.ErrorMessage(), "invalid final error message")
|
require.Equal(t, "", st.ErrorMessage(), "invalid final error message")
|
||||||
|
|
||||||
expectedTreeOps := make(map[string][]*pilorama.Move)
|
expectedTreeOps := make(map[string][]*pilorama.Move)
|
||||||
for i := 0; i < len(e.shards); i++ {
|
for i := range len(e.shards) {
|
||||||
sourceTrees, err := pilorama.TreeListAll(context.Background(), e.shards[ids[i].String()])
|
sourceTrees, err := pilorama.TreeListAll(context.Background(), e.shards[ids[i].String()])
|
||||||
require.NoError(t, err, "list source trees failed")
|
require.NoError(t, err, "list source trees failed")
|
||||||
require.Len(t, sourceTrees, 3)
|
require.Len(t, sourceTrees, 3)
|
||||||
|
|
|
@ -79,7 +79,7 @@ func TestListWithCursor(t *testing.T) {
|
||||||
expected := make([]object.Info, 0, tt.objectNum)
|
expected := make([]object.Info, 0, tt.objectNum)
|
||||||
got := make([]object.Info, 0, tt.objectNum)
|
got := make([]object.Info, 0, tt.objectNum)
|
||||||
|
|
||||||
for i := 0; i < tt.objectNum; i++ {
|
for range tt.objectNum {
|
||||||
containerID := cidtest.ID()
|
containerID := cidtest.ID()
|
||||||
obj := testutil.GenerateObjectWithCIDWithPayload(containerID, []byte{'a'})
|
obj := testutil.GenerateObjectWithCIDWithPayload(containerID, []byte{'a'})
|
||||||
|
|
||||||
|
|
|
@ -87,7 +87,7 @@ func (e *StorageEngine) RemoveDuplicates(ctx context.Context, prm RemoveDuplicat
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
for i := 0; i < prm.Concurrency; i++ {
|
for range prm.Concurrency {
|
||||||
errG.Go(func() error {
|
errG.Go(func() error {
|
||||||
return e.removeObjects(ctx, ch)
|
return e.removeObjects(ctx, ch)
|
||||||
})
|
})
|
||||||
|
|
|
@ -96,7 +96,7 @@ loop:
|
||||||
require.FailNow(t, "unexpected object was removed", removed[i].addr)
|
require.FailNow(t, "unexpected object was removed", removed[i].addr)
|
||||||
}
|
}
|
||||||
|
|
||||||
for i := 0; i < copyCount; i++ {
|
for i := range copyCount {
|
||||||
if i%3 == 0 {
|
if i%3 == 0 {
|
||||||
require.True(t, removedMask[i], "object %d was expected to be removed", i)
|
require.True(t, removedMask[i], "object %d was expected to be removed", i)
|
||||||
} else {
|
} else {
|
||||||
|
@ -207,7 +207,7 @@ func TestRebalanceExitByContext(t *testing.T) {
|
||||||
}()
|
}()
|
||||||
|
|
||||||
const removeCount = 3
|
const removeCount = 3
|
||||||
for i := 0; i < removeCount-1; i++ {
|
for range removeCount - 1 {
|
||||||
<-deleteCh
|
<-deleteCh
|
||||||
signal <- struct{}{}
|
signal <- struct{}{}
|
||||||
}
|
}
|
||||||
|
|
|
@ -72,7 +72,7 @@ func TestSortShardsByWeight(t *testing.T) {
|
||||||
var shards1 []hashedShard
|
var shards1 []hashedShard
|
||||||
var weights1 []float64
|
var weights1 []float64
|
||||||
var shards2 []hashedShard
|
var shards2 []hashedShard
|
||||||
for i := 0; i < numOfShards; i++ {
|
for i := range numOfShards {
|
||||||
shards1 = append(shards1, hashedShard{
|
shards1 = append(shards1, hashedShard{
|
||||||
hash: uint64(i),
|
hash: uint64(i),
|
||||||
})
|
})
|
||||||
|
|
|
@ -34,7 +34,7 @@ func benchmarkTreeVsSearch(b *testing.B, objCount int) {
|
||||||
d := pilorama.CIDDescriptor{CID: cid, Position: 0, Size: 1}
|
d := pilorama.CIDDescriptor{CID: cid, Position: 0, Size: 1}
|
||||||
treeID := "someTree"
|
treeID := "someTree"
|
||||||
|
|
||||||
for i := 0; i < objCount; i++ {
|
for i := range objCount {
|
||||||
obj := testutil.GenerateObjectWithCID(cid)
|
obj := testutil.GenerateObjectWithCID(cid)
|
||||||
testutil.AddAttribute(obj, pilorama.AttributeFilename, strconv.Itoa(i))
|
testutil.AddAttribute(obj, pilorama.AttributeFilename, strconv.Itoa(i))
|
||||||
err := Put(context.Background(), te.ng, obj)
|
err := Put(context.Background(), te.ng, obj)
|
||||||
|
@ -56,7 +56,7 @@ func benchmarkTreeVsSearch(b *testing.B, objCount int) {
|
||||||
fs.AddFilter(pilorama.AttributeFilename, strconv.Itoa(objCount/2), objectSDK.MatchStringEqual)
|
fs.AddFilter(pilorama.AttributeFilename, strconv.Itoa(objCount/2), objectSDK.MatchStringEqual)
|
||||||
prm.WithFilters(fs)
|
prm.WithFilters(fs)
|
||||||
|
|
||||||
for i := 0; i < b.N; i++ {
|
for range b.N {
|
||||||
res, err := te.ng.Select(context.Background(), prm)
|
res, err := te.ng.Select(context.Background(), prm)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
b.Fatal(err)
|
b.Fatal(err)
|
||||||
|
@ -67,7 +67,7 @@ func benchmarkTreeVsSearch(b *testing.B, objCount int) {
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
b.Run("TreeGetByPath", func(b *testing.B) {
|
b.Run("TreeGetByPath", func(b *testing.B) {
|
||||||
for i := 0; i < b.N; i++ {
|
for range b.N {
|
||||||
nodes, err := te.ng.TreeGetByPath(context.Background(), cid, treeID, pilorama.AttributeFilename, []string{strconv.Itoa(objCount / 2)}, true)
|
nodes, err := te.ng.TreeGetByPath(context.Background(), cid, treeID, pilorama.AttributeFilename, []string{strconv.Itoa(objCount / 2)}, true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
b.Fatal(err)
|
b.Fatal(err)
|
||||||
|
|
|
@ -13,7 +13,7 @@ func TestOverwriteObjGenerator(t *testing.T) {
|
||||||
ObjSize: 10,
|
ObjSize: 10,
|
||||||
MaxObjects: 4,
|
MaxObjects: 4,
|
||||||
}
|
}
|
||||||
for i := 0; i < 40; i++ {
|
for range 40 {
|
||||||
obj := gen.Next()
|
obj := gen.Next()
|
||||||
id, isSet := obj.ID()
|
id, isSet := obj.ID()
|
||||||
i := binary.LittleEndian.Uint64(id[:])
|
i := binary.LittleEndian.Uint64(id[:])
|
||||||
|
@ -26,7 +26,7 @@ func TestOverwriteObjGenerator(t *testing.T) {
|
||||||
|
|
||||||
func TestRandObjGenerator(t *testing.T) {
|
func TestRandObjGenerator(t *testing.T) {
|
||||||
gen := &RandObjGenerator{ObjSize: 10}
|
gen := &RandObjGenerator{ObjSize: 10}
|
||||||
for i := 0; i < 10; i++ {
|
for range 10 {
|
||||||
obj := gen.Next()
|
obj := gen.Next()
|
||||||
|
|
||||||
require.Equal(t, gen.ObjSize, uint64(len(obj.Payload())))
|
require.Equal(t, gen.ObjSize, uint64(len(obj.Payload())))
|
||||||
|
@ -50,7 +50,7 @@ func TestSeqObjGenerator(t *testing.T) {
|
||||||
|
|
||||||
func TestRandAddrGenerator(t *testing.T) {
|
func TestRandAddrGenerator(t *testing.T) {
|
||||||
gen := RandAddrGenerator(5)
|
gen := RandAddrGenerator(5)
|
||||||
for i := 0; i < 50; i++ {
|
for range 50 {
|
||||||
addr := gen.Next()
|
addr := gen.Next()
|
||||||
id := addr.Object()
|
id := addr.Object()
|
||||||
k := binary.LittleEndian.Uint64(id[:])
|
k := binary.LittleEndian.Uint64(id[:])
|
||||||
|
|
|
@ -24,7 +24,7 @@ func TestDB_Containers(t *testing.T) {
|
||||||
|
|
||||||
cids := make(map[string]int, N)
|
cids := make(map[string]int, N)
|
||||||
|
|
||||||
for i := 0; i < N; i++ {
|
for range N {
|
||||||
obj := testutil.GenerateObject()
|
obj := testutil.GenerateObject()
|
||||||
|
|
||||||
cnr, _ := obj.ContainerID()
|
cnr, _ := obj.ContainerID()
|
||||||
|
@ -95,7 +95,7 @@ func TestDB_ContainersCount(t *testing.T) {
|
||||||
expected := make([]cid.ID, 0, R+T+SG+L)
|
expected := make([]cid.ID, 0, R+T+SG+L)
|
||||||
|
|
||||||
for _, upload := range uploadObjects {
|
for _, upload := range uploadObjects {
|
||||||
for i := 0; i < upload.amount; i++ {
|
for range upload.amount {
|
||||||
obj := testutil.GenerateObject()
|
obj := testutil.GenerateObject()
|
||||||
obj.SetType(upload.typ)
|
obj.SetType(upload.typ)
|
||||||
|
|
||||||
|
@ -126,11 +126,11 @@ func TestDB_ContainerSize(t *testing.T) {
|
||||||
cids := make(map[cid.ID]int, C)
|
cids := make(map[cid.ID]int, C)
|
||||||
objs := make(map[cid.ID][]*objectSDK.Object, C*N)
|
objs := make(map[cid.ID][]*objectSDK.Object, C*N)
|
||||||
|
|
||||||
for i := 0; i < C; i++ {
|
for range C {
|
||||||
cnr := cidtest.ID()
|
cnr := cidtest.ID()
|
||||||
cids[cnr] = 0
|
cids[cnr] = 0
|
||||||
|
|
||||||
for j := 0; j < N; j++ {
|
for range N {
|
||||||
size := rand.Intn(1024)
|
size := rand.Intn(1024)
|
||||||
|
|
||||||
parent := testutil.GenerateObjectWithCID(cnr)
|
parent := testutil.GenerateObjectWithCID(cnr)
|
||||||
|
|
|
@ -39,14 +39,14 @@ func TestCounters(t *testing.T) {
|
||||||
db := newDB(t)
|
db := newDB(t)
|
||||||
defer func() { require.NoError(t, db.Close()) }()
|
defer func() { require.NoError(t, db.Close()) }()
|
||||||
oo := make([]*objectSDK.Object, 0, objCount)
|
oo := make([]*objectSDK.Object, 0, objCount)
|
||||||
for i := 0; i < objCount; i++ {
|
for range objCount {
|
||||||
oo = append(oo, testutil.GenerateObject())
|
oo = append(oo, testutil.GenerateObject())
|
||||||
}
|
}
|
||||||
|
|
||||||
var prm meta.PutPrm
|
var prm meta.PutPrm
|
||||||
exp := make(map[cid.ID]meta.ObjectCounters)
|
exp := make(map[cid.ID]meta.ObjectCounters)
|
||||||
|
|
||||||
for i := 0; i < objCount; i++ {
|
for i := range objCount {
|
||||||
prm.SetObject(oo[i])
|
prm.SetObject(oo[i])
|
||||||
cnrID, _ := oo[i].ContainerID()
|
cnrID, _ := oo[i].ContainerID()
|
||||||
c := meta.ObjectCounters{}
|
c := meta.ObjectCounters{}
|
||||||
|
@ -187,7 +187,7 @@ func TestCounters(t *testing.T) {
|
||||||
|
|
||||||
// put objects and check that parent info
|
// put objects and check that parent info
|
||||||
// does not affect the counter
|
// does not affect the counter
|
||||||
for i := 0; i < objCount; i++ {
|
for i := range objCount {
|
||||||
o := testutil.GenerateObject()
|
o := testutil.GenerateObject()
|
||||||
if i < objCount/2 { // half of the objs will have the parent
|
if i < objCount/2 { // half of the objs will have the parent
|
||||||
o.SetParent(parObj)
|
o.SetParent(parObj)
|
||||||
|
@ -535,7 +535,7 @@ func putObjs(t *testing.T, db *meta.DB, count int, withParent bool) []*objectSDK
|
||||||
parent := testutil.GenerateObject()
|
parent := testutil.GenerateObject()
|
||||||
|
|
||||||
oo := make([]*objectSDK.Object, 0, count)
|
oo := make([]*objectSDK.Object, 0, count)
|
||||||
for i := 0; i < count; i++ {
|
for i := range count {
|
||||||
o := testutil.GenerateObject()
|
o := testutil.GenerateObject()
|
||||||
if withParent {
|
if withParent {
|
||||||
o.SetParent(parent)
|
o.SetParent(parent)
|
||||||
|
|
|
@ -131,7 +131,7 @@ func TestDelete(t *testing.T) {
|
||||||
defer func() { require.NoError(t, db.Close()) }()
|
defer func() { require.NoError(t, db.Close()) }()
|
||||||
|
|
||||||
cnr := cidtest.ID()
|
cnr := cidtest.ID()
|
||||||
for i := 0; i < 10; i++ {
|
for range 10 {
|
||||||
obj := testutil.GenerateObjectWithCID(cnr)
|
obj := testutil.GenerateObjectWithCID(cnr)
|
||||||
|
|
||||||
var prm meta.PutPrm
|
var prm meta.PutPrm
|
||||||
|
|
|
@ -223,7 +223,7 @@ func benchmarkGet(b *testing.B, numOfObj int) {
|
||||||
defer func() { require.NoError(b, db.Close()) }()
|
defer func() { require.NoError(b, db.Close()) }()
|
||||||
addrs := make([]oid.Address, 0, numOfObj)
|
addrs := make([]oid.Address, 0, numOfObj)
|
||||||
|
|
||||||
for i := 0; i < numOfObj; i++ {
|
for range numOfObj {
|
||||||
raw := testutil.GenerateObject()
|
raw := testutil.GenerateObject()
|
||||||
addrs = append(addrs, object.AddressOf(raw))
|
addrs = append(addrs, object.AddressOf(raw))
|
||||||
|
|
||||||
|
@ -261,7 +261,7 @@ func benchmarkGet(b *testing.B, numOfObj int) {
|
||||||
|
|
||||||
b.Run("serial", func(b *testing.B) {
|
b.Run("serial", func(b *testing.B) {
|
||||||
b.ReportAllocs()
|
b.ReportAllocs()
|
||||||
for i := 0; i < b.N; i++ {
|
for i := range b.N {
|
||||||
var getPrm meta.GetPrm
|
var getPrm meta.GetPrm
|
||||||
getPrm.SetAddress(addrs[i%len(addrs)])
|
getPrm.SetAddress(addrs[i%len(addrs)])
|
||||||
|
|
||||||
|
|
|
@ -35,7 +35,7 @@ func listWithCursorPrepareDB(b *testing.B) *meta.DB {
|
||||||
defer func() { require.NoError(b, db.Close()) }()
|
defer func() { require.NoError(b, db.Close()) }()
|
||||||
|
|
||||||
obj := testutil.GenerateObject()
|
obj := testutil.GenerateObject()
|
||||||
for i := 0; i < 100_000; i++ { // should be a multiple of all batch sizes
|
for i := range 100_000 { // should be a multiple of all batch sizes
|
||||||
obj.SetID(oidtest.ID())
|
obj.SetID(oidtest.ID())
|
||||||
if i%9 == 0 { // let's have 9 objects per container
|
if i%9 == 0 { // let's have 9 objects per container
|
||||||
obj.SetContainerID(cidtest.ID())
|
obj.SetContainerID(cidtest.ID())
|
||||||
|
@ -51,7 +51,7 @@ func benchmarkListWithCursor(b *testing.B, db *meta.DB, batchSize int) {
|
||||||
|
|
||||||
b.ResetTimer()
|
b.ResetTimer()
|
||||||
b.ReportAllocs()
|
b.ReportAllocs()
|
||||||
for i := 0; i < b.N; i++ {
|
for range b.N {
|
||||||
res, err := db.ListWithCursor(context.Background(), prm)
|
res, err := db.ListWithCursor(context.Background(), prm)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if err != meta.ErrEndOfListing {
|
if err != meta.ErrEndOfListing {
|
||||||
|
@ -80,7 +80,7 @@ func TestLisObjectsWithCursor(t *testing.T) {
|
||||||
expected := make([]object.Info, 0, total)
|
expected := make([]object.Info, 0, total)
|
||||||
|
|
||||||
// fill metabase with objects
|
// fill metabase with objects
|
||||||
for i := 0; i < containers; i++ {
|
for range containers {
|
||||||
containerID := cidtest.ID()
|
containerID := cidtest.ID()
|
||||||
|
|
||||||
// add one regular object
|
// add one regular object
|
||||||
|
@ -140,7 +140,7 @@ func TestLisObjectsWithCursor(t *testing.T) {
|
||||||
expectedIterations--
|
expectedIterations--
|
||||||
}
|
}
|
||||||
|
|
||||||
for i := 0; i < expectedIterations; i++ {
|
for range expectedIterations {
|
||||||
res, cursor, err = metaListWithCursor(db, uint32(countPerReq), cursor)
|
res, cursor, err = metaListWithCursor(db, uint32(countPerReq), cursor)
|
||||||
require.NoError(t, err, "count:%d", countPerReq)
|
require.NoError(t, err, "count:%d", countPerReq)
|
||||||
got = append(got, res...)
|
got = append(got, res...)
|
||||||
|
@ -169,7 +169,7 @@ func TestAddObjectDuringListingWithCursor(t *testing.T) {
|
||||||
expected := make(map[string]int, total)
|
expected := make(map[string]int, total)
|
||||||
|
|
||||||
// fill metabase with objects
|
// fill metabase with objects
|
||||||
for i := 0; i < total; i++ {
|
for range total {
|
||||||
obj := testutil.GenerateObject()
|
obj := testutil.GenerateObject()
|
||||||
err := putBig(db, obj)
|
err := putBig(db, obj)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
@ -186,7 +186,7 @@ func TestAddObjectDuringListingWithCursor(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// add new objects
|
// add new objects
|
||||||
for i := 0; i < total; i++ {
|
for range total {
|
||||||
obj := testutil.GenerateObject()
|
obj := testutil.GenerateObject()
|
||||||
err = putBig(db, obj)
|
err = putBig(db, obj)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
|
@ -155,7 +155,7 @@ func TestDB_Lock(t *testing.T) {
|
||||||
|
|
||||||
inhumePrm.SetGCMark()
|
inhumePrm.SetGCMark()
|
||||||
|
|
||||||
for i := 0; i < objsNum; i++ {
|
for i := range objsNum {
|
||||||
inhumePrm.SetAddresses(objectcore.AddressOf(objs[i]))
|
inhumePrm.SetAddresses(objectcore.AddressOf(objs[i]))
|
||||||
|
|
||||||
res, err = db.Inhume(context.Background(), inhumePrm)
|
res, err = db.Inhume(context.Background(), inhumePrm)
|
||||||
|
@ -255,7 +255,7 @@ func putAndLockObj(t *testing.T, db *meta.DB, numOfLockedObjs int) ([]*objectSDK
|
||||||
lockedObjs := make([]*objectSDK.Object, 0, numOfLockedObjs)
|
lockedObjs := make([]*objectSDK.Object, 0, numOfLockedObjs)
|
||||||
lockedObjIDs := make([]oid.ID, 0, numOfLockedObjs)
|
lockedObjIDs := make([]oid.ID, 0, numOfLockedObjs)
|
||||||
|
|
||||||
for i := 0; i < numOfLockedObjs; i++ {
|
for range numOfLockedObjs {
|
||||||
obj := testutil.GenerateObjectWithCID(cnr)
|
obj := testutil.GenerateObjectWithCID(cnr)
|
||||||
err := putBig(db, obj)
|
err := putBig(db, obj)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
|
@ -74,7 +74,7 @@ func BenchmarkPut(b *testing.B) {
|
||||||
objs := prepareObjects(b.N)
|
objs := prepareObjects(b.N)
|
||||||
b.ResetTimer()
|
b.ResetTimer()
|
||||||
b.ReportAllocs()
|
b.ReportAllocs()
|
||||||
for i := 0; i < b.N; i++ {
|
for range b.N {
|
||||||
if err := metaPut(db, objs[index.Add(1)], nil); err != nil {
|
if err := metaPut(db, objs[index.Add(1)], nil); err != nil {
|
||||||
b.Fatal(err)
|
b.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
|
@ -34,7 +34,7 @@ func TestResetDropsContainerBuckets(t *testing.T) {
|
||||||
|
|
||||||
defer func() { require.NoError(t, db.Close()) }()
|
defer func() { require.NoError(t, db.Close()) }()
|
||||||
|
|
||||||
for idx := 0; idx < 100; idx++ {
|
for idx := range 100 {
|
||||||
var putPrm PutPrm
|
var putPrm PutPrm
|
||||||
putPrm.SetObject(testutil.GenerateObject())
|
putPrm.SetObject(testutil.GenerateObject())
|
||||||
putPrm.SetStorageID([]byte(fmt.Sprintf("0/%d", idx)))
|
putPrm.SetStorageID([]byte(fmt.Sprintf("0/%d", idx)))
|
||||||
|
|
|
@ -920,7 +920,7 @@ func TestDB_SelectSplitID_EC(t *testing.T) {
|
||||||
ec, err := erasurecode.NewConstructor(dataCount, parityCount)
|
ec, err := erasurecode.NewConstructor(dataCount, parityCount)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
for i := 0; i < partCount; i++ {
|
for i := range partCount {
|
||||||
cs, err := ec.Split(tt.objects[i], &pk.PrivateKey)
|
cs, err := ec.Split(tt.objects[i], &pk.PrivateKey)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
@ -1070,7 +1070,7 @@ func BenchmarkSelect(b *testing.B) {
|
||||||
|
|
||||||
cid := cidtest.ID()
|
cid := cidtest.ID()
|
||||||
|
|
||||||
for i := 0; i < objCount; i++ {
|
for i := range objCount {
|
||||||
var attr objectSDK.Attribute
|
var attr objectSDK.Attribute
|
||||||
attr.SetKey("myHeader")
|
attr.SetKey("myHeader")
|
||||||
attr.SetValue(strconv.Itoa(i))
|
attr.SetValue(strconv.Itoa(i))
|
||||||
|
@ -1129,7 +1129,7 @@ func benchmarkSelect(b *testing.B, db *meta.DB, cid cidSDK.ID, fs objectSDK.Sear
|
||||||
prm.SetContainerID(cid)
|
prm.SetContainerID(cid)
|
||||||
prm.SetFilters(fs)
|
prm.SetFilters(fs)
|
||||||
|
|
||||||
for i := 0; i < b.N; i++ {
|
for range b.N {
|
||||||
res, err := db.Select(context.Background(), prm)
|
res, err := db.Select(context.Background(), prm)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
b.Fatal(err)
|
b.Fatal(err)
|
||||||
|
|
|
@ -143,7 +143,7 @@ func createExpirationEpochBuckets(ctx context.Context, db *bbolt.DB, log func(a
|
||||||
return selectObjectsWithExpirationEpoch(ctx, db, objects)
|
return selectObjectsWithExpirationEpoch(ctx, db, objects)
|
||||||
})
|
})
|
||||||
var count atomic.Uint64
|
var count atomic.Uint64
|
||||||
for i := 0; i < upgradeWorkersCount; i++ {
|
for range upgradeWorkersCount {
|
||||||
eg.Go(func() error {
|
eg.Go(func() error {
|
||||||
for {
|
for {
|
||||||
select {
|
select {
|
||||||
|
|
|
@ -91,7 +91,7 @@ func TestGenerateMetabaseFile(t *testing.T) {
|
||||||
eg, ctx := errgroup.WithContext(context.Background())
|
eg, ctx := errgroup.WithContext(context.Background())
|
||||||
eg.SetLimit(generateWorkersCount)
|
eg.SetLimit(generateWorkersCount)
|
||||||
// simple objects
|
// simple objects
|
||||||
for i := 0; i < simpleObjectsCount; i++ {
|
for i := range simpleObjectsCount {
|
||||||
i := i
|
i := i
|
||||||
eg.Go(func() error {
|
eg.Go(func() error {
|
||||||
obj := testutil.GenerateObjectWithCID(containers[i%len(containers)])
|
obj := testutil.GenerateObjectWithCID(containers[i%len(containers)])
|
||||||
|
@ -110,7 +110,7 @@ func TestGenerateMetabaseFile(t *testing.T) {
|
||||||
eg, ctx = errgroup.WithContext(context.Background())
|
eg, ctx = errgroup.WithContext(context.Background())
|
||||||
eg.SetLimit(generateWorkersCount)
|
eg.SetLimit(generateWorkersCount)
|
||||||
// complex objects
|
// complex objects
|
||||||
for i := 0; i < complexObjectsCount; i++ {
|
for i := range complexObjectsCount {
|
||||||
i := i
|
i := i
|
||||||
eg.Go(func() error {
|
eg.Go(func() error {
|
||||||
parent := testutil.GenerateObjectWithCID(containers[i%len(containers)])
|
parent := testutil.GenerateObjectWithCID(containers[i%len(containers)])
|
||||||
|
@ -134,7 +134,7 @@ func TestGenerateMetabaseFile(t *testing.T) {
|
||||||
eg, ctx = errgroup.WithContext(context.Background())
|
eg, ctx = errgroup.WithContext(context.Background())
|
||||||
eg.SetLimit(generateWorkersCount)
|
eg.SetLimit(generateWorkersCount)
|
||||||
// simple objects deleted by gc marks
|
// simple objects deleted by gc marks
|
||||||
for i := 0; i < deletedByGCMarksCount; i++ {
|
for i := range deletedByGCMarksCount {
|
||||||
i := i
|
i := i
|
||||||
eg.Go(func() error {
|
eg.Go(func() error {
|
||||||
obj := testutil.GenerateObjectWithCID(containers[i%len(containers)])
|
obj := testutil.GenerateObjectWithCID(containers[i%len(containers)])
|
||||||
|
@ -156,7 +156,7 @@ func TestGenerateMetabaseFile(t *testing.T) {
|
||||||
eg, ctx = errgroup.WithContext(context.Background())
|
eg, ctx = errgroup.WithContext(context.Background())
|
||||||
eg.SetLimit(10000)
|
eg.SetLimit(10000)
|
||||||
// simple objects deleted by tombstones
|
// simple objects deleted by tombstones
|
||||||
for i := 0; i < deletedByTombstoneCount; i++ {
|
for i := range deletedByTombstoneCount {
|
||||||
i := i
|
i := i
|
||||||
eg.Go(func() error {
|
eg.Go(func() error {
|
||||||
obj := testutil.GenerateObjectWithCID(containers[i%len(containers)])
|
obj := testutil.GenerateObjectWithCID(containers[i%len(containers)])
|
||||||
|
@ -186,7 +186,7 @@ func TestGenerateMetabaseFile(t *testing.T) {
|
||||||
eg, ctx = errgroup.WithContext(context.Background())
|
eg, ctx = errgroup.WithContext(context.Background())
|
||||||
eg.SetLimit(generateWorkersCount)
|
eg.SetLimit(generateWorkersCount)
|
||||||
// simple objects locked by locks
|
// simple objects locked by locks
|
||||||
for i := 0; i < lockedCount; i++ {
|
for i := range lockedCount {
|
||||||
i := i
|
i := i
|
||||||
eg.Go(func() error {
|
eg.Go(func() error {
|
||||||
obj := testutil.GenerateObjectWithCID(containers[i%len(containers)])
|
obj := testutil.GenerateObjectWithCID(containers[i%len(containers)])
|
||||||
|
|
|
@ -705,7 +705,7 @@ func (t *boltForest) applyOperation(logBucket, treeBucket *bbolt.Bucket, ms []*M
|
||||||
key, value = c.Prev()
|
key, value = c.Prev()
|
||||||
}
|
}
|
||||||
|
|
||||||
for i := 0; i < len(ms); i++ {
|
for i := range len(ms) {
|
||||||
// Loop invariant: key represents the next stored timestamp after ms[i].Time.
|
// Loop invariant: key represents the next stored timestamp after ms[i].Time.
|
||||||
|
|
||||||
// 2. Insert the operation.
|
// 2. Insert the operation.
|
||||||
|
|
|
@ -194,7 +194,7 @@ func BenchmarkForestSortedIteration(b *testing.B) {
|
||||||
|
|
||||||
const total = 100_000
|
const total = 100_000
|
||||||
d := CIDDescriptor{cnr, 0, 1}
|
d := CIDDescriptor{cnr, 0, 1}
|
||||||
for i := 0; i < total; i++ {
|
for i := range total {
|
||||||
u, err := uuid.NewRandom()
|
u, err := uuid.NewRandom()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
b.FailNow()
|
b.FailNow()
|
||||||
|
@ -216,7 +216,7 @@ func BenchmarkForestSortedIteration(b *testing.B) {
|
||||||
}
|
}
|
||||||
|
|
||||||
b.Run(providers[i].name+",root", func(b *testing.B) {
|
b.Run(providers[i].name+",root", func(b *testing.B) {
|
||||||
for i := 0; i < b.N; i++ {
|
for range b.N {
|
||||||
res, _, err := f.TreeSortedByFilename(context.Background(), cnr, treeID, MultiNode{RootID}, nil, 100)
|
res, _, err := f.TreeSortedByFilename(context.Background(), cnr, treeID, MultiNode{RootID}, nil, 100)
|
||||||
if err != nil || len(res) != 100 {
|
if err != nil || len(res) != 100 {
|
||||||
b.Fatalf("err %v, count %d", err, len(res))
|
b.Fatalf("err %v, count %d", err, len(res))
|
||||||
|
@ -224,7 +224,7 @@ func BenchmarkForestSortedIteration(b *testing.B) {
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
b.Run(providers[i].name+",leaf", func(b *testing.B) {
|
b.Run(providers[i].name+",leaf", func(b *testing.B) {
|
||||||
for i := 0; i < b.N; i++ {
|
for range b.N {
|
||||||
res, _, err := f.TreeSortedByFilename(context.Background(), cnr, treeID, MultiNode{1}, nil, 100)
|
res, _, err := f.TreeSortedByFilename(context.Background(), cnr, treeID, MultiNode{1}, nil, 100)
|
||||||
if err != nil || len(res) != 0 {
|
if err != nil || len(res) != 0 {
|
||||||
b.FailNow()
|
b.FailNow()
|
||||||
|
@ -804,7 +804,7 @@ func testForestApplySameOperation(t *testing.T, constructor func(t testing.TB, _
|
||||||
defer func() { require.NoError(t, s.Close()) }()
|
defer func() { require.NoError(t, s.Close()) }()
|
||||||
|
|
||||||
require.NoError(t, s.TreeApply(ctx, cid, treeID, &logs[0], false))
|
require.NoError(t, s.TreeApply(ctx, cid, treeID, &logs[0], false))
|
||||||
for i := 0; i < batchSize; i++ {
|
for range batchSize {
|
||||||
errG.Go(func() error {
|
errG.Go(func() error {
|
||||||
return s.TreeApply(ctx, cid, treeID, &logs[2], false)
|
return s.TreeApply(ctx, cid, treeID, &logs[2], false)
|
||||||
})
|
})
|
||||||
|
@ -1043,7 +1043,7 @@ func TestForest_ParallelApply(t *testing.T) {
|
||||||
// The operations are guaranteed to be applied and returned sorted by `Time`.
|
// The operations are guaranteed to be applied and returned sorted by `Time`.
|
||||||
func prepareRandomTree(nodeCount, opCount int) []Move {
|
func prepareRandomTree(nodeCount, opCount int) []Move {
|
||||||
ops := make([]Move, nodeCount+opCount)
|
ops := make([]Move, nodeCount+opCount)
|
||||||
for i := 0; i < nodeCount; i++ {
|
for i := range nodeCount {
|
||||||
ops[i] = Move{
|
ops[i] = Move{
|
||||||
Parent: 0,
|
Parent: 0,
|
||||||
Meta: Meta{
|
Meta: Meta{
|
||||||
|
@ -1121,14 +1121,14 @@ func testForestTreeParallelApply(t *testing.T, constructor func(t testing.TB, _
|
||||||
require.NoError(t, expected.TreeApply(context.Background(), cid, treeID, &ops[i], false))
|
require.NoError(t, expected.TreeApply(context.Background(), cid, treeID, &ops[i], false))
|
||||||
}
|
}
|
||||||
|
|
||||||
for i := 0; i < iterCount; i++ {
|
for range iterCount {
|
||||||
// Shuffle random operations, leave initialization in place.
|
// Shuffle random operations, leave initialization in place.
|
||||||
r.Shuffle(len(ops), func(i, j int) { ops[i], ops[j] = ops[j], ops[i] })
|
r.Shuffle(len(ops), func(i, j int) { ops[i], ops[j] = ops[j], ops[i] })
|
||||||
|
|
||||||
actual := constructor(t, WithMaxBatchSize(batchSize), WithNoSync(true))
|
actual := constructor(t, WithMaxBatchSize(batchSize), WithNoSync(true))
|
||||||
wg := new(sync.WaitGroup)
|
wg := new(sync.WaitGroup)
|
||||||
ch := make(chan *Move)
|
ch := make(chan *Move)
|
||||||
for i := 0; i < batchSize; i++ {
|
for range batchSize {
|
||||||
wg.Add(1)
|
wg.Add(1)
|
||||||
go func() {
|
go func() {
|
||||||
defer wg.Done()
|
defer wg.Done()
|
||||||
|
@ -1170,7 +1170,7 @@ func testForestTreeApplyRandom(t *testing.T, constructor func(t testing.TB, _ ..
|
||||||
}
|
}
|
||||||
|
|
||||||
const iterCount = 200
|
const iterCount = 200
|
||||||
for i := 0; i < iterCount; i++ {
|
for range iterCount {
|
||||||
// Shuffle random operations, leave initialization in place.
|
// Shuffle random operations, leave initialization in place.
|
||||||
r.Shuffle(len(ops), func(i, j int) { ops[i], ops[j] = ops[j], ops[i] })
|
r.Shuffle(len(ops), func(i, j int) { ops[i], ops[j] = ops[j], ops[i] })
|
||||||
|
|
||||||
|
@ -1247,7 +1247,7 @@ func BenchmarkApplyReorderLast(b *testing.B) {
|
||||||
Child: uint64(r.Intn(benchNodeCount)),
|
Child: uint64(r.Intn(benchNodeCount)),
|
||||||
}
|
}
|
||||||
if i != 0 && i%blockSize == 0 {
|
if i != 0 && i%blockSize == 0 {
|
||||||
for j := 0; j < blockSize/2; j++ {
|
for j := range blockSize / 2 {
|
||||||
ops[i-j], ops[i+j-blockSize] = ops[i+j-blockSize], ops[i-j]
|
ops[i-j], ops[i+j-blockSize] = ops[i+j-blockSize], ops[i-j]
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1265,7 +1265,7 @@ func benchmarkApply(b *testing.B, s Forest, genFunc func(int) []Move) {
|
||||||
cid := cidtest.ID()
|
cid := cidtest.ID()
|
||||||
treeID := "version"
|
treeID := "version"
|
||||||
ch := make(chan int, b.N)
|
ch := make(chan int, b.N)
|
||||||
for i := 0; i < b.N; i++ {
|
for i := range b.N {
|
||||||
ch <- i
|
ch <- i
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1311,7 +1311,7 @@ func testTreeGetByPath(t *testing.T, s ForestStorage) {
|
||||||
if mf, ok := s.(*memoryForest); ok {
|
if mf, ok := s.(*memoryForest); ok {
|
||||||
single := mf.treeMap[cid.String()+"/"+treeID]
|
single := mf.treeMap[cid.String()+"/"+treeID]
|
||||||
t.Run("test meta", func(t *testing.T) {
|
t.Run("test meta", func(t *testing.T) {
|
||||||
for i := 0; i < 6; i++ {
|
for i := range 6 {
|
||||||
require.Equal(t, uint64(i), single.infoMap[Node(i+1)].Meta.Time)
|
require.Equal(t, uint64(i), single.infoMap[Node(i+1)].Meta.Time)
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
@ -1492,7 +1492,7 @@ func testTreeListTreesCount(t *testing.T, constructor func(t testing.TB, _ ...Op
|
||||||
var expected []ContainerIDTreeID
|
var expected []ContainerIDTreeID
|
||||||
|
|
||||||
treeIDs := []string{"version", "system", "s", "avada kedavra"}
|
treeIDs := []string{"version", "system", "s", "avada kedavra"}
|
||||||
for i := 0; i < count; i++ {
|
for i := range count {
|
||||||
cid := cidtest.ID()
|
cid := cidtest.ID()
|
||||||
treeID := treeIDs[i%len(treeIDs)]
|
treeID := treeIDs[i%len(treeIDs)]
|
||||||
expected = append(expected, ContainerIDTreeID{
|
expected = append(expected, ContainerIDTreeID{
|
||||||
|
|
|
@ -39,11 +39,11 @@ func testShardList(t *testing.T, sh *Shard) {
|
||||||
|
|
||||||
var errG errgroup.Group
|
var errG errgroup.Group
|
||||||
errG.SetLimit(C * N)
|
errG.SetLimit(C * N)
|
||||||
for i := 0; i < C; i++ {
|
for range C {
|
||||||
errG.Go(func() error {
|
errG.Go(func() error {
|
||||||
cnr := cidtest.ID()
|
cnr := cidtest.ID()
|
||||||
|
|
||||||
for j := 0; j < N; j++ {
|
for range N {
|
||||||
errG.Go(func() error {
|
errG.Go(func() error {
|
||||||
obj := testutil.GenerateObjectWithCID(cnr)
|
obj := testutil.GenerateObjectWithCID(cnr)
|
||||||
testutil.AddPayload(obj, 1<<2)
|
testutil.AddPayload(obj, 1<<2)
|
||||||
|
|
|
@ -206,7 +206,7 @@ func TestCounters(t *testing.T) {

 const objNumber = 10
 oo := make([]*objectSDK.Object, objNumber)
-for i := 0; i < objNumber; i++ {
+for i := range objNumber {
 oo[i] = testutil.GenerateObject()
 }

@ -248,7 +248,7 @@ func TestCounters(t *testing.T) {

 var prm PutPrm

-for i := 0; i < objNumber; i++ {
+for i := range objNumber {
 prm.SetObject(oo[i])

 _, err := sh.Put(context.Background(), prm)

@ -269,7 +269,7 @@ func TestCounters(t *testing.T) {
 var prm InhumePrm
 inhumedNumber := objNumber / 4

-for i := 0; i < inhumedNumber; i++ {
+for i := range inhumedNumber {
 prm.MarkAsGarbage(objectcore.AddressOf(oo[i]))

 _, err := sh.Inhume(context.Background(), prm)

@ -317,7 +317,7 @@ func TestCounters(t *testing.T) {
 _, err := sh.Inhume(context.Background(), prm)
 require.NoError(t, err)

-for i := 0; i < inhumedNumber; i++ {
+for i := range inhumedNumber {
 cid, ok := oo[i].ContainerID()
 require.True(t, ok)
 expectedLogicalSizes[cid.EncodeToString()] -= int64(oo[i].PayloadSize())

@ -419,7 +419,7 @@ func shardWithMetrics(t *testing.T, path string) (*Shard, *metricsStore) {
 func addrFromObjs(oo []*objectSDK.Object) []oid.Address {
 aa := make([]oid.Address, len(oo))

-for i := 0; i < len(oo); i++ {
+for i := range len(oo) {
 aa[i] = objectcore.AddressOf(oo[i])
 }

@ -38,7 +38,7 @@ func benchRefillMetabase(b *testing.B, objectsCount int) {

 var putPrm PutPrm

-for i := 0; i < objectsCount/2; i++ {
+for range objectsCount / 2 {
 obj := testutil.GenerateObject()
 testutil.AddAttribute(obj, "foo", "bar")
 testutil.AddPayload(obj, 1<<5) // blobvnicza tree obj

@ -49,7 +49,7 @@ func benchRefillMetabase(b *testing.B, objectsCount int) {
 require.NoError(b, err)
 }

-for i := 0; i < objectsCount/2; i++ {
+for range objectsCount / 2 {
 obj := testutil.GenerateObject()
 testutil.AddAttribute(obj, "foo", "bar")
 obj.SetID(oidtest.ID())

@ -54,7 +54,7 @@ func benchmarkPutSeq(b *testing.B, cache writecache.Cache, size uint64) {
 objGen := testutil.RandObjGenerator{ObjSize: size}

 b.ResetTimer()
-for n := 0; n < b.N; n++ {
+for range b.N {
 obj := objGen.Next()
 rawData, err := obj.Marshal()
 require.NoError(b, err, "marshaling object")
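Benchmark loops convert the same way because b.N is an ordinary int. The sketch below is illustrative only, assuming Go 1.22+; the benchmark name and the measured statement are made up:

package bench_test

import "testing"

func BenchmarkMarshalNoop(b *testing.B) {
	b.ReportAllocs()
	for range b.N { // Go 1.22 form of: for n := 0; n < b.N; n++ {
		_ = len("payload") // stand-in for the operation under measurement
	}
}

Run as usual with: go test -bench . -benchmem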
@ -41,7 +41,7 @@ func (c *cache) runFlushLoop(ctx context.Context) {
 if c.disableBackgroundFlush {
 return
 }
-for i := 0; i < c.workersCount; i++ {
+for range c.workersCount {
 c.wg.Add(1)
 go c.workerFlushSmall(ctx)
 }

@ -439,7 +439,7 @@ func TestPrepare_CorrectNR(t *testing.T) {
 )

 for _, test := range tests {
-for i := 0; i < 1; i++ { // run tests against 3 and 4 witness NR
+for i := range 1 { // run tests against 3 and 4 witness NR
 for _, dummyMultisig := range []bool{true, false} { // run tests against empty and dummy multisig/Notary witness
 additionalWitness := i == 0
 nr := correctNR(script(test.hash, test.method, test.args...), dummyMultisig, additionalWitness)

@ -208,7 +208,7 @@ func TestBlockTimer_TickSameHeight(t *testing.T) {
 require.NoError(t, bt.Reset())

 check := func(t *testing.T, h uint32, base, delta int) {
-for i := 0; i < 2*int(blockDur); i++ {
+for range 2 * int(blockDur) {
 bt.Tick(h)
 require.Equal(t, base, baseCounter)
 require.Equal(t, delta, deltaCounter)

@ -37,7 +37,7 @@ func BenchmarkAddressTLSEnabled(b *testing.B) {
 b.ReportAllocs()

 var enabled bool
-for i := 0; i < b.N; i++ {
+for range b.N {
 enabled = addr.IsTLSEnabled()
 }
 require.True(b, enabled)

@ -169,7 +169,7 @@ func (s *Server) getContainerNodes(contID cid.ID) ([]netmap.NodeInfo, error) {

 nodes := placement.FlattenNodes(ns)
 bs := (*keys.PublicKey)(&s.key.PublicKey).Bytes()
-for i := 0; i < len(nodes); i++ {
+for i := range len(nodes) {
 if bytes.Equal(nodes[i].PublicKey(), bs) {
 copy(nodes[i:], nodes[i+1:])
 nodes = nodes[:len(nodes)-1]

@ -33,7 +33,7 @@ func TestOriginalTokens(t *testing.T) {
 var sTokenV2 session.Token
 sToken.WriteToV2(&sTokenV2)

-for i := 0; i < 10; i++ {
+for i := range 10 {
 metaHeaders := testGenerateMetaHeader(uint32(i), &bTokenV2, &sTokenV2)
 res, err := originalSessionToken(metaHeaders)
 require.NoError(t, err)

@ -470,7 +470,7 @@ func testNodeMatrix(t testing.TB, dim []int) ([][]netmap.NodeInfo, [][]string) {
 ns := make([]netmap.NodeInfo, dim[i])
 as := make([]string, dim[i])

-for j := 0; j < dim[i]; j++ {
+for j := range dim[i] {
 a := fmt.Sprintf("/ip4/192.168.0.%s/tcp/%s",
 strconv.Itoa(i),
 strconv.Itoa(60000+j),

@ -508,7 +508,7 @@ func generateChain(ln int, cnr cid.ID) ([]*objectSDK.Object, []oid.ID, []byte) {
 ids := make([]oid.ID, 0, ln)
 payload := make([]byte, 0, ln*10)

-for i := 0; i < ln; i++ {
+for i := range ln {
 ids = append(ids, curID)
 addr.SetObject(curID)

@ -1750,7 +1750,7 @@ func TestGetRange(t *testing.T) {
 },
 })

-for from := 0; from < totalSize-1; from++ {
+for from := range totalSize - 1 {
 for to := from; to < totalSize; to++ {
 t.Run(fmt.Sprintf("from=%d,to=%d", from, to), func(t *testing.T) {
 testGetRange(t, svc, addr, uint64(from), uint64(to), payload)

@ -1811,7 +1811,7 @@ func TestGetRange(t *testing.T) {
 },
 })

-for from := 0; from < totalSize-1; from++ {
+for from := range totalSize - 1 {
 for to := from; to < totalSize; to++ {
 t.Run(fmt.Sprintf("from=%d,to=%d", from, to), func(t *testing.T) {
 testGetRange(t, svc, addr, uint64(from), uint64(to), payload)

@ -131,7 +131,7 @@ func TestGetRangeEC(t *testing.T) {
 clients: clients,
 })

-for from := 0; from < totalSize-1; from++ {
+for from := range totalSize - 1 {
 for to := from; to < totalSize; to++ {
 t.Run(fmt.Sprintf("from=%d,to=%d", from, to), func(t *testing.T) {
 testGetRange(t, svc, addr, uint64(from), uint64(to), obj.Payload())

@ -276,7 +276,7 @@ func (e *ecWriter) writePart(ctx context.Context, obj *objectSDK.Object, partIdx
 }

 // try to save to any node not visited by current part
-for i := 0; i < len(nodes); i++ {
+for i := range len(nodes) {
 select {
 case <-ctx.Done():
 return ctx.Err()

@ -151,7 +151,7 @@ func testSHA256() (cs [sha256.Size]byte) {
 func generateIDs(num int) []oid.ID {
 res := make([]oid.ID, num)

-for i := 0; i < num; i++ {
+for i := range num {
 res[i].SetSHA256(testSHA256())
 }

@ -232,7 +232,7 @@ func testNodeMatrix(t testing.TB, dim []int) ([][]netmap.NodeInfo, [][]string) {
 ns := make([]netmap.NodeInfo, dim[i])
 as := make([]string, dim[i])

-for j := 0; j < dim[i]; j++ {
+for j := range dim[i] {
 a := fmt.Sprintf("/ip4/192.168.0.%s/tcp/%s",
 strconv.Itoa(i),
 strconv.Itoa(60000+j),

@ -64,7 +64,7 @@ func TestContainerNodesCache(t *testing.T) {
 nm2 := nm(1, nodes[1:2])
 cnr := [size * 2]cid.ID{}
 res := [size * 2][][]netmapSDK.NodeInfo{}
-for i := 0; i < size*2; i++ {
+for i := range size * 2 {
 cnr[i] = cidtest.ID()

 var err error

@ -77,7 +77,7 @@ func TestContainerNodesCache(t *testing.T) {
 require.NoError(t, err)
 require.Equal(t, res[i], r)
 }
-for i := 0; i < size; i++ {
+for i := range size {
 r, err := c.ContainerNodes(nm2, cnr[i], pp)
 require.NoError(t, err)
 require.NotEqual(t, res[i], r)

@ -136,7 +136,7 @@ func defaultCopiesVector(policy netmap.PlacementPolicy) []int {
 replNum := policy.NumberOfReplicas()
 copyVector := make([]int, 0, replNum)

-for i := 0; i < replNum; i++ {
+for i := range replNum {
 copyVector = append(copyVector, int(policy.ReplicaDescriptor(i).NumberOfObjects()+policy.ReplicaDescriptor(i).GetECDataCount()+policy.ReplicaDescriptor(i).GetECParityCount()))
 }

@ -212,7 +212,7 @@ func (t *Traverser) Next() []Node {

 nodes := make([]Node, count)

-for i := 0; i < count; i++ {
+for i := range count {
 err := nodes[i].addresses.FromIterator(network.NodeEndpointsIterator(t.vectors[0][i]))
 if err != nil {
 return nil

@ -48,7 +48,7 @@ func testPlacement(ss, rs []int) ([][]netmap.NodeInfo, container.Container) {
 for i := range ss {
 ns := make([]netmap.NodeInfo, 0, ss[i])

-for j := 0; j < ss[i]; j++ {
+for range ss[i] {
 ns = append(ns, testNode(num))
 num++
 }

@ -125,7 +125,7 @@ func TestTraverserObjectScenarios(t *testing.T) {
 )
 require.NoError(t, err)

-for i := 0; i < len(nodes[0]); i++ {
+for range len(nodes[0]) {
 require.NotNil(t, tr.Next())
 }

@ -164,7 +164,7 @@ func TestTraverserObjectScenarios(t *testing.T) {
 require.Empty(t, tr.Next())
 require.False(t, tr.Success())

-for i := 0; i < replicas[curVector]; i++ {
+for range replicas[curVector] {
 tr.SubmitSuccess()
 }
 }

@ -39,7 +39,7 @@ func TestTokenStore(t *testing.T) {

 tokens := make([]tok, 0, tokenNumber)

-for i := 0; i < tokenNumber; i++ {
+for i := range tokenNumber {
 req.SetExpiration(uint64(i))

 res, err := ts.Create(context.Background(), req)

@ -62,7 +62,7 @@ func TestGetSubTree(t *testing.T) {
 loop:
 for i := 1; i < len(acc.seen); i++ {
 parent := acc.seen[i].Body.ParentId
-for j := 0; j < i; j++ {
+for j := range i {
 if acc.seen[j].Body.NodeId[0] == parent[0] {
 continue loop
 }

@ -133,7 +133,7 @@ func (s *Service) ReplicateTreeOp(ctx context.Context, n netmapSDK.NodeInfo, req
 }

 func (s *Service) replicateLoop(ctx context.Context) {
-for i := 0; i < s.replicatorWorkerCount; i++ {
+for range s.replicatorWorkerCount {
 go s.replicationWorker(ctx)
 go s.localReplicationWorker(ctx)
 }

@ -13,7 +13,7 @@ func TestKeyLocker(t *testing.T) {
 taken := false
 eg, _ := errgroup.WithContext(context.Background())
 keyLocker := NewKeyLocker[int]()
-for i := 0; i < 100; i++ {
+for range 100 {
 eg.Go(func() error {
 keyLocker.Lock(0)
 defer keyLocker.Unlock(0)
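Where the index is still used, "for i := range n" (and "for i := range len(s)") is a drop-in replacement: i runs from 0 through n-1 exactly as the classic three-clause loop did. A small self-contained example under that assumption, with invented sample data:

package main

import "fmt"

func main() {
	nodes := []string{"node-a", "node-b", "node-c"} // made-up sample data

	// Old form: for i := 0; i < len(nodes); i++ {
	for i := range len(nodes) {
		fmt.Println(i, nodes[i])
	}

	// Old form: for i := 0; i < 3; i++ {
	for i := range 3 {
		fmt.Println("round", i)
	}
}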