Parallelize longest tests #328

Merged
fyrchik merged 7 commits from fyrchik/frostfs-node:parallelize-tests into master 2023-05-12 09:45:05 +00:00
26 changed files with 212 additions and 81 deletions

View file

@@ -21,6 +21,7 @@ import (
"github.com/nspcc-dev/neo-go/pkg/wallet"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"golang.org/x/sync/errgroup"
)
const (
@@ -92,28 +93,30 @@ func initializeWallets(v *viper.Viper, walletDir string, size int) ([]string, er
		pubs[i] = w.Accounts[0].PrivateKey().PublicKey()
	}
	var errG errgroup.Group
	// Create committee account with N/2+1 multi-signature.
	majCount := smartcontract.GetMajorityHonestNodeCount(size)
	// Create consensus account with 2*N/3+1 multi-signature.
	bftCount := smartcontract.GetDefaultHonestNodeCount(size)
	for i := range wallets {
		i := i
		errG.Go(func() error {
			if err := addMultisigAccount(wallets[i], majCount, committeeAccountName, passwords[i], pubs); err != nil {
				return fmt.Errorf("can't create committee account: %w", err)
			}
			if err := addMultisigAccount(wallets[i], bftCount, consensusAccountName, passwords[i], pubs); err != nil {
				return fmt.Errorf("can't create consensus account: %w", err)
			}
			if err := wallets[i].SavePretty(); err != nil {
				return fmt.Errorf("can't save wallet: %w", err)
			}
			return nil
		})
	}
	if err := errG.Wait(); err != nil {
		return nil, err
	}
	return passwords, nil
}
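
A note on the pattern, since it recurs throughout this PR: errgroup.Group (golang.org/x/sync/errgroup) runs each callback on its own goroutine, and Wait blocks until all callbacks return, yielding the first non-nil error. A minimal sketch of the same fan-out shape, with a hypothetical processWallet standing in for the three per-wallet steps above:

	var g errgroup.Group
	for i := range wallets {
		i := i // pin the loop variable for the closure (needed before Go 1.22)
		g.Go(func() error {
			return processWallet(wallets[i]) // hypothetical per-wallet work
		})
	}
	if err := g.Wait(); err != nil {
		return nil, err
	}

The i := i rebinding matters because Go only schedules the closure; by the time it runs, a shared loop variable would already have advanced.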

View file

@@ -7,6 +7,7 @@ import (
"os"
"path/filepath"
"strconv"
"sync"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring"
@@ -71,11 +72,16 @@ func TestGenerateAlphabet(t *testing.T) {
	buf.WriteString(testContractPassword + "\r")
	require.NoError(t, generateAlphabetCreds(generateAlphabetCmd, nil))
	var wg sync.WaitGroup
	for i := uint64(0); i < size; i++ {
		i := i
		wg.Add(1)
		go func() {
			defer wg.Done()
			p := filepath.Join(walletDir, innerring.GlagoliticLetter(i).String()+".json")
			w, err := wallet.NewWalletFromFile(p)
			require.NoError(t, err, "wallet doesn't exist")
			require.Equal(t, 3, len(w.Accounts), "not all accounts were created")
			for _, a := range w.Accounts {
				err := a.Decrypt(strconv.FormatUint(i, 10), keys.NEP2ScryptParams())
				require.NoError(t, err, "can't decrypt account")
@@ -88,7 +94,9 @@ func TestGenerateAlphabet(t *testing.T) {
				require.Equal(t, singleAccountName, a.Label)
			}
		}()
	}
	wg.Wait()
	t.Run("check contract group wallet", func(t *testing.T) {
		p := filepath.Join(walletDir, contractWalletFilename)
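
One caveat with this WaitGroup shape: testify's require helpers call t.FailNow, which the testing package documents as safe only from the goroutine running the test, so a failing assertion inside these workers can misbehave. A hedged alternative that keeps the assertion on the test goroutine, reusing the errgroup pattern from the previous file (checkWallet is a hypothetical helper that returns an error instead of asserting):

	var g errgroup.Group
	for i := uint64(0); i < size; i++ {
		i := i
		g.Go(func() error {
			return checkWallet(i) // report failures as errors, assert once below
		})
	}
	require.NoError(t, g.Wait())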

View file

@@ -18,6 +18,8 @@ import (
)
func TestDeleteBigObject(t *testing.T) {
t.Parallel()
defer os.RemoveAll(t.Name())
cnr := cidtest.ID()
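
t.Parallel() marks a test as safe to run concurrently with other parallel tests: the runner pauses it until the package's serial tests finish, then resumes up to -parallel tests at once (GOMAXPROCS by default). The practical requirement, visible in every file below, is that parallel tests own their state; here the working directory is derived from t.Name(), which is unique per test. Where a fresh directory is all that's needed, t.TempDir() is a common alternative, since it is also per-test and is removed automatically. A sketch of that alternative (hypothetical test, not this PR's code):

	func TestWithTempDir(t *testing.T) {
		t.Parallel()
		dir := t.TempDir() // unique per test, cleaned up by the framework
		_ = dir
	}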

View file

@@ -77,6 +77,8 @@ func newEngineEvacuate(t *testing.T, shardNum int, objPerShard int) (*StorageE
}
func TestEvacuateShard(t *testing.T) {
t.Parallel()
const objPerShard = 3
e, ids, objects := newEngineEvacuate(t, 3, objPerShard)
@@ -132,6 +134,8 @@ func TestEvacuateShard(t *testing.T) {
}
func TestEvacuateNetwork(t *testing.T) {
t.Parallel()
var errReplication = errors.New("handler error")
acceptOneOf := func(objects []*objectSDK.Object, max int) func(context.Context, oid.Address, *objectSDK.Object) error {
@@ -154,6 +158,7 @@ func TestEvacuateNetwork(t *testing.T) {
}
t.Run("single shard", func(t *testing.T) {
t.Parallel()
e, ids, objects := newEngineEvacuate(t, 1, 3)
evacuateShardID := ids[0].String()
@@ -173,6 +178,7 @@ func TestEvacuateNetwork(t *testing.T) {
require.Equal(t, 2, res.Count())
})
t.Run("multiple shards, evacuate one", func(t *testing.T) {
t.Parallel()
e, ids, objects := newEngineEvacuate(t, 2, 3)
require.NoError(t, e.shards[ids[0].String()].SetMode(mode.ReadOnly))
@@ -195,6 +201,7 @@ func TestEvacuateNetwork(t *testing.T) {
})
})
t.Run("multiple shards, evacuate many", func(t *testing.T) {
t.Parallel()
e, ids, objects := newEngineEvacuate(t, 4, 5)
evacuateIDs := ids[0:3]
@@ -229,6 +236,7 @@ func TestEvacuateNetwork(t *testing.T) {
}
func TestEvacuateCancellation(t *testing.T) {
t.Parallel()
e, ids, _ := newEngineEvacuate(t, 2, 3)
require.NoError(t, e.shards[ids[0].String()].SetMode(mode.ReadOnly))
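
These subtests can all be marked parallel because each one builds its own engine through newEngineEvacuate and shares nothing with its siblings. It is worth remembering how the scheduling works: parallel subtests start only after the parent function body returns, and the parent test is not reported finished until all of them complete.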

View file

@@ -29,6 +29,8 @@ func sortAddresses(addrWithType []object.AddressWithType) []object.AddressWithTy
}
func TestListWithCursor(t *testing.T) {
t.Parallel()
tests := []struct {
name string
shardNum int
@@ -60,8 +62,10 @@ func TestListWithCursor(t *testing.T) {
batchSize: 100,
},
}
	for i := range tests {
		tt := tests[i]
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
e := testNewEngine(t).setShardsNumOpts(t, tt.shardNum, func(id int) []shard.Option {
return []shard.Option{
shard.WithLogger(&logger.Logger{Logger: zap.L()}),
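
This is the standard table-driven fix for the loop-variable capture bug: t.Run returns as soon as the subtest calls t.Parallel(), so with the old for _, tt := range tests every deferred closure would eventually observe the final table entry. Rebinding per iteration pins each case (Go 1.22's per-iteration loop variables make the rebinding unnecessary, and go vet's loopclosure check flags the old form):

	for i := range tests {
		tt := tests[i] // one binding per test case
		t.Run(tt.name, func(t *testing.T) {
			t.Parallel()
			runCase(t, tt) // hypothetical: the original subtest body
		})
	}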

View file

@@ -31,6 +31,8 @@ func (t tss) IsTombstoneAvailable(ctx context.Context, _ oid.Address, epoch uint
}
func TestLockUserScenario(t *testing.T) {
t.Parallel()
// Tested user actions:
// 1. stores some object
// 2. locks the object
@@ -146,6 +148,8 @@ func TestLockUserScenario(t *testing.T) {
}
func TestLockExpiration(t *testing.T) {
t.Parallel()
// Tested scenario:
// 1. some object is stored
// 2. lock object for it is stored, and the object is locked
@@ -222,6 +226,8 @@ func TestLockExpiration(t *testing.T) {
}
func TestLockForceRemoval(t *testing.T) {
t.Parallel()
// Tested scenario:
// 1. some object is stored
// 2. lock object for it is stored, and the object is locked

View file

@@ -17,6 +17,8 @@ import (
)
func TestRebalance(t *testing.T) {
t.Parallel()
te := newEngineWithErrorThreshold(t, "", 0)
const (
@@ -101,6 +103,8 @@ loop:
}
func TestRebalanceSingleThread(t *testing.T) {
t.Parallel()
te := newEngineWithErrorThreshold(t, "", 0)
obj := testutil.GenerateObjectWithCID(cidtest.ID())

View file

@@ -15,6 +15,8 @@ import (
)
func TestDB_Containers(t *testing.T) {
t.Parallel()
db := newDB(t)
const N = 10
@@ -90,6 +92,8 @@ func TestDB_Containers(t *testing.T) {
}
func TestDB_ContainersCount(t *testing.T) {
t.Parallel()
db := newDB(t)
const R, T, SG, L = 10, 11, 12, 13 // number of objects per type
@@ -133,6 +137,8 @@ func TestDB_ContainersCount(t *testing.T) {
}
func TestDB_ContainerSize(t *testing.T) {
t.Parallel()
db := newDB(t)
const (

View file

@@ -2,6 +2,7 @@ package meta_test
import (
"context"
"os"
"testing"
objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
@@ -16,19 +17,23 @@ import (
const objCount = 10
func TestCounters(t *testing.T) {
	t.Parallel()
	t.Cleanup(func() {
		require.NoError(t, os.RemoveAll(t.Name()))
	})
	t.Run("defaults", func(t *testing.T) {
		t.Parallel()
		db := newDB(t)
		c, err := db.ObjectCounters()
		require.NoError(t, err)
		require.Zero(t, c.Phy())
		require.Zero(t, c.Logic())
	})
	t.Run("put", func(t *testing.T) {
		t.Parallel()
		db := newDB(t)
		oo := make([]*object.Object, 0, objCount)
		for i := 0; i < objCount; i++ {
			oo = append(oo, testutil.GenerateObject())
@@ -39,10 +44,10 @@ func TestCounters(t *testing.T) {
		for i := 0; i < objCount; i++ {
			prm.SetObject(oo[i])
			_, err := db.Put(context.Background(), prm)
			require.NoError(t, err)
			c, err := db.ObjectCounters()
			require.NoError(t, err)
			require.Equal(t, uint64(i+1), c.Phy())
@@ -50,9 +55,9 @@ func TestCounters(t *testing.T) {
		}
	})
	t.Run("delete", func(t *testing.T) {
		t.Parallel()
		db := newDB(t)
		oo := putObjs(t, db, objCount, false)
		var prm meta.DeletePrm
@@ -63,7 +68,7 @@ func TestCounters(t *testing.T) {
			require.NoError(t, err)
			require.Equal(t, uint64(1), res.AvailableObjectsRemoved())
			c, err := db.ObjectCounters()
			require.NoError(t, err)
			require.Equal(t, uint64(i), c.Phy())
@@ -71,9 +76,9 @@ func TestCounters(t *testing.T) {
		}
	})
	t.Run("inhume", func(t *testing.T) {
		t.Parallel()
		db := newDB(t)
		oo := putObjs(t, db, objCount, false)
		inhumedObjs := make([]oid.Address, objCount/2)
@@ -94,16 +99,16 @@ func TestCounters(t *testing.T) {
		require.NoError(t, err)
		require.Equal(t, uint64(len(inhumedObjs)), res.AvailableInhumed())
		c, err := db.ObjectCounters()
		require.NoError(t, err)
		require.Equal(t, uint64(objCount), c.Phy())
		require.Equal(t, uint64(objCount-len(inhumedObjs)), c.Logic())
	})
	t.Run("put_split", func(t *testing.T) {
		t.Parallel()
		db := newDB(t)
		parObj := testutil.GenerateObject()
		// put objects and check that parent info
@@ -116,16 +121,16 @@ func TestCounters(t *testing.T) {
			require.NoError(t, putBig(db, o))
			c, err := db.ObjectCounters()
			require.NoError(t, err)
			require.Equal(t, uint64(i+1), c.Phy())
			require.Equal(t, uint64(i+1), c.Logic())
		}
	})
	t.Run("delete_split", func(t *testing.T) {
		t.Parallel()
		db := newDB(t)
		oo := putObjs(t, db, objCount, true)
		// delete objects that have parent info
@@ -141,9 +146,9 @@ func TestCounters(t *testing.T) {
		}
	})
	t.Run("inhume_split", func(t *testing.T) {
		t.Parallel()
		db := newDB(t)
		oo := putObjs(t, db, objCount, true)
		inhumedObjs := make([]oid.Address, objCount/2)
@@ -160,10 +165,10 @@ func TestCounters(t *testing.T) {
		prm.SetTombstoneAddress(oidtest.Address())
		prm.SetAddresses(inhumedObjs...)
		_, err := db.Inhume(context.Background(), prm)
		require.NoError(t, err)
		c, err := db.ObjectCounters()
		require.NoError(t, err)
		require.Equal(t, uint64(objCount), c.Phy())
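
Two details make this test parallel-safe. First, every subtest now opens its own metabase (db := newDB(t)), which is what allowed dropping both the shared c and err variables and the db.Reset() calls that used to serialize the subtests. Second, cleanup moved from defer to t.Cleanup: the parent's function body returns before its parallel subtests run, so a defer would delete the working directory out from under them, while t.Cleanup fires only after the whole test, subtests included, has finished. A sketch of the ordering (hypothetical test, not this PR's code):

	func TestCleanupOrdering(t *testing.T) {
		t.Parallel()
		t.Cleanup(func() {
			// runs last, after both parallel subtests below complete
			require.NoError(t, os.RemoveAll(t.Name()))
		})
		// a defer here would fire before the subtests even start
		t.Run("a", func(t *testing.T) { t.Parallel() })
		t.Run("b", func(t *testing.T) { t.Parallel() })
	}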

View file

@@ -10,6 +10,8 @@ import (
)
func TestGeneric(t *testing.T) {
t.Parallel()
defer func() { _ = os.RemoveAll(t.Name()) }()
var n int

View file

@@ -66,6 +66,8 @@ func benchmarkListWithCursor(b *testing.B, db *meta.DB, batchSize int) {
}
func TestLisObjectsWithCursor(t *testing.T) {
t.Parallel()
db := newDB(t)
const (
@@ -159,6 +161,8 @@ func TestLisObjectsWithCursor(t *testing.T) {
}
func TestAddObjectDuringListingWithCursor(t *testing.T) {
t.Parallel()
db := newDB(t)
const total = 5

View file

@@ -17,6 +17,8 @@ import (
)
func TestDB_Lock(t *testing.T) {
t.Parallel()
cnr := cidtest.ID()
db := newDB(t)
@@ -171,6 +173,8 @@ func TestDB_Lock(t *testing.T) {
}
func TestDB_Lock_Expired(t *testing.T) {
t.Parallel()
es := &epochState{e: 123}
db := newDB(t, meta.WithEpochState(es))
@@ -192,6 +196,8 @@ func TestDB_Lock_Expired(t *testing.T) {
}
func TestDB_IsLocked(t *testing.T) {
t.Parallel()
db := newDB(t)
// existing and locked objs

View file

@@ -20,6 +20,8 @@ import (
)
func TestDB_SelectUserAttributes(t *testing.T) {
t.Parallel()
db := newDB(t)
cnr := cidtest.ID()
@@ -142,6 +144,8 @@ func TestDB_SelectUserAttributes(t *testing.T) {
}
func TestDB_SelectRootPhyParent(t *testing.T) {
t.Parallel()
db := newDB(t)
cnr := cidtest.ID()
@@ -293,6 +297,8 @@ func TestDB_SelectRootPhyParent(t *testing.T) {
}
func TestDB_SelectInhume(t *testing.T) {
t.Parallel()
db := newDB(t)
cnr := cidtest.ID()
@@ -325,6 +331,8 @@ func TestDB_SelectInhume(t *testing.T) {
}
func TestDB_SelectPayloadHash(t *testing.T) {
t.Parallel()
db := newDB(t)
cnr := cidtest.ID()
@@ -393,6 +401,8 @@ func TestDB_SelectPayloadHash(t *testing.T) {
}
func TestDB_SelectWithSlowFilters(t *testing.T) {
t.Parallel()
db := newDB(t)
cnr := cidtest.ID()
@@ -498,6 +508,8 @@ func TestDB_SelectWithSlowFilters(t *testing.T) {
}
func TestDB_SelectObjectID(t *testing.T) {
t.Parallel()
db := newDB(t)
cnr := cidtest.ID()
@@ -611,6 +623,8 @@ func TestDB_SelectObjectID(t *testing.T) {
}
func TestDB_SelectSplitID(t *testing.T) {
t.Parallel()
db := newDB(t)
cnr := cidtest.ID()
@@ -665,6 +679,8 @@ func TestDB_SelectSplitID(t *testing.T) {
}
func TestDB_SelectContainerID(t *testing.T) {
t.Parallel()
db := newDB(t)
cnr := cidtest.ID()
@@ -750,6 +766,8 @@ func BenchmarkSelect(b *testing.B) {
}
func TestExpiredObjects(t *testing.T) {
t.Parallel()
db := newDB(t, meta.WithEpochState(epochState{currEpoch}))
checkExpiredObjects(t, db, func(exp, nonExp *objectSDK.Object) {

View file

@@ -12,6 +12,8 @@ import (
)
func TestDB_StorageID(t *testing.T) {
t.Parallel()
db := newDB(t)
raw1 := testutil.GenerateObject()

View file

@@ -43,6 +43,8 @@ type objAddr struct {
}
func TestShardOpen(t *testing.T) {
t.Parallel()
dir := t.TempDir()
metaPath := filepath.Join(dir, "meta")
@@ -111,6 +113,8 @@ func TestShardOpen(t *testing.T) {
}
func TestRefillMetabaseCorrupted(t *testing.T) {
t.Parallel()
dir := t.TempDir()
fsTree := fstree.New(
@@ -164,6 +168,8 @@ func TestRefillMetabaseCorrupted(t *testing.T) {
}
func TestRefillMetabase(t *testing.T) {
t.Parallel()
p := t.Name()
defer os.RemoveAll(p)

View file

@@ -13,11 +13,15 @@ import (
)
func TestShard_Delete(t *testing.T) {
t.Parallel()
t.Run("without write cache", func(t *testing.T) {
t.Parallel()
testShardDelete(t, false)
})
t.Run("with write cache", func(t *testing.T) {
t.Parallel()
testShardDelete(t, true)
})
}

View file

@@ -25,6 +25,8 @@ import (
)
func Test_GCDropsLockedExpiredObject(t *testing.T) {
t.Parallel()
var sh *shard.Shard
epoch := &epochState{

View file

@@ -17,11 +17,15 @@ import (
)
func TestShard_Get(t *testing.T) {
t.Parallel()
t.Run("without write cache", func(t *testing.T) {
t.Parallel()
testShardGet(t, false)
})
t.Run("with write cache", func(t *testing.T) {
t.Parallel()
testShardGet(t, true)
})
}

View file

@@ -15,11 +15,15 @@ import (
)
func TestShard_Head(t *testing.T) {
t.Parallel()
t.Run("without write cache", func(t *testing.T) {
t.Parallel()
testShardHead(t, false)
})
t.Run("with write cache", func(t *testing.T) {
t.Parallel()
testShardHead(t, true)
})
}

View file

@@ -13,11 +13,15 @@ import (
)
func TestShard_Inhume(t *testing.T) {
t.Parallel()
t.Run("without write cache", func(t *testing.T) {
t.Parallel()
testShardInhume(t, false)
})
t.Run("with write cache", func(t *testing.T) {
t.Parallel()
testShardInhume(t, true)
})
}

View file

@@ -2,6 +2,7 @@ package shard_test
import (
"context"
"sync"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
@@ -9,22 +10,23 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
"github.com/stretchr/testify/require"
"golang.org/x/sync/errgroup"
)
func TestShard_List(t *testing.T) {
	t.Parallel()
	t.Run("without write cache", func(t *testing.T) {
		t.Parallel()
		sh := newShard(t, false)
		defer releaseShard(sh, t)
		testShardList(t, sh)
	})
	t.Run("with write cache", func(t *testing.T) {
		t.Parallel()
		shWC := newShard(t, true)
		defer releaseShard(shWC, t)
		testShardList(t, shWC)
	})
}
@@ -33,13 +35,17 @@ func testShardList(t *testing.T, sh *shard.Shard) {
	const C = 10
	const N = 5
	var mtx sync.Mutex
	objs := make(map[string]int)
	var errG errgroup.Group
	errG.SetLimit(C * N)
	for i := 0; i < C; i++ {
		errG.Go(func() error {
			cnr := cidtest.ID()
			for j := 0; j < N; j++ {
				errG.Go(func() error {
					obj := testutil.GenerateObjectWithCID(cnr)
					testutil.AddPayload(obj, 1<<2)
@@ -49,14 +55,21 @@ func testShardList(t *testing.T, sh *shard.Shard) {
					obj.SetParentID(idParent)
					obj.SetParent(parent)
					mtx.Lock()
					objs[object.AddressOf(obj).EncodeToString()] = 0
					mtx.Unlock()
					var putPrm shard.PutPrm
					putPrm.SetObject(obj)
					_, err := sh.Put(context.Background(), putPrm)
					return err
				})
			}
			return nil
		})
	}
	require.NoError(t, errG.Wait())
res, err := sh.List()
require.NoError(t, err)
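
errgroup's SetLimit(n) caps how many goroutines the group runs at once; once the cap is reached, Go blocks until a slot frees. One caveat with the nested Go calls above: an outer goroutine holds its slot while spawning the inner ones, so a limit smaller than the nesting requires can deadlock (every slot held by an outer task waiting to enqueue children that can never start). The C * N limit here leaves enough headroom. A flat sketch of bounded fan-out (work is a hypothetical leaf task):

	var g errgroup.Group
	g.SetLimit(4) // at most 4 tasks in flight
	for i := 0; i < 16; i++ {
		i := i
		g.Go(func() error { // blocks while 4 tasks are already running
			return work(i)
		})
	}
	require.NoError(t, g.Wait())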

View file

@@ -23,6 +23,8 @@ import (
)
func TestShard_Lock(t *testing.T) {
t.Parallel()
var sh *shard.Shard
rootPath := t.TempDir()

View file

@@ -72,6 +72,8 @@ const physical = "phy"
const logical = "logic"
func TestCounters(t *testing.T) {
t.Parallel()
dir := t.TempDir()
sh, mm := shardWithMetrics(t, dir)

View file

@@ -22,11 +22,14 @@ import (
)
func TestShard_GetRange(t *testing.T) {
t.Parallel()
t.Run("without write cache", func(t *testing.T) {
t.Parallel()
testShardGetRange(t, false)
})
t.Run("with write cache", func(t *testing.T) {
t.Parallel()
testShardGetRange(t, true)
})
}

View file

@@ -24,6 +24,8 @@ import (
)
func TestShardReload(t *testing.T) {
t.Parallel()
p := t.Name()
defer os.RemoveAll(p)

View file

@@ -12,9 +12,12 @@ import (
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"github.com/stretchr/testify/require"
"golang.org/x/sync/errgroup"
)
func TestWriteCacheObjectLoss(t *testing.T) {
t.Parallel()
const (
smallSize = 1024
objCount = 100
@@ -39,13 +42,17 @@ func TestWriteCacheObjectLoss(t *testing.T) {
sh := newCustomShard(t, dir, true, wcOpts, nil)
	var errG errgroup.Group
	for i := range objects {
		obj := objects[i]
		errG.Go(func() error {
			var putPrm shard.PutPrm
			putPrm.SetObject(obj)
			_, err := sh.Put(context.Background(), putPrm)
			return err
		})
	}
	require.NoError(t, errG.Wait())
require.NoError(t, sh.Close())
sh = newCustomShard(t, dir, true, wcOpts, nil)
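
Two small correctness points in this last change: shard.PutPrm is now declared inside each goroutine because SetObject mutates it, so reusing the old single putPrm across concurrent Put calls would have been a data race; and the workers return the error instead of calling require.NoError, so the assertion happens once, on the test goroutine, at errG.Wait().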