[#895] test: Use t.Cleanup only for external resources

Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
Dmitrii Stepanov 2024-01-09 16:26:43 +03:00 committed by Evgenii Stratonikov
parent 836818fb75
commit 47dcfa20f3
50 changed files with 164 additions and 95 deletions
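The pattern applied across these files: a component that a test opens itself (a store, metabase, shard, engine, or cache) is now closed with defer at the end of the test function, while t.Cleanup is kept only for external resources such as files and directories outside the test's own control. A minimal sketch of the before/after shape; the store type and its methods below are illustrative, not taken from the repository:

package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// store stands in for any component with Open/Init/Close used in these tests.
type store struct{}

func (s *store) Open(readOnly bool) error { return nil }
func (s *store) Init() error              { return nil }
func (s *store) Close() error             { return nil }

// Before: cleanup of a resource the test itself owns was registered via t.Cleanup.
func TestStoreWithCleanup(t *testing.T) {
	st := &store{}
	require.NoError(t, st.Open(false))
	require.NoError(t, st.Init())
	t.Cleanup(func() { require.NoError(t, st.Close()) })
	// ... test body ...
}

// After: defer closes the resource when the test function returns;
// t.Cleanup remains only for external resources (for example, removing temporary files).
func TestStoreWithDefer(t *testing.T) {
	st := &store{}
	require.NoError(t, st.Open(false))
	require.NoError(t, st.Init())
	defer func() { require.NoError(t, st.Close()) }()
	// ... test body ...
}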

@@ -2,7 +2,6 @@ package blobovnicza
 import (
 	"context"
-	"os"
 	"path/filepath"
 	"testing"
@@ -15,11 +14,7 @@ func TestBlobovnicza_Get(t *testing.T) {
 	filename := filepath.Join(t.TempDir(), "blob")
 	var blz *Blobovnicza
-	t.Cleanup(func() {
-		blz.Close()
-		os.RemoveAll(filename)
-	})
+	defer func() { require.NoError(t, blz.Close()) }()
 	fnInit := func(szLimit uint64) {
 		if blz != nil {

@@ -24,9 +24,9 @@ func TestBlobovniczaTree_Concurrency(t *testing.T) {
 		WithRootPath(t.TempDir()))
 	require.NoError(t, st.Open(false))
 	require.NoError(t, st.Init())
-	t.Cleanup(func() {
+	defer func() {
 		require.NoError(t, st.Close())
-	})
+	}()
 	objGen := &testutil.SeqObjGenerator{ObjSize: 1}

@@ -25,7 +25,7 @@ func TestExistsInvalidStorageID(t *testing.T) {
 		WithBlobovniczaSize(1<<20))
 	require.NoError(t, b.Open(false))
 	require.NoError(t, b.Init())
-	t.Cleanup(func() { _ = b.Close() })
+	defer func() { require.NoError(t, b.Close()) }()
 	obj := blobstortest.NewObject(1024)
 	addr := object.AddressOf(obj)

@@ -14,7 +14,7 @@ func TestDelete(t *testing.T, cons Constructor, min, max uint64) {
 	s := cons(t)
 	require.NoError(t, s.Open(false))
 	require.NoError(t, s.Init())
-	t.Cleanup(func() { require.NoError(t, s.Close()) })
+	defer func() { require.NoError(t, s.Close()) }()
 	objects := prepare(t, 4, s, min, max)

@@ -13,7 +13,7 @@ func TestExists(t *testing.T, cons Constructor, min, max uint64) {
 	s := cons(t)
 	require.NoError(t, s.Open(false))
 	require.NoError(t, s.Init())
-	t.Cleanup(func() { require.NoError(t, s.Close()) })
+	defer func() { require.NoError(t, s.Close()) }()
 	objects := prepare(t, 1, s, min, max)

@@ -14,7 +14,7 @@ func TestGet(t *testing.T, cons Constructor, min, max uint64) {
 	s := cons(t)
 	require.NoError(t, s.Open(false))
 	require.NoError(t, s.Init())
-	t.Cleanup(func() { require.NoError(t, s.Close()) })
+	defer func() { require.NoError(t, s.Close()) }()
 	objects := prepare(t, 2, s, min, max)

@@ -16,7 +16,7 @@ func TestGetRange(t *testing.T, cons Constructor, min, max uint64) {
 	s := cons(t)
 	require.NoError(t, s.Open(false))
 	require.NoError(t, s.Init())
-	t.Cleanup(func() { require.NoError(t, s.Close()) })
+	defer func() { require.NoError(t, s.Close()) }()
 	objects := prepare(t, 1, s, min, max)

@@ -13,7 +13,7 @@ func TestIterate(t *testing.T, cons Constructor, min, max uint64) {
 	s := cons(t)
 	require.NoError(t, s.Open(false))
 	require.NoError(t, s.Init())
-	t.Cleanup(func() { require.NoError(t, s.Close()) })
+	defer func() { require.NoError(t, s.Close()) }()
 	objects := prepare(t, 10, s, min, max)

@@ -17,7 +17,7 @@ func TestSimpleLifecycle(t *testing.T) {
 		WithRootPath("memstore"),
 		WithLogger(test.NewLogger(t)),
 	)
-	t.Cleanup(func() { _ = s.Close() })
+	defer func() { require.NoError(t, s.Close()) }()
 	require.NoError(t, s.Open(false))
 	require.NoError(t, s.Init())

@@ -25,10 +25,6 @@ func (s storage) open(b *testing.B) common.Storage {
 	require.NoError(b, st.Open(false))
 	require.NoError(b, st.Init())
-	b.Cleanup(func() {
-		require.NoError(b, st.Close())
-	})
 	return st
 }
@@ -108,6 +104,7 @@ func BenchmarkSubstorageReadPerf(b *testing.B) {
 	b.Run(fmt.Sprintf("%s-%s", stEntry.desc, tt.desc), func(b *testing.B) {
 		objGen := tt.objGen()
 		st := stEntry.open(b)
+		defer func() { require.NoError(b, st.Close()) }()
 		// Fill database
 		var errG errgroup.Group
@@ -162,6 +159,7 @@ func BenchmarkSubstorageWritePerf(b *testing.B) {
 	b.Run(fmt.Sprintf("%s-%s", stEntry.desc, genEntry.desc), func(b *testing.B) {
 		gen := genEntry.create()
 		st := stEntry.open(b)
+		defer func() { require.NoError(b, st.Close()) }()
 		b.ResetTimer()
 		b.RunParallel(func(pb *testing.PB) {
@@ -200,6 +198,7 @@ func BenchmarkSubstorageIteratePerf(b *testing.B) {
 	b.Run(fmt.Sprintf("%s-%s", stEntry.desc, tt.desc), func(b *testing.B) {
 		objGen := tt.objGen()
 		st := stEntry.open(b)
+		defer func() { require.NoError(b, st.Close()) }()
 		// Fill database
 		for i := 0; i < tt.size; i++ {

@@ -122,6 +122,9 @@ func testEngineFailInitAndReload(t *testing.T, errOnAdd bool, opts []shard.Optio
 	var configID string
 	e := New()
+	defer func() {
+		require.NoError(t, e.Close(context.Background()))
+	}()
 	_, err := e.AddShard(context.Background(), opts...)
 	if errOnAdd {
 		require.Error(t, err)
@@ -258,6 +261,8 @@ func TestReload(t *testing.T) {
 		require.Equal(t, shardNum+1, len(e.shards))
 		require.Equal(t, shardNum+1, len(e.shardPools))
+		require.NoError(t, e.Close(context.Background()))
 	})
 	t.Run("remove shards", func(t *testing.T) {
@@ -276,6 +281,8 @@ func TestReload(t *testing.T) {
 		// removed one
 		require.Equal(t, shardNum-1, len(e.shards))
 		require.Equal(t, shardNum-1, len(e.shardPools))
+		require.NoError(t, e.Close(context.Background()))
 	})
 }

@@ -2,7 +2,6 @@ package engine
 import (
 	"context"
-	"os"
 	"path/filepath"
 	"sync/atomic"
 	"testing"
@@ -49,10 +48,7 @@ func benchmarkExists(b *testing.B, shardNum int) {
 	}
 	e := testNewEngine(b).setInitializedShards(b, shards...).engine
-	b.Cleanup(func() {
-		_ = e.Close(context.Background())
-		_ = os.RemoveAll(b.Name())
-	})
+	defer func() { require.NoError(b, e.Close(context.Background())) }()
 	addr := oidtest.Address()
 	for i := 0; i < 100; i++ {

@@ -114,6 +114,7 @@ func TestErrorReporting(t *testing.T) {
 			checkShardState(t, te.ng, te.shards[0].id, i, mode.ReadWrite)
 			checkShardState(t, te.ng, te.shards[1].id, 0, mode.ReadWrite)
 		}
+		require.NoError(t, te.ng.Close(context.Background()))
 	})
 	t.Run("with error threshold", func(t *testing.T) {
 		const errThreshold = 3
@@ -161,6 +162,7 @@ func TestErrorReporting(t *testing.T) {
 		require.NoError(t, te.ng.SetShardMode(te.shards[0].id, mode.ReadWrite, true))
 		checkShardState(t, te.ng, te.shards[0].id, 0, mode.ReadWrite)
+		require.NoError(t, te.ng.Close(context.Background()))
 	})
 }

@@ -80,9 +80,9 @@ func TestListWithCursor(t *testing.T) {
 	require.NoError(t, e.Open(context.Background()))
 	require.NoError(t, e.Init(context.Background()))
-	t.Cleanup(func() {
-		e.Close(context.Background())
-	})
+	defer func() {
+		require.NoError(t, e.Close(context.Background()))
+	}()
 	expected := make([]object.AddressWithType, 0, tt.objectNum)
 	got := make([]object.AddressWithType, 0, tt.objectNum)

@@ -62,9 +62,7 @@ func TestLockUserScenario(t *testing.T) {
 	require.NoError(t, e.Open(context.Background()))
 	require.NoError(t, e.Init(context.Background()))
-	t.Cleanup(func() {
-		_ = e.Close(context.Background())
-	})
+	defer func() { require.NoError(t, e.Close(context.Background())) }()
 	lockerID := oidtest.ID()
 	tombID := oidtest.ID()
@@ -169,9 +167,7 @@ func TestLockExpiration(t *testing.T) {
 	require.NoError(t, e.Open(context.Background()))
 	require.NoError(t, e.Init(context.Background()))
-	t.Cleanup(func() {
-		_ = e.Close(context.Background())
-	})
+	defer func() { require.NoError(t, e.Close(context.Background())) }()
 	const lockerExpiresAfter = 13
@@ -246,9 +242,7 @@ func TestLockForceRemoval(t *testing.T) {
 	}).engine
 	require.NoError(t, e.Open(context.Background()))
 	require.NoError(t, e.Init(context.Background()))
-	t.Cleanup(func() {
-		_ = e.Close(context.Background())
-	})
+	defer func() { require.NoError(t, e.Close(context.Background())) }()
 	cnr := cidtest.ID()
 	var err error

@@ -12,9 +12,7 @@ func TestRemoveShard(t *testing.T) {
 	te := testNewEngine(t).setShardsNum(t, numOfShards)
 	e, ids := te.engine, te.shardIDs
-	t.Cleanup(func() {
-		e.Close(context.Background())
-	})
+	defer func() { require.NoError(t, e.Close(context.Background())) }()
 	require.Equal(t, numOfShards, len(e.shardPools))
 	require.Equal(t, numOfShards, len(e.shards))

@@ -19,6 +19,7 @@ func TestDB_Containers(t *testing.T) {
 	t.Parallel()
 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()
 	const N = 10
@@ -96,6 +97,7 @@ func TestDB_ContainersCount(t *testing.T) {
 	t.Parallel()
 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()
 	const R, T, SG, L = 10, 11, 12, 13 // amount of object per type
@@ -141,6 +143,7 @@ func TestDB_ContainerSize(t *testing.T) {
 	t.Parallel()
 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()
 	const (
 		C = 3

@@ -15,6 +15,7 @@ import (
 func TestReset(t *testing.T) {
 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()
 	err := db.Reset()
 	require.NoError(t, err)

@@ -22,6 +22,7 @@ func TestCounters(t *testing.T) {
 	t.Run("defaults", func(t *testing.T) {
 		t.Parallel()
 		db := newDB(t)
+		defer func() { require.NoError(t, db.Close()) }()
 		c, err := db.ObjectCounters()
 		require.NoError(t, err)
 		require.Zero(t, c.Phy)
@@ -36,6 +37,7 @@ func TestCounters(t *testing.T) {
 	t.Run("put", func(t *testing.T) {
 		t.Parallel()
 		db := newDB(t)
+		defer func() { require.NoError(t, db.Close()) }()
 		oo := make([]*objectSDK.Object, 0, objCount)
 		for i := 0; i < objCount; i++ {
 			oo = append(oo, testutil.GenerateObject())
@@ -73,6 +75,7 @@ func TestCounters(t *testing.T) {
 	t.Run("delete", func(t *testing.T) {
 		t.Parallel()
 		db := newDB(t)
+		defer func() { require.NoError(t, db.Close()) }()
 		oo := putObjs(t, db, objCount, false)
 		exp := make(map[cid.ID]meta.ObjectCounters)
@@ -117,6 +120,7 @@ func TestCounters(t *testing.T) {
 	t.Run("inhume", func(t *testing.T) {
 		t.Parallel()
 		db := newDB(t)
+		defer func() { require.NoError(t, db.Close()) }()
 		oo := putObjs(t, db, objCount, false)
 		exp := make(map[cid.ID]meta.ObjectCounters)
@@ -176,6 +180,7 @@ func TestCounters(t *testing.T) {
 	t.Run("put_split", func(t *testing.T) {
 		t.Parallel()
 		db := newDB(t)
+		defer func() { require.NoError(t, db.Close()) }()
 		parObj := testutil.GenerateObject()
 		exp := make(map[cid.ID]meta.ObjectCounters)
@@ -213,6 +218,7 @@ func TestCounters(t *testing.T) {
 	t.Run("delete_split", func(t *testing.T) {
 		t.Parallel()
 		db := newDB(t)
+		defer func() { require.NoError(t, db.Close()) }()
 		oo := putObjs(t, db, objCount, true)
 		exp := make(map[cid.ID]meta.ObjectCounters)
@@ -254,6 +260,7 @@ func TestCounters(t *testing.T) {
 	t.Run("inhume_split", func(t *testing.T) {
 		t.Parallel()
 		db := newDB(t)
+		defer func() { require.NoError(t, db.Close()) }()
 		oo := putObjs(t, db, objCount, true)
 		exp := make(map[cid.ID]meta.ObjectCounters)
@@ -321,6 +328,7 @@ func TestCounters_Expired(t *testing.T) {
 	es := &epochState{epoch}
 	db := newDB(t, meta.WithEpochState(es))
+	defer func() { require.NoError(t, db.Close()) }()
 	oo := make([]oid.Address, objCount)
 	for i := range oo {

@@ -2,7 +2,6 @@ package meta_test
 import (
 	"context"
-	"os"
 	"path/filepath"
 	"strconv"
 	"testing"
@@ -53,11 +52,6 @@ func newDB(t testing.TB, opts ...meta.Option) *meta.DB {
 	require.NoError(t, bdb.Open(context.Background(), false))
 	require.NoError(t, bdb.Init())
-	t.Cleanup(func() {
-		bdb.Close()
-		os.Remove(bdb.DumpInfo().Path)
-	})
 	return bdb
 }

@@ -18,6 +18,7 @@ import (
 func TestDB_Delete(t *testing.T) {
 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()
 	cnr := cidtest.ID()
 	parent := testutil.GenerateObjectWithCID(cnr)
@@ -78,6 +79,7 @@ func TestDB_Delete(t *testing.T) {
 func TestDeleteAllChildren(t *testing.T) {
 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()
 	cnr := cidtest.ID()
@@ -115,6 +117,7 @@ func TestDeleteAllChildren(t *testing.T) {
 func TestGraveOnlyDelete(t *testing.T) {
 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()
 	addr := oidtest.Address()
@@ -127,6 +130,7 @@ func TestGraveOnlyDelete(t *testing.T) {
 func TestExpiredObject(t *testing.T) {
 	db := newDB(t, meta.WithEpochState(epochState{currEpoch}))
+	defer func() { require.NoError(t, db.Close()) }()
 	checkExpiredObjects(t, db, func(exp, nonExp *objectSDK.Object) {
 		// removing expired object should be error-free

@@ -18,6 +18,7 @@ const currEpoch = 1000
 func TestDB_Exists(t *testing.T) {
 	db := newDB(t, meta.WithEpochState(epochState{currEpoch}))
+	defer func() { require.NoError(t, db.Close()) }()
 	t.Run("no object", func(t *testing.T) {
 		nonExist := testutil.GenerateObject()

@@ -13,6 +13,7 @@ import (
 func TestDB_SelectExpired(t *testing.T) {
 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()
 	containerID1 := cidtest.ID()

@@ -22,6 +22,7 @@ import (
 func TestDB_Get(t *testing.T) {
 	db := newDB(t, meta.WithEpochState(epochState{currEpoch}))
+	defer func() { require.NoError(t, db.Close()) }()
 	raw := testutil.GenerateObject()
@@ -180,6 +181,7 @@ func benchmarkGet(b *testing.B, numOfObj int) {
 		meta.WithMaxBatchSize(batchSize),
 		meta.WithMaxBatchDelay(10*time.Millisecond),
 	)
+	defer func() { require.NoError(b, db.Close()) }()
 	addrs := make([]oid.Address, 0, numOfObj)
 	for i := 0; i < numOfObj; i++ {

@@ -14,6 +14,7 @@ import (
 func TestDB_IterateDeletedObjects_EmptyDB(t *testing.T) {
 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()
 	var counter int
 	var iterGravePRM meta.GraveyardIterationPrm
@@ -40,6 +41,7 @@ func TestDB_IterateDeletedObjects_EmptyDB(t *testing.T) {
 func TestDB_Iterate_OffsetNotFound(t *testing.T) {
 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()
 	obj1 := testutil.GenerateObject()
 	obj2 := testutil.GenerateObject()
@@ -110,6 +112,7 @@ func TestDB_Iterate_OffsetNotFound(t *testing.T) {
 func TestDB_IterateDeletedObjects(t *testing.T) {
 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()
 	// generate and put 4 objects
 	obj1 := testutil.GenerateObject()
@@ -196,6 +199,7 @@ func TestDB_IterateDeletedObjects(t *testing.T) {
 func TestDB_IterateOverGraveyard_Offset(t *testing.T) {
 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()
 	// generate and put 4 objects
 	obj1 := testutil.GenerateObject()
@@ -294,6 +298,7 @@ func TestDB_IterateOverGraveyard_Offset(t *testing.T) {
 func TestDB_IterateOverGarbage_Offset(t *testing.T) {
 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()
 	// generate and put 4 objects
 	obj1 := testutil.GenerateObject()
@@ -385,6 +390,7 @@ func TestDB_IterateOverGarbage_Offset(t *testing.T) {
 func TestDB_DropGraves(t *testing.T) {
 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()
 	// generate and put 2 objects
 	obj1 := testutil.GenerateObject()

@@ -16,6 +16,7 @@ import (
 func TestDB_Inhume(t *testing.T) {
 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()
 	raw := testutil.GenerateObject()
 	testutil.AddAttribute(raw, "foo", "bar")
@@ -37,6 +38,7 @@ func TestDB_Inhume(t *testing.T) {
 func TestInhumeTombOnTomb(t *testing.T) {
 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()
 	var (
 		err error
@@ -99,6 +101,7 @@ func TestInhumeTombOnTomb(t *testing.T) {
 func TestInhumeLocked(t *testing.T) {
 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()
 	locked := oidtest.Address()

@@ -17,6 +17,7 @@ import (
 func TestDB_IterateExpired(t *testing.T) {
 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()
 	const epoch = 13
@@ -68,6 +69,7 @@ func putWithExpiration(t *testing.T, db *meta.DB, typ objectSDK.Type, expiresAt
 func TestDB_IterateCoveredByTombstones(t *testing.T) {
 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()
 	ts := oidtest.Address()
 	protected1 := oidtest.Address()

@@ -33,6 +33,7 @@ func listWithCursorPrepareDB(b *testing.B) *meta.DB {
 	db := newDB(b, meta.WithMaxBatchSize(1), meta.WithBoltDBOptions(&bbolt.Options{
 		NoSync: true,
 	})) // faster single-thread generation
+	defer func() { require.NoError(b, db.Close()) }()
 	obj := testutil.GenerateObject()
 	for i := 0; i < 100_000; i++ { // should be a multiple of all batch sizes
@@ -70,6 +71,7 @@ func TestLisObjectsWithCursor(t *testing.T) {
 	t.Parallel()
 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()
 	const (
 		containers = 5
@@ -165,6 +167,7 @@ func TestAddObjectDuringListingWithCursor(t *testing.T) {
 	t.Parallel()
 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()
 	const total = 5

@@ -21,6 +21,7 @@ func TestDB_Lock(t *testing.T) {
 	cnr := cidtest.ID()
 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()
 	t.Run("empty locked list", func(t *testing.T) {
 		require.Panics(t, func() { _ = db.Lock(context.Background(), cnr, oid.ID{}, nil) })
@@ -182,6 +183,7 @@ func TestDB_Lock_Expired(t *testing.T) {
 	es := &epochState{e: 123}
 	db := newDB(t, meta.WithEpochState(es))
+	defer func() { require.NoError(t, db.Close()) }()
 	// put an object
 	addr := putWithExpiration(t, db, objectSDK.TypeRegular, 124)
@@ -203,6 +205,7 @@ func TestDB_IsLocked(t *testing.T) {
 	t.Parallel()
 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()
 	// existing and locked objs

@@ -13,6 +13,7 @@ import (
 func TestDB_Movable(t *testing.T) {
 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()
 	raw1 := testutil.GenerateObject()
 	raw2 := testutil.GenerateObject()

@@ -46,6 +46,7 @@ func BenchmarkPut(b *testing.B) {
 		db := newDB(b,
 			meta.WithMaxBatchDelay(time.Millisecond*10),
 			meta.WithMaxBatchSize(runtime.NumCPU()))
+		defer func() { require.NoError(b, db.Close()) }()
 		// Ensure the benchmark is bound by CPU and not waiting batch-delay time.
 		b.SetParallelism(1)
@@ -67,6 +68,7 @@ func BenchmarkPut(b *testing.B) {
 		db := newDB(b,
 			meta.WithMaxBatchDelay(time.Millisecond*10),
 			meta.WithMaxBatchSize(1))
+		defer func() { require.NoError(b, db.Close()) }()
 		var index atomic.Int64
 		index.Store(-1)
 		objs := prepareObjects(b, b.N)
@@ -82,6 +84,7 @@ func BenchmarkPut(b *testing.B) {
 func TestDB_PutBlobovniczaUpdate(t *testing.T) {
 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()
 	raw1 := testutil.GenerateObject()
 	storageID := []byte{1, 2, 3, 4}

@@ -23,6 +23,7 @@ func TestDB_SelectUserAttributes(t *testing.T) {
 	t.Parallel()
 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()
 	cnr := cidtest.ID()
@@ -147,6 +148,7 @@ func TestDB_SelectRootPhyParent(t *testing.T) {
 	t.Parallel()
 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()
 	cnr := cidtest.ID()
@@ -300,6 +302,7 @@ func TestDB_SelectInhume(t *testing.T) {
 	t.Parallel()
 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()
 	cnr := cidtest.ID()
@@ -334,6 +337,7 @@ func TestDB_SelectPayloadHash(t *testing.T) {
 	t.Parallel()
 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()
 	cnr := cidtest.ID()
@@ -404,6 +408,7 @@ func TestDB_SelectWithSlowFilters(t *testing.T) {
 	t.Parallel()
 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()
 	cnr := cidtest.ID()
@@ -511,6 +516,7 @@ func TestDB_SelectObjectID(t *testing.T) {
 	t.Parallel()
 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()
 	cnr := cidtest.ID()
@@ -626,6 +632,7 @@ func TestDB_SelectSplitID(t *testing.T) {
 	t.Parallel()
 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()
 	cnr := cidtest.ID()
@@ -682,6 +689,7 @@ func TestDB_SelectContainerID(t *testing.T) {
 	t.Parallel()
 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()
 	cnr := cidtest.ID()
@@ -729,6 +737,8 @@ func TestDB_SelectContainerID(t *testing.T) {
 func BenchmarkSelect(b *testing.B) {
 	const objCount = 1000
 	db := newDB(b)
+	defer func() { require.NoError(b, db.Close()) }()
 	cid := cidtest.ID()
 	for i := 0; i < objCount; i++ {
@@ -769,6 +779,7 @@ func TestExpiredObjects(t *testing.T) {
 	t.Parallel()
 	db := newDB(t, meta.WithEpochState(epochState{currEpoch}))
+	defer func() { require.NoError(t, db.Close()) }()
 	checkExpiredObjects(t, db, func(exp, nonExp *objectSDK.Object) {
 		cidExp, _ := exp.ContainerID()

@@ -15,6 +15,7 @@ func TestDB_StorageID(t *testing.T) {
 	t.Parallel()
 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()
 	raw1 := testutil.GenerateObject()
 	raw2 := testutil.GenerateObject()

@@ -28,8 +28,9 @@ func BenchmarkCreate(b *testing.B) {
 		WithMaxBatchSize(runtime.GOMAXPROCS(0)))
 	require.NoError(b, f.Open(context.Background(), false))
 	require.NoError(b, f.Init())
+	defer func() { require.NoError(b, f.Close()) }()
 	b.Cleanup(func() {
-		require.NoError(b, f.Close())
 		require.NoError(b, os.RemoveAll(tmpDir))
 	})

@@ -20,19 +20,15 @@ import (
 var providers = []struct {
 	name string
-	construct func(t testing.TB, opts ...Option) Forest
+	construct func(t testing.TB, opts ...Option) ForestStorage
 }{
-	{"inmemory", func(t testing.TB, _ ...Option) Forest {
+	{"inmemory", func(t testing.TB, _ ...Option) ForestStorage {
 		f := NewMemoryForest()
 		require.NoError(t, f.Open(context.Background(), false))
 		require.NoError(t, f.Init())
-		t.Cleanup(func() {
-			require.NoError(t, f.Close())
-		})
 		return f
 	}},
-	{"bbolt", func(t testing.TB, opts ...Option) Forest {
+	{"bbolt", func(t testing.TB, opts ...Option) ForestStorage {
 		f := NewBoltForest(
 			append([]Option{
 				WithPath(filepath.Join(t.TempDir(), "test.db")),
@@ -40,9 +36,6 @@ var providers = []struct {
 			}, opts...)...)
 		require.NoError(t, f.Open(context.Background(), false))
 		require.NoError(t, f.Init())
-		t.Cleanup(func() {
-			require.NoError(t, f.Close())
-		})
 		return f
 	}},
 }
@@ -62,7 +55,9 @@ func TestForest_TreeMove(t *testing.T) {
 	}
 }
-func testForestTreeMove(t *testing.T, s Forest) {
+func testForestTreeMove(t *testing.T, s ForestStorage) {
+	defer func() { require.NoError(t, s.Close()) }()
 	cid := cidtest.ID()
 	d := CIDDescriptor{cid, 0, 1}
 	treeID := "version"
@@ -124,7 +119,9 @@ func TestMemoryForest_TreeGetChildren(t *testing.T) {
 	}
 }
-func testForestTreeGetChildren(t *testing.T, s Forest) {
+func testForestTreeGetChildren(t *testing.T, s ForestStorage) {
+	defer func() { require.NoError(t, s.Close()) }()
 	cid := cidtest.ID()
 	d := CIDDescriptor{cid, 0, 1}
 	treeID := "version"
@@ -188,7 +185,9 @@ func TestForest_TreeDrop(t *testing.T) {
 	}
 }
-func testForestTreeDrop(t *testing.T, s Forest) {
+func testForestTreeDrop(t *testing.T, s ForestStorage) {
+	defer func() { require.NoError(t, s.Close()) }()
 	const cidsSize = 3
 	var cids [cidsSize]cidSDK.ID
@@ -256,7 +255,9 @@ func TestForest_TreeAdd(t *testing.T) {
 	}
 }
-func testForestTreeAdd(t *testing.T, s Forest) {
+func testForestTreeAdd(t *testing.T, s ForestStorage) {
+	defer func() { require.NoError(t, s.Close()) }()
 	cid := cidtest.ID()
 	d := CIDDescriptor{cid, 0, 1}
 	treeID := "version"
@@ -302,7 +303,9 @@ func TestForest_TreeAddByPath(t *testing.T) {
 	}
 }
-func testForestTreeAddByPath(t *testing.T, s Forest) {
+func testForestTreeAddByPath(t *testing.T, s ForestStorage) {
+	defer func() { require.NoError(t, s.Close()) }()
 	cid := cidtest.ID()
 	d := CIDDescriptor{cid, 0, 1}
 	treeID := "version"
@@ -425,7 +428,7 @@ func TestForest_Apply(t *testing.T) {
 	}
 }
-func testForestTreeApply(t *testing.T, constructor func(t testing.TB, _ ...Option) Forest) {
+func testForestTreeApply(t *testing.T, constructor func(t testing.TB, _ ...Option) ForestStorage) {
 	cid := cidtest.ID()
 	treeID := "version"
@@ -439,6 +442,8 @@ func testForestTreeApply(t *testing.T, constructor func(t testing.TB, _ ...Optio
 	t.Run("add a child, then insert a parent removal", func(t *testing.T) {
 		s := constructor(t)
+		defer func() { require.NoError(t, s.Close()) }()
 		testApply(t, s, 10, 0, Meta{Time: 1, Items: []KeyValue{{"grand", []byte{1}}}})
 		meta := Meta{Time: 3, Items: []KeyValue{{"child", []byte{3}}}}
@@ -450,6 +455,7 @@ func testForestTreeApply(t *testing.T, constructor func(t testing.TB, _ ...Optio
 	})
 	t.Run("add a child to non-existent parent, then add a parent", func(t *testing.T) {
 		s := constructor(t)
+		defer func() { require.NoError(t, s.Close()) }()
 		meta := Meta{Time: 1, Items: []KeyValue{{"child", []byte{3}}}}
 		testApply(t, s, 11, 10, meta)
@@ -469,7 +475,7 @@ func TestForest_ApplySameOperation(t *testing.T) {
 	}
 }
-func testForestApplySameOperation(t *testing.T, constructor func(t testing.TB, _ ...Option) Forest, parallel bool) {
+func testForestApplySameOperation(t *testing.T, constructor func(t testing.TB, _ ...Option) ForestStorage, parallel bool) {
 	cid := cidtest.ID()
 	treeID := "version"
@@ -519,6 +525,8 @@ func testForestApplySameOperation(t *testing.T, constructor func(t testing.TB, _
 	t.Run("expected", func(t *testing.T) {
 		s := constructor(t)
+		defer func() { require.NoError(t, s.Close()) }()
 		for i := range logs {
 			require.NoError(t, s.TreeApply(ctx, cid, treeID, &logs[i], false))
 		}
@@ -526,6 +534,8 @@ func testForestApplySameOperation(t *testing.T, constructor func(t testing.TB, _
 	})
 	s := constructor(t, WithMaxBatchSize(batchSize))
+	defer func() { require.NoError(t, s.Close()) }()
 	require.NoError(t, s.TreeApply(ctx, cid, treeID, &logs[0], false))
 	for i := 0; i < batchSize; i++ {
 		errG.Go(func() error {
@@ -545,7 +555,7 @@ func TestForest_GetOpLog(t *testing.T) {
 	}
 }
-func testForestTreeGetOpLog(t *testing.T, constructor func(t testing.TB, _ ...Option) Forest) {
+func testForestTreeGetOpLog(t *testing.T, constructor func(t testing.TB, _ ...Option) ForestStorage) {
 	cid := cidtest.ID()
 	treeID := "version"
 	logs := []Move{
@@ -565,6 +575,7 @@ func testForestTreeGetOpLog(t *testing.T, constructor func(t testing.TB, _ ...Op
 	}
 	s := constructor(t)
+	defer func() { require.NoError(t, s.Close()) }()
 	t.Run("empty log, no panic", func(t *testing.T) {
 		_, err := s.TreeGetOpLog(context.Background(), cid, treeID, 0)
@@ -603,8 +614,9 @@ func TestForest_TreeExists(t *testing.T) {
 	}
 }
-func testForestTreeExists(t *testing.T, constructor func(t testing.TB, opts ...Option) Forest) {
+func testForestTreeExists(t *testing.T, constructor func(t testing.TB, opts ...Option) ForestStorage) {
 	s := constructor(t)
+	defer func() { require.NoError(t, s.Close()) }()
 	checkExists := func(t *testing.T, expected bool, cid cidSDK.ID, treeID string) {
 		actual, err := s.TreeExists(context.Background(), cid, treeID)
@@ -663,6 +675,8 @@ func TestApplyTricky1(t *testing.T) {
 	for i := range providers {
 		t.Run(providers[i].name, func(t *testing.T) {
 			s := providers[i].construct(t)
+			defer func() { require.NoError(t, s.Close()) }()
 			for i := range ops {
 				require.NoError(t, s.TreeApply(context.Background(), cid, treeID, &ops[i], false))
 			}
@@ -724,6 +738,8 @@ func TestApplyTricky2(t *testing.T) {
 	for i := range providers {
 		t.Run(providers[i].name, func(t *testing.T) {
 			s := providers[i].construct(t)
+			defer func() { require.NoError(t, s.Close()) }()
 			for i := range ops {
 				require.NoError(t, s.TreeApply(context.Background(), cid, treeID, &ops[i], false))
 			}
@@ -821,7 +837,7 @@ func compareForests(t *testing.T, expected, actual Forest, cid cidSDK.ID, treeID
 	}
 }
-func testForestTreeParallelApply(t *testing.T, constructor func(t testing.TB, _ ...Option) Forest, batchSize, opCount, iterCount int) {
+func testForestTreeParallelApply(t *testing.T, constructor func(t testing.TB, _ ...Option) ForestStorage, batchSize, opCount, iterCount int) {
 	r := mrand.New(mrand.NewSource(42))
 	const nodeCount = 5
@@ -832,6 +848,8 @@ func testForestTreeParallelApply(t *testing.T, constructor func(t testing.TB, _
 	treeID := "version"
 	expected := constructor(t, WithNoSync(true))
+	defer func() { require.NoError(t, expected.Close()) }()
 	for i := range ops {
 		require.NoError(t, expected.TreeApply(context.Background(), cid, treeID, &ops[i], false))
 	}
@@ -860,10 +878,11 @@ func testForestTreeParallelApply(t *testing.T, constructor func(t testing.TB, _
 		wg.Wait()
 		compareForests(t, expected, actual, cid, treeID, nodeCount)
+		require.NoError(t, actual.Close())
 	}
 }
-func testForestTreeApplyRandom(t *testing.T, constructor func(t testing.TB, _ ...Option) Forest) {
+func testForestTreeApplyRandom(t *testing.T, constructor func(t testing.TB, _ ...Option) ForestStorage) {
 	r := mrand.New(mrand.NewSource(42))
 	const (
@@ -877,6 +896,8 @@ func testForestTreeApplyRandom(t *testing.T, constructor func(t testing.TB, _ ..
 	treeID := "version"
 	expected := constructor(t, WithNoSync(true))
+	defer func() { require.NoError(t, expected.Close()) }()
 	for i := range ops {
 		require.NoError(t, expected.TreeApply(context.Background(), cid, treeID, &ops[i], false))
 	}
@@ -891,6 +912,7 @@ func testForestTreeApplyRandom(t *testing.T, constructor func(t testing.TB, _ ..
 			require.NoError(t, actual.TreeApply(context.Background(), cid, treeID, &ops[i], false))
 		}
 		compareForests(t, expected, actual, cid, treeID, nodeCount)
+		require.NoError(t, actual.Close())
 	}
 }
@@ -908,6 +930,8 @@ func BenchmarkApplySequential(b *testing.B) {
 		b.Run("batchsize="+strconv.Itoa(bs), func(b *testing.B) {
 			r := mrand.New(mrand.NewSource(time.Now().Unix()))
 			s := providers[i].construct(b, WithMaxBatchSize(bs))
+			defer func() { require.NoError(b, s.Close()) }()
 			benchmarkApply(b, s, func(opCount int) []Move {
 				ops := make([]Move, opCount)
 				for i := range ops {
@@ -942,6 +966,8 @@ func BenchmarkApplyReorderLast(b *testing.B) {
 		b.Run("batchsize="+strconv.Itoa(bs), func(b *testing.B) {
 			r := mrand.New(mrand.NewSource(time.Now().Unix()))
 			s := providers[i].construct(b, WithMaxBatchSize(bs))
+			defer func() { require.NoError(b, s.Close()) }()
 			benchmarkApply(b, s, func(opCount int) []Move {
 				ops := make([]Move, opCount)
 				for i := range ops {
@@ -996,7 +1022,8 @@ func TestTreeGetByPath(t *testing.T) {
 	}
 }
-func testTreeGetByPath(t *testing.T, s Forest) {
+func testTreeGetByPath(t *testing.T, s ForestStorage) {
+	defer func() { require.NoError(t, s.Close()) }()
 	cid := cidtest.ID()
 	treeID := "version"
@@ -1074,7 +1101,9 @@ func TestGetTrees(t *testing.T) {
 	}
 }
-func testTreeGetTrees(t *testing.T, s Forest) {
+func testTreeGetTrees(t *testing.T, s ForestStorage) {
+	defer func() { require.NoError(t, s.Close()) }()
 	cids := []cidSDK.ID{cidtest.ID(), cidtest.ID()}
 	d := CIDDescriptor{Position: 0, Size: 1}
@@ -1118,7 +1147,9 @@ func TestTreeLastSyncHeight(t *testing.T) {
 	}
 }
-func testTreeLastSyncHeight(t *testing.T, f Forest) {
+func testTreeLastSyncHeight(t *testing.T, f ForestStorage) {
+	defer func() { require.NoError(t, f.Close()) }()
 	cnr := cidtest.ID()
 	treeID := "someTree"

@@ -38,6 +38,7 @@ func TestShard_Delete_BigObject(t *testing.T) {
 func testShard(t *testing.T, hasWriteCache bool, payloadSize int) {
 	sh := newShard(t, hasWriteCache)
+	defer func() { require.NoError(t, sh.Close()) }()
 	cnr := cidtest.ID()

@@ -78,10 +78,7 @@ func Test_ObjectNotFoundIfNotDeletedFromMetabase(t *testing.T) {
 	sh.gcCfg.testHookRemover = func(context.Context) gcRunResult { return gcRunResult{} }
 	require.NoError(t, sh.Open(context.Background()))
 	require.NoError(t, sh.Init(context.Background()))
-	t.Cleanup(func() {
-		require.NoError(t, sh.Close())
-	})
+	defer func() { require.NoError(t, sh.Close()) }()
 	cnr := cidtest.ID()
 	obj := testutil.GenerateObjectWithCID(cnr)

@@ -31,6 +31,7 @@ func Test_GCDropsLockedExpiredSimpleObject(t *testing.T) {
 			return util.NewPseudoWorkerPool() // synchronous event processing
 		})},
 	})
+	defer func() { require.NoError(t, sh.Close()) }()
 	cnr := cidtest.ID()
@@ -127,6 +128,7 @@ func Test_GCDropsLockedExpiredComplexObject(t *testing.T) {
 			return util.NewPseudoWorkerPool() // synchronous event processing
 		})},
 	})
+	defer func() { require.NoError(t, sh.Close()) }()
 	lock := testutil.GenerateObjectWithCID(cnr)
 	lock.SetType(objectSDK.TypeLock)

@@ -32,6 +32,7 @@ func TestShard_Get(t *testing.T) {
 func testShardGet(t *testing.T, hasWriteCache bool) {
 	sh := newShard(t, hasWriteCache)
+	defer func() { require.NoError(t, sh.Close()) }()
 	var putPrm PutPrm
 	var getPrm GetPrm

@@ -30,6 +30,7 @@ func TestShard_Head(t *testing.T) {
 func testShardHead(t *testing.T, hasWriteCache bool) {
 	sh := newShard(t, hasWriteCache)
+	defer func() { require.NoError(t, sh.Close()) }()
 	var putPrm PutPrm
 	var headPrm HeadPrm

@@ -27,6 +27,7 @@ func TestShard_Inhume(t *testing.T) {
 func testShardInhume(t *testing.T, hasWriteCache bool) {
 	sh := newShard(t, hasWriteCache)
+	defer func() { require.NoError(t, sh.Close()) }()
 	cnr := cidtest.ID()

@@ -18,12 +18,14 @@ func TestShard_List(t *testing.T) {
 	t.Run("without write cache", func(t *testing.T) {
 		t.Parallel()
 		sh := newShard(t, false)
+		defer func() { require.NoError(t, sh.Close()) }()
 		testShardList(t, sh)
 	})
 	t.Run("with write cache", func(t *testing.T) {
 		t.Parallel()
 		shWC := newShard(t, true)
+		defer func() { require.NoError(t, shWC.Close()) }()
 		testShardList(t, shWC)
 	})
 }

@@ -61,9 +61,7 @@ func TestShard_Lock(t *testing.T) {
 	require.NoError(t, sh.Open(context.Background()))
 	require.NoError(t, sh.Init(context.Background()))
-	t.Cleanup(func() {
-		releaseShard(sh, t)
-	})
+	defer func() { require.NoError(t, sh.Close()) }()
 	cnr := cidtest.ID()
 	obj := testutil.GenerateObjectWithCID(cnr)
@@ -149,6 +147,7 @@ func TestShard_Lock(t *testing.T) {
 func TestShard_IsLocked(t *testing.T) {
 	sh := newShard(t, false)
+	defer func() { require.NoError(t, sh.Close()) }()
 	cnr := cidtest.ID()
 	obj := testutil.GenerateObjectWithCID(cnr)

@@ -160,6 +160,7 @@ func TestCounters(t *testing.T) {
 	dir := t.TempDir()
 	sh, mm := shardWithMetrics(t, dir)
+	defer func() { require.NoError(t, sh.Close()) }()
 	sh.SetMode(mode.ReadOnly)
 	require.Equal(t, mode.ReadOnly, mm.mode)
@@ -382,10 +383,6 @@ func shardWithMetrics(t *testing.T, path string) (*Shard, *metricsStore) {
 	require.NoError(t, sh.Open(context.Background()))
 	require.NoError(t, sh.Init(context.Background()))
-	t.Cleanup(func() {
-		sh.Close()
-	})
 	return sh, mm
 }

@@ -93,6 +93,7 @@ func testShardGetRange(t *testing.T, hasWriteCache bool) {
 			}),
 		},
 	})
+	defer func() { require.NoError(t, sh.Close()) }()
 	for _, tc := range testCases {
 		t.Run(tc.name, func(t *testing.T) {

@@ -57,6 +57,10 @@ func TestShardReload(t *testing.T) {
 	require.NoError(t, sh.Open(context.Background()))
 	require.NoError(t, sh.Init(context.Background()))
+	defer func() {
+		require.NoError(t, sh.Close())
+	}()
 	objects := make([]objAddr, 5)
 	for i := range objects {
 		objects[i].obj = newObject()

@@ -30,7 +30,6 @@ func (s epochState) CurrentEpoch() uint64 {
 type shardOptions struct {
 	rootPath string
-	dontRelease bool
 	wcOpts []writecache.Option
 	bsOpts []blobstor.Option
 	metaOptions []meta.Option
@@ -109,13 +108,5 @@ func newCustomShard(t testing.TB, enableWriteCache bool, o shardOptions) *Shard
 	require.NoError(t, sh.Open(context.Background()))
 	require.NoError(t, sh.Init(context.Background()))
-	if !o.dontRelease {
-		t.Cleanup(func() { releaseShard(sh, t) })
-	}
 	return sh
 }
-func releaseShard(s *Shard, t testing.TB) {
-	require.NoError(t, s.Close())
-}

@@ -39,7 +39,7 @@ func TestWriteCacheObjectLoss(t *testing.T) {
 		writecache.WithMaxObjectSize(smallSize * 2),
 	}
-	sh := newCustomShard(t, true, shardOptions{dontRelease: true, rootPath: dir, wcOpts: wcOpts})
+	sh := newCustomShard(t, true, shardOptions{rootPath: dir, wcOpts: wcOpts})
 	var errG errgroup.Group
 	for i := range objects {
@@ -55,6 +55,7 @@ func TestWriteCacheObjectLoss(t *testing.T) {
 	require.NoError(t, sh.Close())
 	sh = newCustomShard(t, true, shardOptions{rootPath: dir, wcOpts: wcOpts})
+	defer func() { require.NoError(t, sh.Close()) }()
 	var getPrm GetPrm

@@ -28,6 +28,7 @@ func BenchmarkWritecachePar(b *testing.B) {
 func benchmarkPutSeq(b *testing.B, cache writecache.Cache, size uint64) {
 	benchmarkPutPrepare(b, cache)
+	defer func() { require.NoError(b, cache.Close()) }()
 	ctx := context.Background()
 	objGen := testutil.RandObjGenerator{ObjSize: size}
@@ -50,6 +51,7 @@ func benchmarkPutSeq(b *testing.B, cache writecache.Cache, size uint64) {
 func benchmarkPutPar(b *testing.B, cache writecache.Cache, size uint64) {
 	benchmarkPutPrepare(b, cache)
+	defer func() { require.NoError(b, cache.Close()) }()
 	ctx := context.Background()
@@ -75,9 +77,6 @@ func benchmarkPutPar(b *testing.B, cache writecache.Cache, size uint64) {
 func benchmarkPutPrepare(b *testing.B, cache writecache.Cache) {
 	require.NoError(b, cache.Open(context.Background(), false), "opening")
 	require.NoError(b, cache.Init(), "initializing")
-	b.Cleanup(func() {
-		require.NoError(b, cache.Close(), "closing")
-	})
 }
 type testMetabase struct{}

@@ -142,6 +142,7 @@ func runFlushTest[Option any](
 ) {
 	t.Run("no errors", func(t *testing.T) {
 		wc, bs, mb := newCache(t, createCacheFn, smallSize)
+		defer func() { require.NoError(t, wc.Close()) }()
 		objects := putObjects(t, wc)
 		require.NoError(t, bs.SetMode(mode.ReadWrite))
@@ -154,6 +155,7 @@ func runFlushTest[Option any](
 	t.Run("flush on moving to degraded mode", func(t *testing.T) {
 		wc, bs, mb := newCache(t, createCacheFn, smallSize)
+		defer func() { require.NoError(t, wc.Close()) }()
 		objects := putObjects(t, wc)
 		// Blobstor is read-only, so we expect en error from `flush` here.
@@ -172,6 +174,7 @@ func runFlushTest[Option any](
 		t.Run(f.Desc, func(t *testing.T) {
 			errCountOpt, errCount := errCountOption()
 			wc, bs, mb := newCache(t, createCacheFn, smallSize, errCountOpt)
+			defer func() { require.NoError(t, wc.Close()) }()
 			objects := putObjects(t, wc)
 			f.InjectFn(t, wc)
@@ -214,7 +217,6 @@ func newCache[Option any](
 	require.NoError(t, bs.Init())
 	wc := createCacheFn(t, smallSize, mb, bs, opts...)
-	t.Cleanup(func() { require.NoError(t, wc.Close()) })
 	require.NoError(t, wc.Open(context.Background(), false))
 	require.NoError(t, wc.Init())