Fix big object deletion #896
```diff
@@ -2,7 +2,6 @@ package blobovnicza
 import (
 	"context"
-	"os"
 	"path/filepath"
 	"testing"

@@ -15,11 +14,7 @@ func TestBlobovnicza_Get(t *testing.T) {
 	filename := filepath.Join(t.TempDir(), "blob")

 	var blz *Blobovnicza

-	t.Cleanup(func() {
-		blz.Close()
-		os.RemoveAll(filename)
-	})
+	defer func() { require.NoError(t, blz.Close()) }()

 	fnInit := func(szLimit uint64) {
 		if blz != nil {

@@ -24,9 +24,9 @@ func TestBlobovniczaTree_Concurrency(t *testing.T) {
 		WithRootPath(t.TempDir()))
 	require.NoError(t, st.Open(false))
 	require.NoError(t, st.Init())
-	t.Cleanup(func() {
+	defer func() {
 		require.NoError(t, st.Close())
-	})
+	}()

 	objGen := &testutil.SeqObjGenerator{ObjSize: 1}

@@ -25,7 +25,7 @@ func TestExistsInvalidStorageID(t *testing.T) {
 		WithBlobovniczaSize(1<<20))
 	require.NoError(t, b.Open(false))
 	require.NoError(t, b.Init())
-	t.Cleanup(func() { _ = b.Close() })
+	defer func() { require.NoError(t, b.Close()) }()

 	obj := blobstortest.NewObject(1024)
 	addr := object.AddressOf(obj)

@@ -14,7 +14,7 @@ func TestDelete(t *testing.T, cons Constructor, min, max uint64) {
 	s := cons(t)
 	require.NoError(t, s.Open(false))
 	require.NoError(t, s.Init())
-	t.Cleanup(func() { require.NoError(t, s.Close()) })
+	defer func() { require.NoError(t, s.Close()) }()

 	objects := prepare(t, 4, s, min, max)

@@ -13,7 +13,7 @@ func TestExists(t *testing.T, cons Constructor, min, max uint64) {
 	s := cons(t)
 	require.NoError(t, s.Open(false))
 	require.NoError(t, s.Init())
-	t.Cleanup(func() { require.NoError(t, s.Close()) })
+	defer func() { require.NoError(t, s.Close()) }()

 	objects := prepare(t, 1, s, min, max)

@@ -14,7 +14,7 @@ func TestGet(t *testing.T, cons Constructor, min, max uint64) {
 	s := cons(t)
 	require.NoError(t, s.Open(false))
 	require.NoError(t, s.Init())
-	t.Cleanup(func() { require.NoError(t, s.Close()) })
+	defer func() { require.NoError(t, s.Close()) }()

 	objects := prepare(t, 2, s, min, max)

@@ -16,7 +16,7 @@ func TestGetRange(t *testing.T, cons Constructor, min, max uint64) {
 	s := cons(t)
 	require.NoError(t, s.Open(false))
 	require.NoError(t, s.Init())
-	t.Cleanup(func() { require.NoError(t, s.Close()) })
+	defer func() { require.NoError(t, s.Close()) }()

 	objects := prepare(t, 1, s, min, max)

@@ -13,7 +13,7 @@ func TestIterate(t *testing.T, cons Constructor, min, max uint64) {
 	s := cons(t)
 	require.NoError(t, s.Open(false))
 	require.NoError(t, s.Init())
-	t.Cleanup(func() { require.NoError(t, s.Close()) })
+	defer func() { require.NoError(t, s.Close()) }()

 	objects := prepare(t, 10, s, min, max)

@@ -17,7 +17,7 @@ func TestSimpleLifecycle(t *testing.T) {
 		WithRootPath("memstore"),
 		WithLogger(test.NewLogger(t)),
 	)
-	t.Cleanup(func() { _ = s.Close() })
+	defer func() { require.NoError(t, s.Close()) }()
 	require.NoError(t, s.Open(false))
 	require.NoError(t, s.Init())
```
```diff
@@ -25,10 +25,6 @@ func (s storage) open(b *testing.B) common.Storage {
 	require.NoError(b, st.Open(false))
 	require.NoError(b, st.Init())

-	b.Cleanup(func() {
-		require.NoError(b, st.Close())
-	})
-
 	return st
 }
```

dstepanov-yadro commented:
`Close` call moved to caller.
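As a sketch of the resulting contract (illustrative names, not code from this PR): the helper opens and initializes the storage but no longer registers `b.Cleanup`, so each caller closes the storage it opened before its own benchmark body returns, rather than during the framework's teardown phase.

```go
package blobstor_test

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// noopStorage stands in for the real common.Storage implementations.
type noopStorage struct{}

func (noopStorage) Close() error { return nil }

// openStorage mirrors the helper after this change: it opens and
// initializes the storage but leaves Close to the caller.
func openStorage(b *testing.B) noopStorage {
	// ... Open(false) and Init() would be checked here ...
	return noopStorage{}
}

func BenchmarkExample(b *testing.B) {
	st := openStorage(b)
	// The caller-side defer bounds the storage's lifetime to this
	// benchmark's body instead of the framework's teardown phase.
	defer func() { require.NoError(b, st.Close()) }()

	for i := 0; i < b.N; i++ {
		// ... exercise st ...
	}
}
```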
```diff
@@ -108,6 +104,7 @@ func BenchmarkSubstorageReadPerf(b *testing.B) {
 		b.Run(fmt.Sprintf("%s-%s", stEntry.desc, tt.desc), func(b *testing.B) {
 			objGen := tt.objGen()
 			st := stEntry.open(b)
+			defer func() { require.NoError(b, st.Close()) }()

 			// Fill database
 			var errG errgroup.Group

@@ -162,6 +159,7 @@ func BenchmarkSubstorageWritePerf(b *testing.B) {
 		b.Run(fmt.Sprintf("%s-%s", stEntry.desc, genEntry.desc), func(b *testing.B) {
 			gen := genEntry.create()
 			st := stEntry.open(b)
+			defer func() { require.NoError(b, st.Close()) }()

 			b.ResetTimer()
 			b.RunParallel(func(pb *testing.PB) {

@@ -200,6 +198,7 @@ func BenchmarkSubstorageIteratePerf(b *testing.B) {
 		b.Run(fmt.Sprintf("%s-%s", stEntry.desc, tt.desc), func(b *testing.B) {
 			objGen := tt.objGen()
 			st := stEntry.open(b)
+			defer func() { require.NoError(b, st.Close()) }()

 			// Fill database
 			for i := 0; i < tt.size; i++ {

@@ -122,6 +122,9 @@ func testEngineFailInitAndReload(t *testing.T, errOnAdd bool, opts []shard.Optio
 	var configID string

 	e := New()
+	defer func() {
+		require.NoError(t, e.Close(context.Background()))
+	}()
 	_, err := e.AddShard(context.Background(), opts...)
 	if errOnAdd {
 		require.Error(t, err)

@@ -258,6 +261,8 @@ func TestReload(t *testing.T) {
 		require.Equal(t, shardNum+1, len(e.shards))
 		require.Equal(t, shardNum+1, len(e.shardPools))
+
+		require.NoError(t, e.Close(context.Background()))
 	})

 	t.Run("remove shards", func(t *testing.T) {

@@ -276,6 +281,8 @@ func TestReload(t *testing.T) {
 		// removed one
 		require.Equal(t, shardNum-1, len(e.shards))
 		require.Equal(t, shardNum-1, len(e.shardPools))
+
+		require.NoError(t, e.Close(context.Background()))
 	})
 }

@@ -2,7 +2,6 @@ package engine
 import (
 	"context"
-	"os"
 	"path/filepath"
 	"sync/atomic"
 	"testing"

@@ -49,10 +48,7 @@ func benchmarkExists(b *testing.B, shardNum int) {
 	}

 	e := testNewEngine(b).setInitializedShards(b, shards...).engine
-	b.Cleanup(func() {
-		_ = e.Close(context.Background())
-		_ = os.RemoveAll(b.Name())
-	})
+	defer func() { require.NoError(b, e.Close(context.Background())) }()

 	addr := oidtest.Address()
 	for i := 0; i < 100; i++ {

@@ -114,6 +114,7 @@ func TestErrorReporting(t *testing.T) {
 			checkShardState(t, te.ng, te.shards[0].id, i, mode.ReadWrite)
 			checkShardState(t, te.ng, te.shards[1].id, 0, mode.ReadWrite)
 		}
+		require.NoError(t, te.ng.Close(context.Background()))
 	})
 	t.Run("with error threshold", func(t *testing.T) {
 		const errThreshold = 3

@@ -161,6 +162,7 @@ func TestErrorReporting(t *testing.T) {
 		require.NoError(t, te.ng.SetShardMode(te.shards[0].id, mode.ReadWrite, true))
 		checkShardState(t, te.ng, te.shards[0].id, 0, mode.ReadWrite)
+		require.NoError(t, te.ng.Close(context.Background()))
 	})
 }
```
```diff
@@ -80,9 +80,9 @@ func TestListWithCursor(t *testing.T) {
 			require.NoError(t, e.Open(context.Background()))
 			require.NoError(t, e.Init(context.Background()))

-			t.Cleanup(func() {
-				e.Close(context.Background())
-			})
+			defer func() {
+				require.NoError(t, e.Close(context.Background()))
+			}()

 			expected := make([]object.AddressWithType, 0, tt.objectNum)
 			got := make([]object.AddressWithType, 0, tt.objectNum)
```

dstepanov-yadro commented:
https://github.com/golang/go/issues/40908

fyrchik commented:
This is exactly why we got rid of `zaptest` and used `zap.L()`: these problems occurred all over the tests (and writing `t.Cleanup` in the constructor is much easier than remembering to write it everywhere else).

fyrchik commented:
https://git.frostfs.info/TrueCloudLab/frostfs-node/pulls/621

dstepanov-yadro commented:
There was only one place in the entire project that needed to be fixed. But clear logs make sense for all of the tests.

fyrchik commented:
Debatable: we do not need logs at all, and when debugging tests usually a single test can be run.
Actually, I see lots of `Cleanup` with `Close` inside; they could trigger the race detector at some point later.
I don't like returning behaviour which clearly has problems and which we intentionally fixed at some point.

dstepanov-yadro commented:
I disagree with the statement `we do not need logs at all`. For several tasks already, I have needed proper logs of failing tests.

fyrchik commented:
Ok, but reverting a fix to a real problem is not the right approach here.

dstepanov-yadro commented:
Now there is no problem: the race condition with `t.Cleanup` is relevant only for the engine after `Init()`.
It looks like it was an inappropriate fix.

dstepanov-yadro commented:
Also see this comment (testing.go:1580):
```
// Do not lock t.done to allow race detector to detect race in case
// the user does not appropriately synchronize a goroutine.
```
As far as I understand, `Cleanup` requires that all background goroutines of the test be stopped. So using `Cleanup` for `engine.Close` is invalid usage.

fyrchik commented:
I am not sure the problem is gone now. The problem is that:
1. In the logger we read the `done` field: https://github.com/golang/go/blob/cc85462b3d23193e4861813ea85e254cfe372403/src/testing/testing.go#L1017
2. `done` is written to intentionally without a mutex in https://github.com/golang/go/blob/cc85462b3d23193e4861813ea85e254cfe372403/src/testing/testing.go#L1580

If you do `rg 'Cleanup\(' -A4` over the codebase, there are multiple calls to `releaseShard` in `Cleanup` (and to writecache etc.), because we currently use `Cleanup()` in tests. Are you _sure_ there are no goroutines in `Shard` which can log and run until `Close()` is called? In the writecache?
Or here, is it different from the `list_test.go` situation: https://git.frostfs.info/TrueCloudLab/frostfs-node/src/commit/cbc78a8efb72c40c7e39cccdc0aed4dc387fb053/pkg/local_object_storage/engine/shards_test.go#L16 ?
I would rather see a discussion first.
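To make the ordering concern concrete, here is a minimal, hypothetical sketch (names are illustrative, not from this PR) of the two shutdown styles the thread debates: `defer` closes the component inside the test function, before the framework marks the test done, while `t.Cleanup` closes it during teardown, after the test body has returned, which is the window where a still-running goroutine that logs through the test can hit the unsynchronized `t.done` access referenced above.

```go
package engine_test

import (
	"context"
	"testing"

	"github.com/stretchr/testify/require"
)

// noopEngine stands in for StorageEngine: Close must stop every
// background goroutine, including any that log via a test-bound logger.
type noopEngine struct{}

func (noopEngine) Close(context.Context) error { return nil }

func newEngine(t *testing.T) noopEngine { return noopEngine{} }

func TestWithDefer(t *testing.T) {
	e := newEngine(t)
	// Close runs before the test function returns, so goroutines that
	// log via t are stopped while logging is still legal.
	defer func() { require.NoError(t, e.Close(context.Background())) }()
	// ... test body ...
}

func TestWithCleanup(t *testing.T) {
	e := newEngine(t)
	// Close runs during teardown, after the test body has returned; a
	// goroutine still logging through t in that window is the scenario
	// golang/go#40908 describes (unsynchronized access to t.done).
	t.Cleanup(func() { require.NoError(t, e.Close(context.Background())) })
	// ... test body ...
}
```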
```diff
@@ -62,9 +62,7 @@ func TestLockUserScenario(t *testing.T) {
 	require.NoError(t, e.Open(context.Background()))
 	require.NoError(t, e.Init(context.Background()))

-	t.Cleanup(func() {
-		_ = e.Close(context.Background())
-	})
+	defer func() { require.NoError(t, e.Close(context.Background())) }()

 	lockerID := oidtest.ID()
 	tombID := oidtest.ID()

@@ -169,9 +167,7 @@ func TestLockExpiration(t *testing.T) {
 	require.NoError(t, e.Open(context.Background()))
 	require.NoError(t, e.Init(context.Background()))

-	t.Cleanup(func() {
-		_ = e.Close(context.Background())
-	})
+	defer func() { require.NoError(t, e.Close(context.Background())) }()

 	const lockerExpiresAfter = 13

@@ -246,9 +242,7 @@ func TestLockForceRemoval(t *testing.T) {
 	}).engine
 	require.NoError(t, e.Open(context.Background()))
 	require.NoError(t, e.Init(context.Background()))
-	t.Cleanup(func() {
-		_ = e.Close(context.Background())
-	})
+	defer func() { require.NoError(t, e.Close(context.Background())) }()

 	cnr := cidtest.ID()
 	var err error

@@ -12,9 +12,7 @@ func TestRemoveShard(t *testing.T) {
 	te := testNewEngine(t).setShardsNum(t, numOfShards)
 	e, ids := te.engine, te.shardIDs
-	t.Cleanup(func() {
-		e.Close(context.Background())
-	})
+	defer func() { require.NoError(t, e.Close(context.Background())) }()

 	require.Equal(t, numOfShards, len(e.shardPools))
 	require.Equal(t, numOfShards, len(e.shards))
```
```diff
@@ -19,6 +19,7 @@ func TestDB_Containers(t *testing.T) {
 	t.Parallel()

 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()

 	const N = 10

@@ -96,6 +97,7 @@ func TestDB_ContainersCount(t *testing.T) {
 	t.Parallel()

 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()

 	const R, T, SG, L = 10, 11, 12, 13 // amount of object per type

@@ -141,6 +143,7 @@ func TestDB_ContainerSize(t *testing.T) {
 	t.Parallel()

 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()

 	const (
 		C = 3

@@ -15,6 +15,7 @@ import (
 func TestReset(t *testing.T) {
 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()

 	err := db.Reset()
 	require.NoError(t, err)

@@ -22,6 +22,7 @@ func TestCounters(t *testing.T) {
 	t.Run("defaults", func(t *testing.T) {
 		t.Parallel()
 		db := newDB(t)
+		defer func() { require.NoError(t, db.Close()) }()
 		c, err := db.ObjectCounters()
 		require.NoError(t, err)
 		require.Zero(t, c.Phy)

@@ -36,6 +37,7 @@ func TestCounters(t *testing.T) {
 	t.Run("put", func(t *testing.T) {
 		t.Parallel()
 		db := newDB(t)
+		defer func() { require.NoError(t, db.Close()) }()
 		oo := make([]*objectSDK.Object, 0, objCount)
 		for i := 0; i < objCount; i++ {
 			oo = append(oo, testutil.GenerateObject())

@@ -73,6 +75,7 @@ func TestCounters(t *testing.T) {
 	t.Run("delete", func(t *testing.T) {
 		t.Parallel()
 		db := newDB(t)
+		defer func() { require.NoError(t, db.Close()) }()
 		oo := putObjs(t, db, objCount, false)

 		exp := make(map[cid.ID]meta.ObjectCounters)

@@ -117,6 +120,7 @@ func TestCounters(t *testing.T) {
 	t.Run("inhume", func(t *testing.T) {
 		t.Parallel()
 		db := newDB(t)
+		defer func() { require.NoError(t, db.Close()) }()
 		oo := putObjs(t, db, objCount, false)

 		exp := make(map[cid.ID]meta.ObjectCounters)

@@ -176,6 +180,7 @@ func TestCounters(t *testing.T) {
 	t.Run("put_split", func(t *testing.T) {
 		t.Parallel()
 		db := newDB(t)
+		defer func() { require.NoError(t, db.Close()) }()
 		parObj := testutil.GenerateObject()

 		exp := make(map[cid.ID]meta.ObjectCounters)

@@ -213,6 +218,7 @@ func TestCounters(t *testing.T) {
 	t.Run("delete_split", func(t *testing.T) {
 		t.Parallel()
 		db := newDB(t)
+		defer func() { require.NoError(t, db.Close()) }()
 		oo := putObjs(t, db, objCount, true)

 		exp := make(map[cid.ID]meta.ObjectCounters)

@@ -254,6 +260,7 @@ func TestCounters(t *testing.T) {
 	t.Run("inhume_split", func(t *testing.T) {
 		t.Parallel()
 		db := newDB(t)
+		defer func() { require.NoError(t, db.Close()) }()
 		oo := putObjs(t, db, objCount, true)

 		exp := make(map[cid.ID]meta.ObjectCounters)

@@ -321,6 +328,7 @@ func TestCounters_Expired(t *testing.T) {
 	es := &epochState{epoch}
 	db := newDB(t, meta.WithEpochState(es))
+	defer func() { require.NoError(t, db.Close()) }()

 	oo := make([]oid.Address, objCount)
 	for i := range oo {
```
```diff
@@ -2,7 +2,6 @@ package meta_test
 import (
 	"context"
-	"os"
 	"path/filepath"
 	"strconv"
 	"testing"

@@ -53,11 +52,6 @@ func newDB(t testing.TB, opts ...meta.Option) *meta.DB {
 	require.NoError(t, bdb.Open(context.Background(), false))
 	require.NoError(t, bdb.Init())

-	t.Cleanup(func() {
-		bdb.Close()
-		os.Remove(bdb.DumpInfo().Path)
-	})
-
 	return bdb
 }
```
```diff
@@ -18,6 +18,7 @@ import (
 func TestDB_Delete(t *testing.T) {
 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()

 	cnr := cidtest.ID()
 	parent := testutil.GenerateObjectWithCID(cnr)

@@ -78,6 +79,7 @@ func TestDB_Delete(t *testing.T) {
 func TestDeleteAllChildren(t *testing.T) {
 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()

 	cnr := cidtest.ID()

@@ -115,6 +117,7 @@ func TestDeleteAllChildren(t *testing.T) {
 func TestGraveOnlyDelete(t *testing.T) {
 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()

 	addr := oidtest.Address()

@@ -127,6 +130,7 @@ func TestGraveOnlyDelete(t *testing.T) {
 func TestExpiredObject(t *testing.T) {
 	db := newDB(t, meta.WithEpochState(epochState{currEpoch}))
+	defer func() { require.NoError(t, db.Close()) }()

 	checkExpiredObjects(t, db, func(exp, nonExp *objectSDK.Object) {
 		// removing expired object should be error-free

@@ -18,6 +18,7 @@ const currEpoch = 1000
 func TestDB_Exists(t *testing.T) {
 	db := newDB(t, meta.WithEpochState(epochState{currEpoch}))
+	defer func() { require.NoError(t, db.Close()) }()

 	t.Run("no object", func(t *testing.T) {
 		nonExist := testutil.GenerateObject()

@@ -13,6 +13,7 @@ import (
 func TestDB_SelectExpired(t *testing.T) {
 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()

 	containerID1 := cidtest.ID()

@@ -22,6 +22,7 @@ import (
 func TestDB_Get(t *testing.T) {
 	db := newDB(t, meta.WithEpochState(epochState{currEpoch}))
+	defer func() { require.NoError(t, db.Close()) }()

 	raw := testutil.GenerateObject()

@@ -180,6 +181,7 @@ func benchmarkGet(b *testing.B, numOfObj int) {
 		meta.WithMaxBatchSize(batchSize),
 		meta.WithMaxBatchDelay(10*time.Millisecond),
 	)
+	defer func() { require.NoError(b, db.Close()) }()
 	addrs := make([]oid.Address, 0, numOfObj)

 	for i := 0; i < numOfObj; i++ {

@@ -14,6 +14,7 @@ import (
 func TestDB_IterateDeletedObjects_EmptyDB(t *testing.T) {
 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()

 	var counter int
 	var iterGravePRM meta.GraveyardIterationPrm

@@ -40,6 +41,7 @@ func TestDB_IterateDeletedObjects_EmptyDB(t *testing.T) {
 func TestDB_Iterate_OffsetNotFound(t *testing.T) {
 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()

 	obj1 := testutil.GenerateObject()
 	obj2 := testutil.GenerateObject()

@@ -110,6 +112,7 @@ func TestDB_Iterate_OffsetNotFound(t *testing.T) {
 func TestDB_IterateDeletedObjects(t *testing.T) {
 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()

 	// generate and put 4 objects
 	obj1 := testutil.GenerateObject()

@@ -196,6 +199,7 @@ func TestDB_IterateDeletedObjects(t *testing.T) {
 func TestDB_IterateOverGraveyard_Offset(t *testing.T) {
 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()

 	// generate and put 4 objects
 	obj1 := testutil.GenerateObject()

@@ -294,6 +298,7 @@ func TestDB_IterateOverGraveyard_Offset(t *testing.T) {
 func TestDB_IterateOverGarbage_Offset(t *testing.T) {
 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()

 	// generate and put 4 objects
 	obj1 := testutil.GenerateObject()

@@ -385,6 +390,7 @@ func TestDB_IterateOverGarbage_Offset(t *testing.T) {
 func TestDB_DropGraves(t *testing.T) {
 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()

 	// generate and put 2 objects
 	obj1 := testutil.GenerateObject()

@@ -16,6 +16,7 @@ import (
 func TestDB_Inhume(t *testing.T) {
 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()

 	raw := testutil.GenerateObject()
 	testutil.AddAttribute(raw, "foo", "bar")

@@ -37,6 +38,7 @@ func TestDB_Inhume(t *testing.T) {
 func TestInhumeTombOnTomb(t *testing.T) {
 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()

 	var (
 		err error

@@ -99,6 +101,7 @@ func TestInhumeTombOnTomb(t *testing.T) {
 func TestInhumeLocked(t *testing.T) {
 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()

 	locked := oidtest.Address()

@@ -17,6 +17,7 @@ import (
 func TestDB_IterateExpired(t *testing.T) {
 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()

 	const epoch = 13

@@ -68,6 +69,7 @@ func putWithExpiration(t *testing.T, db *meta.DB, typ objectSDK.Type, expiresAt
 func TestDB_IterateCoveredByTombstones(t *testing.T) {
 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()

 	ts := oidtest.Address()
 	protected1 := oidtest.Address()

@@ -33,6 +33,7 @@ func listWithCursorPrepareDB(b *testing.B) *meta.DB {
 	db := newDB(b, meta.WithMaxBatchSize(1), meta.WithBoltDBOptions(&bbolt.Options{
 		NoSync: true,
 	})) // faster single-thread generation
+	defer func() { require.NoError(b, db.Close()) }()

 	obj := testutil.GenerateObject()
 	for i := 0; i < 100_000; i++ { // should be a multiple of all batch sizes

@@ -70,6 +71,7 @@ func TestLisObjectsWithCursor(t *testing.T) {
 	t.Parallel()

 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()

 	const (
 		containers = 5

@@ -165,6 +167,7 @@ func TestAddObjectDuringListingWithCursor(t *testing.T) {
 	t.Parallel()

 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()

 	const total = 5

@@ -21,6 +21,7 @@ func TestDB_Lock(t *testing.T) {
 	cnr := cidtest.ID()
 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()

 	t.Run("empty locked list", func(t *testing.T) {
 		require.Panics(t, func() { _ = db.Lock(context.Background(), cnr, oid.ID{}, nil) })

@@ -182,6 +183,7 @@ func TestDB_Lock_Expired(t *testing.T) {
 	es := &epochState{e: 123}

 	db := newDB(t, meta.WithEpochState(es))
+	defer func() { require.NoError(t, db.Close()) }()

 	// put an object
 	addr := putWithExpiration(t, db, objectSDK.TypeRegular, 124)

@@ -203,6 +205,7 @@ func TestDB_IsLocked(t *testing.T) {
 	t.Parallel()

 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()

 	// existing and locked objs

@@ -13,6 +13,7 @@ import (
 func TestDB_Movable(t *testing.T) {
 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()

 	raw1 := testutil.GenerateObject()
 	raw2 := testutil.GenerateObject()

@@ -46,6 +46,7 @@ func BenchmarkPut(b *testing.B) {
 		db := newDB(b,
 			meta.WithMaxBatchDelay(time.Millisecond*10),
 			meta.WithMaxBatchSize(runtime.NumCPU()))
+		defer func() { require.NoError(b, db.Close()) }()
 		// Ensure the benchmark is bound by CPU and not waiting batch-delay time.
 		b.SetParallelism(1)

@@ -67,6 +68,7 @@ func BenchmarkPut(b *testing.B) {
 		db := newDB(b,
 			meta.WithMaxBatchDelay(time.Millisecond*10),
 			meta.WithMaxBatchSize(1))
+		defer func() { require.NoError(b, db.Close()) }()
 		var index atomic.Int64
 		index.Store(-1)
 		objs := prepareObjects(b, b.N)

@@ -82,6 +84,7 @@ func BenchmarkPut(b *testing.B) {
 func TestDB_PutBlobovniczaUpdate(t *testing.T) {
 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()

 	raw1 := testutil.GenerateObject()
 	storageID := []byte{1, 2, 3, 4}

@@ -23,6 +23,7 @@ func TestDB_SelectUserAttributes(t *testing.T) {
 	t.Parallel()

 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()

 	cnr := cidtest.ID()

@@ -147,6 +148,7 @@ func TestDB_SelectRootPhyParent(t *testing.T) {
 	t.Parallel()

 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()

 	cnr := cidtest.ID()

@@ -300,6 +302,7 @@ func TestDB_SelectInhume(t *testing.T) {
 	t.Parallel()

 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()

 	cnr := cidtest.ID()

@@ -334,6 +337,7 @@ func TestDB_SelectPayloadHash(t *testing.T) {
 	t.Parallel()

 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()

 	cnr := cidtest.ID()

@@ -404,6 +408,7 @@ func TestDB_SelectWithSlowFilters(t *testing.T) {
 	t.Parallel()

 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()

 	cnr := cidtest.ID()

@@ -511,6 +516,7 @@ func TestDB_SelectObjectID(t *testing.T) {
 	t.Parallel()

 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()

 	cnr := cidtest.ID()

@@ -626,6 +632,7 @@ func TestDB_SelectSplitID(t *testing.T) {
 	t.Parallel()

 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()

 	cnr := cidtest.ID()

@@ -682,6 +689,7 @@ func TestDB_SelectContainerID(t *testing.T) {
 	t.Parallel()

 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()

 	cnr := cidtest.ID()

@@ -729,6 +737,8 @@ func TestDB_SelectContainerID(t *testing.T) {
 func BenchmarkSelect(b *testing.B) {
 	const objCount = 1000
 	db := newDB(b)
+	defer func() { require.NoError(b, db.Close()) }()
+
 	cid := cidtest.ID()

 	for i := 0; i < objCount; i++ {

@@ -769,6 +779,7 @@ func TestExpiredObjects(t *testing.T) {
 	t.Parallel()

 	db := newDB(t, meta.WithEpochState(epochState{currEpoch}))
+	defer func() { require.NoError(t, db.Close()) }()

 	checkExpiredObjects(t, db, func(exp, nonExp *objectSDK.Object) {
 		cidExp, _ := exp.ContainerID()

@@ -15,6 +15,7 @@ func TestDB_StorageID(t *testing.T) {
 	t.Parallel()

 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()

 	raw1 := testutil.GenerateObject()
 	raw2 := testutil.GenerateObject()
```
```diff
@@ -28,8 +28,9 @@ func BenchmarkCreate(b *testing.B) {
 		WithMaxBatchSize(runtime.GOMAXPROCS(0)))
 	require.NoError(b, f.Open(context.Background(), false))
 	require.NoError(b, f.Init())
+	defer func() { require.NoError(b, f.Close()) }()
+
 	b.Cleanup(func() {
-		require.NoError(b, f.Close())
 		require.NoError(b, os.RemoveAll(tmpDir))
 	})
```
```diff
@@ -20,19 +20,15 @@ import (
 var providers = []struct {
 	name      string
-	construct func(t testing.TB, opts ...Option) Forest
+	construct func(t testing.TB, opts ...Option) ForestStorage
 }{
-	{"inmemory", func(t testing.TB, _ ...Option) Forest {
+	{"inmemory", func(t testing.TB, _ ...Option) ForestStorage {
 		f := NewMemoryForest()
 		require.NoError(t, f.Open(context.Background(), false))
 		require.NoError(t, f.Init())
-		t.Cleanup(func() {
-			require.NoError(t, f.Close())
-		})
-
 		return f
 	}},
-	{"bbolt", func(t testing.TB, opts ...Option) Forest {
+	{"bbolt", func(t testing.TB, opts ...Option) ForestStorage {
 		f := NewBoltForest(
 			append([]Option{
 				WithPath(filepath.Join(t.TempDir(), "test.db")),

@@ -40,9 +36,6 @@ var providers = []struct {
 			}, opts...)...)
 		require.NoError(t, f.Open(context.Background(), false))
 		require.NoError(t, f.Init())
-		t.Cleanup(func() {
-			require.NoError(t, f.Close())
-		})
 		return f
 	}},
 }

@@ -62,7 +55,9 @@ func TestForest_TreeMove(t *testing.T) {
 	}
 }

-func testForestTreeMove(t *testing.T, s Forest) {
+func testForestTreeMove(t *testing.T, s ForestStorage) {
+	defer func() { require.NoError(t, s.Close()) }()
+
 	cid := cidtest.ID()
 	d := CIDDescriptor{cid, 0, 1}
 	treeID := "version"

@@ -124,7 +119,9 @@ func TestMemoryForest_TreeGetChildren(t *testing.T) {
 	}
 }

-func testForestTreeGetChildren(t *testing.T, s Forest) {
+func testForestTreeGetChildren(t *testing.T, s ForestStorage) {
+	defer func() { require.NoError(t, s.Close()) }()
+
 	cid := cidtest.ID()
 	d := CIDDescriptor{cid, 0, 1}
 	treeID := "version"

@@ -188,7 +185,9 @@ func TestForest_TreeDrop(t *testing.T) {
 	}
 }

-func testForestTreeDrop(t *testing.T, s Forest) {
+func testForestTreeDrop(t *testing.T, s ForestStorage) {
+	defer func() { require.NoError(t, s.Close()) }()
+
 	const cidsSize = 3
 	var cids [cidsSize]cidSDK.ID

@@ -256,7 +255,9 @@ func TestForest_TreeAdd(t *testing.T) {
 	}
 }

-func testForestTreeAdd(t *testing.T, s Forest) {
+func testForestTreeAdd(t *testing.T, s ForestStorage) {
+	defer func() { require.NoError(t, s.Close()) }()
+
 	cid := cidtest.ID()
 	d := CIDDescriptor{cid, 0, 1}
 	treeID := "version"

@@ -302,7 +303,9 @@ func TestForest_TreeAddByPath(t *testing.T) {
 	}
 }

-func testForestTreeAddByPath(t *testing.T, s Forest) {
+func testForestTreeAddByPath(t *testing.T, s ForestStorage) {
+	defer func() { require.NoError(t, s.Close()) }()
+
 	cid := cidtest.ID()
 	d := CIDDescriptor{cid, 0, 1}
 	treeID := "version"

@@ -425,7 +428,7 @@ func TestForest_Apply(t *testing.T) {
 	}
 }

-func testForestTreeApply(t *testing.T, constructor func(t testing.TB, _ ...Option) Forest) {
+func testForestTreeApply(t *testing.T, constructor func(t testing.TB, _ ...Option) ForestStorage) {
 	cid := cidtest.ID()
 	treeID := "version"

@@ -439,6 +442,8 @@ func testForestTreeApply(t *testing.T, constructor func(t testing.TB, _ ...Optio
 	t.Run("add a child, then insert a parent removal", func(t *testing.T) {
 		s := constructor(t)
+		defer func() { require.NoError(t, s.Close()) }()
+
 		testApply(t, s, 10, 0, Meta{Time: 1, Items: []KeyValue{{"grand", []byte{1}}}})

 		meta := Meta{Time: 3, Items: []KeyValue{{"child", []byte{3}}}}

@@ -450,6 +455,7 @@ func testForestTreeApply(t *testing.T, constructor func(t testing.TB, _ ...Optio
 	})
 	t.Run("add a child to non-existent parent, then add a parent", func(t *testing.T) {
 		s := constructor(t)
+		defer func() { require.NoError(t, s.Close()) }()

 		meta := Meta{Time: 1, Items: []KeyValue{{"child", []byte{3}}}}
 		testApply(t, s, 11, 10, meta)

@@ -469,7 +475,7 @@ func TestForest_ApplySameOperation(t *testing.T) {
 	}
 }

-func testForestApplySameOperation(t *testing.T, constructor func(t testing.TB, _ ...Option) Forest, parallel bool) {
+func testForestApplySameOperation(t *testing.T, constructor func(t testing.TB, _ ...Option) ForestStorage, parallel bool) {
 	cid := cidtest.ID()
 	treeID := "version"

@@ -519,6 +525,8 @@ func testForestApplySameOperation(t *testing.T, constructor func(t testing.TB, _
 	t.Run("expected", func(t *testing.T) {
 		s := constructor(t)
+		defer func() { require.NoError(t, s.Close()) }()
+
 		for i := range logs {
 			require.NoError(t, s.TreeApply(ctx, cid, treeID, &logs[i], false))
 		}

@@ -526,6 +534,8 @@ func testForestApplySameOperation(t *testing.T, constructor func(t testing.TB, _
 	})

 	s := constructor(t, WithMaxBatchSize(batchSize))
+	defer func() { require.NoError(t, s.Close()) }()
+
 	require.NoError(t, s.TreeApply(ctx, cid, treeID, &logs[0], false))
 	for i := 0; i < batchSize; i++ {
 		errG.Go(func() error {

@@ -545,7 +555,7 @@ func TestForest_GetOpLog(t *testing.T) {
 	}
 }

-func testForestTreeGetOpLog(t *testing.T, constructor func(t testing.TB, _ ...Option) Forest) {
+func testForestTreeGetOpLog(t *testing.T, constructor func(t testing.TB, _ ...Option) ForestStorage) {
 	cid := cidtest.ID()
 	treeID := "version"
 	logs := []Move{

@@ -565,6 +575,7 @@ func testForestTreeGetOpLog(t *testing.T, constructor func(t testing.TB, _ ...Op
 	}

 	s := constructor(t)
+	defer func() { require.NoError(t, s.Close()) }()

 	t.Run("empty log, no panic", func(t *testing.T) {
 		_, err := s.TreeGetOpLog(context.Background(), cid, treeID, 0)

@@ -603,8 +614,9 @@ func TestForest_TreeExists(t *testing.T) {
 	}
 }

-func testForestTreeExists(t *testing.T, constructor func(t testing.TB, opts ...Option) Forest) {
+func testForestTreeExists(t *testing.T, constructor func(t testing.TB, opts ...Option) ForestStorage) {
 	s := constructor(t)
+	defer func() { require.NoError(t, s.Close()) }()

 	checkExists := func(t *testing.T, expected bool, cid cidSDK.ID, treeID string) {
 		actual, err := s.TreeExists(context.Background(), cid, treeID)

@@ -663,6 +675,8 @@ func TestApplyTricky1(t *testing.T) {
 	for i := range providers {
 		t.Run(providers[i].name, func(t *testing.T) {
 			s := providers[i].construct(t)
+			defer func() { require.NoError(t, s.Close()) }()
+
 			for i := range ops {
 				require.NoError(t, s.TreeApply(context.Background(), cid, treeID, &ops[i], false))
 			}

@@ -724,6 +738,8 @@ func TestApplyTricky2(t *testing.T) {
 	for i := range providers {
 		t.Run(providers[i].name, func(t *testing.T) {
 			s := providers[i].construct(t)
+			defer func() { require.NoError(t, s.Close()) }()
+
 			for i := range ops {
 				require.NoError(t, s.TreeApply(context.Background(), cid, treeID, &ops[i], false))
 			}

@@ -821,7 +837,7 @@ func compareForests(t *testing.T, expected, actual Forest, cid cidSDK.ID, treeID
 	}
 }

-func testForestTreeParallelApply(t *testing.T, constructor func(t testing.TB, _ ...Option) Forest, batchSize, opCount, iterCount int) {
+func testForestTreeParallelApply(t *testing.T, constructor func(t testing.TB, _ ...Option) ForestStorage, batchSize, opCount, iterCount int) {
 	r := mrand.New(mrand.NewSource(42))

 	const nodeCount = 5

@@ -832,6 +848,8 @@ func testForestTreeParallelApply(t *testing.T, constructor func(t testing.TB, _
 	treeID := "version"

 	expected := constructor(t, WithNoSync(true))
+	defer func() { require.NoError(t, expected.Close()) }()
+
 	for i := range ops {
 		require.NoError(t, expected.TreeApply(context.Background(), cid, treeID, &ops[i], false))
 	}

@@ -860,10 +878,11 @@ func testForestTreeParallelApply(t *testing.T, constructor func(t testing.TB, _
 		wg.Wait()

 		compareForests(t, expected, actual, cid, treeID, nodeCount)
+		require.NoError(t, actual.Close())
 	}
 }

-func testForestTreeApplyRandom(t *testing.T, constructor func(t testing.TB, _ ...Option) Forest) {
+func testForestTreeApplyRandom(t *testing.T, constructor func(t testing.TB, _ ...Option) ForestStorage) {
 	r := mrand.New(mrand.NewSource(42))

 	const (

@@ -877,6 +896,8 @@ func testForestTreeApplyRandom(t *testing.T, constructor func(t testing.TB, _ ..
 	treeID := "version"

 	expected := constructor(t, WithNoSync(true))
+	defer func() { require.NoError(t, expected.Close()) }()
+
 	for i := range ops {
 		require.NoError(t, expected.TreeApply(context.Background(), cid, treeID, &ops[i], false))
 	}

@@ -891,6 +912,7 @@ func testForestTreeApplyRandom(t *testing.T, constructor func(t testing.TB, _ ..
 			require.NoError(t, actual.TreeApply(context.Background(), cid, treeID, &ops[i], false))
 		}
 		compareForests(t, expected, actual, cid, treeID, nodeCount)
+		require.NoError(t, actual.Close())
 	}
 }

@@ -908,6 +930,8 @@ func BenchmarkApplySequential(b *testing.B) {
 		b.Run("batchsize="+strconv.Itoa(bs), func(b *testing.B) {
 			r := mrand.New(mrand.NewSource(time.Now().Unix()))
 			s := providers[i].construct(b, WithMaxBatchSize(bs))
+			defer func() { require.NoError(b, s.Close()) }()
+
 			benchmarkApply(b, s, func(opCount int) []Move {
 				ops := make([]Move, opCount)
 				for i := range ops {

@@ -942,6 +966,8 @@ func BenchmarkApplyReorderLast(b *testing.B) {
 		b.Run("batchsize="+strconv.Itoa(bs), func(b *testing.B) {
 			r := mrand.New(mrand.NewSource(time.Now().Unix()))
 			s := providers[i].construct(b, WithMaxBatchSize(bs))
+			defer func() { require.NoError(b, s.Close()) }()
+
 			benchmarkApply(b, s, func(opCount int) []Move {
 				ops := make([]Move, opCount)
 				for i := range ops {

@@ -996,7 +1022,8 @@ func TestTreeGetByPath(t *testing.T) {
 	}
 }

-func testTreeGetByPath(t *testing.T, s Forest) {
+func testTreeGetByPath(t *testing.T, s ForestStorage) {
+	defer func() { require.NoError(t, s.Close()) }()
 	cid := cidtest.ID()
 	treeID := "version"

@@ -1074,7 +1101,9 @@ func TestGetTrees(t *testing.T) {
 	}
 }

-func testTreeGetTrees(t *testing.T, s Forest) {
+func testTreeGetTrees(t *testing.T, s ForestStorage) {
+	defer func() { require.NoError(t, s.Close()) }()
+
 	cids := []cidSDK.ID{cidtest.ID(), cidtest.ID()}
 	d := CIDDescriptor{Position: 0, Size: 1}

@@ -1118,7 +1147,9 @@ func TestTreeLastSyncHeight(t *testing.T) {
 	}
 }

-func testTreeLastSyncHeight(t *testing.T, f Forest) {
+func testTreeLastSyncHeight(t *testing.T, f ForestStorage) {
+	defer func() { require.NoError(t, f.Close()) }()
+
 	cnr := cidtest.ID()
 	treeID := "someTree"
```
```diff
@@ -38,6 +38,7 @@ func TestShard_Delete_BigObject(t *testing.T) {
 func testShard(t *testing.T, hasWriteCache bool, payloadSize int) {
 	sh := newShard(t, hasWriteCache)
+	defer func() { require.NoError(t, sh.Close()) }()

 	cnr := cidtest.ID()

@@ -78,10 +78,7 @@ func Test_ObjectNotFoundIfNotDeletedFromMetabase(t *testing.T) {
 	sh.gcCfg.testHookRemover = func(context.Context) gcRunResult { return gcRunResult{} }
 	require.NoError(t, sh.Open(context.Background()))
 	require.NoError(t, sh.Init(context.Background()))

-	t.Cleanup(func() {
-		require.NoError(t, sh.Close())
-	})
+	defer func() { require.NoError(t, sh.Close()) }()

 	cnr := cidtest.ID()
 	obj := testutil.GenerateObjectWithCID(cnr)

@@ -31,6 +31,7 @@ func Test_GCDropsLockedExpiredSimpleObject(t *testing.T) {
 			return util.NewPseudoWorkerPool() // synchronous event processing
 		})},
 	})
+	defer func() { require.NoError(t, sh.Close()) }()

 	cnr := cidtest.ID()

@@ -127,6 +128,7 @@ func Test_GCDropsLockedExpiredComplexObject(t *testing.T) {
 			return util.NewPseudoWorkerPool() // synchronous event processing
 		})},
 	})
+	defer func() { require.NoError(t, sh.Close()) }()

 	lock := testutil.GenerateObjectWithCID(cnr)
 	lock.SetType(objectSDK.TypeLock)

@@ -32,6 +32,7 @@ func TestShard_Get(t *testing.T) {
 func testShardGet(t *testing.T, hasWriteCache bool) {
 	sh := newShard(t, hasWriteCache)
+	defer func() { require.NoError(t, sh.Close()) }()

 	var putPrm PutPrm
 	var getPrm GetPrm

@@ -30,6 +30,7 @@ func TestShard_Head(t *testing.T) {
 func testShardHead(t *testing.T, hasWriteCache bool) {
 	sh := newShard(t, hasWriteCache)
+	defer func() { require.NoError(t, sh.Close()) }()

 	var putPrm PutPrm
 	var headPrm HeadPrm

@@ -27,6 +27,7 @@ func TestShard_Inhume(t *testing.T) {
 func testShardInhume(t *testing.T, hasWriteCache bool) {
 	sh := newShard(t, hasWriteCache)
+	defer func() { require.NoError(t, sh.Close()) }()

 	cnr := cidtest.ID()

@@ -18,12 +18,14 @@ func TestShard_List(t *testing.T) {
 	t.Run("without write cache", func(t *testing.T) {
 		t.Parallel()
 		sh := newShard(t, false)
+		defer func() { require.NoError(t, sh.Close()) }()
 		testShardList(t, sh)
 	})

 	t.Run("with write cache", func(t *testing.T) {
 		t.Parallel()
 		shWC := newShard(t, true)
+		defer func() { require.NoError(t, shWC.Close()) }()
 		testShardList(t, shWC)
 	})
 }

@@ -61,9 +61,7 @@ func TestShard_Lock(t *testing.T) {
 	require.NoError(t, sh.Open(context.Background()))
 	require.NoError(t, sh.Init(context.Background()))

-	t.Cleanup(func() {
-		releaseShard(sh, t)
-	})
+	defer func() { require.NoError(t, sh.Close()) }()

 	cnr := cidtest.ID()
 	obj := testutil.GenerateObjectWithCID(cnr)

@@ -149,6 +147,7 @@ func TestShard_Lock(t *testing.T) {
 func TestShard_IsLocked(t *testing.T) {
 	sh := newShard(t, false)
+	defer func() { require.NoError(t, sh.Close()) }()

 	cnr := cidtest.ID()
 	obj := testutil.GenerateObjectWithCID(cnr)

@@ -160,6 +160,7 @@ func TestCounters(t *testing.T) {
 	dir := t.TempDir()
 	sh, mm := shardWithMetrics(t, dir)
+	defer func() { require.NoError(t, sh.Close()) }()

 	sh.SetMode(mode.ReadOnly)
 	require.Equal(t, mode.ReadOnly, mm.mode)

@@ -382,10 +383,6 @@ func shardWithMetrics(t *testing.T, path string) (*Shard, *metricsStore) {
 	require.NoError(t, sh.Open(context.Background()))
 	require.NoError(t, sh.Init(context.Background()))

-	t.Cleanup(func() {
-		sh.Close()
-	})
-
 	return sh, mm
 }

@@ -93,6 +93,7 @@ func testShardGetRange(t *testing.T, hasWriteCache bool) {
 		}),
 		},
 	})
+	defer func() { require.NoError(t, sh.Close()) }()

 	for _, tc := range testCases {
 		t.Run(tc.name, func(t *testing.T) {
```
```diff
@@ -57,6 +57,10 @@ func TestShardReload(t *testing.T) {
 	require.NoError(t, sh.Open(context.Background()))
 	require.NoError(t, sh.Init(context.Background()))

+	defer func() {
+		require.NoError(t, sh.Close())
+	}()
+
 	objects := make([]objAddr, 5)
 	for i := range objects {
 		objects[i].obj = newObject()

@@ -30,7 +30,6 @@ func (s epochState) CurrentEpoch() uint64 {
 type shardOptions struct {
 	rootPath    string
-	dontRelease bool
 	wcOpts      []writecache.Option
 	bsOpts      []blobstor.Option
 	metaOptions []meta.Option

@@ -109,13 +108,5 @@ func newCustomShard(t testing.TB, enableWriteCache bool, o shardOptions) *Shard
 	require.NoError(t, sh.Open(context.Background()))
 	require.NoError(t, sh.Init(context.Background()))

-	if !o.dontRelease {
-		t.Cleanup(func() { releaseShard(sh, t) })
-	}
-
 	return sh
 }
-
-func releaseShard(s *Shard, t testing.TB) {
-	require.NoError(t, s.Close())
-}
```
```diff
@@ -39,7 +39,7 @@ func TestWriteCacheObjectLoss(t *testing.T) {
 		writecache.WithMaxObjectSize(smallSize * 2),
 	}

-	sh := newCustomShard(t, true, shardOptions{dontRelease: true, rootPath: dir, wcOpts: wcOpts})
+	sh := newCustomShard(t, true, shardOptions{rootPath: dir, wcOpts: wcOpts})

 	var errG errgroup.Group
 	for i := range objects {

@@ -55,6 +55,7 @@ func TestWriteCacheObjectLoss(t *testing.T) {
 	require.NoError(t, sh.Close())

 	sh = newCustomShard(t, true, shardOptions{rootPath: dir, wcOpts: wcOpts})
+	defer func() { require.NoError(t, sh.Close()) }()

 	var getPrm GetPrm
```
```diff
@@ -28,6 +28,7 @@ func BenchmarkWritecachePar(b *testing.B) {
 func benchmarkPutSeq(b *testing.B, cache writecache.Cache, size uint64) {
 	benchmarkPutPrepare(b, cache)
+	defer func() { require.NoError(b, cache.Close()) }()

 	ctx := context.Background()
 	objGen := testutil.RandObjGenerator{ObjSize: size}

@@ -50,6 +51,7 @@ func benchmarkPutSeq(b *testing.B, cache writecache.Cache, size uint64) {
 func benchmarkPutPar(b *testing.B, cache writecache.Cache, size uint64) {
 	benchmarkPutPrepare(b, cache)
+	defer func() { require.NoError(b, cache.Close()) }()

 	ctx := context.Background()

@@ -75,9 +77,6 @@ func benchmarkPutPar(b *testing.B, cache writecache.Cache, size uint64) {
 func benchmarkPutPrepare(b *testing.B, cache writecache.Cache) {
 	require.NoError(b, cache.Open(context.Background(), false), "opening")
 	require.NoError(b, cache.Init(), "initializing")
-	b.Cleanup(func() {
-		require.NoError(b, cache.Close(), "closing")
-	})
 }

 type testMetabase struct{}
```
```diff
@@ -142,6 +142,7 @@ func runFlushTest[Option any](
 	t.Run("no errors", func(t *testing.T) {
 		wc, bs, mb := newCache(t, createCacheFn, smallSize)
+		defer func() { require.NoError(t, wc.Close()) }()
 		objects := putObjects(t, wc)

 		require.NoError(t, bs.SetMode(mode.ReadWrite))

@@ -154,6 +155,7 @@ func runFlushTest[Option any](
 	t.Run("flush on moving to degraded mode", func(t *testing.T) {
 		wc, bs, mb := newCache(t, createCacheFn, smallSize)
+		defer func() { require.NoError(t, wc.Close()) }()
 		objects := putObjects(t, wc)

 		// Blobstor is read-only, so we expect en error from `flush` here.

@@ -172,6 +174,7 @@ func runFlushTest[Option any](
 		t.Run(f.Desc, func(t *testing.T) {
 			errCountOpt, errCount := errCountOption()
 			wc, bs, mb := newCache(t, createCacheFn, smallSize, errCountOpt)
+			defer func() { require.NoError(t, wc.Close()) }()
 			objects := putObjects(t, wc)
 			f.InjectFn(t, wc)

@@ -214,7 +217,6 @@ func newCache[Option any](
 	require.NoError(t, bs.Init())

 	wc := createCacheFn(t, smallSize, mb, bs, opts...)
-	t.Cleanup(func() { require.NoError(t, wc.Close()) })
 	require.NoError(t, wc.Open(context.Background(), false))
 	require.NoError(t, wc.Init())
```
t.TempDir
will be removed by testing engine.