Fix big object deletion #896

@@ -19,7 +19,7 @@ func TestEpochTimer(t *testing.T) {
 	}

 	args := &epochTimerArgs{
-		l: test.NewLogger(t, true),
+		l: test.NewLogger(t),
 		alphabetState: alphaState,
 		newEpochHandlers: []newEpochHandler{neh.Handle},
 		cnrWrapper: cnrStopper,

@@ -48,7 +48,7 @@ func TestProcessorEmitsGasToNetmapAndAlphabet(t *testing.T) {

 	params := &alphabet.Params{
 		ParsedWallets: parsedWallets,
-		Log: test.NewLogger(t, true),
+		Log: test.NewLogger(t),
 		PoolSize: 2,
 		StorageEmission: emission,
 		IRList: &testIndexer{index: index},

@@ -125,7 +125,7 @@ func TestProcessorEmitsGasToNetmapIfNoParsedWallets(t *testing.T) {

 	params := &alphabet.Params{
 		ParsedWallets: parsedWallets,
-		Log: test.NewLogger(t, true),
+		Log: test.NewLogger(t),
 		PoolSize: 2,
 		StorageEmission: emission,
 		IRList: &testIndexer{index: index},

@@ -186,7 +186,7 @@ func TestProcessorDoesntEmitGasIfNoNetmapOrParsedWallets(t *testing.T) {

 	params := &alphabet.Params{
 		ParsedWallets: parsedWallets,
-		Log: test.NewLogger(t, true),
+		Log: test.NewLogger(t),
 		PoolSize: 2,
 		StorageEmission: emission,
 		IRList: &testIndexer{index: index},

@@ -21,7 +21,7 @@ func TestProcessorCallsFrostFSContractForLockEvent(t *testing.T) {
 	bsc := util.Uint160{100}

 	processor, err := New(&Params{
-		Log: test.NewLogger(t, true),
+		Log: test.NewLogger(t),
 		PoolSize: 2,
 		FrostFSClient: cl,
 		BalanceSC: bsc,

@@ -47,7 +47,7 @@ func TestProcessorDoesntCallFrostFSContractIfNotAlphabet(t *testing.T) {
 	bsc := util.Uint160{100}

 	processor, err := New(&Params{
-		Log: test.NewLogger(t, true),
+		Log: test.NewLogger(t),
 		PoolSize: 2,
 		FrostFSClient: cl,
 		BalanceSC: bsc,

@@ -38,7 +38,7 @@ func TestPutEvent(t *testing.T) {
 	mc := &testMorphClient{}

 	proc, err := New(&Params{
-		Log: test.NewLogger(t, true),
+		Log: test.NewLogger(t),
 		PoolSize: 2,
 		AlphabetState: &testAlphabetState{isAlphabet: true},
 		NetworkState: nst,

@@ -99,7 +99,7 @@ func TestDeleteEvent(t *testing.T) {
 	mc := &testMorphClient{}

 	proc, err := New(&Params{
-		Log: test.NewLogger(t, true),
+		Log: test.NewLogger(t),
 		PoolSize: 2,
 		AlphabetState: &testAlphabetState{isAlphabet: true},
 		NetworkState: nst,

@@ -171,7 +171,7 @@ func TestSetEACLEvent(t *testing.T) {
 	mc := &testMorphClient{}

 	proc, err := New(&Params{
-		Log: test.NewLogger(t, true),
+		Log: test.NewLogger(t),
 		PoolSize: 2,
 		AlphabetState: &testAlphabetState{isAlphabet: true},
 		NetworkState: nst,

@@ -193,7 +193,7 @@ func TestHandleConfig(t *testing.T) {

 func newTestProc(t *testing.T, nonDefault func(p *Params)) (*Processor, error) {
 	p := &Params{
-		Log: test.NewLogger(t, true),
+		Log: test.NewLogger(t),
 		PoolSize: 1,
 		FrostFSContract: util.Uint160{0},
 		BalanceClient: &testBalaceClient{},

@@ -42,7 +42,7 @@ func TestHandleAlphabetSyncEvent(t *testing.T) {

 	proc, err := New(
 		&Params{
-			Log: test.NewLogger(t, true),
+			Log: test.NewLogger(t),
 			EpochState: es,
 			AlphabetState: as,
 			Voter: v,

@@ -123,7 +123,7 @@ func TestHandleAlphabetDesignateEvent(t *testing.T) {

 	proc, err := New(
 		&Params{
-			Log: test.NewLogger(t, true),
+			Log: test.NewLogger(t),
 			EpochState: es,
 			AlphabetState: as,
 			Voter: v,

@@ -279,7 +279,7 @@ func newTestProc(t *testing.T, nonDefault func(p *Params)) (*Processor, error) {
 	eh := &testEventHandler{}

 	p := &Params{
-		Log: test.NewLogger(t, true),
+		Log: test.NewLogger(t),
 		PoolSize: 1,
 		CleanupEnabled: false,
 		CleanupThreshold: 3,

@@ -63,7 +63,7 @@ func TestBlobovnicza(t *testing.T) {
 		WithPath(p),
 		WithObjectSizeLimit(objSizeLim),
 		WithFullSizeLimit(sizeLim),
-		WithLogger(test.NewLogger(t, true)),
+		WithLogger(test.NewLogger(t)),
 	)

 	defer os.Remove(p)

@@ -2,7 +2,6 @@ package blobovnicza

 import (
 	"context"
-	"os"
 	"path/filepath"
 	"testing"

@@ -15,11 +14,7 @@ func TestBlobovnicza_Get(t *testing.T) {
 	filename := filepath.Join(t.TempDir(), "blob")

 	var blz *Blobovnicza
-
-	t.Cleanup(func() {
-		blz.Close()
-		os.RemoveAll(filename)
-	})
+	defer func() { require.NoError(t, blz.Close()) }()

 	fnInit := func(szLimit uint64) {
 		if blz != nil {

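This hunk sets the pattern repeated through most of the diff: cleanup registered via `t.Cleanup`/`b.Cleanup` (which runs only after the test function returns) is replaced with an explicit `defer`, and the `Close` error is now checked. A minimal before/after sketch, where `res` stands for any hypothetical closable resource:

	// Before: Close runs in the cleanup phase, after the test body has
	// already returned, and its error is silently dropped.
	t.Cleanup(func() { _ = res.Close() })

	// After: Close runs, and is asserted, before the test function returns.
	defer func() { require.NoError(t, res.Close()) }()
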
@@ -17,16 +17,16 @@ func TestBlobovniczaTree_Concurrency(t *testing.T) {
 	const n = 1000

 	st := NewBlobovniczaTree(
-		WithLogger(test.NewLogger(t, true)),
+		WithLogger(test.NewLogger(t)),
 		WithObjectSizeLimit(1024),
 		WithBlobovniczaShallowWidth(10),
 		WithBlobovniczaShallowDepth(1),
 		WithRootPath(t.TempDir()))
 	require.NoError(t, st.Open(false))
 	require.NoError(t, st.Init())
-	t.Cleanup(func() {
+	defer func() {
 		require.NoError(t, st.Close())
-	})
+	}()

 	objGen := &testutil.SeqObjGenerator{ObjSize: 1}

@@ -17,7 +17,7 @@ import (
 func TestExistsInvalidStorageID(t *testing.T) {
 	dir := t.TempDir()
 	b := NewBlobovniczaTree(
-		WithLogger(test.NewLogger(t, true)),
+		WithLogger(test.NewLogger(t)),
 		WithObjectSizeLimit(1024),
 		WithBlobovniczaShallowWidth(2),
 		WithBlobovniczaShallowDepth(2),

@@ -25,7 +25,7 @@ func TestExistsInvalidStorageID(t *testing.T) {
 		WithBlobovniczaSize(1<<20))
 	require.NoError(t, b.Open(false))
 	require.NoError(t, b.Init())
-	t.Cleanup(func() { _ = b.Close() })
+	defer func() { require.NoError(t, b.Close()) }()

 	obj := blobstortest.NewObject(1024)
 	addr := object.AddressOf(obj)

@@ -13,7 +13,7 @@ func TestGeneric(t *testing.T) {

 	helper := func(t *testing.T, dir string) common.Storage {
 		return NewBlobovniczaTree(
-			WithLogger(test.NewLogger(t, true)),
+			WithLogger(test.NewLogger(t)),
 			WithObjectSizeLimit(maxObjectSize),
 			WithBlobovniczaShallowWidth(2),
 			WithBlobovniczaShallowDepth(2),

@@ -40,7 +40,7 @@ func TestControl(t *testing.T) {

 	newTree := func(t *testing.T) common.Storage {
 		return NewBlobovniczaTree(
-			WithLogger(test.NewLogger(t, true)),
+			WithLogger(test.NewLogger(t)),
 			WithObjectSizeLimit(maxObjectSize),
 			WithBlobovniczaShallowWidth(2),
 			WithBlobovniczaShallowDepth(2),

@@ -129,7 +129,7 @@ func testRebuildFailoverObjectDeletedFromSource(t *testing.T) {

 func testRebuildFailoverValidate(t *testing.T, dir string, obj *objectSDK.Object, mustUpdateStorageID bool) {
 	b := NewBlobovniczaTree(
-		WithLogger(test.NewLogger(t, true)),
+		WithLogger(test.NewLogger(t)),
 		WithObjectSizeLimit(2048),
 		WithBlobovniczaShallowWidth(2),
 		WithBlobovniczaShallowDepth(2),

@@ -41,7 +41,7 @@ func TestBlobovniczaTreeRebuild(t *testing.T) {
 func testBlobovniczaTreeRebuildHelper(t *testing.T, sourceDepth, sourceWidth, targetDepth, targetWidth uint64, shouldMigrate bool) {
 	dir := t.TempDir()
 	b := NewBlobovniczaTree(
-		WithLogger(test.NewLogger(t, true)),
+		WithLogger(test.NewLogger(t)),
 		WithObjectSizeLimit(2048),
 		WithBlobovniczaShallowWidth(sourceWidth),
 		WithBlobovniczaShallowDepth(sourceDepth),

@@ -81,7 +81,7 @@ func testBlobovniczaTreeRebuildHelper(t *testing.T, sourceDepth, sourceWidth, ta
 	require.NoError(t, b.Close())

 	b = NewBlobovniczaTree(
-		WithLogger(test.NewLogger(t, true)),
+		WithLogger(test.NewLogger(t)),
 		WithObjectSizeLimit(2048),
 		WithBlobovniczaShallowWidth(targetWidth),
 		WithBlobovniczaShallowDepth(targetDepth),

@@ -14,7 +14,7 @@ func TestDelete(t *testing.T, cons Constructor, min, max uint64) {
 	s := cons(t)
 	require.NoError(t, s.Open(false))
 	require.NoError(t, s.Init())
-	t.Cleanup(func() { require.NoError(t, s.Close()) })
+	defer func() { require.NoError(t, s.Close()) }()

 	objects := prepare(t, 4, s, min, max)

@@ -13,7 +13,7 @@ func TestExists(t *testing.T, cons Constructor, min, max uint64) {
 	s := cons(t)
 	require.NoError(t, s.Open(false))
 	require.NoError(t, s.Init())
-	t.Cleanup(func() { require.NoError(t, s.Close()) })
+	defer func() { require.NoError(t, s.Close()) }()

 	objects := prepare(t, 1, s, min, max)

@@ -14,7 +14,7 @@ func TestGet(t *testing.T, cons Constructor, min, max uint64) {
 	s := cons(t)
 	require.NoError(t, s.Open(false))
 	require.NoError(t, s.Init())
-	t.Cleanup(func() { require.NoError(t, s.Close()) })
+	defer func() { require.NoError(t, s.Close()) }()

 	objects := prepare(t, 2, s, min, max)

@@ -16,7 +16,7 @@ func TestGetRange(t *testing.T, cons Constructor, min, max uint64) {
 	s := cons(t)
 	require.NoError(t, s.Open(false))
 	require.NoError(t, s.Init())
-	t.Cleanup(func() { require.NoError(t, s.Close()) })
+	defer func() { require.NoError(t, s.Close()) }()

 	objects := prepare(t, 1, s, min, max)

@@ -13,7 +13,7 @@ func TestIterate(t *testing.T, cons Constructor, min, max uint64) {
 	s := cons(t)
 	require.NoError(t, s.Open(false))
 	require.NoError(t, s.Init())
-	t.Cleanup(func() { require.NoError(t, s.Close()) })
+	defer func() { require.NoError(t, s.Close()) }()

 	objects := prepare(t, 10, s, min, max)

@@ -15,9 +15,9 @@ import (
 func TestSimpleLifecycle(t *testing.T) {
 	s := New(
 		WithRootPath("memstore"),
-		WithLogger(test.NewLogger(t, true)),
+		WithLogger(test.NewLogger(t)),
 	)
-	t.Cleanup(func() { _ = s.Close() })
+	defer func() { require.NoError(t, s.Close()) }()
 	require.NoError(t, s.Open(false))
 	require.NoError(t, s.Init())

@@ -25,10 +25,6 @@ func (s storage) open(b *testing.B) common.Storage {
 	require.NoError(b, st.Open(false))
 	require.NoError(b, st.Init())

-	b.Cleanup(func() {
-		require.NoError(b, st.Close())
-	})
-
 	return st
 }

dstepanov-yadro commented:
    `Close` call moved to caller.

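A sketch of the ownership change that comment describes, assuming `open` keeps its role of opening and initializing the storage; the constructor call and benchmark name are illustrative, not from the diff:

	func (s storage) open(b *testing.B) common.Storage {
	    st := s.create(b.TempDir()) // hypothetical constructor
	    require.NoError(b, st.Open(false))
	    require.NoError(b, st.Init())
	    return st // no b.Cleanup here: the caller now owns Close
	}

	func BenchmarkExample(b *testing.B) {
	    st := stEntry.open(b)
	    defer func() { require.NoError(b, st.Close()) }() // explicit, checked teardown
	    // ... benchmark body ...
	}
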
@@ -108,6 +104,7 @@ func BenchmarkSubstorageReadPerf(b *testing.B) {
 		b.Run(fmt.Sprintf("%s-%s", stEntry.desc, tt.desc), func(b *testing.B) {
 			objGen := tt.objGen()
 			st := stEntry.open(b)
+			defer func() { require.NoError(b, st.Close()) }()

 			// Fill database
 			var errG errgroup.Group

@@ -162,6 +159,7 @@ func BenchmarkSubstorageWritePerf(b *testing.B) {
 		b.Run(fmt.Sprintf("%s-%s", stEntry.desc, genEntry.desc), func(b *testing.B) {
 			gen := genEntry.create()
 			st := stEntry.open(b)
+			defer func() { require.NoError(b, st.Close()) }()

 			b.ResetTimer()
 			b.RunParallel(func(pb *testing.PB) {

@@ -200,6 +198,7 @@ func BenchmarkSubstorageIteratePerf(b *testing.B) {
 		b.Run(fmt.Sprintf("%s-%s", stEntry.desc, tt.desc), func(b *testing.B) {
 			objGen := tt.objGen()
 			st := stEntry.open(b)
+			defer func() { require.NoError(b, st.Close()) }()

 			// Fill database
 			for i := 0; i < tt.size; i++ {

@@ -49,7 +49,7 @@ func TestInitializationFailure(t *testing.T) {

 		return []shard.Option{
 			shard.WithID(sid),
-			shard.WithLogger(test.NewLogger(t, true)),
+			shard.WithLogger(test.NewLogger(t)),
 			shard.WithBlobStorOptions(
 				blobstor.WithStorages(storages)),
 			shard.WithMetaBaseOptions(

@@ -122,6 +122,9 @@ func testEngineFailInitAndReload(t *testing.T, errOnAdd bool, opts []shard.Optio
 	var configID string

 	e := New()
+	defer func() {
+		require.NoError(t, e.Close(context.Background()))
+	}()
 	_, err := e.AddShard(context.Background(), opts...)
 	if errOnAdd {
 		require.Error(t, err)

@@ -258,6 +261,8 @@ func TestReload(t *testing.T) {

 		require.Equal(t, shardNum+1, len(e.shards))
 		require.Equal(t, shardNum+1, len(e.shardPools))
+
+		require.NoError(t, e.Close(context.Background()))
 	})

 	t.Run("remove shards", func(t *testing.T) {

@@ -276,6 +281,8 @@ func TestReload(t *testing.T) {
 		// removed one
 		require.Equal(t, shardNum-1, len(e.shards))
 		require.Equal(t, shardNum-1, len(e.shardPools))
+
+		require.NoError(t, e.Close(context.Background()))
 	})
 }

@@ -289,9 +296,9 @@ func engineWithShards(t *testing.T, path string, num int) (*StorageEngine, []str
 	te := testNewEngine(t).
 		setShardsNumOpts(t, num, func(id int) []shard.Option {
 			return []shard.Option{
-				shard.WithLogger(test.NewLogger(t, true)),
+				shard.WithLogger(test.NewLogger(t)),
 				shard.WithBlobStorOptions(
-					blobstor.WithStorages(newStorages(filepath.Join(addPath, strconv.Itoa(id)), errSmallSize))),
+					blobstor.WithStorages(newStorages(t, filepath.Join(addPath, strconv.Itoa(id)), errSmallSize))),
 				shard.WithMetaBaseOptions(
 					meta.WithPath(filepath.Join(addPath, fmt.Sprintf("%d.metabase", id))),
 					meta.WithPermissions(0o700),

@@ -6,6 +6,7 @@ import (

 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
+	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
 	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
 	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
 	cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"

@@ -53,7 +54,7 @@ func TestDeleteBigObject(t *testing.T) {
 	s3 := testNewShard(t, 3)

 	e := testNewEngine(t).setInitializedShards(t, s1, s2, s3).engine
-	e.log = test.NewLogger(t, true)
+	e.log = test.NewLogger(t)
 	defer e.Close(context.Background())

 	for i := range children {

@@ -85,6 +86,97 @@ func TestDeleteBigObject(t *testing.T) {
 	}
 }

+func TestDeleteBigObjectWithoutGC(t *testing.T) {
+	t.Parallel()
+
+	cnr := cidtest.ID()
+	parentID := oidtest.ID()
+	splitID := objectSDK.NewSplitID()
+
+	parent := testutil.GenerateObjectWithCID(cnr)
+	parent.SetID(parentID)
+	parent.SetPayload(nil)
+
+	const childCount = 3
+	children := make([]*objectSDK.Object, childCount)
+	childIDs := make([]oid.ID, childCount)
+	for i := range children {
+		children[i] = testutil.GenerateObjectWithCID(cnr)
+		if i != 0 {
+			children[i].SetPreviousID(childIDs[i-1])
+		}
+		if i == len(children)-1 {
+			children[i].SetParent(parent)
+		}
+		children[i].SetSplitID(splitID)
+		children[i].SetPayload([]byte{byte(i), byte(i + 1), byte(i + 2)})
+		childIDs[i], _ = children[i].ID()
+	}
+
+	link := testutil.GenerateObjectWithCID(cnr)
+	link.SetParent(parent)
+	link.SetParentID(parentID)
+	link.SetSplitID(splitID)
+	link.SetChildren(childIDs...)
+
+	s1 := testNewShard(t, 1, shard.WithDisabledGC())
+
+	e := testNewEngine(t).setInitializedShards(t, s1).engine
+	e.log = test.NewLogger(t)
+	defer e.Close(context.Background())
+
+	for i := range children {
+		require.NoError(t, Put(context.Background(), e, children[i]))
+	}
+	require.NoError(t, Put(context.Background(), e, link))
+
+	addrParent := object.AddressOf(parent)
+	checkGetError[*objectSDK.SplitInfoError](t, e, addrParent, true)
+
+	addrLink := object.AddressOf(link)
+	checkGetError[error](t, e, addrLink, false)
+
+	for i := range children {
+		checkGetError[error](t, e, object.AddressOf(children[i]), false)
+	}
+
+	// delete logical
+	var deletePrm DeletePrm
+	deletePrm.WithForceRemoval()
+	deletePrm.WithAddress(addrParent)
+
+	_, err := e.Delete(context.Background(), deletePrm)
+	require.NoError(t, err)
+
+	checkGetError[*apistatus.ObjectNotFound](t, e, addrParent, true)
+	checkGetError[*apistatus.ObjectNotFound](t, e, addrLink, true)
+	for i := range children {
+		checkGetError[*apistatus.ObjectNotFound](t, e, object.AddressOf(children[i]), true)
+	}
+
+	// delete physical
+	var delPrm shard.DeletePrm
+	delPrm.SetAddresses(addrParent)
+	_, err = s1.Delete(context.Background(), delPrm)
+	require.NoError(t, err)
+
+	delPrm.SetAddresses(addrLink)
+	_, err = s1.Delete(context.Background(), delPrm)
+	require.NoError(t, err)
+
+	for i := range children {
+		delPrm.SetAddresses(object.AddressOf(children[i]))
+		_, err = s1.Delete(context.Background(), delPrm)
+		require.NoError(t, err)
+	}
+
+	checkGetError[*apistatus.ObjectNotFound](t, e, addrParent, true)
+	checkGetError[*apistatus.ObjectNotFound](t, e, addrLink, true)
+	for i := range children {
+		checkGetError[*apistatus.ObjectNotFound](t, e, object.AddressOf(children[i]), true)
+	}
+}
+
 func checkGetError[E error](t *testing.T, e *StorageEngine, addr oid.Address, shouldFail bool) {
 	var getPrm GetPrm
 	getPrm.WithAddress(addr)

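For orientation, the split hierarchy assembled by `TestDeleteBigObjectWithoutGC` looks roughly like this (a sketch derived from the test body above, not part of the diff):

	parent                              // virtual: payload nil, never stored directly
	|- child[0] <- child[1] <- child[2] // SetPreviousID chain; child[2] also SetParent(parent)
	`- link                             // SetChildren(childIDs...), SetParentID(parentID)

All objects share one `SplitID`, so a Get on the parent address yields `*objectSDK.SplitInfoError` while the chain exists, which is exactly what the test asserts before and after both logical and physical deletion, with GC disabled so nothing is cleaned up behind the engine's back.
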
@@ -2,7 +2,6 @@ package engine

 import (
 	"context"
-	"os"
 	"path/filepath"
 	"sync/atomic"
 	"testing"

@@ -49,10 +48,7 @@ func benchmarkExists(b *testing.B, shardNum int) {
 	}

 	e := testNewEngine(b).setInitializedShards(b, shards...).engine
-	b.Cleanup(func() {
-		_ = e.Close(context.Background())
-		_ = os.RemoveAll(b.Name())
-	})
+	defer func() { require.NoError(b, e.Close(context.Background())) }()

 	addr := oidtest.Address()
 	for i := 0; i < 100; i++ {

@@ -79,7 +75,7 @@ type testEngineWrapper struct {
 }

 func testNewEngine(t testing.TB, opts ...Option) *testEngineWrapper {
-	engine := New(WithLogger(test.NewLogger(t, true)))
+	engine := New(WithLogger(test.NewLogger(t)))
 	for _, opt := range opts {
 		opt(engine.cfg)
 	}

@@ -137,14 +133,15 @@ func (te *testEngineWrapper) setShardsNumAdditionalOpts(t testing.TB, num int, s
 	return te
 }

-func newStorages(root string, smallSize uint64) []blobstor.SubStorage {
+func newStorages(t testing.TB, root string, smallSize uint64) []blobstor.SubStorage {
 	return []blobstor.SubStorage{
 		{
 			Storage: blobovniczatree.NewBlobovniczaTree(
 				blobovniczatree.WithRootPath(filepath.Join(root, "blobovnicza")),
 				blobovniczatree.WithBlobovniczaShallowDepth(1),
 				blobovniczatree.WithBlobovniczaShallowWidth(1),
-				blobovniczatree.WithPermissions(0o700)),
+				blobovniczatree.WithPermissions(0o700),
+				blobovniczatree.WithLogger(test.NewLogger(t))),
 			Policy: func(_ *objectSDK.Object, data []byte) bool {
 				return uint64(len(data)) < smallSize
 			},

@@ -152,7 +149,8 @@ func newStorages(root string, smallSize uint64) []blobstor.SubStorage {
 		{
 			Storage: fstree.New(
 				fstree.WithPath(root),
-				fstree.WithDepth(1)),
+				fstree.WithDepth(1),
+				fstree.WithLogger(test.NewLogger(t))),
 		},
 	}
 }

@@ -183,12 +181,12 @@ func newTestStorages(root string, smallSize uint64) ([]blobstor.SubStorage, *tes
 	}, smallFileStorage, largeFileStorage
 }

-func testNewShard(t testing.TB, id int) *shard.Shard {
+func testNewShard(t testing.TB, id int, opts ...shard.Option) *shard.Shard {
 	sid, err := generateShardID()
 	require.NoError(t, err)

 	shardOpts := append([]shard.Option{shard.WithID(sid)}, testDefaultShardOptions(t, id)...)
-	s := shard.New(shardOpts...)
+	s := shard.New(append(shardOpts, opts...)...)

 	require.NoError(t, s.Open(context.Background()))
 	require.NoError(t, s.Init(context.Background()))

@@ -198,15 +196,18 @@ func testNewShard(t testing.TB, id int) *shard.Shard {

 func testDefaultShardOptions(t testing.TB, id int) []shard.Option {
 	return []shard.Option{
-		shard.WithLogger(test.NewLogger(t, true)),
+		shard.WithLogger(test.NewLogger(t)),
 		shard.WithBlobStorOptions(
 			blobstor.WithStorages(
-				newStorages(t.TempDir(), 1<<20))),
+				newStorages(t, t.TempDir(), 1<<20)),
+			blobstor.WithLogger(test.NewLogger(t)),
+		),
 		shard.WithPiloramaOptions(pilorama.WithPath(filepath.Join(t.TempDir(), "pilorama"))),
 		shard.WithMetaBaseOptions(
 			meta.WithPath(filepath.Join(t.TempDir(), "metabase")),
 			meta.WithPermissions(0o700),
 			meta.WithEpochState(epochState{}),
+			meta.WithLogger(test.NewLogger(t)),
 		),
 	}
 }

@@ -55,7 +55,7 @@ func newEngineWithErrorThreshold(t testing.TB, dir string, errThreshold uint32)
 			largeFileStorage: largeFileStorage,
 		}
 		return []shard.Option{
-			shard.WithLogger(test.NewLogger(t, true)),
+			shard.WithLogger(test.NewLogger(t)),
 			shard.WithBlobStorOptions(blobstor.WithStorages(storages)),
 			shard.WithMetaBaseOptions(
 				meta.WithPath(filepath.Join(dir, fmt.Sprintf("%d.metabase", id))),

@@ -114,6 +114,7 @@ func TestErrorReporting(t *testing.T) {
 			checkShardState(t, te.ng, te.shards[0].id, i, mode.ReadWrite)
 			checkShardState(t, te.ng, te.shards[1].id, 0, mode.ReadWrite)
 		}
+		require.NoError(t, te.ng.Close(context.Background()))
 	})
 	t.Run("with error threshold", func(t *testing.T) {
 		const errThreshold = 3

@@ -161,6 +162,7 @@ func TestErrorReporting(t *testing.T) {

 		require.NoError(t, te.ng.SetShardMode(te.shards[0].id, mode.ReadWrite, true))
 		checkShardState(t, te.ng, te.shards[0].id, 0, mode.ReadWrite)
+		require.NoError(t, te.ng.Close(context.Background()))
 	})
 }

@@ -30,7 +30,7 @@ func newEngineEvacuate(t *testing.T, shardNum int, objPerShard int) (*StorageEng
 	te := testNewEngine(t).
 		setShardsNumOpts(t, shardNum, func(id int) []shard.Option {
 			return []shard.Option{
-				shard.WithLogger(test.NewLogger(t, true)),
+				shard.WithLogger(test.NewLogger(t)),
 				shard.WithBlobStorOptions(
 					blobstor.WithStorages([]blobstor.SubStorage{{
 						Storage: fstree.New(

@@ -65,10 +65,10 @@ func TestListWithCursor(t *testing.T) {
 			t.Parallel()
 			e := testNewEngine(t).setShardsNumOpts(t, tt.shardNum, func(id int) []shard.Option {
 				return []shard.Option{
-					shard.WithLogger(test.NewLogger(t, true)),
+					shard.WithLogger(test.NewLogger(t)),
 					shard.WithBlobStorOptions(
 						blobstor.WithStorages(
-							newStorages(t.TempDir(), 1<<20))),
+							newStorages(t, t.TempDir(), 1<<20))),
 					shard.WithPiloramaOptions(pilorama.WithPath(filepath.Join(t.TempDir(), "pilorama"))),
 					shard.WithMetaBaseOptions(
 						meta.WithPath(filepath.Join(t.TempDir(), "metabase")),

@@ -80,9 +80,9 @@ func TestListWithCursor(t *testing.T) {
 			require.NoError(t, e.Open(context.Background()))
 			require.NoError(t, e.Init(context.Background()))

-			t.Cleanup(func() {
-				e.Close(context.Background())
-			})
+			defer func() {
+				require.NoError(t, e.Close(context.Background()))
+			}()

 			expected := make([]object.AddressWithType, 0, tt.objectNum)
 			got := make([]object.AddressWithType, 0, tt.objectNum)

dstepanov-yadro commented:
    https://github.com/golang/go/issues/40908

fyrchik commented:
    This is exactly why we got rid of `zaptest` and used `zap.L()`: these problems occurred all over the tests (and writing `t.Cleanup` in the constructor is much easier than remembering to write it everywhere else).

fyrchik commented:
    https://git.frostfs.info/TrueCloudLab/frostfs-node/pulls/621

dstepanov-yadro commented:
    There was only one place in the entire project that needed to be fixed. But clear logs make sense for all tests.

fyrchik commented:
    Debatable: we do not need logs at all, and when debugging, usually a single test can be run.
    Actually, I see lots of `Cleanup` with `Close` inside; they could trigger the race detector later at some point.
    I don't like returning behaviour which clearly has problems and which we have intentionally fixed at some point.

dstepanov-yadro commented:
    I disagree with the statement `we do not need logs at all`. For several tasks already, I have needed proper logs of failing tests.

fyrchik commented:
    Ok, but reverting a fix to a real problem is not the right approach here.

dstepanov-yadro commented:
    Now there is no problem: the race condition with `t.Cleanup` applies only to the engine after `Init()`.
    Looks like it was an inappropriate fix.

dstepanov-yadro commented:
    Also see this comment (testing.go:1580):
    ```
    // Do not lock t.done to allow race detector to detect race in case
    // the user does not appropriately synchronize a goroutine.
    ```
    As far as I understand, `Cleanup` requires that all test background goroutines be stopped. So using `Cleanup` for `engine.Close` is invalid usage.

fyrchik commented:
    I am not sure the problem is gone now. The problem is that:
    1. In the logger we read the `done` field: https://github.com/golang/go/blob/cc85462b3d23193e4861813ea85e254cfe372403/src/testing/testing.go#L1017
    2. `done` is written to intentionally without a mutex in https://github.com/golang/go/blob/cc85462b3d23193e4861813ea85e254cfe372403/src/testing/testing.go#L1580

    If you do `rg 'Cleanup\(' -A4` over the codebase, there are multiple calls to `releaseShard` in `Cleanup` (and to writecache etc.), because we currently use `Cleanup()` in tests. Are you _sure_ there are no goroutines in `Shard` which can log and run until `Close()` is called? In the writecache?
    Or here, is it different from the `list_test.go` situation: https://git.frostfs.info/TrueCloudLab/frostfs-node/src/commit/cbc78a8efb72c40c7e39cccdc0aed4dc387fb053/pkg/local_object_storage/engine/shards_test.go#L16 ?
    I would rather see a discussion first.

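To make the disagreement concrete, here is a minimal sketch of the hazard both sides are arguing about; `startComponent` is a hypothetical constructor that spawns goroutines logging through the test-bound logger. The only point is when those goroutines stop relative to the test's completion:

	func TestLoggerLifetime(t *testing.T) {
	    c := startComponent(test.NewLogger(t)) // goroutines log via t.Log

	    // Risky: Cleanup callbacks run after the test function returns, so a
	    // component goroutine may still hit t.Log around test completion
	    // (the unsynchronized read/write of t.done from golang/go#40908).
	    // t.Cleanup(func() { _ = c.Close() })

	    // Safer: the component is fully stopped before the test body returns.
	    defer func() { require.NoError(t, c.Close()) }()
	}
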
@@ -62,9 +62,7 @@ func TestLockUserScenario(t *testing.T) {
 	require.NoError(t, e.Open(context.Background()))
 	require.NoError(t, e.Init(context.Background()))

-	t.Cleanup(func() {
-		_ = e.Close(context.Background())
-	})
+	defer func() { require.NoError(t, e.Close(context.Background())) }()

 	lockerID := oidtest.ID()
 	tombID := oidtest.ID()

@@ -169,9 +167,7 @@ func TestLockExpiration(t *testing.T) {
 	require.NoError(t, e.Open(context.Background()))
 	require.NoError(t, e.Init(context.Background()))

-	t.Cleanup(func() {
-		_ = e.Close(context.Background())
-	})
+	defer func() { require.NoError(t, e.Close(context.Background())) }()

 	const lockerExpiresAfter = 13

@@ -246,9 +242,7 @@ func TestLockForceRemoval(t *testing.T) {
 	}).engine
 	require.NoError(t, e.Open(context.Background()))
 	require.NoError(t, e.Init(context.Background()))
-	t.Cleanup(func() {
-		_ = e.Close(context.Background())
-	})
+	defer func() { require.NoError(t, e.Close(context.Background())) }()

 	cnr := cidtest.ID()
 	var err error

@@ -12,9 +12,7 @@ func TestRemoveShard(t *testing.T) {

 	te := testNewEngine(t).setShardsNum(t, numOfShards)
 	e, ids := te.engine, te.shardIDs
-	t.Cleanup(func() {
-		e.Close(context.Background())
-	})
+	defer func() { require.NoError(t, e.Close(context.Background())) }()

 	require.Equal(t, numOfShards, len(e.shardPools))
 	require.Equal(t, numOfShards, len(e.shards))

@@ -19,6 +19,7 @@ func TestDB_Containers(t *testing.T) {
 	t.Parallel()

 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()

 	const N = 10

@@ -96,6 +97,7 @@ func TestDB_ContainersCount(t *testing.T) {
 	t.Parallel()

 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()

 	const R, T, SG, L = 10, 11, 12, 13 // amount of object per type

@@ -141,6 +143,7 @@ func TestDB_ContainerSize(t *testing.T) {
 	t.Parallel()

 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()

 	const (
 		C = 3

@@ -15,6 +15,7 @@ import (

 func TestReset(t *testing.T) {
 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()

 	err := db.Reset()
 	require.NoError(t, err)

@@ -22,6 +22,7 @@ func TestCounters(t *testing.T) {
 	t.Run("defaults", func(t *testing.T) {
 		t.Parallel()
 		db := newDB(t)
+		defer func() { require.NoError(t, db.Close()) }()
 		c, err := db.ObjectCounters()
 		require.NoError(t, err)
 		require.Zero(t, c.Phy)

@@ -36,6 +37,7 @@ func TestCounters(t *testing.T) {
 	t.Run("put", func(t *testing.T) {
 		t.Parallel()
 		db := newDB(t)
+		defer func() { require.NoError(t, db.Close()) }()
 		oo := make([]*objectSDK.Object, 0, objCount)
 		for i := 0; i < objCount; i++ {
 			oo = append(oo, testutil.GenerateObject())

@@ -73,6 +75,7 @@ func TestCounters(t *testing.T) {
 	t.Run("delete", func(t *testing.T) {
 		t.Parallel()
 		db := newDB(t)
+		defer func() { require.NoError(t, db.Close()) }()
 		oo := putObjs(t, db, objCount, false)

 		exp := make(map[cid.ID]meta.ObjectCounters)

@@ -117,6 +120,7 @@ func TestCounters(t *testing.T) {
 	t.Run("inhume", func(t *testing.T) {
 		t.Parallel()
 		db := newDB(t)
+		defer func() { require.NoError(t, db.Close()) }()
 		oo := putObjs(t, db, objCount, false)

 		exp := make(map[cid.ID]meta.ObjectCounters)

@@ -176,6 +180,7 @@ func TestCounters(t *testing.T) {
 	t.Run("put_split", func(t *testing.T) {
 		t.Parallel()
 		db := newDB(t)
+		defer func() { require.NoError(t, db.Close()) }()
 		parObj := testutil.GenerateObject()

 		exp := make(map[cid.ID]meta.ObjectCounters)

@@ -213,6 +218,7 @@ func TestCounters(t *testing.T) {
 	t.Run("delete_split", func(t *testing.T) {
 		t.Parallel()
 		db := newDB(t)
+		defer func() { require.NoError(t, db.Close()) }()
 		oo := putObjs(t, db, objCount, true)

 		exp := make(map[cid.ID]meta.ObjectCounters)

@@ -254,6 +260,7 @@ func TestCounters(t *testing.T) {
 	t.Run("inhume_split", func(t *testing.T) {
 		t.Parallel()
 		db := newDB(t)
+		defer func() { require.NoError(t, db.Close()) }()
 		oo := putObjs(t, db, objCount, true)

 		exp := make(map[cid.ID]meta.ObjectCounters)

@@ -321,6 +328,7 @@ func TestCounters_Expired(t *testing.T) {

 	es := &epochState{epoch}
 	db := newDB(t, meta.WithEpochState(es))
+	defer func() { require.NoError(t, db.Close()) }()

 	oo := make([]oid.Address, objCount)
 	for i := range oo {

@@ -2,7 +2,6 @@ package meta_test

 import (
 	"context"
-	"os"
 	"path/filepath"
 	"strconv"
 	"testing"

@@ -53,11 +52,6 @@ func newDB(t testing.TB, opts ...meta.Option) *meta.DB {
 	require.NoError(t, bdb.Open(context.Background(), false))
 	require.NoError(t, bdb.Init())

-	t.Cleanup(func() {
-		bdb.Close()
-		os.Remove(bdb.DumpInfo().Path)
-	})
-
 	return bdb
 }

@@ -251,26 +251,27 @@ func (db *DB) delete(tx *bbolt.Tx, addr oid.Address, refCounter referenceCounter

 	removeAvailableObject := inGraveyardWithKey(addrKey, graveyardBKT, garbageBKT) == 0

-	// remove record from the garbage bucket
-	if garbageBKT != nil {
-		err := garbageBKT.Delete(addrKey)
-		if err != nil {
-			return deleteSingleResult{}, fmt.Errorf("could not remove from garbage bucket: %w", err)
-		}
-	}
-
 	// unmarshal object, work only with physically stored (raw == true) objects
 	obj, err := db.get(tx, addr, key, false, true, currEpoch)
 	if err != nil {
 		var siErr *objectSDK.SplitInfoError

 		if client.IsErrObjectNotFound(err) || errors.As(err, &siErr) {
 			// if object is virtual (parent) then do nothing, it will be deleted with last child
 			return deleteSingleResult{}, nil
 		}

 		return deleteSingleResult{}, err
 	}

+	// remove record from the garbage bucket
+	if garbageBKT != nil {
+		err := garbageBKT.Delete(addrKey)
+		if err != nil {
+			return deleteSingleResult{}, fmt.Errorf("could not remove from garbage bucket: %w", err)
+		}
+	}
+
 	// if object is an only link to a parent, then remove parent
 	if parent := obj.Parent(); parent != nil {
 		parAddr := object.AddressOf(parent)

@@ -327,6 +328,19 @@ func (db *DB) deleteObject(
 		return fmt.Errorf("can't remove fake bucket tree indexes: %w", err)
 	}

+	if isParent {
+		// remove record from the garbage bucket, because regular object deletion does nothing for virtual object
+		garbageBKT := tx.Bucket(garbageBucketName)
+		if garbageBKT != nil {
+			key := make([]byte, addressKeySize)
+			addrKey := addressKey(object.AddressOf(obj), key)
+			err := garbageBKT.Delete(addrKey)
+			if err != nil {
+				return fmt.Errorf("could not remove from garbage bucket: %w", err)
+			}
+		}
+	}
+
 	return nil
 }

acid-ant marked this conversation as resolved (outdated).

acid-ant commented:
    Why not check it earlier?

dstepanov-yadro commented:
    Fixed.

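Read together, the two hunks above change the order of operations in metabase deletion: the object is resolved before the garbage bucket is touched, only a physically stored object clears its own garbage record, and `deleteObject` now clears the record for a virtual parent when the last child takes it down. A condensed paraphrase, not the verbatim function; `get` and `isNotFoundOrSplitInfo` are illustrative helper names:

	func deleteOne(tx *bbolt.Tx, addr oid.Address) error {
	    obj, err := get(tx, addr) // resolve first, before touching the garbage bucket
	    if err != nil {
	        if isNotFoundOrSplitInfo(err) {
	            // Virtual parent: keep its garbage record; it is removed
	            // together with (and by) the deletion of the last child.
	            return nil
	        }
	        return err
	    }
	    // Physically stored object: only now is it safe to drop the record.
	    if g := tx.Bucket(garbageBucketName); g != nil {
	        key := make([]byte, addressKeySize)
	        if err := g.Delete(addressKey(addr, key)); err != nil {
	            return fmt.Errorf("could not remove from garbage bucket: %w", err)
	        }
	    }
	    _ = obj // parent/children handling continues as in the diff
	    return nil
	}

Under the old order, the garbage record was deleted before the lookup, so a force-removed big object left stale state behind: the record for the virtual parent vanished while the parent itself was never physically deleted, which is the bug the new `TestDeleteBigObjectWithoutGC` pins down.
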
@@ -18,6 +18,7 @@ import (

 func TestDB_Delete(t *testing.T) {
 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()

 	cnr := cidtest.ID()
 	parent := testutil.GenerateObjectWithCID(cnr)

@@ -78,6 +79,7 @@ func TestDB_Delete(t *testing.T) {

 func TestDeleteAllChildren(t *testing.T) {
 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()

 	cnr := cidtest.ID()

@@ -115,6 +117,7 @@ func TestDeleteAllChildren(t *testing.T) {

 func TestGraveOnlyDelete(t *testing.T) {
 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()

 	addr := oidtest.Address()

@@ -127,6 +130,7 @@ func TestGraveOnlyDelete(t *testing.T) {

 func TestExpiredObject(t *testing.T) {
 	db := newDB(t, meta.WithEpochState(epochState{currEpoch}))
+	defer func() { require.NoError(t, db.Close()) }()

 	checkExpiredObjects(t, db, func(exp, nonExp *objectSDK.Object) {
 		// removing expired object should be error-free

@@ -18,6 +18,7 @@ const currEpoch = 1000

 func TestDB_Exists(t *testing.T) {
 	db := newDB(t, meta.WithEpochState(epochState{currEpoch}))
+	defer func() { require.NoError(t, db.Close()) }()

 	t.Run("no object", func(t *testing.T) {
 		nonExist := testutil.GenerateObject()

@@ -13,6 +13,7 @@ import (

 func TestDB_SelectExpired(t *testing.T) {
 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()

 	containerID1 := cidtest.ID()

@@ -22,6 +22,7 @@ import (

 func TestDB_Get(t *testing.T) {
 	db := newDB(t, meta.WithEpochState(epochState{currEpoch}))
+	defer func() { require.NoError(t, db.Close()) }()

 	raw := testutil.GenerateObject()

@@ -180,6 +181,7 @@ func benchmarkGet(b *testing.B, numOfObj int) {
 		meta.WithMaxBatchSize(batchSize),
 		meta.WithMaxBatchDelay(10*time.Millisecond),
 	)
+	defer func() { require.NoError(b, db.Close()) }()
 	addrs := make([]oid.Address, 0, numOfObj)

 	for i := 0; i < numOfObj; i++ {

@@ -14,6 +14,7 @@ import (

 func TestDB_IterateDeletedObjects_EmptyDB(t *testing.T) {
 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()

 	var counter int
 	var iterGravePRM meta.GraveyardIterationPrm

@@ -40,6 +41,7 @@ func TestDB_IterateDeletedObjects_EmptyDB(t *testing.T) {

 func TestDB_Iterate_OffsetNotFound(t *testing.T) {
 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()

 	obj1 := testutil.GenerateObject()
 	obj2 := testutil.GenerateObject()

@@ -110,6 +112,7 @@ func TestDB_Iterate_OffsetNotFound(t *testing.T) {

 func TestDB_IterateDeletedObjects(t *testing.T) {
 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()

 	// generate and put 4 objects
 	obj1 := testutil.GenerateObject()

@@ -196,6 +199,7 @@ func TestDB_IterateDeletedObjects(t *testing.T) {

 func TestDB_IterateOverGraveyard_Offset(t *testing.T) {
 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()

 	// generate and put 4 objects
 	obj1 := testutil.GenerateObject()

@@ -294,6 +298,7 @@ func TestDB_IterateOverGraveyard_Offset(t *testing.T) {

 func TestDB_IterateOverGarbage_Offset(t *testing.T) {
 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()

 	// generate and put 4 objects
 	obj1 := testutil.GenerateObject()

@@ -385,6 +390,7 @@ func TestDB_IterateOverGarbage_Offset(t *testing.T) {

 func TestDB_DropGraves(t *testing.T) {
 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()

 	// generate and put 2 objects
 	obj1 := testutil.GenerateObject()

@@ -16,6 +16,7 @@ import (

 func TestDB_Inhume(t *testing.T) {
 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()

 	raw := testutil.GenerateObject()
 	testutil.AddAttribute(raw, "foo", "bar")

@@ -37,6 +38,7 @@ func TestDB_Inhume(t *testing.T) {

 func TestInhumeTombOnTomb(t *testing.T) {
 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()

 	var (
 		err error

@@ -99,6 +101,7 @@ func TestInhumeTombOnTomb(t *testing.T) {

 func TestInhumeLocked(t *testing.T) {
 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()

 	locked := oidtest.Address()

@@ -17,6 +17,7 @@ import (

 func TestDB_IterateExpired(t *testing.T) {
 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()

 	const epoch = 13

@@ -68,6 +69,7 @@ func putWithExpiration(t *testing.T, db *meta.DB, typ objectSDK.Type, expiresAt

 func TestDB_IterateCoveredByTombstones(t *testing.T) {
 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()

 	ts := oidtest.Address()
 	protected1 := oidtest.Address()

@@ -33,6 +33,7 @@ func listWithCursorPrepareDB(b *testing.B) *meta.DB {
 	db := newDB(b, meta.WithMaxBatchSize(1), meta.WithBoltDBOptions(&bbolt.Options{
 		NoSync: true,
 	})) // faster single-thread generation
+	defer func() { require.NoError(b, db.Close()) }()

 	obj := testutil.GenerateObject()
 	for i := 0; i < 100_000; i++ { // should be a multiple of all batch sizes

@@ -70,6 +71,7 @@ func TestLisObjectsWithCursor(t *testing.T) {
 	t.Parallel()

 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()

 	const (
 		containers = 5

@@ -165,6 +167,7 @@ func TestAddObjectDuringListingWithCursor(t *testing.T) {
 	t.Parallel()

 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()

 	const total = 5

@@ -21,6 +21,7 @@ func TestDB_Lock(t *testing.T) {

 	cnr := cidtest.ID()
 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()

 	t.Run("empty locked list", func(t *testing.T) {
 		require.Panics(t, func() { _ = db.Lock(context.Background(), cnr, oid.ID{}, nil) })

@@ -182,6 +183,7 @@ func TestDB_Lock_Expired(t *testing.T) {
 	es := &epochState{e: 123}

 	db := newDB(t, meta.WithEpochState(es))
+	defer func() { require.NoError(t, db.Close()) }()

 	// put an object
 	addr := putWithExpiration(t, db, objectSDK.TypeRegular, 124)

@@ -203,6 +205,7 @@ func TestDB_IsLocked(t *testing.T) {
 	t.Parallel()

 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()

 	// existing and locked objs

@@ -13,6 +13,7 @@ import (

 func TestDB_Movable(t *testing.T) {
 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()

 	raw1 := testutil.GenerateObject()
 	raw2 := testutil.GenerateObject()

@@ -46,6 +46,7 @@ func BenchmarkPut(b *testing.B) {
 		db := newDB(b,
 			meta.WithMaxBatchDelay(time.Millisecond*10),
 			meta.WithMaxBatchSize(runtime.NumCPU()))
+		defer func() { require.NoError(b, db.Close()) }()
 		// Ensure the benchmark is bound by CPU and not waiting batch-delay time.
 		b.SetParallelism(1)

@@ -67,6 +68,7 @@ func BenchmarkPut(b *testing.B) {
 		db := newDB(b,
 			meta.WithMaxBatchDelay(time.Millisecond*10),
 			meta.WithMaxBatchSize(1))
+		defer func() { require.NoError(b, db.Close()) }()
 		var index atomic.Int64
 		index.Store(-1)
 		objs := prepareObjects(b, b.N)

@@ -82,6 +84,7 @@ func BenchmarkPut(b *testing.B) {

 func TestDB_PutBlobovniczaUpdate(t *testing.T) {
 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()

 	raw1 := testutil.GenerateObject()
 	storageID := []byte{1, 2, 3, 4}

@@ -23,6 +23,7 @@ func TestDB_SelectUserAttributes(t *testing.T) {
 	t.Parallel()

 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()

 	cnr := cidtest.ID()

@@ -147,6 +148,7 @@ func TestDB_SelectRootPhyParent(t *testing.T) {
 	t.Parallel()

 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()

 	cnr := cidtest.ID()

@@ -300,6 +302,7 @@ func TestDB_SelectInhume(t *testing.T) {
 	t.Parallel()

 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()

 	cnr := cidtest.ID()

@@ -334,6 +337,7 @@ func TestDB_SelectPayloadHash(t *testing.T) {
 	t.Parallel()

 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()

 	cnr := cidtest.ID()

@@ -404,6 +408,7 @@ func TestDB_SelectWithSlowFilters(t *testing.T) {
 	t.Parallel()

 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()

 	cnr := cidtest.ID()

@@ -511,6 +516,7 @@ func TestDB_SelectObjectID(t *testing.T) {
 	t.Parallel()

 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()

 	cnr := cidtest.ID()

@@ -626,6 +632,7 @@ func TestDB_SelectSplitID(t *testing.T) {
 	t.Parallel()

 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()

 	cnr := cidtest.ID()

@@ -682,6 +689,7 @@ func TestDB_SelectContainerID(t *testing.T) {
 	t.Parallel()

 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()

 	cnr := cidtest.ID()

@@ -729,6 +737,8 @@ func TestDB_SelectContainerID(t *testing.T) {
 func BenchmarkSelect(b *testing.B) {
 	const objCount = 1000
 	db := newDB(b)
+	defer func() { require.NoError(b, db.Close()) }()
+
 	cid := cidtest.ID()

 	for i := 0; i < objCount; i++ {

@@ -769,6 +779,7 @@ func TestExpiredObjects(t *testing.T) {
 	t.Parallel()

 	db := newDB(t, meta.WithEpochState(epochState{currEpoch}))
+	defer func() { require.NoError(t, db.Close()) }()

 	checkExpiredObjects(t, db, func(exp, nonExp *objectSDK.Object) {
 		cidExp, _ := exp.ContainerID()

@@ -15,6 +15,7 @@ func TestDB_StorageID(t *testing.T) {
 	t.Parallel()

 	db := newDB(t)
+	defer func() { require.NoError(t, db.Close()) }()

 	raw1 := testutil.GenerateObject()
 	raw2 := testutil.GenerateObject()

@@ -28,8 +28,9 @@ func BenchmarkCreate(b *testing.B) {
 		WithMaxBatchSize(runtime.GOMAXPROCS(0)))
 	require.NoError(b, f.Open(context.Background(), false))
 	require.NoError(b, f.Init())
+	defer func() { require.NoError(b, f.Close()) }()

 	b.Cleanup(func() {
-		require.NoError(b, f.Close())
 		require.NoError(b, os.RemoveAll(tmpDir))
 	})

@@ -20,19 +20,15 @@ import (

 var providers = []struct {
 	name string
-	construct func(t testing.TB, opts ...Option) Forest
+	construct func(t testing.TB, opts ...Option) ForestStorage
 }{
-	{"inmemory", func(t testing.TB, _ ...Option) Forest {
+	{"inmemory", func(t testing.TB, _ ...Option) ForestStorage {
 		f := NewMemoryForest()
 		require.NoError(t, f.Open(context.Background(), false))
 		require.NoError(t, f.Init())
-		t.Cleanup(func() {
-			require.NoError(t, f.Close())
-		})
-
 		return f
 	}},
-	{"bbolt", func(t testing.TB, opts ...Option) Forest {
+	{"bbolt", func(t testing.TB, opts ...Option) ForestStorage {
 		f := NewBoltForest(
 			append([]Option{
 				WithPath(filepath.Join(t.TempDir(), "test.db")),

@@ -40,9 +36,6 @@ var providers = []struct {
 			}, opts...)...)
 		require.NoError(t, f.Open(context.Background(), false))
 		require.NoError(t, f.Init())
-		t.Cleanup(func() {
-			require.NoError(t, f.Close())
-		})
 		return f
 	}},
 }

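The constructors now return `ForestStorage` rather than `Forest` so each test function can close the storage itself instead of relying on `t.Cleanup`. The interface is not shown in this diff; as an assumption for orientation only, it extends `Forest` with lifecycle methods roughly like:

	// Assumed shape; the real definition lives in the pilorama package
	// and may carry more methods.
	type ForestStorage interface {
	    Forest
	    Open(ctx context.Context, readOnly bool) error
	    Init() error
	    Close() error
	}
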
@@ -62,7 +55,9 @@ func TestForest_TreeMove(t *testing.T) {
 	}
 }

-func testForestTreeMove(t *testing.T, s Forest) {
+func testForestTreeMove(t *testing.T, s ForestStorage) {
+	defer func() { require.NoError(t, s.Close()) }()
+
 	cid := cidtest.ID()
 	d := CIDDescriptor{cid, 0, 1}
 	treeID := "version"

@@ -124,7 +119,9 @@ func TestMemoryForest_TreeGetChildren(t *testing.T) {
 	}
 }

-func testForestTreeGetChildren(t *testing.T, s Forest) {
+func testForestTreeGetChildren(t *testing.T, s ForestStorage) {
+	defer func() { require.NoError(t, s.Close()) }()
+
 	cid := cidtest.ID()
 	d := CIDDescriptor{cid, 0, 1}
 	treeID := "version"

@@ -188,7 +185,9 @@ func TestForest_TreeDrop(t *testing.T) {
 	}
 }

-func testForestTreeDrop(t *testing.T, s Forest) {
+func testForestTreeDrop(t *testing.T, s ForestStorage) {
+	defer func() { require.NoError(t, s.Close()) }()
+
 	const cidsSize = 3
 	var cids [cidsSize]cidSDK.ID

@@ -256,7 +255,9 @@ func TestForest_TreeAdd(t *testing.T) {
 	}
 }

-func testForestTreeAdd(t *testing.T, s Forest) {
+func testForestTreeAdd(t *testing.T, s ForestStorage) {
+	defer func() { require.NoError(t, s.Close()) }()
+
 	cid := cidtest.ID()
 	d := CIDDescriptor{cid, 0, 1}
 	treeID := "version"

@@ -302,7 +303,9 @@ func TestForest_TreeAddByPath(t *testing.T) {
 	}
 }

-func testForestTreeAddByPath(t *testing.T, s Forest) {
+func testForestTreeAddByPath(t *testing.T, s ForestStorage) {
+	defer func() { require.NoError(t, s.Close()) }()
+
 	cid := cidtest.ID()
 	d := CIDDescriptor{cid, 0, 1}
 	treeID := "version"

@@ -425,7 +428,7 @@ func TestForest_Apply(t *testing.T) {
 	}
 }

-func testForestTreeApply(t *testing.T, constructor func(t testing.TB, _ ...Option) Forest) {
+func testForestTreeApply(t *testing.T, constructor func(t testing.TB, _ ...Option) ForestStorage) {
 	cid := cidtest.ID()
 	treeID := "version"

@@ -439,6 +442,8 @@ func testForestTreeApply(t *testing.T, constructor func(t testing.TB, _ ...Optio

 	t.Run("add a child, then insert a parent removal", func(t *testing.T) {
 		s := constructor(t)
+		defer func() { require.NoError(t, s.Close()) }()
+
 		testApply(t, s, 10, 0, Meta{Time: 1, Items: []KeyValue{{"grand", []byte{1}}}})

 		meta := Meta{Time: 3, Items: []KeyValue{{"child", []byte{3}}}}

@@ -450,6 +455,7 @@ func testForestTreeApply(t *testing.T, constructor func(t testing.TB, _ ...Optio
 	})
 	t.Run("add a child to non-existent parent, then add a parent", func(t *testing.T) {
 		s := constructor(t)
+		defer func() { require.NoError(t, s.Close()) }()

 		meta := Meta{Time: 1, Items: []KeyValue{{"child", []byte{3}}}}
 		testApply(t, s, 11, 10, meta)

@@ -469,7 +475,7 @@ func TestForest_ApplySameOperation(t *testing.T) {
 	}
 }

-func testForestApplySameOperation(t *testing.T, constructor func(t testing.TB, _ ...Option) Forest, parallel bool) {
+func testForestApplySameOperation(t *testing.T, constructor func(t testing.TB, _ ...Option) ForestStorage, parallel bool) {
 	cid := cidtest.ID()
 	treeID := "version"

@@ -519,6 +525,8 @@ func testForestApplySameOperation(t *testing.T, constructor func(t testing.TB, _

 	t.Run("expected", func(t *testing.T) {
 		s := constructor(t)
+		defer func() { require.NoError(t, s.Close()) }()
+
 		for i := range logs {
 			require.NoError(t, s.TreeApply(ctx, cid, treeID, &logs[i], false))
 		}

@@ -526,6 +534,8 @@ func testForestApplySameOperation(t *testing.T, constructor func(t testing.TB, _
 	})

 	s := constructor(t, WithMaxBatchSize(batchSize))
+	defer func() { require.NoError(t, s.Close()) }()
+
 	require.NoError(t, s.TreeApply(ctx, cid, treeID, &logs[0], false))
 	for i := 0; i < batchSize; i++ {
 		errG.Go(func() error {

@@ -545,7 +555,7 @@ func TestForest_GetOpLog(t *testing.T) {
 	}
 }

-func testForestTreeGetOpLog(t *testing.T, constructor func(t testing.TB, _ ...Option) Forest) {
+func testForestTreeGetOpLog(t *testing.T, constructor func(t testing.TB, _ ...Option) ForestStorage) {
 	cid := cidtest.ID()
 	treeID := "version"
 	logs := []Move{

@@ -565,6 +575,7 @@ func testForestTreeGetOpLog(t *testing.T, constructor func(t testing.TB, _ ...Op
 	}

 	s := constructor(t)
+	defer func() { require.NoError(t, s.Close()) }()

 	t.Run("empty log, no panic", func(t *testing.T) {
 		_, err := s.TreeGetOpLog(context.Background(), cid, treeID, 0)

@@ -603,8 +614,9 @@ func TestForest_TreeExists(t *testing.T) {
 	}
 }

-func testForestTreeExists(t *testing.T, constructor func(t testing.TB, opts ...Option) Forest) {
+func testForestTreeExists(t *testing.T, constructor func(t testing.TB, opts ...Option) ForestStorage) {
 	s := constructor(t)
+	defer func() { require.NoError(t, s.Close()) }()

 	checkExists := func(t *testing.T, expected bool, cid cidSDK.ID, treeID string) {
 		actual, err := s.TreeExists(context.Background(), cid, treeID)

@@ -663,6 +675,8 @@ func TestApplyTricky1(t *testing.T) {
 	for i := range providers {
 		t.Run(providers[i].name, func(t *testing.T) {
 			s := providers[i].construct(t)
+			defer func() { require.NoError(t, s.Close()) }()
+
 			for i := range ops {
 				require.NoError(t, s.TreeApply(context.Background(), cid, treeID, &ops[i], false))
 			}

@@ -724,6 +738,8 @@ func TestApplyTricky2(t *testing.T) {
 	for i := range providers {
 		t.Run(providers[i].name, func(t *testing.T) {
 			s := providers[i].construct(t)
+			defer func() { require.NoError(t, s.Close()) }()
+
 			for i := range ops {
 				require.NoError(t, s.TreeApply(context.Background(), cid, treeID, &ops[i], false))
 			}

@@ -821,7 +837,7 @@ func compareForests(t *testing.T, expected, actual Forest, cid cidSDK.ID, treeID
 	}
 }

-func testForestTreeParallelApply(t *testing.T, constructor func(t testing.TB, _ ...Option) Forest, batchSize, opCount, iterCount int) {
+func testForestTreeParallelApply(t *testing.T, constructor func(t testing.TB, _ ...Option) ForestStorage, batchSize, opCount, iterCount int) {
 	r := mrand.New(mrand.NewSource(42))

 	const nodeCount = 5

@@ -832,6 +848,8 @@ func testForestTreeParallelApply(t *testing.T, constructor func(t testing.TB, _
 	treeID := "version"

 	expected := constructor(t, WithNoSync(true))
+	defer func() { require.NoError(t, expected.Close()) }()
+
 	for i := range ops {
 		require.NoError(t, expected.TreeApply(context.Background(), cid, treeID, &ops[i], false))
 	}

@@ -860,10 +878,11 @@ func testForestTreeParallelApply(t *testing.T, constructor func(t testing.TB, _
 		wg.Wait()

 		compareForests(t, expected, actual, cid, treeID, nodeCount)
+		require.NoError(t, actual.Close())
 	}
 }

-func testForestTreeApplyRandom(t *testing.T, constructor func(t testing.TB, _ ...Option) Forest) {
+func testForestTreeApplyRandom(t *testing.T, constructor func(t testing.TB, _ ...Option) ForestStorage) {
 	r := mrand.New(mrand.NewSource(42))

 	const (

@@ -877,6 +896,8 @@ func testForestTreeApplyRandom(t *testing.T, constructor func(t testing.TB, _ ..
 	treeID := "version"

 	expected := constructor(t, WithNoSync(true))
+	defer func() { require.NoError(t, expected.Close()) }()
+
 	for i := range ops {
 		require.NoError(t, expected.TreeApply(context.Background(), cid, treeID, &ops[i], false))
 	}

@@ -891,6 +912,7 @@ func testForestTreeApplyRandom(t *testing.T, constructor func(t testing.TB, _ ..
 		require.NoError(t, actual.TreeApply(context.Background(), cid, treeID, &ops[i], false))
 	}
 	compareForests(t, expected, actual, cid, treeID, nodeCount)
+	require.NoError(t, actual.Close())
 }
 }

@@ -908,6 +930,8 @@ func BenchmarkApplySequential(b *testing.B) {
 		b.Run("batchsize="+strconv.Itoa(bs), func(b *testing.B) {
 			r := mrand.New(mrand.NewSource(time.Now().Unix()))
 			s := providers[i].construct(b, WithMaxBatchSize(bs))
+			defer func() { require.NoError(b, s.Close()) }()
+
 			benchmarkApply(b, s, func(opCount int) []Move {
 				ops := make([]Move, opCount)
 				for i := range ops {

@@ -942,6 +966,8 @@ func BenchmarkApplyReorderLast(b *testing.B) {
 		b.Run("batchsize="+strconv.Itoa(bs), func(b *testing.B) {
 			r := mrand.New(mrand.NewSource(time.Now().Unix()))
 			s := providers[i].construct(b, WithMaxBatchSize(bs))
+			defer func() { require.NoError(b, s.Close()) }()
+
 			benchmarkApply(b, s, func(opCount int) []Move {
 				ops := make([]Move, opCount)
 				for i := range ops {

@@ -996,7 +1022,8 @@ func TestTreeGetByPath(t *testing.T) {
 	}
 }

-func testTreeGetByPath(t *testing.T, s Forest) {
+func testTreeGetByPath(t *testing.T, s ForestStorage) {
+	defer func() { require.NoError(t, s.Close()) }()
 	cid := cidtest.ID()
 	treeID := "version"

@@ -1074,7 +1101,9 @@ func TestGetTrees(t *testing.T) {
 	}
 }

-func testTreeGetTrees(t *testing.T, s Forest) {
+func testTreeGetTrees(t *testing.T, s ForestStorage) {
+	defer func() { require.NoError(t, s.Close()) }()
+
 	cids := []cidSDK.ID{cidtest.ID(), cidtest.ID()}
 	d := CIDDescriptor{Position: 0, Size: 1}

@@ -1118,7 +1147,9 @@ func TestTreeLastSyncHeight(t *testing.T) {
 	}
 }

-func testTreeLastSyncHeight(t *testing.T, f Forest) {
+func testTreeLastSyncHeight(t *testing.T, f ForestStorage) {
+	defer func() { require.NoError(t, f.Close()) }()
+
 	cnr := cidtest.ID()
 	treeID := "someTree"

@ -64,7 +64,7 @@ func TestShardOpen(t *testing.T) {
	newShard := func() *Shard {
		return New(
			WithID(NewIDFromBytes([]byte{})),
			WithLogger(test.NewLogger(t, true)),
			WithLogger(test.NewLogger(t)),
			WithBlobStorOptions(
				blobstor.WithStorages([]blobstor.SubStorage{
					{Storage: st},
@ -38,6 +38,7 @@ func TestShard_Delete_BigObject(t *testing.T) {

func testShard(t *testing.T, hasWriteCache bool, payloadSize int) {
	sh := newShard(t, hasWriteCache)
	defer func() { require.NoError(t, sh.Close()) }()

	cnr := cidtest.ID()
@ -30,13 +30,13 @@ func Test_ObjectNotFoundIfNotDeletedFromMetabase(t *testing.T) {
	rootPath := t.TempDir()

	var sh *Shard
	l := test.NewLogger(t, true)
	l := test.NewLogger(t)
	blobOpts := []blobstor.Option{
		blobstor.WithLogger(test.NewLogger(t, true)),
		blobstor.WithLogger(test.NewLogger(t)),
		blobstor.WithStorages([]blobstor.SubStorage{
			{
				Storage: blobovniczatree.NewBlobovniczaTree(
					blobovniczatree.WithLogger(test.NewLogger(t, true)),
					blobovniczatree.WithLogger(test.NewLogger(t)),
					blobovniczatree.WithRootPath(filepath.Join(rootPath, "blob", "blobovnicza")),
					blobovniczatree.WithBlobovniczaShallowDepth(1),
					blobovniczatree.WithBlobovniczaShallowWidth(1)),

@ -78,10 +78,7 @@ func Test_ObjectNotFoundIfNotDeletedFromMetabase(t *testing.T) {
	sh.gcCfg.testHookRemover = func(context.Context) gcRunResult { return gcRunResult{} }
	require.NoError(t, sh.Open(context.Background()))
	require.NoError(t, sh.Init(context.Background()))

	t.Cleanup(func() {
		require.NoError(t, sh.Close())
	})
	defer func() { require.NoError(t, sh.Close()) }()

	cnr := cidtest.ID()
	obj := testutil.GenerateObjectWithCID(cnr)
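A pattern worth noting, since it repeats throughout this diff: t.Cleanup callbacks run only after the test and all of its subtests finish, while defer fires as soon as the enclosing function returns, so the shard is released earlier and the close-error check stays next to the code that opened it. A minimal sketch using the newShard helper from these tests:

```go
// Before: the shard was closed late, in the cleanup phase,
// after any subtests had finished.
// t.Cleanup(func() { require.NoError(t, sh.Close()) })

// After: the shard is closed as soon as the test body returns.
sh := newShard(t, false)
defer func() { require.NoError(t, sh.Close()) }()
```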
@ -31,6 +31,7 @@ func Test_GCDropsLockedExpiredSimpleObject(t *testing.T) {
			return util.NewPseudoWorkerPool() // synchronous event processing
		})},
	})
	defer func() { require.NoError(t, sh.Close()) }()

	cnr := cidtest.ID()

@ -127,6 +128,7 @@ func Test_GCDropsLockedExpiredComplexObject(t *testing.T) {
			return util.NewPseudoWorkerPool() // synchronous event processing
		})},
	})
	defer func() { require.NoError(t, sh.Close()) }()

	lock := testutil.GenerateObjectWithCID(cnr)
	lock.SetType(objectSDK.TypeLock)
@ -32,6 +32,7 @@ func TestShard_Get(t *testing.T) {

func testShardGet(t *testing.T, hasWriteCache bool) {
	sh := newShard(t, hasWriteCache)
	defer func() { require.NoError(t, sh.Close()) }()

	var putPrm PutPrm
	var getPrm GetPrm
@ -30,6 +30,7 @@ func TestShard_Head(t *testing.T) {

func testShardHead(t *testing.T, hasWriteCache bool) {
	sh := newShard(t, hasWriteCache)
	defer func() { require.NoError(t, sh.Close()) }()

	var putPrm PutPrm
	var headPrm HeadPrm
@ -27,6 +27,7 @@ func TestShard_Inhume(t *testing.T) {

func testShardInhume(t *testing.T, hasWriteCache bool) {
	sh := newShard(t, hasWriteCache)
	defer func() { require.NoError(t, sh.Close()) }()

	cnr := cidtest.ID()
@ -18,12 +18,14 @@ func TestShard_List(t *testing.T) {
	t.Run("without write cache", func(t *testing.T) {
		t.Parallel()
		sh := newShard(t, false)
		defer func() { require.NoError(t, sh.Close()) }()
		testShardList(t, sh)
	})

	t.Run("with write cache", func(t *testing.T) {
		t.Parallel()
		shWC := newShard(t, true)
		defer func() { require.NoError(t, shWC.Close()) }()
		testShardList(t, shWC)
	})
}
@ -61,9 +61,7 @@ func TestShard_Lock(t *testing.T) {
	require.NoError(t, sh.Open(context.Background()))
	require.NoError(t, sh.Init(context.Background()))

	t.Cleanup(func() {
		releaseShard(sh, t)
	})
	defer func() { require.NoError(t, sh.Close()) }()

	cnr := cidtest.ID()
	obj := testutil.GenerateObjectWithCID(cnr)

@ -149,6 +147,7 @@ func TestShard_Lock(t *testing.T) {

func TestShard_IsLocked(t *testing.T) {
	sh := newShard(t, false)
	defer func() { require.NoError(t, sh.Close()) }()

	cnr := cidtest.ID()
	obj := testutil.GenerateObjectWithCID(cnr)
@ -160,6 +160,7 @@ func TestCounters(t *testing.T) {

	dir := t.TempDir()
	sh, mm := shardWithMetrics(t, dir)
	defer func() { require.NoError(t, sh.Close()) }()

	sh.SetMode(mode.ReadOnly)
	require.Equal(t, mode.ReadOnly, mm.mode)

@ -382,10 +383,6 @@ func shardWithMetrics(t *testing.T, path string) (*Shard, *metricsStore) {
	require.NoError(t, sh.Open(context.Background()))
	require.NoError(t, sh.Init(context.Background()))

	t.Cleanup(func() {
		sh.Close()
	})

	return sh, mm
}
@ -78,7 +78,7 @@ func testShardGetRange(t *testing.T, hasWriteCache bool) {
		blobstor.WithStorages([]blobstor.SubStorage{
			{
				Storage: blobovniczatree.NewBlobovniczaTree(
					blobovniczatree.WithLogger(test.NewLogger(t, true)),
					blobovniczatree.WithLogger(test.NewLogger(t)),
					blobovniczatree.WithRootPath(filepath.Join(t.TempDir(), "blob", "blobovnicza")),
					blobovniczatree.WithBlobovniczaShallowDepth(1),
					blobovniczatree.WithBlobovniczaShallowWidth(1)),

@ -93,6 +93,7 @@ func testShardGetRange(t *testing.T, hasWriteCache bool) {
			}),
		},
	})
	defer func() { require.NoError(t, sh.Close()) }()

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
@ -27,7 +27,7 @@ func TestShardReload(t *testing.T) {

	p := t.Name()
	defer os.RemoveAll(p)
	l := test.NewLogger(t, true)
	l := test.NewLogger(t)
	blobOpts := []blobstor.Option{
		blobstor.WithLogger(l),
		blobstor.WithStorages([]blobstor.SubStorage{

@ -57,6 +57,10 @@ func TestShardReload(t *testing.T) {
	require.NoError(t, sh.Open(context.Background()))
	require.NoError(t, sh.Init(context.Background()))

	defer func() {
		require.NoError(t, sh.Close())
	}()

	objects := make([]objAddr, 5)
	for i := range objects {
		objects[i].obj = newObject()
@ -371,6 +371,14 @@ func WithRebuildWorkerLimiter(l RebuildWorkerLimiter) Option {
	}
}

// WithDisabledGC disables GC.
// For testing purposes only.
func WithDisabledGC() Option {
	return func(c *cfg) {
		c.gcCfg.testHookRemover = func(ctx context.Context) gcRunResult { return gcRunResult{} }
	}
}

// WithZeroSizeCallback returns option to set zero-size containers callback.
func WithZeroSizeCallback(cb EmptyContainersCallback) Option {
	return func(c *cfg) {
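The new WithDisabledGC option replaces the ad-hoc sh.gcCfg.testHookRemover assignment seen earlier in this diff, giving tests a supported way to keep the background GC cycle from racing with their assertions. A minimal sketch of a test opting in; every option besides WithDisabledGC is a placeholder here:

```go
// Sketch: shard construction for a test with GC turned off, so objects
// inhumed during the test are not collected mid-assertion.
// Options other than WithDisabledGC are illustrative placeholders.
sh := New(
	WithID(NewIDFromBytes([]byte{})),
	WithLogger(test.NewLogger(t)),
	WithDisabledGC(),
)
```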
@ -30,7 +30,6 @@ func (s epochState) CurrentEpoch() uint64 {

type shardOptions struct {
	rootPath    string
	dontRelease bool
	wcOpts      []writecache.Option
	bsOpts      []blobstor.Option
	metaOptions []meta.Option

@ -56,11 +55,11 @@ func newCustomShard(t testing.TB, enableWriteCache bool, o shardOptions) *Shard

	if o.bsOpts == nil {
		o.bsOpts = []blobstor.Option{
			blobstor.WithLogger(test.NewLogger(t, true)),
			blobstor.WithLogger(test.NewLogger(t)),
			blobstor.WithStorages([]blobstor.SubStorage{
				{
					Storage: blobovniczatree.NewBlobovniczaTree(
						blobovniczatree.WithLogger(test.NewLogger(t, true)),
						blobovniczatree.WithLogger(test.NewLogger(t)),
						blobovniczatree.WithRootPath(filepath.Join(o.rootPath, "blob", "blobovnicza")),
						blobovniczatree.WithBlobovniczaShallowDepth(1),
						blobovniczatree.WithBlobovniczaShallowWidth(1)),

@ -78,7 +77,7 @@ func newCustomShard(t testing.TB, enableWriteCache bool, o shardOptions) *Shard

	opts := []Option{
		WithID(NewIDFromBytes([]byte{})),
		WithLogger(test.NewLogger(t, true)),
		WithLogger(test.NewLogger(t)),
		WithBlobStorOptions(o.bsOpts...),
		WithMetaBaseOptions(
			append([]meta.Option{

@ -109,13 +108,5 @@ func newCustomShard(t testing.TB, enableWriteCache bool, o shardOptions) *Shard
	require.NoError(t, sh.Open(context.Background()))
	require.NoError(t, sh.Init(context.Background()))

	if !o.dontRelease {
		t.Cleanup(func() { releaseShard(sh, t) })
	}

	return sh
}

func releaseShard(s *Shard, t testing.TB) {
	require.NoError(t, s.Close())
}
@ -39,7 +39,7 @@ func TestWriteCacheObjectLoss(t *testing.T) {
		writecache.WithMaxObjectSize(smallSize * 2),
	}

	sh := newCustomShard(t, true, shardOptions{dontRelease: true, rootPath: dir, wcOpts: wcOpts})
	sh := newCustomShard(t, true, shardOptions{rootPath: dir, wcOpts: wcOpts})

	var errG errgroup.Group
	for i := range objects {

@ -55,6 +55,7 @@ func TestWriteCacheObjectLoss(t *testing.T) {
	require.NoError(t, sh.Close())

	sh = newCustomShard(t, true, shardOptions{rootPath: dir, wcOpts: wcOpts})
	defer func() { require.NoError(t, sh.Close()) }()

	var getPrm GetPrm
@ -28,6 +28,7 @@ func BenchmarkWritecachePar(b *testing.B) {

func benchmarkPutSeq(b *testing.B, cache writecache.Cache, size uint64) {
	benchmarkPutPrepare(b, cache)
	defer func() { require.NoError(b, cache.Close()) }()

	ctx := context.Background()
	objGen := testutil.RandObjGenerator{ObjSize: size}

@ -50,6 +51,7 @@ func benchmarkPutSeq(b *testing.B, cache writecache.Cache, size uint64) {

func benchmarkPutPar(b *testing.B, cache writecache.Cache, size uint64) {
	benchmarkPutPrepare(b, cache)
	defer func() { require.NoError(b, cache.Close()) }()

	ctx := context.Background()

@ -75,9 +77,6 @@ func benchmarkPutPar(b *testing.B, cache writecache.Cache, size uint64) {
func benchmarkPutPrepare(b *testing.B, cache writecache.Cache) {
	require.NoError(b, cache.Open(context.Background(), false), "opening")
	require.NoError(b, cache.Init(), "initializing")
	b.Cleanup(func() {
		require.NoError(b, cache.Close(), "closing")
	})
}

type testMetabase struct{}
@ -24,7 +24,7 @@ import (
)

func TestFlush(t *testing.T) {
	testlogger := test.NewLogger(t, true)
	testlogger := test.NewLogger(t)

	createCacheFn := func(t *testing.T, smallSize uint64, mb *meta.DB, bs MainStorage, opts ...Option) Cache {
		return New(

@ -142,6 +142,7 @@ func runFlushTest[Option any](
) {
	t.Run("no errors", func(t *testing.T) {
		wc, bs, mb := newCache(t, createCacheFn, smallSize)
		defer func() { require.NoError(t, wc.Close()) }()
		objects := putObjects(t, wc)

		require.NoError(t, bs.SetMode(mode.ReadWrite))
@ -154,6 +155,7 @@ func runFlushTest[Option any](

	t.Run("flush on moving to degraded mode", func(t *testing.T) {
		wc, bs, mb := newCache(t, createCacheFn, smallSize)
		defer func() { require.NoError(t, wc.Close()) }()
		objects := putObjects(t, wc)

		// Blobstor is read-only, so we expect an error from `flush` here.
@ -172,6 +174,7 @@ func runFlushTest[Option any](
		t.Run(f.Desc, func(t *testing.T) {
			errCountOpt, errCount := errCountOption()
			wc, bs, mb := newCache(t, createCacheFn, smallSize, errCountOpt)
			defer func() { require.NoError(t, wc.Close()) }()
			objects := putObjects(t, wc)
			f.InjectFn(t, wc)

@ -214,7 +217,6 @@ func newCache[Option any](
	require.NoError(t, bs.Init())

	wc := createCacheFn(t, smallSize, mb, bs, opts...)
	t.Cleanup(func() { require.NoError(t, wc.Close()) })
	require.NoError(t, wc.Open(context.Background(), false))
	require.NoError(t, wc.Init())
@ -10,7 +10,7 @@ import (
func TestGeneric(t *testing.T) {
	storagetest.TestAll(t, func(t *testing.T) storagetest.Component {
		return New(
			WithLogger(test.NewLogger(t, true)),
			WithLogger(test.NewLogger(t)),
			WithFlushWorkersCount(2),
			WithPath(t.TempDir()))
	})
@ -20,7 +20,7 @@ func TestEventHandling(t *testing.T) {
	notaryRequestsCh := make(chan *result.NotaryRequestEvent)

	l, err := NewListener(ListenerParams{
		Logger: test.NewLogger(t, true),
		Logger: test.NewLogger(t),
		Subscriber: &testSubscriber{
			blockCh:        blockCh,
			notificationCh: notificationCh,

@ -102,7 +102,7 @@ func TestErrorPassing(t *testing.T) {
	t.Run("notification error", func(t *testing.T) {
		nErr := fmt.Errorf("notification error")
		l, err := NewListener(ListenerParams{
			Logger: test.NewLogger(t, true),
			Logger: test.NewLogger(t),
			Subscriber: &testSubscriber{
				blockCh:        blockCh,
				notificationCh: notificationCh,

@ -126,7 +126,7 @@ func TestErrorPassing(t *testing.T) {
	t.Run("block error", func(t *testing.T) {
		bErr := fmt.Errorf("notification error")
		l, err := NewListener(ListenerParams{
			Logger: test.NewLogger(t, true),
			Logger: test.NewLogger(t),
			Subscriber: &testSubscriber{
				blockCh:        blockCh,
				notificationCh: notificationCh,
@ -270,7 +270,7 @@ func TestGetLocalOnly(t *testing.T) {

	newSvc := func(storage *testStorage) *Service {
		return &Service{
			log:          test.NewLogger(t, true),
			log:          test.NewLogger(t),
			localStorage: storage,
		}
	}

@ -532,7 +532,7 @@ func TestGetRemoteSmall(t *testing.T) {
	const curEpoch = 13

	return &Service{
		log:          test.NewLogger(t, true),
		log:          test.NewLogger(t),
		localStorage: newTestStorage(),
		traverserGenerator: &testTraverserGenerator{
			c: cnr,

@ -1663,7 +1663,7 @@ func TestGetFromPastEpoch(t *testing.T) {
	const curEpoch = 13

	svc := &Service{
		log:          test.NewLogger(t, true),
		log:          test.NewLogger(t),
		localStorage: newTestStorage(),
		epochSource:  testEpochReceiver(curEpoch),
		traverserGenerator: &testTraverserGenerator{
@ -161,7 +161,7 @@ func TestGetLocalOnly(t *testing.T) {

	newSvc := func(storage *testStorage) *Service {
		svc := &Service{cfg: new(cfg)}
		svc.log = test.NewLogger(t, true)
		svc.log = test.NewLogger(t)
		svc.localStorage = storage

		return svc

@ -277,7 +277,7 @@ func TestGetRemoteSmall(t *testing.T) {

	newSvc := func(b *testPlacementBuilder, c *testClientCache) *Service {
		svc := &Service{cfg: new(cfg)}
		svc.log = test.NewLogger(t, true)
		svc.log = test.NewLogger(t)
		svc.localStorage = newTestStorage()

		const curEpoch = 13

@ -430,7 +430,7 @@ func TestGetFromPastEpoch(t *testing.T) {
	c22.addResult(idCnr, ids22, nil)

	svc := &Service{cfg: new(cfg)}
	svc.log = test.NewLogger(t, true)
	svc.log = test.NewLogger(t)
	svc.localStorage = newTestStorage()

	const curEpoch = 13

@ -543,7 +543,7 @@ func TestGetWithSessionToken(t *testing.T) {
	w := new(simpleIDWriter)

	svc := &Service{cfg: new(cfg)}
	svc.log = test.NewLogger(t, true)
	svc.log = test.NewLogger(t)
	svc.localStorage = localStorage

	const curEpoch = 13
@ -100,7 +100,7 @@ func TestMessageSign(t *testing.T) {

	s := &Service{
		cfg: cfg{
			log:       test.NewLogger(t, true),
			log:       test.NewLogger(t),
			key:       &privs[0].PrivateKey,
			nmSource:  dummyNetmapSource{},
			cnrSource: dummyContainerSource{
@ -4,26 +4,16 @@ import (
	"testing"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
	"github.com/stretchr/testify/require"
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
	"go.uber.org/zap/zaptest"
)

// NewLogger creates a new logger.
//
// If debug, development logger is created.
func NewLogger(t testing.TB, debug bool) *logger.Logger {
func NewLogger(t testing.TB) *logger.Logger {
	var l logger.Logger
	l.Logger = zap.L()

	if debug {
		cfg := zap.NewDevelopmentConfig()
		cfg.EncoderConfig.EncodeLevel = zapcore.CapitalColorLevelEncoder

		log, err := cfg.Build()
		require.NoError(t, err, "could not prepare logger")
		l.Logger = log
	}

	l.Logger = zaptest.NewLogger(t,
		zaptest.Level(zapcore.DebugLevel),
		zaptest.WrapOptions(zap.Development(), zap.AddCaller()))
	return &l
}
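The rewrite above is what drives the mechanical test.NewLogger(t, true) → test.NewLogger(t) changes throughout this PR: zaptest routes output through t.Log, so it is captured per test, shown only for failing tests or under go test -v, and the old debug flag has nothing left to control. A sketch of the resulting call-site pattern; the test name and message are illustrative, and it assumes logger.Logger exposes zap's methods through the embedded field set above:

```go
// Illustrative call site after the signature change.
func TestExample(t *testing.T) {
	// Debug-level logger whose output is scoped to this test via t.Log.
	l := test.NewLogger(t)
	l.Info("shown only when TestExample fails or with `go test -v`")
}
```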
`t.TempDir` will be removed by the testing engine.
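This refers to the p := t.Name() / defer os.RemoveAll(p) pair in TestShardReload above: a directory obtained from t.TempDir() needs no manual removal, because the testing framework deletes it after the test and its subtests complete. A possible follow-up, sketched here rather than taken from this diff:

```go
// Possible simplification (not part of this diff): let the testing
// framework own the directory's lifetime instead of removing it manually.
p := t.TempDir() // removed automatically when the test finishes
```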