package writecache

import (
	"context"
	"os"
	"path/filepath"
	"sync/atomic"
	"testing"

	objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
	checksumtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum/test"
	cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
	oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
	usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test"
	versionSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version"
	"github.com/stretchr/testify/require"
	"go.etcd.io/bbolt"
	"go.uber.org/zap/zaptest"
)
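
// objectPair couples a test object with its address, so flushed entries can
// later be located in the metabase and read back from the blobstor.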
type objectPair struct {
	addr oid.Address
	obj  *object.Object
}
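
// TestFlush checks that the write-cache moves objects to the metabase and
// blobstor: explicitly via Flush, on the transition to degraded mode, and in
// the error-ignoring mode when some cached entries are corrupted.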
func TestFlush(t *testing.T) {
	const (
		objCount  = 4
		smallSize = 256
	)
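
	// newCache builds a write-cache backed by a fresh metabase and blobstor in
	// a temporary directory, then switches both to read-only so that nothing
	// is flushed in the background before the test asks for it.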
	newCache := func(t *testing.T, opts ...Option) (Cache, *blobstor.BlobStor, *meta.DB) {
		dir := t.TempDir()
		mb := meta.New(
			meta.WithPath(filepath.Join(dir, "meta")),
			meta.WithEpochState(dummyEpoch{}))
		require.NoError(t, mb.Open(false))
		require.NoError(t, mb.Init())

		bs := blobstor.New(blobstor.WithStorages([]blobstor.SubStorage{
			{
				Storage: fstree.New(
					fstree.WithPath(filepath.Join(dir, "blob")),
					fstree.WithDepth(0),
					fstree.WithDirNameLen(1)),
			},
		}))
		require.NoError(t, bs.Open(false))
		require.NoError(t, bs.Init())

		wc := New(
			append([]Option{
				WithLogger(&logger.Logger{Logger: zaptest.NewLogger(t)}),
				WithPath(filepath.Join(dir, "writecache")),
				WithSmallObjectSize(smallSize),
				WithMetabase(mb),
				WithBlobstor(bs),
			}, opts...)...)
		require.NoError(t, wc.Open(false))
		initWC(t, wc)

		// First set mode for metabase and blobstor to prevent background flushes.
		require.NoError(t, mb.SetMode(mode.ReadOnly))
		require.NoError(t, bs.SetMode(mode.ReadOnly))

		return wc, bs, mb
	}
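
	// putObjects fills the cache with objCount objects, alternating between
	// payloads below and above the configured small-object size.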
	putObjects := func(t *testing.T, c Cache) []objectPair {
		objects := make([]objectPair, objCount)
		for i := range objects {
			objects[i] = putObject(t, c, 1+(i%2)*smallSize)
		}
		return objects
	}
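
	// check resolves each object's storage ID via the metabase and verifies
	// that the object can be read back from the blobstor.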
	check := func(t *testing.T, mb *meta.DB, bs *blobstor.BlobStor, objects []objectPair) {
		for i := range objects {
			var mPrm meta.StorageIDPrm
			mPrm.SetAddress(objects[i].addr)

			mRes, err := mb.StorageID(context.Background(), mPrm)
			require.NoError(t, err)

			var prm common.GetPrm
			prm.Address = objects[i].addr
			prm.StorageID = mRes.StorageID()

			res, err := bs.Get(context.Background(), prm)
			require.NoError(t, err)
			require.Equal(t, objects[i].obj, res.Object)
		}
	}

	t.Run("no errors", func(t *testing.T) {
		wc, bs, mb := newCache(t)
		objects := putObjects(t, wc)

		require.NoError(t, bs.SetMode(mode.ReadWrite))
		require.NoError(t, mb.SetMode(mode.ReadWrite))

		require.NoError(t, wc.Flush(context.Background(), false))

		check(t, mb, bs, objects)
	})
t.Run("flush on moving to degraded mode", func(t *testing.T) {
|
|
wc, bs, mb := newCache(t)
|
|
objects := putObjects(t, wc)
|
|
|
|
// Blobstor is read-only, so we expect en error from `flush` here.
|
|
require.Error(t, wc.SetMode(mode.Degraded))
|
|
|
|
// First move to read-only mode to close background workers.
|
|
require.NoError(t, wc.SetMode(mode.ReadOnly))
|
|
require.NoError(t, bs.SetMode(mode.ReadWrite))
|
|
require.NoError(t, mb.SetMode(mode.ReadWrite))
|
|
require.NoError(t, wc.SetMode(mode.Degraded))
|
|
|
|
check(t, mb, bs, objects)
|
|
})

	t.Run("ignore errors", func(t *testing.T) {
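		// testIgnoreErrors lets f corrupt one cached entry, then checks that
		// Flush(ctx, false) fails and reports the error, while Flush(ctx, true)
		// skips the corrupted entry and still flushes the intact objects.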
		testIgnoreErrors := func(t *testing.T, f func(*cache)) {
			var errCount atomic.Uint32
			wc, bs, mb := newCache(t, WithReportErrorFunc(func(message string, err error) {
				errCount.Add(1)
			}))
			objects := putObjects(t, wc)
			f(wc.(*cache))

			require.NoError(t, wc.SetMode(mode.ReadOnly))
			require.NoError(t, bs.SetMode(mode.ReadWrite))
			require.NoError(t, mb.SetMode(mode.ReadWrite))

			require.Equal(t, uint32(0), errCount.Load())
			require.Error(t, wc.Flush(context.Background(), false))
			require.True(t, errCount.Load() > 0)
			require.NoError(t, wc.Flush(context.Background(), true))

			check(t, mb, bs, objects)
		}
		t.Run("db, invalid address", func(t *testing.T) {
			testIgnoreErrors(t, func(c *cache) {
				_, data := newObject(t, 1)
				require.NoError(t, c.db.Batch(func(tx *bbolt.Tx) error {
					b := tx.Bucket(defaultBucket)
					return b.Put([]byte{1, 2, 3}, data)
				}))
			})
		})
		t.Run("db, invalid object", func(t *testing.T) {
			testIgnoreErrors(t, func(c *cache) {
				require.NoError(t, c.db.Batch(func(tx *bbolt.Tx) error {
					b := tx.Bucket(defaultBucket)
					return b.Put([]byte(oidtest.Address().EncodeToString()), []byte{1, 2, 3})
				}))
			})
		})
		t.Run("fs, read error", func(t *testing.T) {
			testIgnoreErrors(t, func(c *cache) {
				obj, data := newObject(t, 1)

				var prm common.PutPrm
				prm.Address = objectCore.AddressOf(obj)
				prm.RawData = data

				_, err := c.fsTree.Put(context.Background(), prm)
				require.NoError(t, err)

				p := prm.Address.Object().EncodeToString() + "." + prm.Address.Container().EncodeToString()
				p = filepath.Join(c.fsTree.RootPath, p[:1], p[1:])

				_, err = os.Stat(p) // sanity check
				require.NoError(t, err)
				require.NoError(t, os.Truncate(p, 0)) // corrupt the file contents, so that it can't be unmarshalled
			})
		})
		t.Run("fs, invalid object", func(t *testing.T) {
			testIgnoreErrors(t, func(c *cache) {
				var prm common.PutPrm
				prm.Address = oidtest.Address()
				prm.RawData = []byte{1, 2, 3}
				_, err := c.fsTree.Put(context.Background(), prm)
				require.NoError(t, err)
			})
		})
	})
}
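
// putObject stores a freshly generated object of the given payload size in the
// cache and returns it together with its address.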
func putObject(t *testing.T, c Cache, size int) objectPair {
	obj, data := newObject(t, size)

	var prm common.PutPrm
	prm.Address = objectCore.AddressOf(obj)
	prm.Object = obj
	prm.RawData = data

	_, err := c.Put(context.Background(), prm)
	require.NoError(t, err)

	return objectPair{prm.Address, prm.Object}
}
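
// newObject generates a random regular object with a payload of the given size
// and returns it along with its marshaled representation.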
func newObject(t *testing.T, size int) (*object.Object, []byte) {
	obj := object.New()
	ver := versionSDK.Current()

	obj.SetID(oidtest.ID())
	obj.SetOwnerID(usertest.ID())
	obj.SetContainerID(cidtest.ID())
	obj.SetType(object.TypeRegular)
	obj.SetVersion(&ver)
	obj.SetPayloadChecksum(checksumtest.Checksum())
	obj.SetPayloadHomomorphicHash(checksumtest.Checksum())
	obj.SetPayload(make([]byte, size))

	data, err := obj.Marshal()
	require.NoError(t, err)
	return obj, data
}

func initWC(t *testing.T, wc Cache) {
	require.NoError(t, wc.Init())
}
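
// dummyEpoch is a stub epoch state for the metabase; these tests do not depend
// on the current epoch, so it always reports epoch 0.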
type dummyEpoch struct{}

func (dummyEpoch) CurrentEpoch() uint64 {
	return 0
}