frostfs-node/pkg/local_object_storage/blobstor/perf_test.go

package blobstor

import (
	"fmt"
	"os"
	"testing"

	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/blobovniczatree"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/memstore"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
	"github.com/stretchr/testify/require"
)
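
// The benchmarks below can be run with the standard Go tooling, for example:
//
//	go test -bench=BenchmarkSubstorage -benchmem ./pkg/local_object_storage/blobstor/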

// The storages to benchmark. Each storage has a description and a function which returns the actual
// storage along with a cleanup function.
var storages = []struct {
	desc   string
	create func(*testing.B) (common.Storage, func())
}{
	{
		desc: "memstore",
		create: func(*testing.B) (common.Storage, func()) {
			return memstore.New(), func() {}
		},
	},
	{
		desc: "fstree_nosync",
		create: func(b *testing.B) (common.Storage, func()) {
			dir, err := os.MkdirTemp(os.TempDir(), "fstree_nosync")
			if err != nil {
				b.Fatalf("creating fstree_nosync root path: %v", err)
			}
			cleanup := func() { os.RemoveAll(dir) }
			return fstree.New(
				fstree.WithPath(dir),
				fstree.WithDepth(2),
				fstree.WithDirNameLen(2),
				fstree.WithNoSync(true),
			), cleanup
		},
	},
	{
		desc: "fstree",
		create: func(b *testing.B) (common.Storage, func()) {
			dir, err := os.MkdirTemp(os.TempDir(), "fstree")
			if err != nil {
				b.Fatalf("creating fstree root path: %v", err)
			}
			cleanup := func() { os.RemoveAll(dir) }
			return fstree.New(
				fstree.WithPath(dir),
				fstree.WithDepth(2),
				fstree.WithDirNameLen(2),
			), cleanup
		},
	},
	{
		desc: "blobovniczatree",
		create: func(b *testing.B) (common.Storage, func()) {
			dir, err := os.MkdirTemp(os.TempDir(), "blobovniczatree")
			if err != nil {
				b.Fatalf("creating blobovniczatree root path: %v", err)
			}
			cleanup := func() { os.RemoveAll(dir) }
			return blobovniczatree.NewBlobovniczaTree(
				blobovniczatree.WithRootPath(dir),
			), cleanup
		},
	},
}
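
// BenchmarkSubstorageReadPerf measures parallel Get performance for each
// substorage: the storage is pre-filled with objects and then read back
// using the configured address generator.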
func BenchmarkSubstorageReadPerf(b *testing.B) {
	readTests := []struct {
		desc    string
		size    int
		objGen  func() testutil.ObjectGenerator
		addrGen func() testutil.AddressGenerator
	}{
		{
			desc:    "seq100",
			size:    10000,
			objGen:  func() testutil.ObjectGenerator { return &testutil.SeqObjGenerator{ObjSize: 100} },
			addrGen: func() testutil.AddressGenerator { return &testutil.SeqAddrGenerator{MaxID: 100} },
		},
		{
			desc:    "rand100",
			size:    10000,
			objGen:  func() testutil.ObjectGenerator { return &testutil.SeqObjGenerator{ObjSize: 100} },
			addrGen: func() testutil.AddressGenerator { return testutil.RandAddrGenerator(10000) },
		},
	}
	for _, tt := range readTests {
		for _, stEntry := range storages {
			b.Run(fmt.Sprintf("%s-%s", stEntry.desc, tt.desc), func(b *testing.B) {
				objGen := tt.objGen()
				st, cleanup := stEntry.create(b)
				require.NoError(b, st.Open(false))
				require.NoError(b, st.Init())

				// Fill database
				for i := 0; i < tt.size; i++ {
					obj := objGen.Next()
					addr := testutil.AddressFromObject(obj)
					raw, err := obj.Marshal()
					require.NoError(b, err)
					if _, err := st.Put(common.PutPrm{
						Address: addr,
						RawData: raw,
					}); err != nil {
						b.Fatalf("writing entry: %v", err)
					}
				}

				// Benchmark reading
				addrGen := tt.addrGen()
				b.ResetTimer()
				b.RunParallel(func(pb *testing.PB) {
					for pb.Next() {
						_, err := st.Get(common.GetPrm{Address: addrGen.Next()})
						require.NoError(b, err)
					}
				})

				require.NoError(b, st.Close())
				cleanup()
			})
		}
	}
}
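
// BenchmarkSubstorageWritePerf measures parallel Put performance for each
// substorage using random and overwrite object generators of various sizes.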
func BenchmarkSubstorageWritePerf(b *testing.B) {
	generators := []struct {
		desc   string
		create func() testutil.ObjectGenerator
	}{
		{desc: "rand10", create: func() testutil.ObjectGenerator { return &testutil.RandObjGenerator{ObjSize: 10} }},
		{desc: "rand100", create: func() testutil.ObjectGenerator { return &testutil.RandObjGenerator{ObjSize: 100} }},
		{desc: "rand1000", create: func() testutil.ObjectGenerator { return &testutil.RandObjGenerator{ObjSize: 1000} }},
		{desc: "overwrite10", create: func() testutil.ObjectGenerator { return &testutil.OverwriteObjGenerator{ObjSize: 10, MaxObjects: 100} }},
		{desc: "overwrite100", create: func() testutil.ObjectGenerator { return &testutil.OverwriteObjGenerator{ObjSize: 100, MaxObjects: 100} }},
		{desc: "overwrite1000", create: func() testutil.ObjectGenerator {
			return &testutil.OverwriteObjGenerator{ObjSize: 1000, MaxObjects: 100}
		}},
	}

	for _, genEntry := range generators {
		for _, stEntry := range storages {
			b.Run(fmt.Sprintf("%s-%s", stEntry.desc, genEntry.desc), func(b *testing.B) {
				gen := genEntry.create()
				st, cleanup := stEntry.create(b)
				require.NoError(b, st.Open(false))
				require.NoError(b, st.Init())

				b.ResetTimer()
				b.RunParallel(func(pb *testing.PB) {
					for pb.Next() {
						obj := gen.Next()
						addr := testutil.AddressFromObject(obj)
						raw, err := obj.Marshal()
						require.NoError(b, err)
						if _, err := st.Put(common.PutPrm{
							Address: addr,
							RawData: raw,
						}); err != nil {
							b.Fatalf("writing entry: %v", err)
						}
					}
				})

				require.NoError(b, st.Close())
				cleanup()
			})
		}
	}
}
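
// BenchmarkSubstorageIteratePerf measures Iterate performance over a
// pre-filled storage and checks that every stored object is visited.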
func BenchmarkSubstorageIteratePerf(b *testing.B) {
	iterateTests := []struct {
		desc   string
		size   int
		objGen func() testutil.ObjectGenerator
	}{
		{
			desc:   "rand100",
			size:   10000,
			objGen: func() testutil.ObjectGenerator { return &testutil.RandObjGenerator{ObjSize: 100} },
		},
	}
	for _, tt := range iterateTests {
		for _, stEntry := range storages {
			b.Run(fmt.Sprintf("%s-%s", stEntry.desc, tt.desc), func(b *testing.B) {
				objGen := tt.objGen()
				st, cleanup := stEntry.create(b)
				require.NoError(b, st.Open(false))
				require.NoError(b, st.Init())

				// Fill database
				for i := 0; i < tt.size; i++ {
					obj := objGen.Next()
					addr := testutil.AddressFromObject(obj)
					raw, err := obj.Marshal()
					require.NoError(b, err)
					if _, err := st.Put(common.PutPrm{
						Address: addr,
						RawData: raw,
					}); err != nil {
						b.Fatalf("writing entry: %v", err)
					}
				}

				// Benchmark iterate
				cnt := 0
				b.ResetTimer()
				_, err := st.Iterate(common.IteratePrm{
					Handler: func(elem common.IterationElement) error {
						cnt++
						return nil
					},
				})
				require.NoError(b, err)
				require.Equal(b, tt.size, cnt)
				b.StopTimer()

				require.NoError(b, st.Close())
				cleanup()
			})
		}
	}
}