[#1337] blobovniczatree: Add rebuild by overflow

Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
Dmitrii Stepanov 2024-09-01 12:29:02 +03:00
parent a61201a987
commit edb1747af7
2 changed files with 82 additions and 3 deletions


@@ -172,7 +172,7 @@ func (b *Blobovniczas) selectDBsDoNotMatchFillPercent(ctx context.Context, targe
 			continue
 		}
 		path := filepath.Join(lvlPath, e.Name())
-		resettlementRequired, err := b.fillPercentIsLow(path, target)
+		resettlementRequired, err := b.rebuildBySize(path, target)
 		if err != nil {
 			return false, err
 		}
@@ -187,14 +187,19 @@ func (b *Blobovniczas) selectDBsDoNotMatchFillPercent(ctx context.Context, targe
 	return result, nil
 }
 
-func (b *Blobovniczas) fillPercentIsLow(path string, target int) (bool, error) {
+func (b *Blobovniczas) rebuildBySize(path string, targetFillPercent int) (bool, error) {
 	shDB := b.getBlobovnicza(path)
 	blz, err := shDB.Open()
 	if err != nil {
 		return false, err
 	}
 	defer shDB.Close()
-	return blz.FillPercent() < target, nil
+	fp := blz.FillPercent()
+	// accepted fill percent defines as
+	// |----|+++++++++++++++++|+++++++++++++++++|---------------
+	// 0%   target            100%              100+(100 - target)
+	// where `+` - accepted fill percent, `-` - not accepted fill percent
+	return fp < targetFillPercent || fp > 100+(100-targetFillPercent), nil
 }
 
 func (b *Blobovniczas) rebuildDB(ctx context.Context, path string, meta common.MetaStorage, limiter common.ConcurrentWorkersLimiter) (uint64, error) {
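For illustration only, below is a minimal, self-contained sketch of the acceptance window that the new rebuildBySize check implements; the helper name needsRebuild and the sample values are mine, not part of the commit. A blobovnicza is left in place while its fill percent stays within [target, 100+(100-target)] and is scheduled for resettlement when it is either underfilled or overflowing.

package main

import "fmt"

// needsRebuild mirrors the condition returned by rebuildBySize: a DB is
// selected for rebuild when its fill percent falls outside the accepted
// window [target, 100+(100-target)].
func needsRebuild(fillPercent, targetFillPercent int) bool {
	return fillPercent < targetFillPercent || fillPercent > 100+(100-targetFillPercent)
}

func main() {
	const target = 80 // same target fill percent as the test below
	for _, fp := range []int{10, 79, 80, 100, 120, 121, 256} {
		fmt.Printf("fill %3d%% -> rebuild: %v\n", fp, needsRebuild(fp, target))
	}
	// fill  10% -> rebuild: true   (underfilled)
	// fill  79% -> rebuild: true   (underfilled)
	// fill  80% -> rebuild: false
	// fill 100% -> rebuild: false
	// fill 120% -> rebuild: false
	// fill 121% -> rebuild: true   (overflow)
	// fill 256% -> rebuild: true   (overflow)
}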


@@ -228,6 +228,80 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
 		require.NoError(t, b.Close())
 	})
 
+	t.Run("rebuild by overflow", func(t *testing.T) {
+		t.Parallel()
+
+		dir := t.TempDir()
+		b := NewBlobovniczaTree(
+			context.Background(),
+			WithLogger(test.NewLogger(t)),
+			WithObjectSizeLimit(64*1024),
+			WithBlobovniczaShallowWidth(1), // single directory
+			WithBlobovniczaShallowDepth(1),
+			WithRootPath(dir),
+			WithBlobovniczaSize(100*1024), // 100 KB limit for each blobovnicza
+			WithWaitBeforeDropDB(0),
+			WithOpenedCacheSize(1000),
+			WithMoveBatchSize(3))
+		require.NoError(t, b.Open(mode.ComponentReadWrite))
+		require.NoError(t, b.Init())
+
+		storageIDs := make(map[oid.Address][]byte)
+		for i := 0; i < 100; i++ { // 2 objects for one blobovnicza, so 50 DBs total will be created
+			obj := blobstortest.NewObject(64 * 1024)
+			data, err := obj.Marshal()
+			require.NoError(t, err)
+			var prm common.PutPrm
+			prm.Address = object.AddressOf(obj)
+			prm.RawData = data
+			res, err := b.Put(context.Background(), prm)
+			require.NoError(t, err)
+			storageIDs[prm.Address] = res.StorageID
+		}
+		metaStub := &storageIDUpdateStub{
+			storageIDs: storageIDs,
+			guard:      &sync.Mutex{},
+		}
+		require.NoError(t, b.Close())
+
+		b = NewBlobovniczaTree(
+			context.Background(),
+			WithLogger(test.NewLogger(t)),
+			WithObjectSizeLimit(64*1024),
+			WithBlobovniczaShallowWidth(1),
+			WithBlobovniczaShallowDepth(1),
+			WithRootPath(dir),
+			WithBlobovniczaSize(50*1024), // 50 KB limit for each blobovnicza
+			WithWaitBeforeDropDB(0),
+			WithOpenedCacheSize(1000),
+			WithMoveBatchSize(3))
+		require.NoError(t, b.Open(mode.ComponentReadWrite))
+		require.NoError(t, b.Init())
+
+		rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{
+			MetaStorage:   metaStub,
+			WorkerLimiter: &rebuildLimiterStub{},
+			Action: common.RebuildAction{
+				SchemaChange:     false,
+				FillPercent:      true,
+				FillPercentValue: 80,
+			},
+		})
+		require.NoError(t, err)
+		require.Equal(t, uint64(49), rRes.FilesRemoved)
+		require.Equal(t, uint64(98), rRes.ObjectsMoved) // 49 DBs with 2 objects
+		require.Equal(t, uint64(98), metaStub.updatedCount)
+
+		for addr, storageID := range storageIDs {
+			var gPrm common.GetPrm
+			gPrm.Address = addr
+			gPrm.StorageID = storageID
+			_, err := b.Get(context.Background(), gPrm)
+			require.NoError(t, err)
+		}
+
+		require.NoError(t, b.Close())
+	})
 }
 
 func TestBlobovniczaTreeRebuildLargeObject(t *testing.T) {
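As a rough sanity check on the numbers this test asserts (my arithmetic, assuming fill percent is approximately stored bytes divided by the configured size limit, which this diff does not spell out): each blobovnicza ends up with two 64 KiB objects, about 128 KiB of data, so after the tree is reopened with a 50 KiB limit its fill percent is on the order of 256%, well above the accepted upper bound of 100 + (100 - 80) = 120% for a target of 80, which is why the DBs are picked up by the new overflow branch.

package main

import "fmt"

func main() {
	// Assumption for illustration: fill percent ~ stored bytes * 100 / size limit.
	storedBytes := 2 * 64 * 1024                 // two 64 KiB objects per blobovnicza
	sizeLimit := 50 * 1024                       // limit after the tree is reopened
	fillPercent := storedBytes * 100 / sizeLimit // 256
	upperBound := 100 + (100 - 80)               // 120 for target fill percent 80
	fmt.Println(fillPercent, ">", upperBound, "->", fillPercent > upperBound) // 256 > 120 -> true
}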