package repository

import (
	"context"
	"os"
	"sync"

	"github.com/restic/restic/internal/debug"
	"github.com/restic/restic/internal/errors"
	"github.com/restic/restic/internal/fs"
	"github.com/restic/restic/internal/pack"
	"github.com/restic/restic/internal/restic"
	"github.com/restic/restic/internal/ui/progress"

	"golang.org/x/sync/errgroup"
)
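
// numRepackWorkers is the number of concurrent pack downloader goroutines
// and, separately, the number of repack worker goroutines started by Repack.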
const numRepackWorkers = 8

// Repack takes a list of packs together with a list of blobs contained in
// these packs. Each pack is loaded and the blobs listed in keepBlobs are
// saved into a new pack. The returned set contains the obsolete packs, which
// can then be removed.
//
// The map keepBlobs is modified by Repack; it is used to keep track of which
// blobs have been processed.
func Repack(ctx context.Context, repo restic.Repository, packs restic.IDSet, keepBlobs restic.BlobSet, p *progress.Counter) (obsoletePacks restic.IDSet, err error) {
	defer p.Done()

	debug.Log("repacking %d packs while keeping %d blobs", len(packs), len(keepBlobs))

	wg, wgCtx := errgroup.WithContext(ctx)
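
	// first pipeline stage: feed the IDs of all packs that should be
	// repacked to the downloaders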
	downloadQueue := make(chan restic.ID)
	wg.Go(func() error {
		defer close(downloadQueue)
		for packID := range packs {
			select {
			case downloadQueue <- packID:
			case <-wgCtx.Done():
				return wgCtx.Err()
			}
		}
		return nil
	})

	type repackJob struct {
		tempfile   *os.File
		hash       restic.ID
		packLength int64
	}
	processQueue := make(chan repackJob)
	// used to close processQueue once all downloaders have finished
	var downloadWG sync.WaitGroup
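
	// second pipeline stage: download each pack to a temporary file and
	// verify that its contents match the pack ID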
	downloader := func() error {
		defer downloadWG.Done()
		for packID := range downloadQueue {
			// load the complete pack into a temp file
			h := restic.Handle{Type: restic.PackFile, Name: packID.String()}

			tempfile, hash, packLength, err := DownloadAndHash(wgCtx, repo.Backend(), h)
			if err != nil {
				return errors.Wrap(err, "Repack")
			}

			debug.Log("pack %v loaded (%d bytes), hash %v", packID, packLength, hash)

			if !packID.Equal(hash) {
				return errors.Errorf("hash does not match id: want %v, got %v", packID, hash)
			}

			select {
			case processQueue <- repackJob{tempfile, hash, packLength}:
			case <-wgCtx.Done():
				return wgCtx.Err()
			}
		}
		return nil
	}

	downloadWG.Add(numRepackWorkers)
	for i := 0; i < numRepackWorkers; i++ {
		wg.Go(downloader)
	}
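
	// close processQueue as soon as the last downloader has exited, so that
	// the repack workers terminate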
	wg.Go(func() error {
		downloadWG.Wait()
		close(processQueue)
		return nil
	})
	var keepMutex sync.Mutex
	worker := func() error {
		for job := range processQueue {
			tempfile, packID, packLength := job.tempfile, job.hash, job.packLength

			blobs, err := pack.List(repo.Key(), tempfile, packLength)
			if err != nil {
				return err
			}

			debug.Log("processing pack %v, blobs: %v", packID, len(blobs))
			var buf []byte
			for _, entry := range blobs {
				h := restic.BlobHandle{ID: entry.ID, Type: entry.Type}
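
				// fast check without holding the mutex for long: skip blobs
				// that no longer have to be kept, e.g. because another worker
				// has already copied them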
				keepMutex.Lock()
				shouldKeep := keepBlobs.Has(h)
				keepMutex.Unlock()

				if !shouldKeep {
					continue
				}

				debug.Log("  process blob %v", h)

				if uint(cap(buf)) < entry.Length {
					buf = make([]byte, entry.Length)
				}
				buf = buf[:entry.Length]

				n, err := tempfile.ReadAt(buf, int64(entry.Offset))
				if err != nil {
					return errors.Wrap(err, "ReadAt")
				}

				if n != len(buf) {
					return errors.Errorf("read blob %v from %v: not enough bytes read, want %v, got %v",
						h, tempfile.Name(), len(buf), n)
				}
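
				// a blob is stored in the pack file as the nonce followed by
				// the ciphertext; decrypt it in place into the same buffer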
				nonce, ciphertext := buf[:repo.Key().NonceSize()], buf[repo.Key().NonceSize():]
				plaintext, err := repo.Key().Open(ciphertext[:0], nonce, ciphertext, nil)
				if err != nil {
					return err
				}

				id := restic.Hash(plaintext)
				if !id.Equal(entry.ID) {
					debug.Log("read blob %v/%v from %v: wrong data returned, hash is %v",
						h.Type, h.ID, tempfile.Name(), id)
					return errors.Errorf("read blob %v from %v: wrong data returned, hash is %v",
						h, tempfile.Name(), id)
				}
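
				// the worker that deletes h from keepBlobs claims the blob,
				// so each kept blob is saved into a new pack exactly once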
				keepMutex.Lock()
				// recheck whether some other worker was faster
				shouldKeep = keepBlobs.Has(h)
				if shouldKeep {
					keepBlobs.Delete(h)
				}
				keepMutex.Unlock()

				if !shouldKeep {
					continue
				}

				// We do want to save already saved blobs!
				_, _, err = repo.SaveBlob(wgCtx, entry.Type, plaintext, entry.ID, true)
				if err != nil {
					return err
				}

				debug.Log("  saved blob %v", entry.ID)
			}

			if err = tempfile.Close(); err != nil {
				return errors.Wrap(err, "Close")
			}

			if err = fs.RemoveIfExists(tempfile.Name()); err != nil {
				return errors.Wrap(err, "Remove")
			}
			p.Add(1)
		}
		return nil
	}

	for i := 0; i < numRepackWorkers; i++ {
		wg.Go(worker)
	}

	if err := wg.Wait(); err != nil {
		return nil, err
	}
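
	// make sure all pending pack files are written to the repository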
	if err := repo.Flush(ctx); err != nil {
		return nil, err
	}
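
	// all blobs to keep have been copied out of the original packs, so the
	// entire input set is now obsolete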
	return packs, nil
}