restore: Don't save (part of) pack in memory

Alexander Weiss 2020-11-18 12:36:06 +01:00
parent 8b84c96d9d
commit 3e0acf1395


@@ -1,11 +1,12 @@
 package restorer

 import (
-    "bytes"
+    "bufio"
     "context"
     "io"
     "math"
     "path/filepath"
+    "sort"
     "sync"

     "github.com/restic/restic/internal/crypto"
@@ -179,6 +180,8 @@ func (r *fileRestorer) restoreFiles(ctx context.Context) error {
     return nil
 }

+const maxBufferSize = 4 * 1024 * 1024
+
 func (r *fileRestorer) downloadPack(ctx context.Context, pack *packInfo) {
     // calculate pack byte range and blob->[]files->[]offsets mappings
@@ -226,18 +229,12 @@ func (r *fileRestorer) downloadPack(ctx context.Context, pack *packInfo) {
         }
     }

-    packData := make([]byte, int(end-start))
-
-    h := restic.Handle{Type: restic.PackFile, Name: pack.id.String()}
-    err := r.packLoader(ctx, h, int(end-start), start, func(rd io.Reader) error {
-        l, err := io.ReadFull(rd, packData)
-        if err != nil {
-            return err
-        }
-        if l != len(packData) {
-            return errors.Errorf("unexpected pack size: expected %d but got %d", len(packData), l)
-        }
-        return nil
-    })
+    sortedBlobs := make([]restic.ID, 0, len(blobs))
+    for blobID := range blobs {
+        sortedBlobs = append(sortedBlobs, blobID)
+    }
+    sort.Slice(sortedBlobs, func(i, j int) bool {
+        return blobs[sortedBlobs[i]].offset < blobs[sortedBlobs[j]].offset
+    })

     markFileError := func(file *fileInfo, err error) {
@@ -248,23 +245,28 @@ func (r *fileRestorer) downloadPack(ctx context.Context, pack *packInfo) {
         }
     }

-    if err != nil {
-        for file := range pack.files {
-            markFileError(file, err)
-        }
-        return
-    }
-
-    rd := bytes.NewReader(packData)
-
-    for blobID, blob := range blobs {
-        blobData, err := r.loadBlob(rd, blobID, blob.offset-start, blob.length)
+    h := restic.Handle{Type: restic.PackFile, Name: pack.id.String()}
+    err := r.packLoader(ctx, h, int(end-start), start, func(rd io.Reader) error {
+        bufferSize := int(end - start)
+        if bufferSize > maxBufferSize {
+            bufferSize = maxBufferSize
+        }
+        BufRd := bufio.NewReaderSize(rd, bufferSize)
+        currentBlobEnd := start
+        for _, blobID := range sortedBlobs {
+            blob := blobs[blobID]
+            _, err := BufRd.Discard(int(blob.offset - currentBlobEnd))
+            if err != nil {
+                return err
+            }
+            blobData, err := r.loadBlob(BufRd, blobID, blob.length)
             if err != nil {
                 for file := range blob.files {
                     markFileError(file, err)
                 }
                 continue
             }
+            currentBlobEnd = blob.offset + int64(blob.length)
             for file, offsets := range blob.files {
                 for _, offset := range offsets {
                     writeToFile := func() error {
@@ -294,14 +296,25 @@ func (r *fileRestorer) downloadPack(ctx context.Context, pack *packInfo) {
                 }
             }
         }
+        return nil
+    })
+
+    if err != nil {
+        for file := range pack.files {
+            markFileError(file, err)
+        }
+        return
+    }
 }

-func (r *fileRestorer) loadBlob(rd io.ReaderAt, blobID restic.ID, offset int64, length int) ([]byte, error) {
+func (r *fileRestorer) loadBlob(rd io.Reader, blobID restic.ID, length int) ([]byte, error) {
     // TODO reconcile with Repository#loadBlob implementation

     buf := make([]byte, length)

-    n, err := rd.ReadAt(buf, offset)
+    n, err := rd.Read(buf)
     if err != nil {
         return nil, err
     }
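
The gist of the change: rather than loading the whole pack byte range into memory, the restorer now streams it, visiting blobs in offset order, skipping the gaps between them with bufio.Reader.Discard, and reading each blob straight into its own buffer, so only a bounded bufio buffer (at most maxBufferSize) is held at once. Below is a minimal, self-contained sketch of that pattern; blobSpan, readBlobs, and the fake pack data are illustrative stand-ins, not restic APIs, and the sketch uses io.ReadFull where the diff above uses a plain Read.

package main

import (
    "bufio"
    "bytes"
    "fmt"
    "io"
    "sort"
)

// blobSpan describes where one blob lives inside the downloaded byte range.
type blobSpan struct {
    name   string
    offset int64 // offset relative to the start of the range
    length int
}

// readBlobs streams blobs out of rd without buffering the whole range:
// spans are visited in offset order, gaps are skipped with Discard, and
// each blob is read directly into its own buffer.
func readBlobs(rd io.Reader, spans []blobSpan, bufferSize int) (map[string][]byte, error) {
    sort.Slice(spans, func(i, j int) bool { return spans[i].offset < spans[j].offset })

    bufRd := bufio.NewReaderSize(rd, bufferSize)
    out := make(map[string][]byte, len(spans))

    var current int64 // end of the last blob read so far
    for _, s := range spans {
        // skip the bytes between the previous blob and this one
        if _, err := bufRd.Discard(int(s.offset - current)); err != nil {
            return nil, err
        }
        buf := make([]byte, s.length)
        if _, err := io.ReadFull(bufRd, buf); err != nil {
            return nil, err
        }
        out[s.name] = buf
        current = s.offset + int64(s.length)
    }
    return out, nil
}

func main() {
    // Fake "pack" contents: two blobs separated by padding that is never kept in memory.
    pack := []byte("xxxxxHELLOyyyyyyyWORLD")
    spans := []blobSpan{
        {name: "b2", offset: 17, length: 5}, // "WORLD"
        {name: "b1", offset: 5, length: 5},  // "HELLO"
    }

    blobs, err := readBlobs(bytes.NewReader(pack), spans, 4*1024)
    if err != nil {
        panic(err)
    }
    fmt.Printf("%s %s\n", blobs["b1"], blobs["b2"])
}

Running the sketch prints "HELLO WORLD" while only the small bufio buffer, never the whole fake pack range, is resident at a time.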