Introduce interface pack.Loader

Alexander Neumann 2016-08-07 14:50:24 +02:00
parent f72f3dbc6a
commit 94d157d97a
13 changed files with 152 additions and 74 deletions
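In short, pack.NewUnpacker no longer takes an io.ReadSeeker but the new pack.Loader interface, and every call site is converted. A condensed before/after sketch of the call-site pattern from the hunks below (repo and h stand in for an initialized repository and a valid handle):

    // Before: wrap the backend in an io.ReadSeeker.
    rd := backend.NewReadSeeker(repo.Backend(), h)
    unpacker, err := pack.NewUnpacker(repo.Key(), rd)

    // After: hand NewUnpacker a pack.Loader; BackendLoader adapts a
    // Backend plus a Handle.
    ldr := pack.BackendLoader{Backend: repo.Backend(), Handle: h}
    unpacker, err = pack.NewUnpacker(repo.Key(), ldr)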


@@ -126,9 +126,9 @@ func printPacks(repo *repository.Repository, wr io.Writer) error {
 		name := job.Data.(string)
 
 		h := backend.Handle{Type: backend.Data, Name: name}
-		rd := backend.NewReadSeeker(repo.Backend(), h)
+		ldr := pack.BackendLoader{Backend: repo.Backend(), Handle: h}
 
-		unpacker, err := pack.NewUnpacker(repo.Key(), rd)
+		unpacker, err := pack.NewUnpacker(repo.Key(), ldr)
 		if err != nil {
 			return nil, err
 		}


@@ -31,7 +31,9 @@ type Backend interface {
 	Lister
 
 	// Load returns the data stored in the backend for h at the given offset
-	// and saves it in p. Load has the same semantics as io.ReaderAt.
+	// and saves it in p. Load has the same semantics as io.ReaderAt, except
+	// that a negative offset is also allowed. In this case it references a
+	// position relative to the end of the file (similar to Seek()).
 	Load(h Handle, p []byte, off int64) (int, error)
 
 	// Save stores the data in the backend under the given handle.
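With this extension a caller can fetch the tail of a file in a single call. A minimal sketch, assuming be is an initialized backend.Backend and h a valid Handle (both placeholders):

    // Read the final 16 bytes in one round trip via a negative offset.
    buf := make([]byte, 16)
    n, err := be.Load(h, buf, -int64(len(buf)))
    if err != nil {
        // handle the error
    }
    buf = buf[:n] // buf now holds the last 16 bytes of the file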


@@ -98,9 +98,12 @@ func dirname(base string, t backend.Type, name string) string {
 	return filepath.Join(base, n)
 }
 
-// Load returns the data stored in the backend for h at the given offset
-// and saves it in p. Load has the same semantics as io.ReaderAt.
+// Load returns the data stored in the backend for h at the given offset and
+// saves it in p. Load has the same semantics as io.ReaderAt, with one
+// exception: when off is lower than zero, it is treated as an offset relative
+// to the end of the file.
 func (b *Local) Load(h backend.Handle, p []byte, off int64) (n int, err error) {
+	debug.Log("backend.local.Load", "Load %v, length %v at %v", h, len(p), off)
 	if err := h.Valid(); err != nil {
 		return 0, err
 	}
@@ -117,11 +120,15 @@ func (b *Local) Load(h backend.Handle, p []byte, off int64) (n int, err error) {
 		}
 	}()
 
-	if off > 0 {
+	switch {
+	case off > 0:
 		_, err = f.Seek(off, 0)
-		if err != nil {
-			return 0, err
-		}
+	case off < 0:
+		_, err = f.Seek(off, 2)
 	}
 
+	if err != nil {
+		return 0, err
+	}
+
 	return io.ReadFull(f, p)
@@ -162,6 +169,7 @@ func writeToTempfile(tempdir string, p []byte) (filename string, err error) {
 
 // Save stores data in the backend at the handle.
 func (b *Local) Save(h backend.Handle, p []byte) (err error) {
+	debug.Log("backend.local.Save", "Save %v, length %v", h, len(p))
 	if err := h.Valid(); err != nil {
 		return err
 	}
@@ -203,6 +211,7 @@ func (b *Local) Save(h backend.Handle, p []byte) (err error) {
 
 // Stat returns information about a blob.
 func (b *Local) Stat(h backend.Handle) (backend.BlobInfo, error) {
+	debug.Log("backend.local.Stat", "Stat %v", h)
 	if err := h.Valid(); err != nil {
 		return backend.BlobInfo{}, err
 	}
@@ -217,6 +226,7 @@ func (b *Local) Stat(h backend.Handle) (backend.BlobInfo, error) {
 
 // Test returns true if a blob of the given type and name exists in the backend.
 func (b *Local) Test(t backend.Type, name string) (bool, error) {
+	debug.Log("backend.local.Test", "Test %v %v", t, name)
 	_, err := fs.Stat(filename(b.p, t, name))
 	if err != nil {
 		if os.IsNotExist(err) {
@@ -230,6 +240,7 @@ func (b *Local) Test(t backend.Type, name string) (bool, error) {
 
 // Remove removes the blob with the given name and type.
 func (b *Local) Remove(t backend.Type, name string) error {
+	debug.Log("backend.local.Remove", "Remove %v %v", t, name)
 	fn := filename(b.p, t, name)
 
 	// reset read-only flag
@@ -304,6 +315,7 @@ func listDirs(dir string) (filenames []string, err error) {
 // goroutine is started for this. If the channel done is closed, sending
 // stops.
 func (b *Local) List(t backend.Type, done <-chan struct{}) <-chan string {
+	debug.Log("backend.local.List", "List %v", t)
 	lister := listDir
 	if t == backend.Data {
 		lister = listDirs
@@ -336,11 +348,13 @@ func (b *Local) List(t backend.Type, done <-chan struct{}) <-chan string {
 
 // Delete removes the repository and all files.
 func (b *Local) Delete() error {
+	debug.Log("backend.local.Delete", "Delete()")
 	return fs.RemoveAll(b.p)
 }
 
 // Close closes all open files.
 func (b *Local) Close() error {
+	debug.Log("backend.local.Close", "Close()")
 	// this does not need to do anything, all open files are closed within the
 	// same function.
 	return nil


@@ -116,8 +116,13 @@ func memLoad(be *MemoryBackend, h backend.Handle, p []byte, off int64) (int, error) {
 	}
 
 	buf := be.data[entry{h.Type, h.Name}]
-	if off > int64(len(buf)) {
+	switch {
+	case off > int64(len(buf)):
 		return 0, errors.New("offset beyond end of file")
+	case off < -int64(len(buf)):
+		return 0, errors.New("offset beyond beginning of file")
+	case off < 0:
+		off = int64(len(buf)) + off
 	}
 
 	buf = buf[off:]
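The final case folds a negative offset into an absolute position. A worked example (not part of the commit):

    // With len(buf) == 100 and off == -10:
    off = int64(len(buf)) + off // 100 + (-10) = 90, ten bytes before EOF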


@@ -75,6 +75,20 @@ func (b *restBackend) Load(h backend.Handle, p []byte, off int64) (n int, err error) {
 		return 0, err
 	}
 
+	// invert offset
+	if off < 0 {
+		info, err := b.Stat(h)
+		if err != nil {
+			return 0, err
+		}
+
+		if off > -info.Size {
+			return 0, errors.New("offset before beginning of file")
+		}
+
+		off = info.Size + off
+	}
+
 	req, err := http.NewRequest("GET", restPath(b.url, h), nil)
 	if err != nil {
 		return 0, err
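The REST backend cannot seek server-side, so a negative offset costs one extra Stat round trip to learn the file size before the offset can be made absolute. The normalization step as a hypothetical standalone helper (for illustration only):

    // absoluteOffset converts a Seek(2)-style negative offset into an
    // absolute one, given the file size obtained from Stat: for example,
    // size == 1000 and off == -100 yield 900.
    func absoluteOffset(size, off int64) int64 {
        if off < 0 {
            return size + off
        }
        return off
    }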


@@ -86,11 +86,15 @@ func (be s3) Load(h backend.Handle, p []byte, off int64) (int, error) {
 		return 0, err
 	}
 
-	if off > 0 {
+	switch {
+	case off > 0:
 		_, err = obj.Seek(off, 0)
-		if err != nil {
-			return 0, err
-		}
+	case off < 0:
+		_, err = obj.Seek(off, 2)
 	}
 
+	if err != nil {
+		return 0, err
+	}
+
 	<-be.connChan


@@ -11,10 +11,11 @@ import (
 	"path/filepath"
 	"strings"
 
+	"github.com/juju/errors"
+	"github.com/pkg/sftp"
+
 	"restic/backend"
 	"restic/debug"
-	"github.com/juju/errors"
-	"github.com/pkg/sftp"
 )
 
 const (
@@ -304,11 +305,15 @@ func (r *SFTP) Load(h backend.Handle, p []byte, off int64) (n int, err error) {
 		}
 	}()
 
-	if off > 0 {
+	switch {
+	case off > 0:
 		_, err = f.Seek(off, 0)
-		if err != nil {
-			return 0, err
-		}
+	case off < 0:
+		_, err = f.Seek(off, 2)
 	}
 
+	if err != nil {
+		return 0, err
+	}
+
 	return io.ReadFull(f, p)


@@ -1,7 +1,6 @@
 package checker
 
 import (
-	"bytes"
 	"errors"
 	"fmt"
 	"sync"
@@ -677,7 +676,7 @@ func checkPack(r *repository.Repository, id backend.ID) error {
 		return fmt.Errorf("Pack ID does not match, want %v, got %v", id.Str(), hash.Str())
 	}
 
-	unpacker, err := pack.NewUnpacker(r.Key(), bytes.NewReader(buf))
+	unpacker, err := pack.NewUnpacker(r.Key(), pack.BufferLoader(buf))
 	if err != nil {
 		return err
 	}

src/restic/pack/loader.go (new file, +43)

@@ -0,0 +1,43 @@
+package pack
+
+import (
+	"errors"
+
+	"restic/backend"
+)
+
+// Loader loads data from somewhere at a given offset. In contrast to
+// io.ReaderAt, off may be negative, in which case it references a position
+// relative to the end of the file (similar to Seek()).
+type Loader interface {
+	Load(p []byte, off int64) (int, error)
+}
+
+// BackendLoader creates a Loader from a Backend and a Handle.
+type BackendLoader struct {
+	Backend backend.Backend
+	Handle  backend.Handle
+}
+
+// Load returns data at the given offset.
+func (l BackendLoader) Load(p []byte, off int64) (int, error) {
+	return l.Backend.Load(l.Handle, p, off)
+}
+
+// BufferLoader allows using a buffer as a Loader.
+type BufferLoader []byte
+
+// Load returns data at the given offset.
+func (b BufferLoader) Load(p []byte, off int64) (int, error) {
+	switch {
+	case off > int64(len(b)):
+		return 0, errors.New("offset is larger than data")
+	case off < -int64(len(b)):
+		return 0, errors.New("offset starts before the beginning of the data")
+	case off < 0:
+		off = int64(len(b)) + off
+	}
+
+	b = b[off:]
+
+	return copy(p, b), nil
+}
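A runnable sketch exercising BufferLoader and the negative-offset convention, assuming the restic/pack import path from this tree:

    package main

    import (
        "fmt"

        "restic/pack"
    )

    func main() {
        data := pack.BufferLoader([]byte("0123456789"))

        p := make([]byte, 4)
        n, err := data.Load(p, -4) // negative offset: 4 bytes from the end
        if err != nil {
            panic(err)
        }
        fmt.Println(string(p[:n])) // prints "6789"
    }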


@@ -245,61 +245,51 @@ const preloadHeaderSize = 2048
 
 // NewUnpacker returns a pointer to Unpacker which can be used to read
 // individual Blobs from a pack.
-func NewUnpacker(k *crypto.Key, rd io.ReadSeeker) (*Unpacker, error) {
+func NewUnpacker(k *crypto.Key, ldr Loader) (*Unpacker, error) {
 	var err error
 
 	// read the last 2048 byte, this will mostly be enough for the header, so
 	// we do not need another round trip.
 	buf := make([]byte, preloadHeaderSize)
-	_, err = rd.Seek(-int64(len(buf)), 2)
+	n, err := ldr.Load(buf, -int64(len(buf)))
 	if err != nil {
-		return nil, fmt.Errorf("seek to -%d failed: %v", len(buf), err)
+		return nil, fmt.Errorf("Load at -%d failed: %v", len(buf), err)
 	}
+	buf = buf[:n]
 
-	_, err = io.ReadFull(rd, buf)
-	if err != nil {
-		return nil, fmt.Errorf("error reading last %d bytes: %v", len(buf), err)
-	}
-
-	hdrRd := io.ReadSeeker(bytes.NewReader(buf))
-
-	ls := binary.Size(uint32(0))
-
-	// reset to the end to read header length
-	_, err = hdrRd.Seek(-int64(ls), 2)
-	if err != nil {
-		return nil, fmt.Errorf("seeking to read header length failed: %v", err)
-	}
-
-	var length uint32
-	err = binary.Read(hdrRd, binary.LittleEndian, &length)
-	if err != nil {
-		return nil, fmt.Errorf("reading header length failed: %v", err)
-	}
+	bs := binary.Size(uint32(0))
+	p := len(buf) - bs
 
-	// if the header is longer than the preloaded buffer, use the original
-	// reader (and do another round trip)
-	if int(length) > preloadHeaderSize-ls {
-		hdrRd = rd
-	}
+	// read the length from the end of the buffer
+	length := int(binary.LittleEndian.Uint32(buf[p : p+bs]))
+	buf = buf[:p]
 
-	// reset to the beginning of the header
-	_, err = hdrRd.Seek(-int64(ls)-int64(length), 2)
-	if err != nil {
-		return nil, fmt.Errorf("seeking to read header length failed: %v", err)
-	}
+	// if the header is longer than the preloaded buffer, call the loader again.
+	if length > len(buf) {
+		buf = make([]byte, length)
+		n, err := ldr.Load(buf, -int64(len(buf)+bs))
+		if err != nil {
+			return nil, fmt.Errorf("Load at -%d failed: %v", len(buf), err)
+		}
+		buf = buf[:n]
+	}
+
+	buf = buf[len(buf)-length:]
 
 	// read header
-	hrd, err := crypto.DecryptFrom(k, io.LimitReader(hdrRd, int64(length)))
+	hdr, err := crypto.Decrypt(k, buf, buf)
 	if err != nil {
 		return nil, err
 	}
 
+	rd := bytes.NewReader(hdr)
+
 	var entries []Blob
 
 	pos := uint(0)
 	for {
 		e := headerEntry{}
-		err = binary.Read(hrd, binary.LittleEndian, &e)
+		err = binary.Read(rd, binary.LittleEndian, &e)
 		if err == io.EOF {
 			break
 		}
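The rewrite relies on the pack trailer layout; an illustration of the offset arithmetic (not part of the commit):

    // A pack file ends with the encrypted header followed by its length:
    //
    //   [ blobs ... | encrypted header (length bytes) | uint32 length, LE ]
    //
    // The first Load fetches the last preloadHeaderSize (2048) bytes; the
    // trailing bs = 4 bytes give length. If length <= 2044 the header is
    // already in memory and buf[len(buf)-length:] slices it out; otherwise
    // a second Load of length bytes at offset -(length+bs) fetches exactly
    // the header, at the cost of one more round trip.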
@@ -328,11 +318,11 @@ func NewUnpacker(k *crypto.Key, rd io.ReadSeeker) (*Unpacker, error) {
 		pos += uint(e.Length)
 	}
 
-	p := &Unpacker{
+	up := &Unpacker{
 		rd:      rd,
 		k:       k,
 		Entries: entries,
 	}
 
-	return p, nil
+	return up, nil
 }


@@ -7,7 +7,6 @@ import (
 	"encoding/binary"
 	"encoding/json"
 	"io"
-	"io/ioutil"
 	"testing"
 
 	"restic/backend"
@@ -48,7 +47,7 @@ func newPack(t testing.TB, k *crypto.Key) ([]Buf, []byte, uint) {
 	return bufs, packData, p.Size()
 }
 
-func verifyBlobs(t testing.TB, bufs []Buf, k *crypto.Key, rd io.ReadSeeker, packSize uint) {
+func verifyBlobs(t testing.TB, bufs []Buf, k *crypto.Key, ldr pack.Loader, packSize uint) {
 	written := 0
 	for _, l := range lengths {
 		written += l
@@ -64,20 +63,24 @@ func verifyBlobs(t testing.TB, bufs []Buf, k *crypto.Key, rd io.ReadSeeker, packSize uint) {
 	Equals(t, uint(written), packSize)
 
 	// read and parse it again
-	np, err := pack.NewUnpacker(k, rd)
+	np, err := pack.NewUnpacker(k, ldr)
 	OK(t, err)
 	Equals(t, len(np.Entries), len(bufs))
 
+	var buf []byte
 	for i, b := range bufs {
 		e := np.Entries[i]
 		Equals(t, b.id, e.ID)
 
-		brd, err := e.GetReader(rd)
-		OK(t, err)
-
-		data, err := ioutil.ReadAll(brd)
+		if len(buf) < int(e.Length) {
+			buf = make([]byte, int(e.Length))
+		}
+		buf = buf[:int(e.Length)]
+		n, err := ldr.Load(buf, int64(e.Offset))
 		OK(t, err)
+		buf = buf[:n]
 
-		Assert(t, bytes.Equal(b.data, data),
+		Assert(t, bytes.Equal(b.data, buf),
 			"data for blob %v doesn't match", i)
 	}
 }
@@ -88,7 +91,7 @@ func TestCreatePack(t *testing.T) {
 	bufs, packData, packSize := newPack(t, k)
 	Equals(t, uint(len(packData)), packSize)
 
-	verifyBlobs(t, bufs, k, bytes.NewReader(packData), packSize)
+	verifyBlobs(t, bufs, k, pack.BufferLoader(packData), packSize)
 }
 
 var blobTypeJSON = []struct {
@@ -125,6 +128,6 @@ func TestUnpackReadSeeker(t *testing.T) {
 	handle := backend.Handle{Type: backend.Data, Name: id.String()}
 	OK(t, b.Save(handle, packData))
 
-	rd := backend.NewReadSeeker(b, handle)
-	verifyBlobs(t, bufs, k, rd, packSize)
+	ldr := pack.BackendLoader{Backend: b, Handle: handle}
+	verifyBlobs(t, bufs, k, ldr, packSize)
 }


@@ -1,7 +1,6 @@
 package repository
 
 import (
-	"bytes"
 	"io"
 
 	"restic/backend"
 	"restic/crypto"
@@ -33,7 +32,7 @@ func Repack(repo *Repository, packs backend.IDSet, keepBlobs pack.BlobSet) (err error) {
 		debug.Log("Repack", "pack %v loaded (%d bytes)", packID.Str(), len(buf))
 
-		unpck, err := pack.NewUnpacker(repo.Key(), bytes.NewReader(buf))
+		unpck, err := pack.NewUnpacker(repo.Key(), pack.BufferLoader(buf))
 		if err != nil {
 			return err
 		}


@@ -547,9 +547,9 @@ func (r *Repository) List(t backend.Type, done <-chan struct{}) <-chan backend.ID {
 // ListPack returns the list of blobs saved in the pack id.
 func (r *Repository) ListPack(id backend.ID) ([]pack.Blob, error) {
 	h := backend.Handle{Type: backend.Data, Name: id.String()}
-	rd := backend.NewReadSeeker(r.Backend(), h)
+	ldr := pack.BackendLoader{Backend: r.Backend(), Handle: h}
 
-	unpacker, err := pack.NewUnpacker(r.Key(), rd)
+	unpacker, err := pack.NewUnpacker(r.Key(), ldr)
 	if err != nil {
 		return nil, err
 	}