Alexander Neumann 2016-08-31 20:29:54 +02:00
parent 90da66261a
commit f0600c1d5f
42 changed files with 524 additions and 438 deletions

View file

@ -3,10 +3,8 @@ package restic
import (
"encoding/json"
"io"
"restic/backend"
"restic/debug"
"restic/pack"
"restic/repository"
"time"
"github.com/pkg/errors"
@ -14,15 +12,15 @@ import (
)
// saveTreeJSON stores a tree in the repository.
func saveTreeJSON(repo *repository.Repository, item interface{}) (backend.ID, error) {
func saveTreeJSON(repo Repository, item interface{}) (ID, error) {
data, err := json.Marshal(item)
if err != nil {
return backend.ID{}, errors.Wrap(err, "")
return ID{}, errors.Wrap(err, "")
}
data = append(data, '\n')
// check if tree has been saved before
id := backend.Hash(data)
id := Hash(data)
if repo.Index().Has(id, pack.Tree) {
return id, nil
}
@ -32,19 +30,19 @@ func saveTreeJSON(repo *repository.Repository, item interface{}) (backend.ID, er
// ArchiveReader reads from the reader and archives the data. Returned is the
// resulting snapshot and its ID.
func ArchiveReader(repo *repository.Repository, p *Progress, rd io.Reader, name string) (*Snapshot, backend.ID, error) {
func ArchiveReader(repo Repository, p *Progress, rd io.Reader, name string) (*Snapshot, ID, error) {
debug.Log("ArchiveReader", "start archiving %s", name)
sn, err := NewSnapshot([]string{name})
if err != nil {
return nil, backend.ID{}, err
return nil, ID{}, err
}
p.Start()
defer p.Done()
chnker := chunker.New(rd, repo.Config.ChunkerPolynomial)
chnker := chunker.New(rd, repo.Config().ChunkerPolynomial())
var ids backend.IDs
var ids IDs
var fileSize uint64
for {
@ -54,15 +52,15 @@ func ArchiveReader(repo *repository.Repository, p *Progress, rd io.Reader, name
}
if err != nil {
return nil, backend.ID{}, errors.Wrap(err, "chunker.Next()")
return nil, ID{}, errors.Wrap(err, "chunker.Next()")
}
id := backend.Hash(chunk.Data)
id := Hash(chunk.Data)
if !repo.Index().Has(id, pack.Data) {
_, err := repo.SaveAndEncrypt(pack.Data, chunk.Data, nil)
if err != nil {
return nil, backend.ID{}, err
return nil, ID{}, err
}
debug.Log("ArchiveReader", "saved blob %v (%d bytes)\n", id.Str(), chunk.Length)
} else {
@ -96,14 +94,14 @@ func ArchiveReader(repo *repository.Repository, p *Progress, rd io.Reader, name
treeID, err := saveTreeJSON(repo, tree)
if err != nil {
return nil, backend.ID{}, err
return nil, ID{}, err
}
sn.Tree = &treeID
debug.Log("ArchiveReader", "tree saved as %v", treeID.Str())
id, err := repo.SaveJSONUnpacked(backend.Snapshot, sn)
id, err := repo.SaveJSONUnpacked(SnapshotFile, sn)
if err != nil {
return nil, backend.ID{}, err
return nil, ID{}, err
}
sn.id = &id
@ -111,12 +109,12 @@ func ArchiveReader(repo *repository.Repository, p *Progress, rd io.Reader, name
err = repo.Flush()
if err != nil {
return nil, backend.ID{}, err
return nil, ID{}, err
}
err = repo.SaveIndex()
if err != nil {
return nil, backend.ID{}, err
return nil, ID{}, err
}
return sn, id, nil
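Taken together, the new signature lets callers pass anything satisfying the restic.Repository interface. A minimal usage sketch, assuming openRepo is a hypothetical helper returning such a value, and assuming a nil *Progress is tolerated (the Progress methods appear to guard against a nil receiver):

package main

import (
    "fmt"
    "log"
    "os"

    "restic"
)

// openRepo is a hypothetical helper that opens a repository and returns a
// value satisfying the new restic.Repository interface.
func openRepo() restic.Repository { panic("not implemented") }

func main() {
    repo := openRepo()

    // Archive everything read from stdin as a single file named "stdin".
    sn, id, err := restic.ArchiveReader(repo, nil, os.Stdin, "stdin")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("created snapshot %v (tree %v)\n", id.Str(), sn.Tree.Str())
}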

View file

@ -12,12 +12,10 @@ import (
"github.com/pkg/errors"
"restic/backend"
"restic/debug"
"restic/fs"
"restic/pack"
"restic/pipe"
"restic/repository"
"github.com/restic/chunker"
)
@ -32,9 +30,9 @@ var archiverAllowAllFiles = func(string, os.FileInfo) bool { return true }
// Archiver is used to backup a set of directories.
type Archiver struct {
repo *repository.Repository
repo Repository
knownBlobs struct {
backend.IDSet
IDSet
sync.Mutex
}
@ -46,15 +44,15 @@ type Archiver struct {
}
// NewArchiver returns a new archiver.
func NewArchiver(repo *repository.Repository) *Archiver {
func NewArchiver(repo Repository) *Archiver {
arch := &Archiver{
repo: repo,
blobToken: make(chan struct{}, maxConcurrentBlobs),
knownBlobs: struct {
backend.IDSet
IDSet
sync.Mutex
}{
IDSet: backend.NewIDSet(),
IDSet: NewIDSet(),
},
}
@ -72,7 +70,7 @@ func NewArchiver(repo *repository.Repository) *Archiver {
// When the blob is not known, false is returned and the blob is added to the
// list. This means that the caller to whom false is returned is responsible
// for saving the blob to the backend.
func (arch *Archiver) isKnownBlob(id backend.ID, t pack.BlobType) bool {
func (arch *Archiver) isKnownBlob(id ID, t pack.BlobType) bool {
arch.knownBlobs.Lock()
defer arch.knownBlobs.Unlock()
@ -91,7 +89,7 @@ func (arch *Archiver) isKnownBlob(id backend.ID, t pack.BlobType) bool {
}
// Save stores a blob read from rd in the repository.
func (arch *Archiver) Save(t pack.BlobType, data []byte, id backend.ID) error {
func (arch *Archiver) Save(t pack.BlobType, data []byte, id ID) error {
debug.Log("Archiver.Save", "Save(%v, %v)\n", t, id.Str())
if arch.isKnownBlob(id, pack.Data) {
@ -110,15 +108,15 @@ func (arch *Archiver) Save(t pack.BlobType, data []byte, id backend.ID) error {
}
// SaveTreeJSON stores a tree in the repository.
func (arch *Archiver) SaveTreeJSON(item interface{}) (backend.ID, error) {
func (arch *Archiver) SaveTreeJSON(item interface{}) (ID, error) {
data, err := json.Marshal(item)
if err != nil {
return backend.ID{}, errors.Wrap(err, "Marshal")
return ID{}, errors.Wrap(err, "Marshal")
}
data = append(data, '\n')
// check if tree has been saved before
id := backend.Hash(data)
id := Hash(data)
if arch.isKnownBlob(id, pack.Tree) {
return id, nil
}
@ -151,14 +149,14 @@ func (arch *Archiver) reloadFileIfChanged(node *Node, file fs.File) (*Node, erro
}
type saveResult struct {
id backend.ID
id ID
bytes uint64
}
func (arch *Archiver) saveChunk(chunk chunker.Chunk, p *Progress, token struct{}, file fs.File, resultChannel chan<- saveResult) {
defer freeBuf(chunk.Data)
id := backend.Hash(chunk.Data)
id := Hash(chunk.Data)
err := arch.Save(pack.Data, chunk.Data, id)
// TODO handle error
if err != nil {
@ -188,7 +186,7 @@ func updateNodeContent(node *Node, results []saveResult) error {
debug.Log("Archiver.Save", "checking size for file %s", node.path)
var bytes uint64
node.Content = make([]backend.ID, len(results))
node.Content = make([]ID, len(results))
for i, b := range results {
node.Content[i] = b.id
@ -220,7 +218,7 @@ func (arch *Archiver) SaveFile(p *Progress, node *Node) error {
return err
}
chnker := chunker.New(file, arch.repo.Config.ChunkerPolynomial)
chnker := chunker.New(file, arch.repo.Config().ChunkerPolynomial())
resultChannels := [](<-chan saveResult){}
for {
@ -290,7 +288,7 @@ func (arch *Archiver) fileWorker(wg *sync.WaitGroup, p *Progress, done <-chan st
// check if all content is still available in the repository
contentMissing := false
for _, blob := range oldNode.blobs {
if ok, err := arch.repo.Backend().Test(backend.Data, blob.Storage.String()); !ok || err != nil {
if ok, err := arch.repo.Backend().Test(DataFile, blob.Storage.String()); !ok || err != nil {
debug.Log("Archiver.fileWorker", " %v not using old data, %v (%v) is missing", e.Path(), blob.ID.Str(), blob.Storage.Str())
contentMissing = true
break
@ -635,7 +633,7 @@ func (p baseNameSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
// Snapshot creates a snapshot of the given paths. If parentID is set, this is
// used to compare the files to the ones archived at the time this snapshot was
// taken.
func (arch *Archiver) Snapshot(p *Progress, paths []string, parentID *backend.ID) (*Snapshot, backend.ID, error) {
func (arch *Archiver) Snapshot(p *Progress, paths []string, parentID *ID) (*Snapshot, ID, error) {
paths = unique(paths)
sort.Sort(baseNameSlice(paths))
@ -653,7 +651,7 @@ func (arch *Archiver) Snapshot(p *Progress, paths []string, parentID *backend.ID
// create new snapshot
sn, err := NewSnapshot(paths)
if err != nil {
return nil, backend.ID{}, err
return nil, ID{}, err
}
sn.Excludes = arch.Excludes
@ -666,7 +664,7 @@ func (arch *Archiver) Snapshot(p *Progress, paths []string, parentID *backend.ID
// load parent snapshot
parent, err := LoadSnapshot(arch.repo, *parentID)
if err != nil {
return nil, backend.ID{}, err
return nil, ID{}, err
}
// start walker on old tree
@ -735,9 +733,9 @@ func (arch *Archiver) Snapshot(p *Progress, paths []string, parentID *backend.ID
sn.Tree = root.Subtree
// save snapshot
id, err := arch.repo.SaveJSONUnpacked(backend.Snapshot, sn)
id, err := arch.repo.SaveJSONUnpacked(SnapshotFile, sn)
if err != nil {
return nil, backend.ID{}, err
return nil, ID{}, err
}
// store ID in snapshot struct
@ -747,14 +745,14 @@ func (arch *Archiver) Snapshot(p *Progress, paths []string, parentID *backend.ID
// flush repository
err = arch.repo.Flush()
if err != nil {
return nil, backend.ID{}, err
return nil, ID{}, err
}
// save index
err = arch.repo.SaveIndex()
if err != nil {
debug.Log("Archiver.Snapshot", "error saving index: %v", err)
return nil, backend.ID{}, err
return nil, ID{}, err
}
debug.Log("Archiver.Snapshot", "saved indexes")

View file

@ -11,7 +11,6 @@ import (
"restic/checker"
"restic/crypto"
"restic/pack"
"restic/repository"
. "restic/test"
"github.com/pkg/errors"
@ -302,7 +301,7 @@ func getRandomData(seed int, size int) []chunker.Chunk {
return chunks
}
func createAndInitChecker(t *testing.T, repo *repository.Repository) *checker.Checker {
func createAndInitChecker(t *testing.T, repo Repository) *checker.Checker {
chkr := checker.New(repo)
hints, errs := chkr.LoadIndex()

View file

@ -1,28 +1,15 @@
package restic
// FileType is the type of a file in the backend.
type FileType string
// These are the different data types a backend can store.
const (
DataFile FileType = "data"
KeyFile = "key"
LockFile = "lock"
SnapshotFile = "snapshot"
IndexFile = "index"
ConfigFile = "config"
)
// Backend is used to store and access data.
type Backend interface {
// Location returns a string that describes the type and location of the
// repository.
Location() string
// Test a boolean value whether a Blob with the name and type exists.
// Test returns whether a File with the given name and type exists.
Test(t FileType, name string) (bool, error)
// Remove removes a Blob with type t and name.
// Remove removes a File with type t and name.
Remove(t FileType, name string) error
// Close the backend
@ -37,10 +24,10 @@ type Backend interface {
// Save stores the data in the backend under the given handle.
Save(h Handle, p []byte) error
// Stat returns information about the blob identified by h.
Stat(h Handle) (BlobInfo, error)
// Stat returns information about the File identified by h.
Stat(h Handle) (FileInfo, error)
// List returns a channel that yields all names of blobs of type t in an
// List returns a channel that yields all names of files of type t in an
// arbitrary order. A goroutine is started for this. If the channel done is
// closed, sending stops.
List(t FileType, done <-chan struct{}) <-chan string
@ -49,7 +36,6 @@ type Backend interface {
Delete() error
}
// BlobInfo is returned by Stat() and contains information about a stored blob.
type BlobInfo struct {
Size int64
}
// FileInfo is returned by Stat() and contains information about a file in the
// backend.
type FileInfo struct{ Size int64 }
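With Stat now returning FileInfo, size queries read naturally alongside Test. A sketch using only the two methods shown above; the packSize helper is hypothetical:

import (
    "fmt"

    "restic"
)

// packSize checks that a pack file exists, then asks the backend for its
// size via Stat, which now returns a FileInfo.
func packSize(be restic.Backend, id restic.ID) (int64, error) {
    name := id.String()

    ok, err := be.Test(restic.DataFile, name)
    if err != nil {
        return 0, err
    }
    if !ok {
        return 0, fmt.Errorf("pack %v not found", id.Str())
    }

    fi, err := be.Stat(restic.Handle{Type: restic.DataFile, Name: name})
    if err != nil {
        return 0, err
    }
    return fi.Size, nil
}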

View file

@ -1,17 +0,0 @@
package backend
import (
"crypto/rand"
"io"
)
// RandomID returns a randomly generated ID. This is mainly used for testing.
// When reading from rand fails, the function panics.
func RandomID() ID {
id := ID{}
_, err := io.ReadFull(rand.Reader, id[:])
if err != nil {
panic(err)
}
return id
}

src/restic/blob.go (new file, 103 lines)
View file

@ -0,0 +1,103 @@
package restic
import (
"errors"
"fmt"
)
type Blob struct {
ID *ID `json:"id,omitempty"`
Size uint64 `json:"size,omitempty"`
Storage *ID `json:"sid,omitempty"` // encrypted ID
StorageSize uint64 `json:"ssize,omitempty"` // encrypted Size
}
type Blobs []Blob
func (b Blob) Valid() bool {
if b.ID == nil || b.Storage == nil || b.StorageSize == 0 {
return false
}
return true
}
func (b Blob) String() string {
return fmt.Sprintf("Blob<%s (%d) -> %s (%d)>",
b.ID.Str(), b.Size,
b.Storage.Str(), b.StorageSize)
}
// Compare compares two blobs by comparing the ID and the size. It returns -1,
// 0, or 1.
func (b Blob) Compare(other Blob) int {
if res := b.ID.Compare(*other.ID); res != 0 {
return res
}
if b.Size < other.Size {
return -1
}
if b.Size > other.Size {
return 1
}
return 0
}
// BlobHandle identifies a blob of a given type.
type BlobHandle struct {
ID ID
Type BlobType
}
func (h BlobHandle) String() string {
return fmt.Sprintf("<%s/%s>", h.Type, h.ID.Str())
}
// BlobType specifies what a blob stored in a pack is.
type BlobType uint8
// These are the blob types that can be stored in a pack.
const (
InvalidBlob BlobType = iota
DataBlob
TreeBlob
)
func (t BlobType) String() string {
switch t {
case DataBlob:
return "data"
case TreeBlob:
return "tree"
}
return fmt.Sprintf("<BlobType %d>", t)
}
// MarshalJSON encodes the BlobType into JSON.
func (t BlobType) MarshalJSON() ([]byte, error) {
switch t {
case DataBlob:
return []byte(`"data"`), nil
case TreeBlob:
return []byte(`"tree"`), nil
}
return nil, errors.New("unknown blob type")
}
// UnmarshalJSON decodes the BlobType from JSON.
func (t *BlobType) UnmarshalJSON(buf []byte) error {
switch string(buf) {
case `"data"`:
*t = DataBlob
case `"tree"`:
*t = TreeBlob
default:
return errors.New("unknown blob type")
}
return nil
}
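A short sketch of the moved type in use; the data and sizes are made up, and restic.Hash is used only to obtain valid ID values:

package main

import (
    "fmt"

    "restic"
)

func main() {
    // Hypothetical plaintext and storage IDs derived from arbitrary bytes.
    id1, id2 := restic.Hash([]byte("foo")), restic.Hash([]byte("bar"))
    sid1, sid2 := restic.Hash([]byte("foo-ct")), restic.Hash([]byte("bar-ct"))

    b1 := restic.Blob{ID: &id1, Size: 3, Storage: &sid1, StorageSize: 35}
    b2 := restic.Blob{ID: &id2, Size: 3, Storage: &sid2, StorageSize: 35}

    fmt.Println(b1.Valid(), b2.Valid()) // true true
    fmt.Println(b1)                     // Blob<... (3) -> ... (35)>
    fmt.Println(b1.Compare(b2))         // ordered by ID first, then Size
}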

src/restic/blob_test.go (new file, 41 lines)
View file

@ -0,0 +1,41 @@
package restic
import (
"encoding/json"
"testing"
)
var blobTypeJSON = []struct {
t BlobType
res string
}{
{DataBlob, `"data"`},
{TreeBlob, `"tree"`},
}
func TestBlobTypeJSON(t *testing.T) {
for _, test := range blobTypeJSON {
// test serialize
buf, err := json.Marshal(test.t)
if err != nil {
t.Error(err)
continue
}
if test.res != string(buf) {
t.Errorf("want %q, got %q", test.res, string(buf))
continue
}
// test unserialize
var v BlobType
err = json.Unmarshal([]byte(test.res), &v)
if err != nil {
t.Error(err)
continue
}
if test.t != v {
t.Errorf("want %v, got %v", test.t, v)
continue
}
}
}

View file

@ -6,6 +6,19 @@ import (
"github.com/pkg/errors"
)
// FileType is the type of a file in the backend.
type FileType string
// These are the different data types a backend can store.
const (
DataFile FileType = "data"
KeyFile = "key"
LockFile = "lock"
SnapshotFile = "snapshot"
IndexFile = "index"
ConfigFile = "config"
)
// Handle is used to store and access data in a backend.
type Handle struct {
FileType FileType

View file

@ -1,15 +1,11 @@
package restic
import (
"restic/backend"
"restic/pack"
"restic/repository"
)
import "restic/pack"
// FindUsedBlobs traverses the tree ID and adds all seen blobs (trees and data
// blobs) to the set blobs. The tree blobs in the `seen` BlobSet will not be visited
// again.
func FindUsedBlobs(repo *repository.Repository, treeID backend.ID, blobs pack.BlobSet, seen pack.BlobSet) error {
func FindUsedBlobs(repo Repository, treeID ID, blobs pack.BlobSet, seen pack.BlobSet) error {
blobs.Insert(pack.Handle{ID: treeID, Type: pack.Tree})
tree, err := LoadTree(repo, treeID)
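The seen set is what makes repeated traversal cheap. A sketch with a hypothetical usedBlobs helper that shares one seen set across several snapshot trees, so a subtree reachable from more than one snapshot is only walked once:

import (
    "restic"
    "restic/pack"
)

// usedBlobs collects all blobs referenced by the given snapshot trees.
func usedBlobs(repo restic.Repository, trees []restic.ID) (pack.BlobSet, error) {
    blobs := pack.NewBlobSet()
    seen := pack.NewBlobSet()

    for _, treeID := range trees {
        // seen is shared, so subtrees visited for an earlier snapshot are
        // skipped on later calls.
        if err := restic.FindUsedBlobs(repo, treeID, blobs, seen); err != nil {
            return nil, err
        }
    }
    return blobs, nil
}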

View file

@ -1,4 +1,4 @@
package restic
package restic_test
import (
"bufio"
@ -7,6 +7,7 @@ import (
"fmt"
"os"
"path/filepath"
"restic"
"sort"
"testing"
"time"
@ -92,7 +93,7 @@ func TestFindUsedBlobs(t *testing.T) {
for i, sn := range snapshots {
usedBlobs := pack.NewBlobSet()
err := FindUsedBlobs(repo, *sn.Tree, usedBlobs, pack.NewBlobSet())
err := restic.FindUsedBlobs(repo, *sn.Tree, usedBlobs, pack.NewBlobSet())
if err != nil {
t.Errorf("FindUsedBlobs returned error: %v", err)
continue
@ -128,7 +129,7 @@ func BenchmarkFindUsedBlobs(b *testing.B) {
for i := 0; i < b.N; i++ {
seen := pack.NewBlobSet()
blobs := pack.NewBlobSet()
err := FindUsedBlobs(repo, *sn.Tree, blobs, seen)
err := restic.FindUsedBlobs(repo, *sn.Tree, blobs, seen)
if err != nil {
b.Error(err)
}

View file

@ -11,9 +11,7 @@ import (
"github.com/pkg/errors"
"restic/backend"
"restic/debug"
"restic/repository"
)
// Lock represents a process locking the repository for an operation.
@ -33,8 +31,8 @@ type Lock struct {
UID uint32 `json:"uid,omitempty"`
GID uint32 `json:"gid,omitempty"`
repo *repository.Repository
lockID *backend.ID
repo Repository
lockID *ID
}
// ErrAlreadyLocked is returned when NewLock or NewExclusiveLock are unable to
@ -59,20 +57,20 @@ func IsAlreadyLocked(err error) bool {
// NewLock returns a new, non-exclusive lock for the repository. If an
// exclusive lock is already held by another process, ErrAlreadyLocked is
// returned.
func NewLock(repo *repository.Repository) (*Lock, error) {
func NewLock(repo Repository) (*Lock, error) {
return newLock(repo, false)
}
// NewExclusiveLock returns a new, exclusive lock for the repository. If
// another lock (normal and exclusive) is already held by another process,
// ErrAlreadyLocked is returned.
func NewExclusiveLock(repo *repository.Repository) (*Lock, error) {
func NewExclusiveLock(repo Repository) (*Lock, error) {
return newLock(repo, true)
}
const waitBeforeLockCheck = 200 * time.Millisecond
func newLock(repo *repository.Repository, excl bool) (*Lock, error) {
func newLock(repo Repository, excl bool) (*Lock, error) {
lock := &Lock{
Time: time.Now(),
PID: os.Getpid(),
@ -128,7 +126,7 @@ func (l *Lock) fillUserInfo() error {
// non-exclusive lock is to be created, an error is only returned when an
// exclusive lock is found.
func (l *Lock) checkForOtherLocks() error {
return eachLock(l.repo, func(id backend.ID, lock *Lock, err error) error {
return eachLock(l.repo, func(id ID, lock *Lock, err error) error {
if l.lockID != nil && id.Equal(*l.lockID) {
return nil
}
@ -150,11 +148,11 @@ func (l *Lock) checkForOtherLocks() error {
})
}
func eachLock(repo *repository.Repository, f func(backend.ID, *Lock, error) error) error {
func eachLock(repo Repository, f func(ID, *Lock, error) error) error {
done := make(chan struct{})
defer close(done)
for id := range repo.List(backend.Lock, done) {
for id := range repo.List(LockFile, done) {
lock, err := LoadLock(repo, id)
err = f(id, lock, err)
if err != nil {
@ -166,10 +164,10 @@ func eachLock(repo *repository.Repository, f func(backend.ID, *Lock, error) erro
}
// createLock acquires the lock by creating a file in the repository.
func (l *Lock) createLock() (backend.ID, error) {
id, err := l.repo.SaveJSONUnpacked(backend.Lock, l)
func (l *Lock) createLock() (ID, error) {
id, err := l.repo.SaveJSONUnpacked(LockFile, l)
if err != nil {
return backend.ID{}, err
return ID{}, err
}
return id, nil
@ -181,7 +179,7 @@ func (l *Lock) Unlock() error {
return nil
}
return l.repo.Backend().Remove(backend.Lock, l.lockID.String())
return l.repo.Backend().Remove(LockFile, l.lockID.String())
}
var staleTimeout = 30 * time.Minute
@ -229,7 +227,7 @@ func (l *Lock) Refresh() error {
return err
}
err = l.repo.Backend().Remove(backend.Lock, l.lockID.String())
err = l.repo.Backend().Remove(LockFile, l.lockID.String())
if err != nil {
return err
}
@ -269,9 +267,9 @@ func init() {
}
// LoadLock loads and unserializes a lock from a repository.
func LoadLock(repo *repository.Repository, id backend.ID) (*Lock, error) {
func LoadLock(repo Repository, id ID) (*Lock, error) {
lock := &Lock{}
if err := repo.LoadJSONUnpacked(backend.Lock, id, lock); err != nil {
if err := repo.LoadJSONUnpacked(LockFile, id, lock); err != nil {
return nil, err
}
lock.lockID = &id
@ -280,15 +278,15 @@ func LoadLock(repo *repository.Repository, id backend.ID) (*Lock, error) {
}
// RemoveStaleLocks deletes all locks detected as stale from the repository.
func RemoveStaleLocks(repo *repository.Repository) error {
return eachLock(repo, func(id backend.ID, lock *Lock, err error) error {
func RemoveStaleLocks(repo Repository) error {
return eachLock(repo, func(id ID, lock *Lock, err error) error {
// ignore locks that cannot be loaded
if err != nil {
return nil
}
if lock.Stale() {
return repo.Backend().Remove(backend.Lock, id.String())
return repo.Backend().Remove(LockFile, id.String())
}
return nil
@ -296,8 +294,8 @@ func RemoveStaleLocks(repo *repository.Repository) error {
}
// RemoveAllLocks removes all locks forcefully.
func RemoveAllLocks(repo *repository.Repository) error {
return eachLock(repo, func(id backend.ID, lock *Lock, err error) error {
return repo.Backend().Remove(backend.Lock, id.String())
func RemoveAllLocks(repo Repository) error {
return eachLock(repo, func(id ID, lock *Lock, err error) error {
return repo.Backend().Remove(LockFile, id.String())
})
}
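A usage sketch built from the functions above; the retry-after-cleanup policy is hypothetical, not something this file prescribes:

import "restic"

// withExclusiveLock takes an exclusive lock, removing stale locks and
// retrying once if the first attempt fails, and unlocks when fn returns.
func withExclusiveLock(repo restic.Repository, fn func() error) error {
    lock, err := restic.NewExclusiveLock(repo)
    if restic.IsAlreadyLocked(err) {
        // Stale locks from crashed processes may be the cause; remove them
        // and retry once.
        if rerr := restic.RemoveStaleLocks(repo); rerr != nil {
            return rerr
        }
        lock, err = restic.NewExclusiveLock(repo)
    }
    if err != nil {
        return err
    }
    defer lock.Unlock()

    return fn()
}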

View file

@ -6,8 +6,6 @@ import (
"time"
"restic"
"restic/backend"
"restic/repository"
. "restic/test"
)
@ -92,18 +90,18 @@ func TestExclusiveLockOnLockedRepo(t *testing.T) {
OK(t, elock.Unlock())
}
func createFakeLock(repo *repository.Repository, t time.Time, pid int) (backend.ID, error) {
func createFakeLock(repo restic.Repository, t time.Time, pid int) (restic.ID, error) {
hostname, err := os.Hostname()
if err != nil {
return backend.ID{}, err
return restic.ID{}, err
}
newLock := &restic.Lock{Time: t, PID: pid, Hostname: hostname}
return repo.SaveJSONUnpacked(backend.Lock, &newLock)
return repo.SaveJSONUnpacked(restic.LockFile, &newLock)
}
func removeLock(repo *repository.Repository, id backend.ID) error {
return repo.Backend().Remove(backend.Lock, id.String())
func removeLock(repo restic.Repository, id restic.ID) error {
return repo.Backend().Remove(restic.LockFile, id.String())
}
var staleLockTests = []struct {
@ -162,8 +160,8 @@ func TestLockStale(t *testing.T) {
}
}
func lockExists(repo *repository.Repository, t testing.TB, id backend.ID) bool {
exists, err := repo.Backend().Test(backend.Lock, id.String())
func lockExists(repo restic.Repository, t testing.TB, id restic.ID) bool {
exists, err := repo.Backend().Test(restic.LockFile, id.String())
OK(t, err)
return exists
@ -224,8 +222,8 @@ func TestLockRefresh(t *testing.T) {
lock, err := restic.NewLock(repo)
OK(t, err)
var lockID *backend.ID
for id := range repo.List(backend.Lock, nil) {
var lockID *restic.ID
for id := range repo.List(restic.LockFile, nil) {
if lockID != nil {
t.Error("more than one lock found")
}
@ -234,8 +232,8 @@ func TestLockRefresh(t *testing.T) {
OK(t, lock.Refresh())
var lockID2 *backend.ID
for id := range repo.List(backend.Lock, nil) {
var lockID2 *restic.ID
for id := range repo.List(restic.LockFile, nil) {
if lockID2 != nil {
t.Error("more than one lock found")
}

View file

@ -14,32 +14,30 @@ import (
"runtime"
"restic/backend"
"restic/debug"
"restic/fs"
"restic/pack"
"restic/repository"
)
// Node is a file, directory or other item in a backup.
type Node struct {
Name string `json:"name"`
FileType string `json:"type"`
Mode os.FileMode `json:"mode,omitempty"`
ModTime time.Time `json:"mtime,omitempty"`
AccessTime time.Time `json:"atime,omitempty"`
ChangeTime time.Time `json:"ctime,omitempty"`
UID uint32 `json:"uid"`
GID uint32 `json:"gid"`
User string `json:"user,omitempty"`
Group string `json:"group,omitempty"`
Inode uint64 `json:"inode,omitempty"`
Size uint64 `json:"size,omitempty"`
Links uint64 `json:"links,omitempty"`
LinkTarget string `json:"linktarget,omitempty"`
Device uint64 `json:"device,omitempty"`
Content []backend.ID `json:"content"`
Subtree *backend.ID `json:"subtree,omitempty"`
Name string `json:"name"`
FileType string `json:"type"`
Mode os.FileMode `json:"mode,omitempty"`
ModTime time.Time `json:"mtime,omitempty"`
AccessTime time.Time `json:"atime,omitempty"`
ChangeTime time.Time `json:"ctime,omitempty"`
UID uint32 `json:"uid"`
GID uint32 `json:"gid"`
User string `json:"user,omitempty"`
Group string `json:"group,omitempty"`
Inode uint64 `json:"inode,omitempty"`
Size uint64 `json:"size,omitempty"`
Links uint64 `json:"links,omitempty"`
LinkTarget string `json:"linktarget,omitempty"`
Device uint64 `json:"device,omitempty"`
Content IDs `json:"content"`
Subtree *ID `json:"subtree,omitempty"`
Error string `json:"error,omitempty"`
@ -47,7 +45,7 @@ type Node struct {
path string
err error
blobs repository.Blobs
blobs Blobs
}
func (node Node) String() string {
@ -108,7 +106,7 @@ func nodeTypeFromFileInfo(fi os.FileInfo) string {
}
// CreateAt creates the node at the given path and restores all the meta data.
func (node *Node) CreateAt(path string, repo *repository.Repository) error {
func (node *Node) CreateAt(path string, repo Repository) error {
debug.Log("Node.CreateAt", "create node %v at %v", node.Name, path)
switch node.FileType {
@ -202,7 +200,7 @@ func (node Node) createDirAt(path string) error {
return nil
}
func (node Node) createFileAt(path string, repo *repository.Repository) error {
func (node Node) createFileAt(path string, repo Repository) error {
f, err := fs.OpenFile(path, os.O_CREATE|os.O_WRONLY, 0600)
defer f.Close()

View file

@ -1,4 +1,4 @@
package repository
package restic
import (
"io"

View file

@ -1,6 +1,10 @@
package restic
import "restic/repository"
import (
"restic/pack"
"github.com/restic/chunker"
)
// Repository stores data in a backend. It provides high-level functions and
// transparently encrypts/decrypts data.
@ -9,5 +13,43 @@ type Repository interface {
// Backend returns the backend used by the repository
Backend() Backend
SetIndex(*repository.MasterIndex)
SetIndex(interface{})
Index() Index
SaveFullIndex() error
SaveJSON(pack.BlobType, interface{}) (ID, error)
Config() Config
SaveAndEncrypt(pack.BlobType, []byte, *ID) (ID, error)
SaveJSONUnpacked(FileType, interface{}) (ID, error)
SaveIndex() error
LoadJSONPack(pack.BlobType, ID, interface{}) error
LoadJSONUnpacked(FileType, ID, interface{}) error
LoadBlob(ID, pack.BlobType, []byte) ([]byte, error)
LookupBlobSize(ID, pack.BlobType) (uint, error)
List(FileType, <-chan struct{}) <-chan ID
Flush() error
}
type Index interface {
Has(ID, pack.BlobType) bool
Lookup(ID, pack.BlobType) ([]PackedBlob, error)
}
type Config interface {
ChunkerPolynomial() chunker.Pol
}
type PackedBlob interface {
Type() pack.BlobType
Length() uint
ID() ID
Offset() uint
PackID() ID
}
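The payoff of this interface is decoupling: helpers and tests can now be written against restic.Repository instead of the concrete *repository.Repository, so a fake implementation can stand in. A trivial sketch with a hypothetical helper:

import (
    "fmt"

    "restic"
    "restic/pack"
)

// blobStoredSize works with any restic.Repository implementation.
func blobStoredSize(repo restic.Repository, id restic.ID) (uint, error) {
    if !repo.Index().Has(id, pack.Data) {
        return 0, fmt.Errorf("blob %v not found in index", id.Str())
    }
    return repo.LookupBlobSize(id, pack.Data)
}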

View file

@ -1,47 +0,0 @@
package repository
import (
"fmt"
"restic/backend"
)
type Blob struct {
ID *backend.ID `json:"id,omitempty"`
Size uint64 `json:"size,omitempty"`
Storage *backend.ID `json:"sid,omitempty"` // encrypted ID
StorageSize uint64 `json:"ssize,omitempty"` // encrypted Size
}
type Blobs []Blob
func (b Blob) Valid() bool {
if b.ID == nil || b.Storage == nil || b.StorageSize == 0 {
return false
}
return true
}
func (b Blob) String() string {
return fmt.Sprintf("Blob<%s (%d) -> %s (%d)>",
b.ID.Str(), b.Size,
b.Storage.Str(), b.StorageSize)
}
// Compare compares two blobs by comparing the ID and the size. It returns -1,
// 0, or 1.
func (b Blob) Compare(other Blob) int {
if res := b.ID.Compare(*other.ID); res != 0 {
return res
}
if b.Size < other.Size {
return -1
}
if b.Size > other.Size {
return 1
}
return 0
}

View file

@ -5,11 +5,11 @@ import (
"crypto/sha256"
"encoding/hex"
"io"
"restic"
"testing"
"github.com/pkg/errors"
"restic/backend"
"restic/debug"
"github.com/restic/chunker"
@ -31,12 +31,12 @@ const RepoVersion = 1
// JSONUnpackedSaver saves unpacked JSON.
type JSONUnpackedSaver interface {
SaveJSONUnpacked(backend.Type, interface{}) (backend.ID, error)
SaveJSONUnpacked(restic.FileType, interface{}) (restic.ID, error)
}
// JSONUnpackedLoader loads unpacked JSON.
type JSONUnpackedLoader interface {
LoadJSONUnpacked(backend.Type, backend.ID, interface{}) error
LoadJSONUnpacked(restic.FileType, restic.ID, interface{}) error
}
// CreateConfig creates a config file with a randomly selected polynomial and
@ -87,7 +87,7 @@ func LoadConfig(r JSONUnpackedLoader) (Config, error) {
cfg Config
)
err := r.LoadJSONUnpacked(backend.Config, backend.ID{}, &cfg)
err := r.LoadJSONUnpacked(restic.ConfigFile, restic.ID{}, &cfg)
if err != nil {
return Config{}, err
}

View file

@ -1,46 +1,46 @@
package repository_test
import (
"restic"
"testing"
"restic/backend"
"restic/repository"
. "restic/test"
)
type saver func(backend.Type, interface{}) (backend.ID, error)
type saver func(restic.FileType, interface{}) (restic.ID, error)
func (s saver) SaveJSONUnpacked(t backend.Type, arg interface{}) (backend.ID, error) {
func (s saver) SaveJSONUnpacked(t restic.FileType, arg interface{}) (restic.ID, error) {
return s(t, arg)
}
type loader func(backend.Type, backend.ID, interface{}) error
type loader func(restic.FileType, restic.ID, interface{}) error
func (l loader) LoadJSONUnpacked(t backend.Type, id backend.ID, arg interface{}) error {
func (l loader) LoadJSONUnpacked(t restic.FileType, id restic.ID, arg interface{}) error {
return l(t, id, arg)
}
func TestConfig(t *testing.T) {
resultConfig := repository.Config{}
save := func(tpe backend.Type, arg interface{}) (backend.ID, error) {
Assert(t, tpe == backend.Config,
save := func(tpe restic.FileType, arg interface{}) (restic.ID, error) {
Assert(t, tpe == restic.ConfigFile,
"wrong backend type: got %v, wanted %v",
tpe, backend.Config)
tpe, restic.ConfigFile)
cfg := arg.(repository.Config)
resultConfig = cfg
return backend.ID{}, nil
return restic.ID{}, nil
}
cfg1, err := repository.CreateConfig()
OK(t, err)
_, err = saver(save).SaveJSONUnpacked(backend.Config, cfg1)
_, err = saver(save).SaveJSONUnpacked(restic.ConfigFile, cfg1)
load := func(tpe backend.Type, id backend.ID, arg interface{}) error {
Assert(t, tpe == backend.Config,
load := func(tpe restic.FileType, id restic.ID, arg interface{}) error {
Assert(t, tpe == restic.ConfigFile,
"wrong backend type: got %v, wanted %v",
tpe, backend.Config)
tpe, restic.ConfigFile)
cfg := arg.(*repository.Config)
*cfg = resultConfig

View file

@ -5,12 +5,12 @@ import (
"encoding/json"
"fmt"
"io"
"restic"
"sync"
"time"
"github.com/pkg/errors"
"restic/backend"
"restic/crypto"
"restic/debug"
"restic/pack"
@ -21,14 +21,14 @@ type Index struct {
m sync.Mutex
pack map[pack.Handle][]indexEntry
final bool // set to true for all indexes read from the backend ("finalized")
id backend.ID // set to the ID of the index when it's finalized
supersedes backend.IDs
final bool // set to true for all indexes read from the backend ("finalized")
id restic.ID // set to the ID of the index when it's finalized
supersedes restic.IDs
created time.Time
}
type indexEntry struct {
packID backend.ID
packID restic.ID
offset uint
length uint
}
@ -112,7 +112,7 @@ func (idx *Index) Store(blob PackedBlob) {
}
// Lookup queries the index for the blob ID and returns a PackedBlob.
func (idx *Index) Lookup(id backend.ID, tpe pack.BlobType) (blobs []PackedBlob, err error) {
func (idx *Index) Lookup(id restic.ID, tpe pack.BlobType) (blobs []PackedBlob, err error) {
idx.m.Lock()
defer idx.m.Unlock()
@ -144,7 +144,7 @@ func (idx *Index) Lookup(id backend.ID, tpe pack.BlobType) (blobs []PackedBlob,
}
// ListPack returns a list of blobs contained in a pack.
func (idx *Index) ListPack(id backend.ID) (list []PackedBlob) {
func (idx *Index) ListPack(id restic.ID) (list []PackedBlob) {
idx.m.Lock()
defer idx.m.Unlock()
@ -166,7 +166,7 @@ func (idx *Index) ListPack(id backend.ID) (list []PackedBlob) {
}
// Has returns true iff the id is listed in the index.
func (idx *Index) Has(id backend.ID, tpe pack.BlobType) bool {
func (idx *Index) Has(id restic.ID, tpe pack.BlobType) bool {
_, err := idx.Lookup(id, tpe)
if err == nil {
return true
@ -177,7 +177,7 @@ func (idx *Index) Has(id backend.ID, tpe pack.BlobType) bool {
// LookupSize returns the length of the cleartext content behind the
// given id
func (idx *Index) LookupSize(id backend.ID, tpe pack.BlobType) (cleartextLength uint, err error) {
func (idx *Index) LookupSize(id restic.ID, tpe pack.BlobType) (cleartextLength uint, err error) {
blobs, err := idx.Lookup(id, tpe)
if err != nil {
return 0, err
@ -187,13 +187,13 @@ func (idx *Index) LookupSize(id backend.ID, tpe pack.BlobType) (cleartextLength
}
// Supersedes returns the list of indexes this index supersedes, if any.
func (idx *Index) Supersedes() backend.IDs {
func (idx *Index) Supersedes() restic.IDs {
return idx.supersedes
}
// AddToSupersedes adds the ids to the list of indexes superseded by this
// index. If the index has already been finalized, an error is returned.
func (idx *Index) AddToSupersedes(ids ...backend.ID) error {
func (idx *Index) AddToSupersedes(ids ...restic.ID) error {
idx.m.Lock()
defer idx.m.Unlock()
@ -209,9 +209,9 @@ func (idx *Index) AddToSupersedes(ids ...backend.ID) error {
type PackedBlob struct {
Type pack.BlobType
Length uint
ID backend.ID
ID restic.ID
Offset uint
PackID backend.ID
PackID restic.ID
}
func (pb PackedBlob) String() string {
@ -259,11 +259,11 @@ func (idx *Index) Each(done chan struct{}) <-chan PackedBlob {
}
// Packs returns all packs in this index
func (idx *Index) Packs() backend.IDSet {
func (idx *Index) Packs() restic.IDSet {
idx.m.Lock()
defer idx.m.Unlock()
packs := backend.NewIDSet()
packs := restic.NewIDSet()
for _, list := range idx.pack {
for _, entry := range list {
packs.Insert(entry.packID)
@ -300,12 +300,12 @@ func (idx *Index) Length() uint {
}
type packJSON struct {
ID backend.ID `json:"id"`
ID restic.ID `json:"id"`
Blobs []blobJSON `json:"blobs"`
}
type blobJSON struct {
ID backend.ID `json:"id"`
ID restic.ID `json:"id"`
Type pack.BlobType `json:"type"`
Offset uint `json:"offset"`
Length uint `json:"length"`
@ -314,7 +314,7 @@ type blobJSON struct {
// generatePackList returns a list of packs.
func (idx *Index) generatePackList() ([]*packJSON, error) {
list := []*packJSON{}
packs := make(map[backend.ID]*packJSON)
packs := make(map[restic.ID]*packJSON)
for h, packedBlobs := range idx.pack {
for _, blob := range packedBlobs {
@ -357,7 +357,7 @@ func (idx *Index) generatePackList() ([]*packJSON, error) {
}
type jsonIndex struct {
Supersedes backend.IDs `json:"supersedes,omitempty"`
Supersedes restic.IDs `json:"supersedes,omitempty"`
Packs []*packJSON `json:"packs"`
}
@ -402,12 +402,12 @@ func (idx *Index) Finalize(w io.Writer) error {
// ID returns the ID of the index, if available. If the index is not yet
// finalized, an error is returned.
func (idx *Index) ID() (backend.ID, error) {
func (idx *Index) ID() (restic.ID, error) {
idx.m.Lock()
defer idx.m.Unlock()
if !idx.final {
return backend.ID{}, errors.New("index not finalized")
return restic.ID{}, errors.New("index not finalized")
}
return idx.id, nil
@ -415,7 +415,7 @@ func (idx *Index) ID() (backend.ID, error) {
// SetID sets the ID the index has been written to. This requires that
// Finalize() has been called before, otherwise an error is returned.
func (idx *Index) SetID(id backend.ID) error {
func (idx *Index) SetID(id restic.ID) error {
idx.m.Lock()
defer idx.m.Unlock()
@ -545,10 +545,10 @@ func DecodeOldIndex(rd io.Reader) (idx *Index, err error) {
}
// LoadIndexWithDecoder loads the index and decodes it with fn.
func LoadIndexWithDecoder(repo *Repository, id backend.ID, fn func(io.Reader) (*Index, error)) (idx *Index, err error) {
func LoadIndexWithDecoder(repo *Repository, id restic.ID, fn func(io.Reader) (*Index, error)) (idx *Index, err error) {
debug.Log("LoadIndexWithDecoder", "Loading index %v", id[:8])
buf, err := repo.LoadAndDecrypt(backend.Index, id)
buf, err := repo.LoadAndDecrypt(restic.IndexFile, id)
if err != nil {
return nil, err
}
@ -568,7 +568,7 @@ func LoadIndexWithDecoder(repo *Repository, id backend.ID, fn func(io.Reader) (*
format (if necessary). When the conversion is successful, the old index
// is removed. Returned is either the old id (if no conversion was needed) or
// the new id.
func ConvertIndex(repo *Repository, id backend.ID) (backend.ID, error) {
func ConvertIndex(repo *Repository, id restic.ID) (restic.ID, error) {
debug.Log("ConvertIndex", "checking index %v", id.Str())
idx, err := LoadIndexWithDecoder(repo, id, DecodeOldIndex)
@ -578,7 +578,7 @@ func ConvertIndex(repo *Repository, id backend.ID) (backend.ID, error) {
}
buf := bytes.NewBuffer(nil)
idx.supersedes = backend.IDs{id}
idx.supersedes = restic.IDs{id}
err = idx.Encode(buf)
if err != nil {
@ -586,5 +586,5 @@ func ConvertIndex(repo *Repository, id backend.ID) (backend.ID, error) {
return id, err
}
return repo.SaveUnpacked(backend.Index, buf.Bytes())
return repo.SaveUnpacked(restic.IndexFile, buf.Bytes())
}
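A sketch of driving the conversion over a whole repository; the convertAllIndexes helper is hypothetical, and note that ConvertIndex returns the old ID unchanged when no conversion was needed:

import (
    "fmt"

    "restic"
    "restic/repository"
)

// convertAllIndexes runs ConvertIndex over every index file in the repo.
func convertAllIndexes(repo *repository.Repository) error {
    done := make(chan struct{})
    defer close(done)

    for id := range repo.List(restic.IndexFile, done) {
        newID, err := repository.ConvertIndex(repo, id)
        if err != nil {
            return err
        }
        fmt.Printf("index %v -> %v\n", id.Str(), newID.Str())
    }
    return nil
}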

View file

@ -3,7 +3,7 @@ package repository
import (
"fmt"
"os"
"restic/backend"
"restic"
"restic/debug"
"restic/list"
"restic/worker"
@ -23,7 +23,7 @@ func RebuildIndex(repo *Repository) error {
idx := NewIndex()
for job := range ch {
id := job.Data.(backend.ID)
id := job.Data.(restic.ID)
if job.Error != nil {
fmt.Fprintf(os.Stderr, "error for pack %v: %v\n", id, job.Error)
@ -44,8 +44,8 @@ func RebuildIndex(repo *Repository) error {
}
}
oldIndexes := backend.NewIDSet()
for id := range repo.List(backend.Index, done) {
oldIndexes := restic.NewIDSet()
for id := range repo.List(restic.IndexFile, done) {
idx.AddToSupersedes(id)
oldIndexes.Insert(id)
}
@ -58,7 +58,7 @@ func RebuildIndex(repo *Repository) error {
debug.Log("RebuildIndex.RebuildIndex", "new index saved as %v", id.Str())
for indexID := range oldIndexes {
err := repo.Backend().Remove(backend.Index, indexID.String())
err := repo.Backend().Remove(restic.IndexFile, indexID.String())
if err != nil {
fmt.Fprintf(os.Stderr, "unable to remove index %v: %v\n", indexID.Str(), err)
}

View file

@ -2,6 +2,7 @@ package repository_test
import (
"bytes"
"restic"
"testing"
"restic/backend"
@ -12,8 +13,8 @@ import (
func TestIndexSerialize(t *testing.T) {
type testEntry struct {
id backend.ID
pack backend.ID
id restic.ID
pack restic.ID
tpe pack.BlobType
offset, length uint
}
@ -249,7 +250,7 @@ var docOldExample = []byte(`
`)
var exampleTests = []struct {
id, packID backend.ID
id, packID restic.ID
tpe pack.BlobType
offset, length uint
}{
@ -269,11 +270,11 @@ var exampleTests = []struct {
}
var exampleLookupTest = struct {
packID backend.ID
blobs map[backend.ID]pack.BlobType
packID restic.ID
blobs map[restic.ID]pack.BlobType
}{
ParseID("73d04e6125cf3c28a299cc2f3cca3b78ceac396e4fcf9575e34536b26782413c"),
map[backend.ID]pack.BlobType{
map[restic.ID]pack.BlobType{
ParseID("3ec79977ef0cf5de7b08cd12b874cd0f62bbaf7f07f3497a5b1bbcc8cb39b1ce"): pack.Data,
ParseID("9ccb846e60d90d4eb915848add7aa7ea1e4bbabfc60e573db9f7bfb2789afbae"): pack.Tree,
ParseID("d3dc577b4ffd38cc4b32122cabf8655a0223ed22edfd93b353dc0c3f2b0fdf66"): pack.Data,
@ -281,7 +282,7 @@ var exampleLookupTest = struct {
}
func TestIndexUnserialize(t *testing.T) {
oldIdx := backend.IDs{ParseID("ed54ae36197f4745ebc4b54d10e0f623eaaaedd03013eb7ae90df881b7781452")}
oldIdx := restic.IDs{ParseID("ed54ae36197f4745ebc4b54d10e0f623eaaaedd03013eb7ae90df881b7781452")}
idx, err := repository.DecodeIndex(bytes.NewReader(docExample))
OK(t, err)
@ -345,7 +346,7 @@ func TestIndexUnserializeOld(t *testing.T) {
func TestIndexPacks(t *testing.T) {
idx := repository.NewIndex()
packs := backend.NewIDSet()
packs := restic.NewIDSet()
for i := 0; i < 20; i++ {
packID := backend.RandomID()

View file

@ -5,6 +5,7 @@ import (
"fmt"
"os"
"os/user"
"restic"
"time"
"github.com/pkg/errors"
@ -142,7 +143,7 @@ func SearchKey(s *Repository, password string, maxKeys int) (*Key, error) {
// LoadKey loads a key from the backend.
func LoadKey(s *Repository, name string) (k *Key, err error) {
h := backend.Handle{Type: backend.Key, Name: name}
h := restic.Handle{Type: backend.Key, Name: name}
data, err := backend.LoadAll(s.be, h, nil)
if err != nil {
return nil, err
@ -224,9 +225,9 @@ func AddKey(s *Repository, password string, template *crypto.Key) (*Key, error)
}
// store in repository and return
h := backend.Handle{
h := restic.Handle{
Type: backend.Key,
Name: backend.Hash(buf).String(),
Name: restic.Hash(buf).String(),
}
err = s.be.Save(h, buf)

View file

@ -1,11 +1,11 @@
package repository
import (
"restic"
"sync"
"github.com/pkg/errors"
"restic/backend"
"restic/debug"
"restic/pack"
)
@ -22,7 +22,7 @@ func NewMasterIndex() *MasterIndex {
}
// Lookup queries all known Indexes for the ID and returns the first match.
func (mi *MasterIndex) Lookup(id backend.ID, tpe pack.BlobType) (blobs []PackedBlob, err error) {
func (mi *MasterIndex) Lookup(id restic.ID, tpe pack.BlobType) (blobs []PackedBlob, err error) {
mi.idxMutex.RLock()
defer mi.idxMutex.RUnlock()
@ -42,7 +42,7 @@ func (mi *MasterIndex) Lookup(id backend.ID, tpe pack.BlobType) (blobs []PackedB
}
// LookupSize queries all known Indexes for the ID and returns the first match.
func (mi *MasterIndex) LookupSize(id backend.ID, tpe pack.BlobType) (uint, error) {
func (mi *MasterIndex) LookupSize(id restic.ID, tpe pack.BlobType) (uint, error) {
mi.idxMutex.RLock()
defer mi.idxMutex.RUnlock()
@ -58,7 +58,7 @@ func (mi *MasterIndex) LookupSize(id backend.ID, tpe pack.BlobType) (uint, error
// ListPack returns the list of blobs in a pack. The first matching index is
// returned, or nil if no index contains information about the pack id.
func (mi *MasterIndex) ListPack(id backend.ID) (list []PackedBlob) {
func (mi *MasterIndex) ListPack(id restic.ID) (list []PackedBlob) {
mi.idxMutex.RLock()
defer mi.idxMutex.RUnlock()
@ -73,7 +73,7 @@ func (mi *MasterIndex) ListPack(id backend.ID) (list []PackedBlob) {
}
// Has queries all known Indexes for the ID and returns the first match.
func (mi *MasterIndex) Has(id backend.ID, tpe pack.BlobType) bool {
func (mi *MasterIndex) Has(id restic.ID, tpe pack.BlobType) bool {
mi.idxMutex.RLock()
defer mi.idxMutex.RUnlock()
@ -197,7 +197,7 @@ func (mi *MasterIndex) All() []*Index {
// RebuildIndex combines all known indexes into a new index, leaving out any
// packs whose ID is contained in packBlacklist. The new index contains the IDs
// of all known indexes in the "supersedes" field.
func (mi *MasterIndex) RebuildIndex(packBlacklist backend.IDSet) (*Index, error) {
func (mi *MasterIndex) RebuildIndex(packBlacklist restic.IDSet) (*Index, error) {
mi.idxMutex.Lock()
defer mi.idxMutex.Unlock()

View file

@ -4,11 +4,11 @@ import (
"io"
"io/ioutil"
"os"
"restic"
"sync"
"github.com/pkg/errors"
"restic/backend"
"restic/crypto"
"restic/debug"
"restic/fs"
@ -17,7 +17,7 @@ import (
// Saver implements saving data in a backend.
type Saver interface {
Save(h backend.Handle, jp []byte) error
Save(h restic.Handle, jp []byte) error
}
// packerManager keeps a list of open packs and creates new on demand.
@ -114,8 +114,8 @@ func (r *Repository) savePacker(p *pack.Packer) error {
return errors.Wrap(err, "Close")
}
id := backend.Hash(data)
h := backend.Handle{Type: backend.Data, Name: id.String()}
id := restic.Hash(data)
h := restic.Handle{Type: restic.DataFile, Name: id.String()}
err = r.be.Save(h, data)
if err != nil {

View file

@ -4,7 +4,7 @@ import (
"io"
"math/rand"
"os"
"restic/backend"
"restic"
"restic/backend/mem"
"restic/crypto"
"restic/pack"
@ -36,8 +36,8 @@ func (r *randReader) Read(p []byte) (n int, err error) {
return len(p), nil
}
func randomID(rd io.Reader) backend.ID {
id := backend.ID{}
func randomID(rd io.Reader) restic.ID {
id := restic.ID{}
_, err := io.ReadFull(rd, id[:])
if err != nil {
panic(err)
@ -64,7 +64,7 @@ func saveFile(t testing.TB, be Saver, filename string, n int) {
t.Fatal(err)
}
h := backend.Handle{Type: backend.Data, Name: backend.Hash(data).String()}
h := restic.Handle{Type: restic.DataFile, Name: restic.Hash(data).String()}
err = be.Save(h, data)
if err != nil {
@ -137,7 +137,7 @@ func flushRemainingPacks(t testing.TB, rnd *randReader, be Saver, pm *packerMana
type fakeBackend struct{}
func (f *fakeBackend) Save(h backend.Handle, data []byte) error {
func (f *fakeBackend) Save(h restic.Handle, data []byte) error {
return nil
}

View file

@ -1,6 +1,7 @@
package repository
import (
"restic"
"sync"
"restic/backend"
@ -23,12 +24,12 @@ type ParallelWorkFunc func(id string, done <-chan struct{}) error
// ParallelIDWorkFunc gets one restic.ID to work on. If an error is returned,
// processing stops. If done is closed, the function should return.
type ParallelIDWorkFunc func(id backend.ID, done <-chan struct{}) error
type ParallelIDWorkFunc func(id restic.ID, done <-chan struct{}) error
// FilesInParallel runs n workers of f in parallel, on the IDs that
// repo.List(t) yield. If f returns an error, the process is aborted and the
// first error is returned.
func FilesInParallel(repo backend.Lister, t backend.Type, n uint, f ParallelWorkFunc) error {
func FilesInParallel(repo backend.Lister, t restic.FileType, n uint, f ParallelWorkFunc) error {
done := make(chan struct{})
defer closeIfOpen(done)

View file

@ -2,12 +2,12 @@ package repository_test
import (
"math/rand"
"restic"
"testing"
"time"
"github.com/pkg/errors"
"restic/backend"
"restic/repository"
. "restic/test"
)
@ -73,7 +73,7 @@ var lister = testIDs{
"34dd044c228727f2226a0c9c06a3e5ceb5e30e31cb7854f8fa1cde846b395a58",
}
func (tests testIDs) List(t backend.Type, done <-chan struct{}) <-chan string {
func (tests testIDs) List(t restic.FileType, done <-chan struct{}) <-chan string {
ch := make(chan string)
go func() {
@ -100,7 +100,7 @@ func TestFilesInParallel(t *testing.T) {
}
for n := uint(1); n < 5; n++ {
err := repository.FilesInParallel(lister, backend.Data, n*100, f)
err := repository.FilesInParallel(lister, restic.DataFile, n*100, f)
OK(t, err)
}
}
@ -120,7 +120,7 @@ func TestFilesInParallelWithError(t *testing.T) {
}
for n := uint(1); n < 5; n++ {
err := repository.FilesInParallel(lister, backend.Data, n*100, f)
err := repository.FilesInParallel(lister, restic.DataFile, n*100, f)
Equals(t, errTest, err)
}
}
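The call shape outside a test: a sketch assuming (as the test above does) that the lister, here the repository's backend, satisfies backend.Lister:

import (
    "fmt"

    "restic"
    "restic/repository"
)

// printSnapshotFiles lists snapshot files with four parallel workers; each
// worker receives names until the channel is drained or one returns an error.
func printSnapshotFiles(repo *repository.Repository) error {
    return repository.FilesInParallel(repo.Backend(), restic.SnapshotFile, 4,
        func(id string, done <-chan struct{}) error {
            fmt.Println("snapshot file:", id)
            return nil
        })
}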

View file

@ -3,7 +3,7 @@ package repository
import (
"bytes"
"io"
"restic/backend"
"restic"
"restic/crypto"
"restic/debug"
"restic/pack"
@ -15,13 +15,13 @@ import (
// these packs. Each pack is loaded and the blobs listed in keepBlobs are saved
// into a new pack. Afterwards, the packs are removed. This operation requires
// an exclusive lock on the repo.
func Repack(repo *Repository, packs backend.IDSet, keepBlobs pack.BlobSet) (err error) {
func Repack(repo *Repository, packs restic.IDSet, keepBlobs pack.BlobSet) (err error) {
debug.Log("Repack", "repacking %d packs while keeping %d blobs", len(packs), len(keepBlobs))
buf := make([]byte, 0, maxPackSize)
for packID := range packs {
// load the complete pack
h := backend.Handle{Type: backend.Data, Name: packID.String()}
h := restic.Handle{Type: restic.DataFile, Name: packID.String()}
l, err := repo.Backend().Load(h, buf[:cap(buf)], 0)
if errors.Cause(err) == io.ErrUnexpectedEOF {
@ -75,7 +75,7 @@ func Repack(repo *Repository, packs backend.IDSet, keepBlobs pack.BlobSet) (err
}
for packID := range packs {
err := repo.Backend().Remove(backend.Data, packID.String())
err := repo.Backend().Remove(restic.DataFile, packID.String())
if err != nil {
debug.Log("Repack", "error removing pack %v: %v", packID.Str(), err)
return err

View file

@ -3,7 +3,7 @@ package repository_test
import (
"io"
"math/rand"
"restic/backend"
"restic"
"restic/pack"
"restic/repository"
"testing"
@ -14,7 +14,7 @@ func randomSize(min, max int) int {
}
func random(t testing.TB, length int) []byte {
rd := repository.NewRandReader(rand.New(rand.NewSource(int64(length))))
rd := restic.NewRandReader(rand.New(rand.NewSource(int64(length))))
buf := make([]byte, length)
_, err := io.ReadFull(rd, buf)
if err != nil {
@ -40,7 +40,7 @@ func createRandomBlobs(t testing.TB, repo *repository.Repository, blobs int, pDa
}
buf := random(t, length)
id := backend.Hash(buf)
id := restic.Hash(buf)
if repo.Index().Has(id, pack.Data) {
t.Errorf("duplicate blob %v/%v ignored", id, pack.Data)
@ -75,7 +75,7 @@ func selectBlobs(t *testing.T, repo *repository.Repository, p float32) (list1, l
blobs := pack.NewBlobSet()
for id := range repo.List(backend.Data, done) {
for id := range repo.List(restic.DataFile, done) {
entries, _, err := repo.ListPack(id)
if err != nil {
t.Fatalf("error listing pack %v: %v", id, err)
@ -101,20 +101,20 @@ func selectBlobs(t *testing.T, repo *repository.Repository, p float32) (list1, l
return list1, list2
}
func listPacks(t *testing.T, repo *repository.Repository) backend.IDSet {
func listPacks(t *testing.T, repo *repository.Repository) restic.IDSet {
done := make(chan struct{})
defer close(done)
list := backend.NewIDSet()
for id := range repo.List(backend.Data, done) {
list := restic.NewIDSet()
for id := range repo.List(restic.DataFile, done) {
list.Insert(id)
}
return list
}
func findPacksForBlobs(t *testing.T, repo *repository.Repository, blobs pack.BlobSet) backend.IDSet {
packs := backend.NewIDSet()
func findPacksForBlobs(t *testing.T, repo *repository.Repository, blobs pack.BlobSet) restic.IDSet {
packs := restic.NewIDSet()
idx := repo.Index()
for h := range blobs {
@ -131,7 +131,7 @@ func findPacksForBlobs(t *testing.T, repo *repository.Repository, blobs pack.Blo
return packs
}
func repack(t *testing.T, repo *repository.Repository, packs backend.IDSet, blobs pack.BlobSet) {
func repack(t *testing.T, repo *repository.Repository, packs restic.IDSet, blobs pack.BlobSet) {
err := repository.Repack(repo, packs, blobs)
if err != nil {
t.Fatal(err)

View file

@ -6,6 +6,7 @@ import (
"fmt"
"io"
"os"
"restic"
"github.com/pkg/errors"
@ -17,7 +18,7 @@ import (
// Repository is used to access a repository in a backend.
type Repository struct {
be backend.Backend
be restic.Backend
Config Config
key *crypto.Key
keyName string
@ -27,7 +28,7 @@ type Repository struct {
}
// New returns a new repository with backend be.
func New(be backend.Backend) *Repository {
func New(be restic.Backend) *Repository {
repo := &Repository{
be: be,
idx: NewMasterIndex(),
@ -40,29 +41,29 @@ func New(be backend.Backend) *Repository {
// Find loads the list of all blobs of type t and searches for names which start
// with prefix. If none is found, nil and ErrNoIDPrefixFound is returned. If
// more than one is found, nil and ErrMultipleIDMatches is returned.
func (r *Repository) Find(t backend.Type, prefix string) (string, error) {
func (r *Repository) Find(t restic.FileType, prefix string) (string, error) {
return backend.Find(r.be, t, prefix)
}
// PrefixLength returns the number of bytes required so that all prefixes of
// all IDs of type t are unique.
func (r *Repository) PrefixLength(t backend.Type) (int, error) {
func (r *Repository) PrefixLength(t restic.FileType) (int, error) {
return backend.PrefixLength(r.be, t)
}
// LoadAndDecrypt loads and decrypts data identified by t and id from the
// backend.
func (r *Repository) LoadAndDecrypt(t backend.Type, id backend.ID) ([]byte, error) {
func (r *Repository) LoadAndDecrypt(t restic.FileType, id restic.ID) ([]byte, error) {
debug.Log("Repo.Load", "load %v with id %v", t, id.Str())
h := backend.Handle{Type: t, Name: id.String()}
h := restic.Handle{Type: t, Name: id.String()}
buf, err := backend.LoadAll(r.be, h, nil)
if err != nil {
debug.Log("Repo.Load", "error loading %v: %v", id.Str(), err)
return nil, err
}
if t != backend.Config && !backend.Hash(buf).Equal(id) {
if t != restic.ConfigFile && !restic.Hash(buf).Equal(id) {
return nil, errors.New("invalid data returned")
}
@ -78,7 +79,7 @@ func (r *Repository) LoadAndDecrypt(t backend.Type, id backend.ID) ([]byte, erro
// LoadBlob tries to load and decrypt content identified by t and id from a
// pack from the backend, the result is stored in plaintextBuf, which must be
// large enough to hold the complete blob.
func (r *Repository) LoadBlob(id backend.ID, t pack.BlobType, plaintextBuf []byte) ([]byte, error) {
func (r *Repository) LoadBlob(id restic.ID, t pack.BlobType, plaintextBuf []byte) ([]byte, error) {
debug.Log("Repo.LoadBlob", "load %v with id %v", t, id.Str())
// lookup plaintext size of blob
@ -111,7 +112,7 @@ func (r *Repository) LoadBlob(id backend.ID, t pack.BlobType, plaintextBuf []byt
}
// load blob from pack
h := backend.Handle{Type: backend.Data, Name: blob.PackID.String()}
h := restic.Handle{Type: restic.DataFile, Name: blob.PackID.String()}
ciphertextBuf := make([]byte, blob.Length)
n, err := r.be.Load(h, ciphertextBuf, int64(blob.Offset))
if err != nil {
@ -135,7 +136,7 @@ func (r *Repository) LoadBlob(id backend.ID, t pack.BlobType, plaintextBuf []byt
}
// check hash
if !backend.Hash(plaintextBuf).Equal(id) {
if !restic.Hash(plaintextBuf).Equal(id) {
lastError = errors.Errorf("blob %v returned invalid hash", id)
continue
}
@ -162,7 +163,7 @@ func closeOrErr(cl io.Closer, err *error) {
// LoadJSONUnpacked decrypts the data and afterwards calls json.Unmarshal on
// the item.
func (r *Repository) LoadJSONUnpacked(t backend.Type, id backend.ID, item interface{}) (err error) {
func (r *Repository) LoadJSONUnpacked(t restic.FileType, id restic.ID, item interface{}) (err error) {
buf, err := r.LoadAndDecrypt(t, id)
if err != nil {
return err
@ -173,7 +174,7 @@ func (r *Repository) LoadJSONUnpacked(t backend.Type, id backend.ID, item interf
// LoadJSONPack calls LoadBlob() to load a blob from the backend, decrypt the
// data and afterwards call json.Unmarshal on the item.
func (r *Repository) LoadJSONPack(t pack.BlobType, id backend.ID, item interface{}) (err error) {
func (r *Repository) LoadJSONPack(t pack.BlobType, id restic.ID, item interface{}) (err error) {
buf, err := r.LoadBlob(id, t, nil)
if err != nil {
return err
@ -183,16 +184,16 @@ func (r *Repository) LoadJSONPack(t pack.BlobType, id backend.ID, item interface
}
// LookupBlobSize returns the size of blob id.
func (r *Repository) LookupBlobSize(id backend.ID, tpe pack.BlobType) (uint, error) {
func (r *Repository) LookupBlobSize(id restic.ID, tpe pack.BlobType) (uint, error) {
return r.idx.LookupSize(id, tpe)
}
// SaveAndEncrypt encrypts data and stores it to the backend as type t. If data
// is small enough, it will be packed together with other small blobs.
func (r *Repository) SaveAndEncrypt(t pack.BlobType, data []byte, id *backend.ID) (backend.ID, error) {
func (r *Repository) SaveAndEncrypt(t pack.BlobType, data []byte, id *restic.ID) (restic.ID, error) {
if id == nil {
// compute plaintext hash
hashedID := backend.Hash(data)
hashedID := restic.Hash(data)
id = &hashedID
}
@ -205,19 +206,19 @@ func (r *Repository) SaveAndEncrypt(t pack.BlobType, data []byte, id *backend.ID
// encrypt blob
ciphertext, err := r.Encrypt(ciphertext, data)
if err != nil {
return backend.ID{}, err
return restic.ID{}, err
}
// find suitable packer and add blob
packer, err := r.findPacker(uint(len(ciphertext)))
if err != nil {
return backend.ID{}, err
return restic.ID{}, err
}
// save ciphertext
_, err = packer.Add(t, *id, ciphertext)
if err != nil {
return backend.ID{}, err
return restic.ID{}, err
}
// if the pack is not full enough and there are less than maxPackers
@ -234,7 +235,7 @@ func (r *Repository) SaveAndEncrypt(t pack.BlobType, data []byte, id *backend.ID
// SaveJSON serialises item as JSON and encrypts and saves it in a pack in the
// backend as type t.
func (r *Repository) SaveJSON(t pack.BlobType, item interface{}) (backend.ID, error) {
func (r *Repository) SaveJSON(t pack.BlobType, item interface{}) (restic.ID, error) {
debug.Log("Repo.SaveJSON", "save %v blob", t)
buf := getBuf()[:0]
defer freeBuf(buf)
@ -244,7 +245,7 @@ func (r *Repository) SaveJSON(t pack.BlobType, item interface{}) (backend.ID, er
enc := json.NewEncoder(wr)
err := enc.Encode(item)
if err != nil {
return backend.ID{}, errors.Errorf("json.Encode: %v", err)
return restic.ID{}, errors.Errorf("json.Encode: %v", err)
}
buf = wr.Bytes()
@ -253,11 +254,11 @@ func (r *Repository) SaveJSON(t pack.BlobType, item interface{}) (backend.ID, er
// SaveJSONUnpacked serialises item as JSON and encrypts and saves it in the
// backend as type t, without a pack. It returns the storage hash.
func (r *Repository) SaveJSONUnpacked(t backend.Type, item interface{}) (backend.ID, error) {
func (r *Repository) SaveJSONUnpacked(t restic.FileType, item interface{}) (restic.ID, error) {
debug.Log("Repo.SaveJSONUnpacked", "save new blob %v", t)
plaintext, err := json.Marshal(item)
if err != nil {
return backend.ID{}, errors.Wrap(err, "json.Marshal")
return restic.ID{}, errors.Wrap(err, "json.Marshal")
}
return r.SaveUnpacked(t, plaintext)
@ -265,20 +266,20 @@ func (r *Repository) SaveJSONUnpacked(t backend.Type, item interface{}) (backend
// SaveUnpacked encrypts data and stores it in the backend. Returned is the
// storage hash.
func (r *Repository) SaveUnpacked(t backend.Type, p []byte) (id backend.ID, err error) {
func (r *Repository) SaveUnpacked(t restic.FileType, p []byte) (id restic.ID, err error) {
ciphertext := make([]byte, len(p)+crypto.Extension)
ciphertext, err = r.Encrypt(ciphertext, p)
if err != nil {
return backend.ID{}, err
return restic.ID{}, err
}
id = backend.Hash(ciphertext)
h := backend.Handle{Type: t, Name: id.String()}
id = restic.Hash(ciphertext)
h := restic.Handle{Type: t, Name: id.String()}
err = r.be.Save(h, ciphertext)
if err != nil {
debug.Log("Repo.SaveJSONUnpacked", "error saving blob %v: %v", h, err)
return backend.ID{}, err
return restic.ID{}, err
}
debug.Log("Repo.SaveJSONUnpacked", "blob %v saved", h)
@ -303,7 +304,7 @@ func (r *Repository) Flush() error {
}
// Backend returns the backend for the repository.
func (r *Repository) Backend() backend.Backend {
func (r *Repository) Backend() restic.Backend {
return r.be
}
@ -318,15 +319,15 @@ func (r *Repository) SetIndex(i *MasterIndex) {
}
// SaveIndex saves an index in the repository.
func SaveIndex(repo *Repository, index *Index) (backend.ID, error) {
func SaveIndex(repo *Repository, index *Index) (restic.ID, error) {
buf := bytes.NewBuffer(nil)
err := index.Finalize(buf)
if err != nil {
return backend.ID{}, err
return restic.ID{}, err
}
return repo.SaveUnpacked(backend.Index, buf.Bytes())
return repo.SaveUnpacked(restic.IndexFile, buf.Bytes())
}
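SaveIndex likewise returns a restic.ID now. A sketch of persisting a finished index (the helper name is illustrative; idx is assumed to be a finalized *Index):

// persistIndex is a hypothetical helper: it saves idx and logs the new ID.
func persistIndex(r *Repository, idx *Index) error {
	id, err := SaveIndex(r, idx)
	if err != nil {
		return err
	}
	debug.Log("example", "index saved as %v", id.Str())
	return nil
}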
// saveIndex saves all indexes in the backend.
@@ -365,7 +366,7 @@ func (r *Repository) LoadIndex() error {
errCh := make(chan error, 1)
indexes := make(chan *Index)
worker := func(id backend.ID, done <-chan struct{}) error {
worker := func(id restic.ID, done <-chan struct{}) error {
idx, err := LoadIndex(r, id)
if err != nil {
return err
@@ -381,7 +382,7 @@ func (r *Repository) LoadIndex() error {
go func() {
defer close(indexes)
errCh <- FilesInParallel(r.be, backend.Index, loadIndexParallelism,
errCh <- FilesInParallel(r.be, restic.IndexFile, loadIndexParallelism,
ParallelWorkFuncParseID(worker))
}()
@@ -397,7 +398,7 @@ func (r *Repository) LoadIndex() error {
}
// LoadIndex loads the index id from backend and returns it.
func LoadIndex(repo *Repository, id backend.ID) (*Index, error) {
func LoadIndex(repo *Repository, id restic.ID) (*Index, error) {
idx, err := LoadIndexWithDecoder(repo, id, DecodeIndex)
if err == nil {
return idx, nil
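LoadIndex's worker above does this in parallel; a sequential sketch of the same read path, using the restic.IndexFile constant introduced by this change (helper name made up):

// loadAllIndexes is a hypothetical sequential variant of LoadIndex's
// parallel loader.
func loadAllIndexes(r *Repository) ([]*Index, error) {
	done := make(chan struct{})
	defer close(done)

	var indexes []*Index
	for id := range r.List(restic.IndexFile, done) {
		idx, err := LoadIndex(r, id)
		if err != nil {
			return nil, err
		}
		indexes = append(indexes, idx)
	}
	return indexes, nil
}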
@@ -429,7 +430,7 @@ func (r *Repository) SearchKey(password string, maxKeys int) error {
// Init creates a new master key with the supplied password, initializes and
// saves the repository config.
func (r *Repository) Init(password string) error {
has, err := r.be.Test(backend.Config, "")
has, err := r.be.Test(restic.ConfigFile, "")
if err != nil {
return err
}
@@ -457,7 +458,7 @@ func (r *Repository) init(password string, cfg Config) error {
r.packerManager.key = key.master
r.keyName = key.Name()
r.Config = cfg
_, err = r.SaveJSONUnpacked(backend.Config, cfg)
_, err = r.SaveJSONUnpacked(restic.ConfigFile, cfg)
return err
}
@@ -497,7 +498,7 @@ func (r *Repository) KeyName() string {
}
// Count returns the number of blobs of a given type in the backend.
func (r *Repository) Count(t backend.Type) (n uint) {
func (r *Repository) Count(t restic.FileType) (n uint) {
for _ = range r.be.List(t, nil) {
n++
}
@@ -505,16 +506,16 @@ func (r *Repository) Count(t backend.Type) (n uint) {
return
}
func (r *Repository) list(t backend.Type, done <-chan struct{}, out chan<- backend.ID) {
func (r *Repository) list(t restic.FileType, done <-chan struct{}, out chan<- restic.ID) {
defer close(out)
in := r.be.List(t, done)
var (
// disable sending on the outCh until we received a job
outCh chan<- backend.ID
outCh chan<- restic.ID
// enable receiving from in
inCh = in
id backend.ID
id restic.ID
err error
)
@@ -543,8 +544,8 @@ func (r *Repository) list(t backend.Type, done <-chan struct{}, out chan<- backe
}
// List returns a channel that yields all IDs of type t in the backend.
func (r *Repository) List(t backend.Type, done <-chan struct{}) <-chan backend.ID {
outCh := make(chan backend.ID)
func (r *Repository) List(t restic.FileType, done <-chan struct{}) <-chan restic.ID {
outCh := make(chan restic.ID)
go r.list(t, done, outCh)
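List and ListPack (next hunk) complete the migration on the enumeration side. A sketch combining both under the new types (helper name and the fmt import are assumptions of this example):

// countPackBlobs is a hypothetical helper built on List and ListPack.
func countPackBlobs(r *Repository) error {
	done := make(chan struct{})
	defer close(done)

	for id := range r.List(restic.DataFile, done) {
		blobs, size, err := r.ListPack(id)
		if err != nil {
			return err
		}
		fmt.Printf("pack %v: %d blobs, %d bytes in the backend\n", id.Str(), len(blobs), size)
	}
	return nil
}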
@@ -553,8 +554,8 @@ func (r *Repository) List(t backend.Type, done <-chan struct{}) <-chan backend.I
// ListPack returns the list of blobs saved in the pack id and the length of
// the file as stored in the backend.
func (r *Repository) ListPack(id backend.ID) ([]pack.Blob, int64, error) {
h := backend.Handle{Type: backend.Data, Name: id.String()}
func (r *Repository) ListPack(id restic.ID) ([]pack.Blob, int64, error) {
h := restic.Handle{Type: restic.DataFile, Name: id.String()}
blobInfo, err := r.Backend().Stat(h)
if err != nil {

View file

@@ -11,7 +11,6 @@ import (
"testing"
"restic"
"restic/backend"
"restic/pack"
"restic/repository"
. "restic/test"
@@ -80,7 +79,7 @@ func TestSave(t *testing.T) {
_, err := io.ReadFull(rand.Reader, data)
OK(t, err)
id := backend.Hash(data)
id := restic.Hash(data)
// save
sid, err := repo.SaveAndEncrypt(pack.Data, data, nil)
@@ -114,7 +113,7 @@ func TestSaveFrom(t *testing.T) {
_, err := io.ReadFull(rand.Reader, data)
OK(t, err)
id := backend.Hash(data)
id := restic.Hash(data)
// save
id2, err := repo.SaveAndEncrypt(pack.Data, data, &id)
@@ -147,7 +146,7 @@ func BenchmarkSaveAndEncrypt(t *testing.B) {
_, err := io.ReadFull(rand.Reader, data)
OK(t, err)
id := backend.ID(sha256.Sum256(data))
id := restic.ID(sha256.Sum256(data))
t.ResetTimer()
t.SetBytes(int64(size))
@@ -211,13 +210,13 @@ func TestLoadJSONUnpacked(t *testing.T) {
sn.Hostname = "foobar"
sn.Username = "test!"
id, err := repo.SaveJSONUnpacked(backend.Snapshot, &sn)
id, err := repo.SaveJSONUnpacked(restic.SnapshotFile, &sn)
OK(t, err)
var sn2 restic.Snapshot
// restore
err = repo.LoadJSONUnpacked(backend.Snapshot, id, &sn2)
err = repo.LoadJSONUnpacked(restic.SnapshotFile, id, &sn2)
OK(t, err)
Equals(t, sn.Hostname, sn2.Hostname)
@@ -286,19 +285,19 @@ func TestRepositoryIncrementalIndex(t *testing.T) {
OK(t, repo.SaveIndex())
type packEntry struct {
id backend.ID
id restic.ID
indexes []*repository.Index
}
packEntries := make(map[backend.ID]map[backend.ID]struct{})
packEntries := make(map[restic.ID]map[restic.ID]struct{})
for id := range repo.List(backend.Index, nil) {
for id := range repo.List(restic.IndexFile, nil) {
idx, err := repository.LoadIndex(repo, id)
OK(t, err)
for pb := range idx.Each(nil) {
if _, ok := packEntries[pb.PackID]; !ok {
packEntries[pb.PackID] = make(map[backend.ID]struct{})
packEntries[pb.PackID] = make(map[restic.ID]struct{})
}
packEntries[pb.PackID][id] = struct{}{}

View file

@@ -2,7 +2,7 @@ package repository
import (
"os"
"restic/backend"
"restic"
"restic/backend/local"
"restic/backend/mem"
"restic/crypto"
@@ -25,7 +25,7 @@ func TestUseLowSecurityKDFParameters(t testing.TB) {
}
// TestBackend returns a fully configured in-memory backend.
func TestBackend(t testing.TB) (be backend.Backend, cleanup func()) {
func TestBackend(t testing.TB) (be restic.Backend, cleanup func()) {
return mem.New(), func() {}
}
@@ -37,7 +37,7 @@ const testChunkerPol = chunker.Pol(0x3DA3358B4DC173)
// TestRepositoryWithBackend returns a repository initialized with a test
// password. If be is nil, an in-memory backend is used. A constant polynomial
// is used for the chunker and low-security test parameters.
func TestRepositoryWithBackend(t testing.TB, be backend.Backend) (r *Repository, cleanup func()) {
func TestRepositoryWithBackend(t testing.TB, be restic.Backend) (r *Repository, cleanup func()) {
TestUseLowSecurityKDFParameters(t)
var beCleanup func()
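A sketch of how a test in this package might use the migrated helper (per the doc comment above, a nil backend selects the in-memory one; the test name is made up):

func TestExample(t *testing.T) {
	// nil selects the in-memory backend from TestBackend
	repo, cleanup := TestRepositoryWithBackend(t, nil)
	defer cleanup()

	_ = repo // exercise the repository as the tests above do
}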

View file

@@ -6,15 +6,13 @@ import (
"github.com/pkg/errors"
"restic/backend"
"restic/debug"
"restic/fs"
"restic/repository"
)
// Restorer is used to restore a snapshot to a directory.
type Restorer struct {
repo *repository.Repository
repo Repository
sn *Snapshot
Error func(dir string, node *Node, err error) error
@@ -24,7 +22,7 @@ type Restorer struct {
var restorerAbortOnAllErrors = func(str string, node *Node, err error) error { return err }
// NewRestorer creates a restorer preloaded with the content from the snapshot id.
func NewRestorer(repo *repository.Repository, id backend.ID) (*Restorer, error) {
func NewRestorer(repo Repository, id ID) (*Restorer, error) {
r := &Restorer{
repo: repo, Error: restorerAbortOnAllErrors,
SelectFilter: func(string, string, *Node) bool { return true },
@@ -40,7 +38,7 @@ func NewRestorer(repo *repository.Repository, id backend.ID) (*Restorer, error)
return r, nil
}
func (res *Restorer) restoreTo(dst string, dir string, treeID backend.ID) error {
func (res *Restorer) restoreTo(dst string, dir string, treeID ID) error {
tree, err := LoadTree(res.repo, treeID)
if err != nil {
return res.Error(dir, nil, err)
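With Restorer now built on the Repository interface and plain ID, a caller-side sketch; RestoreTo is assumed to exist on Restorer as in earlier revisions, and the helper and target path are illustrative:

// restoreSnapshot is a hypothetical helper around the migrated constructor.
func restoreSnapshot(repo Repository, id ID, target string) error {
	res, err := NewRestorer(repo, id)
	if err != nil {
		return err
	}
	// RestoreTo walks the snapshot tree and writes files below target
	// (assumed unchanged by this commit)
	return res.RestoreTo(target)
}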

View file

@@ -10,22 +10,21 @@ import (
"github.com/pkg/errors"
"restic/backend"
"restic/repository"
)
// Snapshot is the state of a resource at one point in time.
type Snapshot struct {
Time time.Time `json:"time"`
Parent *backend.ID `json:"parent,omitempty"`
Tree *backend.ID `json:"tree"`
Paths []string `json:"paths"`
Hostname string `json:"hostname,omitempty"`
Username string `json:"username,omitempty"`
UID uint32 `json:"uid,omitempty"`
GID uint32 `json:"gid,omitempty"`
Excludes []string `json:"excludes,omitempty"`
Time time.Time `json:"time"`
Parent *ID `json:"parent,omitempty"`
Tree *ID `json:"tree"`
Paths []string `json:"paths"`
Hostname string `json:"hostname,omitempty"`
Username string `json:"username,omitempty"`
UID uint32 `json:"uid,omitempty"`
GID uint32 `json:"gid,omitempty"`
Excludes []string `json:"excludes,omitempty"`
id *backend.ID // plaintext ID, used during restore
id *ID // plaintext ID, used during restore
}
// NewSnapshot returns an initialized snapshot struct for the current user and
@@ -56,9 +55,9 @@ func NewSnapshot(paths []string) (*Snapshot, error) {
}
// LoadSnapshot loads the snapshot with the id and returns it.
func LoadSnapshot(repo *repository.Repository, id backend.ID) (*Snapshot, error) {
func LoadSnapshot(repo Repository, id ID) (*Snapshot, error) {
sn := &Snapshot{id: &id}
err := repo.LoadJSONUnpacked(backend.Snapshot, id, sn)
err := repo.LoadJSONUnpacked(SnapshotFile, id, sn)
if err != nil {
return nil, err
}
@@ -67,11 +66,11 @@ func LoadSnapshot(repo *repository.Repository, id backend.ID) (*Snapshot, error)
}
// LoadAllSnapshots returns a list of all snapshots in the repo.
func LoadAllSnapshots(repo *repository.Repository) (snapshots []*Snapshot, err error) {
func LoadAllSnapshots(repo Repository) (snapshots []*Snapshot, err error) {
done := make(chan struct{})
defer close(done)
for id := range repo.List(backend.Snapshot, done) {
for id := range repo.List(SnapshotFile, done) {
sn, err := LoadSnapshot(repo, id)
if err != nil {
return nil, err
@@ -89,7 +88,7 @@ func (sn Snapshot) String() string {
}
// ID returns the snapshot's ID.
func (sn Snapshot) ID() *backend.ID {
func (sn Snapshot) ID() *ID {
return sn.id
}
@@ -131,17 +130,17 @@ func SamePaths(expected, actual []string) bool {
var ErrNoSnapshotFound = errors.New("no snapshot found")
// FindLatestSnapshot finds the latest snapshot with optional target/directory and source filters.
func FindLatestSnapshot(repo *repository.Repository, targets []string, source string) (backend.ID, error) {
func FindLatestSnapshot(repo Repository, targets []string, source string) (ID, error) {
var (
latest time.Time
latestID backend.ID
latestID ID
found bool
)
for snapshotID := range repo.List(backend.Snapshot, make(chan struct{})) {
for snapshotID := range repo.List(SnapshotFile, make(chan struct{})) {
snapshot, err := LoadSnapshot(repo, snapshotID)
if err != nil {
return backend.ID{}, errors.Errorf("Error listing snapshot: %v", err)
return ID{}, errors.Errorf("Error listing snapshot: %v", err)
}
if snapshot.Time.After(latest) && SamePaths(snapshot.Paths, targets) && (source == "" || source == snapshot.Hostname) {
latest = snapshot.Time
@@ -151,7 +150,7 @@ func FindLatestSnapshot(repo *repository.Repository, targets []string, source st
}
if !found {
return backend.ID{}, ErrNoSnapshotFound
return ID{}, ErrNoSnapshotFound
}
return latestID, nil
@@ -159,13 +158,13 @@ func FindLatestSnapshot(repo *repository.Repository, targets []string, source st
// FindSnapshot takes a string and tries to find a snapshot whose ID matches
// the string as closely as possible.
func FindSnapshot(repo *repository.Repository, s string) (backend.ID, error) {
func FindSnapshot(repo Repository, s string) (ID, error) {
// find snapshot id with prefix
name, err := backend.Find(repo.Backend(), backend.Snapshot, s)
name, err := backend.Find(repo.Backend(), SnapshotFile, s)
if err != nil {
return backend.ID{}, err
return ID{}, err
}
return backend.ParseID(name)
return ParseID(name)
}
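Taken together, the snapshot helpers now resolve and load snapshots without touching the backend types directly. A sketch chaining the two functions above (the helper name and prefix string are illustrative):

// findAndLoad is a hypothetical helper resolving a prefix to a snapshot.
func findAndLoad(repo Repository, prefix string) (*Snapshot, error) {
	id, err := FindSnapshot(repo, prefix)
	if err != nil {
		return nil, err
	}
	return LoadSnapshot(repo, id)
}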

View file

@@ -1,13 +1,12 @@
package restic
import (
crand "crypto/rand"
"encoding/json"
"fmt"
"io"
"math/rand"
"restic/backend"
"restic/pack"
"restic/repository"
"testing"
"time"
@@ -17,21 +16,21 @@ import (
// fakeFile returns a reader which yields deterministic pseudo-random data.
func fakeFile(t testing.TB, seed, size int64) io.Reader {
return io.LimitReader(repository.NewRandReader(rand.New(rand.NewSource(seed))), size)
return io.LimitReader(NewRandReader(rand.New(rand.NewSource(seed))), size)
}
type fakeFileSystem struct {
t testing.TB
repo *repository.Repository
knownBlobs backend.IDSet
repo Repository
knownBlobs IDSet
duplication float32
}
// saveFile reads from rd and saves the blobs in the repository. The list of
// IDs is returned.
func (fs fakeFileSystem) saveFile(rd io.Reader) (blobs backend.IDs) {
blobs = backend.IDs{}
ch := chunker.New(rd, fs.repo.Config.ChunkerPolynomial)
func (fs fakeFileSystem) saveFile(rd io.Reader) (blobs IDs) {
blobs = IDs{}
ch := chunker.New(rd, fs.repo.Config().ChunkerPolynomial())
for {
chunk, err := ch.Next(getBuf())
@@ -43,7 +42,7 @@ func (fs fakeFileSystem) saveFile(rd io.Reader) (blobs backend.IDs) {
fs.t.Fatalf("unable to save chunk in repo: %v", err)
}
id := backend.Hash(chunk.Data)
id := Hash(chunk.Data)
if !fs.blobIsKnown(id, pack.Data) {
_, err := fs.repo.SaveAndEncrypt(pack.Data, chunk.Data, &id)
if err != nil {
@@ -66,20 +65,20 @@ const (
maxNodes = 32
)
func (fs fakeFileSystem) treeIsKnown(tree *Tree) (bool, backend.ID) {
func (fs fakeFileSystem) treeIsKnown(tree *Tree) (bool, ID) {
data, err := json.Marshal(tree)
if err != nil {
fs.t.Fatalf("json.Marshal(tree) returned error: %v", err)
return false, backend.ID{}
return false, ID{}
}
data = append(data, '\n')
id := backend.Hash(data)
id := Hash(data)
return fs.blobIsKnown(id, pack.Tree), id
}
func (fs fakeFileSystem) blobIsKnown(id backend.ID, t pack.BlobType) bool {
func (fs fakeFileSystem) blobIsKnown(id ID, t pack.BlobType) bool {
if rand.Float32() < fs.duplication {
return false
}
@@ -97,7 +96,7 @@ func (fs fakeFileSystem) blobIsKnown(id backend.ID, t pack.BlobType) bool {
}
// saveTree saves a tree of fake files in the repo and returns the ID.
func (fs fakeFileSystem) saveTree(seed int64, depth int) backend.ID {
func (fs fakeFileSystem) saveTree(seed int64, depth int) ID {
rnd := rand.NewSource(seed)
numNodes := int(rnd.Int63() % maxNodes)
@@ -151,7 +150,7 @@ func (fs fakeFileSystem) saveTree(seed int64, depth int) backend.ID {
// also used as the snapshot's timestamp. The tree's depth can be specified
// with the parameter depth. The parameter duplication is a probability that
// the same blob will be saved again.
func TestCreateSnapshot(t testing.TB, repo *repository.Repository, at time.Time, depth int, duplication float32) *Snapshot {
func TestCreateSnapshot(t testing.TB, repo Repository, at time.Time, depth int, duplication float32) *Snapshot {
seed := at.Unix()
t.Logf("create fake snapshot at %s with seed %d", at, seed)
@@ -165,14 +164,14 @@ func TestCreateSnapshot(t testing.TB, repo *repository.Repository, at time.Time,
fs := fakeFileSystem{
t: t,
repo: repo,
knownBlobs: backend.NewIDSet(),
knownBlobs: NewIDSet(),
duplication: duplication,
}
treeID := fs.saveTree(seed, depth)
snapshot.Tree = &treeID
id, err := repo.SaveJSONUnpacked(backend.Snapshot, snapshot)
id, err := repo.SaveJSONUnpacked(SnapshotFile, snapshot)
if err != nil {
t.Fatal(err)
}
@@ -194,24 +193,7 @@ func TestCreateSnapshot(t testing.TB, repo *repository.Repository, at time.Time,
return snapshot
}
// TestResetRepository removes all packs and indexes from the repository.
func TestResetRepository(t testing.TB, repo Repository) {
done := make(chan struct{})
defer close(done)
for _, tpe := range []FileType{SnapshotFile, IndexFile, DataFile} {
for id := range repo.Backend().List(tpe, done) {
err := repo.Backend().Remove(tpe, id)
if err != nil {
t.Errorf("removing %v (%v) failed: %v", id[0:12], tpe, err)
}
}
}
repo.SetIndex(repository.NewMasterIndex())
}
// TestParseID parses s as a backend.ID and panics if that fails.
// TestParseID parses s as an ID and panics if that fails.
func TestParseID(s string) ID {
id, err := ParseID(s)
if err != nil {
@@ -220,3 +202,14 @@ func TestParseID(s string) ID {
return id
}
// TestRandomID returns a randomly generated ID. When reading from rand fails,
// the function panics.
func TestRandomID() ID {
id := ID{}
_, err := io.ReadFull(crand.Reader, id[:])
if err != nil {
panic(err)
}
return id
}
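A sketch of driving TestCreateSnapshot from a consumer package, much as the benchmark removed in the next file section did (the test name, depth 3, and 50% duplication are arbitrary example values):

func TestFakeSnapshot(t *testing.T) {
	repo, cleanup := repository.TestRepository(t)
	defer cleanup()

	// creates a deterministic fake snapshot seeded from the timestamp
	sn := restic.TestCreateSnapshot(t, repo, time.Now(), 3, 0.5)
	t.Logf("created fake snapshot %v", sn.ID().Str())
}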

View file

@@ -47,15 +47,3 @@ func TestCreateSnapshot(t *testing.T) {
checker.TestCheckRepo(t, repo)
}
func BenchmarkCreateSnapshot(b *testing.B) {
repo, cleanup := repository.TestRepository(b)
defer cleanup()
b.ResetTimer()
for i := 0; i < b.N; i++ {
restic.TestCreateSnapshot(b, repo, testSnapshotTime, testDepth, 0)
restic.TestResetRepository(b, repo)
}
}

View file

@@ -6,7 +6,6 @@ import (
"github.com/pkg/errors"
"restic/backend"
"restic/debug"
"restic/pack"
)
@@ -31,10 +30,10 @@
}
type TreeLoader interface {
LoadJSONPack(pack.BlobType, backend.ID, interface{}) error
LoadJSONPack(pack.BlobType, ID, interface{}) error
}
func LoadTree(repo TreeLoader, id backend.ID) (*Tree, error) {
func LoadTree(repo TreeLoader, id ID) (*Tree, error) {
tree := &Tree{}
err := repo.LoadJSONPack(pack.Tree, id, tree)
if err != nil {
@@ -95,7 +94,7 @@ func (t Tree) Find(name string) (*Node, error) {
}
// Subtrees returns a slice of all subtree IDs of the tree.
func (t Tree) Subtrees() (trees backend.IDs) {
func (t Tree) Subtrees() (trees IDs) {
for _, node := range t.Nodes {
if node.FileType == "dir" && node.Subtree != nil {
trees = append(trees, *node.Subtree)
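A short sketch of the tree API after the migration: any value satisfying TreeLoader can load a tree and enumerate its subtrees by plain ID (the helper name is made up):

// listSubtrees is a hypothetical helper; repo must satisfy TreeLoader.
func listSubtrees(repo TreeLoader, id ID) (IDs, error) {
	tree, err := LoadTree(repo, id)
	if err != nil {
		return nil, err
	}
	return tree.Subtrees(), nil
}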

View file

@@ -6,7 +6,6 @@ import (
"path/filepath"
"sync"
"restic/backend"
"restic/debug"
"restic/pack"
)
@@ -35,7 +34,7 @@ func NewTreeWalker(ch chan<- loadTreeJob, out chan<- WalkTreeJob) *TreeWalker {
// Walk starts walking the tree given by id. When the channel done is closed,
// processing stops.
func (tw *TreeWalker) Walk(path string, id backend.ID, done chan struct{}) {
func (tw *TreeWalker) Walk(path string, id ID, done chan struct{}) {
debug.Log("TreeWalker.Walk", "starting on tree %v for %v", id.Str(), path)
defer debug.Log("TreeWalker.Walk", "done walking tree %v for %v", id.Str(), path)
@@ -119,11 +118,11 @@ type loadTreeResult struct {
}
type loadTreeJob struct {
id backend.ID
id ID
res chan<- loadTreeResult
}
type treeLoader func(backend.ID) (*Tree, error)
type treeLoader func(ID) (*Tree, error)
func loadTreeWorker(wg *sync.WaitGroup, in <-chan loadTreeJob, load treeLoader, done <-chan struct{}) {
debug.Log("loadTreeWorker", "start")
@@ -162,10 +161,10 @@ const loadTreeWorkers = 10
// WalkTree walks the tree specified by id recursively and sends a job for each
// file and directory it finds. When the channel done is closed, processing
// stops.
func WalkTree(repo TreeLoader, id backend.ID, done chan struct{}, jobCh chan<- WalkTreeJob) {
func WalkTree(repo TreeLoader, id ID, done chan struct{}, jobCh chan<- WalkTreeJob) {
debug.Log("WalkTree", "start on %v, start workers", id.Str())
load := func(id backend.ID) (*Tree, error) {
load := func(id ID) (*Tree, error) {
tree := &Tree{}
err := repo.LoadJSONPack(pack.Tree, id, tree)
if err != nil {
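To close out, a sketch of consuming WalkTree under the new signature. It assumes, as in prior revisions of this file, that the walker closes jobCh when it finishes and that WalkTreeJob carries Path and Error fields; the helper name and fmt import are illustrative:

// printTree is a hypothetical consumer of WalkTree.
func printTree(repo TreeLoader, id ID) {
	done := make(chan struct{})
	defer close(done)

	jobCh := make(chan WalkTreeJob)
	go WalkTree(repo, id, done, jobCh)

	// drain jobs until the walker closes the channel
	for job := range jobCh {
		if job.Error != nil {
			fmt.Printf("error at %v: %v\n", job.Path, job.Error)
			continue
		}
		fmt.Println(job.Path)
	}
}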