Alexander Neumann 2016-08-31 22:39:36 +02:00
parent 51d8e6aa28
commit cc6a8b6e15
50 changed files with 741 additions and 668 deletions

View file

@@ -1,10 +1,10 @@
package restic
package archiver
import (
"encoding/json"
"io"
"restic"
"restic/debug"
"restic/pack"
"time"
"github.com/pkg/errors"
@@ -12,37 +12,37 @@ import (
)
// saveTreeJSON stores a tree in the repository.
func saveTreeJSON(repo Repository, item interface{}) (ID, error) {
func saveTreeJSON(repo restic.Repository, item interface{}) (restic.ID, error) {
data, err := json.Marshal(item)
if err != nil {
return ID{}, errors.Wrap(err, "")
return restic.ID{}, errors.Wrap(err, "")
}
data = append(data, '\n')
// check if tree has been saved before
id := Hash(data)
if repo.Index().Has(id, pack.Tree) {
id := restic.Hash(data)
if repo.Index().Has(id, restic.TreeBlob) {
return id, nil
}
return repo.SaveJSON(pack.Tree, item)
return repo.SaveJSON(restic.TreeBlob, item)
}
// ArchiveReader reads from the reader and archives the data. Returned is the
// resulting snapshot and its ID.
func ArchiveReader(repo Repository, p *Progress, rd io.Reader, name string) (*Snapshot, ID, error) {
func ArchiveReader(repo restic.Repository, p *restic.Progress, rd io.Reader, name string) (*restic.Snapshot, restic.ID, error) {
debug.Log("ArchiveReader", "start archiving %s", name)
sn, err := NewSnapshot([]string{name})
sn, err := restic.NewSnapshot([]string{name})
if err != nil {
return nil, ID{}, err
return nil, restic.ID{}, err
}
p.Start()
defer p.Done()
chnker := chunker.New(rd, repo.Config().ChunkerPolynomial())
chnker := chunker.New(rd, repo.Config().ChunkerPolynomial)
var ids IDs
var ids restic.IDs
var fileSize uint64
for {
@@ -52,15 +52,15 @@ func ArchiveReader(repo Repository, p *Progress, rd io.Reader, name string) (*Sn
}
if err != nil {
return nil, ID{}, errors.Wrap(err, "chunker.Next()")
return nil, restic.ID{}, errors.Wrap(err, "chunker.Next()")
}
id := Hash(chunk.Data)
id := restic.Hash(chunk.Data)
if !repo.Index().Has(id, pack.Data) {
_, err := repo.SaveAndEncrypt(pack.Data, chunk.Data, nil)
if !repo.Index().Has(id, restic.DataBlob) {
_, err := repo.SaveAndEncrypt(restic.DataBlob, chunk.Data, nil)
if err != nil {
return nil, ID{}, err
return nil, restic.ID{}, err
}
debug.Log("ArchiveReader", "saved blob %v (%d bytes)\n", id.Str(), chunk.Length)
} else {
@@ -71,13 +71,13 @@ func ArchiveReader(repo Repository, p *Progress, rd io.Reader, name string) (*Sn
ids = append(ids, id)
p.Report(Stat{Bytes: uint64(chunk.Length)})
p.Report(restic.Stat{Bytes: uint64(chunk.Length)})
fileSize += uint64(chunk.Length)
}
tree := &Tree{
Nodes: []*Node{
&Node{
tree := &restic.Tree{
Nodes: []*restic.Node{
&restic.Node{
Name: name,
AccessTime: time.Now(),
ModTime: time.Now(),
@@ -94,27 +94,26 @@ func ArchiveReader(repo Repository, p *Progress, rd io.Reader, name string) (*Sn
treeID, err := saveTreeJSON(repo, tree)
if err != nil {
return nil, ID{}, err
return nil, restic.ID{}, err
}
sn.Tree = &treeID
debug.Log("ArchiveReader", "tree saved as %v", treeID.Str())
id, err := repo.SaveJSONUnpacked(SnapshotFile, sn)
id, err := repo.SaveJSONUnpacked(restic.SnapshotFile, sn)
if err != nil {
return nil, ID{}, err
return nil, restic.ID{}, err
}
sn.id = &id
debug.Log("ArchiveReader", "snapshot saved as %v", id.Str())
err = repo.Flush()
if err != nil {
return nil, ID{}, err
return nil, restic.ID{}, err
}
err = repo.SaveIndex()
if err != nil {
return nil, ID{}, err
return nil, restic.ID{}, err
}
return sn, id, nil
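
The hunks above swap the old pack.Data / pack.Tree blob types for restic.DataBlob / restic.TreeBlob. For orientation, here is a minimal sketch of the BlobType definition this diff presupposes in the restic package; the constant names are taken from the diff, the underlying type and ordering are assumptions:

	// Sketch only: blob-type constants assumed to live in package restic.
	type BlobType uint8

	const (
		InvalidBlob BlobType = iota
		DataBlob
		TreeBlob
	)

A dedicated blob type also keeps calls like repo.Index().Has(id, restic.TreeBlob) from being handed a file type by mistake.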

View file

@@ -1,19 +1,18 @@
package restic
package archiver
import (
"bytes"
"io"
"math/rand"
"restic/backend"
"restic/pack"
"restic"
"restic/repository"
"testing"
"github.com/restic/chunker"
)
func loadBlob(t *testing.T, repo *repository.Repository, id backend.ID, buf []byte) []byte {
buf, err := repo.LoadBlob(id, pack.Data, buf)
func loadBlob(t *testing.T, repo *repository.Repository, id restic.ID, buf []byte) []byte {
buf, err := repo.LoadBlob(id, restic.DataBlob, buf)
if err != nil {
t.Fatalf("LoadBlob(%v) returned error %v", id, err)
}
@@ -21,8 +20,8 @@ func loadBlob(t *testing.T, repo *repository.Repository, id backend.ID, buf []by
return buf
}
func checkSavedFile(t *testing.T, repo *repository.Repository, treeID backend.ID, name string, rd io.Reader) {
tree, err := LoadTree(repo, treeID)
func checkSavedFile(t *testing.T, repo *repository.Repository, treeID restic.ID, name string, rd io.Reader) {
tree, err := restic.LoadTree(repo, treeID)
if err != nil {
t.Fatalf("LoadTree() returned error %v", err)
}
@@ -58,6 +57,11 @@ func checkSavedFile(t *testing.T, repo *repository.Repository, treeID backend.ID
}
}
// fakeFile returns a reader which yields deterministic pseudo-random data.
func fakeFile(t testing.TB, seed, size int64) io.Reader {
return io.LimitReader(restic.NewRandReader(rand.New(rand.NewSource(seed))), size)
}
func TestArchiveReader(t *testing.T) {
repo, cleanup := repository.TestRepository(t)
defer cleanup()
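
The excerpt stops at the start of TestArchiveReader, but the newly added fakeFile helper deserves a note: seeding math/rand makes the generated test input reproducible across runs. A hedged usage sketch inside this test, assuming (as the nil checks elsewhere in restic suggest) that a nil *restic.Progress is tolerated:

	// Sketch: archive 512 KiB of seed-23 pseudo-random data.
	rd := fakeFile(t, 23, 512*1024)
	sn, id, err := ArchiveReader(repo, nil, rd, "fakefile")
	if err != nil {
		t.Fatal(err)
	}
	t.Logf("snapshot %v stored, tree %v", id.Str(), sn.Tree.Str())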

View file

@@ -1,4 +1,4 @@
package restic
package archiver
import (
"encoding/json"
@@ -6,6 +6,7 @@ import (
"io"
"os"
"path/filepath"
"restic"
"sort"
"sync"
"time"
@@ -14,7 +15,6 @@ import (
"restic/debug"
"restic/fs"
"restic/pack"
"restic/pipe"
"github.com/restic/chunker"
@@ -30,9 +30,9 @@ var archiverAllowAllFiles = func(string, os.FileInfo) bool { return true }
// Archiver is used to backup a set of directories.
type Archiver struct {
repo Repository
repo restic.Repository
knownBlobs struct {
IDSet
restic.IDSet
sync.Mutex
}
@@ -43,16 +43,16 @@ type Archiver struct {
Excludes []string
}
// NewArchiver returns a new archiver.
func NewArchiver(repo Repository) *Archiver {
// New returns a new archiver.
func New(repo restic.Repository) *Archiver {
arch := &Archiver{
repo: repo,
blobToken: make(chan struct{}, maxConcurrentBlobs),
knownBlobs: struct {
IDSet
restic.IDSet
sync.Mutex
}{
IDSet: NewIDSet(),
IDSet: restic.NewIDSet(),
},
}
@@ -70,7 +70,7 @@ func NewArchiver(repo Repository) *Archiver {
// When the blob is not known, false is returned and the blob is added to the
// list. This means that the caller false is returned to is responsible to save
// the blob to the backend.
func (arch *Archiver) isKnownBlob(id ID, t pack.BlobType) bool {
func (arch *Archiver) isKnownBlob(id restic.ID, t restic.BlobType) bool {
arch.knownBlobs.Lock()
defer arch.knownBlobs.Unlock()
@@ -89,10 +89,10 @@ func (arch *Archiver) isKnownBlob(id ID, t pack.BlobType) bool {
}
// Save stores a blob read from rd in the repository.
func (arch *Archiver) Save(t pack.BlobType, data []byte, id ID) error {
func (arch *Archiver) Save(t restic.BlobType, data []byte, id restic.ID) error {
debug.Log("Archiver.Save", "Save(%v, %v)\n", t, id.Str())
if arch.isKnownBlob(id, pack.Data) {
if arch.isKnownBlob(id, restic.DataBlob) {
debug.Log("Archiver.Save", "blob %v is known\n", id.Str())
return nil
}
@@ -108,40 +108,40 @@ func (arch *Archiver) Save(t pack.BlobType, data []byte, id ID) error {
}
// SaveTreeJSON stores a tree in the repository.
func (arch *Archiver) SaveTreeJSON(item interface{}) (ID, error) {
func (arch *Archiver) SaveTreeJSON(item interface{}) (restic.ID, error) {
data, err := json.Marshal(item)
if err != nil {
return ID{}, errors.Wrap(err, "Marshal")
return restic.ID{}, errors.Wrap(err, "Marshal")
}
data = append(data, '\n')
// check if tree has been saved before
id := Hash(data)
if arch.isKnownBlob(id, pack.Tree) {
id := restic.Hash(data)
if arch.isKnownBlob(id, restic.TreeBlob) {
return id, nil
}
return arch.repo.SaveJSON(pack.Tree, item)
return arch.repo.SaveJSON(restic.TreeBlob, item)
}
func (arch *Archiver) reloadFileIfChanged(node *Node, file fs.File) (*Node, error) {
func (arch *Archiver) reloadFileIfChanged(node *restic.Node, file fs.File) (*restic.Node, error) {
fi, err := file.Stat()
if err != nil {
return nil, errors.Wrap(err, "Stat")
return nil, errors.Wrap(err, "restic.Stat")
}
if fi.ModTime() == node.ModTime {
return node, nil
}
err = arch.Error(node.path, fi, errors.New("file has changed"))
err = arch.Error(node.Path, fi, errors.New("file has changed"))
if err != nil {
return nil, err
}
node, err = NodeFromFileInfo(node.path, fi)
node, err = restic.NodeFromFileInfo(node.Path, fi)
if err != nil {
debug.Log("Archiver.SaveFile", "NodeFromFileInfo returned error for %v: %v", node.path, err)
debug.Log("Archiver.SaveFile", "restic.NodeFromFileInfo returned error for %v: %v", node.Path, err)
return nil, err
}
@@ -149,21 +149,21 @@ func (arch *Archiver) reloadFileIfChanged(node *Node, file fs.File) (*Node, erro
}
type saveResult struct {
id ID
id restic.ID
bytes uint64
}
func (arch *Archiver) saveChunk(chunk chunker.Chunk, p *Progress, token struct{}, file fs.File, resultChannel chan<- saveResult) {
func (arch *Archiver) saveChunk(chunk chunker.Chunk, p *restic.Progress, token struct{}, file fs.File, resultChannel chan<- saveResult) {
defer freeBuf(chunk.Data)
id := Hash(chunk.Data)
err := arch.Save(pack.Data, chunk.Data, id)
id := restic.Hash(chunk.Data)
err := arch.Save(restic.DataBlob, chunk.Data, id)
// TODO handle error
if err != nil {
panic(err)
}
p.Report(Stat{Bytes: uint64(chunk.Length)})
p.Report(restic.Stat{Bytes: uint64(chunk.Length)})
arch.blobToken <- token
resultChannel <- saveResult{id: id, bytes: uint64(chunk.Length)}
}
@@ -182,11 +182,11 @@ func waitForResults(resultChannels [](<-chan saveResult)) ([]saveResult, error)
return results, nil
}
func updateNodeContent(node *Node, results []saveResult) error {
debug.Log("Archiver.Save", "checking size for file %s", node.path)
func updateNodeContent(node *restic.Node, results []saveResult) error {
debug.Log("Archiver.Save", "checking size for file %s", node.Path)
var bytes uint64
node.Content = make([]ID, len(results))
node.Content = make([]restic.ID, len(results))
for i, b := range results {
node.Content[i] = b.id
@@ -196,18 +196,18 @@ func updateNodeContent(node *Node, results []saveResult) error {
}
if bytes != node.Size {
return errors.Errorf("errors saving node %q: saved %d bytes, wanted %d bytes", node.path, bytes, node.Size)
return errors.Errorf("errors saving node %q: saved %d bytes, wanted %d bytes", node.Path, bytes, node.Size)
}
debug.Log("Archiver.SaveFile", "SaveFile(%q): %v blobs\n", node.path, len(results))
debug.Log("Archiver.SaveFile", "SaveFile(%q): %v blobs\n", node.Path, len(results))
return nil
}
// SaveFile stores the content of the file on the backend as a Blob by calling
// Save for each chunk.
func (arch *Archiver) SaveFile(p *Progress, node *Node) error {
file, err := fs.Open(node.path)
func (arch *Archiver) SaveFile(p *restic.Progress, node *restic.Node) error {
file, err := fs.Open(node.Path)
defer file.Close()
if err != nil {
return errors.Wrap(err, "Open")
@@ -218,7 +218,7 @@ func (arch *Archiver) SaveFile(p *Progress, node *Node) error {
return err
}
chnker := chunker.New(file, arch.repo.Config().ChunkerPolynomial())
chnker := chunker.New(file, arch.repo.Config().ChunkerPolynomial)
resultChannels := [](<-chan saveResult){}
for {
@@ -245,7 +245,7 @@ func (arch *Archiver) SaveFile(p *Progress, node *Node) error {
return err
}
func (arch *Archiver) fileWorker(wg *sync.WaitGroup, p *Progress, done <-chan struct{}, entCh <-chan pipe.Entry) {
func (arch *Archiver) fileWorker(wg *sync.WaitGroup, p *restic.Progress, done <-chan struct{}, entCh <-chan pipe.Entry) {
defer func() {
debug.Log("Archiver.fileWorker", "done")
wg.Done()
@@ -267,16 +267,16 @@ func (arch *Archiver) fileWorker(wg *sync.WaitGroup, p *Progress, done <-chan st
fmt.Fprintf(os.Stderr, "error for %v: %v\n", e.Path(), e.Error())
// ignore this file
e.Result() <- nil
p.Report(Stat{Errors: 1})
p.Report(restic.Stat{Errors: 1})
continue
}
node, err := NodeFromFileInfo(e.Fullpath(), e.Info())
node, err := restic.NodeFromFileInfo(e.Fullpath(), e.Info())
if err != nil {
// TODO: integrate error reporting
debug.Log("Archiver.fileWorker", "NodeFromFileInfo returned error for %v: %v", node.path, err)
debug.Log("Archiver.fileWorker", "restic.NodeFromFileInfo returned error for %v: %v", node.Path, err)
e.Result() <- nil
p.Report(Stat{Errors: 1})
p.Report(restic.Stat{Errors: 1})
continue
}
@@ -284,12 +284,12 @@ func (arch *Archiver) fileWorker(wg *sync.WaitGroup, p *Progress, done <-chan st
if e.Node != nil {
debug.Log("Archiver.fileWorker", " %v use old data", e.Path())
oldNode := e.Node.(*Node)
oldNode := e.Node.(*restic.Node)
// check if all content is still available in the repository
contentMissing := false
for _, blob := range oldNode.blobs {
if ok, err := arch.repo.Backend().Test(DataFile, blob.Storage.String()); !ok || err != nil {
debug.Log("Archiver.fileWorker", " %v not using old data, %v (%v) is missing", e.Path(), blob.ID.Str(), blob.Storage.Str())
for _, blob := range oldNode.Content {
if !arch.repo.Index().Has(blob, restic.DataBlob) {
debug.Log("Archiver.fileWorker", " %v not using old data, %v is missing", e.Path(), blob.Str())
contentMissing = true
break
}
@@ -297,7 +297,6 @@ func (arch *Archiver) fileWorker(wg *sync.WaitGroup, p *Progress, done <-chan st
if !contentMissing {
node.Content = oldNode.Content
node.blobs = oldNode.blobs
debug.Log("Archiver.fileWorker", " %v content is complete", e.Path())
}
} else {
@@ -310,20 +309,20 @@ func (arch *Archiver) fileWorker(wg *sync.WaitGroup, p *Progress, done <-chan st
err = arch.SaveFile(p, node)
if err != nil {
// TODO: integrate error reporting
fmt.Fprintf(os.Stderr, "error for %v: %v\n", node.path, err)
fmt.Fprintf(os.Stderr, "error for %v: %v\n", node.Path, err)
// ignore this file
e.Result() <- nil
p.Report(Stat{Errors: 1})
p.Report(restic.Stat{Errors: 1})
continue
}
} else {
// report old data size
p.Report(Stat{Bytes: node.Size})
p.Report(restic.Stat{Bytes: node.Size})
}
debug.Log("Archiver.fileWorker", " processed %v, %d/%d blobs", e.Path(), len(node.Content), len(node.blobs))
debug.Log("Archiver.fileWorker", " processed %v, %d blobs", e.Path(), len(node.Content))
e.Result() <- node
p.Report(Stat{Files: 1})
p.Report(restic.Stat{Files: 1})
case <-done:
// pipeline was cancelled
return
@@ -331,7 +330,7 @@ func (arch *Archiver) fileWorker(wg *sync.WaitGroup, p *Progress, done <-chan st
}
}
func (arch *Archiver) dirWorker(wg *sync.WaitGroup, p *Progress, done <-chan struct{}, dirCh <-chan pipe.Dir) {
func (arch *Archiver) dirWorker(wg *sync.WaitGroup, p *restic.Progress, done <-chan struct{}, dirCh <-chan pipe.Dir) {
debug.Log("Archiver.dirWorker", "start")
defer func() {
debug.Log("Archiver.dirWorker", "done")
@@ -350,11 +349,11 @@ func (arch *Archiver) dirWorker(wg *sync.WaitGroup, p *Progress, done <-chan str
if dir.Error() != nil {
fmt.Fprintf(os.Stderr, "error walking dir %v: %v\n", dir.Path(), dir.Error())
dir.Result() <- nil
p.Report(Stat{Errors: 1})
p.Report(restic.Stat{Errors: 1})
continue
}
tree := NewTree()
tree := restic.NewTree()
// wait for all content
for _, ch := range dir.Entries {
@@ -369,22 +368,22 @@ func (arch *Archiver) dirWorker(wg *sync.WaitGroup, p *Progress, done <-chan str
}
// else insert node
node := res.(*Node)
node := res.(*restic.Node)
tree.Insert(node)
if node.FileType == "dir" {
debug.Log("Archiver.dirWorker", "got tree node for %s: %v", node.path, node.Subtree)
debug.Log("Archiver.dirWorker", "got tree node for %s: %v", node.Path, node.Subtree)
if node.Subtree.IsNull() {
panic("invalid null subtree ID")
panic("invalid null subtree restic.ID")
}
}
}
node := &Node{}
node := &restic.Node{}
if dir.Path() != "" && dir.Info() != nil {
n, err := NodeFromFileInfo(dir.Path(), dir.Info())
n, err := restic.NodeFromFileInfo(dir.Path(), dir.Info())
if err != nil {
n.Error = err.Error()
dir.Result() <- n
@@ -403,7 +402,7 @@ func (arch *Archiver) dirWorker(wg *sync.WaitGroup, p *Progress, done <-chan str
}
debug.Log("Archiver.dirWorker", "save tree for %s: %v", dir.Path(), id.Str())
if id.IsNull() {
panic("invalid null subtree ID return from SaveTreeJSON()")
panic("invalid null subtree restic.ID return from SaveTreeJSON()")
}
node.Subtree = &id
@@ -412,7 +411,7 @@ func (arch *Archiver) dirWorker(wg *sync.WaitGroup, p *Progress, done <-chan str
dir.Result() <- node
if dir.Path() != "" {
p.Report(Stat{Dirs: 1})
p.Report(restic.Stat{Dirs: 1})
}
case <-done:
// pipeline was cancelled
@@ -422,7 +421,7 @@ func (arch *Archiver) dirWorker(wg *sync.WaitGroup, p *Progress, done <-chan str
}
type archivePipe struct {
Old <-chan WalkTreeJob
Old <-chan restic.WalkTreeJob
New <-chan pipe.Job
}
@@ -457,7 +456,7 @@ func copyJobs(done <-chan struct{}, in <-chan pipe.Job, out chan<- pipe.Job) {
type archiveJob struct {
hasOld bool
old WalkTreeJob
old restic.WalkTreeJob
new pipe.Job
}
@@ -471,7 +470,7 @@ func (a *archivePipe) compare(done <-chan struct{}, out chan<- pipe.Job) {
var (
loadOld, loadNew bool = true, true
ok bool
oldJob WalkTreeJob
oldJob restic.WalkTreeJob
newJob pipe.Job
)
@@ -565,7 +564,7 @@ func (j archiveJob) Copy() pipe.Job {
}
// if file is newer, return the new job
if j.old.Node.isNewer(j.new.Fullpath(), j.new.Info()) {
if j.old.Node.IsNewer(j.new.Fullpath(), j.new.Info()) {
debug.Log("archiveJob.Copy", " job %v is newer", j.new.Path())
return j.new
}
@@ -630,10 +629,10 @@ func (p baseNameSlice) Len() int { return len(p) }
func (p baseNameSlice) Less(i, j int) bool { return filepath.Base(p[i]) < filepath.Base(p[j]) }
func (p baseNameSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
// Snapshot creates a snapshot of the given paths. If parentID is set, this is
// Snapshot creates a snapshot of the given paths. If parentrestic.ID is set, this is
// used to compare the files to the ones archived at the time this snapshot was
// taken.
func (arch *Archiver) Snapshot(p *Progress, paths []string, parentID *ID) (*Snapshot, ID, error) {
func (arch *Archiver) Snapshot(p *restic.Progress, paths []string, parentID *restic.ID) (*restic.Snapshot, restic.ID, error) {
paths = unique(paths)
sort.Sort(baseNameSlice(paths))
@@ -649,9 +648,9 @@ func (arch *Archiver) Snapshot(p *Progress, paths []string, parentID *ID) (*Snap
defer p.Done()
// create new snapshot
sn, err := NewSnapshot(paths)
sn, err := restic.NewSnapshot(paths)
if err != nil {
return nil, ID{}, err
return nil, restic.ID{}, err
}
sn.Excludes = arch.Excludes
@@ -662,18 +661,18 @@ func (arch *Archiver) Snapshot(p *Progress, paths []string, parentID *ID) (*Snap
sn.Parent = parentID
// load parent snapshot
parent, err := LoadSnapshot(arch.repo, *parentID)
parent, err := restic.LoadSnapshot(arch.repo, *parentID)
if err != nil {
return nil, ID{}, err
return nil, restic.ID{}, err
}
// start walker on old tree
ch := make(chan WalkTreeJob)
go WalkTree(arch.repo, *parent.Tree, done, ch)
ch := make(chan restic.WalkTreeJob)
go restic.WalkTree(arch.repo, *parent.Tree, done, ch)
jobs.Old = ch
} else {
// use closed channel
ch := make(chan WalkTreeJob)
ch := make(chan restic.WalkTreeJob)
close(ch)
jobs.Old = ch
}
@@ -728,31 +727,29 @@ func (arch *Archiver) Snapshot(p *Progress, paths []string, parentID *ID) (*Snap
debug.Log("Archiver.Snapshot", "workers terminated")
// receive the top-level tree
root := (<-resCh).(*Node)
root := (<-resCh).(*restic.Node)
debug.Log("Archiver.Snapshot", "root node received: %v", root.Subtree.Str())
sn.Tree = root.Subtree
// save snapshot
id, err := arch.repo.SaveJSONUnpacked(SnapshotFile, sn)
id, err := arch.repo.SaveJSONUnpacked(restic.SnapshotFile, sn)
if err != nil {
return nil, ID{}, err
return nil, restic.ID{}, err
}
// store ID in snapshot struct
sn.id = &id
debug.Log("Archiver.Snapshot", "saved snapshot %v", id.Str())
// flush repository
err = arch.repo.Flush()
if err != nil {
return nil, ID{}, err
return nil, restic.ID{}, err
}
// save index
err = arch.repo.SaveIndex()
if err != nil {
debug.Log("Archiver.Snapshot", "error saving index: %v", err)
return nil, ID{}, err
return nil, restic.ID{}, err
}
debug.Log("Archiver.Snapshot", "saved indexes")
@@ -768,13 +765,13 @@ func isRegularFile(fi os.FileInfo) bool {
return fi.Mode()&(os.ModeType|os.ModeCharDevice) == 0
}
// Scan traverses the dirs to collect Stat information while emitting progress
// Scan traverses the dirs to collect restic.Stat information while emitting progress
// information with p.
func Scan(dirs []string, filter pipe.SelectFunc, p *Progress) (Stat, error) {
func Scan(dirs []string, filter pipe.SelectFunc, p *restic.Progress) (restic.Stat, error) {
p.Start()
defer p.Done()
var stat Stat
var stat restic.Stat
for _, dir := range dirs {
debug.Log("Scan", "Start for %v", dir)
@@ -797,7 +794,7 @@ func Scan(dirs []string, filter pipe.SelectFunc, p *Progress) (Stat, error) {
return nil
}
s := Stat{}
s := restic.Stat{}
if fi.IsDir() {
s.Dirs++
} else {
@@ -817,7 +814,7 @@ func Scan(dirs []string, filter pipe.SelectFunc, p *Progress) (Stat, error) {
debug.Log("Scan", "Done for %v, err: %v", dir, err)
if err != nil {
return Stat{}, errors.Wrap(err, "fs.Walk")
return restic.Stat{}, errors.Wrap(err, "fs.Walk")
}
}
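
A side note on the knownBlobs field changed above: it embeds a restic.IDSet together with a sync.Mutex so the concurrent file and dir workers can share one deduplication set. The check-and-insert idiom behind isKnownBlob, sketched under the assumption that restic.IDSet has the usual Has and Insert methods (per the -89,10 hunk, the real method additionally consults the repository index):

	// Sketch of the guarded check-and-insert done by isKnownBlob.
	arch.knownBlobs.Lock()
	defer arch.knownBlobs.Unlock()

	if arch.knownBlobs.Has(id) {
		return true
	}
	arch.knownBlobs.Insert(id)
	return false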

View file

@@ -1,4 +1,4 @@
package restic_test
package archiver_test
import (
"crypto/rand"
@@ -103,13 +103,13 @@ func testArchiverDuplication(t *testing.T) {
id := randomID()
if repo.Index().Has(id, pack.Data) {
if repo.Index().Has(id, restic.DataBlob) {
continue
}
buf := make([]byte, 50)
err := arch.Save(pack.Data, buf, id)
err := arch.Save(restic.DataBlob, buf, id)
if err != nil {
t.Fatal(err)
}

View file

@@ -1,4 +1,4 @@
package restic
package archiver
import (
"os"

View file

@@ -1,4 +1,4 @@
package restic_test
package archiver_test
import (
"bytes"
@@ -146,9 +146,9 @@ func archiveWithDedup(t testing.TB) {
t.Logf("archived snapshot %v", sn.ID().Str())
// get archive stats
cnt.before.packs = repo.Count(backend.Data)
cnt.before.dataBlobs = repo.Index().Count(pack.Data)
cnt.before.treeBlobs = repo.Index().Count(pack.Tree)
cnt.before.packs = repo.Count(restic.DataFile)
cnt.before.dataBlobs = repo.Index().Count(restic.DataBlob)
cnt.before.treeBlobs = repo.Index().Count(restic.TreeBlob)
t.Logf("packs %v, data blobs %v, tree blobs %v",
cnt.before.packs, cnt.before.dataBlobs, cnt.before.treeBlobs)
@@ -157,9 +157,9 @@ func archiveWithDedup(t testing.TB) {
t.Logf("archived snapshot %v", sn2.ID().Str())
// get archive stats again
cnt.after.packs = repo.Count(backend.Data)
cnt.after.dataBlobs = repo.Index().Count(pack.Data)
cnt.after.treeBlobs = repo.Index().Count(pack.Tree)
cnt.after.packs = repo.Count(restic.DataFile)
cnt.after.dataBlobs = repo.Index().Count(restic.DataBlob)
cnt.after.treeBlobs = repo.Index().Count(restic.TreeBlob)
t.Logf("packs %v, data blobs %v, tree blobs %v",
cnt.after.packs, cnt.after.dataBlobs, cnt.after.treeBlobs)
@@ -174,9 +174,9 @@ func archiveWithDedup(t testing.TB) {
t.Logf("archived snapshot %v, parent %v", sn3.ID().Str(), sn2.ID().Str())
// get archive stats again
cnt.after2.packs = repo.Count(backend.Data)
cnt.after2.dataBlobs = repo.Index().Count(pack.Data)
cnt.after2.treeBlobs = repo.Index().Count(pack.Tree)
cnt.after2.packs = repo.Count(restic.DataFile)
cnt.after2.dataBlobs = repo.Index().Count(restic.DataBlob)
cnt.after2.treeBlobs = repo.Index().Count(restic.TreeBlob)
t.Logf("packs %v, data blobs %v, tree blobs %v",
cnt.after2.packs, cnt.after2.dataBlobs, cnt.after2.treeBlobs)
@@ -210,7 +210,7 @@ func BenchmarkLoadTree(t *testing.B) {
for _, idx := range repo.Index().All() {
for blob := range idx.Each(done) {
if blob.Type != pack.Tree {
if blob.Type != restic.TreeBlob {
continue
}
@@ -267,7 +267,7 @@ func testParallelSaveWithDuplication(t *testing.T, seed int) {
id := backend.Hash(c.Data)
time.Sleep(time.Duration(id[0]))
err := arch.Save(pack.Data, c.Data, id)
err := arch.Save(restic.DataBlob, c.Data, id)
<-barrier
errChan <- err
}(c, errChan)

View file

@@ -0,0 +1,21 @@
package archiver
import (
"sync"
"github.com/restic/chunker"
)
var bufPool = sync.Pool{
New: func() interface{} {
return make([]byte, chunker.MinSize)
},
}
func getBuf() []byte {
return bufPool.Get().([]byte)
}
func freeBuf(data []byte) {
bufPool.Put(data)
}
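
This new file backs the freeBuf call in saveChunk above with a sync.Pool, so chunk buffers are recycled instead of being reallocated for every chunk. The intended round trip, sketched:

	buf := getBuf()    // reused buffer of chunker.MinSize bytes
	defer freeBuf(buf) // return it once the chunk has been saved

Because getBuf's New function allocates fresh buffers on demand, it is harmless that the pool may drop entries under GC pressure.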

View file

@@ -1,6 +1,7 @@
package backend_test
import (
"restic"
"testing"
"restic/backend"
@@ -8,10 +9,10 @@ import (
)
type mockBackend struct {
list func(backend.Type, <-chan struct{}) <-chan string
list func(restic.FileType, <-chan struct{}) <-chan string
}
func (m mockBackend) List(t backend.Type, done <-chan struct{}) <-chan string {
func (m mockBackend) List(t restic.FileType, done <-chan struct{}) <-chan string {
return m.list(t, done)
}
@@ -30,7 +31,7 @@ func TestPrefixLength(t *testing.T) {
list := samples
m := mockBackend{}
m.list = func(t backend.Type, done <-chan struct{}) <-chan string {
m.list = func(t restic.FileType, done <-chan struct{}) <-chan string {
ch := make(chan string)
go func() {
defer close(ch)
@@ -45,17 +46,17 @@ func TestPrefixLength(t *testing.T) {
return ch
}
l, err := backend.PrefixLength(m, backend.Snapshot)
l, err := backend.PrefixLength(m, restic.SnapshotFile)
OK(t, err)
Equals(t, 19, l)
list = samples[:3]
l, err = backend.PrefixLength(m, backend.Snapshot)
l, err = backend.PrefixLength(m, restic.SnapshotFile)
OK(t, err)
Equals(t, 19, l)
list = samples[3:]
l, err = backend.PrefixLength(m, backend.Snapshot)
l, err = backend.PrefixLength(m, restic.SnapshotFile)
OK(t, err)
Equals(t, 8, l)
}
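
mockBackend stubs only the one method PrefixLength needs, via a function field, so each case above can swap the listing behaviour without a full backend implementation. The idiom in isolation, with hypothetical channel contents:

	// Sketch: a throwaway mock that lists two fixed names.
	m := mockBackend{
		list: func(t restic.FileType, done <-chan struct{}) <-chan string {
			ch := make(chan string, 2)
			ch <- "deadbeef"
			ch <- "deadc0de"
			close(ch)
			return ch
		},
	}

	for name := range m.List(restic.SnapshotFile, nil) {
		_ = name // receives "deadbeef", then "deadc0de"
	}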

View file

@@ -5,6 +5,7 @@ import (
"io/ioutil"
"os"
"path/filepath"
"restic"
"github.com/pkg/errors"
@@ -18,6 +19,8 @@ type Local struct {
p string
}
var _ restic.Backend = &Local{}
func paths(dir string) []string {
return []string{
dir,
@@ -69,8 +72,8 @@ func (b *Local) Location() string {
}
// Construct path for given Type and name.
func filename(base string, t backend.Type, name string) string {
if t == backend.Config {
func filename(base string, t restic.FileType, name string) string {
if t == restic.ConfigFile {
return filepath.Join(base, "config")
}
@@ -78,21 +81,21 @@ func filename(base string, t backend.Type, name string) string {
}
// Construct directory for given Type.
func dirname(base string, t backend.Type, name string) string {
func dirname(base string, t restic.FileType, name string) string {
var n string
switch t {
case backend.Data:
case restic.DataFile:
n = backend.Paths.Data
if len(name) > 2 {
n = filepath.Join(n, name[:2])
}
case backend.Snapshot:
case restic.SnapshotFile:
n = backend.Paths.Snapshots
case backend.Index:
case restic.IndexFile:
n = backend.Paths.Index
case backend.Lock:
case restic.LockFile:
n = backend.Paths.Locks
case backend.Key:
case restic.KeyFile:
n = backend.Paths.Keys
}
return filepath.Join(base, n)
@@ -102,13 +105,13 @@ func dirname(base string, t backend.Type, name string) string {
// saves it in p. Load has the same semantics as io.ReaderAt, with one
// exception: when off is lower than zero, it is treated as an offset relative
// to the end of the file.
func (b *Local) Load(h backend.Handle, p []byte, off int64) (n int, err error) {
func (b *Local) Load(h restic.Handle, p []byte, off int64) (n int, err error) {
debug.Log("backend.local.Load", "Load %v, length %v at %v", h, len(p), off)
if err := h.Valid(); err != nil {
return 0, err
}
f, err := fs.Open(filename(b.p, h.Type, h.Name))
f, err := fs.Open(filename(b.p, h.FileType, h.Name))
if err != nil {
return 0, errors.Wrap(err, "Open")
}
@@ -168,7 +171,7 @@ func writeToTempfile(tempdir string, p []byte) (filename string, err error) {
}
// Save stores data in the backend at the handle.
func (b *Local) Save(h backend.Handle, p []byte) (err error) {
func (b *Local) Save(h restic.Handle, p []byte) (err error) {
debug.Log("backend.local.Save", "Save %v, length %v", h, len(p))
if err := h.Valid(); err != nil {
return err
@@ -180,7 +183,7 @@ func (b *Local) Save(h backend.Handle, p []byte) (err error) {
return err
}
filename := filename(b.p, h.Type, h.Name)
filename := filename(b.p, h.FileType, h.Name)
// test if new path already exists
if _, err := fs.Stat(filename); err == nil {
@@ -188,7 +191,7 @@ func (b *Local) Save(h backend.Handle, p []byte) (err error) {
}
// create directories if necessary, ignore errors
if h.Type == backend.Data {
if h.FileType == restic.DataFile {
err = fs.MkdirAll(filepath.Dir(filename), backend.Modes.Dir)
if err != nil {
return errors.Wrap(err, "MkdirAll")
@@ -213,22 +216,22 @@ func (b *Local) Save(h backend.Handle, p []byte) (err error) {
}
// Stat returns information about a blob.
func (b *Local) Stat(h backend.Handle) (backend.BlobInfo, error) {
func (b *Local) Stat(h restic.Handle) (restic.FileInfo, error) {
debug.Log("backend.local.Stat", "Stat %v", h)
if err := h.Valid(); err != nil {
return backend.BlobInfo{}, err
return restic.FileInfo{}, err
}
fi, err := fs.Stat(filename(b.p, h.Type, h.Name))
fi, err := fs.Stat(filename(b.p, h.FileType, h.Name))
if err != nil {
return backend.BlobInfo{}, errors.Wrap(err, "Stat")
return restic.FileInfo{}, errors.Wrap(err, "Stat")
}
return backend.BlobInfo{Size: fi.Size()}, nil
return restic.FileInfo{Size: fi.Size()}, nil
}
// Test returns true if a blob of the given type and name exists in the backend.
func (b *Local) Test(t backend.Type, name string) (bool, error) {
func (b *Local) Test(t restic.FileType, name string) (bool, error) {
debug.Log("backend.local.Test", "Test %v %v", t, name)
_, err := fs.Stat(filename(b.p, t, name))
if err != nil {
@@ -242,7 +245,7 @@ func (b *Local) Test(t backend.Type, name string) (bool, error) {
}
// Remove removes the blob with the given name and type.
func (b *Local) Remove(t backend.Type, name string) error {
func (b *Local) Remove(t restic.FileType, name string) error {
debug.Log("backend.local.Remove", "Remove %v %v", t, name)
fn := filename(b.p, t, name)
@@ -317,10 +320,10 @@ func listDirs(dir string) (filenames []string, err error) {
// List returns a channel that yields all names of blobs of type t. A
// goroutine is started for this. If the channel done is closed, sending
// stops.
func (b *Local) List(t backend.Type, done <-chan struct{}) <-chan string {
func (b *Local) List(t restic.FileType, done <-chan struct{}) <-chan string {
debug.Log("backend.local.List", "List %v", t)
lister := listDir
if t == backend.Data {
if t == restic.DataFile {
lister = listDirs
}
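
Taken together, filename and dirname above define the on-disk layout: data files are sharded into a subdirectory named after the first two characters of the file name, the other types live in flat directories, and the config ignores its name entirely. An illustration, assuming the backend.Paths values are the usual data, snapshots, etc., and POSIX separators:

	// Sketch inside package local; "repo" is a hypothetical base dir.
	for _, h := range []restic.Handle{
		{FileType: restic.DataFile, Name: "ab12cd"},
		{FileType: restic.SnapshotFile, Name: "ab12cd"},
		{FileType: restic.ConfigFile},
	} {
		fmt.Println(filename("repo", h.FileType, h.Name))
	}
	// expected output under those assumptions:
	//   repo/data/ab/ab12cd
	//   repo/snapshots/ab12cd
	//   repo/config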

View file

@@ -2,23 +2,23 @@ package mem
import (
"io"
"restic"
"sync"
"github.com/pkg/errors"
"restic/backend"
"restic/debug"
)
type entry struct {
Type backend.Type
Type restic.FileType
Name string
}
type memMap map[entry][]byte
// make sure that MemoryBackend implements backend.Backend
var _ backend.Backend = &MemoryBackend{}
var _ restic.Backend = &MemoryBackend{}
// MemoryBackend is a mock backend that uses a map for storing all data in
// memory. This should only be used for tests.
@@ -39,7 +39,7 @@ func New() *MemoryBackend {
}
// Test returns whether a file exists.
func (be *MemoryBackend) Test(t backend.Type, name string) (bool, error) {
func (be *MemoryBackend) Test(t restic.FileType, name string) (bool, error) {
be.m.Lock()
defer be.m.Unlock()
@@ -53,7 +53,7 @@ func (be *MemoryBackend) Test(t backend.Type, name string) (bool, error) {
}
// Load reads data from the backend.
func (be *MemoryBackend) Load(h backend.Handle, p []byte, off int64) (int, error) {
func (be *MemoryBackend) Load(h restic.Handle, p []byte, off int64) (int, error) {
if err := h.Valid(); err != nil {
return 0, err
}
@@ -61,17 +61,17 @@ func (be *MemoryBackend) Load(h backend.Handle, p []byte, off int64) (int, error
be.m.Lock()
defer be.m.Unlock()
if h.Type == backend.Config {
if h.FileType == restic.ConfigFile {
h.Name = ""
}
debug.Log("MemoryBackend.Load", "get %v offset %v len %v", h, off, len(p))
if _, ok := be.data[entry{h.Type, h.Name}]; !ok {
if _, ok := be.data[entry{h.FileType, h.Name}]; !ok {
return 0, errors.New("no such data")
}
buf := be.data[entry{h.Type, h.Name}]
buf := be.data[entry{h.FileType, h.Name}]
switch {
case off > int64(len(buf)):
return 0, errors.New("offset beyond end of file")
@@ -93,7 +93,7 @@ func (be *MemoryBackend) Load(h backend.Handle, p []byte, off int64) (int, error
}
// Save adds new Data to the backend.
func (be *MemoryBackend) Save(h backend.Handle, p []byte) error {
func (be *MemoryBackend) Save(h restic.Handle, p []byte) error {
if err := h.Valid(); err != nil {
return err
}
@@ -101,47 +101,47 @@ func (be *MemoryBackend) Save(h backend.Handle, p []byte) error {
be.m.Lock()
defer be.m.Unlock()
if h.Type == backend.Config {
if h.FileType == restic.ConfigFile {
h.Name = ""
}
if _, ok := be.data[entry{h.Type, h.Name}]; ok {
if _, ok := be.data[entry{h.FileType, h.Name}]; ok {
return errors.New("file already exists")
}
debug.Log("MemoryBackend.Save", "save %v bytes at %v", len(p), h)
buf := make([]byte, len(p))
copy(buf, p)
be.data[entry{h.Type, h.Name}] = buf
be.data[entry{h.FileType, h.Name}] = buf
return nil
}
// Stat returns information about a file in the backend.
func (be *MemoryBackend) Stat(h backend.Handle) (backend.BlobInfo, error) {
func (be *MemoryBackend) Stat(h restic.Handle) (restic.FileInfo, error) {
be.m.Lock()
defer be.m.Unlock()
if err := h.Valid(); err != nil {
return backend.BlobInfo{}, err
return restic.FileInfo{}, err
}
if h.Type == backend.Config {
if h.FileType == restic.ConfigFile {
h.Name = ""
}
debug.Log("MemoryBackend.Stat", "stat %v", h)
e, ok := be.data[entry{h.Type, h.Name}]
e, ok := be.data[entry{h.FileType, h.Name}]
if !ok {
return backend.BlobInfo{}, errors.New("no such data")
return restic.FileInfo{}, errors.New("no such data")
}
return backend.BlobInfo{Size: int64(len(e))}, nil
return restic.FileInfo{Size: int64(len(e))}, nil
}
// Remove deletes a file from the backend.
func (be *MemoryBackend) Remove(t backend.Type, name string) error {
func (be *MemoryBackend) Remove(t restic.FileType, name string) error {
be.m.Lock()
defer be.m.Unlock()
@@ -157,7 +157,7 @@ func (be *MemoryBackend) Remove(t backend.Type, name string) error {
}
// List returns a channel which yields entries from the backend.
func (be *MemoryBackend) List(t backend.Type, done <-chan struct{}) <-chan string {
func (be *MemoryBackend) List(t restic.FileType, done <-chan struct{}) <-chan string {
be.m.Lock()
defer be.m.Unlock()
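
Entries are keyed by the (FileType, Name) pair, with the config name normalised to the empty string so every config lookup hits the same slot. A hedged round trip through the API as renamed above:

	// Sketch of in-memory save and load.
	be := mem.New()
	h := restic.Handle{FileType: restic.DataFile, Name: "aa11"}
	if err := be.Save(h, []byte("payload")); err != nil {
		panic(err) // sketch only; a second Save of the same handle would fail
	}

	buf := make([]byte, 7)
	n, err := be.Load(h, buf, 0)
	// expected: n == 7, string(buf[:n]) == "payload", err == nil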

View file

@@ -8,6 +8,7 @@ import (
"net/http"
"net/url"
"path"
"restic"
"strings"
"github.com/pkg/errors"
@@ -18,27 +19,27 @@ import (
const connLimit = 10
// restPath returns the path to the given resource.
func restPath(url *url.URL, h backend.Handle) string {
func restPath(url *url.URL, h restic.Handle) string {
u := *url
var dir string
switch h.Type {
case backend.Config:
switch h.FileType {
case restic.ConfigFile:
dir = ""
h.Name = "config"
case backend.Data:
case restic.DataFile:
dir = backend.Paths.Data
case backend.Snapshot:
case restic.SnapshotFile:
dir = backend.Paths.Snapshots
case backend.Index:
case restic.IndexFile:
dir = backend.Paths.Index
case backend.Lock:
case restic.LockFile:
dir = backend.Paths.Locks
case backend.Key:
case restic.KeyFile:
dir = backend.Paths.Keys
default:
dir = string(h.Type)
dir = string(h.FileType)
}
u.Path = path.Join(url.Path, dir, h.Name)
@@ -71,7 +72,7 @@ func (b *restBackend) Location() string {
// Load returns the data stored in the backend for h at the given offset
// and saves it in p. Load has the same semantics as io.ReaderAt.
func (b *restBackend) Load(h backend.Handle, p []byte, off int64) (n int, err error) {
func (b *restBackend) Load(h restic.Handle, p []byte, off int64) (n int, err error) {
if err := h.Valid(); err != nil {
return 0, err
}
@@ -120,7 +121,7 @@ func (b *restBackend) Load(h backend.Handle, p []byte, off int64) (n int, err er
}
// Save stores data in the backend at the handle.
func (b *restBackend) Save(h backend.Handle, p []byte) (err error) {
func (b *restBackend) Save(h restic.Handle, p []byte) (err error) {
if err := h.Valid(); err != nil {
return err
}
@@ -151,7 +152,7 @@ func (b *restBackend) Save(h backend.Handle, p []byte) (err error) {
}
// Stat returns information about a blob.
func (b *restBackend) Stat(h backend.Handle) (backend.BlobInfo, error) {
func (b *restBackend) Stat(h restic.Handle) (backend.BlobInfo, error) {
if err := h.Valid(); err != nil {
return backend.BlobInfo{}, err
}
@@ -183,8 +184,8 @@ func (b *restBackend) Stat(h backend.Handle) (backend.BlobInfo, error) {
}
// Test returns true if a blob of the given type and name exists in the backend.
func (b *restBackend) Test(t backend.Type, name string) (bool, error) {
_, err := b.Stat(backend.Handle{Type: t, Name: name})
func (b *restBackend) Test(t restic.FileType, name string) (bool, error) {
_, err := b.Stat(restic.Handle{FileType: t, Name: name})
if err != nil {
return false, nil
}
@@ -193,8 +194,8 @@ func (b *restBackend) Test(t backend.Type, name string) (bool, error) {
}
// Remove removes the blob with the given name and type.
func (b *restBackend) Remove(t backend.Type, name string) error {
h := backend.Handle{Type: t, Name: name}
func (b *restBackend) Remove(t restic.FileType, name string) error {
h := restic.Handle{FileType: t, Name: name}
if err := h.Valid(); err != nil {
return err
}
@@ -221,10 +222,10 @@ func (b *restBackend) Remove(t backend.Type, name string) error {
// List returns a channel that yields all names of blobs of type t. A
// goroutine is started for this. If the channel done is closed, sending
// stops.
func (b *restBackend) List(t backend.Type, done <-chan struct{}) <-chan string {
func (b *restBackend) List(t restic.FileType, done <-chan struct{}) <-chan string {
ch := make(chan string)
url := restPath(b.url, backend.Handle{Type: t})
url := restPath(b.url, restic.Handle{FileType: t})
if !strings.HasSuffix(url, "/") {
url += "/"
}

View file

@@ -2,36 +2,36 @@ package rest
import (
"net/url"
"restic/backend"
"restic"
"testing"
)
var restPathTests = []struct {
Handle backend.Handle
Handle restic.Handle
URL *url.URL
Result string
}{
{
URL: parseURL("https://hostname.foo"),
Handle: backend.Handle{
Type: backend.Data,
Name: "foobar",
Handle: restic.Handle{
FileType: restic.DataFile,
Name: "foobar",
},
Result: "https://hostname.foo/data/foobar",
},
{
URL: parseURL("https://hostname.foo:1234/prefix/repo"),
Handle: backend.Handle{
Type: backend.Lock,
Name: "foobar",
Handle: restic.Handle{
FileType: restic.LockFile,
Name: "foobar",
},
Result: "https://hostname.foo:1234/prefix/repo/locks/foobar",
},
{
URL: parseURL("https://hostname.foo:1234/prefix/repo"),
Handle: backend.Handle{
Type: backend.Config,
Name: "foobar",
Handle: restic.Handle{
FileType: restic.ConfigFile,
Name: "foobar",
},
Result: "https://hostname.foo:1234/prefix/repo/config",
},

View file

@@ -4,6 +4,7 @@ import (
"fmt"
"net/url"
"os"
"restic"
"github.com/pkg/errors"
@@ -37,7 +38,7 @@ func init() {
return nil, err
}
exists, err := be.Test(backend.Config, "")
exists, err := be.Test(restic.ConfigFile, "")
if err != nil {
return nil, err
}

View file

@@ -3,13 +3,13 @@ package s3
import (
"bytes"
"io"
"restic"
"strings"
"github.com/pkg/errors"
"github.com/minio/minio-go"
"restic/backend"
"restic/debug"
)
@@ -25,7 +25,7 @@ type s3 struct {
// Open opens the S3 backend at bucket and region. The bucket is created if it
// does not exist yet.
func Open(cfg Config) (backend.Backend, error) {
func Open(cfg Config) (restic.Backend, error) {
debug.Log("s3.Open", "open, config %#v", cfg)
client, err := minio.New(cfg.Endpoint, cfg.KeyID, cfg.Secret, !cfg.UseHTTP)
@@ -53,7 +53,7 @@ func Open(cfg Config) (backend.Backend, error) {
return be, nil
}
func (be *s3) s3path(t backend.Type, name string) string {
func (be *s3) s3path(t restic.FileType, name string) string {
var path string
if be.prefix != "" {
@@ -61,7 +61,7 @@ func (be *s3) s3path(t backend.Type, name string) string {
}
path += string(t)
if t == backend.Config {
if t == restic.ConfigFile {
return path
}
return path + "/" + name
@@ -81,11 +81,11 @@ func (be *s3) Location() string {
// Load returns the data stored in the backend for h at the given offset
// and saves it in p. Load has the same semantics as io.ReaderAt.
func (be s3) Load(h backend.Handle, p []byte, off int64) (n int, err error) {
func (be s3) Load(h restic.Handle, p []byte, off int64) (n int, err error) {
var obj *minio.Object
debug.Log("s3.Load", "%v, offset %v, len %v", h, off, len(p))
path := be.s3path(h.Type, h.Name)
path := be.s3path(h.FileType, h.Name)
<-be.connChan
defer func() {
@@ -153,14 +153,14 @@ func (be s3) Load(h backend.Handle, p []byte, off int64) (n int, err error) {
}
// Save stores data in the backend at the handle.
func (be s3) Save(h backend.Handle, p []byte) (err error) {
func (be s3) Save(h restic.Handle, p []byte) (err error) {
if err := h.Valid(); err != nil {
return err
}
debug.Log("s3.Save", "%v with %d bytes", h, len(p))
path := be.s3path(h.Type, h.Name)
path := be.s3path(h.FileType, h.Name)
// Check key does not already exist
_, err = be.client.StatObject(be.bucketname, path)
@@ -183,16 +183,16 @@ func (be s3) Save(h backend.Handle, p []byte) (err error) {
}
// Stat returns information about a blob.
func (be s3) Stat(h backend.Handle) (bi backend.BlobInfo, err error) {
func (be s3) Stat(h restic.Handle) (bi restic.FileInfo, err error) {
debug.Log("s3.Stat", "%v", h)
path := be.s3path(h.Type, h.Name)
path := be.s3path(h.FileType, h.Name)
var obj *minio.Object
obj, err = be.client.GetObject(be.bucketname, path)
if err != nil {
debug.Log("s3.Stat", "GetObject() err %v", err)
return backend.BlobInfo{}, errors.Wrap(err, "client.GetObject")
return restic.FileInfo{}, errors.Wrap(err, "client.GetObject")
}
// make sure that the object is closed properly.
@@ -206,14 +206,14 @@ func (be s3) Stat(h backend.Handle) (bi backend.BlobInfo, err error) {
fi, err := obj.Stat()
if err != nil {
debug.Log("s3.Stat", "Stat() err %v", err)
return backend.BlobInfo{}, errors.Wrap(err, "Stat")
return restic.FileInfo{}, errors.Wrap(err, "Stat")
}
return backend.BlobInfo{Size: fi.Size}, nil
return restic.FileInfo{Size: fi.Size}, nil
}
// Test returns true if a blob of the given type and name exists in the backend.
func (be *s3) Test(t backend.Type, name string) (bool, error) {
func (be *s3) Test(t restic.FileType, name string) (bool, error) {
found := false
path := be.s3path(t, name)
_, err := be.client.StatObject(be.bucketname, path)
@@ -226,7 +226,7 @@ func (be *s3) Test(t backend.Type, name string) (bool, error) {
}
// Remove removes the blob with the given name and type.
func (be *s3) Remove(t backend.Type, name string) error {
func (be *s3) Remove(t restic.FileType, name string) error {
path := be.s3path(t, name)
err := be.client.RemoveObject(be.bucketname, path)
debug.Log("s3.Remove", "%v %v -> err %v", t, name, err)
@@ -236,7 +236,7 @@ func (be *s3) Remove(t backend.Type, name string) error {
// List returns a channel that yields all names of blobs of type t. A
// goroutine is started for this. If the channel done is closed, sending
// stops.
func (be *s3) List(t backend.Type, done <-chan struct{}) <-chan string {
func (be *s3) List(t restic.FileType, done <-chan struct{}) <-chan string {
debug.Log("s3.List", "listing %v", t)
ch := make(chan string)
@@ -264,11 +264,11 @@ func (be *s3) List(t backend.Type, done <-chan struct{}) <-chan string {
}
// Remove keys for a specified backend type.
func (be *s3) removeKeys(t backend.Type) error {
func (be *s3) removeKeys(t restic.FileType) error {
done := make(chan struct{})
defer close(done)
for key := range be.List(backend.Data, done) {
err := be.Remove(backend.Data, key)
for key := range be.List(restic.DataFile, done) {
err := be.Remove(restic.DataFile, key)
if err != nil {
return err
}
@@ -279,12 +279,12 @@ func (be *s3) removeKeys(t backend.Type) error {
// Delete removes all restic keys in the bucket. It will not remove the bucket itself.
func (be *s3) Delete() error {
alltypes := []backend.Type{
backend.Data,
backend.Key,
backend.Lock,
backend.Snapshot,
backend.Index}
alltypes := []restic.FileType{
restic.DataFile,
restic.KeyFile,
restic.LockFile,
restic.SnapshotFile,
restic.IndexFile}
for _, t := range alltypes {
err := be.removeKeys(t)
@@ -293,7 +293,7 @@ func (be *s3) Delete() error {
}
}
return be.Remove(backend.Config, "")
return be.Remove(restic.ConfigFile, "")
}
// Close does nothing
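
One thing the mechanical rename makes easy to spot: removeKeys above ignores its t parameter and always lists and removes restic.DataFile, so Delete clears the data files five times over and never touches keys, locks, snapshots, or indexes. That looks unintended; a hedged sketch of the fix would simply thread t through:

	// Sketch: use the type parameter instead of the hard-coded
	// restic.DataFile that removeKeys above appears to ignore.
	func (be *s3) removeKeys(t restic.FileType) error {
		done := make(chan struct{})
		defer close(done)
		for key := range be.List(t, done) {
			if err := be.Remove(t, key); err != nil {
				return err
			}
		}
		return nil
	}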

View file

@@ -44,7 +44,7 @@ func init() {
return nil, err
}
exists, err := be.Test(backend.Config, "")
exists, err := be.Test(restic.ConfigFile, "")
if err != nil {
return nil, err
}

View file

@@ -9,6 +9,7 @@ import (
"os"
"os/exec"
"path"
"restic"
"strings"
"time"
@@ -256,11 +257,11 @@ func (r *SFTP) mkdirAll(dir string, mode os.FileMode) error {
}
// Rename temp file to final name according to type and name.
func (r *SFTP) renameFile(oldname string, t backend.Type, name string) error {
func (r *SFTP) renameFile(oldname string, t restic.FileType, name string) error {
filename := r.filename(t, name)
// create directories if necessary
if t == backend.Data {
if t == restic.DataFile {
err := r.mkdirAll(path.Dir(filename), backend.Modes.Dir)
if err != nil {
return err
@@ -293,9 +294,9 @@ func Join(parts ...string) string {
return path.Clean(path.Join(parts...))
}
// Construct path for given backend.Type and name.
func (r *SFTP) filename(t backend.Type, name string) string {
if t == backend.Config {
// Construct path for given restic.Type and name.
func (r *SFTP) filename(t restic.FileType, name string) string {
if t == restic.ConfigFile {
return Join(r.p, "config")
}
@@ -303,21 +304,21 @@ func (r *SFTP) filename(t backend.Type, name string) string {
}
// Construct directory for given backend.Type.
func (r *SFTP) dirname(t backend.Type, name string) string {
func (r *SFTP) dirname(t restic.FileType, name string) string {
var n string
switch t {
case backend.Data:
case restic.DataFile:
n = backend.Paths.Data
if len(name) > 2 {
n = Join(n, name[:2])
}
case backend.Snapshot:
case restic.SnapshotFile:
n = backend.Paths.Snapshots
case backend.Index:
case restic.IndexFile:
n = backend.Paths.Index
case backend.Lock:
case restic.LockFile:
n = backend.Paths.Locks
case backend.Key:
case restic.KeyFile:
n = backend.Paths.Keys
}
return Join(r.p, n)
@@ -325,7 +326,7 @@ func (r *SFTP) dirname(t backend.Type, name string) string {
// Load returns the data stored in the backend for h at the given offset
// and saves it in p. Load has the same semantics as io.ReaderAt.
func (r *SFTP) Load(h backend.Handle, p []byte, off int64) (n int, err error) {
func (r *SFTP) Load(h restic.Handle, p []byte, off int64) (n int, err error) {
debug.Log("sftp.Load", "load %v, %d bytes, offset %v", h, len(p), off)
if err := r.clientError(); err != nil {
return 0, err
@@ -335,7 +336,7 @@ func (r *SFTP) Load(h backend.Handle, p []byte, off int64) (n int, err error) {
return 0, err
}
f, err := r.c.Open(r.filename(h.Type, h.Name))
f, err := r.c.Open(r.filename(h.FileType, h.Name))
if err != nil {
return 0, errors.Wrap(err, "Open")
}
@@ -362,7 +363,7 @@ func (r *SFTP) Load(h backend.Handle, p []byte, off int64) (n int, err error) {
}
// Save stores data in the backend at the handle.
func (r *SFTP) Save(h backend.Handle, p []byte) (err error) {
func (r *SFTP) Save(h restic.Handle, p []byte) (err error) {
debug.Log("sftp.Save", "save %v bytes to %v", h, len(p))
if err := r.clientError(); err != nil {
return err
@@ -393,14 +394,14 @@ func (r *SFTP) Save(h backend.Handle, p []byte) (err error) {
return errors.Wrap(err, "Close")
}
err = r.renameFile(filename, h.Type, h.Name)
err = r.renameFile(filename, h.FileType, h.Name)
debug.Log("sftp.Save", "save %v: rename %v: %v",
h, path.Base(filename), err)
return err
}
// Stat returns information about a blob.
func (r *SFTP) Stat(h backend.Handle) (backend.BlobInfo, error) {
func (r *SFTP) Stat(h restic.Handle) (backend.BlobInfo, error) {
debug.Log("sftp.Stat", "stat %v", h)
if err := r.clientError(); err != nil {
return backend.BlobInfo{}, err
@@ -410,7 +411,7 @@ func (r *SFTP) Stat(h backend.Handle) (backend.BlobInfo, error) {
return backend.BlobInfo{}, err
}
fi, err := r.c.Lstat(r.filename(h.Type, h.Name))
fi, err := r.c.Lstat(r.filename(h.FileType, h.Name))
if err != nil {
return backend.BlobInfo{}, errors.Wrap(err, "Lstat")
}
@@ -419,7 +420,7 @@ func (r *SFTP) Stat(h backend.Handle) (backend.BlobInfo, error) {
}
// Test returns true if a blob of the given type and name exists in the backend.
func (r *SFTP) Test(t backend.Type, name string) (bool, error) {
func (r *SFTP) Test(t restic.FileType, name string) (bool, error) {
debug.Log("sftp.Test", "type %v, name %v", t, name)
if err := r.clientError(); err != nil {
return false, err
@@ -438,7 +439,7 @@ func (r *SFTP) Test(t backend.Type, name string) (bool, error) {
}
// Remove removes the content stored at name.
func (r *SFTP) Remove(t backend.Type, name string) error {
func (r *SFTP) Remove(t restic.FileType, name string) error {
debug.Log("sftp.Remove", "type %v, name %v", t, name)
if err := r.clientError(); err != nil {
return err
@@ -450,14 +451,14 @@ func (r *SFTP) Remove(t backend.Type, name string) error {
// List returns a channel that yields all names of blobs of type t. A
// goroutine is started for this. If the channel done is closed, sending
// stops.
func (r *SFTP) List(t backend.Type, done <-chan struct{}) <-chan string {
func (r *SFTP) List(t restic.FileType, done <-chan struct{}) <-chan string {
debug.Log("sftp.List", "list all %v", t)
ch := make(chan string)
go func() {
defer close(ch)
if t == backend.Data {
if t == restic.DataFile {
// read first level
basedir := r.dirname(t, "")

View file

@@ -7,6 +7,7 @@ import (
"io/ioutil"
"math/rand"
"reflect"
"restic"
"sort"
"testing"
@@ -118,7 +119,7 @@ func TestCreateWithConfig(t testing.TB) {
defer close(t)
// save a config
store(t, b, backend.Config, []byte("test config"))
store(t, b, restic.ConfigFile, []byte("test config"))
// now create the backend again, this must fail
_, err := CreateFn()
@@ -127,7 +128,7 @@ func TestCreateWithConfig(t testing.TB) {
}
// remove config
err = b.Remove(backend.Config, "")
err = b.Remove(restic.ConfigFile, "")
if err != nil {
t.Fatalf("unexpected error removing config: %v", err)
}
@@ -152,12 +153,12 @@ func TestConfig(t testing.TB) {
var testString = "Config"
// create config and read it back
_, err := backend.LoadAll(b, backend.Handle{Type: backend.Config}, nil)
_, err := backend.LoadAll(b, restic.Handle{Type: restic.ConfigFile}, nil)
if err == nil {
t.Fatalf("did not get expected error for non-existing config")
}
err = b.Save(backend.Handle{Type: backend.Config}, []byte(testString))
err = b.Save(restic.Handle{Type: restic.ConfigFile}, []byte(testString))
if err != nil {
t.Fatalf("Save() error: %v", err)
}
@@ -165,7 +166,7 @@ func TestConfig(t testing.TB) {
// try accessing the config with different names, should all return the
// same config
for _, name := range []string{"", "foo", "bar", "0000000000000000000000000000000000000000000000000000000000000000"} {
h := backend.Handle{Type: backend.Config, Name: name}
h := restic.Handle{Type: restic.ConfigFile, Name: name}
buf, err := backend.LoadAll(b, h, nil)
if err != nil {
t.Fatalf("unable to read config with name %q: %v", name, err)
@@ -182,12 +183,12 @@ func TestLoad(t testing.TB) {
b := open(t)
defer close(t)
_, err := b.Load(backend.Handle{}, nil, 0)
_, err := b.Load(restic.Handle{}, nil, 0)
if err == nil {
t.Fatalf("Load() did not return an error for invalid handle")
}
_, err = b.Load(backend.Handle{Type: backend.Data, Name: "foobar"}, nil, 0)
_, err = b.Load(restic.Handle{Type: restic.DataFile, Name: "foobar"}, nil, 0)
if err == nil {
t.Fatalf("Load() did not return an error for non-existing blob")
}
@@ -197,7 +198,7 @@ func TestLoad(t testing.TB) {
data := Random(23, length)
id := backend.Hash(data)
handle := backend.Handle{Type: backend.Data, Name: id.String()}
handle := restic.Handle{Type: restic.DataFile, Name: id.String()}
err = b.Save(handle, data)
if err != nil {
t.Fatalf("Save() error: %v", err)
@@ -309,7 +310,7 @@ func TestLoad(t testing.TB) {
t.Errorf("wrong error returned for larger buffer: want io.ErrUnexpectedEOF, got %#v", err)
}
OK(t, b.Remove(backend.Data, id.String()))
OK(t, b.Remove(restic.DataFile, id.String()))
}
// TestLoadNegativeOffset tests the backend's Load function with negative offsets.
@@ -322,7 +323,7 @@ func TestLoadNegativeOffset(t testing.TB) {
data := Random(23, length)
id := backend.Hash(data)
handle := backend.Handle{Type: backend.Data, Name: id.String()}
handle := restic.Handle{Type: restic.DataFile, Name: id.String()}
err := b.Save(handle, data)
if err != nil {
t.Fatalf("Save() error: %v", err)
@@ -365,7 +366,7 @@ func TestLoadNegativeOffset(t testing.TB) {
}
OK(t, b.Remove(backend.Data, id.String()))
OK(t, b.Remove(restic.DataFile, id.String()))
}
// TestSave tests saving data in the backend.
@@ -380,8 +381,8 @@ func TestSave(t testing.TB) {
// use the first 32 byte as the ID
copy(id[:], data)
h := backend.Handle{
Type: backend.Data,
h := restic.Handle{
Type: restic.DataFile,
Name: fmt.Sprintf("%s-%d", id, i),
}
err := b.Save(h, data)
@@ -429,7 +430,7 @@ func TestSaveFilenames(t testing.TB) {
defer close(t)
for i, test := range filenameTests {
h := backend.Handle{Name: test.name, Type: backend.Data}
h := restic.Handle{Name: test.name, Type: restic.DataFile}
err := b.Save(h, []byte(test.data))
if err != nil {
t.Errorf("test %d failed: Save() returned %v", i, err)
@@ -464,9 +465,9 @@ var testStrings = []struct {
{"4e54d2c721cbdb730f01b10b62dec622962b36966ec685880effa63d71c808f2", "foo/../../baz"},
}
func store(t testing.TB, b backend.Backend, tpe backend.Type, data []byte) {
func store(t testing.TB, b backend.Backend, tpe restic.FileType, data []byte) {
id := backend.Hash(data)
err := b.Save(backend.Handle{Name: id.String(), Type: tpe}, data)
err := b.Save(restic.Handle{Name: id.String(), Type: tpe}, data)
OK(t, err)
}
@@ -483,9 +484,9 @@ func TestBackend(t testing.TB) {
b := open(t)
defer close(t)
for _, tpe := range []backend.Type{
backend.Data, backend.Key, backend.Lock,
backend.Snapshot, backend.Index,
for _, tpe := range []restic.FileType{
restic.DataFile, restic.KeyFile, restic.LockFile,
restic.SnapshotFile, restic.IndexFile,
} {
// detect non-existing files
for _, test := range testStrings {
@@ -498,7 +499,7 @@ func TestBackend(t testing.TB) {
Assert(t, !ret, "blob was found to exist before creating")
// try to stat a not existing blob
h := backend.Handle{Type: tpe, Name: id.String()}
h := restic.Handle{Type: tpe, Name: id.String()}
_, err = b.Stat(h)
Assert(t, err != nil, "blob data could be extracted before creation")
@@ -517,7 +518,7 @@ func TestBackend(t testing.TB) {
store(t, b, tpe, []byte(test.data))
// test Load()
h := backend.Handle{Type: tpe, Name: test.id}
h := restic.Handle{Type: tpe, Name: test.id}
buf, err := backend.LoadAll(b, h, nil)
OK(t, err)
Equals(t, test.data, string(buf))
@@ -538,7 +539,7 @@ func TestBackend(t testing.TB) {
test := testStrings[0]
// create blob
err := b.Save(backend.Handle{Type: tpe, Name: test.id}, []byte(test.data))
err := b.Save(restic.Handle{Type: tpe, Name: test.id}, []byte(test.data))
Assert(t, err != nil, "expected error, got %v", err)
// remove and recreate
@@ -551,7 +552,7 @@ func TestBackend(t testing.TB) {
Assert(t, ok == false, "removed blob still present")
// create blob
err = b.Save(backend.Handle{Type: tpe, Name: test.id}, []byte(test.data))
err = b.Save(restic.Handle{Type: tpe, Name: test.id}, []byte(test.data))
OK(t, err)
// list items

View file

@@ -2,6 +2,7 @@ package backend
import (
"io"
"restic"
"github.com/pkg/errors"
)
@@ -10,7 +11,7 @@ import (
// is resized to accomodate all data in the blob. Errors returned by be.Load()
// are passed on, except io.ErrUnexpectedEOF is silenced and nil returned
// instead, since it means this function is working properly.
func LoadAll(be Backend, h Handle, buf []byte) ([]byte, error) {
func LoadAll(be restic.Backend, h restic.Handle, buf []byte) ([]byte, error) {
fi, err := be.Stat(h)
if err != nil {
return nil, errors.Wrap(err, "Stat")

View file

@@ -20,10 +20,10 @@ func TestLoadAll(t *testing.T) {
data := Random(23+i, rand.Intn(MiB)+500*KiB)
id := backend.Hash(data)
err := b.Save(backend.Handle{Name: id.String(), Type: backend.Data}, data)
err := b.Save(restic.Handle{Name: id.String(), Type: restic.DataFile}, data)
OK(t, err)
buf, err := backend.LoadAll(b, backend.Handle{Type: backend.Data, Name: id.String()}, nil)
buf, err := backend.LoadAll(b, restic.Handle{Type: restic.DataFile, Name: id.String()}, nil)
OK(t, err)
if len(buf) != len(data) {
@@ -45,11 +45,11 @@ func TestLoadSmallBuffer(t *testing.T) {
data := Random(23+i, rand.Intn(MiB)+500*KiB)
id := backend.Hash(data)
err := b.Save(backend.Handle{Name: id.String(), Type: backend.Data}, data)
err := b.Save(restic.Handle{Name: id.String(), Type: restic.DataFile}, data)
OK(t, err)
buf := make([]byte, len(data)-23)
buf, err = backend.LoadAll(b, backend.Handle{Type: backend.Data, Name: id.String()}, buf)
buf, err = backend.LoadAll(b, restic.Handle{Type: restic.DataFile, Name: id.String()}, buf)
OK(t, err)
if len(buf) != len(data) {
@@ -71,11 +71,11 @@ func TestLoadLargeBuffer(t *testing.T) {
data := Random(23+i, rand.Intn(MiB)+500*KiB)
id := backend.Hash(data)
err := b.Save(backend.Handle{Name: id.String(), Type: backend.Data}, data)
err := b.Save(restic.Handle{Name: id.String(), Type: restic.DataFile}, data)
OK(t, err)
buf := make([]byte, len(data)+100)
buf, err = backend.LoadAll(b, backend.Handle{Type: backend.Data, Name: id.String()}, buf)
buf, err = backend.LoadAll(b, restic.Handle{Type: restic.DataFile, Name: id.String()}, buf)
OK(t, err)
if len(buf) != len(data) {

View file

@ -0,0 +1,70 @@
package restic
import "github.com/pkg/errors"
// ErrNoIDPrefixFound is returned by Find() when no ID for the given prefix
// could be found.
var ErrNoIDPrefixFound = errors.New("no ID found")
// ErrMultipleIDMatches is returned by Find() when multiple IDs with the given
// prefix are found.
var ErrMultipleIDMatches = errors.New("multiple IDs with prefix found")
// Find loads the list of all files of type t and searches for names which
// start with prefix. If none is found, an empty string and ErrNoIDPrefixFound
// are returned. If more than one is found, an empty string and
// ErrMultipleIDMatches are returned.
func Find(be Lister, t FileType, prefix string) (string, error) {
done := make(chan struct{})
defer close(done)
match := ""
// TODO: optimize by sorting list etc.
for name := range be.List(t, done) {
if len(name) >= len(prefix) && prefix == name[:len(prefix)] {
if match == "" {
match = name
} else {
return "", ErrMultipleIDMatches
}
}
}
if match != "" {
return match, nil
}
return "", ErrNoIDPrefixFound
}
const minPrefixLength = 8
// PrefixLength returns the number of bytes required so that all prefixes of
// all names of type t are unique.
func PrefixLength(be Lister, t FileType) (int, error) {
done := make(chan struct{})
defer close(done)
// load all IDs of the given type
list := make([]string, 0, 100)
for name := range be.List(t, done) {
list = append(list, name)
}
// select prefixes of length l, test if the last one is the same as the current one
outer:
for l := minPrefixLength; l < IDSize; l++ {
var last string
for _, name := range list {
if last == name[:l] {
continue outer
}
last = name[:l]
}
return l, nil
}
return IDSize, nil
}
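A sketch (not part of this commit) of how a caller is meant to distinguish Find's two sentinel errors; the helper name is hypothetical, and `restic.Lister` is satisfied by any backend, as the repository code later in this diff shows:

```go
// resolveSnapshotPrefix is illustrative only; it shows the intended
// handling of Find's sentinel errors. Assumes imports "fmt" and "restic".
func resolveSnapshotPrefix(be restic.Lister, prefix string) (string, error) {
	name, err := restic.Find(be, restic.SnapshotFile, prefix)
	switch err {
	case nil:
		return name, nil
	case restic.ErrNoIDPrefixFound:
		return "", fmt.Errorf("no snapshot file starts with %q", prefix)
	case restic.ErrMultipleIDMatches:
		return "", fmt.Errorf("prefix %q is ambiguous", prefix)
	default:
		return "", err
	}
}
```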

View file

@ -21,14 +21,14 @@ import (
// A Checker only tests for internal errors within the data structures of the
// repository (e.g. missing blobs), and needs a valid Repository to work on.
type Checker struct {
packs backend.IDSet
blobs backend.IDSet
packs restic.IDSet
blobs restic.IDSet
blobRefs struct {
sync.Mutex
M map[backend.ID]uint
M map[restic.ID]uint
}
indexes map[backend.ID]*repository.Index
orphanedPacks backend.IDs
indexes map[restic.ID]*repository.Index
orphanedPacks restic.IDs
masterIndex *repository.MasterIndex
@ -38,14 +38,14 @@ type Checker struct {
// New returns a new checker which runs on repo.
func New(repo *repository.Repository) *Checker {
c := &Checker{
packs: backend.NewIDSet(),
blobs: backend.NewIDSet(),
packs: restic.NewIDSet(),
blobs: restic.NewIDSet(),
masterIndex: repository.NewMasterIndex(),
indexes: make(map[backend.ID]*repository.Index),
indexes: make(map[restic.ID]*repository.Index),
repo: repo,
}
c.blobRefs.M = make(map[backend.ID]uint)
c.blobRefs.M = make(map[restic.ID]uint)
return c
}
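Pieced together from the checker tests later in this diff, a typical check run looks roughly like this (sketch only; error handling trimmed):

```go
chkr := checker.New(repo)

hints, errs := chkr.LoadIndex()
for _, hint := range hints {
	fmt.Printf("hint: %v\n", hint) // e.g. ErrOldIndexFormat
}
if len(errs) > 0 {
	// the index is damaged; bail out before checking packs
	return
}

done := make(chan struct{})
defer close(done)

// Packs streams its findings over the channel and closes it when done.
errChan := make(chan error)
go chkr.Packs(errChan, done)
for err := range errChan {
	fmt.Printf("pack error: %v\n", err)
}
```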
@ -54,8 +54,8 @@ const defaultParallelism = 40
// ErrDuplicatePacks is returned when a pack is found in more than one index.
type ErrDuplicatePacks struct {
PackID backend.ID
Indexes backend.IDSet
PackID restic.ID
Indexes restic.IDSet
}
func (e ErrDuplicatePacks) Error() string {
@ -65,7 +65,7 @@ func (e ErrDuplicatePacks) Error() string {
// ErrOldIndexFormat is returned when an index with the old format is
// found.
type ErrOldIndexFormat struct {
backend.ID
restic.ID
}
func (err ErrOldIndexFormat) Error() string {
@ -82,7 +82,7 @@ func (c *Checker) LoadIndex() (hints []error, errs []error) {
indexCh := make(chan indexRes)
worker := func(id backend.ID, done <-chan struct{}) error {
worker := func(id restic.ID, done <-chan struct{}) error {
debug.Log("LoadIndex", "worker got index %v", id)
idx, err := repository.LoadIndexWithDecoder(c.repo, id, repository.DecodeIndex)
if errors.Cause(err) == repository.ErrOldIndexFormat {
@ -108,7 +108,7 @@ func (c *Checker) LoadIndex() (hints []error, errs []error) {
go func() {
defer close(indexCh)
debug.Log("LoadIndex", "start loading indexes in parallel")
perr = repository.FilesInParallel(c.repo.Backend(), backend.Index, defaultParallelism,
perr = repository.FilesInParallel(c.repo.Backend(), restic.IndexFile, defaultParallelism,
repository.ParallelWorkFuncParseID(worker))
debug.Log("LoadIndex", "loading indexes finished, error: %v", perr)
}()
@ -121,11 +121,11 @@ func (c *Checker) LoadIndex() (hints []error, errs []error) {
return hints, errs
}
packToIndex := make(map[backend.ID]backend.IDSet)
packToIndex := make(map[restic.ID]restic.IDSet)
for res := range indexCh {
debug.Log("LoadIndex", "process index %v", res.ID)
idxID, err := backend.ParseID(res.ID)
idxID, err := restic.ParseID(res.ID)
if err != nil {
errs = append(errs, errors.Errorf("unable to parse as index ID: %v", res.ID))
continue
@ -143,7 +143,7 @@ func (c *Checker) LoadIndex() (hints []error, errs []error) {
cnt++
if _, ok := packToIndex[blob.PackID]; !ok {
packToIndex[blob.PackID] = backend.NewIDSet()
packToIndex[blob.PackID] = restic.NewIDSet()
}
packToIndex[blob.PackID].Insert(idxID)
}
@ -171,7 +171,7 @@ func (c *Checker) LoadIndex() (hints []error, errs []error) {
// PackError describes an error with a specific pack.
type PackError struct {
ID backend.ID
ID restic.ID
Orphaned bool
Err error
}
@ -180,14 +180,14 @@ func (e PackError) Error() string {
return "pack " + e.ID.String() + ": " + e.Err.Error()
}
func packIDTester(repo *repository.Repository, inChan <-chan backend.ID, errChan chan<- error, wg *sync.WaitGroup, done <-chan struct{}) {
func packIDTester(repo *repository.Repository, inChan <-chan restic.ID, errChan chan<- error, wg *sync.WaitGroup, done <-chan struct{}) {
debug.Log("Checker.testPackID", "worker start")
defer debug.Log("Checker.testPackID", "worker done")
defer wg.Done()
for id := range inChan {
ok, err := repo.Backend().Test(backend.Data, id.String())
ok, err := repo.Backend().Test(restic.DataFile, id.String())
if err != nil {
err = PackError{ID: id, Err: err}
} else {
@ -218,11 +218,11 @@ func (c *Checker) Packs(errChan chan<- error, done <-chan struct{}) {
defer close(errChan)
debug.Log("Checker.Packs", "checking for %d packs", len(c.packs))
seenPacks := backend.NewIDSet()
seenPacks := restic.NewIDSet()
var workerWG sync.WaitGroup
IDChan := make(chan backend.ID)
IDChan := make(chan restic.ID)
for i := 0; i < defaultParallelism; i++ {
workerWG.Add(1)
go packIDTester(c.repo, IDChan, errChan, &workerWG, done)
@ -238,7 +238,7 @@ func (c *Checker) Packs(errChan chan<- error, done <-chan struct{}) {
workerWG.Wait()
debug.Log("Checker.Packs", "workers terminated")
for id := range c.repo.List(backend.Data, done) {
for id := range c.repo.List(restic.DataFile, done) {
debug.Log("Checker.Packs", "check data blob %v", id.Str())
if !seenPacks.Has(id) {
c.orphanedPacks = append(c.orphanedPacks, id)
@ -253,8 +253,8 @@ func (c *Checker) Packs(errChan chan<- error, done <-chan struct{}) {
// Error is an error that occurred while checking a repository.
type Error struct {
TreeID backend.ID
BlobID backend.ID
TreeID restic.ID
BlobID restic.ID
Err error
}
@ -273,25 +273,25 @@ func (e Error) Error() string {
return e.Err.Error()
}
func loadTreeFromSnapshot(repo *repository.Repository, id backend.ID) (backend.ID, error) {
func loadTreeFromSnapshot(repo *repository.Repository, id restic.ID) (restic.ID, error) {
sn, err := restic.LoadSnapshot(repo, id)
if err != nil {
debug.Log("Checker.loadTreeFromSnapshot", "error loading snapshot %v: %v", id.Str(), err)
return backend.ID{}, err
return restic.ID{}, err
}
if sn.Tree == nil {
debug.Log("Checker.loadTreeFromSnapshot", "snapshot %v has no tree", id.Str())
return backend.ID{}, errors.Errorf("snapshot %v has no tree", id)
return restic.ID{}, errors.Errorf("snapshot %v has no tree", id)
}
return *sn.Tree, nil
}
// loadSnapshotTreeIDs loads all snapshots from backend and returns the tree IDs.
func loadSnapshotTreeIDs(repo *repository.Repository) (backend.IDs, []error) {
func loadSnapshotTreeIDs(repo *repository.Repository) (restic.IDs, []error) {
var trees struct {
IDs backend.IDs
IDs restic.IDs
sync.Mutex
}
@ -301,7 +301,7 @@ func loadSnapshotTreeIDs(repo *repository.Repository) (backend.IDs, []error) {
}
snapshotWorker := func(strID string, done <-chan struct{}) error {
id, err := backend.ParseID(strID)
id, err := restic.ParseID(strID)
if err != nil {
return err
}
@ -324,7 +324,7 @@ func loadSnapshotTreeIDs(repo *repository.Repository) (backend.IDs, []error) {
return nil
}
err := repository.FilesInParallel(repo.Backend(), backend.Snapshot, defaultParallelism, snapshotWorker)
err := repository.FilesInParallel(repo.Backend(), restic.SnapshotFile, defaultParallelism, snapshotWorker)
if err != nil {
errs.errs = append(errs.errs, err)
}
@ -334,7 +334,7 @@ func loadSnapshotTreeIDs(repo *repository.Repository) (backend.IDs, []error) {
// TreeError collects several errors that occurred while processing a tree.
type TreeError struct {
ID backend.ID
ID restic.ID
Errors []error
}
@ -343,14 +343,14 @@ func (e TreeError) Error() string {
}
type treeJob struct {
backend.ID
restic.ID
error
*restic.Tree
}
// loadTreeWorker loads trees from repo and sends them to out.
func loadTreeWorker(repo *repository.Repository,
in <-chan backend.ID, out chan<- treeJob,
in <-chan restic.ID, out chan<- treeJob,
done <-chan struct{}, wg *sync.WaitGroup) {
defer func() {
@ -454,7 +454,7 @@ func (c *Checker) checkTreeWorker(in <-chan treeJob, out chan<- error, done <-ch
}
}
func filterTrees(backlog backend.IDs, loaderChan chan<- backend.ID, in <-chan treeJob, out chan<- treeJob, done <-chan struct{}) {
func filterTrees(backlog restic.IDs, loaderChan chan<- restic.ID, in <-chan treeJob, out chan<- treeJob, done <-chan struct{}) {
defer func() {
debug.Log("checker.filterTrees", "closing output channels")
close(loaderChan)
@ -466,7 +466,7 @@ func filterTrees(backlog backend.IDs, loaderChan chan<- backend.ID, in <-chan tr
outCh = out
loadCh = loaderChan
job treeJob
nextTreeID backend.ID
nextTreeID restic.ID
outstandingLoadTreeJobs = 0
)
@ -559,7 +559,7 @@ func (c *Checker) Structure(errChan chan<- error, done <-chan struct{}) {
}
}
treeIDChan := make(chan backend.ID)
treeIDChan := make(chan restic.ID)
treeJobChan1 := make(chan treeJob)
treeJobChan2 := make(chan treeJob)
@ -575,10 +575,10 @@ func (c *Checker) Structure(errChan chan<- error, done <-chan struct{}) {
wg.Wait()
}
func (c *Checker) checkTree(id backend.ID, tree *restic.Tree) (errs []error) {
func (c *Checker) checkTree(id restic.ID, tree *restic.Tree) (errs []error) {
debug.Log("Checker.checkTree", "checking tree %v", id.Str())
var blobs []backend.ID
var blobs []restic.ID
for _, node := range tree.Nodes {
switch node.FileType {
@ -634,7 +634,7 @@ func (c *Checker) checkTree(id backend.ID, tree *restic.Tree) (errs []error) {
}
// UnusedBlobs returns all blobs that have never been referenced.
func (c *Checker) UnusedBlobs() (blobs backend.IDs) {
func (c *Checker) UnusedBlobs() (blobs restic.IDs) {
c.blobRefs.Lock()
defer c.blobRefs.Unlock()
@ -650,7 +650,7 @@ func (c *Checker) UnusedBlobs() (blobs backend.IDs) {
}
// OrphanedPacks returns a slice of unused packs (only available after Packs() was run).
func (c *Checker) OrphanedPacks() backend.IDs {
func (c *Checker) OrphanedPacks() restic.IDs {
return c.orphanedPacks
}
@ -660,15 +660,15 @@ func (c *Checker) CountPacks() uint64 {
}
// checkPack reads a pack and checks the integrity of all blobs.
func checkPack(r *repository.Repository, id backend.ID) error {
func checkPack(r *repository.Repository, id restic.ID) error {
debug.Log("Checker.checkPack", "checking pack %v", id.Str())
h := backend.Handle{Type: backend.Data, Name: id.String()}
h := restic.Handle{FileType: restic.DataFile, Name: id.String()}
buf, err := backend.LoadAll(r.Backend(), h, nil)
if err != nil {
return err
}
hash := backend.Hash(buf)
hash := restic.Hash(buf)
if !hash.Equal(id) {
debug.Log("Checker.checkPack", "Pack ID does not match, want %v, got %v", id.Str(), hash.Str())
return errors.Errorf("Pack ID does not match, want %v, got %v", id.Str(), hash.Str())
@ -691,7 +691,7 @@ func checkPack(r *repository.Repository, id backend.ID) error {
continue
}
hash := backend.Hash(plainBuf)
hash := restic.Hash(plainBuf)
if !hash.Equal(blob.ID) {
debug.Log("Checker.checkPack", " Blob ID does not match, want %v, got %v", blob.ID.Str(), hash.Str())
errs = append(errs, errors.Errorf("Blob ID does not match, want %v, got %v", blob.ID.Str(), hash.Str()))
@ -713,10 +713,10 @@ func (c *Checker) ReadData(p *restic.Progress, errChan chan<- error, done <-chan
p.Start()
defer p.Done()
worker := func(wg *sync.WaitGroup, in <-chan backend.ID) {
worker := func(wg *sync.WaitGroup, in <-chan restic.ID) {
defer wg.Done()
for {
var id backend.ID
var id restic.ID
var ok bool
select {
@ -742,7 +742,7 @@ func (c *Checker) ReadData(p *restic.Progress, errChan chan<- error, done <-chan
}
}
ch := c.repo.List(backend.Data, done)
ch := c.repo.List(restic.DataFile, done)
var wg sync.WaitGroup
for i := 0; i < defaultParallelism; i++ {

View file

@ -17,7 +17,7 @@ import (
var checkerTestData = filepath.Join("testdata", "checker-test-repo.tar.gz")
func list(repo *repository.Repository, t backend.Type) (IDs []string) {
func list(repo *repository.Repository, t restic.FileType) (IDs []string) {
done := make(chan struct{})
defer close(done)
@ -83,7 +83,7 @@ func TestMissingPack(t *testing.T) {
repo := OpenLocalRepo(t, repodir)
packID := "657f7fb64f6a854fff6fe9279998ee09034901eded4e6db9bcee0e59745bbce6"
OK(t, repo.Backend().Remove(backend.Data, packID))
OK(t, repo.Backend().Remove(restic.DataFile, packID))
chkr := checker.New(repo)
hints, errs := chkr.LoadIndex()
@ -115,7 +115,7 @@ func TestUnreferencedPack(t *testing.T) {
// index 3f1a only references pack 60e0
indexID := "3f1abfcb79c6f7d0a3be517d2c83c8562fba64ef2c8e9a3544b4edaf8b5e3b44"
packID := "60e0438dcb978ec6860cc1f8c43da648170ee9129af8f650f876bad19f8f788e"
OK(t, repo.Backend().Remove(backend.Index, indexID))
OK(t, repo.Backend().Remove(restic.IndexFile, indexID))
chkr := checker.New(repo)
hints, errs := chkr.LoadIndex()
@ -145,7 +145,7 @@ func TestUnreferencedBlobs(t *testing.T) {
repo := OpenLocalRepo(t, repodir)
snID := "51d249d28815200d59e4be7b3f21a157b864dc343353df9d8e498220c2499b02"
OK(t, repo.Backend().Remove(backend.Snapshot, snID))
OK(t, repo.Backend().Remove(restic.SnapshotFile, snID))
unusedBlobsBySnapshot := backend.IDs{
ParseID("58c748bbe2929fdf30c73262bd8313fe828f8925b05d1d4a87fe109082acb849"),
@ -216,7 +216,7 @@ type errorBackend struct {
ProduceErrors bool
}
func (b errorBackend) Load(h backend.Handle, p []byte, off int64) (int, error) {
func (b errorBackend) Load(h restic.Handle, p []byte, off int64) (int, error) {
fmt.Printf("load %v\n", h)
n, err := b.Backend.Load(h, p, off)

View file

@ -1,11 +1,10 @@
package repository
package restic
import (
"crypto/rand"
"crypto/sha256"
"encoding/hex"
"io"
"restic"
"testing"
"github.com/pkg/errors"
@ -31,12 +30,12 @@ const RepoVersion = 1
// JSONUnpackedSaver saves unpacked JSON.
type JSONUnpackedSaver interface {
SaveJSONUnpacked(restic.FileType, interface{}) (restic.ID, error)
SaveJSONUnpacked(FileType, interface{}) (ID, error)
}
// JSONUnpackedLoader loads unpacked JSON.
type JSONUnpackedLoader interface {
LoadJSONUnpacked(restic.FileType, restic.ID, interface{}) error
LoadJSONUnpacked(FileType, ID, interface{}) error
}
// CreateConfig creates a config file with a randomly selected polynomial and
@ -87,7 +86,7 @@ func LoadConfig(r JSONUnpackedLoader) (Config, error) {
cfg Config
)
err := r.LoadJSONUnpacked(restic.ConfigFile, restic.ID{}, &cfg)
err := r.LoadJSONUnpacked(ConfigFile, ID{}, &cfg)
if err != nil {
return Config{}, err
}

View file

@ -1,10 +1,9 @@
package repository_test
package restic_test
import (
"restic"
"testing"
"restic/repository"
. "restic/test"
)
@ -21,18 +20,18 @@ func (l loader) LoadJSONUnpacked(t restic.FileType, id restic.ID, arg interface{
}
func TestConfig(t *testing.T) {
resultConfig := repository.Config{}
resultConfig := restic.Config{}
save := func(tpe restic.FileType, arg interface{}) (restic.ID, error) {
Assert(t, tpe == restic.ConfigFile,
"wrong backend type: got %v, wanted %v",
tpe, restic.ConfigFile)
cfg := arg.(repository.Config)
cfg := arg.(restic.Config)
resultConfig = cfg
return restic.ID{}, nil
}
cfg1, err := repository.CreateConfig()
cfg1, err := restic.CreateConfig()
OK(t, err)
_, err = saver(save).SaveJSONUnpacked(restic.ConfigFile, cfg1)
@ -42,12 +41,12 @@ func TestConfig(t *testing.T) {
"wrong backend type: got %v, wanted %v",
tpe, restic.ConfigFile)
cfg := arg.(*repository.Config)
cfg := arg.(*restic.Config)
*cfg = resultConfig
return nil
}
cfg2, err := repository.LoadConfig(loader(load))
cfg2, err := restic.LoadConfig(loader(load))
OK(t, err)
Assert(t, cfg1 == cfg2,

View file

@ -15,18 +15,18 @@ import (
"restic/repository"
)
func loadIDSet(t testing.TB, filename string) BlobSet {
func loadIDSet(t testing.TB, filename string) restic.BlobSet {
f, err := os.Open(filename)
if err != nil {
t.Logf("unable to open golden file %v: %v", filename, err)
return NewBlobSet()
return restic.NewBlobSet()
}
sc := bufio.NewScanner(f)
blobs := NewBlobSet()
blobs := restic.NewBlobSet()
for sc.Scan() {
var h Handle
var h restic.BlobHandle
err := json.Unmarshal([]byte(sc.Text()), &h)
if err != nil {
t.Errorf("file %v contained invalid blob: %#v", filename, err)
@ -43,14 +43,14 @@ func loadIDSet(t testing.TB, filename string) BlobSet {
return blobs
}
func saveIDSet(t testing.TB, filename string, s BlobSet) {
func saveIDSet(t testing.TB, filename string, s restic.BlobSet) {
f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE, 0644)
if err != nil {
t.Fatalf("unable to update golden file %v: %v", filename, err)
return
}
var hs Handles
var hs restic.BlobHandles
for h := range s {
hs = append(hs, h)
}
@ -83,16 +83,16 @@ func TestFindUsedBlobs(t *testing.T) {
repo, cleanup := repository.TestRepository(t)
defer cleanup()
var snapshots []*Snapshot
var snapshots []*restic.Snapshot
for i := 0; i < findTestSnapshots; i++ {
sn := TestCreateSnapshot(t, repo, findTestTime.Add(time.Duration(i)*time.Second), findTestDepth, 0)
sn := restic.TestCreateSnapshot(t, repo, findTestTime.Add(time.Duration(i)*time.Second), findTestDepth, 0)
t.Logf("snapshot %v saved, tree %v", sn.ID().Str(), sn.Tree.Str())
snapshots = append(snapshots, sn)
}
for i, sn := range snapshots {
usedBlobs := NewBlobSet()
err := restic.FindUsedBlobs(repo, *sn.Tree, usedBlobs, NewBlobSet())
usedBlobs := restic.NewBlobSet()
err := restic.FindUsedBlobs(repo, *sn.Tree, usedBlobs, restic.NewBlobSet())
if err != nil {
t.Errorf("FindUsedBlobs returned error: %v", err)
continue
@ -121,13 +121,13 @@ func BenchmarkFindUsedBlobs(b *testing.B) {
repo, cleanup := repository.TestRepository(b)
defer cleanup()
sn := TestCreateSnapshot(b, repo, findTestTime, findTestDepth, 0)
sn := restic.TestCreateSnapshot(b, repo, findTestTime, findTestDepth, 0)
b.ResetTimer()
for i := 0; i < b.N; i++ {
seen := NewBlobSet()
blobs := NewBlobSet()
seen := restic.NewBlobSet()
blobs := restic.NewBlobSet()
err := restic.FindUsedBlobs(repo, *sn.Tree, blobs, seen)
if err != nil {
b.Error(err)

View file

@ -65,7 +65,7 @@ func (sn *SnapshotsDir) updateCache(ctx context.Context) error {
sn.Lock()
defer sn.Unlock()
for id := range sn.repo.List(backend.Snapshot, ctx.Done()) {
for id := range sn.repo.List(restic.SnapshotFile, ctx.Done()) {
snapshot, err := restic.LoadSnapshot(sn.repo, id)
if err != nil {
return err

View file

@ -103,7 +103,7 @@ func loadIndexJSON(repo types.Repository, id backend.ID) (*indexJSON, error) {
debug.Log("index.loadIndexJSON", "process index %v\n", id.Str())
var idx indexJSON
err := repo.LoadJSONUnpacked(backend.Index, id, &idx)
err := repo.LoadJSONUnpacked(restic.IndexFile, id, &idx)
if err != nil {
return nil, err
}
@ -126,7 +126,7 @@ func Load(repo types.Repository, p *restic.Progress) (*Index, error) {
index := newIndex()
for id := range repo.List(backend.Index, done) {
for id := range repo.List(restic.IndexFile, done) {
p.Report(restic.Stat{Blobs: 1})
debug.Log("index.Load", "Load index %v", id.Str())
@ -335,5 +335,5 @@ func Save(repo types.Repository, packs map[backend.ID][]pack.Blob, supersedes ba
idx.Packs = append(idx.Packs, p)
}
return repo.SaveJSONUnpacked(backend.Index, idx)
return repo.SaveJSONUnpacked(restic.IndexFile, idx)
}

View file

@ -28,7 +28,7 @@ func createFilledRepo(t testing.TB, snapshots int, dup float32) (*repository.Rep
}
func validateIndex(t testing.TB, repo *repository.Repository, idx *Index) {
for id := range repo.List(backend.Data, nil) {
for id := range repo.List(restic.DataFile, nil) {
if _, ok := idx.Packs[id]; !ok {
t.Errorf("pack %v missing from index", id.Str())
}
@ -197,7 +197,7 @@ func TestIndexSave(t *testing.T) {
for id := range idx.IndexIDs {
t.Logf("remove index %v", id.Str())
err = repo.Backend().Remove(backend.Index, id.String())
err = repo.Backend().Remove(restic.IndexFile, id.String())
if err != nil {
t.Errorf("error removing index %v: %v", id, err)
}
@ -235,7 +235,7 @@ func TestIndexAddRemovePack(t *testing.T) {
done := make(chan struct{})
defer close(done)
packID := <-repo.List(backend.Data, done)
packID := <-repo.List(restic.DataFile, done)
t.Logf("selected pack %v", packID.Str())
@ -298,7 +298,7 @@ func TestIndexLoadDocReference(t *testing.T) {
repo, cleanup := repository.TestRepository(t)
defer cleanup()
id, err := repo.SaveUnpacked(backend.Index, docExample)
id, err := repo.SaveUnpacked(restic.IndexFile, docExample)
if err != nil {
t.Fatalf("SaveUnpacked() returned error %v", err)
}

View file

@ -11,7 +11,7 @@ type Backend struct {
CloseFn func() error
LoadFn func(h restic.Handle, p []byte, off int64) (int, error)
SaveFn func(h restic.Handle, p []byte) error
StatFn func(h restic.Handle) (restic.BlobInfo, error)
StatFn func(h restic.Handle) (restic.FileInfo, error)
ListFn func(restic.FileType, <-chan struct{}) <-chan string
RemoveFn func(restic.FileType, string) error
TestFn func(restic.FileType, string) (bool, error)
@ -56,9 +56,9 @@ func (m *Backend) Save(h restic.Handle, p []byte) error {
}
// Stat an object in the backend.
func (m *Backend) Stat(h restic.Handle) (restic.BlobInfo, error) {
func (m *Backend) Stat(h restic.Handle) (restic.FileInfo, error) {
if m.StatFn == nil {
return restic.BlobInfo{}, errors.New("not implemented")
return restic.FileInfo{}, errors.New("not implemented")
}
return m.StatFn(h)

View file

@ -42,7 +42,7 @@ type Node struct {
tree *Tree
path string
Path string `json:"-"`
err error
}
@ -67,7 +67,7 @@ func (node Node) Tree() *Tree {
func NodeFromFileInfo(path string, fi os.FileInfo) (*Node, error) {
mask := os.ModePerm | os.ModeType | os.ModeSetuid | os.ModeSetgid | os.ModeSticky
node := &Node{
path: path,
Path: path,
Name: fi.Name(),
Mode: fi.Mode() & mask,
ModTime: fi.ModTime(),
@ -370,15 +370,15 @@ func (node Node) sameContent(other Node) bool {
return true
}
func (node *Node) isNewer(path string, fi os.FileInfo) bool {
func (node *Node) IsNewer(path string, fi os.FileInfo) bool {
if node.FileType != "file" {
debug.Log("node.isNewer", "node %v is newer: not file", path)
debug.Log("node.IsNewer", "node %v is newer: not file", path)
return true
}
tpe := nodeTypeFromFileInfo(fi)
if node.Name != fi.Name() || node.FileType != tpe {
debug.Log("node.isNewer", "node %v is newer: name or type changed", path)
debug.Log("node.IsNewer", "node %v is newer: name or type changed", path)
return true
}
@ -388,7 +388,7 @@ func (node *Node) isNewer(path string, fi os.FileInfo) bool {
if !ok {
if node.ModTime != fi.ModTime() ||
node.Size != size {
debug.Log("node.isNewer", "node %v is newer: timestamp or size changed", path)
debug.Log("node.IsNewer", "node %v is newer: timestamp or size changed", path)
return true
}
return false
@ -400,11 +400,11 @@ func (node *Node) isNewer(path string, fi os.FileInfo) bool {
node.ChangeTime != changeTime(extendedStat) ||
node.Inode != uint64(inode) ||
node.Size != size {
debug.Log("node.isNewer", "node %v is newer: timestamp, size or inode changed", path)
debug.Log("node.IsNewer", "node %v is newer: timestamp, size or inode changed", path)
return true
}
debug.Log("node.isNewer", "node %v is not newer", path)
debug.Log("node.IsNewer", "node %v is not newer", path)
return false
}

View file

@ -9,7 +9,6 @@ import (
"time"
"restic"
"restic/backend"
. "restic/test"
)
@ -75,7 +74,7 @@ var nodeTests = []restic.Node{
restic.Node{
Name: "testFile",
FileType: "file",
Content: []backend.ID{},
Content: restic.IDs{},
UID: uint32(os.Getuid()),
GID: uint32(os.Getgid()),
Mode: 0604,
@ -86,7 +85,7 @@ var nodeTests = []restic.Node{
restic.Node{
Name: "testSuidFile",
FileType: "file",
Content: []backend.ID{},
Content: restic.IDs{},
UID: uint32(os.Getuid()),
GID: uint32(os.Getgid()),
Mode: 0755 | os.ModeSetuid,
@ -97,7 +96,7 @@ var nodeTests = []restic.Node{
restic.Node{
Name: "testSuidFile2",
FileType: "file",
Content: []backend.ID{},
Content: restic.IDs{},
UID: uint32(os.Getuid()),
GID: uint32(os.Getgid()),
Mode: 0755 | os.ModeSetgid,
@ -108,7 +107,7 @@ var nodeTests = []restic.Node{
restic.Node{
Name: "testSticky",
FileType: "file",
Content: []backend.ID{},
Content: restic.IDs{},
UID: uint32(os.Getuid()),
GID: uint32(os.Getgid()),
Mode: 0755 | os.ModeSticky,

View file

@ -10,26 +10,12 @@ import (
"github.com/pkg/errors"
"restic/backend"
"restic/crypto"
)
// Blob is a blob within a pack.
type Blob struct {
Type restic.BlobType
Length uint
ID restic.ID
Offset uint
}
func (b Blob) String() string {
return fmt.Sprintf("<Blob %v/%v len %v, off %v>",
b.ID.Str(), b.Type, b.Length, b.Offset)
}
// Packer is used to create a new Pack.
type Packer struct {
blobs []Blob
blobs []restic.Blob
bytes uint
k *crypto.Key
@ -53,7 +39,7 @@ func (p *Packer) Add(t restic.BlobType, id restic.ID, data []byte) (int, error)
p.m.Lock()
defer p.m.Unlock()
c := Blob{Type: t, ID: id}
c := restic.Blob{Type: t, ID: id}
n, err := p.wr.Write(data)
c.Length = uint(n)
@ -64,13 +50,13 @@ func (p *Packer) Add(t restic.BlobType, id restic.ID, data []byte) (int, error)
return n, errors.Wrap(err, "Write")
}
var entrySize = uint(binary.Size(restic.BlobType(0)) + binary.Size(uint32(0)) + backend.IDSize)
var entrySize = uint(binary.Size(restic.BlobType(0)) + binary.Size(uint32(0)) + restic.IDSize)
// headerEntry is used with encoding/binary to read and write header entries
type headerEntry struct {
Type uint8
Length uint32
ID [backend.IDSize]byte
ID [restic.IDSize]byte
}
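Given the headerEntry layout, each blob costs a fixed number of header bytes. A quick check, assuming BlobType is backed by a uint8 (as the headerEntry.Type field suggests) and restic.IDSize is 32 bytes (SHA-256):

```go
// Not part of this commit: verifies the entrySize arithmetic above.
size := binary.Size(uint8(0)) + // Type
	binary.Size(uint32(0)) + // Length
	32 // restic.IDSize, assuming SHA-256 IDs
fmt.Println(size) // 37 bytes per header entry
```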
// Finalize writes the header for all added blobs and finalizes the pack.
@ -167,7 +153,7 @@ func (p *Packer) Count() int {
}
// Blobs returns the slice of blobs that have been written.
func (p *Packer) Blobs() []Blob {
func (p *Packer) Blobs() []restic.Blob {
p.m.Lock()
defer p.m.Unlock()
@ -233,7 +219,7 @@ func readHeader(rd io.ReaderAt, size int64) ([]byte, error) {
}
// List returns the list of entries found in a pack file.
func List(k *crypto.Key, rd io.ReaderAt, size int64) (entries []Blob, err error) {
func List(k *crypto.Key, rd io.ReaderAt, size int64) (entries []restic.Blob, err error) {
buf, err := readHeader(rd, size)
if err != nil {
return nil, err
@ -258,7 +244,7 @@ func List(k *crypto.Key, rd io.ReaderAt, size int64) (entries []Blob, err error)
return nil, errors.Wrap(err, "binary.Read")
}
entry := Blob{
entry := restic.Blob{
Length: uint(e.Length),
ID: e.ID,
Offset: pos,

View file

@ -7,6 +7,7 @@ import (
"encoding/binary"
"encoding/json"
"io"
"restic"
"testing"
"restic/backend"
@ -126,9 +127,9 @@ func TestUnpackReadSeeker(t *testing.T) {
b := mem.New()
id := backend.Hash(packData)
handle := backend.Handle{Type: backend.Data, Name: id.String()}
handle := restic.Handle{Type: restic.DataFile, Name: id.String()}
OK(t, b.Save(handle, packData))
verifyBlobs(t, bufs, k, backend.ReaderAt(b, handle), packSize)
verifyBlobs(t, bufs, k, restic.ReaderAt(b, handle), packSize)
}
func TestShortPack(t *testing.T) {
@ -139,7 +140,7 @@ func TestShortPack(t *testing.T) {
b := mem.New()
id := backend.Hash(packData)
handle := backend.Handle{Type: backend.Data, Name: id.String()}
handle := restic.Handle{Type: restic.DataFile, Name: id.String()}
OK(t, b.Save(handle, packData))
verifyBlobs(t, bufs, k, backend.ReaderAt(b, handle), packSize)
}

View file

@ -1,4 +1,4 @@
package backend
package restic
import (
"io"

View file

@ -1,7 +1,5 @@
package restic
import "github.com/restic/chunker"
// Repository stores data in a backend. It provides high-level functions and
// transparently encrypts/decrypts data.
type Repository interface {
@ -9,12 +7,13 @@ type Repository interface {
// Backend returns the backend used by the repository
Backend() Backend
SetIndex(interface{})
SetIndex(Index)
Index() Index
SaveFullIndex() error
SaveJSON(BlobType, interface{}) (ID, error)
SaveUnpacked(FileType, []byte) (ID, error)
Config() Config
@ -34,13 +33,13 @@ type Repository interface {
Flush() error
}
// Lister allows listing files in a backend.
type Lister interface {
List(FileType, <-chan struct{}) <-chan string
}
// Index keeps track of which blobs are stored within files.
type Index interface {
Has(ID, BlobType) bool
Lookup(ID, BlobType) ([]PackedBlob, error)
}
// Config stores information about the repository.
type Config interface {
ChunkerPolynomial() chunker.Pol
}
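A hypothetical in-memory Lister, handy for exercising Find and PrefixLength without a real backend; this type is illustrative and does not exist in the commit:

```go
type memLister struct {
	names map[restic.FileType][]string
}

// List sends the stored names for t until done is closed, matching the
// Lister contract above.
func (l memLister) List(t restic.FileType, done <-chan struct{}) <-chan string {
	ch := make(chan string)
	go func() {
		defer close(ch)
		for _, name := range l.names[t] {
			select {
			case ch <- name:
			case <-done:
				return
			}
		}
	}()
	return ch
}
```

Note also that Config is no longer an interface here: callers now read `repo.Config().ChunkerPolynomial` as a plain field, as the chunker call sites later in this diff show.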

View file

@ -3,7 +3,6 @@ package repository
import (
"bytes"
"encoding/json"
"fmt"
"io"
"restic"
"sync"
@ -40,7 +39,7 @@ func NewIndex() *Index {
}
}
func (idx *Index) store(blob PackedBlob) {
func (idx *Index) store(blob restic.PackedBlob) {
newEntry := indexEntry{
packID: blob.PackID,
offset: blob.Offset,
@ -97,7 +96,7 @@ var IndexFull = func(idx *Index) bool {
// Store remembers the id and pack in the index. An existing entry will be
// silently overwritten.
func (idx *Index) Store(blob PackedBlob) {
func (idx *Index) Store(blob restic.PackedBlob) {
idx.m.Lock()
defer idx.m.Unlock()
@ -110,25 +109,27 @@ func (idx *Index) Store(blob PackedBlob) {
idx.store(blob)
}
// Lookup queries the index for the blob ID and returns a PackedBlob.
func (idx *Index) Lookup(id restic.ID, tpe restic.BlobType) (blobs []PackedBlob, err error) {
// Lookup queries the index for the blob ID and returns a restic.PackedBlob.
func (idx *Index) Lookup(id restic.ID, tpe restic.BlobType) (blobs []restic.PackedBlob, err error) {
idx.m.Lock()
defer idx.m.Unlock()
h := restic.BlobHandle{ID: id, Type: tpe}
if packs, ok := idx.pack[h]; ok {
blobs = make([]PackedBlob, 0, len(packs))
blobs = make([]restic.PackedBlob, 0, len(packs))
for _, p := range packs {
debug.Log("Index.Lookup", "id %v found in pack %v at %d, length %d",
id.Str(), p.packID.Str(), p.offset, p.length)
blob := PackedBlob{
Type: tpe,
Length: p.length,
ID: id,
Offset: p.offset,
blob := restic.PackedBlob{
Blob: restic.Blob{
Type: tpe,
Length: p.length,
ID: id,
Offset: p.offset,
},
PackID: p.packID,
}
@ -143,18 +144,20 @@ func (idx *Index) Lookup(id restic.ID, tpe restic.BlobType) (blobs []PackedBlob,
}
// ListPack returns a list of blobs contained in a pack.
func (idx *Index) ListPack(id restic.ID) (list []PackedBlob) {
func (idx *Index) ListPack(id restic.ID) (list []restic.PackedBlob) {
idx.m.Lock()
defer idx.m.Unlock()
for h, packList := range idx.pack {
for _, entry := range packList {
if entry.packID == id {
list = append(list, PackedBlob{
ID: h.ID,
Type: h.Type,
Length: entry.length,
Offset: entry.offset,
list = append(list, restic.PackedBlob{
Blob: restic.Blob{
ID: h.ID,
Type: h.Type,
Length: entry.length,
Offset: entry.offset,
},
PackID: entry.packID,
})
}
@ -182,7 +185,7 @@ func (idx *Index) LookupSize(id restic.ID, tpe restic.BlobType) (cleartextLength
return 0, err
}
return blobs[0].PlaintextLength(), nil
return blobs[0].Length - crypto.Extension, nil
}
// Supersedes returns the list of indexes this index supersedes, if any.
@ -204,32 +207,13 @@ func (idx *Index) AddToSupersedes(ids ...restic.ID) error {
return nil
}
// PackedBlob is a blob already saved within a pack.
type PackedBlob struct {
Type restic.BlobType
Length uint
ID restic.ID
Offset uint
PackID restic.ID
}
func (pb PackedBlob) String() string {
return fmt.Sprintf("<PackedBlob %v type %v in pack %v: len %v, offset %v",
pb.ID.Str(), pb.Type, pb.PackID.Str(), pb.Length, pb.Offset)
}
// PlaintextLength returns the number of bytes the blob's plaintext occupies.
func (pb PackedBlob) PlaintextLength() uint {
return pb.Length - crypto.Extension
}
// Each returns a channel that yields all blobs known to the index. If done is
// closed, the background goroutine terminates. This blocks any modification of
// the index.
func (idx *Index) Each(done chan struct{}) <-chan PackedBlob {
func (idx *Index) Each(done chan struct{}) <-chan restic.PackedBlob {
idx.m.Lock()
ch := make(chan PackedBlob)
ch := make(chan restic.PackedBlob)
go func() {
defer idx.m.Unlock()
@ -242,11 +226,13 @@ func (idx *Index) Each(done chan struct{}) <-chan PackedBlob {
select {
case <-done:
return
case ch <- PackedBlob{
ID: h.ID,
Type: h.Type,
Offset: blob.offset,
Length: blob.length,
case ch <- restic.PackedBlob{
Blob: restic.Blob{
ID: h.ID,
Type: h.Type,
Offset: blob.offset,
Length: blob.length,
},
PackID: blob.packID,
}:
}
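A sketch of consuming Each, assuming `idx` is the *Index shown above; because PackedBlob now embeds restic.Blob, the blob fields are promoted and stay accessible directly:

```go
done := make(chan struct{})
defer close(done)

for pb := range idx.Each(done) {
	// ID, Offset and Length come from the embedded restic.Blob.
	fmt.Printf("blob %v in pack %v (offset %d, length %d)\n",
		pb.ID.Str(), pb.PackID.Str(), pb.Offset, pb.Length)
}
```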
@ -497,11 +483,13 @@ func DecodeIndex(rd io.Reader) (idx *Index, err error) {
idx = NewIndex()
for _, pack := range idxJSON.Packs {
for _, blob := range pack.Blobs {
idx.store(PackedBlob{
Type: blob.Type,
ID: blob.ID,
Offset: blob.Offset,
Length: blob.Length,
idx.store(restic.PackedBlob{
Blob: restic.Blob{
Type: blob.Type,
ID: blob.ID,
Offset: blob.Offset,
Length: blob.Length,
},
PackID: pack.ID,
})
}
@ -528,12 +516,14 @@ func DecodeOldIndex(rd io.Reader) (idx *Index, err error) {
idx = NewIndex()
for _, pack := range list {
for _, blob := range pack.Blobs {
idx.store(PackedBlob{
Type: blob.Type,
ID: blob.ID,
idx.store(restic.PackedBlob{
Blob: restic.Blob{
Type: blob.Type,
ID: blob.ID,
Offset: blob.Offset,
Length: blob.Length,
},
PackID: pack.ID,
Offset: blob.Offset,
Length: blob.Length,
})
}
}
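For reference, the literals above imply the following shape for the relocated types; these definitions live in the restic package and are inferred here, not quoted from it:

```go
// Inferred from this diff, not part of it.
type Blob struct {
	Type   BlobType
	Length uint
	ID     ID
	Offset uint
}

type PackedBlob struct {
	Blob      // blob fields are promoted, e.g. pb.ID, pb.Offset
	PackID ID
}
```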

View file

@ -33,11 +33,8 @@ func RebuildIndex(repo restic.Repository) error {
res := job.Result.(list.Result)
for _, entry := range res.Entries() {
pb := PackedBlob{
ID: entry.ID,
Type: entry.Type,
Length: entry.Length,
Offset: entry.Offset,
pb := restic.PackedBlob{
Blob: entry,
PackID: res.PackID(),
}
idx.Store(pb)

View file

@ -116,7 +116,7 @@ func SearchKey(s *Repository, password string, maxKeys int) (*Key, error) {
// try at most maxKeysForSearch keys in repo
done := make(chan struct{})
defer close(done)
for name := range s.Backend().List(backend.Key, done) {
for name := range s.Backend().List(restic.KeyFile, done) {
if maxKeys > 0 && checked > maxKeys {
return nil, ErrMaxKeysReached
}
@ -226,8 +226,8 @@ func AddKey(s *Repository, password string, template *crypto.Key) (*Key, error)
// store in repository and return
h := restic.Handle{
Type: backend.Key,
Name: restic.Hash(buf).String(),
FileType: restic.KeyFile,
Name: restic.Hash(buf).String(),
}
err = s.be.Save(h, buf)

View file

@ -7,7 +7,6 @@ import (
"github.com/pkg/errors"
"restic/debug"
"restic/pack"
)
// MasterIndex is a collection of indexes and IDs of chunks that are in the process of being saved.
@ -22,7 +21,7 @@ func NewMasterIndex() *MasterIndex {
}
// Lookup queries all known Indexes for the ID and returns the first match.
func (mi *MasterIndex) Lookup(id restic.ID, tpe restic.BlobType) (blobs []PackedBlob, err error) {
func (mi *MasterIndex) Lookup(id restic.ID, tpe restic.BlobType) (blobs []restic.PackedBlob, err error) {
mi.idxMutex.RLock()
defer mi.idxMutex.RUnlock()
@ -58,7 +57,7 @@ func (mi *MasterIndex) LookupSize(id restic.ID, tpe restic.BlobType) (uint, erro
// ListPack returns the list of blobs in a pack. The first matching index is
// returned, or nil if no index contains information about the pack id.
func (mi *MasterIndex) ListPack(id restic.ID) (list []PackedBlob) {
func (mi *MasterIndex) ListPack(id restic.ID) (list []restic.PackedBlob) {
mi.idxMutex.RLock()
defer mi.idxMutex.RUnlock()

View file

@ -115,7 +115,7 @@ func (r *Repository) savePacker(p *pack.Packer) error {
}
id := restic.Hash(data)
h := restic.Handle{Type: restic.DataFile, Name: id.String()}
h := restic.Handle{FileType: restic.DataFile, Name: id.String()}
err = r.be.Save(h, data)
if err != nil {
@ -133,12 +133,14 @@ func (r *Repository) savePacker(p *pack.Packer) error {
// update blobs in the index
for _, b := range p.Blobs() {
debug.Log("Repo.savePacker", " updating blob %v to pack %v", b.ID.Str(), id.Str())
r.idx.Current().Store(PackedBlob{
Type: b.Type,
ID: b.ID,
r.idx.Current().Store(restic.PackedBlob{
Blob: restic.Blob{
Type: b.Type,
ID: b.ID,
Offset: b.Offset,
Length: uint(b.Length),
},
PackID: id,
Offset: b.Offset,
Length: uint(b.Length),
})
}

View file

@ -4,7 +4,6 @@ import (
"restic"
"sync"
"restic/backend"
"restic/debug"
)
@ -22,14 +21,14 @@ func closeIfOpen(ch chan struct{}) {
// processing stops. If done is closed, the function should return.
type ParallelWorkFunc func(id string, done <-chan struct{}) error
// ParallelIDWorkFunc gets one backend.ID to work on. If an error is returned,
// ParallelIDWorkFunc gets one restic.ID to work on. If an error is returned,
// processing stops. If done is closed, the function should return.
type ParallelIDWorkFunc func(id restic.ID, done <-chan struct{}) error
// FilesInParallel runs n workers of f in parallel, on the IDs that
// repo.List(t) yields. If f returns an error, the process is aborted and the
// first error is returned.
func FilesInParallel(repo backend.Lister, t restic.FileType, n uint, f ParallelWorkFunc) error {
func FilesInParallel(repo restic.Lister, t restic.FileType, n uint, f ParallelWorkFunc) error {
done := make(chan struct{})
defer closeIfOpen(done)
@ -76,12 +75,12 @@ func FilesInParallel(repo backend.Lister, t restic.FileType, n uint, f ParallelW
return nil
}
// ParallelWorkFuncParseID converts a function that takes a backend.ID to a
// function that takes a string. Filenames that do not parse as a backend.ID
// ParallelWorkFuncParseID converts a function that takes a restic.ID to a
// function that takes a string. Filenames that do not parse as a restic.ID
// are ignored.
func ParallelWorkFuncParseID(f ParallelIDWorkFunc) ParallelWorkFunc {
return func(s string, done <-chan struct{}) error {
id, err := backend.ParseID(s)
id, err := restic.ParseID(s)
if err != nil {
debug.Log("repository.ParallelWorkFuncParseID", "invalid ID %q: %v", id, err)
return err
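As used by the checker earlier in this diff, a bounded-parallelism traversal of all snapshot files might look like this (sketch; the worker body is hypothetical):

```go
worker := func(id restic.ID, done <-chan struct{}) error {
	fmt.Printf("processing %v\n", id.Str())
	return nil
}

// Eight workers; file names that parse as IDs are handed to worker,
// everything else is rejected by ParallelWorkFuncParseID.
err := repository.FilesInParallel(repo.Backend(), restic.SnapshotFile, 8,
	repository.ParallelWorkFuncParseID(worker))
```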

View file

@ -15,13 +15,13 @@ import (
// these packs. Each pack is loaded and the blobs listed in keepBlobs are saved
// into a new pack. Afterwards, the packs are removed. This operation requires
// an exclusive lock on the repo.
func Repack(repo *Repository, packs restic.IDSet, keepBlobs pack.BlobSet) (err error) {
func Repack(repo *Repository, packs restic.IDSet, keepBlobs restic.BlobSet) (err error) {
debug.Log("Repack", "repacking %d packs while keeping %d blobs", len(packs), len(keepBlobs))
buf := make([]byte, 0, maxPackSize)
for packID := range packs {
// load the complete pack
h := restic.Handle{Type: restic.DataFile, Name: packID.String()}
h := restic.Handle{FileType: restic.DataFile, Name: packID.String()}
l, err := repo.Backend().Load(h, buf[:cap(buf)], 0)
if errors.Cause(err) == io.ErrUnexpectedEOF {
@ -43,7 +43,7 @@ func Repack(repo *Repository, packs restic.IDSet, keepBlobs pack.BlobSet) (err e
debug.Log("Repack", "processing pack %v, blobs: %v", packID.Str(), len(blobs))
var plaintext []byte
for _, entry := range blobs {
h := pack.Handle{ID: entry.ID, Type: entry.Type}
h := restic.BlobHandle{ID: entry.ID, Type: entry.Type}
if !keepBlobs.Has(h) {
continue
}
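A hypothetical call site for Repack, with both sets assumed to be filled from index data beforehand:

```go
rewritePacks := restic.NewIDSet() // packs to rewrite and then remove
keepBlobs := restic.NewBlobSet()  // blobs that must survive the repack
// ... populate both sets from the index ...

if err := repository.Repack(repo, rewritePacks, keepBlobs); err != nil {
	return err
}
```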

View file

@ -19,7 +19,7 @@ import (
// Repository is used to access a repository in a backend.
type Repository struct {
be restic.Backend
Config Config
cfg restic.Config
key *crypto.Key
keyName string
idx *MasterIndex
@ -38,17 +38,21 @@ func New(be restic.Backend) *Repository {
return repo
}
func (r *Repository) Config() restic.Config {
return r.cfg
}
// Find loads the list of all files of type t and searches for names which start
// with prefix. If none is found, an empty string and ErrNoIDPrefixFound are
// returned. If more than one is found, an empty string and ErrMultipleIDMatches
// are returned.
func (r *Repository) Find(t restic.FileType, prefix string) (string, error) {
return backend.Find(r.be, t, prefix)
return restic.Find(r.be, t, prefix)
}
// PrefixLength returns the number of bytes required so that all prefixes of
// all IDs of type t are unique.
func (r *Repository) PrefixLength(t restic.FileType) (int, error) {
return backend.PrefixLength(r.be, t)
return restic.PrefixLength(r.be, t)
}
// LoadAndDecrypt loads and decrypts data identified by t and id from the
@ -56,7 +60,7 @@ func (r *Repository) PrefixLength(t restic.FileType) (int, error) {
func (r *Repository) LoadAndDecrypt(t restic.FileType, id restic.ID) ([]byte, error) {
debug.Log("Repo.Load", "load %v with id %v", t, id.Str())
h := restic.Handle{Type: t, Name: id.String()}
h := restic.Handle{FileType: t, Name: id.String()}
buf, err := backend.LoadAll(r.be, h, nil)
if err != nil {
debug.Log("Repo.Load", "error loading %v: %v", id.Str(), err)
@ -112,7 +116,7 @@ func (r *Repository) LoadBlob(id restic.ID, t restic.BlobType, plaintextBuf []by
}
// load blob from pack
h := restic.Handle{Type: restic.DataFile, Name: blob.PackID.String()}
h := restic.Handle{FileType: restic.DataFile, Name: blob.PackID.String()}
ciphertextBuf := make([]byte, blob.Length)
n, err := r.be.Load(h, ciphertextBuf, int64(blob.Offset))
if err != nil {
@ -274,7 +278,7 @@ func (r *Repository) SaveUnpacked(t restic.FileType, p []byte) (id restic.ID, er
}
id = restic.Hash(ciphertext)
h := restic.Handle{Type: t, Name: id.String()}
h := restic.Handle{FileType: t, Name: id.String()}
err = r.be.Save(h, ciphertext)
if err != nil {
@ -309,13 +313,13 @@ func (r *Repository) Backend() restic.Backend {
}
// Index returns the currently used MasterIndex.
func (r *Repository) Index() *MasterIndex {
func (r *Repository) Index() restic.Index {
return r.idx
}
// SetIndex instructs the repository to use the given index.
func (r *Repository) SetIndex(i *MasterIndex) {
r.idx = i
func (r *Repository) SetIndex(i restic.Index) {
r.idx = i.(*MasterIndex)
}
// SaveIndex saves an index in the repository.
@ -423,7 +427,7 @@ func (r *Repository) SearchKey(password string, maxKeys int) error {
r.key = key.master
r.packerManager.key = key.master
r.keyName = key.Name()
r.Config, err = LoadConfig(r)
r.cfg, err = restic.LoadConfig(r)
return err
}
@ -438,7 +442,7 @@ func (r *Repository) Init(password string) error {
return errors.New("repository master key and config already initialized")
}
cfg, err := CreateConfig()
cfg, err := restic.CreateConfig()
if err != nil {
return err
}
@ -448,7 +452,7 @@ func (r *Repository) Init(password string) error {
// init creates a new master key with the supplied password and uses it to save
// the config into the repo.
func (r *Repository) init(password string, cfg Config) error {
func (r *Repository) init(password string, cfg restic.Config) error {
key, err := createMasterKey(r, password)
if err != nil {
return err
@ -457,7 +461,7 @@ func (r *Repository) init(password string, cfg Config) error {
r.key = key.master
r.packerManager.key = key.master
r.keyName = key.Name()
r.Config = cfg
r.cfg = cfg
_, err = r.SaveJSONUnpacked(restic.ConfigFile, cfg)
return err
}
@ -528,7 +532,7 @@ func (r *Repository) list(t restic.FileType, done <-chan struct{}, out chan<- re
// input channel closed, we're done
return
}
id, err = backend.ParseID(strID)
id, err = restic.ParseID(strID)
if err != nil {
// ignore invalid IDs
continue
@ -554,15 +558,15 @@ func (r *Repository) List(t restic.FileType, done <-chan struct{}) <-chan restic
// ListPack returns the list of blobs saved in the pack id and the length of
// the file as stored in the backend.
func (r *Repository) ListPack(id restic.ID) ([]pack.Blob, int64, error) {
h := restic.Handle{Type: restic.DataFile, Name: id.String()}
func (r *Repository) ListPack(id restic.ID) ([]restic.Blob, int64, error) {
h := restic.Handle{FileType: restic.DataFile, Name: id.String()}
blobInfo, err := r.Backend().Stat(h)
if err != nil {
return nil, 0, err
}
blobs, err := pack.List(r.Key(), backend.ReaderAt(r.Backend(), h), blobInfo.Size)
blobs, err := pack.List(r.Key(), restic.ReaderAt(r.Backend(), h), blobInfo.Size)
if err != nil {
return nil, 0, err
}

View file

@ -47,7 +47,7 @@ func TestRepositoryWithBackend(t testing.TB, be restic.Backend) (r *Repository,
r = New(be)
cfg := TestCreateConfig(t, testChunkerPol)
cfg := restic.TestCreateConfig(t, testChunkerPol)
err := r.init(TestPassword, cfg)
if err != nil {
t.Fatalf("TestRepository(): initialize repo failed: %v", err)

View file

@ -1,20 +1,18 @@
package restic
package restic_test
import (
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"path/filepath"
"reflect"
"restic"
"sort"
"testing"
"time"
)
var updateGoldenFiles = flag.Bool("update", false, "update golden files in testdata/")
func parseTime(s string) time.Time {
func parseTimeUTC(s string) time.Time {
t, err := time.Parse("2006-01-02 15:04:05", s)
if err != nil {
panic(err)
@ -23,29 +21,29 @@ func parseTime(s string) time.Time {
return t.UTC()
}
var testFilterSnapshots = Snapshots{
{Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-01 01:02:03"), Paths: []string{"/usr", "/bin"}},
{Hostname: "bar", Username: "testuser", Time: parseTime("2016-01-01 01:03:03"), Paths: []string{"/usr", "/bin"}},
{Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-03 07:02:03"), Paths: []string{"/usr", "/bin"}},
{Hostname: "bar", Username: "testuser", Time: parseTime("2016-01-01 07:08:03"), Paths: []string{"/usr", "/bin"}},
{Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-04 10:23:03"), Paths: []string{"/usr", "/bin"}},
{Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-04 11:23:03"), Paths: []string{"/usr", "/bin"}},
{Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-04 12:23:03"), Paths: []string{"/usr", "/bin"}},
{Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-04 12:24:03"), Paths: []string{"/usr", "/bin"}},
{Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-04 12:28:03"), Paths: []string{"/usr", "/bin"}},
{Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-04 12:30:03"), Paths: []string{"/usr", "/bin"}},
{Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-04 16:23:03"), Paths: []string{"/usr", "/bin"}},
{Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-05 09:02:03"), Paths: []string{"/usr", "/bin"}},
{Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-06 08:02:03"), Paths: []string{"/usr", "/bin"}},
{Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-07 10:02:03"), Paths: []string{"/usr", "/bin"}},
{Hostname: "foo", Username: "root", Time: parseTime("2016-01-08 20:02:03"), Paths: []string{"/usr", "/sbin"}},
{Hostname: "foo", Username: "root", Time: parseTime("2016-01-09 21:02:03"), Paths: []string{"/usr", "/sbin"}},
{Hostname: "bar", Username: "root", Time: parseTime("2016-01-12 21:02:03"), Paths: []string{"/usr", "/sbin"}},
{Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-12 21:08:03"), Paths: []string{"/usr", "/bin"}},
{Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-18 12:02:03"), Paths: []string{"/usr", "/bin"}},
var testFilterSnapshots = restic.Snapshots{
{Hostname: "foo", Username: "testuser", Time: parseTimeUTC("2016-01-01 01:02:03"), Paths: []string{"/usr", "/bin"}},
{Hostname: "bar", Username: "testuser", Time: parseTimeUTC("2016-01-01 01:03:03"), Paths: []string{"/usr", "/bin"}},
{Hostname: "foo", Username: "testuser", Time: parseTimeUTC("2016-01-03 07:02:03"), Paths: []string{"/usr", "/bin"}},
{Hostname: "bar", Username: "testuser", Time: parseTimeUTC("2016-01-01 07:08:03"), Paths: []string{"/usr", "/bin"}},
{Hostname: "foo", Username: "testuser", Time: parseTimeUTC("2016-01-04 10:23:03"), Paths: []string{"/usr", "/bin"}},
{Hostname: "foo", Username: "testuser", Time: parseTimeUTC("2016-01-04 11:23:03"), Paths: []string{"/usr", "/bin"}},
{Hostname: "foo", Username: "testuser", Time: parseTimeUTC("2016-01-04 12:23:03"), Paths: []string{"/usr", "/bin"}},
{Hostname: "foo", Username: "testuser", Time: parseTimeUTC("2016-01-04 12:24:03"), Paths: []string{"/usr", "/bin"}},
{Hostname: "foo", Username: "testuser", Time: parseTimeUTC("2016-01-04 12:28:03"), Paths: []string{"/usr", "/bin"}},
{Hostname: "foo", Username: "testuser", Time: parseTimeUTC("2016-01-04 12:30:03"), Paths: []string{"/usr", "/bin"}},
{Hostname: "foo", Username: "testuser", Time: parseTimeUTC("2016-01-04 16:23:03"), Paths: []string{"/usr", "/bin"}},
{Hostname: "foo", Username: "testuser", Time: parseTimeUTC("2016-01-05 09:02:03"), Paths: []string{"/usr", "/bin"}},
{Hostname: "foo", Username: "testuser", Time: parseTimeUTC("2016-01-06 08:02:03"), Paths: []string{"/usr", "/bin"}},
{Hostname: "foo", Username: "testuser", Time: parseTimeUTC("2016-01-07 10:02:03"), Paths: []string{"/usr", "/bin"}},
{Hostname: "foo", Username: "root", Time: parseTimeUTC("2016-01-08 20:02:03"), Paths: []string{"/usr", "/sbin"}},
{Hostname: "foo", Username: "root", Time: parseTimeUTC("2016-01-09 21:02:03"), Paths: []string{"/usr", "/sbin"}},
{Hostname: "bar", Username: "root", Time: parseTimeUTC("2016-01-12 21:02:03"), Paths: []string{"/usr", "/sbin"}},
{Hostname: "foo", Username: "testuser", Time: parseTimeUTC("2016-01-12 21:08:03"), Paths: []string{"/usr", "/bin"}},
{Hostname: "foo", Username: "testuser", Time: parseTimeUTC("2016-01-18 12:02:03"), Paths: []string{"/usr", "/bin"}},
}
var filterTests = []SnapshotFilter{
var filterTests = []restic.SnapshotFilter{
{Hostname: "foo"},
{Username: "root"},
{Hostname: "foo", Username: "root"},
@ -58,7 +56,7 @@ func TestFilterSnapshots(t *testing.T) {
sort.Sort(testFilterSnapshots)
for i, f := range filterTests {
res := FilterSnapshots(testFilterSnapshots, f)
res := restic.FilterSnapshots(testFilterSnapshots, f)
goldenFilename := filepath.Join("testdata", fmt.Sprintf("filter_snapshots_%d", i))
@ -79,7 +77,7 @@ func TestFilterSnapshots(t *testing.T) {
continue
}
var want Snapshots
var want restic.Snapshots
err = json.Unmarshal(buf, &want)
if !reflect.DeepEqual(res, want) {
@ -89,109 +87,109 @@ func TestFilterSnapshots(t *testing.T) {
}
}
var testExpireSnapshots = Snapshots{
{Time: parseTime("2014-09-01 10:20:30")},
{Time: parseTime("2014-09-02 10:20:30")},
{Time: parseTime("2014-09-05 10:20:30")},
{Time: parseTime("2014-09-06 10:20:30")},
{Time: parseTime("2014-09-08 10:20:30")},
{Time: parseTime("2014-09-09 10:20:30")},
{Time: parseTime("2014-09-10 10:20:30")},
{Time: parseTime("2014-09-11 10:20:30")},
{Time: parseTime("2014-09-20 10:20:30")},
{Time: parseTime("2014-09-22 10:20:30")},
{Time: parseTime("2014-08-08 10:20:30")},
{Time: parseTime("2014-08-10 10:20:30")},
{Time: parseTime("2014-08-12 10:20:30")},
{Time: parseTime("2014-08-13 10:20:30")},
{Time: parseTime("2014-08-13 10:20:30")},
{Time: parseTime("2014-08-15 10:20:30")},
{Time: parseTime("2014-08-18 10:20:30")},
{Time: parseTime("2014-08-20 10:20:30")},
{Time: parseTime("2014-08-21 10:20:30")},
{Time: parseTime("2014-08-22 10:20:30")},
{Time: parseTime("2014-10-01 10:20:30")},
{Time: parseTime("2014-10-02 10:20:30")},
{Time: parseTime("2014-10-05 10:20:30")},
{Time: parseTime("2014-10-06 10:20:30")},
{Time: parseTime("2014-10-08 10:20:30")},
{Time: parseTime("2014-10-09 10:20:30")},
{Time: parseTime("2014-10-10 10:20:30")},
{Time: parseTime("2014-10-11 10:20:30")},
{Time: parseTime("2014-10-20 10:20:30")},
{Time: parseTime("2014-10-22 10:20:30")},
{Time: parseTime("2014-11-08 10:20:30")},
{Time: parseTime("2014-11-10 10:20:30")},
{Time: parseTime("2014-11-12 10:20:30")},
{Time: parseTime("2014-11-13 10:20:30")},
{Time: parseTime("2014-11-13 10:20:30")},
{Time: parseTime("2014-11-15 10:20:30")},
{Time: parseTime("2014-11-18 10:20:30")},
{Time: parseTime("2014-11-20 10:20:30")},
{Time: parseTime("2014-11-21 10:20:30")},
{Time: parseTime("2014-11-22 10:20:30")},
{Time: parseTime("2015-09-01 10:20:30")},
{Time: parseTime("2015-09-02 10:20:30")},
{Time: parseTime("2015-09-05 10:20:30")},
{Time: parseTime("2015-09-06 10:20:30")},
{Time: parseTime("2015-09-08 10:20:30")},
{Time: parseTime("2015-09-09 10:20:30")},
{Time: parseTime("2015-09-10 10:20:30")},
{Time: parseTime("2015-09-11 10:20:30")},
{Time: parseTime("2015-09-20 10:20:30")},
{Time: parseTime("2015-09-22 10:20:30")},
{Time: parseTime("2015-08-08 10:20:30")},
{Time: parseTime("2015-08-10 10:20:30")},
{Time: parseTime("2015-08-12 10:20:30")},
{Time: parseTime("2015-08-13 10:20:30")},
{Time: parseTime("2015-08-13 10:20:30")},
{Time: parseTime("2015-08-15 10:20:30")},
{Time: parseTime("2015-08-18 10:20:30")},
{Time: parseTime("2015-08-20 10:20:30")},
{Time: parseTime("2015-08-21 10:20:30")},
{Time: parseTime("2015-08-22 10:20:30")},
{Time: parseTime("2015-10-01 10:20:30")},
{Time: parseTime("2015-10-02 10:20:30")},
{Time: parseTime("2015-10-05 10:20:30")},
{Time: parseTime("2015-10-06 10:20:30")},
{Time: parseTime("2015-10-08 10:20:30")},
{Time: parseTime("2015-10-09 10:20:30")},
{Time: parseTime("2015-10-10 10:20:30")},
{Time: parseTime("2015-10-11 10:20:30")},
{Time: parseTime("2015-10-20 10:20:30")},
{Time: parseTime("2015-10-22 10:20:30")},
{Time: parseTime("2015-11-08 10:20:30")},
{Time: parseTime("2015-11-10 10:20:30")},
{Time: parseTime("2015-11-12 10:20:30")},
{Time: parseTime("2015-11-13 10:20:30")},
{Time: parseTime("2015-11-13 10:20:30")},
{Time: parseTime("2015-11-15 10:20:30")},
{Time: parseTime("2015-11-18 10:20:30")},
{Time: parseTime("2015-11-20 10:20:30")},
{Time: parseTime("2015-11-21 10:20:30")},
{Time: parseTime("2015-11-22 10:20:30")},
{Time: parseTime("2016-01-01 01:02:03")},
{Time: parseTime("2016-01-01 01:03:03")},
{Time: parseTime("2016-01-01 07:08:03")},
{Time: parseTime("2016-01-03 07:02:03")},
{Time: parseTime("2016-01-04 10:23:03")},
{Time: parseTime("2016-01-04 11:23:03")},
{Time: parseTime("2016-01-04 12:23:03")},
{Time: parseTime("2016-01-04 12:24:03")},
{Time: parseTime("2016-01-04 12:28:03")},
{Time: parseTime("2016-01-04 12:30:03")},
{Time: parseTime("2016-01-04 16:23:03")},
{Time: parseTime("2016-01-05 09:02:03")},
{Time: parseTime("2016-01-06 08:02:03")},
{Time: parseTime("2016-01-07 10:02:03")},
{Time: parseTime("2016-01-08 20:02:03")},
{Time: parseTime("2016-01-09 21:02:03")},
{Time: parseTime("2016-01-12 21:02:03")},
{Time: parseTime("2016-01-12 21:08:03")},
{Time: parseTime("2016-01-18 12:02:03")},
var testExpireSnapshots = restic.Snapshots{
{Time: parseTimeUTC("2014-09-01 10:20:30")},
{Time: parseTimeUTC("2014-09-02 10:20:30")},
{Time: parseTimeUTC("2014-09-05 10:20:30")},
{Time: parseTimeUTC("2014-09-06 10:20:30")},
{Time: parseTimeUTC("2014-09-08 10:20:30")},
{Time: parseTimeUTC("2014-09-09 10:20:30")},
{Time: parseTimeUTC("2014-09-10 10:20:30")},
{Time: parseTimeUTC("2014-09-11 10:20:30")},
{Time: parseTimeUTC("2014-09-20 10:20:30")},
{Time: parseTimeUTC("2014-09-22 10:20:30")},
{Time: parseTimeUTC("2014-08-08 10:20:30")},
{Time: parseTimeUTC("2014-08-10 10:20:30")},
{Time: parseTimeUTC("2014-08-12 10:20:30")},
{Time: parseTimeUTC("2014-08-13 10:20:30")},
{Time: parseTimeUTC("2014-08-13 10:20:30")},
{Time: parseTimeUTC("2014-08-15 10:20:30")},
{Time: parseTimeUTC("2014-08-18 10:20:30")},
{Time: parseTimeUTC("2014-08-20 10:20:30")},
{Time: parseTimeUTC("2014-08-21 10:20:30")},
{Time: parseTimeUTC("2014-08-22 10:20:30")},
{Time: parseTimeUTC("2014-10-01 10:20:30")},
{Time: parseTimeUTC("2014-10-02 10:20:30")},
{Time: parseTimeUTC("2014-10-05 10:20:30")},
{Time: parseTimeUTC("2014-10-06 10:20:30")},
{Time: parseTimeUTC("2014-10-08 10:20:30")},
{Time: parseTimeUTC("2014-10-09 10:20:30")},
{Time: parseTimeUTC("2014-10-10 10:20:30")},
{Time: parseTimeUTC("2014-10-11 10:20:30")},
{Time: parseTimeUTC("2014-10-20 10:20:30")},
{Time: parseTimeUTC("2014-10-22 10:20:30")},
{Time: parseTimeUTC("2014-11-08 10:20:30")},
{Time: parseTimeUTC("2014-11-10 10:20:30")},
{Time: parseTimeUTC("2014-11-12 10:20:30")},
{Time: parseTimeUTC("2014-11-13 10:20:30")},
{Time: parseTimeUTC("2014-11-13 10:20:30")},
{Time: parseTimeUTC("2014-11-15 10:20:30")},
{Time: parseTimeUTC("2014-11-18 10:20:30")},
{Time: parseTimeUTC("2014-11-20 10:20:30")},
{Time: parseTimeUTC("2014-11-21 10:20:30")},
{Time: parseTimeUTC("2014-11-22 10:20:30")},
{Time: parseTimeUTC("2015-09-01 10:20:30")},
{Time: parseTimeUTC("2015-09-02 10:20:30")},
{Time: parseTimeUTC("2015-09-05 10:20:30")},
{Time: parseTimeUTC("2015-09-06 10:20:30")},
{Time: parseTimeUTC("2015-09-08 10:20:30")},
{Time: parseTimeUTC("2015-09-09 10:20:30")},
{Time: parseTimeUTC("2015-09-10 10:20:30")},
{Time: parseTimeUTC("2015-09-11 10:20:30")},
{Time: parseTimeUTC("2015-09-20 10:20:30")},
{Time: parseTimeUTC("2015-09-22 10:20:30")},
{Time: parseTimeUTC("2015-08-08 10:20:30")},
{Time: parseTimeUTC("2015-08-10 10:20:30")},
{Time: parseTimeUTC("2015-08-12 10:20:30")},
{Time: parseTimeUTC("2015-08-13 10:20:30")},
{Time: parseTimeUTC("2015-08-13 10:20:30")},
{Time: parseTimeUTC("2015-08-15 10:20:30")},
{Time: parseTimeUTC("2015-08-18 10:20:30")},
{Time: parseTimeUTC("2015-08-20 10:20:30")},
{Time: parseTimeUTC("2015-08-21 10:20:30")},
{Time: parseTimeUTC("2015-08-22 10:20:30")},
{Time: parseTimeUTC("2015-10-01 10:20:30")},
{Time: parseTimeUTC("2015-10-02 10:20:30")},
{Time: parseTimeUTC("2015-10-05 10:20:30")},
{Time: parseTimeUTC("2015-10-06 10:20:30")},
{Time: parseTimeUTC("2015-10-08 10:20:30")},
{Time: parseTimeUTC("2015-10-09 10:20:30")},
{Time: parseTimeUTC("2015-10-10 10:20:30")},
{Time: parseTimeUTC("2015-10-11 10:20:30")},
{Time: parseTimeUTC("2015-10-20 10:20:30")},
{Time: parseTimeUTC("2015-10-22 10:20:30")},
{Time: parseTimeUTC("2015-11-08 10:20:30")},
{Time: parseTimeUTC("2015-11-10 10:20:30")},
{Time: parseTimeUTC("2015-11-12 10:20:30")},
{Time: parseTimeUTC("2015-11-13 10:20:30")},
{Time: parseTimeUTC("2015-11-13 10:20:30")},
{Time: parseTimeUTC("2015-11-15 10:20:30")},
{Time: parseTimeUTC("2015-11-18 10:20:30")},
{Time: parseTimeUTC("2015-11-20 10:20:30")},
{Time: parseTimeUTC("2015-11-21 10:20:30")},
{Time: parseTimeUTC("2015-11-22 10:20:30")},
{Time: parseTimeUTC("2016-01-01 01:02:03")},
{Time: parseTimeUTC("2016-01-01 01:03:03")},
{Time: parseTimeUTC("2016-01-01 07:08:03")},
{Time: parseTimeUTC("2016-01-03 07:02:03")},
{Time: parseTimeUTC("2016-01-04 10:23:03")},
{Time: parseTimeUTC("2016-01-04 11:23:03")},
{Time: parseTimeUTC("2016-01-04 12:23:03")},
{Time: parseTimeUTC("2016-01-04 12:24:03")},
{Time: parseTimeUTC("2016-01-04 12:28:03")},
{Time: parseTimeUTC("2016-01-04 12:30:03")},
{Time: parseTimeUTC("2016-01-04 16:23:03")},
{Time: parseTimeUTC("2016-01-05 09:02:03")},
{Time: parseTimeUTC("2016-01-06 08:02:03")},
{Time: parseTimeUTC("2016-01-07 10:02:03")},
{Time: parseTimeUTC("2016-01-08 20:02:03")},
{Time: parseTimeUTC("2016-01-09 21:02:03")},
{Time: parseTimeUTC("2016-01-12 21:02:03")},
{Time: parseTimeUTC("2016-01-12 21:08:03")},
{Time: parseTimeUTC("2016-01-18 12:02:03")},
}
var expireTests = []ExpirePolicy{
var expireTests = []restic.ExpirePolicy{
{},
{Last: 10},
{Last: 15},
@@ -214,7 +212,7 @@ var expireTests = []ExpirePolicy{
func TestApplyPolicy(t *testing.T) {
for i, p := range expireTests {
keep, remove := ApplyPolicy(testExpireSnapshots, p)
keep, remove := restic.ApplyPolicy(testExpireSnapshots, p)
t.Logf("test %d: returned keep %v, remove %v (of %v) expired snapshots for policy %v",
i, len(keep), len(remove), len(testExpireSnapshots), p)
@@ -255,7 +253,7 @@ func TestApplyPolicy(t *testing.T) {
continue
}
var want Snapshots
var want restic.Snapshots
err = json.Unmarshal(buf, &want)
if !reflect.DeepEqual(keep, want) {

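For context on the hunks above: after this change, policy evaluation is driven entirely through the top-level restic package. A minimal sketch of calling the renamed API, using only the names visible in this diff (the policy value is illustrative, and the snapshot list would be loaded from a repository in real code):

package main

import (
	"fmt"

	"restic"
)

func main() {
	var snapshots restic.Snapshots // placeholder: normally read from the repo
	policy := restic.ExpirePolicy{Last: 10}

	// ApplyPolicy splits the list into snapshots to keep and to remove.
	keep, remove := restic.ApplyPolicy(snapshots, policy)
	fmt.Printf("keep %d, remove %d\n", len(keep), len(remove))
}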
View file

@@ -8,7 +8,7 @@ import (
"testing"
"restic"
"restic/backend"
"restic/archiver"
"restic/backend/local"
"restic/repository"
)
@@ -83,8 +83,8 @@ func TeardownRepo(repo *repository.Repository) {
}
}
func SnapshotDir(t testing.TB, repo *repository.Repository, path string, parent *backend.ID) *restic.Snapshot {
arch := restic.NewArchiver(repo)
func SnapshotDir(t testing.TB, repo *repository.Repository, path string, parent *restic.ID) *restic.Snapshot {
arch := archiver.New(repo)
sn, _, err := arch.Snapshot(nil, []string{path}, parent)
OK(t, err)
return sn

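The SnapshotDir helper above shows the new constructor spelling: archiver.New replaces restic.NewArchiver. A hedged sketch of a call site, assuming the Snapshot signature shown in this hunk (nil progress, a path list, an optional parent ID) and that archiver.New accepts the restic.Repository interface:

import (
	"log"

	"restic"
	"restic/archiver"
)

func snapshotPath(repo restic.Repository, path string) *restic.Snapshot {
	arch := archiver.New(repo)
	// nil parent: take a full snapshot rather than an incremental one
	sn, id, err := arch.Snapshot(nil, []string{path}, nil)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("saved snapshot %v", id.Str())
	return sn
}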
View file

@@ -29,7 +29,7 @@ type fakeFileSystem struct {
// IDs is returned.
func (fs fakeFileSystem) saveFile(rd io.Reader) (blobs IDs) {
blobs = IDs{}
ch := chunker.New(rd, fs.repo.Config().ChunkerPolynomial())
ch := chunker.New(rd, fs.repo.Config().ChunkerPolynomial)
for {
chunk, err := ch.Next(getBuf())

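Note the ChunkerPolynomial change in this hunk: it is now an exported config field rather than an accessor method. A sketch of the surrounding chunking loop under that assumption; the io.EOF check and the chunker.MaxSize buffer sizing are the chunker package's usual conventions, stated here as assumptions (the test itself uses a getBuf() helper instead):

import (
	"io"

	"restic"
	"github.com/restic/chunker"
)

func chunkReader(repo restic.Repository, rd io.Reader) error {
	ch := chunker.New(rd, repo.Config().ChunkerPolynomial)
	buf := make([]byte, chunker.MaxSize) // reused for every chunk
	for {
		chunk, err := ch.Next(buf)
		if err == io.EOF {
			break // reader exhausted, all chunks emitted
		}
		if err != nil {
			return err
		}
		_ = restic.Hash(chunk.Data) // hash and store the chunk here
	}
	return nil
}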
View file

@@ -97,7 +97,7 @@ func TestLoadTree(t *testing.T) {
// save tree
tree := restic.NewTree()
id, err := repo.SaveJSON(TreeBlob, tree)
id, err := repo.SaveJSON(restic.TreeBlob, tree)
OK(t, err)
// save packs

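The TestLoadTree hunk illustrates the blob-type move: pack.Tree becomes restic.TreeBlob. A minimal sketch of saving an empty tree with the renamed constant, per the SaveJSON API shown in this diff (the helper name is hypothetical):

import (
	"testing"

	"restic"
)

func saveEmptyTree(t *testing.T, repo restic.Repository) restic.ID {
	tree := restic.NewTree()
	id, err := repo.SaveJSON(restic.TreeBlob, tree) // stored as a tree blob
	if err != nil {
		t.Fatal(err)
	}
	return id
}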
View file

@@ -1,20 +1,21 @@
package types
import (
"restic"
"restic/backend"
"restic/pack"
)
// Repository manages encrypted and packed data stored in a backend.
type Repository interface {
LoadJSONUnpacked(backend.Type, backend.ID, interface{}) error
SaveJSONUnpacked(backend.Type, interface{}) (backend.ID, error)
LoadJSONUnpacked(restic.FileType, backend.ID, interface{}) error
SaveJSONUnpacked(restic.FileType, interface{}) (backend.ID, error)
Lister
}
// Lister lists packs in a repo and blobs in a pack.
type Lister interface {
List(backend.Type, <-chan struct{}) <-chan backend.ID
List(restic.FileType, <-chan struct{}) <-chan backend.ID
ListPack(backend.ID) ([]pack.Blob, int64, error)
}

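The Lister interface above streams IDs over a channel, with a done channel for cancellation. A sketch of a consumer inside this package, assuming restic.SnapshotFile is one of the restic.FileType values (the function name is hypothetical):

import (
	"fmt"

	"restic"
)

func printSnapshotIDs(l Lister) {
	done := make(chan struct{})
	defer close(done) // stops the listing goroutine if we return early

	for id := range l.List(restic.SnapshotFile, done) {
		fmt.Println(id)
	}
}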
View file

@@ -8,7 +8,7 @@ import (
"time"
"restic"
"restic/backend"
"restic/archiver"
"restic/pipe"
"restic/repository"
. "restic/test"
@@ -22,7 +22,7 @@ func TestWalkTree(t *testing.T) {
OK(t, err)
// archive a few files
arch := restic.NewArchiver(repo)
arch := archiver.New(repo)
sn, _, err := arch.Snapshot(nil, dirs, nil)
OK(t, err)
@@ -94,7 +94,7 @@ type delayRepo struct {
delay time.Duration
}
func (d delayRepo) LoadJSONPack(t BlobType, id backend.ID, dst interface{}) error {
func (d delayRepo) LoadJSONPack(t restic.BlobType, id restic.ID, dst interface{}) error {
time.Sleep(d.delay)
return d.repo.LoadJSONPack(t, id, dst)
}
@@ -1344,7 +1344,7 @@ func TestDelayedWalkTree(t *testing.T) {
repo := OpenLocalRepo(t, repodir)
OK(t, repo.LoadIndex())
root, err := backend.ParseID("937a2f64f736c64ee700c6ab06f840c68c94799c288146a0e81e07f4c94254da")
root, err := restic.ParseID("937a2f64f736c64ee700c6ab06f840c68c94799c288146a0e81e07f4c94254da")
OK(t, err)
dr := delayRepo{repo, 100 * time.Millisecond}
@@ -1373,7 +1373,7 @@ func BenchmarkDelayedWalkTree(t *testing.B) {
repo := OpenLocalRepo(t, repodir)
OK(t, repo.LoadIndex())
root, err := backend.ParseID("937a2f64f736c64ee700c6ab06f840c68c94799c288146a0e81e07f4c94254da")
root, err := restic.ParseID("937a2f64f736c64ee700c6ab06f840c68c94799c288146a0e81e07f4c94254da")
OK(t, err)
dr := delayRepo{repo, 10 * time.Millisecond}
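Across these walk-test hunks the migration is mechanical: identifiers that previously lived in restic/backend now resolve through the top-level restic package. Before and after, using the fixture ID from the tests above:

// before
root, err := backend.ParseID("937a2f64f736c64ee700c6ab06f840c68c94799c288146a0e81e07f4c94254da")

// after
root, err := restic.ParseID("937a2f64f736c64ee700c6ab06f840c68c94799c288146a0e81e07f4c94254da")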