Merge pull request #600 from restic/restructure

WIP: restructure code
Alexander Neumann 2016-09-04 15:36:26 +02:00
commit 1dd9a58e5a
146 changed files with 2829 additions and 3196 deletions


@@ -6,14 +6,14 @@ import (
 	"os"
 	"path/filepath"
 	"restic"
-	"restic/backend"
+	"restic/archiver"
 	"restic/debug"
 	"restic/filter"
 	"restic/fs"
 	"strings"
 	"time"

-	"github.com/pkg/errors"
+	"restic/errors"

 	"golang.org/x/crypto/ssh/terminal"
 )
@@ -232,7 +232,7 @@ func filterExisting(items []string) (result []string, err error) {
 	}

 	if len(result) == 0 {
-		return nil, restic.Fatal("all target directories/files do not exist")
+		return nil, errors.Fatal("all target directories/files do not exist")
 	}

 	return
@@ -240,7 +240,7 @@ func filterExisting(items []string) (result []string, err error) {
 func (cmd CmdBackup) readFromStdin(args []string) error {
 	if len(args) != 0 {
-		return restic.Fatalf("when reading from stdin, no additional files can be specified")
+		return errors.Fatalf("when reading from stdin, no additional files can be specified")
 	}

 	repo, err := cmd.global.OpenRepository()
@@ -259,7 +259,7 @@ func (cmd CmdBackup) readFromStdin(args []string) error {
 		return err
 	}

-	_, id, err := restic.ArchiveReader(repo, cmd.newArchiveStdinProgress(), os.Stdin, cmd.StdinFilename)
+	_, id, err := archiver.ArchiveReader(repo, cmd.newArchiveStdinProgress(), os.Stdin, cmd.StdinFilename)
 	if err != nil {
 		return err
 	}
@@ -274,7 +274,7 @@ func (cmd CmdBackup) Execute(args []string) error {
 	}

 	if len(args) == 0 {
-		return restic.Fatalf("wrong number of parameters, Usage: %s", cmd.Usage())
+		return errors.Fatalf("wrong number of parameters, Usage: %s", cmd.Usage())
 	}

 	target := make([]string, 0, len(args))
@@ -306,13 +306,13 @@ func (cmd CmdBackup) Execute(args []string) error {
 		return err
 	}

-	var parentSnapshotID *backend.ID
+	var parentSnapshotID *restic.ID

 	// Force using a parent
 	if !cmd.Force && cmd.Parent != "" {
 		id, err := restic.FindSnapshot(repo, cmd.Parent)
 		if err != nil {
-			return restic.Fatalf("invalid id %q: %v", cmd.Parent, err)
+			return errors.Fatalf("invalid id %q: %v", cmd.Parent, err)
 		}

 		parentSnapshotID = &id
@@ -365,12 +365,12 @@ func (cmd CmdBackup) Execute(args []string) error {
 		return !matched
 	}

-	stat, err := restic.Scan(target, selectFilter, cmd.newScanProgress())
+	stat, err := archiver.Scan(target, selectFilter, cmd.newScanProgress())
 	if err != nil {
 		return err
 	}

-	arch := restic.NewArchiver(repo)
+	arch := archiver.New(repo)
 	arch.Excludes = cmd.Excludes
 	arch.SelectFilter = selectFilter
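
The change repeated throughout this commit is mechanical: `restic.Fatal`/`restic.Fatalf` become `errors.Fatal`/`errors.Fatalf` from the new `restic/errors` package, which also replaces the direct `github.com/pkg/errors` import. A minimal sketch of what such a package has to provide for the calls visible in this diff (`Fatal`, `Fatalf`, and the `IsFatal` check used in `main.go` below); only the exported names are confirmed by the diff, the internals are assumptions:

```go
// Package errors (sketch): fatal errors carry a clean, user-facing message;
// everything else keeps its wrapped cause and stack trace.
package errors

import "fmt"

// fatalError is a plain message that should abort the program.
type fatalError string

func (e fatalError) Error() string { return string(e) }
func (e fatalError) Fatal() bool   { return true }

// Fataler is implemented by errors meant to be printed as-is, without a trace.
type Fataler interface {
	Fatal() bool
}

// Fatal returns an error that satisfies IsFatal().
func Fatal(s string) error { return fatalError(s) }

// Fatalf returns a formatted error that satisfies IsFatal().
func Fatalf(format string, args ...interface{}) error {
	return fatalError(fmt.Sprintf(format, args...))
}

// IsFatal reports whether err was created by Fatal or Fatalf.
func IsFatal(err error) bool {
	e, ok := err.(Fataler)
	return ok && e.Fatal()
}
```

Since `errors.Cause`, `errors.Wrap`, `errors.New`, and `errors.Errorf` keep working in the hunks below, the package presumably also re-exports those from `github.com/pkg/errors`.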


@@ -1,52 +0,0 @@
-package main
-
-import (
-	"fmt"
-
-	"restic"
-)
-
-type CmdCache struct {
-	global *GlobalOptions
-}
-
-func init() {
-	_, err := parser.AddCommand("cache",
-		"manage cache",
-		"The cache command creates and manages the local cache",
-		&CmdCache{global: &globalOpts})
-	if err != nil {
-		panic(err)
-	}
-}
-
-func (cmd CmdCache) Usage() string {
-	return "[update|clear]"
-}
-
-func (cmd CmdCache) Execute(args []string) error {
-	repo, err := cmd.global.OpenRepository()
-	if err != nil {
-		return err
-	}
-
-	lock, err := lockRepo(repo)
-	defer unlockRepo(lock)
-	if err != nil {
-		return err
-	}
-
-	cache, err := restic.NewCache(repo, cmd.global.CacheDir)
-	if err != nil {
-		return err
-	}
-
-	fmt.Printf("clear cache for old snapshots\n")
-	err = cache.Clear(repo)
-	if err != nil {
-		return err
-	}
-	fmt.Printf("done\n")
-
-	return nil
-}


@@ -8,7 +8,7 @@ import (
 	"restic"
 	"restic/backend"
 	"restic/debug"
-	"restic/pack"
+	"restic/errors"
 	"restic/repository"
 )
@@ -32,7 +32,7 @@ func (cmd CmdCat) Usage() string {
 func (cmd CmdCat) Execute(args []string) error {
 	if len(args) < 1 || (args[0] != "masterkey" && args[0] != "config" && len(args) != 2) {
-		return restic.Fatalf("type or ID not specified, Usage: %s", cmd.Usage())
+		return errors.Fatalf("type or ID not specified, Usage: %s", cmd.Usage())
 	}

 	repo, err := cmd.global.OpenRepository()
@@ -48,12 +48,12 @@ func (cmd CmdCat) Execute(args []string) error {
 	tpe := args[0]

-	var id backend.ID
+	var id restic.ID
 	if tpe != "masterkey" && tpe != "config" {
-		id, err = backend.ParseID(args[1])
+		id, err = restic.ParseID(args[1])
 		if err != nil {
 			if tpe != "snapshot" {
-				return restic.Fatalf("unable to parse ID: %v\n", err)
+				return errors.Fatalf("unable to parse ID: %v\n", err)
 			}

 			// find snapshot id with prefix
@@ -67,7 +67,7 @@ func (cmd CmdCat) Execute(args []string) error {
 	// handle all types that don't need an index
 	switch tpe {
 	case "config":
-		buf, err := json.MarshalIndent(repo.Config, "", " ")
+		buf, err := json.MarshalIndent(repo.Config(), "", " ")
 		if err != nil {
 			return err
 		}
@@ -75,7 +75,7 @@ func (cmd CmdCat) Execute(args []string) error {
 		fmt.Println(string(buf))
 		return nil
 	case "index":
-		buf, err := repo.LoadAndDecrypt(backend.Index, id)
+		buf, err := repo.LoadAndDecrypt(restic.IndexFile, id)
 		if err != nil {
 			return err
 		}
@@ -85,7 +85,7 @@ func (cmd CmdCat) Execute(args []string) error {
 	case "snapshot":
 		sn := &restic.Snapshot{}
-		err = repo.LoadJSONUnpacked(backend.Snapshot, id, sn)
+		err = repo.LoadJSONUnpacked(restic.SnapshotFile, id, sn)
 		if err != nil {
 			return err
 		}
@@ -99,7 +99,7 @@ func (cmd CmdCat) Execute(args []string) error {
 		return nil
 	case "key":
-		h := backend.Handle{Type: backend.Key, Name: id.String()}
+		h := restic.Handle{Type: restic.KeyFile, Name: id.String()}
 		buf, err := backend.LoadAll(repo.Backend(), h, nil)
 		if err != nil {
 			return err
@@ -150,13 +150,13 @@ func (cmd CmdCat) Execute(args []string) error {
 	switch tpe {
 	case "pack":
-		h := backend.Handle{Type: backend.Data, Name: id.String()}
+		h := restic.Handle{Type: restic.DataFile, Name: id.String()}
 		buf, err := backend.LoadAll(repo.Backend(), h, nil)
 		if err != nil {
 			return err
 		}

-		hash := backend.Hash(buf)
+		hash := restic.Hash(buf)
 		if !hash.Equal(id) {
 			fmt.Fprintf(cmd.global.stderr, "Warning: hash of data does not match ID, want\n %v\ngot:\n %v\n", id.String(), hash.String())
 		}
@@ -165,7 +165,7 @@ func (cmd CmdCat) Execute(args []string) error {
 		return err

 	case "blob":
-		for _, t := range []pack.BlobType{pack.Data, pack.Tree} {
+		for _, t := range []restic.BlobType{restic.DataBlob, restic.TreeBlob} {
 			list, err := repo.Index().Lookup(id, t)
 			if err != nil {
 				continue
@@ -173,21 +173,21 @@ func (cmd CmdCat) Execute(args []string) error {
 			blob := list[0]

 			buf := make([]byte, blob.Length)
-			data, err := repo.LoadBlob(id, t, buf)
+			n, err := repo.LoadBlob(restic.DataBlob, id, buf)
 			if err != nil {
 				return err
 			}
+			buf = buf[:n]

-			_, err = os.Stdout.Write(data)
+			_, err = os.Stdout.Write(buf)
 			return err
 		}

-		return restic.Fatal("blob not found")
+		return errors.Fatal("blob not found")

 	case "tree":
 		debug.Log("cat", "cat tree %v", id.Str())
-		tree := restic.NewTree()
-		err = repo.LoadJSONPack(pack.Tree, id, tree)
+		tree, err := repo.LoadTree(id)
 		if err != nil {
 			debug.Log("cat", "unable to load tree %v: %v", id.Str(), err)
 			return err
@@ -203,6 +203,6 @@ func (cmd CmdCat) Execute(args []string) error {
 		return nil
 	default:
-		return restic.Fatal("invalid type")
+		return errors.Fatal("invalid type")
 	}
 }
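
The `case "blob"` hunk also shows the new `LoadBlob` contract: rather than returning a possibly reallocated slice, it fills a caller-supplied buffer and returns the number of bytes written, which the caller then truncates to. A small helper illustrating the pattern; the names are placeholders, only the call shape comes from the diff:

```go
package main

import "restic"

// readBlob loads a blob of known length under the new contract:
// allocate, fill, then truncate to the returned byte count.
func readBlob(repo restic.Repository, id restic.ID, length uint) ([]byte, error) {
	buf := make([]byte, length)
	n, err := repo.LoadBlob(restic.DataBlob, id, buf)
	if err != nil {
		return nil, err
	}
	return buf[:n], nil // only the first n bytes are valid data
}
```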


@@ -9,6 +9,7 @@ import (
 	"restic"
 	"restic/checker"
+	"restic/errors"
 )

 type CmdCheck struct {
@@ -65,7 +66,7 @@ func (cmd CmdCheck) newReadProgress(todo restic.Stat) *restic.Progress {
 func (cmd CmdCheck) Execute(args []string) error {
 	if len(args) != 0 {
-		return restic.Fatal("check has no arguments")
+		return errors.Fatal("check has no arguments")
 	}

 	repo, err := cmd.global.OpenRepository()
@@ -103,7 +104,7 @@ func (cmd CmdCheck) Execute(args []string) error {
 		for _, err := range errs {
 			cmd.global.Warnf("error: %v\n", err)
 		}
-		return restic.Fatal("LoadIndex returned errors")
+		return errors.Fatal("LoadIndex returned errors")
 	}

 	done := make(chan struct{})
@@ -158,7 +159,7 @@ func (cmd CmdCheck) Execute(args []string) error {
 	}

 	if errorsFound {
-		return restic.Fatal("repository contains errors")
+		return errors.Fatal("repository contains errors")
 	}
 	return nil
 }


@@ -9,7 +9,7 @@ import (
 	"os"

 	"restic"
-	"restic/backend"
+	"restic/errors"
 	"restic/pack"
 	"restic/repository"
 )
@@ -50,7 +50,7 @@ func debugPrintSnapshots(repo *repository.Repository, wr io.Writer) error {
 	done := make(chan struct{})
 	defer close(done)

-	for id := range repo.List(backend.Snapshot, done) {
+	for id := range repo.List(restic.SnapshotFile, done) {
 		snapshot, err := restic.LoadSnapshot(repo, id)
 		if err != nil {
 			fmt.Fprintf(os.Stderr, "LoadSnapshot(%v): %v", id.Str(), err)
@@ -68,37 +68,6 @@ func debugPrintSnapshots(repo *repository.Repository, wr io.Writer) error {
 	return nil
 }

-func printTrees(repo *repository.Repository, wr io.Writer) error {
-	done := make(chan struct{})
-	defer close(done)
-
-	trees := []backend.ID{}
-
-	for _, idx := range repo.Index().All() {
-		for blob := range idx.Each(nil) {
-			if blob.Type != pack.Tree {
-				continue
-			}
-
-			trees = append(trees, blob.ID)
-		}
-	}
-
-	for _, id := range trees {
-		tree, err := restic.LoadTree(repo, id)
-		if err != nil {
-			fmt.Fprintf(os.Stderr, "LoadTree(%v): %v", id.Str(), err)
-			continue
-		}
-
-		fmt.Fprintf(wr, "tree_id: %v\n", id)
-		prettyPrintJSON(wr, tree)
-	}
-
-	return nil
-}
-
 const dumpPackWorkers = 10

 // Pack is the struct used in printPacks.
@@ -110,9 +79,9 @@ type Pack struct {
 // Blob is the struct used in printPacks.
 type Blob struct {
-	Type   pack.BlobType `json:"type"`
+	Type   restic.BlobType `json:"type"`
 	Length uint          `json:"length"`
-	ID     backend.ID    `json:"id"`
+	ID     restic.ID     `json:"id"`
 	Offset uint          `json:"offset"`
 }
@@ -123,14 +92,14 @@ func printPacks(repo *repository.Repository, wr io.Writer) error {
 	f := func(job worker.Job, done <-chan struct{}) (interface{}, error) {
 		name := job.Data.(string)

-		h := backend.Handle{Type: backend.Data, Name: name}
+		h := restic.Handle{Type: restic.DataFile, Name: name}

 		blobInfo, err := repo.Backend().Stat(h)
 		if err != nil {
 			return nil, err
 		}

-		blobs, err := pack.List(repo.Key(), backend.ReaderAt(repo.Backend(), h), blobInfo.Size)
+		blobs, err := pack.List(repo.Key(), restic.ReaderAt(repo.Backend(), h), blobInfo.Size)
 		if err != nil {
 			return nil, err
 		}
@@ -143,7 +112,7 @@ func printPacks(repo *repository.Repository, wr io.Writer) error {
 	wp := worker.New(dumpPackWorkers, f, jobCh, resCh)

 	go func() {
-		for name := range repo.Backend().List(backend.Data, done) {
+		for name := range repo.Backend().List(restic.DataFile, done) {
 			jobCh <- worker.Job{Data: name}
 		}
 		close(jobCh)
@@ -157,7 +126,7 @@ func printPacks(repo *repository.Repository, wr io.Writer) error {
 			continue
 		}

-		entries := job.Result.([]pack.Blob)
+		entries := job.Result.([]restic.Blob)

 		p := Pack{
 			Name:  name,
 			Blobs: make([]Blob, len(entries)),
@@ -183,7 +152,7 @@ func (cmd CmdDump) DumpIndexes() error {
 	done := make(chan struct{})
 	defer close(done)

-	for id := range cmd.repo.List(backend.Index, done) {
+	for id := range cmd.repo.List(restic.IndexFile, done) {
 		fmt.Printf("index_id: %v\n", id)

 		idx, err := repository.LoadIndex(cmd.repo, id)
@@ -202,7 +171,7 @@ func (cmd CmdDump) DumpIndexes() error {
 func (cmd CmdDump) Execute(args []string) error {
 	if len(args) != 1 {
-		return restic.Fatalf("type not specified, Usage: %s", cmd.Usage())
+		return errors.Fatalf("type not specified, Usage: %s", cmd.Usage())
 	}

 	repo, err := cmd.global.OpenRepository()
@@ -229,8 +198,6 @@ func (cmd CmdDump) Execute(args []string) error {
 		return cmd.DumpIndexes()
 	case "snapshots":
 		return debugPrintSnapshots(repo, os.Stdout)
-	case "trees":
-		return printTrees(repo, os.Stdout)
 	case "packs":
 		return printPacks(repo, os.Stdout)
 	case "all":
@@ -240,13 +207,6 @@ func (cmd CmdDump) Execute(args []string) error {
 			return err
 		}

-		fmt.Printf("\ntrees:\n")
-		err = printTrees(repo, os.Stdout)
-		if err != nil {
-			return err
-		}
-
 		fmt.Printf("\nindexes:\n")
 		err = cmd.DumpIndexes()
 		if err != nil {
@@ -255,6 +215,6 @@ func (cmd CmdDump) Execute(args []string) error {
 		return nil
 	default:
-		return restic.Fatalf("no such type %q", tpe)
+		return errors.Fatalf("no such type %q", tpe)
 	}
 }


@@ -5,8 +5,8 @@ import (
 	"time"

 	"restic"
-	"restic/backend"
 	"restic/debug"
+	"restic/errors"
 	"restic/repository"
 )
@@ -56,12 +56,12 @@ func parseTime(str string) (time.Time, error) {
 		}
 	}

-	return time.Time{}, restic.Fatalf("unable to parse time: %q", str)
+	return time.Time{}, errors.Fatalf("unable to parse time: %q", str)
 }

-func (c CmdFind) findInTree(repo *repository.Repository, id backend.ID, path string) ([]findResult, error) {
+func (c CmdFind) findInTree(repo *repository.Repository, id restic.ID, path string) ([]findResult, error) {
 	debug.Log("restic.find", "checking tree %v\n", id)
-	tree, err := restic.LoadTree(repo, id)
+	tree, err := repo.LoadTree(id)
 	if err != nil {
 		return nil, err
 	}
@@ -105,7 +105,7 @@ func (c CmdFind) findInTree(repo *repository.Repository, id backend.ID, path str
 	return results, nil
 }

-func (c CmdFind) findInSnapshot(repo *repository.Repository, id backend.ID) error {
+func (c CmdFind) findInSnapshot(repo *repository.Repository, id restic.ID) error {
 	debug.Log("restic.find", "searching in snapshot %s\n  for entries within [%s %s]", id.Str(), c.oldest, c.newest)

 	sn, err := restic.LoadSnapshot(repo, id)
@@ -136,7 +136,7 @@ func (CmdFind) Usage() string {
 func (c CmdFind) Execute(args []string) error {
 	if len(args) != 1 {
-		return restic.Fatalf("wrong number of arguments, Usage: %s", c.Usage())
+		return errors.Fatalf("wrong number of arguments, Usage: %s", c.Usage())
 	}

 	var err error
@@ -176,7 +176,7 @@ func (c CmdFind) Execute(args []string) error {
 	if c.Snapshot != "" {
 		snapshotID, err := restic.FindSnapshot(repo, c.Snapshot)
 		if err != nil {
-			return restic.Fatalf("invalid id %q: %v", args[1], err)
+			return errors.Fatalf("invalid id %q: %v", args[1], err)
 		}

 		return c.findInSnapshot(repo, snapshotID)
@@ -184,7 +184,7 @@ func (c CmdFind) Execute(args []string) error {
 	done := make(chan struct{})
 	defer close(done)

-	for snapshotID := range repo.List(backend.Snapshot, done) {
+	for snapshotID := range repo.List(restic.SnapshotFile, done) {
 		err := c.findInSnapshot(repo, snapshotID)

 		if err != nil {

@@ -4,7 +4,6 @@ import (
 	"fmt"
 	"io"
 	"restic"
-	"restic/backend"
 	"strings"
 )
@@ -93,7 +92,7 @@ func (cmd CmdForget) Execute(args []string) error {
 		}

 		if !cmd.DryRun {
-			err = repo.Backend().Remove(backend.Snapshot, id.String())
+			err = repo.Backend().Remove(restic.SnapshotFile, id.String())
 			if err != nil {
 				return err
 			}
@@ -156,7 +155,7 @@ func (cmd CmdForget) Execute(args []string) error {
 	if !cmd.DryRun {
 		for _, sn := range remove {
-			err = repo.Backend().Remove(backend.Snapshot, sn.ID().String())
+			err = repo.Backend().Remove(restic.SnapshotFile, sn.ID().String())
 			if err != nil {
 				return err
 			}


@@ -1,7 +1,7 @@
 package main

 import (
-	"restic"
+	"restic/errors"
 	"restic/repository"
 )
@@ -11,7 +11,7 @@ type CmdInit struct {
 func (cmd CmdInit) Execute(args []string) error {
 	if cmd.global.Repo == "" {
-		return restic.Fatal("Please specify repository location (-r)")
+		return errors.Fatal("Please specify repository location (-r)")
 	}

 	be, err := create(cmd.global.Repo)
@@ -32,7 +32,7 @@ func (cmd CmdInit) Execute(args []string) error {
 		cmd.global.Exitf(1, "creating key in backend at %s failed: %v\n", cmd.global.Repo, err)
 	}

-	cmd.global.Verbosef("created restic backend %v at %s\n", s.Config.ID[:10], cmd.global.Repo)
+	cmd.global.Verbosef("created restic backend %v at %s\n", s.Config().ID[:10], cmd.global.Repo)
 	cmd.global.Verbosef("\n")
 	cmd.global.Verbosef("Please note that knowledge of your password is required to access\n")
 	cmd.global.Verbosef("the repository. Losing your password means that your data is\n")


@@ -4,7 +4,7 @@ import (
 	"fmt"

 	"restic"
-	"restic/backend"
+	"restic/errors"
 	"restic/repository"
 )
@@ -28,7 +28,7 @@ func (cmd CmdKey) listKeys(s *repository.Repository) error {
 	tab.Header = fmt.Sprintf(" %-10s %-10s %-10s %s", "ID", "User", "Host", "Created")
 	tab.RowFormat = "%s%-10s %-10s %-10s %s"

-	plen, err := s.PrefixLength(backend.Key)
+	plen, err := s.PrefixLength(restic.KeyFile)
 	if err != nil {
 		return err
 	}
@@ -36,7 +36,7 @@ func (cmd CmdKey) listKeys(s *repository.Repository) error {
 	done := make(chan struct{})
 	defer close(done)

-	for id := range s.List(backend.Key, done) {
+	for id := range s.List(restic.KeyFile, done) {
 		k, err := repository.LoadKey(s, id.String())
 		if err != nil {
 			cmd.global.Warnf("LoadKey() failed: %v\n", err)
@@ -69,7 +69,7 @@ func (cmd CmdKey) getNewPassword() string {
 func (cmd CmdKey) addKey(repo *repository.Repository) error {
 	id, err := repository.AddKey(repo, cmd.getNewPassword(), repo.Key())
 	if err != nil {
-		return restic.Fatalf("creating new key failed: %v\n", err)
+		return errors.Fatalf("creating new key failed: %v\n", err)
 	}

 	cmd.global.Verbosef("saved new key as %s\n", id)
@@ -79,10 +79,10 @@ func (cmd CmdKey) addKey(repo *repository.Repository) error {
 func (cmd CmdKey) deleteKey(repo *repository.Repository, name string) error {
 	if name == repo.KeyName() {
-		return restic.Fatal("refusing to remove key currently used to access repository")
+		return errors.Fatal("refusing to remove key currently used to access repository")
 	}

-	err := repo.Backend().Remove(backend.Key, name)
+	err := repo.Backend().Remove(restic.KeyFile, name)
 	if err != nil {
 		return err
 	}
@@ -94,10 +94,10 @@ func (cmd CmdKey) deleteKey(repo *repository.Repository, name string) error {
 func (cmd CmdKey) changePassword(repo *repository.Repository) error {
 	id, err := repository.AddKey(repo, cmd.getNewPassword(), repo.Key())
 	if err != nil {
-		return restic.Fatalf("creating new key failed: %v\n", err)
+		return errors.Fatalf("creating new key failed: %v\n", err)
 	}

-	err = repo.Backend().Remove(backend.Key, repo.KeyName())
+	err = repo.Backend().Remove(restic.KeyFile, repo.KeyName())
 	if err != nil {
 		return err
 	}
@@ -113,7 +113,7 @@ func (cmd CmdKey) Usage() string {
 func (cmd CmdKey) Execute(args []string) error {
 	if len(args) < 1 || (args[0] == "rm" && len(args) != 2) {
-		return restic.Fatalf("wrong number of arguments, Usage: %s", cmd.Usage())
+		return errors.Fatalf("wrong number of arguments, Usage: %s", cmd.Usage())
 	}

 	repo, err := cmd.global.OpenRepository()
@@ -145,7 +145,7 @@ func (cmd CmdKey) Execute(args []string) error {
 		return err
 	}

-	id, err := backend.Find(repo.Backend(), backend.Key, args[1])
+	id, err := restic.Find(repo.Backend(), restic.KeyFile, args[1])
 	if err != nil {
 		return err
 	}


@@ -2,7 +2,7 @@ package main

 import (
 	"restic"
-	"restic/backend"
+	"restic/errors"
 )

 type CmdList struct {
@@ -25,7 +25,7 @@ func (cmd CmdList) Usage() string {
 func (cmd CmdList) Execute(args []string) error {
 	if len(args) != 1 {
-		return restic.Fatalf("type not specified, Usage: %s", cmd.Usage())
+		return errors.Fatalf("type not specified, Usage: %s", cmd.Usage())
 	}

 	repo, err := cmd.global.OpenRepository()
@@ -41,33 +41,20 @@ func (cmd CmdList) Execute(args []string) error {
 		}
 	}

-	var t backend.Type
+	var t restic.FileType
 	switch args[0] {
-	case "blobs":
-		err = repo.LoadIndex()
-		if err != nil {
-			return err
-		}
-
-		for _, idx := range repo.Index().All() {
-			for blob := range idx.Each(nil) {
-				cmd.global.Printf("%s\n", blob.ID)
-			}
-		}
-
-		return nil
 	case "packs":
-		t = backend.Data
+		t = restic.DataFile
 	case "index":
-		t = backend.Index
+		t = restic.IndexFile
 	case "snapshots":
-		t = backend.Snapshot
+		t = restic.SnapshotFile
 	case "keys":
-		t = backend.Key
+		t = restic.KeyFile
 	case "locks":
-		t = backend.Lock
+		t = restic.LockFile
 	default:
-		return restic.Fatal("invalid type")
+		return errors.Fatal("invalid type")
 	}

 	for id := range repo.List(t, nil) {
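
`cmd_list.go` spells out the renaming scheme for file types: the old `backend.Type` constants (`backend.Data`, `backend.Index`, ...) become `restic.FileType` values with a `File` suffix. A sketch of how the constants could be declared; the constant names match the diff, while the underlying type and string values are assumptions:

```go
package restic

// FileType names a kind of file stored in the repository backend.
type FileType string

const (
	DataFile     FileType = "data"
	KeyFile      FileType = "key"
	LockFile     FileType = "lock"
	SnapshotFile FileType = "snapshot"
	IndexFile    FileType = "index"
)
```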


@@ -6,7 +6,7 @@ import (
 	"path/filepath"

 	"restic"
-	"restic/backend"
+	"restic/errors"
 	"restic/repository"
 )
@@ -46,8 +46,8 @@ func (cmd CmdLs) printNode(prefix string, n *restic.Node) string {
 	}
 }

-func (cmd CmdLs) printTree(prefix string, repo *repository.Repository, id backend.ID) error {
-	tree, err := restic.LoadTree(repo, id)
+func (cmd CmdLs) printTree(prefix string, repo *repository.Repository, id restic.ID) error {
+	tree, err := repo.LoadTree(id)
 	if err != nil {
 		return err
 	}
@@ -72,7 +72,7 @@ func (cmd CmdLs) Usage() string {
 func (cmd CmdLs) Execute(args []string) error {
 	if len(args) < 1 || len(args) > 2 {
-		return restic.Fatalf("wrong number of arguments, Usage: %s", cmd.Usage())
+		return errors.Fatalf("wrong number of arguments, Usage: %s", cmd.Usage())
 	}

 	repo, err := cmd.global.OpenRepository()


@@ -5,9 +5,8 @@ package main

 import (
 	"os"

 	"restic"
-
-	"github.com/pkg/errors"
+	"restic/errors"

 	resticfs "restic/fs"
 	"restic/fuse"
@@ -44,7 +43,7 @@ func (cmd CmdMount) Usage() string {
 func (cmd CmdMount) Execute(args []string) error {
 	if len(args) == 0 {
-		return restic.Fatalf("wrong number of parameters, Usage: %s", cmd.Usage())
+		return errors.Fatalf("wrong number of parameters, Usage: %s", cmd.Usage())
 	}

 	repo, err := cmd.global.OpenRepository()


@@ -4,10 +4,9 @@ import (
 	"fmt"
 	"os"
 	"restic"
-	"restic/backend"
 	"restic/debug"
+	"restic/errors"
 	"restic/index"
-	"restic/pack"
 	"restic/repository"
 	"time"
@@ -94,7 +93,7 @@ func (cmd CmdPrune) Execute(args []string) error {
 	}

 	cmd.global.Verbosef("counting files in repo\n")
-	for _ = range repo.List(backend.Data, done) {
+	for _ = range repo.List(restic.DataFile, done) {
 		stats.packs++
 	}
@@ -112,7 +111,7 @@ func (cmd CmdPrune) Execute(args []string) error {
 	cmd.global.Verbosef("repository contains %v packs (%v blobs) with %v bytes\n",
 		len(idx.Packs), len(idx.Blobs), formatBytes(uint64(stats.bytes)))

-	blobCount := make(map[pack.Handle]int)
+	blobCount := make(map[restic.BlobHandle]int)
 	duplicateBlobs := 0
 	duplicateBytes := 0
@@ -120,7 +119,7 @@ func (cmd CmdPrune) Execute(args []string) error {
 	for _, p := range idx.Packs {
 		for _, entry := range p.Entries {
 			stats.blobs++
-			h := pack.Handle{ID: entry.ID, Type: entry.Type}
+			h := restic.BlobHandle{ID: entry.ID, Type: entry.Type}
 			blobCount[h]++

 			if blobCount[h] > 1 {
@@ -144,8 +143,8 @@ func (cmd CmdPrune) Execute(args []string) error {
 	cmd.global.Verbosef("find data that is still in use for %d snapshots\n", stats.snapshots)

-	usedBlobs := pack.NewBlobSet()
-	seenBlobs := pack.NewBlobSet()
+	usedBlobs := restic.NewBlobSet()
+	seenBlobs := restic.NewBlobSet()

 	bar = newProgressMax(cmd.global.ShowProgress(), uint64(len(snapshots)), "snapshots")
 	bar.Start()
@@ -165,7 +164,7 @@ func (cmd CmdPrune) Execute(args []string) error {
 	cmd.global.Verbosef("found %d of %d data blobs still in use\n", len(usedBlobs), stats.blobs)

 	// find packs that need a rewrite
-	rewritePacks := backend.NewIDSet()
+	rewritePacks := restic.NewIDSet()
 	for h, blob := range idx.Blobs {
 		if !usedBlobs.Has(h) {
 			rewritePacks.Merge(blob.Packs)
@@ -178,11 +177,11 @@ func (cmd CmdPrune) Execute(args []string) error {
 	}

 	// find packs that are unneeded
-	removePacks := backend.NewIDSet()
+	removePacks := restic.NewIDSet()
 nextPack:
 	for packID, p := range idx.Packs {
 		for _, blob := range p.Entries {
-			h := pack.Handle{ID: blob.ID, Type: blob.Type}
+			h := restic.BlobHandle{ID: blob.ID, Type: blob.Type}
 			if usedBlobs.Has(h) {
 				continue nextPack
 			}
@@ -191,7 +190,7 @@ nextPack:
 		removePacks.Insert(packID)

 		if !rewritePacks.Has(packID) {
-			return restic.Fatalf("pack %v is unneeded, but not contained in rewritePacks", packID.Str())
+			return errors.Fatalf("pack %v is unneeded, but not contained in rewritePacks", packID.Str())
 		}

 		rewritePacks.Delete(packID)
@@ -205,7 +204,7 @@ nextPack:
 	}

 	for packID := range removePacks {
-		err = repo.Backend().Remove(backend.Data, packID.String())
+		err = repo.Backend().Remove(restic.DataFile, packID.String())
 		if err != nil {
 			cmd.global.Warnf("unable to remove file %v from the repository\n", packID.Str())
 		}
@@ -214,7 +213,7 @@ nextPack:
 	cmd.global.Verbosef("creating new index\n")

 	stats.packs = 0
-	for _ = range repo.List(backend.Data, done) {
+	for _ = range repo.List(restic.DataFile, done) {
 		stats.packs++
 	}
 	bar = newProgressMax(cmd.global.ShowProgress(), uint64(stats.packs), "packs")
@@ -223,9 +222,9 @@ nextPack:
 		return err
 	}

-	var supersedes backend.IDs
-	for idxID := range repo.List(backend.Index, done) {
-		err := repo.Backend().Remove(backend.Index, idxID.String())
+	var supersedes restic.IDs
+	for idxID := range repo.List(restic.IndexFile, done) {
+		err := repo.Backend().Remove(restic.IndexFile, idxID.String())
 		if err != nil {
 			fmt.Fprintf(os.Stderr, "unable to remove index %v: %v\n", idxID.Str(), err)
 		}


@@ -2,8 +2,8 @@ package main

 import (
 	"restic"
-	"restic/backend"
 	"restic/debug"
+	"restic/errors"
 	"restic/filter"
 )
@@ -33,15 +33,15 @@ func (cmd CmdRestore) Usage() string {
 func (cmd CmdRestore) Execute(args []string) error {
 	if len(args) != 1 {
-		return restic.Fatalf("wrong number of arguments, Usage: %s", cmd.Usage())
+		return errors.Fatalf("wrong number of arguments, Usage: %s", cmd.Usage())
 	}

 	if cmd.Target == "" {
-		return restic.Fatal("please specify a directory to restore to (--target)")
+		return errors.Fatal("please specify a directory to restore to (--target)")
 	}

 	if len(cmd.Exclude) > 0 && len(cmd.Include) > 0 {
-		return restic.Fatal("exclude and include patterns are mutually exclusive")
+		return errors.Fatal("exclude and include patterns are mutually exclusive")
 	}

 	snapshotIDString := args[0]
@@ -66,7 +66,7 @@ func (cmd CmdRestore) Execute(args []string) error {
 		return err
 	}

-	var id backend.ID
+	var id restic.ID

 	if snapshotIDString == "latest" {
 		id, err = restic.FindLatestSnapshot(repo, cmd.Paths, cmd.Host)


@@ -5,11 +5,11 @@ import (
 	"fmt"
 	"io"
 	"os"
+	"restic/errors"
 	"sort"
 	"strings"

 	"restic"
-	"restic/backend"
 )

 type Table struct {
@@ -70,7 +70,7 @@ func (cmd CmdSnapshots) Usage() string {
 func (cmd CmdSnapshots) Execute(args []string) error {
 	if len(args) != 0 {
-		return restic.Fatalf("wrong number of arguments, usage: %s", cmd.Usage())
+		return errors.Fatalf("wrong number of arguments, usage: %s", cmd.Usage())
 	}

 	repo, err := cmd.global.OpenRepository()
@@ -92,7 +92,7 @@ func (cmd CmdSnapshots) Execute(args []string) error {
 	defer close(done)

 	list := []*restic.Snapshot{}
-	for id := range repo.List(backend.Snapshot, done) {
+	for id := range repo.List(restic.SnapshotFile, done) {
 		sn, err := restic.LoadSnapshot(repo, id)
 		if err != nil {
 			fmt.Fprintf(os.Stderr, "error loading snapshot %s: %v\n", id, err)
@@ -115,7 +115,7 @@ func (cmd CmdSnapshots) Execute(args []string) error {
 	}

-	plen, err := repo.PrefixLength(backend.Snapshot)
+	plen, err := repo.PrefixLength(restic.SnapshotFile)
 	if err != nil {
 		return err
 	}


@@ -9,7 +9,6 @@ import (
 	"strings"
 	"syscall"

-	"restic/backend"
 	"restic/backend/local"
 	"restic/backend/rest"
 	"restic/backend/s3"
@@ -18,8 +17,9 @@ import (
 	"restic/location"
 	"restic/repository"

+	"restic/errors"
+
 	"github.com/jessevdk/go-flags"
-	"github.com/pkg/errors"
 	"golang.org/x/crypto/ssh/terminal"
 )
@@ -247,7 +247,7 @@ const maxKeys = 20
 // OpenRepository reads the password and opens the repository.
 func (o GlobalOptions) OpenRepository() (*repository.Repository, error) {
 	if o.Repo == "" {
-		return nil, restic.Fatal("Please specify repository location (-r)")
+		return nil, errors.Fatal("Please specify repository location (-r)")
 	}

 	be, err := open(o.Repo)
@@ -263,14 +263,14 @@ func (o GlobalOptions) OpenRepository() (*repository.Repository, error) {
 	err = s.SearchKey(o.password, maxKeys)
 	if err != nil {
-		return nil, restic.Fatalf("unable to open repo: %v", err)
+		return nil, errors.Fatalf("unable to open repo: %v", err)
 	}

 	return s, nil
 }

 // Open the backend specified by a location config.
-func open(s string) (backend.Backend, error) {
+func open(s string) (restic.Backend, error) {
 	debug.Log("open", "parsing location %v", s)
 	loc, err := location.Parse(s)
 	if err != nil {
@@ -301,11 +301,11 @@ func open(s string) (backend.Backend, error) {
 	}

 	debug.Log("open", "invalid repository location: %v", s)
-	return nil, restic.Fatalf("invalid scheme %q", loc.Scheme)
+	return nil, errors.Fatalf("invalid scheme %q", loc.Scheme)
 }

 // Create the backend specified by URI.
-func create(s string) (backend.Backend, error) {
+func create(s string) (restic.Backend, error) {
 	debug.Log("open", "parsing location %v", s)
 	loc, err := location.Parse(s)
 	if err != nil {
@@ -336,5 +336,5 @@ func create(s string) (backend.Backend, error) {
 	}

 	debug.Log("open", "invalid repository scheme: %v", s)
-	return nil, restic.Fatalf("invalid scheme %q", loc.Scheme)
+	return nil, errors.Fatalf("invalid scheme %q", loc.Scheme)
 }
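
With this hunk, `open` and `create` return the interface type `restic.Backend` instead of `backend.Backend`. Only three of its methods actually appear in this commit; a partial sketch with signatures inferred from the call sites (`Stat(h)` whose result exposes `Size`, `List(t, done)` ranged over as strings, and `Remove(t, name)`), so the real interface is certainly larger:

```go
package restic

// FileInfo is a stand-in name for whatever Stat really returns; the diff
// only reads its Size field.
type FileInfo struct{ Size int64 }

// Backend (partial sketch) as exercised by this commit.
type Backend interface {
	// Stat returns metadata for the file identified by h.
	Stat(h Handle) (FileInfo, error)

	// List yields the names of all files of type t until done is closed.
	List(t FileType, done <-chan struct{}) <-chan string

	// Remove deletes the file of type t with the given name.
	Remove(t FileType, name string) error
}
```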


@@ -10,10 +10,9 @@ import (
 	"testing"
 	"time"

-	"github.com/pkg/errors"
+	"restic/errors"

 	"restic"
-	"restic/backend"
 	"restic/repository"
 	. "restic/test"
 )
@@ -51,7 +50,7 @@ func waitForMount(dir string) error {
 		time.Sleep(mountSleep)
 	}

-	return restic.Fatalf("subdir %q of dir %s never appeared", mountTestSubdir, dir)
+	return errors.Fatalf("subdir %q of dir %s never appeared", mountTestSubdir, dir)
 }

 func cmdMount(t testing.TB, global GlobalOptions, dir string, ready, done chan struct{}) {
@@ -71,7 +70,7 @@ func TestMount(t *testing.T) {
 		t.Skip("Skipping fuse tests")
 	}

-	checkSnapshots := func(repo *repository.Repository, mountpoint string, snapshotIDs []backend.ID) {
+	checkSnapshots := func(repo *repository.Repository, mountpoint string, snapshotIDs []restic.ID) {
 		snapshotsDir, err := os.Open(filepath.Join(mountpoint, "snapshots"))
 		OK(t, err)
 		namesInSnapshots, err := snapshotsDir.Readdirnames(-1)
@@ -123,7 +122,7 @@ func TestMount(t *testing.T) {
 	Assert(t, len(names) == 1 && names[0] == "snapshots", `The fuse virtual directory "snapshots" doesn't exist`)
 	OK(t, mountpointDir.Close())

-	checkSnapshots(repo, mountpoint, []backend.ID{})
+	checkSnapshots(repo, mountpoint, []restic.ID{})

 	datafile := filepath.Join("testdata", "backup-data.tar.gz")
 	fd, err := os.Open(datafile)


@@ -8,6 +8,7 @@ import (
 	"runtime"
 	"testing"

+	"restic/repository"
 	. "restic/test"
 )
@@ -193,6 +194,8 @@ func withTestEnvironment(t testing.TB, f func(*testEnvironment, GlobalOptions))
 		t.Skip("integration tests disabled")
 	}

+	repository.TestUseLowSecurityKDFParameters(t)
+
 	tempdir, err := ioutil.TempDir(TestTempDir, "restic-test-")
 	OK(t, err)


@@ -16,21 +16,20 @@ import (
 	"testing"
 	"time"

-	"github.com/pkg/errors"
+	"restic/errors"

-	"restic/backend"
 	"restic/debug"
 	"restic/filter"
 	"restic/repository"
 	. "restic/test"
 )

-func parseIDsFromReader(t testing.TB, rd io.Reader) backend.IDs {
-	IDs := backend.IDs{}
+func parseIDsFromReader(t testing.TB, rd io.Reader) restic.IDs {
+	IDs := restic.IDs{}
 	sc := bufio.NewScanner(rd)

 	for sc.Scan() {
-		id, err := backend.ParseID(sc.Text())
+		id, err := restic.ParseID(sc.Text())
 		if err != nil {
 			t.Logf("parse id %v: %v", sc.Text(), err)
 			continue
@@ -44,6 +43,7 @@ func parseIDsFromReader(t testing.TB, rd io.Reader) backend.IDs {
 func cmdInit(t testing.TB, global GlobalOptions) {
 	repository.TestUseLowSecurityKDFParameters(t)
+	restic.TestSetLockTimeout(t, 0)

 	cmd := &CmdInit{global: &global}
 	OK(t, cmd.Execute(nil))
@@ -51,11 +51,11 @@ func cmdInit(t testing.TB, global GlobalOptions) {
 	t.Logf("repository initialized at %v", global.Repo)
 }

-func cmdBackup(t testing.TB, global GlobalOptions, target []string, parentID *backend.ID) {
+func cmdBackup(t testing.TB, global GlobalOptions, target []string, parentID *restic.ID) {
 	cmdBackupExcludes(t, global, target, parentID, nil)
 }

-func cmdBackupExcludes(t testing.TB, global GlobalOptions, target []string, parentID *backend.ID, excludes []string) {
+func cmdBackupExcludes(t testing.TB, global GlobalOptions, target []string, parentID *restic.ID, excludes []string) {
 	cmd := &CmdBackup{global: &global, Excludes: excludes}
 	if parentID != nil {
 		cmd.Parent = parentID.String()
@@ -66,19 +66,19 @@ func cmdBackupExcludes(t testing.TB, global GlobalOptions, target []string, pare
 	OK(t, cmd.Execute(target))
 }

-func cmdList(t testing.TB, global GlobalOptions, tpe string) backend.IDs {
+func cmdList(t testing.TB, global GlobalOptions, tpe string) restic.IDs {
 	cmd := &CmdList{global: &global}
 	return executeAndParseIDs(t, cmd, tpe)
 }

-func executeAndParseIDs(t testing.TB, cmd *CmdList, args ...string) backend.IDs {
+func executeAndParseIDs(t testing.TB, cmd *CmdList, args ...string) restic.IDs {
 	buf := bytes.NewBuffer(nil)
 	cmd.global.stdout = buf
 	OK(t, cmd.Execute(args))
 	return parseIDsFromReader(t, buf)
 }

-func cmdRestore(t testing.TB, global GlobalOptions, dir string, snapshotID backend.ID) {
+func cmdRestore(t testing.TB, global GlobalOptions, dir string, snapshotID restic.ID) {
 	cmdRestoreExcludes(t, global, dir, snapshotID, nil)
 }
@@ -87,12 +87,12 @@ func cmdRestoreLatest(t testing.TB, global GlobalOptions, dir string, paths []st
 	OK(t, cmd.Execute([]string{"latest"}))
 }

-func cmdRestoreExcludes(t testing.TB, global GlobalOptions, dir string, snapshotID backend.ID, excludes []string) {
+func cmdRestoreExcludes(t testing.TB, global GlobalOptions, dir string, snapshotID restic.ID, excludes []string) {
 	cmd := &CmdRestore{global: &global, Target: dir, Exclude: excludes}
 	OK(t, cmd.Execute([]string{snapshotID.String()}))
 }

-func cmdRestoreIncludes(t testing.TB, global GlobalOptions, dir string, snapshotID backend.ID, includes []string) {
+func cmdRestoreIncludes(t testing.TB, global GlobalOptions, dir string, snapshotID restic.ID, includes []string) {
 	cmd := &CmdRestore{global: &global, Target: dir, Include: includes}
 	OK(t, cmd.Execute([]string{snapshotID.String()}))
 }
@@ -582,7 +582,7 @@ func testFileSize(filename string, size int64) error {
 	}

 	if fi.Size() != size {
-		return restic.Fatalf("wrong file size for %v: expected %v, got %v", filename, size, fi.Size())
+		return errors.Fatalf("wrong file size for %v: expected %v, got %v", filename, size, fi.Size())
 	}

 	return nil
@@ -811,11 +811,11 @@ func TestRebuildIndexAlwaysFull(t *testing.T) {
 var optimizeTests = []struct {
 	testFilename string
-	snapshots    backend.IDSet
+	snapshots    restic.IDSet
 }{
 	{
 		filepath.Join("..", "..", "restic", "checker", "testdata", "checker-test-repo.tar.gz"),
-		backend.NewIDSet(ParseID("a13c11e582b77a693dd75ab4e3a3ba96538a056594a4b9076e4cacebe6e06d43")),
+		restic.NewIDSet(restic.TestParseID("a13c11e582b77a693dd75ab4e3a3ba96538a056594a4b9076e4cacebe6e06d43")),
 	},
 	{
 		filepath.Join("testdata", "old-index-repo.tar.gz"),
@@ -823,9 +823,9 @@ var optimizeTests = []struct {
 	},
 	{
 		filepath.Join("testdata", "old-index-repo.tar.gz"),
-		backend.NewIDSet(
-			ParseID("f7d83db709977178c9d1a09e4009355e534cde1a135b8186b8b118a3fc4fcd41"),
-			ParseID("51d249d28815200d59e4be7b3f21a157b864dc343353df9d8e498220c2499b02"),
+		restic.NewIDSet(
+			restic.TestParseID("f7d83db709977178c9d1a09e4009355e534cde1a135b8186b8b118a3fc4fcd41"),
+			restic.TestParseID("51d249d28815200d59e4be7b3f21a157b864dc343353df9d8e498220c2499b02"),
 		),
 	},
 }
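
The test tables now construct IDs with `restic.TestParseID` instead of a local `ParseID` helper, presumably a must-style wrapper so a bad literal fails loudly while the table stays a single expression. A sketch; the panic behavior is an assumption:

```go
package restic

import "fmt"

// TestParseID parses s as an ID and panics on failure, so it can be used
// directly inside composite literals in test tables.
func TestParseID(s string) ID {
	id, err := ParseID(s)
	if err != nil {
		panic(fmt.Sprintf("invalid test ID %q: %v", s, err))
	}
	return id
}
```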


@@ -7,8 +7,9 @@ import (
 	"restic/debug"
 	"runtime"

+	"restic/errors"
+
 	"github.com/jessevdk/go-flags"
-	"github.com/pkg/errors"
 )

 func init() {
@@ -42,7 +43,7 @@ func main() {
 	switch {
 	case restic.IsAlreadyLocked(errors.Cause(err)):
 		fmt.Fprintf(os.Stderr, "%v\nthe `unlock` command can be used to remove stale locks\n", err)
-	case restic.IsFatal(errors.Cause(err)):
+	case errors.IsFatal(errors.Cause(err)):
 		fmt.Fprintf(os.Stderr, "%v\n", err)
 	case err != nil:
 		fmt.Fprintf(os.Stderr, "%+v\n", err)


@@ -1,123 +0,0 @@
-package restic
-
-import (
-	"encoding/json"
-	"io"
-	"restic/backend"
-	"restic/debug"
-	"restic/pack"
-	"restic/repository"
-	"time"
-
-	"github.com/pkg/errors"
-	"github.com/restic/chunker"
-)
-
-// saveTreeJSON stores a tree in the repository.
-func saveTreeJSON(repo *repository.Repository, item interface{}) (backend.ID, error) {
-	data, err := json.Marshal(item)
-	if err != nil {
-		return backend.ID{}, errors.Wrap(err, "")
-	}
-	data = append(data, '\n')
-
-	// check if tree has been saved before
-	id := backend.Hash(data)
-	if repo.Index().Has(id, pack.Tree) {
-		return id, nil
-	}
-
-	return repo.SaveJSON(pack.Tree, item)
-}
-
-// ArchiveReader reads from the reader and archives the data. Returned is the
-// resulting snapshot and its ID.
-func ArchiveReader(repo *repository.Repository, p *Progress, rd io.Reader, name string) (*Snapshot, backend.ID, error) {
-	debug.Log("ArchiveReader", "start archiving %s", name)
-	sn, err := NewSnapshot([]string{name})
-	if err != nil {
-		return nil, backend.ID{}, err
-	}
-
-	p.Start()
-	defer p.Done()
-
-	chnker := chunker.New(rd, repo.Config.ChunkerPolynomial)
-
-	var ids backend.IDs
-	var fileSize uint64
-
-	for {
-		chunk, err := chnker.Next(getBuf())
-		if errors.Cause(err) == io.EOF {
-			break
-		}
-
-		if err != nil {
-			return nil, backend.ID{}, errors.Wrap(err, "chunker.Next()")
-		}
-
-		id := backend.Hash(chunk.Data)
-
-		if !repo.Index().Has(id, pack.Data) {
-			_, err := repo.SaveAndEncrypt(pack.Data, chunk.Data, nil)
-			if err != nil {
-				return nil, backend.ID{}, err
-			}
-			debug.Log("ArchiveReader", "saved blob %v (%d bytes)\n", id.Str(), chunk.Length)
-		} else {
-			debug.Log("ArchiveReader", "blob %v already saved in the repo\n", id.Str())
-		}
-
-		freeBuf(chunk.Data)
-
-		ids = append(ids, id)
-
-		p.Report(Stat{Bytes: uint64(chunk.Length)})
-		fileSize += uint64(chunk.Length)
-	}
-
-	tree := &Tree{
-		Nodes: []*Node{
-			&Node{
-				Name:       name,
-				AccessTime: time.Now(),
-				ModTime:    time.Now(),
-				Type:       "file",
-				Mode:       0644,
-				Size:       fileSize,
-				UID:        sn.UID,
-				GID:        sn.GID,
-				User:       sn.Username,
-				Content:    ids,
-			},
-		},
-	}
-
-	treeID, err := saveTreeJSON(repo, tree)
-	if err != nil {
-		return nil, backend.ID{}, err
-	}
-
-	sn.Tree = &treeID
-	debug.Log("ArchiveReader", "tree saved as %v", treeID.Str())
-
-	id, err := repo.SaveJSONUnpacked(backend.Snapshot, sn)
-	if err != nil {
-		return nil, backend.ID{}, err
-	}
-
-	sn.id = &id
-	debug.Log("ArchiveReader", "snapshot saved as %v", id.Str())
-
-	err = repo.Flush()
-	if err != nil {
-		return nil, backend.ID{}, err
-	}
-
-	err = repo.SaveIndex()
-	if err != nil {
-		return nil, backend.ID{}, err
-	}
-
-	return sn, id, nil
-}


@@ -0,0 +1,103 @@
+package archiver
+
+import (
+	"io"
+	"restic"
+	"restic/debug"
+	"time"
+
+	"restic/errors"
+
+	"github.com/restic/chunker"
+)
+
+// ArchiveReader reads from the reader and archives the data. Returned is the
+// resulting snapshot and its ID.
+func ArchiveReader(repo restic.Repository, p *restic.Progress, rd io.Reader, name string) (*restic.Snapshot, restic.ID, error) {
+	debug.Log("ArchiveReader", "start archiving %s", name)
+	sn, err := restic.NewSnapshot([]string{name})
+	if err != nil {
+		return nil, restic.ID{}, err
+	}
+
+	p.Start()
+	defer p.Done()
+
+	chnker := chunker.New(rd, repo.Config().ChunkerPolynomial)
+
+	var ids restic.IDs
+	var fileSize uint64
+
+	for {
+		chunk, err := chnker.Next(getBuf())
+		if errors.Cause(err) == io.EOF {
+			break
+		}
+
+		if err != nil {
+			return nil, restic.ID{}, errors.Wrap(err, "chunker.Next()")
+		}
+
+		id := restic.Hash(chunk.Data)
+
+		if !repo.Index().Has(id, restic.DataBlob) {
+			_, err := repo.SaveBlob(restic.DataBlob, chunk.Data, id)
+			if err != nil {
+				return nil, restic.ID{}, err
+			}
+			debug.Log("ArchiveReader", "saved blob %v (%d bytes)\n", id.Str(), chunk.Length)
+		} else {
+			debug.Log("ArchiveReader", "blob %v already saved in the repo\n", id.Str())
+		}
+
+		freeBuf(chunk.Data)
+
+		ids = append(ids, id)
+
+		p.Report(restic.Stat{Bytes: uint64(chunk.Length)})
+		fileSize += uint64(chunk.Length)
+	}
+
+	tree := &restic.Tree{
+		Nodes: []*restic.Node{
+			&restic.Node{
+				Name:       name,
+				AccessTime: time.Now(),
+				ModTime:    time.Now(),
+				Type:       "file",
+				Mode:       0644,
+				Size:       fileSize,
+				UID:        sn.UID,
+				GID:        sn.GID,
+				User:       sn.Username,
+				Content:    ids,
+			},
+		},
+	}
+
+	treeID, err := repo.SaveTree(tree)
+	if err != nil {
+		return nil, restic.ID{}, err
+	}
+
+	sn.Tree = &treeID
+	debug.Log("ArchiveReader", "tree saved as %v", treeID.Str())
+
+	id, err := repo.SaveJSONUnpacked(restic.SnapshotFile, sn)
+	if err != nil {
+		return nil, restic.ID{}, err
+	}
+
+	debug.Log("ArchiveReader", "snapshot saved as %v", id.Str())
+
+	err = repo.Flush()
+	if err != nil {
+		return nil, restic.ID{}, err
+	}
+
+	err = repo.SaveIndex()
+	if err != nil {
+		return nil, restic.ID{}, err
+	}
+
+	return sn, id, nil
+}
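
The relocated `ArchiveReader` now takes the `restic.Repository` interface rather than the concrete `*repository.Repository`, which is what lets both command code and tests call it. A hedged usage sketch mirroring the stdin path in `cmd_backup.go`; whether `p` may be nil depends on `restic.Progress` being nil-safe, which is an assumption here:

```go
package main

import (
	"fmt"
	"io"

	"restic"
	"restic/archiver"
)

// archiveStream chunks an io.Reader, stores the blobs, and reports the
// resulting snapshot, as the backup command does for stdin.
func archiveStream(repo restic.Repository, p *restic.Progress, rd io.Reader, name string) error {
	sn, id, err := archiver.ArchiveReader(repo, p, rd, name)
	if err != nil {
		return err
	}
	fmt.Printf("saved snapshot %v (tree %v)\n", id.Str(), sn.Tree.Str())
	return nil
}
```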


@@ -1,28 +1,25 @@
-package restic
+package archiver

 import (
 	"bytes"
 	"io"
 	"math/rand"
-	"restic/backend"
-	"restic/pack"
+	"restic"
 	"restic/repository"
 	"testing"
-
-	"github.com/restic/chunker"
 )

-func loadBlob(t *testing.T, repo *repository.Repository, id backend.ID, buf []byte) []byte {
-	buf, err := repo.LoadBlob(id, pack.Data, buf)
+func loadBlob(t *testing.T, repo restic.Repository, id restic.ID, buf []byte) int {
+	n, err := repo.LoadBlob(restic.DataBlob, id, buf)
 	if err != nil {
 		t.Fatalf("LoadBlob(%v) returned error %v", id, err)
 	}

-	return buf
+	return n
 }

-func checkSavedFile(t *testing.T, repo *repository.Repository, treeID backend.ID, name string, rd io.Reader) {
-	tree, err := LoadTree(repo, treeID)
+func checkSavedFile(t *testing.T, repo restic.Repository, treeID restic.ID, name string, rd io.Reader) {
+	tree, err := repo.LoadTree(treeID)
 	if err != nil {
 		t.Fatalf("LoadTree() returned error %v", err)
 	}
@@ -41,12 +38,19 @@ func checkSavedFile(t *testing.T, repo *repository.Repository, treeID backend.ID
 	}

 	// check blobs
-	buf := make([]byte, chunker.MaxSize)
-	buf2 := make([]byte, chunker.MaxSize)
 	for i, id := range node.Content {
-		buf = loadBlob(t, repo, id, buf)
+		size, err := repo.LookupBlobSize(id, restic.DataBlob)
+		if err != nil {
+			t.Fatal(err)
+		}

-		buf2 = buf2[:len(buf)]
+		buf := make([]byte, int(size))
+		n := loadBlob(t, repo, id, buf)
+		if n != len(buf) {
+			t.Errorf("wrong number of bytes read, want %d, got %d", len(buf), n)
+		}
+
+		buf2 := make([]byte, int(size))
 		_, err = io.ReadFull(rd, buf2)
 		if err != nil {
 			t.Fatal(err)
@@ -58,6 +62,11 @@ func checkSavedFile(t *testing.T, repo *repository.Repository, treeID backend.ID
 	}
 }

+// fakeFile returns a reader which yields deterministic pseudo-random data.
+func fakeFile(t testing.TB, seed, size int64) io.Reader {
+	return io.LimitReader(restic.NewRandReader(rand.New(rand.NewSource(seed))), size)
+}
+
 func TestArchiveReader(t *testing.T) {
 	repo, cleanup := repository.TestRepository(t)
 	defer cleanup()

@ -1,4 +1,4 @@
package restic package archiver
import ( import (
"encoding/json" "encoding/json"
@ -6,18 +6,17 @@ import (
"io" "io"
"os" "os"
"path/filepath" "path/filepath"
"restic"
"sort" "sort"
"sync" "sync"
"time" "time"
"github.com/pkg/errors" "restic/errors"
"restic/walk"
"restic/backend"
"restic/debug" "restic/debug"
"restic/fs" "restic/fs"
"restic/pack"
"restic/pipe" "restic/pipe"
"restic/repository"
"github.com/restic/chunker" "github.com/restic/chunker"
) )
@ -32,9 +31,9 @@ var archiverAllowAllFiles = func(string, os.FileInfo) bool { return true }
// Archiver is used to backup a set of directories. // Archiver is used to backup a set of directories.
type Archiver struct { type Archiver struct {
repo *repository.Repository repo restic.Repository
knownBlobs struct { knownBlobs struct {
backend.IDSet restic.IDSet
sync.Mutex sync.Mutex
} }
@ -45,16 +44,16 @@ type Archiver struct {
Excludes []string Excludes []string
} }
// NewArchiver returns a new archiver. // New returns a new archiver.
func NewArchiver(repo *repository.Repository) *Archiver { func New(repo restic.Repository) *Archiver {
arch := &Archiver{ arch := &Archiver{
repo: repo, repo: repo,
blobToken: make(chan struct{}, maxConcurrentBlobs), blobToken: make(chan struct{}, maxConcurrentBlobs),
knownBlobs: struct { knownBlobs: struct {
backend.IDSet restic.IDSet
sync.Mutex sync.Mutex
}{ }{
IDSet: backend.NewIDSet(), IDSet: restic.NewIDSet(),
}, },
} }
@ -72,7 +71,7 @@ func NewArchiver(repo *repository.Repository) *Archiver {
// When the blob is not known, false is returned and the blob is added to the // When the blob is not known, false is returned and the blob is added to the
// list. This means that the caller false is returned to is responsible to save // list. This means that the caller false is returned to is responsible to save
// the blob to the backend. // the blob to the backend.
func (arch *Archiver) isKnownBlob(id backend.ID, t pack.BlobType) bool { func (arch *Archiver) isKnownBlob(id restic.ID, t restic.BlobType) bool {
arch.knownBlobs.Lock() arch.knownBlobs.Lock()
defer arch.knownBlobs.Unlock() defer arch.knownBlobs.Unlock()
@ -91,15 +90,15 @@ func (arch *Archiver) isKnownBlob(id backend.ID, t pack.BlobType) bool {
} }
// Save stores a blob read from rd in the repository. // Save stores a blob read from rd in the repository.
func (arch *Archiver) Save(t pack.BlobType, data []byte, id backend.ID) error { func (arch *Archiver) Save(t restic.BlobType, data []byte, id restic.ID) error {
debug.Log("Archiver.Save", "Save(%v, %v)\n", t, id.Str()) debug.Log("Archiver.Save", "Save(%v, %v)\n", t, id.Str())
if arch.isKnownBlob(id, pack.Data) { if arch.isKnownBlob(id, restic.DataBlob) {
debug.Log("Archiver.Save", "blob %v is known\n", id.Str()) debug.Log("Archiver.Save", "blob %v is known\n", id.Str())
return nil return nil
} }
_, err := arch.repo.SaveAndEncrypt(t, data, &id) _, err := arch.repo.SaveBlob(t, data, id)
if err != nil { if err != nil {
debug.Log("Archiver.Save", "Save(%v, %v): error %v\n", t, id.Str(), err) debug.Log("Archiver.Save", "Save(%v, %v): error %v\n", t, id.Str(), err)
return err return err
@ -110,40 +109,40 @@ func (arch *Archiver) Save(t pack.BlobType, data []byte, id backend.ID) error {
} }
// SaveTreeJSON stores a tree in the repository. // SaveTreeJSON stores a tree in the repository.
func (arch *Archiver) SaveTreeJSON(item interface{}) (backend.ID, error) { func (arch *Archiver) SaveTreeJSON(item interface{}) (restic.ID, error) {
data, err := json.Marshal(item) data, err := json.Marshal(item)
if err != nil { if err != nil {
return backend.ID{}, errors.Wrap(err, "Marshal") return restic.ID{}, errors.Wrap(err, "Marshal")
} }
data = append(data, '\n') data = append(data, '\n')
// check if tree has been saved before // check if tree has been saved before
id := backend.Hash(data) id := restic.Hash(data)
if arch.isKnownBlob(id, pack.Tree) { if arch.isKnownBlob(id, restic.TreeBlob) {
return id, nil return id, nil
} }
return arch.repo.SaveJSON(pack.Tree, item) return arch.repo.SaveBlob(restic.TreeBlob, data, id)
} }
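SaveTreeJSON's new body makes the content-addressing explicit: marshal, append a newline, hash, and skip the store when the hash is already known. A self-contained sketch of that flow, assuming plain SHA-256 as a stand-in for restic.Hash and a map as a stand-in for the repository:

package main

import (
	"crypto/sha256"
	"encoding/json"
	"fmt"
)

// saveTreeJSON follows the marshal-hash-dedup flow of SaveTreeJSON.
func saveTreeJSON(item interface{}, stored map[[32]byte][]byte) ([32]byte, bool) {
	data, err := json.Marshal(item)
	if err != nil {
		panic(err)
	}
	data = append(data, '\n') // the trailing newline is part of the hashed bytes

	id := sha256.Sum256(data) // stand-in for restic.Hash
	if _, ok := stored[id]; ok {
		return id, true // already known: skip the save entirely
	}
	stored[id] = data // stand-in for repo.SaveBlob(restic.TreeBlob, data, id)
	return id, false
}

func main() {
	stored := make(map[[32]byte][]byte)
	tree := map[string][]string{"nodes": {"a", "b"}}

	id, dup := saveTreeJSON(tree, stored)
	fmt.Printf("id %x, duplicate=%v\n", id[:4], dup)
	_, dup = saveTreeJSON(tree, stored)
	fmt.Printf("saved again, duplicate=%v\n", dup)
}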
func (arch *Archiver) reloadFileIfChanged(node *Node, file fs.File) (*Node, error) { func (arch *Archiver) reloadFileIfChanged(node *restic.Node, file fs.File) (*restic.Node, error) {
fi, err := file.Stat() fi, err := file.Stat()
if err != nil { if err != nil {
return nil, errors.Wrap(err, "Stat") return nil, errors.Wrap(err, "Stat")
} }
if fi.ModTime() == node.ModTime { if fi.ModTime() == node.ModTime {
return node, nil return node, nil
} }
err = arch.Error(node.path, fi, errors.New("file has changed")) err = arch.Error(node.Path, fi, errors.New("file has changed"))
if err != nil { if err != nil {
return nil, err return nil, err
} }
node, err = NodeFromFileInfo(node.path, fi) node, err = restic.NodeFromFileInfo(node.Path, fi)
if err != nil { if err != nil {
debug.Log("Archiver.SaveFile", "NodeFromFileInfo returned error for %v: %v", node.path, err) debug.Log("Archiver.SaveFile", "restic.NodeFromFileInfo returned error for %v: %v", node.Path, err)
return nil, err return nil, err
} }
@ -151,21 +150,21 @@ func (arch *Archiver) reloadFileIfChanged(node *Node, file fs.File) (*Node, erro
} }
type saveResult struct { type saveResult struct {
id backend.ID id restic.ID
bytes uint64 bytes uint64
} }
func (arch *Archiver) saveChunk(chunk chunker.Chunk, p *Progress, token struct{}, file fs.File, resultChannel chan<- saveResult) { func (arch *Archiver) saveChunk(chunk chunker.Chunk, p *restic.Progress, token struct{}, file fs.File, resultChannel chan<- saveResult) {
defer freeBuf(chunk.Data) defer freeBuf(chunk.Data)
id := backend.Hash(chunk.Data) id := restic.Hash(chunk.Data)
err := arch.Save(pack.Data, chunk.Data, id) err := arch.Save(restic.DataBlob, chunk.Data, id)
// TODO handle error // TODO handle error
if err != nil { if err != nil {
panic(err) panic(err)
} }
p.Report(Stat{Bytes: uint64(chunk.Length)}) p.Report(restic.Stat{Bytes: uint64(chunk.Length)})
arch.blobToken <- token arch.blobToken <- token
resultChannel <- saveResult{id: id, bytes: uint64(chunk.Length)} resultChannel <- saveResult{id: id, bytes: uint64(chunk.Length)}
} }
@ -184,11 +183,11 @@ func waitForResults(resultChannels [](<-chan saveResult)) ([]saveResult, error)
return results, nil return results, nil
} }
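waitForResults drains the per-chunk channels in the order they were created, which is what keeps node.Content in file order even though the saves complete concurrently. A toy version of that ordered fan-in:

package main

import "fmt"

// Each work item gets its own buffered channel; collecting the channels
// in creation order keeps results ordered even though the goroutines
// finish in any order.
func main() {
	work := []int{10, 20, 30}
	channels := make([]<-chan int, 0, len(work))

	for _, w := range work {
		ch := make(chan int, 1)
		channels = append(channels, ch)
		go func(w int, out chan<- int) {
			out <- w * w // stand-in for saveChunk sending a saveResult
		}(w, ch)
	}

	results := make([]int, 0, len(channels))
	for _, ch := range channels {
		results = append(results, <-ch) // ordered fan-in, as in waitForResults
	}
	fmt.Println(results) // [100 400 900]
}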
func updateNodeContent(node *Node, results []saveResult) error { func updateNodeContent(node *restic.Node, results []saveResult) error {
debug.Log("Archiver.Save", "checking size for file %s", node.path) debug.Log("Archiver.Save", "checking size for file %s", node.Path)
var bytes uint64 var bytes uint64
node.Content = make([]backend.ID, len(results)) node.Content = make([]restic.ID, len(results))
for i, b := range results { for i, b := range results {
node.Content[i] = b.id node.Content[i] = b.id
@ -198,18 +197,18 @@ func updateNodeContent(node *Node, results []saveResult) error {
} }
if bytes != node.Size { if bytes != node.Size {
return errors.Errorf("errors saving node %q: saved %d bytes, wanted %d bytes", node.path, bytes, node.Size) return errors.Errorf("errors saving node %q: saved %d bytes, wanted %d bytes", node.Path, bytes, node.Size)
} }
debug.Log("Archiver.SaveFile", "SaveFile(%q): %v blobs\n", node.path, len(results)) debug.Log("Archiver.SaveFile", "SaveFile(%q): %v blobs\n", node.Path, len(results))
return nil return nil
} }
// SaveFile stores the content of the file on the backend as a Blob by calling // SaveFile stores the content of the file on the backend as a Blob by calling
// Save for each chunk. // Save for each chunk.
func (arch *Archiver) SaveFile(p *Progress, node *Node) error { func (arch *Archiver) SaveFile(p *restic.Progress, node *restic.Node) error {
file, err := fs.Open(node.path) file, err := fs.Open(node.Path)
defer file.Close() defer file.Close()
if err != nil { if err != nil {
return errors.Wrap(err, "Open") return errors.Wrap(err, "Open")
@ -220,7 +219,7 @@ func (arch *Archiver) SaveFile(p *Progress, node *Node) error {
return err return err
} }
chnker := chunker.New(file, arch.repo.Config.ChunkerPolynomial) chnker := chunker.New(file, arch.repo.Config().ChunkerPolynomial)
resultChannels := [](<-chan saveResult){} resultChannels := [](<-chan saveResult){}
for { for {
@ -247,7 +246,7 @@ func (arch *Archiver) SaveFile(p *Progress, node *Node) error {
return err return err
} }
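SaveFile's loop drives the content-defined chunker from github.com/restic/chunker. A trimmed, runnable sketch of that read loop; chunker.RandomPolynomial here stands in for repo.Config().ChunkerPolynomial, and each iteration allocates a fresh buffer where the archiver would reuse getBuf():

package main

import (
	"bytes"
	"crypto/rand"
	"fmt"
	"io"

	"github.com/restic/chunker"
)

func main() {
	data := make([]byte, 8<<20) // 8 MiB of random input
	if _, err := io.ReadFull(rand.Reader, data); err != nil {
		panic(err)
	}

	pol, err := chunker.RandomPolynomial() // stand-in for the repo's polynomial
	if err != nil {
		panic(err)
	}

	chnker := chunker.New(bytes.NewReader(data), pol)
	for {
		chunk, err := chnker.Next(make([]byte, chunker.MinSize))
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		// the archiver hashes chunk.Data here and hands it to
		// arch.Save(restic.DataBlob, ...) on its own goroutine
		fmt.Printf("chunk of %d bytes\n", chunk.Length)
	}
}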
func (arch *Archiver) fileWorker(wg *sync.WaitGroup, p *Progress, done <-chan struct{}, entCh <-chan pipe.Entry) { func (arch *Archiver) fileWorker(wg *sync.WaitGroup, p *restic.Progress, done <-chan struct{}, entCh <-chan pipe.Entry) {
defer func() { defer func() {
debug.Log("Archiver.fileWorker", "done") debug.Log("Archiver.fileWorker", "done")
wg.Done() wg.Done()
@ -269,16 +268,16 @@ func (arch *Archiver) fileWorker(wg *sync.WaitGroup, p *Progress, done <-chan st
fmt.Fprintf(os.Stderr, "error for %v: %v\n", e.Path(), e.Error()) fmt.Fprintf(os.Stderr, "error for %v: %v\n", e.Path(), e.Error())
// ignore this file // ignore this file
e.Result() <- nil e.Result() <- nil
p.Report(Stat{Errors: 1}) p.Report(restic.Stat{Errors: 1})
continue continue
} }
node, err := NodeFromFileInfo(e.Fullpath(), e.Info()) node, err := restic.NodeFromFileInfo(e.Fullpath(), e.Info())
if err != nil { if err != nil {
// TODO: integrate error reporting // TODO: integrate error reporting
debug.Log("Archiver.fileWorker", "NodeFromFileInfo returned error for %v: %v", node.path, err) debug.Log("Archiver.fileWorker", "restic.NodeFromFileInfo returned error for %v: %v", node.Path, err)
e.Result() <- nil e.Result() <- nil
p.Report(Stat{Errors: 1}) p.Report(restic.Stat{Errors: 1})
continue continue
} }
@ -286,12 +285,12 @@ func (arch *Archiver) fileWorker(wg *sync.WaitGroup, p *Progress, done <-chan st
if e.Node != nil { if e.Node != nil {
debug.Log("Archiver.fileWorker", " %v use old data", e.Path()) debug.Log("Archiver.fileWorker", " %v use old data", e.Path())
oldNode := e.Node.(*Node) oldNode := e.Node.(*restic.Node)
// check if all content is still available in the repository // check if all content is still available in the repository
contentMissing := false contentMissing := false
for _, blob := range oldNode.blobs { for _, blob := range oldNode.Content {
if ok, err := arch.repo.Backend().Test(backend.Data, blob.Storage.String()); !ok || err != nil { if !arch.repo.Index().Has(blob, restic.DataBlob) {
debug.Log("Archiver.fileWorker", " %v not using old data, %v (%v) is missing", e.Path(), blob.ID.Str(), blob.Storage.Str()) debug.Log("Archiver.fileWorker", " %v not using old data, %v is missing", e.Path(), blob.Str())
contentMissing = true contentMissing = true
break break
} }
@ -299,7 +298,6 @@ func (arch *Archiver) fileWorker(wg *sync.WaitGroup, p *Progress, done <-chan st
if !contentMissing { if !contentMissing {
node.Content = oldNode.Content node.Content = oldNode.Content
node.blobs = oldNode.blobs
debug.Log("Archiver.fileWorker", " %v content is complete", e.Path()) debug.Log("Archiver.fileWorker", " %v content is complete", e.Path())
} }
} else { } else {
@ -312,20 +310,20 @@ func (arch *Archiver) fileWorker(wg *sync.WaitGroup, p *Progress, done <-chan st
err = arch.SaveFile(p, node) err = arch.SaveFile(p, node)
if err != nil { if err != nil {
// TODO: integrate error reporting // TODO: integrate error reporting
fmt.Fprintf(os.Stderr, "error for %v: %v\n", node.path, err) fmt.Fprintf(os.Stderr, "error for %v: %v\n", node.Path, err)
// ignore this file // ignore this file
e.Result() <- nil e.Result() <- nil
p.Report(Stat{Errors: 1}) p.Report(restic.Stat{Errors: 1})
continue continue
} }
} else { } else {
// report old data size // report old data size
p.Report(Stat{Bytes: node.Size}) p.Report(restic.Stat{Bytes: node.Size})
} }
debug.Log("Archiver.fileWorker", " processed %v, %d/%d blobs", e.Path(), len(node.Content), len(node.blobs)) debug.Log("Archiver.fileWorker", " processed %v, %d blobs", e.Path(), len(node.Content))
e.Result() <- node e.Result() <- node
p.Report(Stat{Files: 1}) p.Report(restic.Stat{Files: 1})
case <-done: case <-done:
// pipeline was cancelled // pipeline was cancelled
return return
@ -333,7 +331,7 @@ func (arch *Archiver) fileWorker(wg *sync.WaitGroup, p *Progress, done <-chan st
} }
} }
func (arch *Archiver) dirWorker(wg *sync.WaitGroup, p *Progress, done <-chan struct{}, dirCh <-chan pipe.Dir) { func (arch *Archiver) dirWorker(wg *sync.WaitGroup, p *restic.Progress, done <-chan struct{}, dirCh <-chan pipe.Dir) {
debug.Log("Archiver.dirWorker", "start") debug.Log("Archiver.dirWorker", "start")
defer func() { defer func() {
debug.Log("Archiver.dirWorker", "done") debug.Log("Archiver.dirWorker", "done")
@ -352,11 +350,11 @@ func (arch *Archiver) dirWorker(wg *sync.WaitGroup, p *Progress, done <-chan str
if dir.Error() != nil { if dir.Error() != nil {
fmt.Fprintf(os.Stderr, "error walking dir %v: %v\n", dir.Path(), dir.Error()) fmt.Fprintf(os.Stderr, "error walking dir %v: %v\n", dir.Path(), dir.Error())
dir.Result() <- nil dir.Result() <- nil
p.Report(Stat{Errors: 1}) p.Report(restic.Stat{Errors: 1})
continue continue
} }
tree := NewTree() tree := restic.NewTree()
// wait for all content // wait for all content
for _, ch := range dir.Entries { for _, ch := range dir.Entries {
@ -371,22 +369,22 @@ func (arch *Archiver) dirWorker(wg *sync.WaitGroup, p *Progress, done <-chan str
} }
// else insert node // else insert node
node := res.(*Node) node := res.(*restic.Node)
tree.Insert(node) tree.Insert(node)
if node.Type == "dir" { if node.Type == "dir" {
debug.Log("Archiver.dirWorker", "got tree node for %s: %v", node.path, node.Subtree) debug.Log("Archiver.dirWorker", "got tree node for %s: %v", node.Path, node.Subtree)
if node.Subtree.IsNull() { if node.Subtree.IsNull() {
panic("invalid null subtree ID") panic("invalid null subtree restic.ID")
} }
} }
} }
node := &Node{} node := &restic.Node{}
if dir.Path() != "" && dir.Info() != nil { if dir.Path() != "" && dir.Info() != nil {
n, err := NodeFromFileInfo(dir.Path(), dir.Info()) n, err := restic.NodeFromFileInfo(dir.Path(), dir.Info())
if err != nil { if err != nil {
n.Error = err.Error() n.Error = err.Error()
dir.Result() <- n dir.Result() <- n
@ -405,7 +403,7 @@ func (arch *Archiver) dirWorker(wg *sync.WaitGroup, p *Progress, done <-chan str
} }
debug.Log("Archiver.dirWorker", "save tree for %s: %v", dir.Path(), id.Str()) debug.Log("Archiver.dirWorker", "save tree for %s: %v", dir.Path(), id.Str())
if id.IsNull() { if id.IsNull() {
panic("invalid null subtree ID return from SaveTreeJSON()") panic("invalid null subtree restic.ID return from SaveTreeJSON()")
} }
node.Subtree = &id node.Subtree = &id
@ -414,7 +412,7 @@ func (arch *Archiver) dirWorker(wg *sync.WaitGroup, p *Progress, done <-chan str
dir.Result() <- node dir.Result() <- node
if dir.Path() != "" { if dir.Path() != "" {
p.Report(Stat{Dirs: 1}) p.Report(restic.Stat{Dirs: 1})
} }
case <-done: case <-done:
// pipeline was cancelled // pipeline was cancelled
@ -424,7 +422,7 @@ func (arch *Archiver) dirWorker(wg *sync.WaitGroup, p *Progress, done <-chan str
} }
type archivePipe struct { type archivePipe struct {
Old <-chan WalkTreeJob Old <-chan walk.TreeJob
New <-chan pipe.Job New <-chan pipe.Job
} }
@ -459,7 +457,7 @@ func copyJobs(done <-chan struct{}, in <-chan pipe.Job, out chan<- pipe.Job) {
type archiveJob struct { type archiveJob struct {
hasOld bool hasOld bool
old WalkTreeJob old walk.TreeJob
new pipe.Job new pipe.Job
} }
@ -473,7 +471,7 @@ func (a *archivePipe) compare(done <-chan struct{}, out chan<- pipe.Job) {
var ( var (
loadOld, loadNew bool = true, true loadOld, loadNew bool = true, true
ok bool ok bool
oldJob WalkTreeJob oldJob walk.TreeJob
newJob pipe.Job newJob pipe.Job
) )
@ -567,7 +565,7 @@ func (j archiveJob) Copy() pipe.Job {
} }
// if file is newer, return the new job // if file is newer, return the new job
if j.old.Node.isNewer(j.new.Fullpath(), j.new.Info()) { if j.old.Node.IsNewer(j.new.Fullpath(), j.new.Info()) {
debug.Log("archiveJob.Copy", " job %v is newer", j.new.Path()) debug.Log("archiveJob.Copy", " job %v is newer", j.new.Path())
return j.new return j.new
} }
@ -632,10 +630,10 @@ func (p baseNameSlice) Len() int { return len(p) }
func (p baseNameSlice) Less(i, j int) bool { return filepath.Base(p[i]) < filepath.Base(p[j]) } func (p baseNameSlice) Less(i, j int) bool { return filepath.Base(p[i]) < filepath.Base(p[j]) }
func (p baseNameSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } func (p baseNameSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
// Snapshot creates a snapshot of the given paths. If parentID is set, this is // Snapshot creates a snapshot of the given paths. If parentID is set, this is
// used to compare the files to the ones archived at the time this snapshot was // used to compare the files to the ones archived at the time this snapshot was
// taken. // taken.
func (arch *Archiver) Snapshot(p *Progress, paths []string, parentID *backend.ID) (*Snapshot, backend.ID, error) { func (arch *Archiver) Snapshot(p *restic.Progress, paths []string, parentID *restic.ID) (*restic.Snapshot, restic.ID, error) {
paths = unique(paths) paths = unique(paths)
sort.Sort(baseNameSlice(paths)) sort.Sort(baseNameSlice(paths))
@ -651,9 +649,9 @@ func (arch *Archiver) Snapshot(p *Progress, paths []string, parentID *backend.ID
defer p.Done() defer p.Done()
// create new snapshot // create new snapshot
sn, err := NewSnapshot(paths) sn, err := restic.NewSnapshot(paths)
if err != nil { if err != nil {
return nil, backend.ID{}, err return nil, restic.ID{}, err
} }
sn.Excludes = arch.Excludes sn.Excludes = arch.Excludes
@ -664,18 +662,18 @@ func (arch *Archiver) Snapshot(p *Progress, paths []string, parentID *backend.ID
sn.Parent = parentID sn.Parent = parentID
// load parent snapshot // load parent snapshot
parent, err := LoadSnapshot(arch.repo, *parentID) parent, err := restic.LoadSnapshot(arch.repo, *parentID)
if err != nil { if err != nil {
return nil, backend.ID{}, err return nil, restic.ID{}, err
} }
// start walker on old tree // start walker on old tree
ch := make(chan WalkTreeJob) ch := make(chan walk.TreeJob)
go WalkTree(arch.repo, *parent.Tree, done, ch) go walk.Tree(arch.repo, *parent.Tree, done, ch)
jobs.Old = ch jobs.Old = ch
} else { } else {
// use closed channel // use closed channel
ch := make(chan WalkTreeJob) ch := make(chan walk.TreeJob)
close(ch) close(ch)
jobs.Old = ch jobs.Old = ch
} }
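The closed-channel branch above is a small Go idiom: receiving from a closed channel never blocks and yields the zero value with ok set to false, so the job merger sees "no parent snapshot" and "parent snapshot fully walked" as the same case. In isolation:

package main

import "fmt"

func main() {
	ch := make(chan string)
	close(ch)

	for {
		job, ok := <-ch
		if !ok {
			fmt.Println("old side exhausted, only new jobs remain")
			break
		}
		fmt.Println("old job:", job)
	}
}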
@ -730,31 +728,29 @@ func (arch *Archiver) Snapshot(p *Progress, paths []string, parentID *backend.ID
debug.Log("Archiver.Snapshot", "workers terminated") debug.Log("Archiver.Snapshot", "workers terminated")
// receive the top-level tree // receive the top-level tree
root := (<-resCh).(*Node) root := (<-resCh).(*restic.Node)
debug.Log("Archiver.Snapshot", "root node received: %v", root.Subtree.Str()) debug.Log("Archiver.Snapshot", "root node received: %v", root.Subtree.Str())
sn.Tree = root.Subtree sn.Tree = root.Subtree
// save snapshot // save snapshot
id, err := arch.repo.SaveJSONUnpacked(backend.Snapshot, sn) id, err := arch.repo.SaveJSONUnpacked(restic.SnapshotFile, sn)
if err != nil { if err != nil {
return nil, backend.ID{}, err return nil, restic.ID{}, err
} }
// store ID in snapshot struct
sn.id = &id
debug.Log("Archiver.Snapshot", "saved snapshot %v", id.Str()) debug.Log("Archiver.Snapshot", "saved snapshot %v", id.Str())
// flush repository // flush repository
err = arch.repo.Flush() err = arch.repo.Flush()
if err != nil { if err != nil {
return nil, backend.ID{}, err return nil, restic.ID{}, err
} }
// save index // save index
err = arch.repo.SaveIndex() err = arch.repo.SaveIndex()
if err != nil { if err != nil {
debug.Log("Archiver.Snapshot", "error saving index: %v", err) debug.Log("Archiver.Snapshot", "error saving index: %v", err)
return nil, backend.ID{}, err return nil, restic.ID{}, err
} }
debug.Log("Archiver.Snapshot", "saved indexes") debug.Log("Archiver.Snapshot", "saved indexes")
@ -770,13 +766,13 @@ func isRegularFile(fi os.FileInfo) bool {
return fi.Mode()&(os.ModeType|os.ModeCharDevice) == 0 return fi.Mode()&(os.ModeType|os.ModeCharDevice) == 0
} }
// Scan traverses the dirs to collect Stat information while emitting progress // Scan traverses the dirs to collect restic.Stat information while emitting progress
// information with p. // information with p.
func Scan(dirs []string, filter pipe.SelectFunc, p *Progress) (Stat, error) { func Scan(dirs []string, filter pipe.SelectFunc, p *restic.Progress) (restic.Stat, error) {
p.Start() p.Start()
defer p.Done() defer p.Done()
var stat Stat var stat restic.Stat
for _, dir := range dirs { for _, dir := range dirs {
debug.Log("Scan", "Start for %v", dir) debug.Log("Scan", "Start for %v", dir)
@ -799,7 +795,7 @@ func Scan(dirs []string, filter pipe.SelectFunc, p *Progress) (Stat, error) {
return nil return nil
} }
s := Stat{} s := restic.Stat{}
if fi.IsDir() { if fi.IsDir() {
s.Dirs++ s.Dirs++
} else { } else {
@ -819,7 +815,7 @@ func Scan(dirs []string, filter pipe.SelectFunc, p *Progress) (Stat, error) {
debug.Log("Scan", "Done for %v, err: %v", dir, err) debug.Log("Scan", "Done for %v, err: %v", dir, err)
if err != nil { if err != nil {
return Stat{}, errors.Wrap(err, "fs.Walk") return restic.Stat{}, errors.Wrap(err, "fs.Walk")
} }
} }
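Scan boils down to a filesystem walk that accumulates counters. A reduced version with the standard filepath.Walk standing in for fs.Walk and plain integers for the restic.Stat fields (the real Scan also reports progress and distinguishes regular files):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	var dirs, files, bytes uint64

	err := filepath.Walk(".", func(path string, fi os.FileInfo, err error) error {
		if err != nil {
			return nil // skip unreadable entries, keep walking
		}
		if fi.IsDir() {
			dirs++
		} else {
			files++
			bytes += uint64(fi.Size())
		}
		return nil
	})
	if err != nil {
		panic(err)
	}

	fmt.Printf("dirs=%d files=%d bytes=%d\n", dirs, files, bytes)
}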

View file

@ -1,4 +1,4 @@
package restic_test package archiver_test
import ( import (
"crypto/rand" "crypto/rand"
@ -8,11 +8,11 @@ import (
"testing" "testing"
"time" "time"
"github.com/pkg/errors" "restic/errors"
"restic" "restic"
"restic/backend" "restic/archiver"
"restic/pack" "restic/mock"
"restic/repository" "restic/repository"
) )
@ -20,14 +20,14 @@ const parallelSaves = 50
const testSaveIndexTime = 100 * time.Millisecond const testSaveIndexTime = 100 * time.Millisecond
const testTimeout = 2 * time.Second const testTimeout = 2 * time.Second
var DupID backend.ID var DupID restic.ID
func randomID() backend.ID { func randomID() restic.ID {
if mrand.Float32() < 0.5 { if mrand.Float32() < 0.5 {
return DupID return DupID
} }
id := backend.ID{} id := restic.ID{}
_, err := io.ReadFull(rand.Reader, id[:]) _, err := io.ReadFull(rand.Reader, id[:])
if err != nil { if err != nil {
panic(err) panic(err)
@ -36,30 +36,30 @@ func randomID() backend.ID {
} }
// forgetfulBackend returns a backend that forgets everything. // forgetfulBackend returns a backend that forgets everything.
func forgetfulBackend() backend.Backend { func forgetfulBackend() restic.Backend {
be := &backend.MockBackend{} be := &mock.Backend{}
be.TestFn = func(t backend.Type, name string) (bool, error) { be.TestFn = func(t restic.FileType, name string) (bool, error) {
return false, nil return false, nil
} }
be.LoadFn = func(h backend.Handle, p []byte, off int64) (int, error) { be.LoadFn = func(h restic.Handle, p []byte, off int64) (int, error) {
return 0, errors.New("not found") return 0, errors.New("not found")
} }
be.SaveFn = func(h backend.Handle, p []byte) error { be.SaveFn = func(h restic.Handle, p []byte) error {
return nil return nil
} }
be.StatFn = func(h backend.Handle) (backend.BlobInfo, error) { be.StatFn = func(h restic.Handle) (restic.FileInfo, error) {
return backend.BlobInfo{}, errors.New("not found") return restic.FileInfo{}, errors.New("not found")
} }
be.RemoveFn = func(t backend.Type, name string) error { be.RemoveFn = func(t restic.FileType, name string) error {
return nil return nil
} }
be.ListFn = func(t backend.Type, done <-chan struct{}) <-chan string { be.ListFn = func(t restic.FileType, done <-chan struct{}) <-chan string {
ch := make(chan string) ch := make(chan string)
close(ch) close(ch)
return ch return ch
@ -85,7 +85,7 @@ func testArchiverDuplication(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
arch := restic.NewArchiver(repo) arch := archiver.New(repo)
wg := &sync.WaitGroup{} wg := &sync.WaitGroup{}
done := make(chan struct{}) done := make(chan struct{})
@ -102,13 +102,13 @@ func testArchiverDuplication(t *testing.T) {
id := randomID() id := randomID()
if repo.Index().Has(id, pack.Data) { if repo.Index().Has(id, restic.DataBlob) {
continue continue
} }
buf := make([]byte, 50) buf := make([]byte, 50)
err := arch.Save(pack.Data, buf, id) err := arch.Save(restic.DataBlob, buf, id)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }

View file

@ -1,10 +1,11 @@
package restic package archiver
import ( import (
"os" "os"
"testing" "testing"
"restic/pipe" "restic/pipe"
"restic/walk"
) )
var treeJobs = []string{ var treeJobs = []string{
@ -82,12 +83,12 @@ func (j testPipeJob) Error() error { return j.err }
func (j testPipeJob) Info() os.FileInfo { return j.fi } func (j testPipeJob) Info() os.FileInfo { return j.fi }
func (j testPipeJob) Result() chan<- pipe.Result { return j.res } func (j testPipeJob) Result() chan<- pipe.Result { return j.res }
func testTreeWalker(done <-chan struct{}, out chan<- WalkTreeJob) { func testTreeWalker(done <-chan struct{}, out chan<- walk.TreeJob) {
for _, e := range treeJobs { for _, e := range treeJobs {
select { select {
case <-done: case <-done:
return return
case out <- WalkTreeJob{Path: e}: case out <- walk.TreeJob{Path: e}:
} }
} }
@ -109,7 +110,7 @@ func testPipeWalker(done <-chan struct{}, out chan<- pipe.Job) {
func TestArchivePipe(t *testing.T) { func TestArchivePipe(t *testing.T) {
done := make(chan struct{}) done := make(chan struct{})
treeCh := make(chan WalkTreeJob) treeCh := make(chan walk.TreeJob)
pipeCh := make(chan pipe.Job) pipeCh := make(chan pipe.Job)
go testTreeWalker(done, treeCh) go testTreeWalker(done, treeCh)

View file

@ -1,4 +1,4 @@
package restic_test package archiver_test
import ( import (
"bytes" "bytes"
@ -7,14 +7,14 @@ import (
"time" "time"
"restic" "restic"
"restic/backend" "restic/archiver"
"restic/checker" "restic/checker"
"restic/crypto" "restic/crypto"
"restic/pack"
"restic/repository" "restic/repository"
. "restic/test" . "restic/test"
"github.com/pkg/errors" "restic/errors"
"github.com/restic/chunker" "github.com/restic/chunker"
) )
@ -48,8 +48,8 @@ func benchmarkChunkEncrypt(b testing.TB, buf, buf2 []byte, rd Rdr, key *crypto.K
} }
func BenchmarkChunkEncrypt(b *testing.B) { func BenchmarkChunkEncrypt(b *testing.B) {
repo := SetupRepo() repo, cleanup := repository.TestRepository(b)
defer TeardownRepo(repo) defer cleanup()
data := Random(23, 10<<20) // 10MiB data := Random(23, 10<<20) // 10MiB
rd := bytes.NewReader(data) rd := bytes.NewReader(data)
@ -80,8 +80,8 @@ func benchmarkChunkEncryptP(b *testing.PB, buf []byte, rd Rdr, key *crypto.Key)
} }
func BenchmarkChunkEncryptParallel(b *testing.B) { func BenchmarkChunkEncryptParallel(b *testing.B) {
repo := SetupRepo() repo, cleanup := repository.TestRepository(b)
defer TeardownRepo(repo) defer cleanup()
data := Random(23, 10<<20) // 10MiB data := Random(23, 10<<20) // 10MiB
@ -99,10 +99,10 @@ func BenchmarkChunkEncryptParallel(b *testing.B) {
} }
func archiveDirectory(b testing.TB) { func archiveDirectory(b testing.TB) {
repo := SetupRepo() repo, cleanup := repository.TestRepository(b)
defer TeardownRepo(repo) defer cleanup()
arch := restic.NewArchiver(repo) arch := archiver.New(repo)
_, id, err := arch.Snapshot(nil, []string{BenchArchiveDirectory}, nil) _, id, err := arch.Snapshot(nil, []string{BenchArchiveDirectory}, nil)
OK(b, err) OK(b, err)
@ -128,9 +128,17 @@ func BenchmarkArchiveDirectory(b *testing.B) {
} }
} }
func countPacks(repo restic.Repository, t restic.FileType) (n uint) {
for range repo.Backend().List(t, nil) {
n++
}
return n
}
func archiveWithDedup(t testing.TB) { func archiveWithDedup(t testing.TB) {
repo := SetupRepo() repo, cleanup := repository.TestRepository(t)
defer TeardownRepo(repo) defer cleanup()
if BenchArchiveDirectory == "" { if BenchArchiveDirectory == "" {
t.Skip("benchdir not set, skipping TestArchiverDedup") t.Skip("benchdir not set, skipping TestArchiverDedup")
@ -143,24 +151,24 @@ func archiveWithDedup(t testing.TB) {
} }
// archive a few files // archive a few files
sn := SnapshotDir(t, repo, BenchArchiveDirectory, nil) sn := archiver.TestSnapshot(t, repo, BenchArchiveDirectory, nil)
t.Logf("archived snapshot %v", sn.ID().Str()) t.Logf("archived snapshot %v", sn.ID().Str())
// get archive stats // get archive stats
cnt.before.packs = repo.Count(backend.Data) cnt.before.packs = countPacks(repo, restic.DataFile)
cnt.before.dataBlobs = repo.Index().Count(pack.Data) cnt.before.dataBlobs = repo.Index().Count(restic.DataBlob)
cnt.before.treeBlobs = repo.Index().Count(pack.Tree) cnt.before.treeBlobs = repo.Index().Count(restic.TreeBlob)
t.Logf("packs %v, data blobs %v, tree blobs %v", t.Logf("packs %v, data blobs %v, tree blobs %v",
cnt.before.packs, cnt.before.dataBlobs, cnt.before.treeBlobs) cnt.before.packs, cnt.before.dataBlobs, cnt.before.treeBlobs)
// archive the same files again, without parent snapshot // archive the same files again, without parent snapshot
sn2 := SnapshotDir(t, repo, BenchArchiveDirectory, nil) sn2 := archiver.TestSnapshot(t, repo, BenchArchiveDirectory, nil)
t.Logf("archived snapshot %v", sn2.ID().Str()) t.Logf("archived snapshot %v", sn2.ID().Str())
// get archive stats again // get archive stats again
cnt.after.packs = repo.Count(backend.Data) cnt.after.packs = countPacks(repo, restic.DataFile)
cnt.after.dataBlobs = repo.Index().Count(pack.Data) cnt.after.dataBlobs = repo.Index().Count(restic.DataBlob)
cnt.after.treeBlobs = repo.Index().Count(pack.Tree) cnt.after.treeBlobs = repo.Index().Count(restic.TreeBlob)
t.Logf("packs %v, data blobs %v, tree blobs %v", t.Logf("packs %v, data blobs %v, tree blobs %v",
cnt.after.packs, cnt.after.dataBlobs, cnt.after.treeBlobs) cnt.after.packs, cnt.after.dataBlobs, cnt.after.treeBlobs)
@ -171,13 +179,13 @@ func archiveWithDedup(t testing.TB) {
} }
// archive the same files again, with a parent snapshot // archive the same files again, with a parent snapshot
sn3 := SnapshotDir(t, repo, BenchArchiveDirectory, sn2.ID()) sn3 := archiver.TestSnapshot(t, repo, BenchArchiveDirectory, sn2.ID())
t.Logf("archived snapshot %v, parent %v", sn3.ID().Str(), sn2.ID().Str()) t.Logf("archived snapshot %v, parent %v", sn3.ID().Str(), sn2.ID().Str())
// get archive stats again // get archive stats again
cnt.after2.packs = repo.Count(backend.Data) cnt.after2.packs = countPacks(repo, restic.DataFile)
cnt.after2.dataBlobs = repo.Index().Count(pack.Data) cnt.after2.dataBlobs = repo.Index().Count(restic.DataBlob)
cnt.after2.treeBlobs = repo.Index().Count(pack.Tree) cnt.after2.treeBlobs = repo.Index().Count(restic.TreeBlob)
t.Logf("packs %v, data blobs %v, tree blobs %v", t.Logf("packs %v, data blobs %v, tree blobs %v",
cnt.after2.packs, cnt.after2.dataBlobs, cnt.after2.treeBlobs) cnt.after2.packs, cnt.after2.dataBlobs, cnt.after2.treeBlobs)
@ -192,48 +200,6 @@ func TestArchiveDedup(t *testing.T) {
archiveWithDedup(t) archiveWithDedup(t)
} }
func BenchmarkLoadTree(t *testing.B) {
repo := SetupRepo()
defer TeardownRepo(repo)
if BenchArchiveDirectory == "" {
t.Skip("benchdir not set, skipping TestArchiverDedup")
}
// archive a few files
arch := restic.NewArchiver(repo)
sn, _, err := arch.Snapshot(nil, []string{BenchArchiveDirectory}, nil)
OK(t, err)
t.Logf("archived snapshot %v", sn.ID())
list := make([]backend.ID, 0, 10)
done := make(chan struct{})
for _, idx := range repo.Index().All() {
for blob := range idx.Each(done) {
if blob.Type != pack.Tree {
continue
}
list = append(list, blob.ID)
if len(list) == cap(list) {
close(done)
break
}
}
}
// start benchmark
t.ResetTimer()
for i := 0; i < t.N; i++ {
for _, id := range list {
_, err := restic.LoadTree(repo, id)
OK(t, err)
}
}
}
// Saves several identical chunks concurrently and later checks that there are no // Saves several identical chunks concurrently and later checks that there are no
// unreferenced packs in the repository. See also #292 and #358. // unreferenced packs in the repository. See also #292 and #358.
func TestParallelSaveWithDuplication(t *testing.T) { func TestParallelSaveWithDuplication(t *testing.T) {
@ -243,13 +209,13 @@ func TestParallelSaveWithDuplication(t *testing.T) {
} }
func testParallelSaveWithDuplication(t *testing.T, seed int) { func testParallelSaveWithDuplication(t *testing.T, seed int) {
repo := SetupRepo() repo, cleanup := repository.TestRepository(t)
defer TeardownRepo(repo) defer cleanup()
dataSizeMb := 128 dataSizeMb := 128
duplication := 7 duplication := 7
arch := restic.NewArchiver(repo) arch := archiver.New(repo)
chunks := getRandomData(seed, dataSizeMb*1024*1024) chunks := getRandomData(seed, dataSizeMb*1024*1024)
errChannels := [](<-chan error){} errChannels := [](<-chan error){}
@ -266,9 +232,9 @@ func testParallelSaveWithDuplication(t *testing.T, seed int) {
go func(c chunker.Chunk, errChan chan<- error) { go func(c chunker.Chunk, errChan chan<- error) {
barrier <- struct{}{} barrier <- struct{}{}
id := backend.Hash(c.Data) id := restic.Hash(c.Data)
time.Sleep(time.Duration(id[0])) time.Sleep(time.Duration(id[0]))
err := arch.Save(pack.Data, c.Data, id) err := arch.Save(restic.DataBlob, c.Data, id)
<-barrier <-barrier
errChan <- err errChan <- err
}(c, errChan) }(c, errChan)
@ -302,7 +268,7 @@ func getRandomData(seed int, size int) []chunker.Chunk {
return chunks return chunks
} }
func createAndInitChecker(t *testing.T, repo *repository.Repository) *checker.Checker { func createAndInitChecker(t *testing.T, repo restic.Repository) *checker.Checker {
chkr := checker.New(repo) chkr := checker.New(repo)
hints, errs := chkr.LoadIndex() hints, errs := chkr.LoadIndex()

View file

@ -0,0 +1,21 @@
package archiver
import (
"sync"
"github.com/restic/chunker"
)
var bufPool = sync.Pool{
New: func() interface{} {
return make([]byte, chunker.MinSize)
},
}
func getBuf() []byte {
return bufPool.Get().([]byte)
}
func freeBuf(data []byte) {
bufPool.Put(data)
}
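For reference, the Get/Put round trip these helpers wrap, shown with a made-up buffer size (the pool above allocates chunker.MinSize bytes):

package main

import (
	"fmt"
	"sync"
)

var bufPool = sync.Pool{
	New: func() interface{} {
		return make([]byte, 512*1024) // invented size for the sketch
	},
}

func main() {
	buf := bufPool.Get().([]byte) // like getBuf: recycled or freshly allocated
	fmt.Println("got a buffer of", len(buf), "bytes")
	bufPool.Put(buf) // like freeBuf: return it for the next caller
}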

View file

@ -0,0 +1,16 @@
package archiver
import (
"restic"
"testing"
)
// TestSnapshot creates a new snapshot of path.
func TestSnapshot(t testing.TB, repo restic.Repository, path string, parent *restic.ID) *restic.Snapshot {
arch := New(repo)
sn, _, err := arch.Snapshot(nil, []string{path}, parent)
if err != nil {
t.Fatal(err)
}
return sn
}
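A hypothetical caller for this helper, following the repository.TestRepository usage shown elsewhere in this diff; the test name and the "testdata" path are invented:

package archiver

import (
	"restic/repository"
	"testing"
)

// TestSnapshotHelper creates a throwaway repository, snapshots an
// invented path with the helper above, and logs the resulting ID.
func TestSnapshotHelper(t *testing.T) {
	repo, cleanup := repository.TestRepository(t)
	defer cleanup()

	sn := TestSnapshot(t, repo, "testdata", nil)
	t.Logf("created snapshot %v", sn.ID().Str())
}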

src/restic/backend.go Normal file
View file

@ -0,0 +1,38 @@
package restic
// Backend is used to store and access data.
type Backend interface {
// Location returns a string that describes the type and location of the
// repository.
Location() string
// Test returns whether a File with the given name and type exists.
Test(t FileType, name string) (bool, error)
// Remove removes a File with type t and name.
Remove(t FileType, name string) error
// Close the backend
Close() error
// Load returns the data stored in the backend for h at the given offset
// and saves it in p. Load has the same semantics as io.ReaderAt, except
// that a negative offset is also allowed. In this case it references a
// position relative to the end of the file (similar to Seek()).
Load(h Handle, p []byte, off int64) (int, error)
// Save stores the data in the backend under the given handle.
Save(h Handle, p []byte) error
// Stat returns information about the File identified by h.
Stat(h Handle) (FileInfo, error)
// List returns a channel that yields all names of files of type t in an
// arbitrary order. A goroutine is started for this. If the channel done is
// closed, sending stops.
List(t FileType, done <-chan struct{}) <-chan string
}
// FileInfo is returned by Stat() and contains information about a file in the
// backend.
type FileInfo struct{ Size int64 }
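The negative-offset rule in Load's contract is easy to get wrong, so here is the semantics in a standalone function over a byte slice; the short-read error is a stand-in for whatever a real backend returns:

package main

import (
	"errors"
	"fmt"
)

// load spells out the contract: io.ReaderAt semantics, except a negative
// offset counts back from the end of the file, in the spirit of Seek.
func load(data, p []byte, off int64) (int, error) {
	if off < 0 {
		off += int64(len(data)) // -7 means "the last 7 bytes start here"
	}
	if off < 0 || off > int64(len(data)) {
		return 0, errors.New("offset out of range")
	}

	n := copy(p, data[off:])
	if n < len(p) {
		return n, errors.New("short read") // stand-in for a backend's own error
	}
	return n, nil
}

func main() {
	data := []byte("restic backend")
	buf := make([]byte, 7)
	n, err := load(data, buf, -7)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(buf[:n])) // backend
}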

View file

@ -1,5 +1,4 @@
// Package backend provides local and remote storage for restic repositories. // Package backend provides local and remote storage for restic repositories.
// All backends need to implement the Backend interface. There is a // All backends need to implement the Backend interface. There is a MemBackend,
// MockBackend, which can be used for mocking in tests, and a MemBackend, which // which stores all data in a map internally and can be used for testing.
// stores all data in a hash internally.
package backend package backend

View file

@ -1,61 +0,0 @@
package backend_test
import (
"testing"
"restic/backend"
. "restic/test"
)
type mockBackend struct {
list func(backend.Type, <-chan struct{}) <-chan string
}
func (m mockBackend) List(t backend.Type, done <-chan struct{}) <-chan string {
return m.list(t, done)
}
var samples = backend.IDs{
ParseID("20bdc1402a6fc9b633aaffffffffffffffffffffffffffffffffffffffffffff"),
ParseID("20bdc1402a6fc9b633ccd578c4a92d0f4ef1a457fa2e16c596bc73fb409d6cc0"),
ParseID("20bdc1402a6fc9b633ffffffffffffffffffffffffffffffffffffffffffffff"),
ParseID("20ff988befa5fc40350f00d531a767606efefe242c837aaccb80673f286be53d"),
ParseID("326cb59dfe802304f96ee9b5b9af93bdee73a30f53981e5ec579aedb6f1d0f07"),
ParseID("86b60b9594d1d429c4aa98fa9562082cabf53b98c7dc083abe5dae31074dd15a"),
ParseID("96c8dbe225079e624b5ce509f5bd817d1453cd0a85d30d536d01b64a8669aeae"),
ParseID("fa31d65b87affcd167b119e9d3d2a27b8236ca4836cb077ed3e96fcbe209b792"),
}
func TestPrefixLength(t *testing.T) {
list := samples
m := mockBackend{}
m.list = func(t backend.Type, done <-chan struct{}) <-chan string {
ch := make(chan string)
go func() {
defer close(ch)
for _, id := range list {
select {
case ch <- id.String():
case <-done:
return
}
}
}()
return ch
}
l, err := backend.PrefixLength(m, backend.Snapshot)
OK(t, err)
Equals(t, 19, l)
list = samples[:3]
l, err = backend.PrefixLength(m, backend.Snapshot)
OK(t, err)
Equals(t, 19, l)
list = samples[3:]
l, err = backend.PrefixLength(m, backend.Snapshot)
OK(t, err)
Equals(t, 8, l)
}

View file

@ -1,58 +0,0 @@
package backend_test
import (
"reflect"
"testing"
"restic/backend"
. "restic/test"
)
var uniqTests = []struct {
before, after backend.IDs
}{
{
backend.IDs{
ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"),
ParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"),
ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"),
},
backend.IDs{
ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"),
ParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"),
},
},
{
backend.IDs{
ParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"),
ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"),
ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"),
},
backend.IDs{
ParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"),
ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"),
},
},
{
backend.IDs{
ParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"),
ParseID("f658198b405d7e80db5ace1980d125c8da62f636b586c46bf81dfb856a49d0c8"),
ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"),
ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"),
},
backend.IDs{
ParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"),
ParseID("f658198b405d7e80db5ace1980d125c8da62f636b586c46bf81dfb856a49d0c8"),
ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"),
},
},
}
func TestUniqIDs(t *testing.T) {
for i, test := range uniqTests {
uniq := test.before.Uniq()
if !reflect.DeepEqual(uniq, test.after) {
t.Errorf("uniqIDs() test %v failed\n wanted: %v\n got: %v", i, test.after, uniq)
}
}
}

View file

@ -1,35 +0,0 @@
package backend_test
import (
"testing"
"restic/backend"
. "restic/test"
)
var idsetTests = []struct {
id backend.ID
seen bool
}{
{ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), false},
{ParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), false},
{ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), true},
{ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), true},
{ParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), true},
{ParseID("f658198b405d7e80db5ace1980d125c8da62f636b586c46bf81dfb856a49d0c8"), false},
{ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), true},
{ParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), true},
{ParseID("f658198b405d7e80db5ace1980d125c8da62f636b586c46bf81dfb856a49d0c8"), true},
{ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), true},
}
func TestIDSet(t *testing.T) {
set := backend.NewIDSet()
for i, test := range idsetTests {
seen := set.Has(test.id)
if seen != test.seen {
t.Errorf("IDSet test %v failed: wanted %v, got %v", i, test.seen, seen)
}
set.Insert(test.id)
}
}

View file

@ -1,63 +0,0 @@
package backend
// Type is the type of a Blob.
type Type string
// These are the different data types a backend can store.
const (
Data Type = "data"
Key = "key"
Lock = "lock"
Snapshot = "snapshot"
Index = "index"
Config = "config"
)
// Backend is used to store and access data.
type Backend interface {
// Location returns a string that describes the type and location of the
// repository.
Location() string
// Test a boolean value whether a Blob with the name and type exists.
Test(t Type, name string) (bool, error)
// Remove removes a Blob with type t and name.
Remove(t Type, name string) error
// Close the backend
Close() error
Lister
// Load returns the data stored in the backend for h at the given offset
// and saves it in p. Load has the same semantics as io.ReaderAt, except
// that a negative offset is also allowed. In this case it references a
// position relative to the end of the file (similar to Seek()).
Load(h Handle, p []byte, off int64) (int, error)
// Save stores the data in the backend under the given handle.
Save(h Handle, p []byte) error
// Stat returns information about the blob identified by h.
Stat(h Handle) (BlobInfo, error)
}
// Lister implements listing data items stored in a backend.
type Lister interface {
// List returns a channel that yields all names of blobs of type t in an
// arbitrary order. A goroutine is started for this. If the channel done is
// closed, sending stops.
List(t Type, done <-chan struct{}) <-chan string
}
// Deleter are backends that allow to self-delete all content stored in them.
type Deleter interface {
// Delete the complete repository.
Delete() error
}
// BlobInfo is returned by Stat() and contains information about a stored blob.
type BlobInfo struct {
Size int64
}

View file

@ -3,7 +3,7 @@ package local
import ( import (
"strings" "strings"
"github.com/pkg/errors" "restic/errors"
) )
// ParseConfig parses a local backend config. // ParseConfig parses a local backend config.

View file

@ -5,8 +5,9 @@ import (
"io/ioutil" "io/ioutil"
"os" "os"
"path/filepath" "path/filepath"
"restic"
"github.com/pkg/errors" "restic/errors"
"restic/backend" "restic/backend"
"restic/debug" "restic/debug"
@ -18,6 +19,8 @@ type Local struct {
p string p string
} }
var _ restic.Backend = &Local{}
func paths(dir string) []string { func paths(dir string) []string {
return []string{ return []string{
dir, dir,
@ -69,8 +72,8 @@ func (b *Local) Location() string {
} }
// Construct path for given Type and name. // Construct path for given Type and name.
func filename(base string, t backend.Type, name string) string { func filename(base string, t restic.FileType, name string) string {
if t == backend.Config { if t == restic.ConfigFile {
return filepath.Join(base, "config") return filepath.Join(base, "config")
} }
@ -78,21 +81,21 @@ func filename(base string, t backend.Type, name string) string {
} }
// Construct directory for given Type. // Construct directory for given Type.
func dirname(base string, t backend.Type, name string) string { func dirname(base string, t restic.FileType, name string) string {
var n string var n string
switch t { switch t {
case backend.Data: case restic.DataFile:
n = backend.Paths.Data n = backend.Paths.Data
if len(name) > 2 { if len(name) > 2 {
n = filepath.Join(n, name[:2]) n = filepath.Join(n, name[:2])
} }
case backend.Snapshot: case restic.SnapshotFile:
n = backend.Paths.Snapshots n = backend.Paths.Snapshots
case backend.Index: case restic.IndexFile:
n = backend.Paths.Index n = backend.Paths.Index
case backend.Lock: case restic.LockFile:
n = backend.Paths.Locks n = backend.Paths.Locks
case backend.Key: case restic.KeyFile:
n = backend.Paths.Keys n = backend.Paths.Keys
} }
return filepath.Join(base, n) return filepath.Join(base, n)
@ -102,7 +105,7 @@ func dirname(base string, t backend.Type, name string) string {
// saves it in p. Load has the same semantics as io.ReaderAt, with one // saves it in p. Load has the same semantics as io.ReaderAt, with one
// exception: when off is lower than zero, it is treated as an offset relative // exception: when off is lower than zero, it is treated as an offset relative
// to the end of the file. // to the end of the file.
func (b *Local) Load(h backend.Handle, p []byte, off int64) (n int, err error) { func (b *Local) Load(h restic.Handle, p []byte, off int64) (n int, err error) {
debug.Log("backend.local.Load", "Load %v, length %v at %v", h, len(p), off) debug.Log("backend.local.Load", "Load %v, length %v at %v", h, len(p), off)
if err := h.Valid(); err != nil { if err := h.Valid(); err != nil {
return 0, err return 0, err
@ -168,7 +171,7 @@ func writeToTempfile(tempdir string, p []byte) (filename string, err error) {
} }
// Save stores data in the backend at the handle. // Save stores data in the backend at the handle.
func (b *Local) Save(h backend.Handle, p []byte) (err error) { func (b *Local) Save(h restic.Handle, p []byte) (err error) {
debug.Log("backend.local.Save", "Save %v, length %v", h, len(p)) debug.Log("backend.local.Save", "Save %v, length %v", h, len(p))
if err := h.Valid(); err != nil { if err := h.Valid(); err != nil {
return err return err
@ -188,7 +191,7 @@ func (b *Local) Save(h backend.Handle, p []byte) (err error) {
} }
// create directories if necessary, ignore errors // create directories if necessary, ignore errors
if h.Type == backend.Data { if h.Type == restic.DataFile {
err = fs.MkdirAll(filepath.Dir(filename), backend.Modes.Dir) err = fs.MkdirAll(filepath.Dir(filename), backend.Modes.Dir)
if err != nil { if err != nil {
return errors.Wrap(err, "MkdirAll") return errors.Wrap(err, "MkdirAll")
@ -213,22 +216,22 @@ func (b *Local) Save(h backend.Handle, p []byte) (err error) {
} }
// Stat returns information about a blob. // Stat returns information about a blob.
func (b *Local) Stat(h backend.Handle) (backend.BlobInfo, error) { func (b *Local) Stat(h restic.Handle) (restic.FileInfo, error) {
debug.Log("backend.local.Stat", "Stat %v", h) debug.Log("backend.local.Stat", "Stat %v", h)
if err := h.Valid(); err != nil { if err := h.Valid(); err != nil {
return backend.BlobInfo{}, err return restic.FileInfo{}, err
} }
fi, err := fs.Stat(filename(b.p, h.Type, h.Name)) fi, err := fs.Stat(filename(b.p, h.Type, h.Name))
if err != nil { if err != nil {
return backend.BlobInfo{}, errors.Wrap(err, "Stat") return restic.FileInfo{}, errors.Wrap(err, "Stat")
} }
return backend.BlobInfo{Size: fi.Size()}, nil return restic.FileInfo{Size: fi.Size()}, nil
} }
// Test returns true if a blob of the given type and name exists in the backend. // Test returns true if a blob of the given type and name exists in the backend.
func (b *Local) Test(t backend.Type, name string) (bool, error) { func (b *Local) Test(t restic.FileType, name string) (bool, error) {
debug.Log("backend.local.Test", "Test %v %v", t, name) debug.Log("backend.local.Test", "Test %v %v", t, name)
_, err := fs.Stat(filename(b.p, t, name)) _, err := fs.Stat(filename(b.p, t, name))
if err != nil { if err != nil {
@ -242,7 +245,7 @@ func (b *Local) Test(t backend.Type, name string) (bool, error) {
} }
// Remove removes the blob with the given name and type. // Remove removes the blob with the given name and type.
func (b *Local) Remove(t backend.Type, name string) error { func (b *Local) Remove(t restic.FileType, name string) error {
debug.Log("backend.local.Remove", "Remove %v %v", t, name) debug.Log("backend.local.Remove", "Remove %v %v", t, name)
fn := filename(b.p, t, name) fn := filename(b.p, t, name)
@ -317,10 +320,10 @@ func listDirs(dir string) (filenames []string, err error) {
// List returns a channel that yields all names of blobs of type t. A // List returns a channel that yields all names of blobs of type t. A
// goroutine is started for this. If the channel done is closed, sending // goroutine is started for this. If the channel done is closed, sending
// stops. // stops.
func (b *Local) List(t backend.Type, done <-chan struct{}) <-chan string { func (b *Local) List(t restic.FileType, done <-chan struct{}) <-chan string {
debug.Log("backend.local.List", "List %v", t) debug.Log("backend.local.List", "List %v", t)
lister := listDir lister := listDir
if t == backend.Data { if t == restic.DataFile {
lister = listDirs lister = listDirs
} }
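The dirname switch above fans data files out by the first two characters of their name, which keeps any single directory from accumulating every pack file. The layout rule in isolation, with invented paths and names:

package main

import (
	"fmt"
	"path/filepath"
)

// dataPath reproduces the fan-out rule: data files live two levels deep,
// under a directory named after the first two characters of the file name.
func dataPath(base, name string) string {
	dir := filepath.Join(base, "data")
	if len(name) > 2 {
		dir = filepath.Join(dir, name[:2])
	}
	return filepath.Join(dir, name)
}

func main() {
	fmt.Println(dataPath("/srv/repo", "20bdc1402a6fc9b6"))
	// prints /srv/repo/data/20/20bdc1402a6fc9b6 on Unix
}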

View file

@ -4,8 +4,8 @@ import (
"fmt" "fmt"
"io/ioutil" "io/ioutil"
"os" "os"
"restic"
"restic/backend"
"restic/backend/local" "restic/backend/local"
"restic/backend/test" "restic/backend/test"
) )
@ -30,7 +30,7 @@ func createTempdir() error {
} }
func init() { func init() {
test.CreateFn = func() (backend.Backend, error) { test.CreateFn = func() (restic.Backend, error) {
err := createTempdir() err := createTempdir()
if err != nil { if err != nil {
return nil, err return nil, err
@ -38,7 +38,7 @@ func init() {
return local.Create(tempBackendDir) return local.Create(tempBackendDir)
} }
test.OpenFn = func() (backend.Backend, error) { test.OpenFn = func() (restic.Backend, error) {
err := createTempdir() err := createTempdir()
if err != nil { if err != nil {
return nil, err return nil, err

View file

@ -2,28 +2,29 @@ package mem
import ( import (
"io" "io"
"restic"
"sync" "sync"
"github.com/pkg/errors" "restic/errors"
"restic/backend"
"restic/debug" "restic/debug"
) )
type entry struct { type entry struct {
Type backend.Type Type restic.FileType
Name string Name string
} }
type memMap map[entry][]byte type memMap map[entry][]byte
// make sure that MemoryBackend implements restic.Backend
var _ restic.Backend = &MemoryBackend{}
// MemoryBackend is a mock backend that uses a map for storing all data in // MemoryBackend is a mock backend that uses a map for storing all data in
// memory. This should only be used for tests. // memory. This should only be used for tests.
type MemoryBackend struct { type MemoryBackend struct {
data memMap data memMap
m sync.Mutex m sync.Mutex
backend.MockBackend
} }
// New returns a new backend that saves all data in a map in memory. // New returns a new backend that saves all data in a map in memory.
@ -32,60 +33,13 @@ func New() *MemoryBackend {
data: make(memMap), data: make(memMap),
} }
be.MockBackend.TestFn = func(t backend.Type, name string) (bool, error) {
return memTest(be, t, name)
}
be.MockBackend.LoadFn = func(h backend.Handle, p []byte, off int64) (int, error) {
return memLoad(be, h, p, off)
}
be.MockBackend.SaveFn = func(h backend.Handle, p []byte) error {
return memSave(be, h, p)
}
be.MockBackend.StatFn = func(h backend.Handle) (backend.BlobInfo, error) {
return memStat(be, h)
}
be.MockBackend.RemoveFn = func(t backend.Type, name string) error {
return memRemove(be, t, name)
}
be.MockBackend.ListFn = func(t backend.Type, done <-chan struct{}) <-chan string {
return memList(be, t, done)
}
be.MockBackend.DeleteFn = func() error {
be.m.Lock()
defer be.m.Unlock()
be.data = make(memMap)
return nil
}
be.MockBackend.LocationFn = func() string {
return "Memory Backend"
}
debug.Log("MemoryBackend.New", "created new memory backend") debug.Log("MemoryBackend.New", "created new memory backend")
return be return be
} }
func (be *MemoryBackend) insert(t backend.Type, name string, data []byte) error { // Test returns whether a file exists.
be.m.Lock() func (be *MemoryBackend) Test(t restic.FileType, name string) (bool, error) {
defer be.m.Unlock()
if _, ok := be.data[entry{t, name}]; ok {
return errors.New("already present")
}
be.data[entry{t, name}] = data
return nil
}
func memTest(be *MemoryBackend, t backend.Type, name string) (bool, error) {
be.m.Lock() be.m.Lock()
defer be.m.Unlock() defer be.m.Unlock()
@ -98,7 +52,8 @@ func memTest(be *MemoryBackend, t backend.Type, name string) (bool, error) {
return false, nil return false, nil
} }
func memLoad(be *MemoryBackend, h backend.Handle, p []byte, off int64) (int, error) { // Load reads data from the backend.
func (be *MemoryBackend) Load(h restic.Handle, p []byte, off int64) (int, error) {
if err := h.Valid(); err != nil { if err := h.Valid(); err != nil {
return 0, err return 0, err
} }
@ -106,7 +61,7 @@ func memLoad(be *MemoryBackend, h backend.Handle, p []byte, off int64) (int, err
be.m.Lock() be.m.Lock()
defer be.m.Unlock() defer be.m.Unlock()
if h.Type == backend.Config { if h.Type == restic.ConfigFile {
h.Name = "" h.Name = ""
} }
@ -137,7 +92,8 @@ func memLoad(be *MemoryBackend, h backend.Handle, p []byte, off int64) (int, err
return n, nil return n, nil
} }
func memSave(be *MemoryBackend, h backend.Handle, p []byte) error { // Save adds new Data to the backend.
func (be *MemoryBackend) Save(h restic.Handle, p []byte) error {
if err := h.Valid(); err != nil { if err := h.Valid(); err != nil {
return err return err
} }
@ -145,7 +101,7 @@ func memSave(be *MemoryBackend, h backend.Handle, p []byte) error {
be.m.Lock() be.m.Lock()
defer be.m.Unlock() defer be.m.Unlock()
if h.Type == backend.Config { if h.Type == restic.ConfigFile {
h.Name = "" h.Name = ""
} }
@ -161,15 +117,16 @@ func memSave(be *MemoryBackend, h backend.Handle, p []byte) error {
return nil return nil
} }
func memStat(be *MemoryBackend, h backend.Handle) (backend.BlobInfo, error) { // Stat returns information about a file in the backend.
func (be *MemoryBackend) Stat(h restic.Handle) (restic.FileInfo, error) {
be.m.Lock() be.m.Lock()
defer be.m.Unlock() defer be.m.Unlock()
if err := h.Valid(); err != nil { if err := h.Valid(); err != nil {
return backend.BlobInfo{}, err return restic.FileInfo{}, err
} }
if h.Type == backend.Config { if h.Type == restic.ConfigFile {
h.Name = "" h.Name = ""
} }
@ -177,13 +134,14 @@ func memStat(be *MemoryBackend, h backend.Handle) (backend.BlobInfo, error) {
e, ok := be.data[entry{h.Type, h.Name}] e, ok := be.data[entry{h.Type, h.Name}]
if !ok { if !ok {
return backend.BlobInfo{}, errors.New("no such data") return restic.FileInfo{}, errors.New("no such data")
} }
return backend.BlobInfo{Size: int64(len(e))}, nil return restic.FileInfo{Size: int64(len(e))}, nil
} }
func memRemove(be *MemoryBackend, t backend.Type, name string) error { // Remove deletes a file from the backend.
func (be *MemoryBackend) Remove(t restic.FileType, name string) error {
be.m.Lock() be.m.Lock()
defer be.m.Unlock() defer be.m.Unlock()
@ -198,7 +156,8 @@ func memRemove(be *MemoryBackend, t backend.Type, name string) error {
return nil return nil
} }
func memList(be *MemoryBackend, t backend.Type, done <-chan struct{}) <-chan string { // List returns a channel which yields entries from the backend.
func (be *MemoryBackend) List(t restic.FileType, done <-chan struct{}) <-chan string {
be.m.Lock() be.m.Lock()
defer be.m.Unlock() defer be.m.Unlock()
@ -227,3 +186,22 @@ func memList(be *MemoryBackend, t backend.Type, done <-chan struct{}) <-chan str
return ch return ch
} }
// Location returns the location of the backend (RAM).
func (be *MemoryBackend) Location() string {
return "RAM"
}
// Delete removes all data in the backend.
func (be *MemoryBackend) Delete() error {
be.m.Lock()
defer be.m.Unlock()
be.data = make(memMap)
return nil
}
// Close closes the backend.
func (be *MemoryBackend) Close() error {
return nil
}
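The whole backend rests on the entry composite key near the top of the file: structs with only comparable fields can key a Go map directly, so (type, name) lookups need no string concatenation. Minimal form:

package main

import "fmt"

// entry mirrors the (type, name) composite key MemoryBackend stores under.
type entry struct {
	Type string
	Name string
}

func main() {
	data := map[entry][]byte{}
	data[entry{Type: "snapshot", Name: "abc"}] = []byte("payload")

	if p, ok := data[entry{Type: "snapshot", Name: "abc"}]; ok {
		fmt.Printf("found %d bytes\n", len(p))
	}
}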

View file

@ -1,19 +1,20 @@
package mem_test package mem_test
import ( import (
"github.com/pkg/errors" "restic"
"restic/errors"
"restic/backend"
"restic/backend/mem" "restic/backend/mem"
"restic/backend/test" "restic/backend/test"
) )
var be backend.Backend var be restic.Backend
//go:generate go run ../test/generate_backend_tests.go //go:generate go run ../test/generate_backend_tests.go
func init() { func init() {
test.CreateFn = func() (backend.Backend, error) { test.CreateFn = func() (restic.Backend, error) {
if be != nil { if be != nil {
return nil, errors.New("temporary memory backend dir already exists") return nil, errors.New("temporary memory backend dir already exists")
} }
@ -23,7 +24,7 @@ func init() {
return be, nil return be, nil
} }
test.OpenFn = func() (backend.Backend, error) { test.OpenFn = func() (restic.Backend, error) {
if be == nil { if be == nil {
return nil, errors.New("repository not initialized") return nil, errors.New("repository not initialized")
} }

View file

@ -1,103 +0,0 @@
package backend
import "github.com/pkg/errors"
// MockBackend implements a backend whose functions can be specified. This
// should only be used for tests.
type MockBackend struct {
CloseFn func() error
LoadFn func(h Handle, p []byte, off int64) (int, error)
SaveFn func(h Handle, p []byte) error
StatFn func(h Handle) (BlobInfo, error)
ListFn func(Type, <-chan struct{}) <-chan string
RemoveFn func(Type, string) error
TestFn func(Type, string) (bool, error)
DeleteFn func() error
LocationFn func() string
}
// Close the backend.
func (m *MockBackend) Close() error {
if m.CloseFn == nil {
return nil
}
return m.CloseFn()
}
// Location returns a location string.
func (m *MockBackend) Location() string {
if m.LocationFn == nil {
return ""
}
return m.LocationFn()
}
// Load loads data from the backend.
func (m *MockBackend) Load(h Handle, p []byte, off int64) (int, error) {
if m.LoadFn == nil {
return 0, errors.New("not implemented")
}
return m.LoadFn(h, p, off)
}
// Save data in the backend.
func (m *MockBackend) Save(h Handle, p []byte) error {
if m.SaveFn == nil {
return errors.New("not implemented")
}
return m.SaveFn(h, p)
}
// Stat an object in the backend.
func (m *MockBackend) Stat(h Handle) (BlobInfo, error) {
if m.StatFn == nil {
return BlobInfo{}, errors.New("not implemented")
}
return m.StatFn(h)
}
// List items of type t.
func (m *MockBackend) List(t Type, done <-chan struct{}) <-chan string {
if m.ListFn == nil {
ch := make(chan string)
close(ch)
return ch
}
return m.ListFn(t, done)
}
// Remove data from the backend.
func (m *MockBackend) Remove(t Type, name string) error {
if m.RemoveFn == nil {
return errors.New("not implemented")
}
return m.RemoveFn(t, name)
}
// Test for the existence of a specific item.
func (m *MockBackend) Test(t Type, name string) (bool, error) {
if m.TestFn == nil {
return false, errors.New("not implemented")
}
return m.TestFn(t, name)
}
// Delete all data.
func (m *MockBackend) Delete() error {
if m.DeleteFn == nil {
return errors.New("not implemented")
}
return m.DeleteFn()
}
// Make sure that MockBackend implements the backend interface.
var _ Backend = &MockBackend{}

View file

@ -4,7 +4,7 @@ import (
"net/url" "net/url"
"strings" "strings"
"github.com/pkg/errors" "restic/errors"
) )
// Config contains all configuration necessary to connect to a REST server. // Config contains all configuration necessary to connect to a REST server.

View file

@ -8,9 +8,10 @@ import (
"net/http" "net/http"
"net/url" "net/url"
"path" "path"
"restic"
"strings" "strings"
"github.com/pkg/errors" "restic/errors"
"restic/backend" "restic/backend"
) )
@ -18,24 +19,24 @@ import (
const connLimit = 10 const connLimit = 10
// restPath returns the path to the given resource. // restPath returns the path to the given resource.
func restPath(url *url.URL, h backend.Handle) string { func restPath(url *url.URL, h restic.Handle) string {
u := *url u := *url
var dir string var dir string
switch h.Type { switch h.Type {
case backend.Config: case restic.ConfigFile:
dir = "" dir = ""
h.Name = "config" h.Name = "config"
case backend.Data: case restic.DataFile:
dir = backend.Paths.Data dir = backend.Paths.Data
case backend.Snapshot: case restic.SnapshotFile:
dir = backend.Paths.Snapshots dir = backend.Paths.Snapshots
case backend.Index: case restic.IndexFile:
dir = backend.Paths.Index dir = backend.Paths.Index
case backend.Lock: case restic.LockFile:
dir = backend.Paths.Locks dir = backend.Paths.Locks
case backend.Key: case restic.KeyFile:
dir = backend.Paths.Keys dir = backend.Paths.Keys
default: default:
dir = string(h.Type) dir = string(h.Type)
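A reduced restPath, keeping only the URL-building mechanics: copy the base URL by value so it is never mutated, special-case the config file, and join the rest. The directory name here is a simplified stand-in for the backend.Paths lookup above:

package main

import (
	"fmt"
	"net/url"
	"path"
)

func restPath(base *url.URL, typ, name string) string {
	dir := typ
	if typ == "config" {
		// the config file has a fixed name directly under the repo root
		dir, name = "", "config"
	}

	u := *base // copy by value so the caller's URL is never mutated
	u.Path = path.Join(u.Path, dir, name)
	return u.String()
}

func main() {
	base, err := url.Parse("http://localhost:8000/repo")
	if err != nil {
		panic(err)
	}
	fmt.Println(restPath(base, "snapshot", "abc123"))
	// http://localhost:8000/repo/snapshot/abc123
}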
@ -53,7 +54,7 @@ type restBackend struct {
} }
// Open opens the REST backend with the given config. // Open opens the REST backend with the given config.
func Open(cfg Config) (backend.Backend, error) { func Open(cfg Config) (restic.Backend, error) {
connChan := make(chan struct{}, connLimit) connChan := make(chan struct{}, connLimit)
for i := 0; i < connLimit; i++ { for i := 0; i < connLimit; i++ {
connChan <- struct{}{} connChan <- struct{}{}
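The connChan pre-fill above is the counting-semaphore idiom: a buffered channel holding connLimit tokens caps the number of in-flight requests. On its own:

package main

import (
	"fmt"
	"sync"
)

func main() {
	const connLimit = 3
	connChan := make(chan struct{}, connLimit)
	for i := 0; i < connLimit; i++ {
		connChan <- struct{}{}
	}

	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			<-connChan                // block until a token is free
			fmt.Println("request", i) // stand-in for the HTTP round trip
			connChan <- struct{}{}    // hand the token back
		}(i)
	}
	wg.Wait()
}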
@ -71,7 +72,7 @@ func (b *restBackend) Location() string {
// Load returns the data stored in the backend for h at the given offset // Load returns the data stored in the backend for h at the given offset
// and saves it in p. Load has the same semantics as io.ReaderAt. // and saves it in p. Load has the same semantics as io.ReaderAt.
func (b *restBackend) Load(h backend.Handle, p []byte, off int64) (n int, err error) { func (b *restBackend) Load(h restic.Handle, p []byte, off int64) (n int, err error) {
if err := h.Valid(); err != nil { if err := h.Valid(); err != nil {
return 0, err return 0, err
} }
@ -120,7 +121,7 @@ func (b *restBackend) Load(h backend.Handle, p []byte, off int64) (n int, err er
} }
// Save stores data in the backend at the handle. // Save stores data in the backend at the handle.
func (b *restBackend) Save(h backend.Handle, p []byte) (err error) { func (b *restBackend) Save(h restic.Handle, p []byte) (err error) {
if err := h.Valid(); err != nil { if err := h.Valid(); err != nil {
return err return err
} }
@ -151,31 +152,31 @@ func (b *restBackend) Save(h backend.Handle, p []byte) (err error) {
} }
// Stat returns information about a blob. // Stat returns information about a blob.
func (b *restBackend) Stat(h backend.Handle) (backend.BlobInfo, error) { func (b *restBackend) Stat(h restic.Handle) (restic.FileInfo, error) {
if err := h.Valid(); err != nil { if err := h.Valid(); err != nil {
return backend.BlobInfo{}, err return restic.FileInfo{}, err
} }
<-b.connChan <-b.connChan
resp, err := b.client.Head(restPath(b.url, h)) resp, err := b.client.Head(restPath(b.url, h))
b.connChan <- struct{}{} b.connChan <- struct{}{}
if err != nil { if err != nil {
return backend.BlobInfo{}, errors.Wrap(err, "client.Head") return restic.FileInfo{}, errors.Wrap(err, "client.Head")
} }
if err = resp.Body.Close(); err != nil { if err = resp.Body.Close(); err != nil {
return backend.BlobInfo{}, errors.Wrap(err, "Close") return restic.FileInfo{}, errors.Wrap(err, "Close")
} }
if resp.StatusCode != 200 { if resp.StatusCode != 200 {
return backend.BlobInfo{}, errors.Errorf("unexpected HTTP response code %v", resp.StatusCode) return restic.FileInfo{}, errors.Errorf("unexpected HTTP response code %v", resp.StatusCode)
} }
if resp.ContentLength < 0 { if resp.ContentLength < 0 {
return backend.BlobInfo{}, errors.New("negative content length") return restic.FileInfo{}, errors.New("negative content length")
} }
bi := backend.BlobInfo{ bi := restic.FileInfo{
Size: resp.ContentLength, Size: resp.ContentLength,
} }
@ -183,8 +184,8 @@ func (b *restBackend) Stat(h backend.Handle) (backend.BlobInfo, error) {
} }
// Test returns true if a blob of the given type and name exists in the backend. // Test returns true if a blob of the given type and name exists in the backend.
func (b *restBackend) Test(t backend.Type, name string) (bool, error) { func (b *restBackend) Test(t restic.FileType, name string) (bool, error) {
_, err := b.Stat(backend.Handle{Type: t, Name: name}) _, err := b.Stat(restic.Handle{Type: t, Name: name})
if err != nil { if err != nil {
return false, nil return false, nil
} }
@ -193,8 +194,8 @@ func (b *restBackend) Test(t backend.Type, name string) (bool, error) {
} }
// Remove removes the blob with the given name and type. // Remove removes the blob with the given name and type.
func (b *restBackend) Remove(t backend.Type, name string) error { func (b *restBackend) Remove(t restic.FileType, name string) error {
h := backend.Handle{Type: t, Name: name} h := restic.Handle{Type: t, Name: name}
if err := h.Valid(); err != nil { if err := h.Valid(); err != nil {
return err return err
} }
@ -221,10 +222,10 @@ func (b *restBackend) Remove(t backend.Type, name string) error {
// List returns a channel that yields all names of blobs of type t. A // List returns a channel that yields all names of blobs of type t. A
// goroutine is started for this. If the channel done is closed, sending // goroutine is started for this. If the channel done is closed, sending
// stops. // stops.
func (b *restBackend) List(t backend.Type, done <-chan struct{}) <-chan string { func (b *restBackend) List(t restic.FileType, done <-chan struct{}) <-chan string {
ch := make(chan string) ch := make(chan string)
url := restPath(b.url, backend.Handle{Type: t}) url := restPath(b.url, restic.Handle{Type: t})
if !strings.HasSuffix(url, "/") { if !strings.HasSuffix(url, "/") {
url += "/" url += "/"
} }
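connChan, filled with connLimit tokens in Open, acts as a counting semaphore: the request paths take a token off the channel before each HTTP request and put it back afterwards (the Stat hunk above shows the pattern), capping concurrency at connLimit. A standalone sketch of the same pattern, illustrative only and not part of the commit:

package main

import (
	"fmt"
	"sync"
)

func main() {
	const connLimit = 10 // matches the constant above
	sem := make(chan struct{}, connLimit)
	for i := 0; i < connLimit; i++ {
		sem <- struct{}{} // pre-fill with tokens, as Open does
	}

	var wg sync.WaitGroup
	for i := 0; i < 25; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			<-sem                                // acquire before talking to the server
			defer func() { sem <- struct{}{} }() // release afterwards
			fmt.Println("request", i)            // at most connLimit run concurrently
		}(i)
	}
	wg.Wait()
}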

View file

@ -2,35 +2,35 @@ package rest
import ( import (
"net/url" "net/url"
"restic/backend" "restic"
"testing" "testing"
) )
var restPathTests = []struct { var restPathTests = []struct {
Handle backend.Handle Handle restic.Handle
URL *url.URL URL *url.URL
Result string Result string
}{ }{
{ {
URL: parseURL("https://hostname.foo"), URL: parseURL("https://hostname.foo"),
Handle: backend.Handle{ Handle: restic.Handle{
Type: backend.Data, Type: restic.DataFile,
Name: "foobar", Name: "foobar",
}, },
Result: "https://hostname.foo/data/foobar", Result: "https://hostname.foo/data/foobar",
}, },
{ {
URL: parseURL("https://hostname.foo:1234/prefix/repo"), URL: parseURL("https://hostname.foo:1234/prefix/repo"),
Handle: backend.Handle{ Handle: restic.Handle{
Type: backend.Lock, Type: restic.LockFile,
Name: "foobar", Name: "foobar",
}, },
Result: "https://hostname.foo:1234/prefix/repo/locks/foobar", Result: "https://hostname.foo:1234/prefix/repo/locks/foobar",
}, },
{ {
URL: parseURL("https://hostname.foo:1234/prefix/repo"), URL: parseURL("https://hostname.foo:1234/prefix/repo"),
Handle: backend.Handle{ Handle: restic.Handle{
Type: backend.Config, Type: restic.ConfigFile,
Name: "foobar", Name: "foobar",
}, },
Result: "https://hostname.foo:1234/prefix/repo/config", Result: "https://hostname.foo:1234/prefix/repo/config",

View file

@ -4,10 +4,10 @@ import (
"fmt" "fmt"
"net/url" "net/url"
"os" "os"
"restic"
"github.com/pkg/errors" "restic/errors"
"restic/backend"
"restic/backend/rest" "restic/backend/rest"
"restic/backend/test" "restic/backend/test"
. "restic/test" . "restic/test"
@ -31,13 +31,13 @@ func init() {
URL: url, URL: url,
} }
test.CreateFn = func() (backend.Backend, error) { test.CreateFn = func() (restic.Backend, error) {
be, err := rest.Open(cfg) be, err := rest.Open(cfg)
if err != nil { if err != nil {
return nil, err return nil, err
} }
exists, err := be.Test(backend.Config, "") exists, err := be.Test(restic.ConfigFile, "")
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -49,7 +49,7 @@ func init() {
return be, nil return be, nil
} }
test.OpenFn = func() (backend.Backend, error) { test.OpenFn = func() (restic.Backend, error) {
return rest.Open(cfg) return rest.Open(cfg)
} }
} }

View file

@ -5,7 +5,7 @@ import (
"path" "path"
"strings" "strings"
"github.com/pkg/errors" "restic/errors"
) )
// Config contains all configuration necessary to connect to an s3 compatible // Config contains all configuration necessary to connect to an s3 compatible

View file

@ -3,13 +3,13 @@ package s3
import ( import (
"bytes" "bytes"
"io" "io"
"restic"
"strings" "strings"
"github.com/pkg/errors" "restic/errors"
"github.com/minio/minio-go" "github.com/minio/minio-go"
"restic/backend"
"restic/debug" "restic/debug"
) )
@ -25,7 +25,7 @@ type s3 struct {
// Open opens the S3 backend at bucket and region. The bucket is created if it // Open opens the S3 backend at bucket and region. The bucket is created if it
// does not exist yet. // does not exist yet.
func Open(cfg Config) (backend.Backend, error) { func Open(cfg Config) (restic.Backend, error) {
debug.Log("s3.Open", "open, config %#v", cfg) debug.Log("s3.Open", "open, config %#v", cfg)
client, err := minio.New(cfg.Endpoint, cfg.KeyID, cfg.Secret, !cfg.UseHTTP) client, err := minio.New(cfg.Endpoint, cfg.KeyID, cfg.Secret, !cfg.UseHTTP)
@ -53,7 +53,7 @@ func Open(cfg Config) (backend.Backend, error) {
return be, nil return be, nil
} }
func (be *s3) s3path(t backend.Type, name string) string { func (be *s3) s3path(t restic.FileType, name string) string {
var path string var path string
if be.prefix != "" { if be.prefix != "" {
@ -61,7 +61,7 @@ func (be *s3) s3path(t backend.Type, name string) string {
} }
path += string(t) path += string(t)
if t == backend.Config { if t == restic.ConfigFile {
return path return path
} }
return path + "/" + name return path + "/" + name
@ -81,7 +81,7 @@ func (be *s3) Location() string {
// Load returns the data stored in the backend for h at the given offset // Load returns the data stored in the backend for h at the given offset
// and saves it in p. Load has the same semantics as io.ReaderAt. // and saves it in p. Load has the same semantics as io.ReaderAt.
func (be s3) Load(h backend.Handle, p []byte, off int64) (n int, err error) { func (be s3) Load(h restic.Handle, p []byte, off int64) (n int, err error) {
var obj *minio.Object var obj *minio.Object
debug.Log("s3.Load", "%v, offset %v, len %v", h, off, len(p)) debug.Log("s3.Load", "%v, offset %v, len %v", h, off, len(p))
@ -153,7 +153,7 @@ func (be s3) Load(h backend.Handle, p []byte, off int64) (n int, err error) {
} }
// Save stores data in the backend at the handle. // Save stores data in the backend at the handle.
func (be s3) Save(h backend.Handle, p []byte) (err error) { func (be s3) Save(h restic.Handle, p []byte) (err error) {
if err := h.Valid(); err != nil { if err := h.Valid(); err != nil {
return err return err
} }
@ -183,7 +183,7 @@ func (be s3) Save(h backend.Handle, p []byte) (err error) {
} }
// Stat returns information about a blob. // Stat returns information about a blob.
func (be s3) Stat(h backend.Handle) (bi backend.BlobInfo, err error) { func (be s3) Stat(h restic.Handle) (bi restic.FileInfo, err error) {
debug.Log("s3.Stat", "%v", h) debug.Log("s3.Stat", "%v", h)
path := be.s3path(h.Type, h.Name) path := be.s3path(h.Type, h.Name)
@ -192,7 +192,7 @@ func (be s3) Stat(h backend.Handle) (bi backend.BlobInfo, err error) {
obj, err = be.client.GetObject(be.bucketname, path) obj, err = be.client.GetObject(be.bucketname, path)
if err != nil { if err != nil {
debug.Log("s3.Stat", "GetObject() err %v", err) debug.Log("s3.Stat", "GetObject() err %v", err)
return backend.BlobInfo{}, errors.Wrap(err, "client.GetObject") return restic.FileInfo{}, errors.Wrap(err, "client.GetObject")
} }
// make sure that the object is closed properly. // make sure that the object is closed properly.
@ -206,14 +206,14 @@ func (be s3) Stat(h backend.Handle) (bi backend.BlobInfo, err error) {
fi, err := obj.Stat() fi, err := obj.Stat()
if err != nil { if err != nil {
debug.Log("s3.Stat", "Stat() err %v", err) debug.Log("s3.Stat", "Stat() err %v", err)
return backend.BlobInfo{}, errors.Wrap(err, "Stat") return restic.FileInfo{}, errors.Wrap(err, "Stat")
} }
return backend.BlobInfo{Size: fi.Size}, nil return restic.FileInfo{Size: fi.Size}, nil
} }
// Test returns true if a blob of the given type and name exists in the backend. // Test returns true if a blob of the given type and name exists in the backend.
func (be *s3) Test(t backend.Type, name string) (bool, error) { func (be *s3) Test(t restic.FileType, name string) (bool, error) {
found := false found := false
path := be.s3path(t, name) path := be.s3path(t, name)
_, err := be.client.StatObject(be.bucketname, path) _, err := be.client.StatObject(be.bucketname, path)
@ -226,7 +226,7 @@ func (be *s3) Test(t backend.Type, name string) (bool, error) {
} }
// Remove removes the blob with the given name and type. // Remove removes the blob with the given name and type.
func (be *s3) Remove(t backend.Type, name string) error { func (be *s3) Remove(t restic.FileType, name string) error {
path := be.s3path(t, name) path := be.s3path(t, name)
err := be.client.RemoveObject(be.bucketname, path) err := be.client.RemoveObject(be.bucketname, path)
debug.Log("s3.Remove", "%v %v -> err %v", t, name, err) debug.Log("s3.Remove", "%v %v -> err %v", t, name, err)
@ -236,7 +236,7 @@ func (be *s3) Remove(t backend.Type, name string) error {
// List returns a channel that yields all names of blobs of type t. A // List returns a channel that yields all names of blobs of type t. A
// goroutine is started for this. If the channel done is closed, sending // goroutine is started for this. If the channel done is closed, sending
// stops. // stops.
func (be *s3) List(t backend.Type, done <-chan struct{}) <-chan string { func (be *s3) List(t restic.FileType, done <-chan struct{}) <-chan string {
debug.Log("s3.List", "listing %v", t) debug.Log("s3.List", "listing %v", t)
ch := make(chan string) ch := make(chan string)
@ -264,11 +264,11 @@ func (be *s3) List(t backend.Type, done <-chan struct{}) <-chan string {
} }
// Remove keys for a specified backend type. // Remove keys for a specified backend type.
func (be *s3) removeKeys(t backend.Type) error { func (be *s3) removeKeys(t restic.FileType) error {
done := make(chan struct{}) done := make(chan struct{})
defer close(done) defer close(done)
for key := range be.List(backend.Data, done) { for key := range be.List(restic.DataFile, done) {
err := be.Remove(backend.Data, key) err := be.Remove(restic.DataFile, key)
if err != nil { if err != nil {
return err return err
} }
@ -279,12 +279,12 @@ func (be *s3) removeKeys(t backend.Type) error {
// Delete removes all restic keys in the bucket. It will not remove the bucket itself. // Delete removes all restic keys in the bucket. It will not remove the bucket itself.
func (be *s3) Delete() error { func (be *s3) Delete() error {
alltypes := []backend.Type{ alltypes := []restic.FileType{
backend.Data, restic.DataFile,
backend.Key, restic.KeyFile,
backend.Lock, restic.LockFile,
backend.Snapshot, restic.SnapshotFile,
backend.Index} restic.IndexFile}
for _, t := range alltypes { for _, t := range alltypes {
err := be.removeKeys(t) err := be.removeKeys(t)
@ -293,7 +293,7 @@ func (be *s3) Delete() error {
} }
} }
return be.Remove(backend.Config, "") return be.Remove(restic.ConfigFile, "")
} }
// Close does nothing // Close does nothing
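Unlike the sftp layout further below, s3path produces flat object keys under an optional prefix, with config as the one key that has no name component. An illustrative sketch of the mapping, using plain strings instead of restic.FileType; the prefix handling for the elided hunk and the example names are assumptions:

package main

import "fmt"

// s3path mirrors the key scheme above: optional prefix, then the
// file type, then the name (except for "config").
func s3path(prefix, tpe, name string) string {
	var path string
	if prefix != "" {
		path = prefix + "/"
	}
	path += tpe
	if tpe == "config" {
		return path
	}
	return path + "/" + name
}

func main() {
	fmt.Println(s3path("repo", "config", ""))     // repo/config
	fmt.Println(s3path("repo", "data", "aa01ff")) // repo/data/aa01ff
}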

View file

@ -4,10 +4,10 @@ import (
"fmt" "fmt"
"net/url" "net/url"
"os" "os"
"restic"
"github.com/pkg/errors" "restic/errors"
"restic/backend"
"restic/backend/s3" "restic/backend/s3"
"restic/backend/test" "restic/backend/test"
. "restic/test" . "restic/test"
@ -38,13 +38,13 @@ func init() {
cfg.UseHTTP = true cfg.UseHTTP = true
} }
test.CreateFn = func() (backend.Backend, error) { test.CreateFn = func() (restic.Backend, error) {
be, err := s3.Open(cfg) be, err := s3.Open(cfg)
if err != nil { if err != nil {
return nil, err return nil, err
} }
exists, err := be.Test(backend.Config, "") exists, err := be.Test(restic.ConfigFile, "")
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -56,7 +56,7 @@ func init() {
return be, nil return be, nil
} }
test.OpenFn = func() (backend.Backend, error) { test.OpenFn = func() (restic.Backend, error) {
return s3.Open(cfg) return s3.Open(cfg)
} }

View file

@ -5,7 +5,7 @@ import (
"path" "path"
"strings" "strings"
"github.com/pkg/errors" "restic/errors"
) )
// Config collects all information required to connect to an sftp server. // Config collects all information required to connect to an sftp server.

View file

@ -9,10 +9,11 @@ import (
"os" "os"
"os/exec" "os/exec"
"path" "path"
"restic"
"strings" "strings"
"time" "time"
"github.com/pkg/errors" "restic/errors"
"restic/backend" "restic/backend"
"restic/debug" "restic/debug"
@ -33,6 +34,8 @@ type SFTP struct {
result <-chan error result <-chan error
} }
var _ restic.Backend = &SFTP{}
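The blank-identifier assignment added here is the standard Go compile-time check that *SFTP still satisfies restic.Backend after the interface moved into the restic package; it costs nothing at runtime.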
func startClient(program string, args ...string) (*SFTP, error) { func startClient(program string, args ...string) (*SFTP, error) {
// Connect to a remote host and request the sftp subsystem via the 'ssh' // Connect to a remote host and request the sftp subsystem via the 'ssh'
// command. This assumes that passwordless login is correctly configured. // command. This assumes that passwordless login is correctly configured.
@ -256,11 +259,11 @@ func (r *SFTP) mkdirAll(dir string, mode os.FileMode) error {
} }
// Rename temp file to final name according to type and name. // Rename temp file to final name according to type and name.
func (r *SFTP) renameFile(oldname string, t backend.Type, name string) error { func (r *SFTP) renameFile(oldname string, t restic.FileType, name string) error {
filename := r.filename(t, name) filename := r.filename(t, name)
// create directories if necessary // create directories if necessary
if t == backend.Data { if t == restic.DataFile {
err := r.mkdirAll(path.Dir(filename), backend.Modes.Dir) err := r.mkdirAll(path.Dir(filename), backend.Modes.Dir)
if err != nil { if err != nil {
return err return err
@ -293,9 +296,9 @@ func Join(parts ...string) string {
return path.Clean(path.Join(parts...)) return path.Clean(path.Join(parts...))
} }
// Construct path for given backend.Type and name. // Construct path for given restic.FileType and name.
func (r *SFTP) filename(t backend.Type, name string) string { func (r *SFTP) filename(t restic.FileType, name string) string {
if t == backend.Config { if t == restic.ConfigFile {
return Join(r.p, "config") return Join(r.p, "config")
} }
@ -303,21 +306,21 @@ func (r *SFTP) filename(t backend.Type, name string) string {
} }
// Construct directory for given backend.Type. // Construct directory for given backend.Type.
func (r *SFTP) dirname(t backend.Type, name string) string { func (r *SFTP) dirname(t restic.FileType, name string) string {
var n string var n string
switch t { switch t {
case backend.Data: case restic.DataFile:
n = backend.Paths.Data n = backend.Paths.Data
if len(name) > 2 { if len(name) > 2 {
n = Join(n, name[:2]) n = Join(n, name[:2])
} }
case backend.Snapshot: case restic.SnapshotFile:
n = backend.Paths.Snapshots n = backend.Paths.Snapshots
case backend.Index: case restic.IndexFile:
n = backend.Paths.Index n = backend.Paths.Index
case backend.Lock: case restic.LockFile:
n = backend.Paths.Locks n = backend.Paths.Locks
case backend.Key: case restic.KeyFile:
n = backend.Paths.Keys n = backend.Paths.Keys
} }
return Join(r.p, n) return Join(r.p, n)
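Together, filename and dirname give the on-disk layout: config sits at the repository root, data files are sharded into subdirectories named after the first two characters of the file name, and the remaining types get flat directories. A standalone sketch under those assumptions; the root path, directory names (taken from backend.Paths) and example names are invented:

package main

import (
	"fmt"
	"path"
)

// dirFor mirrors dirname above for the two most interesting cases.
func dirFor(repo, tpe, name string) string {
	switch tpe {
	case "data":
		if len(name) > 2 {
			return path.Join(repo, "data", name[:2]) // shard by two-char prefix
		}
		return path.Join(repo, "data")
	case "snapshot":
		return path.Join(repo, "snapshots")
	}
	return repo
}

func main() {
	fmt.Println(dirFor("/srv/repo", "data", "aa01ff")) // /srv/repo/data/aa
	fmt.Println(dirFor("/srv/repo", "snapshot", "bc9f")) // /srv/repo/snapshots
}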
@ -325,7 +328,7 @@ func (r *SFTP) dirname(t backend.Type, name string) string {
// Load returns the data stored in the backend for h at the given offset // Load returns the data stored in the backend for h at the given offset
// and saves it in p. Load has the same semantics as io.ReaderAt. // and saves it in p. Load has the same semantics as io.ReaderAt.
func (r *SFTP) Load(h backend.Handle, p []byte, off int64) (n int, err error) { func (r *SFTP) Load(h restic.Handle, p []byte, off int64) (n int, err error) {
debug.Log("sftp.Load", "load %v, %d bytes, offset %v", h, len(p), off) debug.Log("sftp.Load", "load %v, %d bytes, offset %v", h, len(p), off)
if err := r.clientError(); err != nil { if err := r.clientError(); err != nil {
return 0, err return 0, err
@ -362,7 +365,7 @@ func (r *SFTP) Load(h backend.Handle, p []byte, off int64) (n int, err error) {
} }
// Save stores data in the backend at the handle. // Save stores data in the backend at the handle.
func (r *SFTP) Save(h backend.Handle, p []byte) (err error) { func (r *SFTP) Save(h restic.Handle, p []byte) (err error) {
debug.Log("sftp.Save", "save %v bytes to %v", h, len(p)) debug.Log("sftp.Save", "save %v bytes to %v", h, len(p))
if err := r.clientError(); err != nil { if err := r.clientError(); err != nil {
return err return err
@ -400,26 +403,26 @@ func (r *SFTP) Save(h backend.Handle, p []byte) (err error) {
} }
// Stat returns information about a blob. // Stat returns information about a blob.
func (r *SFTP) Stat(h backend.Handle) (backend.BlobInfo, error) { func (r *SFTP) Stat(h restic.Handle) (restic.FileInfo, error) {
debug.Log("sftp.Stat", "stat %v", h) debug.Log("sftp.Stat", "stat %v", h)
if err := r.clientError(); err != nil { if err := r.clientError(); err != nil {
return backend.BlobInfo{}, err return restic.FileInfo{}, err
} }
if err := h.Valid(); err != nil { if err := h.Valid(); err != nil {
return backend.BlobInfo{}, err return restic.FileInfo{}, err
} }
fi, err := r.c.Lstat(r.filename(h.Type, h.Name)) fi, err := r.c.Lstat(r.filename(h.Type, h.Name))
if err != nil { if err != nil {
return backend.BlobInfo{}, errors.Wrap(err, "Lstat") return restic.FileInfo{}, errors.Wrap(err, "Lstat")
} }
return backend.BlobInfo{Size: fi.Size()}, nil return restic.FileInfo{Size: fi.Size()}, nil
} }
// Test returns true if a blob of the given type and name exists in the backend. // Test returns true if a blob of the given type and name exists in the backend.
func (r *SFTP) Test(t backend.Type, name string) (bool, error) { func (r *SFTP) Test(t restic.FileType, name string) (bool, error) {
debug.Log("sftp.Test", "type %v, name %v", t, name) debug.Log("sftp.Test", "type %v, name %v", t, name)
if err := r.clientError(); err != nil { if err := r.clientError(); err != nil {
return false, err return false, err
@ -438,7 +441,7 @@ func (r *SFTP) Test(t backend.Type, name string) (bool, error) {
} }
// Remove removes the content stored at name. // Remove removes the content stored at name.
func (r *SFTP) Remove(t backend.Type, name string) error { func (r *SFTP) Remove(t restic.FileType, name string) error {
debug.Log("sftp.Remove", "type %v, name %v", t, name) debug.Log("sftp.Remove", "type %v, name %v", t, name)
if err := r.clientError(); err != nil { if err := r.clientError(); err != nil {
return err return err
@ -450,14 +453,14 @@ func (r *SFTP) Remove(t backend.Type, name string) error {
// List returns a channel that yields all names of blobs of type t. A // List returns a channel that yields all names of blobs of type t. A
// goroutine is started for this. If the channel done is closed, sending // goroutine is started for this. If the channel done is closed, sending
// stops. // stops.
func (r *SFTP) List(t backend.Type, done <-chan struct{}) <-chan string { func (r *SFTP) List(t restic.FileType, done <-chan struct{}) <-chan string {
debug.Log("sftp.List", "list all %v", t) debug.Log("sftp.List", "list all %v", t)
ch := make(chan string) ch := make(chan string)
go func() { go func() {
defer close(ch) defer close(ch)
if t == backend.Data { if t == restic.DataFile {
// read first level // read first level
basedir := r.dirname(t, "") basedir := r.dirname(t, "")

View file

@ -4,11 +4,11 @@ import (
"io/ioutil" "io/ioutil"
"os" "os"
"path/filepath" "path/filepath"
"restic"
"strings" "strings"
"github.com/pkg/errors" "restic/errors"
"restic/backend"
"restic/backend/sftp" "restic/backend/sftp"
"restic/backend/test" "restic/backend/test"
@ -52,7 +52,7 @@ func init() {
args := []string{"-e"} args := []string{"-e"}
test.CreateFn = func() (backend.Backend, error) { test.CreateFn = func() (restic.Backend, error) {
err := createTempdir() err := createTempdir()
if err != nil { if err != nil {
return nil, err return nil, err
@ -61,7 +61,7 @@ func init() {
return sftp.Create(tempBackendDir, sftpserver, args...) return sftp.Create(tempBackendDir, sftpserver, args...)
} }
test.OpenFn = func() (backend.Backend, error) { test.OpenFn = func() (restic.Backend, error) {
err := createTempdir() err := createTempdir()
if err != nil { if err != nil {
return nil, err return nil, err

View file

@ -7,28 +7,29 @@ import (
"io/ioutil" "io/ioutil"
"math/rand" "math/rand"
"reflect" "reflect"
"restic"
"sort" "sort"
"testing" "testing"
"github.com/pkg/errors" "restic/errors"
"restic/test"
"restic/backend" "restic/backend"
. "restic/test"
) )
// CreateFn is a function that creates a temporary repository for the tests. // CreateFn is a function that creates a temporary repository for the tests.
var CreateFn func() (backend.Backend, error) var CreateFn func() (restic.Backend, error)
// OpenFn is a function that opens a previously created temporary repository. // OpenFn is a function that opens a previously created temporary repository.
var OpenFn func() (backend.Backend, error) var OpenFn func() (restic.Backend, error)
// CleanupFn removes temporary files and directories created during the tests. // CleanupFn removes temporary files and directories created during the tests.
var CleanupFn func() error var CleanupFn func() error
var but backend.Backend // backendUnderTest var but restic.Backend // backendUnderTest
var butInitialized bool var butInitialized bool
func open(t testing.TB) backend.Backend { func open(t testing.TB) restic.Backend {
if OpenFn == nil { if OpenFn == nil {
t.Fatal("OpenFn not set") t.Fatal("OpenFn not set")
} }
@ -118,7 +119,7 @@ func TestCreateWithConfig(t testing.TB) {
defer close(t) defer close(t)
// save a config // save a config
store(t, b, backend.Config, []byte("test config")) store(t, b, restic.ConfigFile, []byte("test config"))
// now create the backend again, this must fail // now create the backend again, this must fail
_, err := CreateFn() _, err := CreateFn()
@ -127,7 +128,7 @@ func TestCreateWithConfig(t testing.TB) {
} }
// remove config // remove config
err = b.Remove(backend.Config, "") err = b.Remove(restic.ConfigFile, "")
if err != nil { if err != nil {
t.Fatalf("unexpected error removing config: %v", err) t.Fatalf("unexpected error removing config: %v", err)
} }
@ -152,12 +153,12 @@ func TestConfig(t testing.TB) {
var testString = "Config" var testString = "Config"
// create config and read it back // create config and read it back
_, err := backend.LoadAll(b, backend.Handle{Type: backend.Config}, nil) _, err := backend.LoadAll(b, restic.Handle{Type: restic.ConfigFile}, nil)
if err == nil { if err == nil {
t.Fatalf("did not get expected error for non-existing config") t.Fatalf("did not get expected error for non-existing config")
} }
err = b.Save(backend.Handle{Type: backend.Config}, []byte(testString)) err = b.Save(restic.Handle{Type: restic.ConfigFile}, []byte(testString))
if err != nil { if err != nil {
t.Fatalf("Save() error: %v", err) t.Fatalf("Save() error: %v", err)
} }
@ -165,7 +166,7 @@ func TestConfig(t testing.TB) {
// try accessing the config with different names, should all return the // try accessing the config with different names, should all return the
// same config // same config
for _, name := range []string{"", "foo", "bar", "0000000000000000000000000000000000000000000000000000000000000000"} { for _, name := range []string{"", "foo", "bar", "0000000000000000000000000000000000000000000000000000000000000000"} {
h := backend.Handle{Type: backend.Config, Name: name} h := restic.Handle{Type: restic.ConfigFile, Name: name}
buf, err := backend.LoadAll(b, h, nil) buf, err := backend.LoadAll(b, h, nil)
if err != nil { if err != nil {
t.Fatalf("unable to read config with name %q: %v", name, err) t.Fatalf("unable to read config with name %q: %v", name, err)
@ -182,22 +183,22 @@ func TestLoad(t testing.TB) {
b := open(t) b := open(t)
defer close(t) defer close(t)
_, err := b.Load(backend.Handle{}, nil, 0) _, err := b.Load(restic.Handle{}, nil, 0)
if err == nil { if err == nil {
t.Fatalf("Load() did not return an error for invalid handle") t.Fatalf("Load() did not return an error for invalid handle")
} }
_, err = b.Load(backend.Handle{Type: backend.Data, Name: "foobar"}, nil, 0) _, err = b.Load(restic.Handle{Type: restic.DataFile, Name: "foobar"}, nil, 0)
if err == nil { if err == nil {
t.Fatalf("Load() did not return an error for non-existing blob") t.Fatalf("Load() did not return an error for non-existing blob")
} }
length := rand.Intn(1<<24) + 2000 length := rand.Intn(1<<24) + 2000
data := Random(23, length) data := test.Random(23, length)
id := backend.Hash(data) id := restic.Hash(data)
handle := backend.Handle{Type: backend.Data, Name: id.String()} handle := restic.Handle{Type: restic.DataFile, Name: id.String()}
err = b.Save(handle, data) err = b.Save(handle, data)
if err != nil { if err != nil {
t.Fatalf("Save() error: %v", err) t.Fatalf("Save() error: %v", err)
@ -309,7 +310,7 @@ func TestLoad(t testing.TB) {
t.Errorf("wrong error returned for larger buffer: want io.ErrUnexpectedEOF, got %#v", err) t.Errorf("wrong error returned for larger buffer: want io.ErrUnexpectedEOF, got %#v", err)
} }
OK(t, b.Remove(backend.Data, id.String())) test.OK(t, b.Remove(restic.DataFile, id.String()))
} }
// TestLoadNegativeOffset tests the backend's Load function with negative offsets. // TestLoadNegativeOffset tests the backend's Load function with negative offsets.
@ -319,10 +320,10 @@ func TestLoadNegativeOffset(t testing.TB) {
length := rand.Intn(1<<24) + 2000 length := rand.Intn(1<<24) + 2000
data := Random(23, length) data := test.Random(23, length)
id := backend.Hash(data) id := restic.Hash(data)
handle := backend.Handle{Type: backend.Data, Name: id.String()} handle := restic.Handle{Type: restic.DataFile, Name: id.String()}
err := b.Save(handle, data) err := b.Save(handle, data)
if err != nil { if err != nil {
t.Fatalf("Save() error: %v", err) t.Fatalf("Save() error: %v", err)
@ -365,30 +366,30 @@ func TestLoadNegativeOffset(t testing.TB) {
} }
OK(t, b.Remove(backend.Data, id.String())) test.OK(t, b.Remove(restic.DataFile, id.String()))
} }
// TestSave tests saving data in the backend. // TestSave tests saving data in the backend.
func TestSave(t testing.TB) { func TestSave(t testing.TB) {
b := open(t) b := open(t)
defer close(t) defer close(t)
var id backend.ID var id restic.ID
for i := 0; i < 10; i++ { for i := 0; i < 10; i++ {
length := rand.Intn(1<<23) + 200000 length := rand.Intn(1<<23) + 200000
data := Random(23, length) data := test.Random(23, length)
// use the first 32 byte as the ID // use the first 32 byte as the ID
copy(id[:], data) copy(id[:], data)
h := backend.Handle{ h := restic.Handle{
Type: backend.Data, Type: restic.DataFile,
Name: fmt.Sprintf("%s-%d", id, i), Name: fmt.Sprintf("%s-%d", id, i),
} }
err := b.Save(h, data) err := b.Save(h, data)
OK(t, err) test.OK(t, err)
buf, err := backend.LoadAll(b, h, nil) buf, err := backend.LoadAll(b, h, nil)
OK(t, err) test.OK(t, err)
if len(buf) != len(data) { if len(buf) != len(data) {
t.Fatalf("number of bytes does not match, want %v, got %v", len(data), len(buf)) t.Fatalf("number of bytes does not match, want %v, got %v", len(data), len(buf))
} }
@ -398,7 +399,7 @@ func TestSave(t testing.TB) {
} }
fi, err := b.Stat(h) fi, err := b.Stat(h)
OK(t, err) test.OK(t, err)
if fi.Size != int64(len(data)) { if fi.Size != int64(len(data)) {
t.Fatalf("Stat() returned different size, want %q, got %d", len(data), fi.Size) t.Fatalf("Stat() returned different size, want %q, got %d", len(data), fi.Size)
@ -429,7 +430,7 @@ func TestSaveFilenames(t testing.TB) {
defer close(t) defer close(t)
for i, test := range filenameTests { for i, test := range filenameTests {
h := backend.Handle{Name: test.name, Type: backend.Data} h := restic.Handle{Name: test.name, Type: restic.DataFile}
err := b.Save(h, []byte(test.data)) err := b.Save(h, []byte(test.data))
if err != nil { if err != nil {
t.Errorf("test %d failed: Save() returned %v", i, err) t.Errorf("test %d failed: Save() returned %v", i, err)
@ -464,17 +465,17 @@ var testStrings = []struct {
{"4e54d2c721cbdb730f01b10b62dec622962b36966ec685880effa63d71c808f2", "foo/../../baz"}, {"4e54d2c721cbdb730f01b10b62dec622962b36966ec685880effa63d71c808f2", "foo/../../baz"},
} }
func store(t testing.TB, b backend.Backend, tpe backend.Type, data []byte) { func store(t testing.TB, b restic.Backend, tpe restic.FileType, data []byte) {
id := backend.Hash(data) id := restic.Hash(data)
err := b.Save(backend.Handle{Name: id.String(), Type: tpe}, data) err := b.Save(restic.Handle{Name: id.String(), Type: tpe}, data)
OK(t, err) test.OK(t, err)
} }
func read(t testing.TB, rd io.Reader, expectedData []byte) { func read(t testing.TB, rd io.Reader, expectedData []byte) {
buf, err := ioutil.ReadAll(rd) buf, err := ioutil.ReadAll(rd)
OK(t, err) test.OK(t, err)
if expectedData != nil { if expectedData != nil {
Equals(t, expectedData, buf) test.Equals(t, expectedData, buf)
} }
} }
@ -483,90 +484,90 @@ func TestBackend(t testing.TB) {
b := open(t) b := open(t)
defer close(t) defer close(t)
for _, tpe := range []backend.Type{ for _, tpe := range []restic.FileType{
backend.Data, backend.Key, backend.Lock, restic.DataFile, restic.KeyFile, restic.LockFile,
backend.Snapshot, backend.Index, restic.SnapshotFile, restic.IndexFile,
} { } {
// detect non-existing files // detect non-existing files
for _, test := range testStrings { for _, ts := range testStrings {
id, err := backend.ParseID(test.id) id, err := restic.ParseID(ts.id)
OK(t, err) test.OK(t, err)
// test if blob is already in repository // test if blob is already in repository
ret, err := b.Test(tpe, id.String()) ret, err := b.Test(tpe, id.String())
OK(t, err) test.OK(t, err)
Assert(t, !ret, "blob was found to exist before creating") test.Assert(t, !ret, "blob was found to exist before creating")
// try to stat a not existing blob // try to stat a not existing blob
h := backend.Handle{Type: tpe, Name: id.String()} h := restic.Handle{Type: tpe, Name: id.String()}
_, err = b.Stat(h) _, err = b.Stat(h)
Assert(t, err != nil, "blob data could be extracted before creation") test.Assert(t, err != nil, "blob data could be extracted before creation")
// try to read not existing blob // try to read not existing blob
_, err = b.Load(h, nil, 0) _, err = b.Load(h, nil, 0)
Assert(t, err != nil, "blob reader could be obtained before creation") test.Assert(t, err != nil, "blob reader could be obtained before creation")
// try to get string out, should fail // try to get string out, should fail
ret, err = b.Test(tpe, id.String()) ret, err = b.Test(tpe, id.String())
OK(t, err) test.OK(t, err)
Assert(t, !ret, "id %q was found (but should not have)", test.id) test.Assert(t, !ret, "id %q was found (but should not have)", ts.id)
} }
// add files // add files
for _, test := range testStrings { for _, ts := range testStrings {
store(t, b, tpe, []byte(test.data)) store(t, b, tpe, []byte(ts.data))
// test Load() // test Load()
h := backend.Handle{Type: tpe, Name: test.id} h := restic.Handle{Type: tpe, Name: ts.id}
buf, err := backend.LoadAll(b, h, nil) buf, err := backend.LoadAll(b, h, nil)
OK(t, err) test.OK(t, err)
Equals(t, test.data, string(buf)) test.Equals(t, ts.data, string(buf))
// try to read it out with an offset and a length // try to read it out with an offset and a length
start := 1 start := 1
end := len(test.data) - 2 end := len(ts.data) - 2
length := end - start length := end - start
buf2 := make([]byte, length) buf2 := make([]byte, length)
n, err := b.Load(h, buf2, int64(start)) n, err := b.Load(h, buf2, int64(start))
OK(t, err) test.OK(t, err)
Equals(t, length, n) test.Equals(t, length, n)
Equals(t, test.data[start:end], string(buf2)) test.Equals(t, ts.data[start:end], string(buf2))
} }
// test adding the first file again // test adding the first file again
test := testStrings[0] ts := testStrings[0]
// create blob // create blob
err := b.Save(backend.Handle{Type: tpe, Name: test.id}, []byte(test.data)) err := b.Save(restic.Handle{Type: tpe, Name: ts.id}, []byte(ts.data))
Assert(t, err != nil, "expected error, got %v", err) test.Assert(t, err != nil, "expected error, got %v", err)
// remove and recreate // remove and recreate
err = b.Remove(tpe, test.id) err = b.Remove(tpe, ts.id)
OK(t, err) test.OK(t, err)
// test that the blob is gone // test that the blob is gone
ok, err := b.Test(tpe, test.id) ok, err := b.Test(tpe, ts.id)
OK(t, err) test.OK(t, err)
Assert(t, ok == false, "removed blob still present") test.Assert(t, ok == false, "removed blob still present")
// create blob // create blob
err = b.Save(backend.Handle{Type: tpe, Name: test.id}, []byte(test.data)) err = b.Save(restic.Handle{Type: tpe, Name: ts.id}, []byte(ts.data))
OK(t, err) test.OK(t, err)
// list items // list items
IDs := backend.IDs{} IDs := restic.IDs{}
for _, test := range testStrings { for _, ts := range testStrings {
id, err := backend.ParseID(test.id) id, err := restic.ParseID(ts.id)
OK(t, err) test.OK(t, err)
IDs = append(IDs, id) IDs = append(IDs, id)
} }
list := backend.IDs{} list := restic.IDs{}
for s := range b.List(tpe, nil) { for s := range b.List(tpe, nil) {
list = append(list, ParseID(s)) list = append(list, restic.TestParseID(s))
} }
if len(IDs) != len(list) { if len(IDs) != len(list) {
@ -581,19 +582,19 @@ func TestBackend(t testing.TB) {
} }
// remove content if requested // remove content if requested
if TestCleanupTempDirs { if test.TestCleanupTempDirs {
for _, test := range testStrings { for _, ts := range testStrings {
id, err := backend.ParseID(test.id) id, err := restic.ParseID(ts.id)
OK(t, err) test.OK(t, err)
found, err := b.Test(tpe, id.String()) found, err := b.Test(tpe, id.String())
OK(t, err) test.OK(t, err)
OK(t, b.Remove(tpe, id.String())) test.OK(t, b.Remove(tpe, id.String()))
found, err = b.Test(tpe, id.String()) found, err = b.Test(tpe, id.String())
OK(t, err) test.OK(t, err)
Assert(t, !found, fmt.Sprintf("id %q still found after removal", id)) test.Assert(t, !found, fmt.Sprintf("id %q still found after removal", id))
} }
} }
} }
@ -604,7 +605,7 @@ func TestDelete(t testing.TB) {
b := open(t) b := open(t)
defer close(t) defer close(t)
be, ok := b.(backend.Deleter) be, ok := b.(restic.Deleter)
if !ok { if !ok {
return return
} }
@ -622,7 +623,7 @@ func TestCleanup(t testing.TB) {
return return
} }
if !TestCleanupTempDirs { if !test.TestCleanupTempDirs {
t.Logf("not cleaning up backend") t.Logf("not cleaning up backend")
return return
} }

View file

@ -1,19 +1,20 @@
package test_test package test_test
import ( import (
"github.com/pkg/errors" "restic"
"restic/errors"
"restic/backend"
"restic/backend/mem" "restic/backend/mem"
"restic/backend/test" "restic/backend/test"
) )
var be backend.Backend var be restic.Backend
//go:generate go run ../test/generate_backend_tests.go //go:generate go run ../test/generate_backend_tests.go
func init() { func init() {
test.CreateFn = func() (backend.Backend, error) { test.CreateFn = func() (restic.Backend, error) {
if be != nil { if be != nil {
return nil, errors.New("temporary memory backend dir already exists") return nil, errors.New("temporary memory backend dir already exists")
} }
@ -23,7 +24,7 @@ func init() {
return be, nil return be, nil
} }
test.OpenFn = func() (backend.Backend, error) { test.OpenFn = func() (restic.Backend, error) {
if be == nil { if be == nil {
return nil, errors.New("repository not initialized") return nil, errors.New("repository not initialized")
} }

View file

@ -1,17 +0,0 @@
package backend
import (
"crypto/rand"
"io"
)
// RandomID returns a randomly generated ID. This is mainly used for testing.
// When reading from rand fails, the function panics.
func RandomID() ID {
id := ID{}
_, err := io.ReadFull(rand.Reader, id[:])
if err != nil {
panic(err)
}
return id
}

View file

@ -2,15 +2,16 @@ package backend
import ( import (
"io" "io"
"restic"
"github.com/pkg/errors" "restic/errors"
) )
// LoadAll reads all data stored in the backend for the handle. The buffer buf // LoadAll reads all data stored in the backend for the handle. The buffer buf
// is resized to accommodate all data in the blob. Errors returned by be.Load() // is resized to accommodate all data in the blob. Errors returned by be.Load()
// are passed on, except io.ErrUnexpectedEOF is silenced and nil returned // are passed on, except io.ErrUnexpectedEOF is silenced and nil returned
// instead, since it means this function is working properly. // instead, since it means this function is working properly.
func LoadAll(be Backend, h Handle, buf []byte) ([]byte, error) { func LoadAll(be restic.Backend, h restic.Handle, buf []byte) ([]byte, error) {
fi, err := be.Stat(h) fi, err := be.Stat(h)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "Stat") return nil, errors.Wrap(err, "Stat")

View file

@ -3,6 +3,7 @@ package backend_test
import ( import (
"bytes" "bytes"
"math/rand" "math/rand"
"restic"
"testing" "testing"
"restic/backend" "restic/backend"
@ -19,11 +20,11 @@ func TestLoadAll(t *testing.T) {
for i := 0; i < 20; i++ { for i := 0; i < 20; i++ {
data := Random(23+i, rand.Intn(MiB)+500*KiB) data := Random(23+i, rand.Intn(MiB)+500*KiB)
id := backend.Hash(data) id := restic.Hash(data)
err := b.Save(backend.Handle{Name: id.String(), Type: backend.Data}, data) err := b.Save(restic.Handle{Name: id.String(), Type: restic.DataFile}, data)
OK(t, err) OK(t, err)
buf, err := backend.LoadAll(b, backend.Handle{Type: backend.Data, Name: id.String()}, nil) buf, err := backend.LoadAll(b, restic.Handle{Type: restic.DataFile, Name: id.String()}, nil)
OK(t, err) OK(t, err)
if len(buf) != len(data) { if len(buf) != len(data) {
@ -44,12 +45,12 @@ func TestLoadSmallBuffer(t *testing.T) {
for i := 0; i < 20; i++ { for i := 0; i < 20; i++ {
data := Random(23+i, rand.Intn(MiB)+500*KiB) data := Random(23+i, rand.Intn(MiB)+500*KiB)
id := backend.Hash(data) id := restic.Hash(data)
err := b.Save(backend.Handle{Name: id.String(), Type: backend.Data}, data) err := b.Save(restic.Handle{Name: id.String(), Type: restic.DataFile}, data)
OK(t, err) OK(t, err)
buf := make([]byte, len(data)-23) buf := make([]byte, len(data)-23)
buf, err = backend.LoadAll(b, backend.Handle{Type: backend.Data, Name: id.String()}, buf) buf, err = backend.LoadAll(b, restic.Handle{Type: restic.DataFile, Name: id.String()}, buf)
OK(t, err) OK(t, err)
if len(buf) != len(data) { if len(buf) != len(data) {
@ -70,12 +71,12 @@ func TestLoadLargeBuffer(t *testing.T) {
for i := 0; i < 20; i++ { for i := 0; i < 20; i++ {
data := Random(23+i, rand.Intn(MiB)+500*KiB) data := Random(23+i, rand.Intn(MiB)+500*KiB)
id := backend.Hash(data) id := restic.Hash(data)
err := b.Save(backend.Handle{Name: id.String(), Type: backend.Data}, data) err := b.Save(restic.Handle{Name: id.String(), Type: restic.DataFile}, data)
OK(t, err) OK(t, err)
buf := make([]byte, len(data)+100) buf := make([]byte, len(data)+100)
buf, err = backend.LoadAll(b, backend.Handle{Type: backend.Data, Name: id.String()}, buf) buf, err = backend.LoadAll(b, restic.Handle{Type: restic.DataFile, Name: id.String()}, buf)
OK(t, err) OK(t, err)
if len(buf) != len(data) { if len(buf) != len(data) {

View file

@ -1,6 +1,6 @@
package backend package restic
import "github.com/pkg/errors" import "restic/errors"
// ErrNoIDPrefixFound is returned by Find() when no ID for the given prefix // ErrNoIDPrefixFound is returned by Find() when no ID for the given prefix
// could be found. // could be found.
@ -10,10 +10,10 @@ var ErrNoIDPrefixFound = errors.New("no ID found")
// prefix are found. // prefix are found.
var ErrMultipleIDMatches = errors.New("multiple IDs with prefix found") var ErrMultipleIDMatches = errors.New("multiple IDs with prefix found")
// Find loads the list of all blobs of type t and searches for names which // Find loads the list of all files of type t and searches for names which
// start with prefix. If none is found, nil and ErrNoIDPrefixFound is returned. // start with prefix. If none is found, nil and ErrNoIDPrefixFound is returned.
// If more than one is found, nil and ErrMultipleIDMatches is returned. // If more than one is found, nil and ErrMultipleIDMatches is returned.
func Find(be Lister, t Type, prefix string) (string, error) { func Find(be Lister, t FileType, prefix string) (string, error) {
done := make(chan struct{}) done := make(chan struct{})
defer close(done) defer close(done)
@ -41,7 +41,7 @@ const minPrefixLength = 8
// PrefixLength returns the number of bytes required so that all prefixes of // PrefixLength returns the number of bytes required so that all prefixes of
// all names of type t are unique. // all names of type t are unique.
func PrefixLength(be Lister, t Type) (int, error) { func PrefixLength(be Lister, t FileType) (int, error) {
done := make(chan struct{}) done := make(chan struct{})
defer close(done) defer close(done)
@ -52,8 +52,9 @@ func PrefixLength(be Lister, t Type) (int, error) {
} }
// select prefixes of length l, test if the last one is the same as the current one // select prefixes of length l, test if the last one is the same as the current one
id := ID{}
outer: outer:
for l := minPrefixLength; l < IDSize; l++ { for l := minPrefixLength; l < len(id); l++ {
var last string var last string
for _, name := range list { for _, name := range list {
@ -66,5 +67,5 @@ outer:
return l, nil return l, nil
} }
return IDSize, nil return len(id), nil
} }

View file

@ -0,0 +1,70 @@
package restic
import (
"testing"
)
type mockBackend struct {
list func(FileType, <-chan struct{}) <-chan string
}
func (m mockBackend) List(t FileType, done <-chan struct{}) <-chan string {
return m.list(t, done)
}
var samples = IDs{
TestParseID("20bdc1402a6fc9b633aaffffffffffffffffffffffffffffffffffffffffffff"),
TestParseID("20bdc1402a6fc9b633ccd578c4a92d0f4ef1a457fa2e16c596bc73fb409d6cc0"),
TestParseID("20bdc1402a6fc9b633ffffffffffffffffffffffffffffffffffffffffffffff"),
TestParseID("20ff988befa5fc40350f00d531a767606efefe242c837aaccb80673f286be53d"),
TestParseID("326cb59dfe802304f96ee9b5b9af93bdee73a30f53981e5ec579aedb6f1d0f07"),
TestParseID("86b60b9594d1d429c4aa98fa9562082cabf53b98c7dc083abe5dae31074dd15a"),
TestParseID("96c8dbe225079e624b5ce509f5bd817d1453cd0a85d30d536d01b64a8669aeae"),
TestParseID("fa31d65b87affcd167b119e9d3d2a27b8236ca4836cb077ed3e96fcbe209b792"),
}
func TestPrefixLength(t *testing.T) {
list := samples
m := mockBackend{}
m.list = func(t FileType, done <-chan struct{}) <-chan string {
ch := make(chan string)
go func() {
defer close(ch)
for _, id := range list {
select {
case ch <- id.String():
case <-done:
return
}
}
}()
return ch
}
l, err := PrefixLength(m, SnapshotFile)
if err != nil {
t.Error(err)
}
if l != 19 {
t.Errorf("wrong prefix length returned, want %d, got %d", 19, l)
}
list = samples[:3]
l, err = PrefixLength(m, SnapshotFile)
if err != nil {
t.Error(err)
}
if l != 19 {
t.Errorf("wrong prefix length returned, want %d, got %d", 19, l)
}
list = samples[3:]
l, err = PrefixLength(m, SnapshotFile)
if err != nil {
t.Error(err)
}
if l != 8 {
t.Errorf("wrong prefix length returned, want %d, got %d", 8, l)
}
}
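For orientation: the expected lengths follow from the sample IDs above. The first three share the 18-character prefix 20bdc1402a6fc9b633, so 19 characters are needed to distinguish them, while the IDs in samples[3:] already differ within their first 8 characters, so PrefixLength falls back to the minimum of 8.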

src/restic/blob.go (new file, 113 lines)
View file

@ -0,0 +1,113 @@
package restic
import (
"fmt"
"restic/errors"
)
// Blob is one part of a file or a tree.
type Blob struct {
Type BlobType
Length uint
ID ID
Offset uint
}
// PackedBlob is a blob stored within a file.
type PackedBlob struct {
Blob
PackID ID
}
// BlobHandle identifies a blob of a given type.
type BlobHandle struct {
ID ID
Type BlobType
}
func (h BlobHandle) String() string {
return fmt.Sprintf("<%s/%s>", h.Type, h.ID.Str())
}
// BlobType specifies what a blob stored in a pack is.
type BlobType uint8
// These are the blob types that can be stored in a pack.
const (
InvalidBlob BlobType = iota
DataBlob
TreeBlob
)
func (t BlobType) String() string {
switch t {
case DataBlob:
return "data"
case TreeBlob:
return "tree"
}
return fmt.Sprintf("<BlobType %d>", t)
}
// MarshalJSON encodes the BlobType into JSON.
func (t BlobType) MarshalJSON() ([]byte, error) {
switch t {
case DataBlob:
return []byte(`"data"`), nil
case TreeBlob:
return []byte(`"tree"`), nil
}
return nil, errors.New("unknown blob type")
}
// UnmarshalJSON decodes the BlobType from JSON.
func (t *BlobType) UnmarshalJSON(buf []byte) error {
switch string(buf) {
case `"data"`:
*t = DataBlob
case `"tree"`:
*t = TreeBlob
default:
return errors.New("unknown blob type")
}
return nil
}
// BlobHandles is an ordered list of BlobHandles that implements sort.Interface.
type BlobHandles []BlobHandle
func (h BlobHandles) Len() int {
return len(h)
}
func (h BlobHandles) Less(i, j int) bool {
for k, b := range h[i].ID {
if b == h[j].ID[k] {
continue
}
if b < h[j].ID[k] {
return true
}
return false
}
return h[i].Type < h[j].Type
}
func (h BlobHandles) Swap(i, j int) {
h[i], h[j] = h[j], h[i]
}
func (h BlobHandles) String() string {
elements := make([]string, 0, len(h))
for _, e := range h {
elements = append(elements, e.String())
}
return fmt.Sprintf("%v", elements)
}
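BlobHandles implements sort.Interface, ordering handles by ID bytes and then by blob type, so a deterministic order is one sort.Sort call away. A small sketch reusing two IDs from the samples above; it assumes the restic package from this commit is importable, as in the test files:

package main

import (
	"fmt"
	"restic"
	"sort"
)

func main() {
	handles := restic.BlobHandles{
		{ID: restic.TestParseID("fa31d65b87affcd167b119e9d3d2a27b8236ca4836cb077ed3e96fcbe209b792"), Type: restic.DataBlob},
		{ID: restic.TestParseID("20bdc1402a6fc9b633aaffffffffffffffffffffffffffffffffffffffffffff"), Type: restic.TreeBlob},
	}
	sort.Sort(handles)
	fmt.Println(handles) // the 20bd... handle now comes first
}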

View file

@ -1,12 +1,12 @@
package pack package restic
import "sort" import "sort"
// BlobSet is a set of blobs. // BlobSet is a set of blobs.
type BlobSet map[Handle]struct{} type BlobSet map[BlobHandle]struct{}
// NewBlobSet returns a new BlobSet, populated with ids. // NewBlobSet returns a new BlobSet, populated with ids.
func NewBlobSet(handles ...Handle) BlobSet { func NewBlobSet(handles ...BlobHandle) BlobSet {
m := make(BlobSet) m := make(BlobSet)
for _, h := range handles { for _, h := range handles {
m[h] = struct{}{} m[h] = struct{}{}
@ -16,18 +16,18 @@ func NewBlobSet(handles ...Handle) BlobSet {
} }
// Has returns true iff id is contained in the set. // Has returns true iff id is contained in the set.
func (s BlobSet) Has(h Handle) bool { func (s BlobSet) Has(h BlobHandle) bool {
_, ok := s[h] _, ok := s[h]
return ok return ok
} }
// Insert adds id to the set. // Insert adds id to the set.
func (s BlobSet) Insert(h Handle) { func (s BlobSet) Insert(h BlobHandle) {
s[h] = struct{}{} s[h] = struct{}{}
} }
// Delete removes id from the set. // Delete removes id from the set.
func (s BlobSet) Delete(h Handle) { func (s BlobSet) Delete(h BlobHandle) {
delete(s, h) delete(s, h)
} }
@ -87,9 +87,9 @@ func (s BlobSet) Sub(other BlobSet) (result BlobSet) {
return result return result
} }
// List returns a slice of all Handles in the set. // List returns a sorted slice of all BlobHandles in the set.
func (s BlobSet) List() Handles { func (s BlobSet) List() BlobHandles {
list := make(Handles, 0, len(s)) list := make(BlobHandles, 0, len(s))
for h := range s { for h := range s {
list = append(list, h) list = append(list, h)
} }

src/restic/blob_test.go (new file, 41 lines)
View file

@ -0,0 +1,41 @@
package restic
import (
"encoding/json"
"testing"
)
var blobTypeJSON = []struct {
t BlobType
res string
}{
{DataBlob, `"data"`},
{TreeBlob, `"tree"`},
}
func TestBlobTypeJSON(t *testing.T) {
for _, test := range blobTypeJSON {
// test serialize
buf, err := json.Marshal(test.t)
if err != nil {
t.Error(err)
continue
}
if test.res != string(buf) {
t.Errorf("want %q, got %q", test.res, string(buf))
continue
}
// test unserialize
var v BlobType
err = json.Unmarshal([]byte(test.res), &v)
if err != nil {
t.Error(err)
continue
}
if test.t != v {
t.Errorf("want %v, got %v", test.t, v)
continue
}
}
}

View file

@ -1,290 +0,0 @@
package restic
import (
"io"
"os"
"path/filepath"
"runtime"
"strings"
"github.com/pkg/errors"
"restic/backend"
"restic/debug"
"restic/fs"
"restic/repository"
)
// Cache is used to locally cache items from a repository.
type Cache struct {
base string
}
// NewCache returns a new cache at cacheDir. If it is the empty string, the
// default cache location is chosen.
func NewCache(repo *repository.Repository, cacheDir string) (*Cache, error) {
var err error
if cacheDir == "" {
cacheDir, err = getCacheDir()
if err != nil {
return nil, err
}
}
basedir := filepath.Join(cacheDir, repo.Config.ID)
debug.Log("Cache.New", "opened cache at %v", basedir)
return &Cache{base: basedir}, nil
}
// Has checks if the local cache has the id.
func (c *Cache) Has(t backend.Type, subtype string, id backend.ID) (bool, error) {
filename, err := c.filename(t, subtype, id)
if err != nil {
return false, err
}
fd, err := fs.Open(filename)
defer fd.Close()
if err != nil {
if os.IsNotExist(errors.Cause(err)) {
debug.Log("Cache.Has", "test for file %v: not cached", filename)
return false, nil
}
debug.Log("Cache.Has", "test for file %v: error %v", filename, err)
return false, errors.Wrap(err, "Open")
}
debug.Log("Cache.Has", "test for file %v: is cached", filename)
return true, nil
}
// Store returns an io.WriteCloser that is used to save new information to the
// cache. The returned io.WriteCloser must be closed by the caller after all
// data has been written.
func (c *Cache) Store(t backend.Type, subtype string, id backend.ID) (io.WriteCloser, error) {
filename, err := c.filename(t, subtype, id)
if err != nil {
return nil, err
}
dirname := filepath.Dir(filename)
err = fs.MkdirAll(dirname, 0700)
if err != nil {
return nil, errors.Wrap(err, "MkdirAll")
}
file, err := fs.Create(filename)
if err != nil {
debug.Log("Cache.Store", "error creating file %v: %v", filename, err)
return nil, errors.Wrap(err, "Create")
}
debug.Log("Cache.Store", "created file %v", filename)
return file, nil
}
// Load returns information from the cache. The returned io.ReadCloser must be
// closed by the caller.
func (c *Cache) Load(t backend.Type, subtype string, id backend.ID) (io.ReadCloser, error) {
filename, err := c.filename(t, subtype, id)
if err != nil {
return nil, err
}
return fs.Open(filename)
}
func (c *Cache) purge(t backend.Type, subtype string, id backend.ID) error {
filename, err := c.filename(t, subtype, id)
if err != nil {
return err
}
err = fs.Remove(filename)
debug.Log("Cache.purge", "Remove file %v: %v", filename, err)
if err != nil && os.IsNotExist(errors.Cause(err)) {
return nil
}
return errors.Wrap(err, "Remove")
}
// Clear removes information from the cache that isn't present in the repository any more.
func (c *Cache) Clear(repo *repository.Repository) error {
list, err := c.list(backend.Snapshot)
if err != nil {
return err
}
for _, entry := range list {
debug.Log("Cache.Clear", "found entry %v", entry)
if ok, err := repo.Backend().Test(backend.Snapshot, entry.ID.String()); !ok || err != nil {
debug.Log("Cache.Clear", "snapshot %v doesn't exist any more, removing %v", entry.ID, entry)
err = c.purge(backend.Snapshot, entry.Subtype, entry.ID)
if err != nil {
return err
}
}
}
return nil
}
type cacheEntry struct {
ID backend.ID
Subtype string
}
func (c cacheEntry) String() string {
if c.Subtype != "" {
return c.ID.Str() + "." + c.Subtype
}
return c.ID.Str()
}
func (c *Cache) list(t backend.Type) ([]cacheEntry, error) {
var dir string
switch t {
case backend.Snapshot:
dir = filepath.Join(c.base, "snapshots")
default:
return nil, errors.Errorf("cache not supported for type %v", t)
}
fd, err := fs.Open(dir)
if err != nil {
if os.IsNotExist(errors.Cause(err)) {
return []cacheEntry{}, nil
}
return nil, errors.Wrap(err, "Open")
}
defer fd.Close()
fis, err := fd.Readdir(-1)
if err != nil {
return nil, errors.Wrap(err, "Readdir")
}
entries := make([]cacheEntry, 0, len(fis))
for _, fi := range fis {
parts := strings.SplitN(fi.Name(), ".", 2)
id, err := backend.ParseID(parts[0])
// ignore invalid cache entries for now
if err != nil {
debug.Log("Cache.List", "unable to parse name %v as id: %v", parts[0], err)
continue
}
e := cacheEntry{ID: id}
if len(parts) == 2 {
e.Subtype = parts[1]
}
entries = append(entries, e)
}
return entries, nil
}
func (c *Cache) filename(t backend.Type, subtype string, id backend.ID) (string, error) {
filename := id.String()
if subtype != "" {
filename += "." + subtype
}
switch t {
case backend.Snapshot:
return filepath.Join(c.base, "snapshots", filename), nil
}
return "", errors.Errorf("cache not supported for type %v", t)
}
func getCacheDir() (string, error) {
if dir := os.Getenv("RESTIC_CACHE"); dir != "" {
return dir, nil
}
if runtime.GOOS == "windows" {
return getWindowsCacheDir()
}
return getXDGCacheDir()
}
// getWindowsCacheDir will return %APPDATA%\restic or create
// a folder in the temporary folder called "restic".
func getWindowsCacheDir() (string, error) {
cachedir := os.Getenv("APPDATA")
if cachedir == "" {
cachedir = os.TempDir()
}
cachedir = filepath.Join(cachedir, "restic")
fi, err := fs.Stat(cachedir)
if os.IsNotExist(errors.Cause(err)) {
err = fs.MkdirAll(cachedir, 0700)
if err != nil {
return "", errors.Wrap(err, "MkdirAll")
}
return cachedir, nil
}
if err != nil {
return "", errors.Wrap(err, "Stat")
}
if !fi.IsDir() {
return "", errors.Errorf("cache dir %v is not a directory", cachedir)
}
return cachedir, nil
}
// getXDGCacheDir returns the cache directory according to XDG basedir spec, see
// http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
func getXDGCacheDir() (string, error) {
xdgcache := os.Getenv("XDG_CACHE_HOME")
home := os.Getenv("HOME")
if xdgcache == "" && home == "" {
return "", errors.New("unable to locate cache directory (XDG_CACHE_HOME and HOME unset)")
}
cachedir := ""
if xdgcache != "" {
cachedir = filepath.Join(xdgcache, "restic")
} else if home != "" {
cachedir = filepath.Join(home, ".cache", "restic")
}
fi, err := fs.Stat(cachedir)
if os.IsNotExist(errors.Cause(err)) {
err = fs.MkdirAll(cachedir, 0700)
if err != nil {
return "", errors.Wrap(err, "MkdirAll")
}
fi, err = fs.Stat(cachedir)
debug.Log("getCacheDir", "create cache dir %v", cachedir)
}
if err != nil {
return "", errors.Wrap(err, "Stat")
}
if !fi.IsDir() {
return "", errors.Errorf("cache dir %v is not a directory", cachedir)
}
return cachedir, nil
}

View file

@ -1,26 +0,0 @@
package restic_test
import (
"testing"
"restic"
. "restic/test"
)
func TestCache(t *testing.T) {
repo := SetupRepo()
defer TeardownRepo(repo)
_, err := restic.NewCache(repo, "")
OK(t, err)
arch := restic.NewArchiver(repo)
// archive some files, this should automatically cache all blobs from the snapshot
_, _, err = arch.Snapshot(nil, []string{BenchArchiveDirectory}, nil)
if err != nil {
t.Fatal(err)
}
// TODO: test caching index
}

View file

@ -5,7 +5,7 @@ import (
"fmt" "fmt"
"sync" "sync"
"github.com/pkg/errors" "restic/errors"
"restic" "restic"
"restic/backend" "restic/backend"
@ -21,31 +21,31 @@ import (
// A Checker only tests for internal errors within the data structures of the // A Checker only tests for internal errors within the data structures of the
// repository (e.g. missing blobs), and needs a valid Repository to work on. // repository (e.g. missing blobs), and needs a valid Repository to work on.
type Checker struct { type Checker struct {
packs backend.IDSet packs restic.IDSet
blobs backend.IDSet blobs restic.IDSet
blobRefs struct { blobRefs struct {
sync.Mutex sync.Mutex
M map[backend.ID]uint M map[restic.ID]uint
} }
indexes map[backend.ID]*repository.Index indexes map[restic.ID]*repository.Index
orphanedPacks backend.IDs orphanedPacks restic.IDs
masterIndex *repository.MasterIndex masterIndex *repository.MasterIndex
repo *repository.Repository repo restic.Repository
} }
// New returns a new checker which runs on repo. // New returns a new checker which runs on repo.
func New(repo *repository.Repository) *Checker { func New(repo restic.Repository) *Checker {
c := &Checker{ c := &Checker{
packs: backend.NewIDSet(), packs: restic.NewIDSet(),
blobs: backend.NewIDSet(), blobs: restic.NewIDSet(),
masterIndex: repository.NewMasterIndex(), masterIndex: repository.NewMasterIndex(),
indexes: make(map[backend.ID]*repository.Index), indexes: make(map[restic.ID]*repository.Index),
repo: repo, repo: repo,
} }
c.blobRefs.M = make(map[backend.ID]uint) c.blobRefs.M = make(map[restic.ID]uint)
return c return c
} }
@ -54,8 +54,8 @@ const defaultParallelism = 40
// ErrDuplicatePacks is returned when a pack is found in more than one index. // ErrDuplicatePacks is returned when a pack is found in more than one index.
type ErrDuplicatePacks struct { type ErrDuplicatePacks struct {
PackID backend.ID PackID restic.ID
Indexes backend.IDSet Indexes restic.IDSet
} }
func (e ErrDuplicatePacks) Error() string { func (e ErrDuplicatePacks) Error() string {
@ -65,7 +65,7 @@ func (e ErrDuplicatePacks) Error() string {
// ErrOldIndexFormat is returned when an index with the old format is // ErrOldIndexFormat is returned when an index with the old format is
// found. // found.
type ErrOldIndexFormat struct { type ErrOldIndexFormat struct {
backend.ID restic.ID
} }
func (err ErrOldIndexFormat) Error() string { func (err ErrOldIndexFormat) Error() string {
@ -82,7 +82,7 @@ func (c *Checker) LoadIndex() (hints []error, errs []error) {
indexCh := make(chan indexRes) indexCh := make(chan indexRes)
worker := func(id backend.ID, done <-chan struct{}) error { worker := func(id restic.ID, done <-chan struct{}) error {
debug.Log("LoadIndex", "worker got index %v", id) debug.Log("LoadIndex", "worker got index %v", id)
idx, err := repository.LoadIndexWithDecoder(c.repo, id, repository.DecodeIndex) idx, err := repository.LoadIndexWithDecoder(c.repo, id, repository.DecodeIndex)
if errors.Cause(err) == repository.ErrOldIndexFormat { if errors.Cause(err) == repository.ErrOldIndexFormat {
@ -108,7 +108,7 @@ func (c *Checker) LoadIndex() (hints []error, errs []error) {
go func() { go func() {
defer close(indexCh) defer close(indexCh)
debug.Log("LoadIndex", "start loading indexes in parallel") debug.Log("LoadIndex", "start loading indexes in parallel")
perr = repository.FilesInParallel(c.repo.Backend(), backend.Index, defaultParallelism, perr = repository.FilesInParallel(c.repo.Backend(), restic.IndexFile, defaultParallelism,
repository.ParallelWorkFuncParseID(worker)) repository.ParallelWorkFuncParseID(worker))
debug.Log("LoadIndex", "loading indexes finished, error: %v", perr) debug.Log("LoadIndex", "loading indexes finished, error: %v", perr)
}() }()
@ -121,11 +121,11 @@ func (c *Checker) LoadIndex() (hints []error, errs []error) {
return hints, errs return hints, errs
} }
packToIndex := make(map[backend.ID]backend.IDSet) packToIndex := make(map[restic.ID]restic.IDSet)
for res := range indexCh { for res := range indexCh {
debug.Log("LoadIndex", "process index %v", res.ID) debug.Log("LoadIndex", "process index %v", res.ID)
idxID, err := backend.ParseID(res.ID) idxID, err := restic.ParseID(res.ID)
if err != nil { if err != nil {
errs = append(errs, errors.Errorf("unable to parse as index ID: %v", res.ID)) errs = append(errs, errors.Errorf("unable to parse as index ID: %v", res.ID))
continue continue
@ -143,7 +143,7 @@ func (c *Checker) LoadIndex() (hints []error, errs []error) {
cnt++ cnt++
if _, ok := packToIndex[blob.PackID]; !ok { if _, ok := packToIndex[blob.PackID]; !ok {
packToIndex[blob.PackID] = backend.NewIDSet() packToIndex[blob.PackID] = restic.NewIDSet()
} }
packToIndex[blob.PackID].Insert(idxID) packToIndex[blob.PackID].Insert(idxID)
} }
@ -171,7 +171,7 @@ func (c *Checker) LoadIndex() (hints []error, errs []error) {
// PackError describes an error with a specific pack. // PackError describes an error with a specific pack.
type PackError struct { type PackError struct {
ID backend.ID ID restic.ID
Orphaned bool Orphaned bool
Err error Err error
} }
@ -180,14 +180,14 @@ func (e PackError) Error() string {
return "pack " + e.ID.String() + ": " + e.Err.Error() return "pack " + e.ID.String() + ": " + e.Err.Error()
} }
func packIDTester(repo *repository.Repository, inChan <-chan backend.ID, errChan chan<- error, wg *sync.WaitGroup, done <-chan struct{}) { func packIDTester(repo restic.Repository, inChan <-chan restic.ID, errChan chan<- error, wg *sync.WaitGroup, done <-chan struct{}) {
debug.Log("Checker.testPackID", "worker start") debug.Log("Checker.testPackID", "worker start")
defer debug.Log("Checker.testPackID", "worker done") defer debug.Log("Checker.testPackID", "worker done")
defer wg.Done() defer wg.Done()
for id := range inChan { for id := range inChan {
ok, err := repo.Backend().Test(backend.Data, id.String()) ok, err := repo.Backend().Test(restic.DataFile, id.String())
if err != nil { if err != nil {
err = PackError{ID: id, Err: err} err = PackError{ID: id, Err: err}
} else { } else {
@ -218,11 +218,11 @@ func (c *Checker) Packs(errChan chan<- error, done <-chan struct{}) {
defer close(errChan) defer close(errChan)
debug.Log("Checker.Packs", "checking for %d packs", len(c.packs)) debug.Log("Checker.Packs", "checking for %d packs", len(c.packs))
seenPacks := backend.NewIDSet() seenPacks := restic.NewIDSet()
var workerWG sync.WaitGroup var workerWG sync.WaitGroup
IDChan := make(chan backend.ID) IDChan := make(chan restic.ID)
for i := 0; i < defaultParallelism; i++ { for i := 0; i < defaultParallelism; i++ {
workerWG.Add(1) workerWG.Add(1)
go packIDTester(c.repo, IDChan, errChan, &workerWG, done) go packIDTester(c.repo, IDChan, errChan, &workerWG, done)
@ -238,7 +238,7 @@ func (c *Checker) Packs(errChan chan<- error, done <-chan struct{}) {
workerWG.Wait() workerWG.Wait()
debug.Log("Checker.Packs", "workers terminated") debug.Log("Checker.Packs", "workers terminated")
for id := range c.repo.List(backend.Data, done) { for id := range c.repo.List(restic.DataFile, done) {
debug.Log("Checker.Packs", "check data blob %v", id.Str()) debug.Log("Checker.Packs", "check data blob %v", id.Str())
if !seenPacks.Has(id) { if !seenPacks.Has(id) {
c.orphanedPacks = append(c.orphanedPacks, id) c.orphanedPacks = append(c.orphanedPacks, id)
@ -253,8 +253,8 @@ func (c *Checker) Packs(errChan chan<- error, done <-chan struct{}) {
// Error is an error that occurred while checking a repository. // Error is an error that occurred while checking a repository.
type Error struct { type Error struct {
TreeID backend.ID TreeID restic.ID
BlobID backend.ID BlobID restic.ID
Err error Err error
} }
@ -273,25 +273,25 @@ func (e Error) Error() string {
return e.Err.Error() return e.Err.Error()
} }
func loadTreeFromSnapshot(repo *repository.Repository, id backend.ID) (backend.ID, error) { func loadTreeFromSnapshot(repo restic.Repository, id restic.ID) (restic.ID, error) {
sn, err := restic.LoadSnapshot(repo, id) sn, err := restic.LoadSnapshot(repo, id)
if err != nil { if err != nil {
debug.Log("Checker.loadTreeFromSnapshot", "error loading snapshot %v: %v", id.Str(), err) debug.Log("Checker.loadTreeFromSnapshot", "error loading snapshot %v: %v", id.Str(), err)
return backend.ID{}, err return restic.ID{}, err
} }
if sn.Tree == nil { if sn.Tree == nil {
debug.Log("Checker.loadTreeFromSnapshot", "snapshot %v has no tree", id.Str()) debug.Log("Checker.loadTreeFromSnapshot", "snapshot %v has no tree", id.Str())
return backend.ID{}, errors.Errorf("snapshot %v has no tree", id) return restic.ID{}, errors.Errorf("snapshot %v has no tree", id)
} }
return *sn.Tree, nil return *sn.Tree, nil
} }
// loadSnapshotTreeIDs loads all snapshots from backend and returns the tree IDs. // loadSnapshotTreeIDs loads all snapshots from backend and returns the tree IDs.
func loadSnapshotTreeIDs(repo *repository.Repository) (backend.IDs, []error) { func loadSnapshotTreeIDs(repo restic.Repository) (restic.IDs, []error) {
var trees struct { var trees struct {
IDs backend.IDs IDs restic.IDs
sync.Mutex sync.Mutex
} }
@ -301,7 +301,7 @@ func loadSnapshotTreeIDs(repo *repository.Repository) (backend.IDs, []error) {
} }
snapshotWorker := func(strID string, done <-chan struct{}) error { snapshotWorker := func(strID string, done <-chan struct{}) error {
id, err := backend.ParseID(strID) id, err := restic.ParseID(strID)
if err != nil { if err != nil {
return err return err
} }
@ -324,7 +324,7 @@ func loadSnapshotTreeIDs(repo *repository.Repository) (backend.IDs, []error) {
return nil return nil
} }
err := repository.FilesInParallel(repo.Backend(), backend.Snapshot, defaultParallelism, snapshotWorker) err := repository.FilesInParallel(repo.Backend(), restic.SnapshotFile, defaultParallelism, snapshotWorker)
if err != nil { if err != nil {
errs.errs = append(errs.errs, err) errs.errs = append(errs.errs, err)
} }
@ -334,7 +334,7 @@ func loadSnapshotTreeIDs(repo *repository.Repository) (backend.IDs, []error) {
// TreeError collects several errors that occurred while processing a tree. // TreeError collects several errors that occurred while processing a tree.
type TreeError struct { type TreeError struct {
ID backend.ID ID restic.ID
Errors []error Errors []error
} }
@ -343,14 +343,14 @@ func (e TreeError) Error() string {
} }
type treeJob struct { type treeJob struct {
backend.ID restic.ID
error error
*restic.Tree *restic.Tree
} }
// loadTreeWorker loads trees from repo and sends them to out. // loadTreeWorker loads trees from repo and sends them to out.
func loadTreeWorker(repo *repository.Repository, func loadTreeWorker(repo restic.Repository,
in <-chan backend.ID, out chan<- treeJob, in <-chan restic.ID, out chan<- treeJob,
done <-chan struct{}, wg *sync.WaitGroup) { done <-chan struct{}, wg *sync.WaitGroup) {
defer func() { defer func() {
@ -376,7 +376,7 @@ func loadTreeWorker(repo *repository.Repository,
} }
debug.Log("checker.loadTreeWorker", "load tree %v", treeID.Str()) debug.Log("checker.loadTreeWorker", "load tree %v", treeID.Str())
tree, err := restic.LoadTree(repo, treeID) tree, err := repo.LoadTree(treeID)
debug.Log("checker.loadTreeWorker", "load tree %v (%v) returned err: %v", tree, treeID.Str(), err) debug.Log("checker.loadTreeWorker", "load tree %v (%v) returned err: %v", tree, treeID.Str(), err)
job = treeJob{ID: treeID, error: err, Tree: tree} job = treeJob{ID: treeID, error: err, Tree: tree}
outCh = out outCh = out
@ -454,7 +454,7 @@ func (c *Checker) checkTreeWorker(in <-chan treeJob, out chan<- error, done <-ch
} }
} }
func filterTrees(backlog backend.IDs, loaderChan chan<- backend.ID, in <-chan treeJob, out chan<- treeJob, done <-chan struct{}) { func filterTrees(backlog restic.IDs, loaderChan chan<- restic.ID, in <-chan treeJob, out chan<- treeJob, done <-chan struct{}) {
defer func() { defer func() {
debug.Log("checker.filterTrees", "closing output channels") debug.Log("checker.filterTrees", "closing output channels")
close(loaderChan) close(loaderChan)
@ -466,7 +466,7 @@ func filterTrees(backlog backend.IDs, loaderChan chan<- backend.ID, in <-chan tr
outCh = out outCh = out
loadCh = loaderChan loadCh = loaderChan
job treeJob job treeJob
nextTreeID backend.ID nextTreeID restic.ID
outstandingLoadTreeJobs = 0 outstandingLoadTreeJobs = 0
) )
@ -559,7 +559,7 @@ func (c *Checker) Structure(errChan chan<- error, done <-chan struct{}) {
} }
} }
treeIDChan := make(chan backend.ID) treeIDChan := make(chan restic.ID)
treeJobChan1 := make(chan treeJob) treeJobChan1 := make(chan treeJob)
treeJobChan2 := make(chan treeJob) treeJobChan2 := make(chan treeJob)
@ -575,10 +575,10 @@ func (c *Checker) Structure(errChan chan<- error, done <-chan struct{}) {
wg.Wait() wg.Wait()
} }
func (c *Checker) checkTree(id backend.ID, tree *restic.Tree) (errs []error) { func (c *Checker) checkTree(id restic.ID, tree *restic.Tree) (errs []error) {
debug.Log("Checker.checkTree", "checking tree %v", id.Str()) debug.Log("Checker.checkTree", "checking tree %v", id.Str())
var blobs []backend.ID var blobs []restic.ID
for _, node := range tree.Nodes { for _, node := range tree.Nodes {
switch node.Type { switch node.Type {
@ -634,7 +634,7 @@ func (c *Checker) checkTree(id backend.ID, tree *restic.Tree) (errs []error) {
} }
// UnusedBlobs returns all blobs that have never been referenced. // UnusedBlobs returns all blobs that have never been referenced.
func (c *Checker) UnusedBlobs() (blobs backend.IDs) { func (c *Checker) UnusedBlobs() (blobs restic.IDs) {
c.blobRefs.Lock() c.blobRefs.Lock()
defer c.blobRefs.Unlock() defer c.blobRefs.Unlock()
@ -650,7 +650,7 @@ func (c *Checker) UnusedBlobs() (blobs backend.IDs) {
} }
// OrphanedPacks returns a slice of unused packs (only available after Packs() was run). // OrphanedPacks returns a slice of unused packs (only available after Packs() was run).
func (c *Checker) OrphanedPacks() backend.IDs { func (c *Checker) OrphanedPacks() restic.IDs {
return c.orphanedPacks return c.orphanedPacks
} }
@ -660,15 +660,15 @@ func (c *Checker) CountPacks() uint64 {
} }
// checkPack reads a pack and checks the integrity of all blobs. // checkPack reads a pack and checks the integrity of all blobs.
func checkPack(r *repository.Repository, id backend.ID) error { func checkPack(r restic.Repository, id restic.ID) error {
debug.Log("Checker.checkPack", "checking pack %v", id.Str()) debug.Log("Checker.checkPack", "checking pack %v", id.Str())
h := backend.Handle{Type: backend.Data, Name: id.String()} h := restic.Handle{Type: restic.DataFile, Name: id.String()}
buf, err := backend.LoadAll(r.Backend(), h, nil) buf, err := backend.LoadAll(r.Backend(), h, nil)
if err != nil { if err != nil {
return err return err
} }
hash := backend.Hash(buf) hash := restic.Hash(buf)
if !hash.Equal(id) { if !hash.Equal(id) {
debug.Log("Checker.checkPack", "Pack ID does not match, want %v, got %v", id.Str(), hash.Str()) debug.Log("Checker.checkPack", "Pack ID does not match, want %v, got %v", id.Str(), hash.Str())
return errors.Errorf("Pack ID does not match, want %v, got %v", id.Str(), hash.Str()) return errors.Errorf("Pack ID does not match, want %v, got %v", id.Str(), hash.Str())
@ -684,14 +684,15 @@ func checkPack(r *repository.Repository, id backend.ID) error {
debug.Log("Checker.checkPack", " check blob %d: %v", i, blob.ID.Str()) debug.Log("Checker.checkPack", " check blob %d: %v", i, blob.ID.Str())
plainBuf := make([]byte, blob.Length) plainBuf := make([]byte, blob.Length)
plainBuf, err = crypto.Decrypt(r.Key(), plainBuf, buf[blob.Offset:blob.Offset+blob.Length]) n, err := crypto.Decrypt(r.Key(), plainBuf, buf[blob.Offset:blob.Offset+blob.Length])
if err != nil { if err != nil {
debug.Log("Checker.checkPack", " error decrypting blob %v: %v", blob.ID.Str(), err) debug.Log("Checker.checkPack", " error decrypting blob %v: %v", blob.ID.Str(), err)
errs = append(errs, errors.Errorf("blob %v: %v", i, err)) errs = append(errs, errors.Errorf("blob %v: %v", i, err))
continue continue
} }
plainBuf = plainBuf[:n]
hash := backend.Hash(plainBuf) hash := restic.Hash(plainBuf)
if !hash.Equal(blob.ID) { if !hash.Equal(blob.ID) {
debug.Log("Checker.checkPack", " Blob ID does not match, want %v, got %v", blob.ID.Str(), hash.Str()) debug.Log("Checker.checkPack", " Blob ID does not match, want %v, got %v", blob.ID.Str(), hash.Str())
errs = append(errs, errors.Errorf("Blob ID does not match, want %v, got %v", blob.ID.Str(), hash.Str())) errs = append(errs, errors.Errorf("Blob ID does not match, want %v, got %v", blob.ID.Str(), hash.Str()))
@ -713,10 +714,10 @@ func (c *Checker) ReadData(p *restic.Progress, errChan chan<- error, done <-chan
p.Start() p.Start()
defer p.Done() defer p.Done()
worker := func(wg *sync.WaitGroup, in <-chan backend.ID) { worker := func(wg *sync.WaitGroup, in <-chan restic.ID) {
defer wg.Done() defer wg.Done()
for { for {
var id backend.ID var id restic.ID
var ok bool var ok bool
select { select {
@ -742,7 +743,7 @@ func (c *Checker) ReadData(p *restic.Progress, errChan chan<- error, done <-chan
} }
} }
ch := c.repo.List(backend.Data, done) ch := c.repo.List(restic.DataFile, done)
var wg sync.WaitGroup var wg sync.WaitGroup
for i := 0; i < defaultParallelism; i++ { for i := 0; i < defaultParallelism; i++ {
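Taken together, the checker's call sequence is unchanged by the move from *repository.Repository to the restic.Repository interface. A hedged sketch of a full check run; runCheck is illustrative only and restic's real check command differs in details:

package main

import (
	"fmt"

	"restic"
	"restic/checker"
)

// runCheck loads the indexes, then verifies pack files and snapshot trees.
func runCheck(repo restic.Repository) {
	chkr := checker.New(repo)

	hints, errs := chkr.LoadIndex() // also reports old-format and duplicate indexes
	for _, hint := range hints {
		fmt.Println("hint:", hint)
	}
	if len(errs) > 0 {
		fmt.Println("index errors:", errs)
		return
	}

	done := make(chan struct{})
	defer close(done)

	errChan := make(chan error)
	go chkr.Packs(errChan, done) // every pack referenced by the index must exist
	for err := range errChan {
		fmt.Println("pack error:", err)
	}

	errChan = make(chan error)
	go chkr.Structure(errChan, done) // walk snapshot trees, verify blob references
	for err := range errChan {
		fmt.Println("structure error:", err)
	}
}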

View file

@ -1,23 +1,22 @@
package checker_test package checker_test
import ( import (
"fmt"
"math/rand" "math/rand"
"path/filepath" "path/filepath"
"sort" "sort"
"testing" "testing"
"restic" "restic"
"restic/backend" "restic/archiver"
"restic/backend/mem" "restic/backend/mem"
"restic/checker" "restic/checker"
"restic/repository" "restic/repository"
. "restic/test" "restic/test"
) )
var checkerTestData = filepath.Join("testdata", "checker-test-repo.tar.gz") var checkerTestData = filepath.Join("testdata", "checker-test-repo.tar.gz")
func list(repo *repository.Repository, t backend.Type) (IDs []string) { func list(repo restic.Repository, t restic.FileType) (IDs []string) {
done := make(chan struct{}) done := make(chan struct{})
defer close(done) defer close(done)
@ -60,8 +59,10 @@ func checkData(chkr *checker.Checker) []error {
} }
func TestCheckRepo(t *testing.T) { func TestCheckRepo(t *testing.T) {
WithTestEnvironment(t, checkerTestData, func(repodir string) { repodir, cleanup := test.Env(t, checkerTestData)
repo := OpenLocalRepo(t, repodir) defer cleanup()
repo := repository.TestOpenLocal(t, repodir)
chkr := checker.New(repo) chkr := checker.New(repo)
hints, errs := chkr.LoadIndex() hints, errs := chkr.LoadIndex()
@ -73,17 +74,18 @@ func TestCheckRepo(t *testing.T) {
t.Errorf("expected no hints, got %v: %v", len(hints), hints) t.Errorf("expected no hints, got %v: %v", len(hints), hints)
} }
OKs(t, checkPacks(chkr)) test.OKs(t, checkPacks(chkr))
OKs(t, checkStruct(chkr)) test.OKs(t, checkStruct(chkr))
})
} }
func TestMissingPack(t *testing.T) { func TestMissingPack(t *testing.T) {
WithTestEnvironment(t, checkerTestData, func(repodir string) { repodir, cleanup := test.Env(t, checkerTestData)
repo := OpenLocalRepo(t, repodir) defer cleanup()
repo := repository.TestOpenLocal(t, repodir)
packID := "657f7fb64f6a854fff6fe9279998ee09034901eded4e6db9bcee0e59745bbce6" packID := "657f7fb64f6a854fff6fe9279998ee09034901eded4e6db9bcee0e59745bbce6"
OK(t, repo.Backend().Remove(backend.Data, packID)) test.OK(t, repo.Backend().Remove(restic.DataFile, packID))
chkr := checker.New(repo) chkr := checker.New(repo)
hints, errs := chkr.LoadIndex() hints, errs := chkr.LoadIndex()
@ -97,25 +99,26 @@ func TestMissingPack(t *testing.T) {
errs = checkPacks(chkr) errs = checkPacks(chkr)
Assert(t, len(errs) == 1, test.Assert(t, len(errs) == 1,
"expected exactly one error, got %v", len(errs)) "expected exactly one error, got %v", len(errs))
if err, ok := errs[0].(checker.PackError); ok { if err, ok := errs[0].(checker.PackError); ok {
Equals(t, packID, err.ID.String()) test.Equals(t, packID, err.ID.String())
} else { } else {
t.Errorf("expected error returned by checker.Packs() to be PackError, got %v", err) t.Errorf("expected error returned by checker.Packs() to be PackError, got %v", err)
} }
})
} }
func TestUnreferencedPack(t *testing.T) { func TestUnreferencedPack(t *testing.T) {
WithTestEnvironment(t, checkerTestData, func(repodir string) { repodir, cleanup := test.Env(t, checkerTestData)
repo := OpenLocalRepo(t, repodir) defer cleanup()
repo := repository.TestOpenLocal(t, repodir)
// index 3f1a only references pack 60e0 // index 3f1a only references pack 60e0
indexID := "3f1abfcb79c6f7d0a3be517d2c83c8562fba64ef2c8e9a3544b4edaf8b5e3b44" indexID := "3f1abfcb79c6f7d0a3be517d2c83c8562fba64ef2c8e9a3544b4edaf8b5e3b44"
packID := "60e0438dcb978ec6860cc1f8c43da648170ee9129af8f650f876bad19f8f788e" packID := "60e0438dcb978ec6860cc1f8c43da648170ee9129af8f650f876bad19f8f788e"
OK(t, repo.Backend().Remove(backend.Index, indexID)) test.OK(t, repo.Backend().Remove(restic.IndexFile, indexID))
chkr := checker.New(repo) chkr := checker.New(repo)
hints, errs := chkr.LoadIndex() hints, errs := chkr.LoadIndex()
@ -129,31 +132,32 @@ func TestUnreferencedPack(t *testing.T) {
errs = checkPacks(chkr) errs = checkPacks(chkr)
Assert(t, len(errs) == 1, test.Assert(t, len(errs) == 1,
"expected exactly one error, got %v", len(errs)) "expected exactly one error, got %v", len(errs))
if err, ok := errs[0].(checker.PackError); ok { if err, ok := errs[0].(checker.PackError); ok {
Equals(t, packID, err.ID.String()) test.Equals(t, packID, err.ID.String())
} else { } else {
t.Errorf("expected error returned by checker.Packs() to be PackError, got %v", err) t.Errorf("expected error returned by checker.Packs() to be PackError, got %v", err)
} }
})
} }
func TestUnreferencedBlobs(t *testing.T) { func TestUnreferencedBlobs(t *testing.T) {
WithTestEnvironment(t, checkerTestData, func(repodir string) { repodir, cleanup := test.Env(t, checkerTestData)
repo := OpenLocalRepo(t, repodir) defer cleanup()
repo := repository.TestOpenLocal(t, repodir)
snID := "51d249d28815200d59e4be7b3f21a157b864dc343353df9d8e498220c2499b02" snID := "51d249d28815200d59e4be7b3f21a157b864dc343353df9d8e498220c2499b02"
OK(t, repo.Backend().Remove(backend.Snapshot, snID)) test.OK(t, repo.Backend().Remove(restic.SnapshotFile, snID))
unusedBlobsBySnapshot := backend.IDs{ unusedBlobsBySnapshot := restic.IDs{
ParseID("58c748bbe2929fdf30c73262bd8313fe828f8925b05d1d4a87fe109082acb849"), restic.TestParseID("58c748bbe2929fdf30c73262bd8313fe828f8925b05d1d4a87fe109082acb849"),
ParseID("988a272ab9768182abfd1fe7d7a7b68967825f0b861d3b36156795832c772235"), restic.TestParseID("988a272ab9768182abfd1fe7d7a7b68967825f0b861d3b36156795832c772235"),
ParseID("c01952de4d91da1b1b80bc6e06eaa4ec21523f4853b69dc8231708b9b7ec62d8"), restic.TestParseID("c01952de4d91da1b1b80bc6e06eaa4ec21523f4853b69dc8231708b9b7ec62d8"),
ParseID("bec3a53d7dc737f9a9bee68b107ec9e8ad722019f649b34d474b9982c3a3fec7"), restic.TestParseID("bec3a53d7dc737f9a9bee68b107ec9e8ad722019f649b34d474b9982c3a3fec7"),
ParseID("2a6f01e5e92d8343c4c6b78b51c5a4dc9c39d42c04e26088c7614b13d8d0559d"), restic.TestParseID("2a6f01e5e92d8343c4c6b78b51c5a4dc9c39d42c04e26088c7614b13d8d0559d"),
ParseID("18b51b327df9391732ba7aaf841a4885f350d8a557b2da8352c9acf8898e3f10"), restic.TestParseID("18b51b327df9391732ba7aaf841a4885f350d8a557b2da8352c9acf8898e3f10"),
} }
sort.Sort(unusedBlobsBySnapshot) sort.Sort(unusedBlobsBySnapshot)
@ -168,21 +172,22 @@ func TestUnreferencedBlobs(t *testing.T) {
t.Errorf("expected no hints, got %v: %v", len(hints), hints) t.Errorf("expected no hints, got %v: %v", len(hints), hints)
} }
OKs(t, checkPacks(chkr)) test.OKs(t, checkPacks(chkr))
OKs(t, checkStruct(chkr)) test.OKs(t, checkStruct(chkr))
blobs := chkr.UnusedBlobs() blobs := chkr.UnusedBlobs()
sort.Sort(blobs) sort.Sort(blobs)
Equals(t, unusedBlobsBySnapshot, blobs) test.Equals(t, unusedBlobsBySnapshot, blobs)
})
} }
var checkerDuplicateIndexTestData = filepath.Join("testdata", "duplicate-packs-in-index-test-repo.tar.gz") var checkerDuplicateIndexTestData = filepath.Join("testdata", "duplicate-packs-in-index-test-repo.tar.gz")
func TestDuplicatePacksInIndex(t *testing.T) { func TestDuplicatePacksInIndex(t *testing.T) {
WithTestEnvironment(t, checkerDuplicateIndexTestData, func(repodir string) { repodir, cleanup := test.Env(t, checkerDuplicateIndexTestData)
repo := OpenLocalRepo(t, repodir) defer cleanup()
repo := repository.TestOpenLocal(t, repodir)
chkr := checker.New(repo) chkr := checker.New(repo)
hints, errs := chkr.LoadIndex() hints, errs := chkr.LoadIndex()
@ -206,18 +211,15 @@ func TestDuplicatePacksInIndex(t *testing.T) {
if len(errs) > 0 { if len(errs) > 0 {
t.Errorf("expected no errors, got %v: %v", len(errs), errs) t.Errorf("expected no errors, got %v: %v", len(errs), errs)
} }
})
} }
// errorBackend randomly modifies data after reading. // errorBackend randomly modifies data after reading.
type errorBackend struct { type errorBackend struct {
backend.Backend restic.Backend
ProduceErrors bool ProduceErrors bool
} }
func (b errorBackend) Load(h backend.Handle, p []byte, off int64) (int, error) { func (b errorBackend) Load(h restic.Handle, p []byte, off int64) (int, error) {
fmt.Printf("load %v\n", h)
n, err := b.Backend.Load(h, p, off) n, err := b.Backend.Load(h, p, off)
if b.ProduceErrors { if b.ProduceErrors {
@ -242,16 +244,16 @@ func TestCheckerModifiedData(t *testing.T) {
repository.TestUseLowSecurityKDFParameters(t) repository.TestUseLowSecurityKDFParameters(t)
repo := repository.New(be) repo := repository.New(be)
OK(t, repo.Init(TestPassword)) test.OK(t, repo.Init(test.TestPassword))
arch := restic.NewArchiver(repo) arch := archiver.New(repo)
_, id, err := arch.Snapshot(nil, []string{"."}, nil) _, id, err := arch.Snapshot(nil, []string{"."}, nil)
OK(t, err) test.OK(t, err)
t.Logf("archived as %v", id.Str()) t.Logf("archived as %v", id.Str())
beError := &errorBackend{Backend: be} beError := &errorBackend{Backend: be}
checkRepo := repository.New(beError) checkRepo := repository.New(beError)
OK(t, checkRepo.SearchKey(TestPassword, 5)) test.OK(t, checkRepo.SearchKey(test.TestPassword, 5))
chkr := checker.New(checkRepo) chkr := checker.New(checkRepo)

View file

@ -1,12 +1,12 @@
package checker package checker
import ( import (
"restic/repository" "restic"
"testing" "testing"
) )
// TestCheckRepo runs the checker on repo. // TestCheckRepo runs the checker on repo.
func TestCheckRepo(t testing.TB, repo *repository.Repository) { func TestCheckRepo(t testing.TB, repo restic.Repository) {
chkr := New(repo) chkr := New(repo)
hints, errs := chkr.LoadIndex() hints, errs := chkr.LoadIndex()

View file

@ -1,15 +1,10 @@
package repository package restic
import ( import (
"crypto/rand"
"crypto/sha256"
"encoding/hex"
"io"
"testing" "testing"
"github.com/pkg/errors" "restic/errors"
"restic/backend"
"restic/debug" "restic/debug"
"github.com/restic/chunker" "github.com/restic/chunker"
@ -22,21 +17,18 @@ type Config struct {
ChunkerPolynomial chunker.Pol `json:"chunker_polynomial"` ChunkerPolynomial chunker.Pol `json:"chunker_polynomial"`
} }
// repositoryIDSize is the length of the ID chosen at random for a new repository.
const repositoryIDSize = sha256.Size
// RepoVersion is the version that is written to the config when a repository // RepoVersion is the version that is written to the config when a repository
// is newly created with Init(). // is newly created with Init().
const RepoVersion = 1 const RepoVersion = 1
// JSONUnpackedSaver saves unpacked JSON. // JSONUnpackedSaver saves unpacked JSON.
type JSONUnpackedSaver interface { type JSONUnpackedSaver interface {
SaveJSONUnpacked(backend.Type, interface{}) (backend.ID, error) SaveJSONUnpacked(FileType, interface{}) (ID, error)
} }
// JSONUnpackedLoader loads unpacked JSON. // JSONUnpackedLoader loads unpacked JSON.
type JSONUnpackedLoader interface { type JSONUnpackedLoader interface {
LoadJSONUnpacked(backend.Type, backend.ID, interface{}) error LoadJSONUnpacked(FileType, ID, interface{}) error
} }
// CreateConfig creates a config file with a randomly selected polynomial and // CreateConfig creates a config file with a randomly selected polynomial and
@ -52,13 +44,7 @@ func CreateConfig() (Config, error) {
return Config{}, errors.Wrap(err, "chunker.RandomPolynomial") return Config{}, errors.Wrap(err, "chunker.RandomPolynomial")
} }
newID := make([]byte, repositoryIDSize) cfg.ID = NewRandomID().String()
_, err = io.ReadFull(rand.Reader, newID)
if err != nil {
return Config{}, errors.Wrap(err, "io.ReadFull")
}
cfg.ID = hex.EncodeToString(newID)
cfg.Version = RepoVersion cfg.Version = RepoVersion
debug.Log("Repo.CreateConfig", "New config: %#v", cfg) debug.Log("Repo.CreateConfig", "New config: %#v", cfg)
@ -69,13 +55,7 @@ func CreateConfig() (Config, error) {
func TestCreateConfig(t testing.TB, pol chunker.Pol) (cfg Config) { func TestCreateConfig(t testing.TB, pol chunker.Pol) (cfg Config) {
cfg.ChunkerPolynomial = pol cfg.ChunkerPolynomial = pol
newID := make([]byte, repositoryIDSize) cfg.ID = NewRandomID().String()
_, err := io.ReadFull(rand.Reader, newID)
if err != nil {
t.Fatalf("unable to create random ID: %v", err)
}
cfg.ID = hex.EncodeToString(newID)
cfg.Version = RepoVersion cfg.Version = RepoVersion
return cfg return cfg
@ -87,7 +67,7 @@ func LoadConfig(r JSONUnpackedLoader) (Config, error) {
cfg Config cfg Config
) )
err := r.LoadJSONUnpacked(backend.Config, backend.ID{}, &cfg) err := r.LoadJSONUnpacked(ConfigFile, ID{}, &cfg)
if err != nil { if err != nil {
return Config{}, err return Config{}, err
} }

src/restic/config_test.go Normal file
View file

@ -0,0 +1,54 @@
package restic_test
import (
"restic"
"testing"
. "restic/test"
)
type saver func(restic.FileType, interface{}) (restic.ID, error)
func (s saver) SaveJSONUnpacked(t restic.FileType, arg interface{}) (restic.ID, error) {
return s(t, arg)
}
type loader func(restic.FileType, restic.ID, interface{}) error
func (l loader) LoadJSONUnpacked(t restic.FileType, id restic.ID, arg interface{}) error {
return l(t, id, arg)
}
func TestConfig(t *testing.T) {
resultConfig := restic.Config{}
save := func(tpe restic.FileType, arg interface{}) (restic.ID, error) {
Assert(t, tpe == restic.ConfigFile,
"wrong backend type: got %v, wanted %v",
tpe, restic.ConfigFile)
cfg := arg.(restic.Config)
resultConfig = cfg
return restic.ID{}, nil
}
cfg1, err := restic.CreateConfig()
OK(t, err)
_, err = saver(save).SaveJSONUnpacked(restic.ConfigFile, cfg1)
load := func(tpe restic.FileType, id restic.ID, arg interface{}) error {
Assert(t, tpe == restic.ConfigFile,
"wrong backend type: got %v, wanted %v",
tpe, restic.ConfigFile)
cfg := arg.(*restic.Config)
*cfg = resultConfig
return nil
}
cfg2, err := restic.LoadConfig(loader(load))
OK(t, err)
Assert(t, cfg1 == cfg2,
"configs aren't equal: %v != %v", cfg1, cfg2)
}

View file

@ -7,7 +7,7 @@ import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"github.com/pkg/errors" "restic/errors"
"golang.org/x/crypto/poly1305" "golang.org/x/crypto/poly1305"
) )
@ -274,9 +274,9 @@ func Encrypt(ks *Key, ciphertext []byte, plaintext []byte) ([]byte, error) {
// Decrypt verifies and decrypts the ciphertext. Ciphertext must be in the form // Decrypt verifies and decrypts the ciphertext. Ciphertext must be in the form
// IV || Ciphertext || MAC. plaintext and ciphertext may point to (exactly) the // IV || Ciphertext || MAC. plaintext and ciphertext may point to (exactly) the
// same slice. // same slice.
func Decrypt(ks *Key, plaintext []byte, ciphertextWithMac []byte) ([]byte, error) { func Decrypt(ks *Key, plaintext []byte, ciphertextWithMac []byte) (int, error) {
if !ks.Valid() { if !ks.Valid() {
return nil, errors.New("invalid key") return 0, errors.New("invalid key")
} }
// check for plausible length // check for plausible length
@ -284,21 +284,26 @@ func Decrypt(ks *Key, plaintext []byte, ciphertextWithMac []byte) ([]byte, error
panic("trying to decrypt invalid data: ciphertext too small") panic("trying to decrypt invalid data: ciphertext too small")
} }
// check buffer length for plaintext
plaintextLength := len(ciphertextWithMac) - ivSize - macSize
if len(plaintext) < plaintextLength {
return 0, errors.Errorf("plaintext buffer too small, %d < %d", len(plaintext), plaintextLength)
}
// extract mac // extract mac
l := len(ciphertextWithMac) - macSize l := len(ciphertextWithMac) - macSize
ciphertextWithIV, mac := ciphertextWithMac[:l], ciphertextWithMac[l:] ciphertextWithIV, mac := ciphertextWithMac[:l], ciphertextWithMac[l:]
// verify mac // verify mac
if !poly1305Verify(ciphertextWithIV[ivSize:], ciphertextWithIV[:ivSize], &ks.MAC, mac) { if !poly1305Verify(ciphertextWithIV[ivSize:], ciphertextWithIV[:ivSize], &ks.MAC, mac) {
return nil, ErrUnauthenticated return 0, ErrUnauthenticated
} }
// extract iv // extract iv
iv, ciphertext := ciphertextWithIV[:ivSize], ciphertextWithIV[ivSize:] iv, ciphertext := ciphertextWithIV[:ivSize], ciphertextWithIV[ivSize:]
if cap(plaintext) < len(ciphertext) { if len(ciphertext) != plaintextLength {
// extend plaintext return 0, errors.Errorf("plaintext and ciphertext lengths do not match: %d != %d", len(ciphertext), plaintextLength)
plaintext = append(plaintext, make([]byte, len(ciphertext)-cap(plaintext))...)
} }
// decrypt data // decrypt data
@ -312,7 +317,7 @@ func Decrypt(ks *Key, plaintext []byte, ciphertextWithMac []byte) ([]byte, error
plaintext = plaintext[:len(ciphertext)] plaintext = plaintext[:len(ciphertext)]
e.XORKeyStream(plaintext, ciphertext) e.XORKeyStream(plaintext, ciphertext)
return plaintext, nil return plaintextLength, nil
} }
// Valid tests if the key is valid. // Valid tests if the key is valid.
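The new signature moves buffer management to the caller: Decrypt fills a caller-provided slice and returns the number of plaintext bytes, instead of growing and returning a slice. A sketch of the new calling convention, assuming it sits in the same crypto package:

// decryptBlob shows the new contract: a buffer of len(ciphertextWithMac)
// is always large enough, and only the first n bytes are plaintext.
func decryptBlob(k *Key, ciphertextWithMac []byte) ([]byte, error) {
	plain := make([]byte, len(ciphertextWithMac))
	n, err := Decrypt(k, plain, ciphertextWithMac)
	if err != nil {
		return nil, err
	}
	return plain[:n], nil
}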

View file

@ -100,15 +100,17 @@ func TestCrypto(t *testing.T) {
} }
// decrypt message // decrypt message
_, err = Decrypt(k, []byte{}, msg) buf := make([]byte, len(tv.plaintext))
n, err := Decrypt(k, buf, msg)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
buf = buf[:n]
// change mac, this must fail // change mac, this must fail
msg[len(msg)-8] ^= 0x23 msg[len(msg)-8] ^= 0x23
if _, err = Decrypt(k, []byte{}, msg); err != ErrUnauthenticated { if _, err = Decrypt(k, buf, msg); err != ErrUnauthenticated {
t.Fatal("wrong MAC value not detected") t.Fatal("wrong MAC value not detected")
} }
@ -118,15 +120,17 @@ func TestCrypto(t *testing.T) {
// tamper with message, this must fail // tamper with message, this must fail
msg[16+5] ^= 0x85 msg[16+5] ^= 0x85
if _, err = Decrypt(k, []byte{}, msg); err != ErrUnauthenticated { if _, err = Decrypt(k, buf, msg); err != ErrUnauthenticated {
t.Fatal("tampered message not detected") t.Fatal("tampered message not detected")
} }
// test decryption // test decryption
p, err := Decrypt(k, []byte{}, tv.ciphertext) p := make([]byte, len(tv.ciphertext))
n, err = Decrypt(k, p, tv.ciphertext)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
p = p[:n]
if !bytes.Equal(p, tv.plaintext) { if !bytes.Equal(p, tv.plaintext) {
t.Fatalf("wrong plaintext: expected %q but got %q\n", tv.plaintext, p) t.Fatalf("wrong plaintext: expected %q but got %q\n", tv.plaintext, p)

View file

@ -32,8 +32,10 @@ func TestEncryptDecrypt(t *testing.T) {
"ciphertext length does not match: want %d, got %d", "ciphertext length does not match: want %d, got %d",
len(data)+crypto.Extension, len(ciphertext)) len(data)+crypto.Extension, len(ciphertext))
plaintext, err := crypto.Decrypt(k, nil, ciphertext) plaintext := make([]byte, len(ciphertext))
n, err := crypto.Decrypt(k, plaintext, ciphertext)
OK(t, err) OK(t, err)
plaintext = plaintext[:n]
Assert(t, len(plaintext) == len(data), Assert(t, len(plaintext) == len(data),
"plaintext length does not match: want %d, got %d", "plaintext length does not match: want %d, got %d",
len(data), len(plaintext)) len(data), len(plaintext))
@ -58,8 +60,10 @@ func TestSmallBuffer(t *testing.T) {
cap(ciphertext)) cap(ciphertext))
// check for the correct plaintext // check for the correct plaintext
plaintext, err := crypto.Decrypt(k, nil, ciphertext) plaintext := make([]byte, len(ciphertext))
n, err := crypto.Decrypt(k, plaintext, ciphertext)
OK(t, err) OK(t, err)
plaintext = plaintext[:n]
Assert(t, bytes.Equal(plaintext, data), Assert(t, bytes.Equal(plaintext, data),
"wrong plaintext returned") "wrong plaintext returned")
} }
@ -78,8 +82,9 @@ func TestSameBuffer(t *testing.T) {
OK(t, err) OK(t, err)
// use the same buffer for decryption // use the same buffer for decryption
ciphertext, err = crypto.Decrypt(k, ciphertext, ciphertext) n, err := crypto.Decrypt(k, ciphertext, ciphertext)
OK(t, err) OK(t, err)
ciphertext = ciphertext[:n]
Assert(t, bytes.Equal(ciphertext, data), Assert(t, bytes.Equal(ciphertext, data),
"wrong plaintext returned") "wrong plaintext returned")
} }
@ -97,9 +102,9 @@ func TestCornerCases(t *testing.T) {
len(c)) len(c))
// this should decrypt to nil // this should decrypt to nil
p, err := crypto.Decrypt(k, nil, c) n, err := crypto.Decrypt(k, nil, c)
OK(t, err) OK(t, err)
Equals(t, []byte(nil), p) Equals(t, 0, n)
// test encryption for same slice, this should return an error // test encryption for same slice, this should return an error
_, err = crypto.Encrypt(k, c, c) _, err = crypto.Encrypt(k, c, c)
@ -160,7 +165,7 @@ func BenchmarkDecrypt(b *testing.B) {
b.SetBytes(int64(size)) b.SetBytes(int64(size))
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
plaintext, err = crypto.Decrypt(k, plaintext, ciphertext) _, err = crypto.Decrypt(k, plaintext, ciphertext)
OK(b, err) OK(b, err)
} }
} }

View file

@ -4,8 +4,9 @@ import (
"crypto/rand" "crypto/rand"
"time" "time"
"restic/errors"
sscrypt "github.com/elithrar/simple-scrypt" sscrypt "github.com/elithrar/simple-scrypt"
"github.com/pkg/errors"
"golang.org/x/crypto/scrypt" "golang.org/x/crypto/scrypt"
) )

View file

@ -15,7 +15,7 @@ import (
"sync" "sync"
"time" "time"
"github.com/pkg/errors" "restic/errors"
) )
type process struct { type process struct {

View file

@ -1,6 +1,5 @@
// Package restic is the top level package for the restic backup program, // Package restic is the top level package for the restic backup program,
// please see https://github.com/restic/restic for more information. // please see https://github.com/restic/restic for more information.
// //
// This package exposes the main components needed to create and restore a // This package exposes the main objects that are handled in restic.
// backup as well as handling things like a local cache of objects.
package restic package restic

src/restic/errors/doc.go Normal file
View file

@ -0,0 +1,2 @@
// Package errors provides custom error types used within restic.
package errors

View file

@ -1,4 +1,4 @@
package restic package errors
import "fmt" import "fmt"

src/restic/errors/wrap.go Normal file
View file

@ -0,0 +1,23 @@
package errors
import "github.com/pkg/errors"
// Cause returns the cause of an error.
func Cause(err error) error {
return errors.Cause(err)
}
// New creates a new error based on message.
func New(message string) error {
return errors.New(message)
}
// Errorf creates an error based on a format string and values.
func Errorf(format string, args ...interface{}) error {
return errors.Errorf(format, args...)
}
// Wrap wraps an error retrieved from outside of restic.
func Wrap(err error, message string) error {
return errors.Wrap(err, message)
}
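restic/errors is a thin facade over github.com/pkg/errors, so call sites only swap the import path. A sketch of the idioms used throughout this commit:

package main

import (
	"fmt"
	"os"

	"restic/errors" // replaces github.com/pkg/errors in-tree
)

func openCache(dir string) (*os.File, error) {
	fd, err := os.Open(dir)
	if err != nil {
		return nil, errors.Wrap(err, "Open") // wrap errors from outside restic
	}
	return fd, nil
}

func main() {
	_, err := openCache("/no/such/dir")
	// Cause unwraps to the original error, e.g. for os.IsNotExist checks.
	if os.IsNotExist(errors.Cause(err)) {
		fmt.Println("cache dir missing:", err)
	}
}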

View file

@ -1,14 +1,27 @@
package backend package restic
import ( import (
"fmt" "fmt"
"github.com/pkg/errors" "restic/errors"
)
// FileType is the type of a file in the backend.
type FileType string
// These are the different data types a backend can store.
const (
DataFile FileType = "data"
KeyFile = "key"
LockFile = "lock"
SnapshotFile = "snapshot"
IndexFile = "index"
ConfigFile = "config"
) )
// Handle is used to store and access data in a backend. // Handle is used to store and access data in a backend.
type Handle struct { type Handle struct {
Type Type Type FileType
Name string Name string
} }
@ -27,17 +40,17 @@ func (h Handle) Valid() error {
} }
switch h.Type { switch h.Type {
case Data: case DataFile:
case Key: case KeyFile:
case Lock: case LockFile:
case Snapshot: case SnapshotFile:
case Index: case IndexFile:
case Config: case ConfigFile:
default: default:
return errors.Errorf("invalid Type %q", h.Type) return errors.Errorf("invalid Type %q", h.Type)
} }
if h.Type == Config { if h.Type == ConfigFile {
return nil return nil
} }
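Handles keep the same shape after the backend.Type to restic.FileType rename. A sketch mirroring checkPack above, assuming the restic and restic/backend packages are imported:

// loadPack fetches a raw pack file via a Handle, as checkPack does.
func loadPack(repo restic.Repository, id restic.ID) ([]byte, error) {
	h := restic.Handle{Type: restic.DataFile, Name: id.String()}
	if err := h.Valid(); err != nil { // only ConfigFile may omit the name
		return nil, err
	}
	return backend.LoadAll(repo.Backend(), h, nil)
}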

View file

@ -1,4 +1,4 @@
package backend package restic
import "testing" import "testing"
@ -8,10 +8,10 @@ var handleTests = []struct {
}{ }{
{Handle{Name: "foo"}, false}, {Handle{Name: "foo"}, false},
{Handle{Type: "foobar"}, false}, {Handle{Type: "foobar"}, false},
{Handle{Type: Config, Name: ""}, true}, {Handle{Type: ConfigFile, Name: ""}, true},
{Handle{Type: Data, Name: ""}, false}, {Handle{Type: DataFile, Name: ""}, false},
{Handle{Type: "", Name: "x"}, false}, {Handle{Type: "", Name: "x"}, false},
{Handle{Type: Lock, Name: "010203040506"}, true}, {Handle{Type: LockFile, Name: "010203040506"}, true},
} }
func TestHandleValid(t *testing.T) { func TestHandleValid(t *testing.T) {

View file

@ -4,7 +4,7 @@ import (
"path/filepath" "path/filepath"
"strings" "strings"
"github.com/pkg/errors" "restic/errors"
) )
// ErrBadString is returned when Match is called with the empty string as the // ErrBadString is returned when Match is called with the empty string as the

View file

@ -1,18 +1,12 @@
package restic package restic
import (
"restic/backend"
"restic/pack"
"restic/repository"
)
// FindUsedBlobs traverses the tree ID and adds all seen blobs (trees and data // FindUsedBlobs traverses the tree ID and adds all seen blobs (trees and data
// blobs) to the set blobs. The tree blobs in the `seen` BlobSet will not be visited // blobs) to the set blobs. The tree blobs in the `seen` BlobSet will not be visited
// again. // again.
func FindUsedBlobs(repo *repository.Repository, treeID backend.ID, blobs pack.BlobSet, seen pack.BlobSet) error { func FindUsedBlobs(repo Repository, treeID ID, blobs BlobSet, seen BlobSet) error {
blobs.Insert(pack.Handle{ID: treeID, Type: pack.Tree}) blobs.Insert(BlobHandle{ID: treeID, Type: TreeBlob})
tree, err := LoadTree(repo, treeID) tree, err := repo.LoadTree(treeID)
if err != nil { if err != nil {
return err return err
} }
@ -21,11 +15,11 @@ func FindUsedBlobs(repo *repository.Repository, treeID backend.ID, blobs pack.Bl
switch node.Type { switch node.Type {
case "file": case "file":
for _, blob := range node.Content { for _, blob := range node.Content {
blobs.Insert(pack.Handle{ID: blob, Type: pack.Data}) blobs.Insert(BlobHandle{ID: blob, Type: DataBlob})
} }
case "dir": case "dir":
subtreeID := *node.Subtree subtreeID := *node.Subtree
h := pack.Handle{ID: subtreeID, Type: pack.Tree} h := BlobHandle{ID: subtreeID, Type: TreeBlob}
if seen.Has(h) { if seen.Has(h) {
continue continue
} }

View file

@ -1,4 +1,4 @@
package restic package restic_test
import ( import (
"bufio" "bufio"
@ -7,26 +7,26 @@ import (
"fmt" "fmt"
"os" "os"
"path/filepath" "path/filepath"
"restic"
"sort" "sort"
"testing" "testing"
"time" "time"
"restic/pack"
"restic/repository" "restic/repository"
) )
func loadIDSet(t testing.TB, filename string) pack.BlobSet { func loadIDSet(t testing.TB, filename string) restic.BlobSet {
f, err := os.Open(filename) f, err := os.Open(filename)
if err != nil { if err != nil {
t.Logf("unable to open golden file %v: %v", filename, err) t.Logf("unable to open golden file %v: %v", filename, err)
return pack.NewBlobSet() return restic.NewBlobSet()
} }
sc := bufio.NewScanner(f) sc := bufio.NewScanner(f)
blobs := pack.NewBlobSet() blobs := restic.NewBlobSet()
for sc.Scan() { for sc.Scan() {
var h pack.Handle var h restic.BlobHandle
err := json.Unmarshal([]byte(sc.Text()), &h) err := json.Unmarshal([]byte(sc.Text()), &h)
if err != nil { if err != nil {
t.Errorf("file %v contained invalid blob: %#v", filename, err) t.Errorf("file %v contained invalid blob: %#v", filename, err)
@ -43,14 +43,14 @@ func loadIDSet(t testing.TB, filename string) pack.BlobSet {
return blobs return blobs
} }
func saveIDSet(t testing.TB, filename string, s pack.BlobSet) { func saveIDSet(t testing.TB, filename string, s restic.BlobSet) {
f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE, 0644) f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE, 0644)
if err != nil { if err != nil {
t.Fatalf("unable to update golden file %v: %v", filename, err) t.Fatalf("unable to update golden file %v: %v", filename, err)
return return
} }
var hs pack.Handles var hs restic.BlobHandles
for h := range s { for h := range s {
hs = append(hs, h) hs = append(hs, h)
} }
@ -83,16 +83,16 @@ func TestFindUsedBlobs(t *testing.T) {
repo, cleanup := repository.TestRepository(t) repo, cleanup := repository.TestRepository(t)
defer cleanup() defer cleanup()
var snapshots []*Snapshot var snapshots []*restic.Snapshot
for i := 0; i < findTestSnapshots; i++ { for i := 0; i < findTestSnapshots; i++ {
sn := TestCreateSnapshot(t, repo, findTestTime.Add(time.Duration(i)*time.Second), findTestDepth, 0) sn := restic.TestCreateSnapshot(t, repo, findTestTime.Add(time.Duration(i)*time.Second), findTestDepth, 0)
t.Logf("snapshot %v saved, tree %v", sn.ID().Str(), sn.Tree.Str()) t.Logf("snapshot %v saved, tree %v", sn.ID().Str(), sn.Tree.Str())
snapshots = append(snapshots, sn) snapshots = append(snapshots, sn)
} }
for i, sn := range snapshots { for i, sn := range snapshots {
usedBlobs := pack.NewBlobSet() usedBlobs := restic.NewBlobSet()
err := FindUsedBlobs(repo, *sn.Tree, usedBlobs, pack.NewBlobSet()) err := restic.FindUsedBlobs(repo, *sn.Tree, usedBlobs, restic.NewBlobSet())
if err != nil { if err != nil {
t.Errorf("FindUsedBlobs returned error: %v", err) t.Errorf("FindUsedBlobs returned error: %v", err)
continue continue
@ -121,14 +121,14 @@ func BenchmarkFindUsedBlobs(b *testing.B) {
repo, cleanup := repository.TestRepository(b) repo, cleanup := repository.TestRepository(b)
defer cleanup() defer cleanup()
sn := TestCreateSnapshot(b, repo, findTestTime, findTestDepth, 0) sn := restic.TestCreateSnapshot(b, repo, findTestTime, findTestDepth, 0)
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
seen := pack.NewBlobSet() seen := restic.NewBlobSet()
blobs := pack.NewBlobSet() blobs := restic.NewBlobSet()
err := FindUsedBlobs(repo, *sn.Tree, blobs, seen) err := restic.FindUsedBlobs(repo, *sn.Tree, blobs, seen)
if err != nil { if err != nil {
b.Error(err) b.Error(err)
} }

View file

@ -6,7 +6,7 @@ import (
"os" "os"
"syscall" "syscall"
"github.com/pkg/errors" "restic/errors"
"golang.org/x/sys/unix" "golang.org/x/sys/unix"
) )

View file

@ -12,7 +12,6 @@ import (
"restic" "restic"
"restic/debug" "restic/debug"
"restic/repository"
) )
// Statically ensure that *dir implement those interface // Statically ensure that *dir implement those interface
@ -20,16 +19,16 @@ var _ = fs.HandleReadDirAller(&dir{})
var _ = fs.NodeStringLookuper(&dir{}) var _ = fs.NodeStringLookuper(&dir{})
type dir struct { type dir struct {
repo *repository.Repository repo restic.Repository
items map[string]*restic.Node items map[string]*restic.Node
inode uint64 inode uint64
node *restic.Node node *restic.Node
ownerIsRoot bool ownerIsRoot bool
} }
func newDir(repo *repository.Repository, node *restic.Node, ownerIsRoot bool) (*dir, error) { func newDir(repo restic.Repository, node *restic.Node, ownerIsRoot bool) (*dir, error) {
debug.Log("newDir", "new dir for %v (%v)", node.Name, node.Subtree.Str()) debug.Log("newDir", "new dir for %v (%v)", node.Name, node.Subtree.Str())
tree, err := restic.LoadTree(repo, *node.Subtree) tree, err := repo.LoadTree(*node.Subtree)
if err != nil { if err != nil {
debug.Log("newDir", " error loading tree %v: %v", node.Subtree.Str(), err) debug.Log("newDir", " error loading tree %v: %v", node.Subtree.Str(), err)
return nil, err return nil, err
@ -50,7 +49,7 @@ func newDir(repo *repository.Repository, node *restic.Node, ownerIsRoot bool) (*
// replaceSpecialNodes replaces nodes with name "." and "/" by their contents. // replaceSpecialNodes replaces nodes with name "." and "/" by their contents.
// Otherwise, the node is returned. // Otherwise, the node is returned.
func replaceSpecialNodes(repo *repository.Repository, node *restic.Node) ([]*restic.Node, error) { func replaceSpecialNodes(repo restic.Repository, node *restic.Node) ([]*restic.Node, error) {
if node.Type != "dir" || node.Subtree == nil { if node.Type != "dir" || node.Subtree == nil {
return []*restic.Node{node}, nil return []*restic.Node{node}, nil
} }
@ -59,7 +58,7 @@ func replaceSpecialNodes(repo *repository.Repository, node *restic.Node) ([]*res
return []*restic.Node{node}, nil return []*restic.Node{node}, nil
} }
tree, err := restic.LoadTree(repo, *node.Subtree) tree, err := repo.LoadTree(*node.Subtree)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -67,9 +66,9 @@ func replaceSpecialNodes(repo *repository.Repository, node *restic.Node) ([]*res
return tree.Nodes, nil return tree.Nodes, nil
} }
func newDirFromSnapshot(repo *repository.Repository, snapshot SnapshotWithId, ownerIsRoot bool) (*dir, error) { func newDirFromSnapshot(repo restic.Repository, snapshot SnapshotWithId, ownerIsRoot bool) (*dir, error) {
debug.Log("newDirFromSnapshot", "new dir for snapshot %v (%v)", snapshot.ID.Str(), snapshot.Tree.Str()) debug.Log("newDirFromSnapshot", "new dir for snapshot %v (%v)", snapshot.ID.Str(), snapshot.Tree.Str())
tree, err := restic.LoadTree(repo, *snapshot.Tree) tree, err := repo.LoadTree(*snapshot.Tree)
if err != nil { if err != nil {
debug.Log("newDirFromSnapshot", " loadTree(%v) failed: %v", snapshot.ID.Str(), err) debug.Log("newDirFromSnapshot", " loadTree(%v) failed: %v", snapshot.ID.Str(), err)
return nil, err return nil, err
@ -98,7 +97,7 @@ func newDirFromSnapshot(repo *repository.Repository, snapshot SnapshotWithId, ow
Mode: os.ModeDir | 0555, Mode: os.ModeDir | 0555,
}, },
items: items, items: items,
inode: inodeFromBackendId(snapshot.ID), inode: inodeFromBackendID(snapshot.ID),
ownerIsRoot: ownerIsRoot, ownerIsRoot: ownerIsRoot,
}, nil }, nil
} }

View file

@ -6,12 +6,10 @@ package fuse
import ( import (
"sync" "sync"
"github.com/pkg/errors" "restic/errors"
"restic" "restic"
"restic/backend"
"restic/debug" "restic/debug"
"restic/pack"
"bazil.org/fuse" "bazil.org/fuse"
"bazil.org/fuse/fs" "bazil.org/fuse/fs"
@ -28,8 +26,8 @@ var _ = fs.HandleReleaser(&file{})
// BlobLoader is an abstracted repository with a reduced set of methods used // BlobLoader is an abstracted repository with a reduced set of methods used
// for fuse operations. // for fuse operations.
type BlobLoader interface { type BlobLoader interface {
LookupBlobSize(backend.ID, pack.BlobType) (uint, error) LookupBlobSize(restic.ID, restic.BlobType) (uint, error)
LoadBlob(backend.ID, pack.BlobType, []byte) ([]byte, error) LoadBlob(restic.BlobType, restic.ID, []byte) (int, error)
} }
type file struct { type file struct {
@ -54,7 +52,7 @@ func newFile(repo BlobLoader, node *restic.Node, ownerIsRoot bool) (*file, error
var bytes uint64 var bytes uint64
sizes := make([]uint, len(node.Content)) sizes := make([]uint, len(node.Content))
for i, id := range node.Content { for i, id := range node.Content {
size, err := repo.LookupBlobSize(id, pack.Data) size, err := repo.LookupBlobSize(id, restic.DataBlob)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -111,14 +109,14 @@ func (f *file) getBlobAt(i int) (blob []byte, err error) {
buf = make([]byte, f.sizes[i]) buf = make([]byte, f.sizes[i])
} }
blob, err = f.repo.LoadBlob(f.node.Content[i], pack.Data, buf) n, err := f.repo.LoadBlob(restic.DataBlob, f.node.Content[i], buf)
if err != nil { if err != nil {
debug.Log("file.getBlobAt", "LoadBlob(%v, %v) failed: %v", f.node.Name, f.node.Content[i], err) debug.Log("file.getBlobAt", "LoadBlob(%v, %v) failed: %v", f.node.Name, f.node.Content[i], err)
return nil, err return nil, err
} }
f.blobs[i] = blob f.blobs[i] = buf[:n]
return blob, nil return buf[:n], nil
} }
func (f *file) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error { func (f *file) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error {
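The BlobLoader change mirrors the new Decrypt contract: LoadBlob fills a caller-supplied buffer and returns a byte count. A sketch of fetching a single content blob, as getBlobAt does above:

// loadContent sizes a buffer via LookupBlobSize, then fills it.
func loadContent(repo BlobLoader, id restic.ID) ([]byte, error) {
	size, err := repo.LookupBlobSize(id, restic.DataBlob)
	if err != nil {
		return nil, err
	}
	buf := make([]byte, size) // must be at least the blob size
	n, err := repo.LoadBlob(restic.DataBlob, id, buf)
	if err != nil {
		return nil, err
	}
	return buf[:n], nil
}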

View file

@ -9,25 +9,23 @@ import (
"testing" "testing"
"time" "time"
"github.com/pkg/errors" "restic/errors"
"bazil.org/fuse" "bazil.org/fuse"
"restic" "restic"
"restic/backend"
"restic/pack"
. "restic/test" . "restic/test"
) )
type MockRepo struct { type MockRepo struct {
blobs map[backend.ID][]byte blobs map[restic.ID][]byte
} }
func NewMockRepo(content map[backend.ID][]byte) *MockRepo { func NewMockRepo(content map[restic.ID][]byte) *MockRepo {
return &MockRepo{blobs: content} return &MockRepo{blobs: content}
} }
func (m *MockRepo) LookupBlobSize(id backend.ID, t pack.BlobType) (uint, error) { func (m *MockRepo) LookupBlobSize(id restic.ID, t restic.BlobType) (uint, error) {
buf, ok := m.blobs[id] buf, ok := m.blobs[id]
if !ok { if !ok {
return 0, errors.New("blob not found") return 0, errors.New("blob not found")
@ -36,19 +34,19 @@ func (m *MockRepo) LookupBlobSize(id backend.ID, t pack.BlobType) (uint, error)
return uint(len(buf)), nil return uint(len(buf)), nil
} }
func (m *MockRepo) LoadBlob(id backend.ID, t pack.BlobType, buf []byte) ([]byte, error) { func (m *MockRepo) LoadBlob(t restic.BlobType, id restic.ID, buf []byte) (int, error) {
size, err := m.LookupBlobSize(id, t) size, err := m.LookupBlobSize(id, t)
if err != nil { if err != nil {
return nil, err return 0, err
} }
if uint(cap(buf)) < size { if uint(len(buf)) < size {
return nil, errors.New("buffer too small") return 0, errors.New("buffer too small")
} }
buf = buf[:size] buf = buf[:size]
copy(buf, m.blobs[id]) copy(buf, m.blobs[id])
return buf, nil return int(size), nil
} }
type MockContext struct{} type MockContext struct{}
@ -68,12 +66,12 @@ var testContentLengths = []uint{
} }
var testMaxFileSize uint var testMaxFileSize uint
func genTestContent() map[backend.ID][]byte { func genTestContent() map[restic.ID][]byte {
m := make(map[backend.ID][]byte) m := make(map[restic.ID][]byte)
for _, length := range testContentLengths { for _, length := range testContentLengths {
buf := Random(int(length), int(length)) buf := Random(int(length), int(length))
id := backend.Hash(buf) id := restic.Hash(buf)
m[id] = buf m[id] = buf
testMaxFileSize += length testMaxFileSize += length
} }
@ -83,7 +81,7 @@ func genTestContent() map[backend.ID][]byte {
const maxBufSize = 20 * 1024 * 1024 const maxBufSize = 20 * 1024 * 1024
func testRead(t *testing.T, f *file, offset, length int, data []byte) []byte { func testRead(t *testing.T, f *file, offset, length int, data []byte) {
ctx := MockContext{} ctx := MockContext{}
req := &fuse.ReadRequest{ req := &fuse.ReadRequest{
@ -94,8 +92,6 @@ func testRead(t *testing.T, f *file, offset, length int, data []byte) []byte {
Data: make([]byte, length), Data: make([]byte, length),
} }
OK(t, f.Read(ctx, req, resp)) OK(t, f.Read(ctx, req, resp))
return resp.Data
} }
var offsetReadsTests = []struct { var offsetReadsTests = []struct {
@ -111,7 +107,7 @@ func TestFuseFile(t *testing.T) {
memfile := make([]byte, 0, maxBufSize) memfile := make([]byte, 0, maxBufSize)
var ids backend.IDs var ids restic.IDs
for id, buf := range repo.blobs { for id, buf := range repo.blobs {
ids = append(ids, id) ids = append(ids, id)
memfile = append(memfile, buf...) memfile = append(memfile, buf...)
@ -137,8 +133,9 @@ func TestFuseFile(t *testing.T) {
for i, test := range offsetReadsTests { for i, test := range offsetReadsTests {
b := memfile[test.offset : test.offset+test.length] b := memfile[test.offset : test.offset+test.length]
res := testRead(t, f, test.offset, test.length, b) buf := make([]byte, test.length)
if !bytes.Equal(b, res) { testRead(t, f, test.offset, test.length, buf)
if !bytes.Equal(b, buf) {
t.Errorf("test %d failed, wrong data returned", i) t.Errorf("test %d failed, wrong data returned", i)
} }
} }
@ -152,8 +149,9 @@ func TestFuseFile(t *testing.T) {
} }
b := memfile[offset : offset+length] b := memfile[offset : offset+length]
res := testRead(t, f, offset, length, b) buf := make([]byte, length)
if !bytes.Equal(b, res) { testRead(t, f, offset, length, buf)
if !bytes.Equal(b, buf) {
t.Errorf("test %d failed (offset %d, length %d), wrong data returned", i, offset, length) t.Errorf("test %d failed (offset %d, length %d), wrong data returned", i, offset, length)
} }
} }

View file

@ -5,13 +5,12 @@ package fuse
import ( import (
"encoding/binary" "encoding/binary"
"restic"
"restic/backend"
) )
// inodeFromBackendId returns a unique uint64 from a backend id. // inodeFromBackendID returns a unique uint64 from a backend id.
// Endianness has no specific meaning, it is just the simplest way to // Endianness has no specific meaning, it is just the simplest way to
// transform a []byte to an uint64 // transform a []byte to an uint64
func inodeFromBackendId(id backend.ID) uint64 { func inodeFromBackendID(id restic.ID) uint64 {
return binary.BigEndian.Uint64(id[:8]) return binary.BigEndian.Uint64(id[:8])
} }
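The inode is simply the first eight bytes of the ID read as a big-endian integer. A standalone sketch of the same transformation (assuming a 32-byte SHA-256 ID, as elsewhere in this commit):

package main

import (
	"crypto/sha256"
	"encoding/binary"
	"fmt"
)

func main() {
	id := sha256.Sum256([]byte("example")) // stand-in for a restic ID
	inode := binary.BigEndian.Uint64(id[:8])
	fmt.Printf("id %x... -> inode %d\n", id[:4], inode)
}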

View file

@ -5,7 +5,6 @@ package fuse
import ( import (
"restic" "restic"
"restic/repository"
"bazil.org/fuse" "bazil.org/fuse"
"bazil.org/fuse/fs" "bazil.org/fuse/fs"
@ -20,7 +19,7 @@ type link struct {
ownerIsRoot bool ownerIsRoot bool
} }
func newLink(repo *repository.Repository, node *restic.Node, ownerIsRoot bool) (*link, error) { func newLink(repo restic.Repository, node *restic.Node, ownerIsRoot bool) (*link, error) {
return &link{node: node, ownerIsRoot: ownerIsRoot}, nil return &link{node: node, ownerIsRoot: ownerIsRoot}, nil
} }

View file

@ -12,16 +12,14 @@ import (
"bazil.org/fuse/fs" "bazil.org/fuse/fs"
"restic" "restic"
"restic/backend"
"restic/debug" "restic/debug"
"restic/repository"
"golang.org/x/net/context" "golang.org/x/net/context"
) )
type SnapshotWithId struct { type SnapshotWithId struct {
*restic.Snapshot *restic.Snapshot
backend.ID restic.ID
} }
// These lines statically ensure that a *SnapshotsDir implements the given // These lines statically ensure that a *SnapshotsDir implements the given
@ -31,7 +29,7 @@ var _ = fs.HandleReadDirAller(&SnapshotsDir{})
var _ = fs.NodeStringLookuper(&SnapshotsDir{}) var _ = fs.NodeStringLookuper(&SnapshotsDir{})
type SnapshotsDir struct { type SnapshotsDir struct {
repo *repository.Repository repo restic.Repository
ownerIsRoot bool ownerIsRoot bool
// knownSnapshots maps snapshot timestamp to the snapshot // knownSnapshots maps snapshot timestamp to the snapshot
@ -39,7 +37,8 @@ type SnapshotsDir struct {
knownSnapshots map[string]SnapshotWithId knownSnapshots map[string]SnapshotWithId
} }
func NewSnapshotsDir(repo *repository.Repository, ownerIsRoot bool) *SnapshotsDir { // NewSnapshotsDir returns a new dir object for the snapshots.
func NewSnapshotsDir(repo restic.Repository, ownerIsRoot bool) *SnapshotsDir {
debug.Log("NewSnapshotsDir", "fuse mount initiated") debug.Log("NewSnapshotsDir", "fuse mount initiated")
return &SnapshotsDir{ return &SnapshotsDir{
repo: repo, repo: repo,
@ -65,7 +64,7 @@ func (sn *SnapshotsDir) updateCache(ctx context.Context) error {
sn.Lock() sn.Lock()
defer sn.Unlock() defer sn.Unlock()
for id := range sn.repo.List(backend.Snapshot, ctx.Done()) { for id := range sn.repo.List(restic.SnapshotFile, ctx.Done()) {
snapshot, err := restic.LoadSnapshot(sn.repo, id) snapshot, err := restic.LoadSnapshot(sn.repo, id)
if err != nil { if err != nil {
return err return err
@ -96,7 +95,7 @@ func (sn *SnapshotsDir) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) {
ret := make([]fuse.Dirent, 0) ret := make([]fuse.Dirent, 0)
for _, snapshot := range sn.knownSnapshots { for _, snapshot := range sn.knownSnapshots {
ret = append(ret, fuse.Dirent{ ret = append(ret, fuse.Dirent{
Inode: inodeFromBackendId(snapshot.ID), Inode: inodeFromBackendID(snapshot.ID),
Type: fuse.DT_Dir, Type: fuse.DT_Dir,
Name: snapshot.Time.Format(time.RFC3339), Name: snapshot.Time.Format(time.RFC3339),
}) })

View file

@ -1,12 +1,14 @@
package backend package restic
import ( import (
"bytes" "bytes"
"crypto/rand"
"crypto/sha256" "crypto/sha256"
"encoding/hex" "encoding/hex"
"encoding/json" "encoding/json"
"io"
"github.com/pkg/errors" "restic/errors"
) )
// Hash returns the ID for data. // Hash returns the ID for data.
@ -14,11 +16,11 @@ func Hash(data []byte) ID {
return sha256.Sum256(data) return sha256.Sum256(data)
} }
// IDSize contains the size of an ID, in bytes. // idSize contains the size of an ID, in bytes.
const IDSize = sha256.Size const idSize = sha256.Size
// ID references content within a repository. // ID references content within a repository.
type ID [IDSize]byte type ID [idSize]byte
// ParseID converts the given string to an ID. // ParseID converts the given string to an ID.
func ParseID(s string) (ID, error) { func ParseID(s string) (ID, error) {
@ -28,7 +30,7 @@ func ParseID(s string) (ID, error) {
return ID{}, errors.Wrap(err, "hex.DecodeString") return ID{}, errors.Wrap(err, "hex.DecodeString")
} }
if len(b) != IDSize { if len(b) != idSize {
return ID{}, errors.New("invalid length for hash") return ID{}, errors.New("invalid length for hash")
} }
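Hash, String, and ParseID round-trip: hashing data yields an ID whose 64-character hex form parses back to an equal value. A hedged usage sketch, assuming the GOPATH-style "restic" import path used throughout this commit:

package main

import (
	"fmt"
	"restic"
)

func main() {
	id := restic.Hash([]byte("some data")) // SHA-256 of the content
	s := id.String()                       // 64 hex characters

	parsed, err := restic.ParseID(s)
	if err != nil {
		panic(err)
	}
	fmt.Println(id.Equal(parsed)) // true
}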
@ -42,6 +44,17 @@ func (id ID) String() string {
return hex.EncodeToString(id[:]) return hex.EncodeToString(id[:])
} }
// NewRandomID returns a randomly generated ID. When reading from rand fails,
// the function panics.
func NewRandomID() ID {
id := ID{}
_, err := io.ReadFull(rand.Reader, id[:])
if err != nil {
panic(err)
}
return id
}
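A standalone sketch of the same construction using only the standard library; the panic-on-failure behavior mirrors NewRandomID above:

package main

import (
	"crypto/rand"
	"encoding/hex"
	"fmt"
	"io"
)

func main() {
	var id [32]byte // same size as a SHA-256-based ID
	if _, err := io.ReadFull(rand.Reader, id[:]); err != nil {
		panic(err) // NewRandomID panics the same way
	}
	fmt.Println(hex.EncodeToString(id[:]))
}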
const shortStr = 4 const shortStr = 4
// Str returns the shortened string version of id. // Str returns the shortened string version of id.

View file

@ -1,4 +1,4 @@
package backend package restic
import "testing" import "testing"

View file

@ -1,10 +1,8 @@
package backend_test package restic
import ( import (
"reflect"
"testing" "testing"
"restic/backend"
. "restic/test"
) )
var TestStrings = []struct { var TestStrings = []struct {
@ -19,25 +17,44 @@ var TestStrings = []struct {
func TestID(t *testing.T) { func TestID(t *testing.T) {
for _, test := range TestStrings { for _, test := range TestStrings {
id, err := backend.ParseID(test.id) id, err := ParseID(test.id)
OK(t, err) if err != nil {
t.Error(err)
}
id2, err := backend.ParseID(test.id) id2, err := ParseID(test.id)
OK(t, err) if err != nil {
Assert(t, id.Equal(id2), "ID.Equal() does not work as expected") t.Error(err)
}
if !id.Equal(id2) {
t.Errorf("ID.Equal() does not work as expected")
}
ret, err := id.EqualString(test.id) ret, err := id.EqualString(test.id)
OK(t, err) if err != nil {
Assert(t, ret, "ID.EqualString() returned wrong value") t.Error(err)
}
if !ret {
t.Error("ID.EqualString() returned wrong value")
}
// test json marshalling // test json marshalling
buf, err := id.MarshalJSON() buf, err := id.MarshalJSON()
OK(t, err) if err != nil {
Equals(t, "\""+test.id+"\"", string(buf)) t.Error(err)
}
want := `"` + test.id + `"`
if string(buf) != want {
t.Errorf("string comparison failed, wanted %q, got %q", want, string(buf))
}
var id3 backend.ID var id3 ID
err = id3.UnmarshalJSON(buf) err = id3.UnmarshalJSON(buf)
OK(t, err) if err != nil {
Equals(t, id, id3) t.Fatal(err)
}
if !reflect.DeepEqual(id, id3) {
t.Error("ids are not equal")
}
} }
} }

View file

@ -1,4 +1,4 @@
package backend package restic
import ( import (
"encoding/hex" "encoding/hex"

src/restic/ids_test.go (new file, 55 lines)
View file

@ -0,0 +1,55 @@
package restic
import (
"reflect"
"testing"
)
var uniqTests = []struct {
before, after IDs
}{
{
IDs{
TestParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"),
TestParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"),
TestParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"),
},
IDs{
TestParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"),
TestParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"),
},
},
{
IDs{
TestParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"),
TestParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"),
TestParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"),
},
IDs{
TestParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"),
TestParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"),
},
},
{
IDs{
TestParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"),
TestParseID("f658198b405d7e80db5ace1980d125c8da62f636b586c46bf81dfb856a49d0c8"),
TestParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"),
TestParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"),
},
IDs{
TestParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"),
TestParseID("f658198b405d7e80db5ace1980d125c8da62f636b586c46bf81dfb856a49d0c8"),
TestParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"),
},
},
}
func TestUniqIDs(t *testing.T) {
for i, test := range uniqTests {
uniq := test.before.Uniq()
if !reflect.DeepEqual(uniq, test.after) {
t.Errorf("uniqIDs() test %v failed\n wanted: %v\n got: %v", i, test.after, uniq)
}
}
}

View file

@ -1,4 +1,4 @@
package backend package restic
import "sort" import "sort"

src/restic/idset_test.go (new file, 32 lines)
View file

@ -0,0 +1,32 @@
package restic
import (
"testing"
)
var idsetTests = []struct {
id ID
seen bool
}{
{TestParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), false},
{TestParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), false},
{TestParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), true},
{TestParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), true},
{TestParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), true},
{TestParseID("f658198b405d7e80db5ace1980d125c8da62f636b586c46bf81dfb856a49d0c8"), false},
{TestParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), true},
{TestParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), true},
{TestParseID("f658198b405d7e80db5ace1980d125c8da62f636b586c46bf81dfb856a49d0c8"), true},
{TestParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), true},
}
func TestIDSet(t *testing.T) {
set := NewIDSet()
for i, test := range idsetTests {
seen := set.Has(test.id)
if seen != test.seen {
t.Errorf("IDSet test %v failed: wanted %v, got %v", i, test.seen, seen)
}
set.Insert(test.id)
}
}

View file

@ -5,45 +5,42 @@ import (
"fmt" "fmt"
"os" "os"
"restic" "restic"
"restic/backend"
"restic/debug" "restic/debug"
"restic/list" "restic/list"
"restic/pack"
"restic/types"
"restic/worker" "restic/worker"
"github.com/pkg/errors" "restic/errors"
) )
// Pack contains information about the contents of a pack. // Pack contains information about the contents of a pack.
type Pack struct { type Pack struct {
Size int64 Size int64
Entries []pack.Blob Entries []restic.Blob
} }
// Blob contains information about a blob. // Blob contains information about a blob.
type Blob struct { type Blob struct {
Size int64 Size int64
Packs backend.IDSet Packs restic.IDSet
} }
// Index contains information about blobs and packs stored in a repo. // Index contains information about blobs and packs stored in a repo.
type Index struct { type Index struct {
Packs map[backend.ID]Pack Packs map[restic.ID]Pack
Blobs map[pack.Handle]Blob Blobs map[restic.BlobHandle]Blob
IndexIDs backend.IDSet IndexIDs restic.IDSet
} }
func newIndex() *Index { func newIndex() *Index {
return &Index{ return &Index{
Packs: make(map[backend.ID]Pack), Packs: make(map[restic.ID]Pack),
Blobs: make(map[pack.Handle]Blob), Blobs: make(map[restic.BlobHandle]Blob),
IndexIDs: backend.NewIDSet(), IndexIDs: restic.NewIDSet(),
} }
} }
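The index keeps two views of the same data: pack ID to pack contents, and blob handle to the set of packs holding that blob. A simplified, self-contained model of this two-map design (types shortened; illustrative only, not the package's actual code):

package main

import "fmt"

type id [2]byte // stand-in for restic's 32-byte ID

type handle struct { // stand-in for restic.BlobHandle
	id   id
	kind string
}

type pack struct { // stand-in for index.Pack
	size    int64
	entries []handle
}

type index struct {
	packs map[id]pack
	blobs map[handle]map[id]struct{} // handle -> set of packs containing it
}

// addPack records the pack and indexes each of its blobs.
func (idx *index) addPack(pid id, p pack) {
	idx.packs[pid] = p
	for _, h := range p.entries {
		if idx.blobs[h] == nil {
			idx.blobs[h] = make(map[id]struct{})
		}
		idx.blobs[h][pid] = struct{}{}
	}
}

func main() {
	idx := &index{packs: map[id]pack{}, blobs: map[handle]map[id]struct{}{}}
	h := handle{id: id{0xaa, 0xbb}, kind: "data"}
	idx.addPack(id{0x01, 0x02}, pack{size: 100, entries: []handle{h}})
	fmt.Println(len(idx.blobs[h])) // 1: the blob is found in one pack
}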
// New creates a new index for repo from scratch. // New creates a new index for repo from scratch.
func New(repo types.Repository, p *restic.Progress) (*Index, error) { func New(repo restic.Repository, p *restic.Progress) (*Index, error) {
done := make(chan struct{}) done := make(chan struct{})
defer close(done) defer close(done)
@ -58,7 +55,7 @@ func New(repo types.Repository, p *restic.Progress) (*Index, error) {
for job := range ch { for job := range ch {
p.Report(restic.Stat{Blobs: 1}) p.Report(restic.Stat{Blobs: 1})
packID := job.Data.(backend.ID) packID := job.Data.(restic.ID)
if job.Error != nil { if job.Error != nil {
fmt.Fprintf(os.Stderr, "unable to list pack %v: %v\n", packID.Str(), job.Error) fmt.Fprintf(os.Stderr, "unable to list pack %v: %v\n", packID.Str(), job.Error)
continue continue
@ -83,27 +80,27 @@ func New(repo types.Repository, p *restic.Progress) (*Index, error) {
const loadIndexParallelism = 20 const loadIndexParallelism = 20
type packJSON struct { type packJSON struct {
ID backend.ID `json:"id"` ID restic.ID `json:"id"`
Blobs []blobJSON `json:"blobs"` Blobs []blobJSON `json:"blobs"`
} }
type blobJSON struct { type blobJSON struct {
ID backend.ID `json:"id"` ID restic.ID `json:"id"`
Type pack.BlobType `json:"type"` Type restic.BlobType `json:"type"`
Offset uint `json:"offset"` Offset uint `json:"offset"`
Length uint `json:"length"` Length uint `json:"length"`
} }
type indexJSON struct { type indexJSON struct {
Supersedes backend.IDs `json:"supersedes,omitempty"` Supersedes restic.IDs `json:"supersedes,omitempty"`
Packs []*packJSON `json:"packs"` Packs []*packJSON `json:"packs"`
} }
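The on-disk index layout follows directly from these structs: an optional supersedes list plus one entry per pack, each listing its blobs. A standalone sketch that prints the resulting JSON shape (string IDs and shortened hex are stand-ins for the real ID types):

package main

import (
	"encoding/json"
	"fmt"
)

type blobJSON struct {
	ID     string `json:"id"`
	Type   string `json:"type"`
	Offset uint   `json:"offset"`
	Length uint   `json:"length"`
}

type packJSON struct {
	ID    string     `json:"id"`
	Blobs []blobJSON `json:"blobs"`
}

type indexJSON struct {
	Supersedes []string    `json:"supersedes,omitempty"`
	Packs      []*packJSON `json:"packs"`
}

func main() {
	idx := indexJSON{
		Packs: []*packJSON{{
			ID:    "7bb086db",
			Blobs: []blobJSON{{ID: "1285b303", Type: "data", Offset: 0, Length: 150}},
		}},
	}
	buf, _ := json.MarshalIndent(idx, "", "  ")
	fmt.Println(string(buf))
}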
func loadIndexJSON(repo types.Repository, id backend.ID) (*indexJSON, error) { func loadIndexJSON(repo restic.Repository, id restic.ID) (*indexJSON, error) {
debug.Log("index.loadIndexJSON", "process index %v\n", id.Str()) debug.Log("index.loadIndexJSON", "process index %v\n", id.Str())
var idx indexJSON var idx indexJSON
err := repo.LoadJSONUnpacked(backend.Index, id, &idx) err := repo.LoadJSONUnpacked(restic.IndexFile, id, &idx)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -112,7 +109,7 @@ func loadIndexJSON(repo types.Repository, id backend.ID) (*indexJSON, error) {
} }
// Load creates an index by loading all index files from the repo. // Load creates an index by loading all index files from the repo.
func Load(repo types.Repository, p *restic.Progress) (*Index, error) { func Load(repo restic.Repository, p *restic.Progress) (*Index, error) {
debug.Log("index.Load", "loading indexes") debug.Log("index.Load", "loading indexes")
p.Start() p.Start()
@ -121,12 +118,12 @@ func Load(repo types.Repository, p *restic.Progress) (*Index, error) {
done := make(chan struct{}) done := make(chan struct{})
defer close(done) defer close(done)
supersedes := make(map[backend.ID]backend.IDSet) supersedes := make(map[restic.ID]restic.IDSet)
results := make(map[backend.ID]map[backend.ID]Pack) results := make(map[restic.ID]map[restic.ID]Pack)
index := newIndex() index := newIndex()
for id := range repo.List(backend.Index, done) { for id := range repo.List(restic.IndexFile, done) {
p.Report(restic.Stat{Blobs: 1}) p.Report(restic.Stat{Blobs: 1})
debug.Log("index.Load", "Load index %v", id.Str()) debug.Log("index.Load", "Load index %v", id.Str())
@ -135,17 +132,17 @@ func Load(repo types.Repository, p *restic.Progress) (*Index, error) {
return nil, err return nil, err
} }
res := make(map[backend.ID]Pack) res := make(map[restic.ID]Pack)
supersedes[id] = backend.NewIDSet() supersedes[id] = restic.NewIDSet()
for _, sid := range idx.Supersedes { for _, sid := range idx.Supersedes {
debug.Log("index.Load", " index %v supersedes %v", id.Str(), sid) debug.Log("index.Load", " index %v supersedes %v", id.Str(), sid)
supersedes[id].Insert(sid) supersedes[id].Insert(sid)
} }
for _, jpack := range idx.Packs { for _, jpack := range idx.Packs {
entries := make([]pack.Blob, 0, len(jpack.Blobs)) entries := make([]restic.Blob, 0, len(jpack.Blobs))
for _, blob := range jpack.Blobs { for _, blob := range jpack.Blobs {
entry := pack.Blob{ entry := restic.Blob{
ID: blob.ID, ID: blob.ID,
Type: blob.Type, Type: blob.Type,
Offset: blob.Offset, Offset: blob.Offset,
@ -179,7 +176,7 @@ func Load(repo types.Repository, p *restic.Progress) (*Index, error) {
// AddPack adds a pack to the index. If this pack is already in the index, an // AddPack adds a pack to the index. If this pack is already in the index, an
// error is returned. // error is returned.
func (idx *Index) AddPack(id backend.ID, size int64, entries []pack.Blob) error { func (idx *Index) AddPack(id restic.ID, size int64, entries []restic.Blob) error {
if _, ok := idx.Packs[id]; ok { if _, ok := idx.Packs[id]; ok {
return errors.Errorf("pack %v already present in the index", id.Str()) return errors.Errorf("pack %v already present in the index", id.Str())
} }
@ -187,11 +184,11 @@ func (idx *Index) AddPack(id backend.ID, size int64, entries []pack.Blob) error
idx.Packs[id] = Pack{Size: size, Entries: entries} idx.Packs[id] = Pack{Size: size, Entries: entries}
for _, entry := range entries { for _, entry := range entries {
h := pack.Handle{ID: entry.ID, Type: entry.Type} h := restic.BlobHandle{ID: entry.ID, Type: entry.Type}
if _, ok := idx.Blobs[h]; !ok { if _, ok := idx.Blobs[h]; !ok {
idx.Blobs[h] = Blob{ idx.Blobs[h] = Blob{
Size: int64(entry.Length), Size: int64(entry.Length),
Packs: backend.NewIDSet(), Packs: restic.NewIDSet(),
} }
} }
@ -202,13 +199,13 @@ func (idx *Index) AddPack(id backend.ID, size int64, entries []pack.Blob) error
} }
// RemovePack deletes a pack from the index. // RemovePack deletes a pack from the index.
func (idx *Index) RemovePack(id backend.ID) error { func (idx *Index) RemovePack(id restic.ID) error {
if _, ok := idx.Packs[id]; !ok { if _, ok := idx.Packs[id]; !ok {
return errors.Errorf("pack %v not found in the index", id.Str()) return errors.Errorf("pack %v not found in the index", id.Str())
} }
for _, blob := range idx.Packs[id].Entries { for _, blob := range idx.Packs[id].Entries {
h := pack.Handle{ID: blob.ID, Type: blob.Type} h := restic.BlobHandle{ID: blob.ID, Type: blob.Type}
idx.Blobs[h].Packs.Delete(id) idx.Blobs[h].Packs.Delete(id)
if len(idx.Blobs[h].Packs) == 0 { if len(idx.Blobs[h].Packs) == 0 {
@ -223,13 +220,13 @@ func (idx *Index) RemovePack(id backend.ID) error {
// DuplicateBlobs returns a list of blobs that are stored more than once in the // DuplicateBlobs returns a list of blobs that are stored more than once in the
// repo. // repo.
func (idx *Index) DuplicateBlobs() (dups pack.BlobSet) { func (idx *Index) DuplicateBlobs() (dups restic.BlobSet) {
dups = pack.NewBlobSet() dups = restic.NewBlobSet()
seen := pack.NewBlobSet() seen := restic.NewBlobSet()
for _, p := range idx.Packs { for _, p := range idx.Packs {
for _, entry := range p.Entries { for _, entry := range p.Entries {
h := pack.Handle{ID: entry.ID, Type: entry.Type} h := restic.BlobHandle{ID: entry.ID, Type: entry.Type}
if seen.Has(h) { if seen.Has(h) {
dups.Insert(h) dups.Insert(h)
} }
@ -241,8 +238,8 @@ func (idx *Index) DuplicateBlobs() (dups pack.BlobSet) {
} }
// PacksForBlobs returns the set of packs in which the blobs are contained. // PacksForBlobs returns the set of packs in which the blobs are contained.
func (idx *Index) PacksForBlobs(blobs pack.BlobSet) (packs backend.IDSet) { func (idx *Index) PacksForBlobs(blobs restic.BlobSet) (packs restic.IDSet) {
packs = backend.NewIDSet() packs = restic.NewIDSet()
for h := range blobs { for h := range blobs {
blob, ok := idx.Blobs[h] blob, ok := idx.Blobs[h]
@ -260,8 +257,8 @@ func (idx *Index) PacksForBlobs(blobs pack.BlobSet) (packs backend.IDSet) {
// Location describes the location of a blob in a pack. // Location describes the location of a blob in a pack.
type Location struct { type Location struct {
PackID backend.ID PackID restic.ID
pack.Blob restic.Blob
} }
// ErrBlobNotFound is returned by FindBlob when the blob could not be found in // ErrBlobNotFound is returned by FindBlob when the blob could not be found in
@ -269,7 +266,7 @@ type Location struct {
var ErrBlobNotFound = errors.New("blob not found in index") var ErrBlobNotFound = errors.New("blob not found in index")
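ErrBlobNotFound follows Go's sentinel-error convention: callers compare against the exported variable rather than inspecting the message. A minimal standalone illustration of the pattern (names invented for the sketch):

package main

import (
	"errors"
	"fmt"
)

// errNotFound mirrors the sentinel used by FindBlob above.
var errNotFound = errors.New("blob not found in index")

func find(m map[string][]int, key string) ([]int, error) {
	locs, ok := m[key]
	if !ok {
		return nil, errNotFound
	}
	return locs, nil
}

func main() {
	_, err := find(map[string][]int{}, "missing")
	fmt.Println(err == errNotFound) // callers test identity, not the text
}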
// FindBlob returns a list of packs and positions the blob can be found in. // FindBlob returns a list of packs and positions the blob can be found in.
func (idx *Index) FindBlob(h pack.Handle) ([]Location, error) { func (idx *Index) FindBlob(h restic.BlobHandle) ([]Location, error) {
blob, ok := idx.Blobs[h] blob, ok := idx.Blobs[h]
if !ok { if !ok {
return nil, ErrBlobNotFound return nil, ErrBlobNotFound
@ -300,8 +297,8 @@ func (idx *Index) FindBlob(h pack.Handle) ([]Location, error) {
} }
// Save writes the complete index to the repo. // Save writes the complete index to the repo.
func (idx *Index) Save(repo types.Repository, supersedes backend.IDs) (backend.ID, error) { func (idx *Index) Save(repo restic.Repository, supersedes restic.IDs) (restic.ID, error) {
packs := make(map[backend.ID][]pack.Blob, len(idx.Packs)) packs := make(map[restic.ID][]restic.Blob, len(idx.Packs))
for id, p := range idx.Packs { for id, p := range idx.Packs {
packs[id] = p.Entries packs[id] = p.Entries
} }
@ -310,7 +307,7 @@ func (idx *Index) Save(repo types.Repository, supersedes backend.IDs) (backend.I
} }
// Save writes a new index containing the given packs. // Save writes a new index containing the given packs.
func Save(repo types.Repository, packs map[backend.ID][]pack.Blob, supersedes backend.IDs) (backend.ID, error) { func Save(repo restic.Repository, packs map[restic.ID][]restic.Blob, supersedes restic.IDs) (restic.ID, error) {
idx := &indexJSON{ idx := &indexJSON{
Supersedes: supersedes, Supersedes: supersedes,
Packs: make([]*packJSON, 0, len(packs)), Packs: make([]*packJSON, 0, len(packs)),
@ -335,5 +332,5 @@ func Save(repo types.Repository, packs map[backend.ID][]pack.Blob, supersedes ba
idx.Packs = append(idx.Packs, p) idx.Packs = append(idx.Packs, p)
} }
return repo.SaveJSONUnpacked(backend.Index, idx) return repo.SaveJSONUnpacked(restic.IndexFile, idx)
} }

View file

@ -3,10 +3,7 @@ package index
import ( import (
"math/rand" "math/rand"
"restic" "restic"
"restic/backend"
"restic/pack"
"restic/repository" "restic/repository"
. "restic/test"
"testing" "testing"
"time" "time"
) )
@ -17,7 +14,7 @@ var (
depth = 3 depth = 3
) )
func createFilledRepo(t testing.TB, snapshots int, dup float32) (*repository.Repository, func()) { func createFilledRepo(t testing.TB, snapshots int, dup float32) (restic.Repository, func()) {
repo, cleanup := repository.TestRepository(t) repo, cleanup := repository.TestRepository(t)
for i := 0; i < 3; i++ { for i := 0; i < 3; i++ {
@ -27,8 +24,8 @@ func createFilledRepo(t testing.TB, snapshots int, dup float32) (*repository.Rep
return repo, cleanup return repo, cleanup
} }
func validateIndex(t testing.TB, repo *repository.Repository, idx *Index) { func validateIndex(t testing.TB, repo restic.Repository, idx *Index) {
for id := range repo.List(backend.Data, nil) { for id := range repo.List(restic.DataFile, nil) {
if _, ok := idx.Packs[id]; !ok { if _, ok := idx.Packs[id]; !ok {
t.Errorf("pack %v missing from index", id.Str()) t.Errorf("pack %v missing from index", id.Str())
} }
@ -164,7 +161,7 @@ func TestIndexDuplicateBlobs(t *testing.T) {
t.Logf("%d packs with duplicate blobs", len(packs)) t.Logf("%d packs with duplicate blobs", len(packs))
} }
func loadIndex(t testing.TB, repo *repository.Repository) *Index { func loadIndex(t testing.TB, repo restic.Repository) *Index {
idx, err := Load(repo, nil) idx, err := Load(repo, nil)
if err != nil { if err != nil {
t.Fatalf("Load() returned error %v", err) t.Fatalf("Load() returned error %v", err)
@ -179,7 +176,7 @@ func TestIndexSave(t *testing.T) {
idx := loadIndex(t, repo) idx := loadIndex(t, repo)
packs := make(map[backend.ID][]pack.Blob) packs := make(map[restic.ID][]restic.Blob)
for id := range idx.Packs { for id := range idx.Packs {
if rand.Float32() < 0.5 { if rand.Float32() < 0.5 {
packs[id] = idx.Packs[id].Entries packs[id] = idx.Packs[id].Entries
@ -197,7 +194,7 @@ func TestIndexSave(t *testing.T) {
for id := range idx.IndexIDs { for id := range idx.IndexIDs {
t.Logf("remove index %v", id.Str()) t.Logf("remove index %v", id.Str())
err = repo.Backend().Remove(backend.Index, id.String()) err = repo.Backend().Remove(restic.IndexFile, id.String())
if err != nil { if err != nil {
t.Errorf("error removing index %v: %v", id, err) t.Errorf("error removing index %v: %v", id, err)
} }
@ -235,7 +232,7 @@ func TestIndexAddRemovePack(t *testing.T) {
done := make(chan struct{}) done := make(chan struct{})
defer close(done) defer close(done)
packID := <-repo.List(backend.Data, done) packID := <-repo.List(restic.DataFile, done)
t.Logf("selected pack %v", packID.Str()) t.Logf("selected pack %v", packID.Str())
@ -248,7 +245,7 @@ func TestIndexAddRemovePack(t *testing.T) {
} }
for _, blob := range blobs { for _, blob := range blobs {
h := pack.Handle{ID: blob.ID, Type: blob.Type} h := restic.BlobHandle{ID: blob.ID, Type: blob.Type}
_, err := idx.FindBlob(h) _, err := idx.FindBlob(h)
if err == nil { if err == nil {
t.Errorf("removed blob %v found in index", h) t.Errorf("removed blob %v found in index", h)
@ -298,7 +295,7 @@ func TestIndexLoadDocReference(t *testing.T) {
repo, cleanup := repository.TestRepository(t) repo, cleanup := repository.TestRepository(t)
defer cleanup() defer cleanup()
id, err := repo.SaveUnpacked(backend.Index, docExample) id, err := repo.SaveUnpacked(restic.IndexFile, docExample)
if err != nil { if err != nil {
t.Fatalf("SaveUnpacked() returned error %v", err) t.Fatalf("SaveUnpacked() returned error %v", err)
} }
@ -307,8 +304,8 @@ func TestIndexLoadDocReference(t *testing.T) {
idx := loadIndex(t, repo) idx := loadIndex(t, repo)
blobID := ParseID("d3dc577b4ffd38cc4b32122cabf8655a0223ed22edfd93b353dc0c3f2b0fdf66") blobID := restic.TestParseID("d3dc577b4ffd38cc4b32122cabf8655a0223ed22edfd93b353dc0c3f2b0fdf66")
locs, err := idx.FindBlob(pack.Handle{ID: blobID, Type: pack.Data}) locs, err := idx.FindBlob(restic.BlobHandle{ID: blobID, Type: restic.DataBlob})
if err != nil { if err != nil {
t.Errorf("FindBlob() returned error %v", err) t.Errorf("FindBlob() returned error %v", err)
} }
@ -322,8 +319,8 @@ func TestIndexLoadDocReference(t *testing.T) {
t.Errorf("blob IDs are not equal: %v != %v", l.ID, blobID) t.Errorf("blob IDs are not equal: %v != %v", l.ID, blobID)
} }
if l.Type != pack.Data { if l.Type != restic.DataBlob {
t.Errorf("want type %v, got %v", pack.Data, l.Type) t.Errorf("want type %v, got %v", restic.DataBlob, l.Type)
} }
if l.Offset != 150 { if l.Offset != 150 {

View file

@ -1,8 +1,7 @@
package list package list
import ( import (
"restic/backend" "restic"
"restic/pack"
"restic/worker" "restic/worker"
) )
@ -10,19 +9,19 @@ const listPackWorkers = 10
// Lister lists packs in a repo and the blobs within a pack. // Lister lists packs in a repo and the blobs within a pack.
type Lister interface { type Lister interface {
List(backend.Type, <-chan struct{}) <-chan backend.ID List(restic.FileType, <-chan struct{}) <-chan restic.ID
ListPack(backend.ID) ([]pack.Blob, int64, error) ListPack(restic.ID) ([]restic.Blob, int64, error)
} }
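Any type with these two methods satisfies Lister. A toy in-memory implementation, assuming the restic types shown elsewhere in this commit (illustrative only, not part of the diff):

package list

import "restic"

// memLister is a minimal Lister backed by a map.
type memLister struct {
	packs map[restic.ID][]restic.Blob
}

func (m memLister) List(t restic.FileType, done <-chan struct{}) <-chan restic.ID {
	ch := make(chan restic.ID)
	go func() {
		defer close(ch)
		for id := range m.packs {
			select {
			case ch <- id:
			case <-done:
				return
			}
		}
	}()
	return ch
}

func (m memLister) ListPack(id restic.ID) ([]restic.Blob, int64, error) {
	blobs := m.packs[id]
	var size int64
	for _, b := range blobs {
		size += int64(b.Length)
	}
	return blobs, size, nil
}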
// Result is returned in the channel from AllPacks. // Result is returned in the channel from AllPacks.
type Result struct { type Result struct {
packID backend.ID packID restic.ID
size int64 size int64
entries []pack.Blob entries []restic.Blob
} }
// PackID returns the pack ID of this result. // PackID returns the pack ID of this result.
func (l Result) PackID() backend.ID { func (l Result) PackID() restic.ID {
return l.packID return l.packID
} }
@ -32,14 +31,14 @@ func (l Result) Size() int64 {
} }
// Entries returns a list of all blobs saved in the pack. // Entries returns a list of all blobs saved in the pack.
func (l Result) Entries() []pack.Blob { func (l Result) Entries() []restic.Blob {
return l.entries return l.entries
} }
// AllPacks sends the contents of all packs to ch. // AllPacks sends the contents of all packs to ch.
func AllPacks(repo Lister, ch chan<- worker.Job, done <-chan struct{}) { func AllPacks(repo Lister, ch chan<- worker.Job, done <-chan struct{}) {
f := func(job worker.Job, done <-chan struct{}) (interface{}, error) { f := func(job worker.Job, done <-chan struct{}) (interface{}, error) {
packID := job.Data.(backend.ID) packID := job.Data.(restic.ID)
entries, size, err := repo.ListPack(packID) entries, size, err := repo.ListPack(packID)
return Result{ return Result{
@ -54,7 +53,7 @@ func AllPacks(repo Lister, ch chan<- worker.Job, done <-chan struct{}) {
go func() { go func() {
defer close(jobCh) defer close(jobCh)
for id := range repo.List(backend.Data, done) { for id := range repo.List(restic.DataFile, done) {
select { select {
case jobCh <- worker.Job{Data: id}: case jobCh <- worker.Job{Data: id}:
case <-done: case <-done:

View file

@ -7,13 +7,12 @@ import (
"os/user" "os/user"
"sync" "sync"
"syscall" "syscall"
"testing"
"time" "time"
"github.com/pkg/errors" "restic/errors"
"restic/backend"
"restic/debug" "restic/debug"
"restic/repository"
) )
// Lock represents a process locking the repository for an operation. // Lock represents a process locking the repository for an operation.
@ -33,8 +32,8 @@ type Lock struct {
UID uint32 `json:"uid,omitempty"` UID uint32 `json:"uid,omitempty"`
GID uint32 `json:"gid,omitempty"` GID uint32 `json:"gid,omitempty"`
repo *repository.Repository repo Repository
lockID *backend.ID lockID *ID
} }
// ErrAlreadyLocked is returned when NewLock or NewExclusiveLock is unable to // ErrAlreadyLocked is returned when NewLock or NewExclusiveLock is unable to
@ -59,20 +58,26 @@ func IsAlreadyLocked(err error) bool {
// NewLock returns a new, non-exclusive lock for the repository. If an // NewLock returns a new, non-exclusive lock for the repository. If an
// exclusive lock is already held by another process, ErrAlreadyLocked is // exclusive lock is already held by another process, ErrAlreadyLocked is
// returned. // returned.
func NewLock(repo *repository.Repository) (*Lock, error) { func NewLock(repo Repository) (*Lock, error) {
return newLock(repo, false) return newLock(repo, false)
} }
// NewExclusiveLock returns a new, exclusive lock for the repository. If // NewExclusiveLock returns a new, exclusive lock for the repository. If
// another lock (normal or exclusive) is already held by another process, // another lock (normal or exclusive) is already held by another process,
// ErrAlreadyLocked is returned. // ErrAlreadyLocked is returned.
func NewExclusiveLock(repo *repository.Repository) (*Lock, error) { func NewExclusiveLock(repo Repository) (*Lock, error) {
return newLock(repo, true) return newLock(repo, true)
} }
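Typical call sites pair the constructor with a deferred Unlock. A hedged sketch written as a test, assuming the repository test helper shown elsewhere in this commit:

package restic_test

import (
	"testing"

	"restic"
	"restic/repository"
)

// TestLockRoundTrip sketches intended usage: acquire, work, release.
func TestLockRoundTrip(t *testing.T) {
	repo, cleanup := repository.TestRepository(t)
	defer cleanup()

	lock, err := restic.NewLock(repo) // NewExclusiveLock for writers
	if err != nil {
		if restic.IsAlreadyLocked(err) {
			t.Fatal("repository is already locked by another process")
		}
		t.Fatal(err)
	}
	defer lock.Unlock()
}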
const waitBeforeLockCheck = 200 * time.Millisecond var waitBeforeLockCheck = 200 * time.Millisecond
func newLock(repo *repository.Repository, excl bool) (*Lock, error) { // TestSetLockTimeout can be used to reduce the lock wait timeout for tests.
func TestSetLockTimeout(t testing.TB, d time.Duration) {
t.Logf("setting lock timeout to %v", d)
waitBeforeLockCheck = d
}
func newLock(repo Repository, excl bool) (*Lock, error) {
lock := &Lock{ lock := &Lock{
Time: time.Now(), Time: time.Now(),
PID: os.Getpid(), PID: os.Getpid(),
@ -128,7 +133,7 @@ func (l *Lock) fillUserInfo() error {
// non-exclusive lock is to be created, an error is only returned when an // non-exclusive lock is to be created, an error is only returned when an
// exclusive lock is found. // exclusive lock is found.
func (l *Lock) checkForOtherLocks() error { func (l *Lock) checkForOtherLocks() error {
return eachLock(l.repo, func(id backend.ID, lock *Lock, err error) error { return eachLock(l.repo, func(id ID, lock *Lock, err error) error {
if l.lockID != nil && id.Equal(*l.lockID) { if l.lockID != nil && id.Equal(*l.lockID) {
return nil return nil
} }
@ -150,11 +155,11 @@ func (l *Lock) checkForOtherLocks() error {
}) })
} }
func eachLock(repo *repository.Repository, f func(backend.ID, *Lock, error) error) error { func eachLock(repo Repository, f func(ID, *Lock, error) error) error {
done := make(chan struct{}) done := make(chan struct{})
defer close(done) defer close(done)
for id := range repo.List(backend.Lock, done) { for id := range repo.List(LockFile, done) {
lock, err := LoadLock(repo, id) lock, err := LoadLock(repo, id)
err = f(id, lock, err) err = f(id, lock, err)
if err != nil { if err != nil {
@ -166,10 +171,10 @@ func eachLock(repo *repository.Repository, f func(backend.ID, *Lock, error) erro
} }
// createLock acquires the lock by creating a file in the repository. // createLock acquires the lock by creating a file in the repository.
func (l *Lock) createLock() (backend.ID, error) { func (l *Lock) createLock() (ID, error) {
id, err := l.repo.SaveJSONUnpacked(backend.Lock, l) id, err := l.repo.SaveJSONUnpacked(LockFile, l)
if err != nil { if err != nil {
return backend.ID{}, err return ID{}, err
} }
return id, nil return id, nil
@ -181,7 +186,7 @@ func (l *Lock) Unlock() error {
return nil return nil
} }
return l.repo.Backend().Remove(backend.Lock, l.lockID.String()) return l.repo.Backend().Remove(LockFile, l.lockID.String())
} }
var staleTimeout = 30 * time.Minute var staleTimeout = 30 * time.Minute
@ -229,7 +234,7 @@ func (l *Lock) Refresh() error {
return err return err
} }
err = l.repo.Backend().Remove(backend.Lock, l.lockID.String()) err = l.repo.Backend().Remove(LockFile, l.lockID.String())
if err != nil { if err != nil {
return err return err
} }
@ -269,9 +274,9 @@ func init() {
} }
// LoadLock loads and deserializes a lock from a repository. // LoadLock loads and deserializes a lock from a repository.
func LoadLock(repo *repository.Repository, id backend.ID) (*Lock, error) { func LoadLock(repo Repository, id ID) (*Lock, error) {
lock := &Lock{} lock := &Lock{}
if err := repo.LoadJSONUnpacked(backend.Lock, id, lock); err != nil { if err := repo.LoadJSONUnpacked(LockFile, id, lock); err != nil {
return nil, err return nil, err
} }
lock.lockID = &id lock.lockID = &id
@ -280,15 +285,15 @@ func LoadLock(repo *repository.Repository, id backend.ID) (*Lock, error) {
} }
// RemoveStaleLocks deletes all locks detected as stale from the repository. // RemoveStaleLocks deletes all locks detected as stale from the repository.
func RemoveStaleLocks(repo *repository.Repository) error { func RemoveStaleLocks(repo Repository) error {
return eachLock(repo, func(id backend.ID, lock *Lock, err error) error { return eachLock(repo, func(id ID, lock *Lock, err error) error {
// ignore locks that cannot be loaded // ignore locks that cannot be loaded
if err != nil { if err != nil {
return nil return nil
} }
if lock.Stale() { if lock.Stale() {
return repo.Backend().Remove(backend.Lock, id.String()) return repo.Backend().Remove(LockFile, id.String())
} }
return nil return nil
@ -296,8 +301,8 @@ func RemoveStaleLocks(repo *repository.Repository) error {
} }
// RemoveAllLocks removes all locks forcefully. // RemoveAllLocks removes all locks forcefully.
func RemoveAllLocks(repo *repository.Repository) error { func RemoveAllLocks(repo Repository) error {
return eachLock(repo, func(id backend.ID, lock *Lock, err error) error { return eachLock(repo, func(id ID, lock *Lock, err error) error {
return repo.Backend().Remove(backend.Lock, id.String()) return repo.Backend().Remove(LockFile, id.String())
}) })
} }
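eachLock factors out the iterate-with-callback pattern both removal helpers use: the callback sees every lock, and a non-nil error aborts the walk. A generic standalone sketch of that pattern (names invented for the sketch):

package main

import (
	"errors"
	"fmt"
)

// each visits every item and stops at the first callback error.
func each(items []string, f func(string) error) error {
	for _, it := range items {
		if err := f(it); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	err := each([]string{"a", "b", "c"}, func(s string) error {
		if s == "b" {
			return errors.New("abort at b")
		}
		fmt.Println("visited", s)
		return nil
	})
	fmt.Println(err) // abort at b
}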

Some files were not shown because too many files have changed in this diff.