diff --git a/src/cmds/restic/cmd_backup.go b/src/cmds/restic/cmd_backup.go index 7a7640249..dc07be01d 100644 --- a/src/cmds/restic/cmd_backup.go +++ b/src/cmds/restic/cmd_backup.go @@ -6,14 +6,14 @@ import ( "os" "path/filepath" "restic" - "restic/backend" + "restic/archiver" "restic/debug" "restic/filter" "restic/fs" "strings" "time" - "github.com/pkg/errors" + "restic/errors" "golang.org/x/crypto/ssh/terminal" ) @@ -232,7 +232,7 @@ func filterExisting(items []string) (result []string, err error) { } if len(result) == 0 { - return nil, restic.Fatal("all target directories/files do not exist") + return nil, errors.Fatal("all target directories/files do not exist") } return @@ -240,7 +240,7 @@ func filterExisting(items []string) (result []string, err error) { func (cmd CmdBackup) readFromStdin(args []string) error { if len(args) != 0 { - return restic.Fatalf("when reading from stdin, no additional files can be specified") + return errors.Fatalf("when reading from stdin, no additional files can be specified") } repo, err := cmd.global.OpenRepository() @@ -259,7 +259,7 @@ func (cmd CmdBackup) readFromStdin(args []string) error { return err } - _, id, err := restic.ArchiveReader(repo, cmd.newArchiveStdinProgress(), os.Stdin, cmd.StdinFilename) + _, id, err := archiver.ArchiveReader(repo, cmd.newArchiveStdinProgress(), os.Stdin, cmd.StdinFilename) if err != nil { return err } @@ -274,7 +274,7 @@ func (cmd CmdBackup) Execute(args []string) error { } if len(args) == 0 { - return restic.Fatalf("wrong number of parameters, Usage: %s", cmd.Usage()) + return errors.Fatalf("wrong number of parameters, Usage: %s", cmd.Usage()) } target := make([]string, 0, len(args)) @@ -306,13 +306,13 @@ func (cmd CmdBackup) Execute(args []string) error { return err } - var parentSnapshotID *backend.ID + var parentSnapshotID *restic.ID // Force using a parent if !cmd.Force && cmd.Parent != "" { id, err := restic.FindSnapshot(repo, cmd.Parent) if err != nil { - return restic.Fatalf("invalid id 
%q: %v", cmd.Parent, err) + return errors.Fatalf("invalid id %q: %v", cmd.Parent, err) } parentSnapshotID = &id @@ -365,12 +365,12 @@ func (cmd CmdBackup) Execute(args []string) error { return !matched } - stat, err := restic.Scan(target, selectFilter, cmd.newScanProgress()) + stat, err := archiver.Scan(target, selectFilter, cmd.newScanProgress()) if err != nil { return err } - arch := restic.NewArchiver(repo) + arch := archiver.New(repo) arch.Excludes = cmd.Excludes arch.SelectFilter = selectFilter diff --git a/src/cmds/restic/cmd_cache.go b/src/cmds/restic/cmd_cache.go deleted file mode 100644 index aa4d5765f..000000000 --- a/src/cmds/restic/cmd_cache.go +++ /dev/null @@ -1,52 +0,0 @@ -package main - -import ( - "fmt" - - "restic" -) - -type CmdCache struct { - global *GlobalOptions -} - -func init() { - _, err := parser.AddCommand("cache", - "manage cache", - "The cache command creates and manages the local cache", - &CmdCache{global: &globalOpts}) - if err != nil { - panic(err) - } -} - -func (cmd CmdCache) Usage() string { - return "[update|clear]" -} - -func (cmd CmdCache) Execute(args []string) error { - repo, err := cmd.global.OpenRepository() - if err != nil { - return err - } - - lock, err := lockRepo(repo) - defer unlockRepo(lock) - if err != nil { - return err - } - - cache, err := restic.NewCache(repo, cmd.global.CacheDir) - if err != nil { - return err - } - - fmt.Printf("clear cache for old snapshots\n") - err = cache.Clear(repo) - if err != nil { - return err - } - fmt.Printf("done\n") - - return nil -} diff --git a/src/cmds/restic/cmd_cat.go b/src/cmds/restic/cmd_cat.go index 4e52848da..802257066 100644 --- a/src/cmds/restic/cmd_cat.go +++ b/src/cmds/restic/cmd_cat.go @@ -8,7 +8,7 @@ import ( "restic" "restic/backend" "restic/debug" - "restic/pack" + "restic/errors" "restic/repository" ) @@ -32,7 +32,7 @@ func (cmd CmdCat) Usage() string { func (cmd CmdCat) Execute(args []string) error { if len(args) < 1 || (args[0] != "masterkey" && args[0] != 
"config" && len(args) != 2) { - return restic.Fatalf("type or ID not specified, Usage: %s", cmd.Usage()) + return errors.Fatalf("type or ID not specified, Usage: %s", cmd.Usage()) } repo, err := cmd.global.OpenRepository() @@ -48,12 +48,12 @@ func (cmd CmdCat) Execute(args []string) error { tpe := args[0] - var id backend.ID + var id restic.ID if tpe != "masterkey" && tpe != "config" { - id, err = backend.ParseID(args[1]) + id, err = restic.ParseID(args[1]) if err != nil { if tpe != "snapshot" { - return restic.Fatalf("unable to parse ID: %v\n", err) + return errors.Fatalf("unable to parse ID: %v\n", err) } // find snapshot id with prefix @@ -67,7 +67,7 @@ func (cmd CmdCat) Execute(args []string) error { // handle all types that don't need an index switch tpe { case "config": - buf, err := json.MarshalIndent(repo.Config, "", " ") + buf, err := json.MarshalIndent(repo.Config(), "", " ") if err != nil { return err } @@ -75,7 +75,7 @@ func (cmd CmdCat) Execute(args []string) error { fmt.Println(string(buf)) return nil case "index": - buf, err := repo.LoadAndDecrypt(backend.Index, id) + buf, err := repo.LoadAndDecrypt(restic.IndexFile, id) if err != nil { return err } @@ -85,7 +85,7 @@ func (cmd CmdCat) Execute(args []string) error { case "snapshot": sn := &restic.Snapshot{} - err = repo.LoadJSONUnpacked(backend.Snapshot, id, sn) + err = repo.LoadJSONUnpacked(restic.SnapshotFile, id, sn) if err != nil { return err } @@ -99,7 +99,7 @@ func (cmd CmdCat) Execute(args []string) error { return nil case "key": - h := backend.Handle{Type: backend.Key, Name: id.String()} + h := restic.Handle{Type: restic.KeyFile, Name: id.String()} buf, err := backend.LoadAll(repo.Backend(), h, nil) if err != nil { return err @@ -150,13 +150,13 @@ func (cmd CmdCat) Execute(args []string) error { switch tpe { case "pack": - h := backend.Handle{Type: backend.Data, Name: id.String()} + h := restic.Handle{Type: restic.DataFile, Name: id.String()} buf, err := backend.LoadAll(repo.Backend(), h, nil) 
if err != nil { return err } - hash := backend.Hash(buf) + hash := restic.Hash(buf) if !hash.Equal(id) { fmt.Fprintf(cmd.global.stderr, "Warning: hash of data does not match ID, want\n %v\ngot:\n %v\n", id.String(), hash.String()) } @@ -165,7 +165,7 @@ func (cmd CmdCat) Execute(args []string) error { return err case "blob": - for _, t := range []pack.BlobType{pack.Data, pack.Tree} { + for _, t := range []restic.BlobType{restic.DataBlob, restic.TreeBlob} { list, err := repo.Index().Lookup(id, t) if err != nil { continue @@ -173,21 +173,21 @@ func (cmd CmdCat) Execute(args []string) error { blob := list[0] buf := make([]byte, blob.Length) - data, err := repo.LoadBlob(id, t, buf) + n, err := repo.LoadBlob(t, id, buf) if err != nil { return err } + buf = buf[:n] - _, err = os.Stdout.Write(data) + _, err = os.Stdout.Write(buf) return err } - return restic.Fatal("blob not found") + return errors.Fatal("blob not found") case "tree": debug.Log("cat", "cat tree %v", id.Str()) - tree := restic.NewTree() - err = repo.LoadJSONPack(pack.Tree, id, tree) + tree, err := repo.LoadTree(id) if err != nil { debug.Log("cat", "unable to load tree %v: %v", id.Str(), err) return err @@ -203,6 +203,6 @@ func (cmd CmdCat) Execute(args []string) error { return nil default: - return restic.Fatal("invalid type") + return errors.Fatal("invalid type") } } diff --git a/src/cmds/restic/cmd_check.go b/src/cmds/restic/cmd_check.go index a151fc93b..cd2fc409b 100644 --- a/src/cmds/restic/cmd_check.go +++ b/src/cmds/restic/cmd_check.go @@ -9,6 +9,7 @@ import ( "restic" "restic/checker" + "restic/errors" ) type CmdCheck struct { @@ -65,7 +66,7 @@ func (cmd CmdCheck) newReadProgress(todo restic.Stat) *restic.Progress { func (cmd CmdCheck) Execute(args []string) error { if len(args) != 0 { - return restic.Fatal("check has no arguments") + return errors.Fatal("check has no arguments") } repo, err := cmd.global.OpenRepository() @@ -103,7 +104,7 @@ func (cmd CmdCheck) Execute(args []string) 
error { for _, err := range errs { cmd.global.Warnf("error: %v\n", err) } - return restic.Fatal("LoadIndex returned errors") + return errors.Fatal("LoadIndex returned errors") } done := make(chan struct{}) @@ -158,7 +159,7 @@ func (cmd CmdCheck) Execute(args []string) error { } if errorsFound { - return restic.Fatal("repository contains errors") + return errors.Fatal("repository contains errors") } return nil } diff --git a/src/cmds/restic/cmd_dump.go b/src/cmds/restic/cmd_dump.go index 32b789094..f29aff905 100644 --- a/src/cmds/restic/cmd_dump.go +++ b/src/cmds/restic/cmd_dump.go @@ -9,7 +9,7 @@ import ( "os" "restic" - "restic/backend" + "restic/errors" "restic/pack" "restic/repository" @@ -50,7 +50,7 @@ func debugPrintSnapshots(repo *repository.Repository, wr io.Writer) error { done := make(chan struct{}) defer close(done) - for id := range repo.List(backend.Snapshot, done) { + for id := range repo.List(restic.SnapshotFile, done) { snapshot, err := restic.LoadSnapshot(repo, id) if err != nil { fmt.Fprintf(os.Stderr, "LoadSnapshot(%v): %v", id.Str(), err) @@ -68,37 +68,6 @@ func debugPrintSnapshots(repo *repository.Repository, wr io.Writer) error { return nil } -func printTrees(repo *repository.Repository, wr io.Writer) error { - done := make(chan struct{}) - defer close(done) - - trees := []backend.ID{} - - for _, idx := range repo.Index().All() { - for blob := range idx.Each(nil) { - if blob.Type != pack.Tree { - continue - } - - trees = append(trees, blob.ID) - } - } - - for _, id := range trees { - tree, err := restic.LoadTree(repo, id) - if err != nil { - fmt.Fprintf(os.Stderr, "LoadTree(%v): %v", id.Str(), err) - continue - } - - fmt.Fprintf(wr, "tree_id: %v\n", id) - - prettyPrintJSON(wr, tree) - } - - return nil -} - const dumpPackWorkers = 10 // Pack is the struct used in printPacks. @@ -110,10 +79,10 @@ type Pack struct { // Blob is the struct used in printPacks. 
type Blob struct { - Type pack.BlobType `json:"type"` - Length uint `json:"length"` - ID backend.ID `json:"id"` - Offset uint `json:"offset"` + Type restic.BlobType `json:"type"` + Length uint `json:"length"` + ID restic.ID `json:"id"` + Offset uint `json:"offset"` } func printPacks(repo *repository.Repository, wr io.Writer) error { @@ -123,14 +92,14 @@ func printPacks(repo *repository.Repository, wr io.Writer) error { f := func(job worker.Job, done <-chan struct{}) (interface{}, error) { name := job.Data.(string) - h := backend.Handle{Type: backend.Data, Name: name} + h := restic.Handle{Type: restic.DataFile, Name: name} blobInfo, err := repo.Backend().Stat(h) if err != nil { return nil, err } - blobs, err := pack.List(repo.Key(), backend.ReaderAt(repo.Backend(), h), blobInfo.Size) + blobs, err := pack.List(repo.Key(), restic.ReaderAt(repo.Backend(), h), blobInfo.Size) if err != nil { return nil, err } @@ -143,7 +112,7 @@ func printPacks(repo *repository.Repository, wr io.Writer) error { wp := worker.New(dumpPackWorkers, f, jobCh, resCh) go func() { - for name := range repo.Backend().List(backend.Data, done) { + for name := range repo.Backend().List(restic.DataFile, done) { jobCh <- worker.Job{Data: name} } close(jobCh) @@ -157,7 +126,7 @@ func printPacks(repo *repository.Repository, wr io.Writer) error { continue } - entries := job.Result.([]pack.Blob) + entries := job.Result.([]restic.Blob) p := Pack{ Name: name, Blobs: make([]Blob, len(entries)), @@ -183,7 +152,7 @@ func (cmd CmdDump) DumpIndexes() error { done := make(chan struct{}) defer close(done) - for id := range cmd.repo.List(backend.Index, done) { + for id := range cmd.repo.List(restic.IndexFile, done) { fmt.Printf("index_id: %v\n", id) idx, err := repository.LoadIndex(cmd.repo, id) @@ -202,7 +171,7 @@ func (cmd CmdDump) DumpIndexes() error { func (cmd CmdDump) Execute(args []string) error { if len(args) != 1 { - return restic.Fatalf("type not specified, Usage: %s", cmd.Usage()) + return 
errors.Fatalf("type not specified, Usage: %s", cmd.Usage()) } repo, err := cmd.global.OpenRepository() @@ -229,8 +198,6 @@ func (cmd CmdDump) Execute(args []string) error { return cmd.DumpIndexes() case "snapshots": return debugPrintSnapshots(repo, os.Stdout) - case "trees": - return printTrees(repo, os.Stdout) case "packs": return printPacks(repo, os.Stdout) case "all": @@ -240,13 +207,6 @@ func (cmd CmdDump) Execute(args []string) error { return err } - fmt.Printf("\ntrees:\n") - - err = printTrees(repo, os.Stdout) - if err != nil { - return err - } - fmt.Printf("\nindexes:\n") err = cmd.DumpIndexes() if err != nil { @@ -255,6 +215,6 @@ func (cmd CmdDump) Execute(args []string) error { return nil default: - return restic.Fatalf("no such type %q", tpe) + return errors.Fatalf("no such type %q", tpe) } } diff --git a/src/cmds/restic/cmd_find.go b/src/cmds/restic/cmd_find.go index 1c66cd757..683adaa87 100644 --- a/src/cmds/restic/cmd_find.go +++ b/src/cmds/restic/cmd_find.go @@ -5,8 +5,8 @@ import ( "time" "restic" - "restic/backend" "restic/debug" + "restic/errors" "restic/repository" ) @@ -56,12 +56,12 @@ func parseTime(str string) (time.Time, error) { } } - return time.Time{}, restic.Fatalf("unable to parse time: %q", str) + return time.Time{}, errors.Fatalf("unable to parse time: %q", str) } -func (c CmdFind) findInTree(repo *repository.Repository, id backend.ID, path string) ([]findResult, error) { +func (c CmdFind) findInTree(repo *repository.Repository, id restic.ID, path string) ([]findResult, error) { debug.Log("restic.find", "checking tree %v\n", id) - tree, err := restic.LoadTree(repo, id) + tree, err := repo.LoadTree(id) if err != nil { return nil, err } @@ -105,7 +105,7 @@ func (c CmdFind) findInTree(repo *repository.Repository, id backend.ID, path str return results, nil } -func (c CmdFind) findInSnapshot(repo *repository.Repository, id backend.ID) error { +func (c CmdFind) findInSnapshot(repo *repository.Repository, id restic.ID) error { 
debug.Log("restic.find", "searching in snapshot %s\n for entries within [%s %s]", id.Str(), c.oldest, c.newest) sn, err := restic.LoadSnapshot(repo, id) @@ -136,7 +136,7 @@ func (CmdFind) Usage() string { func (c CmdFind) Execute(args []string) error { if len(args) != 1 { - return restic.Fatalf("wrong number of arguments, Usage: %s", c.Usage()) + return errors.Fatalf("wrong number of arguments, Usage: %s", c.Usage()) } var err error @@ -176,7 +176,7 @@ func (c CmdFind) Execute(args []string) error { if c.Snapshot != "" { snapshotID, err := restic.FindSnapshot(repo, c.Snapshot) if err != nil { - return restic.Fatalf("invalid id %q: %v", args[1], err) + return errors.Fatalf("invalid id %q: %v", args[1], err) } return c.findInSnapshot(repo, snapshotID) @@ -184,7 +184,7 @@ func (c CmdFind) Execute(args []string) error { done := make(chan struct{}) defer close(done) - for snapshotID := range repo.List(backend.Snapshot, done) { + for snapshotID := range repo.List(restic.SnapshotFile, done) { err := c.findInSnapshot(repo, snapshotID) if err != nil { diff --git a/src/cmds/restic/cmd_forget.go b/src/cmds/restic/cmd_forget.go index 16da4b556..3f4ec12c6 100644 --- a/src/cmds/restic/cmd_forget.go +++ b/src/cmds/restic/cmd_forget.go @@ -4,7 +4,6 @@ import ( "fmt" "io" "restic" - "restic/backend" "strings" ) @@ -93,7 +92,7 @@ func (cmd CmdForget) Execute(args []string) error { } if !cmd.DryRun { - err = repo.Backend().Remove(backend.Snapshot, id.String()) + err = repo.Backend().Remove(restic.SnapshotFile, id.String()) if err != nil { return err } @@ -156,7 +155,7 @@ func (cmd CmdForget) Execute(args []string) error { if !cmd.DryRun { for _, sn := range remove { - err = repo.Backend().Remove(backend.Snapshot, sn.ID().String()) + err = repo.Backend().Remove(restic.SnapshotFile, sn.ID().String()) if err != nil { return err } diff --git a/src/cmds/restic/cmd_init.go b/src/cmds/restic/cmd_init.go index 49b0907ad..39b8cd2fd 100644 --- a/src/cmds/restic/cmd_init.go +++ 
b/src/cmds/restic/cmd_init.go @@ -1,7 +1,7 @@ package main import ( - "restic" + "restic/errors" "restic/repository" ) @@ -11,7 +11,7 @@ type CmdInit struct { func (cmd CmdInit) Execute(args []string) error { if cmd.global.Repo == "" { - return restic.Fatal("Please specify repository location (-r)") + return errors.Fatal("Please specify repository location (-r)") } be, err := create(cmd.global.Repo) @@ -32,7 +32,7 @@ func (cmd CmdInit) Execute(args []string) error { cmd.global.Exitf(1, "creating key in backend at %s failed: %v\n", cmd.global.Repo, err) } - cmd.global.Verbosef("created restic backend %v at %s\n", s.Config.ID[:10], cmd.global.Repo) + cmd.global.Verbosef("created restic backend %v at %s\n", s.Config().ID[:10], cmd.global.Repo) cmd.global.Verbosef("\n") cmd.global.Verbosef("Please note that knowledge of your password is required to access\n") cmd.global.Verbosef("the repository. Losing your password means that your data is\n") diff --git a/src/cmds/restic/cmd_key.go b/src/cmds/restic/cmd_key.go index 67d5afa64..848018150 100644 --- a/src/cmds/restic/cmd_key.go +++ b/src/cmds/restic/cmd_key.go @@ -4,7 +4,7 @@ import ( "fmt" "restic" - "restic/backend" + "restic/errors" "restic/repository" ) @@ -28,7 +28,7 @@ func (cmd CmdKey) listKeys(s *repository.Repository) error { tab.Header = fmt.Sprintf(" %-10s %-10s %-10s %s", "ID", "User", "Host", "Created") tab.RowFormat = "%s%-10s %-10s %-10s %s" - plen, err := s.PrefixLength(backend.Key) + plen, err := s.PrefixLength(restic.KeyFile) if err != nil { return err } @@ -36,7 +36,7 @@ func (cmd CmdKey) listKeys(s *repository.Repository) error { done := make(chan struct{}) defer close(done) - for id := range s.List(backend.Key, done) { + for id := range s.List(restic.KeyFile, done) { k, err := repository.LoadKey(s, id.String()) if err != nil { cmd.global.Warnf("LoadKey() failed: %v\n", err) @@ -69,7 +69,7 @@ func (cmd CmdKey) getNewPassword() string { func (cmd CmdKey) addKey(repo *repository.Repository) error { id, 
err := repository.AddKey(repo, cmd.getNewPassword(), repo.Key()) if err != nil { - return restic.Fatalf("creating new key failed: %v\n", err) + return errors.Fatalf("creating new key failed: %v\n", err) } cmd.global.Verbosef("saved new key as %s\n", id) @@ -79,10 +79,10 @@ func (cmd CmdKey) addKey(repo *repository.Repository) error { func (cmd CmdKey) deleteKey(repo *repository.Repository, name string) error { if name == repo.KeyName() { - return restic.Fatal("refusing to remove key currently used to access repository") + return errors.Fatal("refusing to remove key currently used to access repository") } - err := repo.Backend().Remove(backend.Key, name) + err := repo.Backend().Remove(restic.KeyFile, name) if err != nil { return err } @@ -94,10 +94,10 @@ func (cmd CmdKey) deleteKey(repo *repository.Repository, name string) error { func (cmd CmdKey) changePassword(repo *repository.Repository) error { id, err := repository.AddKey(repo, cmd.getNewPassword(), repo.Key()) if err != nil { - return restic.Fatalf("creating new key failed: %v\n", err) + return errors.Fatalf("creating new key failed: %v\n", err) } - err = repo.Backend().Remove(backend.Key, repo.KeyName()) + err = repo.Backend().Remove(restic.KeyFile, repo.KeyName()) if err != nil { return err } @@ -113,7 +113,7 @@ func (cmd CmdKey) Usage() string { func (cmd CmdKey) Execute(args []string) error { if len(args) < 1 || (args[0] == "rm" && len(args) != 2) { - return restic.Fatalf("wrong number of arguments, Usage: %s", cmd.Usage()) + return errors.Fatalf("wrong number of arguments, Usage: %s", cmd.Usage()) } repo, err := cmd.global.OpenRepository() @@ -145,7 +145,7 @@ func (cmd CmdKey) Execute(args []string) error { return err } - id, err := backend.Find(repo.Backend(), backend.Key, args[1]) + id, err := restic.Find(repo.Backend(), restic.KeyFile, args[1]) if err != nil { return err } diff --git a/src/cmds/restic/cmd_list.go b/src/cmds/restic/cmd_list.go index 717d65ade..a17d5ce64 100644 --- 
a/src/cmds/restic/cmd_list.go +++ b/src/cmds/restic/cmd_list.go @@ -2,7 +2,7 @@ package main import ( "restic" - "restic/backend" + "restic/errors" ) type CmdList struct { @@ -25,7 +25,7 @@ func (cmd CmdList) Usage() string { func (cmd CmdList) Execute(args []string) error { if len(args) != 1 { - return restic.Fatalf("type not specified, Usage: %s", cmd.Usage()) + return errors.Fatalf("type not specified, Usage: %s", cmd.Usage()) } repo, err := cmd.global.OpenRepository() @@ -41,33 +41,20 @@ func (cmd CmdList) Execute(args []string) error { } } - var t backend.Type + var t restic.FileType switch args[0] { - case "blobs": - err = repo.LoadIndex() - if err != nil { - return err - } - - for _, idx := range repo.Index().All() { - for blob := range idx.Each(nil) { - cmd.global.Printf("%s\n", blob.ID) - } - } - - return nil case "packs": - t = backend.Data + t = restic.DataFile case "index": - t = backend.Index + t = restic.IndexFile case "snapshots": - t = backend.Snapshot + t = restic.SnapshotFile case "keys": - t = backend.Key + t = restic.KeyFile case "locks": - t = backend.Lock + t = restic.LockFile default: - return restic.Fatal("invalid type") + return errors.Fatal("invalid type") } for id := range repo.List(t, nil) { diff --git a/src/cmds/restic/cmd_ls.go b/src/cmds/restic/cmd_ls.go index c55670a93..4e3b29e8a 100644 --- a/src/cmds/restic/cmd_ls.go +++ b/src/cmds/restic/cmd_ls.go @@ -6,7 +6,7 @@ import ( "path/filepath" "restic" - "restic/backend" + "restic/errors" "restic/repository" ) @@ -46,8 +46,8 @@ func (cmd CmdLs) printNode(prefix string, n *restic.Node) string { } } -func (cmd CmdLs) printTree(prefix string, repo *repository.Repository, id backend.ID) error { - tree, err := restic.LoadTree(repo, id) +func (cmd CmdLs) printTree(prefix string, repo *repository.Repository, id restic.ID) error { + tree, err := repo.LoadTree(id) if err != nil { return err } @@ -72,7 +72,7 @@ func (cmd CmdLs) Usage() string { func (cmd CmdLs) Execute(args []string) error { if 
len(args) < 1 || len(args) > 2 { - return restic.Fatalf("wrong number of arguments, Usage: %s", cmd.Usage()) + return errors.Fatalf("wrong number of arguments, Usage: %s", cmd.Usage()) } repo, err := cmd.global.OpenRepository() diff --git a/src/cmds/restic/cmd_mount.go b/src/cmds/restic/cmd_mount.go index 36a2ce997..e25306e31 100644 --- a/src/cmds/restic/cmd_mount.go +++ b/src/cmds/restic/cmd_mount.go @@ -5,9 +5,8 @@ package main import ( "os" - "restic" - "github.com/pkg/errors" + "restic/errors" resticfs "restic/fs" "restic/fuse" @@ -44,7 +43,7 @@ func (cmd CmdMount) Usage() string { func (cmd CmdMount) Execute(args []string) error { if len(args) == 0 { - return restic.Fatalf("wrong number of parameters, Usage: %s", cmd.Usage()) + return errors.Fatalf("wrong number of parameters, Usage: %s", cmd.Usage()) } repo, err := cmd.global.OpenRepository() diff --git a/src/cmds/restic/cmd_prune.go b/src/cmds/restic/cmd_prune.go index eee330131..4fa8ba0d3 100644 --- a/src/cmds/restic/cmd_prune.go +++ b/src/cmds/restic/cmd_prune.go @@ -4,10 +4,9 @@ import ( "fmt" "os" "restic" - "restic/backend" "restic/debug" + "restic/errors" "restic/index" - "restic/pack" "restic/repository" "time" @@ -94,7 +93,7 @@ func (cmd CmdPrune) Execute(args []string) error { } cmd.global.Verbosef("counting files in repo\n") - for _ = range repo.List(backend.Data, done) { + for _ = range repo.List(restic.DataFile, done) { stats.packs++ } @@ -112,7 +111,7 @@ func (cmd CmdPrune) Execute(args []string) error { cmd.global.Verbosef("repository contains %v packs (%v blobs) with %v bytes\n", len(idx.Packs), len(idx.Blobs), formatBytes(uint64(stats.bytes))) - blobCount := make(map[pack.Handle]int) + blobCount := make(map[restic.BlobHandle]int) duplicateBlobs := 0 duplicateBytes := 0 @@ -120,7 +119,7 @@ func (cmd CmdPrune) Execute(args []string) error { for _, p := range idx.Packs { for _, entry := range p.Entries { stats.blobs++ - h := pack.Handle{ID: entry.ID, Type: entry.Type} + h := 
restic.BlobHandle{ID: entry.ID, Type: entry.Type} blobCount[h]++ if blobCount[h] > 1 { @@ -144,8 +143,8 @@ func (cmd CmdPrune) Execute(args []string) error { cmd.global.Verbosef("find data that is still in use for %d snapshots\n", stats.snapshots) - usedBlobs := pack.NewBlobSet() - seenBlobs := pack.NewBlobSet() + usedBlobs := restic.NewBlobSet() + seenBlobs := restic.NewBlobSet() bar = newProgressMax(cmd.global.ShowProgress(), uint64(len(snapshots)), "snapshots") bar.Start() @@ -165,7 +164,7 @@ func (cmd CmdPrune) Execute(args []string) error { cmd.global.Verbosef("found %d of %d data blobs still in use\n", len(usedBlobs), stats.blobs) // find packs that need a rewrite - rewritePacks := backend.NewIDSet() + rewritePacks := restic.NewIDSet() for h, blob := range idx.Blobs { if !usedBlobs.Has(h) { rewritePacks.Merge(blob.Packs) @@ -178,11 +177,11 @@ func (cmd CmdPrune) Execute(args []string) error { } // find packs that are unneeded - removePacks := backend.NewIDSet() + removePacks := restic.NewIDSet() nextPack: for packID, p := range idx.Packs { for _, blob := range p.Entries { - h := pack.Handle{ID: blob.ID, Type: blob.Type} + h := restic.BlobHandle{ID: blob.ID, Type: blob.Type} if usedBlobs.Has(h) { continue nextPack } @@ -191,7 +190,7 @@ nextPack: removePacks.Insert(packID) if !rewritePacks.Has(packID) { - return restic.Fatalf("pack %v is unneeded, but not contained in rewritePacks", packID.Str()) + return errors.Fatalf("pack %v is unneeded, but not contained in rewritePacks", packID.Str()) } rewritePacks.Delete(packID) @@ -205,7 +204,7 @@ nextPack: } for packID := range removePacks { - err = repo.Backend().Remove(backend.Data, packID.String()) + err = repo.Backend().Remove(restic.DataFile, packID.String()) if err != nil { cmd.global.Warnf("unable to remove file %v from the repository\n", packID.Str()) } @@ -214,7 +213,7 @@ nextPack: cmd.global.Verbosef("creating new index\n") stats.packs = 0 - for _ = range repo.List(backend.Data, done) { + for _ = range 
repo.List(restic.DataFile, done) { stats.packs++ } bar = newProgressMax(cmd.global.ShowProgress(), uint64(stats.packs), "packs") @@ -223,9 +222,9 @@ nextPack: return err } - var supersedes backend.IDs - for idxID := range repo.List(backend.Index, done) { - err := repo.Backend().Remove(backend.Index, idxID.String()) + var supersedes restic.IDs + for idxID := range repo.List(restic.IndexFile, done) { + err := repo.Backend().Remove(restic.IndexFile, idxID.String()) if err != nil { fmt.Fprintf(os.Stderr, "unable to remove index %v: %v\n", idxID.Str(), err) } diff --git a/src/cmds/restic/cmd_restore.go b/src/cmds/restic/cmd_restore.go index 9ff57565d..88099b677 100644 --- a/src/cmds/restic/cmd_restore.go +++ b/src/cmds/restic/cmd_restore.go @@ -2,8 +2,8 @@ package main import ( "restic" - "restic/backend" "restic/debug" + "restic/errors" "restic/filter" ) @@ -33,15 +33,15 @@ func (cmd CmdRestore) Usage() string { func (cmd CmdRestore) Execute(args []string) error { if len(args) != 1 { - return restic.Fatalf("wrong number of arguments, Usage: %s", cmd.Usage()) + return errors.Fatalf("wrong number of arguments, Usage: %s", cmd.Usage()) } if cmd.Target == "" { - return restic.Fatal("please specify a directory to restore to (--target)") + return errors.Fatal("please specify a directory to restore to (--target)") } if len(cmd.Exclude) > 0 && len(cmd.Include) > 0 { - return restic.Fatal("exclude and include patterns are mutually exclusive") + return errors.Fatal("exclude and include patterns are mutually exclusive") } snapshotIDString := args[0] @@ -66,7 +66,7 @@ func (cmd CmdRestore) Execute(args []string) error { return err } - var id backend.ID + var id restic.ID if snapshotIDString == "latest" { id, err = restic.FindLatestSnapshot(repo, cmd.Paths, cmd.Host) diff --git a/src/cmds/restic/cmd_snapshots.go b/src/cmds/restic/cmd_snapshots.go index ccf889d23..d7bc4e65d 100644 --- a/src/cmds/restic/cmd_snapshots.go +++ b/src/cmds/restic/cmd_snapshots.go @@ -5,11 +5,11 @@ import 
( "fmt" "io" "os" + "restic/errors" "sort" "strings" "restic" - "restic/backend" ) type Table struct { @@ -70,7 +70,7 @@ func (cmd CmdSnapshots) Usage() string { func (cmd CmdSnapshots) Execute(args []string) error { if len(args) != 0 { - return restic.Fatalf("wrong number of arguments, usage: %s", cmd.Usage()) + return errors.Fatalf("wrong number of arguments, usage: %s", cmd.Usage()) } repo, err := cmd.global.OpenRepository() @@ -92,7 +92,7 @@ func (cmd CmdSnapshots) Execute(args []string) error { defer close(done) list := []*restic.Snapshot{} - for id := range repo.List(backend.Snapshot, done) { + for id := range repo.List(restic.SnapshotFile, done) { sn, err := restic.LoadSnapshot(repo, id) if err != nil { fmt.Fprintf(os.Stderr, "error loading snapshot %s: %v\n", id, err) @@ -115,7 +115,7 @@ func (cmd CmdSnapshots) Execute(args []string) error { } - plen, err := repo.PrefixLength(backend.Snapshot) + plen, err := repo.PrefixLength(restic.SnapshotFile) if err != nil { return err } diff --git a/src/cmds/restic/global.go b/src/cmds/restic/global.go index 2aedb026e..ee4255f7b 100644 --- a/src/cmds/restic/global.go +++ b/src/cmds/restic/global.go @@ -9,7 +9,6 @@ import ( "strings" "syscall" - "restic/backend" "restic/backend/local" "restic/backend/rest" "restic/backend/s3" @@ -18,8 +17,9 @@ import ( "restic/location" "restic/repository" + "restic/errors" + "github.com/jessevdk/go-flags" - "github.com/pkg/errors" "golang.org/x/crypto/ssh/terminal" ) @@ -247,7 +247,7 @@ const maxKeys = 20 // OpenRepository reads the password and opens the repository. 
func (o GlobalOptions) OpenRepository() (*repository.Repository, error) { if o.Repo == "" { - return nil, restic.Fatal("Please specify repository location (-r)") + return nil, errors.Fatal("Please specify repository location (-r)") } be, err := open(o.Repo) @@ -263,14 +263,14 @@ func (o GlobalOptions) OpenRepository() (*repository.Repository, error) { err = s.SearchKey(o.password, maxKeys) if err != nil { - return nil, restic.Fatalf("unable to open repo: %v", err) + return nil, errors.Fatalf("unable to open repo: %v", err) } return s, nil } // Open the backend specified by a location config. -func open(s string) (backend.Backend, error) { +func open(s string) (restic.Backend, error) { debug.Log("open", "parsing location %v", s) loc, err := location.Parse(s) if err != nil { @@ -301,11 +301,11 @@ func open(s string) (backend.Backend, error) { } debug.Log("open", "invalid repository location: %v", s) - return nil, restic.Fatalf("invalid scheme %q", loc.Scheme) + return nil, errors.Fatalf("invalid scheme %q", loc.Scheme) } // Create the backend specified by URI. -func create(s string) (backend.Backend, error) { +func create(s string) (restic.Backend, error) { debug.Log("open", "parsing location %v", s) loc, err := location.Parse(s) if err != nil { @@ -336,5 +336,5 @@ func create(s string) (backend.Backend, error) { } debug.Log("open", "invalid repository scheme: %v", s) - return nil, restic.Fatalf("invalid scheme %q", loc.Scheme) + return nil, errors.Fatalf("invalid scheme %q", loc.Scheme) } diff --git a/src/cmds/restic/integration_fuse_test.go b/src/cmds/restic/integration_fuse_test.go index 25f4b7e84..a106d035d 100644 --- a/src/cmds/restic/integration_fuse_test.go +++ b/src/cmds/restic/integration_fuse_test.go @@ -10,10 +10,9 @@ import ( "testing" "time" - "github.com/pkg/errors" + "restic/errors" "restic" - "restic/backend" "restic/repository" . 
"restic/test" ) @@ -51,7 +50,7 @@ func waitForMount(dir string) error { time.Sleep(mountSleep) } - return restic.Fatalf("subdir %q of dir %s never appeared", mountTestSubdir, dir) + return errors.Fatalf("subdir %q of dir %s never appeared", mountTestSubdir, dir) } func cmdMount(t testing.TB, global GlobalOptions, dir string, ready, done chan struct{}) { @@ -71,7 +70,7 @@ func TestMount(t *testing.T) { t.Skip("Skipping fuse tests") } - checkSnapshots := func(repo *repository.Repository, mountpoint string, snapshotIDs []backend.ID) { + checkSnapshots := func(repo *repository.Repository, mountpoint string, snapshotIDs []restic.ID) { snapshotsDir, err := os.Open(filepath.Join(mountpoint, "snapshots")) OK(t, err) namesInSnapshots, err := snapshotsDir.Readdirnames(-1) @@ -123,7 +122,7 @@ func TestMount(t *testing.T) { Assert(t, len(names) == 1 && names[0] == "snapshots", `The fuse virtual directory "snapshots" doesn't exist`) OK(t, mountpointDir.Close()) - checkSnapshots(repo, mountpoint, []backend.ID{}) + checkSnapshots(repo, mountpoint, []restic.ID{}) datafile := filepath.Join("testdata", "backup-data.tar.gz") fd, err := os.Open(datafile) diff --git a/src/cmds/restic/integration_helpers_test.go b/src/cmds/restic/integration_helpers_test.go index d6615f6b0..15111d688 100644 --- a/src/cmds/restic/integration_helpers_test.go +++ b/src/cmds/restic/integration_helpers_test.go @@ -8,6 +8,7 @@ import ( "runtime" "testing" + "restic/repository" . 
"restic/test" ) @@ -193,6 +194,8 @@ func withTestEnvironment(t testing.TB, f func(*testEnvironment, GlobalOptions)) t.Skip("integration tests disabled") } + repository.TestUseLowSecurityKDFParameters(t) + tempdir, err := ioutil.TempDir(TestTempDir, "restic-test-") OK(t, err) diff --git a/src/cmds/restic/integration_test.go b/src/cmds/restic/integration_test.go index fff765bef..0dd6c6165 100644 --- a/src/cmds/restic/integration_test.go +++ b/src/cmds/restic/integration_test.go @@ -16,21 +16,20 @@ import ( "testing" "time" - "github.com/pkg/errors" + "restic/errors" - "restic/backend" "restic/debug" "restic/filter" "restic/repository" . "restic/test" ) -func parseIDsFromReader(t testing.TB, rd io.Reader) backend.IDs { - IDs := backend.IDs{} +func parseIDsFromReader(t testing.TB, rd io.Reader) restic.IDs { + IDs := restic.IDs{} sc := bufio.NewScanner(rd) for sc.Scan() { - id, err := backend.ParseID(sc.Text()) + id, err := restic.ParseID(sc.Text()) if err != nil { t.Logf("parse id %v: %v", sc.Text(), err) continue @@ -44,6 +43,7 @@ func parseIDsFromReader(t testing.TB, rd io.Reader) backend.IDs { func cmdInit(t testing.TB, global GlobalOptions) { repository.TestUseLowSecurityKDFParameters(t) + restic.TestSetLockTimeout(t, 0) cmd := &CmdInit{global: &global} OK(t, cmd.Execute(nil)) @@ -51,11 +51,11 @@ func cmdInit(t testing.TB, global GlobalOptions) { t.Logf("repository initialized at %v", global.Repo) } -func cmdBackup(t testing.TB, global GlobalOptions, target []string, parentID *backend.ID) { +func cmdBackup(t testing.TB, global GlobalOptions, target []string, parentID *restic.ID) { cmdBackupExcludes(t, global, target, parentID, nil) } -func cmdBackupExcludes(t testing.TB, global GlobalOptions, target []string, parentID *backend.ID, excludes []string) { +func cmdBackupExcludes(t testing.TB, global GlobalOptions, target []string, parentID *restic.ID, excludes []string) { cmd := &CmdBackup{global: &global, Excludes: excludes} if parentID != nil { cmd.Parent = 
parentID.String() @@ -66,19 +66,19 @@ func cmdBackupExcludes(t testing.TB, global GlobalOptions, target []string, pare OK(t, cmd.Execute(target)) } -func cmdList(t testing.TB, global GlobalOptions, tpe string) backend.IDs { +func cmdList(t testing.TB, global GlobalOptions, tpe string) restic.IDs { cmd := &CmdList{global: &global} return executeAndParseIDs(t, cmd, tpe) } -func executeAndParseIDs(t testing.TB, cmd *CmdList, args ...string) backend.IDs { +func executeAndParseIDs(t testing.TB, cmd *CmdList, args ...string) restic.IDs { buf := bytes.NewBuffer(nil) cmd.global.stdout = buf OK(t, cmd.Execute(args)) return parseIDsFromReader(t, buf) } -func cmdRestore(t testing.TB, global GlobalOptions, dir string, snapshotID backend.ID) { +func cmdRestore(t testing.TB, global GlobalOptions, dir string, snapshotID restic.ID) { cmdRestoreExcludes(t, global, dir, snapshotID, nil) } @@ -87,12 +87,12 @@ func cmdRestoreLatest(t testing.TB, global GlobalOptions, dir string, paths []st OK(t, cmd.Execute([]string{"latest"})) } -func cmdRestoreExcludes(t testing.TB, global GlobalOptions, dir string, snapshotID backend.ID, excludes []string) { +func cmdRestoreExcludes(t testing.TB, global GlobalOptions, dir string, snapshotID restic.ID, excludes []string) { cmd := &CmdRestore{global: &global, Target: dir, Exclude: excludes} OK(t, cmd.Execute([]string{snapshotID.String()})) } -func cmdRestoreIncludes(t testing.TB, global GlobalOptions, dir string, snapshotID backend.ID, includes []string) { +func cmdRestoreIncludes(t testing.TB, global GlobalOptions, dir string, snapshotID restic.ID, includes []string) { cmd := &CmdRestore{global: &global, Target: dir, Include: includes} OK(t, cmd.Execute([]string{snapshotID.String()})) } @@ -582,7 +582,7 @@ func testFileSize(filename string, size int64) error { } if fi.Size() != size { - return restic.Fatalf("wrong file size for %v: expected %v, got %v", filename, size, fi.Size()) + return errors.Fatalf("wrong file size for %v: expected %v, got %v", 
filename, size, fi.Size()) } return nil @@ -811,11 +811,11 @@ func TestRebuildIndexAlwaysFull(t *testing.T) { var optimizeTests = []struct { testFilename string - snapshots backend.IDSet + snapshots restic.IDSet }{ { filepath.Join("..", "..", "restic", "checker", "testdata", "checker-test-repo.tar.gz"), - backend.NewIDSet(ParseID("a13c11e582b77a693dd75ab4e3a3ba96538a056594a4b9076e4cacebe6e06d43")), + restic.NewIDSet(restic.TestParseID("a13c11e582b77a693dd75ab4e3a3ba96538a056594a4b9076e4cacebe6e06d43")), }, { filepath.Join("testdata", "old-index-repo.tar.gz"), @@ -823,9 +823,9 @@ var optimizeTests = []struct { }, { filepath.Join("testdata", "old-index-repo.tar.gz"), - backend.NewIDSet( - ParseID("f7d83db709977178c9d1a09e4009355e534cde1a135b8186b8b118a3fc4fcd41"), - ParseID("51d249d28815200d59e4be7b3f21a157b864dc343353df9d8e498220c2499b02"), + restic.NewIDSet( + restic.TestParseID("f7d83db709977178c9d1a09e4009355e534cde1a135b8186b8b118a3fc4fcd41"), + restic.TestParseID("51d249d28815200d59e4be7b3f21a157b864dc343353df9d8e498220c2499b02"), ), }, } diff --git a/src/cmds/restic/main.go b/src/cmds/restic/main.go index 5ad0ab128..6477c1e62 100644 --- a/src/cmds/restic/main.go +++ b/src/cmds/restic/main.go @@ -7,8 +7,9 @@ import ( "restic/debug" "runtime" + "restic/errors" + "github.com/jessevdk/go-flags" - "github.com/pkg/errors" ) func init() { @@ -42,7 +43,7 @@ func main() { switch { case restic.IsAlreadyLocked(errors.Cause(err)): fmt.Fprintf(os.Stderr, "%v\nthe `unlock` command can be used to remove stale locks\n", err) - case restic.IsFatal(errors.Cause(err)): + case errors.IsFatal(errors.Cause(err)): fmt.Fprintf(os.Stderr, "%v\n", err) case err != nil: fmt.Fprintf(os.Stderr, "%+v\n", err) diff --git a/src/restic/archive_reader.go b/src/restic/archive_reader.go deleted file mode 100644 index 02b630b1d..000000000 --- a/src/restic/archive_reader.go +++ /dev/null @@ -1,123 +0,0 @@ -package restic - -import ( - "encoding/json" - "io" - "restic/backend" - "restic/debug" - 
"restic/pack" - "restic/repository" - "time" - - "github.com/pkg/errors" - "github.com/restic/chunker" -) - -// saveTreeJSON stores a tree in the repository. -func saveTreeJSON(repo *repository.Repository, item interface{}) (backend.ID, error) { - data, err := json.Marshal(item) - if err != nil { - return backend.ID{}, errors.Wrap(err, "") - } - data = append(data, '\n') - - // check if tree has been saved before - id := backend.Hash(data) - if repo.Index().Has(id, pack.Tree) { - return id, nil - } - - return repo.SaveJSON(pack.Tree, item) -} - -// ArchiveReader reads from the reader and archives the data. Returned is the -// resulting snapshot and its ID. -func ArchiveReader(repo *repository.Repository, p *Progress, rd io.Reader, name string) (*Snapshot, backend.ID, error) { - debug.Log("ArchiveReader", "start archiving %s", name) - sn, err := NewSnapshot([]string{name}) - if err != nil { - return nil, backend.ID{}, err - } - - p.Start() - defer p.Done() - - chnker := chunker.New(rd, repo.Config.ChunkerPolynomial) - - var ids backend.IDs - var fileSize uint64 - - for { - chunk, err := chnker.Next(getBuf()) - if errors.Cause(err) == io.EOF { - break - } - - if err != nil { - return nil, backend.ID{}, errors.Wrap(err, "chunker.Next()") - } - - id := backend.Hash(chunk.Data) - - if !repo.Index().Has(id, pack.Data) { - _, err := repo.SaveAndEncrypt(pack.Data, chunk.Data, nil) - if err != nil { - return nil, backend.ID{}, err - } - debug.Log("ArchiveReader", "saved blob %v (%d bytes)\n", id.Str(), chunk.Length) - } else { - debug.Log("ArchiveReader", "blob %v already saved in the repo\n", id.Str()) - } - - freeBuf(chunk.Data) - - ids = append(ids, id) - - p.Report(Stat{Bytes: uint64(chunk.Length)}) - fileSize += uint64(chunk.Length) - } - - tree := &Tree{ - Nodes: []*Node{ - &Node{ - Name: name, - AccessTime: time.Now(), - ModTime: time.Now(), - Type: "file", - Mode: 0644, - Size: fileSize, - UID: sn.UID, - GID: sn.GID, - User: sn.Username, - Content: ids, - }, - }, - 
} - - treeID, err := saveTreeJSON(repo, tree) - if err != nil { - return nil, backend.ID{}, err - } - sn.Tree = &treeID - debug.Log("ArchiveReader", "tree saved as %v", treeID.Str()) - - id, err := repo.SaveJSONUnpacked(backend.Snapshot, sn) - if err != nil { - return nil, backend.ID{}, err - } - - sn.id = &id - debug.Log("ArchiveReader", "snapshot saved as %v", id.Str()) - - err = repo.Flush() - if err != nil { - return nil, backend.ID{}, err - } - - err = repo.SaveIndex() - if err != nil { - return nil, backend.ID{}, err - } - - return sn, id, nil -} diff --git a/src/restic/archiver/archive_reader.go b/src/restic/archiver/archive_reader.go new file mode 100644 index 000000000..fb3803e34 --- /dev/null +++ b/src/restic/archiver/archive_reader.go @@ -0,0 +1,103 @@ +package archiver + +import ( + "io" + "restic" + "restic/debug" + "time" + + "restic/errors" + + "github.com/restic/chunker" +) + +// ArchiveReader reads from the reader and archives the data. Returned is the +// resulting snapshot and its ID. 
+func ArchiveReader(repo restic.Repository, p *restic.Progress, rd io.Reader, name string) (*restic.Snapshot, restic.ID, error) { + debug.Log("ArchiveReader", "start archiving %s", name) + sn, err := restic.NewSnapshot([]string{name}) + if err != nil { + return nil, restic.ID{}, err + } + + p.Start() + defer p.Done() + + chnker := chunker.New(rd, repo.Config().ChunkerPolynomial) + + var ids restic.IDs + var fileSize uint64 + + for { + chunk, err := chnker.Next(getBuf()) + if errors.Cause(err) == io.EOF { + break + } + + if err != nil { + return nil, restic.ID{}, errors.Wrap(err, "chunker.Next()") + } + + id := restic.Hash(chunk.Data) + + if !repo.Index().Has(id, restic.DataBlob) { + _, err := repo.SaveBlob(restic.DataBlob, chunk.Data, id) + if err != nil { + return nil, restic.ID{}, err + } + debug.Log("ArchiveReader", "saved blob %v (%d bytes)\n", id.Str(), chunk.Length) + } else { + debug.Log("ArchiveReader", "blob %v already saved in the repo\n", id.Str()) + } + + freeBuf(chunk.Data) + + ids = append(ids, id) + + p.Report(restic.Stat{Bytes: uint64(chunk.Length)}) + fileSize += uint64(chunk.Length) + } + + tree := &restic.Tree{ + Nodes: []*restic.Node{ + &restic.Node{ + Name: name, + AccessTime: time.Now(), + ModTime: time.Now(), + Type: "file", + Mode: 0644, + Size: fileSize, + UID: sn.UID, + GID: sn.GID, + User: sn.Username, + Content: ids, + }, + }, + } + + treeID, err := repo.SaveTree(tree) + if err != nil { + return nil, restic.ID{}, err + } + sn.Tree = &treeID + debug.Log("ArchiveReader", "tree saved as %v", treeID.Str()) + + id, err := repo.SaveJSONUnpacked(restic.SnapshotFile, sn) + if err != nil { + return nil, restic.ID{}, err + } + + debug.Log("ArchiveReader", "snapshot saved as %v", id.Str()) + + err = repo.Flush() + if err != nil { + return nil, restic.ID{}, err + } + + err = repo.SaveIndex() + if err != nil { + return nil, restic.ID{}, err + } + + return sn, id, nil +} diff --git a/src/restic/archive_reader_test.go 
b/src/restic/archiver/archive_reader_test.go similarity index 68% rename from src/restic/archive_reader_test.go rename to src/restic/archiver/archive_reader_test.go index 2d5b705db..86c8a4ca8 100644 --- a/src/restic/archive_reader_test.go +++ b/src/restic/archiver/archive_reader_test.go @@ -1,28 +1,25 @@ -package restic +package archiver import ( "bytes" "io" "math/rand" - "restic/backend" - "restic/pack" + "restic" "restic/repository" "testing" - - "github.com/restic/chunker" ) -func loadBlob(t *testing.T, repo *repository.Repository, id backend.ID, buf []byte) []byte { - buf, err := repo.LoadBlob(id, pack.Data, buf) +func loadBlob(t *testing.T, repo restic.Repository, id restic.ID, buf []byte) int { + n, err := repo.LoadBlob(restic.DataBlob, id, buf) if err != nil { t.Fatalf("LoadBlob(%v) returned error %v", id, err) } - return buf + return n } -func checkSavedFile(t *testing.T, repo *repository.Repository, treeID backend.ID, name string, rd io.Reader) { - tree, err := LoadTree(repo, treeID) +func checkSavedFile(t *testing.T, repo restic.Repository, treeID restic.ID, name string, rd io.Reader) { + tree, err := repo.LoadTree(treeID) if err != nil { t.Fatalf("LoadTree() returned error %v", err) } @@ -41,12 +38,19 @@ func checkSavedFile(t *testing.T, repo *repository.Repository, treeID backend.ID } // check blobs - buf := make([]byte, chunker.MaxSize) - buf2 := make([]byte, chunker.MaxSize) for i, id := range node.Content { - buf = loadBlob(t, repo, id, buf) + size, err := repo.LookupBlobSize(id, restic.DataBlob) + if err != nil { + t.Fatal(err) + } - buf2 = buf2[:len(buf)] + buf := make([]byte, int(size)) + n := loadBlob(t, repo, id, buf) + if n != len(buf) { + t.Errorf("wrong number of bytes read, want %d, got %d", len(buf), n) + } + + buf2 := make([]byte, int(size)) _, err = io.ReadFull(rd, buf2) if err != nil { t.Fatal(err) @@ -58,6 +62,11 @@ func checkSavedFile(t *testing.T, repo *repository.Repository, treeID backend.ID } } +// fakeFile returns a reader which 
yields deterministic pseudo-random data. +func fakeFile(t testing.TB, seed, size int64) io.Reader { + return io.LimitReader(restic.NewRandReader(rand.New(rand.NewSource(seed))), size) +} + func TestArchiveReader(t *testing.T) { repo, cleanup := repository.TestRepository(t) defer cleanup() diff --git a/src/restic/archiver.go b/src/restic/archiver/archiver.go similarity index 78% rename from src/restic/archiver.go rename to src/restic/archiver/archiver.go index 5f72633a2..3be272adc 100644 --- a/src/restic/archiver.go +++ b/src/restic/archiver/archiver.go @@ -1,4 +1,4 @@ -package restic +package archiver import ( "encoding/json" @@ -6,18 +6,17 @@ import ( "io" "os" "path/filepath" + "restic" "sort" "sync" "time" - "github.com/pkg/errors" + "restic/errors" + "restic/walk" - "restic/backend" "restic/debug" "restic/fs" - "restic/pack" "restic/pipe" - "restic/repository" "github.com/restic/chunker" ) @@ -32,9 +31,9 @@ var archiverAllowAllFiles = func(string, os.FileInfo) bool { return true } // Archiver is used to backup a set of directories. type Archiver struct { - repo *repository.Repository + repo restic.Repository knownBlobs struct { - backend.IDSet + restic.IDSet sync.Mutex } @@ -45,16 +44,16 @@ type Archiver struct { Excludes []string } -// NewArchiver returns a new archiver. -func NewArchiver(repo *repository.Repository) *Archiver { +// New returns a new archiver. +func New(repo restic.Repository) *Archiver { arch := &Archiver{ repo: repo, blobToken: make(chan struct{}, maxConcurrentBlobs), knownBlobs: struct { - backend.IDSet + restic.IDSet sync.Mutex }{ - IDSet: backend.NewIDSet(), + IDSet: restic.NewIDSet(), }, } @@ -72,7 +71,7 @@ func NewArchiver(repo *repository.Repository) *Archiver { // When the blob is not known, false is returned and the blob is added to the // list. This means that the caller false is returned to is responsible to save // the blob to the backend. 
-func (arch *Archiver) isKnownBlob(id backend.ID, t pack.BlobType) bool { +func (arch *Archiver) isKnownBlob(id restic.ID, t restic.BlobType) bool { arch.knownBlobs.Lock() defer arch.knownBlobs.Unlock() @@ -91,15 +90,15 @@ func (arch *Archiver) isKnownBlob(id backend.ID, t pack.BlobType) bool { } // Save stores a blob read from rd in the repository. -func (arch *Archiver) Save(t pack.BlobType, data []byte, id backend.ID) error { +func (arch *Archiver) Save(t restic.BlobType, data []byte, id restic.ID) error { debug.Log("Archiver.Save", "Save(%v, %v)\n", t, id.Str()) - if arch.isKnownBlob(id, pack.Data) { + if arch.isKnownBlob(id, restic.DataBlob) { debug.Log("Archiver.Save", "blob %v is known\n", id.Str()) return nil } - _, err := arch.repo.SaveAndEncrypt(t, data, &id) + _, err := arch.repo.SaveBlob(t, data, id) if err != nil { debug.Log("Archiver.Save", "Save(%v, %v): error %v\n", t, id.Str(), err) return err @@ -110,40 +109,40 @@ func (arch *Archiver) Save(t pack.BlobType, data []byte, id backend.ID) error { } // SaveTreeJSON stores a tree in the repository. 
-func (arch *Archiver) SaveTreeJSON(item interface{}) (backend.ID, error) { +func (arch *Archiver) SaveTreeJSON(item interface{}) (restic.ID, error) { data, err := json.Marshal(item) if err != nil { - return backend.ID{}, errors.Wrap(err, "Marshal") + return restic.ID{}, errors.Wrap(err, "Marshal") } data = append(data, '\n') // check if tree has been saved before - id := backend.Hash(data) - if arch.isKnownBlob(id, pack.Tree) { + id := restic.Hash(data) + if arch.isKnownBlob(id, restic.TreeBlob) { return id, nil } - return arch.repo.SaveJSON(pack.Tree, item) + return arch.repo.SaveBlob(restic.TreeBlob, data, id) } -func (arch *Archiver) reloadFileIfChanged(node *Node, file fs.File) (*Node, error) { +func (arch *Archiver) reloadFileIfChanged(node *restic.Node, file fs.File) (*restic.Node, error) { fi, err := file.Stat() if err != nil { - return nil, errors.Wrap(err, "Stat") + return nil, errors.Wrap(err, "restic.Stat") } if fi.ModTime() == node.ModTime { return node, nil } - err = arch.Error(node.path, fi, errors.New("file has changed")) + err = arch.Error(node.Path, fi, errors.New("file has changed")) if err != nil { return nil, err } - node, err = NodeFromFileInfo(node.path, fi) + node, err = restic.NodeFromFileInfo(node.Path, fi) if err != nil { - debug.Log("Archiver.SaveFile", "NodeFromFileInfo returned error for %v: %v", node.path, err) + debug.Log("Archiver.SaveFile", "restic.NodeFromFileInfo returned error for %v: %v", node.Path, err) return nil, err } @@ -151,21 +150,21 @@ func (arch *Archiver) reloadFileIfChanged(node *Node, file fs.File) (*Node, erro } type saveResult struct { - id backend.ID + id restic.ID bytes uint64 } -func (arch *Archiver) saveChunk(chunk chunker.Chunk, p *Progress, token struct{}, file fs.File, resultChannel chan<- saveResult) { +func (arch *Archiver) saveChunk(chunk chunker.Chunk, p *restic.Progress, token struct{}, file fs.File, resultChannel chan<- saveResult) { defer freeBuf(chunk.Data) - id := backend.Hash(chunk.Data) - err := 
arch.Save(pack.Data, chunk.Data, id) + id := restic.Hash(chunk.Data) + err := arch.Save(restic.DataBlob, chunk.Data, id) // TODO handle error if err != nil { panic(err) } - p.Report(Stat{Bytes: uint64(chunk.Length)}) + p.Report(restic.Stat{Bytes: uint64(chunk.Length)}) arch.blobToken <- token resultChannel <- saveResult{id: id, bytes: uint64(chunk.Length)} } @@ -184,11 +183,11 @@ func waitForResults(resultChannels [](<-chan saveResult)) ([]saveResult, error) return results, nil } -func updateNodeContent(node *Node, results []saveResult) error { - debug.Log("Archiver.Save", "checking size for file %s", node.path) +func updateNodeContent(node *restic.Node, results []saveResult) error { + debug.Log("Archiver.Save", "checking size for file %s", node.Path) var bytes uint64 - node.Content = make([]backend.ID, len(results)) + node.Content = make([]restic.ID, len(results)) for i, b := range results { node.Content[i] = b.id @@ -198,18 +197,18 @@ func updateNodeContent(node *Node, results []saveResult) error { } if bytes != node.Size { - return errors.Errorf("errors saving node %q: saved %d bytes, wanted %d bytes", node.path, bytes, node.Size) + return errors.Errorf("errors saving node %q: saved %d bytes, wanted %d bytes", node.Path, bytes, node.Size) } - debug.Log("Archiver.SaveFile", "SaveFile(%q): %v blobs\n", node.path, len(results)) + debug.Log("Archiver.SaveFile", "SaveFile(%q): %v blobs\n", node.Path, len(results)) return nil } // SaveFile stores the content of the file on the backend as a Blob by calling // Save for each chunk. 
-func (arch *Archiver) SaveFile(p *Progress, node *Node) error { - file, err := fs.Open(node.path) +func (arch *Archiver) SaveFile(p *restic.Progress, node *restic.Node) error { + file, err := fs.Open(node.Path) defer file.Close() if err != nil { return errors.Wrap(err, "Open") @@ -220,7 +219,7 @@ func (arch *Archiver) SaveFile(p *Progress, node *Node) error { return err } - chnker := chunker.New(file, arch.repo.Config.ChunkerPolynomial) + chnker := chunker.New(file, arch.repo.Config().ChunkerPolynomial) resultChannels := [](<-chan saveResult){} for { @@ -247,7 +246,7 @@ func (arch *Archiver) SaveFile(p *Progress, node *Node) error { return err } -func (arch *Archiver) fileWorker(wg *sync.WaitGroup, p *Progress, done <-chan struct{}, entCh <-chan pipe.Entry) { +func (arch *Archiver) fileWorker(wg *sync.WaitGroup, p *restic.Progress, done <-chan struct{}, entCh <-chan pipe.Entry) { defer func() { debug.Log("Archiver.fileWorker", "done") wg.Done() @@ -269,16 +268,16 @@ func (arch *Archiver) fileWorker(wg *sync.WaitGroup, p *Progress, done <-chan st fmt.Fprintf(os.Stderr, "error for %v: %v\n", e.Path(), e.Error()) // ignore this file e.Result() <- nil - p.Report(Stat{Errors: 1}) + p.Report(restic.Stat{Errors: 1}) continue } - node, err := NodeFromFileInfo(e.Fullpath(), e.Info()) + node, err := restic.NodeFromFileInfo(e.Fullpath(), e.Info()) if err != nil { // TODO: integrate error reporting - debug.Log("Archiver.fileWorker", "NodeFromFileInfo returned error for %v: %v", node.path, err) + debug.Log("Archiver.fileWorker", "restic.NodeFromFileInfo returned error for %v: %v", node.Path, err) e.Result() <- nil - p.Report(Stat{Errors: 1}) + p.Report(restic.Stat{Errors: 1}) continue } @@ -286,12 +285,12 @@ func (arch *Archiver) fileWorker(wg *sync.WaitGroup, p *Progress, done <-chan st if e.Node != nil { debug.Log("Archiver.fileWorker", " %v use old data", e.Path()) - oldNode := e.Node.(*Node) + oldNode := e.Node.(*restic.Node) // check if all content is still available in 
the repository contentMissing := false - for _, blob := range oldNode.blobs { - if ok, err := arch.repo.Backend().Test(backend.Data, blob.Storage.String()); !ok || err != nil { - debug.Log("Archiver.fileWorker", " %v not using old data, %v (%v) is missing", e.Path(), blob.ID.Str(), blob.Storage.Str()) + for _, blob := range oldNode.Content { + if !arch.repo.Index().Has(blob, restic.DataBlob) { + debug.Log("Archiver.fileWorker", " %v not using old data, %v is missing", e.Path(), blob.Str()) contentMissing = true break } @@ -299,7 +298,6 @@ func (arch *Archiver) fileWorker(wg *sync.WaitGroup, p *Progress, done <-chan st if !contentMissing { node.Content = oldNode.Content - node.blobs = oldNode.blobs debug.Log("Archiver.fileWorker", " %v content is complete", e.Path()) } } else { @@ -312,20 +310,20 @@ func (arch *Archiver) fileWorker(wg *sync.WaitGroup, p *Progress, done <-chan st err = arch.SaveFile(p, node) if err != nil { // TODO: integrate error reporting - fmt.Fprintf(os.Stderr, "error for %v: %v\n", node.path, err) + fmt.Fprintf(os.Stderr, "error for %v: %v\n", node.Path, err) // ignore this file e.Result() <- nil - p.Report(Stat{Errors: 1}) + p.Report(restic.Stat{Errors: 1}) continue } } else { // report old data size - p.Report(Stat{Bytes: node.Size}) + p.Report(restic.Stat{Bytes: node.Size}) } - debug.Log("Archiver.fileWorker", " processed %v, %d/%d blobs", e.Path(), len(node.Content), len(node.blobs)) + debug.Log("Archiver.fileWorker", " processed %v, %d blobs", e.Path(), len(node.Content)) e.Result() <- node - p.Report(Stat{Files: 1}) + p.Report(restic.Stat{Files: 1}) case <-done: // pipeline was cancelled return @@ -333,7 +331,7 @@ func (arch *Archiver) fileWorker(wg *sync.WaitGroup, p *Progress, done <-chan st } } -func (arch *Archiver) dirWorker(wg *sync.WaitGroup, p *Progress, done <-chan struct{}, dirCh <-chan pipe.Dir) { +func (arch *Archiver) dirWorker(wg *sync.WaitGroup, p *restic.Progress, done <-chan struct{}, dirCh <-chan pipe.Dir) { 
debug.Log("Archiver.dirWorker", "start") defer func() { debug.Log("Archiver.dirWorker", "done") @@ -352,11 +350,11 @@ func (arch *Archiver) dirWorker(wg *sync.WaitGroup, p *Progress, done <-chan str if dir.Error() != nil { fmt.Fprintf(os.Stderr, "error walking dir %v: %v\n", dir.Path(), dir.Error()) dir.Result() <- nil - p.Report(Stat{Errors: 1}) + p.Report(restic.Stat{Errors: 1}) continue } - tree := NewTree() + tree := restic.NewTree() // wait for all content for _, ch := range dir.Entries { @@ -371,22 +369,22 @@ func (arch *Archiver) dirWorker(wg *sync.WaitGroup, p *Progress, done <-chan str } // else insert node - node := res.(*Node) + node := res.(*restic.Node) tree.Insert(node) if node.Type == "dir" { - debug.Log("Archiver.dirWorker", "got tree node for %s: %v", node.path, node.Subtree) + debug.Log("Archiver.dirWorker", "got tree node for %s: %v", node.Path, node.Subtree) if node.Subtree.IsNull() { - panic("invalid null subtree ID") + panic("invalid null subtree restic.ID") } } } - node := &Node{} + node := &restic.Node{} if dir.Path() != "" && dir.Info() != nil { - n, err := NodeFromFileInfo(dir.Path(), dir.Info()) + n, err := restic.NodeFromFileInfo(dir.Path(), dir.Info()) if err != nil { n.Error = err.Error() dir.Result() <- n @@ -405,7 +403,7 @@ func (arch *Archiver) dirWorker(wg *sync.WaitGroup, p *Progress, done <-chan str } debug.Log("Archiver.dirWorker", "save tree for %s: %v", dir.Path(), id.Str()) if id.IsNull() { - panic("invalid null subtree ID return from SaveTreeJSON()") + panic("invalid null subtree restic.ID return from SaveTreeJSON()") } node.Subtree = &id @@ -414,7 +412,7 @@ func (arch *Archiver) dirWorker(wg *sync.WaitGroup, p *Progress, done <-chan str dir.Result() <- node if dir.Path() != "" { - p.Report(Stat{Dirs: 1}) + p.Report(restic.Stat{Dirs: 1}) } case <-done: // pipeline was cancelled @@ -424,7 +422,7 @@ func (arch *Archiver) dirWorker(wg *sync.WaitGroup, p *Progress, done <-chan str } type archivePipe struct { - Old <-chan 
WalkTreeJob + Old <-chan walk.TreeJob New <-chan pipe.Job } @@ -459,7 +457,7 @@ func copyJobs(done <-chan struct{}, in <-chan pipe.Job, out chan<- pipe.Job) { type archiveJob struct { hasOld bool - old WalkTreeJob + old walk.TreeJob new pipe.Job } @@ -473,7 +471,7 @@ func (a *archivePipe) compare(done <-chan struct{}, out chan<- pipe.Job) { var ( loadOld, loadNew bool = true, true ok bool - oldJob WalkTreeJob + oldJob walk.TreeJob newJob pipe.Job ) @@ -567,7 +565,7 @@ func (j archiveJob) Copy() pipe.Job { } // if file is newer, return the new job - if j.old.Node.isNewer(j.new.Fullpath(), j.new.Info()) { + if j.old.Node.IsNewer(j.new.Fullpath(), j.new.Info()) { debug.Log("archiveJob.Copy", " job %v is newer", j.new.Path()) return j.new } @@ -632,10 +630,10 @@ func (p baseNameSlice) Len() int { return len(p) } func (p baseNameSlice) Less(i, j int) bool { return filepath.Base(p[i]) < filepath.Base(p[j]) } func (p baseNameSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } -// Snapshot creates a snapshot of the given paths. If parentID is set, this is +// Snapshot creates a snapshot of the given paths. If parentrestic.ID is set, this is // used to compare the files to the ones archived at the time this snapshot was // taken. 
-func (arch *Archiver) Snapshot(p *Progress, paths []string, parentID *backend.ID) (*Snapshot, backend.ID, error) { +func (arch *Archiver) Snapshot(p *restic.Progress, paths []string, parentID *restic.ID) (*restic.Snapshot, restic.ID, error) { paths = unique(paths) sort.Sort(baseNameSlice(paths)) @@ -651,9 +649,9 @@ func (arch *Archiver) Snapshot(p *Progress, paths []string, parentID *backend.ID defer p.Done() // create new snapshot - sn, err := NewSnapshot(paths) + sn, err := restic.NewSnapshot(paths) if err != nil { - return nil, backend.ID{}, err + return nil, restic.ID{}, err } sn.Excludes = arch.Excludes @@ -664,18 +662,18 @@ func (arch *Archiver) Snapshot(p *Progress, paths []string, parentID *backend.ID sn.Parent = parentID // load parent snapshot - parent, err := LoadSnapshot(arch.repo, *parentID) + parent, err := restic.LoadSnapshot(arch.repo, *parentID) if err != nil { - return nil, backend.ID{}, err + return nil, restic.ID{}, err } // start walker on old tree - ch := make(chan WalkTreeJob) - go WalkTree(arch.repo, *parent.Tree, done, ch) + ch := make(chan walk.TreeJob) + go walk.Tree(arch.repo, *parent.Tree, done, ch) jobs.Old = ch } else { // use closed channel - ch := make(chan WalkTreeJob) + ch := make(chan walk.TreeJob) close(ch) jobs.Old = ch } @@ -730,31 +728,29 @@ func (arch *Archiver) Snapshot(p *Progress, paths []string, parentID *backend.ID debug.Log("Archiver.Snapshot", "workers terminated") // receive the top-level tree - root := (<-resCh).(*Node) + root := (<-resCh).(*restic.Node) debug.Log("Archiver.Snapshot", "root node received: %v", root.Subtree.Str()) sn.Tree = root.Subtree // save snapshot - id, err := arch.repo.SaveJSONUnpacked(backend.Snapshot, sn) + id, err := arch.repo.SaveJSONUnpacked(restic.SnapshotFile, sn) if err != nil { - return nil, backend.ID{}, err + return nil, restic.ID{}, err } - // store ID in snapshot struct - sn.id = &id debug.Log("Archiver.Snapshot", "saved snapshot %v", id.Str()) // flush repository err = 
arch.repo.Flush() if err != nil { - return nil, backend.ID{}, err + return nil, restic.ID{}, err } // save index err = arch.repo.SaveIndex() if err != nil { debug.Log("Archiver.Snapshot", "error saving index: %v", err) - return nil, backend.ID{}, err + return nil, restic.ID{}, err } debug.Log("Archiver.Snapshot", "saved indexes") @@ -770,13 +766,13 @@ func isRegularFile(fi os.FileInfo) bool { return fi.Mode()&(os.ModeType|os.ModeCharDevice) == 0 } -// Scan traverses the dirs to collect Stat information while emitting progress +// Scan traverses the dirs to collect restic.Stat information while emitting progress // information with p. -func Scan(dirs []string, filter pipe.SelectFunc, p *Progress) (Stat, error) { +func Scan(dirs []string, filter pipe.SelectFunc, p *restic.Progress) (restic.Stat, error) { p.Start() defer p.Done() - var stat Stat + var stat restic.Stat for _, dir := range dirs { debug.Log("Scan", "Start for %v", dir) @@ -799,7 +795,7 @@ func Scan(dirs []string, filter pipe.SelectFunc, p *Progress) (Stat, error) { return nil } - s := Stat{} + s := restic.Stat{} if fi.IsDir() { s.Dirs++ } else { @@ -819,7 +815,7 @@ func Scan(dirs []string, filter pipe.SelectFunc, p *Progress) (Stat, error) { debug.Log("Scan", "Done for %v, err: %v", dir, err) if err != nil { - return Stat{}, errors.Wrap(err, "fs.Walk") + return restic.Stat{}, errors.Wrap(err, "fs.Walk") } } diff --git a/src/restic/archiver_duplication_test.go b/src/restic/archiver/archiver_duplication_test.go similarity index 68% rename from src/restic/archiver_duplication_test.go rename to src/restic/archiver/archiver_duplication_test.go index 61f7aafb9..aadfc5904 100644 --- a/src/restic/archiver_duplication_test.go +++ b/src/restic/archiver/archiver_duplication_test.go @@ -1,4 +1,4 @@ -package restic_test +package archiver_test import ( "crypto/rand" @@ -8,11 +8,11 @@ import ( "testing" "time" - "github.com/pkg/errors" + "restic/errors" "restic" - "restic/backend" - "restic/pack" + "restic/archiver" + 
"restic/mock" "restic/repository" ) @@ -20,14 +20,14 @@ const parallelSaves = 50 const testSaveIndexTime = 100 * time.Millisecond const testTimeout = 2 * time.Second -var DupID backend.ID +var DupID restic.ID -func randomID() backend.ID { +func randomID() restic.ID { if mrand.Float32() < 0.5 { return DupID } - id := backend.ID{} + id := restic.ID{} _, err := io.ReadFull(rand.Reader, id[:]) if err != nil { panic(err) @@ -36,30 +36,30 @@ func randomID() backend.ID { } // forgetfulBackend returns a backend that forgets everything. -func forgetfulBackend() backend.Backend { - be := &backend.MockBackend{} +func forgetfulBackend() restic.Backend { + be := &mock.Backend{} - be.TestFn = func(t backend.Type, name string) (bool, error) { + be.TestFn = func(t restic.FileType, name string) (bool, error) { return false, nil } - be.LoadFn = func(h backend.Handle, p []byte, off int64) (int, error) { + be.LoadFn = func(h restic.Handle, p []byte, off int64) (int, error) { return 0, errors.New("not found") } - be.SaveFn = func(h backend.Handle, p []byte) error { + be.SaveFn = func(h restic.Handle, p []byte) error { return nil } - be.StatFn = func(h backend.Handle) (backend.BlobInfo, error) { - return backend.BlobInfo{}, errors.New("not found") + be.StatFn = func(h restic.Handle) (restic.FileInfo, error) { + return restic.FileInfo{}, errors.New("not found") } - be.RemoveFn = func(t backend.Type, name string) error { + be.RemoveFn = func(t restic.FileType, name string) error { return nil } - be.ListFn = func(t backend.Type, done <-chan struct{}) <-chan string { + be.ListFn = func(t restic.FileType, done <-chan struct{}) <-chan string { ch := make(chan string) close(ch) return ch @@ -85,7 +85,7 @@ func testArchiverDuplication(t *testing.T) { t.Fatal(err) } - arch := restic.NewArchiver(repo) + arch := archiver.New(repo) wg := &sync.WaitGroup{} done := make(chan struct{}) @@ -102,13 +102,13 @@ func testArchiverDuplication(t *testing.T) { id := randomID() - if repo.Index().Has(id, 
pack.Data) { + if repo.Index().Has(id, restic.DataBlob) { continue } buf := make([]byte, 50) - err := arch.Save(pack.Data, buf, id) + err := arch.Save(restic.DataBlob, buf, id) if err != nil { t.Fatal(err) } diff --git a/src/restic/archiver_int_test.go b/src/restic/archiver/archiver_int_test.go similarity index 94% rename from src/restic/archiver_int_test.go rename to src/restic/archiver/archiver_int_test.go index 9e4426889..c4014f5b0 100644 --- a/src/restic/archiver_int_test.go +++ b/src/restic/archiver/archiver_int_test.go @@ -1,10 +1,11 @@ -package restic +package archiver import ( "os" "testing" "restic/pipe" + "restic/walk" ) var treeJobs = []string{ @@ -82,12 +83,12 @@ func (j testPipeJob) Error() error { return j.err } func (j testPipeJob) Info() os.FileInfo { return j.fi } func (j testPipeJob) Result() chan<- pipe.Result { return j.res } -func testTreeWalker(done <-chan struct{}, out chan<- WalkTreeJob) { +func testTreeWalker(done <-chan struct{}, out chan<- walk.TreeJob) { for _, e := range treeJobs { select { case <-done: return - case out <- WalkTreeJob{Path: e}: + case out <- walk.TreeJob{Path: e}: } } @@ -109,7 +110,7 @@ func testPipeWalker(done <-chan struct{}, out chan<- pipe.Job) { func TestArchivePipe(t *testing.T) { done := make(chan struct{}) - treeCh := make(chan WalkTreeJob) + treeCh := make(chan walk.TreeJob) pipeCh := make(chan pipe.Job) go testTreeWalker(done, treeCh) diff --git a/src/restic/archiver_test.go b/src/restic/archiver/archiver_test.go similarity index 73% rename from src/restic/archiver_test.go rename to src/restic/archiver/archiver_test.go index 47b2210fa..6073b654d 100644 --- a/src/restic/archiver_test.go +++ b/src/restic/archiver/archiver_test.go @@ -1,4 +1,4 @@ -package restic_test +package archiver_test import ( "bytes" @@ -7,14 +7,14 @@ import ( "time" "restic" - "restic/backend" + "restic/archiver" "restic/checker" "restic/crypto" - "restic/pack" "restic/repository" . 
"restic/test" - "github.com/pkg/errors" + "restic/errors" + "github.com/restic/chunker" ) @@ -48,8 +48,8 @@ func benchmarkChunkEncrypt(b testing.TB, buf, buf2 []byte, rd Rdr, key *crypto.K } func BenchmarkChunkEncrypt(b *testing.B) { - repo := SetupRepo() - defer TeardownRepo(repo) + repo, cleanup := repository.TestRepository(b) + defer cleanup() data := Random(23, 10<<20) // 10MiB rd := bytes.NewReader(data) @@ -80,8 +80,8 @@ func benchmarkChunkEncryptP(b *testing.PB, buf []byte, rd Rdr, key *crypto.Key) } func BenchmarkChunkEncryptParallel(b *testing.B) { - repo := SetupRepo() - defer TeardownRepo(repo) + repo, cleanup := repository.TestRepository(b) + defer cleanup() data := Random(23, 10<<20) // 10MiB @@ -99,10 +99,10 @@ func BenchmarkChunkEncryptParallel(b *testing.B) { } func archiveDirectory(b testing.TB) { - repo := SetupRepo() - defer TeardownRepo(repo) + repo, cleanup := repository.TestRepository(b) + defer cleanup() - arch := restic.NewArchiver(repo) + arch := archiver.New(repo) _, id, err := arch.Snapshot(nil, []string{BenchArchiveDirectory}, nil) OK(b, err) @@ -128,9 +128,17 @@ func BenchmarkArchiveDirectory(b *testing.B) { } } +func countPacks(repo restic.Repository, t restic.FileType) (n uint) { + for _ = range repo.Backend().List(t, nil) { + n++ + } + + return n +} + func archiveWithDedup(t testing.TB) { - repo := SetupRepo() - defer TeardownRepo(repo) + repo, cleanup := repository.TestRepository(t) + defer cleanup() if BenchArchiveDirectory == "" { t.Skip("benchdir not set, skipping TestArchiverDedup") @@ -143,24 +151,24 @@ func archiveWithDedup(t testing.TB) { } // archive a few files - sn := SnapshotDir(t, repo, BenchArchiveDirectory, nil) + sn := archiver.TestSnapshot(t, repo, BenchArchiveDirectory, nil) t.Logf("archived snapshot %v", sn.ID().Str()) // get archive stats - cnt.before.packs = repo.Count(backend.Data) - cnt.before.dataBlobs = repo.Index().Count(pack.Data) - cnt.before.treeBlobs = repo.Index().Count(pack.Tree) + cnt.before.packs = 
countPacks(repo, restic.DataFile) + cnt.before.dataBlobs = repo.Index().Count(restic.DataBlob) + cnt.before.treeBlobs = repo.Index().Count(restic.TreeBlob) t.Logf("packs %v, data blobs %v, tree blobs %v", cnt.before.packs, cnt.before.dataBlobs, cnt.before.treeBlobs) // archive the same files again, without parent snapshot - sn2 := SnapshotDir(t, repo, BenchArchiveDirectory, nil) + sn2 := archiver.TestSnapshot(t, repo, BenchArchiveDirectory, nil) t.Logf("archived snapshot %v", sn2.ID().Str()) // get archive stats again - cnt.after.packs = repo.Count(backend.Data) - cnt.after.dataBlobs = repo.Index().Count(pack.Data) - cnt.after.treeBlobs = repo.Index().Count(pack.Tree) + cnt.after.packs = countPacks(repo, restic.DataFile) + cnt.after.dataBlobs = repo.Index().Count(restic.DataBlob) + cnt.after.treeBlobs = repo.Index().Count(restic.TreeBlob) t.Logf("packs %v, data blobs %v, tree blobs %v", cnt.after.packs, cnt.after.dataBlobs, cnt.after.treeBlobs) @@ -171,13 +179,13 @@ func archiveWithDedup(t testing.TB) { } // archive the same files again, with a parent snapshot - sn3 := SnapshotDir(t, repo, BenchArchiveDirectory, sn2.ID()) + sn3 := archiver.TestSnapshot(t, repo, BenchArchiveDirectory, sn2.ID()) t.Logf("archived snapshot %v, parent %v", sn3.ID().Str(), sn2.ID().Str()) // get archive stats again - cnt.after2.packs = repo.Count(backend.Data) - cnt.after2.dataBlobs = repo.Index().Count(pack.Data) - cnt.after2.treeBlobs = repo.Index().Count(pack.Tree) + cnt.after2.packs = countPacks(repo, restic.DataFile) + cnt.after2.dataBlobs = repo.Index().Count(restic.DataBlob) + cnt.after2.treeBlobs = repo.Index().Count(restic.TreeBlob) t.Logf("packs %v, data blobs %v, tree blobs %v", cnt.after2.packs, cnt.after2.dataBlobs, cnt.after2.treeBlobs) @@ -192,48 +200,6 @@ func TestArchiveDedup(t *testing.T) { archiveWithDedup(t) } -func BenchmarkLoadTree(t *testing.B) { - repo := SetupRepo() - defer TeardownRepo(repo) - - if BenchArchiveDirectory == "" { - t.Skip("benchdir not set, 
skipping TestArchiverDedup") - } - - // archive a few files - arch := restic.NewArchiver(repo) - sn, _, err := arch.Snapshot(nil, []string{BenchArchiveDirectory}, nil) - OK(t, err) - t.Logf("archived snapshot %v", sn.ID()) - - list := make([]backend.ID, 0, 10) - done := make(chan struct{}) - - for _, idx := range repo.Index().All() { - for blob := range idx.Each(done) { - if blob.Type != pack.Tree { - continue - } - - list = append(list, blob.ID) - if len(list) == cap(list) { - close(done) - break - } - } - } - - // start benchmark - t.ResetTimer() - - for i := 0; i < t.N; i++ { - for _, id := range list { - _, err := restic.LoadTree(repo, id) - OK(t, err) - } - } -} - // Saves several identical chunks concurrently and later checks that there are no // unreferenced packs in the repository. See also #292 and #358. func TestParallelSaveWithDuplication(t *testing.T) { @@ -243,13 +209,13 @@ func TestParallelSaveWithDuplication(t *testing.T) { } func testParallelSaveWithDuplication(t *testing.T, seed int) { - repo := SetupRepo() - defer TeardownRepo(repo) + repo, cleanup := repository.TestRepository(t) + defer cleanup() dataSizeMb := 128 duplication := 7 - arch := restic.NewArchiver(repo) + arch := archiver.New(repo) chunks := getRandomData(seed, dataSizeMb*1024*1024) errChannels := [](<-chan error){} @@ -266,9 +232,9 @@ func testParallelSaveWithDuplication(t *testing.T, seed int) { go func(c chunker.Chunk, errChan chan<- error) { barrier <- struct{}{} - id := backend.Hash(c.Data) + id := restic.Hash(c.Data) time.Sleep(time.Duration(id[0])) - err := arch.Save(pack.Data, c.Data, id) + err := arch.Save(restic.DataBlob, c.Data, id) <-barrier errChan <- err }(c, errChan) @@ -302,7 +268,7 @@ func getRandomData(seed int, size int) []chunker.Chunk { return chunks } -func createAndInitChecker(t *testing.T, repo *repository.Repository) *checker.Checker { +func createAndInitChecker(t *testing.T, repo restic.Repository) *checker.Checker { chkr := checker.New(repo) hints, errs := 
chkr.LoadIndex() diff --git a/src/restic/archiver/buffer_pool.go b/src/restic/archiver/buffer_pool.go new file mode 100644 index 000000000..32df5ab7b --- /dev/null +++ b/src/restic/archiver/buffer_pool.go @@ -0,0 +1,21 @@ +package archiver + +import ( + "sync" + + "github.com/restic/chunker" +) + +var bufPool = sync.Pool{ + New: func() interface{} { + return make([]byte, chunker.MinSize) + }, +} + +func getBuf() []byte { + return bufPool.Get().([]byte) +} + +func freeBuf(data []byte) { + bufPool.Put(data) +} diff --git a/src/restic/archiver/testing.go b/src/restic/archiver/testing.go new file mode 100644 index 000000000..b73f09dcd --- /dev/null +++ b/src/restic/archiver/testing.go @@ -0,0 +1,16 @@ +package archiver + +import ( + "restic" + "testing" +) + +// TestSnapshot creates a new snapshot of path. +func TestSnapshot(t testing.TB, repo restic.Repository, path string, parent *restic.ID) *restic.Snapshot { + arch := New(repo) + sn, _, err := arch.Snapshot(nil, []string{path}, parent) + if err != nil { + t.Fatal(err) + } + return sn +} diff --git a/src/restic/backend.go b/src/restic/backend.go new file mode 100644 index 000000000..37a840412 --- /dev/null +++ b/src/restic/backend.go @@ -0,0 +1,38 @@ +package restic + +// Backend is used to store and access data. +type Backend interface { + // Location returns a string that describes the type and location of the + // repository. + Location() string + + // Test a boolean value whether a File with the name and type exists. + Test(t FileType, name string) (bool, error) + + // Remove removes a File with type t and name. + Remove(t FileType, name string) error + + // Close the backend + Close() error + + // Load returns the data stored in the backend for h at the given offset + // and saves it in p. Load has the same semantics as io.ReaderAt, except + // that a negative offset is also allowed. In this case it references a + // position relative to the end of the file (similar to Seek()). 
+ Load(h Handle, p []byte, off int64) (int, error) + + // Save stores the data in the backend under the given handle. + Save(h Handle, p []byte) error + + // Stat returns information about the File identified by h. + Stat(h Handle) (FileInfo, error) + + // List returns a channel that yields all names of files of type t in an + // arbitrary order. A goroutine is started for this. If the channel done is + // closed, sending stops. + List(t FileType, done <-chan struct{}) <-chan string +} + +// FileInfo is returned by Stat() and contains information about a file in the +// backend. +type FileInfo struct{ Size int64 } diff --git a/src/restic/backend/doc.go b/src/restic/backend/doc.go index f82c3d671..daab2e2f8 100644 --- a/src/restic/backend/doc.go +++ b/src/restic/backend/doc.go @@ -1,5 +1,4 @@ // Package backend provides local and remote storage for restic repositories. -// All backends need to implement the Backend interface. There is a -// MockBackend, which can be used for mocking in tests, and a MemBackend, which -// stores all data in a hash internally. +// All backends need to implement the Backend interface. There is a MemBackend, +// which stores all data in a map internally and can be used for testing. package backend diff --git a/src/restic/backend/generic_test.go b/src/restic/backend/generic_test.go deleted file mode 100644 index 7c42e2859..000000000 --- a/src/restic/backend/generic_test.go +++ /dev/null @@ -1,61 +0,0 @@ -package backend_test - -import ( - "testing" - - "restic/backend" - . 
"restic/test" -) - -type mockBackend struct { - list func(backend.Type, <-chan struct{}) <-chan string -} - -func (m mockBackend) List(t backend.Type, done <-chan struct{}) <-chan string { - return m.list(t, done) -} - -var samples = backend.IDs{ - ParseID("20bdc1402a6fc9b633aaffffffffffffffffffffffffffffffffffffffffffff"), - ParseID("20bdc1402a6fc9b633ccd578c4a92d0f4ef1a457fa2e16c596bc73fb409d6cc0"), - ParseID("20bdc1402a6fc9b633ffffffffffffffffffffffffffffffffffffffffffffff"), - ParseID("20ff988befa5fc40350f00d531a767606efefe242c837aaccb80673f286be53d"), - ParseID("326cb59dfe802304f96ee9b5b9af93bdee73a30f53981e5ec579aedb6f1d0f07"), - ParseID("86b60b9594d1d429c4aa98fa9562082cabf53b98c7dc083abe5dae31074dd15a"), - ParseID("96c8dbe225079e624b5ce509f5bd817d1453cd0a85d30d536d01b64a8669aeae"), - ParseID("fa31d65b87affcd167b119e9d3d2a27b8236ca4836cb077ed3e96fcbe209b792"), -} - -func TestPrefixLength(t *testing.T) { - list := samples - - m := mockBackend{} - m.list = func(t backend.Type, done <-chan struct{}) <-chan string { - ch := make(chan string) - go func() { - defer close(ch) - for _, id := range list { - select { - case ch <- id.String(): - case <-done: - return - } - } - }() - return ch - } - - l, err := backend.PrefixLength(m, backend.Snapshot) - OK(t, err) - Equals(t, 19, l) - - list = samples[:3] - l, err = backend.PrefixLength(m, backend.Snapshot) - OK(t, err) - Equals(t, 19, l) - - list = samples[3:] - l, err = backend.PrefixLength(m, backend.Snapshot) - OK(t, err) - Equals(t, 8, l) -} diff --git a/src/restic/backend/ids_test.go b/src/restic/backend/ids_test.go deleted file mode 100644 index f4e3162ca..000000000 --- a/src/restic/backend/ids_test.go +++ /dev/null @@ -1,58 +0,0 @@ -package backend_test - -import ( - "reflect" - "testing" - - "restic/backend" - . 
"restic/test" -) - -var uniqTests = []struct { - before, after backend.IDs -}{ - { - backend.IDs{ - ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), - ParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), - ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), - }, - backend.IDs{ - ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), - ParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), - }, - }, - { - backend.IDs{ - ParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), - ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), - ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), - }, - backend.IDs{ - ParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), - ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), - }, - }, - { - backend.IDs{ - ParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), - ParseID("f658198b405d7e80db5ace1980d125c8da62f636b586c46bf81dfb856a49d0c8"), - ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), - ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), - }, - backend.IDs{ - ParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), - ParseID("f658198b405d7e80db5ace1980d125c8da62f636b586c46bf81dfb856a49d0c8"), - ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), - }, - }, -} - -func TestUniqIDs(t *testing.T) { - for i, test := range uniqTests { - uniq := test.before.Uniq() - if !reflect.DeepEqual(uniq, test.after) { - t.Errorf("uniqIDs() test %v failed\n wanted: %v\n got: %v", i, test.after, uniq) - } - } -} diff --git a/src/restic/backend/idset_test.go b/src/restic/backend/idset_test.go deleted file mode 100644 index 32dce0e0d..000000000 --- a/src/restic/backend/idset_test.go +++ /dev/null @@ 
-1,35 +0,0 @@ -package backend_test - -import ( - "testing" - - "restic/backend" - . "restic/test" -) - -var idsetTests = []struct { - id backend.ID - seen bool -}{ - {ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), false}, - {ParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), false}, - {ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), true}, - {ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), true}, - {ParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), true}, - {ParseID("f658198b405d7e80db5ace1980d125c8da62f636b586c46bf81dfb856a49d0c8"), false}, - {ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), true}, - {ParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), true}, - {ParseID("f658198b405d7e80db5ace1980d125c8da62f636b586c46bf81dfb856a49d0c8"), true}, - {ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), true}, -} - -func TestIDSet(t *testing.T) { - set := backend.NewIDSet() - for i, test := range idsetTests { - seen := set.Has(test.id) - if seen != test.seen { - t.Errorf("IDSet test %v failed: wanted %v, got %v", i, test.seen, seen) - } - set.Insert(test.id) - } -} diff --git a/src/restic/backend/interface.go b/src/restic/backend/interface.go deleted file mode 100644 index 24838ddfd..000000000 --- a/src/restic/backend/interface.go +++ /dev/null @@ -1,63 +0,0 @@ -package backend - -// Type is the type of a Blob. -type Type string - -// These are the different data types a backend can store. -const ( - Data Type = "data" - Key = "key" - Lock = "lock" - Snapshot = "snapshot" - Index = "index" - Config = "config" -) - -// Backend is used to store and access data. -type Backend interface { - // Location returns a string that describes the type and location of the - // repository. 
- Location() string - - // Test a boolean value whether a Blob with the name and type exists. - Test(t Type, name string) (bool, error) - - // Remove removes a Blob with type t and name. - Remove(t Type, name string) error - - // Close the backend - Close() error - - Lister - - // Load returns the data stored in the backend for h at the given offset - // and saves it in p. Load has the same semantics as io.ReaderAt, except - // that a negative offset is also allowed. In this case it references a - // position relative to the end of the file (similar to Seek()). - Load(h Handle, p []byte, off int64) (int, error) - - // Save stores the data in the backend under the given handle. - Save(h Handle, p []byte) error - - // Stat returns information about the blob identified by h. - Stat(h Handle) (BlobInfo, error) -} - -// Lister implements listing data items stored in a backend. -type Lister interface { - // List returns a channel that yields all names of blobs of type t in an - // arbitrary order. A goroutine is started for this. If the channel done is - // closed, sending stops. - List(t Type, done <-chan struct{}) <-chan string -} - -// Deleter are backends that allow to self-delete all content stored in them. -type Deleter interface { - // Delete the complete repository. - Delete() error -} - -// BlobInfo is returned by Stat() and contains information about a stored blob. -type BlobInfo struct { - Size int64 -} diff --git a/src/restic/backend/local/config.go b/src/restic/backend/local/config.go index a430f9dec..8a5c67a2c 100644 --- a/src/restic/backend/local/config.go +++ b/src/restic/backend/local/config.go @@ -3,7 +3,7 @@ package local import ( "strings" - "github.com/pkg/errors" + "restic/errors" ) // ParseConfig parses a local backend config. 
diff --git a/src/restic/backend/local/local.go b/src/restic/backend/local/local.go index c51ee949b..1b76e31e5 100644 --- a/src/restic/backend/local/local.go +++ b/src/restic/backend/local/local.go @@ -5,8 +5,9 @@ import ( "io/ioutil" "os" "path/filepath" + "restic" - "github.com/pkg/errors" + "restic/errors" "restic/backend" "restic/debug" @@ -18,6 +19,8 @@ type Local struct { p string } +var _ restic.Backend = &Local{} + func paths(dir string) []string { return []string{ dir, @@ -69,8 +72,8 @@ func (b *Local) Location() string { } // Construct path for given Type and name. -func filename(base string, t backend.Type, name string) string { - if t == backend.Config { +func filename(base string, t restic.FileType, name string) string { + if t == restic.ConfigFile { return filepath.Join(base, "config") } @@ -78,21 +81,21 @@ func filename(base string, t backend.Type, name string) string { } // Construct directory for given Type. -func dirname(base string, t backend.Type, name string) string { +func dirname(base string, t restic.FileType, name string) string { var n string switch t { - case backend.Data: + case restic.DataFile: n = backend.Paths.Data if len(name) > 2 { n = filepath.Join(n, name[:2]) } - case backend.Snapshot: + case restic.SnapshotFile: n = backend.Paths.Snapshots - case backend.Index: + case restic.IndexFile: n = backend.Paths.Index - case backend.Lock: + case restic.LockFile: n = backend.Paths.Locks - case backend.Key: + case restic.KeyFile: n = backend.Paths.Keys } return filepath.Join(base, n) @@ -102,7 +105,7 @@ func dirname(base string, t backend.Type, name string) string { // saves it in p. Load has the same semantics as io.ReaderAt, with one // exception: when off is lower than zero, it is treated as an offset relative // to the end of the file. 
-func (b *Local) Load(h backend.Handle, p []byte, off int64) (n int, err error) { +func (b *Local) Load(h restic.Handle, p []byte, off int64) (n int, err error) { debug.Log("backend.local.Load", "Load %v, length %v at %v", h, len(p), off) if err := h.Valid(); err != nil { return 0, err @@ -168,7 +171,7 @@ func writeToTempfile(tempdir string, p []byte) (filename string, err error) { } // Save stores data in the backend at the handle. -func (b *Local) Save(h backend.Handle, p []byte) (err error) { +func (b *Local) Save(h restic.Handle, p []byte) (err error) { debug.Log("backend.local.Save", "Save %v, length %v", h, len(p)) if err := h.Valid(); err != nil { return err @@ -188,7 +191,7 @@ func (b *Local) Save(h backend.Handle, p []byte) (err error) { } // create directories if necessary, ignore errors - if h.Type == backend.Data { + if h.Type == restic.DataFile { err = fs.MkdirAll(filepath.Dir(filename), backend.Modes.Dir) if err != nil { return errors.Wrap(err, "MkdirAll") @@ -213,22 +216,22 @@ func (b *Local) Save(h backend.Handle, p []byte) (err error) { } // Stat returns information about a blob. -func (b *Local) Stat(h backend.Handle) (backend.BlobInfo, error) { +func (b *Local) Stat(h restic.Handle) (restic.FileInfo, error) { debug.Log("backend.local.Stat", "Stat %v", h) if err := h.Valid(); err != nil { - return backend.BlobInfo{}, err + return restic.FileInfo{}, err } fi, err := fs.Stat(filename(b.p, h.Type, h.Name)) if err != nil { - return backend.BlobInfo{}, errors.Wrap(err, "Stat") + return restic.FileInfo{}, errors.Wrap(err, "Stat") } - return backend.BlobInfo{Size: fi.Size()}, nil + return restic.FileInfo{Size: fi.Size()}, nil } // Test returns true if a blob of the given type and name exists in the backend. 
-func (b *Local) Test(t backend.Type, name string) (bool, error) { +func (b *Local) Test(t restic.FileType, name string) (bool, error) { debug.Log("backend.local.Test", "Test %v %v", t, name) _, err := fs.Stat(filename(b.p, t, name)) if err != nil { @@ -242,7 +245,7 @@ func (b *Local) Test(t backend.Type, name string) (bool, error) { } // Remove removes the blob with the given name and type. -func (b *Local) Remove(t backend.Type, name string) error { +func (b *Local) Remove(t restic.FileType, name string) error { debug.Log("backend.local.Remove", "Remove %v %v", t, name) fn := filename(b.p, t, name) @@ -317,10 +320,10 @@ func listDirs(dir string) (filenames []string, err error) { // List returns a channel that yields all names of blobs of type t. A // goroutine is started for this. If the channel done is closed, sending // stops. -func (b *Local) List(t backend.Type, done <-chan struct{}) <-chan string { +func (b *Local) List(t restic.FileType, done <-chan struct{}) <-chan string { debug.Log("backend.local.List", "List %v", t) lister := listDir - if t == backend.Data { + if t == restic.DataFile { lister = listDirs } diff --git a/src/restic/backend/local/local_test.go b/src/restic/backend/local/local_test.go index b139b0aca..3bae88753 100644 --- a/src/restic/backend/local/local_test.go +++ b/src/restic/backend/local/local_test.go @@ -4,8 +4,8 @@ import ( "fmt" "io/ioutil" "os" + "restic" - "restic/backend" "restic/backend/local" "restic/backend/test" ) @@ -30,7 +30,7 @@ func createTempdir() error { } func init() { - test.CreateFn = func() (backend.Backend, error) { + test.CreateFn = func() (restic.Backend, error) { err := createTempdir() if err != nil { return nil, err @@ -38,7 +38,7 @@ func init() { return local.Create(tempBackendDir) } - test.OpenFn = func() (backend.Backend, error) { + test.OpenFn = func() (restic.Backend, error) { err := createTempdir() if err != nil { return nil, err diff --git a/src/restic/backend/mem/mem_backend.go 
b/src/restic/backend/mem/mem_backend.go index 239e2c899..d8885de49 100644 --- a/src/restic/backend/mem/mem_backend.go +++ b/src/restic/backend/mem/mem_backend.go @@ -2,28 +2,29 @@ package mem import ( "io" + "restic" "sync" - "github.com/pkg/errors" + "restic/errors" - "restic/backend" "restic/debug" ) type entry struct { - Type backend.Type + Type restic.FileType Name string } type memMap map[entry][]byte +// make sure that MemoryBackend implements backend.Backend +var _ restic.Backend = &MemoryBackend{} + // MemoryBackend is a mock backend that uses a map for storing all data in // memory. This should only be used for tests. type MemoryBackend struct { data memMap m sync.Mutex - - backend.MockBackend } // New returns a new backend that saves all data in a map in memory. @@ -32,60 +33,13 @@ func New() *MemoryBackend { data: make(memMap), } - be.MockBackend.TestFn = func(t backend.Type, name string) (bool, error) { - return memTest(be, t, name) - } - - be.MockBackend.LoadFn = func(h backend.Handle, p []byte, off int64) (int, error) { - return memLoad(be, h, p, off) - } - - be.MockBackend.SaveFn = func(h backend.Handle, p []byte) error { - return memSave(be, h, p) - } - - be.MockBackend.StatFn = func(h backend.Handle) (backend.BlobInfo, error) { - return memStat(be, h) - } - - be.MockBackend.RemoveFn = func(t backend.Type, name string) error { - return memRemove(be, t, name) - } - - be.MockBackend.ListFn = func(t backend.Type, done <-chan struct{}) <-chan string { - return memList(be, t, done) - } - - be.MockBackend.DeleteFn = func() error { - be.m.Lock() - defer be.m.Unlock() - - be.data = make(memMap) - return nil - } - - be.MockBackend.LocationFn = func() string { - return "Memory Backend" - } - debug.Log("MemoryBackend.New", "created new memory backend") return be } -func (be *MemoryBackend) insert(t backend.Type, name string, data []byte) error { - be.m.Lock() - defer be.m.Unlock() - - if _, ok := be.data[entry{t, name}]; ok { - return errors.New("already 
present") - } - - be.data[entry{t, name}] = data - return nil -} - -func memTest(be *MemoryBackend, t backend.Type, name string) (bool, error) { +// Test returns whether a file exists. +func (be *MemoryBackend) Test(t restic.FileType, name string) (bool, error) { be.m.Lock() defer be.m.Unlock() @@ -98,7 +52,8 @@ func memTest(be *MemoryBackend, t backend.Type, name string) (bool, error) { return false, nil } -func memLoad(be *MemoryBackend, h backend.Handle, p []byte, off int64) (int, error) { +// Load reads data from the backend. +func (be *MemoryBackend) Load(h restic.Handle, p []byte, off int64) (int, error) { if err := h.Valid(); err != nil { return 0, err } @@ -106,7 +61,7 @@ func memLoad(be *MemoryBackend, h backend.Handle, p []byte, off int64) (int, err be.m.Lock() defer be.m.Unlock() - if h.Type == backend.Config { + if h.Type == restic.ConfigFile { h.Name = "" } @@ -137,7 +92,8 @@ func memLoad(be *MemoryBackend, h backend.Handle, p []byte, off int64) (int, err return n, nil } -func memSave(be *MemoryBackend, h backend.Handle, p []byte) error { +// Save adds new Data to the backend. +func (be *MemoryBackend) Save(h restic.Handle, p []byte) error { if err := h.Valid(); err != nil { return err } @@ -145,7 +101,7 @@ func memSave(be *MemoryBackend, h backend.Handle, p []byte) error { be.m.Lock() defer be.m.Unlock() - if h.Type == backend.Config { + if h.Type == restic.ConfigFile { h.Name = "" } @@ -161,15 +117,16 @@ func memSave(be *MemoryBackend, h backend.Handle, p []byte) error { return nil } -func memStat(be *MemoryBackend, h backend.Handle) (backend.BlobInfo, error) { +// Stat returns information about a file in the backend. 
+func (be *MemoryBackend) Stat(h restic.Handle) (restic.FileInfo, error) { be.m.Lock() defer be.m.Unlock() if err := h.Valid(); err != nil { - return backend.BlobInfo{}, err + return restic.FileInfo{}, err } - if h.Type == backend.Config { + if h.Type == restic.ConfigFile { h.Name = "" } @@ -177,13 +134,14 @@ func memStat(be *MemoryBackend, h backend.Handle) (backend.BlobInfo, error) { e, ok := be.data[entry{h.Type, h.Name}] if !ok { - return backend.BlobInfo{}, errors.New("no such data") + return restic.FileInfo{}, errors.New("no such data") } - return backend.BlobInfo{Size: int64(len(e))}, nil + return restic.FileInfo{Size: int64(len(e))}, nil } -func memRemove(be *MemoryBackend, t backend.Type, name string) error { +// Remove deletes a file from the backend. +func (be *MemoryBackend) Remove(t restic.FileType, name string) error { be.m.Lock() defer be.m.Unlock() @@ -198,7 +156,8 @@ func memRemove(be *MemoryBackend, t backend.Type, name string) error { return nil } -func memList(be *MemoryBackend, t backend.Type, done <-chan struct{}) <-chan string { +// List returns a channel which yields entries from the backend. +func (be *MemoryBackend) List(t restic.FileType, done <-chan struct{}) <-chan string { be.m.Lock() defer be.m.Unlock() @@ -227,3 +186,22 @@ func memList(be *MemoryBackend, t backend.Type, done <-chan struct{}) <-chan str return ch } + +// Location returns the location of the backend (RAM). +func (be *MemoryBackend) Location() string { + return "RAM" +} + +// Delete removes all data in the backend. +func (be *MemoryBackend) Delete() error { + be.m.Lock() + defer be.m.Unlock() + + be.data = make(memMap) + return nil +} + +// Close closes the backend. 
+func (be *MemoryBackend) Close() error { + return nil +} diff --git a/src/restic/backend/mem/mem_backend_test.go b/src/restic/backend/mem/mem_backend_test.go index cde3bda1c..75b65f4c7 100644 --- a/src/restic/backend/mem/mem_backend_test.go +++ b/src/restic/backend/mem/mem_backend_test.go @@ -1,19 +1,20 @@ package mem_test import ( - "github.com/pkg/errors" + "restic" + + "restic/errors" - "restic/backend" "restic/backend/mem" "restic/backend/test" ) -var be backend.Backend +var be restic.Backend //go:generate go run ../test/generate_backend_tests.go func init() { - test.CreateFn = func() (backend.Backend, error) { + test.CreateFn = func() (restic.Backend, error) { if be != nil { return nil, errors.New("temporary memory backend dir already exists") } @@ -23,7 +24,7 @@ func init() { return be, nil } - test.OpenFn = func() (backend.Backend, error) { + test.OpenFn = func() (restic.Backend, error) { if be == nil { return nil, errors.New("repository not initialized") } diff --git a/src/restic/backend/mock_backend.go b/src/restic/backend/mock_backend.go deleted file mode 100644 index 70429acfd..000000000 --- a/src/restic/backend/mock_backend.go +++ /dev/null @@ -1,103 +0,0 @@ -package backend - -import "github.com/pkg/errors" - -// MockBackend implements a backend whose functions can be specified. This -// should only be used for tests. -type MockBackend struct { - CloseFn func() error - LoadFn func(h Handle, p []byte, off int64) (int, error) - SaveFn func(h Handle, p []byte) error - StatFn func(h Handle) (BlobInfo, error) - ListFn func(Type, <-chan struct{}) <-chan string - RemoveFn func(Type, string) error - TestFn func(Type, string) (bool, error) - DeleteFn func() error - LocationFn func() string -} - -// Close the backend. -func (m *MockBackend) Close() error { - if m.CloseFn == nil { - return nil - } - - return m.CloseFn() -} - -// Location returns a location string. 
-func (m *MockBackend) Location() string { - if m.LocationFn == nil { - return "" - } - - return m.LocationFn() -} - -// Load loads data from the backend. -func (m *MockBackend) Load(h Handle, p []byte, off int64) (int, error) { - if m.LoadFn == nil { - return 0, errors.New("not implemented") - } - - return m.LoadFn(h, p, off) -} - -// Save data in the backend. -func (m *MockBackend) Save(h Handle, p []byte) error { - if m.SaveFn == nil { - return errors.New("not implemented") - } - - return m.SaveFn(h, p) -} - -// Stat an object in the backend. -func (m *MockBackend) Stat(h Handle) (BlobInfo, error) { - if m.StatFn == nil { - return BlobInfo{}, errors.New("not implemented") - } - - return m.StatFn(h) -} - -// List items of type t. -func (m *MockBackend) List(t Type, done <-chan struct{}) <-chan string { - if m.ListFn == nil { - ch := make(chan string) - close(ch) - return ch - } - - return m.ListFn(t, done) -} - -// Remove data from the backend. -func (m *MockBackend) Remove(t Type, name string) error { - if m.RemoveFn == nil { - return errors.New("not implemented") - } - - return m.RemoveFn(t, name) -} - -// Test for the existence of a specific item. -func (m *MockBackend) Test(t Type, name string) (bool, error) { - if m.TestFn == nil { - return false, errors.New("not implemented") - } - - return m.TestFn(t, name) -} - -// Delete all data. -func (m *MockBackend) Delete() error { - if m.DeleteFn == nil { - return errors.New("not implemented") - } - - return m.DeleteFn() -} - -// Make sure that MockBackend implements the backend interface. -var _ Backend = &MockBackend{} diff --git a/src/restic/backend/rest/config.go b/src/restic/backend/rest/config.go index e59031071..929fda120 100644 --- a/src/restic/backend/rest/config.go +++ b/src/restic/backend/rest/config.go @@ -4,7 +4,7 @@ import ( "net/url" "strings" - "github.com/pkg/errors" + "restic/errors" ) // Config contains all configuration necessary to connect to a REST server. 
diff --git a/src/restic/backend/rest/rest.go b/src/restic/backend/rest/rest.go index a98fd5a80..ce1d25db9 100644 --- a/src/restic/backend/rest/rest.go +++ b/src/restic/backend/rest/rest.go @@ -8,9 +8,10 @@ import ( "net/http" "net/url" "path" + "restic" "strings" - "github.com/pkg/errors" + "restic/errors" "restic/backend" ) @@ -18,24 +19,24 @@ import ( const connLimit = 10 // restPath returns the path to the given resource. -func restPath(url *url.URL, h backend.Handle) string { +func restPath(url *url.URL, h restic.Handle) string { u := *url var dir string switch h.Type { - case backend.Config: + case restic.ConfigFile: dir = "" h.Name = "config" - case backend.Data: + case restic.DataFile: dir = backend.Paths.Data - case backend.Snapshot: + case restic.SnapshotFile: dir = backend.Paths.Snapshots - case backend.Index: + case restic.IndexFile: dir = backend.Paths.Index - case backend.Lock: + case restic.LockFile: dir = backend.Paths.Locks - case backend.Key: + case restic.KeyFile: dir = backend.Paths.Keys default: dir = string(h.Type) @@ -53,7 +54,7 @@ type restBackend struct { } // Open opens the REST backend with the given config. -func Open(cfg Config) (backend.Backend, error) { +func Open(cfg Config) (restic.Backend, error) { connChan := make(chan struct{}, connLimit) for i := 0; i < connLimit; i++ { connChan <- struct{}{} @@ -71,7 +72,7 @@ func (b *restBackend) Location() string { // Load returns the data stored in the backend for h at the given offset // and saves it in p. Load has the same semantics as io.ReaderAt. -func (b *restBackend) Load(h backend.Handle, p []byte, off int64) (n int, err error) { +func (b *restBackend) Load(h restic.Handle, p []byte, off int64) (n int, err error) { if err := h.Valid(); err != nil { return 0, err } @@ -120,7 +121,7 @@ func (b *restBackend) Load(h backend.Handle, p []byte, off int64) (n int, err er } // Save stores data in the backend at the handle. 
-func (b *restBackend) Save(h backend.Handle, p []byte) (err error) { +func (b *restBackend) Save(h restic.Handle, p []byte) (err error) { if err := h.Valid(); err != nil { return err } @@ -151,31 +152,31 @@ func (b *restBackend) Save(h backend.Handle, p []byte) (err error) { } // Stat returns information about a blob. -func (b *restBackend) Stat(h backend.Handle) (backend.BlobInfo, error) { +func (b *restBackend) Stat(h restic.Handle) (restic.FileInfo, error) { if err := h.Valid(); err != nil { - return backend.BlobInfo{}, err + return restic.FileInfo{}, err } <-b.connChan resp, err := b.client.Head(restPath(b.url, h)) b.connChan <- struct{}{} if err != nil { - return backend.BlobInfo{}, errors.Wrap(err, "client.Head") + return restic.FileInfo{}, errors.Wrap(err, "client.Head") } if err = resp.Body.Close(); err != nil { - return backend.BlobInfo{}, errors.Wrap(err, "Close") + return restic.FileInfo{}, errors.Wrap(err, "Close") } if resp.StatusCode != 200 { - return backend.BlobInfo{}, errors.Errorf("unexpected HTTP response code %v", resp.StatusCode) + return restic.FileInfo{}, errors.Errorf("unexpected HTTP response code %v", resp.StatusCode) } if resp.ContentLength < 0 { - return backend.BlobInfo{}, errors.New("negative content length") + return restic.FileInfo{}, errors.New("negative content length") } - bi := backend.BlobInfo{ + bi := restic.FileInfo{ Size: resp.ContentLength, } @@ -183,8 +184,8 @@ func (b *restBackend) Stat(h backend.Handle) (backend.BlobInfo, error) { } // Test returns true if a blob of the given type and name exists in the backend. 
-func (b *restBackend) Test(t backend.Type, name string) (bool, error) { - _, err := b.Stat(backend.Handle{Type: t, Name: name}) +func (b *restBackend) Test(t restic.FileType, name string) (bool, error) { + _, err := b.Stat(restic.Handle{Type: t, Name: name}) if err != nil { return false, nil } @@ -193,8 +194,8 @@ func (b *restBackend) Test(t backend.Type, name string) (bool, error) { } // Remove removes the blob with the given name and type. -func (b *restBackend) Remove(t backend.Type, name string) error { - h := backend.Handle{Type: t, Name: name} +func (b *restBackend) Remove(t restic.FileType, name string) error { + h := restic.Handle{Type: t, Name: name} if err := h.Valid(); err != nil { return err } @@ -221,10 +222,10 @@ func (b *restBackend) Remove(t backend.Type, name string) error { // List returns a channel that yields all names of blobs of type t. A // goroutine is started for this. If the channel done is closed, sending // stops. -func (b *restBackend) List(t backend.Type, done <-chan struct{}) <-chan string { +func (b *restBackend) List(t restic.FileType, done <-chan struct{}) <-chan string { ch := make(chan string) - url := restPath(b.url, backend.Handle{Type: t}) + url := restPath(b.url, restic.Handle{Type: t}) if !strings.HasSuffix(url, "/") { url += "/" } diff --git a/src/restic/backend/rest/rest_path_test.go b/src/restic/backend/rest/rest_path_test.go index 285240cac..8356abfba 100644 --- a/src/restic/backend/rest/rest_path_test.go +++ b/src/restic/backend/rest/rest_path_test.go @@ -2,35 +2,35 @@ package rest import ( "net/url" - "restic/backend" + "restic" "testing" ) var restPathTests = []struct { - Handle backend.Handle + Handle restic.Handle URL *url.URL Result string }{ { URL: parseURL("https://hostname.foo"), - Handle: backend.Handle{ - Type: backend.Data, + Handle: restic.Handle{ + Type: restic.DataFile, Name: "foobar", }, Result: "https://hostname.foo/data/foobar", }, { URL: parseURL("https://hostname.foo:1234/prefix/repo"), - Handle: 
backend.Handle{ - Type: backend.Lock, + Handle: restic.Handle{ + Type: restic.LockFile, Name: "foobar", }, Result: "https://hostname.foo:1234/prefix/repo/locks/foobar", }, { URL: parseURL("https://hostname.foo:1234/prefix/repo"), - Handle: backend.Handle{ - Type: backend.Config, + Handle: restic.Handle{ + Type: restic.ConfigFile, Name: "foobar", }, Result: "https://hostname.foo:1234/prefix/repo/config", diff --git a/src/restic/backend/rest/rest_test.go b/src/restic/backend/rest/rest_test.go index 4e77cf612..2e7095b29 100644 --- a/src/restic/backend/rest/rest_test.go +++ b/src/restic/backend/rest/rest_test.go @@ -4,10 +4,10 @@ import ( "fmt" "net/url" "os" + "restic" - "github.com/pkg/errors" + "restic/errors" - "restic/backend" "restic/backend/rest" "restic/backend/test" . "restic/test" @@ -31,13 +31,13 @@ func init() { URL: url, } - test.CreateFn = func() (backend.Backend, error) { + test.CreateFn = func() (restic.Backend, error) { be, err := rest.Open(cfg) if err != nil { return nil, err } - exists, err := be.Test(backend.Config, "") + exists, err := be.Test(restic.ConfigFile, "") if err != nil { return nil, err } @@ -49,7 +49,7 @@ func init() { return be, nil } - test.OpenFn = func() (backend.Backend, error) { + test.OpenFn = func() (restic.Backend, error) { return rest.Open(cfg) } } diff --git a/src/restic/backend/s3/config.go b/src/restic/backend/s3/config.go index 4eda2b0e8..2df02b58c 100644 --- a/src/restic/backend/s3/config.go +++ b/src/restic/backend/s3/config.go @@ -5,7 +5,7 @@ import ( "path" "strings" - "github.com/pkg/errors" + "restic/errors" ) // Config contains all configuration necessary to connect to an s3 compatible diff --git a/src/restic/backend/s3/s3.go b/src/restic/backend/s3/s3.go index 835a7c485..b9f29b6b7 100644 --- a/src/restic/backend/s3/s3.go +++ b/src/restic/backend/s3/s3.go @@ -3,13 +3,13 @@ package s3 import ( "bytes" "io" + "restic" "strings" - "github.com/pkg/errors" + "restic/errors" "github.com/minio/minio-go" - "restic/backend" 
"restic/debug" ) @@ -25,7 +25,7 @@ type s3 struct { // Open opens the S3 backend at bucket and region. The bucket is created if it // does not exist yet. -func Open(cfg Config) (backend.Backend, error) { +func Open(cfg Config) (restic.Backend, error) { debug.Log("s3.Open", "open, config %#v", cfg) client, err := minio.New(cfg.Endpoint, cfg.KeyID, cfg.Secret, !cfg.UseHTTP) @@ -53,7 +53,7 @@ func Open(cfg Config) (backend.Backend, error) { return be, nil } -func (be *s3) s3path(t backend.Type, name string) string { +func (be *s3) s3path(t restic.FileType, name string) string { var path string if be.prefix != "" { @@ -61,7 +61,7 @@ func (be *s3) s3path(t backend.Type, name string) string { } path += string(t) - if t == backend.Config { + if t == restic.ConfigFile { return path } return path + "/" + name @@ -81,7 +81,7 @@ func (be *s3) Location() string { // Load returns the data stored in the backend for h at the given offset // and saves it in p. Load has the same semantics as io.ReaderAt. -func (be s3) Load(h backend.Handle, p []byte, off int64) (n int, err error) { +func (be s3) Load(h restic.Handle, p []byte, off int64) (n int, err error) { var obj *minio.Object debug.Log("s3.Load", "%v, offset %v, len %v", h, off, len(p)) @@ -153,7 +153,7 @@ func (be s3) Load(h backend.Handle, p []byte, off int64) (n int, err error) { } // Save stores data in the backend at the handle. -func (be s3) Save(h backend.Handle, p []byte) (err error) { +func (be s3) Save(h restic.Handle, p []byte) (err error) { if err := h.Valid(); err != nil { return err } @@ -183,7 +183,7 @@ func (be s3) Save(h backend.Handle, p []byte) (err error) { } // Stat returns information about a blob. 
-func (be s3) Stat(h backend.Handle) (bi backend.BlobInfo, err error) { +func (be s3) Stat(h restic.Handle) (bi restic.FileInfo, err error) { debug.Log("s3.Stat", "%v", h) path := be.s3path(h.Type, h.Name) @@ -192,7 +192,7 @@ func (be s3) Stat(h backend.Handle) (bi backend.BlobInfo, err error) { obj, err = be.client.GetObject(be.bucketname, path) if err != nil { debug.Log("s3.Stat", "GetObject() err %v", err) - return backend.BlobInfo{}, errors.Wrap(err, "client.GetObject") + return restic.FileInfo{}, errors.Wrap(err, "client.GetObject") } // make sure that the object is closed properly. @@ -206,14 +206,14 @@ func (be s3) Stat(h backend.Handle) (bi backend.BlobInfo, err error) { fi, err := obj.Stat() if err != nil { debug.Log("s3.Stat", "Stat() err %v", err) - return backend.BlobInfo{}, errors.Wrap(err, "Stat") + return restic.FileInfo{}, errors.Wrap(err, "Stat") } - return backend.BlobInfo{Size: fi.Size}, nil + return restic.FileInfo{Size: fi.Size}, nil } // Test returns true if a blob of the given type and name exists in the backend. -func (be *s3) Test(t backend.Type, name string) (bool, error) { +func (be *s3) Test(t restic.FileType, name string) (bool, error) { found := false path := be.s3path(t, name) _, err := be.client.StatObject(be.bucketname, path) @@ -226,7 +226,7 @@ func (be *s3) Test(t backend.Type, name string) (bool, error) { } // Remove removes the blob with the given name and type. -func (be *s3) Remove(t backend.Type, name string) error { +func (be *s3) Remove(t restic.FileType, name string) error { path := be.s3path(t, name) err := be.client.RemoveObject(be.bucketname, path) debug.Log("s3.Remove", "%v %v -> err %v", t, name, err) @@ -236,7 +236,7 @@ func (be *s3) Remove(t backend.Type, name string) error { // List returns a channel that yields all names of blobs of type t. A // goroutine is started for this. If the channel done is closed, sending // stops. 
-func (be *s3) List(t backend.Type, done <-chan struct{}) <-chan string { +func (be *s3) List(t restic.FileType, done <-chan struct{}) <-chan string { debug.Log("s3.List", "listing %v", t) ch := make(chan string) @@ -264,11 +264,11 @@ func (be *s3) List(t backend.Type, done <-chan struct{}) <-chan string { } // Remove keys for a specified backend type. -func (be *s3) removeKeys(t backend.Type) error { +func (be *s3) removeKeys(t restic.FileType) error { done := make(chan struct{}) defer close(done) - for key := range be.List(backend.Data, done) { - err := be.Remove(backend.Data, key) + for key := range be.List(restic.DataFile, done) { + err := be.Remove(restic.DataFile, key) if err != nil { return err } @@ -279,12 +279,12 @@ func (be *s3) removeKeys(t backend.Type) error { // Delete removes all restic keys in the bucket. It will not remove the bucket itself. func (be *s3) Delete() error { - alltypes := []backend.Type{ - backend.Data, - backend.Key, - backend.Lock, - backend.Snapshot, - backend.Index} + alltypes := []restic.FileType{ + restic.DataFile, + restic.KeyFile, + restic.LockFile, + restic.SnapshotFile, + restic.IndexFile} for _, t := range alltypes { err := be.removeKeys(t) @@ -293,7 +293,7 @@ func (be *s3) Delete() error { } } - return be.Remove(backend.Config, "") + return be.Remove(restic.ConfigFile, "") } // Close does nothing diff --git a/src/restic/backend/s3/s3_test.go b/src/restic/backend/s3/s3_test.go index 6fd9c3bf6..355352fa5 100644 --- a/src/restic/backend/s3/s3_test.go +++ b/src/restic/backend/s3/s3_test.go @@ -4,10 +4,10 @@ import ( "fmt" "net/url" "os" + "restic" - "github.com/pkg/errors" + "restic/errors" - "restic/backend" "restic/backend/s3" "restic/backend/test" . 
"restic/test" @@ -38,13 +38,13 @@ func init() { cfg.UseHTTP = true } - test.CreateFn = func() (backend.Backend, error) { + test.CreateFn = func() (restic.Backend, error) { be, err := s3.Open(cfg) if err != nil { return nil, err } - exists, err := be.Test(backend.Config, "") + exists, err := be.Test(restic.ConfigFile, "") if err != nil { return nil, err } @@ -56,7 +56,7 @@ func init() { return be, nil } - test.OpenFn = func() (backend.Backend, error) { + test.OpenFn = func() (restic.Backend, error) { return s3.Open(cfg) } diff --git a/src/restic/backend/sftp/config.go b/src/restic/backend/sftp/config.go index d8e200491..abd8b0c2f 100644 --- a/src/restic/backend/sftp/config.go +++ b/src/restic/backend/sftp/config.go @@ -5,7 +5,7 @@ import ( "path" "strings" - "github.com/pkg/errors" + "restic/errors" ) // Config collects all information required to connect to an sftp server. diff --git a/src/restic/backend/sftp/sftp.go b/src/restic/backend/sftp/sftp.go index c82e29683..b323eb1b9 100644 --- a/src/restic/backend/sftp/sftp.go +++ b/src/restic/backend/sftp/sftp.go @@ -9,10 +9,11 @@ import ( "os" "os/exec" "path" + "restic" "strings" "time" - "github.com/pkg/errors" + "restic/errors" "restic/backend" "restic/debug" @@ -33,6 +34,8 @@ type SFTP struct { result <-chan error } +var _ restic.Backend = &SFTP{} + func startClient(program string, args ...string) (*SFTP, error) { // Connect to a remote host and request the sftp subsystem via the 'ssh' // command. This assumes that passwordless login is correctly configured. @@ -256,11 +259,11 @@ func (r *SFTP) mkdirAll(dir string, mode os.FileMode) error { } // Rename temp file to final name according to type and name. 
-func (r *SFTP) renameFile(oldname string, t backend.Type, name string) error { +func (r *SFTP) renameFile(oldname string, t restic.FileType, name string) error { filename := r.filename(t, name) // create directories if necessary - if t == backend.Data { + if t == restic.DataFile { err := r.mkdirAll(path.Dir(filename), backend.Modes.Dir) if err != nil { return err @@ -293,9 +296,9 @@ func Join(parts ...string) string { return path.Clean(path.Join(parts...)) } -// Construct path for given backend.Type and name. -func (r *SFTP) filename(t backend.Type, name string) string { - if t == backend.Config { +// Construct path for given restic.FileType and name. +func (r *SFTP) filename(t restic.FileType, name string) string { + if t == restic.ConfigFile { return Join(r.p, "config") } @@ -303,21 +306,21 @@ func (r *SFTP) filename(t backend.Type, name string) string { } // Construct directory for given backend.Type. -func (r *SFTP) dirname(t backend.Type, name string) string { +func (r *SFTP) dirname(t restic.FileType, name string) string { var n string switch t { - case backend.Data: + case restic.DataFile: n = backend.Paths.Data if len(name) > 2 { n = Join(n, name[:2]) } - case backend.Snapshot: + case restic.SnapshotFile: n = backend.Paths.Snapshots - case backend.Index: + case restic.IndexFile: n = backend.Paths.Index - case backend.Lock: + case restic.LockFile: n = backend.Paths.Locks - case backend.Key: + case restic.KeyFile: n = backend.Paths.Keys } return Join(r.p, n) @@ -325,7 +328,7 @@ func (r *SFTP) dirname(t backend.Type, name string) string { // Load returns the data stored in the backend for h at the given offset // and saves it in p. Load has the same semantics as io.ReaderAt. 
-func (r *SFTP) Load(h backend.Handle, p []byte, off int64) (n int, err error) { +func (r *SFTP) Load(h restic.Handle, p []byte, off int64) (n int, err error) { debug.Log("sftp.Load", "load %v, %d bytes, offset %v", h, len(p), off) if err := r.clientError(); err != nil { return 0, err @@ -362,7 +365,7 @@ func (r *SFTP) Load(h backend.Handle, p []byte, off int64) (n int, err error) { } // Save stores data in the backend at the handle. -func (r *SFTP) Save(h backend.Handle, p []byte) (err error) { +func (r *SFTP) Save(h restic.Handle, p []byte) (err error) { debug.Log("sftp.Save", "save %v bytes to %v", h, len(p)) if err := r.clientError(); err != nil { return err @@ -400,26 +403,26 @@ func (r *SFTP) Save(h backend.Handle, p []byte) (err error) { } // Stat returns information about a blob. -func (r *SFTP) Stat(h backend.Handle) (backend.BlobInfo, error) { +func (r *SFTP) Stat(h restic.Handle) (restic.FileInfo, error) { debug.Log("sftp.Stat", "stat %v", h) if err := r.clientError(); err != nil { - return backend.BlobInfo{}, err + return restic.FileInfo{}, err } if err := h.Valid(); err != nil { - return backend.BlobInfo{}, err + return restic.FileInfo{}, err } fi, err := r.c.Lstat(r.filename(h.Type, h.Name)) if err != nil { - return backend.BlobInfo{}, errors.Wrap(err, "Lstat") + return restic.FileInfo{}, errors.Wrap(err, "Lstat") } - return backend.BlobInfo{Size: fi.Size()}, nil + return restic.FileInfo{Size: fi.Size()}, nil } // Test returns true if a blob of the given type and name exists in the backend. -func (r *SFTP) Test(t backend.Type, name string) (bool, error) { +func (r *SFTP) Test(t restic.FileType, name string) (bool, error) { debug.Log("sftp.Test", "type %v, name %v", t, name) if err := r.clientError(); err != nil { return false, err @@ -438,7 +441,7 @@ func (r *SFTP) Test(t backend.Type, name string) (bool, error) { } // Remove removes the content stored at name. 
-func (r *SFTP) Remove(t backend.Type, name string) error { +func (r *SFTP) Remove(t restic.FileType, name string) error { debug.Log("sftp.Remove", "type %v, name %v", t, name) if err := r.clientError(); err != nil { return err @@ -450,14 +453,14 @@ func (r *SFTP) Remove(t backend.Type, name string) error { // List returns a channel that yields all names of blobs of type t. A // goroutine is started for this. If the channel done is closed, sending // stops. -func (r *SFTP) List(t backend.Type, done <-chan struct{}) <-chan string { +func (r *SFTP) List(t restic.FileType, done <-chan struct{}) <-chan string { debug.Log("sftp.List", "list all %v", t) ch := make(chan string) go func() { defer close(ch) - if t == backend.Data { + if t == restic.DataFile { // read first level basedir := r.dirname(t, "") diff --git a/src/restic/backend/sftp/sftp_backend_test.go b/src/restic/backend/sftp/sftp_backend_test.go index 2d8e609ca..567b2cf94 100644 --- a/src/restic/backend/sftp/sftp_backend_test.go +++ b/src/restic/backend/sftp/sftp_backend_test.go @@ -4,11 +4,11 @@ import ( "io/ioutil" "os" "path/filepath" + "restic" "strings" - "github.com/pkg/errors" + "restic/errors" - "restic/backend" "restic/backend/sftp" "restic/backend/test" @@ -52,7 +52,7 @@ func init() { args := []string{"-e"} - test.CreateFn = func() (backend.Backend, error) { + test.CreateFn = func() (restic.Backend, error) { err := createTempdir() if err != nil { return nil, err @@ -61,7 +61,7 @@ func init() { return sftp.Create(tempBackendDir, sftpserver, args...) 
} - test.OpenFn = func() (backend.Backend, error) { + test.OpenFn = func() (restic.Backend, error) { err := createTempdir() if err != nil { return nil, err diff --git a/src/restic/backend/test/tests.go b/src/restic/backend/test/tests.go index a9b18e361..e79fca366 100644 --- a/src/restic/backend/test/tests.go +++ b/src/restic/backend/test/tests.go @@ -7,28 +7,29 @@ import ( "io/ioutil" "math/rand" "reflect" + "restic" "sort" "testing" - "github.com/pkg/errors" + "restic/errors" + "restic/test" "restic/backend" - . "restic/test" ) // CreateFn is a function that creates a temporary repository for the tests. -var CreateFn func() (backend.Backend, error) +var CreateFn func() (restic.Backend, error) // OpenFn is a function that opens a previously created temporary repository. -var OpenFn func() (backend.Backend, error) +var OpenFn func() (restic.Backend, error) // CleanupFn removes temporary files and directories created during the tests. var CleanupFn func() error -var but backend.Backend // backendUnderTest +var but restic.Backend // backendUnderTest var butInitialized bool -func open(t testing.TB) backend.Backend { +func open(t testing.TB) restic.Backend { if OpenFn == nil { t.Fatal("OpenFn not set") } @@ -118,7 +119,7 @@ func TestCreateWithConfig(t testing.TB) { defer close(t) // save a config - store(t, b, backend.Config, []byte("test config")) + store(t, b, restic.ConfigFile, []byte("test config")) // now create the backend again, this must fail _, err := CreateFn() @@ -127,7 +128,7 @@ func TestCreateWithConfig(t testing.TB) { } // remove config - err = b.Remove(backend.Config, "") + err = b.Remove(restic.ConfigFile, "") if err != nil { t.Fatalf("unexpected error removing config: %v", err) } @@ -152,12 +153,12 @@ func TestConfig(t testing.TB) { var testString = "Config" // create config and read it back - _, err := backend.LoadAll(b, backend.Handle{Type: backend.Config}, nil) + _, err := backend.LoadAll(b, restic.Handle{Type: restic.ConfigFile}, nil) if err == nil 
{ t.Fatalf("did not get expected error for non-existing config") } - err = b.Save(backend.Handle{Type: backend.Config}, []byte(testString)) + err = b.Save(restic.Handle{Type: restic.ConfigFile}, []byte(testString)) if err != nil { t.Fatalf("Save() error: %v", err) } @@ -165,7 +166,7 @@ func TestConfig(t testing.TB) { // try accessing the config with different names, should all return the // same config for _, name := range []string{"", "foo", "bar", "0000000000000000000000000000000000000000000000000000000000000000"} { - h := backend.Handle{Type: backend.Config, Name: name} + h := restic.Handle{Type: restic.ConfigFile, Name: name} buf, err := backend.LoadAll(b, h, nil) if err != nil { t.Fatalf("unable to read config with name %q: %v", name, err) @@ -182,22 +183,22 @@ func TestLoad(t testing.TB) { b := open(t) defer close(t) - _, err := b.Load(backend.Handle{}, nil, 0) + _, err := b.Load(restic.Handle{}, nil, 0) if err == nil { t.Fatalf("Load() did not return an error for invalid handle") } - _, err = b.Load(backend.Handle{Type: backend.Data, Name: "foobar"}, nil, 0) + _, err = b.Load(restic.Handle{Type: restic.DataFile, Name: "foobar"}, nil, 0) if err == nil { t.Fatalf("Load() did not return an error for non-existing blob") } length := rand.Intn(1<<24) + 2000 - data := Random(23, length) - id := backend.Hash(data) + data := test.Random(23, length) + id := restic.Hash(data) - handle := backend.Handle{Type: backend.Data, Name: id.String()} + handle := restic.Handle{Type: restic.DataFile, Name: id.String()} err = b.Save(handle, data) if err != nil { t.Fatalf("Save() error: %v", err) @@ -309,7 +310,7 @@ func TestLoad(t testing.TB) { t.Errorf("wrong error returned for larger buffer: want io.ErrUnexpectedEOF, got %#v", err) } - OK(t, b.Remove(backend.Data, id.String())) + test.OK(t, b.Remove(restic.DataFile, id.String())) } // TestLoadNegativeOffset tests the backend's Load function with negative offsets. 
@@ -319,10 +320,10 @@ func TestLoadNegativeOffset(t testing.TB) { length := rand.Intn(1<<24) + 2000 - data := Random(23, length) - id := backend.Hash(data) + data := test.Random(23, length) + id := restic.Hash(data) - handle := backend.Handle{Type: backend.Data, Name: id.String()} + handle := restic.Handle{Type: restic.DataFile, Name: id.String()} err := b.Save(handle, data) if err != nil { t.Fatalf("Save() error: %v", err) @@ -365,30 +366,30 @@ func TestLoadNegativeOffset(t testing.TB) { } - OK(t, b.Remove(backend.Data, id.String())) + test.OK(t, b.Remove(restic.DataFile, id.String())) } // TestSave tests saving data in the backend. func TestSave(t testing.TB) { b := open(t) defer close(t) - var id backend.ID + var id restic.ID for i := 0; i < 10; i++ { length := rand.Intn(1<<23) + 200000 - data := Random(23, length) + data := test.Random(23, length) // use the first 32 byte as the ID copy(id[:], data) - h := backend.Handle{ - Type: backend.Data, + h := restic.Handle{ + Type: restic.DataFile, Name: fmt.Sprintf("%s-%d", id, i), } err := b.Save(h, data) - OK(t, err) + test.OK(t, err) buf, err := backend.LoadAll(b, h, nil) - OK(t, err) + test.OK(t, err) if len(buf) != len(data) { t.Fatalf("number of bytes does not match, want %v, got %v", len(data), len(buf)) } @@ -398,7 +399,7 @@ func TestSave(t testing.TB) { } fi, err := b.Stat(h) - OK(t, err) + test.OK(t, err) if fi.Size != int64(len(data)) { t.Fatalf("Stat() returned different size, want %q, got %d", len(data), fi.Size) @@ -429,7 +430,7 @@ func TestSaveFilenames(t testing.TB) { defer close(t) for i, test := range filenameTests { - h := backend.Handle{Name: test.name, Type: backend.Data} + h := restic.Handle{Name: test.name, Type: restic.DataFile} err := b.Save(h, []byte(test.data)) if err != nil { t.Errorf("test %d failed: Save() returned %v", i, err) @@ -464,17 +465,17 @@ var testStrings = []struct { {"4e54d2c721cbdb730f01b10b62dec622962b36966ec685880effa63d71c808f2", "foo/../../baz"}, } -func store(t 
testing.TB, b backend.Backend, tpe backend.Type, data []byte) { - id := backend.Hash(data) - err := b.Save(backend.Handle{Name: id.String(), Type: tpe}, data) - OK(t, err) +func store(t testing.TB, b restic.Backend, tpe restic.FileType, data []byte) { + id := restic.Hash(data) + err := b.Save(restic.Handle{Name: id.String(), Type: tpe}, data) + test.OK(t, err) } func read(t testing.TB, rd io.Reader, expectedData []byte) { buf, err := ioutil.ReadAll(rd) - OK(t, err) + test.OK(t, err) if expectedData != nil { - Equals(t, expectedData, buf) + test.Equals(t, expectedData, buf) } } @@ -483,90 +484,90 @@ func TestBackend(t testing.TB) { b := open(t) defer close(t) - for _, tpe := range []backend.Type{ - backend.Data, backend.Key, backend.Lock, - backend.Snapshot, backend.Index, + for _, tpe := range []restic.FileType{ + restic.DataFile, restic.KeyFile, restic.LockFile, + restic.SnapshotFile, restic.IndexFile, } { // detect non-existing files - for _, test := range testStrings { - id, err := backend.ParseID(test.id) - OK(t, err) + for _, ts := range testStrings { + id, err := restic.ParseID(ts.id) + test.OK(t, err) // test if blob is already in repository ret, err := b.Test(tpe, id.String()) - OK(t, err) - Assert(t, !ret, "blob was found to exist before creating") + test.OK(t, err) + test.Assert(t, !ret, "blob was found to exist before creating") // try to stat a not existing blob - h := backend.Handle{Type: tpe, Name: id.String()} + h := restic.Handle{Type: tpe, Name: id.String()} _, err = b.Stat(h) - Assert(t, err != nil, "blob data could be extracted before creation") + test.Assert(t, err != nil, "blob data could be extracted before creation") // try to read not existing blob _, err = b.Load(h, nil, 0) - Assert(t, err != nil, "blob reader could be obtained before creation") + test.Assert(t, err != nil, "blob reader could be obtained before creation") // try to get string out, should fail ret, err = b.Test(tpe, id.String()) - OK(t, err) - Assert(t, !ret, "id %q was 
found (but should not have)", test.id) + test.OK(t, err) + test.Assert(t, !ret, "id %q was found (but should not have)", ts.id) } // add files - for _, test := range testStrings { - store(t, b, tpe, []byte(test.data)) + for _, ts := range testStrings { + store(t, b, tpe, []byte(ts.data)) // test Load() - h := backend.Handle{Type: tpe, Name: test.id} + h := restic.Handle{Type: tpe, Name: ts.id} buf, err := backend.LoadAll(b, h, nil) - OK(t, err) - Equals(t, test.data, string(buf)) + test.OK(t, err) + test.Equals(t, ts.data, string(buf)) // try to read it out with an offset and a length start := 1 - end := len(test.data) - 2 + end := len(ts.data) - 2 length := end - start buf2 := make([]byte, length) n, err := b.Load(h, buf2, int64(start)) - OK(t, err) - Equals(t, length, n) - Equals(t, test.data[start:end], string(buf2)) + test.OK(t, err) + test.Equals(t, length, n) + test.Equals(t, ts.data[start:end], string(buf2)) } // test adding the first file again - test := testStrings[0] + ts := testStrings[0] // create blob - err := b.Save(backend.Handle{Type: tpe, Name: test.id}, []byte(test.data)) - Assert(t, err != nil, "expected error, got %v", err) + err := b.Save(restic.Handle{Type: tpe, Name: ts.id}, []byte(ts.data)) + test.Assert(t, err != nil, "expected error, got %v", err) // remove and recreate - err = b.Remove(tpe, test.id) - OK(t, err) + err = b.Remove(tpe, ts.id) + test.OK(t, err) // test that the blob is gone - ok, err := b.Test(tpe, test.id) - OK(t, err) - Assert(t, ok == false, "removed blob still present") + ok, err := b.Test(tpe, ts.id) + test.OK(t, err) + test.Assert(t, ok == false, "removed blob still present") // create blob - err = b.Save(backend.Handle{Type: tpe, Name: test.id}, []byte(test.data)) - OK(t, err) + err = b.Save(restic.Handle{Type: tpe, Name: ts.id}, []byte(ts.data)) + test.OK(t, err) // list items - IDs := backend.IDs{} + IDs := restic.IDs{} - for _, test := range testStrings { - id, err := backend.ParseID(test.id) - OK(t, err) + for _, 
ts := range testStrings { + id, err := restic.ParseID(ts.id) + test.OK(t, err) IDs = append(IDs, id) } - list := backend.IDs{} + list := restic.IDs{} for s := range b.List(tpe, nil) { - list = append(list, ParseID(s)) + list = append(list, restic.TestParseID(s)) } if len(IDs) != len(list) { @@ -581,19 +582,19 @@ func TestBackend(t testing.TB) { } // remove content if requested - if TestCleanupTempDirs { - for _, test := range testStrings { - id, err := backend.ParseID(test.id) - OK(t, err) + if test.TestCleanupTempDirs { + for _, ts := range testStrings { + id, err := restic.ParseID(ts.id) + test.OK(t, err) found, err := b.Test(tpe, id.String()) - OK(t, err) + test.OK(t, err) - OK(t, b.Remove(tpe, id.String())) + test.OK(t, b.Remove(tpe, id.String())) found, err = b.Test(tpe, id.String()) - OK(t, err) - Assert(t, !found, fmt.Sprintf("id %q not found after removal", id)) + test.OK(t, err) + test.Assert(t, !found, fmt.Sprintf("id %q not found after removal", id)) } } } @@ -604,7 +605,7 @@ func TestDelete(t testing.TB) { b := open(t) defer close(t) - be, ok := b.(backend.Deleter) + be, ok := b.(restic.Deleter) if !ok { return } @@ -622,7 +623,7 @@ func TestCleanup(t testing.TB) { return } - if !TestCleanupTempDirs { + if !test.TestCleanupTempDirs { t.Logf("not cleaning up backend") return } diff --git a/src/restic/backend/test/tests_test.go b/src/restic/backend/test/tests_test.go index 5dbba88a4..04e9936e0 100644 --- a/src/restic/backend/test/tests_test.go +++ b/src/restic/backend/test/tests_test.go @@ -1,19 +1,20 @@ package test_test import ( - "github.com/pkg/errors" + "restic" + + "restic/errors" - "restic/backend" "restic/backend/mem" "restic/backend/test" ) -var be backend.Backend +var be restic.Backend //go:generate go run ../test/generate_backend_tests.go func init() { - test.CreateFn = func() (backend.Backend, error) { + test.CreateFn = func() (restic.Backend, error) { if be != nil { return nil, errors.New("temporary memory backend dir already exists") } @@ 
-23,7 +24,7 @@ func init() { return be, nil } - test.OpenFn = func() (backend.Backend, error) { + test.OpenFn = func() (restic.Backend, error) { if be == nil { return nil, errors.New("repository not initialized") } diff --git a/src/restic/backend/testing.go b/src/restic/backend/testing.go deleted file mode 100644 index e0c3dd569..000000000 --- a/src/restic/backend/testing.go +++ /dev/null @@ -1,17 +0,0 @@ -package backend - -import ( - "crypto/rand" - "io" -) - -// RandomID retuns a randomly generated ID. This is mainly used for testing. -// When reading from rand fails, the function panics. -func RandomID() ID { - id := ID{} - _, err := io.ReadFull(rand.Reader, id[:]) - if err != nil { - panic(err) - } - return id -} diff --git a/src/restic/backend/utils.go b/src/restic/backend/utils.go index 9bd87b4fb..82a899515 100644 --- a/src/restic/backend/utils.go +++ b/src/restic/backend/utils.go @@ -2,15 +2,16 @@ package backend import ( "io" + "restic" - "github.com/pkg/errors" + "restic/errors" ) // LoadAll reads all data stored in the backend for the handle. The buffer buf // is resized to accomodate all data in the blob. Errors returned by be.Load() // are passed on, except io.ErrUnexpectedEOF is silenced and nil returned // instead, since it means this function is working properly. 
-func LoadAll(be Backend, h Handle, buf []byte) ([]byte, error) { +func LoadAll(be restic.Backend, h restic.Handle, buf []byte) ([]byte, error) { fi, err := be.Stat(h) if err != nil { return nil, errors.Wrap(err, "Stat") diff --git a/src/restic/backend/utils_test.go b/src/restic/backend/utils_test.go index ad39c3b1d..59eed7089 100644 --- a/src/restic/backend/utils_test.go +++ b/src/restic/backend/utils_test.go @@ -3,6 +3,7 @@ package backend_test import ( "bytes" "math/rand" + "restic" "testing" "restic/backend" @@ -19,11 +20,11 @@ func TestLoadAll(t *testing.T) { for i := 0; i < 20; i++ { data := Random(23+i, rand.Intn(MiB)+500*KiB) - id := backend.Hash(data) - err := b.Save(backend.Handle{Name: id.String(), Type: backend.Data}, data) + id := restic.Hash(data) + err := b.Save(restic.Handle{Name: id.String(), Type: restic.DataFile}, data) OK(t, err) - buf, err := backend.LoadAll(b, backend.Handle{Type: backend.Data, Name: id.String()}, nil) + buf, err := backend.LoadAll(b, restic.Handle{Type: restic.DataFile, Name: id.String()}, nil) OK(t, err) if len(buf) != len(data) { @@ -44,12 +45,12 @@ func TestLoadSmallBuffer(t *testing.T) { for i := 0; i < 20; i++ { data := Random(23+i, rand.Intn(MiB)+500*KiB) - id := backend.Hash(data) - err := b.Save(backend.Handle{Name: id.String(), Type: backend.Data}, data) + id := restic.Hash(data) + err := b.Save(restic.Handle{Name: id.String(), Type: restic.DataFile}, data) OK(t, err) buf := make([]byte, len(data)-23) - buf, err = backend.LoadAll(b, backend.Handle{Type: backend.Data, Name: id.String()}, buf) + buf, err = backend.LoadAll(b, restic.Handle{Type: restic.DataFile, Name: id.String()}, buf) OK(t, err) if len(buf) != len(data) { @@ -70,12 +71,12 @@ func TestLoadLargeBuffer(t *testing.T) { for i := 0; i < 20; i++ { data := Random(23+i, rand.Intn(MiB)+500*KiB) - id := backend.Hash(data) - err := b.Save(backend.Handle{Name: id.String(), Type: backend.Data}, data) + id := restic.Hash(data) + err := b.Save(restic.Handle{Name: 
id.String(), Type: restic.DataFile}, data) OK(t, err) buf := make([]byte, len(data)+100) - buf, err = backend.LoadAll(b, backend.Handle{Type: backend.Data, Name: id.String()}, buf) + buf, err = backend.LoadAll(b, restic.Handle{Type: restic.DataFile, Name: id.String()}, buf) OK(t, err) if len(buf) != len(data) { diff --git a/src/restic/backend/generic.go b/src/restic/backend_find.go similarity index 82% rename from src/restic/backend/generic.go rename to src/restic/backend_find.go index 7510ad0fe..193fd165b 100644 --- a/src/restic/backend/generic.go +++ b/src/restic/backend_find.go @@ -1,6 +1,6 @@ -package backend +package restic -import "github.com/pkg/errors" +import "restic/errors" // ErrNoIDPrefixFound is returned by Find() when no ID for the given prefix // could be found. @@ -10,10 +10,10 @@ var ErrNoIDPrefixFound = errors.New("no ID found") // prefix are found. var ErrMultipleIDMatches = errors.New("multiple IDs with prefix found") -// Find loads the list of all blobs of type t and searches for names which +// Find loads the list of all files of type t and searches for names which // start with prefix. If none is found, nil and ErrNoIDPrefixFound is returned. // If more than one is found, nil and ErrMultipleIDMatches is returned. -func Find(be Lister, t Type, prefix string) (string, error) { +func Find(be Lister, t FileType, prefix string) (string, error) { done := make(chan struct{}) defer close(done) @@ -41,7 +41,7 @@ const minPrefixLength = 8 // PrefixLength returns the number of bytes required so that all prefixes of // all names of type t are unique. 
-func PrefixLength(be Lister, t Type) (int, error) { +func PrefixLength(be Lister, t FileType) (int, error) { done := make(chan struct{}) defer close(done) @@ -52,8 +52,9 @@ func PrefixLength(be Lister, t Type) (int, error) { } // select prefixes of length l, test if the last one is the same as the current one + id := ID{} outer: - for l := minPrefixLength; l < IDSize; l++ { + for l := minPrefixLength; l < len(id); l++ { var last string for _, name := range list { @@ -66,5 +67,5 @@ outer: return l, nil } - return IDSize, nil + return len(id), nil } diff --git a/src/restic/backend_find_test.go b/src/restic/backend_find_test.go new file mode 100644 index 000000000..cc86cd810 --- /dev/null +++ b/src/restic/backend_find_test.go @@ -0,0 +1,70 @@ +package restic + +import ( + "testing" +) + +type mockBackend struct { + list func(FileType, <-chan struct{}) <-chan string +} + +func (m mockBackend) List(t FileType, done <-chan struct{}) <-chan string { + return m.list(t, done) +} + +var samples = IDs{ + TestParseID("20bdc1402a6fc9b633aaffffffffffffffffffffffffffffffffffffffffffff"), + TestParseID("20bdc1402a6fc9b633ccd578c4a92d0f4ef1a457fa2e16c596bc73fb409d6cc0"), + TestParseID("20bdc1402a6fc9b633ffffffffffffffffffffffffffffffffffffffffffffff"), + TestParseID("20ff988befa5fc40350f00d531a767606efefe242c837aaccb80673f286be53d"), + TestParseID("326cb59dfe802304f96ee9b5b9af93bdee73a30f53981e5ec579aedb6f1d0f07"), + TestParseID("86b60b9594d1d429c4aa98fa9562082cabf53b98c7dc083abe5dae31074dd15a"), + TestParseID("96c8dbe225079e624b5ce509f5bd817d1453cd0a85d30d536d01b64a8669aeae"), + TestParseID("fa31d65b87affcd167b119e9d3d2a27b8236ca4836cb077ed3e96fcbe209b792"), +} + +func TestPrefixLength(t *testing.T) { + list := samples + + m := mockBackend{} + m.list = func(t FileType, done <-chan struct{}) <-chan string { + ch := make(chan string) + go func() { + defer close(ch) + for _, id := range list { + select { + case ch <- id.String(): + case <-done: + return + } + } + }() + return ch + } 
+ + l, err := PrefixLength(m, SnapshotFile) + if err != nil { + t.Error(err) + } + if l != 19 { + t.Errorf("wrong prefix length returned, want %d, got %d", 19, l) + } + + list = samples[:3] + l, err = PrefixLength(m, SnapshotFile) + if err != nil { + t.Error(err) + } + if l != 19 { + t.Errorf("wrong prefix length returned, want %d, got %d", 19, l) + } + + list = samples[3:] + l, err = PrefixLength(m, SnapshotFile) + if err != nil { + t.Error(err) + } + if l != 8 { + t.Errorf("wrong prefix length returned, want %d, got %d", 8, l) + } +} diff --git a/src/restic/blob.go b/src/restic/blob.go new file mode 100644 index 000000000..6074b59b1 --- /dev/null +++ b/src/restic/blob.go @@ -0,0 +1,113 @@ +package restic + +import ( + "fmt" + + "restic/errors" +) + +// Blob is one part of a file or a tree. +type Blob struct { + Type BlobType + Length uint + ID ID + Offset uint +} + +// PackedBlob is a blob stored within a file. +type PackedBlob struct { + Blob + PackID ID +} + +// BlobHandle identifies a blob of a given type. +type BlobHandle struct { + ID ID + Type BlobType +} + +func (h BlobHandle) String() string { + return fmt.Sprintf("<%s/%s>", h.Type, h.ID.Str()) +} + +// BlobType specifies what a blob stored in a pack is. +type BlobType uint8 + +// These are the blob types that can be stored in a pack. +const ( + InvalidBlob BlobType = iota + DataBlob + TreeBlob +) + +func (t BlobType) String() string { + switch t { + case DataBlob: + return "data" + case TreeBlob: + return "tree" + } + + return fmt.Sprintf("<BlobType %d>", t) +} + +// MarshalJSON encodes the BlobType into JSON. +func (t BlobType) MarshalJSON() ([]byte, error) { + switch t { + case DataBlob: + return []byte(`"data"`), nil + case TreeBlob: + return []byte(`"tree"`), nil + } + + return nil, errors.New("unknown blob type") +} + +// UnmarshalJSON decodes the BlobType from JSON. 
+func (t *BlobType) UnmarshalJSON(buf []byte) error { + switch string(buf) { + case `"data"`: + *t = DataBlob + case `"tree"`: + *t = TreeBlob + default: + return errors.New("unknown blob type") + } + + return nil +} + +// BlobHandles is an ordered list of BlobHandles that implements sort.Interface. +type BlobHandles []BlobHandle + +func (h BlobHandles) Len() int { + return len(h) +} + +func (h BlobHandles) Less(i, j int) bool { + for k, b := range h[i].ID { + if b == h[j].ID[k] { + continue + } + + if b < h[j].ID[k] { + return true + } + + return false + } + + return h[i].Type < h[j].Type +} + +func (h BlobHandles) Swap(i, j int) { + h[i], h[j] = h[j], h[i] +} + +func (h BlobHandles) String() string { + elements := make([]string, 0, len(h)) + for _, e := range h { + elements = append(elements, e.String()) + } + return fmt.Sprintf("%v", elements) +} diff --git a/src/restic/pack/blob_set.go b/src/restic/blob_set.go similarity index 81% rename from src/restic/pack/blob_set.go rename to src/restic/blob_set.go index 686ea9315..07e88fed0 100644 --- a/src/restic/pack/blob_set.go +++ b/src/restic/blob_set.go @@ -1,12 +1,12 @@ -package pack +package restic import "sort" // BlobSet is a set of blobs. -type BlobSet map[Handle]struct{} +type BlobSet map[BlobHandle]struct{} // NewBlobSet returns a new BlobSet, populated with ids. -func NewBlobSet(handles ...Handle) BlobSet { +func NewBlobSet(handles ...BlobHandle) BlobSet { m := make(BlobSet) for _, h := range handles { m[h] = struct{}{} @@ -16,18 +16,18 @@ func NewBlobSet(handles ...Handle) BlobSet { } // Has returns true iff id is contained in the set. -func (s BlobSet) Has(h Handle) bool { +func (s BlobSet) Has(h BlobHandle) bool { _, ok := s[h] return ok } // Insert adds id to the set. -func (s BlobSet) Insert(h Handle) { +func (s BlobSet) Insert(h BlobHandle) { s[h] = struct{}{} } // Delete removes id from the set. 
-func (s BlobSet) Delete(h Handle) { +func (s BlobSet) Delete(h BlobHandle) { delete(s, h) } @@ -87,9 +87,9 @@ func (s BlobSet) Sub(other BlobSet) (result BlobSet) { return result } -// List returns a slice of all Handles in the set. -func (s BlobSet) List() Handles { - list := make(Handles, 0, len(s)) +// List returns a sorted slice of all BlobHandle in the set. +func (s BlobSet) List() BlobHandles { + list := make(BlobHandles, 0, len(s)) for h := range s { list = append(list, h) } diff --git a/src/restic/blob_test.go b/src/restic/blob_test.go new file mode 100644 index 000000000..951872250 --- /dev/null +++ b/src/restic/blob_test.go @@ -0,0 +1,41 @@ +package restic + +import ( + "encoding/json" + "testing" +) + +var blobTypeJSON = []struct { + t BlobType + res string +}{ + {DataBlob, `"data"`}, + {TreeBlob, `"tree"`}, +} + +func TestBlobTypeJSON(t *testing.T) { + for _, test := range blobTypeJSON { + // test serialize + buf, err := json.Marshal(test.t) + if err != nil { + t.Error(err) + continue + } + if test.res != string(buf) { + t.Errorf("want %q, got %q", test.res, string(buf)) + continue + } + + // test unserialize + var v BlobType + err = json.Unmarshal([]byte(test.res), &v) + if err != nil { + t.Error(err) + continue + } + if test.t != v { + t.Errorf("want %v, got %v", test.t, v) + continue + } + } +} diff --git a/src/restic/cache.go b/src/restic/cache.go deleted file mode 100644 index 1af4e9605..000000000 --- a/src/restic/cache.go +++ /dev/null @@ -1,290 +0,0 @@ -package restic - -import ( - "io" - "os" - "path/filepath" - "runtime" - "strings" - - "github.com/pkg/errors" - - "restic/backend" - "restic/debug" - "restic/fs" - "restic/repository" -) - -// Cache is used to locally cache items from a repository. -type Cache struct { - base string -} - -// NewCache returns a new cache at cacheDir. If it is the empty string, the -// default cache location is chosen. 
-func NewCache(repo *repository.Repository, cacheDir string) (*Cache, error) { - var err error - - if cacheDir == "" { - cacheDir, err = getCacheDir() - if err != nil { - return nil, err - } - } - - basedir := filepath.Join(cacheDir, repo.Config.ID) - debug.Log("Cache.New", "opened cache at %v", basedir) - - return &Cache{base: basedir}, nil -} - -// Has checks if the local cache has the id. -func (c *Cache) Has(t backend.Type, subtype string, id backend.ID) (bool, error) { - filename, err := c.filename(t, subtype, id) - if err != nil { - return false, err - } - fd, err := fs.Open(filename) - defer fd.Close() - - if err != nil { - if os.IsNotExist(errors.Cause(err)) { - debug.Log("Cache.Has", "test for file %v: not cached", filename) - return false, nil - } - - debug.Log("Cache.Has", "test for file %v: error %v", filename, err) - return false, errors.Wrap(err, "Open") - } - - debug.Log("Cache.Has", "test for file %v: is cached", filename) - return true, nil -} - -// Store returns an io.WriteCloser that is used to save new information to the -// cache. The returned io.WriteCloser must be closed by the caller after all -// data has been written. -func (c *Cache) Store(t backend.Type, subtype string, id backend.ID) (io.WriteCloser, error) { - filename, err := c.filename(t, subtype, id) - if err != nil { - return nil, err - } - - dirname := filepath.Dir(filename) - err = fs.MkdirAll(dirname, 0700) - if err != nil { - return nil, errors.Wrap(err, "MkdirAll") - } - - file, err := fs.Create(filename) - if err != nil { - debug.Log("Cache.Store", "error creating file %v: %v", filename, err) - return nil, errors.Wrap(err, "Create") - } - - debug.Log("Cache.Store", "created file %v", filename) - return file, nil -} - -// Load returns information from the cache. The returned io.ReadCloser must be -// closed by the caller. 
-func (c *Cache) Load(t backend.Type, subtype string, id backend.ID) (io.ReadCloser, error) { - filename, err := c.filename(t, subtype, id) - if err != nil { - return nil, err - } - - return fs.Open(filename) -} - -func (c *Cache) purge(t backend.Type, subtype string, id backend.ID) error { - filename, err := c.filename(t, subtype, id) - if err != nil { - return err - } - - err = fs.Remove(filename) - debug.Log("Cache.purge", "Remove file %v: %v", filename, err) - - if err != nil && os.IsNotExist(errors.Cause(err)) { - return nil - } - - return errors.Wrap(err, "Remove") -} - -// Clear removes information from the cache that isn't present in the repository any more. -func (c *Cache) Clear(repo *repository.Repository) error { - list, err := c.list(backend.Snapshot) - if err != nil { - return err - } - - for _, entry := range list { - debug.Log("Cache.Clear", "found entry %v", entry) - - if ok, err := repo.Backend().Test(backend.Snapshot, entry.ID.String()); !ok || err != nil { - debug.Log("Cache.Clear", "snapshot %v doesn't exist any more, removing %v", entry.ID, entry) - - err = c.purge(backend.Snapshot, entry.Subtype, entry.ID) - if err != nil { - return err - } - } - } - - return nil -} - -type cacheEntry struct { - ID backend.ID - Subtype string -} - -func (c cacheEntry) String() string { - if c.Subtype != "" { - return c.ID.Str() + "." 
+ c.Subtype - } - return c.ID.Str() -} - -func (c *Cache) list(t backend.Type) ([]cacheEntry, error) { - var dir string - - switch t { - case backend.Snapshot: - dir = filepath.Join(c.base, "snapshots") - default: - return nil, errors.Errorf("cache not supported for type %v", t) - } - - fd, err := fs.Open(dir) - if err != nil { - if os.IsNotExist(errors.Cause(err)) { - return []cacheEntry{}, nil - } - return nil, errors.Wrap(err, "Open") - } - defer fd.Close() - - fis, err := fd.Readdir(-1) - if err != nil { - return nil, errors.Wrap(err, "Readdir") - } - - entries := make([]cacheEntry, 0, len(fis)) - - for _, fi := range fis { - parts := strings.SplitN(fi.Name(), ".", 2) - - id, err := backend.ParseID(parts[0]) - // ignore invalid cache entries for now - if err != nil { - debug.Log("Cache.List", "unable to parse name %v as id: %v", parts[0], err) - continue - } - - e := cacheEntry{ID: id} - - if len(parts) == 2 { - e.Subtype = parts[1] - } - - entries = append(entries, e) - } - - return entries, nil -} - -func (c *Cache) filename(t backend.Type, subtype string, id backend.ID) (string, error) { - filename := id.String() - if subtype != "" { - filename += "." + subtype - } - - switch t { - case backend.Snapshot: - return filepath.Join(c.base, "snapshots", filename), nil - } - - return "", errors.Errorf("cache not supported for type %v", t) -} - -func getCacheDir() (string, error) { - if dir := os.Getenv("RESTIC_CACHE"); dir != "" { - return dir, nil - } - if runtime.GOOS == "windows" { - return getWindowsCacheDir() - } - - return getXDGCacheDir() -} - -// getWindowsCacheDir will return %APPDATA%\restic or create -// a folder in the temporary folder called "restic". 
-func getWindowsCacheDir() (string, error) { - cachedir := os.Getenv("APPDATA") - if cachedir == "" { - cachedir = os.TempDir() - } - cachedir = filepath.Join(cachedir, "restic") - fi, err := fs.Stat(cachedir) - - if os.IsNotExist(errors.Cause(err)) { - err = fs.MkdirAll(cachedir, 0700) - if err != nil { - return "", errors.Wrap(err, "MkdirAll") - } - - return cachedir, nil - } - - if err != nil { - return "", errors.Wrap(err, "Stat") - } - - if !fi.IsDir() { - return "", errors.Errorf("cache dir %v is not a directory", cachedir) - } - return cachedir, nil -} - -// getXDGCacheDir returns the cache directory according to XDG basedir spec, see -// http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html -func getXDGCacheDir() (string, error) { - xdgcache := os.Getenv("XDG_CACHE_HOME") - home := os.Getenv("HOME") - - if xdgcache == "" && home == "" { - return "", errors.New("unable to locate cache directory (XDG_CACHE_HOME and HOME unset)") - } - - cachedir := "" - if xdgcache != "" { - cachedir = filepath.Join(xdgcache, "restic") - } else if home != "" { - cachedir = filepath.Join(home, ".cache", "restic") - } - - fi, err := fs.Stat(cachedir) - if os.IsNotExist(errors.Cause(err)) { - err = fs.MkdirAll(cachedir, 0700) - if err != nil { - return "", errors.Wrap(err, "MkdirAll") - } - - fi, err = fs.Stat(cachedir) - debug.Log("getCacheDir", "create cache dir %v", cachedir) - } - - if err != nil { - return "", errors.Wrap(err, "Stat") - } - - if !fi.IsDir() { - return "", errors.Errorf("cache dir %v is not a directory", cachedir) - } - - return cachedir, nil -} diff --git a/src/restic/cache_test.go b/src/restic/cache_test.go deleted file mode 100644 index c72b26e2a..000000000 --- a/src/restic/cache_test.go +++ /dev/null @@ -1,26 +0,0 @@ -package restic_test - -import ( - "testing" - - "restic" - . 
"restic/test" -) - -func TestCache(t *testing.T) { - repo := SetupRepo() - defer TeardownRepo(repo) - - _, err := restic.NewCache(repo, "") - OK(t, err) - - arch := restic.NewArchiver(repo) - - // archive some files, this should automatically cache all blobs from the snapshot - _, _, err = arch.Snapshot(nil, []string{BenchArchiveDirectory}, nil) - if err != nil { - t.Fatal(err) - } - - // TODO: test caching index -} diff --git a/src/restic/checker/checker.go b/src/restic/checker/checker.go index 1755bd3ac..ebb416938 100644 --- a/src/restic/checker/checker.go +++ b/src/restic/checker/checker.go @@ -5,7 +5,7 @@ import ( "fmt" "sync" - "github.com/pkg/errors" + "restic/errors" "restic" "restic/backend" @@ -21,31 +21,31 @@ import ( // A Checker only tests for internal errors within the data structures of the // repository (e.g. missing blobs), and needs a valid Repository to work on. type Checker struct { - packs backend.IDSet - blobs backend.IDSet + packs restic.IDSet + blobs restic.IDSet blobRefs struct { sync.Mutex - M map[backend.ID]uint + M map[restic.ID]uint } - indexes map[backend.ID]*repository.Index - orphanedPacks backend.IDs + indexes map[restic.ID]*repository.Index + orphanedPacks restic.IDs masterIndex *repository.MasterIndex - repo *repository.Repository + repo restic.Repository } // New returns a new checker which runs on repo. -func New(repo *repository.Repository) *Checker { +func New(repo restic.Repository) *Checker { c := &Checker{ - packs: backend.NewIDSet(), - blobs: backend.NewIDSet(), + packs: restic.NewIDSet(), + blobs: restic.NewIDSet(), masterIndex: repository.NewMasterIndex(), - indexes: make(map[backend.ID]*repository.Index), + indexes: make(map[restic.ID]*repository.Index), repo: repo, } - c.blobRefs.M = make(map[backend.ID]uint) + c.blobRefs.M = make(map[restic.ID]uint) return c } @@ -54,8 +54,8 @@ const defaultParallelism = 40 // ErrDuplicatePacks is returned when a pack is found in more than one index. 
type ErrDuplicatePacks struct { - PackID backend.ID - Indexes backend.IDSet + PackID restic.ID + Indexes restic.IDSet } func (e ErrDuplicatePacks) Error() string { @@ -65,7 +65,7 @@ func (e ErrDuplicatePacks) Error() string { // ErrOldIndexFormat is returned when an index with the old format is // found. type ErrOldIndexFormat struct { - backend.ID + restic.ID } func (err ErrOldIndexFormat) Error() string { @@ -82,7 +82,7 @@ func (c *Checker) LoadIndex() (hints []error, errs []error) { indexCh := make(chan indexRes) - worker := func(id backend.ID, done <-chan struct{}) error { + worker := func(id restic.ID, done <-chan struct{}) error { debug.Log("LoadIndex", "worker got index %v", id) idx, err := repository.LoadIndexWithDecoder(c.repo, id, repository.DecodeIndex) if errors.Cause(err) == repository.ErrOldIndexFormat { @@ -108,7 +108,7 @@ func (c *Checker) LoadIndex() (hints []error, errs []error) { go func() { defer close(indexCh) debug.Log("LoadIndex", "start loading indexes in parallel") - perr = repository.FilesInParallel(c.repo.Backend(), backend.Index, defaultParallelism, + perr = repository.FilesInParallel(c.repo.Backend(), restic.IndexFile, defaultParallelism, repository.ParallelWorkFuncParseID(worker)) debug.Log("LoadIndex", "loading indexes finished, error: %v", perr) }() @@ -121,11 +121,11 @@ func (c *Checker) LoadIndex() (hints []error, errs []error) { return hints, errs } - packToIndex := make(map[backend.ID]backend.IDSet) + packToIndex := make(map[restic.ID]restic.IDSet) for res := range indexCh { debug.Log("LoadIndex", "process index %v", res.ID) - idxID, err := backend.ParseID(res.ID) + idxID, err := restic.ParseID(res.ID) if err != nil { errs = append(errs, errors.Errorf("unable to parse as index ID: %v", res.ID)) continue @@ -143,7 +143,7 @@ func (c *Checker) LoadIndex() (hints []error, errs []error) { cnt++ if _, ok := packToIndex[blob.PackID]; !ok { - packToIndex[blob.PackID] = backend.NewIDSet() + packToIndex[blob.PackID] = restic.NewIDSet() } 
packToIndex[blob.PackID].Insert(idxID) } @@ -171,7 +171,7 @@ func (c *Checker) LoadIndex() (hints []error, errs []error) { // PackError describes an error with a specific pack. type PackError struct { - ID backend.ID + ID restic.ID Orphaned bool Err error } @@ -180,14 +180,14 @@ func (e PackError) Error() string { return "pack " + e.ID.String() + ": " + e.Err.Error() } -func packIDTester(repo *repository.Repository, inChan <-chan backend.ID, errChan chan<- error, wg *sync.WaitGroup, done <-chan struct{}) { +func packIDTester(repo restic.Repository, inChan <-chan restic.ID, errChan chan<- error, wg *sync.WaitGroup, done <-chan struct{}) { debug.Log("Checker.testPackID", "worker start") defer debug.Log("Checker.testPackID", "worker done") defer wg.Done() for id := range inChan { - ok, err := repo.Backend().Test(backend.Data, id.String()) + ok, err := repo.Backend().Test(restic.DataFile, id.String()) if err != nil { err = PackError{ID: id, Err: err} } else { @@ -218,11 +218,11 @@ func (c *Checker) Packs(errChan chan<- error, done <-chan struct{}) { defer close(errChan) debug.Log("Checker.Packs", "checking for %d packs", len(c.packs)) - seenPacks := backend.NewIDSet() + seenPacks := restic.NewIDSet() var workerWG sync.WaitGroup - IDChan := make(chan backend.ID) + IDChan := make(chan restic.ID) for i := 0; i < defaultParallelism; i++ { workerWG.Add(1) go packIDTester(c.repo, IDChan, errChan, &workerWG, done) @@ -238,7 +238,7 @@ func (c *Checker) Packs(errChan chan<- error, done <-chan struct{}) { workerWG.Wait() debug.Log("Checker.Packs", "workers terminated") - for id := range c.repo.List(backend.Data, done) { + for id := range c.repo.List(restic.DataFile, done) { debug.Log("Checker.Packs", "check data blob %v", id.Str()) if !seenPacks.Has(id) { c.orphanedPacks = append(c.orphanedPacks, id) @@ -253,8 +253,8 @@ func (c *Checker) Packs(errChan chan<- error, done <-chan struct{}) { // Error is an error that occurred while checking a repository. 
type Error struct { - TreeID backend.ID - BlobID backend.ID + TreeID restic.ID + BlobID restic.ID Err error } @@ -273,25 +273,25 @@ func (e Error) Error() string { return e.Err.Error() } -func loadTreeFromSnapshot(repo *repository.Repository, id backend.ID) (backend.ID, error) { +func loadTreeFromSnapshot(repo restic.Repository, id restic.ID) (restic.ID, error) { sn, err := restic.LoadSnapshot(repo, id) if err != nil { debug.Log("Checker.loadTreeFromSnapshot", "error loading snapshot %v: %v", id.Str(), err) - return backend.ID{}, err + return restic.ID{}, err } if sn.Tree == nil { debug.Log("Checker.loadTreeFromSnapshot", "snapshot %v has no tree", id.Str()) - return backend.ID{}, errors.Errorf("snapshot %v has no tree", id) + return restic.ID{}, errors.Errorf("snapshot %v has no tree", id) } return *sn.Tree, nil } // loadSnapshotTreeIDs loads all snapshots from backend and returns the tree IDs. -func loadSnapshotTreeIDs(repo *repository.Repository) (backend.IDs, []error) { +func loadSnapshotTreeIDs(repo restic.Repository) (restic.IDs, []error) { var trees struct { - IDs backend.IDs + IDs restic.IDs sync.Mutex } @@ -301,7 +301,7 @@ func loadSnapshotTreeIDs(repo *repository.Repository) (backend.IDs, []error) { } snapshotWorker := func(strID string, done <-chan struct{}) error { - id, err := backend.ParseID(strID) + id, err := restic.ParseID(strID) if err != nil { return err } @@ -324,7 +324,7 @@ func loadSnapshotTreeIDs(repo *repository.Repository) (backend.IDs, []error) { return nil } - err := repository.FilesInParallel(repo.Backend(), backend.Snapshot, defaultParallelism, snapshotWorker) + err := repository.FilesInParallel(repo.Backend(), restic.SnapshotFile, defaultParallelism, snapshotWorker) if err != nil { errs.errs = append(errs.errs, err) } @@ -334,7 +334,7 @@ func loadSnapshotTreeIDs(repo *repository.Repository) (backend.IDs, []error) { // TreeError collects several errors that occurred while processing a tree. 
type TreeError struct { - ID backend.ID + ID restic.ID Errors []error } @@ -343,14 +343,14 @@ func (e TreeError) Error() string { } type treeJob struct { - backend.ID + restic.ID error *restic.Tree } // loadTreeWorker loads trees from repo and sends them to out. -func loadTreeWorker(repo *repository.Repository, - in <-chan backend.ID, out chan<- treeJob, +func loadTreeWorker(repo restic.Repository, + in <-chan restic.ID, out chan<- treeJob, done <-chan struct{}, wg *sync.WaitGroup) { defer func() { @@ -376,7 +376,7 @@ func loadTreeWorker(repo *repository.Repository, } debug.Log("checker.loadTreeWorker", "load tree %v", treeID.Str()) - tree, err := restic.LoadTree(repo, treeID) + tree, err := repo.LoadTree(treeID) debug.Log("checker.loadTreeWorker", "load tree %v (%v) returned err: %v", tree, treeID.Str(), err) job = treeJob{ID: treeID, error: err, Tree: tree} outCh = out @@ -454,7 +454,7 @@ func (c *Checker) checkTreeWorker(in <-chan treeJob, out chan<- error, done <-ch } } -func filterTrees(backlog backend.IDs, loaderChan chan<- backend.ID, in <-chan treeJob, out chan<- treeJob, done <-chan struct{}) { +func filterTrees(backlog restic.IDs, loaderChan chan<- restic.ID, in <-chan treeJob, out chan<- treeJob, done <-chan struct{}) { defer func() { debug.Log("checker.filterTrees", "closing output channels") close(loaderChan) @@ -466,7 +466,7 @@ func filterTrees(backlog backend.IDs, loaderChan chan<- backend.ID, in <-chan tr outCh = out loadCh = loaderChan job treeJob - nextTreeID backend.ID + nextTreeID restic.ID outstandingLoadTreeJobs = 0 ) @@ -559,7 +559,7 @@ func (c *Checker) Structure(errChan chan<- error, done <-chan struct{}) { } } - treeIDChan := make(chan backend.ID) + treeIDChan := make(chan restic.ID) treeJobChan1 := make(chan treeJob) treeJobChan2 := make(chan treeJob) @@ -575,10 +575,10 @@ func (c *Checker) Structure(errChan chan<- error, done <-chan struct{}) { wg.Wait() } -func (c *Checker) checkTree(id backend.ID, tree *restic.Tree) (errs []error) { 
+func (c *Checker) checkTree(id restic.ID, tree *restic.Tree) (errs []error) { debug.Log("Checker.checkTree", "checking tree %v", id.Str()) - var blobs []backend.ID + var blobs []restic.ID for _, node := range tree.Nodes { switch node.Type { @@ -634,7 +634,7 @@ func (c *Checker) checkTree(id backend.ID, tree *restic.Tree) (errs []error) { } // UnusedBlobs returns all blobs that have never been referenced. -func (c *Checker) UnusedBlobs() (blobs backend.IDs) { +func (c *Checker) UnusedBlobs() (blobs restic.IDs) { c.blobRefs.Lock() defer c.blobRefs.Unlock() @@ -650,7 +650,7 @@ func (c *Checker) UnusedBlobs() (blobs backend.IDs) { } // OrphanedPacks returns a slice of unused packs (only available after Packs() was run). -func (c *Checker) OrphanedPacks() backend.IDs { +func (c *Checker) OrphanedPacks() restic.IDs { return c.orphanedPacks } @@ -660,15 +660,15 @@ func (c *Checker) CountPacks() uint64 { } // checkPack reads a pack and checks the integrity of all blobs. -func checkPack(r *repository.Repository, id backend.ID) error { +func checkPack(r restic.Repository, id restic.ID) error { debug.Log("Checker.checkPack", "checking pack %v", id.Str()) - h := backend.Handle{Type: backend.Data, Name: id.String()} + h := restic.Handle{Type: restic.DataFile, Name: id.String()} buf, err := backend.LoadAll(r.Backend(), h, nil) if err != nil { return err } - hash := backend.Hash(buf) + hash := restic.Hash(buf) if !hash.Equal(id) { debug.Log("Checker.checkPack", "Pack ID does not match, want %v, got %v", id.Str(), hash.Str()) return errors.Errorf("Pack ID does not match, want %v, got %v", id.Str(), hash.Str()) @@ -684,14 +684,15 @@ func checkPack(r *repository.Repository, id backend.ID) error { debug.Log("Checker.checkPack", " check blob %d: %v", i, blob.ID.Str()) plainBuf := make([]byte, blob.Length) - plainBuf, err = crypto.Decrypt(r.Key(), plainBuf, buf[blob.Offset:blob.Offset+blob.Length]) + n, err := crypto.Decrypt(r.Key(), plainBuf, buf[blob.Offset:blob.Offset+blob.Length]) 
if err != nil { debug.Log("Checker.checkPack", " error decrypting blob %v: %v", blob.ID.Str(), err) errs = append(errs, errors.Errorf("blob %v: %v", i, err)) continue } + plainBuf = plainBuf[:n] - hash := backend.Hash(plainBuf) + hash := restic.Hash(plainBuf) if !hash.Equal(blob.ID) { debug.Log("Checker.checkPack", " Blob ID does not match, want %v, got %v", blob.ID.Str(), hash.Str()) errs = append(errs, errors.Errorf("Blob ID does not match, want %v, got %v", blob.ID.Str(), hash.Str())) @@ -713,10 +714,10 @@ func (c *Checker) ReadData(p *restic.Progress, errChan chan<- error, done <-chan p.Start() defer p.Done() - worker := func(wg *sync.WaitGroup, in <-chan backend.ID) { + worker := func(wg *sync.WaitGroup, in <-chan restic.ID) { defer wg.Done() for { - var id backend.ID + var id restic.ID var ok bool select { @@ -742,7 +743,7 @@ func (c *Checker) ReadData(p *restic.Progress, errChan chan<- error, done <-chan } } - ch := c.repo.List(backend.Data, done) + ch := c.repo.List(restic.DataFile, done) var wg sync.WaitGroup for i := 0; i < defaultParallelism; i++ { diff --git a/src/restic/checker/checker_test.go b/src/restic/checker/checker_test.go index d06b2139b..b5cf3732c 100644 --- a/src/restic/checker/checker_test.go +++ b/src/restic/checker/checker_test.go @@ -1,23 +1,22 @@ package checker_test import ( - "fmt" "math/rand" "path/filepath" "sort" "testing" "restic" - "restic/backend" + "restic/archiver" "restic/backend/mem" "restic/checker" "restic/repository" - . 
"restic/test" + "restic/test" ) var checkerTestData = filepath.Join("testdata", "checker-test-repo.tar.gz") -func list(repo *repository.Repository, t backend.Type) (IDs []string) { +func list(repo restic.Repository, t restic.FileType) (IDs []string) { done := make(chan struct{}) defer close(done) @@ -60,164 +59,167 @@ func checkData(chkr *checker.Checker) []error { } func TestCheckRepo(t *testing.T) { - WithTestEnvironment(t, checkerTestData, func(repodir string) { - repo := OpenLocalRepo(t, repodir) + repodir, cleanup := test.Env(t, checkerTestData) + defer cleanup() - chkr := checker.New(repo) - hints, errs := chkr.LoadIndex() - if len(errs) > 0 { - t.Fatalf("expected no errors, got %v: %v", len(errs), errs) - } + repo := repository.TestOpenLocal(t, repodir) - if len(hints) > 0 { - t.Errorf("expected no hints, got %v: %v", len(hints), hints) - } + chkr := checker.New(repo) + hints, errs := chkr.LoadIndex() + if len(errs) > 0 { + t.Fatalf("expected no errors, got %v: %v", len(errs), errs) + } - OKs(t, checkPacks(chkr)) - OKs(t, checkStruct(chkr)) - }) + if len(hints) > 0 { + t.Errorf("expected no hints, got %v: %v", len(hints), hints) + } + + test.OKs(t, checkPacks(chkr)) + test.OKs(t, checkStruct(chkr)) } func TestMissingPack(t *testing.T) { - WithTestEnvironment(t, checkerTestData, func(repodir string) { - repo := OpenLocalRepo(t, repodir) + repodir, cleanup := test.Env(t, checkerTestData) + defer cleanup() - packID := "657f7fb64f6a854fff6fe9279998ee09034901eded4e6db9bcee0e59745bbce6" - OK(t, repo.Backend().Remove(backend.Data, packID)) + repo := repository.TestOpenLocal(t, repodir) - chkr := checker.New(repo) - hints, errs := chkr.LoadIndex() - if len(errs) > 0 { - t.Fatalf("expected no errors, got %v: %v", len(errs), errs) - } + packID := "657f7fb64f6a854fff6fe9279998ee09034901eded4e6db9bcee0e59745bbce6" + test.OK(t, repo.Backend().Remove(restic.DataFile, packID)) - if len(hints) > 0 { - t.Errorf("expected no hints, got %v: %v", len(hints), hints) - } + chkr 
:= checker.New(repo) + hints, errs := chkr.LoadIndex() + if len(errs) > 0 { + t.Fatalf("expected no errors, got %v: %v", len(errs), errs) + } - errs = checkPacks(chkr) + if len(hints) > 0 { + t.Errorf("expected no hints, got %v: %v", len(hints), hints) + } - Assert(t, len(errs) == 1, - "expected exactly one error, got %v", len(errs)) + errs = checkPacks(chkr) - if err, ok := errs[0].(checker.PackError); ok { - Equals(t, packID, err.ID.String()) - } else { - t.Errorf("expected error returned by checker.Packs() to be PackError, got %v", err) - } - }) + test.Assert(t, len(errs) == 1, + "expected exactly one error, got %v", len(errs)) + + if err, ok := errs[0].(checker.PackError); ok { + test.Equals(t, packID, err.ID.String()) + } else { + t.Errorf("expected error returned by checker.Packs() to be PackError, got %v", err) + } } func TestUnreferencedPack(t *testing.T) { - WithTestEnvironment(t, checkerTestData, func(repodir string) { - repo := OpenLocalRepo(t, repodir) + repodir, cleanup := test.Env(t, checkerTestData) + defer cleanup() - // index 3f1a only references pack 60e0 - indexID := "3f1abfcb79c6f7d0a3be517d2c83c8562fba64ef2c8e9a3544b4edaf8b5e3b44" - packID := "60e0438dcb978ec6860cc1f8c43da648170ee9129af8f650f876bad19f8f788e" - OK(t, repo.Backend().Remove(backend.Index, indexID)) + repo := repository.TestOpenLocal(t, repodir) - chkr := checker.New(repo) - hints, errs := chkr.LoadIndex() - if len(errs) > 0 { - t.Fatalf("expected no errors, got %v: %v", len(errs), errs) - } + // index 3f1a only references pack 60e0 + indexID := "3f1abfcb79c6f7d0a3be517d2c83c8562fba64ef2c8e9a3544b4edaf8b5e3b44" + packID := "60e0438dcb978ec6860cc1f8c43da648170ee9129af8f650f876bad19f8f788e" + test.OK(t, repo.Backend().Remove(restic.IndexFile, indexID)) - if len(hints) > 0 { - t.Errorf("expected no hints, got %v: %v", len(hints), hints) - } + chkr := checker.New(repo) + hints, errs := chkr.LoadIndex() + if len(errs) > 0 { + t.Fatalf("expected no errors, got %v: %v", len(errs), errs) + 
} - errs = checkPacks(chkr) + if len(hints) > 0 { + t.Errorf("expected no hints, got %v: %v", len(hints), hints) + } - Assert(t, len(errs) == 1, - "expected exactly one error, got %v", len(errs)) + errs = checkPacks(chkr) - if err, ok := errs[0].(checker.PackError); ok { - Equals(t, packID, err.ID.String()) - } else { - t.Errorf("expected error returned by checker.Packs() to be PackError, got %v", err) - } - }) + test.Assert(t, len(errs) == 1, + "expected exactly one error, got %v", len(errs)) + + if err, ok := errs[0].(checker.PackError); ok { + test.Equals(t, packID, err.ID.String()) + } else { + t.Errorf("expected error returned by checker.Packs() to be PackError, got %v", err) + } } func TestUnreferencedBlobs(t *testing.T) { - WithTestEnvironment(t, checkerTestData, func(repodir string) { - repo := OpenLocalRepo(t, repodir) + repodir, cleanup := test.Env(t, checkerTestData) + defer cleanup() - snID := "51d249d28815200d59e4be7b3f21a157b864dc343353df9d8e498220c2499b02" - OK(t, repo.Backend().Remove(backend.Snapshot, snID)) + repo := repository.TestOpenLocal(t, repodir) - unusedBlobsBySnapshot := backend.IDs{ - ParseID("58c748bbe2929fdf30c73262bd8313fe828f8925b05d1d4a87fe109082acb849"), - ParseID("988a272ab9768182abfd1fe7d7a7b68967825f0b861d3b36156795832c772235"), - ParseID("c01952de4d91da1b1b80bc6e06eaa4ec21523f4853b69dc8231708b9b7ec62d8"), - ParseID("bec3a53d7dc737f9a9bee68b107ec9e8ad722019f649b34d474b9982c3a3fec7"), - ParseID("2a6f01e5e92d8343c4c6b78b51c5a4dc9c39d42c04e26088c7614b13d8d0559d"), - ParseID("18b51b327df9391732ba7aaf841a4885f350d8a557b2da8352c9acf8898e3f10"), - } + snID := "51d249d28815200d59e4be7b3f21a157b864dc343353df9d8e498220c2499b02" + test.OK(t, repo.Backend().Remove(restic.SnapshotFile, snID)) - sort.Sort(unusedBlobsBySnapshot) + unusedBlobsBySnapshot := restic.IDs{ + restic.TestParseID("58c748bbe2929fdf30c73262bd8313fe828f8925b05d1d4a87fe109082acb849"), + restic.TestParseID("988a272ab9768182abfd1fe7d7a7b68967825f0b861d3b36156795832c772235"), 
+ restic.TestParseID("c01952de4d91da1b1b80bc6e06eaa4ec21523f4853b69dc8231708b9b7ec62d8"), + restic.TestParseID("bec3a53d7dc737f9a9bee68b107ec9e8ad722019f649b34d474b9982c3a3fec7"), + restic.TestParseID("2a6f01e5e92d8343c4c6b78b51c5a4dc9c39d42c04e26088c7614b13d8d0559d"), + restic.TestParseID("18b51b327df9391732ba7aaf841a4885f350d8a557b2da8352c9acf8898e3f10"), + } - chkr := checker.New(repo) - hints, errs := chkr.LoadIndex() - if len(errs) > 0 { - t.Fatalf("expected no errors, got %v: %v", len(errs), errs) - } + sort.Sort(unusedBlobsBySnapshot) - if len(hints) > 0 { - t.Errorf("expected no hints, got %v: %v", len(hints), hints) - } + chkr := checker.New(repo) + hints, errs := chkr.LoadIndex() + if len(errs) > 0 { + t.Fatalf("expected no errors, got %v: %v", len(errs), errs) + } - OKs(t, checkPacks(chkr)) - OKs(t, checkStruct(chkr)) + if len(hints) > 0 { + t.Errorf("expected no hints, got %v: %v", len(hints), hints) + } - blobs := chkr.UnusedBlobs() - sort.Sort(blobs) + test.OKs(t, checkPacks(chkr)) + test.OKs(t, checkStruct(chkr)) - Equals(t, unusedBlobsBySnapshot, blobs) - }) + blobs := chkr.UnusedBlobs() + sort.Sort(blobs) + + test.Equals(t, unusedBlobsBySnapshot, blobs) } var checkerDuplicateIndexTestData = filepath.Join("testdata", "duplicate-packs-in-index-test-repo.tar.gz") func TestDuplicatePacksInIndex(t *testing.T) { - WithTestEnvironment(t, checkerDuplicateIndexTestData, func(repodir string) { - repo := OpenLocalRepo(t, repodir) + repodir, cleanup := test.Env(t, checkerDuplicateIndexTestData) + defer cleanup() - chkr := checker.New(repo) - hints, errs := chkr.LoadIndex() - if len(hints) == 0 { - t.Fatalf("did not get expected checker hints for duplicate packs in indexes") + repo := repository.TestOpenLocal(t, repodir) + + chkr := checker.New(repo) + hints, errs := chkr.LoadIndex() + if len(hints) == 0 { + t.Fatalf("did not get expected checker hints for duplicate packs in indexes") + } + + found := false + for _, hint := range hints { + if _, ok := 
hint.(checker.ErrDuplicatePacks); ok { + found = true + } else { + t.Errorf("got unexpected hint: %v", hint) } + } - found := false - for _, hint := range hints { - if _, ok := hint.(checker.ErrDuplicatePacks); ok { - found = true - } else { - t.Errorf("got unexpected hint: %v", hint) - } - } + if !found { + t.Fatalf("did not find hint ErrDuplicatePacks") + } - if !found { - t.Fatalf("did not find hint ErrDuplicatePacks") - } - - if len(errs) > 0 { - t.Errorf("expected no errors, got %v: %v", len(errs), errs) - } - - }) + if len(errs) > 0 { + t.Errorf("expected no errors, got %v: %v", len(errs), errs) + } } // errorBackend randomly modifies data after reading. type errorBackend struct { - backend.Backend + restic.Backend ProduceErrors bool } -func (b errorBackend) Load(h backend.Handle, p []byte, off int64) (int, error) { - fmt.Printf("load %v\n", h) +func (b errorBackend) Load(h restic.Handle, p []byte, off int64) (int, error) { n, err := b.Backend.Load(h, p, off) if b.ProduceErrors { @@ -242,16 +244,16 @@ func TestCheckerModifiedData(t *testing.T) { repository.TestUseLowSecurityKDFParameters(t) repo := repository.New(be) - OK(t, repo.Init(TestPassword)) + test.OK(t, repo.Init(test.TestPassword)) - arch := restic.NewArchiver(repo) + arch := archiver.New(repo) _, id, err := arch.Snapshot(nil, []string{"."}, nil) - OK(t, err) + test.OK(t, err) t.Logf("archived as %v", id.Str()) beError := &errorBackend{Backend: be} checkRepo := repository.New(beError) - OK(t, checkRepo.SearchKey(TestPassword, 5)) + test.OK(t, checkRepo.SearchKey(test.TestPassword, 5)) chkr := checker.New(checkRepo) diff --git a/src/restic/checker/testing.go b/src/restic/checker/testing.go index 3bf9aa2ec..7b642dea1 100644 --- a/src/restic/checker/testing.go +++ b/src/restic/checker/testing.go @@ -1,12 +1,12 @@ package checker import ( - "restic/repository" + "restic" "testing" ) // TestCheckRepo runs the checker on repo. 
-func TestCheckRepo(t testing.TB, repo *repository.Repository) { +func TestCheckRepo(t testing.TB, repo restic.Repository) { chkr := New(repo) hints, errs := chkr.LoadIndex() diff --git a/src/restic/repository/config.go b/src/restic/config.go similarity index 66% rename from src/restic/repository/config.go rename to src/restic/config.go index c9e4eac85..0afb5426b 100644 --- a/src/restic/repository/config.go +++ b/src/restic/config.go @@ -1,15 +1,10 @@ -package repository +package restic import ( - "crypto/rand" - "crypto/sha256" - "encoding/hex" - "io" "testing" - "github.com/pkg/errors" + "restic/errors" - "restic/backend" "restic/debug" "github.com/restic/chunker" @@ -22,21 +17,18 @@ type Config struct { ChunkerPolynomial chunker.Pol `json:"chunker_polynomial"` } -// repositoryIDSize is the length of the ID chosen at random for a new repository. -const repositoryIDSize = sha256.Size - // RepoVersion is the version that is written to the config when a repository // is newly created with Init(). const RepoVersion = 1 // JSONUnpackedSaver saves unpacked JSON. type JSONUnpackedSaver interface { - SaveJSONUnpacked(backend.Type, interface{}) (backend.ID, error) + SaveJSONUnpacked(FileType, interface{}) (ID, error) } // JSONUnpackedLoader loads unpacked JSON. 
type JSONUnpackedLoader interface { - LoadJSONUnpacked(backend.Type, backend.ID, interface{}) error + LoadJSONUnpacked(FileType, ID, interface{}) error } // CreateConfig creates a config file with a randomly selected polynomial and @@ -52,13 +44,7 @@ func CreateConfig() (Config, error) { return Config{}, errors.Wrap(err, "chunker.RandomPolynomial") } - newID := make([]byte, repositoryIDSize) - _, err = io.ReadFull(rand.Reader, newID) - if err != nil { - return Config{}, errors.Wrap(err, "io.ReadFull") - } - - cfg.ID = hex.EncodeToString(newID) + cfg.ID = NewRandomID().String() cfg.Version = RepoVersion debug.Log("Repo.CreateConfig", "New config: %#v", cfg) @@ -69,13 +55,7 @@ func CreateConfig() (Config, error) { func TestCreateConfig(t testing.TB, pol chunker.Pol) (cfg Config) { cfg.ChunkerPolynomial = pol - newID := make([]byte, repositoryIDSize) - _, err := io.ReadFull(rand.Reader, newID) - if err != nil { - t.Fatalf("unable to create random ID: %v", err) - } - - cfg.ID = hex.EncodeToString(newID) + cfg.ID = NewRandomID().String() cfg.Version = RepoVersion return cfg @@ -87,7 +67,7 @@ func LoadConfig(r JSONUnpackedLoader) (Config, error) { cfg Config ) - err := r.LoadJSONUnpacked(backend.Config, backend.ID{}, &cfg) + err := r.LoadJSONUnpacked(ConfigFile, ID{}, &cfg) if err != nil { return Config{}, err } diff --git a/src/restic/config_test.go b/src/restic/config_test.go new file mode 100644 index 000000000..c5d2166e3 --- /dev/null +++ b/src/restic/config_test.go @@ -0,0 +1,54 @@ +package restic_test + +import ( + "restic" + "testing" + + . 
"restic/test" +) + +type saver func(restic.FileType, interface{}) (restic.ID, error) + +func (s saver) SaveJSONUnpacked(t restic.FileType, arg interface{}) (restic.ID, error) { + return s(t, arg) +} + +type loader func(restic.FileType, restic.ID, interface{}) error + +func (l loader) LoadJSONUnpacked(t restic.FileType, id restic.ID, arg interface{}) error { + return l(t, id, arg) +} + +func TestConfig(t *testing.T) { + resultConfig := restic.Config{} + save := func(tpe restic.FileType, arg interface{}) (restic.ID, error) { + Assert(t, tpe == restic.ConfigFile, + "wrong backend type: got %v, wanted %v", + tpe, restic.ConfigFile) + + cfg := arg.(restic.Config) + resultConfig = cfg + return restic.ID{}, nil + } + + cfg1, err := restic.CreateConfig() + OK(t, err) + + _, err = saver(save).SaveJSONUnpacked(restic.ConfigFile, cfg1) + + load := func(tpe restic.FileType, id restic.ID, arg interface{}) error { + Assert(t, tpe == restic.ConfigFile, + "wrong backend type: got %v, wanted %v", + tpe, restic.ConfigFile) + + cfg := arg.(*restic.Config) + *cfg = resultConfig + return nil + } + + cfg2, err := restic.LoadConfig(loader(load)) + OK(t, err) + + Assert(t, cfg1 == cfg2, + "configs aren't equal: %v != %v", cfg1, cfg2) +} diff --git a/src/restic/crypto/crypto.go b/src/restic/crypto/crypto.go index 33b9dfda8..57fdd6230 100644 --- a/src/restic/crypto/crypto.go +++ b/src/restic/crypto/crypto.go @@ -7,7 +7,7 @@ import ( "encoding/json" "fmt" - "github.com/pkg/errors" + "restic/errors" "golang.org/x/crypto/poly1305" ) @@ -274,9 +274,9 @@ func Encrypt(ks *Key, ciphertext []byte, plaintext []byte) ([]byte, error) { // Decrypt verifies and decrypts the ciphertext. Ciphertext must be in the form // IV || Ciphertext || MAC. plaintext and ciphertext may point to (exactly) the // same slice. 
-func Decrypt(ks *Key, plaintext []byte, ciphertextWithMac []byte) ([]byte, error) { +func Decrypt(ks *Key, plaintext []byte, ciphertextWithMac []byte) (int, error) { if !ks.Valid() { - return nil, errors.New("invalid key") + return 0, errors.New("invalid key") } // check for plausible length @@ -284,21 +284,26 @@ func Decrypt(ks *Key, plaintext []byte, ciphertextWithMac []byte) ([]byte, error panic("trying to decrypt invalid data: ciphertext too small") } + // check buffer length for plaintext + plaintextLength := len(ciphertextWithMac) - ivSize - macSize + if len(plaintext) < plaintextLength { + return 0, errors.Errorf("plaintext buffer too small, %d < %d", len(plaintext), plaintextLength) + } + // extract mac l := len(ciphertextWithMac) - macSize ciphertextWithIV, mac := ciphertextWithMac[:l], ciphertextWithMac[l:] // verify mac if !poly1305Verify(ciphertextWithIV[ivSize:], ciphertextWithIV[:ivSize], &ks.MAC, mac) { - return nil, ErrUnauthenticated + return 0, ErrUnauthenticated } // extract iv iv, ciphertext := ciphertextWithIV[:ivSize], ciphertextWithIV[ivSize:] - if cap(plaintext) < len(ciphertext) { - // extend plaintext - plaintext = append(plaintext, make([]byte, len(ciphertext)-cap(plaintext))...) + if len(ciphertext) != plaintextLength { + return 0, errors.Errorf("plaintext and ciphertext lengths do not match: %d != %d", len(ciphertext), plaintextLength) } // decrypt data @@ -312,7 +317,7 @@ func Decrypt(ks *Key, plaintext []byte, ciphertextWithMac []byte) ([]byte, error plaintext = plaintext[:len(ciphertext)] e.XORKeyStream(plaintext, ciphertext) - return plaintext, nil + return plaintextLength, nil } // Valid tests if the key is valid. 
diff --git a/src/restic/crypto/crypto_int_test.go b/src/restic/crypto/crypto_int_test.go index 5fed6b54c..1dbc32623 100644 --- a/src/restic/crypto/crypto_int_test.go +++ b/src/restic/crypto/crypto_int_test.go @@ -100,15 +100,17 @@ func TestCrypto(t *testing.T) { } // decrypt message - _, err = Decrypt(k, []byte{}, msg) + buf := make([]byte, len(tv.plaintext)) + n, err := Decrypt(k, buf, msg) if err != nil { t.Fatal(err) } + buf = buf[:n] // change mac, this must fail msg[len(msg)-8] ^= 0x23 - if _, err = Decrypt(k, []byte{}, msg); err != ErrUnauthenticated { + if _, err = Decrypt(k, buf, msg); err != ErrUnauthenticated { t.Fatal("wrong MAC value not detected") } @@ -118,15 +120,17 @@ func TestCrypto(t *testing.T) { // tamper with message, this must fail msg[16+5] ^= 0x85 - if _, err = Decrypt(k, []byte{}, msg); err != ErrUnauthenticated { + if _, err = Decrypt(k, buf, msg); err != ErrUnauthenticated { t.Fatal("tampered message not detected") } // test decryption - p, err := Decrypt(k, []byte{}, tv.ciphertext) + p := make([]byte, len(tv.ciphertext)) + n, err = Decrypt(k, p, tv.ciphertext) if err != nil { t.Fatal(err) } + p = p[:n] if !bytes.Equal(p, tv.plaintext) { t.Fatalf("wrong plaintext: expected %q but got %q\n", tv.plaintext, p) diff --git a/src/restic/crypto/crypto_test.go b/src/restic/crypto/crypto_test.go index fe799da77..39c3cc169 100644 --- a/src/restic/crypto/crypto_test.go +++ b/src/restic/crypto/crypto_test.go @@ -32,8 +32,10 @@ func TestEncryptDecrypt(t *testing.T) { "ciphertext length does not match: want %d, got %d", len(data)+crypto.Extension, len(ciphertext)) - plaintext, err := crypto.Decrypt(k, nil, ciphertext) + plaintext := make([]byte, len(ciphertext)) + n, err := crypto.Decrypt(k, plaintext, ciphertext) OK(t, err) + plaintext = plaintext[:n] Assert(t, len(plaintext) == len(data), "plaintext length does not match: want %d, got %d", len(data), len(plaintext)) @@ -58,8 +60,10 @@ func TestSmallBuffer(t *testing.T) { cap(ciphertext)) // check for 
the correct plaintext - plaintext, err := crypto.Decrypt(k, nil, ciphertext) + plaintext := make([]byte, len(ciphertext)) + n, err := crypto.Decrypt(k, plaintext, ciphertext) OK(t, err) + plaintext = plaintext[:n] Assert(t, bytes.Equal(plaintext, data), "wrong plaintext returned") } @@ -78,8 +82,9 @@ func TestSameBuffer(t *testing.T) { OK(t, err) // use the same buffer for decryption - ciphertext, err = crypto.Decrypt(k, ciphertext, ciphertext) + n, err := crypto.Decrypt(k, ciphertext, ciphertext) OK(t, err) + ciphertext = ciphertext[:n] Assert(t, bytes.Equal(ciphertext, data), "wrong plaintext returned") } @@ -97,9 +102,9 @@ func TestCornerCases(t *testing.T) { len(c)) // this should decrypt to nil - p, err := crypto.Decrypt(k, nil, c) + n, err := crypto.Decrypt(k, nil, c) OK(t, err) - Equals(t, []byte(nil), p) + Equals(t, 0, n) // test encryption for same slice, this should return an error _, err = crypto.Encrypt(k, c, c) @@ -160,7 +165,7 @@ func BenchmarkDecrypt(b *testing.B) { b.SetBytes(int64(size)) for i := 0; i < b.N; i++ { - plaintext, err = crypto.Decrypt(k, plaintext, ciphertext) + _, err = crypto.Decrypt(k, plaintext, ciphertext) OK(b, err) } } diff --git a/src/restic/crypto/kdf.go b/src/restic/crypto/kdf.go index ea8be37b6..158f462f1 100644 --- a/src/restic/crypto/kdf.go +++ b/src/restic/crypto/kdf.go @@ -4,8 +4,9 @@ import ( "crypto/rand" "time" + "restic/errors" + sscrypt "github.com/elithrar/simple-scrypt" - "github.com/pkg/errors" "golang.org/x/crypto/scrypt" ) diff --git a/src/restic/debug/debug.go b/src/restic/debug/debug.go index aeae376cd..b1ab2b38c 100644 --- a/src/restic/debug/debug.go +++ b/src/restic/debug/debug.go @@ -15,7 +15,7 @@ import ( "sync" "time" - "github.com/pkg/errors" + "restic/errors" ) type process struct { diff --git a/src/restic/doc.go b/src/restic/doc.go index 358dca240..2e53b2524 100644 --- a/src/restic/doc.go +++ b/src/restic/doc.go @@ -1,6 +1,5 @@ // Package restic is the top level package for the restic backup program, 
// please see https://github.com/restic/restic for more information. // -// This package exposes the main components needed to create and restore a -// backup as well as handling things like a local cache of objects. +// This package exposes the main objects that are handled in restic. package restic diff --git a/src/restic/errors/doc.go b/src/restic/errors/doc.go new file mode 100644 index 000000000..9f63cf958 --- /dev/null +++ b/src/restic/errors/doc.go @@ -0,0 +1,2 @@ +// Package errors provides custom error types used within restic. +package errors diff --git a/src/restic/errors.go b/src/restic/errors/fatal.go similarity index 98% rename from src/restic/errors.go rename to src/restic/errors/fatal.go index 1aa7e1fdc..dce3a92b0 100644 --- a/src/restic/errors.go +++ b/src/restic/errors/fatal.go @@ -1,4 +1,4 @@ -package restic +package errors import "fmt" diff --git a/src/restic/errors/wrap.go b/src/restic/errors/wrap.go new file mode 100644 index 000000000..65b48de8b --- /dev/null +++ b/src/restic/errors/wrap.go @@ -0,0 +1,23 @@ +package errors + +import "github.com/pkg/errors" + +// Cause returns the cause of an error. +func Cause(err error) error { + return errors.Cause(err) +} + +// New creates a new error based on message. +func New(message string) error { + return errors.New(message) +} + +// Errorf creates an error based on a format string and values. +func Errorf(format string, args ...interface{}) error { + return errors.Errorf(format, args...) +} + +// Wrap wraps an error retrieved from outside of restic. 
+func Wrap(err error, message string) error { + return errors.Wrap(err, message) +} diff --git a/src/restic/backend/handle.go b/src/restic/file.go similarity index 52% rename from src/restic/backend/handle.go rename to src/restic/file.go index 09561161b..bfe44ad42 100644 --- a/src/restic/backend/handle.go +++ b/src/restic/file.go @@ -1,14 +1,27 @@ -package backend +package restic import ( "fmt" - "github.com/pkg/errors" + "restic/errors" +) + +// FileType is the type of a file in the backend. +type FileType string + +// These are the different data types a backend can store. +const ( + DataFile FileType = "data" + KeyFile = "key" + LockFile = "lock" + SnapshotFile = "snapshot" + IndexFile = "index" + ConfigFile = "config" ) // Handle is used to store and access data in a backend. type Handle struct { - Type Type + Type FileType Name string } @@ -27,17 +40,17 @@ func (h Handle) Valid() error { } switch h.Type { - case Data: - case Key: - case Lock: - case Snapshot: - case Index: - case Config: + case DataFile: + case KeyFile: + case LockFile: + case SnapshotFile: + case IndexFile: + case ConfigFile: default: return errors.Errorf("invalid Type %q", h.Type) } - if h.Type == Config { + if h.Type == ConfigFile { return nil } diff --git a/src/restic/backend/handle_test.go b/src/restic/file_test.go similarity index 76% rename from src/restic/backend/handle_test.go rename to src/restic/file_test.go index a477c0aec..2f8f395c2 100644 --- a/src/restic/backend/handle_test.go +++ b/src/restic/file_test.go @@ -1,4 +1,4 @@ -package backend +package restic import "testing" @@ -8,10 +8,10 @@ var handleTests = []struct { }{ {Handle{Name: "foo"}, false}, {Handle{Type: "foobar"}, false}, - {Handle{Type: Config, Name: ""}, true}, - {Handle{Type: Data, Name: ""}, false}, + {Handle{Type: ConfigFile, Name: ""}, true}, + {Handle{Type: DataFile, Name: ""}, false}, {Handle{Type: "", Name: "x"}, false}, - {Handle{Type: Lock, Name: "010203040506"}, true}, + {Handle{Type: LockFile, Name: 
"010203040506"}, true}, } func TestHandleValid(t *testing.T) { diff --git a/src/restic/filter/filter.go b/src/restic/filter/filter.go index 48ce01fb8..bb483d31c 100644 --- a/src/restic/filter/filter.go +++ b/src/restic/filter/filter.go @@ -4,7 +4,7 @@ import ( "path/filepath" "strings" - "github.com/pkg/errors" + "restic/errors" ) // ErrBadString is returned when Match is called with the empty string as the diff --git a/src/restic/find.go b/src/restic/find.go index 63c8bd813..dcc9d0251 100644 --- a/src/restic/find.go +++ b/src/restic/find.go @@ -1,18 +1,12 @@ package restic -import ( - "restic/backend" - "restic/pack" - "restic/repository" -) - // FindUsedBlobs traverses the tree ID and adds all seen blobs (trees and data // blobs) to the set blobs. The tree blobs in the `seen` BlobSet will not be visited // again. -func FindUsedBlobs(repo *repository.Repository, treeID backend.ID, blobs pack.BlobSet, seen pack.BlobSet) error { - blobs.Insert(pack.Handle{ID: treeID, Type: pack.Tree}) +func FindUsedBlobs(repo Repository, treeID ID, blobs BlobSet, seen BlobSet) error { + blobs.Insert(BlobHandle{ID: treeID, Type: TreeBlob}) - tree, err := LoadTree(repo, treeID) + tree, err := repo.LoadTree(treeID) if err != nil { return err } @@ -21,11 +15,11 @@ func FindUsedBlobs(repo *repository.Repository, treeID backend.ID, blobs pack.Bl switch node.Type { case "file": for _, blob := range node.Content { - blobs.Insert(pack.Handle{ID: blob, Type: pack.Data}) + blobs.Insert(BlobHandle{ID: blob, Type: DataBlob}) } case "dir": subtreeID := *node.Subtree - h := pack.Handle{ID: subtreeID, Type: pack.Tree} + h := BlobHandle{ID: subtreeID, Type: TreeBlob} if seen.Has(h) { continue } diff --git a/src/restic/find_test.go b/src/restic/find_test.go index f7e47bde4..effc58f24 100644 --- a/src/restic/find_test.go +++ b/src/restic/find_test.go @@ -1,4 +1,4 @@ -package restic +package restic_test import ( "bufio" @@ -7,26 +7,26 @@ import ( "fmt" "os" "path/filepath" + "restic" "sort" "testing" 
"time" - "restic/pack" "restic/repository" ) -func loadIDSet(t testing.TB, filename string) pack.BlobSet { +func loadIDSet(t testing.TB, filename string) restic.BlobSet { f, err := os.Open(filename) if err != nil { t.Logf("unable to open golden file %v: %v", filename, err) - return pack.NewBlobSet() + return restic.NewBlobSet() } sc := bufio.NewScanner(f) - blobs := pack.NewBlobSet() + blobs := restic.NewBlobSet() for sc.Scan() { - var h pack.Handle + var h restic.BlobHandle err := json.Unmarshal([]byte(sc.Text()), &h) if err != nil { t.Errorf("file %v contained invalid blob: %#v", filename, err) @@ -43,14 +43,14 @@ func loadIDSet(t testing.TB, filename string) pack.BlobSet { return blobs } -func saveIDSet(t testing.TB, filename string, s pack.BlobSet) { +func saveIDSet(t testing.TB, filename string, s restic.BlobSet) { f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE, 0644) if err != nil { t.Fatalf("unable to update golden file %v: %v", filename, err) return } - var hs pack.Handles + var hs restic.BlobHandles for h := range s { hs = append(hs, h) } @@ -83,16 +83,16 @@ func TestFindUsedBlobs(t *testing.T) { repo, cleanup := repository.TestRepository(t) defer cleanup() - var snapshots []*Snapshot + var snapshots []*restic.Snapshot for i := 0; i < findTestSnapshots; i++ { - sn := TestCreateSnapshot(t, repo, findTestTime.Add(time.Duration(i)*time.Second), findTestDepth, 0) + sn := restic.TestCreateSnapshot(t, repo, findTestTime.Add(time.Duration(i)*time.Second), findTestDepth, 0) t.Logf("snapshot %v saved, tree %v", sn.ID().Str(), sn.Tree.Str()) snapshots = append(snapshots, sn) } for i, sn := range snapshots { - usedBlobs := pack.NewBlobSet() - err := FindUsedBlobs(repo, *sn.Tree, usedBlobs, pack.NewBlobSet()) + usedBlobs := restic.NewBlobSet() + err := restic.FindUsedBlobs(repo, *sn.Tree, usedBlobs, restic.NewBlobSet()) if err != nil { t.Errorf("FindUsedBlobs returned error: %v", err) continue @@ -121,14 +121,14 @@ func BenchmarkFindUsedBlobs(b *testing.B) { 
repo, cleanup := repository.TestRepository(b) defer cleanup() - sn := TestCreateSnapshot(b, repo, findTestTime, findTestDepth, 0) + sn := restic.TestCreateSnapshot(b, repo, findTestTime, findTestDepth, 0) b.ResetTimer() for i := 0; i < b.N; i++ { - seen := pack.NewBlobSet() - blobs := pack.NewBlobSet() - err := FindUsedBlobs(repo, *sn.Tree, blobs, seen) + seen := restic.NewBlobSet() + blobs := restic.NewBlobSet() + err := restic.FindUsedBlobs(repo, *sn.Tree, blobs, seen) if err != nil { b.Error(err) } diff --git a/src/restic/fs/file_linux.go b/src/restic/fs/file_linux.go index e3cdf9600..f02c6470d 100644 --- a/src/restic/fs/file_linux.go +++ b/src/restic/fs/file_linux.go @@ -6,7 +6,7 @@ import ( "os" "syscall" - "github.com/pkg/errors" + "restic/errors" "golang.org/x/sys/unix" ) diff --git a/src/restic/fuse/dir.go b/src/restic/fuse/dir.go index a89617e5f..de970d526 100644 --- a/src/restic/fuse/dir.go +++ b/src/restic/fuse/dir.go @@ -12,7 +12,6 @@ import ( "restic" "restic/debug" - "restic/repository" ) // Statically ensure that *dir implement those interface @@ -20,16 +19,16 @@ var _ = fs.HandleReadDirAller(&dir{}) var _ = fs.NodeStringLookuper(&dir{}) type dir struct { - repo *repository.Repository + repo restic.Repository items map[string]*restic.Node inode uint64 node *restic.Node ownerIsRoot bool } -func newDir(repo *repository.Repository, node *restic.Node, ownerIsRoot bool) (*dir, error) { +func newDir(repo restic.Repository, node *restic.Node, ownerIsRoot bool) (*dir, error) { debug.Log("newDir", "new dir for %v (%v)", node.Name, node.Subtree.Str()) - tree, err := restic.LoadTree(repo, *node.Subtree) + tree, err := repo.LoadTree(*node.Subtree) if err != nil { debug.Log("newDir", " error loading tree %v: %v", node.Subtree.Str(), err) return nil, err @@ -50,7 +49,7 @@ func newDir(repo *repository.Repository, node *restic.Node, ownerIsRoot bool) (* // replaceSpecialNodes replaces nodes with name "." and "/" by their contents. // Otherwise, the node is returned. 
-func replaceSpecialNodes(repo *repository.Repository, node *restic.Node) ([]*restic.Node, error) { +func replaceSpecialNodes(repo restic.Repository, node *restic.Node) ([]*restic.Node, error) { if node.Type != "dir" || node.Subtree == nil { return []*restic.Node{node}, nil } @@ -59,7 +58,7 @@ func replaceSpecialNodes(repo *repository.Repository, node *restic.Node) ([]*res return []*restic.Node{node}, nil } - tree, err := restic.LoadTree(repo, *node.Subtree) + tree, err := repo.LoadTree(*node.Subtree) if err != nil { return nil, err } @@ -67,9 +66,9 @@ func replaceSpecialNodes(repo *repository.Repository, node *restic.Node) ([]*res return tree.Nodes, nil } -func newDirFromSnapshot(repo *repository.Repository, snapshot SnapshotWithId, ownerIsRoot bool) (*dir, error) { +func newDirFromSnapshot(repo restic.Repository, snapshot SnapshotWithId, ownerIsRoot bool) (*dir, error) { debug.Log("newDirFromSnapshot", "new dir for snapshot %v (%v)", snapshot.ID.Str(), snapshot.Tree.Str()) - tree, err := restic.LoadTree(repo, *snapshot.Tree) + tree, err := repo.LoadTree(*snapshot.Tree) if err != nil { debug.Log("newDirFromSnapshot", " loadTree(%v) failed: %v", snapshot.ID.Str(), err) return nil, err @@ -98,7 +97,7 @@ func newDirFromSnapshot(repo *repository.Repository, snapshot SnapshotWithId, ow Mode: os.ModeDir | 0555, }, items: items, - inode: inodeFromBackendId(snapshot.ID), + inode: inodeFromBackendID(snapshot.ID), ownerIsRoot: ownerIsRoot, }, nil } diff --git a/src/restic/fuse/file.go b/src/restic/fuse/file.go index f6dfa771e..d2fc12222 100644 --- a/src/restic/fuse/file.go +++ b/src/restic/fuse/file.go @@ -6,12 +6,10 @@ package fuse import ( "sync" - "github.com/pkg/errors" + "restic/errors" "restic" - "restic/backend" "restic/debug" - "restic/pack" "bazil.org/fuse" "bazil.org/fuse/fs" @@ -28,8 +26,8 @@ var _ = fs.HandleReleaser(&file{}) // BlobLoader is an abstracted repository with a reduced set of methods used // for fuse operations. 
type BlobLoader interface { - LookupBlobSize(backend.ID, pack.BlobType) (uint, error) - LoadBlob(backend.ID, pack.BlobType, []byte) ([]byte, error) + LookupBlobSize(restic.ID, restic.BlobType) (uint, error) + LoadBlob(restic.BlobType, restic.ID, []byte) (int, error) } type file struct { @@ -54,7 +52,7 @@ func newFile(repo BlobLoader, node *restic.Node, ownerIsRoot bool) (*file, error var bytes uint64 sizes := make([]uint, len(node.Content)) for i, id := range node.Content { - size, err := repo.LookupBlobSize(id, pack.Data) + size, err := repo.LookupBlobSize(id, restic.DataBlob) if err != nil { return nil, err } @@ -111,14 +109,14 @@ func (f *file) getBlobAt(i int) (blob []byte, err error) { buf = make([]byte, f.sizes[i]) } - blob, err = f.repo.LoadBlob(f.node.Content[i], pack.Data, buf) + n, err := f.repo.LoadBlob(restic.DataBlob, f.node.Content[i], buf) if err != nil { debug.Log("file.getBlobAt", "LoadBlob(%v, %v) failed: %v", f.node.Name, f.node.Content[i], err) return nil, err } - f.blobs[i] = blob + f.blobs[i] = buf[:n] - return blob, nil + return buf[:n], nil } func (f *file) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error { diff --git a/src/restic/fuse/file_test.go b/src/restic/fuse/file_test.go index cb1c67452..090e43200 100644 --- a/src/restic/fuse/file_test.go +++ b/src/restic/fuse/file_test.go @@ -9,25 +9,23 @@ import ( "testing" "time" - "github.com/pkg/errors" + "restic/errors" "bazil.org/fuse" "restic" - "restic/backend" - "restic/pack" . 
"restic/test" ) type MockRepo struct { - blobs map[backend.ID][]byte + blobs map[restic.ID][]byte } -func NewMockRepo(content map[backend.ID][]byte) *MockRepo { +func NewMockRepo(content map[restic.ID][]byte) *MockRepo { return &MockRepo{blobs: content} } -func (m *MockRepo) LookupBlobSize(id backend.ID, t pack.BlobType) (uint, error) { +func (m *MockRepo) LookupBlobSize(id restic.ID, t restic.BlobType) (uint, error) { buf, ok := m.blobs[id] if !ok { return 0, errors.New("blob not found") @@ -36,19 +34,19 @@ func (m *MockRepo) LookupBlobSize(id backend.ID, t pack.BlobType) (uint, error) return uint(len(buf)), nil } -func (m *MockRepo) LoadBlob(id backend.ID, t pack.BlobType, buf []byte) ([]byte, error) { +func (m *MockRepo) LoadBlob(t restic.BlobType, id restic.ID, buf []byte) (int, error) { size, err := m.LookupBlobSize(id, t) if err != nil { - return nil, err + return 0, err } - if uint(cap(buf)) < size { - return nil, errors.New("buffer too small") + if uint(len(buf)) < size { + return 0, errors.New("buffer too small") } buf = buf[:size] copy(buf, m.blobs[id]) - return buf, nil + return int(size), nil } type MockContext struct{} @@ -68,12 +66,12 @@ var testContentLengths = []uint{ } var testMaxFileSize uint -func genTestContent() map[backend.ID][]byte { - m := make(map[backend.ID][]byte) +func genTestContent() map[restic.ID][]byte { + m := make(map[restic.ID][]byte) for _, length := range testContentLengths { buf := Random(int(length), int(length)) - id := backend.Hash(buf) + id := restic.Hash(buf) m[id] = buf testMaxFileSize += length } @@ -83,7 +81,7 @@ func genTestContent() map[backend.ID][]byte { const maxBufSize = 20 * 1024 * 1024 -func testRead(t *testing.T, f *file, offset, length int, data []byte) []byte { +func testRead(t *testing.T, f *file, offset, length int, data []byte) { ctx := MockContext{} req := &fuse.ReadRequest{ @@ -94,8 +92,6 @@ func testRead(t *testing.T, f *file, offset, length int, data []byte) []byte { Data: make([]byte, length), } OK(t, 
f.Read(ctx, req, resp)) - - return resp.Data } var offsetReadsTests = []struct { @@ -111,7 +107,7 @@ func TestFuseFile(t *testing.T) { memfile := make([]byte, 0, maxBufSize) - var ids backend.IDs + var ids restic.IDs for id, buf := range repo.blobs { ids = append(ids, id) memfile = append(memfile, buf...) @@ -137,8 +133,9 @@ func TestFuseFile(t *testing.T) { for i, test := range offsetReadsTests { b := memfile[test.offset : test.offset+test.length] - res := testRead(t, f, test.offset, test.length, b) - if !bytes.Equal(b, res) { + buf := make([]byte, test.length) + testRead(t, f, test.offset, test.length, buf) + if !bytes.Equal(b, buf) { t.Errorf("test %d failed, wrong data returned", i) } } @@ -152,8 +149,9 @@ func TestFuseFile(t *testing.T) { } b := memfile[offset : offset+length] - res := testRead(t, f, offset, length, b) - if !bytes.Equal(b, res) { + buf := make([]byte, length) + testRead(t, f, offset, length, buf) + if !bytes.Equal(b, buf) { t.Errorf("test %d failed (offset %d, length %d), wrong data returned", i, offset, length) } } diff --git a/src/restic/fuse/fuse.go b/src/restic/fuse/fuse.go index 6ef3e48cc..e8e45c445 100644 --- a/src/restic/fuse/fuse.go +++ b/src/restic/fuse/fuse.go @@ -5,13 +5,12 @@ package fuse import ( "encoding/binary" - - "restic/backend" + "restic" ) // inodeFromBackendId returns a unique uint64 from a backend id. 
// Endianness has no specific meaning, it is just the simplest way to // transform a []byte to an uint64 -func inodeFromBackendId(id backend.ID) uint64 { +func inodeFromBackendID(id restic.ID) uint64 { return binary.BigEndian.Uint64(id[:8]) } diff --git a/src/restic/fuse/link.go b/src/restic/fuse/link.go index 732446a7a..43fb35020 100644 --- a/src/restic/fuse/link.go +++ b/src/restic/fuse/link.go @@ -5,7 +5,6 @@ package fuse import ( "restic" - "restic/repository" "bazil.org/fuse" "bazil.org/fuse/fs" @@ -20,7 +19,7 @@ type link struct { ownerIsRoot bool } -func newLink(repo *repository.Repository, node *restic.Node, ownerIsRoot bool) (*link, error) { +func newLink(repo restic.Repository, node *restic.Node, ownerIsRoot bool) (*link, error) { return &link{node: node, ownerIsRoot: ownerIsRoot}, nil } diff --git a/src/restic/fuse/snapshot.go b/src/restic/fuse/snapshot.go index a384e3fb5..d71adbc79 100644 --- a/src/restic/fuse/snapshot.go +++ b/src/restic/fuse/snapshot.go @@ -12,16 +12,14 @@ import ( "bazil.org/fuse/fs" "restic" - "restic/backend" "restic/debug" - "restic/repository" "golang.org/x/net/context" ) type SnapshotWithId struct { *restic.Snapshot - backend.ID + restic.ID } // These lines statically ensure that a *SnapshotsDir implement the given @@ -31,7 +29,7 @@ var _ = fs.HandleReadDirAller(&SnapshotsDir{}) var _ = fs.NodeStringLookuper(&SnapshotsDir{}) type SnapshotsDir struct { - repo *repository.Repository + repo restic.Repository ownerIsRoot bool // knownSnapshots maps snapshot timestamp to the snapshot @@ -39,7 +37,8 @@ type SnapshotsDir struct { knownSnapshots map[string]SnapshotWithId } -func NewSnapshotsDir(repo *repository.Repository, ownerIsRoot bool) *SnapshotsDir { +// NewSnapshotsDir returns a new dir object for the snapshots. 
+func NewSnapshotsDir(repo restic.Repository, ownerIsRoot bool) *SnapshotsDir { debug.Log("NewSnapshotsDir", "fuse mount initiated") return &SnapshotsDir{ repo: repo, @@ -65,7 +64,7 @@ func (sn *SnapshotsDir) updateCache(ctx context.Context) error { sn.Lock() defer sn.Unlock() - for id := range sn.repo.List(backend.Snapshot, ctx.Done()) { + for id := range sn.repo.List(restic.SnapshotFile, ctx.Done()) { snapshot, err := restic.LoadSnapshot(sn.repo, id) if err != nil { return err @@ -96,7 +95,7 @@ func (sn *SnapshotsDir) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) { ret := make([]fuse.Dirent, 0) for _, snapshot := range sn.knownSnapshots { ret = append(ret, fuse.Dirent{ - Inode: inodeFromBackendId(snapshot.ID), + Inode: inodeFromBackendID(snapshot.ID), Type: fuse.DT_Dir, Name: snapshot.Time.Format(time.RFC3339), }) diff --git a/src/restic/backend/id.go b/src/restic/id.go similarity index 82% rename from src/restic/backend/id.go rename to src/restic/id.go index 2714ee63e..6d1f55de2 100644 --- a/src/restic/backend/id.go +++ b/src/restic/id.go @@ -1,12 +1,14 @@ -package backend +package restic import ( "bytes" + "crypto/rand" "crypto/sha256" "encoding/hex" "encoding/json" + "io" - "github.com/pkg/errors" + "restic/errors" ) // Hash returns the ID for data. @@ -14,11 +16,11 @@ func Hash(data []byte) ID { return sha256.Sum256(data) } -// IDSize contains the size of an ID, in bytes. -const IDSize = sha256.Size +// idSize contains the size of an ID, in bytes. +const idSize = sha256.Size // ID references content within a repository. -type ID [IDSize]byte +type ID [idSize]byte // ParseID converts the given string to an ID. 
func ParseID(s string) (ID, error) { @@ -28,7 +30,7 @@ return ID{}, errors.Wrap(err, "hex.DecodeString") } - if len(b) != IDSize { + if len(b) != idSize { return ID{}, errors.New("invalid length for hash") } @@ -42,6 +44,17 @@ func (id ID) String() string { return hex.EncodeToString(id[:]) } +// NewRandomID returns a randomly generated ID. When reading from rand fails, +// the function panics. +func NewRandomID() ID { + id := ID{} + _, err := io.ReadFull(rand.Reader, id[:]) + if err != nil { + panic(err) + } + return id +} + const shortStr = 4 // Str returns the shortened string version of id. diff --git a/src/restic/backend/id_int_test.go b/src/restic/id_int_test.go similarity index 95% rename from src/restic/backend/id_int_test.go rename to src/restic/id_int_test.go index d46a1554b..a60a11b89 100644 --- a/src/restic/backend/id_int_test.go +++ b/src/restic/id_int_test.go @@ -1,4 +1,4 @@ -package backend +package restic import "testing" diff --git a/src/restic/backend/id_test.go b/src/restic/id_test.go similarity index 50% rename from src/restic/backend/id_test.go rename to src/restic/id_test.go index 47d12d319..2e9634a19 100644 --- a/src/restic/backend/id_test.go +++ b/src/restic/id_test.go @@ -1,10 +1,8 @@ -package backend_test import ( + "reflect" "testing" - - "restic/backend" - . 
"restic/test" ) var TestStrings = []struct { @@ -19,25 +17,44 @@ var TestStrings = []struct { func TestID(t *testing.T) { for _, test := range TestStrings { - id, err := backend.ParseID(test.id) - OK(t, err) + id, err := ParseID(test.id) + if err != nil { + t.Error(err) + } - id2, err := backend.ParseID(test.id) - OK(t, err) - Assert(t, id.Equal(id2), "ID.Equal() does not work as expected") + id2, err := ParseID(test.id) + if err != nil { + t.Error(err) + } + if !id.Equal(id2) { + t.Errorf("ID.Equal() does not work as expected") + } ret, err := id.EqualString(test.id) - OK(t, err) - Assert(t, ret, "ID.EqualString() returned wrong value") + if err != nil { + t.Error(err) + } + if !ret { + t.Error("ID.EqualString() returned wrong value") + } // test json marshalling buf, err := id.MarshalJSON() - OK(t, err) - Equals(t, "\""+test.id+"\"", string(buf)) + if err != nil { + t.Error(err) + } + want := `"` + test.id + `"` + if string(buf) != want { + t.Errorf("string comparison failed, wanted %q, got %q", want, string(buf)) + } - var id3 backend.ID + var id3 ID err = id3.UnmarshalJSON(buf) - OK(t, err) - Equals(t, id, id3) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(id, id3) { + t.Error("ids are not equal") + } } } diff --git a/src/restic/backend/ids.go b/src/restic/ids.go similarity index 98% rename from src/restic/backend/ids.go rename to src/restic/ids.go index 11cf436d2..cc5ad18da 100644 --- a/src/restic/backend/ids.go +++ b/src/restic/ids.go @@ -1,4 +1,4 @@ -package backend +package restic import ( "encoding/hex" diff --git a/src/restic/ids_test.go b/src/restic/ids_test.go new file mode 100644 index 000000000..9ce02607b --- /dev/null +++ b/src/restic/ids_test.go @@ -0,0 +1,55 @@ +package restic + +import ( + "reflect" + "testing" +) + +var uniqTests = []struct { + before, after IDs +}{ + { + IDs{ + TestParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), + 
TestParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), + TestParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), + }, + IDs{ + TestParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), + TestParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), + }, + }, + { + IDs{ + TestParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), + TestParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), + TestParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), + }, + IDs{ + TestParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), + TestParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), + }, + }, + { + IDs{ + TestParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), + TestParseID("f658198b405d7e80db5ace1980d125c8da62f636b586c46bf81dfb856a49d0c8"), + TestParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), + TestParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), + }, + IDs{ + TestParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), + TestParseID("f658198b405d7e80db5ace1980d125c8da62f636b586c46bf81dfb856a49d0c8"), + TestParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), + }, + }, +} + +func TestUniqIDs(t *testing.T) { + for i, test := range uniqTests { + uniq := test.before.Uniq() + if !reflect.DeepEqual(uniq, test.after) { + t.Errorf("uniqIDs() test %v failed\n wanted: %v\n got: %v", i, test.after, uniq) + } + } +} diff --git a/src/restic/backend/idset.go b/src/restic/idset.go similarity index 99% rename from src/restic/backend/idset.go rename to src/restic/idset.go index 4bfe52ca2..c31ca7747 100644 --- a/src/restic/backend/idset.go +++ b/src/restic/idset.go @@ -1,4 +1,4 @@ -package backend +package restic import "sort" diff --git 
a/src/restic/idset_test.go b/src/restic/idset_test.go new file mode 100644 index 000000000..5525eab79 --- /dev/null +++ b/src/restic/idset_test.go @@ -0,0 +1,32 @@ +package restic + +import ( + "testing" +) + +var idsetTests = []struct { + id ID + seen bool +}{ + {TestParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), false}, + {TestParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), false}, + {TestParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), true}, + {TestParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), true}, + {TestParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), true}, + {TestParseID("f658198b405d7e80db5ace1980d125c8da62f636b586c46bf81dfb856a49d0c8"), false}, + {TestParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), true}, + {TestParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), true}, + {TestParseID("f658198b405d7e80db5ace1980d125c8da62f636b586c46bf81dfb856a49d0c8"), true}, + {TestParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), true}, +} + +func TestIDSet(t *testing.T) { + set := NewIDSet() + for i, test := range idsetTests { + seen := set.Has(test.id) + if seen != test.seen { + t.Errorf("IDSet test %v failed: wanted %v, got %v", i, test.seen, seen) + } + set.Insert(test.id) + } +} diff --git a/src/restic/index/index.go b/src/restic/index/index.go index 20d2e08fe..9027e3fda 100644 --- a/src/restic/index/index.go +++ b/src/restic/index/index.go @@ -5,45 +5,42 @@ import ( "fmt" "os" "restic" - "restic/backend" "restic/debug" "restic/list" - "restic/pack" - "restic/types" "restic/worker" - "github.com/pkg/errors" + "restic/errors" ) // Pack contains information about the contents of a pack. type Pack struct { Size int64 - Entries []pack.Blob + Entries []restic.Blob } // Blob contains information about a blob. 
type Blob struct { Size int64 - Packs backend.IDSet + Packs restic.IDSet } // Index contains information about blobs and packs stored in a repo. type Index struct { - Packs map[backend.ID]Pack - Blobs map[pack.Handle]Blob - IndexIDs backend.IDSet + Packs map[restic.ID]Pack + Blobs map[restic.BlobHandle]Blob + IndexIDs restic.IDSet } func newIndex() *Index { return &Index{ - Packs: make(map[backend.ID]Pack), - Blobs: make(map[pack.Handle]Blob), - IndexIDs: backend.NewIDSet(), + Packs: make(map[restic.ID]Pack), + Blobs: make(map[restic.BlobHandle]Blob), + IndexIDs: restic.NewIDSet(), } } // New creates a new index for repo from scratch. -func New(repo types.Repository, p *restic.Progress) (*Index, error) { +func New(repo restic.Repository, p *restic.Progress) (*Index, error) { done := make(chan struct{}) defer close(done) @@ -58,7 +55,7 @@ func New(repo types.Repository, p *restic.Progress) (*Index, error) { for job := range ch { p.Report(restic.Stat{Blobs: 1}) - packID := job.Data.(backend.ID) + packID := job.Data.(restic.ID) if job.Error != nil { fmt.Fprintf(os.Stderr, "unable to list pack %v: %v\n", packID.Str(), job.Error) continue @@ -83,27 +80,27 @@ func New(repo types.Repository, p *restic.Progress) (*Index, error) { const loadIndexParallelism = 20 type packJSON struct { - ID backend.ID `json:"id"` + ID restic.ID `json:"id"` Blobs []blobJSON `json:"blobs"` } type blobJSON struct { - ID backend.ID `json:"id"` - Type pack.BlobType `json:"type"` - Offset uint `json:"offset"` - Length uint `json:"length"` + ID restic.ID `json:"id"` + Type restic.BlobType `json:"type"` + Offset uint `json:"offset"` + Length uint `json:"length"` } type indexJSON struct { - Supersedes backend.IDs `json:"supersedes,omitempty"` + Supersedes restic.IDs `json:"supersedes,omitempty"` Packs []*packJSON `json:"packs"` } -func loadIndexJSON(repo types.Repository, id backend.ID) (*indexJSON, error) { +func loadIndexJSON(repo restic.Repository, id restic.ID) (*indexJSON, error) { 
debug.Log("index.loadIndexJSON", "process index %v\n", id.Str()) var idx indexJSON - err := repo.LoadJSONUnpacked(backend.Index, id, &idx) + err := repo.LoadJSONUnpacked(restic.IndexFile, id, &idx) if err != nil { return nil, err } @@ -112,7 +109,7 @@ func loadIndexJSON(repo types.Repository, id backend.ID) (*indexJSON, error) { } // Load creates an index by loading all index files from the repo. -func Load(repo types.Repository, p *restic.Progress) (*Index, error) { +func Load(repo restic.Repository, p *restic.Progress) (*Index, error) { debug.Log("index.Load", "loading indexes") p.Start() @@ -121,12 +118,12 @@ func Load(repo types.Repository, p *restic.Progress) (*Index, error) { done := make(chan struct{}) defer close(done) - supersedes := make(map[backend.ID]backend.IDSet) - results := make(map[backend.ID]map[backend.ID]Pack) + supersedes := make(map[restic.ID]restic.IDSet) + results := make(map[restic.ID]map[restic.ID]Pack) index := newIndex() - for id := range repo.List(backend.Index, done) { + for id := range repo.List(restic.IndexFile, done) { p.Report(restic.Stat{Blobs: 1}) debug.Log("index.Load", "Load index %v", id.Str()) @@ -135,17 +132,17 @@ func Load(repo types.Repository, p *restic.Progress) (*Index, error) { return nil, err } - res := make(map[backend.ID]Pack) - supersedes[id] = backend.NewIDSet() + res := make(map[restic.ID]Pack) + supersedes[id] = restic.NewIDSet() for _, sid := range idx.Supersedes { debug.Log("index.Load", " index %v supersedes %v", id.Str(), sid) supersedes[id].Insert(sid) } for _, jpack := range idx.Packs { - entries := make([]pack.Blob, 0, len(jpack.Blobs)) + entries := make([]restic.Blob, 0, len(jpack.Blobs)) for _, blob := range jpack.Blobs { - entry := pack.Blob{ + entry := restic.Blob{ ID: blob.ID, Type: blob.Type, Offset: blob.Offset, @@ -179,7 +176,7 @@ func Load(repo types.Repository, p *restic.Progress) (*Index, error) { // AddPack adds a pack to the index. 
If this pack is already in the index, an // error is returned. -func (idx *Index) AddPack(id backend.ID, size int64, entries []pack.Blob) error { +func (idx *Index) AddPack(id restic.ID, size int64, entries []restic.Blob) error { if _, ok := idx.Packs[id]; ok { return errors.Errorf("pack %v already present in the index", id.Str()) } @@ -187,11 +184,11 @@ func (idx *Index) AddPack(id backend.ID, size int64, entries []pack.Blob) error idx.Packs[id] = Pack{Size: size, Entries: entries} for _, entry := range entries { - h := pack.Handle{ID: entry.ID, Type: entry.Type} + h := restic.BlobHandle{ID: entry.ID, Type: entry.Type} if _, ok := idx.Blobs[h]; !ok { idx.Blobs[h] = Blob{ Size: int64(entry.Length), - Packs: backend.NewIDSet(), + Packs: restic.NewIDSet(), } } @@ -202,13 +199,13 @@ func (idx *Index) AddPack(id backend.ID, size int64, entries []pack.Blob) error } // RemovePack deletes a pack from the index. -func (idx *Index) RemovePack(id backend.ID) error { +func (idx *Index) RemovePack(id restic.ID) error { if _, ok := idx.Packs[id]; !ok { return errors.Errorf("pack %v not found in the index", id.Str()) } for _, blob := range idx.Packs[id].Entries { - h := pack.Handle{ID: blob.ID, Type: blob.Type} + h := restic.BlobHandle{ID: blob.ID, Type: blob.Type} idx.Blobs[h].Packs.Delete(id) if len(idx.Blobs[h].Packs) == 0 { @@ -223,13 +220,13 @@ func (idx *Index) RemovePack(id backend.ID) error { // DuplicateBlobs returns a list of blobs that are stored more than once in the // repo. 
-func (idx *Index) DuplicateBlobs() (dups pack.BlobSet) { - dups = pack.NewBlobSet() - seen := pack.NewBlobSet() +func (idx *Index) DuplicateBlobs() (dups restic.BlobSet) { + dups = restic.NewBlobSet() + seen := restic.NewBlobSet() for _, p := range idx.Packs { for _, entry := range p.Entries { - h := pack.Handle{ID: entry.ID, Type: entry.Type} + h := restic.BlobHandle{ID: entry.ID, Type: entry.Type} if seen.Has(h) { dups.Insert(h) } @@ -241,8 +238,8 @@ func (idx *Index) DuplicateBlobs() (dups pack.BlobSet) { } // PacksForBlobs returns the set of packs in which the blobs are contained. -func (idx *Index) PacksForBlobs(blobs pack.BlobSet) (packs backend.IDSet) { - packs = backend.NewIDSet() +func (idx *Index) PacksForBlobs(blobs restic.BlobSet) (packs restic.IDSet) { + packs = restic.NewIDSet() for h := range blobs { blob, ok := idx.Blobs[h] @@ -260,8 +257,8 @@ func (idx *Index) PacksForBlobs(blobs pack.BlobSet) (packs backend.IDSet) { // Location describes the location of a blob in a pack. type Location struct { - PackID backend.ID - pack.Blob + PackID restic.ID + restic.Blob } // ErrBlobNotFound is return by FindBlob when the blob could not be found in @@ -269,7 +266,7 @@ type Location struct { var ErrBlobNotFound = errors.New("blob not found in index") // FindBlob returns a list of packs and positions the blob can be found in. -func (idx *Index) FindBlob(h pack.Handle) ([]Location, error) { +func (idx *Index) FindBlob(h restic.BlobHandle) ([]Location, error) { blob, ok := idx.Blobs[h] if !ok { return nil, ErrBlobNotFound @@ -300,8 +297,8 @@ func (idx *Index) FindBlob(h pack.Handle) ([]Location, error) { } // Save writes the complete index to the repo. 
-func (idx *Index) Save(repo types.Repository, supersedes backend.IDs) (backend.ID, error) { - packs := make(map[backend.ID][]pack.Blob, len(idx.Packs)) +func (idx *Index) Save(repo restic.Repository, supersedes restic.IDs) (restic.ID, error) { + packs := make(map[restic.ID][]restic.Blob, len(idx.Packs)) for id, p := range idx.Packs { packs[id] = p.Entries } @@ -310,7 +307,7 @@ func (idx *Index) Save(repo types.Repository, supersedes backend.IDs) (backend.I } // Save writes a new index containing the given packs. -func Save(repo types.Repository, packs map[backend.ID][]pack.Blob, supersedes backend.IDs) (backend.ID, error) { +func Save(repo restic.Repository, packs map[restic.ID][]restic.Blob, supersedes restic.IDs) (restic.ID, error) { idx := &indexJSON{ Supersedes: supersedes, Packs: make([]*packJSON, 0, len(packs)), @@ -335,5 +332,5 @@ func Save(repo types.Repository, packs map[backend.ID][]pack.Blob, supersedes ba idx.Packs = append(idx.Packs, p) } - return repo.SaveJSONUnpacked(backend.Index, idx) + return repo.SaveJSONUnpacked(restic.IndexFile, idx) } diff --git a/src/restic/index/index_test.go b/src/restic/index/index_test.go index 559c3f08b..608ff944a 100644 --- a/src/restic/index/index_test.go +++ b/src/restic/index/index_test.go @@ -3,10 +3,7 @@ package index import ( "math/rand" "restic" - "restic/backend" - "restic/pack" "restic/repository" - . 
"restic/test" "testing" "time" ) @@ -17,7 +14,7 @@ var ( depth = 3 ) -func createFilledRepo(t testing.TB, snapshots int, dup float32) (*repository.Repository, func()) { +func createFilledRepo(t testing.TB, snapshots int, dup float32) (restic.Repository, func()) { repo, cleanup := repository.TestRepository(t) for i := 0; i < 3; i++ { @@ -27,8 +24,8 @@ func createFilledRepo(t testing.TB, snapshots int, dup float32) (*repository.Rep return repo, cleanup } -func validateIndex(t testing.TB, repo *repository.Repository, idx *Index) { - for id := range repo.List(backend.Data, nil) { +func validateIndex(t testing.TB, repo restic.Repository, idx *Index) { + for id := range repo.List(restic.DataFile, nil) { if _, ok := idx.Packs[id]; !ok { t.Errorf("pack %v missing from index", id.Str()) } @@ -164,7 +161,7 @@ func TestIndexDuplicateBlobs(t *testing.T) { t.Logf("%d packs with duplicate blobs", len(packs)) } -func loadIndex(t testing.TB, repo *repository.Repository) *Index { +func loadIndex(t testing.TB, repo restic.Repository) *Index { idx, err := Load(repo, nil) if err != nil { t.Fatalf("Load() returned error %v", err) @@ -179,7 +176,7 @@ func TestIndexSave(t *testing.T) { idx := loadIndex(t, repo) - packs := make(map[backend.ID][]pack.Blob) + packs := make(map[restic.ID][]restic.Blob) for id := range idx.Packs { if rand.Float32() < 0.5 { packs[id] = idx.Packs[id].Entries @@ -197,7 +194,7 @@ func TestIndexSave(t *testing.T) { for id := range idx.IndexIDs { t.Logf("remove index %v", id.Str()) - err = repo.Backend().Remove(backend.Index, id.String()) + err = repo.Backend().Remove(restic.IndexFile, id.String()) if err != nil { t.Errorf("error removing index %v: %v", id, err) } @@ -235,7 +232,7 @@ func TestIndexAddRemovePack(t *testing.T) { done := make(chan struct{}) defer close(done) - packID := <-repo.List(backend.Data, done) + packID := <-repo.List(restic.DataFile, done) t.Logf("selected pack %v", packID.Str()) @@ -248,7 +245,7 @@ func TestIndexAddRemovePack(t *testing.T) { 
} for _, blob := range blobs { - h := pack.Handle{ID: blob.ID, Type: blob.Type} + h := restic.BlobHandle{ID: blob.ID, Type: blob.Type} _, err := idx.FindBlob(h) if err == nil { t.Errorf("removed blob %v found in index", h) @@ -298,7 +295,7 @@ func TestIndexLoadDocReference(t *testing.T) { repo, cleanup := repository.TestRepository(t) defer cleanup() - id, err := repo.SaveUnpacked(backend.Index, docExample) + id, err := repo.SaveUnpacked(restic.IndexFile, docExample) if err != nil { t.Fatalf("SaveUnpacked() returned error %v", err) } @@ -307,8 +304,8 @@ func TestIndexLoadDocReference(t *testing.T) { idx := loadIndex(t, repo) - blobID := ParseID("d3dc577b4ffd38cc4b32122cabf8655a0223ed22edfd93b353dc0c3f2b0fdf66") - locs, err := idx.FindBlob(pack.Handle{ID: blobID, Type: pack.Data}) + blobID := restic.TestParseID("d3dc577b4ffd38cc4b32122cabf8655a0223ed22edfd93b353dc0c3f2b0fdf66") + locs, err := idx.FindBlob(restic.BlobHandle{ID: blobID, Type: restic.DataBlob}) if err != nil { t.Errorf("FindBlob() returned error %v", err) } @@ -322,8 +319,8 @@ func TestIndexLoadDocReference(t *testing.T) { t.Errorf("blob IDs are not equal: %v != %v", l.ID, blobID) } - if l.Type != pack.Data { - t.Errorf("want type %v, got %v", pack.Data, l.Type) + if l.Type != restic.DataBlob { + t.Errorf("want type %v, got %v", restic.DataBlob, l.Type) } if l.Offset != 150 { diff --git a/src/restic/list/list.go b/src/restic/list/list.go index e3a14798f..18bfb606f 100644 --- a/src/restic/list/list.go +++ b/src/restic/list/list.go @@ -1,8 +1,7 @@ package list import ( - "restic/backend" - "restic/pack" + "restic" "restic/worker" ) @@ -10,19 +9,19 @@ const listPackWorkers = 10 // Lister combines lists packs in a repo and blobs in a pack. 
type Lister interface { - List(backend.Type, <-chan struct{}) <-chan backend.ID - ListPack(backend.ID) ([]pack.Blob, int64, error) + List(restic.FileType, <-chan struct{}) <-chan restic.ID + ListPack(restic.ID) ([]restic.Blob, int64, error) } // Result is returned in the channel from LoadBlobsFromAllPacks. type Result struct { - packID backend.ID + packID restic.ID size int64 - entries []pack.Blob + entries []restic.Blob } // PackID returns the pack ID of this result. -func (l Result) PackID() backend.ID { +func (l Result) PackID() restic.ID { return l.packID } @@ -32,14 +31,14 @@ func (l Result) Size() int64 { } // Entries returns a list of all blobs saved in the pack. -func (l Result) Entries() []pack.Blob { +func (l Result) Entries() []restic.Blob { return l.entries } // AllPacks sends the contents of all packs to ch. func AllPacks(repo Lister, ch chan<- worker.Job, done <-chan struct{}) { f := func(job worker.Job, done <-chan struct{}) (interface{}, error) { - packID := job.Data.(backend.ID) + packID := job.Data.(restic.ID) entries, size, err := repo.ListPack(packID) return Result{ @@ -54,7 +53,7 @@ func AllPacks(repo Lister, ch chan<- worker.Job, done <-chan struct{}) { go func() { defer close(jobCh) - for id := range repo.List(backend.Data, done) { + for id := range repo.List(restic.DataFile, done) { select { case jobCh <- worker.Job{Data: id}: case <-done: diff --git a/src/restic/lock.go b/src/restic/lock.go index a2780379d..f32df4f79 100644 --- a/src/restic/lock.go +++ b/src/restic/lock.go @@ -7,13 +7,12 @@ import ( "os/user" "sync" "syscall" + "testing" "time" - "github.com/pkg/errors" + "restic/errors" - "restic/backend" "restic/debug" - "restic/repository" ) // Lock represents a process locking the repository for an operation. 
@@ -33,8 +32,8 @@ type Lock struct { UID uint32 `json:"uid,omitempty"` GID uint32 `json:"gid,omitempty"` - repo *repository.Repository - lockID *backend.ID + repo Repository + lockID *ID } // ErrAlreadyLocked is returned when NewLock or NewExclusiveLock are unable to @@ -59,20 +58,26 @@ func IsAlreadyLocked(err error) bool { // NewLock returns a new, non-exclusive lock for the repository. If an // exclusive lock is already held by another process, ErrAlreadyLocked is // returned. -func NewLock(repo *repository.Repository) (*Lock, error) { +func NewLock(repo Repository) (*Lock, error) { return newLock(repo, false) } // NewExclusiveLock returns a new, exclusive lock for the repository. If // another lock (normal and exclusive) is already held by another process, // ErrAlreadyLocked is returned. -func NewExclusiveLock(repo *repository.Repository) (*Lock, error) { +func NewExclusiveLock(repo Repository) (*Lock, error) { return newLock(repo, true) } -const waitBeforeLockCheck = 200 * time.Millisecond +var waitBeforeLockCheck = 200 * time.Millisecond -func newLock(repo *repository.Repository, excl bool) (*Lock, error) { +// TestSetLockTimeout can be used to reduce the lock wait timeout for tests. +func TestSetLockTimeout(t testing.TB, d time.Duration) { + t.Logf("setting lock timeout to %v", d) + waitBeforeLockCheck = d +} + +func newLock(repo Repository, excl bool) (*Lock, error) { lock := &Lock{ Time: time.Now(), PID: os.Getpid(), @@ -128,7 +133,7 @@ func (l *Lock) fillUserInfo() error { // non-exclusive lock is to be created, an error is only returned when an // exclusive lock is found. 
func (l *Lock) checkForOtherLocks() error { - return eachLock(l.repo, func(id backend.ID, lock *Lock, err error) error { + return eachLock(l.repo, func(id ID, lock *Lock, err error) error { if l.lockID != nil && id.Equal(*l.lockID) { return nil } @@ -150,11 +155,11 @@ func (l *Lock) checkForOtherLocks() error { }) } -func eachLock(repo *repository.Repository, f func(backend.ID, *Lock, error) error) error { +func eachLock(repo Repository, f func(ID, *Lock, error) error) error { done := make(chan struct{}) defer close(done) - for id := range repo.List(backend.Lock, done) { + for id := range repo.List(LockFile, done) { lock, err := LoadLock(repo, id) err = f(id, lock, err) if err != nil { @@ -166,10 +171,10 @@ func eachLock(repo *repository.Repository, f func(backend.ID, *Lock, error) erro } // createLock acquires the lock by creating a file in the repository. -func (l *Lock) createLock() (backend.ID, error) { - id, err := l.repo.SaveJSONUnpacked(backend.Lock, l) +func (l *Lock) createLock() (ID, error) { + id, err := l.repo.SaveJSONUnpacked(LockFile, l) if err != nil { - return backend.ID{}, err + return ID{}, err } return id, nil @@ -181,7 +186,7 @@ func (l *Lock) Unlock() error { return nil } - return l.repo.Backend().Remove(backend.Lock, l.lockID.String()) + return l.repo.Backend().Remove(LockFile, l.lockID.String()) } var staleTimeout = 30 * time.Minute @@ -229,7 +234,7 @@ func (l *Lock) Refresh() error { return err } - err = l.repo.Backend().Remove(backend.Lock, l.lockID.String()) + err = l.repo.Backend().Remove(LockFile, l.lockID.String()) if err != nil { return err } @@ -269,9 +274,9 @@ func init() { } // LoadLock loads and unserializes a lock from a repository. 
-func LoadLock(repo *repository.Repository, id backend.ID) (*Lock, error) { +func LoadLock(repo Repository, id ID) (*Lock, error) { lock := &Lock{} - if err := repo.LoadJSONUnpacked(backend.Lock, id, lock); err != nil { + if err := repo.LoadJSONUnpacked(LockFile, id, lock); err != nil { return nil, err } lock.lockID = &id @@ -280,15 +285,15 @@ func LoadLock(repo *repository.Repository, id backend.ID) (*Lock, error) { } // RemoveStaleLocks deletes all locks detected as stale from the repository. -func RemoveStaleLocks(repo *repository.Repository) error { - return eachLock(repo, func(id backend.ID, lock *Lock, err error) error { +func RemoveStaleLocks(repo Repository) error { + return eachLock(repo, func(id ID, lock *Lock, err error) error { // ignore locks that cannot be loaded if err != nil { return nil } if lock.Stale() { - return repo.Backend().Remove(backend.Lock, id.String()) + return repo.Backend().Remove(LockFile, id.String()) } return nil @@ -296,8 +301,8 @@ func RemoveStaleLocks(repo *repository.Repository) error { } // RemoveAllLocks removes all locks forcefully. -func RemoveAllLocks(repo *repository.Repository) error { - return eachLock(repo, func(id backend.ID, lock *Lock, err error) error { - return repo.Backend().Remove(backend.Lock, id.String()) +func RemoveAllLocks(repo Repository) error { + return eachLock(repo, func(id ID, lock *Lock, err error) error { + return repo.Backend().Remove(LockFile, id.String()) }) } diff --git a/src/restic/lock_test.go b/src/restic/lock_test.go index da8fb7a40..a6854dbe6 100644 --- a/src/restic/lock_test.go +++ b/src/restic/lock_test.go @@ -6,14 +6,13 @@ import ( "time" "restic" - "restic/backend" "restic/repository" . 
"restic/test" ) func TestLock(t *testing.T) { - repo := SetupRepo() - defer TeardownRepo(repo) + repo, cleanup := repository.TestRepository(t) + defer cleanup() lock, err := restic.NewLock(repo) OK(t, err) @@ -22,8 +21,8 @@ func TestLock(t *testing.T) { } func TestDoubleUnlock(t *testing.T) { - repo := SetupRepo() - defer TeardownRepo(repo) + repo, cleanup := repository.TestRepository(t) + defer cleanup() lock, err := restic.NewLock(repo) OK(t, err) @@ -36,8 +35,8 @@ func TestDoubleUnlock(t *testing.T) { } func TestMultipleLock(t *testing.T) { - repo := SetupRepo() - defer TeardownRepo(repo) + repo, cleanup := repository.TestRepository(t) + defer cleanup() lock1, err := restic.NewLock(repo) OK(t, err) @@ -50,8 +49,8 @@ func TestMultipleLock(t *testing.T) { } func TestLockExclusive(t *testing.T) { - repo := SetupRepo() - defer TeardownRepo(repo) + repo, cleanup := repository.TestRepository(t) + defer cleanup() elock, err := restic.NewExclusiveLock(repo) OK(t, err) @@ -59,8 +58,8 @@ func TestLockExclusive(t *testing.T) { } func TestLockOnExclusiveLockedRepo(t *testing.T) { - repo := SetupRepo() - defer TeardownRepo(repo) + repo, cleanup := repository.TestRepository(t) + defer cleanup() elock, err := restic.NewExclusiveLock(repo) OK(t, err) @@ -76,8 +75,8 @@ func TestLockOnExclusiveLockedRepo(t *testing.T) { } func TestExclusiveLockOnLockedRepo(t *testing.T) { - repo := SetupRepo() - defer TeardownRepo(repo) + repo, cleanup := repository.TestRepository(t) + defer cleanup() elock, err := restic.NewLock(repo) OK(t, err) @@ -92,18 +91,18 @@ func TestExclusiveLockOnLockedRepo(t *testing.T) { OK(t, elock.Unlock()) } -func createFakeLock(repo *repository.Repository, t time.Time, pid int) (backend.ID, error) { +func createFakeLock(repo restic.Repository, t time.Time, pid int) (restic.ID, error) { hostname, err := os.Hostname() if err != nil { - return backend.ID{}, err + return restic.ID{}, err } newLock := &restic.Lock{Time: t, PID: pid, Hostname: hostname} - return 
repo.SaveJSONUnpacked(backend.Lock, &newLock) + return repo.SaveJSONUnpacked(restic.LockFile, &newLock) } -func removeLock(repo *repository.Repository, id backend.ID) error { - return repo.Backend().Remove(backend.Lock, id.String()) +func removeLock(repo restic.Repository, id restic.ID) error { + return repo.Backend().Remove(restic.LockFile, id.String()) } var staleLockTests = []struct { @@ -162,16 +161,16 @@ func TestLockStale(t *testing.T) { } } -func lockExists(repo *repository.Repository, t testing.TB, id backend.ID) bool { - exists, err := repo.Backend().Test(backend.Lock, id.String()) +func lockExists(repo restic.Repository, t testing.TB, id restic.ID) bool { + exists, err := repo.Backend().Test(restic.LockFile, id.String()) OK(t, err) return exists } func TestLockWithStaleLock(t *testing.T) { - repo := SetupRepo() - defer TeardownRepo(repo) + repo, cleanup := repository.TestRepository(t) + defer cleanup() id1, err := createFakeLock(repo, time.Now().Add(-time.Hour), os.Getpid()) OK(t, err) @@ -195,8 +194,8 @@ func TestLockWithStaleLock(t *testing.T) { } func TestRemoveAllLocks(t *testing.T) { - repo := SetupRepo() - defer TeardownRepo(repo) + repo, cleanup := repository.TestRepository(t) + defer cleanup() id1, err := createFakeLock(repo, time.Now().Add(-time.Hour), os.Getpid()) OK(t, err) @@ -218,14 +217,14 @@ func TestRemoveAllLocks(t *testing.T) { } func TestLockRefresh(t *testing.T) { - repo := SetupRepo() - defer TeardownRepo(repo) + repo, cleanup := repository.TestRepository(t) + defer cleanup() lock, err := restic.NewLock(repo) OK(t, err) - var lockID *backend.ID - for id := range repo.List(backend.Lock, nil) { + var lockID *restic.ID + for id := range repo.List(restic.LockFile, nil) { if lockID != nil { t.Error("more than one lock found") } @@ -234,8 +233,8 @@ func TestLockRefresh(t *testing.T) { OK(t, lock.Refresh()) - var lockID2 *backend.ID - for id := range repo.List(backend.Lock, nil) { + var lockID2 *restic.ID + for id := range 
repo.List(restic.LockFile, nil) { if lockID2 != nil { t.Error("more than one lock found") } diff --git a/src/restic/lock_unix.go b/src/restic/lock_unix.go index 6b481ed26..d1b7fb0a3 100644 --- a/src/restic/lock_unix.go +++ b/src/restic/lock_unix.go @@ -8,7 +8,7 @@ import ( "strconv" "syscall" - "github.com/pkg/errors" + "restic/errors" "restic/debug" ) diff --git a/src/restic/mock/backend.go b/src/restic/mock/backend.go new file mode 100644 index 000000000..5aadc849d --- /dev/null +++ b/src/restic/mock/backend.go @@ -0,0 +1,106 @@ +package mock + +import ( + "restic" + + "restic/errors" +) + +// Backend implements a mock backend. +type Backend struct { + CloseFn func() error + LoadFn func(h restic.Handle, p []byte, off int64) (int, error) + SaveFn func(h restic.Handle, p []byte) error + StatFn func(h restic.Handle) (restic.FileInfo, error) + ListFn func(restic.FileType, <-chan struct{}) <-chan string + RemoveFn func(restic.FileType, string) error + TestFn func(restic.FileType, string) (bool, error) + DeleteFn func() error + LocationFn func() string +} + +// Close the backend. +func (m *Backend) Close() error { + if m.CloseFn == nil { + return nil + } + + return m.CloseFn() +} + +// Location returns a location string. +func (m *Backend) Location() string { + if m.LocationFn == nil { + return "" + } + + return m.LocationFn() +} + +// Load loads data from the backend. +func (m *Backend) Load(h restic.Handle, p []byte, off int64) (int, error) { + if m.LoadFn == nil { + return 0, errors.New("not implemented") + } + + return m.LoadFn(h, p, off) +} + +// Save data in the backend. +func (m *Backend) Save(h restic.Handle, p []byte) error { + if m.SaveFn == nil { + return errors.New("not implemented") + } + + return m.SaveFn(h, p) +} + +// Stat an object in the backend. +func (m *Backend) Stat(h restic.Handle) (restic.FileInfo, error) { + if m.StatFn == nil { + return restic.FileInfo{}, errors.New("not implemented") + } + + return m.StatFn(h) +} + +// List items of type t. 
+func (m *Backend) List(t restic.FileType, done <-chan struct{}) <-chan string { + if m.ListFn == nil { + ch := make(chan string) + close(ch) + return ch + } + + return m.ListFn(t, done) +} + +// Remove data from the backend. +func (m *Backend) Remove(t restic.FileType, name string) error { + if m.RemoveFn == nil { + return errors.New("not implemented") + } + + return m.RemoveFn(t, name) +} + +// Test for the existence of a specific item. +func (m *Backend) Test(t restic.FileType, name string) (bool, error) { + if m.TestFn == nil { + return false, errors.New("not implemented") + } + + return m.TestFn(t, name) +} + +// Delete all data. +func (m *Backend) Delete() error { + if m.DeleteFn == nil { + return errors.New("not implemented") + } + + return m.DeleteFn() +} + +// Make sure that Backend implements the backend interface. +var _ restic.Backend = &Backend{} diff --git a/src/restic/mock/repository.go b/src/restic/mock/repository.go new file mode 100644 index 000000000..3143a8ceb --- /dev/null +++ b/src/restic/mock/repository.go @@ -0,0 +1,141 @@ +package mock + +import ( + "restic" + "restic/crypto" +) + +// Repository implements a mock Repository. 
+type Repository struct { + BackendFn func() restic.Backend + + KeyFn func() *crypto.Key + + SetIndexFn func(restic.Index) + + IndexFn func() restic.Index + SaveFullIndexFn func() error + SaveIndexFn func() error + LoadIndexFn func() error + + ConfigFn func() restic.Config + + LookupBlobSizeFn func(restic.ID, restic.BlobType) (uint, error) + + ListFn func(restic.FileType, <-chan struct{}) <-chan restic.ID + ListPackFn func(restic.ID) ([]restic.Blob, int64, error) + + FlushFn func() error + + SaveUnpackedFn func(restic.FileType, []byte) (restic.ID, error) + SaveJSONUnpackedFn func(restic.FileType, interface{}) (restic.ID, error) + + LoadJSONUnpackedFn func(restic.FileType, restic.ID, interface{}) error + LoadAndDecryptFn func(restic.FileType, restic.ID) ([]byte, error) + + LoadBlobFn func(restic.BlobType, restic.ID, []byte) (int, error) + SaveBlobFn func(restic.BlobType, []byte, restic.ID) (restic.ID, error) + + LoadTreeFn func(restic.ID) (*restic.Tree, error) + SaveTreeFn func(t *restic.Tree) (restic.ID, error) +} + +// Backend is a stub method. +func (repo Repository) Backend() restic.Backend { + return repo.BackendFn() +} + +// Key is a stub method. +func (repo Repository) Key() *crypto.Key { + return repo.KeyFn() +} + +// SetIndex is a stub method. +func (repo Repository) SetIndex(idx restic.Index) { + repo.SetIndexFn(idx) +} + +// Index is a stub method. +func (repo Repository) Index() restic.Index { + return repo.IndexFn() +} + +// SaveFullIndex is a stub method. +func (repo Repository) SaveFullIndex() error { + return repo.SaveFullIndexFn() +} + +// SaveIndex is a stub method. +func (repo Repository) SaveIndex() error { + return repo.SaveIndexFn() +} + +// LoadIndex is a stub method. +func (repo Repository) LoadIndex() error { + return repo.LoadIndexFn() +} + +// Config is a stub method. +func (repo Repository) Config() restic.Config { + return repo.ConfigFn() +} + +// LookupBlobSize is a stub method. 
+func (repo Repository) LookupBlobSize(id restic.ID, t restic.BlobType) (uint, error) { + return repo.LookupBlobSizeFn(id, t) +} + +// List is a stub method. +func (repo Repository) List(t restic.FileType, done <-chan struct{}) <-chan restic.ID { + return repo.ListFn(t, done) +} + +// ListPack is a stub method. +func (repo Repository) ListPack(id restic.ID) ([]restic.Blob, int64, error) { + return repo.ListPackFn(id) +} + +// Flush is a stub method. +func (repo Repository) Flush() error { + return repo.FlushFn() +} + +// SaveUnpacked is a stub method. +func (repo Repository) SaveUnpacked(t restic.FileType, buf []byte) (restic.ID, error) { + return repo.SaveUnpackedFn(t, buf) +} + +// SaveJSONUnpacked is a stub method. +func (repo Repository) SaveJSONUnpacked(t restic.FileType, item interface{}) (restic.ID, error) { + return repo.SaveJSONUnpackedFn(t, item) +} + +// LoadJSONUnpacked is a stub method. +func (repo Repository) LoadJSONUnpacked(t restic.FileType, id restic.ID, item interface{}) error { + return repo.LoadJSONUnpackedFn(t, id, item) +} + +// LoadAndDecrypt is a stub method. +func (repo Repository) LoadAndDecrypt(t restic.FileType, id restic.ID) ([]byte, error) { + return repo.LoadAndDecryptFn(t, id) +} + +// LoadBlob is a stub method. +func (repo Repository) LoadBlob(t restic.BlobType, id restic.ID, buf []byte) (int, error) { + return repo.LoadBlobFn(t, id, buf) +} + +// SaveBlob is a stub method. +func (repo Repository) SaveBlob(t restic.BlobType, buf []byte, id restic.ID) (restic.ID, error) { + return repo.SaveBlobFn(t, buf, id) +} + +// LoadTree is a stub method. +func (repo Repository) LoadTree(id restic.ID) (*restic.Tree, error) { + return repo.LoadTreeFn(id) +} + +// SaveTree is a stub method. 
+func (repo Repository) SaveTree(t *restic.Tree) (restic.ID, error) { + return repo.SaveTreeFn(t) +} diff --git a/src/restic/node.go b/src/restic/node.go index 37ef5e04c..1d33aa6ad 100644 --- a/src/restic/node.go +++ b/src/restic/node.go @@ -10,44 +10,40 @@ import ( "syscall" "time" - "github.com/pkg/errors" + "restic/errors" "runtime" - "restic/backend" "restic/debug" "restic/fs" - "restic/pack" - "restic/repository" ) // Node is a file, directory or other item in a backup. type Node struct { - Name string `json:"name"` - Type string `json:"type"` - Mode os.FileMode `json:"mode,omitempty"` - ModTime time.Time `json:"mtime,omitempty"` - AccessTime time.Time `json:"atime,omitempty"` - ChangeTime time.Time `json:"ctime,omitempty"` - UID uint32 `json:"uid"` - GID uint32 `json:"gid"` - User string `json:"user,omitempty"` - Group string `json:"group,omitempty"` - Inode uint64 `json:"inode,omitempty"` - Size uint64 `json:"size,omitempty"` - Links uint64 `json:"links,omitempty"` - LinkTarget string `json:"linktarget,omitempty"` - Device uint64 `json:"device,omitempty"` - Content []backend.ID `json:"content"` - Subtree *backend.ID `json:"subtree,omitempty"` + Name string `json:"name"` + Type string `json:"type"` + Mode os.FileMode `json:"mode,omitempty"` + ModTime time.Time `json:"mtime,omitempty"` + AccessTime time.Time `json:"atime,omitempty"` + ChangeTime time.Time `json:"ctime,omitempty"` + UID uint32 `json:"uid"` + GID uint32 `json:"gid"` + User string `json:"user,omitempty"` + Group string `json:"group,omitempty"` + Inode uint64 `json:"inode,omitempty"` + Size uint64 `json:"size,omitempty"` + Links uint64 `json:"links,omitempty"` + LinkTarget string `json:"linktarget,omitempty"` + Device uint64 `json:"device,omitempty"` + Content IDs `json:"content"` + Subtree *ID `json:"subtree,omitempty"` Error string `json:"error,omitempty"` tree *Tree - path string - err error - blobs repository.Blobs + Path string `json:"-"` + err error } func (node Node) String() string { @@ 
-63,6 +59,7 @@ -func (node Node) String() string { return fmt.Sprintf("", node.Type, node.Name) } +// Tree returns this node's tree object. func (node Node) Tree() *Tree { return node.tree } @@ -71,7 +68,7 @@ func NodeFromFileInfo(path string, fi os.FileInfo) (*Node, error) { mask := os.ModePerm | os.ModeType | os.ModeSetuid | os.ModeSetgid | os.ModeSticky node := &Node{ - path: path, + Path: path, Name: fi.Name(), Mode: fi.Mode() & mask, ModTime: fi.ModTime(), @@ -108,7 +105,7 @@ func nodeTypeFromFileInfo(fi os.FileInfo) string { } // CreateAt creates the node at the given path and restores all the meta data. -func (node *Node) CreateAt(path string, repo *repository.Repository) error { +func (node *Node) CreateAt(path string, repo Repository) error { debug.Log("Node.CreateAt", "create node %v at %v", node.Name, path) switch node.Type { @@ -202,7 +199,7 @@ func (node Node) createDirAt(path string) error { return nil } -func (node Node) createFileAt(path string, repo *repository.Repository) error { +func (node Node) createFileAt(path string, repo Repository) error { f, err := fs.OpenFile(path, os.O_CREATE|os.O_WRONLY, 0600) defer f.Close() @@ -212,7 +209,7 @@ func (node Node) createFileAt(path string, repo *repository.Repository) error { var buf []byte for _, id := range node.Content { - size, err := repo.LookupBlobSize(id, pack.Data) + size, err := repo.LookupBlobSize(id, DataBlob) if err != nil { return err } @@ -222,10 +219,11 @@ func (node Node) createFileAt(path string, repo *repository.Repository) error { buf = make([]byte, size) } - buf, err := repo.LoadBlob(id, pack.Data, buf) + n, err := repo.LoadBlob(DataBlob, id, buf) if err != nil { return err } + buf = buf[:n] _, err = f.Write(buf) if err != nil { @@ -374,15 +372,16 @@ func (node Node) sameContent(other Node) bool { return true } -func (node *Node) isNewer(path string, fi os.FileInfo) bool { +// IsNewer returns true if the file has been updated since the last Stat().
+func (node *Node) IsNewer(path string, fi os.FileInfo) bool { if node.Type != "file" { - debug.Log("node.isNewer", "node %v is newer: not file", path) + debug.Log("node.IsNewer", "node %v is newer: not file", path) return true } tpe := nodeTypeFromFileInfo(fi) if node.Name != fi.Name() || node.Type != tpe { - debug.Log("node.isNewer", "node %v is newer: name or type changed", path) + debug.Log("node.IsNewer", "node %v is newer: name or type changed", path) return true } @@ -392,7 +391,7 @@ func (node *Node) isNewer(path string, fi os.FileInfo) bool { if !ok { if node.ModTime != fi.ModTime() || node.Size != size { - debug.Log("node.isNewer", "node %v is newer: timestamp or size changed", path) + debug.Log("node.IsNewer", "node %v is newer: timestamp or size changed", path) return true } return false @@ -404,11 +403,11 @@ func (node *Node) isNewer(path string, fi os.FileInfo) bool { node.ChangeTime != changeTime(extendedStat) || node.Inode != uint64(inode) || node.Size != size { - debug.Log("node.isNewer", "node %v is newer: timestamp, size or inode changed", path) + debug.Log("node.IsNewer", "node %v is newer: timestamp, size or inode changed", path) return true } - debug.Log("node.isNewer", "node %v is not newer", path) + debug.Log("node.IsNewer", "node %v is not newer", path) return false } diff --git a/src/restic/node_linux.go b/src/restic/node_linux.go index 57a5e5c47..7ebad89f3 100644 --- a/src/restic/node_linux.go +++ b/src/restic/node_linux.go @@ -6,7 +6,7 @@ import ( "golang.org/x/sys/unix" - "github.com/pkg/errors" + "restic/errors" "restic/fs" ) diff --git a/src/restic/node_test.go b/src/restic/node_test.go index e3b458c47..a2e175b14 100644 --- a/src/restic/node_test.go +++ b/src/restic/node_test.go @@ -9,7 +9,6 @@ import ( "time" "restic" - "restic/backend" . 
"restic/test" ) @@ -75,7 +74,7 @@ var nodeTests = []restic.Node{ restic.Node{ Name: "testFile", Type: "file", - Content: []backend.ID{}, + Content: restic.IDs{}, UID: uint32(os.Getuid()), GID: uint32(os.Getgid()), Mode: 0604, @@ -86,7 +85,7 @@ var nodeTests = []restic.Node{ restic.Node{ Name: "testSuidFile", Type: "file", - Content: []backend.ID{}, + Content: restic.IDs{}, UID: uint32(os.Getuid()), GID: uint32(os.Getgid()), Mode: 0755 | os.ModeSetuid, @@ -97,7 +96,7 @@ var nodeTests = []restic.Node{ restic.Node{ Name: "testSuidFile2", Type: "file", - Content: []backend.ID{}, + Content: restic.IDs{}, UID: uint32(os.Getuid()), GID: uint32(os.Getgid()), Mode: 0755 | os.ModeSetgid, @@ -108,7 +107,7 @@ var nodeTests = []restic.Node{ restic.Node{ Name: "testSticky", Type: "file", - Content: []backend.ID{}, + Content: restic.IDs{}, UID: uint32(os.Getuid()), GID: uint32(os.Getgid()), Mode: 0755 | os.ModeSticky, diff --git a/src/restic/node_windows.go b/src/restic/node_windows.go index 08a7f86a2..050de8f27 100644 --- a/src/restic/node_windows.go +++ b/src/restic/node_windows.go @@ -3,7 +3,7 @@ package restic import ( "syscall" - "github.com/pkg/errors" + "restic/errors" ) // mknod() creates a filesystem node (file, device diff --git a/src/restic/pack/handle.go b/src/restic/pack/handle.go deleted file mode 100644 index 9a0ce58f6..000000000 --- a/src/restic/pack/handle.go +++ /dev/null @@ -1,51 +0,0 @@ -package pack - -import ( - "fmt" - "restic/backend" -) - -// Handle identifies a blob of a given type. -type Handle struct { - ID backend.ID - Type BlobType -} - -func (h Handle) String() string { - return fmt.Sprintf("<%s/%s>", h.Type, h.ID.Str()) -} - -// Handles is an ordered list of Handles that implements sort.Interface. 
-type Handles []Handle - -func (h Handles) Len() int { - return len(h) -} - -func (h Handles) Less(i, j int) bool { - for k, b := range h[i].ID { - if b == h[j].ID[k] { - continue - } - - if b < h[j].ID[k] { - return true - } - - return false - } - - return h[i].Type < h[j].Type -} - -func (h Handles) Swap(i, j int) { - h[i], h[j] = h[j], h[i] -} - -func (h Handles) String() string { - elements := make([]string, 0, len(h)) - for _, e := range h { - elements = append(elements, e.String()) - } - return fmt.Sprintf("%v", elements) -} diff --git a/src/restic/pack/pack.go b/src/restic/pack/pack.go index 3fb7206a5..17f79b09a 100644 --- a/src/restic/pack/pack.go +++ b/src/restic/pack/pack.go @@ -5,77 +5,17 @@ import ( "encoding/binary" "fmt" "io" + "restic" "sync" - "github.com/pkg/errors" + "restic/errors" - "restic/backend" "restic/crypto" ) -// BlobType specifies what a blob stored in a pack is. -type BlobType uint8 - -// These are the blob types that can be stored in a pack. -const ( - Invalid BlobType = iota - Data - Tree -) - -func (t BlobType) String() string { - switch t { - case Data: - return "data" - case Tree: - return "tree" - } - - return fmt.Sprintf("", t) -} - -// MarshalJSON encodes the BlobType into JSON. -func (t BlobType) MarshalJSON() ([]byte, error) { - switch t { - case Data: - return []byte(`"data"`), nil - case Tree: - return []byte(`"tree"`), nil - } - - return nil, errors.New("unknown blob type") -} - -// UnmarshalJSON decodes the BlobType from JSON. -func (t *BlobType) UnmarshalJSON(buf []byte) error { - switch string(buf) { - case `"data"`: - *t = Data - case `"tree"`: - *t = Tree - default: - return errors.New("unknown blob type") - } - - return nil -} - -// Blob is a blob within a pack. -type Blob struct { - Type BlobType - Length uint - ID backend.ID - Offset uint -} - -func (b Blob) String() string { - return fmt.Sprintf("", - b.ID.Str(), b.Type, b.Length, b.Offset) -} - // Packer is used to create a new Pack. 
type Packer struct { - blobs []Blob + blobs []restic.Blob bytes uint k *crypto.Key @@ -95,11 +35,11 @@ func NewPacker(k *crypto.Key, wr io.Writer) *Packer { // Add saves the data read from rd as a new blob to the packer. Returned is the // number of bytes written to the pack. -func (p *Packer) Add(t BlobType, id backend.ID, data []byte) (int, error) { +func (p *Packer) Add(t restic.BlobType, id restic.ID, data []byte) (int, error) { p.m.Lock() defer p.m.Unlock() - c := Blob{Type: t, ID: id} + c := restic.Blob{Type: t, ID: id} n, err := p.wr.Write(data) c.Length = uint(n) @@ -110,13 +50,13 @@ func (p *Packer) Add(t BlobType, id backend.ID, data []byte) (int, error) { return n, errors.Wrap(err, "Write") } -var entrySize = uint(binary.Size(BlobType(0)) + binary.Size(uint32(0)) + backend.IDSize) +var entrySize = uint(binary.Size(restic.BlobType(0)) + binary.Size(uint32(0)) + len(restic.ID{})) // headerEntry is used with encoding/binary to read and write header entries type headerEntry struct { Type uint8 Length uint32 - ID [backend.IDSize]byte + ID restic.ID } // Finalize writes the header for all added blobs and finalizes the pack. @@ -177,9 +117,9 @@ func (p *Packer) writeHeader(wr io.Writer) (bytesWritten uint, err error) { } switch b.Type { - case Data: + case restic.DataBlob: entry.Type = 0 - case Tree: + case restic.TreeBlob: entry.Type = 1 default: return 0, errors.Errorf("invalid blob type %v", b.Type) @@ -213,7 +153,7 @@ func (p *Packer) Count() int { } // Blobs returns the slice of blobs that have been written. -func (p *Packer) Blobs() []Blob { +func (p *Packer) Blobs() []restic.Blob { p.m.Lock() defer p.m.Unlock() @@ -279,18 +219,19 @@ func readHeader(rd io.ReaderAt, size int64) ([]byte, error) { } // List returns the list of entries found in a pack file. 
-func List(k *crypto.Key, rd io.ReaderAt, size int64) (entries []Blob, err error) { +func List(k *crypto.Key, rd io.ReaderAt, size int64) (entries []restic.Blob, err error) { buf, err := readHeader(rd, size) if err != nil { return nil, err } - hdr, err := crypto.Decrypt(k, buf, buf) + n, err := crypto.Decrypt(k, buf, buf) if err != nil { return nil, err } + buf = buf[:n] - hdrRd := bytes.NewReader(hdr) + hdrRd := bytes.NewReader(buf) pos := uint(0) for { @@ -304,7 +245,7 @@ func List(k *crypto.Key, rd io.ReaderAt, size int64) (entries []Blob, err error) return nil, errors.Wrap(err, "binary.Read") } - entry := Blob{ + entry := restic.Blob{ Length: uint(e.Length), ID: e.ID, Offset: pos, @@ -312,9 +253,9 @@ func List(k *crypto.Key, rd io.ReaderAt, size int64) (entries []Blob, err error) switch e.Type { case 0: - entry.Type = Data + entry.Type = restic.DataBlob case 1: - entry.Type = Tree + entry.Type = restic.TreeBlob default: return nil, errors.Errorf("invalid type %d", e.Type) } diff --git a/src/restic/pack/pack_test.go b/src/restic/pack/pack_test.go index 82b026e7e..f90d1a426 100644 --- a/src/restic/pack/pack_test.go +++ b/src/restic/pack/pack_test.go @@ -7,9 +7,9 @@ import ( "encoding/binary" "encoding/json" "io" + "restic" "testing" - "restic/backend" "restic/backend/mem" "restic/crypto" "restic/pack" @@ -20,7 +20,7 @@ var testLens = []int{23, 31650, 25860, 10928, 13769, 19862, 5211, 127, 13690, 30 type Buf struct { data []byte - id backend.ID + id restic.ID } func newPack(t testing.TB, k *crypto.Key, lengths []int) ([]Buf, []byte, uint) { @@ -37,7 +37,7 @@ func newPack(t testing.TB, k *crypto.Key, lengths []int) ([]Buf, []byte, uint) { // pack blobs p := pack.NewPacker(k, nil) for _, b := range bufs { - p.Add(pack.Tree, b.id, b.data) + p.Add(restic.TreeBlob, b.id, b.data) } _, err := p.Finalize() @@ -55,7 +55,7 @@ func verifyBlobs(t testing.TB, bufs []Buf, k *crypto.Key, rd io.ReaderAt, packSi // header length written += binary.Size(uint32(0)) // header - 
written += len(bufs) * (binary.Size(pack.BlobType(0)) + binary.Size(uint32(0)) + backend.IDSize) + written += len(bufs) * (binary.Size(restic.BlobType(0)) + binary.Size(uint32(0)) + len(restic.ID{})) // header crypto written += crypto.Extension @@ -95,11 +95,11 @@ func TestCreatePack(t *testing.T) { } var blobTypeJSON = []struct { - t pack.BlobType + t restic.BlobType res string }{ - {pack.Data, `"data"`}, - {pack.Tree, `"tree"`}, + {restic.DataBlob, `"data"`}, + {restic.TreeBlob, `"tree"`}, } func TestBlobTypeJSON(t *testing.T) { @@ -110,7 +110,7 @@ func TestBlobTypeJSON(t *testing.T) { Equals(t, test.res, string(buf)) // test unserialize - var v pack.BlobType + var v restic.BlobType err = json.Unmarshal([]byte(test.res), &v) OK(t, err) Equals(t, test.t, v) @@ -124,11 +124,11 @@ func TestUnpackReadSeeker(t *testing.T) { bufs, packData, packSize := newPack(t, k, testLens) b := mem.New() - id := backend.Hash(packData) + id := restic.Hash(packData) - handle := backend.Handle{Type: backend.Data, Name: id.String()} + handle := restic.Handle{Type: restic.DataFile, Name: id.String()} OK(t, b.Save(handle, packData)) - verifyBlobs(t, bufs, k, backend.ReaderAt(b, handle), packSize) + verifyBlobs(t, bufs, k, restic.ReaderAt(b, handle), packSize) } func TestShortPack(t *testing.T) { @@ -137,9 +137,9 @@ func TestShortPack(t *testing.T) { bufs, packData, packSize := newPack(t, k, []int{23}) b := mem.New() - id := backend.Hash(packData) + id := restic.Hash(packData) - handle := backend.Handle{Type: backend.Data, Name: id.String()} + handle := restic.Handle{Type: restic.DataFile, Name: id.String()} OK(t, b.Save(handle, packData)) - verifyBlobs(t, bufs, k, backend.ReaderAt(b, handle), packSize) + verifyBlobs(t, bufs, k, restic.ReaderAt(b, handle), packSize) } diff --git a/src/restic/pipe/pipe.go b/src/restic/pipe/pipe.go index 48a83a362..1ed9b6162 100644 --- a/src/restic/pipe/pipe.go +++ b/src/restic/pipe/pipe.go @@ -6,7 +6,7 @@ import ( "path/filepath" "sort" - 
"github.com/pkg/errors" + "restic/errors" "restic/debug" "restic/fs" diff --git a/src/restic/repository/rand_reader.go b/src/restic/rand_reader.go similarity index 97% rename from src/restic/repository/rand_reader.go rename to src/restic/rand_reader.go index 2afbd60b7..205fd6aba 100644 --- a/src/restic/repository/rand_reader.go +++ b/src/restic/rand_reader.go @@ -1,10 +1,10 @@ -package repository +package restic import ( "io" "math/rand" - "github.com/pkg/errors" + "restic/errors" ) // RandReader allows reading from a rand.Rand. diff --git a/src/restic/backend/readerat.go b/src/restic/readerat.go similarity index 95% rename from src/restic/backend/readerat.go rename to src/restic/readerat.go index 027b34456..7d36b3396 100644 --- a/src/restic/backend/readerat.go +++ b/src/restic/readerat.go @@ -1,4 +1,4 @@ -package backend +package restic import ( "io" diff --git a/src/restic/repository.go b/src/restic/repository.go new file mode 100644 index 000000000..959c0bd3c --- /dev/null +++ b/src/restic/repository.go @@ -0,0 +1,58 @@ +package restic + +import "restic/crypto" + +// Repository stores data in a backend. It provides high-level functions and +// transparently encrypts/decrypts data. 
+type Repository interface { + + // Backend returns the backend used by the repository + Backend() Backend + + Key() *crypto.Key + + SetIndex(Index) + + Index() Index + SaveFullIndex() error + SaveIndex() error + LoadIndex() error + + Config() Config + + LookupBlobSize(ID, BlobType) (uint, error) + + List(FileType, <-chan struct{}) <-chan ID + ListPack(ID) ([]Blob, int64, error) + + Flush() error + + SaveUnpacked(FileType, []byte) (ID, error) + SaveJSONUnpacked(FileType, interface{}) (ID, error) + + LoadJSONUnpacked(FileType, ID, interface{}) error + LoadAndDecrypt(FileType, ID) ([]byte, error) + + LoadBlob(BlobType, ID, []byte) (int, error) + SaveBlob(BlobType, []byte, ID) (ID, error) + + LoadTree(ID) (*Tree, error) + SaveTree(t *Tree) (ID, error) +} + +// Deleter removes all data stored in a backend/repo. +type Deleter interface { + Delete() error +} + +// Lister allows listing files in a backend. +type Lister interface { + List(FileType, <-chan struct{}) <-chan string +} + +// Index keeps track of the blobs are stored within files. 
+type Index interface { + Has(ID, BlobType) bool + Lookup(ID, BlobType) ([]PackedBlob, error) + Count(BlobType) uint +} diff --git a/src/restic/repository/blob.go b/src/restic/repository/blob.go deleted file mode 100644 index 13cb022d1..000000000 --- a/src/restic/repository/blob.go +++ /dev/null @@ -1,47 +0,0 @@ -package repository - -import ( - "fmt" - - "restic/backend" -) - -type Blob struct { - ID *backend.ID `json:"id,omitempty"` - Size uint64 `json:"size,omitempty"` - Storage *backend.ID `json:"sid,omitempty"` // encrypted ID - StorageSize uint64 `json:"ssize,omitempty"` // encrypted Size -} - -type Blobs []Blob - -func (b Blob) Valid() bool { - if b.ID == nil || b.Storage == nil || b.StorageSize == 0 { - return false - } - - return true -} - -func (b Blob) String() string { - return fmt.Sprintf("Blob<%s (%d) -> %s (%d)>", - b.ID.Str(), b.Size, - b.Storage.Str(), b.StorageSize) -} - -// Compare compares two blobs by comparing the ID and the size. It returns -1, -// 0, or 1. -func (b Blob) Compare(other Blob) int { - if res := b.ID.Compare(*other.ID); res != 0 { - return res - } - - if b.Size < other.Size { - return -1 - } - if b.Size > other.Size { - return 1 - } - - return 0 -} diff --git a/src/restic/repository/config_test.go b/src/restic/repository/config_test.go deleted file mode 100644 index 71f2fd810..000000000 --- a/src/restic/repository/config_test.go +++ /dev/null @@ -1,55 +0,0 @@ -package repository_test - -import ( - "testing" - - "restic/backend" - "restic/repository" - . 
"restic/test" -) - -type saver func(backend.Type, interface{}) (backend.ID, error) - -func (s saver) SaveJSONUnpacked(t backend.Type, arg interface{}) (backend.ID, error) { - return s(t, arg) -} - -type loader func(backend.Type, backend.ID, interface{}) error - -func (l loader) LoadJSONUnpacked(t backend.Type, id backend.ID, arg interface{}) error { - return l(t, id, arg) -} - -func TestConfig(t *testing.T) { - resultConfig := repository.Config{} - save := func(tpe backend.Type, arg interface{}) (backend.ID, error) { - Assert(t, tpe == backend.Config, - "wrong backend type: got %v, wanted %v", - tpe, backend.Config) - - cfg := arg.(repository.Config) - resultConfig = cfg - return backend.ID{}, nil - } - - cfg1, err := repository.CreateConfig() - OK(t, err) - - _, err = saver(save).SaveJSONUnpacked(backend.Config, cfg1) - - load := func(tpe backend.Type, id backend.ID, arg interface{}) error { - Assert(t, tpe == backend.Config, - "wrong backend type: got %v, wanted %v", - tpe, backend.Config) - - cfg := arg.(*repository.Config) - *cfg = resultConfig - return nil - } - - cfg2, err := repository.LoadConfig(loader(load)) - OK(t, err) - - Assert(t, cfg1 == cfg2, - "configs aren't equal: %v != %v", cfg1, cfg2) -} diff --git a/src/restic/repository/index.go b/src/restic/repository/index.go index f49a1735f..029374063 100644 --- a/src/restic/repository/index.go +++ b/src/restic/repository/index.go @@ -3,32 +3,30 @@ package repository import ( "bytes" "encoding/json" - "fmt" "io" + "restic" "sync" "time" - "github.com/pkg/errors" + "restic/errors" - "restic/backend" "restic/crypto" "restic/debug" - "restic/pack" ) // Index holds a lookup table for id -> pack. 
type Index struct { m sync.Mutex - pack map[pack.Handle][]indexEntry + pack map[restic.BlobHandle][]indexEntry - final bool // set to true for all indexes read from the backend ("finalized") - id backend.ID // set to the ID of the index when it's finalized - supersedes backend.IDs + final bool // set to true for all indexes read from the backend ("finalized") + id restic.ID // set to the ID of the index when it's finalized + supersedes restic.IDs created time.Time } type indexEntry struct { - packID backend.ID + packID restic.ID offset uint length uint } @@ -36,18 +34,18 @@ type indexEntry struct { // NewIndex returns a new index. func NewIndex() *Index { return &Index{ - pack: make(map[pack.Handle][]indexEntry), + pack: make(map[restic.BlobHandle][]indexEntry), created: time.Now(), } } -func (idx *Index) store(blob PackedBlob) { +func (idx *Index) store(blob restic.PackedBlob) { newEntry := indexEntry{ packID: blob.PackID, offset: blob.Offset, length: blob.Length, } - h := pack.Handle{ID: blob.ID, Type: blob.Type} + h := restic.BlobHandle{ID: blob.ID, Type: blob.Type} idx.pack[h] = append(idx.pack[h], newEntry) } @@ -98,7 +96,7 @@ var IndexFull = func(idx *Index) bool { // Store remembers the id and pack in the index. An existing entry will be // silently overwritten. -func (idx *Index) Store(blob PackedBlob) { +func (idx *Index) Store(blob restic.PackedBlob) { idx.m.Lock() defer idx.m.Unlock() @@ -111,25 +109,27 @@ func (idx *Index) Store(blob PackedBlob) { idx.store(blob) } -// Lookup queries the index for the blob ID and returns a PackedBlob. -func (idx *Index) Lookup(id backend.ID, tpe pack.BlobType) (blobs []PackedBlob, err error) { +// Lookup queries the index for the blob ID and returns a restic.PackedBlob. 
+func (idx *Index) Lookup(id restic.ID, tpe restic.BlobType) (blobs []restic.PackedBlob, err error) { idx.m.Lock() defer idx.m.Unlock() - h := pack.Handle{ID: id, Type: tpe} + h := restic.BlobHandle{ID: id, Type: tpe} if packs, ok := idx.pack[h]; ok { - blobs = make([]PackedBlob, 0, len(packs)) + blobs = make([]restic.PackedBlob, 0, len(packs)) for _, p := range packs { debug.Log("Index.Lookup", "id %v found in pack %v at %d, length %d", id.Str(), p.packID.Str(), p.offset, p.length) - blob := PackedBlob{ - Type: tpe, - Length: p.length, - ID: id, - Offset: p.offset, + blob := restic.PackedBlob{ + Blob: restic.Blob{ + Type: tpe, + Length: p.length, + ID: id, + Offset: p.offset, + }, PackID: p.packID, } @@ -144,18 +144,20 @@ func (idx *Index) Lookup(id backend.ID, tpe pack.BlobType) (blobs []PackedBlob, } // ListPack returns a list of blobs contained in a pack. -func (idx *Index) ListPack(id backend.ID) (list []PackedBlob) { +func (idx *Index) ListPack(id restic.ID) (list []restic.PackedBlob) { idx.m.Lock() defer idx.m.Unlock() for h, packList := range idx.pack { for _, entry := range packList { if entry.packID == id { - list = append(list, PackedBlob{ - ID: h.ID, - Type: h.Type, - Length: entry.length, - Offset: entry.offset, + list = append(list, restic.PackedBlob{ + Blob: restic.Blob{ + ID: h.ID, + Type: h.Type, + Length: entry.length, + Offset: entry.offset, + }, PackID: entry.packID, }) } @@ -166,7 +168,7 @@ func (idx *Index) ListPack(id backend.ID) (list []PackedBlob) { } // Has returns true iff the id is listed in the index. 
-func (idx *Index) Has(id backend.ID, tpe pack.BlobType) bool { +func (idx *Index) Has(id restic.ID, tpe restic.BlobType) bool { _, err := idx.Lookup(id, tpe) if err == nil { return true @@ -177,23 +179,23 @@ func (idx *Index) Has(id backend.ID, tpe pack.BlobType) bool { // LookupSize returns the length of the cleartext content behind the // given id -func (idx *Index) LookupSize(id backend.ID, tpe pack.BlobType) (cleartextLength uint, err error) { +func (idx *Index) LookupSize(id restic.ID, tpe restic.BlobType) (cleartextLength uint, err error) { blobs, err := idx.Lookup(id, tpe) if err != nil { return 0, err } - return blobs[0].PlaintextLength(), nil + return blobs[0].Length - crypto.Extension, nil } // Supersedes returns the list of indexes this index supersedes, if any. -func (idx *Index) Supersedes() backend.IDs { +func (idx *Index) Supersedes() restic.IDs { return idx.supersedes } // AddToSupersedes adds the ids to the list of indexes superseded by this // index. If the index has already been finalized, an error is returned. -func (idx *Index) AddToSupersedes(ids ...backend.ID) error { +func (idx *Index) AddToSupersedes(ids ...restic.ID) error { idx.m.Lock() defer idx.m.Unlock() @@ -205,32 +207,13 @@ func (idx *Index) AddToSupersedes(ids ...backend.ID) error { return nil } -// PackedBlob is a blob already saved within a pack. -type PackedBlob struct { - Type pack.BlobType - Length uint - ID backend.ID - Offset uint - PackID backend.ID -} - -func (pb PackedBlob) String() string { - return fmt.Sprintf(" 0 && checked > maxKeys { return nil, ErrMaxKeysReached } @@ -142,7 +145,7 @@ func SearchKey(s *Repository, password string, maxKeys int) (*Key, error) { // LoadKey loads a key from the backend. 
func LoadKey(s *Repository, name string) (k *Key, err error) { - h := backend.Handle{Type: backend.Key, Name: name} + h := restic.Handle{Type: restic.KeyFile, Name: name} data, err := backend.LoadAll(s.be, h, nil) if err != nil { return nil, err @@ -224,9 +227,9 @@ func AddKey(s *Repository, password string, template *crypto.Key) (*Key, error) } // store in repository and return - h := backend.Handle{ - Type: backend.Key, - Name: backend.Hash(buf).String(), + h := restic.Handle{ + Type: restic.KeyFile, + Name: restic.Hash(buf).String(), } err = s.be.Save(h, buf) diff --git a/src/restic/repository/master_index.go b/src/restic/repository/master_index.go index 96425f9e8..a3489e53b 100644 --- a/src/restic/repository/master_index.go +++ b/src/restic/repository/master_index.go @@ -1,13 +1,12 @@ package repository import ( + "restic" "sync" - "github.com/pkg/errors" + "restic/errors" - "restic/backend" "restic/debug" - "restic/pack" ) // MasterIndex is a collection of indexes and IDs of chunks that are in the process of being saved. @@ -22,7 +21,7 @@ func NewMasterIndex() *MasterIndex { } // Lookup queries all known Indexes for the ID and returns the first match. -func (mi *MasterIndex) Lookup(id backend.ID, tpe pack.BlobType) (blobs []PackedBlob, err error) { +func (mi *MasterIndex) Lookup(id restic.ID, tpe restic.BlobType) (blobs []restic.PackedBlob, err error) { mi.idxMutex.RLock() defer mi.idxMutex.RUnlock() @@ -42,7 +41,7 @@ func (mi *MasterIndex) Lookup(id backend.ID, tpe pack.BlobType) (blobs []PackedB } // LookupSize queries all known Indexes for the ID and returns the first match. -func (mi *MasterIndex) LookupSize(id backend.ID, tpe pack.BlobType) (uint, error) { +func (mi *MasterIndex) LookupSize(id restic.ID, tpe restic.BlobType) (uint, error) { mi.idxMutex.RLock() defer mi.idxMutex.RUnlock() @@ -58,7 +57,7 @@ func (mi *MasterIndex) LookupSize(id backend.ID, tpe pack.BlobType) (uint, error // ListPack returns the list of blobs in a pack. 
The first matching index is // returned, or nil if no index contains information about the pack id. -func (mi *MasterIndex) ListPack(id backend.ID) (list []PackedBlob) { +func (mi *MasterIndex) ListPack(id restic.ID) (list []restic.PackedBlob) { mi.idxMutex.RLock() defer mi.idxMutex.RUnlock() @@ -73,7 +72,7 @@ func (mi *MasterIndex) ListPack(id backend.ID) (list []PackedBlob) { } // Has queries all known Indexes for the ID and returns the first match. -func (mi *MasterIndex) Has(id backend.ID, tpe pack.BlobType) bool { +func (mi *MasterIndex) Has(id restic.ID, tpe restic.BlobType) bool { mi.idxMutex.RLock() defer mi.idxMutex.RUnlock() @@ -87,7 +86,7 @@ func (mi *MasterIndex) Has(id backend.ID, tpe pack.BlobType) bool { } // Count returns the number of blobs of type t in the index. -func (mi *MasterIndex) Count(t pack.BlobType) (n uint) { +func (mi *MasterIndex) Count(t restic.BlobType) (n uint) { mi.idxMutex.RLock() defer mi.idxMutex.RUnlock() @@ -197,7 +196,7 @@ func (mi *MasterIndex) All() []*Index { // RebuildIndex combines all known indexes to a new index, leaving out any // packs whose ID is contained in packBlacklist. The new index contains the IDs // of all known indexes in the "supersedes" field. -func (mi *MasterIndex) RebuildIndex(packBlacklist backend.IDSet) (*Index, error) { +func (mi *MasterIndex) RebuildIndex(packBlacklist restic.IDSet) (*Index, error) { mi.idxMutex.Lock() defer mi.idxMutex.Unlock() diff --git a/src/restic/repository/packer_manager.go b/src/restic/repository/packer_manager.go index 32c8a73d4..85c5b186c 100644 --- a/src/restic/repository/packer_manager.go +++ b/src/restic/repository/packer_manager.go @@ -4,11 +4,11 @@ import ( "io" "io/ioutil" "os" + "restic" "sync" - "github.com/pkg/errors" + "restic/errors" - "restic/backend" "restic/crypto" "restic/debug" "restic/fs" @@ -17,7 +17,7 @@ import ( // Saver implements saving data in a backend. 
type Saver interface { - Save(h backend.Handle, jp []byte) error + Save(h restic.Handle, jp []byte) error } // packerManager keeps a list of open packs and creates new on demand. @@ -114,8 +114,8 @@ func (r *Repository) savePacker(p *pack.Packer) error { return errors.Wrap(err, "Close") } - id := backend.Hash(data) - h := backend.Handle{Type: backend.Data, Name: id.String()} + id := restic.Hash(data) + h := restic.Handle{Type: restic.DataFile, Name: id.String()} err = r.be.Save(h, data) if err != nil { @@ -133,12 +133,14 @@ func (r *Repository) savePacker(p *pack.Packer) error { // update blobs in the index for _, b := range p.Blobs() { debug.Log("Repo.savePacker", " updating blob %v to pack %v", b.ID.Str(), id.Str()) - r.idx.Current().Store(PackedBlob{ - Type: b.Type, - ID: b.ID, + r.idx.Current().Store(restic.PackedBlob{ + Blob: restic.Blob{ + Type: b.Type, + ID: b.ID, + Offset: b.Offset, + Length: uint(b.Length), + }, PackID: id, - Offset: b.Offset, - Length: uint(b.Length), }) } diff --git a/src/restic/repository/packer_manager_test.go b/src/restic/repository/packer_manager_test.go index 78d91bc37..bf6258428 100644 --- a/src/restic/repository/packer_manager_test.go +++ b/src/restic/repository/packer_manager_test.go @@ -4,10 +4,9 @@ import ( "io" "math/rand" "os" - "restic/backend" + "restic" "restic/backend/mem" "restic/crypto" - "restic/pack" "testing" ) @@ -36,8 +35,8 @@ func (r *randReader) Read(p []byte) (n int, err error) { return len(p), nil } -func randomID(rd io.Reader) backend.ID { - id := backend.ID{} +func randomID(rd io.Reader) restic.ID { + id := restic.ID{} _, err := io.ReadFull(rd, id[:]) if err != nil { panic(err) @@ -64,7 +63,7 @@ func saveFile(t testing.TB, be Saver, filename string, n int) { t.Fatal(err) } - h := backend.Handle{Type: backend.Data, Name: backend.Hash(data).String()} + h := restic.Handle{Type: restic.DataFile, Name: restic.Hash(data).String()} err = be.Save(h, data) if err != nil { @@ -95,7 +94,7 @@ func fillPacks(t testing.TB, 
rnd *randReader, be Saver, pm *packerManager, buf [ t.Fatal(err) } - n, err := packer.Add(pack.Data, id, buf) + n, err := packer.Add(restic.DataBlob, id, buf) if n != l { t.Errorf("Add() returned invalid number of bytes: want %v, got %v", n, l) } @@ -137,7 +136,7 @@ func flushRemainingPacks(t testing.TB, rnd *randReader, be Saver, pm *packerMana type fakeBackend struct{} -func (f *fakeBackend) Save(h backend.Handle, data []byte) error { +func (f *fakeBackend) Save(h restic.Handle, data []byte) error { return nil } diff --git a/src/restic/repository/parallel.go b/src/restic/repository/parallel.go index 7094ae299..02e2d8f12 100644 --- a/src/restic/repository/parallel.go +++ b/src/restic/repository/parallel.go @@ -1,9 +1,9 @@ package repository import ( + "restic" "sync" - "restic/backend" "restic/debug" ) @@ -21,14 +21,14 @@ func closeIfOpen(ch chan struct{}) { // processing stops. If done is closed, the function should return. type ParallelWorkFunc func(id string, done <-chan struct{}) error -// ParallelIDWorkFunc gets one backend.ID to work on. If an error is returned, +// ParallelIDWorkFunc gets one restic.ID to work on. If an error is returned, // processing stops. If done is closed, the function should return. -type ParallelIDWorkFunc func(id backend.ID, done <-chan struct{}) error +type ParallelIDWorkFunc func(id restic.ID, done <-chan struct{}) error // FilesInParallel runs n workers of f in parallel, on the IDs that // repo.List(t) yield. If f returns an error, the process is aborted and the // first error is returned. 
-func FilesInParallel(repo backend.Lister, t backend.Type, n uint, f ParallelWorkFunc) error { +func FilesInParallel(repo restic.Lister, t restic.FileType, n uint, f ParallelWorkFunc) error { done := make(chan struct{}) defer closeIfOpen(done) @@ -75,12 +75,12 @@ func FilesInParallel(repo backend.Lister, t backend.Type, n uint, f ParallelWork return nil } -// ParallelWorkFuncParseID converts a function that takes a backend.ID to a -// function that takes a string. Filenames that do not parse as a backend.ID +// ParallelWorkFuncParseID converts a function that takes a restic.ID to a +// function that takes a string. Filenames that do not parse as a restic.ID // are ignored. func ParallelWorkFuncParseID(f ParallelIDWorkFunc) ParallelWorkFunc { return func(s string, done <-chan struct{}) error { - id, err := backend.ParseID(s) + id, err := restic.ParseID(s) if err != nil { debug.Log("repository.ParallelWorkFuncParseID", "invalid ID %q: %v", id, err) return err diff --git a/src/restic/repository/parallel_test.go b/src/restic/repository/parallel_test.go index 6aab24b0e..cfa384a01 100644 --- a/src/restic/repository/parallel_test.go +++ b/src/restic/repository/parallel_test.go @@ -2,12 +2,12 @@ package repository_test import ( "math/rand" + "restic" "testing" "time" - "github.com/pkg/errors" + "restic/errors" - "restic/backend" "restic/repository" . 
"restic/test" ) @@ -73,7 +73,7 @@ var lister = testIDs{ "34dd044c228727f2226a0c9c06a3e5ceb5e30e31cb7854f8fa1cde846b395a58", } -func (tests testIDs) List(t backend.Type, done <-chan struct{}) <-chan string { +func (tests testIDs) List(t restic.FileType, done <-chan struct{}) <-chan string { ch := make(chan string) go func() { @@ -100,7 +100,7 @@ func TestFilesInParallel(t *testing.T) { } for n := uint(1); n < 5; n++ { - err := repository.FilesInParallel(lister, backend.Data, n*100, f) + err := repository.FilesInParallel(lister, restic.DataFile, n*100, f) OK(t, err) } } @@ -120,7 +120,7 @@ func TestFilesInParallelWithError(t *testing.T) { } for n := uint(1); n < 5; n++ { - err := repository.FilesInParallel(lister, backend.Data, n*100, f) + err := repository.FilesInParallel(lister, restic.DataFile, n*100, f) Equals(t, errTest, err) } } diff --git a/src/restic/repository/repack.go b/src/restic/repository/repack.go index 2c61705da..edb717efb 100644 --- a/src/restic/repository/repack.go +++ b/src/restic/repository/repack.go @@ -3,25 +3,25 @@ package repository import ( "bytes" "io" - "restic/backend" + "restic" "restic/crypto" "restic/debug" "restic/pack" - "github.com/pkg/errors" + "restic/errors" ) // Repack takes a list of packs together with a list of blobs contained in // these packs. Each pack is loaded and the blobs listed in keepBlobs is saved // into a new pack. Afterwards, the packs are removed. This operation requires // an exclusive lock on the repo. 
-func Repack(repo *Repository, packs backend.IDSet, keepBlobs pack.BlobSet) (err error) { +func Repack(repo restic.Repository, packs restic.IDSet, keepBlobs restic.BlobSet) (err error) { debug.Log("Repack", "repacking %d packs while keeping %d blobs", len(packs), len(keepBlobs)) buf := make([]byte, 0, maxPackSize) for packID := range packs { // load the complete pack - h := backend.Handle{Type: backend.Data, Name: packID.String()} + h := restic.Handle{Type: restic.DataFile, Name: packID.String()} l, err := repo.Backend().Load(h, buf[:cap(buf)], 0) if errors.Cause(err) == io.ErrUnexpectedEOF { @@ -43,23 +43,28 @@ func Repack(repo *Repository, packs backend.IDSet, keepBlobs pack.BlobSet) (err debug.Log("Repack", "processing pack %v, blobs: %v", packID.Str(), len(blobs)) var plaintext []byte for _, entry := range blobs { - h := pack.Handle{ID: entry.ID, Type: entry.Type} + h := restic.BlobHandle{ID: entry.ID, Type: entry.Type} if !keepBlobs.Has(h) { continue } - ciphertext := buf[entry.Offset : entry.Offset+entry.Length] + debug.Log("Repack", " process blob %v", h) - if cap(plaintext) < len(ciphertext) { + ciphertext := buf[entry.Offset : entry.Offset+entry.Length] + plaintext = plaintext[:len(plaintext)] + if len(plaintext) < len(ciphertext) { plaintext = make([]byte, len(ciphertext)) } - plaintext, err = crypto.Decrypt(repo.Key(), plaintext, ciphertext) + debug.Log("Repack", " ciphertext %d, plaintext %d", len(plaintext), len(ciphertext)) + + n, err := crypto.Decrypt(repo.Key(), plaintext, ciphertext) if err != nil { return err } + plaintext = plaintext[:n] - _, err = repo.SaveAndEncrypt(entry.Type, plaintext, &entry.ID) + _, err = repo.SaveBlob(entry.Type, plaintext, entry.ID) if err != nil { return err } @@ -75,7 +80,7 @@ func Repack(repo *Repository, packs backend.IDSet, keepBlobs pack.BlobSet) (err } for packID := range packs { - err := repo.Backend().Remove(backend.Data, packID.String()) + err := repo.Backend().Remove(restic.DataFile, packID.String()) if err != 
nil { debug.Log("Repack", "error removing pack %v: %v", packID.Str(), err) return err diff --git a/src/restic/repository/repack_test.go b/src/restic/repository/repack_test.go index b29c7e622..6d910c97b 100644 --- a/src/restic/repository/repack_test.go +++ b/src/restic/repository/repack_test.go @@ -3,8 +3,7 @@ package repository_test import ( "io" "math/rand" - "restic/backend" - "restic/pack" + "restic" "restic/repository" "testing" ) @@ -14,7 +13,7 @@ func randomSize(min, max int) int { } func random(t testing.TB, length int) []byte { - rd := repository.NewRandReader(rand.New(rand.NewSource(int64(length)))) + rd := restic.NewRandReader(rand.New(rand.NewSource(int64(length)))) buf := make([]byte, length) _, err := io.ReadFull(rd, buf) if err != nil { @@ -24,30 +23,30 @@ func random(t testing.TB, length int) []byte { return buf } -func createRandomBlobs(t testing.TB, repo *repository.Repository, blobs int, pData float32) { +func createRandomBlobs(t testing.TB, repo restic.Repository, blobs int, pData float32) { for i := 0; i < blobs; i++ { var ( - tpe pack.BlobType + tpe restic.BlobType length int ) if rand.Float32() < pData { - tpe = pack.Data + tpe = restic.DataBlob length = randomSize(10*1024, 1024*1024) // 10KiB to 1MiB of data } else { - tpe = pack.Tree + tpe = restic.TreeBlob length = randomSize(1*1024, 20*1024) // 1KiB to 20KiB } buf := random(t, length) - id := backend.Hash(buf) + id := restic.Hash(buf) - if repo.Index().Has(id, pack.Data) { - t.Errorf("duplicate blob %v/%v ignored", id, pack.Data) + if repo.Index().Has(id, restic.DataBlob) { + t.Errorf("duplicate blob %v/%v ignored", id, restic.DataBlob) continue } - _, err := repo.SaveAndEncrypt(tpe, buf, &id) + _, err := repo.SaveBlob(tpe, buf, id) if err != nil { t.Fatalf("SaveFrom() error %v", err) } @@ -66,23 +65,23 @@ func createRandomBlobs(t testing.TB, repo *repository.Repository, blobs int, pDa // selectBlobs splits the list of all blobs randomly into two lists. 
A blob // will be contained in the firstone ith probability p. -func selectBlobs(t *testing.T, repo *repository.Repository, p float32) (list1, list2 pack.BlobSet) { +func selectBlobs(t *testing.T, repo restic.Repository, p float32) (list1, list2 restic.BlobSet) { done := make(chan struct{}) defer close(done) - list1 = pack.NewBlobSet() - list2 = pack.NewBlobSet() + list1 = restic.NewBlobSet() + list2 = restic.NewBlobSet() - blobs := pack.NewBlobSet() + blobs := restic.NewBlobSet() - for id := range repo.List(backend.Data, done) { + for id := range repo.List(restic.DataFile, done) { entries, _, err := repo.ListPack(id) if err != nil { t.Fatalf("error listing pack %v: %v", id, err) } for _, entry := range entries { - h := pack.Handle{ID: entry.ID, Type: entry.Type} + h := restic.BlobHandle{ID: entry.ID, Type: entry.Type} if blobs.Has(h) { t.Errorf("ignoring duplicate blob %v", h) continue @@ -90,9 +89,9 @@ func selectBlobs(t *testing.T, repo *repository.Repository, p float32) (list1, l blobs.Insert(h) if rand.Float32() <= p { - list1.Insert(pack.Handle{ID: entry.ID, Type: entry.Type}) + list1.Insert(restic.BlobHandle{ID: entry.ID, Type: entry.Type}) } else { - list2.Insert(pack.Handle{ID: entry.ID, Type: entry.Type}) + list2.Insert(restic.BlobHandle{ID: entry.ID, Type: entry.Type}) } } @@ -101,20 +100,20 @@ func selectBlobs(t *testing.T, repo *repository.Repository, p float32) (list1, l return list1, list2 } -func listPacks(t *testing.T, repo *repository.Repository) backend.IDSet { +func listPacks(t *testing.T, repo restic.Repository) restic.IDSet { done := make(chan struct{}) defer close(done) - list := backend.NewIDSet() - for id := range repo.List(backend.Data, done) { + list := restic.NewIDSet() + for id := range repo.List(restic.DataFile, done) { list.Insert(id) } return list } -func findPacksForBlobs(t *testing.T, repo *repository.Repository, blobs pack.BlobSet) backend.IDSet { - packs := backend.NewIDSet() +func findPacksForBlobs(t *testing.T, repo 
restic.Repository, blobs restic.BlobSet) restic.IDSet { + packs := restic.NewIDSet() idx := repo.Index() for h := range blobs { @@ -131,26 +130,26 @@ func findPacksForBlobs(t *testing.T, repo *repository.Repository, blobs pack.Blo return packs } -func repack(t *testing.T, repo *repository.Repository, packs backend.IDSet, blobs pack.BlobSet) { +func repack(t *testing.T, repo restic.Repository, packs restic.IDSet, blobs restic.BlobSet) { err := repository.Repack(repo, packs, blobs) if err != nil { t.Fatal(err) } } -func saveIndex(t *testing.T, repo *repository.Repository) { +func saveIndex(t *testing.T, repo restic.Repository) { if err := repo.SaveIndex(); err != nil { t.Fatalf("repo.SaveIndex() %v", err) } } -func rebuildIndex(t *testing.T, repo *repository.Repository) { +func rebuildIndex(t *testing.T, repo restic.Repository) { if err := repository.RebuildIndex(repo); err != nil { t.Fatalf("error rebuilding index: %v", err) } } -func reloadIndex(t *testing.T, repo *repository.Repository) { +func reloadIndex(t *testing.T, repo restic.Repository) { repo.SetIndex(repository.NewMasterIndex()) if err := repo.LoadIndex(); err != nil { t.Fatalf("error loading new index: %v", err) diff --git a/src/restic/repository/repository.go b/src/restic/repository/repository.go index d2a5a0fa9..a7258e090 100644 --- a/src/restic/repository/repository.go +++ b/src/restic/repository/repository.go @@ -6,8 +6,9 @@ import ( "fmt" "io" "os" + "restic" - "github.com/pkg/errors" + "restic/errors" "restic/backend" "restic/crypto" @@ -17,8 +18,8 @@ import ( // Repository is used to access a repository in a backend. type Repository struct { - be backend.Backend - Config Config + be restic.Backend + cfg restic.Config key *crypto.Key keyName string idx *MasterIndex @@ -27,7 +28,7 @@ type Repository struct { } // New returns a new repository with backend be. 
-func New(be backend.Backend) *Repository { +func New(be restic.Backend) *Repository { repo := &Repository{ be: be, idx: NewMasterIndex(), @@ -37,85 +38,89 @@ func New(be backend.Backend) *Repository { return repo } +// Config returns the repository configuration. +func (r *Repository) Config() restic.Config { + return r.cfg +} + // Find loads the list of all blobs of type t and searches for names which start // with prefix. If none is found, nil and ErrNoIDPrefixFound is returned. If // more than one is found, nil and ErrMultipleIDMatches is returned. -func (r *Repository) Find(t backend.Type, prefix string) (string, error) { - return backend.Find(r.be, t, prefix) +func (r *Repository) Find(t restic.FileType, prefix string) (string, error) { + return restic.Find(r.be, t, prefix) } // PrefixLength returns the number of bytes required so that all prefixes of // all IDs of type t are unique. -func (r *Repository) PrefixLength(t backend.Type) (int, error) { - return backend.PrefixLength(r.be, t) +func (r *Repository) PrefixLength(t restic.FileType) (int, error) { + return restic.PrefixLength(r.be, t) } // LoadAndDecrypt loads and decrypts data identified by t and id from the // backend. 
-func (r *Repository) LoadAndDecrypt(t backend.Type, id backend.ID) ([]byte, error) { +func (r *Repository) LoadAndDecrypt(t restic.FileType, id restic.ID) ([]byte, error) { debug.Log("Repo.Load", "load %v with id %v", t, id.Str()) - h := backend.Handle{Type: t, Name: id.String()} + h := restic.Handle{Type: t, Name: id.String()} buf, err := backend.LoadAll(r.be, h, nil) if err != nil { debug.Log("Repo.Load", "error loading %v: %v", id.Str(), err) return nil, err } - if t != backend.Config && !backend.Hash(buf).Equal(id) { + if t != restic.ConfigFile && !restic.Hash(buf).Equal(id) { return nil, errors.New("invalid data returned") } + plain := make([]byte, len(buf)) + // decrypt - plain, err := r.Decrypt(buf) + n, err := r.decryptTo(plain, buf) if err != nil { return nil, err } - return plain, nil + return plain[:n], nil } -// LoadBlob tries to load and decrypt content identified by t and id from a +// loadBlob tries to load and decrypt content identified by t and id from a // pack from the backend, the result is stored in plaintextBuf, which must be // large enough to hold the complete blob. 
-func (r *Repository) LoadBlob(id backend.ID, t pack.BlobType, plaintextBuf []byte) ([]byte, error) { - debug.Log("Repo.LoadBlob", "load %v with id %v", t, id.Str()) +func (r *Repository) loadBlob(id restic.ID, t restic.BlobType, plaintextBuf []byte) (int, error) { + debug.Log("Repo.loadBlob", "load %v with id %v (buf %p, len %d)", t, id.Str(), plaintextBuf, len(plaintextBuf)) // lookup plaintext size of blob size, err := r.idx.LookupSize(id, t) if err != nil { - return nil, err + return 0, err } // make sure the plaintext buffer is large enough, extend otherwise - plaintextBufSize := uint(cap(plaintextBuf)) - if size > plaintextBufSize { - debug.Log("Repo.LoadBlob", "need to expand buffer: want %d bytes, got %d", - size, plaintextBufSize) - plaintextBuf = make([]byte, size) + if len(plaintextBuf) < int(size) { + return 0, errors.Errorf("buffer is too small: %d < %d", len(plaintextBuf), size) } // lookup packs blobs, err := r.idx.Lookup(id, t) if err != nil { - debug.Log("Repo.LoadBlob", "id %v not found in index: %v", id.Str(), err) - return nil, err + debug.Log("Repo.loadBlob", "id %v not found in index: %v", id.Str(), err) + return 0, err } var lastError error for _, blob := range blobs { - debug.Log("Repo.LoadBlob", "id %v found: %v", id.Str(), blob) + debug.Log("Repo.loadBlob", "id %v found: %v", id.Str(), blob) if blob.Type != t { - debug.Log("Repo.LoadBlob", "blob %v has wrong block type, want %v", blob, t) + debug.Log("Repo.loadBlob", "blob %v has wrong block type, want %v", blob, t) } // load blob from pack - h := backend.Handle{Type: backend.Data, Name: blob.PackID.String()} + h := restic.Handle{Type: restic.DataFile, Name: blob.PackID.String()} ciphertextBuf := make([]byte, blob.Length) n, err := r.be.Load(h, ciphertextBuf, int64(blob.Offset)) if err != nil { - debug.Log("Repo.LoadBlob", "error loading blob %v: %v", blob, err) + debug.Log("Repo.loadBlob", "error loading blob %v: %v", blob, err) lastError = err continue } @@ -123,31 +128,32 @@ func (r 
*Repository) LoadBlob(id backend.ID, t pack.BlobType, plaintextBuf []byt if uint(n) != blob.Length { lastError = errors.Errorf("error loading blob %v: wrong length returned, want %d, got %d", id.Str(), blob.Length, uint(n)) - debug.Log("Repo.LoadBlob", "lastError: %v", lastError) + debug.Log("Repo.loadBlob", "lastError: %v", lastError) continue } // decrypt - plaintextBuf, err = r.decryptTo(plaintextBuf, ciphertextBuf) + n, err = r.decryptTo(plaintextBuf, ciphertextBuf) if err != nil { lastError = errors.Errorf("decrypting blob %v failed: %v", id, err) continue } + plaintextBuf = plaintextBuf[:n] // check hash - if !backend.Hash(plaintextBuf).Equal(id) { + if !restic.Hash(plaintextBuf).Equal(id) { lastError = errors.Errorf("blob %v returned invalid hash", id) continue } - return plaintextBuf, nil + return len(plaintextBuf), nil } if lastError != nil { - return nil, lastError + return 0, lastError } - return nil, errors.Errorf("loading blob %v from %v packs failed", id.Str(), len(blobs)) + return 0, errors.Errorf("loading blob %v from %v packs failed", id.Str(), len(blobs)) } // closeOrErr calls cl.Close() and sets err to the returned error value if @@ -162,7 +168,7 @@ func closeOrErr(cl io.Closer, err *error) { // LoadJSONUnpacked decrypts the data and afterwards calls json.Unmarshal on // the item. -func (r *Repository) LoadJSONUnpacked(t backend.Type, id backend.ID, item interface{}) (err error) { +func (r *Repository) LoadJSONUnpacked(t restic.FileType, id restic.ID, item interface{}) (err error) { buf, err := r.LoadAndDecrypt(t, id) if err != nil { return err @@ -171,28 +177,17 @@ func (r *Repository) LoadJSONUnpacked(t backend.Type, id backend.ID, item interf return json.Unmarshal(buf, item) } -// LoadJSONPack calls LoadBlob() to load a blob from the backend, decrypt the -// data and afterwards call json.Unmarshal on the item. 
-func (r *Repository) LoadJSONPack(t pack.BlobType, id backend.ID, item interface{}) (err error) { - buf, err := r.LoadBlob(id, t, nil) - if err != nil { - return err - } - - return json.Unmarshal(buf, item) -} - // LookupBlobSize returns the size of blob id. -func (r *Repository) LookupBlobSize(id backend.ID, tpe pack.BlobType) (uint, error) { +func (r *Repository) LookupBlobSize(id restic.ID, tpe restic.BlobType) (uint, error) { return r.idx.LookupSize(id, tpe) } // SaveAndEncrypt encrypts data and stores it to the backend as type t. If data // is small enough, it will be packed together with other small blobs. -func (r *Repository) SaveAndEncrypt(t pack.BlobType, data []byte, id *backend.ID) (backend.ID, error) { +func (r *Repository) SaveAndEncrypt(t restic.BlobType, data []byte, id *restic.ID) (restic.ID, error) { if id == nil { // compute plaintext hash - hashedID := backend.Hash(data) + hashedID := restic.Hash(data) id = &hashedID } @@ -205,19 +200,19 @@ func (r *Repository) SaveAndEncrypt(t pack.BlobType, data []byte, id *backend.ID // encrypt blob ciphertext, err := r.Encrypt(ciphertext, data) if err != nil { - return backend.ID{}, err + return restic.ID{}, err } // find suitable packer and add blob packer, err := r.findPacker(uint(len(ciphertext))) if err != nil { - return backend.ID{}, err + return restic.ID{}, err } // save ciphertext _, err = packer.Add(t, *id, ciphertext) if err != nil { - return backend.ID{}, err + return restic.ID{}, err } // if the pack is not full enough and there are less than maxPackers @@ -232,32 +227,13 @@ func (r *Repository) SaveAndEncrypt(t pack.BlobType, data []byte, id *backend.ID return *id, r.savePacker(packer) } -// SaveJSON serialises item as JSON and encrypts and saves it in a pack in the -// backend as type t. 
-func (r *Repository) SaveJSON(t pack.BlobType, item interface{}) (backend.ID, error) { - debug.Log("Repo.SaveJSON", "save %v blob", t) - buf := getBuf()[:0] - defer freeBuf(buf) - - wr := bytes.NewBuffer(buf) - - enc := json.NewEncoder(wr) - err := enc.Encode(item) - if err != nil { - return backend.ID{}, errors.Errorf("json.Encode: %v", err) - } - - buf = wr.Bytes() - return r.SaveAndEncrypt(t, buf, nil) -} - // SaveJSONUnpacked serialises item as JSON and encrypts and saves it in the // backend as type t, without a pack. It returns the storage hash. -func (r *Repository) SaveJSONUnpacked(t backend.Type, item interface{}) (backend.ID, error) { +func (r *Repository) SaveJSONUnpacked(t restic.FileType, item interface{}) (restic.ID, error) { debug.Log("Repo.SaveJSONUnpacked", "save new blob %v", t) plaintext, err := json.Marshal(item) if err != nil { - return backend.ID{}, errors.Wrap(err, "json.Marshal") + return restic.ID{}, errors.Wrap(err, "json.Marshal") } return r.SaveUnpacked(t, plaintext) @@ -265,20 +241,20 @@ func (r *Repository) SaveJSONUnpacked(t backend.Type, item interface{}) (backend // SaveUnpacked encrypts data and stores it in the backend. Returned is the // storage hash. 
-func (r *Repository) SaveUnpacked(t backend.Type, p []byte) (id backend.ID, err error) { +func (r *Repository) SaveUnpacked(t restic.FileType, p []byte) (id restic.ID, err error) { ciphertext := make([]byte, len(p)+crypto.Extension) ciphertext, err = r.Encrypt(ciphertext, p) if err != nil { - return backend.ID{}, err + return restic.ID{}, err } - id = backend.Hash(ciphertext) - h := backend.Handle{Type: t, Name: id.String()} + id = restic.Hash(ciphertext) + h := restic.Handle{Type: t, Name: id.String()} err = r.be.Save(h, ciphertext) if err != nil { debug.Log("Repo.SaveJSONUnpacked", "error saving blob %v: %v", h, err) - return backend.ID{}, err + return restic.ID{}, err } debug.Log("Repo.SaveJSONUnpacked", "blob %v saved", h) @@ -303,30 +279,30 @@ func (r *Repository) Flush() error { } // Backend returns the backend for the repository. -func (r *Repository) Backend() backend.Backend { +func (r *Repository) Backend() restic.Backend { return r.be } // Index returns the currently used MasterIndex. -func (r *Repository) Index() *MasterIndex { +func (r *Repository) Index() restic.Index { return r.idx } // SetIndex instructs the repository to use the given index. -func (r *Repository) SetIndex(i *MasterIndex) { - r.idx = i +func (r *Repository) SetIndex(i restic.Index) { + r.idx = i.(*MasterIndex) } // SaveIndex saves an index in the repository. -func SaveIndex(repo *Repository, index *Index) (backend.ID, error) { +func SaveIndex(repo restic.Repository, index *Index) (restic.ID, error) { buf := bytes.NewBuffer(nil) err := index.Finalize(buf) if err != nil { - return backend.ID{}, err + return restic.ID{}, err } - return repo.SaveUnpacked(backend.Index, buf.Bytes()) + return repo.SaveUnpacked(restic.IndexFile, buf.Bytes()) } // saveIndex saves all indexes in the backend. 
@@ -365,7 +341,7 @@ func (r *Repository) LoadIndex() error { errCh := make(chan error, 1) indexes := make(chan *Index) - worker := func(id backend.ID, done <-chan struct{}) error { + worker := func(id restic.ID, done <-chan struct{}) error { idx, err := LoadIndex(r, id) if err != nil { return err @@ -381,7 +357,7 @@ func (r *Repository) LoadIndex() error { go func() { defer close(indexes) - errCh <- FilesInParallel(r.be, backend.Index, loadIndexParallelism, + errCh <- FilesInParallel(r.be, restic.IndexFile, loadIndexParallelism, ParallelWorkFuncParseID(worker)) }() @@ -397,7 +373,7 @@ func (r *Repository) LoadIndex() error { } // LoadIndex loads the index id from backend and returns it. -func LoadIndex(repo *Repository, id backend.ID) (*Index, error) { +func LoadIndex(repo restic.Repository, id restic.ID) (*Index, error) { idx, err := LoadIndexWithDecoder(repo, id, DecodeIndex) if err == nil { return idx, nil @@ -422,14 +398,14 @@ func (r *Repository) SearchKey(password string, maxKeys int) error { r.key = key.master r.packerManager.key = key.master r.keyName = key.Name() - r.Config, err = LoadConfig(r) + r.cfg, err = restic.LoadConfig(r) return err } // Init creates a new master key with the supplied password, initializes and // saves the repository config. func (r *Repository) Init(password string) error { - has, err := r.be.Test(backend.Config, "") + has, err := r.be.Test(restic.ConfigFile, "") if err != nil { return err } @@ -437,7 +413,7 @@ func (r *Repository) Init(password string) error { return errors.New("repository master key and config already initialized") } - cfg, err := CreateConfig() + cfg, err := restic.CreateConfig() if err != nil { return err } @@ -447,7 +423,7 @@ func (r *Repository) Init(password string) error { // init creates a new master key with the supplied password and uses it to save // the config into the repo. 
-func (r *Repository) init(password string, cfg Config) error { +func (r *Repository) init(password string, cfg restic.Config) error { key, err := createMasterKey(r, password) if err != nil { return err @@ -456,24 +432,19 @@ func (r *Repository) init(password string, cfg Config) error { r.key = key.master r.packerManager.key = key.master r.keyName = key.Name() - r.Config = cfg - _, err = r.SaveJSONUnpacked(backend.Config, cfg) + r.cfg = cfg + _, err = r.SaveJSONUnpacked(restic.ConfigFile, cfg) return err } -// Decrypt authenticates and decrypts ciphertext and returns the plaintext. -func (r *Repository) Decrypt(ciphertext []byte) ([]byte, error) { - return r.decryptTo(nil, ciphertext) -} - // decrypt authenticates and decrypts ciphertext and stores the result in // plaintext. -func (r *Repository) decryptTo(plaintext, ciphertext []byte) ([]byte, error) { +func (r *Repository) decryptTo(plaintext, ciphertext []byte) (int, error) { if r.key == nil { - return nil, errors.New("key for repository not set") + return 0, errors.New("key for repository not set") } - return crypto.Decrypt(r.key, nil, ciphertext) + return crypto.Decrypt(r.key, plaintext, ciphertext) } // Encrypt encrypts and authenticates the plaintext and saves the result in @@ -496,25 +467,16 @@ func (r *Repository) KeyName() string { return r.keyName } -// Count returns the number of blobs of a given type in the backend. 
-func (r *Repository) Count(t backend.Type) (n uint) { - for _ = range r.be.List(t, nil) { - n++ - } - - return -} - -func (r *Repository) list(t backend.Type, done <-chan struct{}, out chan<- backend.ID) { +func (r *Repository) list(t restic.FileType, done <-chan struct{}, out chan<- restic.ID) { defer close(out) in := r.be.List(t, done) var ( // disable sending on the outCh until we received a job - outCh chan<- backend.ID + outCh chan<- restic.ID // enable receiving from in inCh = in - id backend.ID + id restic.ID err error ) @@ -527,7 +489,7 @@ func (r *Repository) list(t backend.Type, done <-chan struct{}, out chan<- backe // input channel closed, we're done return } - id, err = backend.ParseID(strID) + id, err = restic.ParseID(strID) if err != nil { // ignore invalid IDs continue @@ -543,8 +505,8 @@ func (r *Repository) list(t backend.Type, done <-chan struct{}, out chan<- backe } // List returns a channel that yields all IDs of type t in the backend. -func (r *Repository) List(t backend.Type, done <-chan struct{}) <-chan backend.ID { - outCh := make(chan backend.ID) +func (r *Repository) List(t restic.FileType, done <-chan struct{}) <-chan restic.ID { + outCh := make(chan restic.ID) go r.list(t, done, outCh) @@ -553,15 +515,15 @@ func (r *Repository) List(t backend.Type, done <-chan struct{}) <-chan backend.I // ListPack returns the list of blobs saved in the pack id and the length of // the file as stored in the backend. 
-func (r *Repository) ListPack(id backend.ID) ([]pack.Blob, int64, error) { - h := backend.Handle{Type: backend.Data, Name: id.String()} +func (r *Repository) ListPack(id restic.ID) ([]restic.Blob, int64, error) { + h := restic.Handle{Type: restic.DataFile, Name: id.String()} blobInfo, err := r.Backend().Stat(h) if err != nil { return nil, 0, err } - blobs, err := pack.List(r.Key(), backend.ReaderAt(r.Backend(), h), blobInfo.Size) + blobs, err := pack.List(r.Key(), restic.ReaderAt(r.Backend(), h), blobInfo.Size) if err != nil { return nil, 0, err } @@ -572,7 +534,7 @@ func (r *Repository) ListPack(id backend.ID) ([]pack.Blob, int64, error) { // Delete calls backend.Delete() if implemented, and returns an error // otherwise. func (r *Repository) Delete() error { - if b, ok := r.be.(backend.Deleter); ok { + if b, ok := r.be.(restic.Deleter); ok { return b.Delete() } @@ -583,3 +545,85 @@ func (r *Repository) Delete() error { func (r *Repository) Close() error { return r.be.Close() } + +// LoadBlob loads a blob of type t from the repository to the buffer. +func (r *Repository) LoadBlob(t restic.BlobType, id restic.ID, buf []byte) (int, error) { + debug.Log("repo.LoadBlob", "load blob %v into buf %p", id.Str(), buf) + size, err := r.idx.LookupSize(id, t) + if err != nil { + return 0, err + } + + if len(buf) < int(size) { + return 0, errors.Errorf("buffer is too small for data blob (%d < %d)", len(buf), size) + } + + n, err := r.loadBlob(id, t, buf) + if err != nil { + return 0, err + } + buf = buf[:n] + + debug.Log("repo.LoadBlob", "loaded %d bytes into buf %p", len(buf), buf) + + return len(buf), err +} + +// SaveBlob saves a blob of type t into the repository. If id is the null id, it +// will be computed and returned. +func (r *Repository) SaveBlob(t restic.BlobType, buf []byte, id restic.ID) (restic.ID, error) { + var i *restic.ID + if !id.IsNull() { + i = &id + } + return r.SaveAndEncrypt(t, buf, i) +} + +// LoadTree loads a tree from the repository. 
+func (r *Repository) LoadTree(id restic.ID) (*restic.Tree, error) { + debug.Log("repo.LoadTree", "load tree %v", id.Str()) + + size, err := r.idx.LookupSize(id, restic.TreeBlob) + if err != nil { + return nil, err + } + + debug.Log("repo.LoadTree", "size is %d, create buffer", size) + buf := make([]byte, size) + + n, err := r.loadBlob(id, restic.TreeBlob, buf) + if err != nil { + return nil, err + } + buf = buf[:n] + + t := &restic.Tree{} + err = json.Unmarshal(buf, t) + if err != nil { + return nil, err + } + + return t, nil +} + +// SaveTree stores a tree into the repository and returns the ID. The ID is +// checked against the index. The tree is only stored when the index does not +// contain the ID. +func (r *Repository) SaveTree(t *restic.Tree) (restic.ID, error) { + buf, err := json.Marshal(t) + if err != nil { + return restic.ID{}, errors.Wrap(err, "MarshalJSON") + } + + // append a newline so that the data is always consistent (json.Encoder + // adds a newline after each object) + buf = append(buf, '\n') + + id := restic.Hash(buf) + if r.idx.Has(id, restic.TreeBlob) { + return id, nil + } + + _, err = r.SaveBlob(restic.TreeBlob, buf, id) + return id, err +} diff --git a/src/restic/repository/repository_test.go b/src/restic/repository/repository_test.go index db70765a8..ce4fb68ed 100644 --- a/src/restic/repository/repository_test.go +++ b/src/restic/repository/repository_test.go @@ -4,86 +4,32 @@ import ( "bytes" "crypto/rand" "crypto/sha256" - "encoding/json" "io" mrand "math/rand" "path/filepath" "testing" "restic" - "restic/backend" - "restic/pack" + "restic/archiver" "restic/repository" . 
"restic/test" ) -type testJSONStruct struct { - Foo uint32 - Bar string - Baz []byte -} - -var repoTests = []testJSONStruct{ - testJSONStruct{Foo: 23, Bar: "Teststring", Baz: []byte("xx")}, -} - -func TestSaveJSON(t *testing.T) { - repo := SetupRepo() - defer TeardownRepo(repo) - - for _, obj := range repoTests { - data, err := json.Marshal(obj) - OK(t, err) - data = append(data, '\n') - h := sha256.Sum256(data) - - id, err := repo.SaveJSON(pack.Tree, obj) - OK(t, err) - - Assert(t, h == id, - "TestSaveJSON: wrong plaintext ID: expected %02x, got %02x", - h, id) - } -} - -func BenchmarkSaveJSON(t *testing.B) { - repo := SetupRepo() - defer TeardownRepo(repo) - - obj := repoTests[0] - - data, err := json.Marshal(obj) - OK(t, err) - data = append(data, '\n') - h := sha256.Sum256(data) - - t.ResetTimer() - - for i := 0; i < t.N; i++ { - id, err := repo.SaveJSON(pack.Tree, obj) - OK(t, err) - - Assert(t, h == id, - "TestSaveJSON: wrong plaintext ID: expected %02x, got %02x", - h, id) - } -} - var testSizes = []int{5, 23, 2<<18 + 23, 1 << 20} func TestSave(t *testing.T) { - repo := SetupRepo() - defer TeardownRepo(repo) + repo, cleanup := repository.TestRepository(t) + defer cleanup() for _, size := range testSizes { data := make([]byte, size) _, err := io.ReadFull(rand.Reader, data) OK(t, err) - id := backend.Hash(data) + id := restic.Hash(data) // save - sid, err := repo.SaveAndEncrypt(pack.Data, data, nil) + sid, err := repo.SaveBlob(restic.DataBlob, data, restic.ID{}) OK(t, err) Equals(t, id, sid) @@ -92,8 +38,10 @@ func TestSave(t *testing.T) { // OK(t, repo.SaveIndex()) // read back - buf, err := repo.LoadBlob(id, pack.Data, make([]byte, size)) + buf := make([]byte, size) + n, err := repo.LoadBlob(restic.DataBlob, id, buf) OK(t, err) + Equals(t, len(buf), n) Assert(t, len(buf) == len(data), "number of bytes read back does not match: expected %d, got %d", @@ -106,26 +54,28 @@ func TestSave(t *testing.T) { } func TestSaveFrom(t *testing.T) { - repo := SetupRepo() - 
defer TeardownRepo(repo) + repo, cleanup := repository.TestRepository(t) + defer cleanup() for _, size := range testSizes { data := make([]byte, size) _, err := io.ReadFull(rand.Reader, data) OK(t, err) - id := backend.Hash(data) + id := restic.Hash(data) // save - id2, err := repo.SaveAndEncrypt(pack.Data, data, &id) + id2, err := repo.SaveBlob(restic.DataBlob, data, id) OK(t, err) Equals(t, id, id2) OK(t, repo.Flush()) // read back - buf, err := repo.LoadBlob(id, pack.Data, make([]byte, size)) + buf := make([]byte, size) + n, err := repo.LoadBlob(restic.DataBlob, id, buf) OK(t, err) + Equals(t, len(buf), n) Assert(t, len(buf) == len(data), "number of bytes read back does not match: expected %d, got %d", @@ -138,8 +88,8 @@ func TestSaveFrom(t *testing.T) { } func BenchmarkSaveAndEncrypt(t *testing.B) { - repo := SetupRepo() - defer TeardownRepo(repo) + repo, cleanup := repository.TestRepository(t) + defer cleanup() size := 4 << 20 // 4MiB @@ -147,60 +97,57 @@ func BenchmarkSaveAndEncrypt(t *testing.B) { _, err := io.ReadFull(rand.Reader, data) OK(t, err) - id := backend.ID(sha256.Sum256(data)) + id := restic.ID(sha256.Sum256(data)) t.ResetTimer() t.SetBytes(int64(size)) for i := 0; i < t.N; i++ { // save - _, err = repo.SaveAndEncrypt(pack.Data, data, &id) + _, err = repo.SaveBlob(restic.DataBlob, data, id) OK(t, err) } } -func TestLoadJSONPack(t *testing.T) { - repo := SetupRepo() - defer TeardownRepo(repo) +func TestLoadTree(t *testing.T) { + repo, cleanup := repository.TestRepository(t) + defer cleanup() if BenchArchiveDirectory == "" { t.Skip("benchdir not set, skipping") } // archive a few files - sn := SnapshotDir(t, repo, BenchArchiveDirectory, nil) + sn := archiver.TestSnapshot(t, repo, BenchArchiveDirectory, nil) OK(t, repo.Flush()) - tree := restic.NewTree() - err := repo.LoadJSONPack(pack.Tree, *sn.Tree, &tree) + _, err := repo.LoadTree(*sn.Tree) OK(t, err) } -func BenchmarkLoadJSONPack(t *testing.B) { - repo := SetupRepo() - defer TeardownRepo(repo) 
+func BenchmarkLoadTree(t *testing.B) { + repo, cleanup := repository.TestRepository(t) + defer cleanup() if BenchArchiveDirectory == "" { t.Skip("benchdir not set, skipping") } // archive a few files - sn := SnapshotDir(t, repo, BenchArchiveDirectory, nil) + sn := archiver.TestSnapshot(t, repo, BenchArchiveDirectory, nil) OK(t, repo.Flush()) - tree := restic.NewTree() - t.ResetTimer() for i := 0; i < t.N; i++ { - err := repo.LoadJSONPack(pack.Tree, *sn.Tree, &tree) + _, err := repo.LoadTree(*sn.Tree) OK(t, err) } } func TestLoadJSONUnpacked(t *testing.T) { - repo := SetupRepo() - defer TeardownRepo(repo) + repo, cleanup := repository.TestRepository(t) + defer cleanup() if BenchArchiveDirectory == "" { t.Skip("benchdir not set, skipping") @@ -211,13 +158,13 @@ func TestLoadJSONUnpacked(t *testing.T) { sn.Hostname = "foobar" sn.Username = "test!" - id, err := repo.SaveJSONUnpacked(backend.Snapshot, &sn) + id, err := repo.SaveJSONUnpacked(restic.SnapshotFile, &sn) OK(t, err) var sn2 restic.Snapshot // restore - err = repo.LoadJSONUnpacked(backend.Snapshot, id, &sn2) + err = repo.LoadJSONUnpacked(restic.SnapshotFile, id, &sn2) OK(t, err) Equals(t, sn.Hostname, sn2.Hostname) @@ -227,26 +174,28 @@ func TestLoadJSONUnpacked(t *testing.T) { var repoFixture = filepath.Join("testdata", "test-repo.tar.gz") func TestRepositoryLoadIndex(t *testing.T) { - WithTestEnvironment(t, repoFixture, func(repodir string) { - repo := OpenLocalRepo(t, repodir) - OK(t, repo.LoadIndex()) - }) + repodir, cleanup := Env(t, repoFixture) + defer cleanup() + + repo := repository.TestOpenLocal(t, repodir) + OK(t, repo.LoadIndex()) } func BenchmarkLoadIndex(b *testing.B) { - WithTestEnvironment(b, repoFixture, func(repodir string) { - repo := OpenLocalRepo(b, repodir) - b.ResetTimer() + repodir, cleanup := Env(b, repoFixture) + defer cleanup() - for i := 0; i < b.N; i++ { - repo.SetIndex(repository.NewMasterIndex()) - OK(b, repo.LoadIndex()) - } - }) + repo := repository.TestOpenLocal(b, repodir) + 
b.ResetTimer() + + for i := 0; i < b.N; i++ { + repo.SetIndex(repository.NewMasterIndex()) + OK(b, repo.LoadIndex()) + } } // saveRandomDataBlobs generates random data blobs and saves them to the repository. -func saveRandomDataBlobs(t testing.TB, repo *repository.Repository, num int, sizeMax int) { +func saveRandomDataBlobs(t testing.TB, repo restic.Repository, num int, sizeMax int) { for i := 0; i < num; i++ { size := mrand.Int() % sizeMax @@ -254,14 +203,14 @@ func saveRandomDataBlobs(t testing.TB, repo *repository.Repository, num int, siz _, err := io.ReadFull(rand.Reader, buf) OK(t, err) - _, err = repo.SaveAndEncrypt(pack.Data, buf, nil) + _, err = repo.SaveBlob(restic.DataBlob, buf, restic.ID{}) OK(t, err) } } func TestRepositoryIncrementalIndex(t *testing.T) { - repo := SetupRepo() - defer TeardownRepo(repo) + repo, cleanup := repository.TestRepository(t) + defer cleanup() repository.IndexFull = func(*repository.Index) bool { return true } @@ -286,19 +235,19 @@ func TestRepositoryIncrementalIndex(t *testing.T) { OK(t, repo.SaveIndex()) type packEntry struct { - id backend.ID + id restic.ID indexes []*repository.Index } - packEntries := make(map[backend.ID]map[backend.ID]struct{}) + packEntries := make(map[restic.ID]map[restic.ID]struct{}) - for id := range repo.List(backend.Index, nil) { + for id := range repo.List(restic.IndexFile, nil) { idx, err := repository.LoadIndex(repo, id) OK(t, err) for pb := range idx.Each(nil) { if _, ok := packEntries[pb.PackID]; !ok { - packEntries[pb.PackID] = make(map[backend.ID]struct{}) + packEntries[pb.PackID] = make(map[restic.ID]struct{}) } packEntries[pb.PackID][id] = struct{}{} diff --git a/src/restic/repository/testing.go b/src/restic/repository/testing.go index 904ee397c..7650ad8b9 100644 --- a/src/restic/repository/testing.go +++ b/src/restic/repository/testing.go @@ -2,10 +2,11 @@ package repository import ( "os" - "restic/backend" + "restic" "restic/backend/local" "restic/backend/mem" "restic/crypto" + 
"restic/test" "testing" "github.com/restic/chunker" @@ -25,19 +26,16 @@ func TestUseLowSecurityKDFParameters(t testing.TB) { } // TestBackend returns a fully configured in-memory backend. -func TestBackend(t testing.TB) (be backend.Backend, cleanup func()) { +func TestBackend(t testing.TB) (be restic.Backend, cleanup func()) { return mem.New(), func() {} } -// TestPassword is used for all repositories created by the Test* functions. -const TestPassword = "geheim" - const testChunkerPol = chunker.Pol(0x3DA3358B4DC173) // TestRepositoryWithBackend returns a repository initialized with a test // password. If be is nil, an in-memory backend is used. A constant polynomial // is used for the chunker and low-security test parameters. -func TestRepositoryWithBackend(t testing.TB, be backend.Backend) (r *Repository, cleanup func()) { +func TestRepositoryWithBackend(t testing.TB, be restic.Backend) (r restic.Repository, cleanup func()) { TestUseLowSecurityKDFParameters(t) var beCleanup func() @@ -45,15 +43,15 @@ func TestRepositoryWithBackend(t testing.TB, be backend.Backend) (r *Repository, be, beCleanup = TestBackend(t) } - r = New(be) + repo := New(be) - cfg := TestCreateConfig(t, testChunkerPol) - err := r.init(TestPassword, cfg) + cfg := restic.TestCreateConfig(t, testChunkerPol) + err := repo.init(test.TestPassword, cfg) if err != nil { t.Fatalf("TestRepository(): initialize repo failed: %v", err) } - return r, func() { + return repo, func() { if beCleanup != nil { beCleanup() } @@ -64,7 +62,7 @@ func TestRepositoryWithBackend(t testing.TB, be backend.Backend) (r *Repository, // in-memory backend. When the environment variable RESTIC_TEST_REPO is set to // a non-existing directory, a local backend is created there and this is used // instead. The directory is not removed, but left there for inspection. 
-func TestRepository(t testing.TB) (r *Repository, cleanup func()) { +func TestRepository(t testing.TB) (r restic.Repository, cleanup func()) { dir := os.Getenv("RESTIC_TEST_REPO") if dir != "" { _, err := os.Stat(dir) @@ -83,3 +81,19 @@ func TestRepository(t testing.TB) (r *Repository, cleanup func()) { return TestRepositoryWithBackend(t, nil) } + +// TestOpenLocal opens a local repository. +func TestOpenLocal(t testing.TB, dir string) (r restic.Repository) { + be, err := local.Open(dir) + if err != nil { + t.Fatal(err) + } + + repo := New(be) + err = repo.SearchKey(test.TestPassword, 10) + if err != nil { + t.Fatal(err) + } + + return repo +} diff --git a/src/restic/restorer.go b/src/restic/restorer.go index 74cdfc34d..e3fceb67f 100644 --- a/src/restic/restorer.go +++ b/src/restic/restorer.go @@ -4,17 +4,15 @@ import ( "os" "path/filepath" - "github.com/pkg/errors" + "restic/errors" - "restic/backend" "restic/debug" "restic/fs" - "restic/repository" ) // Restorer is used to restore a snapshot to a directory. type Restorer struct { - repo *repository.Repository + repo Repository sn *Snapshot Error func(dir string, node *Node, err error) error @@ -24,7 +22,7 @@ type Restorer struct { var restorerAbortOnAllErrors = func(str string, node *Node, err error) error { return err } // NewRestorer creates a restorer preloaded with the content from the snapshot id. 
-func NewRestorer(repo *repository.Repository, id backend.ID) (*Restorer, error) { +func NewRestorer(repo Repository, id ID) (*Restorer, error) { r := &Restorer{ repo: repo, Error: restorerAbortOnAllErrors, SelectFilter: func(string, string, *Node) bool { return true }, @@ -40,8 +38,8 @@ func NewRestorer(repo *repository.Repository, id backend.ID) (*Restorer, error) return r, nil } -func (res *Restorer) restoreTo(dst string, dir string, treeID backend.ID) error { - tree, err := LoadTree(res.repo, treeID) +func (res *Restorer) restoreTo(dst string, dir string, treeID ID) error { + tree, err := res.repo.LoadTree(treeID) if err != nil { return res.Error(dir, nil, err) } diff --git a/src/restic/snapshot.go b/src/restic/snapshot.go index 2ce01b4d9..dc351e8e4 100644 --- a/src/restic/snapshot.go +++ b/src/restic/snapshot.go @@ -7,25 +7,22 @@ import ( "path/filepath" "time" - "github.com/pkg/errors" - - "restic/backend" - "restic/repository" + "restic/errors" ) // Snapshot is the state of a resource at one point in time. 
type Snapshot struct { - Time time.Time `json:"time"` - Parent *backend.ID `json:"parent,omitempty"` - Tree *backend.ID `json:"tree"` - Paths []string `json:"paths"` - Hostname string `json:"hostname,omitempty"` - Username string `json:"username,omitempty"` - UID uint32 `json:"uid,omitempty"` - GID uint32 `json:"gid,omitempty"` - Excludes []string `json:"excludes,omitempty"` + Time time.Time `json:"time"` + Parent *ID `json:"parent,omitempty"` + Tree *ID `json:"tree"` + Paths []string `json:"paths"` + Hostname string `json:"hostname,omitempty"` + Username string `json:"username,omitempty"` + UID uint32 `json:"uid,omitempty"` + GID uint32 `json:"gid,omitempty"` + Excludes []string `json:"excludes,omitempty"` - id *backend.ID // plaintext ID, used during restore + id *ID // plaintext ID, used during restore } // NewSnapshot returns an initialized snapshot struct for the current user and @@ -56,9 +53,9 @@ func NewSnapshot(paths []string) (*Snapshot, error) { } // LoadSnapshot loads the snapshot with the id and returns it. -func LoadSnapshot(repo *repository.Repository, id backend.ID) (*Snapshot, error) { +func LoadSnapshot(repo Repository, id ID) (*Snapshot, error) { sn := &Snapshot{id: &id} - err := repo.LoadJSONUnpacked(backend.Snapshot, id, sn) + err := repo.LoadJSONUnpacked(SnapshotFile, id, sn) if err != nil { return nil, err } @@ -67,11 +64,11 @@ func LoadSnapshot(repo *repository.Repository, id backend.ID) (*Snapshot, error) } // LoadAllSnapshots returns a list of all snapshots in the repo. -func LoadAllSnapshots(repo *repository.Repository) (snapshots []*Snapshot, err error) { +func LoadAllSnapshots(repo Repository) (snapshots []*Snapshot, err error) { done := make(chan struct{}) defer close(done) - for id := range repo.List(backend.Snapshot, done) { + for id := range repo.List(SnapshotFile, done) { sn, err := LoadSnapshot(repo, id) if err != nil { return nil, err @@ -89,7 +86,7 @@ func (sn Snapshot) String() string { } // ID retuns the snapshot's ID. 
-func (sn Snapshot) ID() *backend.ID { +func (sn Snapshot) ID() *ID { return sn.id } @@ -131,17 +128,17 @@ func SamePaths(expected, actual []string) bool { var ErrNoSnapshotFound = errors.New("no snapshot found") // FindLatestSnapshot finds latest snapshot with optional target/directory and source filters -func FindLatestSnapshot(repo *repository.Repository, targets []string, source string) (backend.ID, error) { +func FindLatestSnapshot(repo Repository, targets []string, source string) (ID, error) { var ( latest time.Time - latestID backend.ID + latestID ID found bool ) - for snapshotID := range repo.List(backend.Snapshot, make(chan struct{})) { + for snapshotID := range repo.List(SnapshotFile, make(chan struct{})) { snapshot, err := LoadSnapshot(repo, snapshotID) if err != nil { - return backend.ID{}, errors.Errorf("Error listing snapshot: %v", err) + return ID{}, errors.Errorf("Error listing snapshot: %v", err) } if snapshot.Time.After(latest) && SamePaths(snapshot.Paths, targets) && (source == "" || source == snapshot.Hostname) { latest = snapshot.Time @@ -151,7 +148,7 @@ func FindLatestSnapshot(repo *repository.Repository, targets []string, source st } if !found { - return backend.ID{}, ErrNoSnapshotFound + return ID{}, ErrNoSnapshotFound } return latestID, nil @@ -159,13 +156,13 @@ func FindLatestSnapshot(repo *repository.Repository, targets []string, source st // FindSnapshot takes a string and tries to find a snapshot whose ID matches // the string as closely as possible. 
-func FindSnapshot(repo *repository.Repository, s string) (backend.ID, error) { +func FindSnapshot(repo Repository, s string) (ID, error) { // find snapshot id with prefix - name, err := backend.Find(repo.Backend(), backend.Snapshot, s) + name, err := Find(repo.Backend(), SnapshotFile, s) if err != nil { - return backend.ID{}, err + return ID{}, err } - return backend.ParseID(name) + return ParseID(name) } diff --git a/src/restic/snapshot_filter_test.go b/src/restic/snapshot_filter_test.go index 07d2e106d..6c5397a4f 100644 --- a/src/restic/snapshot_filter_test.go +++ b/src/restic/snapshot_filter_test.go @@ -1,4 +1,4 @@ -package restic +package restic_test import ( "encoding/json" @@ -6,12 +6,13 @@ import ( "io/ioutil" "path/filepath" "reflect" + "restic" "sort" "testing" "time" ) -func parseTime(s string) time.Time { +func parseTimeUTC(s string) time.Time { t, err := time.Parse("2006-01-02 15:04:05", s) if err != nil { panic(err) @@ -20,29 +21,29 @@ func parseTime(s string) time.Time { return t.UTC() } -var testFilterSnapshots = Snapshots{ - {Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-01 01:02:03"), Paths: []string{"/usr", "/bin"}}, - {Hostname: "bar", Username: "testuser", Time: parseTime("2016-01-01 01:03:03"), Paths: []string{"/usr", "/bin"}}, - {Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-03 07:02:03"), Paths: []string{"/usr", "/bin"}}, - {Hostname: "bar", Username: "testuser", Time: parseTime("2016-01-01 07:08:03"), Paths: []string{"/usr", "/bin"}}, - {Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-04 10:23:03"), Paths: []string{"/usr", "/bin"}}, - {Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-04 11:23:03"), Paths: []string{"/usr", "/bin"}}, - {Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-04 12:23:03"), Paths: []string{"/usr", "/bin"}}, - {Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-04 12:24:03"), Paths: []string{"/usr", "/bin"}}, - {Hostname: 
"foo", Username: "testuser", Time: parseTime("2016-01-04 12:28:03"), Paths: []string{"/usr", "/bin"}}, - {Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-04 12:30:03"), Paths: []string{"/usr", "/bin"}}, - {Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-04 16:23:03"), Paths: []string{"/usr", "/bin"}}, - {Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-05 09:02:03"), Paths: []string{"/usr", "/bin"}}, - {Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-06 08:02:03"), Paths: []string{"/usr", "/bin"}}, - {Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-07 10:02:03"), Paths: []string{"/usr", "/bin"}}, - {Hostname: "foo", Username: "root", Time: parseTime("2016-01-08 20:02:03"), Paths: []string{"/usr", "/sbin"}}, - {Hostname: "foo", Username: "root", Time: parseTime("2016-01-09 21:02:03"), Paths: []string{"/usr", "/sbin"}}, - {Hostname: "bar", Username: "root", Time: parseTime("2016-01-12 21:02:03"), Paths: []string{"/usr", "/sbin"}}, - {Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-12 21:08:03"), Paths: []string{"/usr", "/bin"}}, - {Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-18 12:02:03"), Paths: []string{"/usr", "/bin"}}, +var testFilterSnapshots = restic.Snapshots{ + {Hostname: "foo", Username: "testuser", Time: parseTimeUTC("2016-01-01 01:02:03"), Paths: []string{"/usr", "/bin"}}, + {Hostname: "bar", Username: "testuser", Time: parseTimeUTC("2016-01-01 01:03:03"), Paths: []string{"/usr", "/bin"}}, + {Hostname: "foo", Username: "testuser", Time: parseTimeUTC("2016-01-03 07:02:03"), Paths: []string{"/usr", "/bin"}}, + {Hostname: "bar", Username: "testuser", Time: parseTimeUTC("2016-01-01 07:08:03"), Paths: []string{"/usr", "/bin"}}, + {Hostname: "foo", Username: "testuser", Time: parseTimeUTC("2016-01-04 10:23:03"), Paths: []string{"/usr", "/bin"}}, + {Hostname: "foo", Username: "testuser", Time: parseTimeUTC("2016-01-04 11:23:03"), Paths: 
[]string{"/usr", "/bin"}}, + {Hostname: "foo", Username: "testuser", Time: parseTimeUTC("2016-01-04 12:23:03"), Paths: []string{"/usr", "/bin"}}, + {Hostname: "foo", Username: "testuser", Time: parseTimeUTC("2016-01-04 12:24:03"), Paths: []string{"/usr", "/bin"}}, + {Hostname: "foo", Username: "testuser", Time: parseTimeUTC("2016-01-04 12:28:03"), Paths: []string{"/usr", "/bin"}}, + {Hostname: "foo", Username: "testuser", Time: parseTimeUTC("2016-01-04 12:30:03"), Paths: []string{"/usr", "/bin"}}, + {Hostname: "foo", Username: "testuser", Time: parseTimeUTC("2016-01-04 16:23:03"), Paths: []string{"/usr", "/bin"}}, + {Hostname: "foo", Username: "testuser", Time: parseTimeUTC("2016-01-05 09:02:03"), Paths: []string{"/usr", "/bin"}}, + {Hostname: "foo", Username: "testuser", Time: parseTimeUTC("2016-01-06 08:02:03"), Paths: []string{"/usr", "/bin"}}, + {Hostname: "foo", Username: "testuser", Time: parseTimeUTC("2016-01-07 10:02:03"), Paths: []string{"/usr", "/bin"}}, + {Hostname: "foo", Username: "root", Time: parseTimeUTC("2016-01-08 20:02:03"), Paths: []string{"/usr", "/sbin"}}, + {Hostname: "foo", Username: "root", Time: parseTimeUTC("2016-01-09 21:02:03"), Paths: []string{"/usr", "/sbin"}}, + {Hostname: "bar", Username: "root", Time: parseTimeUTC("2016-01-12 21:02:03"), Paths: []string{"/usr", "/sbin"}}, + {Hostname: "foo", Username: "testuser", Time: parseTimeUTC("2016-01-12 21:08:03"), Paths: []string{"/usr", "/bin"}}, + {Hostname: "foo", Username: "testuser", Time: parseTimeUTC("2016-01-18 12:02:03"), Paths: []string{"/usr", "/bin"}}, } -var filterTests = []SnapshotFilter{ +var filterTests = []restic.SnapshotFilter{ {Hostname: "foo"}, {Username: "root"}, {Hostname: "foo", Username: "root"}, @@ -55,7 +56,7 @@ func TestFilterSnapshots(t *testing.T) { sort.Sort(testFilterSnapshots) for i, f := range filterTests { - res := FilterSnapshots(testFilterSnapshots, f) + res := restic.FilterSnapshots(testFilterSnapshots, f) goldenFilename := filepath.Join("testdata", 
fmt.Sprintf("filter_snapshots_%d", i)) @@ -76,7 +77,7 @@ func TestFilterSnapshots(t *testing.T) { continue } - var want Snapshots + var want restic.Snapshots err = json.Unmarshal(buf, &want) if !reflect.DeepEqual(res, want) { @@ -86,109 +87,109 @@ func TestFilterSnapshots(t *testing.T) { } } -var testExpireSnapshots = Snapshots{ - {Time: parseTime("2014-09-01 10:20:30")}, - {Time: parseTime("2014-09-02 10:20:30")}, - {Time: parseTime("2014-09-05 10:20:30")}, - {Time: parseTime("2014-09-06 10:20:30")}, - {Time: parseTime("2014-09-08 10:20:30")}, - {Time: parseTime("2014-09-09 10:20:30")}, - {Time: parseTime("2014-09-10 10:20:30")}, - {Time: parseTime("2014-09-11 10:20:30")}, - {Time: parseTime("2014-09-20 10:20:30")}, - {Time: parseTime("2014-09-22 10:20:30")}, - {Time: parseTime("2014-08-08 10:20:30")}, - {Time: parseTime("2014-08-10 10:20:30")}, - {Time: parseTime("2014-08-12 10:20:30")}, - {Time: parseTime("2014-08-13 10:20:30")}, - {Time: parseTime("2014-08-13 10:20:30")}, - {Time: parseTime("2014-08-15 10:20:30")}, - {Time: parseTime("2014-08-18 10:20:30")}, - {Time: parseTime("2014-08-20 10:20:30")}, - {Time: parseTime("2014-08-21 10:20:30")}, - {Time: parseTime("2014-08-22 10:20:30")}, - {Time: parseTime("2014-10-01 10:20:30")}, - {Time: parseTime("2014-10-02 10:20:30")}, - {Time: parseTime("2014-10-05 10:20:30")}, - {Time: parseTime("2014-10-06 10:20:30")}, - {Time: parseTime("2014-10-08 10:20:30")}, - {Time: parseTime("2014-10-09 10:20:30")}, - {Time: parseTime("2014-10-10 10:20:30")}, - {Time: parseTime("2014-10-11 10:20:30")}, - {Time: parseTime("2014-10-20 10:20:30")}, - {Time: parseTime("2014-10-22 10:20:30")}, - {Time: parseTime("2014-11-08 10:20:30")}, - {Time: parseTime("2014-11-10 10:20:30")}, - {Time: parseTime("2014-11-12 10:20:30")}, - {Time: parseTime("2014-11-13 10:20:30")}, - {Time: parseTime("2014-11-13 10:20:30")}, - {Time: parseTime("2014-11-15 10:20:30")}, - {Time: parseTime("2014-11-18 10:20:30")}, - {Time: parseTime("2014-11-20 
10:20:30")}, - {Time: parseTime("2014-11-21 10:20:30")}, - {Time: parseTime("2014-11-22 10:20:30")}, - {Time: parseTime("2015-09-01 10:20:30")}, - {Time: parseTime("2015-09-02 10:20:30")}, - {Time: parseTime("2015-09-05 10:20:30")}, - {Time: parseTime("2015-09-06 10:20:30")}, - {Time: parseTime("2015-09-08 10:20:30")}, - {Time: parseTime("2015-09-09 10:20:30")}, - {Time: parseTime("2015-09-10 10:20:30")}, - {Time: parseTime("2015-09-11 10:20:30")}, - {Time: parseTime("2015-09-20 10:20:30")}, - {Time: parseTime("2015-09-22 10:20:30")}, - {Time: parseTime("2015-08-08 10:20:30")}, - {Time: parseTime("2015-08-10 10:20:30")}, - {Time: parseTime("2015-08-12 10:20:30")}, - {Time: parseTime("2015-08-13 10:20:30")}, - {Time: parseTime("2015-08-13 10:20:30")}, - {Time: parseTime("2015-08-15 10:20:30")}, - {Time: parseTime("2015-08-18 10:20:30")}, - {Time: parseTime("2015-08-20 10:20:30")}, - {Time: parseTime("2015-08-21 10:20:30")}, - {Time: parseTime("2015-08-22 10:20:30")}, - {Time: parseTime("2015-10-01 10:20:30")}, - {Time: parseTime("2015-10-02 10:20:30")}, - {Time: parseTime("2015-10-05 10:20:30")}, - {Time: parseTime("2015-10-06 10:20:30")}, - {Time: parseTime("2015-10-08 10:20:30")}, - {Time: parseTime("2015-10-09 10:20:30")}, - {Time: parseTime("2015-10-10 10:20:30")}, - {Time: parseTime("2015-10-11 10:20:30")}, - {Time: parseTime("2015-10-20 10:20:30")}, - {Time: parseTime("2015-10-22 10:20:30")}, - {Time: parseTime("2015-11-08 10:20:30")}, - {Time: parseTime("2015-11-10 10:20:30")}, - {Time: parseTime("2015-11-12 10:20:30")}, - {Time: parseTime("2015-11-13 10:20:30")}, - {Time: parseTime("2015-11-13 10:20:30")}, - {Time: parseTime("2015-11-15 10:20:30")}, - {Time: parseTime("2015-11-18 10:20:30")}, - {Time: parseTime("2015-11-20 10:20:30")}, - {Time: parseTime("2015-11-21 10:20:30")}, - {Time: parseTime("2015-11-22 10:20:30")}, - {Time: parseTime("2016-01-01 01:02:03")}, - {Time: parseTime("2016-01-01 01:03:03")}, - {Time: parseTime("2016-01-01 07:08:03")}, - 
{Time: parseTime("2016-01-03 07:02:03")}, - {Time: parseTime("2016-01-04 10:23:03")}, - {Time: parseTime("2016-01-04 11:23:03")}, - {Time: parseTime("2016-01-04 12:23:03")}, - {Time: parseTime("2016-01-04 12:24:03")}, - {Time: parseTime("2016-01-04 12:28:03")}, - {Time: parseTime("2016-01-04 12:30:03")}, - {Time: parseTime("2016-01-04 16:23:03")}, - {Time: parseTime("2016-01-05 09:02:03")}, - {Time: parseTime("2016-01-06 08:02:03")}, - {Time: parseTime("2016-01-07 10:02:03")}, - {Time: parseTime("2016-01-08 20:02:03")}, - {Time: parseTime("2016-01-09 21:02:03")}, - {Time: parseTime("2016-01-12 21:02:03")}, - {Time: parseTime("2016-01-12 21:08:03")}, - {Time: parseTime("2016-01-18 12:02:03")}, +var testExpireSnapshots = restic.Snapshots{ + {Time: parseTimeUTC("2014-09-01 10:20:30")}, + {Time: parseTimeUTC("2014-09-02 10:20:30")}, + {Time: parseTimeUTC("2014-09-05 10:20:30")}, + {Time: parseTimeUTC("2014-09-06 10:20:30")}, + {Time: parseTimeUTC("2014-09-08 10:20:30")}, + {Time: parseTimeUTC("2014-09-09 10:20:30")}, + {Time: parseTimeUTC("2014-09-10 10:20:30")}, + {Time: parseTimeUTC("2014-09-11 10:20:30")}, + {Time: parseTimeUTC("2014-09-20 10:20:30")}, + {Time: parseTimeUTC("2014-09-22 10:20:30")}, + {Time: parseTimeUTC("2014-08-08 10:20:30")}, + {Time: parseTimeUTC("2014-08-10 10:20:30")}, + {Time: parseTimeUTC("2014-08-12 10:20:30")}, + {Time: parseTimeUTC("2014-08-13 10:20:30")}, + {Time: parseTimeUTC("2014-08-13 10:20:30")}, + {Time: parseTimeUTC("2014-08-15 10:20:30")}, + {Time: parseTimeUTC("2014-08-18 10:20:30")}, + {Time: parseTimeUTC("2014-08-20 10:20:30")}, + {Time: parseTimeUTC("2014-08-21 10:20:30")}, + {Time: parseTimeUTC("2014-08-22 10:20:30")}, + {Time: parseTimeUTC("2014-10-01 10:20:30")}, + {Time: parseTimeUTC("2014-10-02 10:20:30")}, + {Time: parseTimeUTC("2014-10-05 10:20:30")}, + {Time: parseTimeUTC("2014-10-06 10:20:30")}, + {Time: parseTimeUTC("2014-10-08 10:20:30")}, + {Time: parseTimeUTC("2014-10-09 10:20:30")}, + {Time: 
parseTimeUTC("2014-10-10 10:20:30")}, + {Time: parseTimeUTC("2014-10-11 10:20:30")}, + {Time: parseTimeUTC("2014-10-20 10:20:30")}, + {Time: parseTimeUTC("2014-10-22 10:20:30")}, + {Time: parseTimeUTC("2014-11-08 10:20:30")}, + {Time: parseTimeUTC("2014-11-10 10:20:30")}, + {Time: parseTimeUTC("2014-11-12 10:20:30")}, + {Time: parseTimeUTC("2014-11-13 10:20:30")}, + {Time: parseTimeUTC("2014-11-13 10:20:30")}, + {Time: parseTimeUTC("2014-11-15 10:20:30")}, + {Time: parseTimeUTC("2014-11-18 10:20:30")}, + {Time: parseTimeUTC("2014-11-20 10:20:30")}, + {Time: parseTimeUTC("2014-11-21 10:20:30")}, + {Time: parseTimeUTC("2014-11-22 10:20:30")}, + {Time: parseTimeUTC("2015-09-01 10:20:30")}, + {Time: parseTimeUTC("2015-09-02 10:20:30")}, + {Time: parseTimeUTC("2015-09-05 10:20:30")}, + {Time: parseTimeUTC("2015-09-06 10:20:30")}, + {Time: parseTimeUTC("2015-09-08 10:20:30")}, + {Time: parseTimeUTC("2015-09-09 10:20:30")}, + {Time: parseTimeUTC("2015-09-10 10:20:30")}, + {Time: parseTimeUTC("2015-09-11 10:20:30")}, + {Time: parseTimeUTC("2015-09-20 10:20:30")}, + {Time: parseTimeUTC("2015-09-22 10:20:30")}, + {Time: parseTimeUTC("2015-08-08 10:20:30")}, + {Time: parseTimeUTC("2015-08-10 10:20:30")}, + {Time: parseTimeUTC("2015-08-12 10:20:30")}, + {Time: parseTimeUTC("2015-08-13 10:20:30")}, + {Time: parseTimeUTC("2015-08-13 10:20:30")}, + {Time: parseTimeUTC("2015-08-15 10:20:30")}, + {Time: parseTimeUTC("2015-08-18 10:20:30")}, + {Time: parseTimeUTC("2015-08-20 10:20:30")}, + {Time: parseTimeUTC("2015-08-21 10:20:30")}, + {Time: parseTimeUTC("2015-08-22 10:20:30")}, + {Time: parseTimeUTC("2015-10-01 10:20:30")}, + {Time: parseTimeUTC("2015-10-02 10:20:30")}, + {Time: parseTimeUTC("2015-10-05 10:20:30")}, + {Time: parseTimeUTC("2015-10-06 10:20:30")}, + {Time: parseTimeUTC("2015-10-08 10:20:30")}, + {Time: parseTimeUTC("2015-10-09 10:20:30")}, + {Time: parseTimeUTC("2015-10-10 10:20:30")}, + {Time: parseTimeUTC("2015-10-11 10:20:30")}, + {Time: parseTimeUTC("2015-10-20 
10:20:30")}, + {Time: parseTimeUTC("2015-10-22 10:20:30")}, + {Time: parseTimeUTC("2015-11-08 10:20:30")}, + {Time: parseTimeUTC("2015-11-10 10:20:30")}, + {Time: parseTimeUTC("2015-11-12 10:20:30")}, + {Time: parseTimeUTC("2015-11-13 10:20:30")}, + {Time: parseTimeUTC("2015-11-13 10:20:30")}, + {Time: parseTimeUTC("2015-11-15 10:20:30")}, + {Time: parseTimeUTC("2015-11-18 10:20:30")}, + {Time: parseTimeUTC("2015-11-20 10:20:30")}, + {Time: parseTimeUTC("2015-11-21 10:20:30")}, + {Time: parseTimeUTC("2015-11-22 10:20:30")}, + {Time: parseTimeUTC("2016-01-01 01:02:03")}, + {Time: parseTimeUTC("2016-01-01 01:03:03")}, + {Time: parseTimeUTC("2016-01-01 07:08:03")}, + {Time: parseTimeUTC("2016-01-03 07:02:03")}, + {Time: parseTimeUTC("2016-01-04 10:23:03")}, + {Time: parseTimeUTC("2016-01-04 11:23:03")}, + {Time: parseTimeUTC("2016-01-04 12:23:03")}, + {Time: parseTimeUTC("2016-01-04 12:24:03")}, + {Time: parseTimeUTC("2016-01-04 12:28:03")}, + {Time: parseTimeUTC("2016-01-04 12:30:03")}, + {Time: parseTimeUTC("2016-01-04 16:23:03")}, + {Time: parseTimeUTC("2016-01-05 09:02:03")}, + {Time: parseTimeUTC("2016-01-06 08:02:03")}, + {Time: parseTimeUTC("2016-01-07 10:02:03")}, + {Time: parseTimeUTC("2016-01-08 20:02:03")}, + {Time: parseTimeUTC("2016-01-09 21:02:03")}, + {Time: parseTimeUTC("2016-01-12 21:02:03")}, + {Time: parseTimeUTC("2016-01-12 21:08:03")}, + {Time: parseTimeUTC("2016-01-18 12:02:03")}, } -var expireTests = []ExpirePolicy{ +var expireTests = []restic.ExpirePolicy{ {}, {Last: 10}, {Last: 15}, @@ -211,7 +212,7 @@ var expireTests = []ExpirePolicy{ func TestApplyPolicy(t *testing.T) { for i, p := range expireTests { - keep, remove := ApplyPolicy(testExpireSnapshots, p) + keep, remove := restic.ApplyPolicy(testExpireSnapshots, p) t.Logf("test %d: returned keep %v, remove %v (of %v) expired snapshots for policy %v", i, len(keep), len(remove), len(testExpireSnapshots), p) @@ -252,7 +253,7 @@ func TestApplyPolicy(t *testing.T) { continue } - var want Snapshots 
+ var want restic.Snapshots err = json.Unmarshal(buf, &want) if !reflect.DeepEqual(keep, want) { diff --git a/src/restic/test/doc.go b/src/restic/test/doc.go index 44183c141..060bad354 100644 --- a/src/restic/test/doc.go +++ b/src/restic/test/doc.go @@ -1,2 +1,2 @@ -// Package test_helper provides helper functions for writing tests for restic. -package test_helper +// Package test provides helper functions for writing tests for restic. +package test diff --git a/src/restic/test/helpers.go b/src/restic/test/helpers.go index 353c9b8ed..d363e09c9 100644 --- a/src/restic/test/helpers.go +++ b/src/restic/test/helpers.go @@ -1,4 +1,4 @@ -package test_helper +package test import ( "compress/bzip2" @@ -15,10 +15,6 @@ import ( "testing" mrand "math/rand" - - "restic/backend" - "restic/backend/local" - "restic/repository" ) // Assert fails the test if the condition is false. @@ -34,7 +30,7 @@ func Assert(tb testing.TB, condition bool, msg string, v ...interface{}) { func OK(tb testing.TB, err error) { if err != nil { _, file, line, _ := runtime.Caller(1) - fmt.Printf("\033[31m%s:%d: unexpected error: %s\033[39m\n\n", filepath.Base(file), line, err.Error()) + fmt.Printf("\033[31m%s:%d: unexpected error: %+v\033[39m\n\n", filepath.Base(file), line, err) tb.FailNow() } } @@ -63,16 +59,6 @@ func Equals(tb testing.TB, exp, act interface{}) { } } -// ParseID parses s as a backend.ID and panics if that fails. -func ParseID(s string) backend.ID { - id, err := backend.ParseID(s) - if err != nil { - panic(err) - } - - return id -} - // Random returns size bytes of pseudo-random data derived from the seed. func Random(seed, count int) []byte { p := make([]byte, count) @@ -184,40 +170,28 @@ func SetupTarTestFixture(t testing.TB, outputDir, tarFile string) { OK(t, cmd.Run()) } -// WithTestEnvironment creates a test environment, extracts the repository -// fixture and and calls f with the repository dir. 
-func WithTestEnvironment(t testing.TB, repoFixture string, f func(repodir string)) { +// Env creates a test environment and extracts the repository fixture. +// Returned is the repo path and a cleanup function. +func Env(t testing.TB, repoFixture string) (repodir string, cleanup func()) { tempdir, err := ioutil.TempDir(TestTempDir, "restic-test-") OK(t, err) fd, err := os.Open(repoFixture) if err != nil { - panic(err) + t.Fatal(err) } OK(t, fd.Close()) SetupTarTestFixture(t, tempdir, repoFixture) - f(filepath.Join(tempdir, "repo")) + return filepath.Join(tempdir, "repo"), func() { + if !TestCleanupTempDirs { + t.Logf("leaving temporary directory %v used for test", tempdir) + return + } - if !TestCleanupTempDirs { - t.Logf("leaving temporary directory %v used for test", tempdir) - return + RemoveAll(t, tempdir) } - - RemoveAll(t, tempdir) -} - -// OpenLocalRepo opens the local repository located at dir. -func OpenLocalRepo(t testing.TB, dir string) *repository.Repository { - be, err := local.Open(dir) - OK(t, err) - - repo := repository.New(be) - err = repo.SearchKey(TestPassword, 10) - OK(t, err) - - return repo } func isFile(fi os.FileInfo) bool { diff --git a/src/restic/test/backend.go b/src/restic/test/vars.go similarity index 50% rename from src/restic/test/backend.go rename to src/restic/test/vars.go index 5516cecdf..bb9f6b13d 100644 --- a/src/restic/test/backend.go +++ b/src/restic/test/vars.go @@ -1,16 +1,8 @@ -package test_helper +package test import ( "fmt" - "io/ioutil" "os" - "path/filepath" - "testing" - - "restic" - "restic/backend" - "restic/backend/local" - "restic/repository" ) var ( @@ -48,50 +40,3 @@ func getBoolVar(name string, defaultValue bool) bool { return defaultValue } - -func SetupRepo() *repository.Repository { - tempdir, err := ioutil.TempDir(TestTempDir, "restic-test-") - if err != nil { - panic(err) - } - - // create repository below temp dir - b, err := local.Create(filepath.Join(tempdir, "repo")) - if err != nil { - panic(err) - } - 
- repo := repository.New(b) - err = repo.Init(TestPassword) - if err != nil { - panic(err) - } - - return repo -} - -func TeardownRepo(repo *repository.Repository) { - if !TestCleanupTempDirs { - l := repo.Backend().(*local.Local) - fmt.Printf("leaving local backend at %s\n", l.Location()) - return - } - - err := repo.Delete() - if err != nil { - panic(err) - } -} - -func SnapshotDir(t testing.TB, repo *repository.Repository, path string, parent *backend.ID) *restic.Snapshot { - arch := restic.NewArchiver(repo) - sn, _, err := arch.Snapshot(nil, []string{path}, parent) - OK(t, err) - return sn -} - -func WithRepo(t testing.TB, f func(*repository.Repository)) { - repo := SetupRepo() - f(repo) - TeardownRepo(repo) -} diff --git a/src/restic/testing.go b/src/restic/testing.go index 78783ee44..039b908f7 100644 --- a/src/restic/testing.go +++ b/src/restic/testing.go @@ -5,33 +5,31 @@ import ( "fmt" "io" "math/rand" - "restic/backend" - "restic/pack" - "restic/repository" "testing" "time" - "github.com/pkg/errors" + "restic/errors" + "github.com/restic/chunker" ) // fakeFile returns a reader which yields deterministic pseudo-random data. func fakeFile(t testing.TB, seed, size int64) io.Reader { - return io.LimitReader(repository.NewRandReader(rand.New(rand.NewSource(seed))), size) + return io.LimitReader(NewRandReader(rand.New(rand.NewSource(seed))), size) } type fakeFileSystem struct { t testing.TB - repo *repository.Repository - knownBlobs backend.IDSet + repo Repository + knownBlobs IDSet duplication float32 } // saveFile reads from rd and saves the blobs in the repository. The list of // IDs is returned. 
-func (fs fakeFileSystem) saveFile(rd io.Reader) (blobs backend.IDs) { - blobs = backend.IDs{} - ch := chunker.New(rd, fs.repo.Config.ChunkerPolynomial) +func (fs fakeFileSystem) saveFile(rd io.Reader) (blobs IDs) { + blobs = IDs{} + ch := chunker.New(rd, fs.repo.Config().ChunkerPolynomial) for { chunk, err := ch.Next(getBuf()) @@ -43,9 +41,9 @@ func (fs fakeFileSystem) saveFile(rd io.Reader) (blobs backend.IDs) { fs.t.Fatalf("unable to save chunk in repo: %v", err) } - id := backend.Hash(chunk.Data) - if !fs.blobIsKnown(id, pack.Data) { - _, err := fs.repo.SaveAndEncrypt(pack.Data, chunk.Data, &id) + id := Hash(chunk.Data) + if !fs.blobIsKnown(id, DataBlob) { + _, err := fs.repo.SaveBlob(DataBlob, chunk.Data, id) if err != nil { fs.t.Fatalf("error saving chunk: %v", err) } @@ -66,20 +64,19 @@ const ( maxNodes = 32 ) -func (fs fakeFileSystem) treeIsKnown(tree *Tree) (bool, backend.ID) { +func (fs fakeFileSystem) treeIsKnown(tree *Tree) (bool, []byte, ID) { data, err := json.Marshal(tree) if err != nil { fs.t.Fatalf("json.Marshal(tree) returned error: %v", err) - return false, backend.ID{} + return false, nil, ID{} } data = append(data, '\n') - id := backend.Hash(data) - return fs.blobIsKnown(id, pack.Tree), id - + id := Hash(data) + return fs.blobIsKnown(id, TreeBlob), data, id } -func (fs fakeFileSystem) blobIsKnown(id backend.ID, t pack.BlobType) bool { +func (fs fakeFileSystem) blobIsKnown(id ID, t BlobType) bool { if rand.Float32() < fs.duplication { return false } @@ -97,7 +94,7 @@ func (fs fakeFileSystem) blobIsKnown(id backend.ID, t pack.BlobType) bool { } // saveTree saves a tree of fake files in the repo and returns the ID. 
-func (fs fakeFileSystem) saveTree(seed int64, depth int) backend.ID { +func (fs fakeFileSystem) saveTree(seed int64, depth int) ID { rnd := rand.NewSource(seed) numNodes := int(rnd.Int63() % maxNodes) @@ -134,11 +131,12 @@ func (fs fakeFileSystem) saveTree(seed int64, depth int) backend.ID { tree.Nodes = append(tree.Nodes, node) } - if known, id := fs.treeIsKnown(&tree); known { + known, buf, id := fs.treeIsKnown(&tree) + if known { return id } - id, err := fs.repo.SaveJSON(pack.Tree, tree) + _, err := fs.repo.SaveBlob(TreeBlob, buf, id) if err != nil { fs.t.Fatal(err) } @@ -151,7 +149,7 @@ func (fs fakeFileSystem) saveTree(seed int64, depth int) backend.ID { // also used as the snapshot's timestamp. The tree's depth can be specified // with the parameter depth. The parameter duplication is a probability that // the same blob will saved again. -func TestCreateSnapshot(t testing.TB, repo *repository.Repository, at time.Time, depth int, duplication float32) *Snapshot { +func TestCreateSnapshot(t testing.TB, repo Repository, at time.Time, depth int, duplication float32) *Snapshot { seed := at.Unix() t.Logf("create fake snapshot at %s with seed %d", at, seed) @@ -165,14 +163,14 @@ func TestCreateSnapshot(t testing.TB, repo *repository.Repository, at time.Time, fs := fakeFileSystem{ t: t, repo: repo, - knownBlobs: backend.NewIDSet(), + knownBlobs: NewIDSet(), duplication: duplication, } treeID := fs.saveTree(seed, depth) snapshot.Tree = &treeID - id, err := repo.SaveJSONUnpacked(backend.Snapshot, snapshot) + id, err := repo.SaveJSONUnpacked(SnapshotFile, snapshot) if err != nil { t.Fatal(err) } @@ -194,19 +192,12 @@ func TestCreateSnapshot(t testing.TB, repo *repository.Repository, at time.Time, return snapshot } -// TestResetRepository removes all packs and indexes from the repository. 
-func TestResetRepository(t testing.TB, repo *repository.Repository) { - done := make(chan struct{}) - defer close(done) - - for _, tpe := range []backend.Type{backend.Snapshot, backend.Index, backend.Data} { - for id := range repo.Backend().List(tpe, done) { - err := repo.Backend().Remove(tpe, id) - if err != nil { - t.Errorf("removing %v (%v) failed: %v", id[0:12], tpe, err) - } - } +// TestParseID parses s as a ID and panics if that fails. +func TestParseID(s string) ID { + id, err := ParseID(s) + if err != nil { + panic(err) } - repo.SetIndex(repository.NewMasterIndex()) + return id } diff --git a/src/restic/testing_test.go b/src/restic/testing_test.go index 3c5ea5a6f..1258bf208 100644 --- a/src/restic/testing_test.go +++ b/src/restic/testing_test.go @@ -47,15 +47,3 @@ func TestCreateSnapshot(t *testing.T) { checker.TestCheckRepo(t, repo) } - -func BenchmarkCreateSnapshot(b *testing.B) { - repo, cleanup := repository.TestRepository(b) - defer cleanup() - - b.ResetTimer() - - for i := 0; i < b.N; i++ { - restic.TestCreateSnapshot(b, repo, testSnapshotTime, testDepth, 0) - restic.TestResetRepository(b, repo) - } -} diff --git a/src/restic/tree.go b/src/restic/tree.go index 9bfcfd7ee..f2c1c04a9 100644 --- a/src/restic/tree.go +++ b/src/restic/tree.go @@ -4,22 +4,17 @@ import ( "fmt" "sort" - "github.com/pkg/errors" + "restic/errors" - "restic/backend" "restic/debug" - "restic/pack" ) +// Tree is an ordered list of nodes. type Tree struct { Nodes []*Node `json:"nodes"` } -var ( - ErrNodeNotFound = errors.New("named node not found") - ErrNodeAlreadyInTree = errors.New("node already present") -) - +// NewTree creates a new tree object. 
func NewTree() *Tree { return &Tree{ Nodes: []*Node{}, @@ -30,20 +25,6 @@ func (t Tree) String() string { return fmt.Sprintf("Tree<%d nodes>", len(t.Nodes)) } -type TreeLoader interface { - LoadJSONPack(pack.BlobType, backend.ID, interface{}) error -} - -func LoadTree(repo TreeLoader, id backend.ID) (*Tree, error) { - tree := &Tree{} - err := repo.LoadJSONPack(pack.Tree, id, tree) - if err != nil { - return nil, err - } - - return tree, nil -} - // Equals returns true if t and other have exactly the same nodes. func (t Tree) Equals(other *Tree) bool { if len(t.Nodes) != len(other.Nodes) { @@ -63,10 +44,11 @@ func (t Tree) Equals(other *Tree) bool { return true } +// Insert adds a new node at the correct place in the tree. func (t *Tree) Insert(node *Node) error { pos, _, err := t.binarySearch(node.Name) if err == nil { - return ErrNodeAlreadyInTree + return errors.New("node already present") } // https://code.google.com/p/go-wiki/wiki/SliceTricks @@ -86,16 +68,17 @@ func (t Tree) binarySearch(name string) (int, *Node, error) { return pos, t.Nodes[pos], nil } - return pos, nil, ErrNodeNotFound + return pos, nil, errors.New("named node not found") } +// Find returns a node with the given name. func (t Tree) Find(name string) (*Node, error) { _, node, err := t.binarySearch(name) return node, err } // Subtrees returns a slice of all subtree IDs of the tree. -func (t Tree) Subtrees() (trees backend.IDs) { +func (t Tree) Subtrees() (trees IDs) { for _, node := range t.Nodes { if node.Type == "dir" && node.Subtree != nil { trees = append(trees, *node.Subtree) diff --git a/src/restic/tree_test.go b/src/restic/tree_test.go index 2f85819fb..1d23e9240 100644 --- a/src/restic/tree_test.go +++ b/src/restic/tree_test.go @@ -8,7 +8,7 @@ import ( "testing" "restic" - "restic/pack" + "restic/repository" . 
"restic/test" ) @@ -93,19 +93,19 @@ func TestNodeComparison(t *testing.T) { } func TestLoadTree(t *testing.T) { - repo := SetupRepo() - defer TeardownRepo(repo) + repo, cleanup := repository.TestRepository(t) + defer cleanup() // save tree tree := restic.NewTree() - id, err := repo.SaveJSON(pack.Tree, tree) + id, err := repo.SaveTree(tree) OK(t, err) // save packs OK(t, repo.Flush()) // load tree again - tree2, err := restic.LoadTree(repo, id) + tree2, err := repo.LoadTree(id) OK(t, err) Assert(t, tree.Equals(tree2), diff --git a/src/restic/types/repository.go b/src/restic/types/repository.go deleted file mode 100644 index d13d93333..000000000 --- a/src/restic/types/repository.go +++ /dev/null @@ -1,20 +0,0 @@ -package types - -import ( - "restic/backend" - "restic/pack" -) - -// Repository manages encrypted and packed data stored in a backend. -type Repository interface { - LoadJSONUnpacked(backend.Type, backend.ID, interface{}) error - SaveJSONUnpacked(backend.Type, interface{}) (backend.ID, error) - - Lister -} - -// Lister combines lists packs in a repo and blobs in a pack. -type Lister interface { - List(backend.Type, <-chan struct{}) <-chan backend.ID - ListPack(backend.ID) ([]pack.Blob, int64, error) -} diff --git a/src/restic/testdata/walktree-test-repo.tar.gz b/src/restic/walk/testdata/walktree-test-repo.tar.gz similarity index 100% rename from src/restic/testdata/walktree-test-repo.tar.gz rename to src/restic/walk/testdata/walktree-test-repo.tar.gz diff --git a/src/restic/walk.go b/src/restic/walk/walk.go similarity index 75% rename from src/restic/walk.go rename to src/restic/walk/walk.go index 2978e8500..fbe322f63 100644 --- a/src/restic/walk.go +++ b/src/restic/walk/walk.go @@ -1,41 +1,40 @@ -package restic +package walk import ( "fmt" "os" "path/filepath" + "restic" "sync" - "restic/backend" "restic/debug" - "restic/pack" ) -// WalkTreeJob is a job sent from the tree walker. -type WalkTreeJob struct { +// TreeJob is a job sent from the tree walker. 
+type TreeJob struct { Path string Error error - Node *Node - Tree *Tree + Node *restic.Node + Tree *restic.Tree } // TreeWalker traverses a tree in the repository depth-first and sends a job // for each item (file or dir) that it encounters. type TreeWalker struct { ch chan<- loadTreeJob - out chan<- WalkTreeJob + out chan<- TreeJob } // NewTreeWalker uses ch to load trees from the repository and sends jobs to // out. -func NewTreeWalker(ch chan<- loadTreeJob, out chan<- WalkTreeJob) *TreeWalker { +func NewTreeWalker(ch chan<- loadTreeJob, out chan<- TreeJob) *TreeWalker { return &TreeWalker{ch: ch, out: out} } // Walk starts walking the tree given by id. When the channel done is closed, // processing stops. -func (tw *TreeWalker) Walk(path string, id backend.ID, done chan struct{}) { +func (tw *TreeWalker) Walk(path string, id restic.ID, done chan struct{}) { debug.Log("TreeWalker.Walk", "starting on tree %v for %v", id.Str(), path) defer debug.Log("TreeWalker.Walk", "done walking tree %v for %v", id.Str(), path) @@ -48,7 +47,7 @@ func (tw *TreeWalker) Walk(path string, id backend.ID, done chan struct{}) { res := <-resCh if res.err != nil { select { - case tw.out <- WalkTreeJob{Path: path, Error: res.err}: + case tw.out <- TreeJob{Path: path, Error: res.err}: case <-done: return } @@ -58,13 +57,13 @@ func (tw *TreeWalker) Walk(path string, id backend.ID, done chan struct{}) { tw.walk(path, res.tree, done) select { - case tw.out <- WalkTreeJob{Path: path, Tree: res.tree}: + case tw.out <- TreeJob{Path: path, Tree: res.tree}: case <-done: return } } -func (tw *TreeWalker) walk(path string, tree *Tree, done chan struct{}) { +func (tw *TreeWalker) walk(path string, tree *restic.Tree, done chan struct{}) { debug.Log("TreeWalker.walk", "start on %q", path) defer debug.Log("TreeWalker.walk", "done for %q", path) @@ -86,7 +85,7 @@ func (tw *TreeWalker) walk(path string, tree *Tree, done chan struct{}) { for i, node := range tree.Nodes { p := filepath.Join(path, 
node.Name) - var job WalkTreeJob + var job TreeJob if node.Type == "dir" { if results[i] == nil { @@ -100,9 +99,9 @@ func (tw *TreeWalker) walk(path string, tree *Tree, done chan struct{}) { fmt.Fprintf(os.Stderr, "error loading tree: %v\n", res.err) } - job = WalkTreeJob{Path: p, Tree: res.tree, Error: res.err} + job = TreeJob{Path: p, Tree: res.tree, Error: res.err} } else { - job = WalkTreeJob{Path: p, Node: node} + job = TreeJob{Path: p, Node: node} } select { @@ -114,16 +113,16 @@ func (tw *TreeWalker) walk(path string, tree *Tree, done chan struct{}) { } type loadTreeResult struct { - tree *Tree + tree *restic.Tree err error } type loadTreeJob struct { - id backend.ID + id restic.ID res chan<- loadTreeResult } -type treeLoader func(backend.ID) (*Tree, error) +type treeLoader func(restic.ID) (*restic.Tree, error) func loadTreeWorker(wg *sync.WaitGroup, in <-chan loadTreeJob, load treeLoader, done <-chan struct{}) { debug.Log("loadTreeWorker", "start") @@ -157,17 +156,21 @@ func loadTreeWorker(wg *sync.WaitGroup, in <-chan loadTreeJob, load treeLoader, } } +// TreeLoader loads tree objects. +type TreeLoader interface { + LoadTree(restic.ID) (*restic.Tree, error) +} + const loadTreeWorkers = 10 -// WalkTree walks the tree specified by id recursively and sends a job for each +// Tree walks the tree specified by id recursively and sends a job for each // file and directory it finds. When the channel done is closed, processing // stops. 
-func WalkTree(repo TreeLoader, id backend.ID, done chan struct{}, jobCh chan<- WalkTreeJob) { +func Tree(repo TreeLoader, id restic.ID, done chan struct{}, jobCh chan<- TreeJob) { debug.Log("WalkTree", "start on %v, start workers", id.Str()) - load := func(id backend.ID) (*Tree, error) { - tree := &Tree{} - err := repo.LoadJSONPack(pack.Tree, id, tree) + load := func(id restic.ID) (*restic.Tree, error) { + tree, err := repo.LoadTree(id) if err != nil { return nil, err } diff --git a/src/restic/walk_test.go b/src/restic/walk/walk_test.go similarity index 94% rename from src/restic/walk_test.go rename to src/restic/walk/walk_test.go index cce0e2300..d4643014e 100644 --- a/src/restic/walk_test.go +++ b/src/restic/walk/walk_test.go @@ -1,4 +1,4 @@ -package restic_test +package walk_test import ( "os" @@ -8,22 +8,22 @@ import ( "time" "restic" - "restic/backend" - "restic/pack" + "restic/archiver" "restic/pipe" "restic/repository" . "restic/test" + "restic/walk" ) func TestWalkTree(t *testing.T) { - repo := SetupRepo() - defer TeardownRepo(repo) + repo, cleanup := repository.TestRepository(t) + defer cleanup() dirs, err := filepath.Glob(TestWalkerPath) OK(t, err) // archive a few files - arch := restic.NewArchiver(repo) + arch := archiver.New(repo) sn, _, err := arch.Snapshot(nil, dirs, nil) OK(t, err) @@ -33,8 +33,8 @@ func TestWalkTree(t *testing.T) { done := make(chan struct{}) // start tree walker - treeJobs := make(chan restic.WalkTreeJob) - go restic.WalkTree(repo, *sn.Tree, done, treeJobs) + treeJobs := make(chan walk.TreeJob) + go walk.Tree(repo, *sn.Tree, done, treeJobs) // start filesystem walker fsJobs := make(chan pipe.Job) @@ -91,13 +91,13 @@ func TestWalkTree(t *testing.T) { } type delayRepo struct { - repo *repository.Repository + repo restic.Repository delay time.Duration } -func (d delayRepo) LoadJSONPack(t pack.BlobType, id backend.ID, dst interface{}) error { +func (d delayRepo) LoadTree(id restic.ID) (*restic.Tree, error) { time.Sleep(d.delay) - 
return d.repo.LoadJSONPack(t, id, dst) + return d.repo.LoadTree(id) } var repoFixture = filepath.Join("testdata", "walktree-test-repo.tar.gz") @@ -1341,53 +1341,55 @@ var walktreeTestItems = []string{ } func TestDelayedWalkTree(t *testing.T) { - WithTestEnvironment(t, repoFixture, func(repodir string) { - repo := OpenLocalRepo(t, repodir) - OK(t, repo.LoadIndex()) + repodir, cleanup := Env(t, repoFixture) + defer cleanup() - root, err := backend.ParseID("937a2f64f736c64ee700c6ab06f840c68c94799c288146a0e81e07f4c94254da") - OK(t, err) + repo := repository.TestOpenLocal(t, repodir) + OK(t, repo.LoadIndex()) - dr := delayRepo{repo, 100 * time.Millisecond} + root, err := restic.ParseID("937a2f64f736c64ee700c6ab06f840c68c94799c288146a0e81e07f4c94254da") + OK(t, err) - // start tree walker - treeJobs := make(chan restic.WalkTreeJob) - go restic.WalkTree(dr, root, nil, treeJobs) + dr := delayRepo{repo, 100 * time.Millisecond} - i := 0 - for job := range treeJobs { - expectedPath := filepath.Join(strings.Split(walktreeTestItems[i], "/")...) - if job.Path != expectedPath { - t.Fatalf("expected path %q (%v), got %q", walktreeTestItems[i], i, job.Path) - } - i++ + // start tree walker + treeJobs := make(chan walk.TreeJob) + go walk.Tree(dr, root, nil, treeJobs) + + i := 0 + for job := range treeJobs { + expectedPath := filepath.Join(strings.Split(walktreeTestItems[i], "/")...) 
+ if job.Path != expectedPath { + t.Fatalf("expected path %q (%v), got %q", walktreeTestItems[i], i, job.Path) } + i++ + } - if i != len(walktreeTestItems) { - t.Fatalf("got %d items, expected %v", i, len(walktreeTestItems)) - } - }) + if i != len(walktreeTestItems) { + t.Fatalf("got %d items, expected %v", i, len(walktreeTestItems)) + } } func BenchmarkDelayedWalkTree(t *testing.B) { - WithTestEnvironment(t, repoFixture, func(repodir string) { - repo := OpenLocalRepo(t, repodir) - OK(t, repo.LoadIndex()) + repodir, cleanup := Env(t, repoFixture) + defer cleanup() - root, err := backend.ParseID("937a2f64f736c64ee700c6ab06f840c68c94799c288146a0e81e07f4c94254da") - OK(t, err) + repo := repository.TestOpenLocal(t, repodir) + OK(t, repo.LoadIndex()) - dr := delayRepo{repo, 10 * time.Millisecond} + root, err := restic.ParseID("937a2f64f736c64ee700c6ab06f840c68c94799c288146a0e81e07f4c94254da") + OK(t, err) - t.ResetTimer() + dr := delayRepo{repo, 10 * time.Millisecond} - for i := 0; i < t.N; i++ { - // start tree walker - treeJobs := make(chan restic.WalkTreeJob) - go restic.WalkTree(dr, root, nil, treeJobs) + t.ResetTimer() - for _ = range treeJobs { - } + for i := 0; i < t.N; i++ { + // start tree walker + treeJobs := make(chan walk.TreeJob) + go walk.Tree(dr, root, nil, treeJobs) + + for _ = range treeJobs { } - }) + } } diff --git a/src/restic/worker/pool_test.go b/src/restic/worker/pool_test.go index 329ce9a88..9d6159b89 100644 --- a/src/restic/worker/pool_test.go +++ b/src/restic/worker/pool_test.go @@ -3,7 +3,7 @@ package worker_test import ( "testing" - "github.com/pkg/errors" + "restic/errors" "restic/worker" )