From e699f6d1bd3973233a99a07e7a3425ee020480b5 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Wed, 31 Aug 2016 18:27:57 +0200 Subject: [PATCH 01/40] Update doc comment --- src/restic/doc.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/restic/doc.go b/src/restic/doc.go index 358dca240..2e53b2524 100644 --- a/src/restic/doc.go +++ b/src/restic/doc.go @@ -1,6 +1,5 @@ // Package restic is the top level package for the restic backup program, // please see https://github.com/restic/restic for more information. // -// This package exposes the main components needed to create and restore a -// backup as well as handling things like a local cache of objects. +// This package exposes the main objects that are handled in restic. package restic From bfdd26c541736c2888c526eb5ca8e33cb2ee4e5e Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Wed, 31 Aug 2016 18:34:19 +0200 Subject: [PATCH 02/40] Remove (unused) cache implementation --- src/restic/cache.go | 290 --------------------------------------- src/restic/cache_test.go | 26 ---- 2 files changed, 316 deletions(-) delete mode 100644 src/restic/cache.go delete mode 100644 src/restic/cache_test.go diff --git a/src/restic/cache.go b/src/restic/cache.go deleted file mode 100644 index 1af4e9605..000000000 --- a/src/restic/cache.go +++ /dev/null @@ -1,290 +0,0 @@ -package restic - -import ( - "io" - "os" - "path/filepath" - "runtime" - "strings" - - "github.com/pkg/errors" - - "restic/backend" - "restic/debug" - "restic/fs" - "restic/repository" -) - -// Cache is used to locally cache items from a repository. -type Cache struct { - base string -} - -// NewCache returns a new cache at cacheDir. If it is the empty string, the -// default cache location is chosen. -func NewCache(repo *repository.Repository, cacheDir string) (*Cache, error) { - var err error - - if cacheDir == "" { - cacheDir, err = getCacheDir() - if err != nil { - return nil, err - } - } - - basedir := filepath.Join(cacheDir, repo.Config.ID) - debug.Log("Cache.New", "opened cache at %v", basedir) - - return &Cache{base: basedir}, nil -} - -// Has checks if the local cache has the id. -func (c *Cache) Has(t backend.Type, subtype string, id backend.ID) (bool, error) { - filename, err := c.filename(t, subtype, id) - if err != nil { - return false, err - } - fd, err := fs.Open(filename) - defer fd.Close() - - if err != nil { - if os.IsNotExist(errors.Cause(err)) { - debug.Log("Cache.Has", "test for file %v: not cached", filename) - return false, nil - } - - debug.Log("Cache.Has", "test for file %v: error %v", filename, err) - return false, errors.Wrap(err, "Open") - } - - debug.Log("Cache.Has", "test for file %v: is cached", filename) - return true, nil -} - -// Store returns an io.WriteCloser that is used to save new information to the -// cache. The returned io.WriteCloser must be closed by the caller after all -// data has been written. -func (c *Cache) Store(t backend.Type, subtype string, id backend.ID) (io.WriteCloser, error) { - filename, err := c.filename(t, subtype, id) - if err != nil { - return nil, err - } - - dirname := filepath.Dir(filename) - err = fs.MkdirAll(dirname, 0700) - if err != nil { - return nil, errors.Wrap(err, "MkdirAll") - } - - file, err := fs.Create(filename) - if err != nil { - debug.Log("Cache.Store", "error creating file %v: %v", filename, err) - return nil, errors.Wrap(err, "Create") - } - - debug.Log("Cache.Store", "created file %v", filename) - return file, nil -} - -// Load returns information from the cache. 
The returned io.ReadCloser must be -// closed by the caller. -func (c *Cache) Load(t backend.Type, subtype string, id backend.ID) (io.ReadCloser, error) { - filename, err := c.filename(t, subtype, id) - if err != nil { - return nil, err - } - - return fs.Open(filename) -} - -func (c *Cache) purge(t backend.Type, subtype string, id backend.ID) error { - filename, err := c.filename(t, subtype, id) - if err != nil { - return err - } - - err = fs.Remove(filename) - debug.Log("Cache.purge", "Remove file %v: %v", filename, err) - - if err != nil && os.IsNotExist(errors.Cause(err)) { - return nil - } - - return errors.Wrap(err, "Remove") -} - -// Clear removes information from the cache that isn't present in the repository any more. -func (c *Cache) Clear(repo *repository.Repository) error { - list, err := c.list(backend.Snapshot) - if err != nil { - return err - } - - for _, entry := range list { - debug.Log("Cache.Clear", "found entry %v", entry) - - if ok, err := repo.Backend().Test(backend.Snapshot, entry.ID.String()); !ok || err != nil { - debug.Log("Cache.Clear", "snapshot %v doesn't exist any more, removing %v", entry.ID, entry) - - err = c.purge(backend.Snapshot, entry.Subtype, entry.ID) - if err != nil { - return err - } - } - } - - return nil -} - -type cacheEntry struct { - ID backend.ID - Subtype string -} - -func (c cacheEntry) String() string { - if c.Subtype != "" { - return c.ID.Str() + "." + c.Subtype - } - return c.ID.Str() -} - -func (c *Cache) list(t backend.Type) ([]cacheEntry, error) { - var dir string - - switch t { - case backend.Snapshot: - dir = filepath.Join(c.base, "snapshots") - default: - return nil, errors.Errorf("cache not supported for type %v", t) - } - - fd, err := fs.Open(dir) - if err != nil { - if os.IsNotExist(errors.Cause(err)) { - return []cacheEntry{}, nil - } - return nil, errors.Wrap(err, "Open") - } - defer fd.Close() - - fis, err := fd.Readdir(-1) - if err != nil { - return nil, errors.Wrap(err, "Readdir") - } - - entries := make([]cacheEntry, 0, len(fis)) - - for _, fi := range fis { - parts := strings.SplitN(fi.Name(), ".", 2) - - id, err := backend.ParseID(parts[0]) - // ignore invalid cache entries for now - if err != nil { - debug.Log("Cache.List", "unable to parse name %v as id: %v", parts[0], err) - continue - } - - e := cacheEntry{ID: id} - - if len(parts) == 2 { - e.Subtype = parts[1] - } - - entries = append(entries, e) - } - - return entries, nil -} - -func (c *Cache) filename(t backend.Type, subtype string, id backend.ID) (string, error) { - filename := id.String() - if subtype != "" { - filename += "." + subtype - } - - switch t { - case backend.Snapshot: - return filepath.Join(c.base, "snapshots", filename), nil - } - - return "", errors.Errorf("cache not supported for type %v", t) -} - -func getCacheDir() (string, error) { - if dir := os.Getenv("RESTIC_CACHE"); dir != "" { - return dir, nil - } - if runtime.GOOS == "windows" { - return getWindowsCacheDir() - } - - return getXDGCacheDir() -} - -// getWindowsCacheDir will return %APPDATA%\restic or create -// a folder in the temporary folder called "restic". 
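// A condensed sketch (not part of this patch) of the lookup order the two
// removed cache-dir helpers below implement: an explicit RESTIC_CACHE always
// wins, Windows falls back to %APPDATA% (or the temp dir), and other systems
// follow the XDG base-dir spec. exampleCacheDir is hypothetical; the real
// helpers also create the directory and report errors.
func exampleCacheDir() string {
	if dir := os.Getenv("RESTIC_CACHE"); dir != "" {
		return dir // explicit override
	}
	if runtime.GOOS == "windows" {
		appdata := os.Getenv("APPDATA")
		if appdata == "" {
			appdata = os.TempDir()
		}
		return filepath.Join(appdata, "restic")
	}
	if xdg := os.Getenv("XDG_CACHE_HOME"); xdg != "" {
		return filepath.Join(xdg, "restic")
	}
	return filepath.Join(os.Getenv("HOME"), ".cache", "restic")
}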
-func getWindowsCacheDir() (string, error) { - cachedir := os.Getenv("APPDATA") - if cachedir == "" { - cachedir = os.TempDir() - } - cachedir = filepath.Join(cachedir, "restic") - fi, err := fs.Stat(cachedir) - - if os.IsNotExist(errors.Cause(err)) { - err = fs.MkdirAll(cachedir, 0700) - if err != nil { - return "", errors.Wrap(err, "MkdirAll") - } - - return cachedir, nil - } - - if err != nil { - return "", errors.Wrap(err, "Stat") - } - - if !fi.IsDir() { - return "", errors.Errorf("cache dir %v is not a directory", cachedir) - } - return cachedir, nil -} - -// getXDGCacheDir returns the cache directory according to XDG basedir spec, see -// http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html -func getXDGCacheDir() (string, error) { - xdgcache := os.Getenv("XDG_CACHE_HOME") - home := os.Getenv("HOME") - - if xdgcache == "" && home == "" { - return "", errors.New("unable to locate cache directory (XDG_CACHE_HOME and HOME unset)") - } - - cachedir := "" - if xdgcache != "" { - cachedir = filepath.Join(xdgcache, "restic") - } else if home != "" { - cachedir = filepath.Join(home, ".cache", "restic") - } - - fi, err := fs.Stat(cachedir) - if os.IsNotExist(errors.Cause(err)) { - err = fs.MkdirAll(cachedir, 0700) - if err != nil { - return "", errors.Wrap(err, "MkdirAll") - } - - fi, err = fs.Stat(cachedir) - debug.Log("getCacheDir", "create cache dir %v", cachedir) - } - - if err != nil { - return "", errors.Wrap(err, "Stat") - } - - if !fi.IsDir() { - return "", errors.Errorf("cache dir %v is not a directory", cachedir) - } - - return cachedir, nil -} diff --git a/src/restic/cache_test.go b/src/restic/cache_test.go deleted file mode 100644 index c72b26e2a..000000000 --- a/src/restic/cache_test.go +++ /dev/null @@ -1,26 +0,0 @@ -package restic_test - -import ( - "testing" - - "restic" - . 
"restic/test" -) - -func TestCache(t *testing.T) { - repo := SetupRepo() - defer TeardownRepo(repo) - - _, err := restic.NewCache(repo, "") - OK(t, err) - - arch := restic.NewArchiver(repo) - - // archive some files, this should automatically cache all blobs from the snapshot - _, _, err = arch.Snapshot(nil, []string{BenchArchiveDirectory}, nil) - if err != nil { - t.Fatal(err) - } - - // TODO: test caching index -} From 82c2dafb23b77bd4937376fc0b49e0a1f9e8f20b Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Wed, 31 Aug 2016 19:10:10 +0200 Subject: [PATCH 03/40] Copy interfaces and basic types to package restic/ --- src/restic/archive_reader.go | 2 +- src/restic/archiver.go | 4 +- src/restic/archiver_duplication_test.go | 19 +++-- src/restic/backend.go | 55 ++++++++++++ src/restic/backend/mem/mem_backend.go | 90 ++++++++------------ src/restic/backend/mock_backend.go | 103 ----------------------- src/restic/checker/checker.go | 4 +- src/restic/find.go | 2 +- src/restic/fuse/dir.go | 8 +- src/restic/handle.go | 49 +++++++++++ src/restic/handle_test.go | 28 +++++++ src/restic/mock/backend.go | 106 ++++++++++++++++++++++++ src/restic/node.go | 30 +++---- src/restic/node_test.go | 34 ++++---- src/restic/repository.go | 13 +++ src/restic/restorer.go | 2 +- src/restic/testing.go | 20 ++--- src/restic/tree.go | 2 +- src/restic/walk.go | 4 +- 19 files changed, 351 insertions(+), 224 deletions(-) create mode 100644 src/restic/backend.go delete mode 100644 src/restic/backend/mock_backend.go create mode 100644 src/restic/handle.go create mode 100644 src/restic/handle_test.go create mode 100644 src/restic/mock/backend.go create mode 100644 src/restic/repository.go diff --git a/src/restic/archive_reader.go b/src/restic/archive_reader.go index 02b630b1d..c25cd0996 100644 --- a/src/restic/archive_reader.go +++ b/src/restic/archive_reader.go @@ -83,7 +83,7 @@ func ArchiveReader(repo *repository.Repository, p *Progress, rd io.Reader, name Name: name, AccessTime: time.Now(), ModTime: time.Now(), - Type: "file", + FileType: "file", Mode: 0644, Size: fileSize, UID: sn.UID, diff --git a/src/restic/archiver.go b/src/restic/archiver.go index 5f72633a2..24d352bc9 100644 --- a/src/restic/archiver.go +++ b/src/restic/archiver.go @@ -307,7 +307,7 @@ func (arch *Archiver) fileWorker(wg *sync.WaitGroup, p *Progress, done <-chan st } // otherwise read file normally - if node.Type == "file" && len(node.Content) == 0 { + if node.FileType == "file" && len(node.Content) == 0 { debug.Log("Archiver.fileWorker", " read and save %v, content: %v", e.Path(), node.Content) err = arch.SaveFile(p, node) if err != nil { @@ -374,7 +374,7 @@ func (arch *Archiver) dirWorker(wg *sync.WaitGroup, p *Progress, done <-chan str node := res.(*Node) tree.Insert(node) - if node.Type == "dir" { + if node.FileType == "dir" { debug.Log("Archiver.dirWorker", "got tree node for %s: %v", node.path, node.Subtree) if node.Subtree.IsNull() { diff --git a/src/restic/archiver_duplication_test.go b/src/restic/archiver_duplication_test.go index 61f7aafb9..52afb6bd6 100644 --- a/src/restic/archiver_duplication_test.go +++ b/src/restic/archiver_duplication_test.go @@ -12,6 +12,7 @@ import ( "restic" "restic/backend" + "restic/mock" "restic/pack" "restic/repository" ) @@ -36,30 +37,30 @@ func randomID() backend.ID { } // forgetfulBackend returns a backend that forgets everything. 
-func forgetfulBackend() backend.Backend { - be := &backend.MockBackend{} +func forgetfulBackend() restic.Backend { + be := &mock.Backend{} - be.TestFn = func(t backend.Type, name string) (bool, error) { + be.TestFn = func(t restic.FileType, name string) (bool, error) { return false, nil } - be.LoadFn = func(h backend.Handle, p []byte, off int64) (int, error) { + be.LoadFn = func(h restic.Handle, p []byte, off int64) (int, error) { return 0, errors.New("not found") } - be.SaveFn = func(h backend.Handle, p []byte) error { + be.SaveFn = func(h restic.Handle, p []byte) error { return nil } - be.StatFn = func(h backend.Handle) (backend.BlobInfo, error) { - return backend.BlobInfo{}, errors.New("not found") + be.StatFn = func(h restic.Handle) (restic.BlobInfo, error) { + return restic.BlobInfo{}, errors.New("not found") } - be.RemoveFn = func(t backend.Type, name string) error { + be.RemoveFn = func(t restic.FileType, name string) error { return nil } - be.ListFn = func(t backend.Type, done <-chan struct{}) <-chan string { + be.ListFn = func(t restic.FileType, done <-chan struct{}) <-chan string { ch := make(chan string) close(ch) return ch diff --git a/src/restic/backend.go b/src/restic/backend.go new file mode 100644 index 000000000..f00c4699e --- /dev/null +++ b/src/restic/backend.go @@ -0,0 +1,55 @@ +package restic + +// FileType is the type of a file in the backend. +type FileType string + +// These are the different data types a backend can store. +const ( + DataFile FileType = "data" + KeyFile = "key" + LockFile = "lock" + SnapshotFile = "snapshot" + IndexFile = "index" + ConfigFile = "config" +) + +// Backend is used to store and access data. +type Backend interface { + // Location returns a string that describes the type and location of the + // repository. + Location() string + + // Test a boolean value whether a Blob with the name and type exists. + Test(t FileType, name string) (bool, error) + + // Remove removes a Blob with type t and name. + Remove(t FileType, name string) error + + // Close the backend + Close() error + + // Load returns the data stored in the backend for h at the given offset + // and saves it in p. Load has the same semantics as io.ReaderAt, except + // that a negative offset is also allowed. In this case it references a + // position relative to the end of the file (similar to Seek()). + Load(h Handle, p []byte, off int64) (int, error) + + // Save stores the data in the backend under the given handle. + Save(h Handle, p []byte) error + + // Stat returns information about the blob identified by h. + Stat(h Handle) (BlobInfo, error) + + // List returns a channel that yields all names of blobs of type t in an + // arbitrary order. A goroutine is started for this. If the channel done is + // closed, sending stops. + List(t FileType, done <-chan struct{}) <-chan string + + // Delete the complete repository. + Delete() error +} + +// BlobInfo is returned by Stat() and contains information about a stored blob. +type BlobInfo struct { + Size int64 +} diff --git a/src/restic/backend/mem/mem_backend.go b/src/restic/backend/mem/mem_backend.go index 239e2c899..339d86c5d 100644 --- a/src/restic/backend/mem/mem_backend.go +++ b/src/restic/backend/mem/mem_backend.go @@ -17,13 +17,14 @@ type entry struct { type memMap map[entry][]byte +// make sure that MemoryBackend implements backend.Backend +var _ backend.Backend = &MemoryBackend{} + // MemoryBackend is a mock backend that uses a map for storing all data in // memory. This should only be used for tests. 
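// A minimal test sketch (not part of this patch) for the in-memory backend;
// it assumes the testing, restic/backend and restic/backend/mem imports and
// the backend.Handle form used elsewhere in this series. The backend needs no
// setup or cleanup, which is what makes it useful in tests.
func TestMemoryBackendRoundtrip(t *testing.T) {
	be := mem.New()

	h := backend.Handle{Type: backend.Data, Name: "example"}
	if err := be.Save(h, []byte("some data")); err != nil {
		t.Fatal(err)
	}

	buf := make([]byte, 9)
	if _, err := be.Load(h, buf, 0); err != nil {
		t.Fatal(err)
	}
	// buf now holds "some data"; Delete() would reset the backend completely.
}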
type MemoryBackend struct { data memMap m sync.Mutex - - backend.MockBackend } // New returns a new backend that saves all data in a map in memory. @@ -32,60 +33,13 @@ func New() *MemoryBackend { data: make(memMap), } - be.MockBackend.TestFn = func(t backend.Type, name string) (bool, error) { - return memTest(be, t, name) - } - - be.MockBackend.LoadFn = func(h backend.Handle, p []byte, off int64) (int, error) { - return memLoad(be, h, p, off) - } - - be.MockBackend.SaveFn = func(h backend.Handle, p []byte) error { - return memSave(be, h, p) - } - - be.MockBackend.StatFn = func(h backend.Handle) (backend.BlobInfo, error) { - return memStat(be, h) - } - - be.MockBackend.RemoveFn = func(t backend.Type, name string) error { - return memRemove(be, t, name) - } - - be.MockBackend.ListFn = func(t backend.Type, done <-chan struct{}) <-chan string { - return memList(be, t, done) - } - - be.MockBackend.DeleteFn = func() error { - be.m.Lock() - defer be.m.Unlock() - - be.data = make(memMap) - return nil - } - - be.MockBackend.LocationFn = func() string { - return "Memory Backend" - } - debug.Log("MemoryBackend.New", "created new memory backend") return be } -func (be *MemoryBackend) insert(t backend.Type, name string, data []byte) error { - be.m.Lock() - defer be.m.Unlock() - - if _, ok := be.data[entry{t, name}]; ok { - return errors.New("already present") - } - - be.data[entry{t, name}] = data - return nil -} - -func memTest(be *MemoryBackend, t backend.Type, name string) (bool, error) { +// Test returns whether a file exists. +func (be *MemoryBackend) Test(t backend.Type, name string) (bool, error) { be.m.Lock() defer be.m.Unlock() @@ -98,7 +52,8 @@ func memTest(be *MemoryBackend, t backend.Type, name string) (bool, error) { return false, nil } -func memLoad(be *MemoryBackend, h backend.Handle, p []byte, off int64) (int, error) { +// Load reads data from the backend. +func (be *MemoryBackend) Load(h backend.Handle, p []byte, off int64) (int, error) { if err := h.Valid(); err != nil { return 0, err } @@ -137,7 +92,8 @@ func memLoad(be *MemoryBackend, h backend.Handle, p []byte, off int64) (int, err return n, nil } -func memSave(be *MemoryBackend, h backend.Handle, p []byte) error { +// Save adds new Data to the backend. +func (be *MemoryBackend) Save(h backend.Handle, p []byte) error { if err := h.Valid(); err != nil { return err } @@ -161,7 +117,8 @@ func memSave(be *MemoryBackend, h backend.Handle, p []byte) error { return nil } -func memStat(be *MemoryBackend, h backend.Handle) (backend.BlobInfo, error) { +// Stat returns information about a file in the backend. +func (be *MemoryBackend) Stat(h backend.Handle) (backend.BlobInfo, error) { be.m.Lock() defer be.m.Unlock() @@ -183,7 +140,8 @@ func memStat(be *MemoryBackend, h backend.Handle) (backend.BlobInfo, error) { return backend.BlobInfo{Size: int64(len(e))}, nil } -func memRemove(be *MemoryBackend, t backend.Type, name string) error { +// Remove deletes a file from the backend. +func (be *MemoryBackend) Remove(t backend.Type, name string) error { be.m.Lock() defer be.m.Unlock() @@ -198,7 +156,8 @@ func memRemove(be *MemoryBackend, t backend.Type, name string) error { return nil } -func memList(be *MemoryBackend, t backend.Type, done <-chan struct{}) <-chan string { +// List returns a channel which yields entries from the backend. 
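// A usage sketch (not part of this patch) for the channel-based listing API
// described above: closing done tells the sending goroutine to stop, so a
// caller can abort a listing early without leaking it.
func exampleListSnapshots(be backend.Backend) []string {
	done := make(chan struct{})
	defer close(done)

	var names []string
	for name := range be.List(backend.Snapshot, done) {
		names = append(names, name)
		if len(names) == 10 {
			break // done is closed by the deferred close, so the sender stops
		}
	}
	return names
}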
+func (be *MemoryBackend) List(t backend.Type, done <-chan struct{}) <-chan string { be.m.Lock() defer be.m.Unlock() @@ -227,3 +186,22 @@ func memList(be *MemoryBackend, t backend.Type, done <-chan struct{}) <-chan str return ch } + +// Location returns the location of the backend (RAM). +func (be *MemoryBackend) Location() string { + return "RAM" +} + +// Delete removes all data in the backend. +func (be *MemoryBackend) Delete() error { + be.m.Lock() + defer be.m.Unlock() + + be.data = make(memMap) + return nil +} + +// Close closes the backend. +func (be *MemoryBackend) Close() error { + return nil +} diff --git a/src/restic/backend/mock_backend.go b/src/restic/backend/mock_backend.go deleted file mode 100644 index 70429acfd..000000000 --- a/src/restic/backend/mock_backend.go +++ /dev/null @@ -1,103 +0,0 @@ -package backend - -import "github.com/pkg/errors" - -// MockBackend implements a backend whose functions can be specified. This -// should only be used for tests. -type MockBackend struct { - CloseFn func() error - LoadFn func(h Handle, p []byte, off int64) (int, error) - SaveFn func(h Handle, p []byte) error - StatFn func(h Handle) (BlobInfo, error) - ListFn func(Type, <-chan struct{}) <-chan string - RemoveFn func(Type, string) error - TestFn func(Type, string) (bool, error) - DeleteFn func() error - LocationFn func() string -} - -// Close the backend. -func (m *MockBackend) Close() error { - if m.CloseFn == nil { - return nil - } - - return m.CloseFn() -} - -// Location returns a location string. -func (m *MockBackend) Location() string { - if m.LocationFn == nil { - return "" - } - - return m.LocationFn() -} - -// Load loads data from the backend. -func (m *MockBackend) Load(h Handle, p []byte, off int64) (int, error) { - if m.LoadFn == nil { - return 0, errors.New("not implemented") - } - - return m.LoadFn(h, p, off) -} - -// Save data in the backend. -func (m *MockBackend) Save(h Handle, p []byte) error { - if m.SaveFn == nil { - return errors.New("not implemented") - } - - return m.SaveFn(h, p) -} - -// Stat an object in the backend. -func (m *MockBackend) Stat(h Handle) (BlobInfo, error) { - if m.StatFn == nil { - return BlobInfo{}, errors.New("not implemented") - } - - return m.StatFn(h) -} - -// List items of type t. -func (m *MockBackend) List(t Type, done <-chan struct{}) <-chan string { - if m.ListFn == nil { - ch := make(chan string) - close(ch) - return ch - } - - return m.ListFn(t, done) -} - -// Remove data from the backend. -func (m *MockBackend) Remove(t Type, name string) error { - if m.RemoveFn == nil { - return errors.New("not implemented") - } - - return m.RemoveFn(t, name) -} - -// Test for the existence of a specific item. -func (m *MockBackend) Test(t Type, name string) (bool, error) { - if m.TestFn == nil { - return false, errors.New("not implemented") - } - - return m.TestFn(t, name) -} - -// Delete all data. -func (m *MockBackend) Delete() error { - if m.DeleteFn == nil { - return errors.New("not implemented") - } - - return m.DeleteFn() -} - -// Make sure that MockBackend implements the backend interface. 
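// The deleted line below is the usual Go idiom for a compile-time interface
// check: it costs nothing at runtime, but the build fails as soon as the type
// stops satisfying the interface. The same guard reappears in this series as
// "var _ backend.Backend = &MemoryBackend{}" in the memory backend and
// "var _ restic.Backend = &Backend{}" in the new mock package.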
-var _ Backend = &MockBackend{} diff --git a/src/restic/checker/checker.go b/src/restic/checker/checker.go index 1755bd3ac..51b473641 100644 --- a/src/restic/checker/checker.go +++ b/src/restic/checker/checker.go @@ -581,7 +581,7 @@ func (c *Checker) checkTree(id backend.ID, tree *restic.Tree) (errs []error) { var blobs []backend.ID for _, node := range tree.Nodes { - switch node.Type { + switch node.FileType { case "file": if node.Content == nil { errs = append(errs, Error{TreeID: id, Err: errors.Errorf("file %q has nil blob list", node.Name)}) @@ -609,7 +609,7 @@ func (c *Checker) checkTree(id backend.ID, tree *restic.Tree) (errs []error) { // nothing to check default: - errs = append(errs, Error{TreeID: id, Err: errors.Errorf("node %q with invalid type %q", node.Name, node.Type)}) + errs = append(errs, Error{TreeID: id, Err: errors.Errorf("node %q with invalid type %q", node.Name, node.FileType)}) } if node.Name == "" { diff --git a/src/restic/find.go b/src/restic/find.go index 63c8bd813..067754e73 100644 --- a/src/restic/find.go +++ b/src/restic/find.go @@ -18,7 +18,7 @@ func FindUsedBlobs(repo *repository.Repository, treeID backend.ID, blobs pack.Bl } for _, node := range tree.Nodes { - switch node.Type { + switch node.FileType { case "file": for _, blob := range node.Content { blobs.Insert(pack.Handle{ID: blob, Type: pack.Data}) diff --git a/src/restic/fuse/dir.go b/src/restic/fuse/dir.go index a89617e5f..b553da1ad 100644 --- a/src/restic/fuse/dir.go +++ b/src/restic/fuse/dir.go @@ -51,7 +51,7 @@ func newDir(repo *repository.Repository, node *restic.Node, ownerIsRoot bool) (* // replaceSpecialNodes replaces nodes with name "." and "/" by their contents. // Otherwise, the node is returned. func replaceSpecialNodes(repo *repository.Repository, node *restic.Node) ([]*restic.Node, error) { - if node.Type != "dir" || node.Subtree == nil { + if node.FileType != "dir" || node.Subtree == nil { return []*restic.Node{node}, nil } @@ -124,7 +124,7 @@ func (d *dir) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) { for _, node := range d.items { var typ fuse.DirentType - switch node.Type { + switch node.FileType { case "dir": typ = fuse.DT_Dir case "file": @@ -150,7 +150,7 @@ func (d *dir) Lookup(ctx context.Context, name string) (fs.Node, error) { debug.Log("dir.Lookup", " Lookup(%v) -> not found", name) return nil, fuse.ENOENT } - switch node.Type { + switch node.FileType { case "dir": return newDir(d.repo, node, d.ownerIsRoot) case "file": @@ -158,7 +158,7 @@ func (d *dir) Lookup(ctx context.Context, name string) (fs.Node, error) { case "symlink": return newLink(d.repo, node, d.ownerIsRoot) default: - debug.Log("dir.Lookup", " node %v has unknown type %v", name, node.Type) + debug.Log("dir.Lookup", " node %v has unknown type %v", name, node.FileType) return nil, fuse.ENOENT } } diff --git a/src/restic/handle.go b/src/restic/handle.go new file mode 100644 index 000000000..dd7439d39 --- /dev/null +++ b/src/restic/handle.go @@ -0,0 +1,49 @@ +package restic + +import ( + "fmt" + + "github.com/pkg/errors" +) + +// Handle is used to store and access data in a backend. +type Handle struct { + FileType FileType + Name string +} + +func (h Handle) String() string { + name := h.Name + if len(name) > 10 { + name = name[:10] + } + return fmt.Sprintf("<%s/%s>", h.FileType, name) +} + +// Valid returns an error if h is not valid. 
+func (h Handle) Valid() error { + if h.FileType == "" { + return errors.New("type is empty") + } + + switch h.FileType { + case DataFile: + case KeyFile: + case LockFile: + case SnapshotFile: + case IndexFile: + case ConfigFile: + default: + return errors.Errorf("invalid Type %q", h.FileType) + } + + if h.FileType == ConfigFile { + return nil + } + + if h.Name == "" { + return errors.New("invalid Name") + } + + return nil +} diff --git a/src/restic/handle_test.go b/src/restic/handle_test.go new file mode 100644 index 000000000..d5044558e --- /dev/null +++ b/src/restic/handle_test.go @@ -0,0 +1,28 @@ +package restic + +import "testing" + +var handleTests = []struct { + h Handle + valid bool +}{ + {Handle{Name: "foo"}, false}, + {Handle{FileType: "foobar"}, false}, + {Handle{FileType: ConfigFile, Name: ""}, true}, + {Handle{FileType: DataFile, Name: ""}, false}, + {Handle{FileType: "", Name: "x"}, false}, + {Handle{FileType: LockFile, Name: "010203040506"}, true}, +} + +func TestHandleValid(t *testing.T) { + for i, test := range handleTests { + err := test.h.Valid() + if err != nil && test.valid { + t.Errorf("test %v failed: error returned for valid handle: %v", i, err) + } + + if !test.valid && err == nil { + t.Errorf("test %v failed: expected error for invalid handle not found", i) + } + } +} diff --git a/src/restic/mock/backend.go b/src/restic/mock/backend.go new file mode 100644 index 000000000..b27b64a1c --- /dev/null +++ b/src/restic/mock/backend.go @@ -0,0 +1,106 @@ +package mock + +import ( + "restic" + + "github.com/pkg/errors" +) + +// Backend implements a mock backend. +type Backend struct { + CloseFn func() error + LoadFn func(h restic.Handle, p []byte, off int64) (int, error) + SaveFn func(h restic.Handle, p []byte) error + StatFn func(h restic.Handle) (restic.BlobInfo, error) + ListFn func(restic.FileType, <-chan struct{}) <-chan string + RemoveFn func(restic.FileType, string) error + TestFn func(restic.FileType, string) (bool, error) + DeleteFn func() error + LocationFn func() string +} + +// Close the backend. +func (m *Backend) Close() error { + if m.CloseFn == nil { + return nil + } + + return m.CloseFn() +} + +// Location returns a location string. +func (m *Backend) Location() string { + if m.LocationFn == nil { + return "" + } + + return m.LocationFn() +} + +// Load loads data from the backend. +func (m *Backend) Load(h restic.Handle, p []byte, off int64) (int, error) { + if m.LoadFn == nil { + return 0, errors.New("not implemented") + } + + return m.LoadFn(h, p, off) +} + +// Save data in the backend. +func (m *Backend) Save(h restic.Handle, p []byte) error { + if m.SaveFn == nil { + return errors.New("not implemented") + } + + return m.SaveFn(h, p) +} + +// Stat an object in the backend. +func (m *Backend) Stat(h restic.Handle) (restic.BlobInfo, error) { + if m.StatFn == nil { + return restic.BlobInfo{}, errors.New("not implemented") + } + + return m.StatFn(h) +} + +// List items of type t. +func (m *Backend) List(t restic.FileType, done <-chan struct{}) <-chan string { + if m.ListFn == nil { + ch := make(chan string) + close(ch) + return ch + } + + return m.ListFn(t, done) +} + +// Remove data from the backend. +func (m *Backend) Remove(t restic.FileType, name string) error { + if m.RemoveFn == nil { + return errors.New("not implemented") + } + + return m.RemoveFn(t, name) +} + +// Test for the existence of a specific item. 
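// A sketch (not part of this patch) of how tests use this mock: only the
// function fields a test cares about are set, and every call without a hook
// returns the "not implemented" error seen in the methods here. The
// forgetfulBackend in the archiver duplication test is built exactly this way.
func exampleMock() restic.Backend {
	be := &Backend{}

	be.TestFn = func(t restic.FileType, name string) (bool, error) {
		return false, nil // pretend nothing exists yet
	}
	be.SaveFn = func(h restic.Handle, p []byte) error {
		return nil // accept and discard every write
	}

	return be
}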
+func (m *Backend) Test(t restic.FileType, name string) (bool, error) { + if m.TestFn == nil { + return false, errors.New("not implemented") + } + + return m.TestFn(t, name) +} + +// Delete all data. +func (m *Backend) Delete() error { + if m.DeleteFn == nil { + return errors.New("not implemented") + } + + return m.DeleteFn() +} + +// Make sure that Backend implements the backend interface. +var _ restic.Backend = &Backend{} diff --git a/src/restic/node.go b/src/restic/node.go index 37ef5e04c..72565342f 100644 --- a/src/restic/node.go +++ b/src/restic/node.go @@ -24,7 +24,7 @@ import ( // Node is a file, directory or other item in a backup. type Node struct { Name string `json:"name"` - Type string `json:"type"` + FileType string `json:"type"` Mode os.FileMode `json:"mode,omitempty"` ModTime time.Time `json:"mtime,omitempty"` AccessTime time.Time `json:"atime,omitempty"` @@ -51,7 +51,7 @@ type Node struct { } func (node Node) String() string { - switch node.Type { + switch node.FileType { case "file": return fmt.Sprintf("%s %5d %5d %6d %s %s", node.Mode, node.UID, node.GID, node.Size, node.ModTime, node.Name) @@ -60,7 +60,7 @@ func (node Node) String() string { node.Mode|os.ModeDir, node.UID, node.GID, node.Size, node.ModTime, node.Name) } - return fmt.Sprintf("", node.Type, node.Name) + return fmt.Sprintf("", node.FileType, node.Name) } func (node Node) Tree() *Tree { @@ -77,8 +77,8 @@ func NodeFromFileInfo(path string, fi os.FileInfo) (*Node, error) { ModTime: fi.ModTime(), } - node.Type = nodeTypeFromFileInfo(fi) - if node.Type == "file" { + node.FileType = nodeTypeFromFileInfo(fi) + if node.FileType == "file" { node.Size = uint64(fi.Size()) } @@ -111,7 +111,7 @@ func nodeTypeFromFileInfo(fi os.FileInfo) string { func (node *Node) CreateAt(path string, repo *repository.Repository) error { debug.Log("Node.CreateAt", "create node %v at %v", node.Name, path) - switch node.Type { + switch node.FileType { case "dir": if err := node.createDirAt(path); err != nil { return err @@ -139,7 +139,7 @@ func (node *Node) CreateAt(path string, repo *repository.Repository) error { case "socket": return nil default: - return errors.Errorf("filetype %q not implemented!\n", node.Type) + return errors.Errorf("filetype %q not implemented!\n", node.FileType) } err := node.restoreMetadata(path) @@ -158,14 +158,14 @@ func (node Node) restoreMetadata(path string) error { return errors.Wrap(err, "Lchown") } - if node.Type != "symlink" { + if node.FileType != "symlink" { err = fs.Chmod(path, node.Mode) if err != nil { return errors.Wrap(err, "Chmod") } } - if node.Type != "dir" { + if node.FileType != "dir" { err = node.RestoreTimestamps(path) if err != nil { debug.Log("Node.restoreMetadata", "error restoring timestamps for dir %v: %v", path, err) @@ -182,7 +182,7 @@ func (node Node) RestoreTimestamps(path string) error { syscall.NsecToTimespec(node.ModTime.UnixNano()), } - if node.Type == "symlink" { + if node.FileType == "symlink" { return node.restoreSymlinkTimestamps(path, utimes) } @@ -287,7 +287,7 @@ func (node Node) Equals(other Node) bool { if node.Name != other.Name { return false } - if node.Type != other.Type { + if node.FileType != other.FileType { return false } if node.Mode != other.Mode { @@ -375,13 +375,13 @@ func (node Node) sameContent(other Node) bool { } func (node *Node) isNewer(path string, fi os.FileInfo) bool { - if node.Type != "file" { + if node.FileType != "file" { debug.Log("node.isNewer", "node %v is newer: not file", path) return true } tpe := nodeTypeFromFileInfo(fi) - if node.Name 
!= fi.Name() || node.Type != tpe { + if node.Name != fi.Name() || node.FileType != tpe { debug.Log("node.isNewer", "node %v is newer: name or type changed", path) return true } @@ -469,7 +469,7 @@ func (node *Node) fillExtra(path string, fi os.FileInfo) error { return err } - switch node.Type { + switch node.FileType { case "file": node.Size = uint64(stat.size()) node.Links = uint64(stat.nlink()) @@ -484,7 +484,7 @@ func (node *Node) fillExtra(path string, fi os.FileInfo) error { case "fifo": case "socket": default: - err = errors.Errorf("invalid node type %q", node.Type) + err = errors.Errorf("invalid node type %q", node.FileType) } return err diff --git a/src/restic/node_test.go b/src/restic/node_test.go index e3b458c47..a1d2be8e8 100644 --- a/src/restic/node_test.go +++ b/src/restic/node_test.go @@ -74,7 +74,7 @@ func parseTime(s string) time.Time { var nodeTests = []restic.Node{ restic.Node{ Name: "testFile", - Type: "file", + FileType: "file", Content: []backend.ID{}, UID: uint32(os.Getuid()), GID: uint32(os.Getgid()), @@ -85,7 +85,7 @@ var nodeTests = []restic.Node{ }, restic.Node{ Name: "testSuidFile", - Type: "file", + FileType: "file", Content: []backend.ID{}, UID: uint32(os.Getuid()), GID: uint32(os.Getgid()), @@ -96,7 +96,7 @@ var nodeTests = []restic.Node{ }, restic.Node{ Name: "testSuidFile2", - Type: "file", + FileType: "file", Content: []backend.ID{}, UID: uint32(os.Getuid()), GID: uint32(os.Getgid()), @@ -107,7 +107,7 @@ var nodeTests = []restic.Node{ }, restic.Node{ Name: "testSticky", - Type: "file", + FileType: "file", Content: []backend.ID{}, UID: uint32(os.Getuid()), GID: uint32(os.Getgid()), @@ -118,7 +118,7 @@ var nodeTests = []restic.Node{ }, restic.Node{ Name: "testDir", - Type: "dir", + FileType: "dir", Subtree: nil, UID: uint32(os.Getuid()), GID: uint32(os.Getgid()), @@ -129,7 +129,7 @@ var nodeTests = []restic.Node{ }, restic.Node{ Name: "testSymlink", - Type: "symlink", + FileType: "symlink", LinkTarget: "invalid", UID: uint32(os.Getuid()), GID: uint32(os.Getgid()), @@ -156,10 +156,10 @@ func TestNodeRestoreAt(t *testing.T) { nodePath := filepath.Join(tempdir, test.Name) OK(t, test.CreateAt(nodePath, nil)) - if test.Type == "symlink" && runtime.GOOS == "windows" { + if test.FileType == "symlink" && runtime.GOOS == "windows" { continue } - if test.Type == "dir" { + if test.FileType == "dir" { OK(t, test.RestoreTimestamps(nodePath)) } @@ -170,25 +170,25 @@ func TestNodeRestoreAt(t *testing.T) { OK(t, err) Assert(t, test.Name == n2.Name, - "%v: name doesn't match (%v != %v)", test.Type, test.Name, n2.Name) - Assert(t, test.Type == n2.Type, - "%v: type doesn't match (%v != %v)", test.Type, test.Type, n2.Type) + "%v: name doesn't match (%v != %v)", test.FileType, test.Name, n2.Name) + Assert(t, test.FileType == n2.FileType, + "%v: type doesn't match (%v != %v)", test.FileType, test.FileType, n2.FileType) Assert(t, test.Size == n2.Size, "%v: size doesn't match (%v != %v)", test.Size, test.Size, n2.Size) if runtime.GOOS != "windows" { Assert(t, test.UID == n2.UID, - "%v: UID doesn't match (%v != %v)", test.Type, test.UID, n2.UID) + "%v: UID doesn't match (%v != %v)", test.FileType, test.UID, n2.UID) Assert(t, test.GID == n2.GID, - "%v: GID doesn't match (%v != %v)", test.Type, test.GID, n2.GID) - if test.Type != "symlink" { + "%v: GID doesn't match (%v != %v)", test.FileType, test.GID, n2.GID) + if test.FileType != "symlink" { Assert(t, test.Mode == n2.Mode, - "%v: mode doesn't match (0%o != 0%o)", test.Type, test.Mode, n2.Mode) + "%v: mode doesn't match (0%o != 
0%o)", test.FileType, test.Mode, n2.Mode) } } - AssertFsTimeEqual(t, "AccessTime", test.Type, test.AccessTime, n2.AccessTime) - AssertFsTimeEqual(t, "ModTime", test.Type, test.ModTime, n2.ModTime) + AssertFsTimeEqual(t, "AccessTime", test.FileType, test.AccessTime, n2.AccessTime) + AssertFsTimeEqual(t, "ModTime", test.FileType, test.ModTime, n2.ModTime) } } diff --git a/src/restic/repository.go b/src/restic/repository.go new file mode 100644 index 000000000..cb95463f0 --- /dev/null +++ b/src/restic/repository.go @@ -0,0 +1,13 @@ +package restic + +import "restic/repository" + +// Repository stores data in a backend. It provides high-level functions and +// transparently encrypts/decrypts data. +type Repository interface { + + // Backend returns the backend used by the repository + Backend() Backend + + SetIndex(*repository.MasterIndex) +} diff --git a/src/restic/restorer.go b/src/restic/restorer.go index 74cdfc34d..36ea28f87 100644 --- a/src/restic/restorer.go +++ b/src/restic/restorer.go @@ -58,7 +58,7 @@ func (res *Restorer) restoreTo(dst string, dir string, treeID backend.ID) error } } - if node.Type == "dir" { + if node.FileType == "dir" { if node.Subtree == nil { return errors.Errorf("Dir without subtree in tree %v", treeID.Str()) } diff --git a/src/restic/testing.go b/src/restic/testing.go index 78783ee44..0b6ed6b49 100644 --- a/src/restic/testing.go +++ b/src/restic/testing.go @@ -110,10 +110,10 @@ func (fs fakeFileSystem) saveTree(seed int64, depth int) backend.ID { id := fs.saveTree(treeSeed, depth-1) node := &Node{ - Name: fmt.Sprintf("dir-%v", treeSeed), - Type: "dir", - Mode: 0755, - Subtree: &id, + Name: fmt.Sprintf("dir-%v", treeSeed), + FileType: "dir", + Mode: 0755, + Subtree: &id, } tree.Nodes = append(tree.Nodes, node) @@ -124,10 +124,10 @@ func (fs fakeFileSystem) saveTree(seed int64, depth int) backend.ID { fileSize := (maxFileSize / maxSeed) * fileSeed node := &Node{ - Name: fmt.Sprintf("file-%v", fileSeed), - Type: "file", - Mode: 0644, - Size: uint64(fileSize), + Name: fmt.Sprintf("file-%v", fileSeed), + FileType: "file", + Mode: 0644, + Size: uint64(fileSize), } node.Content = fs.saveFile(fakeFile(fs.t, fileSeed, fileSize)) @@ -195,11 +195,11 @@ func TestCreateSnapshot(t testing.TB, repo *repository.Repository, at time.Time, } // TestResetRepository removes all packs and indexes from the repository. -func TestResetRepository(t testing.TB, repo *repository.Repository) { +func TestResetRepository(t testing.TB, repo Repository) { done := make(chan struct{}) defer close(done) - for _, tpe := range []backend.Type{backend.Snapshot, backend.Index, backend.Data} { + for _, tpe := range []FileType{SnapshotFile, IndexFile, DataFile} { for id := range repo.Backend().List(tpe, done) { err := repo.Backend().Remove(tpe, id) if err != nil { diff --git a/src/restic/tree.go b/src/restic/tree.go index 9bfcfd7ee..3da5cde22 100644 --- a/src/restic/tree.go +++ b/src/restic/tree.go @@ -97,7 +97,7 @@ func (t Tree) Find(name string) (*Node, error) { // Subtrees returns a slice of all subtree IDs of the tree. 
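// A small sketch (not part of this patch): Subtrees only collects the IDs of
// the direct child directories, so walking a whole snapshot still means
// loading each subtree in turn. loadTree is a hypothetical stand-in for
// whatever tree loader the caller has.
func exampleCountTrees(loadTree func(backend.ID) (*Tree, error), id backend.ID) (int, error) {
	tree, err := loadTree(id)
	if err != nil {
		return 0, err
	}

	count := 1 // this tree itself
	for _, subtreeID := range tree.Subtrees() {
		n, err := exampleCountTrees(loadTree, subtreeID)
		if err != nil {
			return 0, err
		}
		count += n
	}
	return count, nil
}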
func (t Tree) Subtrees() (trees backend.IDs) { for _, node := range t.Nodes { - if node.Type == "dir" && node.Subtree != nil { + if node.FileType == "dir" && node.Subtree != nil { trees = append(trees, *node.Subtree) } } diff --git a/src/restic/walk.go b/src/restic/walk.go index 2978e8500..a50438f7c 100644 --- a/src/restic/walk.go +++ b/src/restic/walk.go @@ -73,7 +73,7 @@ func (tw *TreeWalker) walk(path string, tree *Tree, done chan struct{}) { // load all subtrees in parallel results := make([]<-chan loadTreeResult, len(tree.Nodes)) for i, node := range tree.Nodes { - if node.Type == "dir" { + if node.FileType == "dir" { resCh := make(chan loadTreeResult, 1) tw.ch <- loadTreeJob{ id: *node.Subtree, @@ -88,7 +88,7 @@ func (tw *TreeWalker) walk(path string, tree *Tree, done chan struct{}) { p := filepath.Join(path, node.Name) var job WalkTreeJob - if node.Type == "dir" { + if node.FileType == "dir" { if results[i] == nil { panic("result chan should not be nil") } From 90da66261ad16b7864b698a7c195554557e354ec Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Wed, 31 Aug 2016 19:18:51 +0200 Subject: [PATCH 04/40] Copy ID from backend to restic --- src/restic/id.go | 109 +++++++++++++++++++++++++++++++++++++ src/restic/id_int_test.go | 16 ++++++ src/restic/id_test.go | 60 +++++++++++++++++++++ src/restic/ids.go | 69 ++++++++++++++++++++++++ src/restic/ids_test.go | 55 +++++++++++++++++++ src/restic/idset.go | 111 ++++++++++++++++++++++++++++++++++++++ src/restic/idset_test.go | 32 +++++++++++ src/restic/testing.go | 10 ++++ 8 files changed, 462 insertions(+) create mode 100644 src/restic/id.go create mode 100644 src/restic/id_int_test.go create mode 100644 src/restic/id_test.go create mode 100644 src/restic/ids.go create mode 100644 src/restic/ids_test.go create mode 100644 src/restic/idset.go create mode 100644 src/restic/idset_test.go diff --git a/src/restic/id.go b/src/restic/id.go new file mode 100644 index 000000000..2e9308888 --- /dev/null +++ b/src/restic/id.go @@ -0,0 +1,109 @@ +package restic + +import ( + "bytes" + "crypto/sha256" + "encoding/hex" + "encoding/json" + + "github.com/pkg/errors" +) + +// Hash returns the ID for data. +func Hash(data []byte) ID { + return sha256.Sum256(data) +} + +// IDSize contains the size of an ID, in bytes. +const IDSize = sha256.Size + +// ID references content within a repository. +type ID [IDSize]byte + +// ParseID converts the given string to an ID. +func ParseID(s string) (ID, error) { + b, err := hex.DecodeString(s) + + if err != nil { + return ID{}, errors.Wrap(err, "hex.DecodeString") + } + + if len(b) != IDSize { + return ID{}, errors.New("invalid length for hash") + } + + id := ID{} + copy(id[:], b) + + return id, nil +} + +func (id ID) String() string { + return hex.EncodeToString(id[:]) +} + +const shortStr = 4 + +// Str returns the shortened string version of id. +func (id *ID) Str() string { + if id == nil { + return "[nil]" + } + + if id.IsNull() { + return "[null]" + } + + return hex.EncodeToString(id[:shortStr]) +} + +// IsNull returns true iff id only consists of null bytes. +func (id ID) IsNull() bool { + var nullID ID + + return id == nullID +} + +// Equal compares an ID to another other. +func (id ID) Equal(other ID) bool { + return id == other +} + +// EqualString compares this ID to another one, given as a string. 
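// A short sketch (not part of this patch) tying the ID helpers together: an
// ID is the SHA-256 of the content, so hashing, printing, re-parsing and
// comparing all round-trip.
func exampleIDs() error {
	id := Hash([]byte("foobar")) // content-addressed: same data, same ID

	parsed, err := ParseID(id.String())
	if err != nil {
		return err
	}
	if !id.Equal(parsed) {
		return errors.New("round-trip through the hex form changed the ID")
	}

	ok, err := id.EqualString(id.String())
	if err != nil || !ok {
		return errors.New("EqualString should accept the ID's own hex form")
	}
	return nil
}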
+func (id ID) EqualString(other string) (bool, error) { + s, err := hex.DecodeString(other) + if err != nil { + return false, errors.Wrap(err, "hex.DecodeString") + } + + id2 := ID{} + copy(id2[:], s) + + return id == id2, nil +} + +// Compare compares this ID to another one, returning -1, 0, or 1. +func (id ID) Compare(other ID) int { + return bytes.Compare(other[:], id[:]) +} + +// MarshalJSON returns the JSON encoding of id. +func (id ID) MarshalJSON() ([]byte, error) { + return json.Marshal(id.String()) +} + +// UnmarshalJSON parses the JSON-encoded data and stores the result in id. +func (id *ID) UnmarshalJSON(b []byte) error { + var s string + err := json.Unmarshal(b, &s) + if err != nil { + return errors.Wrap(err, "Unmarshal") + } + + _, err = hex.Decode(id[:], []byte(s)) + if err != nil { + return errors.Wrap(err, "hex.Decode") + } + + return nil +} diff --git a/src/restic/id_int_test.go b/src/restic/id_int_test.go new file mode 100644 index 000000000..a60a11b89 --- /dev/null +++ b/src/restic/id_int_test.go @@ -0,0 +1,16 @@ +package restic + +import "testing" + +func TestIDMethods(t *testing.T) { + var id ID + + if id.Str() != "[null]" { + t.Errorf("ID.Str() returned wrong value, want %v, got %v", "[null]", id.Str()) + } + + var pid *ID + if pid.Str() != "[nil]" { + t.Errorf("ID.Str() returned wrong value, want %v, got %v", "[nil]", pid.Str()) + } +} diff --git a/src/restic/id_test.go b/src/restic/id_test.go new file mode 100644 index 000000000..2e9634a19 --- /dev/null +++ b/src/restic/id_test.go @@ -0,0 +1,60 @@ +package restic + +import ( + "reflect" + "testing" +) + +var TestStrings = []struct { + id string + data string +}{ + {"c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2", "foobar"}, + {"248d6a61d20638b8e5c026930c3e6039a33ce45964ff2167f6ecedd419db06c1", "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq"}, + {"cc5d46bdb4991c6eae3eb739c9c8a7a46fe9654fab79c47b4fe48383b5b25e1c", "foo/bar"}, + {"4e54d2c721cbdb730f01b10b62dec622962b36966ec685880effa63d71c808f2", "foo/../../baz"}, +} + +func TestID(t *testing.T) { + for _, test := range TestStrings { + id, err := ParseID(test.id) + if err != nil { + t.Error(err) + } + + id2, err := ParseID(test.id) + if err != nil { + t.Error(err) + } + if !id.Equal(id2) { + t.Errorf("ID.Equal() does not work as expected") + } + + ret, err := id.EqualString(test.id) + if err != nil { + t.Error(err) + } + if !ret { + t.Error("ID.EqualString() returned wrong value") + } + + // test json marshalling + buf, err := id.MarshalJSON() + if err != nil { + t.Error(err) + } + want := `"` + test.id + `"` + if string(buf) != want { + t.Errorf("string comparison failed, wanted %q, got %q", want, string(buf)) + } + + var id3 ID + err = id3.UnmarshalJSON(buf) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(id, id3) { + t.Error("ids are not equal") + } + } +} diff --git a/src/restic/ids.go b/src/restic/ids.go new file mode 100644 index 000000000..cc5ad18da --- /dev/null +++ b/src/restic/ids.go @@ -0,0 +1,69 @@ +package restic + +import ( + "encoding/hex" + "fmt" +) + +// IDs is an ordered list of IDs that implements sort.Interface. 
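// A usage sketch (not part of this patch, and assuming a "sort" import):
// because IDs satisfies sort.Interface it can be handed straight to
// sort.Sort, while Uniq drops duplicates without reordering anything.
func exampleSortAndUniq(ids IDs) IDs {
	ids = ids.Uniq() // keeps the first occurrence of each ID, order preserved
	sort.Sort(ids)   // ordered via the Len/Less/Swap methods below
	return ids
}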
+type IDs []ID + +func (ids IDs) Len() int { + return len(ids) +} + +func (ids IDs) Less(i, j int) bool { + if len(ids[i]) < len(ids[j]) { + return true + } + + for k, b := range ids[i] { + if b == ids[j][k] { + continue + } + + if b < ids[j][k] { + return true + } + + return false + } + + return false +} + +func (ids IDs) Swap(i, j int) { + ids[i], ids[j] = ids[j], ids[i] +} + +// Uniq returns list without duplicate IDs. The returned list retains the order +// of the original list so that the order of the first occurrence of each ID +// stays the same. +func (ids IDs) Uniq() (list IDs) { + seen := NewIDSet() + + for _, id := range ids { + if seen.Has(id) { + continue + } + + list = append(list, id) + seen.Insert(id) + } + + return list +} + +type shortID ID + +func (id shortID) String() string { + return hex.EncodeToString(id[:shortStr]) +} + +func (ids IDs) String() string { + elements := make([]shortID, 0, len(ids)) + for _, id := range ids { + elements = append(elements, shortID(id)) + } + return fmt.Sprintf("%v", elements) +} diff --git a/src/restic/ids_test.go b/src/restic/ids_test.go new file mode 100644 index 000000000..9ce02607b --- /dev/null +++ b/src/restic/ids_test.go @@ -0,0 +1,55 @@ +package restic + +import ( + "reflect" + "testing" +) + +var uniqTests = []struct { + before, after IDs +}{ + { + IDs{ + TestParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), + TestParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), + TestParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), + }, + IDs{ + TestParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), + TestParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), + }, + }, + { + IDs{ + TestParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), + TestParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), + TestParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), + }, + IDs{ + TestParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), + TestParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), + }, + }, + { + IDs{ + TestParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), + TestParseID("f658198b405d7e80db5ace1980d125c8da62f636b586c46bf81dfb856a49d0c8"), + TestParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), + TestParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), + }, + IDs{ + TestParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), + TestParseID("f658198b405d7e80db5ace1980d125c8da62f636b586c46bf81dfb856a49d0c8"), + TestParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), + }, + }, +} + +func TestUniqIDs(t *testing.T) { + for i, test := range uniqTests { + uniq := test.before.Uniq() + if !reflect.DeepEqual(uniq, test.after) { + t.Errorf("uniqIDs() test %v failed\n wanted: %v\n got: %v", i, test.after, uniq) + } + } +} diff --git a/src/restic/idset.go b/src/restic/idset.go new file mode 100644 index 000000000..c31ca7747 --- /dev/null +++ b/src/restic/idset.go @@ -0,0 +1,111 @@ +package restic + +import "sort" + +// IDSet is a set of IDs. +type IDSet map[ID]struct{} + +// NewIDSet returns a new IDSet, populated with ids. 
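// A small sketch (not part of this patch) of the set operations defined
// below; a typical use is working out which stored blobs are still
// referenced (Intersect) and which are candidates for removal (Sub).
func exampleSets(stored, used IDSet) (keep, drop IDSet) {
	keep = stored.Intersect(used) // present in both sets
	drop = stored.Sub(used)       // stored, but no longer referenced
	return keep, drop
}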
+func NewIDSet(ids ...ID) IDSet { + m := make(IDSet) + for _, id := range ids { + m[id] = struct{}{} + } + + return m +} + +// Has returns true iff id is contained in the set. +func (s IDSet) Has(id ID) bool { + _, ok := s[id] + return ok +} + +// Insert adds id to the set. +func (s IDSet) Insert(id ID) { + s[id] = struct{}{} +} + +// Delete removes id from the set. +func (s IDSet) Delete(id ID) { + delete(s, id) +} + +// List returns a slice of all IDs in the set. +func (s IDSet) List() IDs { + list := make(IDs, 0, len(s)) + for id := range s { + list = append(list, id) + } + + sort.Sort(list) + + return list +} + +// Equals returns true iff s equals other. +func (s IDSet) Equals(other IDSet) bool { + if len(s) != len(other) { + return false + } + + for id := range s { + if _, ok := other[id]; !ok { + return false + } + } + + // length + one-way comparison is sufficient implication of equality + + return true +} + +// Merge adds the blobs in other to the current set. +func (s IDSet) Merge(other IDSet) { + for id := range other { + s.Insert(id) + } +} + +// Intersect returns a new set containing the IDs that are present in both sets. +func (s IDSet) Intersect(other IDSet) (result IDSet) { + result = NewIDSet() + + set1 := s + set2 := other + + // iterate over the smaller set + if len(set2) < len(set1) { + set1, set2 = set2, set1 + } + + for id := range set1 { + if set2.Has(id) { + result.Insert(id) + } + } + + return result +} + +// Sub returns a new set containing all IDs that are present in s but not in +// other. +func (s IDSet) Sub(other IDSet) (result IDSet) { + result = NewIDSet() + for id := range s { + if !other.Has(id) { + result.Insert(id) + } + } + + return result +} + +func (s IDSet) String() string { + str := s.List().String() + if len(str) < 2 { + return "{}" + } + + return "{" + str[1:len(str)-1] + "}" +} diff --git a/src/restic/idset_test.go b/src/restic/idset_test.go new file mode 100644 index 000000000..5525eab79 --- /dev/null +++ b/src/restic/idset_test.go @@ -0,0 +1,32 @@ +package restic + +import ( + "testing" +) + +var idsetTests = []struct { + id ID + seen bool +}{ + {TestParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), false}, + {TestParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), false}, + {TestParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), true}, + {TestParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), true}, + {TestParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), true}, + {TestParseID("f658198b405d7e80db5ace1980d125c8da62f636b586c46bf81dfb856a49d0c8"), false}, + {TestParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), true}, + {TestParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), true}, + {TestParseID("f658198b405d7e80db5ace1980d125c8da62f636b586c46bf81dfb856a49d0c8"), true}, + {TestParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), true}, +} + +func TestIDSet(t *testing.T) { + set := NewIDSet() + for i, test := range idsetTests { + seen := set.Has(test.id) + if seen != test.seen { + t.Errorf("IDSet test %v failed: wanted %v, got %v", i, test.seen, seen) + } + set.Insert(test.id) + } +} diff --git a/src/restic/testing.go b/src/restic/testing.go index 0b6ed6b49..cf2500b17 100644 --- a/src/restic/testing.go +++ b/src/restic/testing.go @@ -210,3 +210,13 @@ func TestResetRepository(t testing.TB, repo Repository) { 
repo.SetIndex(repository.NewMasterIndex()) } + +// TestParseID parses s as a backend.ID and panics if that fails. +func TestParseID(s string) ID { + id, err := ParseID(s) + if err != nil { + panic(err) + } + + return id +} From f0600c1d5f50cfa5d5765b0e9e48d7eaf93fcda8 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Wed, 31 Aug 2016 20:29:54 +0200 Subject: [PATCH 05/40] wip --- src/restic/{ => archiver}/archive_reader.go | 32 +++--- .../{ => archiver}/archive_reader_test.go | 0 src/restic/{ => archiver}/archiver.go | 46 ++++---- .../archiver_duplication_test.go | 0 .../{ => archiver}/archiver_int_test.go | 0 src/restic/{ => archiver}/archiver_test.go | 3 +- src/restic/backend.go | 30 ++--- src/restic/backend/testing.go | 17 --- src/restic/blob.go | 103 ++++++++++++++++++ src/restic/blob_test.go | 41 +++++++ src/restic/{handle.go => file.go} | 13 +++ src/restic/{handle_test.go => file_test.go} | 0 src/restic/find.go | 8 +- src/restic/find_test.go | 7 +- src/restic/lock.go | 44 ++++---- src/restic/lock_test.go | 24 ++-- src/restic/node.go | 42 ++++--- src/restic/{repository => }/rand_reader.go | 2 +- src/restic/repository.go | 46 +++++++- src/restic/repository/blob.go | 47 -------- src/restic/repository/config.go | 8 +- src/restic/repository/config_test.go | 26 ++--- src/restic/repository/index.go | 54 ++++----- src/restic/repository/index_rebuild.go | 10 +- src/restic/repository/index_test.go | 17 +-- src/restic/repository/key.go | 7 +- src/restic/repository/master_index.go | 12 +- src/restic/repository/packer_manager.go | 8 +- src/restic/repository/packer_manager_test.go | 10 +- src/restic/repository/parallel.go | 5 +- src/restic/repository/parallel_test.go | 8 +- src/restic/repository/repack.go | 8 +- src/restic/repository/repack_test.go | 20 ++-- src/restic/repository/repository.go | 89 +++++++-------- src/restic/repository/repository_test.go | 19 ++-- src/restic/repository/testing.go | 6 +- src/restic/restorer.go | 8 +- src/restic/snapshot.go | 49 ++++----- src/restic/testing.go | 63 +++++------ src/restic/testing_test.go | 12 -- src/restic/tree.go | 7 +- src/restic/walk.go | 11 +- 42 files changed, 524 insertions(+), 438 deletions(-) rename src/restic/{ => archiver}/archive_reader.go (71%) rename src/restic/{ => archiver}/archive_reader_test.go (100%) rename src/restic/{ => archiver}/archiver.go (94%) rename src/restic/{ => archiver}/archiver_duplication_test.go (100%) rename src/restic/{ => archiver}/archiver_int_test.go (100%) rename src/restic/{ => archiver}/archiver_test.go (98%) delete mode 100644 src/restic/backend/testing.go create mode 100644 src/restic/blob.go create mode 100644 src/restic/blob_test.go rename src/restic/{handle.go => file.go} (69%) rename src/restic/{handle_test.go => file_test.go} (100%) rename src/restic/{repository => }/rand_reader.go (98%) delete mode 100644 src/restic/repository/blob.go diff --git a/src/restic/archive_reader.go b/src/restic/archiver/archive_reader.go similarity index 71% rename from src/restic/archive_reader.go rename to src/restic/archiver/archive_reader.go index c25cd0996..88b0ba3fa 100644 --- a/src/restic/archive_reader.go +++ b/src/restic/archiver/archive_reader.go @@ -3,10 +3,8 @@ package restic import ( "encoding/json" "io" - "restic/backend" "restic/debug" "restic/pack" - "restic/repository" "time" "github.com/pkg/errors" @@ -14,15 +12,15 @@ import ( ) // saveTreeJSON stores a tree in the repository. 
-func saveTreeJSON(repo *repository.Repository, item interface{}) (backend.ID, error) { +func saveTreeJSON(repo Repository, item interface{}) (ID, error) { data, err := json.Marshal(item) if err != nil { - return backend.ID{}, errors.Wrap(err, "") + return ID{}, errors.Wrap(err, "") } data = append(data, '\n') // check if tree has been saved before - id := backend.Hash(data) + id := Hash(data) if repo.Index().Has(id, pack.Tree) { return id, nil } @@ -32,19 +30,19 @@ func saveTreeJSON(repo *repository.Repository, item interface{}) (backend.ID, er // ArchiveReader reads from the reader and archives the data. Returned is the // resulting snapshot and its ID. -func ArchiveReader(repo *repository.Repository, p *Progress, rd io.Reader, name string) (*Snapshot, backend.ID, error) { +func ArchiveReader(repo Repository, p *Progress, rd io.Reader, name string) (*Snapshot, ID, error) { debug.Log("ArchiveReader", "start archiving %s", name) sn, err := NewSnapshot([]string{name}) if err != nil { - return nil, backend.ID{}, err + return nil, ID{}, err } p.Start() defer p.Done() - chnker := chunker.New(rd, repo.Config.ChunkerPolynomial) + chnker := chunker.New(rd, repo.Config().ChunkerPolynomial()) - var ids backend.IDs + var ids IDs var fileSize uint64 for { @@ -54,15 +52,15 @@ func ArchiveReader(repo *repository.Repository, p *Progress, rd io.Reader, name } if err != nil { - return nil, backend.ID{}, errors.Wrap(err, "chunker.Next()") + return nil, ID{}, errors.Wrap(err, "chunker.Next()") } - id := backend.Hash(chunk.Data) + id := Hash(chunk.Data) if !repo.Index().Has(id, pack.Data) { _, err := repo.SaveAndEncrypt(pack.Data, chunk.Data, nil) if err != nil { - return nil, backend.ID{}, err + return nil, ID{}, err } debug.Log("ArchiveReader", "saved blob %v (%d bytes)\n", id.Str(), chunk.Length) } else { @@ -96,14 +94,14 @@ func ArchiveReader(repo *repository.Repository, p *Progress, rd io.Reader, name treeID, err := saveTreeJSON(repo, tree) if err != nil { - return nil, backend.ID{}, err + return nil, ID{}, err } sn.Tree = &treeID debug.Log("ArchiveReader", "tree saved as %v", treeID.Str()) - id, err := repo.SaveJSONUnpacked(backend.Snapshot, sn) + id, err := repo.SaveJSONUnpacked(SnapshotFile, sn) if err != nil { - return nil, backend.ID{}, err + return nil, ID{}, err } sn.id = &id @@ -111,12 +109,12 @@ func ArchiveReader(repo *repository.Repository, p *Progress, rd io.Reader, name err = repo.Flush() if err != nil { - return nil, backend.ID{}, err + return nil, ID{}, err } err = repo.SaveIndex() if err != nil { - return nil, backend.ID{}, err + return nil, ID{}, err } return sn, id, nil diff --git a/src/restic/archive_reader_test.go b/src/restic/archiver/archive_reader_test.go similarity index 100% rename from src/restic/archive_reader_test.go rename to src/restic/archiver/archive_reader_test.go diff --git a/src/restic/archiver.go b/src/restic/archiver/archiver.go similarity index 94% rename from src/restic/archiver.go rename to src/restic/archiver/archiver.go index 24d352bc9..a9d652e76 100644 --- a/src/restic/archiver.go +++ b/src/restic/archiver/archiver.go @@ -12,12 +12,10 @@ import ( "github.com/pkg/errors" - "restic/backend" "restic/debug" "restic/fs" "restic/pack" "restic/pipe" - "restic/repository" "github.com/restic/chunker" ) @@ -32,9 +30,9 @@ var archiverAllowAllFiles = func(string, os.FileInfo) bool { return true } // Archiver is used to backup a set of directories. 
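// A minimal calling sketch (not part of this patch): paths are archived into
// a new snapshot; both the Progress and the parent snapshot ID may be nil, as
// in the duplication test earlier in this series.
func exampleBackup(repo Repository, paths []string, parentID *ID) (*Snapshot, ID, error) {
	arch := NewArchiver(repo)
	arch.Excludes = []string{"*.tmp"} // optional; recorded in the snapshot

	return arch.Snapshot(nil, paths, parentID)
}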
type Archiver struct { - repo *repository.Repository + repo Repository knownBlobs struct { - backend.IDSet + IDSet sync.Mutex } @@ -46,15 +44,15 @@ type Archiver struct { } // NewArchiver returns a new archiver. -func NewArchiver(repo *repository.Repository) *Archiver { +func NewArchiver(repo Repository) *Archiver { arch := &Archiver{ repo: repo, blobToken: make(chan struct{}, maxConcurrentBlobs), knownBlobs: struct { - backend.IDSet + IDSet sync.Mutex }{ - IDSet: backend.NewIDSet(), + IDSet: NewIDSet(), }, } @@ -72,7 +70,7 @@ func NewArchiver(repo *repository.Repository) *Archiver { // When the blob is not known, false is returned and the blob is added to the // list. This means that the caller false is returned to is responsible to save // the blob to the backend. -func (arch *Archiver) isKnownBlob(id backend.ID, t pack.BlobType) bool { +func (arch *Archiver) isKnownBlob(id ID, t pack.BlobType) bool { arch.knownBlobs.Lock() defer arch.knownBlobs.Unlock() @@ -91,7 +89,7 @@ func (arch *Archiver) isKnownBlob(id backend.ID, t pack.BlobType) bool { } // Save stores a blob read from rd in the repository. -func (arch *Archiver) Save(t pack.BlobType, data []byte, id backend.ID) error { +func (arch *Archiver) Save(t pack.BlobType, data []byte, id ID) error { debug.Log("Archiver.Save", "Save(%v, %v)\n", t, id.Str()) if arch.isKnownBlob(id, pack.Data) { @@ -110,15 +108,15 @@ func (arch *Archiver) Save(t pack.BlobType, data []byte, id backend.ID) error { } // SaveTreeJSON stores a tree in the repository. -func (arch *Archiver) SaveTreeJSON(item interface{}) (backend.ID, error) { +func (arch *Archiver) SaveTreeJSON(item interface{}) (ID, error) { data, err := json.Marshal(item) if err != nil { - return backend.ID{}, errors.Wrap(err, "Marshal") + return ID{}, errors.Wrap(err, "Marshal") } data = append(data, '\n') // check if tree has been saved before - id := backend.Hash(data) + id := Hash(data) if arch.isKnownBlob(id, pack.Tree) { return id, nil } @@ -151,14 +149,14 @@ func (arch *Archiver) reloadFileIfChanged(node *Node, file fs.File) (*Node, erro } type saveResult struct { - id backend.ID + id ID bytes uint64 } func (arch *Archiver) saveChunk(chunk chunker.Chunk, p *Progress, token struct{}, file fs.File, resultChannel chan<- saveResult) { defer freeBuf(chunk.Data) - id := backend.Hash(chunk.Data) + id := Hash(chunk.Data) err := arch.Save(pack.Data, chunk.Data, id) // TODO handle error if err != nil { @@ -188,7 +186,7 @@ func updateNodeContent(node *Node, results []saveResult) error { debug.Log("Archiver.Save", "checking size for file %s", node.path) var bytes uint64 - node.Content = make([]backend.ID, len(results)) + node.Content = make([]ID, len(results)) for i, b := range results { node.Content[i] = b.id @@ -220,7 +218,7 @@ func (arch *Archiver) SaveFile(p *Progress, node *Node) error { return err } - chnker := chunker.New(file, arch.repo.Config.ChunkerPolynomial) + chnker := chunker.New(file, arch.repo.Config().ChunkerPolynomial()) resultChannels := [](<-chan saveResult){} for { @@ -290,7 +288,7 @@ func (arch *Archiver) fileWorker(wg *sync.WaitGroup, p *Progress, done <-chan st // check if all content is still available in the repository contentMissing := false for _, blob := range oldNode.blobs { - if ok, err := arch.repo.Backend().Test(backend.Data, blob.Storage.String()); !ok || err != nil { + if ok, err := arch.repo.Backend().Test(DataFile, blob.Storage.String()); !ok || err != nil { debug.Log("Archiver.fileWorker", " %v not using old data, %v (%v) is missing", e.Path(), blob.ID.Str(), 
blob.Storage.Str()) contentMissing = true break @@ -635,7 +633,7 @@ func (p baseNameSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } // Snapshot creates a snapshot of the given paths. If parentID is set, this is // used to compare the files to the ones archived at the time this snapshot was // taken. -func (arch *Archiver) Snapshot(p *Progress, paths []string, parentID *backend.ID) (*Snapshot, backend.ID, error) { +func (arch *Archiver) Snapshot(p *Progress, paths []string, parentID *ID) (*Snapshot, ID, error) { paths = unique(paths) sort.Sort(baseNameSlice(paths)) @@ -653,7 +651,7 @@ func (arch *Archiver) Snapshot(p *Progress, paths []string, parentID *backend.ID // create new snapshot sn, err := NewSnapshot(paths) if err != nil { - return nil, backend.ID{}, err + return nil, ID{}, err } sn.Excludes = arch.Excludes @@ -666,7 +664,7 @@ func (arch *Archiver) Snapshot(p *Progress, paths []string, parentID *backend.ID // load parent snapshot parent, err := LoadSnapshot(arch.repo, *parentID) if err != nil { - return nil, backend.ID{}, err + return nil, ID{}, err } // start walker on old tree @@ -735,9 +733,9 @@ func (arch *Archiver) Snapshot(p *Progress, paths []string, parentID *backend.ID sn.Tree = root.Subtree // save snapshot - id, err := arch.repo.SaveJSONUnpacked(backend.Snapshot, sn) + id, err := arch.repo.SaveJSONUnpacked(SnapshotFile, sn) if err != nil { - return nil, backend.ID{}, err + return nil, ID{}, err } // store ID in snapshot struct @@ -747,14 +745,14 @@ func (arch *Archiver) Snapshot(p *Progress, paths []string, parentID *backend.ID // flush repository err = arch.repo.Flush() if err != nil { - return nil, backend.ID{}, err + return nil, ID{}, err } // save index err = arch.repo.SaveIndex() if err != nil { debug.Log("Archiver.Snapshot", "error saving index: %v", err) - return nil, backend.ID{}, err + return nil, ID{}, err } debug.Log("Archiver.Snapshot", "saved indexes") diff --git a/src/restic/archiver_duplication_test.go b/src/restic/archiver/archiver_duplication_test.go similarity index 100% rename from src/restic/archiver_duplication_test.go rename to src/restic/archiver/archiver_duplication_test.go diff --git a/src/restic/archiver_int_test.go b/src/restic/archiver/archiver_int_test.go similarity index 100% rename from src/restic/archiver_int_test.go rename to src/restic/archiver/archiver_int_test.go diff --git a/src/restic/archiver_test.go b/src/restic/archiver/archiver_test.go similarity index 98% rename from src/restic/archiver_test.go rename to src/restic/archiver/archiver_test.go index 47b2210fa..7f211c618 100644 --- a/src/restic/archiver_test.go +++ b/src/restic/archiver/archiver_test.go @@ -11,7 +11,6 @@ import ( "restic/checker" "restic/crypto" "restic/pack" - "restic/repository" . "restic/test" "github.com/pkg/errors" @@ -302,7 +301,7 @@ func getRandomData(seed int, size int) []chunker.Chunk { return chunks } -func createAndInitChecker(t *testing.T, repo *repository.Repository) *checker.Checker { +func createAndInitChecker(t *testing.T, repo Repository) *checker.Checker { chkr := checker.New(repo) hints, errs := chkr.LoadIndex() diff --git a/src/restic/backend.go b/src/restic/backend.go index f00c4699e..39cd83480 100644 --- a/src/restic/backend.go +++ b/src/restic/backend.go @@ -1,28 +1,15 @@ package restic -// FileType is the type of a file in the backend. -type FileType string - -// These are the different data types a backend can store. 
-const ( - DataFile FileType = "data" - KeyFile = "key" - LockFile = "lock" - SnapshotFile = "snapshot" - IndexFile = "index" - ConfigFile = "config" -) - // Backend is used to store and access data. type Backend interface { // Location returns a string that describes the type and location of the // repository. Location() string - // Test a boolean value whether a Blob with the name and type exists. + // Test a boolean value whether a File with the name and type exists. Test(t FileType, name string) (bool, error) - // Remove removes a Blob with type t and name. + // Remove removes a File with type t and name. Remove(t FileType, name string) error // Close the backend @@ -37,10 +24,10 @@ type Backend interface { // Save stores the data in the backend under the given handle. Save(h Handle, p []byte) error - // Stat returns information about the blob identified by h. - Stat(h Handle) (BlobInfo, error) + // Stat returns information about the File identified by h. + Stat(h Handle) (FileInfo, error) - // List returns a channel that yields all names of blobs of type t in an + // List returns a channel that yields all names of files of type t in an // arbitrary order. A goroutine is started for this. If the channel done is // closed, sending stops. List(t FileType, done <-chan struct{}) <-chan string @@ -49,7 +36,6 @@ type Backend interface { Delete() error } -// BlobInfo is returned by Stat() and contains information about a stored blob. -type BlobInfo struct { - Size int64 -} +// FileInfo is returned by Stat() and contains information about a file in the +// backend. +type FileInfo struct{ Size int64 } diff --git a/src/restic/backend/testing.go b/src/restic/backend/testing.go deleted file mode 100644 index e0c3dd569..000000000 --- a/src/restic/backend/testing.go +++ /dev/null @@ -1,17 +0,0 @@ -package backend - -import ( - "crypto/rand" - "io" -) - -// RandomID retuns a randomly generated ID. This is mainly used for testing. -// When reading from rand fails, the function panics. -func RandomID() ID { - id := ID{} - _, err := io.ReadFull(rand.Reader, id[:]) - if err != nil { - panic(err) - } - return id -} diff --git a/src/restic/blob.go b/src/restic/blob.go new file mode 100644 index 000000000..e88e823fe --- /dev/null +++ b/src/restic/blob.go @@ -0,0 +1,103 @@ +package restic + +import ( + "errors" + "fmt" +) + +type Blob struct { + ID *ID `json:"id,omitempty"` + Size uint64 `json:"size,omitempty"` + Storage *ID `json:"sid,omitempty"` // encrypted ID + StorageSize uint64 `json:"ssize,omitempty"` // encrypted Size +} + +type Blobs []Blob + +func (b Blob) Valid() bool { + if b.ID == nil || b.Storage == nil || b.StorageSize == 0 { + return false + } + + return true +} + +func (b Blob) String() string { + return fmt.Sprintf("Blob<%s (%d) -> %s (%d)>", + b.ID.Str(), b.Size, + b.Storage.Str(), b.StorageSize) +} + +// Compare compares two blobs by comparing the ID and the size. It returns -1, +// 0, or 1. +func (b Blob) Compare(other Blob) int { + if res := b.ID.Compare(*other.ID); res != 0 { + return res + } + + if b.Size < other.Size { + return -1 + } + if b.Size > other.Size { + return 1 + } + + return 0 +} + +// BlobHandle identifies a blob of a given type. +type BlobHandle struct { + ID ID + Type BlobType +} + +func (h BlobHandle) String() string { + return fmt.Sprintf("<%s/%s>", h.Type, h.ID.Str()) +} + +// BlobType specifies what a blob stored in a pack is. +type BlobType uint8 + +// These are the blob types that can be stored in a pack. 
+const ( + InvalidBlob BlobType = iota + DataBlob + TreeBlob +) + +func (t BlobType) String() string { + switch t { + case DataBlob: + return "data" + case TreeBlob: + return "tree" + } + + return fmt.Sprintf("", t) +} + +// MarshalJSON encodes the BlobType into JSON. +func (t BlobType) MarshalJSON() ([]byte, error) { + switch t { + case DataBlob: + return []byte(`"data"`), nil + case TreeBlob: + return []byte(`"tree"`), nil + } + + return nil, errors.New("unknown blob type") +} + +// UnmarshalJSON decodes the BlobType from JSON. +func (t *BlobType) UnmarshalJSON(buf []byte) error { + switch string(buf) { + case `"data"`: + *t = DataBlob + case `"tree"`: + *t = TreeBlob + default: + return errors.New("unknown blob type") + } + + return nil +} diff --git a/src/restic/blob_test.go b/src/restic/blob_test.go new file mode 100644 index 000000000..951872250 --- /dev/null +++ b/src/restic/blob_test.go @@ -0,0 +1,41 @@ +package restic + +import ( + "encoding/json" + "testing" +) + +var blobTypeJSON = []struct { + t BlobType + res string +}{ + {DataBlob, `"data"`}, + {TreeBlob, `"tree"`}, +} + +func TestBlobTypeJSON(t *testing.T) { + for _, test := range blobTypeJSON { + // test serialize + buf, err := json.Marshal(test.t) + if err != nil { + t.Error(err) + continue + } + if test.res != string(buf) { + t.Errorf("want %q, got %q", test.res, string(buf)) + continue + } + + // test unserialize + var v BlobType + err = json.Unmarshal([]byte(test.res), &v) + if err != nil { + t.Error(err) + continue + } + if test.t != v { + t.Errorf("want %v, got %v", test.t, v) + continue + } + } +} diff --git a/src/restic/handle.go b/src/restic/file.go similarity index 69% rename from src/restic/handle.go rename to src/restic/file.go index dd7439d39..af5c374c7 100644 --- a/src/restic/handle.go +++ b/src/restic/file.go @@ -6,6 +6,19 @@ import ( "github.com/pkg/errors" ) +// FileType is the type of a file in the backend. +type FileType string + +// These are the different data types a backend can store. +const ( + DataFile FileType = "data" + KeyFile = "key" + LockFile = "lock" + SnapshotFile = "snapshot" + IndexFile = "index" + ConfigFile = "config" +) + // Handle is used to store and access data in a backend. type Handle struct { FileType FileType diff --git a/src/restic/handle_test.go b/src/restic/file_test.go similarity index 100% rename from src/restic/handle_test.go rename to src/restic/file_test.go diff --git a/src/restic/find.go b/src/restic/find.go index 067754e73..6a9ca0fdb 100644 --- a/src/restic/find.go +++ b/src/restic/find.go @@ -1,15 +1,11 @@ package restic -import ( - "restic/backend" - "restic/pack" - "restic/repository" -) +import "restic/pack" // FindUsedBlobs traverses the tree ID and adds all seen blobs (trees and data // blobs) to the set blobs. The tree blobs in the `seen` BlobSet will not be visited // again. 
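Side note on the new src/restic/blob.go and src/restic/file.go above: BlobType now carries its own JSON (un)marshalling, and the FileType constants have moved out of the backend package. Below is a minimal round-trip sketch, assuming only the exported names shown in the hunks above; it is an illustration, not code added by the patch, and the find.go hunk resumes after it.

    package main

    import (
        "encoding/json"
        "fmt"

        "restic"
    )

    func main() {
        // DataBlob marshals to the JSON string "data" (see BlobType.MarshalJSON above).
        buf, err := json.Marshal(restic.DataBlob)
        if err != nil {
            panic(err)
        }
        fmt.Println(string(buf)) // "data"

        // The string "tree" unmarshals back into TreeBlob.
        var t restic.BlobType
        if err := json.Unmarshal([]byte(`"tree"`), &t); err != nil {
            panic(err)
        }
        fmt.Println(t == restic.TreeBlob) // true
    }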
-func FindUsedBlobs(repo *repository.Repository, treeID backend.ID, blobs pack.BlobSet, seen pack.BlobSet) error { +func FindUsedBlobs(repo Repository, treeID ID, blobs pack.BlobSet, seen pack.BlobSet) error { blobs.Insert(pack.Handle{ID: treeID, Type: pack.Tree}) tree, err := LoadTree(repo, treeID) diff --git a/src/restic/find_test.go b/src/restic/find_test.go index f7e47bde4..9a9a85e51 100644 --- a/src/restic/find_test.go +++ b/src/restic/find_test.go @@ -1,4 +1,4 @@ -package restic +package restic_test import ( "bufio" @@ -7,6 +7,7 @@ import ( "fmt" "os" "path/filepath" + "restic" "sort" "testing" "time" @@ -92,7 +93,7 @@ func TestFindUsedBlobs(t *testing.T) { for i, sn := range snapshots { usedBlobs := pack.NewBlobSet() - err := FindUsedBlobs(repo, *sn.Tree, usedBlobs, pack.NewBlobSet()) + err := restic.FindUsedBlobs(repo, *sn.Tree, usedBlobs, pack.NewBlobSet()) if err != nil { t.Errorf("FindUsedBlobs returned error: %v", err) continue @@ -128,7 +129,7 @@ func BenchmarkFindUsedBlobs(b *testing.B) { for i := 0; i < b.N; i++ { seen := pack.NewBlobSet() blobs := pack.NewBlobSet() - err := FindUsedBlobs(repo, *sn.Tree, blobs, seen) + err := restic.FindUsedBlobs(repo, *sn.Tree, blobs, seen) if err != nil { b.Error(err) } diff --git a/src/restic/lock.go b/src/restic/lock.go index a2780379d..9f181106a 100644 --- a/src/restic/lock.go +++ b/src/restic/lock.go @@ -11,9 +11,7 @@ import ( "github.com/pkg/errors" - "restic/backend" "restic/debug" - "restic/repository" ) // Lock represents a process locking the repository for an operation. @@ -33,8 +31,8 @@ type Lock struct { UID uint32 `json:"uid,omitempty"` GID uint32 `json:"gid,omitempty"` - repo *repository.Repository - lockID *backend.ID + repo Repository + lockID *ID } // ErrAlreadyLocked is returned when NewLock or NewExclusiveLock are unable to @@ -59,20 +57,20 @@ func IsAlreadyLocked(err error) bool { // NewLock returns a new, non-exclusive lock for the repository. If an // exclusive lock is already held by another process, ErrAlreadyLocked is // returned. -func NewLock(repo *repository.Repository) (*Lock, error) { +func NewLock(repo Repository) (*Lock, error) { return newLock(repo, false) } // NewExclusiveLock returns a new, exclusive lock for the repository. If // another lock (normal and exclusive) is already held by another process, // ErrAlreadyLocked is returned. -func NewExclusiveLock(repo *repository.Repository) (*Lock, error) { +func NewExclusiveLock(repo Repository) (*Lock, error) { return newLock(repo, true) } const waitBeforeLockCheck = 200 * time.Millisecond -func newLock(repo *repository.Repository, excl bool) (*Lock, error) { +func newLock(repo Repository, excl bool) (*Lock, error) { lock := &Lock{ Time: time.Now(), PID: os.Getpid(), @@ -128,7 +126,7 @@ func (l *Lock) fillUserInfo() error { // non-exclusive lock is to be created, an error is only returned when an // exclusive lock is found. 
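Side note on the lock.go changes above: NewLock and NewExclusiveLock now accept the restic.Repository interface instead of *repository.Repository. The sketch below shows a hypothetical caller written against the signatures in this hunk (Lock.Unlock keeps its signature further down in the same file); the lock.go diff continues after it.

    package example

    import (
        "log"

        "restic"
    )

    // withLock takes a non-exclusive lock on the repository, runs fn, and
    // removes the lock file from the backend again afterwards.
    func withLock(repo restic.Repository, fn func() error) error {
        lock, err := restic.NewLock(repo)
        if err != nil {
            return err
        }
        defer func() {
            if err := lock.Unlock(); err != nil {
                log.Printf("unable to remove lock: %v", err)
            }
        }()

        return fn()
    }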
func (l *Lock) checkForOtherLocks() error { - return eachLock(l.repo, func(id backend.ID, lock *Lock, err error) error { + return eachLock(l.repo, func(id ID, lock *Lock, err error) error { if l.lockID != nil && id.Equal(*l.lockID) { return nil } @@ -150,11 +148,11 @@ func (l *Lock) checkForOtherLocks() error { }) } -func eachLock(repo *repository.Repository, f func(backend.ID, *Lock, error) error) error { +func eachLock(repo Repository, f func(ID, *Lock, error) error) error { done := make(chan struct{}) defer close(done) - for id := range repo.List(backend.Lock, done) { + for id := range repo.List(LockFile, done) { lock, err := LoadLock(repo, id) err = f(id, lock, err) if err != nil { @@ -166,10 +164,10 @@ func eachLock(repo *repository.Repository, f func(backend.ID, *Lock, error) erro } // createLock acquires the lock by creating a file in the repository. -func (l *Lock) createLock() (backend.ID, error) { - id, err := l.repo.SaveJSONUnpacked(backend.Lock, l) +func (l *Lock) createLock() (ID, error) { + id, err := l.repo.SaveJSONUnpacked(LockFile, l) if err != nil { - return backend.ID{}, err + return ID{}, err } return id, nil @@ -181,7 +179,7 @@ func (l *Lock) Unlock() error { return nil } - return l.repo.Backend().Remove(backend.Lock, l.lockID.String()) + return l.repo.Backend().Remove(LockFile, l.lockID.String()) } var staleTimeout = 30 * time.Minute @@ -229,7 +227,7 @@ func (l *Lock) Refresh() error { return err } - err = l.repo.Backend().Remove(backend.Lock, l.lockID.String()) + err = l.repo.Backend().Remove(LockFile, l.lockID.String()) if err != nil { return err } @@ -269,9 +267,9 @@ func init() { } // LoadLock loads and unserializes a lock from a repository. -func LoadLock(repo *repository.Repository, id backend.ID) (*Lock, error) { +func LoadLock(repo Repository, id ID) (*Lock, error) { lock := &Lock{} - if err := repo.LoadJSONUnpacked(backend.Lock, id, lock); err != nil { + if err := repo.LoadJSONUnpacked(LockFile, id, lock); err != nil { return nil, err } lock.lockID = &id @@ -280,15 +278,15 @@ func LoadLock(repo *repository.Repository, id backend.ID) (*Lock, error) { } // RemoveStaleLocks deletes all locks detected as stale from the repository. -func RemoveStaleLocks(repo *repository.Repository) error { - return eachLock(repo, func(id backend.ID, lock *Lock, err error) error { +func RemoveStaleLocks(repo Repository) error { + return eachLock(repo, func(id ID, lock *Lock, err error) error { // ignore locks that cannot be loaded if err != nil { return nil } if lock.Stale() { - return repo.Backend().Remove(backend.Lock, id.String()) + return repo.Backend().Remove(LockFile, id.String()) } return nil @@ -296,8 +294,8 @@ func RemoveStaleLocks(repo *repository.Repository) error { } // RemoveAllLocks removes all locks forcefully. -func RemoveAllLocks(repo *repository.Repository) error { - return eachLock(repo, func(id backend.ID, lock *Lock, err error) error { - return repo.Backend().Remove(backend.Lock, id.String()) +func RemoveAllLocks(repo Repository) error { + return eachLock(repo, func(id ID, lock *Lock, err error) error { + return repo.Backend().Remove(LockFile, id.String()) }) } diff --git a/src/restic/lock_test.go b/src/restic/lock_test.go index da8fb7a40..b97bc97a8 100644 --- a/src/restic/lock_test.go +++ b/src/restic/lock_test.go @@ -6,8 +6,6 @@ import ( "time" "restic" - "restic/backend" - "restic/repository" . 
"restic/test" ) @@ -92,18 +90,18 @@ func TestExclusiveLockOnLockedRepo(t *testing.T) { OK(t, elock.Unlock()) } -func createFakeLock(repo *repository.Repository, t time.Time, pid int) (backend.ID, error) { +func createFakeLock(repo restic.Repository, t time.Time, pid int) (restic.ID, error) { hostname, err := os.Hostname() if err != nil { - return backend.ID{}, err + return restic.ID{}, err } newLock := &restic.Lock{Time: t, PID: pid, Hostname: hostname} - return repo.SaveJSONUnpacked(backend.Lock, &newLock) + return repo.SaveJSONUnpacked(restic.LockFile, &newLock) } -func removeLock(repo *repository.Repository, id backend.ID) error { - return repo.Backend().Remove(backend.Lock, id.String()) +func removeLock(repo restic.Repository, id restic.ID) error { + return repo.Backend().Remove(restic.LockFile, id.String()) } var staleLockTests = []struct { @@ -162,8 +160,8 @@ func TestLockStale(t *testing.T) { } } -func lockExists(repo *repository.Repository, t testing.TB, id backend.ID) bool { - exists, err := repo.Backend().Test(backend.Lock, id.String()) +func lockExists(repo restic.Repository, t testing.TB, id restic.ID) bool { + exists, err := repo.Backend().Test(restic.LockFile, id.String()) OK(t, err) return exists @@ -224,8 +222,8 @@ func TestLockRefresh(t *testing.T) { lock, err := restic.NewLock(repo) OK(t, err) - var lockID *backend.ID - for id := range repo.List(backend.Lock, nil) { + var lockID *restic.ID + for id := range repo.List(restic.LockFile, nil) { if lockID != nil { t.Error("more than one lock found") } @@ -234,8 +232,8 @@ func TestLockRefresh(t *testing.T) { OK(t, lock.Refresh()) - var lockID2 *backend.ID - for id := range repo.List(backend.Lock, nil) { + var lockID2 *restic.ID + for id := range repo.List(restic.LockFile, nil) { if lockID2 != nil { t.Error("more than one lock found") } diff --git a/src/restic/node.go b/src/restic/node.go index 72565342f..60ce53b56 100644 --- a/src/restic/node.go +++ b/src/restic/node.go @@ -14,32 +14,30 @@ import ( "runtime" - "restic/backend" "restic/debug" "restic/fs" "restic/pack" - "restic/repository" ) // Node is a file, directory or other item in a backup. 
type Node struct { - Name string `json:"name"` - FileType string `json:"type"` - Mode os.FileMode `json:"mode,omitempty"` - ModTime time.Time `json:"mtime,omitempty"` - AccessTime time.Time `json:"atime,omitempty"` - ChangeTime time.Time `json:"ctime,omitempty"` - UID uint32 `json:"uid"` - GID uint32 `json:"gid"` - User string `json:"user,omitempty"` - Group string `json:"group,omitempty"` - Inode uint64 `json:"inode,omitempty"` - Size uint64 `json:"size,omitempty"` - Links uint64 `json:"links,omitempty"` - LinkTarget string `json:"linktarget,omitempty"` - Device uint64 `json:"device,omitempty"` - Content []backend.ID `json:"content"` - Subtree *backend.ID `json:"subtree,omitempty"` + Name string `json:"name"` + FileType string `json:"type"` + Mode os.FileMode `json:"mode,omitempty"` + ModTime time.Time `json:"mtime,omitempty"` + AccessTime time.Time `json:"atime,omitempty"` + ChangeTime time.Time `json:"ctime,omitempty"` + UID uint32 `json:"uid"` + GID uint32 `json:"gid"` + User string `json:"user,omitempty"` + Group string `json:"group,omitempty"` + Inode uint64 `json:"inode,omitempty"` + Size uint64 `json:"size,omitempty"` + Links uint64 `json:"links,omitempty"` + LinkTarget string `json:"linktarget,omitempty"` + Device uint64 `json:"device,omitempty"` + Content IDs `json:"content"` + Subtree *ID `json:"subtree,omitempty"` Error string `json:"error,omitempty"` @@ -47,7 +45,7 @@ type Node struct { path string err error - blobs repository.Blobs + blobs Blobs } func (node Node) String() string { @@ -108,7 +106,7 @@ func nodeTypeFromFileInfo(fi os.FileInfo) string { } // CreateAt creates the node at the given path and restores all the meta data. -func (node *Node) CreateAt(path string, repo *repository.Repository) error { +func (node *Node) CreateAt(path string, repo Repository) error { debug.Log("Node.CreateAt", "create node %v at %v", node.Name, path) switch node.FileType { @@ -202,7 +200,7 @@ func (node Node) createDirAt(path string) error { return nil } -func (node Node) createFileAt(path string, repo *repository.Repository) error { +func (node Node) createFileAt(path string, repo Repository) error { f, err := fs.OpenFile(path, os.O_CREATE|os.O_WRONLY, 0600) defer f.Close() diff --git a/src/restic/repository/rand_reader.go b/src/restic/rand_reader.go similarity index 98% rename from src/restic/repository/rand_reader.go rename to src/restic/rand_reader.go index 2afbd60b7..cfe50222e 100644 --- a/src/restic/repository/rand_reader.go +++ b/src/restic/rand_reader.go @@ -1,4 +1,4 @@ -package repository +package restic import ( "io" diff --git a/src/restic/repository.go b/src/restic/repository.go index cb95463f0..6dd5c2c75 100644 --- a/src/restic/repository.go +++ b/src/restic/repository.go @@ -1,6 +1,10 @@ package restic -import "restic/repository" +import ( + "restic/pack" + + "github.com/restic/chunker" +) // Repository stores data in a backend. It provides high-level functions and // transparently encrypts/decrypts data. 
@@ -9,5 +13,43 @@ type Repository interface { // Backend returns the backend used by the repository Backend() Backend - SetIndex(*repository.MasterIndex) + SetIndex(interface{}) + + Index() Index + SaveFullIndex() error + + SaveJSON(pack.BlobType, interface{}) (ID, error) + + Config() Config + + SaveAndEncrypt(pack.BlobType, []byte, *ID) (ID, error) + SaveJSONUnpacked(FileType, interface{}) (ID, error) + SaveIndex() error + + LoadJSONPack(pack.BlobType, ID, interface{}) error + LoadJSONUnpacked(FileType, ID, interface{}) error + LoadBlob(ID, pack.BlobType, []byte) ([]byte, error) + + LookupBlobSize(ID, pack.BlobType) (uint, error) + + List(FileType, <-chan struct{}) <-chan ID + + Flush() error +} + +type Index interface { + Has(ID, pack.BlobType) bool + Lookup(ID, pack.BlobType) ([]PackedBlob, error) +} + +type Config interface { + ChunkerPolynomial() chunker.Pol +} + +type PackedBlob interface { + Type() pack.BlobType + Length() uint + ID() ID + Offset() uint + PackID() ID } diff --git a/src/restic/repository/blob.go b/src/restic/repository/blob.go deleted file mode 100644 index 13cb022d1..000000000 --- a/src/restic/repository/blob.go +++ /dev/null @@ -1,47 +0,0 @@ -package repository - -import ( - "fmt" - - "restic/backend" -) - -type Blob struct { - ID *backend.ID `json:"id,omitempty"` - Size uint64 `json:"size,omitempty"` - Storage *backend.ID `json:"sid,omitempty"` // encrypted ID - StorageSize uint64 `json:"ssize,omitempty"` // encrypted Size -} - -type Blobs []Blob - -func (b Blob) Valid() bool { - if b.ID == nil || b.Storage == nil || b.StorageSize == 0 { - return false - } - - return true -} - -func (b Blob) String() string { - return fmt.Sprintf("Blob<%s (%d) -> %s (%d)>", - b.ID.Str(), b.Size, - b.Storage.Str(), b.StorageSize) -} - -// Compare compares two blobs by comparing the ID and the size. It returns -1, -// 0, or 1. -func (b Blob) Compare(other Blob) int { - if res := b.ID.Compare(*other.ID); res != 0 { - return res - } - - if b.Size < other.Size { - return -1 - } - if b.Size > other.Size { - return 1 - } - - return 0 -} diff --git a/src/restic/repository/config.go b/src/restic/repository/config.go index c9e4eac85..fcb408f99 100644 --- a/src/restic/repository/config.go +++ b/src/restic/repository/config.go @@ -5,11 +5,11 @@ import ( "crypto/sha256" "encoding/hex" "io" + "restic" "testing" "github.com/pkg/errors" - "restic/backend" "restic/debug" "github.com/restic/chunker" @@ -31,12 +31,12 @@ const RepoVersion = 1 // JSONUnpackedSaver saves unpacked JSON. type JSONUnpackedSaver interface { - SaveJSONUnpacked(backend.Type, interface{}) (backend.ID, error) + SaveJSONUnpacked(restic.FileType, interface{}) (restic.ID, error) } // JSONUnpackedLoader loads unpacked JSON. 
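Side note on the expanded restic.Repository, Index, and Config interfaces above: callers can now be written purely against these interfaces rather than the concrete repository type. The sketch below assumes only the methods declared in the hunk above plus restic.Hash (used elsewhere in this patch); it is an illustration, not part of the patch, and the config.go hunk resumes after it.

    package example

    import (
        "restic"
        "restic/pack"
    )

    // saveBlobOnce stores buf as a data blob unless the in-memory index
    // already knows its ID, mirroring what the archiver does, but using only
    // the interfaces defined above.
    func saveBlobOnce(repo restic.Repository, buf []byte) (restic.ID, error) {
        id := restic.Hash(buf)
        if repo.Index().Has(id, pack.Data) {
            return id, nil // already stored
        }
        return repo.SaveAndEncrypt(pack.Data, buf, &id)
    }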
type JSONUnpackedLoader interface { - LoadJSONUnpacked(backend.Type, backend.ID, interface{}) error + LoadJSONUnpacked(restic.FileType, restic.ID, interface{}) error } // CreateConfig creates a config file with a randomly selected polynomial and @@ -87,7 +87,7 @@ func LoadConfig(r JSONUnpackedLoader) (Config, error) { cfg Config ) - err := r.LoadJSONUnpacked(backend.Config, backend.ID{}, &cfg) + err := r.LoadJSONUnpacked(restic.ConfigFile, restic.ID{}, &cfg) if err != nil { return Config{}, err } diff --git a/src/restic/repository/config_test.go b/src/restic/repository/config_test.go index 71f2fd810..8c17a7867 100644 --- a/src/restic/repository/config_test.go +++ b/src/restic/repository/config_test.go @@ -1,46 +1,46 @@ package repository_test import ( + "restic" "testing" - "restic/backend" "restic/repository" . "restic/test" ) -type saver func(backend.Type, interface{}) (backend.ID, error) +type saver func(restic.FileType, interface{}) (restic.ID, error) -func (s saver) SaveJSONUnpacked(t backend.Type, arg interface{}) (backend.ID, error) { +func (s saver) SaveJSONUnpacked(t restic.FileType, arg interface{}) (restic.ID, error) { return s(t, arg) } -type loader func(backend.Type, backend.ID, interface{}) error +type loader func(restic.FileType, restic.ID, interface{}) error -func (l loader) LoadJSONUnpacked(t backend.Type, id backend.ID, arg interface{}) error { +func (l loader) LoadJSONUnpacked(t restic.FileType, id restic.ID, arg interface{}) error { return l(t, id, arg) } func TestConfig(t *testing.T) { resultConfig := repository.Config{} - save := func(tpe backend.Type, arg interface{}) (backend.ID, error) { - Assert(t, tpe == backend.Config, + save := func(tpe restic.FileType, arg interface{}) (restic.ID, error) { + Assert(t, tpe == restic.ConfigFile, "wrong backend type: got %v, wanted %v", - tpe, backend.Config) + tpe, restic.ConfigFile) cfg := arg.(repository.Config) resultConfig = cfg - return backend.ID{}, nil + return restic.ID{}, nil } cfg1, err := repository.CreateConfig() OK(t, err) - _, err = saver(save).SaveJSONUnpacked(backend.Config, cfg1) + _, err = saver(save).SaveJSONUnpacked(restic.ConfigFile, cfg1) - load := func(tpe backend.Type, id backend.ID, arg interface{}) error { - Assert(t, tpe == backend.Config, + load := func(tpe restic.FileType, id restic.ID, arg interface{}) error { + Assert(t, tpe == restic.ConfigFile, "wrong backend type: got %v, wanted %v", - tpe, backend.Config) + tpe, restic.ConfigFile) cfg := arg.(*repository.Config) *cfg = resultConfig diff --git a/src/restic/repository/index.go b/src/restic/repository/index.go index f49a1735f..a04f1b40b 100644 --- a/src/restic/repository/index.go +++ b/src/restic/repository/index.go @@ -5,12 +5,12 @@ import ( "encoding/json" "fmt" "io" + "restic" "sync" "time" "github.com/pkg/errors" - "restic/backend" "restic/crypto" "restic/debug" "restic/pack" @@ -21,14 +21,14 @@ type Index struct { m sync.Mutex pack map[pack.Handle][]indexEntry - final bool // set to true for all indexes read from the backend ("finalized") - id backend.ID // set to the ID of the index when it's finalized - supersedes backend.IDs + final bool // set to true for all indexes read from the backend ("finalized") + id restic.ID // set to the ID of the index when it's finalized + supersedes restic.IDs created time.Time } type indexEntry struct { - packID backend.ID + packID restic.ID offset uint length uint } @@ -112,7 +112,7 @@ func (idx *Index) Store(blob PackedBlob) { } // Lookup queries the index for the blob ID and returns a PackedBlob. 
-func (idx *Index) Lookup(id backend.ID, tpe pack.BlobType) (blobs []PackedBlob, err error) { +func (idx *Index) Lookup(id restic.ID, tpe pack.BlobType) (blobs []PackedBlob, err error) { idx.m.Lock() defer idx.m.Unlock() @@ -144,7 +144,7 @@ func (idx *Index) Lookup(id backend.ID, tpe pack.BlobType) (blobs []PackedBlob, } // ListPack returns a list of blobs contained in a pack. -func (idx *Index) ListPack(id backend.ID) (list []PackedBlob) { +func (idx *Index) ListPack(id restic.ID) (list []PackedBlob) { idx.m.Lock() defer idx.m.Unlock() @@ -166,7 +166,7 @@ func (idx *Index) ListPack(id backend.ID) (list []PackedBlob) { } // Has returns true iff the id is listed in the index. -func (idx *Index) Has(id backend.ID, tpe pack.BlobType) bool { +func (idx *Index) Has(id restic.ID, tpe pack.BlobType) bool { _, err := idx.Lookup(id, tpe) if err == nil { return true @@ -177,7 +177,7 @@ func (idx *Index) Has(id backend.ID, tpe pack.BlobType) bool { // LookupSize returns the length of the cleartext content behind the // given id -func (idx *Index) LookupSize(id backend.ID, tpe pack.BlobType) (cleartextLength uint, err error) { +func (idx *Index) LookupSize(id restic.ID, tpe pack.BlobType) (cleartextLength uint, err error) { blobs, err := idx.Lookup(id, tpe) if err != nil { return 0, err @@ -187,13 +187,13 @@ func (idx *Index) LookupSize(id backend.ID, tpe pack.BlobType) (cleartextLength } // Supersedes returns the list of indexes this index supersedes, if any. -func (idx *Index) Supersedes() backend.IDs { +func (idx *Index) Supersedes() restic.IDs { return idx.supersedes } // AddToSupersedes adds the ids to the list of indexes superseded by this // index. If the index has already been finalized, an error is returned. -func (idx *Index) AddToSupersedes(ids ...backend.ID) error { +func (idx *Index) AddToSupersedes(ids ...restic.ID) error { idx.m.Lock() defer idx.m.Unlock() @@ -209,9 +209,9 @@ func (idx *Index) AddToSupersedes(ids ...backend.ID) error { type PackedBlob struct { Type pack.BlobType Length uint - ID backend.ID + ID restic.ID Offset uint - PackID backend.ID + PackID restic.ID } func (pb PackedBlob) String() string { @@ -259,11 +259,11 @@ func (idx *Index) Each(done chan struct{}) <-chan PackedBlob { } // Packs returns all packs in this index -func (idx *Index) Packs() backend.IDSet { +func (idx *Index) Packs() restic.IDSet { idx.m.Lock() defer idx.m.Unlock() - packs := backend.NewIDSet() + packs := restic.NewIDSet() for _, list := range idx.pack { for _, entry := range list { packs.Insert(entry.packID) @@ -300,12 +300,12 @@ func (idx *Index) Length() uint { } type packJSON struct { - ID backend.ID `json:"id"` + ID restic.ID `json:"id"` Blobs []blobJSON `json:"blobs"` } type blobJSON struct { - ID backend.ID `json:"id"` + ID restic.ID `json:"id"` Type pack.BlobType `json:"type"` Offset uint `json:"offset"` Length uint `json:"length"` @@ -314,7 +314,7 @@ type blobJSON struct { // generatePackList returns a list of packs. 
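Side note on the Index methods above, which now take restic.ID: a lookup yields the pack ID and offset needed to read a blob back from the backend. The helper below is a hypothetical sketch based on the Lookup signature in this hunk, relying on the fact (used by Has above) that Lookup returns an error when the blob is not indexed; the index.go diff continues after it.

    package example

    import (
        "restic"
        "restic/pack"
        "restic/repository"
    )

    // packOfBlob returns the ID of the first pack that the index lists as
    // containing the given data blob.
    func packOfBlob(idx *repository.Index, id restic.ID) (restic.ID, error) {
        blobs, err := idx.Lookup(id, pack.Data)
        if err != nil {
            // Lookup errors when the blob is unknown, so blobs is non-empty below.
            return restic.ID{}, err
        }
        return blobs[0].PackID, nil
    }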
func (idx *Index) generatePackList() ([]*packJSON, error) { list := []*packJSON{} - packs := make(map[backend.ID]*packJSON) + packs := make(map[restic.ID]*packJSON) for h, packedBlobs := range idx.pack { for _, blob := range packedBlobs { @@ -357,7 +357,7 @@ func (idx *Index) generatePackList() ([]*packJSON, error) { } type jsonIndex struct { - Supersedes backend.IDs `json:"supersedes,omitempty"` + Supersedes restic.IDs `json:"supersedes,omitempty"` Packs []*packJSON `json:"packs"` } @@ -402,12 +402,12 @@ func (idx *Index) Finalize(w io.Writer) error { // ID returns the ID of the index, if available. If the index is not yet // finalized, an error is returned. -func (idx *Index) ID() (backend.ID, error) { +func (idx *Index) ID() (restic.ID, error) { idx.m.Lock() defer idx.m.Unlock() if !idx.final { - return backend.ID{}, errors.New("index not finalized") + return restic.ID{}, errors.New("index not finalized") } return idx.id, nil @@ -415,7 +415,7 @@ func (idx *Index) ID() (backend.ID, error) { // SetID sets the ID the index has been written to. This requires that // Finalize() has been called before, otherwise an error is returned. -func (idx *Index) SetID(id backend.ID) error { +func (idx *Index) SetID(id restic.ID) error { idx.m.Lock() defer idx.m.Unlock() @@ -545,10 +545,10 @@ func DecodeOldIndex(rd io.Reader) (idx *Index, err error) { } // LoadIndexWithDecoder loads the index and decodes it with fn. -func LoadIndexWithDecoder(repo *Repository, id backend.ID, fn func(io.Reader) (*Index, error)) (idx *Index, err error) { +func LoadIndexWithDecoder(repo *Repository, id restic.ID, fn func(io.Reader) (*Index, error)) (idx *Index, err error) { debug.Log("LoadIndexWithDecoder", "Loading index %v", id[:8]) - buf, err := repo.LoadAndDecrypt(backend.Index, id) + buf, err := repo.LoadAndDecrypt(restic.IndexFile, id) if err != nil { return nil, err } @@ -568,7 +568,7 @@ func LoadIndexWithDecoder(repo *Repository, id backend.ID, fn func(io.Reader) (* // format (if necessary). When the conversion is succcessful, the old index // is removed. Returned is either the old id (if no conversion was needed) or // the new id. 
-func ConvertIndex(repo *Repository, id backend.ID) (backend.ID, error) { +func ConvertIndex(repo *Repository, id restic.ID) (restic.ID, error) { debug.Log("ConvertIndex", "checking index %v", id.Str()) idx, err := LoadIndexWithDecoder(repo, id, DecodeOldIndex) @@ -578,7 +578,7 @@ func ConvertIndex(repo *Repository, id backend.ID) (backend.ID, error) { } buf := bytes.NewBuffer(nil) - idx.supersedes = backend.IDs{id} + idx.supersedes = restic.IDs{id} err = idx.Encode(buf) if err != nil { @@ -586,5 +586,5 @@ func ConvertIndex(repo *Repository, id backend.ID) (backend.ID, error) { return id, err } - return repo.SaveUnpacked(backend.Index, buf.Bytes()) + return repo.SaveUnpacked(restic.IndexFile, buf.Bytes()) } diff --git a/src/restic/repository/index_rebuild.go b/src/restic/repository/index_rebuild.go index 99c281484..ba9321900 100644 --- a/src/restic/repository/index_rebuild.go +++ b/src/restic/repository/index_rebuild.go @@ -3,7 +3,7 @@ package repository import ( "fmt" "os" - "restic/backend" + "restic" "restic/debug" "restic/list" "restic/worker" @@ -23,7 +23,7 @@ func RebuildIndex(repo *Repository) error { idx := NewIndex() for job := range ch { - id := job.Data.(backend.ID) + id := job.Data.(restic.ID) if job.Error != nil { fmt.Fprintf(os.Stderr, "error for pack %v: %v\n", id, job.Error) @@ -44,8 +44,8 @@ func RebuildIndex(repo *Repository) error { } } - oldIndexes := backend.NewIDSet() - for id := range repo.List(backend.Index, done) { + oldIndexes := restic.NewIDSet() + for id := range repo.List(restic.IndexFile, done) { idx.AddToSupersedes(id) oldIndexes.Insert(id) } @@ -58,7 +58,7 @@ func RebuildIndex(repo *Repository) error { debug.Log("RebuildIndex.RebuildIndex", "new index saved as %v", id.Str()) for indexID := range oldIndexes { - err := repo.Backend().Remove(backend.Index, indexID.String()) + err := repo.Backend().Remove(restic.IndexFile, indexID.String()) if err != nil { fmt.Fprintf(os.Stderr, "unable to remove index %v: %v\n", indexID.Str(), err) } diff --git a/src/restic/repository/index_test.go b/src/restic/repository/index_test.go index a16c6f2e1..be33422b6 100644 --- a/src/restic/repository/index_test.go +++ b/src/restic/repository/index_test.go @@ -2,6 +2,7 @@ package repository_test import ( "bytes" + "restic" "testing" "restic/backend" @@ -12,8 +13,8 @@ import ( func TestIndexSerialize(t *testing.T) { type testEntry struct { - id backend.ID - pack backend.ID + id restic.ID + pack restic.ID tpe pack.BlobType offset, length uint } @@ -249,7 +250,7 @@ var docOldExample = []byte(` `) var exampleTests = []struct { - id, packID backend.ID + id, packID restic.ID tpe pack.BlobType offset, length uint }{ @@ -269,11 +270,11 @@ var exampleTests = []struct { } var exampleLookupTest = struct { - packID backend.ID - blobs map[backend.ID]pack.BlobType + packID restic.ID + blobs map[restic.ID]pack.BlobType }{ ParseID("73d04e6125cf3c28a299cc2f3cca3b78ceac396e4fcf9575e34536b26782413c"), - map[backend.ID]pack.BlobType{ + map[restic.ID]pack.BlobType{ ParseID("3ec79977ef0cf5de7b08cd12b874cd0f62bbaf7f07f3497a5b1bbcc8cb39b1ce"): pack.Data, ParseID("9ccb846e60d90d4eb915848add7aa7ea1e4bbabfc60e573db9f7bfb2789afbae"): pack.Tree, ParseID("d3dc577b4ffd38cc4b32122cabf8655a0223ed22edfd93b353dc0c3f2b0fdf66"): pack.Data, @@ -281,7 +282,7 @@ var exampleLookupTest = struct { } func TestIndexUnserialize(t *testing.T) { - oldIdx := backend.IDs{ParseID("ed54ae36197f4745ebc4b54d10e0f623eaaaedd03013eb7ae90df881b7781452")} + oldIdx := 
restic.IDs{ParseID("ed54ae36197f4745ebc4b54d10e0f623eaaaedd03013eb7ae90df881b7781452")} idx, err := repository.DecodeIndex(bytes.NewReader(docExample)) OK(t, err) @@ -345,7 +346,7 @@ func TestIndexUnserializeOld(t *testing.T) { func TestIndexPacks(t *testing.T) { idx := repository.NewIndex() - packs := backend.NewIDSet() + packs := restic.NewIDSet() for i := 0; i < 20; i++ { packID := backend.RandomID() diff --git a/src/restic/repository/key.go b/src/restic/repository/key.go index 2f2e79758..792c97d08 100644 --- a/src/restic/repository/key.go +++ b/src/restic/repository/key.go @@ -5,6 +5,7 @@ import ( "fmt" "os" "os/user" + "restic" "time" "github.com/pkg/errors" @@ -142,7 +143,7 @@ func SearchKey(s *Repository, password string, maxKeys int) (*Key, error) { // LoadKey loads a key from the backend. func LoadKey(s *Repository, name string) (k *Key, err error) { - h := backend.Handle{Type: backend.Key, Name: name} + h := restic.Handle{Type: backend.Key, Name: name} data, err := backend.LoadAll(s.be, h, nil) if err != nil { return nil, err @@ -224,9 +225,9 @@ func AddKey(s *Repository, password string, template *crypto.Key) (*Key, error) } // store in repository and return - h := backend.Handle{ + h := restic.Handle{ Type: backend.Key, - Name: backend.Hash(buf).String(), + Name: restic.Hash(buf).String(), } err = s.be.Save(h, buf) diff --git a/src/restic/repository/master_index.go b/src/restic/repository/master_index.go index 96425f9e8..adf28eea8 100644 --- a/src/restic/repository/master_index.go +++ b/src/restic/repository/master_index.go @@ -1,11 +1,11 @@ package repository import ( + "restic" "sync" "github.com/pkg/errors" - "restic/backend" "restic/debug" "restic/pack" ) @@ -22,7 +22,7 @@ func NewMasterIndex() *MasterIndex { } // Lookup queries all known Indexes for the ID and returns the first match. -func (mi *MasterIndex) Lookup(id backend.ID, tpe pack.BlobType) (blobs []PackedBlob, err error) { +func (mi *MasterIndex) Lookup(id restic.ID, tpe pack.BlobType) (blobs []PackedBlob, err error) { mi.idxMutex.RLock() defer mi.idxMutex.RUnlock() @@ -42,7 +42,7 @@ func (mi *MasterIndex) Lookup(id backend.ID, tpe pack.BlobType) (blobs []PackedB } // LookupSize queries all known Indexes for the ID and returns the first match. -func (mi *MasterIndex) LookupSize(id backend.ID, tpe pack.BlobType) (uint, error) { +func (mi *MasterIndex) LookupSize(id restic.ID, tpe pack.BlobType) (uint, error) { mi.idxMutex.RLock() defer mi.idxMutex.RUnlock() @@ -58,7 +58,7 @@ func (mi *MasterIndex) LookupSize(id backend.ID, tpe pack.BlobType) (uint, error // ListPack returns the list of blobs in a pack. The first matching index is // returned, or nil if no index contains information about the pack id. -func (mi *MasterIndex) ListPack(id backend.ID) (list []PackedBlob) { +func (mi *MasterIndex) ListPack(id restic.ID) (list []PackedBlob) { mi.idxMutex.RLock() defer mi.idxMutex.RUnlock() @@ -73,7 +73,7 @@ func (mi *MasterIndex) ListPack(id backend.ID) (list []PackedBlob) { } // Has queries all known Indexes for the ID and returns the first match. -func (mi *MasterIndex) Has(id backend.ID, tpe pack.BlobType) bool { +func (mi *MasterIndex) Has(id restic.ID, tpe pack.BlobType) bool { mi.idxMutex.RLock() defer mi.idxMutex.RUnlock() @@ -197,7 +197,7 @@ func (mi *MasterIndex) All() []*Index { // RebuildIndex combines all known indexes to a new index, leaving out any // packs whose ID is contained in packBlacklist. The new index contains the IDs // of all known indexes in the "supersedes" field. 
-func (mi *MasterIndex) RebuildIndex(packBlacklist backend.IDSet) (*Index, error) { +func (mi *MasterIndex) RebuildIndex(packBlacklist restic.IDSet) (*Index, error) { mi.idxMutex.Lock() defer mi.idxMutex.Unlock() diff --git a/src/restic/repository/packer_manager.go b/src/restic/repository/packer_manager.go index 32c8a73d4..ea638979f 100644 --- a/src/restic/repository/packer_manager.go +++ b/src/restic/repository/packer_manager.go @@ -4,11 +4,11 @@ import ( "io" "io/ioutil" "os" + "restic" "sync" "github.com/pkg/errors" - "restic/backend" "restic/crypto" "restic/debug" "restic/fs" @@ -17,7 +17,7 @@ import ( // Saver implements saving data in a backend. type Saver interface { - Save(h backend.Handle, jp []byte) error + Save(h restic.Handle, jp []byte) error } // packerManager keeps a list of open packs and creates new on demand. @@ -114,8 +114,8 @@ func (r *Repository) savePacker(p *pack.Packer) error { return errors.Wrap(err, "Close") } - id := backend.Hash(data) - h := backend.Handle{Type: backend.Data, Name: id.String()} + id := restic.Hash(data) + h := restic.Handle{Type: restic.DataFile, Name: id.String()} err = r.be.Save(h, data) if err != nil { diff --git a/src/restic/repository/packer_manager_test.go b/src/restic/repository/packer_manager_test.go index 78d91bc37..0e99e2e90 100644 --- a/src/restic/repository/packer_manager_test.go +++ b/src/restic/repository/packer_manager_test.go @@ -4,7 +4,7 @@ import ( "io" "math/rand" "os" - "restic/backend" + "restic" "restic/backend/mem" "restic/crypto" "restic/pack" @@ -36,8 +36,8 @@ func (r *randReader) Read(p []byte) (n int, err error) { return len(p), nil } -func randomID(rd io.Reader) backend.ID { - id := backend.ID{} +func randomID(rd io.Reader) restic.ID { + id := restic.ID{} _, err := io.ReadFull(rd, id[:]) if err != nil { panic(err) @@ -64,7 +64,7 @@ func saveFile(t testing.TB, be Saver, filename string, n int) { t.Fatal(err) } - h := backend.Handle{Type: backend.Data, Name: backend.Hash(data).String()} + h := restic.Handle{Type: restic.DataFile, Name: restic.Hash(data).String()} err = be.Save(h, data) if err != nil { @@ -137,7 +137,7 @@ func flushRemainingPacks(t testing.TB, rnd *randReader, be Saver, pm *packerMana type fakeBackend struct{} -func (f *fakeBackend) Save(h backend.Handle, data []byte) error { +func (f *fakeBackend) Save(h restic.Handle, data []byte) error { return nil } diff --git a/src/restic/repository/parallel.go b/src/restic/repository/parallel.go index 7094ae299..cf892a779 100644 --- a/src/restic/repository/parallel.go +++ b/src/restic/repository/parallel.go @@ -1,6 +1,7 @@ package repository import ( + "restic" "sync" "restic/backend" @@ -23,12 +24,12 @@ type ParallelWorkFunc func(id string, done <-chan struct{}) error // ParallelIDWorkFunc gets one backend.ID to work on. If an error is returned, // processing stops. If done is closed, the function should return. -type ParallelIDWorkFunc func(id backend.ID, done <-chan struct{}) error +type ParallelIDWorkFunc func(id restic.ID, done <-chan struct{}) error // FilesInParallel runs n workers of f in parallel, on the IDs that // repo.List(t) yield. If f returns an error, the process is aborted and the // first error is returned. 
-func FilesInParallel(repo backend.Lister, t backend.Type, n uint, f ParallelWorkFunc) error { +func FilesInParallel(repo backend.Lister, t restic.FileType, n uint, f ParallelWorkFunc) error { done := make(chan struct{}) defer closeIfOpen(done) diff --git a/src/restic/repository/parallel_test.go b/src/restic/repository/parallel_test.go index 6aab24b0e..30b0238bd 100644 --- a/src/restic/repository/parallel_test.go +++ b/src/restic/repository/parallel_test.go @@ -2,12 +2,12 @@ package repository_test import ( "math/rand" + "restic" "testing" "time" "github.com/pkg/errors" - "restic/backend" "restic/repository" . "restic/test" ) @@ -73,7 +73,7 @@ var lister = testIDs{ "34dd044c228727f2226a0c9c06a3e5ceb5e30e31cb7854f8fa1cde846b395a58", } -func (tests testIDs) List(t backend.Type, done <-chan struct{}) <-chan string { +func (tests testIDs) List(t restic.FileType, done <-chan struct{}) <-chan string { ch := make(chan string) go func() { @@ -100,7 +100,7 @@ func TestFilesInParallel(t *testing.T) { } for n := uint(1); n < 5; n++ { - err := repository.FilesInParallel(lister, backend.Data, n*100, f) + err := repository.FilesInParallel(lister, restic.DataFile, n*100, f) OK(t, err) } } @@ -120,7 +120,7 @@ func TestFilesInParallelWithError(t *testing.T) { } for n := uint(1); n < 5; n++ { - err := repository.FilesInParallel(lister, backend.Data, n*100, f) + err := repository.FilesInParallel(lister, restic.DataFile, n*100, f) Equals(t, errTest, err) } } diff --git a/src/restic/repository/repack.go b/src/restic/repository/repack.go index 2c61705da..a799f9de5 100644 --- a/src/restic/repository/repack.go +++ b/src/restic/repository/repack.go @@ -3,7 +3,7 @@ package repository import ( "bytes" "io" - "restic/backend" + "restic" "restic/crypto" "restic/debug" "restic/pack" @@ -15,13 +15,13 @@ import ( // these packs. Each pack is loaded and the blobs listed in keepBlobs is saved // into a new pack. Afterwards, the packs are removed. This operation requires // an exclusive lock on the repo. 
-func Repack(repo *Repository, packs backend.IDSet, keepBlobs pack.BlobSet) (err error) { +func Repack(repo *Repository, packs restic.IDSet, keepBlobs pack.BlobSet) (err error) { debug.Log("Repack", "repacking %d packs while keeping %d blobs", len(packs), len(keepBlobs)) buf := make([]byte, 0, maxPackSize) for packID := range packs { // load the complete pack - h := backend.Handle{Type: backend.Data, Name: packID.String()} + h := restic.Handle{Type: restic.DataFile, Name: packID.String()} l, err := repo.Backend().Load(h, buf[:cap(buf)], 0) if errors.Cause(err) == io.ErrUnexpectedEOF { @@ -75,7 +75,7 @@ func Repack(repo *Repository, packs backend.IDSet, keepBlobs pack.BlobSet) (err } for packID := range packs { - err := repo.Backend().Remove(backend.Data, packID.String()) + err := repo.Backend().Remove(restic.DataFile, packID.String()) if err != nil { debug.Log("Repack", "error removing pack %v: %v", packID.Str(), err) return err diff --git a/src/restic/repository/repack_test.go b/src/restic/repository/repack_test.go index b29c7e622..f729eed91 100644 --- a/src/restic/repository/repack_test.go +++ b/src/restic/repository/repack_test.go @@ -3,7 +3,7 @@ package repository_test import ( "io" "math/rand" - "restic/backend" + "restic" "restic/pack" "restic/repository" "testing" @@ -14,7 +14,7 @@ func randomSize(min, max int) int { } func random(t testing.TB, length int) []byte { - rd := repository.NewRandReader(rand.New(rand.NewSource(int64(length)))) + rd := restic.NewRandReader(rand.New(rand.NewSource(int64(length)))) buf := make([]byte, length) _, err := io.ReadFull(rd, buf) if err != nil { @@ -40,7 +40,7 @@ func createRandomBlobs(t testing.TB, repo *repository.Repository, blobs int, pDa } buf := random(t, length) - id := backend.Hash(buf) + id := restic.Hash(buf) if repo.Index().Has(id, pack.Data) { t.Errorf("duplicate blob %v/%v ignored", id, pack.Data) @@ -75,7 +75,7 @@ func selectBlobs(t *testing.T, repo *repository.Repository, p float32) (list1, l blobs := pack.NewBlobSet() - for id := range repo.List(backend.Data, done) { + for id := range repo.List(restic.DataFile, done) { entries, _, err := repo.ListPack(id) if err != nil { t.Fatalf("error listing pack %v: %v", id, err) @@ -101,20 +101,20 @@ func selectBlobs(t *testing.T, repo *repository.Repository, p float32) (list1, l return list1, list2 } -func listPacks(t *testing.T, repo *repository.Repository) backend.IDSet { +func listPacks(t *testing.T, repo *repository.Repository) restic.IDSet { done := make(chan struct{}) defer close(done) - list := backend.NewIDSet() - for id := range repo.List(backend.Data, done) { + list := restic.NewIDSet() + for id := range repo.List(restic.DataFile, done) { list.Insert(id) } return list } -func findPacksForBlobs(t *testing.T, repo *repository.Repository, blobs pack.BlobSet) backend.IDSet { - packs := backend.NewIDSet() +func findPacksForBlobs(t *testing.T, repo *repository.Repository, blobs pack.BlobSet) restic.IDSet { + packs := restic.NewIDSet() idx := repo.Index() for h := range blobs { @@ -131,7 +131,7 @@ func findPacksForBlobs(t *testing.T, repo *repository.Repository, blobs pack.Blo return packs } -func repack(t *testing.T, repo *repository.Repository, packs backend.IDSet, blobs pack.BlobSet) { +func repack(t *testing.T, repo *repository.Repository, packs restic.IDSet, blobs pack.BlobSet) { err := repository.Repack(repo, packs, blobs) if err != nil { t.Fatal(err) diff --git a/src/restic/repository/repository.go b/src/restic/repository/repository.go index d2a5a0fa9..78061ea73 100644 --- 
a/src/restic/repository/repository.go +++ b/src/restic/repository/repository.go @@ -6,6 +6,7 @@ import ( "fmt" "io" "os" + "restic" "github.com/pkg/errors" @@ -17,7 +18,7 @@ import ( // Repository is used to access a repository in a backend. type Repository struct { - be backend.Backend + be restic.Backend Config Config key *crypto.Key keyName string @@ -27,7 +28,7 @@ type Repository struct { } // New returns a new repository with backend be. -func New(be backend.Backend) *Repository { +func New(be restic.Backend) *Repository { repo := &Repository{ be: be, idx: NewMasterIndex(), @@ -40,29 +41,29 @@ func New(be backend.Backend) *Repository { // Find loads the list of all blobs of type t and searches for names which start // with prefix. If none is found, nil and ErrNoIDPrefixFound is returned. If // more than one is found, nil and ErrMultipleIDMatches is returned. -func (r *Repository) Find(t backend.Type, prefix string) (string, error) { +func (r *Repository) Find(t restic.FileType, prefix string) (string, error) { return backend.Find(r.be, t, prefix) } // PrefixLength returns the number of bytes required so that all prefixes of // all IDs of type t are unique. -func (r *Repository) PrefixLength(t backend.Type) (int, error) { +func (r *Repository) PrefixLength(t restic.FileType) (int, error) { return backend.PrefixLength(r.be, t) } // LoadAndDecrypt loads and decrypts data identified by t and id from the // backend. -func (r *Repository) LoadAndDecrypt(t backend.Type, id backend.ID) ([]byte, error) { +func (r *Repository) LoadAndDecrypt(t restic.FileType, id restic.ID) ([]byte, error) { debug.Log("Repo.Load", "load %v with id %v", t, id.Str()) - h := backend.Handle{Type: t, Name: id.String()} + h := restic.Handle{Type: t, Name: id.String()} buf, err := backend.LoadAll(r.be, h, nil) if err != nil { debug.Log("Repo.Load", "error loading %v: %v", id.Str(), err) return nil, err } - if t != backend.Config && !backend.Hash(buf).Equal(id) { + if t != restic.ConfigFile && !restic.Hash(buf).Equal(id) { return nil, errors.New("invalid data returned") } @@ -78,7 +79,7 @@ func (r *Repository) LoadAndDecrypt(t backend.Type, id backend.ID) ([]byte, erro // LoadBlob tries to load and decrypt content identified by t and id from a // pack from the backend, the result is stored in plaintextBuf, which must be // large enough to hold the complete blob. -func (r *Repository) LoadBlob(id backend.ID, t pack.BlobType, plaintextBuf []byte) ([]byte, error) { +func (r *Repository) LoadBlob(id restic.ID, t pack.BlobType, plaintextBuf []byte) ([]byte, error) { debug.Log("Repo.LoadBlob", "load %v with id %v", t, id.Str()) // lookup plaintext size of blob @@ -111,7 +112,7 @@ func (r *Repository) LoadBlob(id backend.ID, t pack.BlobType, plaintextBuf []byt } // load blob from pack - h := backend.Handle{Type: backend.Data, Name: blob.PackID.String()} + h := restic.Handle{Type: restic.DataFile, Name: blob.PackID.String()} ciphertextBuf := make([]byte, blob.Length) n, err := r.be.Load(h, ciphertextBuf, int64(blob.Offset)) if err != nil { @@ -135,7 +136,7 @@ func (r *Repository) LoadBlob(id backend.ID, t pack.BlobType, plaintextBuf []byt } // check hash - if !backend.Hash(plaintextBuf).Equal(id) { + if !restic.Hash(plaintextBuf).Equal(id) { lastError = errors.Errorf("blob %v returned invalid hash", id) continue } @@ -162,7 +163,7 @@ func closeOrErr(cl io.Closer, err *error) { // LoadJSONUnpacked decrypts the data and afterwards calls json.Unmarshal on // the item. 
-func (r *Repository) LoadJSONUnpacked(t backend.Type, id backend.ID, item interface{}) (err error) { +func (r *Repository) LoadJSONUnpacked(t restic.FileType, id restic.ID, item interface{}) (err error) { buf, err := r.LoadAndDecrypt(t, id) if err != nil { return err @@ -173,7 +174,7 @@ func (r *Repository) LoadJSONUnpacked(t backend.Type, id backend.ID, item interf // LoadJSONPack calls LoadBlob() to load a blob from the backend, decrypt the // data and afterwards call json.Unmarshal on the item. -func (r *Repository) LoadJSONPack(t pack.BlobType, id backend.ID, item interface{}) (err error) { +func (r *Repository) LoadJSONPack(t pack.BlobType, id restic.ID, item interface{}) (err error) { buf, err := r.LoadBlob(id, t, nil) if err != nil { return err @@ -183,16 +184,16 @@ func (r *Repository) LoadJSONPack(t pack.BlobType, id backend.ID, item interface } // LookupBlobSize returns the size of blob id. -func (r *Repository) LookupBlobSize(id backend.ID, tpe pack.BlobType) (uint, error) { +func (r *Repository) LookupBlobSize(id restic.ID, tpe pack.BlobType) (uint, error) { return r.idx.LookupSize(id, tpe) } // SaveAndEncrypt encrypts data and stores it to the backend as type t. If data // is small enough, it will be packed together with other small blobs. -func (r *Repository) SaveAndEncrypt(t pack.BlobType, data []byte, id *backend.ID) (backend.ID, error) { +func (r *Repository) SaveAndEncrypt(t pack.BlobType, data []byte, id *restic.ID) (restic.ID, error) { if id == nil { // compute plaintext hash - hashedID := backend.Hash(data) + hashedID := restic.Hash(data) id = &hashedID } @@ -205,19 +206,19 @@ func (r *Repository) SaveAndEncrypt(t pack.BlobType, data []byte, id *backend.ID // encrypt blob ciphertext, err := r.Encrypt(ciphertext, data) if err != nil { - return backend.ID{}, err + return restic.ID{}, err } // find suitable packer and add blob packer, err := r.findPacker(uint(len(ciphertext))) if err != nil { - return backend.ID{}, err + return restic.ID{}, err } // save ciphertext _, err = packer.Add(t, *id, ciphertext) if err != nil { - return backend.ID{}, err + return restic.ID{}, err } // if the pack is not full enough and there are less than maxPackers @@ -234,7 +235,7 @@ func (r *Repository) SaveAndEncrypt(t pack.BlobType, data []byte, id *backend.ID // SaveJSON serialises item as JSON and encrypts and saves it in a pack in the // backend as type t. -func (r *Repository) SaveJSON(t pack.BlobType, item interface{}) (backend.ID, error) { +func (r *Repository) SaveJSON(t pack.BlobType, item interface{}) (restic.ID, error) { debug.Log("Repo.SaveJSON", "save %v blob", t) buf := getBuf()[:0] defer freeBuf(buf) @@ -244,7 +245,7 @@ func (r *Repository) SaveJSON(t pack.BlobType, item interface{}) (backend.ID, er enc := json.NewEncoder(wr) err := enc.Encode(item) if err != nil { - return backend.ID{}, errors.Errorf("json.Encode: %v", err) + return restic.ID{}, errors.Errorf("json.Encode: %v", err) } buf = wr.Bytes() @@ -253,11 +254,11 @@ func (r *Repository) SaveJSON(t pack.BlobType, item interface{}) (backend.ID, er // SaveJSONUnpacked serialises item as JSON and encrypts and saves it in the // backend as type t, without a pack. It returns the storage hash. 
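Side note on the unpacked JSON helpers in this file, which now use restic.FileType and restic.ID (LoadJSONUnpacked above, SaveJSONUnpacked just below): together they give a simple encrypted round trip for config-like files. The sketch below is written under those signatures and is not code added by the patch; the repository.go diff resumes after it.

    package example

    import (
        "restic"
        "restic/repository"
    )

    // roundTrip saves item as an encrypted, unpacked JSON file of type t and
    // then loads it back into out, using the signatures from this hunk.
    func roundTrip(repo *repository.Repository, t restic.FileType, item, out interface{}) error {
        id, err := repo.SaveJSONUnpacked(t, item)
        if err != nil {
            return err
        }
        return repo.LoadJSONUnpacked(t, id, out)
    }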
-func (r *Repository) SaveJSONUnpacked(t backend.Type, item interface{}) (backend.ID, error) { +func (r *Repository) SaveJSONUnpacked(t restic.FileType, item interface{}) (restic.ID, error) { debug.Log("Repo.SaveJSONUnpacked", "save new blob %v", t) plaintext, err := json.Marshal(item) if err != nil { - return backend.ID{}, errors.Wrap(err, "json.Marshal") + return restic.ID{}, errors.Wrap(err, "json.Marshal") } return r.SaveUnpacked(t, plaintext) @@ -265,20 +266,20 @@ func (r *Repository) SaveJSONUnpacked(t backend.Type, item interface{}) (backend // SaveUnpacked encrypts data and stores it in the backend. Returned is the // storage hash. -func (r *Repository) SaveUnpacked(t backend.Type, p []byte) (id backend.ID, err error) { +func (r *Repository) SaveUnpacked(t restic.FileType, p []byte) (id restic.ID, err error) { ciphertext := make([]byte, len(p)+crypto.Extension) ciphertext, err = r.Encrypt(ciphertext, p) if err != nil { - return backend.ID{}, err + return restic.ID{}, err } - id = backend.Hash(ciphertext) - h := backend.Handle{Type: t, Name: id.String()} + id = restic.Hash(ciphertext) + h := restic.Handle{Type: t, Name: id.String()} err = r.be.Save(h, ciphertext) if err != nil { debug.Log("Repo.SaveJSONUnpacked", "error saving blob %v: %v", h, err) - return backend.ID{}, err + return restic.ID{}, err } debug.Log("Repo.SaveJSONUnpacked", "blob %v saved", h) @@ -303,7 +304,7 @@ func (r *Repository) Flush() error { } // Backend returns the backend for the repository. -func (r *Repository) Backend() backend.Backend { +func (r *Repository) Backend() restic.Backend { return r.be } @@ -318,15 +319,15 @@ func (r *Repository) SetIndex(i *MasterIndex) { } // SaveIndex saves an index in the repository. -func SaveIndex(repo *Repository, index *Index) (backend.ID, error) { +func SaveIndex(repo *Repository, index *Index) (restic.ID, error) { buf := bytes.NewBuffer(nil) err := index.Finalize(buf) if err != nil { - return backend.ID{}, err + return restic.ID{}, err } - return repo.SaveUnpacked(backend.Index, buf.Bytes()) + return repo.SaveUnpacked(restic.IndexFile, buf.Bytes()) } // saveIndex saves all indexes in the backend. @@ -365,7 +366,7 @@ func (r *Repository) LoadIndex() error { errCh := make(chan error, 1) indexes := make(chan *Index) - worker := func(id backend.ID, done <-chan struct{}) error { + worker := func(id restic.ID, done <-chan struct{}) error { idx, err := LoadIndex(r, id) if err != nil { return err @@ -381,7 +382,7 @@ func (r *Repository) LoadIndex() error { go func() { defer close(indexes) - errCh <- FilesInParallel(r.be, backend.Index, loadIndexParallelism, + errCh <- FilesInParallel(r.be, restic.IndexFile, loadIndexParallelism, ParallelWorkFuncParseID(worker)) }() @@ -397,7 +398,7 @@ func (r *Repository) LoadIndex() error { } // LoadIndex loads the index id from backend and returns it. -func LoadIndex(repo *Repository, id backend.ID) (*Index, error) { +func LoadIndex(repo *Repository, id restic.ID) (*Index, error) { idx, err := LoadIndexWithDecoder(repo, id, DecodeIndex) if err == nil { return idx, nil @@ -429,7 +430,7 @@ func (r *Repository) SearchKey(password string, maxKeys int) error { // Init creates a new master key with the supplied password, initializes and // saves the repository config. 
func (r *Repository) Init(password string) error { - has, err := r.be.Test(backend.Config, "") + has, err := r.be.Test(restic.ConfigFile, "") if err != nil { return err } @@ -457,7 +458,7 @@ func (r *Repository) init(password string, cfg Config) error { r.packerManager.key = key.master r.keyName = key.Name() r.Config = cfg - _, err = r.SaveJSONUnpacked(backend.Config, cfg) + _, err = r.SaveJSONUnpacked(restic.ConfigFile, cfg) return err } @@ -497,7 +498,7 @@ func (r *Repository) KeyName() string { } // Count returns the number of blobs of a given type in the backend. -func (r *Repository) Count(t backend.Type) (n uint) { +func (r *Repository) Count(t restic.FileType) (n uint) { for _ = range r.be.List(t, nil) { n++ } @@ -505,16 +506,16 @@ func (r *Repository) Count(t backend.Type) (n uint) { return } -func (r *Repository) list(t backend.Type, done <-chan struct{}, out chan<- backend.ID) { +func (r *Repository) list(t restic.FileType, done <-chan struct{}, out chan<- restic.ID) { defer close(out) in := r.be.List(t, done) var ( // disable sending on the outCh until we received a job - outCh chan<- backend.ID + outCh chan<- restic.ID // enable receiving from in inCh = in - id backend.ID + id restic.ID err error ) @@ -543,8 +544,8 @@ func (r *Repository) list(t backend.Type, done <-chan struct{}, out chan<- backe } // List returns a channel that yields all IDs of type t in the backend. -func (r *Repository) List(t backend.Type, done <-chan struct{}) <-chan backend.ID { - outCh := make(chan backend.ID) +func (r *Repository) List(t restic.FileType, done <-chan struct{}) <-chan restic.ID { + outCh := make(chan restic.ID) go r.list(t, done, outCh) @@ -553,8 +554,8 @@ func (r *Repository) List(t backend.Type, done <-chan struct{}) <-chan backend.I // ListPack returns the list of blobs saved in the pack id and the length of // the file as stored in the backend. -func (r *Repository) ListPack(id backend.ID) ([]pack.Blob, int64, error) { - h := backend.Handle{Type: backend.Data, Name: id.String()} +func (r *Repository) ListPack(id restic.ID) ([]pack.Blob, int64, error) { + h := restic.Handle{Type: restic.DataFile, Name: id.String()} blobInfo, err := r.Backend().Stat(h) if err != nil { diff --git a/src/restic/repository/repository_test.go b/src/restic/repository/repository_test.go index db70765a8..4e27b8943 100644 --- a/src/restic/repository/repository_test.go +++ b/src/restic/repository/repository_test.go @@ -11,7 +11,6 @@ import ( "testing" "restic" - "restic/backend" "restic/pack" "restic/repository" . "restic/test" @@ -80,7 +79,7 @@ func TestSave(t *testing.T) { _, err := io.ReadFull(rand.Reader, data) OK(t, err) - id := backend.Hash(data) + id := restic.Hash(data) // save sid, err := repo.SaveAndEncrypt(pack.Data, data, nil) @@ -114,7 +113,7 @@ func TestSaveFrom(t *testing.T) { _, err := io.ReadFull(rand.Reader, data) OK(t, err) - id := backend.Hash(data) + id := restic.Hash(data) // save id2, err := repo.SaveAndEncrypt(pack.Data, data, &id) @@ -147,7 +146,7 @@ func BenchmarkSaveAndEncrypt(t *testing.B) { _, err := io.ReadFull(rand.Reader, data) OK(t, err) - id := backend.ID(sha256.Sum256(data)) + id := restic.ID(sha256.Sum256(data)) t.ResetTimer() t.SetBytes(int64(size)) @@ -211,13 +210,13 @@ func TestLoadJSONUnpacked(t *testing.T) { sn.Hostname = "foobar" sn.Username = "test!" 
- id, err := repo.SaveJSONUnpacked(backend.Snapshot, &sn) + id, err := repo.SaveJSONUnpacked(restic.SnapshotFile, &sn) OK(t, err) var sn2 restic.Snapshot // restore - err = repo.LoadJSONUnpacked(backend.Snapshot, id, &sn2) + err = repo.LoadJSONUnpacked(restic.SnapshotFile, id, &sn2) OK(t, err) Equals(t, sn.Hostname, sn2.Hostname) @@ -286,19 +285,19 @@ func TestRepositoryIncrementalIndex(t *testing.T) { OK(t, repo.SaveIndex()) type packEntry struct { - id backend.ID + id restic.ID indexes []*repository.Index } - packEntries := make(map[backend.ID]map[backend.ID]struct{}) + packEntries := make(map[restic.ID]map[restic.ID]struct{}) - for id := range repo.List(backend.Index, nil) { + for id := range repo.List(restic.IndexFile, nil) { idx, err := repository.LoadIndex(repo, id) OK(t, err) for pb := range idx.Each(nil) { if _, ok := packEntries[pb.PackID]; !ok { - packEntries[pb.PackID] = make(map[backend.ID]struct{}) + packEntries[pb.PackID] = make(map[restic.ID]struct{}) } packEntries[pb.PackID][id] = struct{}{} diff --git a/src/restic/repository/testing.go b/src/restic/repository/testing.go index 904ee397c..5b696a3a6 100644 --- a/src/restic/repository/testing.go +++ b/src/restic/repository/testing.go @@ -2,7 +2,7 @@ package repository import ( "os" - "restic/backend" + "restic" "restic/backend/local" "restic/backend/mem" "restic/crypto" @@ -25,7 +25,7 @@ func TestUseLowSecurityKDFParameters(t testing.TB) { } // TestBackend returns a fully configured in-memory backend. -func TestBackend(t testing.TB) (be backend.Backend, cleanup func()) { +func TestBackend(t testing.TB) (be restic.Backend, cleanup func()) { return mem.New(), func() {} } @@ -37,7 +37,7 @@ const testChunkerPol = chunker.Pol(0x3DA3358B4DC173) // TestRepositoryWithBackend returns a repository initialized with a test // password. If be is nil, an in-memory backend is used. A constant polynomial // is used for the chunker and low-security test parameters. -func TestRepositoryWithBackend(t testing.TB, be backend.Backend) (r *Repository, cleanup func()) { +func TestRepositoryWithBackend(t testing.TB, be restic.Backend) (r *Repository, cleanup func()) { TestUseLowSecurityKDFParameters(t) var beCleanup func() diff --git a/src/restic/restorer.go b/src/restic/restorer.go index 36ea28f87..8ba2b94f9 100644 --- a/src/restic/restorer.go +++ b/src/restic/restorer.go @@ -6,15 +6,13 @@ import ( "github.com/pkg/errors" - "restic/backend" "restic/debug" "restic/fs" - "restic/repository" ) // Restorer is used to restore a snapshot to a directory. type Restorer struct { - repo *repository.Repository + repo Repository sn *Snapshot Error func(dir string, node *Node, err error) error @@ -24,7 +22,7 @@ type Restorer struct { var restorerAbortOnAllErrors = func(str string, node *Node, err error) error { return err } // NewRestorer creates a restorer preloaded with the content from the snapshot id. 
-func NewRestorer(repo *repository.Repository, id backend.ID) (*Restorer, error) { +func NewRestorer(repo Repository, id ID) (*Restorer, error) { r := &Restorer{ repo: repo, Error: restorerAbortOnAllErrors, SelectFilter: func(string, string, *Node) bool { return true }, @@ -40,7 +38,7 @@ func NewRestorer(repo *repository.Repository, id backend.ID) (*Restorer, error) return r, nil } -func (res *Restorer) restoreTo(dst string, dir string, treeID backend.ID) error { +func (res *Restorer) restoreTo(dst string, dir string, treeID ID) error { tree, err := LoadTree(res.repo, treeID) if err != nil { return res.Error(dir, nil, err) diff --git a/src/restic/snapshot.go b/src/restic/snapshot.go index 2ce01b4d9..82a0f60f5 100644 --- a/src/restic/snapshot.go +++ b/src/restic/snapshot.go @@ -10,22 +10,21 @@ import ( "github.com/pkg/errors" "restic/backend" - "restic/repository" ) // Snapshot is the state of a resource at one point in time. type Snapshot struct { - Time time.Time `json:"time"` - Parent *backend.ID `json:"parent,omitempty"` - Tree *backend.ID `json:"tree"` - Paths []string `json:"paths"` - Hostname string `json:"hostname,omitempty"` - Username string `json:"username,omitempty"` - UID uint32 `json:"uid,omitempty"` - GID uint32 `json:"gid,omitempty"` - Excludes []string `json:"excludes,omitempty"` + Time time.Time `json:"time"` + Parent *ID `json:"parent,omitempty"` + Tree *ID `json:"tree"` + Paths []string `json:"paths"` + Hostname string `json:"hostname,omitempty"` + Username string `json:"username,omitempty"` + UID uint32 `json:"uid,omitempty"` + GID uint32 `json:"gid,omitempty"` + Excludes []string `json:"excludes,omitempty"` - id *backend.ID // plaintext ID, used during restore + id *ID // plaintext ID, used during restore } // NewSnapshot returns an initialized snapshot struct for the current user and @@ -56,9 +55,9 @@ func NewSnapshot(paths []string) (*Snapshot, error) { } // LoadSnapshot loads the snapshot with the id and returns it. -func LoadSnapshot(repo *repository.Repository, id backend.ID) (*Snapshot, error) { +func LoadSnapshot(repo Repository, id ID) (*Snapshot, error) { sn := &Snapshot{id: &id} - err := repo.LoadJSONUnpacked(backend.Snapshot, id, sn) + err := repo.LoadJSONUnpacked(SnapshotFile, id, sn) if err != nil { return nil, err } @@ -67,11 +66,11 @@ func LoadSnapshot(repo *repository.Repository, id backend.ID) (*Snapshot, error) } // LoadAllSnapshots returns a list of all snapshots in the repo. -func LoadAllSnapshots(repo *repository.Repository) (snapshots []*Snapshot, err error) { +func LoadAllSnapshots(repo Repository) (snapshots []*Snapshot, err error) { done := make(chan struct{}) defer close(done) - for id := range repo.List(backend.Snapshot, done) { + for id := range repo.List(SnapshotFile, done) { sn, err := LoadSnapshot(repo, id) if err != nil { return nil, err @@ -89,7 +88,7 @@ func (sn Snapshot) String() string { } // ID retuns the snapshot's ID. 
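[Illustrative aside, not part of the patch: with snapshot.go depending only on the restic.Repository interface, listing snapshots no longer needs the repository package. A sketch using the functions shown above, with imports and error handling abbreviated.]

    // List every snapshot in the repository and print a short summary.
    snapshots, err := restic.LoadAllSnapshots(repo)
    if err != nil {
        return err
    }
    for _, sn := range snapshots {
        fmt.Printf("%v  %v  %v\n", sn.ID().Str(), sn.Time.Format(time.RFC3339), sn.Paths)
    }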
-func (sn Snapshot) ID() *backend.ID { +func (sn Snapshot) ID() *ID { return sn.id } @@ -131,17 +130,17 @@ func SamePaths(expected, actual []string) bool { var ErrNoSnapshotFound = errors.New("no snapshot found") // FindLatestSnapshot finds latest snapshot with optional target/directory and source filters -func FindLatestSnapshot(repo *repository.Repository, targets []string, source string) (backend.ID, error) { +func FindLatestSnapshot(repo Repository, targets []string, source string) (ID, error) { var ( latest time.Time - latestID backend.ID + latestID ID found bool ) - for snapshotID := range repo.List(backend.Snapshot, make(chan struct{})) { + for snapshotID := range repo.List(SnapshotFile, make(chan struct{})) { snapshot, err := LoadSnapshot(repo, snapshotID) if err != nil { - return backend.ID{}, errors.Errorf("Error listing snapshot: %v", err) + return ID{}, errors.Errorf("Error listing snapshot: %v", err) } if snapshot.Time.After(latest) && SamePaths(snapshot.Paths, targets) && (source == "" || source == snapshot.Hostname) { latest = snapshot.Time @@ -151,7 +150,7 @@ func FindLatestSnapshot(repo *repository.Repository, targets []string, source st } if !found { - return backend.ID{}, ErrNoSnapshotFound + return ID{}, ErrNoSnapshotFound } return latestID, nil @@ -159,13 +158,13 @@ func FindLatestSnapshot(repo *repository.Repository, targets []string, source st // FindSnapshot takes a string and tries to find a snapshot whose ID matches // the string as closely as possible. -func FindSnapshot(repo *repository.Repository, s string) (backend.ID, error) { +func FindSnapshot(repo Repository, s string) (ID, error) { // find snapshot id with prefix - name, err := backend.Find(repo.Backend(), backend.Snapshot, s) + name, err := backend.Find(repo.Backend(), SnapshotFile, s) if err != nil { - return backend.ID{}, err + return ID{}, err } - return backend.ParseID(name) + return ParseID(name) } diff --git a/src/restic/testing.go b/src/restic/testing.go index cf2500b17..a10e36ff9 100644 --- a/src/restic/testing.go +++ b/src/restic/testing.go @@ -1,13 +1,12 @@ package restic import ( + crand "crypto/rand" "encoding/json" "fmt" "io" "math/rand" - "restic/backend" "restic/pack" - "restic/repository" "testing" "time" @@ -17,21 +16,21 @@ import ( // fakeFile returns a reader which yields deterministic pseudo-random data. func fakeFile(t testing.TB, seed, size int64) io.Reader { - return io.LimitReader(repository.NewRandReader(rand.New(rand.NewSource(seed))), size) + return io.LimitReader(NewRandReader(rand.New(rand.NewSource(seed))), size) } type fakeFileSystem struct { t testing.TB - repo *repository.Repository - knownBlobs backend.IDSet + repo Repository + knownBlobs IDSet duplication float32 } // saveFile reads from rd and saves the blobs in the repository. The list of // IDs is returned. 
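[Illustrative aside, not part of the patch: a small usage sketch for FindLatestSnapshot as defined above. The targets and hostname values are hypothetical stand-ins for the backup targets and host of the current run.]

    // Find the most recent snapshot of the same paths (and, optionally, host)
    // to use as the parent snapshot of a new backup.
    parentID, err := restic.FindLatestSnapshot(repo, targets, hostname)
    switch {
    case err == restic.ErrNoSnapshotFound:
        // No earlier snapshot of these paths exists; start without a parent.
    case err != nil:
        return err
    default:
        fmt.Println("using parent snapshot", parentID.Str())
    }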
-func (fs fakeFileSystem) saveFile(rd io.Reader) (blobs backend.IDs) { - blobs = backend.IDs{} - ch := chunker.New(rd, fs.repo.Config.ChunkerPolynomial) +func (fs fakeFileSystem) saveFile(rd io.Reader) (blobs IDs) { + blobs = IDs{} + ch := chunker.New(rd, fs.repo.Config().ChunkerPolynomial()) for { chunk, err := ch.Next(getBuf()) @@ -43,7 +42,7 @@ func (fs fakeFileSystem) saveFile(rd io.Reader) (blobs backend.IDs) { fs.t.Fatalf("unable to save chunk in repo: %v", err) } - id := backend.Hash(chunk.Data) + id := Hash(chunk.Data) if !fs.blobIsKnown(id, pack.Data) { _, err := fs.repo.SaveAndEncrypt(pack.Data, chunk.Data, &id) if err != nil { @@ -66,20 +65,20 @@ const ( maxNodes = 32 ) -func (fs fakeFileSystem) treeIsKnown(tree *Tree) (bool, backend.ID) { +func (fs fakeFileSystem) treeIsKnown(tree *Tree) (bool, ID) { data, err := json.Marshal(tree) if err != nil { fs.t.Fatalf("json.Marshal(tree) returned error: %v", err) - return false, backend.ID{} + return false, ID{} } data = append(data, '\n') - id := backend.Hash(data) + id := Hash(data) return fs.blobIsKnown(id, pack.Tree), id } -func (fs fakeFileSystem) blobIsKnown(id backend.ID, t pack.BlobType) bool { +func (fs fakeFileSystem) blobIsKnown(id ID, t pack.BlobType) bool { if rand.Float32() < fs.duplication { return false } @@ -97,7 +96,7 @@ func (fs fakeFileSystem) blobIsKnown(id backend.ID, t pack.BlobType) bool { } // saveTree saves a tree of fake files in the repo and returns the ID. -func (fs fakeFileSystem) saveTree(seed int64, depth int) backend.ID { +func (fs fakeFileSystem) saveTree(seed int64, depth int) ID { rnd := rand.NewSource(seed) numNodes := int(rnd.Int63() % maxNodes) @@ -151,7 +150,7 @@ func (fs fakeFileSystem) saveTree(seed int64, depth int) backend.ID { // also used as the snapshot's timestamp. The tree's depth can be specified // with the parameter depth. The parameter duplication is a probability that // the same blob will saved again. -func TestCreateSnapshot(t testing.TB, repo *repository.Repository, at time.Time, depth int, duplication float32) *Snapshot { +func TestCreateSnapshot(t testing.TB, repo Repository, at time.Time, depth int, duplication float32) *Snapshot { seed := at.Unix() t.Logf("create fake snapshot at %s with seed %d", at, seed) @@ -165,14 +164,14 @@ func TestCreateSnapshot(t testing.TB, repo *repository.Repository, at time.Time, fs := fakeFileSystem{ t: t, repo: repo, - knownBlobs: backend.NewIDSet(), + knownBlobs: NewIDSet(), duplication: duplication, } treeID := fs.saveTree(seed, depth) snapshot.Tree = &treeID - id, err := repo.SaveJSONUnpacked(backend.Snapshot, snapshot) + id, err := repo.SaveJSONUnpacked(SnapshotFile, snapshot) if err != nil { t.Fatal(err) } @@ -194,24 +193,7 @@ func TestCreateSnapshot(t testing.TB, repo *repository.Repository, at time.Time, return snapshot } -// TestResetRepository removes all packs and indexes from the repository. -func TestResetRepository(t testing.TB, repo Repository) { - done := make(chan struct{}) - defer close(done) - - for _, tpe := range []FileType{SnapshotFile, IndexFile, DataFile} { - for id := range repo.Backend().List(tpe, done) { - err := repo.Backend().Remove(tpe, id) - if err != nil { - t.Errorf("removing %v (%v) failed: %v", id[0:12], tpe, err) - } - } - } - - repo.SetIndex(repository.NewMasterIndex()) -} - -// TestParseID parses s as a backend.ID and panics if that fails. +// TestParseID parses s as a ID and panics if that fails. 
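[Illustrative aside, not part of the patch: a test that wants a populated repository can combine the helpers above. A sketch assuming the repository.TestRepository helper used elsewhere in this series.]

    func TestExample(t *testing.T) {
        repo, cleanup := repository.TestRepository(t)
        defer cleanup()

        // Create a deterministic fake snapshot: tree depth 3, no duplicated blobs.
        at := time.Date(2016, 8, 31, 12, 0, 0, 0, time.UTC)
        sn := restic.TestCreateSnapshot(t, repo, at, 3, 0)
        t.Logf("snapshot at %v has tree %v", sn.Time, sn.Tree.Str())
    }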
func TestParseID(s string) ID { id, err := ParseID(s) if err != nil { @@ -220,3 +202,14 @@ func TestParseID(s string) ID { return id } + +// TestRandomID retuns a randomly generated ID. When reading from rand fails, +// the function panics. +func TestRandomID() ID { + id := ID{} + _, err := io.ReadFull(crand.Reader, id[:]) + if err != nil { + panic(err) + } + return id +} diff --git a/src/restic/testing_test.go b/src/restic/testing_test.go index 3c5ea5a6f..1258bf208 100644 --- a/src/restic/testing_test.go +++ b/src/restic/testing_test.go @@ -47,15 +47,3 @@ func TestCreateSnapshot(t *testing.T) { checker.TestCheckRepo(t, repo) } - -func BenchmarkCreateSnapshot(b *testing.B) { - repo, cleanup := repository.TestRepository(b) - defer cleanup() - - b.ResetTimer() - - for i := 0; i < b.N; i++ { - restic.TestCreateSnapshot(b, repo, testSnapshotTime, testDepth, 0) - restic.TestResetRepository(b, repo) - } -} diff --git a/src/restic/tree.go b/src/restic/tree.go index 3da5cde22..6c1b77b07 100644 --- a/src/restic/tree.go +++ b/src/restic/tree.go @@ -6,7 +6,6 @@ import ( "github.com/pkg/errors" - "restic/backend" "restic/debug" "restic/pack" ) @@ -31,10 +30,10 @@ func (t Tree) String() string { } type TreeLoader interface { - LoadJSONPack(pack.BlobType, backend.ID, interface{}) error + LoadJSONPack(pack.BlobType, ID, interface{}) error } -func LoadTree(repo TreeLoader, id backend.ID) (*Tree, error) { +func LoadTree(repo TreeLoader, id ID) (*Tree, error) { tree := &Tree{} err := repo.LoadJSONPack(pack.Tree, id, tree) if err != nil { @@ -95,7 +94,7 @@ func (t Tree) Find(name string) (*Node, error) { } // Subtrees returns a slice of all subtree IDs of the tree. -func (t Tree) Subtrees() (trees backend.IDs) { +func (t Tree) Subtrees() (trees IDs) { for _, node := range t.Nodes { if node.FileType == "dir" && node.Subtree != nil { trees = append(trees, *node.Subtree) diff --git a/src/restic/walk.go b/src/restic/walk.go index a50438f7c..91bce8f61 100644 --- a/src/restic/walk.go +++ b/src/restic/walk.go @@ -6,7 +6,6 @@ import ( "path/filepath" "sync" - "restic/backend" "restic/debug" "restic/pack" ) @@ -35,7 +34,7 @@ func NewTreeWalker(ch chan<- loadTreeJob, out chan<- WalkTreeJob) *TreeWalker { // Walk starts walking the tree given by id. When the channel done is closed, // processing stops. -func (tw *TreeWalker) Walk(path string, id backend.ID, done chan struct{}) { +func (tw *TreeWalker) Walk(path string, id ID, done chan struct{}) { debug.Log("TreeWalker.Walk", "starting on tree %v for %v", id.Str(), path) defer debug.Log("TreeWalker.Walk", "done walking tree %v for %v", id.Str(), path) @@ -119,11 +118,11 @@ type loadTreeResult struct { } type loadTreeJob struct { - id backend.ID + id ID res chan<- loadTreeResult } -type treeLoader func(backend.ID) (*Tree, error) +type treeLoader func(ID) (*Tree, error) func loadTreeWorker(wg *sync.WaitGroup, in <-chan loadTreeJob, load treeLoader, done <-chan struct{}) { debug.Log("loadTreeWorker", "start") @@ -162,10 +161,10 @@ const loadTreeWorkers = 10 // WalkTree walks the tree specified by id recursively and sends a job for each // file and directory it finds. When the channel done is closed, processing // stops. 
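[Illustrative aside, not part of the patch: a usage sketch for the walker above. The WalkTreeJob field names (Path, Error) are assumptions based on how restic's walker reports results, not something this hunk defines; repo must satisfy TreeLoader and sn is an already loaded snapshot.]

    // Walk a snapshot's tree and receive one job per file and directory.
    done := make(chan struct{})
    defer close(done)

    jobs := make(chan restic.WalkTreeJob)
    go restic.WalkTree(repo, *sn.Tree, done, jobs)

    for job := range jobs {
        if job.Error != nil { // assumed field, see note above
            log.Printf("walk error at %v: %v", job.Path, job.Error)
            continue
        }
        fmt.Println(job.Path) // assumed field, see note above
    }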
-func WalkTree(repo TreeLoader, id backend.ID, done chan struct{}, jobCh chan<- WalkTreeJob) { +func WalkTree(repo TreeLoader, id ID, done chan struct{}, jobCh chan<- WalkTreeJob) { debug.Log("WalkTree", "start on %v, start workers", id.Str()) - load := func(id backend.ID) (*Tree, error) { + load := func(id ID) (*Tree, error) { tree := &Tree{} err := repo.LoadJSONPack(pack.Tree, id, tree) if err != nil { From 51d8e6aa285dc7e1545758d3772e383996441d0d Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Wed, 31 Aug 2016 20:58:57 +0200 Subject: [PATCH 06/40] wip --- src/restic/blob.go | 79 ++++++++++++++------------ src/restic/{pack => }/blob_set.go | 18 +++--- src/restic/find.go | 10 ++-- src/restic/find_test.go | 21 ++++--- src/restic/list/list.go | 19 +++---- src/restic/node.go | 10 ++-- src/restic/pack/handle.go | 51 ----------------- src/restic/pack/pack.go | 64 +++------------------ src/restic/repository.go | 31 ++++------ src/restic/repository/index.go | 27 +++++---- src/restic/repository/index_rebuild.go | 2 +- src/restic/repository/index_test.go | 8 +-- src/restic/repository/key.go | 2 +- src/restic/repository/master_index.go | 8 +-- src/restic/repository/repack_test.go | 2 +- src/restic/repository/repository.go | 12 ++-- src/restic/snapshot.go | 15 ----- src/restic/snapshot_filter_test.go | 3 + src/restic/testing.go | 11 ++-- src/restic/tree.go | 5 +- src/restic/tree_test.go | 3 +- src/restic/walk.go | 3 +- src/restic/walk_test.go | 3 +- 23 files changed, 143 insertions(+), 264 deletions(-) rename src/restic/{pack => }/blob_set.go (81%) delete mode 100644 src/restic/pack/handle.go diff --git a/src/restic/blob.go b/src/restic/blob.go index e88e823fe..731dc8f6f 100644 --- a/src/restic/blob.go +++ b/src/restic/blob.go @@ -5,44 +5,18 @@ import ( "fmt" ) +// Blob is one part of a file or a tree. type Blob struct { - ID *ID `json:"id,omitempty"` - Size uint64 `json:"size,omitempty"` - Storage *ID `json:"sid,omitempty"` // encrypted ID - StorageSize uint64 `json:"ssize,omitempty"` // encrypted Size + Type BlobType + Length uint + ID ID + Offset uint } -type Blobs []Blob - -func (b Blob) Valid() bool { - if b.ID == nil || b.Storage == nil || b.StorageSize == 0 { - return false - } - - return true -} - -func (b Blob) String() string { - return fmt.Sprintf("Blob<%s (%d) -> %s (%d)>", - b.ID.Str(), b.Size, - b.Storage.Str(), b.StorageSize) -} - -// Compare compares two blobs by comparing the ID and the size. It returns -1, -// 0, or 1. -func (b Blob) Compare(other Blob) int { - if res := b.ID.Compare(*other.ID); res != 0 { - return res - } - - if b.Size < other.Size { - return -1 - } - if b.Size > other.Size { - return 1 - } - - return 0 +// PackedBlob is a blob stored within a file. +type PackedBlob struct { + Blob + PackID ID } // BlobHandle identifies a blob of a given type. @@ -101,3 +75,38 @@ func (t *BlobType) UnmarshalJSON(buf []byte) error { return nil } + +// BlobHandles is an ordered list of BlobHandles that implements sort.Interface. 
+type BlobHandles []BlobHandle + +func (h BlobHandles) Len() int { + return len(h) +} + +func (h BlobHandles) Less(i, j int) bool { + for k, b := range h[i].ID { + if b == h[j].ID[k] { + continue + } + + if b < h[j].ID[k] { + return true + } + + return false + } + + return h[i].Type < h[j].Type +} + +func (h BlobHandles) Swap(i, j int) { + h[i], h[j] = h[j], h[i] +} + +func (h BlobHandles) String() string { + elements := make([]string, 0, len(h)) + for _, e := range h { + elements = append(elements, e.String()) + } + return fmt.Sprintf("%v", elements) +} diff --git a/src/restic/pack/blob_set.go b/src/restic/blob_set.go similarity index 81% rename from src/restic/pack/blob_set.go rename to src/restic/blob_set.go index 686ea9315..07e88fed0 100644 --- a/src/restic/pack/blob_set.go +++ b/src/restic/blob_set.go @@ -1,12 +1,12 @@ -package pack +package restic import "sort" // BlobSet is a set of blobs. -type BlobSet map[Handle]struct{} +type BlobSet map[BlobHandle]struct{} // NewBlobSet returns a new BlobSet, populated with ids. -func NewBlobSet(handles ...Handle) BlobSet { +func NewBlobSet(handles ...BlobHandle) BlobSet { m := make(BlobSet) for _, h := range handles { m[h] = struct{}{} @@ -16,18 +16,18 @@ func NewBlobSet(handles ...Handle) BlobSet { } // Has returns true iff id is contained in the set. -func (s BlobSet) Has(h Handle) bool { +func (s BlobSet) Has(h BlobHandle) bool { _, ok := s[h] return ok } // Insert adds id to the set. -func (s BlobSet) Insert(h Handle) { +func (s BlobSet) Insert(h BlobHandle) { s[h] = struct{}{} } // Delete removes id from the set. -func (s BlobSet) Delete(h Handle) { +func (s BlobSet) Delete(h BlobHandle) { delete(s, h) } @@ -87,9 +87,9 @@ func (s BlobSet) Sub(other BlobSet) (result BlobSet) { return result } -// List returns a slice of all Handles in the set. -func (s BlobSet) List() Handles { - list := make(Handles, 0, len(s)) +// List returns a sorted slice of all BlobHandle in the set. +func (s BlobSet) List() BlobHandles { + list := make(BlobHandles, 0, len(s)) for h := range s { list = append(list, h) } diff --git a/src/restic/find.go b/src/restic/find.go index 6a9ca0fdb..204a61a09 100644 --- a/src/restic/find.go +++ b/src/restic/find.go @@ -1,12 +1,10 @@ package restic -import "restic/pack" - // FindUsedBlobs traverses the tree ID and adds all seen blobs (trees and data // blobs) to the set blobs. The tree blobs in the `seen` BlobSet will not be visited // again. 
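[Illustrative aside, not part of the patch: since BlobHandles implements sort.Interface, the handles collected in a BlobSet can be ordered deterministically. A small sketch, assuming two already-known IDs id1 and id2.]

    set := restic.NewBlobSet()
    set.Insert(restic.BlobHandle{ID: id1, Type: restic.DataBlob})
    set.Insert(restic.BlobHandle{ID: id2, Type: restic.TreeBlob})

    handles := set.List() // BlobHandles, one entry per element of the set
    sort.Sort(handles)    // ordered by ID bytes, then by blob type
    fmt.Println(handles)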
-func FindUsedBlobs(repo Repository, treeID ID, blobs pack.BlobSet, seen pack.BlobSet) error { - blobs.Insert(pack.Handle{ID: treeID, Type: pack.Tree}) +func FindUsedBlobs(repo Repository, treeID ID, blobs BlobSet, seen BlobSet) error { + blobs.Insert(BlobHandle{ID: treeID, Type: TreeBlob}) tree, err := LoadTree(repo, treeID) if err != nil { @@ -17,11 +15,11 @@ func FindUsedBlobs(repo Repository, treeID ID, blobs pack.BlobSet, seen pack.Blo switch node.FileType { case "file": for _, blob := range node.Content { - blobs.Insert(pack.Handle{ID: blob, Type: pack.Data}) + blobs.Insert(BlobHandle{ID: blob, Type: DataBlob}) } case "dir": subtreeID := *node.Subtree - h := pack.Handle{ID: subtreeID, Type: pack.Tree} + h := BlobHandle{ID: subtreeID, Type: TreeBlob} if seen.Has(h) { continue } diff --git a/src/restic/find_test.go b/src/restic/find_test.go index 9a9a85e51..a9ffdffa0 100644 --- a/src/restic/find_test.go +++ b/src/restic/find_test.go @@ -12,22 +12,21 @@ import ( "testing" "time" - "restic/pack" "restic/repository" ) -func loadIDSet(t testing.TB, filename string) pack.BlobSet { +func loadIDSet(t testing.TB, filename string) BlobSet { f, err := os.Open(filename) if err != nil { t.Logf("unable to open golden file %v: %v", filename, err) - return pack.NewBlobSet() + return NewBlobSet() } sc := bufio.NewScanner(f) - blobs := pack.NewBlobSet() + blobs := NewBlobSet() for sc.Scan() { - var h pack.Handle + var h Handle err := json.Unmarshal([]byte(sc.Text()), &h) if err != nil { t.Errorf("file %v contained invalid blob: %#v", filename, err) @@ -44,14 +43,14 @@ func loadIDSet(t testing.TB, filename string) pack.BlobSet { return blobs } -func saveIDSet(t testing.TB, filename string, s pack.BlobSet) { +func saveIDSet(t testing.TB, filename string, s BlobSet) { f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE, 0644) if err != nil { t.Fatalf("unable to update golden file %v: %v", filename, err) return } - var hs pack.Handles + var hs Handles for h := range s { hs = append(hs, h) } @@ -92,8 +91,8 @@ func TestFindUsedBlobs(t *testing.T) { } for i, sn := range snapshots { - usedBlobs := pack.NewBlobSet() - err := restic.FindUsedBlobs(repo, *sn.Tree, usedBlobs, pack.NewBlobSet()) + usedBlobs := NewBlobSet() + err := restic.FindUsedBlobs(repo, *sn.Tree, usedBlobs, NewBlobSet()) if err != nil { t.Errorf("FindUsedBlobs returned error: %v", err) continue @@ -127,8 +126,8 @@ func BenchmarkFindUsedBlobs(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { - seen := pack.NewBlobSet() - blobs := pack.NewBlobSet() + seen := NewBlobSet() + blobs := NewBlobSet() err := restic.FindUsedBlobs(repo, *sn.Tree, blobs, seen) if err != nil { b.Error(err) diff --git a/src/restic/list/list.go b/src/restic/list/list.go index e3a14798f..18bfb606f 100644 --- a/src/restic/list/list.go +++ b/src/restic/list/list.go @@ -1,8 +1,7 @@ package list import ( - "restic/backend" - "restic/pack" + "restic" "restic/worker" ) @@ -10,19 +9,19 @@ const listPackWorkers = 10 // Lister combines lists packs in a repo and blobs in a pack. type Lister interface { - List(backend.Type, <-chan struct{}) <-chan backend.ID - ListPack(backend.ID) ([]pack.Blob, int64, error) + List(restic.FileType, <-chan struct{}) <-chan restic.ID + ListPack(restic.ID) ([]restic.Blob, int64, error) } // Result is returned in the channel from LoadBlobsFromAllPacks. type Result struct { - packID backend.ID + packID restic.ID size int64 - entries []pack.Blob + entries []restic.Blob } // PackID returns the pack ID of this result. 
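[Illustrative aside, not part of the patch: a sketch of how FindUsedBlobs is typically driven, for example when computing which blobs are still referenced before pruning. snapshots is assumed to be a slice of loaded *restic.Snapshot values.]

    // Collect blobs referenced by all snapshots. The shared "seen" set ensures
    // a subtree reachable from several snapshots is only traversed once.
    used := restic.NewBlobSet()
    seen := restic.NewBlobSet()
    for _, sn := range snapshots {
        if err := restic.FindUsedBlobs(repo, *sn.Tree, used, seen); err != nil {
            return err
        }
    }
    fmt.Printf("%d blobs are still referenced\n", len(used))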
-func (l Result) PackID() backend.ID { +func (l Result) PackID() restic.ID { return l.packID } @@ -32,14 +31,14 @@ func (l Result) Size() int64 { } // Entries returns a list of all blobs saved in the pack. -func (l Result) Entries() []pack.Blob { +func (l Result) Entries() []restic.Blob { return l.entries } // AllPacks sends the contents of all packs to ch. func AllPacks(repo Lister, ch chan<- worker.Job, done <-chan struct{}) { f := func(job worker.Job, done <-chan struct{}) (interface{}, error) { - packID := job.Data.(backend.ID) + packID := job.Data.(restic.ID) entries, size, err := repo.ListPack(packID) return Result{ @@ -54,7 +53,7 @@ func AllPacks(repo Lister, ch chan<- worker.Job, done <-chan struct{}) { go func() { defer close(jobCh) - for id := range repo.List(backend.Data, done) { + for id := range repo.List(restic.DataFile, done) { select { case jobCh <- worker.Job{Data: id}: case <-done: diff --git a/src/restic/node.go b/src/restic/node.go index 60ce53b56..b7f6fbc03 100644 --- a/src/restic/node.go +++ b/src/restic/node.go @@ -16,7 +16,6 @@ import ( "restic/debug" "restic/fs" - "restic/pack" ) // Node is a file, directory or other item in a backup. @@ -43,9 +42,8 @@ type Node struct { tree *Tree - path string - err error - blobs Blobs + path string + err error } func (node Node) String() string { @@ -210,7 +208,7 @@ func (node Node) createFileAt(path string, repo Repository) error { var buf []byte for _, id := range node.Content { - size, err := repo.LookupBlobSize(id, pack.Data) + size, err := repo.LookupBlobSize(id, DataBlob) if err != nil { return err } @@ -220,7 +218,7 @@ func (node Node) createFileAt(path string, repo Repository) error { buf = make([]byte, size) } - buf, err := repo.LoadBlob(id, pack.Data, buf) + buf, err := repo.LoadBlob(id, DataBlob, buf) if err != nil { return err } diff --git a/src/restic/pack/handle.go b/src/restic/pack/handle.go deleted file mode 100644 index 9a0ce58f6..000000000 --- a/src/restic/pack/handle.go +++ /dev/null @@ -1,51 +0,0 @@ -package pack - -import ( - "fmt" - "restic/backend" -) - -// Handle identifies a blob of a given type. -type Handle struct { - ID backend.ID - Type BlobType -} - -func (h Handle) String() string { - return fmt.Sprintf("<%s/%s>", h.Type, h.ID.Str()) -} - -// Handles is an ordered list of Handles that implements sort.Interface. -type Handles []Handle - -func (h Handles) Len() int { - return len(h) -} - -func (h Handles) Less(i, j int) bool { - for k, b := range h[i].ID { - if b == h[j].ID[k] { - continue - } - - if b < h[j].ID[k] { - return true - } - - return false - } - - return h[i].Type < h[j].Type -} - -func (h Handles) Swap(i, j int) { - h[i], h[j] = h[j], h[i] -} - -func (h Handles) String() string { - elements := make([]string, 0, len(h)) - for _, e := range h { - elements = append(elements, e.String()) - } - return fmt.Sprintf("%v", elements) -} diff --git a/src/restic/pack/pack.go b/src/restic/pack/pack.go index 3fb7206a5..34e2b442b 100644 --- a/src/restic/pack/pack.go +++ b/src/restic/pack/pack.go @@ -5,6 +5,7 @@ import ( "encoding/binary" "fmt" "io" + "restic" "sync" "github.com/pkg/errors" @@ -13,58 +14,11 @@ import ( "restic/crypto" ) -// BlobType specifies what a blob stored in a pack is. -type BlobType uint8 - -// These are the blob types that can be stored in a pack. 
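[Illustrative aside, not part of the patch: a consumption sketch for list.AllPacks above. The worker.Job field names (Result, Error) are taken from restic's worker package and should be treated as assumptions here.]

    // Stream the contents of every data pack in the repository.
    done := make(chan struct{})
    defer close(done)

    ch := make(chan worker.Job)
    go list.AllPacks(repo, ch, done)

    for job := range ch {
        if job.Error != nil { // assumed field, see note above
            log.Printf("listing pack failed: %v", job.Error)
            continue
        }
        res := job.Result.(list.Result) // assumed field, see note above
        fmt.Printf("pack %v: %d blobs, %d bytes\n",
            res.PackID().Str(), len(res.Entries()), res.Size())
    }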
-const ( - Invalid BlobType = iota - Data - Tree -) - -func (t BlobType) String() string { - switch t { - case Data: - return "data" - case Tree: - return "tree" - } - - return fmt.Sprintf("", t) -} - -// MarshalJSON encodes the BlobType into JSON. -func (t BlobType) MarshalJSON() ([]byte, error) { - switch t { - case Data: - return []byte(`"data"`), nil - case Tree: - return []byte(`"tree"`), nil - } - - return nil, errors.New("unknown blob type") -} - -// UnmarshalJSON decodes the BlobType from JSON. -func (t *BlobType) UnmarshalJSON(buf []byte) error { - switch string(buf) { - case `"data"`: - *t = Data - case `"tree"`: - *t = Tree - default: - return errors.New("unknown blob type") - } - - return nil -} - // Blob is a blob within a pack. type Blob struct { - Type BlobType + Type restic.BlobType Length uint - ID backend.ID + ID restic.ID Offset uint } @@ -95,7 +49,7 @@ func NewPacker(k *crypto.Key, wr io.Writer) *Packer { // Add saves the data read from rd as a new blob to the packer. Returned is the // number of bytes written to the pack. -func (p *Packer) Add(t BlobType, id backend.ID, data []byte) (int, error) { +func (p *Packer) Add(t restic.BlobType, id restic.ID, data []byte) (int, error) { p.m.Lock() defer p.m.Unlock() @@ -110,7 +64,7 @@ func (p *Packer) Add(t BlobType, id backend.ID, data []byte) (int, error) { return n, errors.Wrap(err, "Write") } -var entrySize = uint(binary.Size(BlobType(0)) + binary.Size(uint32(0)) + backend.IDSize) +var entrySize = uint(binary.Size(restic.BlobType(0)) + binary.Size(uint32(0)) + backend.IDSize) // headerEntry is used with encoding/binary to read and write header entries type headerEntry struct { @@ -177,9 +131,9 @@ func (p *Packer) writeHeader(wr io.Writer) (bytesWritten uint, err error) { } switch b.Type { - case Data: + case restic.DataBlob: entry.Type = 0 - case Tree: + case restic.TreeBlob: entry.Type = 1 default: return 0, errors.Errorf("invalid blob type %v", b.Type) @@ -312,9 +266,9 @@ func List(k *crypto.Key, rd io.ReaderAt, size int64) (entries []Blob, err error) switch e.Type { case 0: - entry.Type = Data + entry.Type = restic.DataBlob case 1: - entry.Type = Tree + entry.Type = restic.TreeBlob default: return nil, errors.Errorf("invalid type %d", e.Type) } diff --git a/src/restic/repository.go b/src/restic/repository.go index 6dd5c2c75..82f6ee99e 100644 --- a/src/restic/repository.go +++ b/src/restic/repository.go @@ -1,10 +1,6 @@ package restic -import ( - "restic/pack" - - "github.com/restic/chunker" -) +import "github.com/restic/chunker" // Repository stores data in a backend. It provides high-level functions and // transparently encrypts/decrypts data. @@ -18,38 +14,33 @@ type Repository interface { Index() Index SaveFullIndex() error - SaveJSON(pack.BlobType, interface{}) (ID, error) + SaveJSON(BlobType, interface{}) (ID, error) Config() Config - SaveAndEncrypt(pack.BlobType, []byte, *ID) (ID, error) + SaveAndEncrypt(BlobType, []byte, *ID) (ID, error) SaveJSONUnpacked(FileType, interface{}) (ID, error) SaveIndex() error - LoadJSONPack(pack.BlobType, ID, interface{}) error + LoadJSONPack(BlobType, ID, interface{}) error LoadJSONUnpacked(FileType, ID, interface{}) error - LoadBlob(ID, pack.BlobType, []byte) ([]byte, error) + LoadBlob(ID, BlobType, []byte) ([]byte, error) - LookupBlobSize(ID, pack.BlobType) (uint, error) + LookupBlobSize(ID, BlobType) (uint, error) List(FileType, <-chan struct{}) <-chan ID + ListPack(ID) ([]Blob, int64, error) Flush() error } +// Index keeps track of the blobs are stored within files. 
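[Illustrative aside, not part of the patch: to make the PackedBlob shape above concrete, resolving a blob through the index yields the pack file plus the offset and length inside it. A sketch, assuming repo satisfies the restic.Repository interface defined in this hunk and id is a known blob ID.]

    // Ask the in-memory index where a data blob is stored.
    blobs, err := repo.Index().Lookup(id, restic.DataBlob)
    if err != nil {
        return err // blob is not referenced by any known pack
    }
    for _, pb := range blobs {
        fmt.Printf("blob %v: pack %v, offset %d, length %d\n",
            id.Str(), pb.PackID.Str(), pb.Offset, pb.Length)
    }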
type Index interface { - Has(ID, pack.BlobType) bool - Lookup(ID, pack.BlobType) ([]PackedBlob, error) + Has(ID, BlobType) bool + Lookup(ID, BlobType) ([]PackedBlob, error) } +// Config stores information about the repository. type Config interface { ChunkerPolynomial() chunker.Pol } - -type PackedBlob interface { - Type() pack.BlobType - Length() uint - ID() ID - Offset() uint - PackID() ID -} diff --git a/src/restic/repository/index.go b/src/restic/repository/index.go index a04f1b40b..9cd8967f0 100644 --- a/src/restic/repository/index.go +++ b/src/restic/repository/index.go @@ -13,13 +13,12 @@ import ( "restic/crypto" "restic/debug" - "restic/pack" ) // Index holds a lookup table for id -> pack. type Index struct { m sync.Mutex - pack map[pack.Handle][]indexEntry + pack map[restic.BlobHandle][]indexEntry final bool // set to true for all indexes read from the backend ("finalized") id restic.ID // set to the ID of the index when it's finalized @@ -36,7 +35,7 @@ type indexEntry struct { // NewIndex returns a new index. func NewIndex() *Index { return &Index{ - pack: make(map[pack.Handle][]indexEntry), + pack: make(map[restic.BlobHandle][]indexEntry), created: time.Now(), } } @@ -47,7 +46,7 @@ func (idx *Index) store(blob PackedBlob) { offset: blob.Offset, length: blob.Length, } - h := pack.Handle{ID: blob.ID, Type: blob.Type} + h := restic.BlobHandle{ID: blob.ID, Type: blob.Type} idx.pack[h] = append(idx.pack[h], newEntry) } @@ -112,11 +111,11 @@ func (idx *Index) Store(blob PackedBlob) { } // Lookup queries the index for the blob ID and returns a PackedBlob. -func (idx *Index) Lookup(id restic.ID, tpe pack.BlobType) (blobs []PackedBlob, err error) { +func (idx *Index) Lookup(id restic.ID, tpe restic.BlobType) (blobs []PackedBlob, err error) { idx.m.Lock() defer idx.m.Unlock() - h := pack.Handle{ID: id, Type: tpe} + h := restic.BlobHandle{ID: id, Type: tpe} if packs, ok := idx.pack[h]; ok { blobs = make([]PackedBlob, 0, len(packs)) @@ -166,7 +165,7 @@ func (idx *Index) ListPack(id restic.ID) (list []PackedBlob) { } // Has returns true iff the id is listed in the index. -func (idx *Index) Has(id restic.ID, tpe pack.BlobType) bool { +func (idx *Index) Has(id restic.ID, tpe restic.BlobType) bool { _, err := idx.Lookup(id, tpe) if err == nil { return true @@ -177,7 +176,7 @@ func (idx *Index) Has(id restic.ID, tpe pack.BlobType) bool { // LookupSize returns the length of the cleartext content behind the // given id -func (idx *Index) LookupSize(id restic.ID, tpe pack.BlobType) (cleartextLength uint, err error) { +func (idx *Index) LookupSize(id restic.ID, tpe restic.BlobType) (cleartextLength uint, err error) { blobs, err := idx.Lookup(id, tpe) if err != nil { return 0, err @@ -207,7 +206,7 @@ func (idx *Index) AddToSupersedes(ids ...restic.ID) error { // PackedBlob is a blob already saved within a pack. type PackedBlob struct { - Type pack.BlobType + Type restic.BlobType Length uint ID restic.ID Offset uint @@ -274,7 +273,7 @@ func (idx *Index) Packs() restic.IDSet { } // Count returns the number of blobs of type t in the index. 
-func (idx *Index) Count(t pack.BlobType) (n uint) { +func (idx *Index) Count(t restic.BlobType) (n uint) { debug.Log("Index.Count", "counting blobs of type %v", t) idx.m.Lock() defer idx.m.Unlock() @@ -305,10 +304,10 @@ type packJSON struct { } type blobJSON struct { - ID restic.ID `json:"id"` - Type pack.BlobType `json:"type"` - Offset uint `json:"offset"` - Length uint `json:"length"` + ID restic.ID `json:"id"` + Type restic.BlobType `json:"type"` + Offset uint `json:"offset"` + Length uint `json:"length"` } // generatePackList returns a list of packs. diff --git a/src/restic/repository/index_rebuild.go b/src/restic/repository/index_rebuild.go index ba9321900..01fb78a9e 100644 --- a/src/restic/repository/index_rebuild.go +++ b/src/restic/repository/index_rebuild.go @@ -12,7 +12,7 @@ import ( // RebuildIndex lists all packs in the repo, writes a new index and removes all // old indexes. This operation should only be done with an exclusive lock in // place. -func RebuildIndex(repo *Repository) error { +func RebuildIndex(repo restic.Repository) error { debug.Log("RebuildIndex", "start rebuilding index") done := make(chan struct{}) diff --git a/src/restic/repository/index_test.go b/src/restic/repository/index_test.go index be33422b6..6b45d5990 100644 --- a/src/restic/repository/index_test.go +++ b/src/restic/repository/index_test.go @@ -15,7 +15,7 @@ func TestIndexSerialize(t *testing.T) { type testEntry struct { id restic.ID pack restic.ID - tpe pack.BlobType + tpe restic.BlobType offset, length uint } tests := []testEntry{} @@ -251,7 +251,7 @@ var docOldExample = []byte(` var exampleTests = []struct { id, packID restic.ID - tpe pack.BlobType + tpe restic.BlobType offset, length uint }{ { @@ -271,10 +271,10 @@ var exampleTests = []struct { var exampleLookupTest = struct { packID restic.ID - blobs map[restic.ID]pack.BlobType + blobs map[restic.ID]restic.BlobType }{ ParseID("73d04e6125cf3c28a299cc2f3cca3b78ceac396e4fcf9575e34536b26782413c"), - map[restic.ID]pack.BlobType{ + map[restic.ID]restic.BlobType{ ParseID("3ec79977ef0cf5de7b08cd12b874cd0f62bbaf7f07f3497a5b1bbcc8cb39b1ce"): pack.Data, ParseID("9ccb846e60d90d4eb915848add7aa7ea1e4bbabfc60e573db9f7bfb2789afbae"): pack.Tree, ParseID("d3dc577b4ffd38cc4b32122cabf8655a0223ed22edfd93b353dc0c3f2b0fdf66"): pack.Data, diff --git a/src/restic/repository/key.go b/src/restic/repository/key.go index 792c97d08..b30d40ebf 100644 --- a/src/restic/repository/key.go +++ b/src/restic/repository/key.go @@ -143,7 +143,7 @@ func SearchKey(s *Repository, password string, maxKeys int) (*Key, error) { // LoadKey loads a key from the backend. func LoadKey(s *Repository, name string) (k *Key, err error) { - h := restic.Handle{Type: backend.Key, Name: name} + h := restic.Handle{FileType: restic.KeyFile, Name: name} data, err := backend.LoadAll(s.be, h, nil) if err != nil { return nil, err diff --git a/src/restic/repository/master_index.go b/src/restic/repository/master_index.go index adf28eea8..50a4a9e03 100644 --- a/src/restic/repository/master_index.go +++ b/src/restic/repository/master_index.go @@ -22,7 +22,7 @@ func NewMasterIndex() *MasterIndex { } // Lookup queries all known Indexes for the ID and returns the first match. 
-func (mi *MasterIndex) Lookup(id restic.ID, tpe pack.BlobType) (blobs []PackedBlob, err error) { +func (mi *MasterIndex) Lookup(id restic.ID, tpe restic.BlobType) (blobs []PackedBlob, err error) { mi.idxMutex.RLock() defer mi.idxMutex.RUnlock() @@ -42,7 +42,7 @@ func (mi *MasterIndex) Lookup(id restic.ID, tpe pack.BlobType) (blobs []PackedBl } // LookupSize queries all known Indexes for the ID and returns the first match. -func (mi *MasterIndex) LookupSize(id restic.ID, tpe pack.BlobType) (uint, error) { +func (mi *MasterIndex) LookupSize(id restic.ID, tpe restic.BlobType) (uint, error) { mi.idxMutex.RLock() defer mi.idxMutex.RUnlock() @@ -73,7 +73,7 @@ func (mi *MasterIndex) ListPack(id restic.ID) (list []PackedBlob) { } // Has queries all known Indexes for the ID and returns the first match. -func (mi *MasterIndex) Has(id restic.ID, tpe pack.BlobType) bool { +func (mi *MasterIndex) Has(id restic.ID, tpe restic.BlobType) bool { mi.idxMutex.RLock() defer mi.idxMutex.RUnlock() @@ -87,7 +87,7 @@ func (mi *MasterIndex) Has(id restic.ID, tpe pack.BlobType) bool { } // Count returns the number of blobs of type t in the index. -func (mi *MasterIndex) Count(t pack.BlobType) (n uint) { +func (mi *MasterIndex) Count(t restic.BlobType) (n uint) { mi.idxMutex.RLock() defer mi.idxMutex.RUnlock() diff --git a/src/restic/repository/repack_test.go b/src/restic/repository/repack_test.go index f729eed91..804cf77f1 100644 --- a/src/restic/repository/repack_test.go +++ b/src/restic/repository/repack_test.go @@ -27,7 +27,7 @@ func random(t testing.TB, length int) []byte { func createRandomBlobs(t testing.TB, repo *repository.Repository, blobs int, pData float32) { for i := 0; i < blobs; i++ { var ( - tpe pack.BlobType + tpe restic.BlobType length int ) diff --git a/src/restic/repository/repository.go b/src/restic/repository/repository.go index 78061ea73..55e1871a7 100644 --- a/src/restic/repository/repository.go +++ b/src/restic/repository/repository.go @@ -79,7 +79,7 @@ func (r *Repository) LoadAndDecrypt(t restic.FileType, id restic.ID) ([]byte, er // LoadBlob tries to load and decrypt content identified by t and id from a // pack from the backend, the result is stored in plaintextBuf, which must be // large enough to hold the complete blob. -func (r *Repository) LoadBlob(id restic.ID, t pack.BlobType, plaintextBuf []byte) ([]byte, error) { +func (r *Repository) LoadBlob(id restic.ID, t restic.BlobType, plaintextBuf []byte) ([]byte, error) { debug.Log("Repo.LoadBlob", "load %v with id %v", t, id.Str()) // lookup plaintext size of blob @@ -174,7 +174,7 @@ func (r *Repository) LoadJSONUnpacked(t restic.FileType, id restic.ID, item inte // LoadJSONPack calls LoadBlob() to load a blob from the backend, decrypt the // data and afterwards call json.Unmarshal on the item. -func (r *Repository) LoadJSONPack(t pack.BlobType, id restic.ID, item interface{}) (err error) { +func (r *Repository) LoadJSONPack(t restic.BlobType, id restic.ID, item interface{}) (err error) { buf, err := r.LoadBlob(id, t, nil) if err != nil { return err @@ -184,13 +184,13 @@ func (r *Repository) LoadJSONPack(t pack.BlobType, id restic.ID, item interface{ } // LookupBlobSize returns the size of blob id. -func (r *Repository) LookupBlobSize(id restic.ID, tpe pack.BlobType) (uint, error) { +func (r *Repository) LookupBlobSize(id restic.ID, tpe restic.BlobType) (uint, error) { return r.idx.LookupSize(id, tpe) } // SaveAndEncrypt encrypts data and stores it to the backend as type t. 
If data // is small enough, it will be packed together with other small blobs. -func (r *Repository) SaveAndEncrypt(t pack.BlobType, data []byte, id *restic.ID) (restic.ID, error) { +func (r *Repository) SaveAndEncrypt(t restic.BlobType, data []byte, id *restic.ID) (restic.ID, error) { if id == nil { // compute plaintext hash hashedID := restic.Hash(data) @@ -235,7 +235,7 @@ func (r *Repository) SaveAndEncrypt(t pack.BlobType, data []byte, id *restic.ID) // SaveJSON serialises item as JSON and encrypts and saves it in a pack in the // backend as type t. -func (r *Repository) SaveJSON(t pack.BlobType, item interface{}) (restic.ID, error) { +func (r *Repository) SaveJSON(t restic.BlobType, item interface{}) (restic.ID, error) { debug.Log("Repo.SaveJSON", "save %v blob", t) buf := getBuf()[:0] defer freeBuf(buf) @@ -319,7 +319,7 @@ func (r *Repository) SetIndex(i *MasterIndex) { } // SaveIndex saves an index in the repository. -func SaveIndex(repo *Repository, index *Index) (restic.ID, error) { +func SaveIndex(repo restic.Repository, index *Index) (restic.ID, error) { buf := bytes.NewBuffer(nil) err := index.Finalize(buf) diff --git a/src/restic/snapshot.go b/src/restic/snapshot.go index 82a0f60f5..a81dfb82c 100644 --- a/src/restic/snapshot.go +++ b/src/restic/snapshot.go @@ -8,8 +8,6 @@ import ( "time" "github.com/pkg/errors" - - "restic/backend" ) // Snapshot is the state of a resource at one point in time. @@ -155,16 +153,3 @@ func FindLatestSnapshot(repo Repository, targets []string, source string) (ID, e return latestID, nil } - -// FindSnapshot takes a string and tries to find a snapshot whose ID matches -// the string as closely as possible. -func FindSnapshot(repo Repository, s string) (ID, error) { - - // find snapshot id with prefix - name, err := backend.Find(repo.Backend(), SnapshotFile, s) - if err != nil { - return ID{}, err - } - - return ParseID(name) -} diff --git a/src/restic/snapshot_filter_test.go b/src/restic/snapshot_filter_test.go index 07d2e106d..a9c2e9fae 100644 --- a/src/restic/snapshot_filter_test.go +++ b/src/restic/snapshot_filter_test.go @@ -2,6 +2,7 @@ package restic import ( "encoding/json" + "flag" "fmt" "io/ioutil" "path/filepath" @@ -11,6 +12,8 @@ import ( "time" ) +var updateGoldenFiles = flag.Bool("update", false, "update golden files in testdata/") + func parseTime(s string) time.Time { t, err := time.Parse("2006-01-02 15:04:05", s) if err != nil { diff --git a/src/restic/testing.go b/src/restic/testing.go index a10e36ff9..68b6b2592 100644 --- a/src/restic/testing.go +++ b/src/restic/testing.go @@ -6,7 +6,6 @@ import ( "fmt" "io" "math/rand" - "restic/pack" "testing" "time" @@ -43,8 +42,8 @@ func (fs fakeFileSystem) saveFile(rd io.Reader) (blobs IDs) { } id := Hash(chunk.Data) - if !fs.blobIsKnown(id, pack.Data) { - _, err := fs.repo.SaveAndEncrypt(pack.Data, chunk.Data, &id) + if !fs.blobIsKnown(id, DataBlob) { + _, err := fs.repo.SaveAndEncrypt(DataBlob, chunk.Data, &id) if err != nil { fs.t.Fatalf("error saving chunk: %v", err) } @@ -74,11 +73,11 @@ func (fs fakeFileSystem) treeIsKnown(tree *Tree) (bool, ID) { data = append(data, '\n') id := Hash(data) - return fs.blobIsKnown(id, pack.Tree), id + return fs.blobIsKnown(id, TreeBlob), id } -func (fs fakeFileSystem) blobIsKnown(id ID, t pack.BlobType) bool { +func (fs fakeFileSystem) blobIsKnown(id ID, t BlobType) bool { if rand.Float32() < fs.duplication { return false } @@ -137,7 +136,7 @@ func (fs fakeFileSystem) saveTree(seed int64, depth int) ID { return id } - id, err := fs.repo.SaveJSON(pack.Tree, 
tree) + id, err := fs.repo.SaveJSON(TreeBlob, tree) if err != nil { fs.t.Fatal(err) } diff --git a/src/restic/tree.go b/src/restic/tree.go index 6c1b77b07..17b1d3403 100644 --- a/src/restic/tree.go +++ b/src/restic/tree.go @@ -7,7 +7,6 @@ import ( "github.com/pkg/errors" "restic/debug" - "restic/pack" ) type Tree struct { @@ -30,12 +29,12 @@ func (t Tree) String() string { } type TreeLoader interface { - LoadJSONPack(pack.BlobType, ID, interface{}) error + LoadJSONPack(BlobType, ID, interface{}) error } func LoadTree(repo TreeLoader, id ID) (*Tree, error) { tree := &Tree{} - err := repo.LoadJSONPack(pack.Tree, id, tree) + err := repo.LoadJSONPack(TreeBlob, id, tree) if err != nil { return nil, err } diff --git a/src/restic/tree_test.go b/src/restic/tree_test.go index 2f85819fb..2241419f4 100644 --- a/src/restic/tree_test.go +++ b/src/restic/tree_test.go @@ -8,7 +8,6 @@ import ( "testing" "restic" - "restic/pack" . "restic/test" ) @@ -98,7 +97,7 @@ func TestLoadTree(t *testing.T) { // save tree tree := restic.NewTree() - id, err := repo.SaveJSON(pack.Tree, tree) + id, err := repo.SaveJSON(TreeBlob, tree) OK(t, err) // save packs diff --git a/src/restic/walk.go b/src/restic/walk.go index 91bce8f61..8c5e52391 100644 --- a/src/restic/walk.go +++ b/src/restic/walk.go @@ -7,7 +7,6 @@ import ( "sync" "restic/debug" - "restic/pack" ) // WalkTreeJob is a job sent from the tree walker. @@ -166,7 +165,7 @@ func WalkTree(repo TreeLoader, id ID, done chan struct{}, jobCh chan<- WalkTreeJ load := func(id ID) (*Tree, error) { tree := &Tree{} - err := repo.LoadJSONPack(pack.Tree, id, tree) + err := repo.LoadJSONPack(TreeBlob, id, tree) if err != nil { return nil, err } diff --git a/src/restic/walk_test.go b/src/restic/walk_test.go index cce0e2300..c359c0ec9 100644 --- a/src/restic/walk_test.go +++ b/src/restic/walk_test.go @@ -9,7 +9,6 @@ import ( "restic" "restic/backend" - "restic/pack" "restic/pipe" "restic/repository" . 
"restic/test" @@ -95,7 +94,7 @@ type delayRepo struct { delay time.Duration } -func (d delayRepo) LoadJSONPack(t pack.BlobType, id backend.ID, dst interface{}) error { +func (d delayRepo) LoadJSONPack(t BlobType, id backend.ID, dst interface{}) error { time.Sleep(d.delay) return d.repo.LoadJSONPack(t, id, dst) } From cc6a8b6e15357fae3d294f169ee6eadf69a6485b Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Wed, 31 Aug 2016 22:39:36 +0200 Subject: [PATCH 07/40] wip --- src/restic/archiver/archive_reader.go | 53 ++-- src/restic/archiver/archive_reader_test.go | 18 +- src/restic/archiver/archiver.go | 165 ++++++----- .../archiver/archiver_duplication_test.go | 6 +- src/restic/archiver/archiver_int_test.go | 2 +- src/restic/archiver/archiver_test.go | 24 +- src/restic/archiver/buffer_pool.go | 21 ++ src/restic/backend/generic_test.go | 13 +- src/restic/backend/local/local.go | 47 ++-- src/restic/backend/mem/mem_backend.go | 40 +-- src/restic/backend/rest/rest.go | 37 +-- src/restic/backend/rest/rest_path_test.go | 22 +- src/restic/backend/rest/rest_test.go | 3 +- src/restic/backend/s3/s3.go | 52 ++-- src/restic/backend/s3/s3_test.go | 2 +- src/restic/backend/sftp/sftp.go | 43 +-- src/restic/backend/test/tests.go | 47 ++-- src/restic/backend/utils.go | 3 +- src/restic/backend/utils_test.go | 12 +- src/restic/backend_find.go | 70 +++++ src/restic/checker/checker.go | 98 +++---- src/restic/checker/checker_test.go | 10 +- src/restic/{repository => }/config.go | 9 +- src/restic/{repository => }/config_test.go | 13 +- src/restic/find_test.go | 26 +- src/restic/fuse/snapshot.go | 2 +- src/restic/index/index.go | 6 +- src/restic/index/index_test.go | 8 +- src/restic/mock/backend.go | 6 +- src/restic/node.go | 16 +- src/restic/node_test.go | 9 +- src/restic/pack/pack.go | 28 +- src/restic/pack/pack_test.go | 7 +- src/restic/{backend => }/readerat.go | 2 +- src/restic/repository.go | 15 +- src/restic/repository/index.go | 98 +++---- src/restic/repository/index_rebuild.go | 7 +- src/restic/repository/key.go | 6 +- src/restic/repository/master_index.go | 5 +- src/restic/repository/packer_manager.go | 14 +- src/restic/repository/parallel.go | 11 +- src/restic/repository/repack.go | 6 +- src/restic/repository/repository.go | 38 +-- src/restic/repository/testing.go | 2 +- src/restic/snapshot_filter_test.go | 260 +++++++++--------- src/restic/test/backend.go | 6 +- src/restic/testing.go | 2 +- src/restic/tree_test.go | 2 +- src/restic/types/repository.go | 7 +- src/restic/walk_test.go | 10 +- 50 files changed, 741 insertions(+), 668 deletions(-) create mode 100644 src/restic/archiver/buffer_pool.go create mode 100644 src/restic/backend_find.go rename src/restic/{repository => }/config.go (90%) rename src/restic/{repository => }/config_test.go (81%) rename src/restic/{backend => }/readerat.go (95%) diff --git a/src/restic/archiver/archive_reader.go b/src/restic/archiver/archive_reader.go index 88b0ba3fa..1f835c202 100644 --- a/src/restic/archiver/archive_reader.go +++ b/src/restic/archiver/archive_reader.go @@ -1,10 +1,10 @@ -package restic +package archiver import ( "encoding/json" "io" + "restic" "restic/debug" - "restic/pack" "time" "github.com/pkg/errors" @@ -12,37 +12,37 @@ import ( ) // saveTreeJSON stores a tree in the repository. 
-func saveTreeJSON(repo Repository, item interface{}) (ID, error) { +func saveTreeJSON(repo restic.Repository, item interface{}) (restic.ID, error) { data, err := json.Marshal(item) if err != nil { - return ID{}, errors.Wrap(err, "") + return restic.ID{}, errors.Wrap(err, "") } data = append(data, '\n') // check if tree has been saved before - id := Hash(data) - if repo.Index().Has(id, pack.Tree) { + id := restic.Hash(data) + if repo.Index().Has(id, restic.TreeBlob) { return id, nil } - return repo.SaveJSON(pack.Tree, item) + return repo.SaveJSON(restic.TreeBlob, item) } // ArchiveReader reads from the reader and archives the data. Returned is the // resulting snapshot and its ID. -func ArchiveReader(repo Repository, p *Progress, rd io.Reader, name string) (*Snapshot, ID, error) { +func ArchiveReader(repo restic.Repository, p *restic.Progress, rd io.Reader, name string) (*restic.Snapshot, restic.ID, error) { debug.Log("ArchiveReader", "start archiving %s", name) - sn, err := NewSnapshot([]string{name}) + sn, err := restic.NewSnapshot([]string{name}) if err != nil { - return nil, ID{}, err + return nil, restic.ID{}, err } p.Start() defer p.Done() - chnker := chunker.New(rd, repo.Config().ChunkerPolynomial()) + chnker := chunker.New(rd, repo.Config().ChunkerPolynomial) - var ids IDs + var ids restic.IDs var fileSize uint64 for { @@ -52,15 +52,15 @@ func ArchiveReader(repo Repository, p *Progress, rd io.Reader, name string) (*Sn } if err != nil { - return nil, ID{}, errors.Wrap(err, "chunker.Next()") + return nil, restic.ID{}, errors.Wrap(err, "chunker.Next()") } - id := Hash(chunk.Data) + id := restic.Hash(chunk.Data) - if !repo.Index().Has(id, pack.Data) { - _, err := repo.SaveAndEncrypt(pack.Data, chunk.Data, nil) + if !repo.Index().Has(id, restic.DataBlob) { + _, err := repo.SaveAndEncrypt(restic.DataBlob, chunk.Data, nil) if err != nil { - return nil, ID{}, err + return nil, restic.ID{}, err } debug.Log("ArchiveReader", "saved blob %v (%d bytes)\n", id.Str(), chunk.Length) } else { @@ -71,13 +71,13 @@ func ArchiveReader(repo Repository, p *Progress, rd io.Reader, name string) (*Sn ids = append(ids, id) - p.Report(Stat{Bytes: uint64(chunk.Length)}) + p.Report(restic.Stat{Bytes: uint64(chunk.Length)}) fileSize += uint64(chunk.Length) } - tree := &Tree{ - Nodes: []*Node{ - &Node{ + tree := &restic.Tree{ + Nodes: []*restic.Node{ + &restic.Node{ Name: name, AccessTime: time.Now(), ModTime: time.Now(), @@ -94,27 +94,26 @@ func ArchiveReader(repo Repository, p *Progress, rd io.Reader, name string) (*Sn treeID, err := saveTreeJSON(repo, tree) if err != nil { - return nil, ID{}, err + return nil, restic.ID{}, err } sn.Tree = &treeID debug.Log("ArchiveReader", "tree saved as %v", treeID.Str()) - id, err := repo.SaveJSONUnpacked(SnapshotFile, sn) + id, err := repo.SaveJSONUnpacked(restic.SnapshotFile, sn) if err != nil { - return nil, ID{}, err + return nil, restic.ID{}, err } - sn.id = &id debug.Log("ArchiveReader", "snapshot saved as %v", id.Str()) err = repo.Flush() if err != nil { - return nil, ID{}, err + return nil, restic.ID{}, err } err = repo.SaveIndex() if err != nil { - return nil, ID{}, err + return nil, restic.ID{}, err } return sn, id, nil diff --git a/src/restic/archiver/archive_reader_test.go b/src/restic/archiver/archive_reader_test.go index 2d5b705db..da46f030a 100644 --- a/src/restic/archiver/archive_reader_test.go +++ b/src/restic/archiver/archive_reader_test.go @@ -1,19 +1,18 @@ -package restic +package archiver import ( "bytes" "io" "math/rand" - "restic/backend" - 
"restic/pack" + "restic" "restic/repository" "testing" "github.com/restic/chunker" ) -func loadBlob(t *testing.T, repo *repository.Repository, id backend.ID, buf []byte) []byte { - buf, err := repo.LoadBlob(id, pack.Data, buf) +func loadBlob(t *testing.T, repo *repository.Repository, id restic.ID, buf []byte) []byte { + buf, err := repo.LoadBlob(id, restic.DataBlob, buf) if err != nil { t.Fatalf("LoadBlob(%v) returned error %v", id, err) } @@ -21,8 +20,8 @@ func loadBlob(t *testing.T, repo *repository.Repository, id backend.ID, buf []by return buf } -func checkSavedFile(t *testing.T, repo *repository.Repository, treeID backend.ID, name string, rd io.Reader) { - tree, err := LoadTree(repo, treeID) +func checkSavedFile(t *testing.T, repo *repository.Repository, treeID restic.ID, name string, rd io.Reader) { + tree, err := restic.LoadTree(repo, treeID) if err != nil { t.Fatalf("LoadTree() returned error %v", err) } @@ -58,6 +57,11 @@ func checkSavedFile(t *testing.T, repo *repository.Repository, treeID backend.ID } } +// fakeFile returns a reader which yields deterministic pseudo-random data. +func fakeFile(t testing.TB, seed, size int64) io.Reader { + return io.LimitReader(restic.NewRandReader(rand.New(rand.NewSource(seed))), size) +} + func TestArchiveReader(t *testing.T) { repo, cleanup := repository.TestRepository(t) defer cleanup() diff --git a/src/restic/archiver/archiver.go b/src/restic/archiver/archiver.go index a9d652e76..b4e201871 100644 --- a/src/restic/archiver/archiver.go +++ b/src/restic/archiver/archiver.go @@ -1,4 +1,4 @@ -package restic +package archiver import ( "encoding/json" @@ -6,6 +6,7 @@ import ( "io" "os" "path/filepath" + "restic" "sort" "sync" "time" @@ -14,7 +15,6 @@ import ( "restic/debug" "restic/fs" - "restic/pack" "restic/pipe" "github.com/restic/chunker" @@ -30,9 +30,9 @@ var archiverAllowAllFiles = func(string, os.FileInfo) bool { return true } // Archiver is used to backup a set of directories. type Archiver struct { - repo Repository + repo restic.Repository knownBlobs struct { - IDSet + restic.IDSet sync.Mutex } @@ -43,16 +43,16 @@ type Archiver struct { Excludes []string } -// NewArchiver returns a new archiver. -func NewArchiver(repo Repository) *Archiver { +// New returns a new archiver. +func New(repo restic.Repository) *Archiver { arch := &Archiver{ repo: repo, blobToken: make(chan struct{}, maxConcurrentBlobs), knownBlobs: struct { - IDSet + restic.IDSet sync.Mutex }{ - IDSet: NewIDSet(), + IDSet: restic.NewIDSet(), }, } @@ -70,7 +70,7 @@ func NewArchiver(repo Repository) *Archiver { // When the blob is not known, false is returned and the blob is added to the // list. This means that the caller false is returned to is responsible to save // the blob to the backend. -func (arch *Archiver) isKnownBlob(id ID, t pack.BlobType) bool { +func (arch *Archiver) isKnownBlob(id restic.ID, t restic.BlobType) bool { arch.knownBlobs.Lock() defer arch.knownBlobs.Unlock() @@ -89,10 +89,10 @@ func (arch *Archiver) isKnownBlob(id ID, t pack.BlobType) bool { } // Save stores a blob read from rd in the repository. 
-func (arch *Archiver) Save(t pack.BlobType, data []byte, id ID) error { +func (arch *Archiver) Save(t restic.BlobType, data []byte, id restic.ID) error { debug.Log("Archiver.Save", "Save(%v, %v)\n", t, id.Str()) - if arch.isKnownBlob(id, pack.Data) { + if arch.isKnownBlob(id, restic.DataBlob) { debug.Log("Archiver.Save", "blob %v is known\n", id.Str()) return nil } @@ -108,40 +108,40 @@ func (arch *Archiver) Save(t pack.BlobType, data []byte, id ID) error { } // SaveTreeJSON stores a tree in the repository. -func (arch *Archiver) SaveTreeJSON(item interface{}) (ID, error) { +func (arch *Archiver) SaveTreeJSON(item interface{}) (restic.ID, error) { data, err := json.Marshal(item) if err != nil { - return ID{}, errors.Wrap(err, "Marshal") + return restic.ID{}, errors.Wrap(err, "Marshal") } data = append(data, '\n') // check if tree has been saved before - id := Hash(data) - if arch.isKnownBlob(id, pack.Tree) { + id := restic.Hash(data) + if arch.isKnownBlob(id, restic.TreeBlob) { return id, nil } - return arch.repo.SaveJSON(pack.Tree, item) + return arch.repo.SaveJSON(restic.TreeBlob, item) } -func (arch *Archiver) reloadFileIfChanged(node *Node, file fs.File) (*Node, error) { +func (arch *Archiver) reloadFileIfChanged(node *restic.Node, file fs.File) (*restic.Node, error) { fi, err := file.Stat() if err != nil { - return nil, errors.Wrap(err, "Stat") + return nil, errors.Wrap(err, "restic.Stat") } if fi.ModTime() == node.ModTime { return node, nil } - err = arch.Error(node.path, fi, errors.New("file has changed")) + err = arch.Error(node.Path, fi, errors.New("file has changed")) if err != nil { return nil, err } - node, err = NodeFromFileInfo(node.path, fi) + node, err = restic.NodeFromFileInfo(node.Path, fi) if err != nil { - debug.Log("Archiver.SaveFile", "NodeFromFileInfo returned error for %v: %v", node.path, err) + debug.Log("Archiver.SaveFile", "restic.NodeFromFileInfo returned error for %v: %v", node.Path, err) return nil, err } @@ -149,21 +149,21 @@ func (arch *Archiver) reloadFileIfChanged(node *Node, file fs.File) (*Node, erro } type saveResult struct { - id ID + id restic.ID bytes uint64 } -func (arch *Archiver) saveChunk(chunk chunker.Chunk, p *Progress, token struct{}, file fs.File, resultChannel chan<- saveResult) { +func (arch *Archiver) saveChunk(chunk chunker.Chunk, p *restic.Progress, token struct{}, file fs.File, resultChannel chan<- saveResult) { defer freeBuf(chunk.Data) - id := Hash(chunk.Data) - err := arch.Save(pack.Data, chunk.Data, id) + id := restic.Hash(chunk.Data) + err := arch.Save(restic.DataBlob, chunk.Data, id) // TODO handle error if err != nil { panic(err) } - p.Report(Stat{Bytes: uint64(chunk.Length)}) + p.Report(restic.Stat{Bytes: uint64(chunk.Length)}) arch.blobToken <- token resultChannel <- saveResult{id: id, bytes: uint64(chunk.Length)} } @@ -182,11 +182,11 @@ func waitForResults(resultChannels [](<-chan saveResult)) ([]saveResult, error) return results, nil } -func updateNodeContent(node *Node, results []saveResult) error { - debug.Log("Archiver.Save", "checking size for file %s", node.path) +func updateNodeContent(node *restic.Node, results []saveResult) error { + debug.Log("Archiver.Save", "checking size for file %s", node.Path) var bytes uint64 - node.Content = make([]ID, len(results)) + node.Content = make([]restic.ID, len(results)) for i, b := range results { node.Content[i] = b.id @@ -196,18 +196,18 @@ func updateNodeContent(node *Node, results []saveResult) error { } if bytes != node.Size { - return errors.Errorf("errors saving node %q: 
saved %d bytes, wanted %d bytes", node.path, bytes, node.Size) + return errors.Errorf("errors saving node %q: saved %d bytes, wanted %d bytes", node.Path, bytes, node.Size) } - debug.Log("Archiver.SaveFile", "SaveFile(%q): %v blobs\n", node.path, len(results)) + debug.Log("Archiver.SaveFile", "SaveFile(%q): %v blobs\n", node.Path, len(results)) return nil } // SaveFile stores the content of the file on the backend as a Blob by calling // Save for each chunk. -func (arch *Archiver) SaveFile(p *Progress, node *Node) error { - file, err := fs.Open(node.path) +func (arch *Archiver) SaveFile(p *restic.Progress, node *restic.Node) error { + file, err := fs.Open(node.Path) defer file.Close() if err != nil { return errors.Wrap(err, "Open") @@ -218,7 +218,7 @@ func (arch *Archiver) SaveFile(p *Progress, node *Node) error { return err } - chnker := chunker.New(file, arch.repo.Config().ChunkerPolynomial()) + chnker := chunker.New(file, arch.repo.Config().ChunkerPolynomial) resultChannels := [](<-chan saveResult){} for { @@ -245,7 +245,7 @@ func (arch *Archiver) SaveFile(p *Progress, node *Node) error { return err } -func (arch *Archiver) fileWorker(wg *sync.WaitGroup, p *Progress, done <-chan struct{}, entCh <-chan pipe.Entry) { +func (arch *Archiver) fileWorker(wg *sync.WaitGroup, p *restic.Progress, done <-chan struct{}, entCh <-chan pipe.Entry) { defer func() { debug.Log("Archiver.fileWorker", "done") wg.Done() @@ -267,16 +267,16 @@ func (arch *Archiver) fileWorker(wg *sync.WaitGroup, p *Progress, done <-chan st fmt.Fprintf(os.Stderr, "error for %v: %v\n", e.Path(), e.Error()) // ignore this file e.Result() <- nil - p.Report(Stat{Errors: 1}) + p.Report(restic.Stat{Errors: 1}) continue } - node, err := NodeFromFileInfo(e.Fullpath(), e.Info()) + node, err := restic.NodeFromFileInfo(e.Fullpath(), e.Info()) if err != nil { // TODO: integrate error reporting - debug.Log("Archiver.fileWorker", "NodeFromFileInfo returned error for %v: %v", node.path, err) + debug.Log("Archiver.fileWorker", "restic.NodeFromFileInfo returned error for %v: %v", node.Path, err) e.Result() <- nil - p.Report(Stat{Errors: 1}) + p.Report(restic.Stat{Errors: 1}) continue } @@ -284,12 +284,12 @@ func (arch *Archiver) fileWorker(wg *sync.WaitGroup, p *Progress, done <-chan st if e.Node != nil { debug.Log("Archiver.fileWorker", " %v use old data", e.Path()) - oldNode := e.Node.(*Node) + oldNode := e.Node.(*restic.Node) // check if all content is still available in the repository contentMissing := false - for _, blob := range oldNode.blobs { - if ok, err := arch.repo.Backend().Test(DataFile, blob.Storage.String()); !ok || err != nil { - debug.Log("Archiver.fileWorker", " %v not using old data, %v (%v) is missing", e.Path(), blob.ID.Str(), blob.Storage.Str()) + for _, blob := range oldNode.Content { + if !arch.repo.Index().Has(blob, restic.DataBlob) { + debug.Log("Archiver.fileWorker", " %v not using old data, %v is missing", e.Path(), blob.Str()) contentMissing = true break } @@ -297,7 +297,6 @@ func (arch *Archiver) fileWorker(wg *sync.WaitGroup, p *Progress, done <-chan st if !contentMissing { node.Content = oldNode.Content - node.blobs = oldNode.blobs debug.Log("Archiver.fileWorker", " %v content is complete", e.Path()) } } else { @@ -310,20 +309,20 @@ func (arch *Archiver) fileWorker(wg *sync.WaitGroup, p *Progress, done <-chan st err = arch.SaveFile(p, node) if err != nil { // TODO: integrate error reporting - fmt.Fprintf(os.Stderr, "error for %v: %v\n", node.path, err) + fmt.Fprintf(os.Stderr, "error for %v: %v\n", node.Path, 
err) // ignore this file e.Result() <- nil - p.Report(Stat{Errors: 1}) + p.Report(restic.Stat{Errors: 1}) continue } } else { // report old data size - p.Report(Stat{Bytes: node.Size}) + p.Report(restic.Stat{Bytes: node.Size}) } - debug.Log("Archiver.fileWorker", " processed %v, %d/%d blobs", e.Path(), len(node.Content), len(node.blobs)) + debug.Log("Archiver.fileWorker", " processed %v, %d blobs", e.Path(), len(node.Content)) e.Result() <- node - p.Report(Stat{Files: 1}) + p.Report(restic.Stat{Files: 1}) case <-done: // pipeline was cancelled return @@ -331,7 +330,7 @@ func (arch *Archiver) fileWorker(wg *sync.WaitGroup, p *Progress, done <-chan st } } -func (arch *Archiver) dirWorker(wg *sync.WaitGroup, p *Progress, done <-chan struct{}, dirCh <-chan pipe.Dir) { +func (arch *Archiver) dirWorker(wg *sync.WaitGroup, p *restic.Progress, done <-chan struct{}, dirCh <-chan pipe.Dir) { debug.Log("Archiver.dirWorker", "start") defer func() { debug.Log("Archiver.dirWorker", "done") @@ -350,11 +349,11 @@ func (arch *Archiver) dirWorker(wg *sync.WaitGroup, p *Progress, done <-chan str if dir.Error() != nil { fmt.Fprintf(os.Stderr, "error walking dir %v: %v\n", dir.Path(), dir.Error()) dir.Result() <- nil - p.Report(Stat{Errors: 1}) + p.Report(restic.Stat{Errors: 1}) continue } - tree := NewTree() + tree := restic.NewTree() // wait for all content for _, ch := range dir.Entries { @@ -369,22 +368,22 @@ func (arch *Archiver) dirWorker(wg *sync.WaitGroup, p *Progress, done <-chan str } // else insert node - node := res.(*Node) + node := res.(*restic.Node) tree.Insert(node) if node.FileType == "dir" { - debug.Log("Archiver.dirWorker", "got tree node for %s: %v", node.path, node.Subtree) + debug.Log("Archiver.dirWorker", "got tree node for %s: %v", node.Path, node.Subtree) if node.Subtree.IsNull() { - panic("invalid null subtree ID") + panic("invalid null subtree restic.ID") } } } - node := &Node{} + node := &restic.Node{} if dir.Path() != "" && dir.Info() != nil { - n, err := NodeFromFileInfo(dir.Path(), dir.Info()) + n, err := restic.NodeFromFileInfo(dir.Path(), dir.Info()) if err != nil { n.Error = err.Error() dir.Result() <- n @@ -403,7 +402,7 @@ func (arch *Archiver) dirWorker(wg *sync.WaitGroup, p *Progress, done <-chan str } debug.Log("Archiver.dirWorker", "save tree for %s: %v", dir.Path(), id.Str()) if id.IsNull() { - panic("invalid null subtree ID return from SaveTreeJSON()") + panic("invalid null subtree restic.ID return from SaveTreeJSON()") } node.Subtree = &id @@ -412,7 +411,7 @@ func (arch *Archiver) dirWorker(wg *sync.WaitGroup, p *Progress, done <-chan str dir.Result() <- node if dir.Path() != "" { - p.Report(Stat{Dirs: 1}) + p.Report(restic.Stat{Dirs: 1}) } case <-done: // pipeline was cancelled @@ -422,7 +421,7 @@ func (arch *Archiver) dirWorker(wg *sync.WaitGroup, p *Progress, done <-chan str } type archivePipe struct { - Old <-chan WalkTreeJob + Old <-chan restic.WalkTreeJob New <-chan pipe.Job } @@ -457,7 +456,7 @@ func copyJobs(done <-chan struct{}, in <-chan pipe.Job, out chan<- pipe.Job) { type archiveJob struct { hasOld bool - old WalkTreeJob + old restic.WalkTreeJob new pipe.Job } @@ -471,7 +470,7 @@ func (a *archivePipe) compare(done <-chan struct{}, out chan<- pipe.Job) { var ( loadOld, loadNew bool = true, true ok bool - oldJob WalkTreeJob + oldJob restic.WalkTreeJob newJob pipe.Job ) @@ -565,7 +564,7 @@ func (j archiveJob) Copy() pipe.Job { } // if file is newer, return the new job - if j.old.Node.isNewer(j.new.Fullpath(), j.new.Info()) { + if 
j.old.Node.IsNewer(j.new.Fullpath(), j.new.Info()) { debug.Log("archiveJob.Copy", " job %v is newer", j.new.Path()) return j.new } @@ -630,10 +629,10 @@ func (p baseNameSlice) Len() int { return len(p) } func (p baseNameSlice) Less(i, j int) bool { return filepath.Base(p[i]) < filepath.Base(p[j]) } func (p baseNameSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } -// Snapshot creates a snapshot of the given paths. If parentID is set, this is +// Snapshot creates a snapshot of the given paths. If parentrestic.ID is set, this is // used to compare the files to the ones archived at the time this snapshot was // taken. -func (arch *Archiver) Snapshot(p *Progress, paths []string, parentID *ID) (*Snapshot, ID, error) { +func (arch *Archiver) Snapshot(p *restic.Progress, paths []string, parentID *restic.ID) (*restic.Snapshot, restic.ID, error) { paths = unique(paths) sort.Sort(baseNameSlice(paths)) @@ -649,9 +648,9 @@ func (arch *Archiver) Snapshot(p *Progress, paths []string, parentID *ID) (*Snap defer p.Done() // create new snapshot - sn, err := NewSnapshot(paths) + sn, err := restic.NewSnapshot(paths) if err != nil { - return nil, ID{}, err + return nil, restic.ID{}, err } sn.Excludes = arch.Excludes @@ -662,18 +661,18 @@ func (arch *Archiver) Snapshot(p *Progress, paths []string, parentID *ID) (*Snap sn.Parent = parentID // load parent snapshot - parent, err := LoadSnapshot(arch.repo, *parentID) + parent, err := restic.LoadSnapshot(arch.repo, *parentID) if err != nil { - return nil, ID{}, err + return nil, restic.ID{}, err } // start walker on old tree - ch := make(chan WalkTreeJob) - go WalkTree(arch.repo, *parent.Tree, done, ch) + ch := make(chan restic.WalkTreeJob) + go restic.WalkTree(arch.repo, *parent.Tree, done, ch) jobs.Old = ch } else { // use closed channel - ch := make(chan WalkTreeJob) + ch := make(chan restic.WalkTreeJob) close(ch) jobs.Old = ch } @@ -728,31 +727,29 @@ func (arch *Archiver) Snapshot(p *Progress, paths []string, parentID *ID) (*Snap debug.Log("Archiver.Snapshot", "workers terminated") // receive the top-level tree - root := (<-resCh).(*Node) + root := (<-resCh).(*restic.Node) debug.Log("Archiver.Snapshot", "root node received: %v", root.Subtree.Str()) sn.Tree = root.Subtree // save snapshot - id, err := arch.repo.SaveJSONUnpacked(SnapshotFile, sn) + id, err := arch.repo.SaveJSONUnpacked(restic.SnapshotFile, sn) if err != nil { - return nil, ID{}, err + return nil, restic.ID{}, err } - // store ID in snapshot struct - sn.id = &id debug.Log("Archiver.Snapshot", "saved snapshot %v", id.Str()) // flush repository err = arch.repo.Flush() if err != nil { - return nil, ID{}, err + return nil, restic.ID{}, err } // save index err = arch.repo.SaveIndex() if err != nil { debug.Log("Archiver.Snapshot", "error saving index: %v", err) - return nil, ID{}, err + return nil, restic.ID{}, err } debug.Log("Archiver.Snapshot", "saved indexes") @@ -768,13 +765,13 @@ func isRegularFile(fi os.FileInfo) bool { return fi.Mode()&(os.ModeType|os.ModeCharDevice) == 0 } -// Scan traverses the dirs to collect Stat information while emitting progress +// Scan traverses the dirs to collect restic.Stat information while emitting progress // information with p. 
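A minimal usage sketch, not part of the patch, assuming pipe.SelectFunc has the func(string, os.FileInfo) bool shape used by archiverAllowAllFiles above, plus the os and restic imports:

func scanAll(dirs []string, p *restic.Progress) (restic.Stat, error) {
	// Accept every item; Scan only counts files, dirs and bytes.
	selectAll := func(item string, fi os.FileInfo) bool { return true }
	return Scan(dirs, selectAll, p)
}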
-func Scan(dirs []string, filter pipe.SelectFunc, p *Progress) (Stat, error) { +func Scan(dirs []string, filter pipe.SelectFunc, p *restic.Progress) (restic.Stat, error) { p.Start() defer p.Done() - var stat Stat + var stat restic.Stat for _, dir := range dirs { debug.Log("Scan", "Start for %v", dir) @@ -797,7 +794,7 @@ func Scan(dirs []string, filter pipe.SelectFunc, p *Progress) (Stat, error) { return nil } - s := Stat{} + s := restic.Stat{} if fi.IsDir() { s.Dirs++ } else { @@ -817,7 +814,7 @@ func Scan(dirs []string, filter pipe.SelectFunc, p *Progress) (Stat, error) { debug.Log("Scan", "Done for %v, err: %v", dir, err) if err != nil { - return Stat{}, errors.Wrap(err, "fs.Walk") + return restic.Stat{}, errors.Wrap(err, "fs.Walk") } } diff --git a/src/restic/archiver/archiver_duplication_test.go b/src/restic/archiver/archiver_duplication_test.go index 52afb6bd6..37bed0830 100644 --- a/src/restic/archiver/archiver_duplication_test.go +++ b/src/restic/archiver/archiver_duplication_test.go @@ -1,4 +1,4 @@ -package restic_test +package archiver_test import ( "crypto/rand" @@ -103,13 +103,13 @@ func testArchiverDuplication(t *testing.T) { id := randomID() - if repo.Index().Has(id, pack.Data) { + if repo.Index().Has(id, restic.DataBlob) { continue } buf := make([]byte, 50) - err := arch.Save(pack.Data, buf, id) + err := arch.Save(restic.DataBlob, buf, id) if err != nil { t.Fatal(err) } diff --git a/src/restic/archiver/archiver_int_test.go b/src/restic/archiver/archiver_int_test.go index 9e4426889..a35410672 100644 --- a/src/restic/archiver/archiver_int_test.go +++ b/src/restic/archiver/archiver_int_test.go @@ -1,4 +1,4 @@ -package restic +package archiver import ( "os" diff --git a/src/restic/archiver/archiver_test.go b/src/restic/archiver/archiver_test.go index 7f211c618..57903931a 100644 --- a/src/restic/archiver/archiver_test.go +++ b/src/restic/archiver/archiver_test.go @@ -1,4 +1,4 @@ -package restic_test +package archiver_test import ( "bytes" @@ -146,9 +146,9 @@ func archiveWithDedup(t testing.TB) { t.Logf("archived snapshot %v", sn.ID().Str()) // get archive stats - cnt.before.packs = repo.Count(backend.Data) - cnt.before.dataBlobs = repo.Index().Count(pack.Data) - cnt.before.treeBlobs = repo.Index().Count(pack.Tree) + cnt.before.packs = repo.Count(restic.DataFile) + cnt.before.dataBlobs = repo.Index().Count(restic.DataBlob) + cnt.before.treeBlobs = repo.Index().Count(restic.TreeBlob) t.Logf("packs %v, data blobs %v, tree blobs %v", cnt.before.packs, cnt.before.dataBlobs, cnt.before.treeBlobs) @@ -157,9 +157,9 @@ func archiveWithDedup(t testing.TB) { t.Logf("archived snapshot %v", sn2.ID().Str()) // get archive stats again - cnt.after.packs = repo.Count(backend.Data) - cnt.after.dataBlobs = repo.Index().Count(pack.Data) - cnt.after.treeBlobs = repo.Index().Count(pack.Tree) + cnt.after.packs = repo.Count(restic.DataFile) + cnt.after.dataBlobs = repo.Index().Count(restic.DataBlob) + cnt.after.treeBlobs = repo.Index().Count(restic.TreeBlob) t.Logf("packs %v, data blobs %v, tree blobs %v", cnt.after.packs, cnt.after.dataBlobs, cnt.after.treeBlobs) @@ -174,9 +174,9 @@ func archiveWithDedup(t testing.TB) { t.Logf("archived snapshot %v, parent %v", sn3.ID().Str(), sn2.ID().Str()) // get archive stats again - cnt.after2.packs = repo.Count(backend.Data) - cnt.after2.dataBlobs = repo.Index().Count(pack.Data) - cnt.after2.treeBlobs = repo.Index().Count(pack.Tree) + cnt.after2.packs = repo.Count(restic.DataFile) + cnt.after2.dataBlobs = repo.Index().Count(restic.DataBlob) + cnt.after2.treeBlobs 
= repo.Index().Count(restic.TreeBlob) t.Logf("packs %v, data blobs %v, tree blobs %v", cnt.after2.packs, cnt.after2.dataBlobs, cnt.after2.treeBlobs) @@ -210,7 +210,7 @@ func BenchmarkLoadTree(t *testing.B) { for _, idx := range repo.Index().All() { for blob := range idx.Each(done) { - if blob.Type != pack.Tree { + if blob.Type != restic.TreeBlob { continue } @@ -267,7 +267,7 @@ func testParallelSaveWithDuplication(t *testing.T, seed int) { id := backend.Hash(c.Data) time.Sleep(time.Duration(id[0])) - err := arch.Save(pack.Data, c.Data, id) + err := arch.Save(restic.DataBlob, c.Data, id) <-barrier errChan <- err }(c, errChan) diff --git a/src/restic/archiver/buffer_pool.go b/src/restic/archiver/buffer_pool.go new file mode 100644 index 000000000..32df5ab7b --- /dev/null +++ b/src/restic/archiver/buffer_pool.go @@ -0,0 +1,21 @@ +package archiver + +import ( + "sync" + + "github.com/restic/chunker" +) + +var bufPool = sync.Pool{ + New: func() interface{} { + return make([]byte, chunker.MinSize) + }, +} + +func getBuf() []byte { + return bufPool.Get().([]byte) +} + +func freeBuf(data []byte) { + bufPool.Put(data) +} diff --git a/src/restic/backend/generic_test.go b/src/restic/backend/generic_test.go index 7c42e2859..20fedc997 100644 --- a/src/restic/backend/generic_test.go +++ b/src/restic/backend/generic_test.go @@ -1,6 +1,7 @@ package backend_test import ( + "restic" "testing" "restic/backend" @@ -8,10 +9,10 @@ import ( ) type mockBackend struct { - list func(backend.Type, <-chan struct{}) <-chan string + list func(restic.FileType, <-chan struct{}) <-chan string } -func (m mockBackend) List(t backend.Type, done <-chan struct{}) <-chan string { +func (m mockBackend) List(t restic.FileType, done <-chan struct{}) <-chan string { return m.list(t, done) } @@ -30,7 +31,7 @@ func TestPrefixLength(t *testing.T) { list := samples m := mockBackend{} - m.list = func(t backend.Type, done <-chan struct{}) <-chan string { + m.list = func(t restic.FileType, done <-chan struct{}) <-chan string { ch := make(chan string) go func() { defer close(ch) @@ -45,17 +46,17 @@ func TestPrefixLength(t *testing.T) { return ch } - l, err := backend.PrefixLength(m, backend.Snapshot) + l, err := backend.PrefixLength(m, restic.SnapshotFile) OK(t, err) Equals(t, 19, l) list = samples[:3] - l, err = backend.PrefixLength(m, backend.Snapshot) + l, err = backend.PrefixLength(m, restic.SnapshotFile) OK(t, err) Equals(t, 19, l) list = samples[3:] - l, err = backend.PrefixLength(m, backend.Snapshot) + l, err = backend.PrefixLength(m, restic.SnapshotFile) OK(t, err) Equals(t, 8, l) } diff --git a/src/restic/backend/local/local.go b/src/restic/backend/local/local.go index c51ee949b..4ac5c0fa7 100644 --- a/src/restic/backend/local/local.go +++ b/src/restic/backend/local/local.go @@ -5,6 +5,7 @@ import ( "io/ioutil" "os" "path/filepath" + "restic" "github.com/pkg/errors" @@ -18,6 +19,8 @@ type Local struct { p string } +var _ restic.Backend = &Local{} + func paths(dir string) []string { return []string{ dir, @@ -69,8 +72,8 @@ func (b *Local) Location() string { } // Construct path for given Type and name. -func filename(base string, t backend.Type, name string) string { - if t == backend.Config { +func filename(base string, t restic.FileType, name string) string { + if t == restic.ConfigFile { return filepath.Join(base, "config") } @@ -78,21 +81,21 @@ func filename(base string, t backend.Type, name string) string { } // Construct directory for given Type. 
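For orientation, an inference from dirname and filename here rather than something the patch states: data files are fanned out by the first two characters of their name, while the other file types live in flat directories, roughly:

	<repo>/data/aa/aa17b4...    restic.DataFile
	<repo>/snapshots/9a0b37...  restic.SnapshotFile
	<repo>/config               restic.ConfigFile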
-func dirname(base string, t backend.Type, name string) string { +func dirname(base string, t restic.FileType, name string) string { var n string switch t { - case backend.Data: + case restic.DataFile: n = backend.Paths.Data if len(name) > 2 { n = filepath.Join(n, name[:2]) } - case backend.Snapshot: + case restic.SnapshotFile: n = backend.Paths.Snapshots - case backend.Index: + case restic.IndexFile: n = backend.Paths.Index - case backend.Lock: + case restic.LockFile: n = backend.Paths.Locks - case backend.Key: + case restic.KeyFile: n = backend.Paths.Keys } return filepath.Join(base, n) @@ -102,13 +105,13 @@ func dirname(base string, t backend.Type, name string) string { // saves it in p. Load has the same semantics as io.ReaderAt, with one // exception: when off is lower than zero, it is treated as an offset relative // to the end of the file. -func (b *Local) Load(h backend.Handle, p []byte, off int64) (n int, err error) { +func (b *Local) Load(h restic.Handle, p []byte, off int64) (n int, err error) { debug.Log("backend.local.Load", "Load %v, length %v at %v", h, len(p), off) if err := h.Valid(); err != nil { return 0, err } - f, err := fs.Open(filename(b.p, h.Type, h.Name)) + f, err := fs.Open(filename(b.p, h.FileType, h.Name)) if err != nil { return 0, errors.Wrap(err, "Open") } @@ -168,7 +171,7 @@ func writeToTempfile(tempdir string, p []byte) (filename string, err error) { } // Save stores data in the backend at the handle. -func (b *Local) Save(h backend.Handle, p []byte) (err error) { +func (b *Local) Save(h restic.Handle, p []byte) (err error) { debug.Log("backend.local.Save", "Save %v, length %v", h, len(p)) if err := h.Valid(); err != nil { return err @@ -180,7 +183,7 @@ func (b *Local) Save(h backend.Handle, p []byte) (err error) { return err } - filename := filename(b.p, h.Type, h.Name) + filename := filename(b.p, h.FileType, h.Name) // test if new path already exists if _, err := fs.Stat(filename); err == nil { @@ -188,7 +191,7 @@ func (b *Local) Save(h backend.Handle, p []byte) (err error) { } // create directories if necessary, ignore errors - if h.Type == backend.Data { + if h.FileType == restic.DataFile { err = fs.MkdirAll(filepath.Dir(filename), backend.Modes.Dir) if err != nil { return errors.Wrap(err, "MkdirAll") @@ -213,22 +216,22 @@ func (b *Local) Save(h backend.Handle, p []byte) (err error) { } // Stat returns information about a blob. -func (b *Local) Stat(h backend.Handle) (backend.BlobInfo, error) { +func (b *Local) Stat(h restic.Handle) (restic.FileInfo, error) { debug.Log("backend.local.Stat", "Stat %v", h) if err := h.Valid(); err != nil { - return backend.BlobInfo{}, err + return restic.FileInfo{}, err } - fi, err := fs.Stat(filename(b.p, h.Type, h.Name)) + fi, err := fs.Stat(filename(b.p, h.FileType, h.Name)) if err != nil { - return backend.BlobInfo{}, errors.Wrap(err, "Stat") + return restic.FileInfo{}, errors.Wrap(err, "Stat") } - return backend.BlobInfo{Size: fi.Size()}, nil + return restic.FileInfo{Size: fi.Size()}, nil } // Test returns true if a blob of the given type and name exists in the backend. -func (b *Local) Test(t backend.Type, name string) (bool, error) { +func (b *Local) Test(t restic.FileType, name string) (bool, error) { debug.Log("backend.local.Test", "Test %v %v", t, name) _, err := fs.Stat(filename(b.p, t, name)) if err != nil { @@ -242,7 +245,7 @@ func (b *Local) Test(t backend.Type, name string) (bool, error) { } // Remove removes the blob with the given name and type. 
-func (b *Local) Remove(t backend.Type, name string) error { +func (b *Local) Remove(t restic.FileType, name string) error { debug.Log("backend.local.Remove", "Remove %v %v", t, name) fn := filename(b.p, t, name) @@ -317,10 +320,10 @@ func listDirs(dir string) (filenames []string, err error) { // List returns a channel that yields all names of blobs of type t. A // goroutine is started for this. If the channel done is closed, sending // stops. -func (b *Local) List(t backend.Type, done <-chan struct{}) <-chan string { +func (b *Local) List(t restic.FileType, done <-chan struct{}) <-chan string { debug.Log("backend.local.List", "List %v", t) lister := listDir - if t == backend.Data { + if t == restic.DataFile { lister = listDirs } diff --git a/src/restic/backend/mem/mem_backend.go b/src/restic/backend/mem/mem_backend.go index 339d86c5d..a2fbc8150 100644 --- a/src/restic/backend/mem/mem_backend.go +++ b/src/restic/backend/mem/mem_backend.go @@ -2,23 +2,23 @@ package mem import ( "io" + "restic" "sync" "github.com/pkg/errors" - "restic/backend" "restic/debug" ) type entry struct { - Type backend.Type + Type restic.FileType Name string } type memMap map[entry][]byte // make sure that MemoryBackend implements backend.Backend -var _ backend.Backend = &MemoryBackend{} +var _ restic.Backend = &MemoryBackend{} // MemoryBackend is a mock backend that uses a map for storing all data in // memory. This should only be used for tests. @@ -39,7 +39,7 @@ func New() *MemoryBackend { } // Test returns whether a file exists. -func (be *MemoryBackend) Test(t backend.Type, name string) (bool, error) { +func (be *MemoryBackend) Test(t restic.FileType, name string) (bool, error) { be.m.Lock() defer be.m.Unlock() @@ -53,7 +53,7 @@ func (be *MemoryBackend) Test(t backend.Type, name string) (bool, error) { } // Load reads data from the backend. -func (be *MemoryBackend) Load(h backend.Handle, p []byte, off int64) (int, error) { +func (be *MemoryBackend) Load(h restic.Handle, p []byte, off int64) (int, error) { if err := h.Valid(); err != nil { return 0, err } @@ -61,17 +61,17 @@ func (be *MemoryBackend) Load(h backend.Handle, p []byte, off int64) (int, error be.m.Lock() defer be.m.Unlock() - if h.Type == backend.Config { + if h.FileType == restic.ConfigFile { h.Name = "" } debug.Log("MemoryBackend.Load", "get %v offset %v len %v", h, off, len(p)) - if _, ok := be.data[entry{h.Type, h.Name}]; !ok { + if _, ok := be.data[entry{h.FileType, h.Name}]; !ok { return 0, errors.New("no such data") } - buf := be.data[entry{h.Type, h.Name}] + buf := be.data[entry{h.FileType, h.Name}] switch { case off > int64(len(buf)): return 0, errors.New("offset beyond end of file") @@ -93,7 +93,7 @@ func (be *MemoryBackend) Load(h backend.Handle, p []byte, off int64) (int, error } // Save adds new Data to the backend. 
-func (be *MemoryBackend) Save(h backend.Handle, p []byte) error { +func (be *MemoryBackend) Save(h restic.Handle, p []byte) error { if err := h.Valid(); err != nil { return err } @@ -101,47 +101,47 @@ func (be *MemoryBackend) Save(h backend.Handle, p []byte) error { be.m.Lock() defer be.m.Unlock() - if h.Type == backend.Config { + if h.FileType == restic.ConfigFile { h.Name = "" } - if _, ok := be.data[entry{h.Type, h.Name}]; ok { + if _, ok := be.data[entry{h.FileType, h.Name}]; ok { return errors.New("file already exists") } debug.Log("MemoryBackend.Save", "save %v bytes at %v", len(p), h) buf := make([]byte, len(p)) copy(buf, p) - be.data[entry{h.Type, h.Name}] = buf + be.data[entry{h.FileType, h.Name}] = buf return nil } // Stat returns information about a file in the backend. -func (be *MemoryBackend) Stat(h backend.Handle) (backend.BlobInfo, error) { +func (be *MemoryBackend) Stat(h restic.Handle) (restic.FileInfo, error) { be.m.Lock() defer be.m.Unlock() if err := h.Valid(); err != nil { - return backend.BlobInfo{}, err + return restic.FileInfo{}, err } - if h.Type == backend.Config { + if h.FileType == restic.ConfigFile { h.Name = "" } debug.Log("MemoryBackend.Stat", "stat %v", h) - e, ok := be.data[entry{h.Type, h.Name}] + e, ok := be.data[entry{h.FileType, h.Name}] if !ok { - return backend.BlobInfo{}, errors.New("no such data") + return restic.FileInfo{}, errors.New("no such data") } - return backend.BlobInfo{Size: int64(len(e))}, nil + return restic.FileInfo{Size: int64(len(e))}, nil } // Remove deletes a file from the backend. -func (be *MemoryBackend) Remove(t backend.Type, name string) error { +func (be *MemoryBackend) Remove(t restic.FileType, name string) error { be.m.Lock() defer be.m.Unlock() @@ -157,7 +157,7 @@ func (be *MemoryBackend) Remove(t backend.Type, name string) error { } // List returns a channel which yields entries from the backend. -func (be *MemoryBackend) List(t backend.Type, done <-chan struct{}) <-chan string { +func (be *MemoryBackend) List(t restic.FileType, done <-chan struct{}) <-chan string { be.m.Lock() defer be.m.Unlock() diff --git a/src/restic/backend/rest/rest.go b/src/restic/backend/rest/rest.go index a98fd5a80..5e756579a 100644 --- a/src/restic/backend/rest/rest.go +++ b/src/restic/backend/rest/rest.go @@ -8,6 +8,7 @@ import ( "net/http" "net/url" "path" + "restic" "strings" "github.com/pkg/errors" @@ -18,27 +19,27 @@ import ( const connLimit = 10 // restPath returns the path to the given resource. -func restPath(url *url.URL, h backend.Handle) string { +func restPath(url *url.URL, h restic.Handle) string { u := *url var dir string - switch h.Type { - case backend.Config: + switch h.FileType { + case restic.ConfigFile: dir = "" h.Name = "config" - case backend.Data: + case restic.DataFile: dir = backend.Paths.Data - case backend.Snapshot: + case restic.SnapshotFile: dir = backend.Paths.Snapshots - case backend.Index: + case restic.IndexFile: dir = backend.Paths.Index - case backend.Lock: + case restic.LockFile: dir = backend.Paths.Locks - case backend.Key: + case restic.KeyFile: dir = backend.Paths.Keys default: - dir = string(h.Type) + dir = string(h.FileType) } u.Path = path.Join(url.Path, dir, h.Name) @@ -71,7 +72,7 @@ func (b *restBackend) Location() string { // Load returns the data stored in the backend for h at the given offset // and saves it in p. Load has the same semantics as io.ReaderAt. 
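Illustrative sketch, not part of the patch: reading a whole file back through the new restic.Handle. Note the wip state of this commit: the backend implementations use the FileType field while some test helpers still write Type; the sketch assumes FileType and the restic and restic/backend imports.

func loadWhole(be restic.Backend, id restic.ID) ([]byte, error) {
	h := restic.Handle{FileType: restic.DataFile, Name: id.String()}
	// LoadAll stats the file and issues a Load that covers all of it.
	return backend.LoadAll(be, h, nil)
}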
-func (b *restBackend) Load(h backend.Handle, p []byte, off int64) (n int, err error) { +func (b *restBackend) Load(h restic.Handle, p []byte, off int64) (n int, err error) { if err := h.Valid(); err != nil { return 0, err } @@ -120,7 +121,7 @@ func (b *restBackend) Load(h backend.Handle, p []byte, off int64) (n int, err er } // Save stores data in the backend at the handle. -func (b *restBackend) Save(h backend.Handle, p []byte) (err error) { +func (b *restBackend) Save(h restic.Handle, p []byte) (err error) { if err := h.Valid(); err != nil { return err } @@ -151,7 +152,7 @@ func (b *restBackend) Save(h backend.Handle, p []byte) (err error) { } // Stat returns information about a blob. -func (b *restBackend) Stat(h backend.Handle) (backend.BlobInfo, error) { +func (b *restBackend) Stat(h restic.Handle) (backend.BlobInfo, error) { if err := h.Valid(); err != nil { return backend.BlobInfo{}, err } @@ -183,8 +184,8 @@ func (b *restBackend) Stat(h backend.Handle) (backend.BlobInfo, error) { } // Test returns true if a blob of the given type and name exists in the backend. -func (b *restBackend) Test(t backend.Type, name string) (bool, error) { - _, err := b.Stat(backend.Handle{Type: t, Name: name}) +func (b *restBackend) Test(t restic.FileType, name string) (bool, error) { + _, err := b.Stat(restic.Handle{FileType: t, Name: name}) if err != nil { return false, nil } @@ -193,8 +194,8 @@ func (b *restBackend) Test(t backend.Type, name string) (bool, error) { } // Remove removes the blob with the given name and type. -func (b *restBackend) Remove(t backend.Type, name string) error { - h := backend.Handle{Type: t, Name: name} +func (b *restBackend) Remove(t restic.FileType, name string) error { + h := restic.Handle{FileType: t, Name: name} if err := h.Valid(); err != nil { return err } @@ -221,10 +222,10 @@ func (b *restBackend) Remove(t backend.Type, name string) error { // List returns a channel that yields all names of blobs of type t. A // goroutine is started for this. If the channel done is closed, sending // stops. 
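A usage sketch, not part of the patch, following the same consumption pattern the new restic.Find uses later in this commit: close done to make the listing goroutine stop sending.

func listSnapshots(be restic.Backend) []string {
	done := make(chan struct{})
	defer close(done) // stops the sender whether we finish or bail out early
	var names []string
	for name := range be.List(restic.SnapshotFile, done) {
		names = append(names, name)
	}
	return names
}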
-func (b *restBackend) List(t backend.Type, done <-chan struct{}) <-chan string { +func (b *restBackend) List(t restic.FileType, done <-chan struct{}) <-chan string { ch := make(chan string) - url := restPath(b.url, backend.Handle{Type: t}) + url := restPath(b.url, restic.Handle{FileType: t}) if !strings.HasSuffix(url, "/") { url += "/" } diff --git a/src/restic/backend/rest/rest_path_test.go b/src/restic/backend/rest/rest_path_test.go index 285240cac..8542e42e2 100644 --- a/src/restic/backend/rest/rest_path_test.go +++ b/src/restic/backend/rest/rest_path_test.go @@ -2,36 +2,36 @@ package rest import ( "net/url" - "restic/backend" + "restic" "testing" ) var restPathTests = []struct { - Handle backend.Handle + Handle restic.Handle URL *url.URL Result string }{ { URL: parseURL("https://hostname.foo"), - Handle: backend.Handle{ - Type: backend.Data, - Name: "foobar", + Handle: restic.Handle{ + FileType: restic.DataFile, + Name: "foobar", }, Result: "https://hostname.foo/data/foobar", }, { URL: parseURL("https://hostname.foo:1234/prefix/repo"), - Handle: backend.Handle{ - Type: backend.Lock, - Name: "foobar", + Handle: restic.Handle{ + FileType: restic.LockFile, + Name: "foobar", }, Result: "https://hostname.foo:1234/prefix/repo/locks/foobar", }, { URL: parseURL("https://hostname.foo:1234/prefix/repo"), - Handle: backend.Handle{ - Type: backend.Config, - Name: "foobar", + Handle: restic.Handle{ + FileType: restic.ConfigFile, + Name: "foobar", }, Result: "https://hostname.foo:1234/prefix/repo/config", }, diff --git a/src/restic/backend/rest/rest_test.go b/src/restic/backend/rest/rest_test.go index 4e77cf612..206f7c18a 100644 --- a/src/restic/backend/rest/rest_test.go +++ b/src/restic/backend/rest/rest_test.go @@ -4,6 +4,7 @@ import ( "fmt" "net/url" "os" + "restic" "github.com/pkg/errors" @@ -37,7 +38,7 @@ func init() { return nil, err } - exists, err := be.Test(backend.Config, "") + exists, err := be.Test(restic.ConfigFile, "") if err != nil { return nil, err } diff --git a/src/restic/backend/s3/s3.go b/src/restic/backend/s3/s3.go index 835a7c485..67dbeeb91 100644 --- a/src/restic/backend/s3/s3.go +++ b/src/restic/backend/s3/s3.go @@ -3,13 +3,13 @@ package s3 import ( "bytes" "io" + "restic" "strings" "github.com/pkg/errors" "github.com/minio/minio-go" - "restic/backend" "restic/debug" ) @@ -25,7 +25,7 @@ type s3 struct { // Open opens the S3 backend at bucket and region. The bucket is created if it // does not exist yet. -func Open(cfg Config) (backend.Backend, error) { +func Open(cfg Config) (restic.Backend, error) { debug.Log("s3.Open", "open, config %#v", cfg) client, err := minio.New(cfg.Endpoint, cfg.KeyID, cfg.Secret, !cfg.UseHTTP) @@ -53,7 +53,7 @@ func Open(cfg Config) (backend.Backend, error) { return be, nil } -func (be *s3) s3path(t backend.Type, name string) string { +func (be *s3) s3path(t restic.FileType, name string) string { var path string if be.prefix != "" { @@ -61,7 +61,7 @@ func (be *s3) s3path(t backend.Type, name string) string { } path += string(t) - if t == backend.Config { + if t == restic.ConfigFile { return path } return path + "/" + name @@ -81,11 +81,11 @@ func (be *s3) Location() string { // Load returns the data stored in the backend for h at the given offset // and saves it in p. Load has the same semantics as io.ReaderAt. 
-func (be s3) Load(h backend.Handle, p []byte, off int64) (n int, err error) { +func (be s3) Load(h restic.Handle, p []byte, off int64) (n int, err error) { var obj *minio.Object debug.Log("s3.Load", "%v, offset %v, len %v", h, off, len(p)) - path := be.s3path(h.Type, h.Name) + path := be.s3path(h.FileType, h.Name) <-be.connChan defer func() { @@ -153,14 +153,14 @@ func (be s3) Load(h backend.Handle, p []byte, off int64) (n int, err error) { } // Save stores data in the backend at the handle. -func (be s3) Save(h backend.Handle, p []byte) (err error) { +func (be s3) Save(h restic.Handle, p []byte) (err error) { if err := h.Valid(); err != nil { return err } debug.Log("s3.Save", "%v with %d bytes", h, len(p)) - path := be.s3path(h.Type, h.Name) + path := be.s3path(h.FileType, h.Name) // Check key does not already exist _, err = be.client.StatObject(be.bucketname, path) @@ -183,16 +183,16 @@ func (be s3) Save(h backend.Handle, p []byte) (err error) { } // Stat returns information about a blob. -func (be s3) Stat(h backend.Handle) (bi backend.BlobInfo, err error) { +func (be s3) Stat(h restic.Handle) (bi restic.FileInfo, err error) { debug.Log("s3.Stat", "%v", h) - path := be.s3path(h.Type, h.Name) + path := be.s3path(h.FileType, h.Name) var obj *minio.Object obj, err = be.client.GetObject(be.bucketname, path) if err != nil { debug.Log("s3.Stat", "GetObject() err %v", err) - return backend.BlobInfo{}, errors.Wrap(err, "client.GetObject") + return restic.FileInfo{}, errors.Wrap(err, "client.GetObject") } // make sure that the object is closed properly. @@ -206,14 +206,14 @@ func (be s3) Stat(h backend.Handle) (bi backend.BlobInfo, err error) { fi, err := obj.Stat() if err != nil { debug.Log("s3.Stat", "Stat() err %v", err) - return backend.BlobInfo{}, errors.Wrap(err, "Stat") + return restic.FileInfo{}, errors.Wrap(err, "Stat") } - return backend.BlobInfo{Size: fi.Size}, nil + return restic.FileInfo{Size: fi.Size}, nil } // Test returns true if a blob of the given type and name exists in the backend. -func (be *s3) Test(t backend.Type, name string) (bool, error) { +func (be *s3) Test(t restic.FileType, name string) (bool, error) { found := false path := be.s3path(t, name) _, err := be.client.StatObject(be.bucketname, path) @@ -226,7 +226,7 @@ func (be *s3) Test(t backend.Type, name string) (bool, error) { } // Remove removes the blob with the given name and type. -func (be *s3) Remove(t backend.Type, name string) error { +func (be *s3) Remove(t restic.FileType, name string) error { path := be.s3path(t, name) err := be.client.RemoveObject(be.bucketname, path) debug.Log("s3.Remove", "%v %v -> err %v", t, name, err) @@ -236,7 +236,7 @@ func (be *s3) Remove(t backend.Type, name string) error { // List returns a channel that yields all names of blobs of type t. A // goroutine is started for this. If the channel done is closed, sending // stops. -func (be *s3) List(t backend.Type, done <-chan struct{}) <-chan string { +func (be *s3) List(t restic.FileType, done <-chan struct{}) <-chan string { debug.Log("s3.List", "listing %v", t) ch := make(chan string) @@ -264,11 +264,11 @@ func (be *s3) List(t backend.Type, done <-chan struct{}) <-chan string { } // Remove keys for a specified backend type. 
-func (be *s3) removeKeys(t backend.Type) error { +func (be *s3) removeKeys(t restic.FileType) error { done := make(chan struct{}) defer close(done) - for key := range be.List(backend.Data, done) { - err := be.Remove(backend.Data, key) + for key := range be.List(restic.DataFile, done) { + err := be.Remove(restic.DataFile, key) if err != nil { return err } @@ -279,12 +279,12 @@ func (be *s3) removeKeys(t backend.Type) error { // Delete removes all restic keys in the bucket. It will not remove the bucket itself. func (be *s3) Delete() error { - alltypes := []backend.Type{ - backend.Data, - backend.Key, - backend.Lock, - backend.Snapshot, - backend.Index} + alltypes := []restic.FileType{ + restic.DataFile, + restic.KeyFile, + restic.LockFile, + restic.SnapshotFile, + restic.IndexFile} for _, t := range alltypes { err := be.removeKeys(t) @@ -293,7 +293,7 @@ func (be *s3) Delete() error { } } - return be.Remove(backend.Config, "") + return be.Remove(restic.ConfigFile, "") } // Close does nothing diff --git a/src/restic/backend/s3/s3_test.go b/src/restic/backend/s3/s3_test.go index 6fd9c3bf6..4f0aa0d6b 100644 --- a/src/restic/backend/s3/s3_test.go +++ b/src/restic/backend/s3/s3_test.go @@ -44,7 +44,7 @@ func init() { return nil, err } - exists, err := be.Test(backend.Config, "") + exists, err := be.Test(restic.ConfigFile, "") if err != nil { return nil, err } diff --git a/src/restic/backend/sftp/sftp.go b/src/restic/backend/sftp/sftp.go index c82e29683..ca39e45ea 100644 --- a/src/restic/backend/sftp/sftp.go +++ b/src/restic/backend/sftp/sftp.go @@ -9,6 +9,7 @@ import ( "os" "os/exec" "path" + "restic" "strings" "time" @@ -256,11 +257,11 @@ func (r *SFTP) mkdirAll(dir string, mode os.FileMode) error { } // Rename temp file to final name according to type and name. -func (r *SFTP) renameFile(oldname string, t backend.Type, name string) error { +func (r *SFTP) renameFile(oldname string, t restic.FileType, name string) error { filename := r.filename(t, name) // create directories if necessary - if t == backend.Data { + if t == restic.DataFile { err := r.mkdirAll(path.Dir(filename), backend.Modes.Dir) if err != nil { return err @@ -293,9 +294,9 @@ func Join(parts ...string) string { return path.Clean(path.Join(parts...)) } -// Construct path for given backend.Type and name. -func (r *SFTP) filename(t backend.Type, name string) string { - if t == backend.Config { +// Construct path for given restic.Type and name. +func (r *SFTP) filename(t restic.FileType, name string) string { + if t == restic.ConfigFile { return Join(r.p, "config") } @@ -303,21 +304,21 @@ func (r *SFTP) filename(t backend.Type, name string) string { } // Construct directory for given backend.Type. -func (r *SFTP) dirname(t backend.Type, name string) string { +func (r *SFTP) dirname(t restic.FileType, name string) string { var n string switch t { - case backend.Data: + case restic.DataFile: n = backend.Paths.Data if len(name) > 2 { n = Join(n, name[:2]) } - case backend.Snapshot: + case restic.SnapshotFile: n = backend.Paths.Snapshots - case backend.Index: + case restic.IndexFile: n = backend.Paths.Index - case backend.Lock: + case restic.LockFile: n = backend.Paths.Locks - case backend.Key: + case restic.KeyFile: n = backend.Paths.Keys } return Join(r.p, n) @@ -325,7 +326,7 @@ func (r *SFTP) dirname(t backend.Type, name string) string { // Load returns the data stored in the backend for h at the given offset // and saves it in p. Load has the same semantics as io.ReaderAt. 
-func (r *SFTP) Load(h backend.Handle, p []byte, off int64) (n int, err error) { +func (r *SFTP) Load(h restic.Handle, p []byte, off int64) (n int, err error) { debug.Log("sftp.Load", "load %v, %d bytes, offset %v", h, len(p), off) if err := r.clientError(); err != nil { return 0, err @@ -335,7 +336,7 @@ func (r *SFTP) Load(h backend.Handle, p []byte, off int64) (n int, err error) { return 0, err } - f, err := r.c.Open(r.filename(h.Type, h.Name)) + f, err := r.c.Open(r.filename(h.FileType, h.Name)) if err != nil { return 0, errors.Wrap(err, "Open") } @@ -362,7 +363,7 @@ func (r *SFTP) Load(h backend.Handle, p []byte, off int64) (n int, err error) { } // Save stores data in the backend at the handle. -func (r *SFTP) Save(h backend.Handle, p []byte) (err error) { +func (r *SFTP) Save(h restic.Handle, p []byte) (err error) { debug.Log("sftp.Save", "save %v bytes to %v", h, len(p)) if err := r.clientError(); err != nil { return err @@ -393,14 +394,14 @@ func (r *SFTP) Save(h backend.Handle, p []byte) (err error) { return errors.Wrap(err, "Close") } - err = r.renameFile(filename, h.Type, h.Name) + err = r.renameFile(filename, h.FileType, h.Name) debug.Log("sftp.Save", "save %v: rename %v: %v", h, path.Base(filename), err) return err } // Stat returns information about a blob. -func (r *SFTP) Stat(h backend.Handle) (backend.BlobInfo, error) { +func (r *SFTP) Stat(h restic.Handle) (backend.BlobInfo, error) { debug.Log("sftp.Stat", "stat %v", h) if err := r.clientError(); err != nil { return backend.BlobInfo{}, err @@ -410,7 +411,7 @@ func (r *SFTP) Stat(h backend.Handle) (backend.BlobInfo, error) { return backend.BlobInfo{}, err } - fi, err := r.c.Lstat(r.filename(h.Type, h.Name)) + fi, err := r.c.Lstat(r.filename(h.FileType, h.Name)) if err != nil { return backend.BlobInfo{}, errors.Wrap(err, "Lstat") } @@ -419,7 +420,7 @@ func (r *SFTP) Stat(h backend.Handle) (backend.BlobInfo, error) { } // Test returns true if a blob of the given type and name exists in the backend. -func (r *SFTP) Test(t backend.Type, name string) (bool, error) { +func (r *SFTP) Test(t restic.FileType, name string) (bool, error) { debug.Log("sftp.Test", "type %v, name %v", t, name) if err := r.clientError(); err != nil { return false, err @@ -438,7 +439,7 @@ func (r *SFTP) Test(t backend.Type, name string) (bool, error) { } // Remove removes the content stored at name. -func (r *SFTP) Remove(t backend.Type, name string) error { +func (r *SFTP) Remove(t restic.FileType, name string) error { debug.Log("sftp.Remove", "type %v, name %v", t, name) if err := r.clientError(); err != nil { return err @@ -450,14 +451,14 @@ func (r *SFTP) Remove(t backend.Type, name string) error { // List returns a channel that yields all names of blobs of type t. A // goroutine is started for this. If the channel done is closed, sending // stops. 
-func (r *SFTP) List(t backend.Type, done <-chan struct{}) <-chan string { +func (r *SFTP) List(t restic.FileType, done <-chan struct{}) <-chan string { debug.Log("sftp.List", "list all %v", t) ch := make(chan string) go func() { defer close(ch) - if t == backend.Data { + if t == restic.DataFile { // read first level basedir := r.dirname(t, "") diff --git a/src/restic/backend/test/tests.go b/src/restic/backend/test/tests.go index a9b18e361..217e9b683 100644 --- a/src/restic/backend/test/tests.go +++ b/src/restic/backend/test/tests.go @@ -7,6 +7,7 @@ import ( "io/ioutil" "math/rand" "reflect" + "restic" "sort" "testing" @@ -118,7 +119,7 @@ func TestCreateWithConfig(t testing.TB) { defer close(t) // save a config - store(t, b, backend.Config, []byte("test config")) + store(t, b, restic.ConfigFile, []byte("test config")) // now create the backend again, this must fail _, err := CreateFn() @@ -127,7 +128,7 @@ func TestCreateWithConfig(t testing.TB) { } // remove config - err = b.Remove(backend.Config, "") + err = b.Remove(restic.ConfigFile, "") if err != nil { t.Fatalf("unexpected error removing config: %v", err) } @@ -152,12 +153,12 @@ func TestConfig(t testing.TB) { var testString = "Config" // create config and read it back - _, err := backend.LoadAll(b, backend.Handle{Type: backend.Config}, nil) + _, err := backend.LoadAll(b, restic.Handle{Type: restic.ConfigFile}, nil) if err == nil { t.Fatalf("did not get expected error for non-existing config") } - err = b.Save(backend.Handle{Type: backend.Config}, []byte(testString)) + err = b.Save(restic.Handle{Type: restic.ConfigFile}, []byte(testString)) if err != nil { t.Fatalf("Save() error: %v", err) } @@ -165,7 +166,7 @@ func TestConfig(t testing.TB) { // try accessing the config with different names, should all return the // same config for _, name := range []string{"", "foo", "bar", "0000000000000000000000000000000000000000000000000000000000000000"} { - h := backend.Handle{Type: backend.Config, Name: name} + h := restic.Handle{Type: restic.ConfigFile, Name: name} buf, err := backend.LoadAll(b, h, nil) if err != nil { t.Fatalf("unable to read config with name %q: %v", name, err) @@ -182,12 +183,12 @@ func TestLoad(t testing.TB) { b := open(t) defer close(t) - _, err := b.Load(backend.Handle{}, nil, 0) + _, err := b.Load(restic.Handle{}, nil, 0) if err == nil { t.Fatalf("Load() did not return an error for invalid handle") } - _, err = b.Load(backend.Handle{Type: backend.Data, Name: "foobar"}, nil, 0) + _, err = b.Load(restic.Handle{Type: restic.DataFile, Name: "foobar"}, nil, 0) if err == nil { t.Fatalf("Load() did not return an error for non-existing blob") } @@ -197,7 +198,7 @@ func TestLoad(t testing.TB) { data := Random(23, length) id := backend.Hash(data) - handle := backend.Handle{Type: backend.Data, Name: id.String()} + handle := restic.Handle{Type: restic.DataFile, Name: id.String()} err = b.Save(handle, data) if err != nil { t.Fatalf("Save() error: %v", err) @@ -309,7 +310,7 @@ func TestLoad(t testing.TB) { t.Errorf("wrong error returned for larger buffer: want io.ErrUnexpectedEOF, got %#v", err) } - OK(t, b.Remove(backend.Data, id.String())) + OK(t, b.Remove(restic.DataFile, id.String())) } // TestLoadNegativeOffset tests the backend's Load function with negative offsets. 
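As documented for the local backend's Load earlier in this commit, an offset below zero is interpreted relative to the end of the file. A sketch, not part of the patch, of what this test exercises:

func loadTail(be restic.Backend, id restic.ID, n int) ([]byte, error) {
	h := restic.Handle{FileType: restic.DataFile, Name: id.String()}
	buf := make([]byte, n)
	// e.g. n = 16 reads the last 16 bytes of the file.
	read, err := be.Load(h, buf, int64(-n))
	return buf[:read], err
}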
@@ -322,7 +323,7 @@ func TestLoadNegativeOffset(t testing.TB) { data := Random(23, length) id := backend.Hash(data) - handle := backend.Handle{Type: backend.Data, Name: id.String()} + handle := restic.Handle{Type: restic.DataFile, Name: id.String()} err := b.Save(handle, data) if err != nil { t.Fatalf("Save() error: %v", err) @@ -365,7 +366,7 @@ func TestLoadNegativeOffset(t testing.TB) { } - OK(t, b.Remove(backend.Data, id.String())) + OK(t, b.Remove(restic.DataFile, id.String())) } // TestSave tests saving data in the backend. @@ -380,8 +381,8 @@ func TestSave(t testing.TB) { // use the first 32 byte as the ID copy(id[:], data) - h := backend.Handle{ - Type: backend.Data, + h := restic.Handle{ + Type: restic.DataFile, Name: fmt.Sprintf("%s-%d", id, i), } err := b.Save(h, data) @@ -429,7 +430,7 @@ func TestSaveFilenames(t testing.TB) { defer close(t) for i, test := range filenameTests { - h := backend.Handle{Name: test.name, Type: backend.Data} + h := restic.Handle{Name: test.name, Type: restic.DataFile} err := b.Save(h, []byte(test.data)) if err != nil { t.Errorf("test %d failed: Save() returned %v", i, err) @@ -464,9 +465,9 @@ var testStrings = []struct { {"4e54d2c721cbdb730f01b10b62dec622962b36966ec685880effa63d71c808f2", "foo/../../baz"}, } -func store(t testing.TB, b backend.Backend, tpe backend.Type, data []byte) { +func store(t testing.TB, b backend.Backend, tpe restic.FileType, data []byte) { id := backend.Hash(data) - err := b.Save(backend.Handle{Name: id.String(), Type: tpe}, data) + err := b.Save(restic.Handle{Name: id.String(), Type: tpe}, data) OK(t, err) } @@ -483,9 +484,9 @@ func TestBackend(t testing.TB) { b := open(t) defer close(t) - for _, tpe := range []backend.Type{ - backend.Data, backend.Key, backend.Lock, - backend.Snapshot, backend.Index, + for _, tpe := range []restic.FileType{ + restic.DataFile, restic.KeyFile, restic.LockFile, + restic.SnapshotFile, restic.IndexFile, } { // detect non-existing files for _, test := range testStrings { @@ -498,7 +499,7 @@ func TestBackend(t testing.TB) { Assert(t, !ret, "blob was found to exist before creating") // try to stat a not existing blob - h := backend.Handle{Type: tpe, Name: id.String()} + h := restic.Handle{Type: tpe, Name: id.String()} _, err = b.Stat(h) Assert(t, err != nil, "blob data could be extracted before creation") @@ -517,7 +518,7 @@ func TestBackend(t testing.TB) { store(t, b, tpe, []byte(test.data)) // test Load() - h := backend.Handle{Type: tpe, Name: test.id} + h := restic.Handle{Type: tpe, Name: test.id} buf, err := backend.LoadAll(b, h, nil) OK(t, err) Equals(t, test.data, string(buf)) @@ -538,7 +539,7 @@ func TestBackend(t testing.TB) { test := testStrings[0] // create blob - err := b.Save(backend.Handle{Type: tpe, Name: test.id}, []byte(test.data)) + err := b.Save(restic.Handle{Type: tpe, Name: test.id}, []byte(test.data)) Assert(t, err != nil, "expected error, got %v", err) // remove and recreate @@ -551,7 +552,7 @@ func TestBackend(t testing.TB) { Assert(t, ok == false, "removed blob still present") // create blob - err = b.Save(backend.Handle{Type: tpe, Name: test.id}, []byte(test.data)) + err = b.Save(restic.Handle{Type: tpe, Name: test.id}, []byte(test.data)) OK(t, err) // list items diff --git a/src/restic/backend/utils.go b/src/restic/backend/utils.go index 9bd87b4fb..f060b6fca 100644 --- a/src/restic/backend/utils.go +++ b/src/restic/backend/utils.go @@ -2,6 +2,7 @@ package backend import ( "io" + "restic" "github.com/pkg/errors" ) @@ -10,7 +11,7 @@ import ( // is resized to accomodate all 
data in the blob. Errors returned by be.Load() // are passed on, except io.ErrUnexpectedEOF is silenced and nil returned // instead, since it means this function is working properly. -func LoadAll(be Backend, h Handle, buf []byte) ([]byte, error) { +func LoadAll(be restic.Backend, h restic.Handle, buf []byte) ([]byte, error) { fi, err := be.Stat(h) if err != nil { return nil, errors.Wrap(err, "Stat") diff --git a/src/restic/backend/utils_test.go b/src/restic/backend/utils_test.go index ad39c3b1d..51bb6f8da 100644 --- a/src/restic/backend/utils_test.go +++ b/src/restic/backend/utils_test.go @@ -20,10 +20,10 @@ func TestLoadAll(t *testing.T) { data := Random(23+i, rand.Intn(MiB)+500*KiB) id := backend.Hash(data) - err := b.Save(backend.Handle{Name: id.String(), Type: backend.Data}, data) + err := b.Save(restic.Handle{Name: id.String(), Type: restic.DataFile}, data) OK(t, err) - buf, err := backend.LoadAll(b, backend.Handle{Type: backend.Data, Name: id.String()}, nil) + buf, err := backend.LoadAll(b, restic.Handle{Type: restic.DataFile, Name: id.String()}, nil) OK(t, err) if len(buf) != len(data) { @@ -45,11 +45,11 @@ func TestLoadSmallBuffer(t *testing.T) { data := Random(23+i, rand.Intn(MiB)+500*KiB) id := backend.Hash(data) - err := b.Save(backend.Handle{Name: id.String(), Type: backend.Data}, data) + err := b.Save(restic.Handle{Name: id.String(), Type: restic.DataFile}, data) OK(t, err) buf := make([]byte, len(data)-23) - buf, err = backend.LoadAll(b, backend.Handle{Type: backend.Data, Name: id.String()}, buf) + buf, err = backend.LoadAll(b, restic.Handle{Type: restic.DataFile, Name: id.String()}, buf) OK(t, err) if len(buf) != len(data) { @@ -71,11 +71,11 @@ func TestLoadLargeBuffer(t *testing.T) { data := Random(23+i, rand.Intn(MiB)+500*KiB) id := backend.Hash(data) - err := b.Save(backend.Handle{Name: id.String(), Type: backend.Data}, data) + err := b.Save(restic.Handle{Name: id.String(), Type: restic.DataFile}, data) OK(t, err) buf := make([]byte, len(data)+100) - buf, err = backend.LoadAll(b, backend.Handle{Type: backend.Data, Name: id.String()}, buf) + buf, err = backend.LoadAll(b, restic.Handle{Type: restic.DataFile, Name: id.String()}, buf) OK(t, err) if len(buf) != len(data) { diff --git a/src/restic/backend_find.go b/src/restic/backend_find.go new file mode 100644 index 000000000..6ab6427fb --- /dev/null +++ b/src/restic/backend_find.go @@ -0,0 +1,70 @@ +package restic + +import "github.com/pkg/errors" + +// ErrNoIDPrefixFound is returned by Find() when no ID for the given prefix +// could be found. +var ErrNoIDPrefixFound = errors.New("no ID found") + +// ErrMultipleIDMatches is returned by Find() when multiple IDs with the given +// prefix are found. +var ErrMultipleIDMatches = errors.New("multiple IDs with prefix found") + +// Find loads the list of all files of type t and searches for names which +// start with prefix. If none is found, nil and ErrNoIDPrefixFound is returned. +// If more than one is found, nil and ErrMultipleIDMatches is returned. +func Find(be Lister, t FileType, prefix string) (string, error) { + done := make(chan struct{}) + defer close(done) + + match := "" + + // TODO: optimize by sorting list etc. 
+ for name := range be.List(t, done) { + if prefix == name[:len(prefix)] { + if match == "" { + match = name + } else { + return "", ErrMultipleIDMatches + } + } + } + + if match != "" { + return match, nil + } + + return "", ErrNoIDPrefixFound +} + +const minPrefixLength = 8 + +// PrefixLength returns the number of bytes required so that all prefixes of +// all names of type t are unique. +func PrefixLength(be Lister, t FileType) (int, error) { + done := make(chan struct{}) + defer close(done) + + // load all IDs of the given type + list := make([]string, 0, 100) + for name := range be.List(t, done) { + list = append(list, name) + } + + // select prefixes of length l, test if the last one is the same as the current one +outer: + for l := minPrefixLength; l < IDSize; l++ { + var last string + + for _, name := range list { + if last == name[:l] { + continue outer + } + last = name[:l] + } + + return l, nil + } + + return IDSize, nil +} diff --git a/src/restic/checker/checker.go b/src/restic/checker/checker.go index 51b473641..6bcea9b81 100644 --- a/src/restic/checker/checker.go +++ b/src/restic/checker/checker.go @@ -21,14 +21,14 @@ import ( // A Checker only tests for internal errors within the data structures of the // repository (e.g. missing blobs), and needs a valid Repository to work on. type Checker struct { - packs backend.IDSet - blobs backend.IDSet + packs restic.IDSet + blobs restic.IDSet blobRefs struct { sync.Mutex - M map[backend.ID]uint + M map[restic.ID]uint } - indexes map[backend.ID]*repository.Index - orphanedPacks backend.IDs + indexes map[restic.ID]*repository.Index + orphanedPacks restic.IDs masterIndex *repository.MasterIndex @@ -38,14 +38,14 @@ type Checker struct { // New returns a new checker which runs on repo. func New(repo *repository.Repository) *Checker { c := &Checker{ - packs: backend.NewIDSet(), - blobs: backend.NewIDSet(), + packs: restic.NewIDSet(), + blobs: restic.NewIDSet(), masterIndex: repository.NewMasterIndex(), - indexes: make(map[backend.ID]*repository.Index), + indexes: make(map[restic.ID]*repository.Index), repo: repo, } - c.blobRefs.M = make(map[backend.ID]uint) + c.blobRefs.M = make(map[restic.ID]uint) return c } @@ -54,8 +54,8 @@ const defaultParallelism = 40 // ErrDuplicatePacks is returned when a pack is found in more than one index. type ErrDuplicatePacks struct { - PackID backend.ID - Indexes backend.IDSet + PackID restic.ID + Indexes restic.IDSet } func (e ErrDuplicatePacks) Error() string { @@ -65,7 +65,7 @@ func (e ErrDuplicatePacks) Error() string { // ErrOldIndexFormat is returned when an index with the old format is // found. 
type ErrOldIndexFormat struct { - backend.ID + restic.ID } func (err ErrOldIndexFormat) Error() string { @@ -82,7 +82,7 @@ func (c *Checker) LoadIndex() (hints []error, errs []error) { indexCh := make(chan indexRes) - worker := func(id backend.ID, done <-chan struct{}) error { + worker := func(id restic.ID, done <-chan struct{}) error { debug.Log("LoadIndex", "worker got index %v", id) idx, err := repository.LoadIndexWithDecoder(c.repo, id, repository.DecodeIndex) if errors.Cause(err) == repository.ErrOldIndexFormat { @@ -108,7 +108,7 @@ func (c *Checker) LoadIndex() (hints []error, errs []error) { go func() { defer close(indexCh) debug.Log("LoadIndex", "start loading indexes in parallel") - perr = repository.FilesInParallel(c.repo.Backend(), backend.Index, defaultParallelism, + perr = repository.FilesInParallel(c.repo.Backend(), restic.IndexFile, defaultParallelism, repository.ParallelWorkFuncParseID(worker)) debug.Log("LoadIndex", "loading indexes finished, error: %v", perr) }() @@ -121,11 +121,11 @@ func (c *Checker) LoadIndex() (hints []error, errs []error) { return hints, errs } - packToIndex := make(map[backend.ID]backend.IDSet) + packToIndex := make(map[restic.ID]restic.IDSet) for res := range indexCh { debug.Log("LoadIndex", "process index %v", res.ID) - idxID, err := backend.ParseID(res.ID) + idxID, err := restic.ParseID(res.ID) if err != nil { errs = append(errs, errors.Errorf("unable to parse as index ID: %v", res.ID)) continue @@ -143,7 +143,7 @@ func (c *Checker) LoadIndex() (hints []error, errs []error) { cnt++ if _, ok := packToIndex[blob.PackID]; !ok { - packToIndex[blob.PackID] = backend.NewIDSet() + packToIndex[blob.PackID] = restic.NewIDSet() } packToIndex[blob.PackID].Insert(idxID) } @@ -171,7 +171,7 @@ func (c *Checker) LoadIndex() (hints []error, errs []error) { // PackError describes an error with a specific pack. 
type PackError struct { - ID backend.ID + ID restic.ID Orphaned bool Err error } @@ -180,14 +180,14 @@ func (e PackError) Error() string { return "pack " + e.ID.String() + ": " + e.Err.Error() } -func packIDTester(repo *repository.Repository, inChan <-chan backend.ID, errChan chan<- error, wg *sync.WaitGroup, done <-chan struct{}) { +func packIDTester(repo *repository.Repository, inChan <-chan restic.ID, errChan chan<- error, wg *sync.WaitGroup, done <-chan struct{}) { debug.Log("Checker.testPackID", "worker start") defer debug.Log("Checker.testPackID", "worker done") defer wg.Done() for id := range inChan { - ok, err := repo.Backend().Test(backend.Data, id.String()) + ok, err := repo.Backend().Test(restic.DataFile, id.String()) if err != nil { err = PackError{ID: id, Err: err} } else { @@ -218,11 +218,11 @@ func (c *Checker) Packs(errChan chan<- error, done <-chan struct{}) { defer close(errChan) debug.Log("Checker.Packs", "checking for %d packs", len(c.packs)) - seenPacks := backend.NewIDSet() + seenPacks := restic.NewIDSet() var workerWG sync.WaitGroup - IDChan := make(chan backend.ID) + IDChan := make(chan restic.ID) for i := 0; i < defaultParallelism; i++ { workerWG.Add(1) go packIDTester(c.repo, IDChan, errChan, &workerWG, done) @@ -238,7 +238,7 @@ func (c *Checker) Packs(errChan chan<- error, done <-chan struct{}) { workerWG.Wait() debug.Log("Checker.Packs", "workers terminated") - for id := range c.repo.List(backend.Data, done) { + for id := range c.repo.List(restic.DataFile, done) { debug.Log("Checker.Packs", "check data blob %v", id.Str()) if !seenPacks.Has(id) { c.orphanedPacks = append(c.orphanedPacks, id) @@ -253,8 +253,8 @@ func (c *Checker) Packs(errChan chan<- error, done <-chan struct{}) { // Error is an error that occurred while checking a repository. type Error struct { - TreeID backend.ID - BlobID backend.ID + TreeID restic.ID + BlobID restic.ID Err error } @@ -273,25 +273,25 @@ func (e Error) Error() string { return e.Err.Error() } -func loadTreeFromSnapshot(repo *repository.Repository, id backend.ID) (backend.ID, error) { +func loadTreeFromSnapshot(repo *repository.Repository, id restic.ID) (restic.ID, error) { sn, err := restic.LoadSnapshot(repo, id) if err != nil { debug.Log("Checker.loadTreeFromSnapshot", "error loading snapshot %v: %v", id.Str(), err) - return backend.ID{}, err + return restic.ID{}, err } if sn.Tree == nil { debug.Log("Checker.loadTreeFromSnapshot", "snapshot %v has no tree", id.Str()) - return backend.ID{}, errors.Errorf("snapshot %v has no tree", id) + return restic.ID{}, errors.Errorf("snapshot %v has no tree", id) } return *sn.Tree, nil } // loadSnapshotTreeIDs loads all snapshots from backend and returns the tree IDs. 
-func loadSnapshotTreeIDs(repo *repository.Repository) (backend.IDs, []error) { +func loadSnapshotTreeIDs(repo *repository.Repository) (restic.IDs, []error) { var trees struct { - IDs backend.IDs + IDs restic.IDs sync.Mutex } @@ -301,7 +301,7 @@ func loadSnapshotTreeIDs(repo *repository.Repository) (backend.IDs, []error) { } snapshotWorker := func(strID string, done <-chan struct{}) error { - id, err := backend.ParseID(strID) + id, err := restic.ParseID(strID) if err != nil { return err } @@ -324,7 +324,7 @@ func loadSnapshotTreeIDs(repo *repository.Repository) (backend.IDs, []error) { return nil } - err := repository.FilesInParallel(repo.Backend(), backend.Snapshot, defaultParallelism, snapshotWorker) + err := repository.FilesInParallel(repo.Backend(), restic.SnapshotFile, defaultParallelism, snapshotWorker) if err != nil { errs.errs = append(errs.errs, err) } @@ -334,7 +334,7 @@ func loadSnapshotTreeIDs(repo *repository.Repository) (backend.IDs, []error) { // TreeError collects several errors that occurred while processing a tree. type TreeError struct { - ID backend.ID + ID restic.ID Errors []error } @@ -343,14 +343,14 @@ func (e TreeError) Error() string { } type treeJob struct { - backend.ID + restic.ID error *restic.Tree } // loadTreeWorker loads trees from repo and sends them to out. func loadTreeWorker(repo *repository.Repository, - in <-chan backend.ID, out chan<- treeJob, + in <-chan restic.ID, out chan<- treeJob, done <-chan struct{}, wg *sync.WaitGroup) { defer func() { @@ -454,7 +454,7 @@ func (c *Checker) checkTreeWorker(in <-chan treeJob, out chan<- error, done <-ch } } -func filterTrees(backlog backend.IDs, loaderChan chan<- backend.ID, in <-chan treeJob, out chan<- treeJob, done <-chan struct{}) { +func filterTrees(backlog restic.IDs, loaderChan chan<- restic.ID, in <-chan treeJob, out chan<- treeJob, done <-chan struct{}) { defer func() { debug.Log("checker.filterTrees", "closing output channels") close(loaderChan) @@ -466,7 +466,7 @@ func filterTrees(backlog backend.IDs, loaderChan chan<- backend.ID, in <-chan tr outCh = out loadCh = loaderChan job treeJob - nextTreeID backend.ID + nextTreeID restic.ID outstandingLoadTreeJobs = 0 ) @@ -559,7 +559,7 @@ func (c *Checker) Structure(errChan chan<- error, done <-chan struct{}) { } } - treeIDChan := make(chan backend.ID) + treeIDChan := make(chan restic.ID) treeJobChan1 := make(chan treeJob) treeJobChan2 := make(chan treeJob) @@ -575,10 +575,10 @@ func (c *Checker) Structure(errChan chan<- error, done <-chan struct{}) { wg.Wait() } -func (c *Checker) checkTree(id backend.ID, tree *restic.Tree) (errs []error) { +func (c *Checker) checkTree(id restic.ID, tree *restic.Tree) (errs []error) { debug.Log("Checker.checkTree", "checking tree %v", id.Str()) - var blobs []backend.ID + var blobs []restic.ID for _, node := range tree.Nodes { switch node.FileType { @@ -634,7 +634,7 @@ func (c *Checker) checkTree(id backend.ID, tree *restic.Tree) (errs []error) { } // UnusedBlobs returns all blobs that have never been referenced. -func (c *Checker) UnusedBlobs() (blobs backend.IDs) { +func (c *Checker) UnusedBlobs() (blobs restic.IDs) { c.blobRefs.Lock() defer c.blobRefs.Unlock() @@ -650,7 +650,7 @@ func (c *Checker) UnusedBlobs() (blobs backend.IDs) { } // OrphanedPacks returns a slice of unused packs (only available after Packs() was run). 
-func (c *Checker) OrphanedPacks() backend.IDs { +func (c *Checker) OrphanedPacks() restic.IDs { return c.orphanedPacks } @@ -660,15 +660,15 @@ func (c *Checker) CountPacks() uint64 { } // checkPack reads a pack and checks the integrity of all blobs. -func checkPack(r *repository.Repository, id backend.ID) error { +func checkPack(r *repository.Repository, id restic.ID) error { debug.Log("Checker.checkPack", "checking pack %v", id.Str()) - h := backend.Handle{Type: backend.Data, Name: id.String()} + h := restic.Handle{FileType: restic.DataFile, Name: id.String()} buf, err := backend.LoadAll(r.Backend(), h, nil) if err != nil { return err } - hash := backend.Hash(buf) + hash := restic.Hash(buf) if !hash.Equal(id) { debug.Log("Checker.checkPack", "Pack ID does not match, want %v, got %v", id.Str(), hash.Str()) return errors.Errorf("Pack ID does not match, want %v, got %v", id.Str(), hash.Str()) @@ -691,7 +691,7 @@ func checkPack(r *repository.Repository, id backend.ID) error { continue } - hash := backend.Hash(plainBuf) + hash := restic.Hash(plainBuf) if !hash.Equal(blob.ID) { debug.Log("Checker.checkPack", " Blob ID does not match, want %v, got %v", blob.ID.Str(), hash.Str()) errs = append(errs, errors.Errorf("Blob ID does not match, want %v, got %v", blob.ID.Str(), hash.Str())) @@ -713,10 +713,10 @@ func (c *Checker) ReadData(p *restic.Progress, errChan chan<- error, done <-chan p.Start() defer p.Done() - worker := func(wg *sync.WaitGroup, in <-chan backend.ID) { + worker := func(wg *sync.WaitGroup, in <-chan restic.ID) { defer wg.Done() for { - var id backend.ID + var id restic.ID var ok bool select { @@ -742,7 +742,7 @@ func (c *Checker) ReadData(p *restic.Progress, errChan chan<- error, done <-chan } } - ch := c.repo.List(backend.Data, done) + ch := c.repo.List(restic.DataFile, done) var wg sync.WaitGroup for i := 0; i < defaultParallelism; i++ { diff --git a/src/restic/checker/checker_test.go b/src/restic/checker/checker_test.go index d06b2139b..e719cd229 100644 --- a/src/restic/checker/checker_test.go +++ b/src/restic/checker/checker_test.go @@ -17,7 +17,7 @@ import ( var checkerTestData = filepath.Join("testdata", "checker-test-repo.tar.gz") -func list(repo *repository.Repository, t backend.Type) (IDs []string) { +func list(repo *repository.Repository, t restic.FileType) (IDs []string) { done := make(chan struct{}) defer close(done) @@ -83,7 +83,7 @@ func TestMissingPack(t *testing.T) { repo := OpenLocalRepo(t, repodir) packID := "657f7fb64f6a854fff6fe9279998ee09034901eded4e6db9bcee0e59745bbce6" - OK(t, repo.Backend().Remove(backend.Data, packID)) + OK(t, repo.Backend().Remove(restic.DataFile, packID)) chkr := checker.New(repo) hints, errs := chkr.LoadIndex() @@ -115,7 +115,7 @@ func TestUnreferencedPack(t *testing.T) { // index 3f1a only references pack 60e0 indexID := "3f1abfcb79c6f7d0a3be517d2c83c8562fba64ef2c8e9a3544b4edaf8b5e3b44" packID := "60e0438dcb978ec6860cc1f8c43da648170ee9129af8f650f876bad19f8f788e" - OK(t, repo.Backend().Remove(backend.Index, indexID)) + OK(t, repo.Backend().Remove(restic.IndexFile, indexID)) chkr := checker.New(repo) hints, errs := chkr.LoadIndex() @@ -145,7 +145,7 @@ func TestUnreferencedBlobs(t *testing.T) { repo := OpenLocalRepo(t, repodir) snID := "51d249d28815200d59e4be7b3f21a157b864dc343353df9d8e498220c2499b02" - OK(t, repo.Backend().Remove(backend.Snapshot, snID)) + OK(t, repo.Backend().Remove(restic.SnapshotFile, snID)) unusedBlobsBySnapshot := backend.IDs{ ParseID("58c748bbe2929fdf30c73262bd8313fe828f8925b05d1d4a87fe109082acb849"), @@ -216,7 
+216,7 @@ type errorBackend struct { ProduceErrors bool } -func (b errorBackend) Load(h backend.Handle, p []byte, off int64) (int, error) { +func (b errorBackend) Load(h restic.Handle, p []byte, off int64) (int, error) { fmt.Printf("load %v\n", h) n, err := b.Backend.Load(h, p, off) diff --git a/src/restic/repository/config.go b/src/restic/config.go similarity index 90% rename from src/restic/repository/config.go rename to src/restic/config.go index fcb408f99..d2e9bd39d 100644 --- a/src/restic/repository/config.go +++ b/src/restic/config.go @@ -1,11 +1,10 @@ -package repository +package restic import ( "crypto/rand" "crypto/sha256" "encoding/hex" "io" - "restic" "testing" "github.com/pkg/errors" @@ -31,12 +30,12 @@ const RepoVersion = 1 // JSONUnpackedSaver saves unpacked JSON. type JSONUnpackedSaver interface { - SaveJSONUnpacked(restic.FileType, interface{}) (restic.ID, error) + SaveJSONUnpacked(FileType, interface{}) (ID, error) } // JSONUnpackedLoader loads unpacked JSON. type JSONUnpackedLoader interface { - LoadJSONUnpacked(restic.FileType, restic.ID, interface{}) error + LoadJSONUnpacked(FileType, ID, interface{}) error } // CreateConfig creates a config file with a randomly selected polynomial and @@ -87,7 +86,7 @@ func LoadConfig(r JSONUnpackedLoader) (Config, error) { cfg Config ) - err := r.LoadJSONUnpacked(restic.ConfigFile, restic.ID{}, &cfg) + err := r.LoadJSONUnpacked(ConfigFile, ID{}, &cfg) if err != nil { return Config{}, err } diff --git a/src/restic/repository/config_test.go b/src/restic/config_test.go similarity index 81% rename from src/restic/repository/config_test.go rename to src/restic/config_test.go index 8c17a7867..c5d2166e3 100644 --- a/src/restic/repository/config_test.go +++ b/src/restic/config_test.go @@ -1,10 +1,9 @@ -package repository_test +package restic_test import ( "restic" "testing" - "restic/repository" . 
"restic/test" ) @@ -21,18 +20,18 @@ func (l loader) LoadJSONUnpacked(t restic.FileType, id restic.ID, arg interface{ } func TestConfig(t *testing.T) { - resultConfig := repository.Config{} + resultConfig := restic.Config{} save := func(tpe restic.FileType, arg interface{}) (restic.ID, error) { Assert(t, tpe == restic.ConfigFile, "wrong backend type: got %v, wanted %v", tpe, restic.ConfigFile) - cfg := arg.(repository.Config) + cfg := arg.(restic.Config) resultConfig = cfg return restic.ID{}, nil } - cfg1, err := repository.CreateConfig() + cfg1, err := restic.CreateConfig() OK(t, err) _, err = saver(save).SaveJSONUnpacked(restic.ConfigFile, cfg1) @@ -42,12 +41,12 @@ func TestConfig(t *testing.T) { "wrong backend type: got %v, wanted %v", tpe, restic.ConfigFile) - cfg := arg.(*repository.Config) + cfg := arg.(*restic.Config) *cfg = resultConfig return nil } - cfg2, err := repository.LoadConfig(loader(load)) + cfg2, err := restic.LoadConfig(loader(load)) OK(t, err) Assert(t, cfg1 == cfg2, diff --git a/src/restic/find_test.go b/src/restic/find_test.go index a9ffdffa0..effc58f24 100644 --- a/src/restic/find_test.go +++ b/src/restic/find_test.go @@ -15,18 +15,18 @@ import ( "restic/repository" ) -func loadIDSet(t testing.TB, filename string) BlobSet { +func loadIDSet(t testing.TB, filename string) restic.BlobSet { f, err := os.Open(filename) if err != nil { t.Logf("unable to open golden file %v: %v", filename, err) - return NewBlobSet() + return restic.NewBlobSet() } sc := bufio.NewScanner(f) - blobs := NewBlobSet() + blobs := restic.NewBlobSet() for sc.Scan() { - var h Handle + var h restic.BlobHandle err := json.Unmarshal([]byte(sc.Text()), &h) if err != nil { t.Errorf("file %v contained invalid blob: %#v", filename, err) @@ -43,14 +43,14 @@ func loadIDSet(t testing.TB, filename string) BlobSet { return blobs } -func saveIDSet(t testing.TB, filename string, s BlobSet) { +func saveIDSet(t testing.TB, filename string, s restic.BlobSet) { f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE, 0644) if err != nil { t.Fatalf("unable to update golden file %v: %v", filename, err) return } - var hs Handles + var hs restic.BlobHandles for h := range s { hs = append(hs, h) } @@ -83,16 +83,16 @@ func TestFindUsedBlobs(t *testing.T) { repo, cleanup := repository.TestRepository(t) defer cleanup() - var snapshots []*Snapshot + var snapshots []*restic.Snapshot for i := 0; i < findTestSnapshots; i++ { - sn := TestCreateSnapshot(t, repo, findTestTime.Add(time.Duration(i)*time.Second), findTestDepth, 0) + sn := restic.TestCreateSnapshot(t, repo, findTestTime.Add(time.Duration(i)*time.Second), findTestDepth, 0) t.Logf("snapshot %v saved, tree %v", sn.ID().Str(), sn.Tree.Str()) snapshots = append(snapshots, sn) } for i, sn := range snapshots { - usedBlobs := NewBlobSet() - err := restic.FindUsedBlobs(repo, *sn.Tree, usedBlobs, NewBlobSet()) + usedBlobs := restic.NewBlobSet() + err := restic.FindUsedBlobs(repo, *sn.Tree, usedBlobs, restic.NewBlobSet()) if err != nil { t.Errorf("FindUsedBlobs returned error: %v", err) continue @@ -121,13 +121,13 @@ func BenchmarkFindUsedBlobs(b *testing.B) { repo, cleanup := repository.TestRepository(b) defer cleanup() - sn := TestCreateSnapshot(b, repo, findTestTime, findTestDepth, 0) + sn := restic.TestCreateSnapshot(b, repo, findTestTime, findTestDepth, 0) b.ResetTimer() for i := 0; i < b.N; i++ { - seen := NewBlobSet() - blobs := NewBlobSet() + seen := restic.NewBlobSet() + blobs := restic.NewBlobSet() err := restic.FindUsedBlobs(repo, *sn.Tree, blobs, seen) if err != nil { 
b.Error(err) diff --git a/src/restic/fuse/snapshot.go b/src/restic/fuse/snapshot.go index a384e3fb5..c4753ec4f 100644 --- a/src/restic/fuse/snapshot.go +++ b/src/restic/fuse/snapshot.go @@ -65,7 +65,7 @@ func (sn *SnapshotsDir) updateCache(ctx context.Context) error { sn.Lock() defer sn.Unlock() - for id := range sn.repo.List(backend.Snapshot, ctx.Done()) { + for id := range sn.repo.List(restic.SnapshotFile, ctx.Done()) { snapshot, err := restic.LoadSnapshot(sn.repo, id) if err != nil { return err diff --git a/src/restic/index/index.go b/src/restic/index/index.go index 20d2e08fe..d2497bce1 100644 --- a/src/restic/index/index.go +++ b/src/restic/index/index.go @@ -103,7 +103,7 @@ func loadIndexJSON(repo types.Repository, id backend.ID) (*indexJSON, error) { debug.Log("index.loadIndexJSON", "process index %v\n", id.Str()) var idx indexJSON - err := repo.LoadJSONUnpacked(backend.Index, id, &idx) + err := repo.LoadJSONUnpacked(restic.IndexFile, id, &idx) if err != nil { return nil, err } @@ -126,7 +126,7 @@ func Load(repo types.Repository, p *restic.Progress) (*Index, error) { index := newIndex() - for id := range repo.List(backend.Index, done) { + for id := range repo.List(restic.IndexFile, done) { p.Report(restic.Stat{Blobs: 1}) debug.Log("index.Load", "Load index %v", id.Str()) @@ -335,5 +335,5 @@ func Save(repo types.Repository, packs map[backend.ID][]pack.Blob, supersedes ba idx.Packs = append(idx.Packs, p) } - return repo.SaveJSONUnpacked(backend.Index, idx) + return repo.SaveJSONUnpacked(restic.IndexFile, idx) } diff --git a/src/restic/index/index_test.go b/src/restic/index/index_test.go index 559c3f08b..0f273a1c5 100644 --- a/src/restic/index/index_test.go +++ b/src/restic/index/index_test.go @@ -28,7 +28,7 @@ func createFilledRepo(t testing.TB, snapshots int, dup float32) (*repository.Rep } func validateIndex(t testing.TB, repo *repository.Repository, idx *Index) { - for id := range repo.List(backend.Data, nil) { + for id := range repo.List(restic.DataFile, nil) { if _, ok := idx.Packs[id]; !ok { t.Errorf("pack %v missing from index", id.Str()) } @@ -197,7 +197,7 @@ func TestIndexSave(t *testing.T) { for id := range idx.IndexIDs { t.Logf("remove index %v", id.Str()) - err = repo.Backend().Remove(backend.Index, id.String()) + err = repo.Backend().Remove(restic.IndexFile, id.String()) if err != nil { t.Errorf("error removing index %v: %v", id, err) } @@ -235,7 +235,7 @@ func TestIndexAddRemovePack(t *testing.T) { done := make(chan struct{}) defer close(done) - packID := <-repo.List(backend.Data, done) + packID := <-repo.List(restic.DataFile, done) t.Logf("selected pack %v", packID.Str()) @@ -298,7 +298,7 @@ func TestIndexLoadDocReference(t *testing.T) { repo, cleanup := repository.TestRepository(t) defer cleanup() - id, err := repo.SaveUnpacked(backend.Index, docExample) + id, err := repo.SaveUnpacked(restic.IndexFile, docExample) if err != nil { t.Fatalf("SaveUnpacked() returned error %v", err) } diff --git a/src/restic/mock/backend.go b/src/restic/mock/backend.go index b27b64a1c..717f38bf3 100644 --- a/src/restic/mock/backend.go +++ b/src/restic/mock/backend.go @@ -11,7 +11,7 @@ type Backend struct { CloseFn func() error LoadFn func(h restic.Handle, p []byte, off int64) (int, error) SaveFn func(h restic.Handle, p []byte) error - StatFn func(h restic.Handle) (restic.BlobInfo, error) + StatFn func(h restic.Handle) (restic.FileInfo, error) ListFn func(restic.FileType, <-chan struct{}) <-chan string RemoveFn func(restic.FileType, string) error TestFn func(restic.FileType, string) (bool, 
error) @@ -56,9 +56,9 @@ func (m *Backend) Save(h restic.Handle, p []byte) error { } // Stat an object in the backend. -func (m *Backend) Stat(h restic.Handle) (restic.BlobInfo, error) { +func (m *Backend) Stat(h restic.Handle) (restic.FileInfo, error) { if m.StatFn == nil { - return restic.BlobInfo{}, errors.New("not implemented") + return restic.FileInfo{}, errors.New("not implemented") } return m.StatFn(h) diff --git a/src/restic/node.go b/src/restic/node.go index b7f6fbc03..b18b84c6d 100644 --- a/src/restic/node.go +++ b/src/restic/node.go @@ -42,7 +42,7 @@ type Node struct { tree *Tree - path string + Path string `json:"-"` err error } @@ -67,7 +67,7 @@ func (node Node) Tree() *Tree { func NodeFromFileInfo(path string, fi os.FileInfo) (*Node, error) { mask := os.ModePerm | os.ModeType | os.ModeSetuid | os.ModeSetgid | os.ModeSticky node := &Node{ - path: path, + Path: path, Name: fi.Name(), Mode: fi.Mode() & mask, ModTime: fi.ModTime(), @@ -370,15 +370,15 @@ func (node Node) sameContent(other Node) bool { return true } -func (node *Node) isNewer(path string, fi os.FileInfo) bool { +func (node *Node) IsNewer(path string, fi os.FileInfo) bool { if node.FileType != "file" { - debug.Log("node.isNewer", "node %v is newer: not file", path) + debug.Log("node.IsNewer", "node %v is newer: not file", path) return true } tpe := nodeTypeFromFileInfo(fi) if node.Name != fi.Name() || node.FileType != tpe { - debug.Log("node.isNewer", "node %v is newer: name or type changed", path) + debug.Log("node.IsNewer", "node %v is newer: name or type changed", path) return true } @@ -388,7 +388,7 @@ func (node *Node) isNewer(path string, fi os.FileInfo) bool { if !ok { if node.ModTime != fi.ModTime() || node.Size != size { - debug.Log("node.isNewer", "node %v is newer: timestamp or size changed", path) + debug.Log("node.IsNewer", "node %v is newer: timestamp or size changed", path) return true } return false @@ -400,11 +400,11 @@ func (node *Node) isNewer(path string, fi os.FileInfo) bool { node.ChangeTime != changeTime(extendedStat) || node.Inode != uint64(inode) || node.Size != size { - debug.Log("node.isNewer", "node %v is newer: timestamp, size or inode changed", path) + debug.Log("node.IsNewer", "node %v is newer: timestamp, size or inode changed", path) return true } - debug.Log("node.isNewer", "node %v is not newer", path) + debug.Log("node.IsNewer", "node %v is not newer", path) return false } diff --git a/src/restic/node_test.go b/src/restic/node_test.go index a1d2be8e8..ca2b2add1 100644 --- a/src/restic/node_test.go +++ b/src/restic/node_test.go @@ -9,7 +9,6 @@ import ( "time" "restic" - "restic/backend" . 
"restic/test" ) @@ -75,7 +74,7 @@ var nodeTests = []restic.Node{ restic.Node{ Name: "testFile", FileType: "file", - Content: []backend.ID{}, + Content: restic.IDs{}, UID: uint32(os.Getuid()), GID: uint32(os.Getgid()), Mode: 0604, @@ -86,7 +85,7 @@ var nodeTests = []restic.Node{ restic.Node{ Name: "testSuidFile", FileType: "file", - Content: []backend.ID{}, + Content: restic.IDs{}, UID: uint32(os.Getuid()), GID: uint32(os.Getgid()), Mode: 0755 | os.ModeSetuid, @@ -97,7 +96,7 @@ var nodeTests = []restic.Node{ restic.Node{ Name: "testSuidFile2", FileType: "file", - Content: []backend.ID{}, + Content: restic.IDs{}, UID: uint32(os.Getuid()), GID: uint32(os.Getgid()), Mode: 0755 | os.ModeSetgid, @@ -108,7 +107,7 @@ var nodeTests = []restic.Node{ restic.Node{ Name: "testSticky", FileType: "file", - Content: []backend.ID{}, + Content: restic.IDs{}, UID: uint32(os.Getuid()), GID: uint32(os.Getgid()), Mode: 0755 | os.ModeSticky, diff --git a/src/restic/pack/pack.go b/src/restic/pack/pack.go index 34e2b442b..e100578c0 100644 --- a/src/restic/pack/pack.go +++ b/src/restic/pack/pack.go @@ -10,26 +10,12 @@ import ( "github.com/pkg/errors" - "restic/backend" "restic/crypto" ) -// Blob is a blob within a pack. -type Blob struct { - Type restic.BlobType - Length uint - ID restic.ID - Offset uint -} - -func (b Blob) String() string { - return fmt.Sprintf("", - b.ID.Str(), b.Type, b.Length, b.Offset) -} - // Packer is used to create a new Pack. type Packer struct { - blobs []Blob + blobs []restic.Blob bytes uint k *crypto.Key @@ -53,7 +39,7 @@ func (p *Packer) Add(t restic.BlobType, id restic.ID, data []byte) (int, error) p.m.Lock() defer p.m.Unlock() - c := Blob{Type: t, ID: id} + c := restic.Blob{Type: t, ID: id} n, err := p.wr.Write(data) c.Length = uint(n) @@ -64,13 +50,13 @@ func (p *Packer) Add(t restic.BlobType, id restic.ID, data []byte) (int, error) return n, errors.Wrap(err, "Write") } -var entrySize = uint(binary.Size(restic.BlobType(0)) + binary.Size(uint32(0)) + backend.IDSize) +var entrySize = uint(binary.Size(restic.BlobType(0)) + binary.Size(uint32(0)) + restic.IDSize) // headerEntry is used with encoding/binary to read and write header entries type headerEntry struct { Type uint8 Length uint32 - ID [backend.IDSize]byte + ID [restic.IDSize]byte } // Finalize writes the header for all added blobs and finalizes the pack. @@ -167,7 +153,7 @@ func (p *Packer) Count() int { } // Blobs returns the slice of blobs that have been written. -func (p *Packer) Blobs() []Blob { +func (p *Packer) Blobs() []restic.Blob { p.m.Lock() defer p.m.Unlock() @@ -233,7 +219,7 @@ func readHeader(rd io.ReaderAt, size int64) ([]byte, error) { } // List returns the list of entries found in a pack file. 
-func List(k *crypto.Key, rd io.ReaderAt, size int64) (entries []Blob, err error) { +func List(k *crypto.Key, rd io.ReaderAt, size int64) (entries []restic.Blob, err error) { buf, err := readHeader(rd, size) if err != nil { return nil, err @@ -258,7 +244,7 @@ func List(k *crypto.Key, rd io.ReaderAt, size int64) (entries []Blob, err error) return nil, errors.Wrap(err, "binary.Read") } - entry := Blob{ + entry := restic.Blob{ Length: uint(e.Length), ID: e.ID, Offset: pos, diff --git a/src/restic/pack/pack_test.go b/src/restic/pack/pack_test.go index 82b026e7e..bfafa0631 100644 --- a/src/restic/pack/pack_test.go +++ b/src/restic/pack/pack_test.go @@ -7,6 +7,7 @@ import ( "encoding/binary" "encoding/json" "io" + "restic" "testing" "restic/backend" @@ -126,9 +127,9 @@ func TestUnpackReadSeeker(t *testing.T) { b := mem.New() id := backend.Hash(packData) - handle := backend.Handle{Type: backend.Data, Name: id.String()} + handle := restic.Handle{Type: restic.DataFile, Name: id.String()} OK(t, b.Save(handle, packData)) - verifyBlobs(t, bufs, k, backend.ReaderAt(b, handle), packSize) + verifyBlobs(t, bufs, k, restic.ReaderAt(b, handle), packSize) } func TestShortPack(t *testing.T) { @@ -139,7 +140,7 @@ func TestShortPack(t *testing.T) { b := mem.New() id := backend.Hash(packData) - handle := backend.Handle{Type: backend.Data, Name: id.String()} + handle := restic.Handle{Type: restic.DataFile, Name: id.String()} OK(t, b.Save(handle, packData)) verifyBlobs(t, bufs, k, backend.ReaderAt(b, handle), packSize) } diff --git a/src/restic/backend/readerat.go b/src/restic/readerat.go similarity index 95% rename from src/restic/backend/readerat.go rename to src/restic/readerat.go index 027b34456..7d36b3396 100644 --- a/src/restic/backend/readerat.go +++ b/src/restic/readerat.go @@ -1,4 +1,4 @@ -package backend +package restic import ( "io" diff --git a/src/restic/repository.go b/src/restic/repository.go index 82f6ee99e..a5950c33f 100644 --- a/src/restic/repository.go +++ b/src/restic/repository.go @@ -1,7 +1,5 @@ package restic -import "github.com/restic/chunker" - // Repository stores data in a backend. It provides high-level functions and // transparently encrypts/decrypts data. type Repository interface { @@ -9,12 +7,13 @@ type Repository interface { // Backend returns the backend used by the repository Backend() Backend - SetIndex(interface{}) + SetIndex(Index) Index() Index SaveFullIndex() error SaveJSON(BlobType, interface{}) (ID, error) + SaveUnpacked(FileType, []byte) (ID, error) Config() Config @@ -34,13 +33,13 @@ type Repository interface { Flush() error } +// Lister allows listing files in a backend. +type Lister interface { + List(FileType, <-chan struct{}) <-chan string +} + // Index keeps track of the blobs are stored within files. type Index interface { Has(ID, BlobType) bool Lookup(ID, BlobType) ([]PackedBlob, error) } - -// Config stores information about the repository. 
-type Config interface { - ChunkerPolynomial() chunker.Pol -} diff --git a/src/restic/repository/index.go b/src/restic/repository/index.go index 9cd8967f0..266b5d79a 100644 --- a/src/restic/repository/index.go +++ b/src/restic/repository/index.go @@ -3,7 +3,6 @@ package repository import ( "bytes" "encoding/json" - "fmt" "io" "restic" "sync" @@ -40,7 +39,7 @@ func NewIndex() *Index { } } -func (idx *Index) store(blob PackedBlob) { +func (idx *Index) store(blob restic.PackedBlob) { newEntry := indexEntry{ packID: blob.PackID, offset: blob.Offset, @@ -97,7 +96,7 @@ var IndexFull = func(idx *Index) bool { // Store remembers the id and pack in the index. An existing entry will be // silently overwritten. -func (idx *Index) Store(blob PackedBlob) { +func (idx *Index) Store(blob restic.PackedBlob) { idx.m.Lock() defer idx.m.Unlock() @@ -110,25 +109,27 @@ func (idx *Index) Store(blob PackedBlob) { idx.store(blob) } -// Lookup queries the index for the blob ID and returns a PackedBlob. -func (idx *Index) Lookup(id restic.ID, tpe restic.BlobType) (blobs []PackedBlob, err error) { +// Lookup queries the index for the blob ID and returns a restic.PackedBlob. +func (idx *Index) Lookup(id restic.ID, tpe restic.BlobType) (blobs []restic.PackedBlob, err error) { idx.m.Lock() defer idx.m.Unlock() h := restic.BlobHandle{ID: id, Type: tpe} if packs, ok := idx.pack[h]; ok { - blobs = make([]PackedBlob, 0, len(packs)) + blobs = make([]restic.PackedBlob, 0, len(packs)) for _, p := range packs { debug.Log("Index.Lookup", "id %v found in pack %v at %d, length %d", id.Str(), p.packID.Str(), p.offset, p.length) - blob := PackedBlob{ - Type: tpe, - Length: p.length, - ID: id, - Offset: p.offset, + blob := restic.PackedBlob{ + Blob: restic.Blob{ + Type: tpe, + Length: p.length, + ID: id, + Offset: p.offset, + }, PackID: p.packID, } @@ -143,18 +144,20 @@ func (idx *Index) Lookup(id restic.ID, tpe restic.BlobType) (blobs []PackedBlob, } // ListPack returns a list of blobs contained in a pack. -func (idx *Index) ListPack(id restic.ID) (list []PackedBlob) { +func (idx *Index) ListPack(id restic.ID) (list []restic.PackedBlob) { idx.m.Lock() defer idx.m.Unlock() for h, packList := range idx.pack { for _, entry := range packList { if entry.packID == id { - list = append(list, PackedBlob{ - ID: h.ID, - Type: h.Type, - Length: entry.length, - Offset: entry.offset, + list = append(list, restic.PackedBlob{ + Blob: restic.Blob{ + ID: h.ID, + Type: h.Type, + Length: entry.length, + Offset: entry.offset, + }, PackID: entry.packID, }) } @@ -182,7 +185,7 @@ func (idx *Index) LookupSize(id restic.ID, tpe restic.BlobType) (cleartextLength return 0, err } - return blobs[0].PlaintextLength(), nil + return blobs[0].Length - crypto.Extension, nil } // Supersedes returns the list of indexes this index supersedes, if any. @@ -204,32 +207,13 @@ func (idx *Index) AddToSupersedes(ids ...restic.ID) error { return nil } -// PackedBlob is a blob already saved within a pack. 
-type PackedBlob struct { - Type restic.BlobType - Length uint - ID restic.ID - Offset uint - PackID restic.ID -} - -func (pb PackedBlob) String() string { - return fmt.Sprintf(" 0 && checked > maxKeys { return nil, ErrMaxKeysReached } @@ -226,8 +226,8 @@ func AddKey(s *Repository, password string, template *crypto.Key) (*Key, error) // store in repository and return h := restic.Handle{ - Type: backend.Key, - Name: restic.Hash(buf).String(), + FileType: restic.KeyFile, + Name: restic.Hash(buf).String(), } err = s.be.Save(h, buf) diff --git a/src/restic/repository/master_index.go b/src/restic/repository/master_index.go index 50a4a9e03..f82121fb1 100644 --- a/src/restic/repository/master_index.go +++ b/src/restic/repository/master_index.go @@ -7,7 +7,6 @@ import ( "github.com/pkg/errors" "restic/debug" - "restic/pack" ) // MasterIndex is a collection of indexes and IDs of chunks that are in the process of being saved. @@ -22,7 +21,7 @@ func NewMasterIndex() *MasterIndex { } // Lookup queries all known Indexes for the ID and returns the first match. -func (mi *MasterIndex) Lookup(id restic.ID, tpe restic.BlobType) (blobs []PackedBlob, err error) { +func (mi *MasterIndex) Lookup(id restic.ID, tpe restic.BlobType) (blobs []restic.PackedBlob, err error) { mi.idxMutex.RLock() defer mi.idxMutex.RUnlock() @@ -58,7 +57,7 @@ func (mi *MasterIndex) LookupSize(id restic.ID, tpe restic.BlobType) (uint, erro // ListPack returns the list of blobs in a pack. The first matching index is // returned, or nil if no index contains information about the pack id. -func (mi *MasterIndex) ListPack(id restic.ID) (list []PackedBlob) { +func (mi *MasterIndex) ListPack(id restic.ID) (list []restic.PackedBlob) { mi.idxMutex.RLock() defer mi.idxMutex.RUnlock() diff --git a/src/restic/repository/packer_manager.go b/src/restic/repository/packer_manager.go index ea638979f..2b065ed7d 100644 --- a/src/restic/repository/packer_manager.go +++ b/src/restic/repository/packer_manager.go @@ -115,7 +115,7 @@ func (r *Repository) savePacker(p *pack.Packer) error { } id := restic.Hash(data) - h := restic.Handle{Type: restic.DataFile, Name: id.String()} + h := restic.Handle{FileType: restic.DataFile, Name: id.String()} err = r.be.Save(h, data) if err != nil { @@ -133,12 +133,14 @@ func (r *Repository) savePacker(p *pack.Packer) error { // update blobs in the index for _, b := range p.Blobs() { debug.Log("Repo.savePacker", " updating blob %v to pack %v", b.ID.Str(), id.Str()) - r.idx.Current().Store(PackedBlob{ - Type: b.Type, - ID: b.ID, + r.idx.Current().Store(restic.PackedBlob{ + Blob: restic.Blob{ + Type: b.Type, + ID: b.ID, + Offset: b.Offset, + Length: uint(b.Length), + }, PackID: id, - Offset: b.Offset, - Length: uint(b.Length), }) } diff --git a/src/restic/repository/parallel.go b/src/restic/repository/parallel.go index cf892a779..02e2d8f12 100644 --- a/src/restic/repository/parallel.go +++ b/src/restic/repository/parallel.go @@ -4,7 +4,6 @@ import ( "restic" "sync" - "restic/backend" "restic/debug" ) @@ -22,14 +21,14 @@ func closeIfOpen(ch chan struct{}) { // processing stops. If done is closed, the function should return. type ParallelWorkFunc func(id string, done <-chan struct{}) error -// ParallelIDWorkFunc gets one backend.ID to work on. If an error is returned, +// ParallelIDWorkFunc gets one restic.ID to work on. If an error is returned, // processing stops. If done is closed, the function should return. 
type ParallelIDWorkFunc func(id restic.ID, done <-chan struct{}) error // FilesInParallel runs n workers of f in parallel, on the IDs that // repo.List(t) yield. If f returns an error, the process is aborted and the // first error is returned. -func FilesInParallel(repo backend.Lister, t restic.FileType, n uint, f ParallelWorkFunc) error { +func FilesInParallel(repo restic.Lister, t restic.FileType, n uint, f ParallelWorkFunc) error { done := make(chan struct{}) defer closeIfOpen(done) @@ -76,12 +75,12 @@ func FilesInParallel(repo backend.Lister, t restic.FileType, n uint, f ParallelW return nil } -// ParallelWorkFuncParseID converts a function that takes a backend.ID to a -// function that takes a string. Filenames that do not parse as a backend.ID +// ParallelWorkFuncParseID converts a function that takes a restic.ID to a +// function that takes a string. Filenames that do not parse as a restic.ID // are ignored. func ParallelWorkFuncParseID(f ParallelIDWorkFunc) ParallelWorkFunc { return func(s string, done <-chan struct{}) error { - id, err := backend.ParseID(s) + id, err := restic.ParseID(s) if err != nil { debug.Log("repository.ParallelWorkFuncParseID", "invalid ID %q: %v", id, err) return err diff --git a/src/restic/repository/repack.go b/src/restic/repository/repack.go index a799f9de5..dea26e6eb 100644 --- a/src/restic/repository/repack.go +++ b/src/restic/repository/repack.go @@ -15,13 +15,13 @@ import ( // these packs. Each pack is loaded and the blobs listed in keepBlobs is saved // into a new pack. Afterwards, the packs are removed. This operation requires // an exclusive lock on the repo. -func Repack(repo *Repository, packs restic.IDSet, keepBlobs pack.BlobSet) (err error) { +func Repack(repo *Repository, packs restic.IDSet, keepBlobs restic.BlobSet) (err error) { debug.Log("Repack", "repacking %d packs while keeping %d blobs", len(packs), len(keepBlobs)) buf := make([]byte, 0, maxPackSize) for packID := range packs { // load the complete pack - h := restic.Handle{Type: restic.DataFile, Name: packID.String()} + h := restic.Handle{FileType: restic.DataFile, Name: packID.String()} l, err := repo.Backend().Load(h, buf[:cap(buf)], 0) if errors.Cause(err) == io.ErrUnexpectedEOF { @@ -43,7 +43,7 @@ func Repack(repo *Repository, packs restic.IDSet, keepBlobs pack.BlobSet) (err e debug.Log("Repack", "processing pack %v, blobs: %v", packID.Str(), len(blobs)) var plaintext []byte for _, entry := range blobs { - h := pack.Handle{ID: entry.ID, Type: entry.Type} + h := restic.BlobHandle{ID: entry.ID, Type: entry.Type} if !keepBlobs.Has(h) { continue } diff --git a/src/restic/repository/repository.go b/src/restic/repository/repository.go index 55e1871a7..b3860e8ed 100644 --- a/src/restic/repository/repository.go +++ b/src/restic/repository/repository.go @@ -19,7 +19,7 @@ import ( // Repository is used to access a repository in a backend. type Repository struct { be restic.Backend - Config Config + cfg restic.Config key *crypto.Key keyName string idx *MasterIndex @@ -38,17 +38,21 @@ func New(be restic.Backend) *Repository { return repo } +func (r *Repository) Config() restic.Config { + return r.cfg +} + // Find loads the list of all blobs of type t and searches for names which start // with prefix. If none is found, nil and ErrNoIDPrefixFound is returned. If // more than one is found, nil and ErrMultipleIDMatches is returned. 
func (r *Repository) Find(t restic.FileType, prefix string) (string, error) { - return backend.Find(r.be, t, prefix) + return restic.Find(r.be, t, prefix) } // PrefixLength returns the number of bytes required so that all prefixes of // all IDs of type t are unique. func (r *Repository) PrefixLength(t restic.FileType) (int, error) { - return backend.PrefixLength(r.be, t) + return restic.PrefixLength(r.be, t) } // LoadAndDecrypt loads and decrypts data identified by t and id from the @@ -56,7 +60,7 @@ func (r *Repository) PrefixLength(t restic.FileType) (int, error) { func (r *Repository) LoadAndDecrypt(t restic.FileType, id restic.ID) ([]byte, error) { debug.Log("Repo.Load", "load %v with id %v", t, id.Str()) - h := restic.Handle{Type: t, Name: id.String()} + h := restic.Handle{FileType: t, Name: id.String()} buf, err := backend.LoadAll(r.be, h, nil) if err != nil { debug.Log("Repo.Load", "error loading %v: %v", id.Str(), err) @@ -112,7 +116,7 @@ func (r *Repository) LoadBlob(id restic.ID, t restic.BlobType, plaintextBuf []by } // load blob from pack - h := restic.Handle{Type: restic.DataFile, Name: blob.PackID.String()} + h := restic.Handle{FileType: restic.DataFile, Name: blob.PackID.String()} ciphertextBuf := make([]byte, blob.Length) n, err := r.be.Load(h, ciphertextBuf, int64(blob.Offset)) if err != nil { @@ -274,7 +278,7 @@ func (r *Repository) SaveUnpacked(t restic.FileType, p []byte) (id restic.ID, er } id = restic.Hash(ciphertext) - h := restic.Handle{Type: t, Name: id.String()} + h := restic.Handle{FileType: t, Name: id.String()} err = r.be.Save(h, ciphertext) if err != nil { @@ -309,13 +313,13 @@ func (r *Repository) Backend() restic.Backend { } // Index returns the currently used MasterIndex. -func (r *Repository) Index() *MasterIndex { +func (r *Repository) Index() restic.Index { return r.idx } // SetIndex instructs the repository to use the given index. -func (r *Repository) SetIndex(i *MasterIndex) { - r.idx = i +func (r *Repository) SetIndex(i restic.Index) { + r.idx = i.(*MasterIndex) } // SaveIndex saves an index in the repository. @@ -423,7 +427,7 @@ func (r *Repository) SearchKey(password string, maxKeys int) error { r.key = key.master r.packerManager.key = key.master r.keyName = key.Name() - r.Config, err = LoadConfig(r) + r.cfg, err = restic.LoadConfig(r) return err } @@ -438,7 +442,7 @@ func (r *Repository) Init(password string) error { return errors.New("repository master key and config already initialized") } - cfg, err := CreateConfig() + cfg, err := restic.CreateConfig() if err != nil { return err } @@ -448,7 +452,7 @@ func (r *Repository) Init(password string) error { // init creates a new master key with the supplied password and uses it to save // the config into the repo. 
-func (r *Repository) init(password string, cfg Config) error { +func (r *Repository) init(password string, cfg restic.Config) error { key, err := createMasterKey(r, password) if err != nil { return err @@ -457,7 +461,7 @@ func (r *Repository) init(password string, cfg Config) error { r.key = key.master r.packerManager.key = key.master r.keyName = key.Name() - r.Config = cfg + r.cfg = cfg _, err = r.SaveJSONUnpacked(restic.ConfigFile, cfg) return err } @@ -528,7 +532,7 @@ func (r *Repository) list(t restic.FileType, done <-chan struct{}, out chan<- re // input channel closed, we're done return } - id, err = backend.ParseID(strID) + id, err = restic.ParseID(strID) if err != nil { // ignore invalid IDs continue @@ -554,15 +558,15 @@ func (r *Repository) List(t restic.FileType, done <-chan struct{}) <-chan restic // ListPack returns the list of blobs saved in the pack id and the length of // the file as stored in the backend. -func (r *Repository) ListPack(id restic.ID) ([]pack.Blob, int64, error) { - h := restic.Handle{Type: restic.DataFile, Name: id.String()} +func (r *Repository) ListPack(id restic.ID) ([]restic.Blob, int64, error) { + h := restic.Handle{FileType: restic.DataFile, Name: id.String()} blobInfo, err := r.Backend().Stat(h) if err != nil { return nil, 0, err } - blobs, err := pack.List(r.Key(), backend.ReaderAt(r.Backend(), h), blobInfo.Size) + blobs, err := pack.List(r.Key(), restic.ReaderAt(r.Backend(), h), blobInfo.Size) if err != nil { return nil, 0, err } diff --git a/src/restic/repository/testing.go b/src/restic/repository/testing.go index 5b696a3a6..2cf2f8308 100644 --- a/src/restic/repository/testing.go +++ b/src/restic/repository/testing.go @@ -47,7 +47,7 @@ func TestRepositoryWithBackend(t testing.TB, be restic.Backend) (r *Repository, r = New(be) - cfg := TestCreateConfig(t, testChunkerPol) + cfg := restic.TestCreateConfig(t, testChunkerPol) err := r.init(TestPassword, cfg) if err != nil { t.Fatalf("TestRepository(): initialize repo failed: %v", err) diff --git a/src/restic/snapshot_filter_test.go b/src/restic/snapshot_filter_test.go index a9c2e9fae..6c5397a4f 100644 --- a/src/restic/snapshot_filter_test.go +++ b/src/restic/snapshot_filter_test.go @@ -1,20 +1,18 @@ -package restic +package restic_test import ( "encoding/json" - "flag" "fmt" "io/ioutil" "path/filepath" "reflect" + "restic" "sort" "testing" "time" ) -var updateGoldenFiles = flag.Bool("update", false, "update golden files in testdata/") - -func parseTime(s string) time.Time { +func parseTimeUTC(s string) time.Time { t, err := time.Parse("2006-01-02 15:04:05", s) if err != nil { panic(err) @@ -23,29 +21,29 @@ func parseTime(s string) time.Time { return t.UTC() } -var testFilterSnapshots = Snapshots{ - {Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-01 01:02:03"), Paths: []string{"/usr", "/bin"}}, - {Hostname: "bar", Username: "testuser", Time: parseTime("2016-01-01 01:03:03"), Paths: []string{"/usr", "/bin"}}, - {Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-03 07:02:03"), Paths: []string{"/usr", "/bin"}}, - {Hostname: "bar", Username: "testuser", Time: parseTime("2016-01-01 07:08:03"), Paths: []string{"/usr", "/bin"}}, - {Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-04 10:23:03"), Paths: []string{"/usr", "/bin"}}, - {Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-04 11:23:03"), Paths: []string{"/usr", "/bin"}}, - {Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-04 12:23:03"), Paths: []string{"/usr", "/bin"}}, - 
{Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-04 12:24:03"), Paths: []string{"/usr", "/bin"}}, - {Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-04 12:28:03"), Paths: []string{"/usr", "/bin"}}, - {Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-04 12:30:03"), Paths: []string{"/usr", "/bin"}}, - {Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-04 16:23:03"), Paths: []string{"/usr", "/bin"}}, - {Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-05 09:02:03"), Paths: []string{"/usr", "/bin"}}, - {Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-06 08:02:03"), Paths: []string{"/usr", "/bin"}}, - {Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-07 10:02:03"), Paths: []string{"/usr", "/bin"}}, - {Hostname: "foo", Username: "root", Time: parseTime("2016-01-08 20:02:03"), Paths: []string{"/usr", "/sbin"}}, - {Hostname: "foo", Username: "root", Time: parseTime("2016-01-09 21:02:03"), Paths: []string{"/usr", "/sbin"}}, - {Hostname: "bar", Username: "root", Time: parseTime("2016-01-12 21:02:03"), Paths: []string{"/usr", "/sbin"}}, - {Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-12 21:08:03"), Paths: []string{"/usr", "/bin"}}, - {Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-18 12:02:03"), Paths: []string{"/usr", "/bin"}}, +var testFilterSnapshots = restic.Snapshots{ + {Hostname: "foo", Username: "testuser", Time: parseTimeUTC("2016-01-01 01:02:03"), Paths: []string{"/usr", "/bin"}}, + {Hostname: "bar", Username: "testuser", Time: parseTimeUTC("2016-01-01 01:03:03"), Paths: []string{"/usr", "/bin"}}, + {Hostname: "foo", Username: "testuser", Time: parseTimeUTC("2016-01-03 07:02:03"), Paths: []string{"/usr", "/bin"}}, + {Hostname: "bar", Username: "testuser", Time: parseTimeUTC("2016-01-01 07:08:03"), Paths: []string{"/usr", "/bin"}}, + {Hostname: "foo", Username: "testuser", Time: parseTimeUTC("2016-01-04 10:23:03"), Paths: []string{"/usr", "/bin"}}, + {Hostname: "foo", Username: "testuser", Time: parseTimeUTC("2016-01-04 11:23:03"), Paths: []string{"/usr", "/bin"}}, + {Hostname: "foo", Username: "testuser", Time: parseTimeUTC("2016-01-04 12:23:03"), Paths: []string{"/usr", "/bin"}}, + {Hostname: "foo", Username: "testuser", Time: parseTimeUTC("2016-01-04 12:24:03"), Paths: []string{"/usr", "/bin"}}, + {Hostname: "foo", Username: "testuser", Time: parseTimeUTC("2016-01-04 12:28:03"), Paths: []string{"/usr", "/bin"}}, + {Hostname: "foo", Username: "testuser", Time: parseTimeUTC("2016-01-04 12:30:03"), Paths: []string{"/usr", "/bin"}}, + {Hostname: "foo", Username: "testuser", Time: parseTimeUTC("2016-01-04 16:23:03"), Paths: []string{"/usr", "/bin"}}, + {Hostname: "foo", Username: "testuser", Time: parseTimeUTC("2016-01-05 09:02:03"), Paths: []string{"/usr", "/bin"}}, + {Hostname: "foo", Username: "testuser", Time: parseTimeUTC("2016-01-06 08:02:03"), Paths: []string{"/usr", "/bin"}}, + {Hostname: "foo", Username: "testuser", Time: parseTimeUTC("2016-01-07 10:02:03"), Paths: []string{"/usr", "/bin"}}, + {Hostname: "foo", Username: "root", Time: parseTimeUTC("2016-01-08 20:02:03"), Paths: []string{"/usr", "/sbin"}}, + {Hostname: "foo", Username: "root", Time: parseTimeUTC("2016-01-09 21:02:03"), Paths: []string{"/usr", "/sbin"}}, + {Hostname: "bar", Username: "root", Time: parseTimeUTC("2016-01-12 21:02:03"), Paths: []string{"/usr", "/sbin"}}, + {Hostname: "foo", Username: "testuser", Time: parseTimeUTC("2016-01-12 21:08:03"), Paths: []string{"/usr", 
"/bin"}}, + {Hostname: "foo", Username: "testuser", Time: parseTimeUTC("2016-01-18 12:02:03"), Paths: []string{"/usr", "/bin"}}, } -var filterTests = []SnapshotFilter{ +var filterTests = []restic.SnapshotFilter{ {Hostname: "foo"}, {Username: "root"}, {Hostname: "foo", Username: "root"}, @@ -58,7 +56,7 @@ func TestFilterSnapshots(t *testing.T) { sort.Sort(testFilterSnapshots) for i, f := range filterTests { - res := FilterSnapshots(testFilterSnapshots, f) + res := restic.FilterSnapshots(testFilterSnapshots, f) goldenFilename := filepath.Join("testdata", fmt.Sprintf("filter_snapshots_%d", i)) @@ -79,7 +77,7 @@ func TestFilterSnapshots(t *testing.T) { continue } - var want Snapshots + var want restic.Snapshots err = json.Unmarshal(buf, &want) if !reflect.DeepEqual(res, want) { @@ -89,109 +87,109 @@ func TestFilterSnapshots(t *testing.T) { } } -var testExpireSnapshots = Snapshots{ - {Time: parseTime("2014-09-01 10:20:30")}, - {Time: parseTime("2014-09-02 10:20:30")}, - {Time: parseTime("2014-09-05 10:20:30")}, - {Time: parseTime("2014-09-06 10:20:30")}, - {Time: parseTime("2014-09-08 10:20:30")}, - {Time: parseTime("2014-09-09 10:20:30")}, - {Time: parseTime("2014-09-10 10:20:30")}, - {Time: parseTime("2014-09-11 10:20:30")}, - {Time: parseTime("2014-09-20 10:20:30")}, - {Time: parseTime("2014-09-22 10:20:30")}, - {Time: parseTime("2014-08-08 10:20:30")}, - {Time: parseTime("2014-08-10 10:20:30")}, - {Time: parseTime("2014-08-12 10:20:30")}, - {Time: parseTime("2014-08-13 10:20:30")}, - {Time: parseTime("2014-08-13 10:20:30")}, - {Time: parseTime("2014-08-15 10:20:30")}, - {Time: parseTime("2014-08-18 10:20:30")}, - {Time: parseTime("2014-08-20 10:20:30")}, - {Time: parseTime("2014-08-21 10:20:30")}, - {Time: parseTime("2014-08-22 10:20:30")}, - {Time: parseTime("2014-10-01 10:20:30")}, - {Time: parseTime("2014-10-02 10:20:30")}, - {Time: parseTime("2014-10-05 10:20:30")}, - {Time: parseTime("2014-10-06 10:20:30")}, - {Time: parseTime("2014-10-08 10:20:30")}, - {Time: parseTime("2014-10-09 10:20:30")}, - {Time: parseTime("2014-10-10 10:20:30")}, - {Time: parseTime("2014-10-11 10:20:30")}, - {Time: parseTime("2014-10-20 10:20:30")}, - {Time: parseTime("2014-10-22 10:20:30")}, - {Time: parseTime("2014-11-08 10:20:30")}, - {Time: parseTime("2014-11-10 10:20:30")}, - {Time: parseTime("2014-11-12 10:20:30")}, - {Time: parseTime("2014-11-13 10:20:30")}, - {Time: parseTime("2014-11-13 10:20:30")}, - {Time: parseTime("2014-11-15 10:20:30")}, - {Time: parseTime("2014-11-18 10:20:30")}, - {Time: parseTime("2014-11-20 10:20:30")}, - {Time: parseTime("2014-11-21 10:20:30")}, - {Time: parseTime("2014-11-22 10:20:30")}, - {Time: parseTime("2015-09-01 10:20:30")}, - {Time: parseTime("2015-09-02 10:20:30")}, - {Time: parseTime("2015-09-05 10:20:30")}, - {Time: parseTime("2015-09-06 10:20:30")}, - {Time: parseTime("2015-09-08 10:20:30")}, - {Time: parseTime("2015-09-09 10:20:30")}, - {Time: parseTime("2015-09-10 10:20:30")}, - {Time: parseTime("2015-09-11 10:20:30")}, - {Time: parseTime("2015-09-20 10:20:30")}, - {Time: parseTime("2015-09-22 10:20:30")}, - {Time: parseTime("2015-08-08 10:20:30")}, - {Time: parseTime("2015-08-10 10:20:30")}, - {Time: parseTime("2015-08-12 10:20:30")}, - {Time: parseTime("2015-08-13 10:20:30")}, - {Time: parseTime("2015-08-13 10:20:30")}, - {Time: parseTime("2015-08-15 10:20:30")}, - {Time: parseTime("2015-08-18 10:20:30")}, - {Time: parseTime("2015-08-20 10:20:30")}, - {Time: parseTime("2015-08-21 10:20:30")}, - {Time: parseTime("2015-08-22 10:20:30")}, - {Time: 
parseTime("2015-10-01 10:20:30")}, - {Time: parseTime("2015-10-02 10:20:30")}, - {Time: parseTime("2015-10-05 10:20:30")}, - {Time: parseTime("2015-10-06 10:20:30")}, - {Time: parseTime("2015-10-08 10:20:30")}, - {Time: parseTime("2015-10-09 10:20:30")}, - {Time: parseTime("2015-10-10 10:20:30")}, - {Time: parseTime("2015-10-11 10:20:30")}, - {Time: parseTime("2015-10-20 10:20:30")}, - {Time: parseTime("2015-10-22 10:20:30")}, - {Time: parseTime("2015-11-08 10:20:30")}, - {Time: parseTime("2015-11-10 10:20:30")}, - {Time: parseTime("2015-11-12 10:20:30")}, - {Time: parseTime("2015-11-13 10:20:30")}, - {Time: parseTime("2015-11-13 10:20:30")}, - {Time: parseTime("2015-11-15 10:20:30")}, - {Time: parseTime("2015-11-18 10:20:30")}, - {Time: parseTime("2015-11-20 10:20:30")}, - {Time: parseTime("2015-11-21 10:20:30")}, - {Time: parseTime("2015-11-22 10:20:30")}, - {Time: parseTime("2016-01-01 01:02:03")}, - {Time: parseTime("2016-01-01 01:03:03")}, - {Time: parseTime("2016-01-01 07:08:03")}, - {Time: parseTime("2016-01-03 07:02:03")}, - {Time: parseTime("2016-01-04 10:23:03")}, - {Time: parseTime("2016-01-04 11:23:03")}, - {Time: parseTime("2016-01-04 12:23:03")}, - {Time: parseTime("2016-01-04 12:24:03")}, - {Time: parseTime("2016-01-04 12:28:03")}, - {Time: parseTime("2016-01-04 12:30:03")}, - {Time: parseTime("2016-01-04 16:23:03")}, - {Time: parseTime("2016-01-05 09:02:03")}, - {Time: parseTime("2016-01-06 08:02:03")}, - {Time: parseTime("2016-01-07 10:02:03")}, - {Time: parseTime("2016-01-08 20:02:03")}, - {Time: parseTime("2016-01-09 21:02:03")}, - {Time: parseTime("2016-01-12 21:02:03")}, - {Time: parseTime("2016-01-12 21:08:03")}, - {Time: parseTime("2016-01-18 12:02:03")}, +var testExpireSnapshots = restic.Snapshots{ + {Time: parseTimeUTC("2014-09-01 10:20:30")}, + {Time: parseTimeUTC("2014-09-02 10:20:30")}, + {Time: parseTimeUTC("2014-09-05 10:20:30")}, + {Time: parseTimeUTC("2014-09-06 10:20:30")}, + {Time: parseTimeUTC("2014-09-08 10:20:30")}, + {Time: parseTimeUTC("2014-09-09 10:20:30")}, + {Time: parseTimeUTC("2014-09-10 10:20:30")}, + {Time: parseTimeUTC("2014-09-11 10:20:30")}, + {Time: parseTimeUTC("2014-09-20 10:20:30")}, + {Time: parseTimeUTC("2014-09-22 10:20:30")}, + {Time: parseTimeUTC("2014-08-08 10:20:30")}, + {Time: parseTimeUTC("2014-08-10 10:20:30")}, + {Time: parseTimeUTC("2014-08-12 10:20:30")}, + {Time: parseTimeUTC("2014-08-13 10:20:30")}, + {Time: parseTimeUTC("2014-08-13 10:20:30")}, + {Time: parseTimeUTC("2014-08-15 10:20:30")}, + {Time: parseTimeUTC("2014-08-18 10:20:30")}, + {Time: parseTimeUTC("2014-08-20 10:20:30")}, + {Time: parseTimeUTC("2014-08-21 10:20:30")}, + {Time: parseTimeUTC("2014-08-22 10:20:30")}, + {Time: parseTimeUTC("2014-10-01 10:20:30")}, + {Time: parseTimeUTC("2014-10-02 10:20:30")}, + {Time: parseTimeUTC("2014-10-05 10:20:30")}, + {Time: parseTimeUTC("2014-10-06 10:20:30")}, + {Time: parseTimeUTC("2014-10-08 10:20:30")}, + {Time: parseTimeUTC("2014-10-09 10:20:30")}, + {Time: parseTimeUTC("2014-10-10 10:20:30")}, + {Time: parseTimeUTC("2014-10-11 10:20:30")}, + {Time: parseTimeUTC("2014-10-20 10:20:30")}, + {Time: parseTimeUTC("2014-10-22 10:20:30")}, + {Time: parseTimeUTC("2014-11-08 10:20:30")}, + {Time: parseTimeUTC("2014-11-10 10:20:30")}, + {Time: parseTimeUTC("2014-11-12 10:20:30")}, + {Time: parseTimeUTC("2014-11-13 10:20:30")}, + {Time: parseTimeUTC("2014-11-13 10:20:30")}, + {Time: parseTimeUTC("2014-11-15 10:20:30")}, + {Time: parseTimeUTC("2014-11-18 10:20:30")}, + {Time: parseTimeUTC("2014-11-20 10:20:30")}, + {Time: 
parseTimeUTC("2014-11-21 10:20:30")}, + {Time: parseTimeUTC("2014-11-22 10:20:30")}, + {Time: parseTimeUTC("2015-09-01 10:20:30")}, + {Time: parseTimeUTC("2015-09-02 10:20:30")}, + {Time: parseTimeUTC("2015-09-05 10:20:30")}, + {Time: parseTimeUTC("2015-09-06 10:20:30")}, + {Time: parseTimeUTC("2015-09-08 10:20:30")}, + {Time: parseTimeUTC("2015-09-09 10:20:30")}, + {Time: parseTimeUTC("2015-09-10 10:20:30")}, + {Time: parseTimeUTC("2015-09-11 10:20:30")}, + {Time: parseTimeUTC("2015-09-20 10:20:30")}, + {Time: parseTimeUTC("2015-09-22 10:20:30")}, + {Time: parseTimeUTC("2015-08-08 10:20:30")}, + {Time: parseTimeUTC("2015-08-10 10:20:30")}, + {Time: parseTimeUTC("2015-08-12 10:20:30")}, + {Time: parseTimeUTC("2015-08-13 10:20:30")}, + {Time: parseTimeUTC("2015-08-13 10:20:30")}, + {Time: parseTimeUTC("2015-08-15 10:20:30")}, + {Time: parseTimeUTC("2015-08-18 10:20:30")}, + {Time: parseTimeUTC("2015-08-20 10:20:30")}, + {Time: parseTimeUTC("2015-08-21 10:20:30")}, + {Time: parseTimeUTC("2015-08-22 10:20:30")}, + {Time: parseTimeUTC("2015-10-01 10:20:30")}, + {Time: parseTimeUTC("2015-10-02 10:20:30")}, + {Time: parseTimeUTC("2015-10-05 10:20:30")}, + {Time: parseTimeUTC("2015-10-06 10:20:30")}, + {Time: parseTimeUTC("2015-10-08 10:20:30")}, + {Time: parseTimeUTC("2015-10-09 10:20:30")}, + {Time: parseTimeUTC("2015-10-10 10:20:30")}, + {Time: parseTimeUTC("2015-10-11 10:20:30")}, + {Time: parseTimeUTC("2015-10-20 10:20:30")}, + {Time: parseTimeUTC("2015-10-22 10:20:30")}, + {Time: parseTimeUTC("2015-11-08 10:20:30")}, + {Time: parseTimeUTC("2015-11-10 10:20:30")}, + {Time: parseTimeUTC("2015-11-12 10:20:30")}, + {Time: parseTimeUTC("2015-11-13 10:20:30")}, + {Time: parseTimeUTC("2015-11-13 10:20:30")}, + {Time: parseTimeUTC("2015-11-15 10:20:30")}, + {Time: parseTimeUTC("2015-11-18 10:20:30")}, + {Time: parseTimeUTC("2015-11-20 10:20:30")}, + {Time: parseTimeUTC("2015-11-21 10:20:30")}, + {Time: parseTimeUTC("2015-11-22 10:20:30")}, + {Time: parseTimeUTC("2016-01-01 01:02:03")}, + {Time: parseTimeUTC("2016-01-01 01:03:03")}, + {Time: parseTimeUTC("2016-01-01 07:08:03")}, + {Time: parseTimeUTC("2016-01-03 07:02:03")}, + {Time: parseTimeUTC("2016-01-04 10:23:03")}, + {Time: parseTimeUTC("2016-01-04 11:23:03")}, + {Time: parseTimeUTC("2016-01-04 12:23:03")}, + {Time: parseTimeUTC("2016-01-04 12:24:03")}, + {Time: parseTimeUTC("2016-01-04 12:28:03")}, + {Time: parseTimeUTC("2016-01-04 12:30:03")}, + {Time: parseTimeUTC("2016-01-04 16:23:03")}, + {Time: parseTimeUTC("2016-01-05 09:02:03")}, + {Time: parseTimeUTC("2016-01-06 08:02:03")}, + {Time: parseTimeUTC("2016-01-07 10:02:03")}, + {Time: parseTimeUTC("2016-01-08 20:02:03")}, + {Time: parseTimeUTC("2016-01-09 21:02:03")}, + {Time: parseTimeUTC("2016-01-12 21:02:03")}, + {Time: parseTimeUTC("2016-01-12 21:08:03")}, + {Time: parseTimeUTC("2016-01-18 12:02:03")}, } -var expireTests = []ExpirePolicy{ +var expireTests = []restic.ExpirePolicy{ {}, {Last: 10}, {Last: 15}, @@ -214,7 +212,7 @@ var expireTests = []ExpirePolicy{ func TestApplyPolicy(t *testing.T) { for i, p := range expireTests { - keep, remove := ApplyPolicy(testExpireSnapshots, p) + keep, remove := restic.ApplyPolicy(testExpireSnapshots, p) t.Logf("test %d: returned keep %v, remove %v (of %v) expired snapshots for policy %v", i, len(keep), len(remove), len(testExpireSnapshots), p) @@ -255,7 +253,7 @@ func TestApplyPolicy(t *testing.T) { continue } - var want Snapshots + var want restic.Snapshots err = json.Unmarshal(buf, &want) if !reflect.DeepEqual(keep, want) { diff --git 
a/src/restic/test/backend.go b/src/restic/test/backend.go index 5516cecdf..d73f0d4fe 100644 --- a/src/restic/test/backend.go +++ b/src/restic/test/backend.go @@ -8,7 +8,7 @@ import ( "testing" "restic" - "restic/backend" + "restic/archiver" "restic/backend/local" "restic/repository" ) @@ -83,8 +83,8 @@ func TeardownRepo(repo *repository.Repository) { } } -func SnapshotDir(t testing.TB, repo *repository.Repository, path string, parent *backend.ID) *restic.Snapshot { - arch := restic.NewArchiver(repo) +func SnapshotDir(t testing.TB, repo *repository.Repository, path string, parent *restic.ID) *restic.Snapshot { + arch := archiver.New(repo) sn, _, err := arch.Snapshot(nil, []string{path}, parent) OK(t, err) return sn diff --git a/src/restic/testing.go b/src/restic/testing.go index 68b6b2592..16af93156 100644 --- a/src/restic/testing.go +++ b/src/restic/testing.go @@ -29,7 +29,7 @@ type fakeFileSystem struct { // IDs is returned. func (fs fakeFileSystem) saveFile(rd io.Reader) (blobs IDs) { blobs = IDs{} - ch := chunker.New(rd, fs.repo.Config().ChunkerPolynomial()) + ch := chunker.New(rd, fs.repo.Config().ChunkerPolynomial) for { chunk, err := ch.Next(getBuf()) diff --git a/src/restic/tree_test.go b/src/restic/tree_test.go index 2241419f4..f8d632df9 100644 --- a/src/restic/tree_test.go +++ b/src/restic/tree_test.go @@ -97,7 +97,7 @@ func TestLoadTree(t *testing.T) { // save tree tree := restic.NewTree() - id, err := repo.SaveJSON(TreeBlob, tree) + id, err := repo.SaveJSON(restic.TreeBlob, tree) OK(t, err) // save packs diff --git a/src/restic/types/repository.go b/src/restic/types/repository.go index d13d93333..c49cb5e68 100644 --- a/src/restic/types/repository.go +++ b/src/restic/types/repository.go @@ -1,20 +1,21 @@ package types import ( + "restic" "restic/backend" "restic/pack" ) // Repository manages encrypted and packed data stored in a backend. type Repository interface { - LoadJSONUnpacked(backend.Type, backend.ID, interface{}) error - SaveJSONUnpacked(backend.Type, interface{}) (backend.ID, error) + LoadJSONUnpacked(restic.FileType, backend.ID, interface{}) error + SaveJSONUnpacked(restic.FileType, interface{}) (backend.ID, error) Lister } // Lister combines lists packs in a repo and blobs in a pack. type Lister interface { - List(backend.Type, <-chan struct{}) <-chan backend.ID + List(restic.FileType, <-chan struct{}) <-chan backend.ID ListPack(backend.ID) ([]pack.Blob, int64, error) } diff --git a/src/restic/walk_test.go b/src/restic/walk_test.go index c359c0ec9..1edd052ba 100644 --- a/src/restic/walk_test.go +++ b/src/restic/walk_test.go @@ -8,7 +8,7 @@ import ( "time" "restic" - "restic/backend" + "restic/archiver" "restic/pipe" "restic/repository" . 
"restic/test" @@ -22,7 +22,7 @@ func TestWalkTree(t *testing.T) { OK(t, err) // archive a few files - arch := restic.NewArchiver(repo) + arch := archiver.New(repo) sn, _, err := arch.Snapshot(nil, dirs, nil) OK(t, err) @@ -94,7 +94,7 @@ type delayRepo struct { delay time.Duration } -func (d delayRepo) LoadJSONPack(t BlobType, id backend.ID, dst interface{}) error { +func (d delayRepo) LoadJSONPack(t restic.BlobType, id restic.ID, dst interface{}) error { time.Sleep(d.delay) return d.repo.LoadJSONPack(t, id, dst) } @@ -1344,7 +1344,7 @@ func TestDelayedWalkTree(t *testing.T) { repo := OpenLocalRepo(t, repodir) OK(t, repo.LoadIndex()) - root, err := backend.ParseID("937a2f64f736c64ee700c6ab06f840c68c94799c288146a0e81e07f4c94254da") + root, err := restic.ParseID("937a2f64f736c64ee700c6ab06f840c68c94799c288146a0e81e07f4c94254da") OK(t, err) dr := delayRepo{repo, 100 * time.Millisecond} @@ -1373,7 +1373,7 @@ func BenchmarkDelayedWalkTree(t *testing.B) { repo := OpenLocalRepo(t, repodir) OK(t, repo.LoadIndex()) - root, err := backend.ParseID("937a2f64f736c64ee700c6ab06f840c68c94799c288146a0e81e07f4c94254da") + root, err := restic.ParseID("937a2f64f736c64ee700c6ab06f840c68c94799c288146a0e81e07f4c94254da") OK(t, err) dr := delayRepo{repo, 10 * time.Millisecond} From 4c95d2cfdcbf2c7d627bb4276c290fed67b10aba Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Wed, 31 Aug 2016 22:51:35 +0200 Subject: [PATCH 08/40] wip --- src/restic/archiver/archiver_int_test.go | 6 +- src/restic/backend.go | 3 - src/restic/backend/generic.go | 14 ++- src/restic/backend/generic_test.go | 2 +- src/restic/backend/handle.go | 49 -------- src/restic/backend/handle_test.go | 28 ----- src/restic/backend/id.go | 109 ------------------ src/restic/backend/id_int_test.go | 16 --- src/restic/backend/id_test.go | 43 ------- src/restic/backend/ids.go | 69 ------------ src/restic/backend/ids_test.go | 58 ---------- src/restic/backend/idset.go | 111 ------------------- src/restic/backend/idset_test.go | 35 ------ src/restic/backend/interface.go | 63 ----------- src/restic/backend/local/local_test.go | 6 +- src/restic/backend/mem/mem_backend_test.go | 9 +- src/restic/backend/rest/rest.go | 16 +-- src/restic/backend/rest/rest_test.go | 5 +- src/restic/backend/s3/s3_test.go | 6 +- src/restic/backend/sftp/sftp.go | 12 +- src/restic/backend/sftp/sftp_backend_test.go | 6 +- src/restic/backend/test/tests.go | 62 +++++------ src/restic/backend/test/tests_test.go | 9 +- src/restic/backend/utils_test.go | 19 ++-- src/restic/fuse/file.go | 10 +- src/restic/fuse/file_test.go | 18 ++- src/restic/fuse/fuse.go | 5 +- src/restic/fuse/snapshot.go | 3 +- src/restic/index/index.go | 11 +- src/restic/repository.go | 5 + src/restic/repository/repository.go | 3 +- src/restic/test/helpers.go | 8 +- src/restic/types/repository.go | 21 ---- 33 files changed, 121 insertions(+), 719 deletions(-) delete mode 100644 src/restic/backend/handle.go delete mode 100644 src/restic/backend/handle_test.go delete mode 100644 src/restic/backend/id.go delete mode 100644 src/restic/backend/id_int_test.go delete mode 100644 src/restic/backend/id_test.go delete mode 100644 src/restic/backend/ids.go delete mode 100644 src/restic/backend/ids_test.go delete mode 100644 src/restic/backend/idset.go delete mode 100644 src/restic/backend/idset_test.go delete mode 100644 src/restic/backend/interface.go delete mode 100644 src/restic/types/repository.go diff --git a/src/restic/archiver/archiver_int_test.go b/src/restic/archiver/archiver_int_test.go index a35410672..28305b38f 100644 
--- a/src/restic/archiver/archiver_int_test.go +++ b/src/restic/archiver/archiver_int_test.go @@ -82,12 +82,12 @@ func (j testPipeJob) Error() error { return j.err } func (j testPipeJob) Info() os.FileInfo { return j.fi } func (j testPipeJob) Result() chan<- pipe.Result { return j.res } -func testTreeWalker(done <-chan struct{}, out chan<- WalkTreeJob) { +func testTreeWalker(done <-chan struct{}, out chan<- restic.WalkTreeJob) { for _, e := range treeJobs { select { case <-done: return - case out <- WalkTreeJob{Path: e}: + case out <- restic.WalkTreeJob{Path: e}: } } @@ -109,7 +109,7 @@ func testPipeWalker(done <-chan struct{}, out chan<- pipe.Job) { func TestArchivePipe(t *testing.T) { done := make(chan struct{}) - treeCh := make(chan WalkTreeJob) + treeCh := make(chan restic.WalkTreeJob) pipeCh := make(chan pipe.Job) go testTreeWalker(done, treeCh) diff --git a/src/restic/backend.go b/src/restic/backend.go index 39cd83480..37a840412 100644 --- a/src/restic/backend.go +++ b/src/restic/backend.go @@ -31,9 +31,6 @@ type Backend interface { // arbitrary order. A goroutine is started for this. If the channel done is // closed, sending stops. List(t FileType, done <-chan struct{}) <-chan string - - // Delete the complete repository. - Delete() error } // FileInfo is returned by Stat() and contains information about a file in the diff --git a/src/restic/backend/generic.go b/src/restic/backend/generic.go index 7510ad0fe..a267922f3 100644 --- a/src/restic/backend/generic.go +++ b/src/restic/backend/generic.go @@ -1,6 +1,10 @@ package backend -import "github.com/pkg/errors" +import ( + "restic" + + "github.com/pkg/errors" +) // ErrNoIDPrefixFound is returned by Find() when no ID for the given prefix // could be found. @@ -13,7 +17,7 @@ var ErrMultipleIDMatches = errors.New("multiple IDs with prefix found") // Find loads the list of all blobs of type t and searches for names which // start with prefix. If none is found, nil and ErrNoIDPrefixFound is returned. // If more than one is found, nil and ErrMultipleIDMatches is returned. -func Find(be Lister, t Type, prefix string) (string, error) { +func Find(be restic.Lister, t restic.FileType, prefix string) (string, error) { done := make(chan struct{}) defer close(done) @@ -41,7 +45,7 @@ const minPrefixLength = 8 // PrefixLength returns the number of bytes required so that all prefixes of // all names of type t are unique. 
-func PrefixLength(be Lister, t Type) (int, error) { +func PrefixLength(be restic.Lister, t restic.FileType) (int, error) { done := make(chan struct{}) defer close(done) @@ -53,7 +57,7 @@ func PrefixLength(be Lister, t Type) (int, error) { // select prefixes of length l, test if the last one is the same as the current one outer: - for l := minPrefixLength; l < IDSize; l++ { + for l := minPrefixLength; l < restic.IDSize; l++ { var last string for _, name := range list { @@ -66,5 +70,5 @@ outer: return l, nil } - return IDSize, nil + return restic.IDSize, nil } diff --git a/src/restic/backend/generic_test.go b/src/restic/backend/generic_test.go index 20fedc997..64b82a769 100644 --- a/src/restic/backend/generic_test.go +++ b/src/restic/backend/generic_test.go @@ -16,7 +16,7 @@ func (m mockBackend) List(t restic.FileType, done <-chan struct{}) <-chan string return m.list(t, done) } -var samples = backend.IDs{ +var samples = restic.IDs{ ParseID("20bdc1402a6fc9b633aaffffffffffffffffffffffffffffffffffffffffffff"), ParseID("20bdc1402a6fc9b633ccd578c4a92d0f4ef1a457fa2e16c596bc73fb409d6cc0"), ParseID("20bdc1402a6fc9b633ffffffffffffffffffffffffffffffffffffffffffffff"), diff --git a/src/restic/backend/handle.go b/src/restic/backend/handle.go deleted file mode 100644 index 09561161b..000000000 --- a/src/restic/backend/handle.go +++ /dev/null @@ -1,49 +0,0 @@ -package backend - -import ( - "fmt" - - "github.com/pkg/errors" -) - -// Handle is used to store and access data in a backend. -type Handle struct { - Type Type - Name string -} - -func (h Handle) String() string { - name := h.Name - if len(name) > 10 { - name = name[:10] - } - return fmt.Sprintf("<%s/%s>", h.Type, name) -} - -// Valid returns an error if h is not valid. -func (h Handle) Valid() error { - if h.Type == "" { - return errors.New("type is empty") - } - - switch h.Type { - case Data: - case Key: - case Lock: - case Snapshot: - case Index: - case Config: - default: - return errors.Errorf("invalid Type %q", h.Type) - } - - if h.Type == Config { - return nil - } - - if h.Name == "" { - return errors.New("invalid Name") - } - - return nil -} diff --git a/src/restic/backend/handle_test.go b/src/restic/backend/handle_test.go deleted file mode 100644 index a477c0aec..000000000 --- a/src/restic/backend/handle_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package backend - -import "testing" - -var handleTests = []struct { - h Handle - valid bool -}{ - {Handle{Name: "foo"}, false}, - {Handle{Type: "foobar"}, false}, - {Handle{Type: Config, Name: ""}, true}, - {Handle{Type: Data, Name: ""}, false}, - {Handle{Type: "", Name: "x"}, false}, - {Handle{Type: Lock, Name: "010203040506"}, true}, -} - -func TestHandleValid(t *testing.T) { - for i, test := range handleTests { - err := test.h.Valid() - if err != nil && test.valid { - t.Errorf("test %v failed: error returned for valid handle: %v", i, err) - } - - if !test.valid && err == nil { - t.Errorf("test %v failed: expected error for invalid handle not found", i) - } - } -} diff --git a/src/restic/backend/id.go b/src/restic/backend/id.go deleted file mode 100644 index 2714ee63e..000000000 --- a/src/restic/backend/id.go +++ /dev/null @@ -1,109 +0,0 @@ -package backend - -import ( - "bytes" - "crypto/sha256" - "encoding/hex" - "encoding/json" - - "github.com/pkg/errors" -) - -// Hash returns the ID for data. -func Hash(data []byte) ID { - return sha256.Sum256(data) -} - -// IDSize contains the size of an ID, in bytes. -const IDSize = sha256.Size - -// ID references content within a repository. 
-type ID [IDSize]byte - -// ParseID converts the given string to an ID. -func ParseID(s string) (ID, error) { - b, err := hex.DecodeString(s) - - if err != nil { - return ID{}, errors.Wrap(err, "hex.DecodeString") - } - - if len(b) != IDSize { - return ID{}, errors.New("invalid length for hash") - } - - id := ID{} - copy(id[:], b) - - return id, nil -} - -func (id ID) String() string { - return hex.EncodeToString(id[:]) -} - -const shortStr = 4 - -// Str returns the shortened string version of id. -func (id *ID) Str() string { - if id == nil { - return "[nil]" - } - - if id.IsNull() { - return "[null]" - } - - return hex.EncodeToString(id[:shortStr]) -} - -// IsNull returns true iff id only consists of null bytes. -func (id ID) IsNull() bool { - var nullID ID - - return id == nullID -} - -// Equal compares an ID to another other. -func (id ID) Equal(other ID) bool { - return id == other -} - -// EqualString compares this ID to another one, given as a string. -func (id ID) EqualString(other string) (bool, error) { - s, err := hex.DecodeString(other) - if err != nil { - return false, errors.Wrap(err, "hex.DecodeString") - } - - id2 := ID{} - copy(id2[:], s) - - return id == id2, nil -} - -// Compare compares this ID to another one, returning -1, 0, or 1. -func (id ID) Compare(other ID) int { - return bytes.Compare(other[:], id[:]) -} - -// MarshalJSON returns the JSON encoding of id. -func (id ID) MarshalJSON() ([]byte, error) { - return json.Marshal(id.String()) -} - -// UnmarshalJSON parses the JSON-encoded data and stores the result in id. -func (id *ID) UnmarshalJSON(b []byte) error { - var s string - err := json.Unmarshal(b, &s) - if err != nil { - return errors.Wrap(err, "Unmarshal") - } - - _, err = hex.Decode(id[:], []byte(s)) - if err != nil { - return errors.Wrap(err, "hex.Decode") - } - - return nil -} diff --git a/src/restic/backend/id_int_test.go b/src/restic/backend/id_int_test.go deleted file mode 100644 index d46a1554b..000000000 --- a/src/restic/backend/id_int_test.go +++ /dev/null @@ -1,16 +0,0 @@ -package backend - -import "testing" - -func TestIDMethods(t *testing.T) { - var id ID - - if id.Str() != "[null]" { - t.Errorf("ID.Str() returned wrong value, want %v, got %v", "[null]", id.Str()) - } - - var pid *ID - if pid.Str() != "[nil]" { - t.Errorf("ID.Str() returned wrong value, want %v, got %v", "[nil]", pid.Str()) - } -} diff --git a/src/restic/backend/id_test.go b/src/restic/backend/id_test.go deleted file mode 100644 index 47d12d319..000000000 --- a/src/restic/backend/id_test.go +++ /dev/null @@ -1,43 +0,0 @@ -package backend_test - -import ( - "testing" - - "restic/backend" - . 
"restic/test" -) - -var TestStrings = []struct { - id string - data string -}{ - {"c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2", "foobar"}, - {"248d6a61d20638b8e5c026930c3e6039a33ce45964ff2167f6ecedd419db06c1", "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq"}, - {"cc5d46bdb4991c6eae3eb739c9c8a7a46fe9654fab79c47b4fe48383b5b25e1c", "foo/bar"}, - {"4e54d2c721cbdb730f01b10b62dec622962b36966ec685880effa63d71c808f2", "foo/../../baz"}, -} - -func TestID(t *testing.T) { - for _, test := range TestStrings { - id, err := backend.ParseID(test.id) - OK(t, err) - - id2, err := backend.ParseID(test.id) - OK(t, err) - Assert(t, id.Equal(id2), "ID.Equal() does not work as expected") - - ret, err := id.EqualString(test.id) - OK(t, err) - Assert(t, ret, "ID.EqualString() returned wrong value") - - // test json marshalling - buf, err := id.MarshalJSON() - OK(t, err) - Equals(t, "\""+test.id+"\"", string(buf)) - - var id3 backend.ID - err = id3.UnmarshalJSON(buf) - OK(t, err) - Equals(t, id, id3) - } -} diff --git a/src/restic/backend/ids.go b/src/restic/backend/ids.go deleted file mode 100644 index 11cf436d2..000000000 --- a/src/restic/backend/ids.go +++ /dev/null @@ -1,69 +0,0 @@ -package backend - -import ( - "encoding/hex" - "fmt" -) - -// IDs is an ordered list of IDs that implements sort.Interface. -type IDs []ID - -func (ids IDs) Len() int { - return len(ids) -} - -func (ids IDs) Less(i, j int) bool { - if len(ids[i]) < len(ids[j]) { - return true - } - - for k, b := range ids[i] { - if b == ids[j][k] { - continue - } - - if b < ids[j][k] { - return true - } - - return false - } - - return false -} - -func (ids IDs) Swap(i, j int) { - ids[i], ids[j] = ids[j], ids[i] -} - -// Uniq returns list without duplicate IDs. The returned list retains the order -// of the original list so that the order of the first occurrence of each ID -// stays the same. -func (ids IDs) Uniq() (list IDs) { - seen := NewIDSet() - - for _, id := range ids { - if seen.Has(id) { - continue - } - - list = append(list, id) - seen.Insert(id) - } - - return list -} - -type shortID ID - -func (id shortID) String() string { - return hex.EncodeToString(id[:shortStr]) -} - -func (ids IDs) String() string { - elements := make([]shortID, 0, len(ids)) - for _, id := range ids { - elements = append(elements, shortID(id)) - } - return fmt.Sprintf("%v", elements) -} diff --git a/src/restic/backend/ids_test.go b/src/restic/backend/ids_test.go deleted file mode 100644 index f4e3162ca..000000000 --- a/src/restic/backend/ids_test.go +++ /dev/null @@ -1,58 +0,0 @@ -package backend_test - -import ( - "reflect" - "testing" - - "restic/backend" - . 
"restic/test" -) - -var uniqTests = []struct { - before, after backend.IDs -}{ - { - backend.IDs{ - ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), - ParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), - ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), - }, - backend.IDs{ - ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), - ParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), - }, - }, - { - backend.IDs{ - ParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), - ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), - ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), - }, - backend.IDs{ - ParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), - ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), - }, - }, - { - backend.IDs{ - ParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), - ParseID("f658198b405d7e80db5ace1980d125c8da62f636b586c46bf81dfb856a49d0c8"), - ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), - ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), - }, - backend.IDs{ - ParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), - ParseID("f658198b405d7e80db5ace1980d125c8da62f636b586c46bf81dfb856a49d0c8"), - ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), - }, - }, -} - -func TestUniqIDs(t *testing.T) { - for i, test := range uniqTests { - uniq := test.before.Uniq() - if !reflect.DeepEqual(uniq, test.after) { - t.Errorf("uniqIDs() test %v failed\n wanted: %v\n got: %v", i, test.after, uniq) - } - } -} diff --git a/src/restic/backend/idset.go b/src/restic/backend/idset.go deleted file mode 100644 index 4bfe52ca2..000000000 --- a/src/restic/backend/idset.go +++ /dev/null @@ -1,111 +0,0 @@ -package backend - -import "sort" - -// IDSet is a set of IDs. -type IDSet map[ID]struct{} - -// NewIDSet returns a new IDSet, populated with ids. -func NewIDSet(ids ...ID) IDSet { - m := make(IDSet) - for _, id := range ids { - m[id] = struct{}{} - } - - return m -} - -// Has returns true iff id is contained in the set. -func (s IDSet) Has(id ID) bool { - _, ok := s[id] - return ok -} - -// Insert adds id to the set. -func (s IDSet) Insert(id ID) { - s[id] = struct{}{} -} - -// Delete removes id from the set. -func (s IDSet) Delete(id ID) { - delete(s, id) -} - -// List returns a slice of all IDs in the set. -func (s IDSet) List() IDs { - list := make(IDs, 0, len(s)) - for id := range s { - list = append(list, id) - } - - sort.Sort(list) - - return list -} - -// Equals returns true iff s equals other. -func (s IDSet) Equals(other IDSet) bool { - if len(s) != len(other) { - return false - } - - for id := range s { - if _, ok := other[id]; !ok { - return false - } - } - - // length + one-way comparison is sufficient implication of equality - - return true -} - -// Merge adds the blobs in other to the current set. -func (s IDSet) Merge(other IDSet) { - for id := range other { - s.Insert(id) - } -} - -// Intersect returns a new set containing the IDs that are present in both sets. 
-func (s IDSet) Intersect(other IDSet) (result IDSet) { - result = NewIDSet() - - set1 := s - set2 := other - - // iterate over the smaller set - if len(set2) < len(set1) { - set1, set2 = set2, set1 - } - - for id := range set1 { - if set2.Has(id) { - result.Insert(id) - } - } - - return result -} - -// Sub returns a new set containing all IDs that are present in s but not in -// other. -func (s IDSet) Sub(other IDSet) (result IDSet) { - result = NewIDSet() - for id := range s { - if !other.Has(id) { - result.Insert(id) - } - } - - return result -} - -func (s IDSet) String() string { - str := s.List().String() - if len(str) < 2 { - return "{}" - } - - return "{" + str[1:len(str)-1] + "}" -} diff --git a/src/restic/backend/idset_test.go b/src/restic/backend/idset_test.go deleted file mode 100644 index 32dce0e0d..000000000 --- a/src/restic/backend/idset_test.go +++ /dev/null @@ -1,35 +0,0 @@ -package backend_test - -import ( - "testing" - - "restic/backend" - . "restic/test" -) - -var idsetTests = []struct { - id backend.ID - seen bool -}{ - {ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), false}, - {ParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), false}, - {ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), true}, - {ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), true}, - {ParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), true}, - {ParseID("f658198b405d7e80db5ace1980d125c8da62f636b586c46bf81dfb856a49d0c8"), false}, - {ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), true}, - {ParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), true}, - {ParseID("f658198b405d7e80db5ace1980d125c8da62f636b586c46bf81dfb856a49d0c8"), true}, - {ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), true}, -} - -func TestIDSet(t *testing.T) { - set := backend.NewIDSet() - for i, test := range idsetTests { - seen := set.Has(test.id) - if seen != test.seen { - t.Errorf("IDSet test %v failed: wanted %v, got %v", i, test.seen, seen) - } - set.Insert(test.id) - } -} diff --git a/src/restic/backend/interface.go b/src/restic/backend/interface.go deleted file mode 100644 index 24838ddfd..000000000 --- a/src/restic/backend/interface.go +++ /dev/null @@ -1,63 +0,0 @@ -package backend - -// Type is the type of a Blob. -type Type string - -// These are the different data types a backend can store. -const ( - Data Type = "data" - Key = "key" - Lock = "lock" - Snapshot = "snapshot" - Index = "index" - Config = "config" -) - -// Backend is used to store and access data. -type Backend interface { - // Location returns a string that describes the type and location of the - // repository. - Location() string - - // Test a boolean value whether a Blob with the name and type exists. - Test(t Type, name string) (bool, error) - - // Remove removes a Blob with type t and name. - Remove(t Type, name string) error - - // Close the backend - Close() error - - Lister - - // Load returns the data stored in the backend for h at the given offset - // and saves it in p. Load has the same semantics as io.ReaderAt, except - // that a negative offset is also allowed. In this case it references a - // position relative to the end of the file (similar to Seek()). - Load(h Handle, p []byte, off int64) (int, error) - - // Save stores the data in the backend under the given handle. 
- Save(h Handle, p []byte) error - - // Stat returns information about the blob identified by h. - Stat(h Handle) (BlobInfo, error) -} - -// Lister implements listing data items stored in a backend. -type Lister interface { - // List returns a channel that yields all names of blobs of type t in an - // arbitrary order. A goroutine is started for this. If the channel done is - // closed, sending stops. - List(t Type, done <-chan struct{}) <-chan string -} - -// Deleter are backends that allow to self-delete all content stored in them. -type Deleter interface { - // Delete the complete repository. - Delete() error -} - -// BlobInfo is returned by Stat() and contains information about a stored blob. -type BlobInfo struct { - Size int64 -} diff --git a/src/restic/backend/local/local_test.go b/src/restic/backend/local/local_test.go index b139b0aca..3bae88753 100644 --- a/src/restic/backend/local/local_test.go +++ b/src/restic/backend/local/local_test.go @@ -4,8 +4,8 @@ import ( "fmt" "io/ioutil" "os" + "restic" - "restic/backend" "restic/backend/local" "restic/backend/test" ) @@ -30,7 +30,7 @@ func createTempdir() error { } func init() { - test.CreateFn = func() (backend.Backend, error) { + test.CreateFn = func() (restic.Backend, error) { err := createTempdir() if err != nil { return nil, err @@ -38,7 +38,7 @@ func init() { return local.Create(tempBackendDir) } - test.OpenFn = func() (backend.Backend, error) { + test.OpenFn = func() (restic.Backend, error) { err := createTempdir() if err != nil { return nil, err diff --git a/src/restic/backend/mem/mem_backend_test.go b/src/restic/backend/mem/mem_backend_test.go index cde3bda1c..310f4b915 100644 --- a/src/restic/backend/mem/mem_backend_test.go +++ b/src/restic/backend/mem/mem_backend_test.go @@ -1,19 +1,20 @@ package mem_test import ( + "restic" + "github.com/pkg/errors" - "restic/backend" "restic/backend/mem" "restic/backend/test" ) -var be backend.Backend +var be restic.Backend //go:generate go run ../test/generate_backend_tests.go func init() { - test.CreateFn = func() (backend.Backend, error) { + test.CreateFn = func() (restic.Backend, error) { if be != nil { return nil, errors.New("temporary memory backend dir already exists") } @@ -23,7 +24,7 @@ func init() { return be, nil } - test.OpenFn = func() (backend.Backend, error) { + test.OpenFn = func() (restic.Backend, error) { if be == nil { return nil, errors.New("repository not initialized") } diff --git a/src/restic/backend/rest/rest.go b/src/restic/backend/rest/rest.go index 5e756579a..6ea21a1c5 100644 --- a/src/restic/backend/rest/rest.go +++ b/src/restic/backend/rest/rest.go @@ -54,7 +54,7 @@ type restBackend struct { } // Open opens the REST backend with the given config. -func Open(cfg Config) (backend.Backend, error) { +func Open(cfg Config) (restic.Backend, error) { connChan := make(chan struct{}, connLimit) for i := 0; i < connLimit; i++ { connChan <- struct{}{} @@ -152,31 +152,31 @@ func (b *restBackend) Save(h restic.Handle, p []byte) (err error) { } // Stat returns information about a blob. 
-func (b *restBackend) Stat(h restic.Handle) (backend.BlobInfo, error) { +func (b *restBackend) Stat(h restic.Handle) (restic.FileInfo, error) { if err := h.Valid(); err != nil { - return backend.BlobInfo{}, err + return restic.FileInfo{}, err } <-b.connChan resp, err := b.client.Head(restPath(b.url, h)) b.connChan <- struct{}{} if err != nil { - return backend.BlobInfo{}, errors.Wrap(err, "client.Head") + return restic.FileInfo{}, errors.Wrap(err, "client.Head") } if err = resp.Body.Close(); err != nil { - return backend.BlobInfo{}, errors.Wrap(err, "Close") + return restic.FileInfo{}, errors.Wrap(err, "Close") } if resp.StatusCode != 200 { - return backend.BlobInfo{}, errors.Errorf("unexpected HTTP response code %v", resp.StatusCode) + return restic.FileInfo{}, errors.Errorf("unexpected HTTP response code %v", resp.StatusCode) } if resp.ContentLength < 0 { - return backend.BlobInfo{}, errors.New("negative content length") + return restic.FileInfo{}, errors.New("negative content length") } - bi := backend.BlobInfo{ + bi := restic.FileInfo{ Size: resp.ContentLength, } diff --git a/src/restic/backend/rest/rest_test.go b/src/restic/backend/rest/rest_test.go index 206f7c18a..81c64f480 100644 --- a/src/restic/backend/rest/rest_test.go +++ b/src/restic/backend/rest/rest_test.go @@ -8,7 +8,6 @@ import ( "github.com/pkg/errors" - "restic/backend" "restic/backend/rest" "restic/backend/test" . "restic/test" @@ -32,7 +31,7 @@ func init() { URL: url, } - test.CreateFn = func() (backend.Backend, error) { + test.CreateFn = func() (restic.Backend, error) { be, err := rest.Open(cfg) if err != nil { return nil, err @@ -50,7 +49,7 @@ func init() { return be, nil } - test.OpenFn = func() (backend.Backend, error) { + test.OpenFn = func() (restic.Backend, error) { return rest.Open(cfg) } } diff --git a/src/restic/backend/s3/s3_test.go b/src/restic/backend/s3/s3_test.go index 4f0aa0d6b..ab4cc855a 100644 --- a/src/restic/backend/s3/s3_test.go +++ b/src/restic/backend/s3/s3_test.go @@ -4,10 +4,10 @@ import ( "fmt" "net/url" "os" + "restic" "github.com/pkg/errors" - "restic/backend" "restic/backend/s3" "restic/backend/test" . "restic/test" @@ -38,7 +38,7 @@ func init() { cfg.UseHTTP = true } - test.CreateFn = func() (backend.Backend, error) { + test.CreateFn = func() (restic.Backend, error) { be, err := s3.Open(cfg) if err != nil { return nil, err @@ -56,7 +56,7 @@ func init() { return be, nil } - test.OpenFn = func() (backend.Backend, error) { + test.OpenFn = func() (restic.Backend, error) { return s3.Open(cfg) } diff --git a/src/restic/backend/sftp/sftp.go b/src/restic/backend/sftp/sftp.go index ca39e45ea..c5488a815 100644 --- a/src/restic/backend/sftp/sftp.go +++ b/src/restic/backend/sftp/sftp.go @@ -34,6 +34,8 @@ type SFTP struct { result <-chan error } +var _ restic.Backend = &SFTP{} + func startClient(program string, args ...string) (*SFTP, error) { // Connect to a remote host and request the sftp subsystem via the 'ssh' // command. This assumes that passwordless login is correctly configured. @@ -401,22 +403,22 @@ func (r *SFTP) Save(h restic.Handle, p []byte) (err error) { } // Stat returns information about a blob. 
-func (r *SFTP) Stat(h restic.Handle) (backend.BlobInfo, error) { +func (r *SFTP) Stat(h restic.Handle) (restic.FileInfo, error) { debug.Log("sftp.Stat", "stat %v", h) if err := r.clientError(); err != nil { - return backend.BlobInfo{}, err + return restic.FileInfo{}, err } if err := h.Valid(); err != nil { - return backend.BlobInfo{}, err + return restic.FileInfo{}, err } fi, err := r.c.Lstat(r.filename(h.FileType, h.Name)) if err != nil { - return backend.BlobInfo{}, errors.Wrap(err, "Lstat") + return restic.FileInfo{}, errors.Wrap(err, "Lstat") } - return backend.BlobInfo{Size: fi.Size()}, nil + return restic.FileInfo{Size: fi.Size()}, nil } // Test returns true if a blob of the given type and name exists in the backend. diff --git a/src/restic/backend/sftp/sftp_backend_test.go b/src/restic/backend/sftp/sftp_backend_test.go index 2d8e609ca..b7bcc2591 100644 --- a/src/restic/backend/sftp/sftp_backend_test.go +++ b/src/restic/backend/sftp/sftp_backend_test.go @@ -4,11 +4,11 @@ import ( "io/ioutil" "os" "path/filepath" + "restic" "strings" "github.com/pkg/errors" - "restic/backend" "restic/backend/sftp" "restic/backend/test" @@ -52,7 +52,7 @@ func init() { args := []string{"-e"} - test.CreateFn = func() (backend.Backend, error) { + test.CreateFn = func() (restic.Backend, error) { err := createTempdir() if err != nil { return nil, err @@ -61,7 +61,7 @@ func init() { return sftp.Create(tempBackendDir, sftpserver, args...) } - test.OpenFn = func() (backend.Backend, error) { + test.OpenFn = func() (restic.Backend, error) { err := createTempdir() if err != nil { return nil, err diff --git a/src/restic/backend/test/tests.go b/src/restic/backend/test/tests.go index 217e9b683..f1a7ad82e 100644 --- a/src/restic/backend/test/tests.go +++ b/src/restic/backend/test/tests.go @@ -18,18 +18,18 @@ import ( ) // CreateFn is a function that creates a temporary repository for the tests. -var CreateFn func() (backend.Backend, error) +var CreateFn func() (restic.Backend, error) // OpenFn is a function that opens a previously created temporary repository. -var OpenFn func() (backend.Backend, error) +var OpenFn func() (restic.Backend, error) // CleanupFn removes temporary files and directories created during the tests. 
var CleanupFn func() error -var but backend.Backend // backendUnderTest +var but restic.Backend // backendUnderTest var butInitialized bool -func open(t testing.TB) backend.Backend { +func open(t testing.TB) restic.Backend { if OpenFn == nil { t.Fatal("OpenFn not set") } @@ -153,12 +153,12 @@ func TestConfig(t testing.TB) { var testString = "Config" // create config and read it back - _, err := backend.LoadAll(b, restic.Handle{Type: restic.ConfigFile}, nil) + _, err := backend.LoadAll(b, restic.Handle{FileType: restic.ConfigFile}, nil) if err == nil { t.Fatalf("did not get expected error for non-existing config") } - err = b.Save(restic.Handle{Type: restic.ConfigFile}, []byte(testString)) + err = b.Save(restic.Handle{FileType: restic.ConfigFile}, []byte(testString)) if err != nil { t.Fatalf("Save() error: %v", err) } @@ -166,7 +166,7 @@ func TestConfig(t testing.TB) { // try accessing the config with different names, should all return the // same config for _, name := range []string{"", "foo", "bar", "0000000000000000000000000000000000000000000000000000000000000000"} { - h := restic.Handle{Type: restic.ConfigFile, Name: name} + h := restic.Handle{FileType: restic.ConfigFile, Name: name} buf, err := backend.LoadAll(b, h, nil) if err != nil { t.Fatalf("unable to read config with name %q: %v", name, err) @@ -188,7 +188,7 @@ func TestLoad(t testing.TB) { t.Fatalf("Load() did not return an error for invalid handle") } - _, err = b.Load(restic.Handle{Type: restic.DataFile, Name: "foobar"}, nil, 0) + _, err = b.Load(restic.Handle{FileType: restic.DataFile, Name: "foobar"}, nil, 0) if err == nil { t.Fatalf("Load() did not return an error for non-existing blob") } @@ -196,9 +196,9 @@ func TestLoad(t testing.TB) { length := rand.Intn(1<<24) + 2000 data := Random(23, length) - id := backend.Hash(data) + id := restic.Hash(data) - handle := restic.Handle{Type: restic.DataFile, Name: id.String()} + handle := restic.Handle{FileType: restic.DataFile, Name: id.String()} err = b.Save(handle, data) if err != nil { t.Fatalf("Save() error: %v", err) @@ -321,9 +321,9 @@ func TestLoadNegativeOffset(t testing.TB) { length := rand.Intn(1<<24) + 2000 data := Random(23, length) - id := backend.Hash(data) + id := restic.Hash(data) - handle := restic.Handle{Type: restic.DataFile, Name: id.String()} + handle := restic.Handle{FileType: restic.DataFile, Name: id.String()} err := b.Save(handle, data) if err != nil { t.Fatalf("Save() error: %v", err) @@ -373,7 +373,7 @@ func TestLoadNegativeOffset(t testing.TB) { func TestSave(t testing.TB) { b := open(t) defer close(t) - var id backend.ID + var id restic.ID for i := 0; i < 10; i++ { length := rand.Intn(1<<23) + 200000 @@ -382,8 +382,8 @@ func TestSave(t testing.TB) { copy(id[:], data) h := restic.Handle{ - Type: restic.DataFile, - Name: fmt.Sprintf("%s-%d", id, i), + FileType: restic.DataFile, + Name: fmt.Sprintf("%s-%d", id, i), } err := b.Save(h, data) OK(t, err) @@ -405,7 +405,7 @@ func TestSave(t testing.TB) { t.Fatalf("Stat() returned different size, want %q, got %d", len(data), fi.Size) } - err = b.Remove(h.Type, h.Name) + err = b.Remove(h.FileType, h.Name) if err != nil { t.Fatalf("error removing item: %v", err) } @@ -430,7 +430,7 @@ func TestSaveFilenames(t testing.TB) { defer close(t) for i, test := range filenameTests { - h := restic.Handle{Name: test.name, Type: restic.DataFile} + h := restic.Handle{Name: test.name, FileType: restic.DataFile} err := b.Save(h, []byte(test.data)) if err != nil { t.Errorf("test %d failed: Save() returned %v", i, err) @@ -447,7 
+447,7 @@ func TestSaveFilenames(t testing.TB) { t.Errorf("test %d: returned wrong bytes", i) } - err = b.Remove(h.Type, h.Name) + err = b.Remove(h.FileType, h.Name) if err != nil { t.Errorf("test %d failed: Remove() returned %v", i, err) continue @@ -465,9 +465,9 @@ var testStrings = []struct { {"4e54d2c721cbdb730f01b10b62dec622962b36966ec685880effa63d71c808f2", "foo/../../baz"}, } -func store(t testing.TB, b backend.Backend, tpe restic.FileType, data []byte) { - id := backend.Hash(data) - err := b.Save(restic.Handle{Name: id.String(), Type: tpe}, data) +func store(t testing.TB, b restic.Backend, tpe restic.FileType, data []byte) { + id := restic.Hash(data) + err := b.Save(restic.Handle{Name: id.String(), FileType: tpe}, data) OK(t, err) } @@ -490,7 +490,7 @@ func TestBackend(t testing.TB) { } { // detect non-existing files for _, test := range testStrings { - id, err := backend.ParseID(test.id) + id, err := restic.ParseID(test.id) OK(t, err) // test if blob is already in repository @@ -499,7 +499,7 @@ func TestBackend(t testing.TB) { Assert(t, !ret, "blob was found to exist before creating") // try to stat a not existing blob - h := restic.Handle{Type: tpe, Name: id.String()} + h := restic.Handle{FileType: tpe, Name: id.String()} _, err = b.Stat(h) Assert(t, err != nil, "blob data could be extracted before creation") @@ -518,7 +518,7 @@ func TestBackend(t testing.TB) { store(t, b, tpe, []byte(test.data)) // test Load() - h := restic.Handle{Type: tpe, Name: test.id} + h := restic.Handle{FileType: tpe, Name: test.id} buf, err := backend.LoadAll(b, h, nil) OK(t, err) Equals(t, test.data, string(buf)) @@ -539,7 +539,7 @@ func TestBackend(t testing.TB) { test := testStrings[0] // create blob - err := b.Save(restic.Handle{Type: tpe, Name: test.id}, []byte(test.data)) + err := b.Save(restic.Handle{FileType: tpe, Name: test.id}, []byte(test.data)) Assert(t, err != nil, "expected error, got %v", err) // remove and recreate @@ -552,19 +552,19 @@ func TestBackend(t testing.TB) { Assert(t, ok == false, "removed blob still present") // create blob - err = b.Save(restic.Handle{Type: tpe, Name: test.id}, []byte(test.data)) + err = b.Save(restic.Handle{FileType: tpe, Name: test.id}, []byte(test.data)) OK(t, err) // list items - IDs := backend.IDs{} + IDs := restic.IDs{} for _, test := range testStrings { - id, err := backend.ParseID(test.id) + id, err := restic.ParseID(test.id) OK(t, err) IDs = append(IDs, id) } - list := backend.IDs{} + list := restic.IDs{} for s := range b.List(tpe, nil) { list = append(list, ParseID(s)) @@ -584,7 +584,7 @@ func TestBackend(t testing.TB) { // remove content if requested if TestCleanupTempDirs { for _, test := range testStrings { - id, err := backend.ParseID(test.id) + id, err := restic.ParseID(test.id) OK(t, err) found, err := b.Test(tpe, id.String()) @@ -605,7 +605,7 @@ func TestDelete(t testing.TB) { b := open(t) defer close(t) - be, ok := b.(backend.Deleter) + be, ok := b.(restic.Deleter) if !ok { return } diff --git a/src/restic/backend/test/tests_test.go b/src/restic/backend/test/tests_test.go index 5dbba88a4..92c086440 100644 --- a/src/restic/backend/test/tests_test.go +++ b/src/restic/backend/test/tests_test.go @@ -1,19 +1,20 @@ package test_test import ( + "restic" + "github.com/pkg/errors" - "restic/backend" "restic/backend/mem" "restic/backend/test" ) -var be backend.Backend +var be restic.Backend //go:generate go run ../test/generate_backend_tests.go func init() { - test.CreateFn = func() (backend.Backend, error) { + test.CreateFn = func() (restic.Backend, 
error) { if be != nil { return nil, errors.New("temporary memory backend dir already exists") } @@ -23,7 +24,7 @@ func init() { return be, nil } - test.OpenFn = func() (backend.Backend, error) { + test.OpenFn = func() (restic.Backend, error) { if be == nil { return nil, errors.New("repository not initialized") } diff --git a/src/restic/backend/utils_test.go b/src/restic/backend/utils_test.go index 51bb6f8da..8f2f63161 100644 --- a/src/restic/backend/utils_test.go +++ b/src/restic/backend/utils_test.go @@ -3,6 +3,7 @@ package backend_test import ( "bytes" "math/rand" + "restic" "testing" "restic/backend" @@ -19,11 +20,11 @@ func TestLoadAll(t *testing.T) { for i := 0; i < 20; i++ { data := Random(23+i, rand.Intn(MiB)+500*KiB) - id := backend.Hash(data) - err := b.Save(restic.Handle{Name: id.String(), Type: restic.DataFile}, data) + id := restic.Hash(data) + err := b.Save(restic.Handle{Name: id.String(), FileType: restic.DataFile}, data) OK(t, err) - buf, err := backend.LoadAll(b, restic.Handle{Type: restic.DataFile, Name: id.String()}, nil) + buf, err := backend.LoadAll(b, restic.Handle{FileType: restic.DataFile, Name: id.String()}, nil) OK(t, err) if len(buf) != len(data) { @@ -44,12 +45,12 @@ func TestLoadSmallBuffer(t *testing.T) { for i := 0; i < 20; i++ { data := Random(23+i, rand.Intn(MiB)+500*KiB) - id := backend.Hash(data) - err := b.Save(restic.Handle{Name: id.String(), Type: restic.DataFile}, data) + id := restic.Hash(data) + err := b.Save(restic.Handle{Name: id.String(), FileType: restic.DataFile}, data) OK(t, err) buf := make([]byte, len(data)-23) - buf, err = backend.LoadAll(b, restic.Handle{Type: restic.DataFile, Name: id.String()}, buf) + buf, err = backend.LoadAll(b, restic.Handle{FileType: restic.DataFile, Name: id.String()}, buf) OK(t, err) if len(buf) != len(data) { @@ -70,12 +71,12 @@ func TestLoadLargeBuffer(t *testing.T) { for i := 0; i < 20; i++ { data := Random(23+i, rand.Intn(MiB)+500*KiB) - id := backend.Hash(data) - err := b.Save(restic.Handle{Name: id.String(), Type: restic.DataFile}, data) + id := restic.Hash(data) + err := b.Save(restic.Handle{Name: id.String(), FileType: restic.DataFile}, data) OK(t, err) buf := make([]byte, len(data)+100) - buf, err = backend.LoadAll(b, restic.Handle{Type: restic.DataFile, Name: id.String()}, buf) + buf, err = backend.LoadAll(b, restic.Handle{FileType: restic.DataFile, Name: id.String()}, buf) OK(t, err) if len(buf) != len(data) { diff --git a/src/restic/fuse/file.go b/src/restic/fuse/file.go index f6dfa771e..83a173471 100644 --- a/src/restic/fuse/file.go +++ b/src/restic/fuse/file.go @@ -9,9 +9,7 @@ import ( "github.com/pkg/errors" "restic" - "restic/backend" "restic/debug" - "restic/pack" "bazil.org/fuse" "bazil.org/fuse/fs" @@ -28,8 +26,8 @@ var _ = fs.HandleReleaser(&file{}) // BlobLoader is an abstracted repository with a reduced set of methods used // for fuse operations. 
type BlobLoader interface { - LookupBlobSize(backend.ID, pack.BlobType) (uint, error) - LoadBlob(backend.ID, pack.BlobType, []byte) ([]byte, error) + LookupBlobSize(restic.ID, restic.BlobType) (uint, error) + LoadBlob(restic.ID, restic.BlobType, []byte) ([]byte, error) } type file struct { @@ -54,7 +52,7 @@ func newFile(repo BlobLoader, node *restic.Node, ownerIsRoot bool) (*file, error var bytes uint64 sizes := make([]uint, len(node.Content)) for i, id := range node.Content { - size, err := repo.LookupBlobSize(id, pack.Data) + size, err := repo.LookupBlobSize(id, restic.DataBlob) if err != nil { return nil, err } @@ -111,7 +109,7 @@ func (f *file) getBlobAt(i int) (blob []byte, err error) { buf = make([]byte, f.sizes[i]) } - blob, err = f.repo.LoadBlob(f.node.Content[i], pack.Data, buf) + blob, err = f.repo.LoadBlob(f.node.Content[i], restic.DataBlob, buf) if err != nil { debug.Log("file.getBlobAt", "LoadBlob(%v, %v) failed: %v", f.node.Name, f.node.Content[i], err) return nil, err diff --git a/src/restic/fuse/file_test.go b/src/restic/fuse/file_test.go index cb1c67452..bd7bfdca9 100644 --- a/src/restic/fuse/file_test.go +++ b/src/restic/fuse/file_test.go @@ -14,20 +14,18 @@ import ( "bazil.org/fuse" "restic" - "restic/backend" - "restic/pack" . "restic/test" ) type MockRepo struct { - blobs map[backend.ID][]byte + blobs map[restic.ID][]byte } -func NewMockRepo(content map[backend.ID][]byte) *MockRepo { +func NewMockRepo(content map[restic.ID][]byte) *MockRepo { return &MockRepo{blobs: content} } -func (m *MockRepo) LookupBlobSize(id backend.ID, t pack.BlobType) (uint, error) { +func (m *MockRepo) LookupBlobSize(id restic.ID, t restic.BlobType) (uint, error) { buf, ok := m.blobs[id] if !ok { return 0, errors.New("blob not found") @@ -36,7 +34,7 @@ func (m *MockRepo) LookupBlobSize(id backend.ID, t pack.BlobType) (uint, error) return uint(len(buf)), nil } -func (m *MockRepo) LoadBlob(id backend.ID, t pack.BlobType, buf []byte) ([]byte, error) { +func (m *MockRepo) LoadBlob(id restic.ID, t restic.BlobType, buf []byte) ([]byte, error) { size, err := m.LookupBlobSize(id, t) if err != nil { return nil, err @@ -68,12 +66,12 @@ var testContentLengths = []uint{ } var testMaxFileSize uint -func genTestContent() map[backend.ID][]byte { - m := make(map[backend.ID][]byte) +func genTestContent() map[restic.ID][]byte { + m := make(map[restic.ID][]byte) for _, length := range testContentLengths { buf := Random(int(length), int(length)) - id := backend.Hash(buf) + id := restic.Hash(buf) m[id] = buf testMaxFileSize += length } @@ -111,7 +109,7 @@ func TestFuseFile(t *testing.T) { memfile := make([]byte, 0, maxBufSize) - var ids backend.IDs + var ids restic.IDs for id, buf := range repo.blobs { ids = append(ids, id) memfile = append(memfile, buf...) diff --git a/src/restic/fuse/fuse.go b/src/restic/fuse/fuse.go index 6ef3e48cc..45bf3342d 100644 --- a/src/restic/fuse/fuse.go +++ b/src/restic/fuse/fuse.go @@ -5,13 +5,12 @@ package fuse import ( "encoding/binary" - - "restic/backend" + "restic" ) // inodeFromBackendId returns a unique uint64 from a backend id. 
// Endianness has no specific meaning, it is just the simplest way to // transform a []byte to an uint64 -func inodeFromBackendId(id backend.ID) uint64 { +func inodeFromBackendId(id restic.ID) uint64 { return binary.BigEndian.Uint64(id[:8]) } diff --git a/src/restic/fuse/snapshot.go b/src/restic/fuse/snapshot.go index c4753ec4f..8d14823b0 100644 --- a/src/restic/fuse/snapshot.go +++ b/src/restic/fuse/snapshot.go @@ -12,7 +12,6 @@ import ( "bazil.org/fuse/fs" "restic" - "restic/backend" "restic/debug" "restic/repository" @@ -21,7 +20,7 @@ import ( type SnapshotWithId struct { *restic.Snapshot - backend.ID + restic.ID } // These lines statically ensure that a *SnapshotsDir implement the given diff --git a/src/restic/index/index.go b/src/restic/index/index.go index d2497bce1..7d8f47250 100644 --- a/src/restic/index/index.go +++ b/src/restic/index/index.go @@ -9,7 +9,6 @@ import ( "restic/debug" "restic/list" "restic/pack" - "restic/types" "restic/worker" "github.com/pkg/errors" @@ -43,7 +42,7 @@ func newIndex() *Index { } // New creates a new index for repo from scratch. -func New(repo types.Repository, p *restic.Progress) (*Index, error) { +func New(repo restic.Repository, p *restic.Progress) (*Index, error) { done := make(chan struct{}) defer close(done) @@ -99,7 +98,7 @@ type indexJSON struct { Packs []*packJSON `json:"packs"` } -func loadIndexJSON(repo types.Repository, id backend.ID) (*indexJSON, error) { +func loadIndexJSON(repo restic.Repository, id backend.ID) (*indexJSON, error) { debug.Log("index.loadIndexJSON", "process index %v\n", id.Str()) var idx indexJSON @@ -112,7 +111,7 @@ func loadIndexJSON(repo types.Repository, id backend.ID) (*indexJSON, error) { } // Load creates an index by loading all index files from the repo. -func Load(repo types.Repository, p *restic.Progress) (*Index, error) { +func Load(repo restic.Repository, p *restic.Progress) (*Index, error) { debug.Log("index.Load", "loading indexes") p.Start() @@ -300,7 +299,7 @@ func (idx *Index) FindBlob(h pack.Handle) ([]Location, error) { } // Save writes the complete index to the repo. -func (idx *Index) Save(repo types.Repository, supersedes backend.IDs) (backend.ID, error) { +func (idx *Index) Save(repo restic.Repository, supersedes backend.IDs) (backend.ID, error) { packs := make(map[backend.ID][]pack.Blob, len(idx.Packs)) for id, p := range idx.Packs { packs[id] = p.Entries @@ -310,7 +309,7 @@ func (idx *Index) Save(repo types.Repository, supersedes backend.IDs) (backend.I } // Save writes a new index containing the given packs. -func Save(repo types.Repository, packs map[backend.ID][]pack.Blob, supersedes backend.IDs) (backend.ID, error) { +func Save(repo restic.Repository, packs map[backend.ID][]pack.Blob, supersedes backend.IDs) (backend.ID, error) { idx := &indexJSON{ Supersedes: supersedes, Packs: make([]*packJSON, 0, len(packs)), diff --git a/src/restic/repository.go b/src/restic/repository.go index a5950c33f..9bd22dabb 100644 --- a/src/restic/repository.go +++ b/src/restic/repository.go @@ -33,6 +33,11 @@ type Repository interface { Flush() error } +// Deleter removes all data stored in a backend/repo. +type Deleter interface { + Delete() error +} + // Lister allows listing files in a backend. 
type Lister interface { List(FileType, <-chan struct{}) <-chan string diff --git a/src/restic/repository/repository.go b/src/restic/repository/repository.go index b3860e8ed..1a12ac608 100644 --- a/src/restic/repository/repository.go +++ b/src/restic/repository/repository.go @@ -38,6 +38,7 @@ func New(be restic.Backend) *Repository { return repo } +// Config returns the repository configuration. func (r *Repository) Config() restic.Config { return r.cfg } @@ -577,7 +578,7 @@ func (r *Repository) ListPack(id restic.ID) ([]restic.Blob, int64, error) { // Delete calls backend.Delete() if implemented, and returns an error // otherwise. func (r *Repository) Delete() error { - if b, ok := r.be.(backend.Deleter); ok { + if b, ok := r.be.(restic.Deleter); ok { return b.Delete() } diff --git a/src/restic/test/helpers.go b/src/restic/test/helpers.go index 353c9b8ed..6c7ee8de1 100644 --- a/src/restic/test/helpers.go +++ b/src/restic/test/helpers.go @@ -11,12 +11,12 @@ import ( "os/exec" "path/filepath" "reflect" + "restic" "runtime" "testing" mrand "math/rand" - "restic/backend" "restic/backend/local" "restic/repository" ) @@ -63,9 +63,9 @@ func Equals(tb testing.TB, exp, act interface{}) { } } -// ParseID parses s as a backend.ID and panics if that fails. -func ParseID(s string) backend.ID { - id, err := backend.ParseID(s) +// ParseID parses s as a restic.ID and panics if that fails. +func ParseID(s string) restic.ID { + id, err := restic.ParseID(s) if err != nil { panic(err) } diff --git a/src/restic/types/repository.go b/src/restic/types/repository.go deleted file mode 100644 index c49cb5e68..000000000 --- a/src/restic/types/repository.go +++ /dev/null @@ -1,21 +0,0 @@ -package types - -import ( - "restic" - "restic/backend" - "restic/pack" -) - -// Repository manages encrypted and packed data stored in a backend. -type Repository interface { - LoadJSONUnpacked(restic.FileType, backend.ID, interface{}) error - SaveJSONUnpacked(restic.FileType, interface{}) (backend.ID, error) - - Lister -} - -// Lister combines lists packs in a repo and blobs in a pack. 
-type Lister interface { - List(restic.FileType, <-chan struct{}) <-chan backend.ID - ListPack(backend.ID) ([]pack.Blob, int64, error) -} From 3695ba5882a6d20b73611b9aa3d47c2a520b0508 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Wed, 31 Aug 2016 23:07:50 +0200 Subject: [PATCH 09/40] Tests pass for restic/ --- .../archiver/archiver_duplication_test.go | 15 ++-- src/restic/archiver/archiver_int_test.go | 1 + src/restic/archiver/archiver_test.go | 53 ++---------- src/restic/checker/checker.go | 14 ++-- src/restic/checker/checker_test.go | 8 +- src/restic/index/index.go | 80 +++++++++--------- src/restic/index/index_test.go | 12 ++- src/restic/pack/pack_test.go | 25 +++--- src/restic/repository.go | 6 ++ src/restic/repository/index.go | 2 +- src/restic/repository/index_test.go | 82 ++++++++++--------- src/restic/repository/packer_manager_test.go | 5 +- src/restic/repository/repack_test.go | 27 +++--- src/restic/repository/repository_test.go | 21 +++-- 14 files changed, 156 insertions(+), 195 deletions(-) diff --git a/src/restic/archiver/archiver_duplication_test.go b/src/restic/archiver/archiver_duplication_test.go index 37bed0830..9f0867d5e 100644 --- a/src/restic/archiver/archiver_duplication_test.go +++ b/src/restic/archiver/archiver_duplication_test.go @@ -11,9 +11,8 @@ import ( "github.com/pkg/errors" "restic" - "restic/backend" + "restic/archiver" "restic/mock" - "restic/pack" "restic/repository" ) @@ -21,14 +20,14 @@ const parallelSaves = 50 const testSaveIndexTime = 100 * time.Millisecond const testTimeout = 2 * time.Second -var DupID backend.ID +var DupID restic.ID -func randomID() backend.ID { +func randomID() restic.ID { if mrand.Float32() < 0.5 { return DupID } - id := backend.ID{} + id := restic.ID{} _, err := io.ReadFull(rand.Reader, id[:]) if err != nil { panic(err) @@ -52,8 +51,8 @@ func forgetfulBackend() restic.Backend { return nil } - be.StatFn = func(h restic.Handle) (restic.BlobInfo, error) { - return restic.BlobInfo{}, errors.New("not found") + be.StatFn = func(h restic.Handle) (restic.FileInfo, error) { + return restic.FileInfo{}, errors.New("not found") } be.RemoveFn = func(t restic.FileType, name string) error { @@ -86,7 +85,7 @@ func testArchiverDuplication(t *testing.T) { t.Fatal(err) } - arch := restic.NewArchiver(repo) + arch := archiver.New(repo) wg := &sync.WaitGroup{} done := make(chan struct{}) diff --git a/src/restic/archiver/archiver_int_test.go b/src/restic/archiver/archiver_int_test.go index 28305b38f..3b5309a20 100644 --- a/src/restic/archiver/archiver_int_test.go +++ b/src/restic/archiver/archiver_int_test.go @@ -2,6 +2,7 @@ package archiver import ( "os" + "restic" "testing" "restic/pipe" diff --git a/src/restic/archiver/archiver_test.go b/src/restic/archiver/archiver_test.go index 57903931a..176a01802 100644 --- a/src/restic/archiver/archiver_test.go +++ b/src/restic/archiver/archiver_test.go @@ -7,10 +7,9 @@ import ( "time" "restic" - "restic/backend" + "restic/archiver" "restic/checker" "restic/crypto" - "restic/pack" . 
"restic/test" "github.com/pkg/errors" @@ -101,7 +100,7 @@ func archiveDirectory(b testing.TB) { repo := SetupRepo() defer TeardownRepo(repo) - arch := restic.NewArchiver(repo) + arch := archiver.New(repo) _, id, err := arch.Snapshot(nil, []string{BenchArchiveDirectory}, nil) OK(b, err) @@ -191,48 +190,6 @@ func TestArchiveDedup(t *testing.T) { archiveWithDedup(t) } -func BenchmarkLoadTree(t *testing.B) { - repo := SetupRepo() - defer TeardownRepo(repo) - - if BenchArchiveDirectory == "" { - t.Skip("benchdir not set, skipping TestArchiverDedup") - } - - // archive a few files - arch := restic.NewArchiver(repo) - sn, _, err := arch.Snapshot(nil, []string{BenchArchiveDirectory}, nil) - OK(t, err) - t.Logf("archived snapshot %v", sn.ID()) - - list := make([]backend.ID, 0, 10) - done := make(chan struct{}) - - for _, idx := range repo.Index().All() { - for blob := range idx.Each(done) { - if blob.Type != restic.TreeBlob { - continue - } - - list = append(list, blob.ID) - if len(list) == cap(list) { - close(done) - break - } - } - } - - // start benchmark - t.ResetTimer() - - for i := 0; i < t.N; i++ { - for _, id := range list { - _, err := restic.LoadTree(repo, id) - OK(t, err) - } - } -} - // Saves several identical chunks concurrently and later checks that there are no // unreferenced packs in the repository. See also #292 and #358. func TestParallelSaveWithDuplication(t *testing.T) { @@ -248,7 +205,7 @@ func testParallelSaveWithDuplication(t *testing.T, seed int) { dataSizeMb := 128 duplication := 7 - arch := restic.NewArchiver(repo) + arch := archiver.New(repo) chunks := getRandomData(seed, dataSizeMb*1024*1024) errChannels := [](<-chan error){} @@ -265,7 +222,7 @@ func testParallelSaveWithDuplication(t *testing.T, seed int) { go func(c chunker.Chunk, errChan chan<- error) { barrier <- struct{}{} - id := backend.Hash(c.Data) + id := restic.Hash(c.Data) time.Sleep(time.Duration(id[0])) err := arch.Save(restic.DataBlob, c.Data, id) <-barrier @@ -301,7 +258,7 @@ func getRandomData(seed int, size int) []chunker.Chunk { return chunks } -func createAndInitChecker(t *testing.T, repo Repository) *checker.Checker { +func createAndInitChecker(t *testing.T, repo restic.Repository) *checker.Checker { chkr := checker.New(repo) hints, errs := chkr.LoadIndex() diff --git a/src/restic/checker/checker.go b/src/restic/checker/checker.go index 6bcea9b81..3b488cced 100644 --- a/src/restic/checker/checker.go +++ b/src/restic/checker/checker.go @@ -32,11 +32,11 @@ type Checker struct { masterIndex *repository.MasterIndex - repo *repository.Repository + repo restic.Repository } // New returns a new checker which runs on repo. 
-func New(repo *repository.Repository) *Checker { +func New(repo restic.Repository) *Checker { c := &Checker{ packs: restic.NewIDSet(), blobs: restic.NewIDSet(), @@ -180,7 +180,7 @@ func (e PackError) Error() string { return "pack " + e.ID.String() + ": " + e.Err.Error() } -func packIDTester(repo *repository.Repository, inChan <-chan restic.ID, errChan chan<- error, wg *sync.WaitGroup, done <-chan struct{}) { +func packIDTester(repo restic.Repository, inChan <-chan restic.ID, errChan chan<- error, wg *sync.WaitGroup, done <-chan struct{}) { debug.Log("Checker.testPackID", "worker start") defer debug.Log("Checker.testPackID", "worker done") @@ -273,7 +273,7 @@ func (e Error) Error() string { return e.Err.Error() } -func loadTreeFromSnapshot(repo *repository.Repository, id restic.ID) (restic.ID, error) { +func loadTreeFromSnapshot(repo restic.Repository, id restic.ID) (restic.ID, error) { sn, err := restic.LoadSnapshot(repo, id) if err != nil { debug.Log("Checker.loadTreeFromSnapshot", "error loading snapshot %v: %v", id.Str(), err) @@ -289,7 +289,7 @@ func loadTreeFromSnapshot(repo *repository.Repository, id restic.ID) (restic.ID, } // loadSnapshotTreeIDs loads all snapshots from backend and returns the tree IDs. -func loadSnapshotTreeIDs(repo *repository.Repository) (restic.IDs, []error) { +func loadSnapshotTreeIDs(repo restic.Repository) (restic.IDs, []error) { var trees struct { IDs restic.IDs sync.Mutex @@ -349,7 +349,7 @@ type treeJob struct { } // loadTreeWorker loads trees from repo and sends them to out. -func loadTreeWorker(repo *repository.Repository, +func loadTreeWorker(repo restic.Repository, in <-chan restic.ID, out chan<- treeJob, done <-chan struct{}, wg *sync.WaitGroup) { @@ -660,7 +660,7 @@ func (c *Checker) CountPacks() uint64 { } // checkPack reads a pack and checks the integrity of all blobs. -func checkPack(r *repository.Repository, id restic.ID) error { +func checkPack(r restic.Repository, id restic.ID) error { debug.Log("Checker.checkPack", "checking pack %v", id.Str()) h := restic.Handle{FileType: restic.DataFile, Name: id.String()} buf, err := backend.LoadAll(r.Backend(), h, nil) diff --git a/src/restic/checker/checker_test.go b/src/restic/checker/checker_test.go index e719cd229..6e9f29d06 100644 --- a/src/restic/checker/checker_test.go +++ b/src/restic/checker/checker_test.go @@ -8,7 +8,7 @@ import ( "testing" "restic" - "restic/backend" + "restic/archiver" "restic/backend/mem" "restic/checker" "restic/repository" @@ -147,7 +147,7 @@ func TestUnreferencedBlobs(t *testing.T) { snID := "51d249d28815200d59e4be7b3f21a157b864dc343353df9d8e498220c2499b02" OK(t, repo.Backend().Remove(restic.SnapshotFile, snID)) - unusedBlobsBySnapshot := backend.IDs{ + unusedBlobsBySnapshot := restic.IDs{ ParseID("58c748bbe2929fdf30c73262bd8313fe828f8925b05d1d4a87fe109082acb849"), ParseID("988a272ab9768182abfd1fe7d7a7b68967825f0b861d3b36156795832c772235"), ParseID("c01952de4d91da1b1b80bc6e06eaa4ec21523f4853b69dc8231708b9b7ec62d8"), @@ -212,7 +212,7 @@ func TestDuplicatePacksInIndex(t *testing.T) { // errorBackend randomly modifies data after reading. 
type errorBackend struct { - backend.Backend + restic.Backend ProduceErrors bool } @@ -244,7 +244,7 @@ func TestCheckerModifiedData(t *testing.T) { repo := repository.New(be) OK(t, repo.Init(TestPassword)) - arch := restic.NewArchiver(repo) + arch := archiver.New(repo) _, id, err := arch.Snapshot(nil, []string{"."}, nil) OK(t, err) t.Logf("archived as %v", id.Str()) diff --git a/src/restic/index/index.go b/src/restic/index/index.go index 7d8f47250..e2f7f8317 100644 --- a/src/restic/index/index.go +++ b/src/restic/index/index.go @@ -5,10 +5,8 @@ import ( "fmt" "os" "restic" - "restic/backend" "restic/debug" "restic/list" - "restic/pack" "restic/worker" "github.com/pkg/errors" @@ -17,27 +15,27 @@ import ( // Pack contains information about the contents of a pack. type Pack struct { Size int64 - Entries []pack.Blob + Entries []restic.Blob } // Blob contains information about a blob. type Blob struct { Size int64 - Packs backend.IDSet + Packs restic.IDSet } // Index contains information about blobs and packs stored in a repo. type Index struct { - Packs map[backend.ID]Pack - Blobs map[pack.Handle]Blob - IndexIDs backend.IDSet + Packs map[restic.ID]Pack + Blobs map[restic.BlobHandle]Blob + IndexIDs restic.IDSet } func newIndex() *Index { return &Index{ - Packs: make(map[backend.ID]Pack), - Blobs: make(map[pack.Handle]Blob), - IndexIDs: backend.NewIDSet(), + Packs: make(map[restic.ID]Pack), + Blobs: make(map[restic.BlobHandle]Blob), + IndexIDs: restic.NewIDSet(), } } @@ -57,7 +55,7 @@ func New(repo restic.Repository, p *restic.Progress) (*Index, error) { for job := range ch { p.Report(restic.Stat{Blobs: 1}) - packID := job.Data.(backend.ID) + packID := job.Data.(restic.ID) if job.Error != nil { fmt.Fprintf(os.Stderr, "unable to list pack %v: %v\n", packID.Str(), job.Error) continue @@ -82,23 +80,23 @@ func New(repo restic.Repository, p *restic.Progress) (*Index, error) { const loadIndexParallelism = 20 type packJSON struct { - ID backend.ID `json:"id"` + ID restic.ID `json:"id"` Blobs []blobJSON `json:"blobs"` } type blobJSON struct { - ID backend.ID `json:"id"` - Type pack.BlobType `json:"type"` - Offset uint `json:"offset"` - Length uint `json:"length"` + ID restic.ID `json:"id"` + Type restic.BlobType `json:"type"` + Offset uint `json:"offset"` + Length uint `json:"length"` } type indexJSON struct { - Supersedes backend.IDs `json:"supersedes,omitempty"` + Supersedes restic.IDs `json:"supersedes,omitempty"` Packs []*packJSON `json:"packs"` } -func loadIndexJSON(repo restic.Repository, id backend.ID) (*indexJSON, error) { +func loadIndexJSON(repo restic.Repository, id restic.ID) (*indexJSON, error) { debug.Log("index.loadIndexJSON", "process index %v\n", id.Str()) var idx indexJSON @@ -120,8 +118,8 @@ func Load(repo restic.Repository, p *restic.Progress) (*Index, error) { done := make(chan struct{}) defer close(done) - supersedes := make(map[backend.ID]backend.IDSet) - results := make(map[backend.ID]map[backend.ID]Pack) + supersedes := make(map[restic.ID]restic.IDSet) + results := make(map[restic.ID]map[restic.ID]Pack) index := newIndex() @@ -134,17 +132,17 @@ func Load(repo restic.Repository, p *restic.Progress) (*Index, error) { return nil, err } - res := make(map[backend.ID]Pack) - supersedes[id] = backend.NewIDSet() + res := make(map[restic.ID]Pack) + supersedes[id] = restic.NewIDSet() for _, sid := range idx.Supersedes { debug.Log("index.Load", " index %v supersedes %v", id.Str(), sid) supersedes[id].Insert(sid) } for _, jpack := range idx.Packs { - entries := make([]pack.Blob, 0, 
len(jpack.Blobs)) + entries := make([]restic.Blob, 0, len(jpack.Blobs)) for _, blob := range jpack.Blobs { - entry := pack.Blob{ + entry := restic.Blob{ ID: blob.ID, Type: blob.Type, Offset: blob.Offset, @@ -178,7 +176,7 @@ func Load(repo restic.Repository, p *restic.Progress) (*Index, error) { // AddPack adds a pack to the index. If this pack is already in the index, an // error is returned. -func (idx *Index) AddPack(id backend.ID, size int64, entries []pack.Blob) error { +func (idx *Index) AddPack(id restic.ID, size int64, entries []restic.Blob) error { if _, ok := idx.Packs[id]; ok { return errors.Errorf("pack %v already present in the index", id.Str()) } @@ -186,11 +184,11 @@ func (idx *Index) AddPack(id backend.ID, size int64, entries []pack.Blob) error idx.Packs[id] = Pack{Size: size, Entries: entries} for _, entry := range entries { - h := pack.Handle{ID: entry.ID, Type: entry.Type} + h := restic.BlobHandle{ID: entry.ID, Type: entry.Type} if _, ok := idx.Blobs[h]; !ok { idx.Blobs[h] = Blob{ Size: int64(entry.Length), - Packs: backend.NewIDSet(), + Packs: restic.NewIDSet(), } } @@ -201,13 +199,13 @@ func (idx *Index) AddPack(id backend.ID, size int64, entries []pack.Blob) error } // RemovePack deletes a pack from the index. -func (idx *Index) RemovePack(id backend.ID) error { +func (idx *Index) RemovePack(id restic.ID) error { if _, ok := idx.Packs[id]; !ok { return errors.Errorf("pack %v not found in the index", id.Str()) } for _, blob := range idx.Packs[id].Entries { - h := pack.Handle{ID: blob.ID, Type: blob.Type} + h := restic.BlobHandle{ID: blob.ID, Type: blob.Type} idx.Blobs[h].Packs.Delete(id) if len(idx.Blobs[h].Packs) == 0 { @@ -222,13 +220,13 @@ func (idx *Index) RemovePack(id backend.ID) error { // DuplicateBlobs returns a list of blobs that are stored more than once in the // repo. -func (idx *Index) DuplicateBlobs() (dups pack.BlobSet) { - dups = pack.NewBlobSet() - seen := pack.NewBlobSet() +func (idx *Index) DuplicateBlobs() (dups restic.BlobSet) { + dups = restic.NewBlobSet() + seen := restic.NewBlobSet() for _, p := range idx.Packs { for _, entry := range p.Entries { - h := pack.Handle{ID: entry.ID, Type: entry.Type} + h := restic.BlobHandle{ID: entry.ID, Type: entry.Type} if seen.Has(h) { dups.Insert(h) } @@ -240,8 +238,8 @@ func (idx *Index) DuplicateBlobs() (dups pack.BlobSet) { } // PacksForBlobs returns the set of packs in which the blobs are contained. -func (idx *Index) PacksForBlobs(blobs pack.BlobSet) (packs backend.IDSet) { - packs = backend.NewIDSet() +func (idx *Index) PacksForBlobs(blobs restic.BlobSet) (packs restic.IDSet) { + packs = restic.NewIDSet() for h := range blobs { blob, ok := idx.Blobs[h] @@ -259,8 +257,8 @@ func (idx *Index) PacksForBlobs(blobs pack.BlobSet) (packs backend.IDSet) { // Location describes the location of a blob in a pack. type Location struct { - PackID backend.ID - pack.Blob + PackID restic.ID + restic.Blob } // ErrBlobNotFound is return by FindBlob when the blob could not be found in @@ -268,7 +266,7 @@ type Location struct { var ErrBlobNotFound = errors.New("blob not found in index") // FindBlob returns a list of packs and positions the blob can be found in. -func (idx *Index) FindBlob(h pack.Handle) ([]Location, error) { +func (idx *Index) FindBlob(h restic.BlobHandle) ([]Location, error) { blob, ok := idx.Blobs[h] if !ok { return nil, ErrBlobNotFound @@ -299,8 +297,8 @@ func (idx *Index) FindBlob(h pack.Handle) ([]Location, error) { } // Save writes the complete index to the repo. 
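// (Hedged usage sketch, not taken from the patches: assuming idx was built
// via index.New or index.Load as above, and that restic.IDSet exposes a
// List() accessor, the rewritten index could be persisted and the old index
// files marked as superseded roughly like this:
//
//	supersedes := idx.IndexIDs.List()
//	id, err := idx.Save(repo, supersedes)
//	if err != nil {
//		return err
//	}
//	fmt.Printf("new index saved as %v\n", id.Str())
// )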
-func (idx *Index) Save(repo restic.Repository, supersedes backend.IDs) (backend.ID, error) { - packs := make(map[backend.ID][]pack.Blob, len(idx.Packs)) +func (idx *Index) Save(repo restic.Repository, supersedes restic.IDs) (restic.ID, error) { + packs := make(map[restic.ID][]restic.Blob, len(idx.Packs)) for id, p := range idx.Packs { packs[id] = p.Entries } @@ -309,7 +307,7 @@ func (idx *Index) Save(repo restic.Repository, supersedes backend.IDs) (backend. } // Save writes a new index containing the given packs. -func Save(repo restic.Repository, packs map[backend.ID][]pack.Blob, supersedes backend.IDs) (backend.ID, error) { +func Save(repo restic.Repository, packs map[restic.ID][]restic.Blob, supersedes restic.IDs) (restic.ID, error) { idx := &indexJSON{ Supersedes: supersedes, Packs: make([]*packJSON, 0, len(packs)), diff --git a/src/restic/index/index_test.go b/src/restic/index/index_test.go index 0f273a1c5..521d0c0b0 100644 --- a/src/restic/index/index_test.go +++ b/src/restic/index/index_test.go @@ -3,8 +3,6 @@ package index import ( "math/rand" "restic" - "restic/backend" - "restic/pack" "restic/repository" . "restic/test" "testing" @@ -179,7 +177,7 @@ func TestIndexSave(t *testing.T) { idx := loadIndex(t, repo) - packs := make(map[backend.ID][]pack.Blob) + packs := make(map[restic.ID][]restic.Blob) for id := range idx.Packs { if rand.Float32() < 0.5 { packs[id] = idx.Packs[id].Entries @@ -248,7 +246,7 @@ func TestIndexAddRemovePack(t *testing.T) { } for _, blob := range blobs { - h := pack.Handle{ID: blob.ID, Type: blob.Type} + h := restic.BlobHandle{ID: blob.ID, Type: blob.Type} _, err := idx.FindBlob(h) if err == nil { t.Errorf("removed blob %v found in index", h) @@ -308,7 +306,7 @@ func TestIndexLoadDocReference(t *testing.T) { idx := loadIndex(t, repo) blobID := ParseID("d3dc577b4ffd38cc4b32122cabf8655a0223ed22edfd93b353dc0c3f2b0fdf66") - locs, err := idx.FindBlob(pack.Handle{ID: blobID, Type: pack.Data}) + locs, err := idx.FindBlob(restic.BlobHandle{ID: blobID, Type: restic.DataBlob}) if err != nil { t.Errorf("FindBlob() returned error %v", err) } @@ -322,8 +320,8 @@ func TestIndexLoadDocReference(t *testing.T) { t.Errorf("blob IDs are not equal: %v != %v", l.ID, blobID) } - if l.Type != pack.Data { - t.Errorf("want type %v, got %v", pack.Data, l.Type) + if l.Type != restic.DataBlob { + t.Errorf("want type %v, got %v", restic.DataBlob, l.Type) } if l.Offset != 150 { diff --git a/src/restic/pack/pack_test.go b/src/restic/pack/pack_test.go index bfafa0631..5e5b1fc1e 100644 --- a/src/restic/pack/pack_test.go +++ b/src/restic/pack/pack_test.go @@ -10,7 +10,6 @@ import ( "restic" "testing" - "restic/backend" "restic/backend/mem" "restic/crypto" "restic/pack" @@ -21,7 +20,7 @@ var testLens = []int{23, 31650, 25860, 10928, 13769, 19862, 5211, 127, 13690, 30 type Buf struct { data []byte - id backend.ID + id restic.ID } func newPack(t testing.TB, k *crypto.Key, lengths []int) ([]Buf, []byte, uint) { @@ -38,7 +37,7 @@ func newPack(t testing.TB, k *crypto.Key, lengths []int) ([]Buf, []byte, uint) { // pack blobs p := pack.NewPacker(k, nil) for _, b := range bufs { - p.Add(pack.Tree, b.id, b.data) + p.Add(restic.TreeBlob, b.id, b.data) } _, err := p.Finalize() @@ -56,7 +55,7 @@ func verifyBlobs(t testing.TB, bufs []Buf, k *crypto.Key, rd io.ReaderAt, packSi // header length written += binary.Size(uint32(0)) // header - written += len(bufs) * (binary.Size(pack.BlobType(0)) + binary.Size(uint32(0)) + backend.IDSize) + written += len(bufs) * (binary.Size(restic.BlobType(0)) + 
binary.Size(uint32(0)) + restic.IDSize) // header crypto written += crypto.Extension @@ -96,11 +95,11 @@ func TestCreatePack(t *testing.T) { } var blobTypeJSON = []struct { - t pack.BlobType + t restic.BlobType res string }{ - {pack.Data, `"data"`}, - {pack.Tree, `"tree"`}, + {restic.DataBlob, `"data"`}, + {restic.TreeBlob, `"tree"`}, } func TestBlobTypeJSON(t *testing.T) { @@ -111,7 +110,7 @@ func TestBlobTypeJSON(t *testing.T) { Equals(t, test.res, string(buf)) // test unserialize - var v pack.BlobType + var v restic.BlobType err = json.Unmarshal([]byte(test.res), &v) OK(t, err) Equals(t, test.t, v) @@ -125,9 +124,9 @@ func TestUnpackReadSeeker(t *testing.T) { bufs, packData, packSize := newPack(t, k, testLens) b := mem.New() - id := backend.Hash(packData) + id := restic.Hash(packData) - handle := restic.Handle{Type: restic.DataFile, Name: id.String()} + handle := restic.Handle{FileType: restic.DataFile, Name: id.String()} OK(t, b.Save(handle, packData)) verifyBlobs(t, bufs, k, restic.ReaderAt(b, handle), packSize) } @@ -138,9 +137,9 @@ func TestShortPack(t *testing.T) { bufs, packData, packSize := newPack(t, k, []int{23}) b := mem.New() - id := backend.Hash(packData) + id := restic.Hash(packData) - handle := restic.Handle{Type: restic.DataFile, Name: id.String()} + handle := restic.Handle{FileType: restic.DataFile, Name: id.String()} OK(t, b.Save(handle, packData)) - verifyBlobs(t, bufs, k, backend.ReaderAt(b, handle), packSize) + verifyBlobs(t, bufs, k, restic.ReaderAt(b, handle), packSize) } diff --git a/src/restic/repository.go b/src/restic/repository.go index 9bd22dabb..060a8f3a9 100644 --- a/src/restic/repository.go +++ b/src/restic/repository.go @@ -1,5 +1,7 @@ package restic +import "restic/crypto" + // Repository stores data in a backend. It provides high-level functions and // transparently encrypts/decrypts data. type Repository interface { @@ -7,6 +9,8 @@ type Repository interface { // Backend returns the backend used by the repository Backend() Backend + Key() *crypto.Key + SetIndex(Index) Index() Index @@ -24,6 +28,7 @@ type Repository interface { LoadJSONPack(BlobType, ID, interface{}) error LoadJSONUnpacked(FileType, ID, interface{}) error LoadBlob(ID, BlobType, []byte) ([]byte, error) + LoadAndDecrypt(FileType, ID) ([]byte, error) LookupBlobSize(ID, BlobType) (uint, error) @@ -47,4 +52,5 @@ type Lister interface { type Index interface { Has(ID, BlobType) bool Lookup(ID, BlobType) ([]PackedBlob, error) + Count(BlobType) uint } diff --git a/src/restic/repository/index.go b/src/restic/repository/index.go index 266b5d79a..f543a25ec 100644 --- a/src/restic/repository/index.go +++ b/src/restic/repository/index.go @@ -534,7 +534,7 @@ func DecodeOldIndex(rd io.Reader) (idx *Index, err error) { } // LoadIndexWithDecoder loads the index and decodes it with fn. -func LoadIndexWithDecoder(repo *Repository, id restic.ID, fn func(io.Reader) (*Index, error)) (idx *Index, err error) { +func LoadIndexWithDecoder(repo restic.Repository, id restic.ID, fn func(io.Reader) (*Index, error)) (idx *Index, err error) { debug.Log("LoadIndexWithDecoder", "Loading index %v", id[:8]) buf, err := repo.LoadAndDecrypt(restic.IndexFile, id) diff --git a/src/restic/repository/index_test.go b/src/restic/repository/index_test.go index 6b45d5990..65e8cdbd7 100644 --- a/src/restic/repository/index_test.go +++ b/src/restic/repository/index_test.go @@ -5,8 +5,6 @@ import ( "restic" "testing" - "restic/backend" - "restic/pack" "restic/repository" . 
"restic/test" ) @@ -24,24 +22,26 @@ func TestIndexSerialize(t *testing.T) { // create 50 packs with 20 blobs each for i := 0; i < 50; i++ { - packID := backend.RandomID() + packID := restic.TestRandomID() pos := uint(0) for j := 0; j < 20; j++ { - id := backend.RandomID() + id := restic.TestRandomID() length := uint(i*100 + j) - idx.Store(repository.PackedBlob{ - Type: pack.Data, - ID: id, + idx.Store(restic.PackedBlob{ + Blob: restic.Blob{ + Type: restic.DataBlob, + ID: id, + Offset: pos, + Length: length, + }, PackID: packID, - Offset: pos, - Length: length, }) tests = append(tests, testEntry{ id: id, pack: packID, - tpe: pack.Data, + tpe: restic.DataBlob, offset: pos, length: length, }) @@ -94,24 +94,26 @@ func TestIndexSerialize(t *testing.T) { // add more blobs to idx newtests := []testEntry{} for i := 0; i < 10; i++ { - packID := backend.RandomID() + packID := restic.TestRandomID() pos := uint(0) for j := 0; j < 10; j++ { - id := backend.RandomID() + id := restic.TestRandomID() length := uint(i*100 + j) - idx.Store(repository.PackedBlob{ - Type: pack.Data, - ID: id, + idx.Store(restic.PackedBlob{ + Blob: restic.Blob{ + Type: restic.DataBlob, + ID: id, + Offset: pos, + Length: length, + }, PackID: packID, - Offset: pos, - Length: length, }) newtests = append(newtests, testEntry{ id: id, pack: packID, - tpe: pack.Data, + tpe: restic.DataBlob, offset: pos, length: length, }) @@ -128,7 +130,7 @@ func TestIndexSerialize(t *testing.T) { Assert(t, idx.Final(), "index not final after encoding") - id := backend.RandomID() + id := restic.TestRandomID() OK(t, idx.SetID(id)) id2, err := idx.ID() Assert(t, id2.Equal(id), @@ -165,18 +167,20 @@ func TestIndexSize(t *testing.T) { packs := 200 blobs := 100 for i := 0; i < packs; i++ { - packID := backend.RandomID() + packID := restic.TestRandomID() pos := uint(0) for j := 0; j < blobs; j++ { - id := backend.RandomID() + id := restic.TestRandomID() length := uint(i*100 + j) - idx.Store(repository.PackedBlob{ - Type: pack.Data, - ID: id, + idx.Store(restic.PackedBlob{ + Blob: restic.Blob{ + Type: restic.DataBlob, + ID: id, + Offset: pos, + Length: length, + }, PackID: packID, - Offset: pos, - Length: length, }) pos += length @@ -257,15 +261,15 @@ var exampleTests = []struct { { ParseID("3ec79977ef0cf5de7b08cd12b874cd0f62bbaf7f07f3497a5b1bbcc8cb39b1ce"), ParseID("73d04e6125cf3c28a299cc2f3cca3b78ceac396e4fcf9575e34536b26782413c"), - pack.Data, 0, 25, + restic.DataBlob, 0, 25, }, { ParseID("9ccb846e60d90d4eb915848add7aa7ea1e4bbabfc60e573db9f7bfb2789afbae"), ParseID("73d04e6125cf3c28a299cc2f3cca3b78ceac396e4fcf9575e34536b26782413c"), - pack.Tree, 38, 100, + restic.TreeBlob, 38, 100, }, { ParseID("d3dc577b4ffd38cc4b32122cabf8655a0223ed22edfd93b353dc0c3f2b0fdf66"), ParseID("73d04e6125cf3c28a299cc2f3cca3b78ceac396e4fcf9575e34536b26782413c"), - pack.Data, 150, 123, + restic.DataBlob, 150, 123, }, } @@ -275,9 +279,9 @@ var exampleLookupTest = struct { }{ ParseID("73d04e6125cf3c28a299cc2f3cca3b78ceac396e4fcf9575e34536b26782413c"), map[restic.ID]restic.BlobType{ - ParseID("3ec79977ef0cf5de7b08cd12b874cd0f62bbaf7f07f3497a5b1bbcc8cb39b1ce"): pack.Data, - ParseID("9ccb846e60d90d4eb915848add7aa7ea1e4bbabfc60e573db9f7bfb2789afbae"): pack.Tree, - ParseID("d3dc577b4ffd38cc4b32122cabf8655a0223ed22edfd93b353dc0c3f2b0fdf66"): pack.Data, + ParseID("3ec79977ef0cf5de7b08cd12b874cd0f62bbaf7f07f3497a5b1bbcc8cb39b1ce"): restic.DataBlob, + ParseID("9ccb846e60d90d4eb915848add7aa7ea1e4bbabfc60e573db9f7bfb2789afbae"): restic.TreeBlob, + 
ParseID("d3dc577b4ffd38cc4b32122cabf8655a0223ed22edfd93b353dc0c3f2b0fdf66"): restic.DataBlob, }, } @@ -349,13 +353,15 @@ func TestIndexPacks(t *testing.T) { packs := restic.NewIDSet() for i := 0; i < 20; i++ { - packID := backend.RandomID() - idx.Store(repository.PackedBlob{ - Type: pack.Data, - ID: backend.RandomID(), + packID := restic.TestRandomID() + idx.Store(restic.PackedBlob{ + Blob: restic.Blob{ + Type: restic.DataBlob, + ID: restic.TestRandomID(), + Offset: 0, + Length: 23, + }, PackID: packID, - Offset: 0, - Length: 23, }) packs.Insert(packID) diff --git a/src/restic/repository/packer_manager_test.go b/src/restic/repository/packer_manager_test.go index 0e99e2e90..323051612 100644 --- a/src/restic/repository/packer_manager_test.go +++ b/src/restic/repository/packer_manager_test.go @@ -7,7 +7,6 @@ import ( "restic" "restic/backend/mem" "restic/crypto" - "restic/pack" "testing" ) @@ -64,7 +63,7 @@ func saveFile(t testing.TB, be Saver, filename string, n int) { t.Fatal(err) } - h := restic.Handle{Type: restic.DataFile, Name: restic.Hash(data).String()} + h := restic.Handle{FileType: restic.DataFile, Name: restic.Hash(data).String()} err = be.Save(h, data) if err != nil { @@ -95,7 +94,7 @@ func fillPacks(t testing.TB, rnd *randReader, be Saver, pm *packerManager, buf [ t.Fatal(err) } - n, err := packer.Add(pack.Data, id, buf) + n, err := packer.Add(restic.DataBlob, id, buf) if n != l { t.Errorf("Add() returned invalid number of bytes: want %v, got %v", n, l) } diff --git a/src/restic/repository/repack_test.go b/src/restic/repository/repack_test.go index 804cf77f1..026e43cbc 100644 --- a/src/restic/repository/repack_test.go +++ b/src/restic/repository/repack_test.go @@ -4,7 +4,6 @@ import ( "io" "math/rand" "restic" - "restic/pack" "restic/repository" "testing" ) @@ -32,18 +31,18 @@ func createRandomBlobs(t testing.TB, repo *repository.Repository, blobs int, pDa ) if rand.Float32() < pData { - tpe = pack.Data + tpe = restic.DataBlob length = randomSize(10*1024, 1024*1024) // 10KiB to 1MiB of data } else { - tpe = pack.Tree + tpe = restic.TreeBlob length = randomSize(1*1024, 20*1024) // 1KiB to 20KiB } buf := random(t, length) id := restic.Hash(buf) - if repo.Index().Has(id, pack.Data) { - t.Errorf("duplicate blob %v/%v ignored", id, pack.Data) + if repo.Index().Has(id, restic.DataBlob) { + t.Errorf("duplicate blob %v/%v ignored", id, restic.DataBlob) continue } @@ -66,14 +65,14 @@ func createRandomBlobs(t testing.TB, repo *repository.Repository, blobs int, pDa // selectBlobs splits the list of all blobs randomly into two lists. A blob // will be contained in the firstone ith probability p. 
-func selectBlobs(t *testing.T, repo *repository.Repository, p float32) (list1, list2 pack.BlobSet) { +func selectBlobs(t *testing.T, repo *repository.Repository, p float32) (list1, list2 restic.BlobSet) { done := make(chan struct{}) defer close(done) - list1 = pack.NewBlobSet() - list2 = pack.NewBlobSet() + list1 = restic.NewBlobSet() + list2 = restic.NewBlobSet() - blobs := pack.NewBlobSet() + blobs := restic.NewBlobSet() for id := range repo.List(restic.DataFile, done) { entries, _, err := repo.ListPack(id) @@ -82,7 +81,7 @@ func selectBlobs(t *testing.T, repo *repository.Repository, p float32) (list1, l } for _, entry := range entries { - h := pack.Handle{ID: entry.ID, Type: entry.Type} + h := restic.BlobHandle{ID: entry.ID, Type: entry.Type} if blobs.Has(h) { t.Errorf("ignoring duplicate blob %v", h) continue @@ -90,9 +89,9 @@ func selectBlobs(t *testing.T, repo *repository.Repository, p float32) (list1, l blobs.Insert(h) if rand.Float32() <= p { - list1.Insert(pack.Handle{ID: entry.ID, Type: entry.Type}) + list1.Insert(restic.BlobHandle{ID: entry.ID, Type: entry.Type}) } else { - list2.Insert(pack.Handle{ID: entry.ID, Type: entry.Type}) + list2.Insert(restic.BlobHandle{ID: entry.ID, Type: entry.Type}) } } @@ -113,7 +112,7 @@ func listPacks(t *testing.T, repo *repository.Repository) restic.IDSet { return list } -func findPacksForBlobs(t *testing.T, repo *repository.Repository, blobs pack.BlobSet) restic.IDSet { +func findPacksForBlobs(t *testing.T, repo *repository.Repository, blobs restic.BlobSet) restic.IDSet { packs := restic.NewIDSet() idx := repo.Index() @@ -131,7 +130,7 @@ func findPacksForBlobs(t *testing.T, repo *repository.Repository, blobs pack.Blo return packs } -func repack(t *testing.T, repo *repository.Repository, packs restic.IDSet, blobs pack.BlobSet) { +func repack(t *testing.T, repo *repository.Repository, packs restic.IDSet, blobs restic.BlobSet) { err := repository.Repack(repo, packs, blobs) if err != nil { t.Fatal(err) diff --git a/src/restic/repository/repository_test.go b/src/restic/repository/repository_test.go index 4e27b8943..98b8edd84 100644 --- a/src/restic/repository/repository_test.go +++ b/src/restic/repository/repository_test.go @@ -11,7 +11,6 @@ import ( "testing" "restic" - "restic/pack" "restic/repository" . 
"restic/test" ) @@ -36,7 +35,7 @@ func TestSaveJSON(t *testing.T) { data = append(data, '\n') h := sha256.Sum256(data) - id, err := repo.SaveJSON(pack.Tree, obj) + id, err := repo.SaveJSON(restic.TreeBlob, obj) OK(t, err) Assert(t, h == id, @@ -59,7 +58,7 @@ func BenchmarkSaveJSON(t *testing.B) { t.ResetTimer() for i := 0; i < t.N; i++ { - id, err := repo.SaveJSON(pack.Tree, obj) + id, err := repo.SaveJSON(restic.TreeBlob, obj) OK(t, err) Assert(t, h == id, @@ -82,7 +81,7 @@ func TestSave(t *testing.T) { id := restic.Hash(data) // save - sid, err := repo.SaveAndEncrypt(pack.Data, data, nil) + sid, err := repo.SaveAndEncrypt(restic.DataBlob, data, nil) OK(t, err) Equals(t, id, sid) @@ -91,7 +90,7 @@ func TestSave(t *testing.T) { // OK(t, repo.SaveIndex()) // read back - buf, err := repo.LoadBlob(id, pack.Data, make([]byte, size)) + buf, err := repo.LoadBlob(id, restic.DataBlob, make([]byte, size)) OK(t, err) Assert(t, len(buf) == len(data), @@ -116,14 +115,14 @@ func TestSaveFrom(t *testing.T) { id := restic.Hash(data) // save - id2, err := repo.SaveAndEncrypt(pack.Data, data, &id) + id2, err := repo.SaveAndEncrypt(restic.DataBlob, data, &id) OK(t, err) Equals(t, id, id2) OK(t, repo.Flush()) // read back - buf, err := repo.LoadBlob(id, pack.Data, make([]byte, size)) + buf, err := repo.LoadBlob(id, restic.DataBlob, make([]byte, size)) OK(t, err) Assert(t, len(buf) == len(data), @@ -153,7 +152,7 @@ func BenchmarkSaveAndEncrypt(t *testing.B) { for i := 0; i < t.N; i++ { // save - _, err = repo.SaveAndEncrypt(pack.Data, data, &id) + _, err = repo.SaveAndEncrypt(restic.DataBlob, data, &id) OK(t, err) } } @@ -171,7 +170,7 @@ func TestLoadJSONPack(t *testing.T) { OK(t, repo.Flush()) tree := restic.NewTree() - err := repo.LoadJSONPack(pack.Tree, *sn.Tree, &tree) + err := repo.LoadJSONPack(restic.TreeBlob, *sn.Tree, &tree) OK(t, err) } @@ -192,7 +191,7 @@ func BenchmarkLoadJSONPack(t *testing.B) { t.ResetTimer() for i := 0; i < t.N; i++ { - err := repo.LoadJSONPack(pack.Tree, *sn.Tree, &tree) + err := repo.LoadJSONPack(restic.TreeBlob, *sn.Tree, &tree) OK(t, err) } } @@ -253,7 +252,7 @@ func saveRandomDataBlobs(t testing.TB, repo *repository.Repository, num int, siz _, err := io.ReadFull(rand.Reader, buf) OK(t, err) - _, err = repo.SaveAndEncrypt(pack.Data, buf, nil) + _, err = repo.SaveAndEncrypt(restic.DataBlob, buf, nil) OK(t, err) } } From f7ae0cb78f4bbe469daf384351a0026ede5c013a Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Thu, 1 Sep 2016 16:04:29 +0200 Subject: [PATCH 10/40] Fix cmds/restic for new structure --- src/cmds/restic/cmd_backup.go | 10 +-- src/cmds/restic/cmd_cache.go | 52 --------------- src/cmds/restic/cmd_cat.go | 21 +++--- src/cmds/restic/cmd_dump.go | 81 ++++++++++++------------ src/cmds/restic/cmd_find.go | 9 ++- src/cmds/restic/cmd_forget.go | 5 +- src/cmds/restic/cmd_init.go | 2 +- src/cmds/restic/cmd_key.go | 11 ++-- src/cmds/restic/cmd_list.go | 40 ++++++------ src/cmds/restic/cmd_ls.go | 9 ++- src/cmds/restic/cmd_prune.go | 28 ++++---- src/cmds/restic/cmd_restore.go | 3 +- src/cmds/restic/cmd_snapshots.go | 5 +- src/cmds/restic/global.go | 5 +- src/cmds/restic/integration_fuse_test.go | 4 +- src/cmds/restic/integration_test.go | 18 +++--- src/restic/backend/generic.go | 74 ---------------------- src/restic/backend/generic_test.go | 62 ------------------ src/restic/backend_find_test.go | 70 ++++++++++++++++++++ src/restic/snapshot.go | 13 ++++ 20 files changed, 202 insertions(+), 320 deletions(-) delete mode 100644 src/cmds/restic/cmd_cache.go delete mode 100644 
src/restic/backend/generic.go delete mode 100644 src/restic/backend/generic_test.go create mode 100644 src/restic/backend_find_test.go diff --git a/src/cmds/restic/cmd_backup.go b/src/cmds/restic/cmd_backup.go index 7a7640249..38f6fcf70 100644 --- a/src/cmds/restic/cmd_backup.go +++ b/src/cmds/restic/cmd_backup.go @@ -6,7 +6,7 @@ import ( "os" "path/filepath" "restic" - "restic/backend" + "restic/archiver" "restic/debug" "restic/filter" "restic/fs" @@ -259,7 +259,7 @@ func (cmd CmdBackup) readFromStdin(args []string) error { return err } - _, id, err := restic.ArchiveReader(repo, cmd.newArchiveStdinProgress(), os.Stdin, cmd.StdinFilename) + _, id, err := archiver.ArchiveReader(repo, cmd.newArchiveStdinProgress(), os.Stdin, cmd.StdinFilename) if err != nil { return err } @@ -306,7 +306,7 @@ func (cmd CmdBackup) Execute(args []string) error { return err } - var parentSnapshotID *backend.ID + var parentSnapshotID *restic.ID // Force using a parent if !cmd.Force && cmd.Parent != "" { @@ -365,12 +365,12 @@ func (cmd CmdBackup) Execute(args []string) error { return !matched } - stat, err := restic.Scan(target, selectFilter, cmd.newScanProgress()) + stat, err := archiver.Scan(target, selectFilter, cmd.newScanProgress()) if err != nil { return err } - arch := restic.NewArchiver(repo) + arch := archiver.New(repo) arch.Excludes = cmd.Excludes arch.SelectFilter = selectFilter diff --git a/src/cmds/restic/cmd_cache.go b/src/cmds/restic/cmd_cache.go deleted file mode 100644 index aa4d5765f..000000000 --- a/src/cmds/restic/cmd_cache.go +++ /dev/null @@ -1,52 +0,0 @@ -package main - -import ( - "fmt" - - "restic" -) - -type CmdCache struct { - global *GlobalOptions -} - -func init() { - _, err := parser.AddCommand("cache", - "manage cache", - "The cache command creates and manages the local cache", - &CmdCache{global: &globalOpts}) - if err != nil { - panic(err) - } -} - -func (cmd CmdCache) Usage() string { - return "[update|clear]" -} - -func (cmd CmdCache) Execute(args []string) error { - repo, err := cmd.global.OpenRepository() - if err != nil { - return err - } - - lock, err := lockRepo(repo) - defer unlockRepo(lock) - if err != nil { - return err - } - - cache, err := restic.NewCache(repo, cmd.global.CacheDir) - if err != nil { - return err - } - - fmt.Printf("clear cache for old snapshots\n") - err = cache.Clear(repo) - if err != nil { - return err - } - fmt.Printf("done\n") - - return nil -} diff --git a/src/cmds/restic/cmd_cat.go b/src/cmds/restic/cmd_cat.go index 4e52848da..75edc258b 100644 --- a/src/cmds/restic/cmd_cat.go +++ b/src/cmds/restic/cmd_cat.go @@ -8,7 +8,6 @@ import ( "restic" "restic/backend" "restic/debug" - "restic/pack" "restic/repository" ) @@ -48,9 +47,9 @@ func (cmd CmdCat) Execute(args []string) error { tpe := args[0] - var id backend.ID + var id restic.ID if tpe != "masterkey" && tpe != "config" { - id, err = backend.ParseID(args[1]) + id, err = restic.ParseID(args[1]) if err != nil { if tpe != "snapshot" { return restic.Fatalf("unable to parse ID: %v\n", err) @@ -67,7 +66,7 @@ func (cmd CmdCat) Execute(args []string) error { // handle all types that don't need an index switch tpe { case "config": - buf, err := json.MarshalIndent(repo.Config, "", " ") + buf, err := json.MarshalIndent(repo.Config(), "", " ") if err != nil { return err } @@ -75,7 +74,7 @@ func (cmd CmdCat) Execute(args []string) error { fmt.Println(string(buf)) return nil case "index": - buf, err := repo.LoadAndDecrypt(backend.Index, id) + buf, err := repo.LoadAndDecrypt(restic.IndexFile, id) if err != nil { 
return err } @@ -85,7 +84,7 @@ func (cmd CmdCat) Execute(args []string) error { case "snapshot": sn := &restic.Snapshot{} - err = repo.LoadJSONUnpacked(backend.Snapshot, id, sn) + err = repo.LoadJSONUnpacked(restic.SnapshotFile, id, sn) if err != nil { return err } @@ -99,7 +98,7 @@ func (cmd CmdCat) Execute(args []string) error { return nil case "key": - h := backend.Handle{Type: backend.Key, Name: id.String()} + h := restic.Handle{FileType: restic.KeyFile, Name: id.String()} buf, err := backend.LoadAll(repo.Backend(), h, nil) if err != nil { return err @@ -150,13 +149,13 @@ func (cmd CmdCat) Execute(args []string) error { switch tpe { case "pack": - h := backend.Handle{Type: backend.Data, Name: id.String()} + h := restic.Handle{FileType: restic.DataFile, Name: id.String()} buf, err := backend.LoadAll(repo.Backend(), h, nil) if err != nil { return err } - hash := backend.Hash(buf) + hash := restic.Hash(buf) if !hash.Equal(id) { fmt.Fprintf(cmd.global.stderr, "Warning: hash of data does not match ID, want\n %v\ngot:\n %v\n", id.String(), hash.String()) } @@ -165,7 +164,7 @@ func (cmd CmdCat) Execute(args []string) error { return err case "blob": - for _, t := range []pack.BlobType{pack.Data, pack.Tree} { + for _, t := range []restic.BlobType{restic.DataBlob, restic.TreeBlob} { list, err := repo.Index().Lookup(id, t) if err != nil { continue @@ -187,7 +186,7 @@ func (cmd CmdCat) Execute(args []string) error { case "tree": debug.Log("cat", "cat tree %v", id.Str()) tree := restic.NewTree() - err = repo.LoadJSONPack(pack.Tree, id, tree) + err = repo.LoadJSONPack(restic.TreeBlob, id, tree) if err != nil { debug.Log("cat", "unable to load tree %v: %v", id.Str(), err) return err diff --git a/src/cmds/restic/cmd_dump.go b/src/cmds/restic/cmd_dump.go index 32b789094..9bab5151a 100644 --- a/src/cmds/restic/cmd_dump.go +++ b/src/cmds/restic/cmd_dump.go @@ -9,7 +9,6 @@ import ( "os" "restic" - "restic/backend" "restic/pack" "restic/repository" @@ -50,7 +49,7 @@ func debugPrintSnapshots(repo *repository.Repository, wr io.Writer) error { done := make(chan struct{}) defer close(done) - for id := range repo.List(backend.Snapshot, done) { + for id := range repo.List(restic.SnapshotFile, done) { snapshot, err := restic.LoadSnapshot(repo, id) if err != nil { fmt.Fprintf(os.Stderr, "LoadSnapshot(%v): %v", id.Str(), err) @@ -68,36 +67,36 @@ func debugPrintSnapshots(repo *repository.Repository, wr io.Writer) error { return nil } -func printTrees(repo *repository.Repository, wr io.Writer) error { - done := make(chan struct{}) - defer close(done) +// func printTrees(repo *repository.Repository, wr io.Writer) error { +// done := make(chan struct{}) +// defer close(done) - trees := []backend.ID{} +// trees := []restic.ID{} - for _, idx := range repo.Index().All() { - for blob := range idx.Each(nil) { - if blob.Type != pack.Tree { - continue - } +// for _, idx := range repo.Index().All() { +// for blob := range idx.Each(nil) { +// if blob.Type != pack.Tree { +// continue +// } - trees = append(trees, blob.ID) - } - } +// trees = append(trees, blob.ID) +// } +// } - for _, id := range trees { - tree, err := restic.LoadTree(repo, id) - if err != nil { - fmt.Fprintf(os.Stderr, "LoadTree(%v): %v", id.Str(), err) - continue - } +// for _, id := range trees { +// tree, err := restic.LoadTree(repo, id) +// if err != nil { +// fmt.Fprintf(os.Stderr, "LoadTree(%v): %v", id.Str(), err) +// continue +// } - fmt.Fprintf(wr, "tree_id: %v\n", id) +// fmt.Fprintf(wr, "tree_id: %v\n", id) - prettyPrintJSON(wr, tree) - } +// 
prettyPrintJSON(wr, tree) +// } - return nil -} +// return nil +// } const dumpPackWorkers = 10 @@ -110,10 +109,10 @@ type Pack struct { // Blob is the struct used in printPacks. type Blob struct { - Type pack.BlobType `json:"type"` - Length uint `json:"length"` - ID backend.ID `json:"id"` - Offset uint `json:"offset"` + Type restic.BlobType `json:"type"` + Length uint `json:"length"` + ID restic.ID `json:"id"` + Offset uint `json:"offset"` } func printPacks(repo *repository.Repository, wr io.Writer) error { @@ -123,14 +122,14 @@ func printPacks(repo *repository.Repository, wr io.Writer) error { f := func(job worker.Job, done <-chan struct{}) (interface{}, error) { name := job.Data.(string) - h := backend.Handle{Type: backend.Data, Name: name} + h := restic.Handle{FileType: restic.DataFile, Name: name} blobInfo, err := repo.Backend().Stat(h) if err != nil { return nil, err } - blobs, err := pack.List(repo.Key(), backend.ReaderAt(repo.Backend(), h), blobInfo.Size) + blobs, err := pack.List(repo.Key(), restic.ReaderAt(repo.Backend(), h), blobInfo.Size) if err != nil { return nil, err } @@ -143,7 +142,7 @@ func printPacks(repo *repository.Repository, wr io.Writer) error { wp := worker.New(dumpPackWorkers, f, jobCh, resCh) go func() { - for name := range repo.Backend().List(backend.Data, done) { + for name := range repo.Backend().List(restic.DataFile, done) { jobCh <- worker.Job{Data: name} } close(jobCh) @@ -157,7 +156,7 @@ func printPacks(repo *repository.Repository, wr io.Writer) error { continue } - entries := job.Result.([]pack.Blob) + entries := job.Result.([]restic.Blob) p := Pack{ Name: name, Blobs: make([]Blob, len(entries)), @@ -183,7 +182,7 @@ func (cmd CmdDump) DumpIndexes() error { done := make(chan struct{}) defer close(done) - for id := range cmd.repo.List(backend.Index, done) { + for id := range cmd.repo.List(restic.IndexFile, done) { fmt.Printf("index_id: %v\n", id) idx, err := repository.LoadIndex(cmd.repo, id) @@ -229,8 +228,8 @@ func (cmd CmdDump) Execute(args []string) error { return cmd.DumpIndexes() case "snapshots": return debugPrintSnapshots(repo, os.Stdout) - case "trees": - return printTrees(repo, os.Stdout) + // case "trees": + // return printTrees(repo, os.Stdout) case "packs": return printPacks(repo, os.Stdout) case "all": @@ -240,12 +239,12 @@ func (cmd CmdDump) Execute(args []string) error { return err } - fmt.Printf("\ntrees:\n") + // fmt.Printf("\ntrees:\n") - err = printTrees(repo, os.Stdout) - if err != nil { - return err - } + // err = printTrees(repo, os.Stdout) + // if err != nil { + // return err + // } fmt.Printf("\nindexes:\n") err = cmd.DumpIndexes() diff --git a/src/cmds/restic/cmd_find.go b/src/cmds/restic/cmd_find.go index 1c66cd757..783d65b35 100644 --- a/src/cmds/restic/cmd_find.go +++ b/src/cmds/restic/cmd_find.go @@ -5,7 +5,6 @@ import ( "time" "restic" - "restic/backend" "restic/debug" "restic/repository" ) @@ -59,7 +58,7 @@ func parseTime(str string) (time.Time, error) { return time.Time{}, restic.Fatalf("unable to parse time: %q", str) } -func (c CmdFind) findInTree(repo *repository.Repository, id backend.ID, path string) ([]findResult, error) { +func (c CmdFind) findInTree(repo *repository.Repository, id restic.ID, path string) ([]findResult, error) { debug.Log("restic.find", "checking tree %v\n", id) tree, err := restic.LoadTree(repo, id) if err != nil { @@ -92,7 +91,7 @@ func (c CmdFind) findInTree(repo *repository.Repository, id backend.ID, path str debug.Log("restic.find", " pattern does not match\n") } - if node.Type == "dir" { + if 
node.FileType == "dir" { subdirResults, err := c.findInTree(repo, *node.Subtree, filepath.Join(path, node.Name)) if err != nil { return nil, err @@ -105,7 +104,7 @@ func (c CmdFind) findInTree(repo *repository.Repository, id backend.ID, path str return results, nil } -func (c CmdFind) findInSnapshot(repo *repository.Repository, id backend.ID) error { +func (c CmdFind) findInSnapshot(repo *repository.Repository, id restic.ID) error { debug.Log("restic.find", "searching in snapshot %s\n for entries within [%s %s]", id.Str(), c.oldest, c.newest) sn, err := restic.LoadSnapshot(repo, id) @@ -184,7 +183,7 @@ func (c CmdFind) Execute(args []string) error { done := make(chan struct{}) defer close(done) - for snapshotID := range repo.List(backend.Snapshot, done) { + for snapshotID := range repo.List(restic.SnapshotFile, done) { err := c.findInSnapshot(repo, snapshotID) if err != nil { diff --git a/src/cmds/restic/cmd_forget.go b/src/cmds/restic/cmd_forget.go index 16da4b556..3f4ec12c6 100644 --- a/src/cmds/restic/cmd_forget.go +++ b/src/cmds/restic/cmd_forget.go @@ -4,7 +4,6 @@ import ( "fmt" "io" "restic" - "restic/backend" "strings" ) @@ -93,7 +92,7 @@ func (cmd CmdForget) Execute(args []string) error { } if !cmd.DryRun { - err = repo.Backend().Remove(backend.Snapshot, id.String()) + err = repo.Backend().Remove(restic.SnapshotFile, id.String()) if err != nil { return err } @@ -156,7 +155,7 @@ func (cmd CmdForget) Execute(args []string) error { if !cmd.DryRun { for _, sn := range remove { - err = repo.Backend().Remove(backend.Snapshot, sn.ID().String()) + err = repo.Backend().Remove(restic.SnapshotFile, sn.ID().String()) if err != nil { return err } diff --git a/src/cmds/restic/cmd_init.go b/src/cmds/restic/cmd_init.go index 49b0907ad..3e68ebdea 100644 --- a/src/cmds/restic/cmd_init.go +++ b/src/cmds/restic/cmd_init.go @@ -32,7 +32,7 @@ func (cmd CmdInit) Execute(args []string) error { cmd.global.Exitf(1, "creating key in backend at %s failed: %v\n", cmd.global.Repo, err) } - cmd.global.Verbosef("created restic backend %v at %s\n", s.Config.ID[:10], cmd.global.Repo) + cmd.global.Verbosef("created restic backend %v at %s\n", s.Config().ID[:10], cmd.global.Repo) cmd.global.Verbosef("\n") cmd.global.Verbosef("Please note that knowledge of your password is required to access\n") cmd.global.Verbosef("the repository. 
Losing your password means that your data is\n") diff --git a/src/cmds/restic/cmd_key.go b/src/cmds/restic/cmd_key.go index 67d5afa64..629f5ddf0 100644 --- a/src/cmds/restic/cmd_key.go +++ b/src/cmds/restic/cmd_key.go @@ -4,7 +4,6 @@ import ( "fmt" "restic" - "restic/backend" "restic/repository" ) @@ -28,7 +27,7 @@ func (cmd CmdKey) listKeys(s *repository.Repository) error { tab.Header = fmt.Sprintf(" %-10s %-10s %-10s %s", "ID", "User", "Host", "Created") tab.RowFormat = "%s%-10s %-10s %-10s %s" - plen, err := s.PrefixLength(backend.Key) + plen, err := s.PrefixLength(restic.KeyFile) if err != nil { return err } @@ -36,7 +35,7 @@ func (cmd CmdKey) listKeys(s *repository.Repository) error { done := make(chan struct{}) defer close(done) - for id := range s.List(backend.Key, done) { + for id := range s.List(restic.KeyFile, done) { k, err := repository.LoadKey(s, id.String()) if err != nil { cmd.global.Warnf("LoadKey() failed: %v\n", err) @@ -82,7 +81,7 @@ func (cmd CmdKey) deleteKey(repo *repository.Repository, name string) error { return restic.Fatal("refusing to remove key currently used to access repository") } - err := repo.Backend().Remove(backend.Key, name) + err := repo.Backend().Remove(restic.KeyFile, name) if err != nil { return err } @@ -97,7 +96,7 @@ func (cmd CmdKey) changePassword(repo *repository.Repository) error { return restic.Fatalf("creating new key failed: %v\n", err) } - err = repo.Backend().Remove(backend.Key, repo.KeyName()) + err = repo.Backend().Remove(restic.KeyFile, repo.KeyName()) if err != nil { return err } @@ -145,7 +144,7 @@ func (cmd CmdKey) Execute(args []string) error { return err } - id, err := backend.Find(repo.Backend(), backend.Key, args[1]) + id, err := restic.Find(repo.Backend(), restic.KeyFile, args[1]) if err != nil { return err } diff --git a/src/cmds/restic/cmd_list.go b/src/cmds/restic/cmd_list.go index 717d65ade..4418dbb08 100644 --- a/src/cmds/restic/cmd_list.go +++ b/src/cmds/restic/cmd_list.go @@ -1,9 +1,6 @@ package main -import ( - "restic" - "restic/backend" -) +import "restic" type CmdList struct { global *GlobalOptions @@ -41,31 +38,32 @@ func (cmd CmdList) Execute(args []string) error { } } - var t backend.Type + var t restic.FileType switch args[0] { - case "blobs": - err = repo.LoadIndex() - if err != nil { - return err - } + // case "blobs": + // restic.Lister + // err = repo.LoadIndex() + // if err != nil { + // return err + // } - for _, idx := range repo.Index().All() { - for blob := range idx.Each(nil) { - cmd.global.Printf("%s\n", blob.ID) - } - } + // for _, idx := range repo.Index().All() { + // for blob := range idx.Each(nil) { + // cmd.global.Printf("%s\n", blob.ID) + // } + // } - return nil + // return nil case "packs": - t = backend.Data + t = restic.DataFile case "index": - t = backend.Index + t = restic.IndexFile case "snapshots": - t = backend.Snapshot + t = restic.SnapshotFile case "keys": - t = backend.Key + t = restic.KeyFile case "locks": - t = backend.Lock + t = restic.LockFile default: return restic.Fatal("invalid type") } diff --git a/src/cmds/restic/cmd_ls.go b/src/cmds/restic/cmd_ls.go index c55670a93..8157c74f2 100644 --- a/src/cmds/restic/cmd_ls.go +++ b/src/cmds/restic/cmd_ls.go @@ -6,7 +6,6 @@ import ( "path/filepath" "restic" - "restic/backend" "restic/repository" ) @@ -31,7 +30,7 @@ func (cmd CmdLs) printNode(prefix string, n *restic.Node) string { return filepath.Join(prefix, n.Name) } - switch n.Type { + switch n.FileType { case "file": return fmt.Sprintf("%s %5d %5d %6d %s %s", n.Mode, n.UID, n.GID, 
n.Size, n.ModTime, filepath.Join(prefix, n.Name)) @@ -42,11 +41,11 @@ func (cmd CmdLs) printNode(prefix string, n *restic.Node) string { return fmt.Sprintf("%s %5d %5d %6d %s %s -> %s", n.Mode|os.ModeSymlink, n.UID, n.GID, n.Size, n.ModTime, filepath.Join(prefix, n.Name), n.LinkTarget) default: - return fmt.Sprintf("", n.Type, n.Name) + return fmt.Sprintf("", n.FileType, n.Name) } } -func (cmd CmdLs) printTree(prefix string, repo *repository.Repository, id backend.ID) error { +func (cmd CmdLs) printTree(prefix string, repo *repository.Repository, id restic.ID) error { tree, err := restic.LoadTree(repo, id) if err != nil { return err @@ -55,7 +54,7 @@ func (cmd CmdLs) printTree(prefix string, repo *repository.Repository, id backen for _, entry := range tree.Nodes { cmd.global.Printf(cmd.printNode(prefix, entry) + "\n") - if entry.Type == "dir" && entry.Subtree != nil { + if entry.FileType == "dir" && entry.Subtree != nil { err = cmd.printTree(filepath.Join(prefix, entry.Name), repo, *entry.Subtree) if err != nil { return err diff --git a/src/cmds/restic/cmd_prune.go b/src/cmds/restic/cmd_prune.go index eee330131..c21cab16a 100644 --- a/src/cmds/restic/cmd_prune.go +++ b/src/cmds/restic/cmd_prune.go @@ -4,10 +4,8 @@ import ( "fmt" "os" "restic" - "restic/backend" "restic/debug" "restic/index" - "restic/pack" "restic/repository" "time" @@ -94,7 +92,7 @@ func (cmd CmdPrune) Execute(args []string) error { } cmd.global.Verbosef("counting files in repo\n") - for _ = range repo.List(backend.Data, done) { + for _ = range repo.List(restic.DataFile, done) { stats.packs++ } @@ -112,7 +110,7 @@ func (cmd CmdPrune) Execute(args []string) error { cmd.global.Verbosef("repository contains %v packs (%v blobs) with %v bytes\n", len(idx.Packs), len(idx.Blobs), formatBytes(uint64(stats.bytes))) - blobCount := make(map[pack.Handle]int) + blobCount := make(map[restic.BlobHandle]int) duplicateBlobs := 0 duplicateBytes := 0 @@ -120,7 +118,7 @@ func (cmd CmdPrune) Execute(args []string) error { for _, p := range idx.Packs { for _, entry := range p.Entries { stats.blobs++ - h := pack.Handle{ID: entry.ID, Type: entry.Type} + h := restic.BlobHandle{ID: entry.ID, Type: entry.Type} blobCount[h]++ if blobCount[h] > 1 { @@ -144,8 +142,8 @@ func (cmd CmdPrune) Execute(args []string) error { cmd.global.Verbosef("find data that is still in use for %d snapshots\n", stats.snapshots) - usedBlobs := pack.NewBlobSet() - seenBlobs := pack.NewBlobSet() + usedBlobs := restic.NewBlobSet() + seenBlobs := restic.NewBlobSet() bar = newProgressMax(cmd.global.ShowProgress(), uint64(len(snapshots)), "snapshots") bar.Start() @@ -165,7 +163,7 @@ func (cmd CmdPrune) Execute(args []string) error { cmd.global.Verbosef("found %d of %d data blobs still in use\n", len(usedBlobs), stats.blobs) // find packs that need a rewrite - rewritePacks := backend.NewIDSet() + rewritePacks := restic.NewIDSet() for h, blob := range idx.Blobs { if !usedBlobs.Has(h) { rewritePacks.Merge(blob.Packs) @@ -178,11 +176,11 @@ func (cmd CmdPrune) Execute(args []string) error { } // find packs that are unneeded - removePacks := backend.NewIDSet() + removePacks := restic.NewIDSet() nextPack: for packID, p := range idx.Packs { for _, blob := range p.Entries { - h := pack.Handle{ID: blob.ID, Type: blob.Type} + h := restic.BlobHandle{ID: blob.ID, Type: blob.Type} if usedBlobs.Has(h) { continue nextPack } @@ -205,7 +203,7 @@ nextPack: } for packID := range removePacks { - err = repo.Backend().Remove(backend.Data, packID.String()) + err = repo.Backend().Remove(restic.DataFile, 
packID.String()) if err != nil { cmd.global.Warnf("unable to remove file %v from the repository\n", packID.Str()) } @@ -214,7 +212,7 @@ nextPack: cmd.global.Verbosef("creating new index\n") stats.packs = 0 - for _ = range repo.List(backend.Data, done) { + for _ = range repo.List(restic.DataFile, done) { stats.packs++ } bar = newProgressMax(cmd.global.ShowProgress(), uint64(stats.packs), "packs") @@ -223,9 +221,9 @@ nextPack: return err } - var supersedes backend.IDs - for idxID := range repo.List(backend.Index, done) { - err := repo.Backend().Remove(backend.Index, idxID.String()) + var supersedes restic.IDs + for idxID := range repo.List(restic.IndexFile, done) { + err := repo.Backend().Remove(restic.IndexFile, idxID.String()) if err != nil { fmt.Fprintf(os.Stderr, "unable to remove index %v: %v\n", idxID.Str(), err) } diff --git a/src/cmds/restic/cmd_restore.go b/src/cmds/restic/cmd_restore.go index 9ff57565d..1a55cdb4e 100644 --- a/src/cmds/restic/cmd_restore.go +++ b/src/cmds/restic/cmd_restore.go @@ -2,7 +2,6 @@ package main import ( "restic" - "restic/backend" "restic/debug" "restic/filter" ) @@ -66,7 +65,7 @@ func (cmd CmdRestore) Execute(args []string) error { return err } - var id backend.ID + var id restic.ID if snapshotIDString == "latest" { id, err = restic.FindLatestSnapshot(repo, cmd.Paths, cmd.Host) diff --git a/src/cmds/restic/cmd_snapshots.go b/src/cmds/restic/cmd_snapshots.go index ccf889d23..23f9eb709 100644 --- a/src/cmds/restic/cmd_snapshots.go +++ b/src/cmds/restic/cmd_snapshots.go @@ -9,7 +9,6 @@ import ( "strings" "restic" - "restic/backend" ) type Table struct { @@ -92,7 +91,7 @@ func (cmd CmdSnapshots) Execute(args []string) error { defer close(done) list := []*restic.Snapshot{} - for id := range repo.List(backend.Snapshot, done) { + for id := range repo.List(restic.SnapshotFile, done) { sn, err := restic.LoadSnapshot(repo, id) if err != nil { fmt.Fprintf(os.Stderr, "error loading snapshot %s: %v\n", id, err) @@ -115,7 +114,7 @@ func (cmd CmdSnapshots) Execute(args []string) error { } - plen, err := repo.PrefixLength(backend.Snapshot) + plen, err := repo.PrefixLength(restic.SnapshotFile) if err != nil { return err } diff --git a/src/cmds/restic/global.go b/src/cmds/restic/global.go index 2aedb026e..b7eff3e41 100644 --- a/src/cmds/restic/global.go +++ b/src/cmds/restic/global.go @@ -9,7 +9,6 @@ import ( "strings" "syscall" - "restic/backend" "restic/backend/local" "restic/backend/rest" "restic/backend/s3" @@ -270,7 +269,7 @@ func (o GlobalOptions) OpenRepository() (*repository.Repository, error) { } // Open the backend specified by a location config. -func open(s string) (backend.Backend, error) { +func open(s string) (restic.Backend, error) { debug.Log("open", "parsing location %v", s) loc, err := location.Parse(s) if err != nil { @@ -305,7 +304,7 @@ func open(s string) (backend.Backend, error) { } // Create the backend specified by URI. 
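// (Illustrative sketch only, with an assumed local repository path: open
// parses the location string and returns a restic.Backend, which the caller
// then wraps in a repository, as OpenRepository above does:
//
//	be, err := open("/srv/restic-repo") // or an "s3:" / "rest:" location
//	if err != nil {
//		return nil, err
//	}
//	repo := repository.New(be)
// )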
-func create(s string) (backend.Backend, error) { +func create(s string) (restic.Backend, error) { debug.Log("open", "parsing location %v", s) loc, err := location.Parse(s) if err != nil { diff --git a/src/cmds/restic/integration_fuse_test.go b/src/cmds/restic/integration_fuse_test.go index 25f4b7e84..30c261703 100644 --- a/src/cmds/restic/integration_fuse_test.go +++ b/src/cmds/restic/integration_fuse_test.go @@ -71,7 +71,7 @@ func TestMount(t *testing.T) { t.Skip("Skipping fuse tests") } - checkSnapshots := func(repo *repository.Repository, mountpoint string, snapshotIDs []backend.ID) { + checkSnapshots := func(repo *repository.Repository, mountpoint string, snapshotIDs []restic.ID) { snapshotsDir, err := os.Open(filepath.Join(mountpoint, "snapshots")) OK(t, err) namesInSnapshots, err := snapshotsDir.Readdirnames(-1) @@ -123,7 +123,7 @@ func TestMount(t *testing.T) { Assert(t, len(names) == 1 && names[0] == "snapshots", `The fuse virtual directory "snapshots" doesn't exist`) OK(t, mountpointDir.Close()) - checkSnapshots(repo, mountpoint, []backend.ID{}) + checkSnapshots(repo, mountpoint, []restic.ID{}) datafile := filepath.Join("testdata", "backup-data.tar.gz") fd, err := os.Open(datafile) diff --git a/src/cmds/restic/integration_test.go b/src/cmds/restic/integration_test.go index fff765bef..8bd9b81c5 100644 --- a/src/cmds/restic/integration_test.go +++ b/src/cmds/restic/integration_test.go @@ -25,8 +25,8 @@ import ( . "restic/test" ) -func parseIDsFromReader(t testing.TB, rd io.Reader) backend.IDs { - IDs := backend.IDs{} +func parseIDsFromReader(t testing.TB, rd io.Reader) restic.IDs { + IDs := restic.IDs{} sc := bufio.NewScanner(rd) for sc.Scan() { @@ -51,11 +51,11 @@ func cmdInit(t testing.TB, global GlobalOptions) { t.Logf("repository initialized at %v", global.Repo) } -func cmdBackup(t testing.TB, global GlobalOptions, target []string, parentID *backend.ID) { +func cmdBackup(t testing.TB, global GlobalOptions, target []string, parentID *restic.ID) { cmdBackupExcludes(t, global, target, parentID, nil) } -func cmdBackupExcludes(t testing.TB, global GlobalOptions, target []string, parentID *backend.ID, excludes []string) { +func cmdBackupExcludes(t testing.TB, global GlobalOptions, target []string, parentID *restic.ID, excludes []string) { cmd := &CmdBackup{global: &global, Excludes: excludes} if parentID != nil { cmd.Parent = parentID.String() @@ -66,19 +66,19 @@ func cmdBackupExcludes(t testing.TB, global GlobalOptions, target []string, pare OK(t, cmd.Execute(target)) } -func cmdList(t testing.TB, global GlobalOptions, tpe string) backend.IDs { +func cmdList(t testing.TB, global GlobalOptions, tpe string) restic.IDs { cmd := &CmdList{global: &global} return executeAndParseIDs(t, cmd, tpe) } -func executeAndParseIDs(t testing.TB, cmd *CmdList, args ...string) backend.IDs { +func executeAndParseIDs(t testing.TB, cmd *CmdList, args ...string) restic.IDs { buf := bytes.NewBuffer(nil) cmd.global.stdout = buf OK(t, cmd.Execute(args)) return parseIDsFromReader(t, buf) } -func cmdRestore(t testing.TB, global GlobalOptions, dir string, snapshotID backend.ID) { +func cmdRestore(t testing.TB, global GlobalOptions, dir string, snapshotID restic.ID) { cmdRestoreExcludes(t, global, dir, snapshotID, nil) } @@ -87,12 +87,12 @@ func cmdRestoreLatest(t testing.TB, global GlobalOptions, dir string, paths []st OK(t, cmd.Execute([]string{"latest"})) } -func cmdRestoreExcludes(t testing.TB, global GlobalOptions, dir string, snapshotID backend.ID, excludes []string) { +func cmdRestoreExcludes(t 
testing.TB, global GlobalOptions, dir string, snapshotID restic.ID, excludes []string) { cmd := &CmdRestore{global: &global, Target: dir, Exclude: excludes} OK(t, cmd.Execute([]string{snapshotID.String()})) } -func cmdRestoreIncludes(t testing.TB, global GlobalOptions, dir string, snapshotID backend.ID, includes []string) { +func cmdRestoreIncludes(t testing.TB, global GlobalOptions, dir string, snapshotID restic.ID, includes []string) { cmd := &CmdRestore{global: &global, Target: dir, Include: includes} OK(t, cmd.Execute([]string{snapshotID.String()})) } diff --git a/src/restic/backend/generic.go b/src/restic/backend/generic.go deleted file mode 100644 index a267922f3..000000000 --- a/src/restic/backend/generic.go +++ /dev/null @@ -1,74 +0,0 @@ -package backend - -import ( - "restic" - - "github.com/pkg/errors" -) - -// ErrNoIDPrefixFound is returned by Find() when no ID for the given prefix -// could be found. -var ErrNoIDPrefixFound = errors.New("no ID found") - -// ErrMultipleIDMatches is returned by Find() when multiple IDs with the given -// prefix are found. -var ErrMultipleIDMatches = errors.New("multiple IDs with prefix found") - -// Find loads the list of all blobs of type t and searches for names which -// start with prefix. If none is found, nil and ErrNoIDPrefixFound is returned. -// If more than one is found, nil and ErrMultipleIDMatches is returned. -func Find(be restic.Lister, t restic.FileType, prefix string) (string, error) { - done := make(chan struct{}) - defer close(done) - - match := "" - - // TODO: optimize by sorting list etc. - for name := range be.List(t, done) { - if prefix == name[:len(prefix)] { - if match == "" { - match = name - } else { - return "", ErrMultipleIDMatches - } - } - } - - if match != "" { - return match, nil - } - - return "", ErrNoIDPrefixFound -} - -const minPrefixLength = 8 - -// PrefixLength returns the number of bytes required so that all prefixes of -// all names of type t are unique. -func PrefixLength(be restic.Lister, t restic.FileType) (int, error) { - done := make(chan struct{}) - defer close(done) - - // load all IDs of the given type - list := make([]string, 0, 100) - for name := range be.List(t, done) { - list = append(list, name) - } - - // select prefixes of length l, test if the last one is the same as the current one -outer: - for l := minPrefixLength; l < restic.IDSize; l++ { - var last string - - for _, name := range list { - if last == name[:l] { - continue outer - } - last = name[:l] - } - - return l, nil - } - - return restic.IDSize, nil -} diff --git a/src/restic/backend/generic_test.go b/src/restic/backend/generic_test.go deleted file mode 100644 index 64b82a769..000000000 --- a/src/restic/backend/generic_test.go +++ /dev/null @@ -1,62 +0,0 @@ -package backend_test - -import ( - "restic" - "testing" - - "restic/backend" - . 
"restic/test" -) - -type mockBackend struct { - list func(restic.FileType, <-chan struct{}) <-chan string -} - -func (m mockBackend) List(t restic.FileType, done <-chan struct{}) <-chan string { - return m.list(t, done) -} - -var samples = restic.IDs{ - ParseID("20bdc1402a6fc9b633aaffffffffffffffffffffffffffffffffffffffffffff"), - ParseID("20bdc1402a6fc9b633ccd578c4a92d0f4ef1a457fa2e16c596bc73fb409d6cc0"), - ParseID("20bdc1402a6fc9b633ffffffffffffffffffffffffffffffffffffffffffffff"), - ParseID("20ff988befa5fc40350f00d531a767606efefe242c837aaccb80673f286be53d"), - ParseID("326cb59dfe802304f96ee9b5b9af93bdee73a30f53981e5ec579aedb6f1d0f07"), - ParseID("86b60b9594d1d429c4aa98fa9562082cabf53b98c7dc083abe5dae31074dd15a"), - ParseID("96c8dbe225079e624b5ce509f5bd817d1453cd0a85d30d536d01b64a8669aeae"), - ParseID("fa31d65b87affcd167b119e9d3d2a27b8236ca4836cb077ed3e96fcbe209b792"), -} - -func TestPrefixLength(t *testing.T) { - list := samples - - m := mockBackend{} - m.list = func(t restic.FileType, done <-chan struct{}) <-chan string { - ch := make(chan string) - go func() { - defer close(ch) - for _, id := range list { - select { - case ch <- id.String(): - case <-done: - return - } - } - }() - return ch - } - - l, err := backend.PrefixLength(m, restic.SnapshotFile) - OK(t, err) - Equals(t, 19, l) - - list = samples[:3] - l, err = backend.PrefixLength(m, restic.SnapshotFile) - OK(t, err) - Equals(t, 19, l) - - list = samples[3:] - l, err = backend.PrefixLength(m, restic.SnapshotFile) - OK(t, err) - Equals(t, 8, l) -} diff --git a/src/restic/backend_find_test.go b/src/restic/backend_find_test.go new file mode 100644 index 000000000..cc86cd810 --- /dev/null +++ b/src/restic/backend_find_test.go @@ -0,0 +1,70 @@ +package restic + +import ( + "testing" +) + +type mockBackend struct { + list func(FileType, <-chan struct{}) <-chan string +} + +func (m mockBackend) List(t FileType, done <-chan struct{}) <-chan string { + return m.list(t, done) +} + +var samples = IDs{ + TestParseID("20bdc1402a6fc9b633aaffffffffffffffffffffffffffffffffffffffffffff"), + TestParseID("20bdc1402a6fc9b633ccd578c4a92d0f4ef1a457fa2e16c596bc73fb409d6cc0"), + TestParseID("20bdc1402a6fc9b633ffffffffffffffffffffffffffffffffffffffffffffff"), + TestParseID("20ff988befa5fc40350f00d531a767606efefe242c837aaccb80673f286be53d"), + TestParseID("326cb59dfe802304f96ee9b5b9af93bdee73a30f53981e5ec579aedb6f1d0f07"), + TestParseID("86b60b9594d1d429c4aa98fa9562082cabf53b98c7dc083abe5dae31074dd15a"), + TestParseID("96c8dbe225079e624b5ce509f5bd817d1453cd0a85d30d536d01b64a8669aeae"), + TestParseID("fa31d65b87affcd167b119e9d3d2a27b8236ca4836cb077ed3e96fcbe209b792"), +} + +func TestPrefixLength(t *testing.T) { + list := samples + + m := mockBackend{} + m.list = func(t FileType, done <-chan struct{}) <-chan string { + ch := make(chan string) + go func() { + defer close(ch) + for _, id := range list { + select { + case ch <- id.String(): + case <-done: + return + } + } + }() + return ch + } + + l, err := PrefixLength(m, SnapshotFile) + if err != nil { + t.Error(err) + } + if l != 19 { + t.Errorf("wrong prefix length returned, want %d, got %d", 19, l) + } + + list = samples[:3] + l, err = PrefixLength(m, SnapshotFile) + if err != nil { + t.Error(err) + } + if l != 19 { + t.Errorf("wrong prefix length returned, want %d, got %d", 19, l) + } + + list = samples[3:] + l, err = PrefixLength(m, SnapshotFile) + if err != nil { + t.Error(err) + } + if l != 8 { + t.Errorf("wrong prefix length returned, want %d, got %d", 8, l) + } +} diff --git 
a/src/restic/snapshot.go b/src/restic/snapshot.go index a81dfb82c..4775cbd7b 100644 --- a/src/restic/snapshot.go +++ b/src/restic/snapshot.go @@ -153,3 +153,16 @@ func FindLatestSnapshot(repo Repository, targets []string, source string) (ID, e return latestID, nil } + +// FindSnapshot takes a string and tries to find a snapshot whose ID matches +// the string as closely as possible. +func FindSnapshot(repo Repository, s string) (ID, error) { + + // find snapshot id with prefix + name, err := Find(repo.Backend(), SnapshotFile, s) + if err != nil { + return ID{}, err + } + + return ParseID(name) +} From 528c3018914a7839ebccbe6a0bd778a1e23a7c61 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Thu, 1 Sep 2016 19:06:53 +0200 Subject: [PATCH 11/40] Last fixes for integration tests --- src/cmds/restic/integration_fuse_test.go | 1 - src/cmds/restic/integration_helpers_test.go | 3 +++ src/cmds/restic/integration_test.go | 9 ++++----- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/src/cmds/restic/integration_fuse_test.go b/src/cmds/restic/integration_fuse_test.go index 30c261703..857f67361 100644 --- a/src/cmds/restic/integration_fuse_test.go +++ b/src/cmds/restic/integration_fuse_test.go @@ -13,7 +13,6 @@ import ( "github.com/pkg/errors" "restic" - "restic/backend" "restic/repository" . "restic/test" ) diff --git a/src/cmds/restic/integration_helpers_test.go b/src/cmds/restic/integration_helpers_test.go index d6615f6b0..15111d688 100644 --- a/src/cmds/restic/integration_helpers_test.go +++ b/src/cmds/restic/integration_helpers_test.go @@ -8,6 +8,7 @@ import ( "runtime" "testing" + "restic/repository" . "restic/test" ) @@ -193,6 +194,8 @@ func withTestEnvironment(t testing.TB, f func(*testEnvironment, GlobalOptions)) t.Skip("integration tests disabled") } + repository.TestUseLowSecurityKDFParameters(t) + tempdir, err := ioutil.TempDir(TestTempDir, "restic-test-") OK(t, err) diff --git a/src/cmds/restic/integration_test.go b/src/cmds/restic/integration_test.go index 8bd9b81c5..e0e881301 100644 --- a/src/cmds/restic/integration_test.go +++ b/src/cmds/restic/integration_test.go @@ -18,7 +18,6 @@ import ( "github.com/pkg/errors" - "restic/backend" "restic/debug" "restic/filter" "restic/repository" @@ -30,7 +29,7 @@ func parseIDsFromReader(t testing.TB, rd io.Reader) restic.IDs { sc := bufio.NewScanner(rd) for sc.Scan() { - id, err := backend.ParseID(sc.Text()) + id, err := restic.ParseID(sc.Text()) if err != nil { t.Logf("parse id %v: %v", sc.Text(), err) continue @@ -811,11 +810,11 @@ func TestRebuildIndexAlwaysFull(t *testing.T) { var optimizeTests = []struct { testFilename string - snapshots backend.IDSet + snapshots restic.IDSet }{ { filepath.Join("..", "..", "restic", "checker", "testdata", "checker-test-repo.tar.gz"), - backend.NewIDSet(ParseID("a13c11e582b77a693dd75ab4e3a3ba96538a056594a4b9076e4cacebe6e06d43")), + restic.NewIDSet(ParseID("a13c11e582b77a693dd75ab4e3a3ba96538a056594a4b9076e4cacebe6e06d43")), }, { filepath.Join("testdata", "old-index-repo.tar.gz"), @@ -823,7 +822,7 @@ var optimizeTests = []struct { }, { filepath.Join("testdata", "old-index-repo.tar.gz"), - backend.NewIDSet( + restic.NewIDSet( ParseID("f7d83db709977178c9d1a09e4009355e534cde1a135b8186b8b118a3fc4fcd41"), ParseID("51d249d28815200d59e4be7b3f21a157b864dc343353df9d8e498220c2499b02"), ), From eb6e3ba8b3ea522e6e8fd0da1be4da4d91a5143d Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Thu, 1 Sep 2016 19:20:15 +0200 Subject: [PATCH 12/40] Fix imported package --- src/restic/blob.go | 3 ++- 1 file 
changed, 2 insertions(+), 1 deletion(-) diff --git a/src/restic/blob.go b/src/restic/blob.go index 731dc8f6f..56e478adc 100644 --- a/src/restic/blob.go +++ b/src/restic/blob.go @@ -1,8 +1,9 @@ package restic import ( - "errors" "fmt" + + "github.com/pkg/errors" ) // Blob is one part of a file or a tree. From 88d0f24ce7a9f55b20c8175cd84020ff713b6f29 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Thu, 1 Sep 2016 21:13:06 +0200 Subject: [PATCH 13/40] Reduce lock timeout to zero --- src/cmds/restic/integration_test.go | 1 + src/restic/lock.go | 9 ++++++++- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/src/cmds/restic/integration_test.go b/src/cmds/restic/integration_test.go index e0e881301..3a3c53e6a 100644 --- a/src/cmds/restic/integration_test.go +++ b/src/cmds/restic/integration_test.go @@ -43,6 +43,7 @@ func parseIDsFromReader(t testing.TB, rd io.Reader) restic.IDs { func cmdInit(t testing.TB, global GlobalOptions) { repository.TestUseLowSecurityKDFParameters(t) + restic.TestSetLockTimeout(t, 0) cmd := &CmdInit{global: &global} OK(t, cmd.Execute(nil)) diff --git a/src/restic/lock.go b/src/restic/lock.go index 9f181106a..2cb0a1134 100644 --- a/src/restic/lock.go +++ b/src/restic/lock.go @@ -7,6 +7,7 @@ import ( "os/user" "sync" "syscall" + "testing" "time" "github.com/pkg/errors" @@ -68,7 +69,13 @@ func NewExclusiveLock(repo Repository) (*Lock, error) { return newLock(repo, true) } -const waitBeforeLockCheck = 200 * time.Millisecond +var waitBeforeLockCheck = 200 * time.Millisecond + +// TestSetLockTimeout can be used to reduce the lock wait timeout for tests. +func TestSetLockTimeout(t testing.TB, d time.Duration) { + t.Logf("setting lock timeout to %v", d) + waitBeforeLockCheck = d +} func newLock(repo Repository, excl bool) (*Lock, error) { lock := &Lock{ From 5e3a41dbd2a469dc29f9d00e2a62ffa291748dc6 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Thu, 1 Sep 2016 21:19:30 +0200 Subject: [PATCH 14/40] Rename struct member FileType -> Type --- src/cmds/restic/cmd_cat.go | 4 +-- src/restic/backend/local/local.go | 8 ++--- src/restic/backend/mem/mem_backend.go | 16 +++++----- src/restic/backend/rest/rest.go | 10 +++--- src/restic/backend/rest/rest_path_test.go | 12 ++++---- src/restic/backend/s3/s3.go | 6 ++-- src/restic/backend/sftp/sftp.go | 6 ++-- src/restic/backend/test/tests.go | 32 ++++++++++---------- src/restic/backend/utils_test.go | 12 ++++---- src/restic/checker/checker.go | 2 +- src/restic/file.go | 14 ++++----- src/restic/file_test.go | 10 +++--- src/restic/pack/pack_test.go | 4 +-- src/restic/repository/key.go | 6 ++-- src/restic/repository/packer_manager.go | 2 +- src/restic/repository/packer_manager_test.go | 2 +- src/restic/repository/repack.go | 2 +- src/restic/repository/repository.go | 8 ++--- 18 files changed, 78 insertions(+), 78 deletions(-) diff --git a/src/cmds/restic/cmd_cat.go b/src/cmds/restic/cmd_cat.go index 75edc258b..f99d93e7e 100644 --- a/src/cmds/restic/cmd_cat.go +++ b/src/cmds/restic/cmd_cat.go @@ -98,7 +98,7 @@ func (cmd CmdCat) Execute(args []string) error { return nil case "key": - h := restic.Handle{FileType: restic.KeyFile, Name: id.String()} + h := restic.Handle{Type: restic.KeyFile, Name: id.String()} buf, err := backend.LoadAll(repo.Backend(), h, nil) if err != nil { return err @@ -149,7 +149,7 @@ func (cmd CmdCat) Execute(args []string) error { switch tpe { case "pack": - h := restic.Handle{FileType: restic.DataFile, Name: id.String()} + h := restic.Handle{Type: restic.DataFile, Name: id.String()} buf, err := 
backend.LoadAll(repo.Backend(), h, nil) if err != nil { return err diff --git a/src/restic/backend/local/local.go b/src/restic/backend/local/local.go index 4ac5c0fa7..4cfd4de95 100644 --- a/src/restic/backend/local/local.go +++ b/src/restic/backend/local/local.go @@ -111,7 +111,7 @@ func (b *Local) Load(h restic.Handle, p []byte, off int64) (n int, err error) { return 0, err } - f, err := fs.Open(filename(b.p, h.FileType, h.Name)) + f, err := fs.Open(filename(b.p, h.Type, h.Name)) if err != nil { return 0, errors.Wrap(err, "Open") } @@ -183,7 +183,7 @@ func (b *Local) Save(h restic.Handle, p []byte) (err error) { return err } - filename := filename(b.p, h.FileType, h.Name) + filename := filename(b.p, h.Type, h.Name) // test if new path already exists if _, err := fs.Stat(filename); err == nil { @@ -191,7 +191,7 @@ func (b *Local) Save(h restic.Handle, p []byte) (err error) { } // create directories if necessary, ignore errors - if h.FileType == restic.DataFile { + if h.Type == restic.DataFile { err = fs.MkdirAll(filepath.Dir(filename), backend.Modes.Dir) if err != nil { return errors.Wrap(err, "MkdirAll") @@ -222,7 +222,7 @@ func (b *Local) Stat(h restic.Handle) (restic.FileInfo, error) { return restic.FileInfo{}, err } - fi, err := fs.Stat(filename(b.p, h.FileType, h.Name)) + fi, err := fs.Stat(filename(b.p, h.Type, h.Name)) if err != nil { return restic.FileInfo{}, errors.Wrap(err, "Stat") } diff --git a/src/restic/backend/mem/mem_backend.go b/src/restic/backend/mem/mem_backend.go index a2fbc8150..4d1ac49ae 100644 --- a/src/restic/backend/mem/mem_backend.go +++ b/src/restic/backend/mem/mem_backend.go @@ -61,17 +61,17 @@ func (be *MemoryBackend) Load(h restic.Handle, p []byte, off int64) (int, error) be.m.Lock() defer be.m.Unlock() - if h.FileType == restic.ConfigFile { + if h.Type == restic.ConfigFile { h.Name = "" } debug.Log("MemoryBackend.Load", "get %v offset %v len %v", h, off, len(p)) - if _, ok := be.data[entry{h.FileType, h.Name}]; !ok { + if _, ok := be.data[entry{h.Type, h.Name}]; !ok { return 0, errors.New("no such data") } - buf := be.data[entry{h.FileType, h.Name}] + buf := be.data[entry{h.Type, h.Name}] switch { case off > int64(len(buf)): return 0, errors.New("offset beyond end of file") @@ -101,18 +101,18 @@ func (be *MemoryBackend) Save(h restic.Handle, p []byte) error { be.m.Lock() defer be.m.Unlock() - if h.FileType == restic.ConfigFile { + if h.Type == restic.ConfigFile { h.Name = "" } - if _, ok := be.data[entry{h.FileType, h.Name}]; ok { + if _, ok := be.data[entry{h.Type, h.Name}]; ok { return errors.New("file already exists") } debug.Log("MemoryBackend.Save", "save %v bytes at %v", len(p), h) buf := make([]byte, len(p)) copy(buf, p) - be.data[entry{h.FileType, h.Name}] = buf + be.data[entry{h.Type, h.Name}] = buf return nil } @@ -126,13 +126,13 @@ func (be *MemoryBackend) Stat(h restic.Handle) (restic.FileInfo, error) { return restic.FileInfo{}, err } - if h.FileType == restic.ConfigFile { + if h.Type == restic.ConfigFile { h.Name = "" } debug.Log("MemoryBackend.Stat", "stat %v", h) - e, ok := be.data[entry{h.FileType, h.Name}] + e, ok := be.data[entry{h.Type, h.Name}] if !ok { return restic.FileInfo{}, errors.New("no such data") } diff --git a/src/restic/backend/rest/rest.go b/src/restic/backend/rest/rest.go index 6ea21a1c5..040faac97 100644 --- a/src/restic/backend/rest/rest.go +++ b/src/restic/backend/rest/rest.go @@ -24,7 +24,7 @@ func restPath(url *url.URL, h restic.Handle) string { var dir string - switch h.FileType { + switch h.Type { case 
restic.ConfigFile: dir = "" h.Name = "config" @@ -39,7 +39,7 @@ func restPath(url *url.URL, h restic.Handle) string { case restic.KeyFile: dir = backend.Paths.Keys default: - dir = string(h.FileType) + dir = string(h.Type) } u.Path = path.Join(url.Path, dir, h.Name) @@ -185,7 +185,7 @@ func (b *restBackend) Stat(h restic.Handle) (restic.FileInfo, error) { // Test returns true if a blob of the given type and name exists in the backend. func (b *restBackend) Test(t restic.FileType, name string) (bool, error) { - _, err := b.Stat(restic.Handle{FileType: t, Name: name}) + _, err := b.Stat(restic.Handle{Type: t, Name: name}) if err != nil { return false, nil } @@ -195,7 +195,7 @@ func (b *restBackend) Test(t restic.FileType, name string) (bool, error) { // Remove removes the blob with the given name and type. func (b *restBackend) Remove(t restic.FileType, name string) error { - h := restic.Handle{FileType: t, Name: name} + h := restic.Handle{Type: t, Name: name} if err := h.Valid(); err != nil { return err } @@ -225,7 +225,7 @@ func (b *restBackend) Remove(t restic.FileType, name string) error { func (b *restBackend) List(t restic.FileType, done <-chan struct{}) <-chan string { ch := make(chan string) - url := restPath(b.url, restic.Handle{FileType: t}) + url := restPath(b.url, restic.Handle{Type: t}) if !strings.HasSuffix(url, "/") { url += "/" } diff --git a/src/restic/backend/rest/rest_path_test.go b/src/restic/backend/rest/rest_path_test.go index 8542e42e2..8356abfba 100644 --- a/src/restic/backend/rest/rest_path_test.go +++ b/src/restic/backend/rest/rest_path_test.go @@ -14,24 +14,24 @@ var restPathTests = []struct { { URL: parseURL("https://hostname.foo"), Handle: restic.Handle{ - FileType: restic.DataFile, - Name: "foobar", + Type: restic.DataFile, + Name: "foobar", }, Result: "https://hostname.foo/data/foobar", }, { URL: parseURL("https://hostname.foo:1234/prefix/repo"), Handle: restic.Handle{ - FileType: restic.LockFile, - Name: "foobar", + Type: restic.LockFile, + Name: "foobar", }, Result: "https://hostname.foo:1234/prefix/repo/locks/foobar", }, { URL: parseURL("https://hostname.foo:1234/prefix/repo"), Handle: restic.Handle{ - FileType: restic.ConfigFile, - Name: "foobar", + Type: restic.ConfigFile, + Name: "foobar", }, Result: "https://hostname.foo:1234/prefix/repo/config", }, diff --git a/src/restic/backend/s3/s3.go b/src/restic/backend/s3/s3.go index 67dbeeb91..3af656456 100644 --- a/src/restic/backend/s3/s3.go +++ b/src/restic/backend/s3/s3.go @@ -85,7 +85,7 @@ func (be s3) Load(h restic.Handle, p []byte, off int64) (n int, err error) { var obj *minio.Object debug.Log("s3.Load", "%v, offset %v, len %v", h, off, len(p)) - path := be.s3path(h.FileType, h.Name) + path := be.s3path(h.Type, h.Name) <-be.connChan defer func() { @@ -160,7 +160,7 @@ func (be s3) Save(h restic.Handle, p []byte) (err error) { debug.Log("s3.Save", "%v with %d bytes", h, len(p)) - path := be.s3path(h.FileType, h.Name) + path := be.s3path(h.Type, h.Name) // Check key does not already exist _, err = be.client.StatObject(be.bucketname, path) @@ -186,7 +186,7 @@ func (be s3) Save(h restic.Handle, p []byte) (err error) { func (be s3) Stat(h restic.Handle) (bi restic.FileInfo, err error) { debug.Log("s3.Stat", "%v", h) - path := be.s3path(h.FileType, h.Name) + path := be.s3path(h.Type, h.Name) var obj *minio.Object obj, err = be.client.GetObject(be.bucketname, path) diff --git a/src/restic/backend/sftp/sftp.go b/src/restic/backend/sftp/sftp.go index c5488a815..a4681142f 100644 --- 
a/src/restic/backend/sftp/sftp.go +++ b/src/restic/backend/sftp/sftp.go @@ -338,7 +338,7 @@ func (r *SFTP) Load(h restic.Handle, p []byte, off int64) (n int, err error) { return 0, err } - f, err := r.c.Open(r.filename(h.FileType, h.Name)) + f, err := r.c.Open(r.filename(h.Type, h.Name)) if err != nil { return 0, errors.Wrap(err, "Open") } @@ -396,7 +396,7 @@ func (r *SFTP) Save(h restic.Handle, p []byte) (err error) { return errors.Wrap(err, "Close") } - err = r.renameFile(filename, h.FileType, h.Name) + err = r.renameFile(filename, h.Type, h.Name) debug.Log("sftp.Save", "save %v: rename %v: %v", h, path.Base(filename), err) return err @@ -413,7 +413,7 @@ func (r *SFTP) Stat(h restic.Handle) (restic.FileInfo, error) { return restic.FileInfo{}, err } - fi, err := r.c.Lstat(r.filename(h.FileType, h.Name)) + fi, err := r.c.Lstat(r.filename(h.Type, h.Name)) if err != nil { return restic.FileInfo{}, errors.Wrap(err, "Lstat") } diff --git a/src/restic/backend/test/tests.go b/src/restic/backend/test/tests.go index f1a7ad82e..8134eafbd 100644 --- a/src/restic/backend/test/tests.go +++ b/src/restic/backend/test/tests.go @@ -153,12 +153,12 @@ func TestConfig(t testing.TB) { var testString = "Config" // create config and read it back - _, err := backend.LoadAll(b, restic.Handle{FileType: restic.ConfigFile}, nil) + _, err := backend.LoadAll(b, restic.Handle{Type: restic.ConfigFile}, nil) if err == nil { t.Fatalf("did not get expected error for non-existing config") } - err = b.Save(restic.Handle{FileType: restic.ConfigFile}, []byte(testString)) + err = b.Save(restic.Handle{Type: restic.ConfigFile}, []byte(testString)) if err != nil { t.Fatalf("Save() error: %v", err) } @@ -166,7 +166,7 @@ func TestConfig(t testing.TB) { // try accessing the config with different names, should all return the // same config for _, name := range []string{"", "foo", "bar", "0000000000000000000000000000000000000000000000000000000000000000"} { - h := restic.Handle{FileType: restic.ConfigFile, Name: name} + h := restic.Handle{Type: restic.ConfigFile, Name: name} buf, err := backend.LoadAll(b, h, nil) if err != nil { t.Fatalf("unable to read config with name %q: %v", name, err) @@ -188,7 +188,7 @@ func TestLoad(t testing.TB) { t.Fatalf("Load() did not return an error for invalid handle") } - _, err = b.Load(restic.Handle{FileType: restic.DataFile, Name: "foobar"}, nil, 0) + _, err = b.Load(restic.Handle{Type: restic.DataFile, Name: "foobar"}, nil, 0) if err == nil { t.Fatalf("Load() did not return an error for non-existing blob") } @@ -198,7 +198,7 @@ func TestLoad(t testing.TB) { data := Random(23, length) id := restic.Hash(data) - handle := restic.Handle{FileType: restic.DataFile, Name: id.String()} + handle := restic.Handle{Type: restic.DataFile, Name: id.String()} err = b.Save(handle, data) if err != nil { t.Fatalf("Save() error: %v", err) @@ -323,7 +323,7 @@ func TestLoadNegativeOffset(t testing.TB) { data := Random(23, length) id := restic.Hash(data) - handle := restic.Handle{FileType: restic.DataFile, Name: id.String()} + handle := restic.Handle{Type: restic.DataFile, Name: id.String()} err := b.Save(handle, data) if err != nil { t.Fatalf("Save() error: %v", err) @@ -382,8 +382,8 @@ func TestSave(t testing.TB) { copy(id[:], data) h := restic.Handle{ - FileType: restic.DataFile, - Name: fmt.Sprintf("%s-%d", id, i), + Type: restic.DataFile, + Name: fmt.Sprintf("%s-%d", id, i), } err := b.Save(h, data) OK(t, err) @@ -405,7 +405,7 @@ func TestSave(t testing.TB) { t.Fatalf("Stat() returned different size, want %q, got %d", 
len(data), fi.Size) } - err = b.Remove(h.FileType, h.Name) + err = b.Remove(h.Type, h.Name) if err != nil { t.Fatalf("error removing item: %v", err) } @@ -430,7 +430,7 @@ func TestSaveFilenames(t testing.TB) { defer close(t) for i, test := range filenameTests { - h := restic.Handle{Name: test.name, FileType: restic.DataFile} + h := restic.Handle{Name: test.name, Type: restic.DataFile} err := b.Save(h, []byte(test.data)) if err != nil { t.Errorf("test %d failed: Save() returned %v", i, err) @@ -447,7 +447,7 @@ func TestSaveFilenames(t testing.TB) { t.Errorf("test %d: returned wrong bytes", i) } - err = b.Remove(h.FileType, h.Name) + err = b.Remove(h.Type, h.Name) if err != nil { t.Errorf("test %d failed: Remove() returned %v", i, err) continue @@ -467,7 +467,7 @@ var testStrings = []struct { func store(t testing.TB, b restic.Backend, tpe restic.FileType, data []byte) { id := restic.Hash(data) - err := b.Save(restic.Handle{Name: id.String(), FileType: tpe}, data) + err := b.Save(restic.Handle{Name: id.String(), Type: tpe}, data) OK(t, err) } @@ -499,7 +499,7 @@ func TestBackend(t testing.TB) { Assert(t, !ret, "blob was found to exist before creating") // try to stat a not existing blob - h := restic.Handle{FileType: tpe, Name: id.String()} + h := restic.Handle{Type: tpe, Name: id.String()} _, err = b.Stat(h) Assert(t, err != nil, "blob data could be extracted before creation") @@ -518,7 +518,7 @@ func TestBackend(t testing.TB) { store(t, b, tpe, []byte(test.data)) // test Load() - h := restic.Handle{FileType: tpe, Name: test.id} + h := restic.Handle{Type: tpe, Name: test.id} buf, err := backend.LoadAll(b, h, nil) OK(t, err) Equals(t, test.data, string(buf)) @@ -539,7 +539,7 @@ func TestBackend(t testing.TB) { test := testStrings[0] // create blob - err := b.Save(restic.Handle{FileType: tpe, Name: test.id}, []byte(test.data)) + err := b.Save(restic.Handle{Type: tpe, Name: test.id}, []byte(test.data)) Assert(t, err != nil, "expected error, got %v", err) // remove and recreate @@ -552,7 +552,7 @@ func TestBackend(t testing.TB) { Assert(t, ok == false, "removed blob still present") // create blob - err = b.Save(restic.Handle{FileType: tpe, Name: test.id}, []byte(test.data)) + err = b.Save(restic.Handle{Type: tpe, Name: test.id}, []byte(test.data)) OK(t, err) // list items diff --git a/src/restic/backend/utils_test.go b/src/restic/backend/utils_test.go index 8f2f63161..59eed7089 100644 --- a/src/restic/backend/utils_test.go +++ b/src/restic/backend/utils_test.go @@ -21,10 +21,10 @@ func TestLoadAll(t *testing.T) { data := Random(23+i, rand.Intn(MiB)+500*KiB) id := restic.Hash(data) - err := b.Save(restic.Handle{Name: id.String(), FileType: restic.DataFile}, data) + err := b.Save(restic.Handle{Name: id.String(), Type: restic.DataFile}, data) OK(t, err) - buf, err := backend.LoadAll(b, restic.Handle{FileType: restic.DataFile, Name: id.String()}, nil) + buf, err := backend.LoadAll(b, restic.Handle{Type: restic.DataFile, Name: id.String()}, nil) OK(t, err) if len(buf) != len(data) { @@ -46,11 +46,11 @@ func TestLoadSmallBuffer(t *testing.T) { data := Random(23+i, rand.Intn(MiB)+500*KiB) id := restic.Hash(data) - err := b.Save(restic.Handle{Name: id.String(), FileType: restic.DataFile}, data) + err := b.Save(restic.Handle{Name: id.String(), Type: restic.DataFile}, data) OK(t, err) buf := make([]byte, len(data)-23) - buf, err = backend.LoadAll(b, restic.Handle{FileType: restic.DataFile, Name: id.String()}, buf) + buf, err = backend.LoadAll(b, restic.Handle{Type: restic.DataFile, Name: id.String()}, 
buf) OK(t, err) if len(buf) != len(data) { @@ -72,11 +72,11 @@ func TestLoadLargeBuffer(t *testing.T) { data := Random(23+i, rand.Intn(MiB)+500*KiB) id := restic.Hash(data) - err := b.Save(restic.Handle{Name: id.String(), FileType: restic.DataFile}, data) + err := b.Save(restic.Handle{Name: id.String(), Type: restic.DataFile}, data) OK(t, err) buf := make([]byte, len(data)+100) - buf, err = backend.LoadAll(b, restic.Handle{FileType: restic.DataFile, Name: id.String()}, buf) + buf, err = backend.LoadAll(b, restic.Handle{Type: restic.DataFile, Name: id.String()}, buf) OK(t, err) if len(buf) != len(data) { diff --git a/src/restic/checker/checker.go b/src/restic/checker/checker.go index 3b488cced..5ee592f51 100644 --- a/src/restic/checker/checker.go +++ b/src/restic/checker/checker.go @@ -662,7 +662,7 @@ func (c *Checker) CountPacks() uint64 { // checkPack reads a pack and checks the integrity of all blobs. func checkPack(r restic.Repository, id restic.ID) error { debug.Log("Checker.checkPack", "checking pack %v", id.Str()) - h := restic.Handle{FileType: restic.DataFile, Name: id.String()} + h := restic.Handle{Type: restic.DataFile, Name: id.String()} buf, err := backend.LoadAll(r.Backend(), h, nil) if err != nil { return err diff --git a/src/restic/file.go b/src/restic/file.go index af5c374c7..166546f52 100644 --- a/src/restic/file.go +++ b/src/restic/file.go @@ -21,8 +21,8 @@ const ( // Handle is used to store and access data in a backend. type Handle struct { - FileType FileType - Name string + Type FileType + Name string } func (h Handle) String() string { @@ -30,16 +30,16 @@ func (h Handle) String() string { if len(name) > 10 { name = name[:10] } - return fmt.Sprintf("<%s/%s>", h.FileType, name) + return fmt.Sprintf("<%s/%s>", h.Type, name) } // Valid returns an error if h is not valid. 
func (h Handle) Valid() error { - if h.FileType == "" { + if h.Type == "" { return errors.New("type is empty") } - switch h.FileType { + switch h.Type { case DataFile: case KeyFile: case LockFile: @@ -47,10 +47,10 @@ func (h Handle) Valid() error { case IndexFile: case ConfigFile: default: - return errors.Errorf("invalid Type %q", h.FileType) + return errors.Errorf("invalid Type %q", h.Type) } - if h.FileType == ConfigFile { + if h.Type == ConfigFile { return nil } diff --git a/src/restic/file_test.go b/src/restic/file_test.go index d5044558e..2f8f395c2 100644 --- a/src/restic/file_test.go +++ b/src/restic/file_test.go @@ -7,11 +7,11 @@ var handleTests = []struct { valid bool }{ {Handle{Name: "foo"}, false}, - {Handle{FileType: "foobar"}, false}, - {Handle{FileType: ConfigFile, Name: ""}, true}, - {Handle{FileType: DataFile, Name: ""}, false}, - {Handle{FileType: "", Name: "x"}, false}, - {Handle{FileType: LockFile, Name: "010203040506"}, true}, + {Handle{Type: "foobar"}, false}, + {Handle{Type: ConfigFile, Name: ""}, true}, + {Handle{Type: DataFile, Name: ""}, false}, + {Handle{Type: "", Name: "x"}, false}, + {Handle{Type: LockFile, Name: "010203040506"}, true}, } func TestHandleValid(t *testing.T) { diff --git a/src/restic/pack/pack_test.go b/src/restic/pack/pack_test.go index 5e5b1fc1e..8a21785df 100644 --- a/src/restic/pack/pack_test.go +++ b/src/restic/pack/pack_test.go @@ -126,7 +126,7 @@ func TestUnpackReadSeeker(t *testing.T) { b := mem.New() id := restic.Hash(packData) - handle := restic.Handle{FileType: restic.DataFile, Name: id.String()} + handle := restic.Handle{Type: restic.DataFile, Name: id.String()} OK(t, b.Save(handle, packData)) verifyBlobs(t, bufs, k, restic.ReaderAt(b, handle), packSize) } @@ -139,7 +139,7 @@ func TestShortPack(t *testing.T) { b := mem.New() id := restic.Hash(packData) - handle := restic.Handle{FileType: restic.DataFile, Name: id.String()} + handle := restic.Handle{Type: restic.DataFile, Name: id.String()} OK(t, b.Save(handle, packData)) verifyBlobs(t, bufs, k, restic.ReaderAt(b, handle), packSize) } diff --git a/src/restic/repository/key.go b/src/restic/repository/key.go index 69dabadbb..7b03ae108 100644 --- a/src/restic/repository/key.go +++ b/src/restic/repository/key.go @@ -143,7 +143,7 @@ func SearchKey(s *Repository, password string, maxKeys int) (*Key, error) { // LoadKey loads a key from the backend. 
func LoadKey(s *Repository, name string) (k *Key, err error) { - h := restic.Handle{FileType: restic.KeyFile, Name: name} + h := restic.Handle{Type: restic.KeyFile, Name: name} data, err := backend.LoadAll(s.be, h, nil) if err != nil { return nil, err @@ -226,8 +226,8 @@ func AddKey(s *Repository, password string, template *crypto.Key) (*Key, error) // store in repository and return h := restic.Handle{ - FileType: restic.KeyFile, - Name: restic.Hash(buf).String(), + Type: restic.KeyFile, + Name: restic.Hash(buf).String(), } err = s.be.Save(h, buf) diff --git a/src/restic/repository/packer_manager.go b/src/restic/repository/packer_manager.go index 2b065ed7d..ea08a114c 100644 --- a/src/restic/repository/packer_manager.go +++ b/src/restic/repository/packer_manager.go @@ -115,7 +115,7 @@ func (r *Repository) savePacker(p *pack.Packer) error { } id := restic.Hash(data) - h := restic.Handle{FileType: restic.DataFile, Name: id.String()} + h := restic.Handle{Type: restic.DataFile, Name: id.String()} err = r.be.Save(h, data) if err != nil { diff --git a/src/restic/repository/packer_manager_test.go b/src/restic/repository/packer_manager_test.go index 323051612..bf6258428 100644 --- a/src/restic/repository/packer_manager_test.go +++ b/src/restic/repository/packer_manager_test.go @@ -63,7 +63,7 @@ func saveFile(t testing.TB, be Saver, filename string, n int) { t.Fatal(err) } - h := restic.Handle{FileType: restic.DataFile, Name: restic.Hash(data).String()} + h := restic.Handle{Type: restic.DataFile, Name: restic.Hash(data).String()} err = be.Save(h, data) if err != nil { diff --git a/src/restic/repository/repack.go b/src/restic/repository/repack.go index dea26e6eb..95e0eae07 100644 --- a/src/restic/repository/repack.go +++ b/src/restic/repository/repack.go @@ -21,7 +21,7 @@ func Repack(repo *Repository, packs restic.IDSet, keepBlobs restic.BlobSet) (err buf := make([]byte, 0, maxPackSize) for packID := range packs { // load the complete pack - h := restic.Handle{FileType: restic.DataFile, Name: packID.String()} + h := restic.Handle{Type: restic.DataFile, Name: packID.String()} l, err := repo.Backend().Load(h, buf[:cap(buf)], 0) if errors.Cause(err) == io.ErrUnexpectedEOF { diff --git a/src/restic/repository/repository.go b/src/restic/repository/repository.go index 1a12ac608..bc36e0507 100644 --- a/src/restic/repository/repository.go +++ b/src/restic/repository/repository.go @@ -61,7 +61,7 @@ func (r *Repository) PrefixLength(t restic.FileType) (int, error) { func (r *Repository) LoadAndDecrypt(t restic.FileType, id restic.ID) ([]byte, error) { debug.Log("Repo.Load", "load %v with id %v", t, id.Str()) - h := restic.Handle{FileType: t, Name: id.String()} + h := restic.Handle{Type: t, Name: id.String()} buf, err := backend.LoadAll(r.be, h, nil) if err != nil { debug.Log("Repo.Load", "error loading %v: %v", id.Str(), err) @@ -117,7 +117,7 @@ func (r *Repository) LoadBlob(id restic.ID, t restic.BlobType, plaintextBuf []by } // load blob from pack - h := restic.Handle{FileType: restic.DataFile, Name: blob.PackID.String()} + h := restic.Handle{Type: restic.DataFile, Name: blob.PackID.String()} ciphertextBuf := make([]byte, blob.Length) n, err := r.be.Load(h, ciphertextBuf, int64(blob.Offset)) if err != nil { @@ -279,7 +279,7 @@ func (r *Repository) SaveUnpacked(t restic.FileType, p []byte) (id restic.ID, er } id = restic.Hash(ciphertext) - h := restic.Handle{FileType: t, Name: id.String()} + h := restic.Handle{Type: t, Name: id.String()} err = r.be.Save(h, ciphertext) if err != nil { @@ -560,7 +560,7 @@ func 
(r *Repository) List(t restic.FileType, done <-chan struct{}) <-chan restic // ListPack returns the list of blobs saved in the pack id and the length of // the file as stored in the backend. func (r *Repository) ListPack(id restic.ID) ([]restic.Blob, int64, error) { - h := restic.Handle{FileType: restic.DataFile, Name: id.String()} + h := restic.Handle{Type: restic.DataFile, Name: id.String()} blobInfo, err := r.Backend().Stat(h) if err != nil { From 5764b55aee344240103c7f14fefec341a0b7ba26 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Thu, 1 Sep 2016 21:20:03 +0200 Subject: [PATCH 15/40] Rename Node.FileType -> Type --- src/cmds/restic/cmd_find.go | 2 +- src/cmds/restic/cmd_ls.go | 6 ++--- src/restic/archiver/archive_reader.go | 2 +- src/restic/archiver/archiver.go | 4 ++-- src/restic/checker/checker.go | 4 ++-- src/restic/find.go | 2 +- src/restic/fuse/dir.go | 8 +++---- src/restic/node.go | 30 +++++++++++------------ src/restic/node_test.go | 34 +++++++++++++-------------- src/restic/restorer.go | 2 +- src/restic/testing.go | 16 ++++++------- src/restic/tree.go | 2 +- src/restic/walk.go | 4 ++-- 13 files changed, 58 insertions(+), 58 deletions(-) diff --git a/src/cmds/restic/cmd_find.go b/src/cmds/restic/cmd_find.go index 783d65b35..96d31dd6f 100644 --- a/src/cmds/restic/cmd_find.go +++ b/src/cmds/restic/cmd_find.go @@ -91,7 +91,7 @@ func (c CmdFind) findInTree(repo *repository.Repository, id restic.ID, path stri debug.Log("restic.find", " pattern does not match\n") } - if node.FileType == "dir" { + if node.Type == "dir" { subdirResults, err := c.findInTree(repo, *node.Subtree, filepath.Join(path, node.Name)) if err != nil { return nil, err diff --git a/src/cmds/restic/cmd_ls.go b/src/cmds/restic/cmd_ls.go index 8157c74f2..d875e58e6 100644 --- a/src/cmds/restic/cmd_ls.go +++ b/src/cmds/restic/cmd_ls.go @@ -30,7 +30,7 @@ func (cmd CmdLs) printNode(prefix string, n *restic.Node) string { return filepath.Join(prefix, n.Name) } - switch n.FileType { + switch n.Type { case "file": return fmt.Sprintf("%s %5d %5d %6d %s %s", n.Mode, n.UID, n.GID, n.Size, n.ModTime, filepath.Join(prefix, n.Name)) @@ -41,7 +41,7 @@ func (cmd CmdLs) printNode(prefix string, n *restic.Node) string { return fmt.Sprintf("%s %5d %5d %6d %s %s -> %s", n.Mode|os.ModeSymlink, n.UID, n.GID, n.Size, n.ModTime, filepath.Join(prefix, n.Name), n.LinkTarget) default: - return fmt.Sprintf("", n.FileType, n.Name) + return fmt.Sprintf("", n.Type, n.Name) } } @@ -54,7 +54,7 @@ func (cmd CmdLs) printTree(prefix string, repo *repository.Repository, id restic for _, entry := range tree.Nodes { cmd.global.Printf(cmd.printNode(prefix, entry) + "\n") - if entry.FileType == "dir" && entry.Subtree != nil { + if entry.Type == "dir" && entry.Subtree != nil { err = cmd.printTree(filepath.Join(prefix, entry.Name), repo, *entry.Subtree) if err != nil { return err diff --git a/src/restic/archiver/archive_reader.go b/src/restic/archiver/archive_reader.go index 1f835c202..0ddefd151 100644 --- a/src/restic/archiver/archive_reader.go +++ b/src/restic/archiver/archive_reader.go @@ -81,7 +81,7 @@ func ArchiveReader(repo restic.Repository, p *restic.Progress, rd io.Reader, nam Name: name, AccessTime: time.Now(), ModTime: time.Now(), - FileType: "file", + Type: "file", Mode: 0644, Size: fileSize, UID: sn.UID, diff --git a/src/restic/archiver/archiver.go b/src/restic/archiver/archiver.go index b4e201871..9cbb34a06 100644 --- a/src/restic/archiver/archiver.go +++ b/src/restic/archiver/archiver.go @@ -304,7 +304,7 @@ func (arch *Archiver) 
fileWorker(wg *sync.WaitGroup, p *restic.Progress, done <- } // otherwise read file normally - if node.FileType == "file" && len(node.Content) == 0 { + if node.Type == "file" && len(node.Content) == 0 { debug.Log("Archiver.fileWorker", " read and save %v, content: %v", e.Path(), node.Content) err = arch.SaveFile(p, node) if err != nil { @@ -371,7 +371,7 @@ func (arch *Archiver) dirWorker(wg *sync.WaitGroup, p *restic.Progress, done <-c node := res.(*restic.Node) tree.Insert(node) - if node.FileType == "dir" { + if node.Type == "dir" { debug.Log("Archiver.dirWorker", "got tree node for %s: %v", node.Path, node.Subtree) if node.Subtree.IsNull() { diff --git a/src/restic/checker/checker.go b/src/restic/checker/checker.go index 5ee592f51..9d673a94c 100644 --- a/src/restic/checker/checker.go +++ b/src/restic/checker/checker.go @@ -581,7 +581,7 @@ func (c *Checker) checkTree(id restic.ID, tree *restic.Tree) (errs []error) { var blobs []restic.ID for _, node := range tree.Nodes { - switch node.FileType { + switch node.Type { case "file": if node.Content == nil { errs = append(errs, Error{TreeID: id, Err: errors.Errorf("file %q has nil blob list", node.Name)}) @@ -609,7 +609,7 @@ func (c *Checker) checkTree(id restic.ID, tree *restic.Tree) (errs []error) { // nothing to check default: - errs = append(errs, Error{TreeID: id, Err: errors.Errorf("node %q with invalid type %q", node.Name, node.FileType)}) + errs = append(errs, Error{TreeID: id, Err: errors.Errorf("node %q with invalid type %q", node.Name, node.Type)}) } if node.Name == "" { diff --git a/src/restic/find.go b/src/restic/find.go index 204a61a09..bfcdbb58f 100644 --- a/src/restic/find.go +++ b/src/restic/find.go @@ -12,7 +12,7 @@ func FindUsedBlobs(repo Repository, treeID ID, blobs BlobSet, seen BlobSet) erro } for _, node := range tree.Nodes { - switch node.FileType { + switch node.Type { case "file": for _, blob := range node.Content { blobs.Insert(BlobHandle{ID: blob, Type: DataBlob}) diff --git a/src/restic/fuse/dir.go b/src/restic/fuse/dir.go index b553da1ad..a89617e5f 100644 --- a/src/restic/fuse/dir.go +++ b/src/restic/fuse/dir.go @@ -51,7 +51,7 @@ func newDir(repo *repository.Repository, node *restic.Node, ownerIsRoot bool) (* // replaceSpecialNodes replaces nodes with name "." and "/" by their contents. // Otherwise, the node is returned. 
func replaceSpecialNodes(repo *repository.Repository, node *restic.Node) ([]*restic.Node, error) { - if node.FileType != "dir" || node.Subtree == nil { + if node.Type != "dir" || node.Subtree == nil { return []*restic.Node{node}, nil } @@ -124,7 +124,7 @@ func (d *dir) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) { for _, node := range d.items { var typ fuse.DirentType - switch node.FileType { + switch node.Type { case "dir": typ = fuse.DT_Dir case "file": @@ -150,7 +150,7 @@ func (d *dir) Lookup(ctx context.Context, name string) (fs.Node, error) { debug.Log("dir.Lookup", " Lookup(%v) -> not found", name) return nil, fuse.ENOENT } - switch node.FileType { + switch node.Type { case "dir": return newDir(d.repo, node, d.ownerIsRoot) case "file": @@ -158,7 +158,7 @@ func (d *dir) Lookup(ctx context.Context, name string) (fs.Node, error) { case "symlink": return newLink(d.repo, node, d.ownerIsRoot) default: - debug.Log("dir.Lookup", " node %v has unknown type %v", name, node.FileType) + debug.Log("dir.Lookup", " node %v has unknown type %v", name, node.Type) return nil, fuse.ENOENT } } diff --git a/src/restic/node.go b/src/restic/node.go index b18b84c6d..842517d42 100644 --- a/src/restic/node.go +++ b/src/restic/node.go @@ -21,7 +21,7 @@ import ( // Node is a file, directory or other item in a backup. type Node struct { Name string `json:"name"` - FileType string `json:"type"` + Type string `json:"type"` Mode os.FileMode `json:"mode,omitempty"` ModTime time.Time `json:"mtime,omitempty"` AccessTime time.Time `json:"atime,omitempty"` @@ -47,7 +47,7 @@ type Node struct { } func (node Node) String() string { - switch node.FileType { + switch node.Type { case "file": return fmt.Sprintf("%s %5d %5d %6d %s %s", node.Mode, node.UID, node.GID, node.Size, node.ModTime, node.Name) @@ -56,7 +56,7 @@ func (node Node) String() string { node.Mode|os.ModeDir, node.UID, node.GID, node.Size, node.ModTime, node.Name) } - return fmt.Sprintf("", node.FileType, node.Name) + return fmt.Sprintf("", node.Type, node.Name) } func (node Node) Tree() *Tree { @@ -73,8 +73,8 @@ func NodeFromFileInfo(path string, fi os.FileInfo) (*Node, error) { ModTime: fi.ModTime(), } - node.FileType = nodeTypeFromFileInfo(fi) - if node.FileType == "file" { + node.Type = nodeTypeFromFileInfo(fi) + if node.Type == "file" { node.Size = uint64(fi.Size()) } @@ -107,7 +107,7 @@ func nodeTypeFromFileInfo(fi os.FileInfo) string { func (node *Node) CreateAt(path string, repo Repository) error { debug.Log("Node.CreateAt", "create node %v at %v", node.Name, path) - switch node.FileType { + switch node.Type { case "dir": if err := node.createDirAt(path); err != nil { return err @@ -135,7 +135,7 @@ func (node *Node) CreateAt(path string, repo Repository) error { case "socket": return nil default: - return errors.Errorf("filetype %q not implemented!\n", node.FileType) + return errors.Errorf("filetype %q not implemented!\n", node.Type) } err := node.restoreMetadata(path) @@ -154,14 +154,14 @@ func (node Node) restoreMetadata(path string) error { return errors.Wrap(err, "Lchown") } - if node.FileType != "symlink" { + if node.Type != "symlink" { err = fs.Chmod(path, node.Mode) if err != nil { return errors.Wrap(err, "Chmod") } } - if node.FileType != "dir" { + if node.Type != "dir" { err = node.RestoreTimestamps(path) if err != nil { debug.Log("Node.restoreMetadata", "error restoring timestamps for dir %v: %v", path, err) @@ -178,7 +178,7 @@ func (node Node) RestoreTimestamps(path string) error { syscall.NsecToTimespec(node.ModTime.UnixNano()), } 
- if node.FileType == "symlink" { + if node.Type == "symlink" { return node.restoreSymlinkTimestamps(path, utimes) } @@ -283,7 +283,7 @@ func (node Node) Equals(other Node) bool { if node.Name != other.Name { return false } - if node.FileType != other.FileType { + if node.Type != other.Type { return false } if node.Mode != other.Mode { @@ -371,13 +371,13 @@ func (node Node) sameContent(other Node) bool { } func (node *Node) IsNewer(path string, fi os.FileInfo) bool { - if node.FileType != "file" { + if node.Type != "file" { debug.Log("node.IsNewer", "node %v is newer: not file", path) return true } tpe := nodeTypeFromFileInfo(fi) - if node.Name != fi.Name() || node.FileType != tpe { + if node.Name != fi.Name() || node.Type != tpe { debug.Log("node.IsNewer", "node %v is newer: name or type changed", path) return true } @@ -465,7 +465,7 @@ func (node *Node) fillExtra(path string, fi os.FileInfo) error { return err } - switch node.FileType { + switch node.Type { case "file": node.Size = uint64(stat.size()) node.Links = uint64(stat.nlink()) @@ -480,7 +480,7 @@ func (node *Node) fillExtra(path string, fi os.FileInfo) error { case "fifo": case "socket": default: - err = errors.Errorf("invalid node type %q", node.FileType) + err = errors.Errorf("invalid node type %q", node.Type) } return err diff --git a/src/restic/node_test.go b/src/restic/node_test.go index ca2b2add1..a2e175b14 100644 --- a/src/restic/node_test.go +++ b/src/restic/node_test.go @@ -73,7 +73,7 @@ func parseTime(s string) time.Time { var nodeTests = []restic.Node{ restic.Node{ Name: "testFile", - FileType: "file", + Type: "file", Content: restic.IDs{}, UID: uint32(os.Getuid()), GID: uint32(os.Getgid()), @@ -84,7 +84,7 @@ var nodeTests = []restic.Node{ }, restic.Node{ Name: "testSuidFile", - FileType: "file", + Type: "file", Content: restic.IDs{}, UID: uint32(os.Getuid()), GID: uint32(os.Getgid()), @@ -95,7 +95,7 @@ var nodeTests = []restic.Node{ }, restic.Node{ Name: "testSuidFile2", - FileType: "file", + Type: "file", Content: restic.IDs{}, UID: uint32(os.Getuid()), GID: uint32(os.Getgid()), @@ -106,7 +106,7 @@ var nodeTests = []restic.Node{ }, restic.Node{ Name: "testSticky", - FileType: "file", + Type: "file", Content: restic.IDs{}, UID: uint32(os.Getuid()), GID: uint32(os.Getgid()), @@ -117,7 +117,7 @@ var nodeTests = []restic.Node{ }, restic.Node{ Name: "testDir", - FileType: "dir", + Type: "dir", Subtree: nil, UID: uint32(os.Getuid()), GID: uint32(os.Getgid()), @@ -128,7 +128,7 @@ var nodeTests = []restic.Node{ }, restic.Node{ Name: "testSymlink", - FileType: "symlink", + Type: "symlink", LinkTarget: "invalid", UID: uint32(os.Getuid()), GID: uint32(os.Getgid()), @@ -155,10 +155,10 @@ func TestNodeRestoreAt(t *testing.T) { nodePath := filepath.Join(tempdir, test.Name) OK(t, test.CreateAt(nodePath, nil)) - if test.FileType == "symlink" && runtime.GOOS == "windows" { + if test.Type == "symlink" && runtime.GOOS == "windows" { continue } - if test.FileType == "dir" { + if test.Type == "dir" { OK(t, test.RestoreTimestamps(nodePath)) } @@ -169,25 +169,25 @@ func TestNodeRestoreAt(t *testing.T) { OK(t, err) Assert(t, test.Name == n2.Name, - "%v: name doesn't match (%v != %v)", test.FileType, test.Name, n2.Name) - Assert(t, test.FileType == n2.FileType, - "%v: type doesn't match (%v != %v)", test.FileType, test.FileType, n2.FileType) + "%v: name doesn't match (%v != %v)", test.Type, test.Name, n2.Name) + Assert(t, test.Type == n2.Type, + "%v: type doesn't match (%v != %v)", test.Type, test.Type, n2.Type) Assert(t, test.Size == 
n2.Size, "%v: size doesn't match (%v != %v)", test.Size, test.Size, n2.Size) if runtime.GOOS != "windows" { Assert(t, test.UID == n2.UID, - "%v: UID doesn't match (%v != %v)", test.FileType, test.UID, n2.UID) + "%v: UID doesn't match (%v != %v)", test.Type, test.UID, n2.UID) Assert(t, test.GID == n2.GID, - "%v: GID doesn't match (%v != %v)", test.FileType, test.GID, n2.GID) - if test.FileType != "symlink" { + "%v: GID doesn't match (%v != %v)", test.Type, test.GID, n2.GID) + if test.Type != "symlink" { Assert(t, test.Mode == n2.Mode, - "%v: mode doesn't match (0%o != 0%o)", test.FileType, test.Mode, n2.Mode) + "%v: mode doesn't match (0%o != 0%o)", test.Type, test.Mode, n2.Mode) } } - AssertFsTimeEqual(t, "AccessTime", test.FileType, test.AccessTime, n2.AccessTime) - AssertFsTimeEqual(t, "ModTime", test.FileType, test.ModTime, n2.ModTime) + AssertFsTimeEqual(t, "AccessTime", test.Type, test.AccessTime, n2.AccessTime) + AssertFsTimeEqual(t, "ModTime", test.Type, test.ModTime, n2.ModTime) } } diff --git a/src/restic/restorer.go b/src/restic/restorer.go index 8ba2b94f9..9784df8e9 100644 --- a/src/restic/restorer.go +++ b/src/restic/restorer.go @@ -56,7 +56,7 @@ func (res *Restorer) restoreTo(dst string, dir string, treeID ID) error { } } - if node.FileType == "dir" { + if node.Type == "dir" { if node.Subtree == nil { return errors.Errorf("Dir without subtree in tree %v", treeID.Str()) } diff --git a/src/restic/testing.go b/src/restic/testing.go index 16af93156..a28f98d7d 100644 --- a/src/restic/testing.go +++ b/src/restic/testing.go @@ -108,10 +108,10 @@ func (fs fakeFileSystem) saveTree(seed int64, depth int) ID { id := fs.saveTree(treeSeed, depth-1) node := &Node{ - Name: fmt.Sprintf("dir-%v", treeSeed), - FileType: "dir", - Mode: 0755, - Subtree: &id, + Name: fmt.Sprintf("dir-%v", treeSeed), + Type: "dir", + Mode: 0755, + Subtree: &id, } tree.Nodes = append(tree.Nodes, node) @@ -122,10 +122,10 @@ func (fs fakeFileSystem) saveTree(seed int64, depth int) ID { fileSize := (maxFileSize / maxSeed) * fileSeed node := &Node{ - Name: fmt.Sprintf("file-%v", fileSeed), - FileType: "file", - Mode: 0644, - Size: uint64(fileSize), + Name: fmt.Sprintf("file-%v", fileSeed), + Type: "file", + Mode: 0644, + Size: uint64(fileSize), } node.Content = fs.saveFile(fakeFile(fs.t, fileSeed, fileSize)) diff --git a/src/restic/tree.go b/src/restic/tree.go index 17b1d3403..604fac39c 100644 --- a/src/restic/tree.go +++ b/src/restic/tree.go @@ -95,7 +95,7 @@ func (t Tree) Find(name string) (*Node, error) { // Subtrees returns a slice of all subtree IDs of the tree. 
func (t Tree) Subtrees() (trees IDs) { for _, node := range t.Nodes { - if node.FileType == "dir" && node.Subtree != nil { + if node.Type == "dir" && node.Subtree != nil { trees = append(trees, *node.Subtree) } } diff --git a/src/restic/walk.go b/src/restic/walk.go index 8c5e52391..1c8fa60b6 100644 --- a/src/restic/walk.go +++ b/src/restic/walk.go @@ -71,7 +71,7 @@ func (tw *TreeWalker) walk(path string, tree *Tree, done chan struct{}) { // load all subtrees in parallel results := make([]<-chan loadTreeResult, len(tree.Nodes)) for i, node := range tree.Nodes { - if node.FileType == "dir" { + if node.Type == "dir" { resCh := make(chan loadTreeResult, 1) tw.ch <- loadTreeJob{ id: *node.Subtree, @@ -86,7 +86,7 @@ func (tw *TreeWalker) walk(path string, tree *Tree, done chan struct{}) { p := filepath.Join(path, node.Name) var job WalkTreeJob - if node.FileType == "dir" { + if node.Type == "dir" { if results[i] == nil { panic("result chan should not be nil") } From 0045f2fb61ed0d75007d7f09d6e1266509deab22 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Thu, 1 Sep 2016 21:26:21 +0200 Subject: [PATCH 16/40] Remove functions --- src/cmds/restic/cmd_dump.go | 40 ------------------------------------- src/cmds/restic/cmd_list.go | 14 ------------- 2 files changed, 54 deletions(-) diff --git a/src/cmds/restic/cmd_dump.go b/src/cmds/restic/cmd_dump.go index 9bab5151a..fb4389f66 100644 --- a/src/cmds/restic/cmd_dump.go +++ b/src/cmds/restic/cmd_dump.go @@ -67,37 +67,6 @@ func debugPrintSnapshots(repo *repository.Repository, wr io.Writer) error { return nil } -// func printTrees(repo *repository.Repository, wr io.Writer) error { -// done := make(chan struct{}) -// defer close(done) - -// trees := []restic.ID{} - -// for _, idx := range repo.Index().All() { -// for blob := range idx.Each(nil) { -// if blob.Type != pack.Tree { -// continue -// } - -// trees = append(trees, blob.ID) -// } -// } - -// for _, id := range trees { -// tree, err := restic.LoadTree(repo, id) -// if err != nil { -// fmt.Fprintf(os.Stderr, "LoadTree(%v): %v", id.Str(), err) -// continue -// } - -// fmt.Fprintf(wr, "tree_id: %v\n", id) - -// prettyPrintJSON(wr, tree) -// } - -// return nil -// } - const dumpPackWorkers = 10 // Pack is the struct used in printPacks. 
@@ -228,8 +197,6 @@ func (cmd CmdDump) Execute(args []string) error { return cmd.DumpIndexes() case "snapshots": return debugPrintSnapshots(repo, os.Stdout) - // case "trees": - // return printTrees(repo, os.Stdout) case "packs": return printPacks(repo, os.Stdout) case "all": @@ -239,13 +206,6 @@ func (cmd CmdDump) Execute(args []string) error { return err } - // fmt.Printf("\ntrees:\n") - - // err = printTrees(repo, os.Stdout) - // if err != nil { - // return err - // } - fmt.Printf("\nindexes:\n") err = cmd.DumpIndexes() if err != nil { diff --git a/src/cmds/restic/cmd_list.go b/src/cmds/restic/cmd_list.go index 4418dbb08..b208e3ee3 100644 --- a/src/cmds/restic/cmd_list.go +++ b/src/cmds/restic/cmd_list.go @@ -40,20 +40,6 @@ func (cmd CmdList) Execute(args []string) error { var t restic.FileType switch args[0] { - // case "blobs": - // restic.Lister - // err = repo.LoadIndex() - // if err != nil { - // return err - // } - - // for _, idx := range repo.Index().All() { - // for blob := range idx.Each(nil) { - // cmd.global.Printf("%s\n", blob.ID) - // } - // } - - // return nil case "packs": t = restic.DataFile case "index": From debf1fce5426186b2e98ac81bed3b639262ba125 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Thu, 1 Sep 2016 21:37:59 +0200 Subject: [PATCH 17/40] Remove IDSize, TestRandomID -> NewRandomID --- src/restic/backend_find.go | 5 +++-- src/restic/config.go | 23 ++--------------------- src/restic/id.go | 21 +++++++++++++++++---- src/restic/pack/pack.go | 4 ++-- src/restic/pack/pack_test.go | 2 +- src/restic/repository/index_test.go | 18 +++++++++--------- src/restic/testing.go | 12 ------------ 7 files changed, 34 insertions(+), 51 deletions(-) diff --git a/src/restic/backend_find.go b/src/restic/backend_find.go index 6ab6427fb..d788e6797 100644 --- a/src/restic/backend_find.go +++ b/src/restic/backend_find.go @@ -52,8 +52,9 @@ func PrefixLength(be Lister, t FileType) (int, error) { } // select prefixes of length l, test if the last one is the same as the current one + id := ID{} outer: - for l := minPrefixLength; l < IDSize; l++ { + for l := minPrefixLength; l < len(id); l++ { var last string for _, name := range list { @@ -66,5 +67,5 @@ outer: return l, nil } - return IDSize, nil + return len(id), nil } diff --git a/src/restic/config.go b/src/restic/config.go index d2e9bd39d..5d1699295 100644 --- a/src/restic/config.go +++ b/src/restic/config.go @@ -1,10 +1,6 @@ package restic import ( - "crypto/rand" - "crypto/sha256" - "encoding/hex" - "io" "testing" "github.com/pkg/errors" @@ -21,9 +17,6 @@ type Config struct { ChunkerPolynomial chunker.Pol `json:"chunker_polynomial"` } -// repositoryIDSize is the length of the ID chosen at random for a new repository. -const repositoryIDSize = sha256.Size - // RepoVersion is the version that is written to the config when a repository // is newly created with Init(). 
const RepoVersion = 1 @@ -51,13 +44,7 @@ func CreateConfig() (Config, error) { return Config{}, errors.Wrap(err, "chunker.RandomPolynomial") } - newID := make([]byte, repositoryIDSize) - _, err = io.ReadFull(rand.Reader, newID) - if err != nil { - return Config{}, errors.Wrap(err, "io.ReadFull") - } - - cfg.ID = hex.EncodeToString(newID) + cfg.ID = NewRandomID().String() cfg.Version = RepoVersion debug.Log("Repo.CreateConfig", "New config: %#v", cfg) @@ -68,13 +55,7 @@ func CreateConfig() (Config, error) { func TestCreateConfig(t testing.TB, pol chunker.Pol) (cfg Config) { cfg.ChunkerPolynomial = pol - newID := make([]byte, repositoryIDSize) - _, err := io.ReadFull(rand.Reader, newID) - if err != nil { - t.Fatalf("unable to create random ID: %v", err) - } - - cfg.ID = hex.EncodeToString(newID) + cfg.ID = NewRandomID().String() cfg.Version = RepoVersion return cfg diff --git a/src/restic/id.go b/src/restic/id.go index 2e9308888..5a1f4ab6d 100644 --- a/src/restic/id.go +++ b/src/restic/id.go @@ -2,9 +2,11 @@ package restic import ( "bytes" + "crypto/rand" "crypto/sha256" "encoding/hex" "encoding/json" + "io" "github.com/pkg/errors" ) @@ -14,11 +16,11 @@ func Hash(data []byte) ID { return sha256.Sum256(data) } -// IDSize contains the size of an ID, in bytes. -const IDSize = sha256.Size +// idSize contains the size of an ID, in bytes. +const idSize = sha256.Size // ID references content within a repository. -type ID [IDSize]byte +type ID [idSize]byte // ParseID converts the given string to an ID. func ParseID(s string) (ID, error) { @@ -28,7 +30,7 @@ func ParseID(s string) (ID, error) { return ID{}, errors.Wrap(err, "hex.DecodeString") } - if len(b) != IDSize { + if len(b) != idSize { return ID{}, errors.New("invalid length for hash") } @@ -42,6 +44,17 @@ func (id ID) String() string { return hex.EncodeToString(id[:]) } +// NewRandomID retuns a randomly generated ID. When reading from rand fails, +// the function panics. +func NewRandomID() ID { + id := ID{} + _, err := io.ReadFull(rand.Reader, id[:]) + if err != nil { + panic(err) + } + return id +} + const shortStr = 4 // Str returns the shortened string version of id. diff --git a/src/restic/pack/pack.go b/src/restic/pack/pack.go index e100578c0..a1e62d543 100644 --- a/src/restic/pack/pack.go +++ b/src/restic/pack/pack.go @@ -50,13 +50,13 @@ func (p *Packer) Add(t restic.BlobType, id restic.ID, data []byte) (int, error) return n, errors.Wrap(err, "Write") } -var entrySize = uint(binary.Size(restic.BlobType(0)) + binary.Size(uint32(0)) + restic.IDSize) +var entrySize = uint(binary.Size(restic.BlobType(0)) + binary.Size(uint32(0)) + len(restic.ID{})) // headerEntry is used with encoding/binary to read and write header entries type headerEntry struct { Type uint8 Length uint32 - ID [restic.IDSize]byte + ID restic.ID } // Finalize writes the header for all added blobs and finalizes the pack. 
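
The net effect of this patch for callers: the exported IDSize constant disappears, so code outside the package derives the length from the ID type itself (as backend_find.go and pack.go now do with len(restic.ID{})), and random IDs come from NewRandomID instead of the former test-only helper. The following is a minimal illustrative sketch of the resulting call sites, not part of the patch itself; it assumes the repository's GOPATH-style "restic" import path and a throwaway main package.

package main

import (
	"fmt"

	"restic"
)

func main() {
	// IDSize is no longer exported; the length is taken from the type.
	idLen := len(restic.ID{}) // 32, i.e. sha256.Size

	// TestRandomID was renamed to NewRandomID and moved out of the test
	// helpers, so non-test code such as CreateConfig can use it as well.
	id := restic.NewRandomID()

	fmt.Printf("ID length: %d bytes, new ID: %v (short: %v)\n", idLen, id.String(), id.Str())
}
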
diff --git a/src/restic/pack/pack_test.go b/src/restic/pack/pack_test.go index 8a21785df..f90d1a426 100644 --- a/src/restic/pack/pack_test.go +++ b/src/restic/pack/pack_test.go @@ -55,7 +55,7 @@ func verifyBlobs(t testing.TB, bufs []Buf, k *crypto.Key, rd io.ReaderAt, packSi // header length written += binary.Size(uint32(0)) // header - written += len(bufs) * (binary.Size(restic.BlobType(0)) + binary.Size(uint32(0)) + restic.IDSize) + written += len(bufs) * (binary.Size(restic.BlobType(0)) + binary.Size(uint32(0)) + len(restic.ID{})) // header crypto written += crypto.Extension diff --git a/src/restic/repository/index_test.go b/src/restic/repository/index_test.go index 65e8cdbd7..243104547 100644 --- a/src/restic/repository/index_test.go +++ b/src/restic/repository/index_test.go @@ -22,11 +22,11 @@ func TestIndexSerialize(t *testing.T) { // create 50 packs with 20 blobs each for i := 0; i < 50; i++ { - packID := restic.TestRandomID() + packID := restic.NewRandomID() pos := uint(0) for j := 0; j < 20; j++ { - id := restic.TestRandomID() + id := restic.NewRandomID() length := uint(i*100 + j) idx.Store(restic.PackedBlob{ Blob: restic.Blob{ @@ -94,11 +94,11 @@ func TestIndexSerialize(t *testing.T) { // add more blobs to idx newtests := []testEntry{} for i := 0; i < 10; i++ { - packID := restic.TestRandomID() + packID := restic.NewRandomID() pos := uint(0) for j := 0; j < 10; j++ { - id := restic.TestRandomID() + id := restic.NewRandomID() length := uint(i*100 + j) idx.Store(restic.PackedBlob{ Blob: restic.Blob{ @@ -130,7 +130,7 @@ func TestIndexSerialize(t *testing.T) { Assert(t, idx.Final(), "index not final after encoding") - id := restic.TestRandomID() + id := restic.NewRandomID() OK(t, idx.SetID(id)) id2, err := idx.ID() Assert(t, id2.Equal(id), @@ -167,11 +167,11 @@ func TestIndexSize(t *testing.T) { packs := 200 blobs := 100 for i := 0; i < packs; i++ { - packID := restic.TestRandomID() + packID := restic.NewRandomID() pos := uint(0) for j := 0; j < blobs; j++ { - id := restic.TestRandomID() + id := restic.NewRandomID() length := uint(i*100 + j) idx.Store(restic.PackedBlob{ Blob: restic.Blob{ @@ -353,11 +353,11 @@ func TestIndexPacks(t *testing.T) { packs := restic.NewIDSet() for i := 0; i < 20; i++ { - packID := restic.TestRandomID() + packID := restic.NewRandomID() idx.Store(restic.PackedBlob{ Blob: restic.Blob{ Type: restic.DataBlob, - ID: restic.TestRandomID(), + ID: restic.NewRandomID(), Offset: 0, Length: 23, }, diff --git a/src/restic/testing.go b/src/restic/testing.go index a28f98d7d..1be705bb4 100644 --- a/src/restic/testing.go +++ b/src/restic/testing.go @@ -1,7 +1,6 @@ package restic import ( - crand "crypto/rand" "encoding/json" "fmt" "io" @@ -201,14 +200,3 @@ func TestParseID(s string) ID { return id } - -// TestRandomID retuns a randomly generated ID. When reading from rand fails, -// the function panics. -func TestRandomID() ID { - id := ID{} - _, err := io.ReadFull(crand.Reader, id[:]) - if err != nil { - panic(err) - } - return id -} From 5d7b38cabf11c3ec3b781ccf73c5e8d9c92c377b Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Thu, 1 Sep 2016 22:08:45 +0200 Subject: [PATCH 18/40] Remove sentinel errors --- src/restic/tree.go | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/src/restic/tree.go b/src/restic/tree.go index 604fac39c..94e51b322 100644 --- a/src/restic/tree.go +++ b/src/restic/tree.go @@ -9,15 +9,12 @@ import ( "restic/debug" ) +// Tree is an ordered list of nodes. 
type Tree struct { Nodes []*Node `json:"nodes"` } -var ( - ErrNodeNotFound = errors.New("named node not found") - ErrNodeAlreadyInTree = errors.New("node already present") -) - +// NewTree creates a new tree object. func NewTree() *Tree { return &Tree{ Nodes: []*Node{}, @@ -61,10 +58,11 @@ func (t Tree) Equals(other *Tree) bool { return true } +// Insert adds a new node at the correct place in the tree. func (t *Tree) Insert(node *Node) error { pos, _, err := t.binarySearch(node.Name) if err == nil { - return ErrNodeAlreadyInTree + return errors.New("node already present") } // https://code.google.com/p/go-wiki/wiki/SliceTricks @@ -84,7 +82,7 @@ func (t Tree) binarySearch(name string) (int, *Node, error) { return pos, t.Nodes[pos], nil } - return pos, nil, ErrNodeNotFound + return pos, nil, errors.New("named node not found") } func (t Tree) Find(name string) (*Node, error) { From 765b5437bd0ebee4eece9b94a1c64911bb1d58b2 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Thu, 1 Sep 2016 22:09:34 +0200 Subject: [PATCH 19/40] Fix command 'dump' --- src/cmds/restic/cmd_dump.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/cmds/restic/cmd_dump.go b/src/cmds/restic/cmd_dump.go index fb4389f66..5dc0247e0 100644 --- a/src/cmds/restic/cmd_dump.go +++ b/src/cmds/restic/cmd_dump.go @@ -91,7 +91,7 @@ func printPacks(repo *repository.Repository, wr io.Writer) error { f := func(job worker.Job, done <-chan struct{}) (interface{}, error) { name := job.Data.(string) - h := restic.Handle{FileType: restic.DataFile, Name: name} + h := restic.Handle{Type: restic.DataFile, Name: name} blobInfo, err := repo.Backend().Stat(h) if err != nil { From bc42dbdf875fa8d2f62b1d4b899264132234fdff Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Thu, 1 Sep 2016 22:17:37 +0200 Subject: [PATCH 20/40] Create package restic/errors --- src/cmds/restic/cmd_backup.go | 10 ++++---- src/cmds/restic/cmd_cat.go | 9 ++++---- src/cmds/restic/cmd_check.go | 7 +++--- src/cmds/restic/cmd_dump.go | 5 ++-- src/cmds/restic/cmd_find.go | 7 +++--- src/cmds/restic/cmd_init.go | 4 ++-- src/cmds/restic/cmd_key.go | 9 ++++---- src/cmds/restic/cmd_list.go | 9 +++++--- src/cmds/restic/cmd_ls.go | 3 ++- src/cmds/restic/cmd_mount.go | 5 ++-- src/cmds/restic/cmd_prune.go | 3 ++- src/cmds/restic/cmd_restore.go | 7 +++--- src/cmds/restic/cmd_snapshots.go | 3 ++- src/cmds/restic/global.go | 11 +++++---- src/cmds/restic/integration_fuse_test.go | 4 ++-- src/cmds/restic/integration_test.go | 4 ++-- src/cmds/restic/main.go | 5 ++-- src/restic/archiver/archive_reader.go | 2 +- src/restic/archiver/archiver.go | 2 +- .../archiver/archiver_duplication_test.go | 2 +- src/restic/archiver/archiver_test.go | 2 +- src/restic/backend/local/config.go | 2 +- src/restic/backend/local/local.go | 2 +- src/restic/backend/mem/mem_backend.go | 2 +- src/restic/backend/mem/mem_backend_test.go | 2 +- src/restic/backend/rest/config.go | 2 +- src/restic/backend/rest/rest.go | 2 +- src/restic/backend/rest/rest_test.go | 2 +- src/restic/backend/s3/config.go | 2 +- src/restic/backend/s3/s3.go | 2 +- src/restic/backend/s3/s3_test.go | 2 +- src/restic/backend/sftp/config.go | 2 +- src/restic/backend/sftp/sftp.go | 2 +- src/restic/backend/sftp/sftp_backend_test.go | 2 +- src/restic/backend/test/tests.go | 2 +- src/restic/backend/test/tests_test.go | 2 +- src/restic/backend/utils.go | 2 +- src/restic/backend_find.go | 2 +- src/restic/blob.go | 2 +- src/restic/checker/checker.go | 2 +- src/restic/config.go | 2 +- src/restic/crypto/crypto.go | 2 +- 
src/restic/crypto/kdf.go | 2 +- src/restic/debug/debug.go | 2 +- src/restic/errors/doc.go | 2 ++ src/restic/{errors.go => errors/fatal.go} | 2 +- src/restic/errors/wrap.go | 23 +++++++++++++++++++ src/restic/file.go | 2 +- src/restic/filter/filter.go | 2 +- src/restic/fs/file_linux.go | 2 +- src/restic/fuse/file.go | 2 +- src/restic/fuse/file_test.go | 2 +- src/restic/id.go | 2 +- src/restic/index/index.go | 2 +- src/restic/lock.go | 2 +- src/restic/lock_unix.go | 2 +- src/restic/mock/backend.go | 2 +- src/restic/node.go | 2 +- src/restic/node_linux.go | 2 +- src/restic/node_windows.go | 2 +- src/restic/pack/pack.go | 2 +- src/restic/pipe/pipe.go | 2 +- src/restic/rand_reader.go | 2 +- src/restic/repository/index.go | 2 +- src/restic/repository/key.go | 2 +- src/restic/repository/master_index.go | 2 +- src/restic/repository/packer_manager.go | 2 +- src/restic/repository/parallel_test.go | 2 +- src/restic/repository/repack.go | 2 +- src/restic/repository/repository.go | 2 +- src/restic/restorer.go | 2 +- src/restic/snapshot.go | 2 +- src/restic/testing.go | 2 +- src/restic/tree.go | 2 +- src/restic/worker/pool_test.go | 2 +- 75 files changed, 140 insertions(+), 102 deletions(-) create mode 100644 src/restic/errors/doc.go rename src/restic/{errors.go => errors/fatal.go} (98%) create mode 100644 src/restic/errors/wrap.go diff --git a/src/cmds/restic/cmd_backup.go b/src/cmds/restic/cmd_backup.go index 38f6fcf70..dc07be01d 100644 --- a/src/cmds/restic/cmd_backup.go +++ b/src/cmds/restic/cmd_backup.go @@ -13,7 +13,7 @@ import ( "strings" "time" - "github.com/pkg/errors" + "restic/errors" "golang.org/x/crypto/ssh/terminal" ) @@ -232,7 +232,7 @@ func filterExisting(items []string) (result []string, err error) { } if len(result) == 0 { - return nil, restic.Fatal("all target directories/files do not exist") + return nil, errors.Fatal("all target directories/files do not exist") } return @@ -240,7 +240,7 @@ func filterExisting(items []string) (result []string, err error) { func (cmd CmdBackup) readFromStdin(args []string) error { if len(args) != 0 { - return restic.Fatalf("when reading from stdin, no additional files can be specified") + return errors.Fatalf("when reading from stdin, no additional files can be specified") } repo, err := cmd.global.OpenRepository() @@ -274,7 +274,7 @@ func (cmd CmdBackup) Execute(args []string) error { } if len(args) == 0 { - return restic.Fatalf("wrong number of parameters, Usage: %s", cmd.Usage()) + return errors.Fatalf("wrong number of parameters, Usage: %s", cmd.Usage()) } target := make([]string, 0, len(args)) @@ -312,7 +312,7 @@ func (cmd CmdBackup) Execute(args []string) error { if !cmd.Force && cmd.Parent != "" { id, err := restic.FindSnapshot(repo, cmd.Parent) if err != nil { - return restic.Fatalf("invalid id %q: %v", cmd.Parent, err) + return errors.Fatalf("invalid id %q: %v", cmd.Parent, err) } parentSnapshotID = &id diff --git a/src/cmds/restic/cmd_cat.go b/src/cmds/restic/cmd_cat.go index f99d93e7e..51fb2f69f 100644 --- a/src/cmds/restic/cmd_cat.go +++ b/src/cmds/restic/cmd_cat.go @@ -8,6 +8,7 @@ import ( "restic" "restic/backend" "restic/debug" + "restic/errors" "restic/repository" ) @@ -31,7 +32,7 @@ func (cmd CmdCat) Usage() string { func (cmd CmdCat) Execute(args []string) error { if len(args) < 1 || (args[0] != "masterkey" && args[0] != "config" && len(args) != 2) { - return restic.Fatalf("type or ID not specified, Usage: %s", cmd.Usage()) + return errors.Fatalf("type or ID not specified, Usage: %s", cmd.Usage()) } repo, err := 
cmd.global.OpenRepository() @@ -52,7 +53,7 @@ func (cmd CmdCat) Execute(args []string) error { id, err = restic.ParseID(args[1]) if err != nil { if tpe != "snapshot" { - return restic.Fatalf("unable to parse ID: %v\n", err) + return errors.Fatalf("unable to parse ID: %v\n", err) } // find snapshot id with prefix @@ -181,7 +182,7 @@ func (cmd CmdCat) Execute(args []string) error { return err } - return restic.Fatal("blob not found") + return errors.Fatal("blob not found") case "tree": debug.Log("cat", "cat tree %v", id.Str()) @@ -202,6 +203,6 @@ func (cmd CmdCat) Execute(args []string) error { return nil default: - return restic.Fatal("invalid type") + return errors.Fatal("invalid type") } } diff --git a/src/cmds/restic/cmd_check.go b/src/cmds/restic/cmd_check.go index a151fc93b..cd2fc409b 100644 --- a/src/cmds/restic/cmd_check.go +++ b/src/cmds/restic/cmd_check.go @@ -9,6 +9,7 @@ import ( "restic" "restic/checker" + "restic/errors" ) type CmdCheck struct { @@ -65,7 +66,7 @@ func (cmd CmdCheck) newReadProgress(todo restic.Stat) *restic.Progress { func (cmd CmdCheck) Execute(args []string) error { if len(args) != 0 { - return restic.Fatal("check has no arguments") + return errors.Fatal("check has no arguments") } repo, err := cmd.global.OpenRepository() @@ -103,7 +104,7 @@ func (cmd CmdCheck) Execute(args []string) error { for _, err := range errs { cmd.global.Warnf("error: %v\n", err) } - return restic.Fatal("LoadIndex returned errors") + return errors.Fatal("LoadIndex returned errors") } done := make(chan struct{}) @@ -158,7 +159,7 @@ func (cmd CmdCheck) Execute(args []string) error { } if errorsFound { - return restic.Fatal("repository contains errors") + return errors.Fatal("repository contains errors") } return nil } diff --git a/src/cmds/restic/cmd_dump.go b/src/cmds/restic/cmd_dump.go index 5dc0247e0..f29aff905 100644 --- a/src/cmds/restic/cmd_dump.go +++ b/src/cmds/restic/cmd_dump.go @@ -9,6 +9,7 @@ import ( "os" "restic" + "restic/errors" "restic/pack" "restic/repository" @@ -170,7 +171,7 @@ func (cmd CmdDump) DumpIndexes() error { func (cmd CmdDump) Execute(args []string) error { if len(args) != 1 { - return restic.Fatalf("type not specified, Usage: %s", cmd.Usage()) + return errors.Fatalf("type not specified, Usage: %s", cmd.Usage()) } repo, err := cmd.global.OpenRepository() @@ -214,6 +215,6 @@ func (cmd CmdDump) Execute(args []string) error { return nil default: - return restic.Fatalf("no such type %q", tpe) + return errors.Fatalf("no such type %q", tpe) } } diff --git a/src/cmds/restic/cmd_find.go b/src/cmds/restic/cmd_find.go index 96d31dd6f..24258d510 100644 --- a/src/cmds/restic/cmd_find.go +++ b/src/cmds/restic/cmd_find.go @@ -6,6 +6,7 @@ import ( "restic" "restic/debug" + "restic/errors" "restic/repository" ) @@ -55,7 +56,7 @@ func parseTime(str string) (time.Time, error) { } } - return time.Time{}, restic.Fatalf("unable to parse time: %q", str) + return time.Time{}, errors.Fatalf("unable to parse time: %q", str) } func (c CmdFind) findInTree(repo *repository.Repository, id restic.ID, path string) ([]findResult, error) { @@ -135,7 +136,7 @@ func (CmdFind) Usage() string { func (c CmdFind) Execute(args []string) error { if len(args) != 1 { - return restic.Fatalf("wrong number of arguments, Usage: %s", c.Usage()) + return errors.Fatalf("wrong number of arguments, Usage: %s", c.Usage()) } var err error @@ -175,7 +176,7 @@ func (c CmdFind) Execute(args []string) error { if c.Snapshot != "" { snapshotID, err := restic.FindSnapshot(repo, c.Snapshot) if err != nil { - return 
restic.Fatalf("invalid id %q: %v", args[1], err) + return errors.Fatalf("invalid id %q: %v", args[1], err) } return c.findInSnapshot(repo, snapshotID) diff --git a/src/cmds/restic/cmd_init.go b/src/cmds/restic/cmd_init.go index 3e68ebdea..39b8cd2fd 100644 --- a/src/cmds/restic/cmd_init.go +++ b/src/cmds/restic/cmd_init.go @@ -1,7 +1,7 @@ package main import ( - "restic" + "restic/errors" "restic/repository" ) @@ -11,7 +11,7 @@ type CmdInit struct { func (cmd CmdInit) Execute(args []string) error { if cmd.global.Repo == "" { - return restic.Fatal("Please specify repository location (-r)") + return errors.Fatal("Please specify repository location (-r)") } be, err := create(cmd.global.Repo) diff --git a/src/cmds/restic/cmd_key.go b/src/cmds/restic/cmd_key.go index 629f5ddf0..848018150 100644 --- a/src/cmds/restic/cmd_key.go +++ b/src/cmds/restic/cmd_key.go @@ -4,6 +4,7 @@ import ( "fmt" "restic" + "restic/errors" "restic/repository" ) @@ -68,7 +69,7 @@ func (cmd CmdKey) getNewPassword() string { func (cmd CmdKey) addKey(repo *repository.Repository) error { id, err := repository.AddKey(repo, cmd.getNewPassword(), repo.Key()) if err != nil { - return restic.Fatalf("creating new key failed: %v\n", err) + return errors.Fatalf("creating new key failed: %v\n", err) } cmd.global.Verbosef("saved new key as %s\n", id) @@ -78,7 +79,7 @@ func (cmd CmdKey) addKey(repo *repository.Repository) error { func (cmd CmdKey) deleteKey(repo *repository.Repository, name string) error { if name == repo.KeyName() { - return restic.Fatal("refusing to remove key currently used to access repository") + return errors.Fatal("refusing to remove key currently used to access repository") } err := repo.Backend().Remove(restic.KeyFile, name) @@ -93,7 +94,7 @@ func (cmd CmdKey) deleteKey(repo *repository.Repository, name string) error { func (cmd CmdKey) changePassword(repo *repository.Repository) error { id, err := repository.AddKey(repo, cmd.getNewPassword(), repo.Key()) if err != nil { - return restic.Fatalf("creating new key failed: %v\n", err) + return errors.Fatalf("creating new key failed: %v\n", err) } err = repo.Backend().Remove(restic.KeyFile, repo.KeyName()) @@ -112,7 +113,7 @@ func (cmd CmdKey) Usage() string { func (cmd CmdKey) Execute(args []string) error { if len(args) < 1 || (args[0] == "rm" && len(args) != 2) { - return restic.Fatalf("wrong number of arguments, Usage: %s", cmd.Usage()) + return errors.Fatalf("wrong number of arguments, Usage: %s", cmd.Usage()) } repo, err := cmd.global.OpenRepository() diff --git a/src/cmds/restic/cmd_list.go b/src/cmds/restic/cmd_list.go index b208e3ee3..a17d5ce64 100644 --- a/src/cmds/restic/cmd_list.go +++ b/src/cmds/restic/cmd_list.go @@ -1,6 +1,9 @@ package main -import "restic" +import ( + "restic" + "restic/errors" +) type CmdList struct { global *GlobalOptions @@ -22,7 +25,7 @@ func (cmd CmdList) Usage() string { func (cmd CmdList) Execute(args []string) error { if len(args) != 1 { - return restic.Fatalf("type not specified, Usage: %s", cmd.Usage()) + return errors.Fatalf("type not specified, Usage: %s", cmd.Usage()) } repo, err := cmd.global.OpenRepository() @@ -51,7 +54,7 @@ func (cmd CmdList) Execute(args []string) error { case "locks": t = restic.LockFile default: - return restic.Fatal("invalid type") + return errors.Fatal("invalid type") } for id := range repo.List(t, nil) { diff --git a/src/cmds/restic/cmd_ls.go b/src/cmds/restic/cmd_ls.go index d875e58e6..733f424b6 100644 --- a/src/cmds/restic/cmd_ls.go +++ b/src/cmds/restic/cmd_ls.go @@ -6,6 +6,7 @@ import ( 
"path/filepath" "restic" + "restic/errors" "restic/repository" ) @@ -71,7 +72,7 @@ func (cmd CmdLs) Usage() string { func (cmd CmdLs) Execute(args []string) error { if len(args) < 1 || len(args) > 2 { - return restic.Fatalf("wrong number of arguments, Usage: %s", cmd.Usage()) + return errors.Fatalf("wrong number of arguments, Usage: %s", cmd.Usage()) } repo, err := cmd.global.OpenRepository() diff --git a/src/cmds/restic/cmd_mount.go b/src/cmds/restic/cmd_mount.go index 36a2ce997..e25306e31 100644 --- a/src/cmds/restic/cmd_mount.go +++ b/src/cmds/restic/cmd_mount.go @@ -5,9 +5,8 @@ package main import ( "os" - "restic" - "github.com/pkg/errors" + "restic/errors" resticfs "restic/fs" "restic/fuse" @@ -44,7 +43,7 @@ func (cmd CmdMount) Usage() string { func (cmd CmdMount) Execute(args []string) error { if len(args) == 0 { - return restic.Fatalf("wrong number of parameters, Usage: %s", cmd.Usage()) + return errors.Fatalf("wrong number of parameters, Usage: %s", cmd.Usage()) } repo, err := cmd.global.OpenRepository() diff --git a/src/cmds/restic/cmd_prune.go b/src/cmds/restic/cmd_prune.go index c21cab16a..4fa8ba0d3 100644 --- a/src/cmds/restic/cmd_prune.go +++ b/src/cmds/restic/cmd_prune.go @@ -5,6 +5,7 @@ import ( "os" "restic" "restic/debug" + "restic/errors" "restic/index" "restic/repository" "time" @@ -189,7 +190,7 @@ nextPack: removePacks.Insert(packID) if !rewritePacks.Has(packID) { - return restic.Fatalf("pack %v is unneeded, but not contained in rewritePacks", packID.Str()) + return errors.Fatalf("pack %v is unneeded, but not contained in rewritePacks", packID.Str()) } rewritePacks.Delete(packID) diff --git a/src/cmds/restic/cmd_restore.go b/src/cmds/restic/cmd_restore.go index 1a55cdb4e..88099b677 100644 --- a/src/cmds/restic/cmd_restore.go +++ b/src/cmds/restic/cmd_restore.go @@ -3,6 +3,7 @@ package main import ( "restic" "restic/debug" + "restic/errors" "restic/filter" ) @@ -32,15 +33,15 @@ func (cmd CmdRestore) Usage() string { func (cmd CmdRestore) Execute(args []string) error { if len(args) != 1 { - return restic.Fatalf("wrong number of arguments, Usage: %s", cmd.Usage()) + return errors.Fatalf("wrong number of arguments, Usage: %s", cmd.Usage()) } if cmd.Target == "" { - return restic.Fatal("please specify a directory to restore to (--target)") + return errors.Fatal("please specify a directory to restore to (--target)") } if len(cmd.Exclude) > 0 && len(cmd.Include) > 0 { - return restic.Fatal("exclude and include patterns are mutually exclusive") + return errors.Fatal("exclude and include patterns are mutually exclusive") } snapshotIDString := args[0] diff --git a/src/cmds/restic/cmd_snapshots.go b/src/cmds/restic/cmd_snapshots.go index 23f9eb709..d7bc4e65d 100644 --- a/src/cmds/restic/cmd_snapshots.go +++ b/src/cmds/restic/cmd_snapshots.go @@ -5,6 +5,7 @@ import ( "fmt" "io" "os" + "restic/errors" "sort" "strings" @@ -69,7 +70,7 @@ func (cmd CmdSnapshots) Usage() string { func (cmd CmdSnapshots) Execute(args []string) error { if len(args) != 0 { - return restic.Fatalf("wrong number of arguments, usage: %s", cmd.Usage()) + return errors.Fatalf("wrong number of arguments, usage: %s", cmd.Usage()) } repo, err := cmd.global.OpenRepository() diff --git a/src/cmds/restic/global.go b/src/cmds/restic/global.go index b7eff3e41..ee4255f7b 100644 --- a/src/cmds/restic/global.go +++ b/src/cmds/restic/global.go @@ -17,8 +17,9 @@ import ( "restic/location" "restic/repository" + "restic/errors" + "github.com/jessevdk/go-flags" - "github.com/pkg/errors" "golang.org/x/crypto/ssh/terminal" ) @@ 
-246,7 +247,7 @@ const maxKeys = 20 // OpenRepository reads the password and opens the repository. func (o GlobalOptions) OpenRepository() (*repository.Repository, error) { if o.Repo == "" { - return nil, restic.Fatal("Please specify repository location (-r)") + return nil, errors.Fatal("Please specify repository location (-r)") } be, err := open(o.Repo) @@ -262,7 +263,7 @@ func (o GlobalOptions) OpenRepository() (*repository.Repository, error) { err = s.SearchKey(o.password, maxKeys) if err != nil { - return nil, restic.Fatalf("unable to open repo: %v", err) + return nil, errors.Fatalf("unable to open repo: %v", err) } return s, nil @@ -300,7 +301,7 @@ func open(s string) (restic.Backend, error) { } debug.Log("open", "invalid repository location: %v", s) - return nil, restic.Fatalf("invalid scheme %q", loc.Scheme) + return nil, errors.Fatalf("invalid scheme %q", loc.Scheme) } // Create the backend specified by URI. @@ -335,5 +336,5 @@ func create(s string) (restic.Backend, error) { } debug.Log("open", "invalid repository scheme: %v", s) - return nil, restic.Fatalf("invalid scheme %q", loc.Scheme) + return nil, errors.Fatalf("invalid scheme %q", loc.Scheme) } diff --git a/src/cmds/restic/integration_fuse_test.go b/src/cmds/restic/integration_fuse_test.go index 857f67361..a106d035d 100644 --- a/src/cmds/restic/integration_fuse_test.go +++ b/src/cmds/restic/integration_fuse_test.go @@ -10,7 +10,7 @@ import ( "testing" "time" - "github.com/pkg/errors" + "restic/errors" "restic" "restic/repository" @@ -50,7 +50,7 @@ func waitForMount(dir string) error { time.Sleep(mountSleep) } - return restic.Fatalf("subdir %q of dir %s never appeared", mountTestSubdir, dir) + return errors.Fatalf("subdir %q of dir %s never appeared", mountTestSubdir, dir) } func cmdMount(t testing.TB, global GlobalOptions, dir string, ready, done chan struct{}) { diff --git a/src/cmds/restic/integration_test.go b/src/cmds/restic/integration_test.go index 3a3c53e6a..4a737a6a9 100644 --- a/src/cmds/restic/integration_test.go +++ b/src/cmds/restic/integration_test.go @@ -16,7 +16,7 @@ import ( "testing" "time" - "github.com/pkg/errors" + "restic/errors" "restic/debug" "restic/filter" @@ -582,7 +582,7 @@ func testFileSize(filename string, size int64) error { } if fi.Size() != size { - return restic.Fatalf("wrong file size for %v: expected %v, got %v", filename, size, fi.Size()) + return errors.Fatalf("wrong file size for %v: expected %v, got %v", filename, size, fi.Size()) } return nil diff --git a/src/cmds/restic/main.go b/src/cmds/restic/main.go index 5ad0ab128..6477c1e62 100644 --- a/src/cmds/restic/main.go +++ b/src/cmds/restic/main.go @@ -7,8 +7,9 @@ import ( "restic/debug" "runtime" + "restic/errors" + "github.com/jessevdk/go-flags" - "github.com/pkg/errors" ) func init() { @@ -42,7 +43,7 @@ func main() { switch { case restic.IsAlreadyLocked(errors.Cause(err)): fmt.Fprintf(os.Stderr, "%v\nthe `unlock` command can be used to remove stale locks\n", err) - case restic.IsFatal(errors.Cause(err)): + case errors.IsFatal(errors.Cause(err)): fmt.Fprintf(os.Stderr, "%v\n", err) case err != nil: fmt.Fprintf(os.Stderr, "%+v\n", err) diff --git a/src/restic/archiver/archive_reader.go b/src/restic/archiver/archive_reader.go index 0ddefd151..2a184ee4b 100644 --- a/src/restic/archiver/archive_reader.go +++ b/src/restic/archiver/archive_reader.go @@ -7,8 +7,8 @@ import ( "restic/debug" "time" - "github.com/pkg/errors" "github.com/restic/chunker" + "restic/errors" ) // saveTreeJSON stores a tree in the repository. 
diff --git a/src/restic/archiver/archiver.go b/src/restic/archiver/archiver.go index 9cbb34a06..99c91a394 100644 --- a/src/restic/archiver/archiver.go +++ b/src/restic/archiver/archiver.go @@ -11,7 +11,7 @@ import ( "sync" "time" - "github.com/pkg/errors" + "restic/errors" "restic/debug" "restic/fs" diff --git a/src/restic/archiver/archiver_duplication_test.go b/src/restic/archiver/archiver_duplication_test.go index 9f0867d5e..aadfc5904 100644 --- a/src/restic/archiver/archiver_duplication_test.go +++ b/src/restic/archiver/archiver_duplication_test.go @@ -8,7 +8,7 @@ import ( "testing" "time" - "github.com/pkg/errors" + "restic/errors" "restic" "restic/archiver" diff --git a/src/restic/archiver/archiver_test.go b/src/restic/archiver/archiver_test.go index 176a01802..1d43254cb 100644 --- a/src/restic/archiver/archiver_test.go +++ b/src/restic/archiver/archiver_test.go @@ -12,8 +12,8 @@ import ( "restic/crypto" . "restic/test" - "github.com/pkg/errors" "github.com/restic/chunker" + "restic/errors" ) var testPol = chunker.Pol(0x3DA3358B4DC173) diff --git a/src/restic/backend/local/config.go b/src/restic/backend/local/config.go index a430f9dec..8a5c67a2c 100644 --- a/src/restic/backend/local/config.go +++ b/src/restic/backend/local/config.go @@ -3,7 +3,7 @@ package local import ( "strings" - "github.com/pkg/errors" + "restic/errors" ) // ParseConfig parses a local backend config. diff --git a/src/restic/backend/local/local.go b/src/restic/backend/local/local.go index 4cfd4de95..1b76e31e5 100644 --- a/src/restic/backend/local/local.go +++ b/src/restic/backend/local/local.go @@ -7,7 +7,7 @@ import ( "path/filepath" "restic" - "github.com/pkg/errors" + "restic/errors" "restic/backend" "restic/debug" diff --git a/src/restic/backend/mem/mem_backend.go b/src/restic/backend/mem/mem_backend.go index 4d1ac49ae..d8885de49 100644 --- a/src/restic/backend/mem/mem_backend.go +++ b/src/restic/backend/mem/mem_backend.go @@ -5,7 +5,7 @@ import ( "restic" "sync" - "github.com/pkg/errors" + "restic/errors" "restic/debug" ) diff --git a/src/restic/backend/mem/mem_backend_test.go b/src/restic/backend/mem/mem_backend_test.go index 310f4b915..75b65f4c7 100644 --- a/src/restic/backend/mem/mem_backend_test.go +++ b/src/restic/backend/mem/mem_backend_test.go @@ -3,7 +3,7 @@ package mem_test import ( "restic" - "github.com/pkg/errors" + "restic/errors" "restic/backend/mem" "restic/backend/test" diff --git a/src/restic/backend/rest/config.go b/src/restic/backend/rest/config.go index e59031071..929fda120 100644 --- a/src/restic/backend/rest/config.go +++ b/src/restic/backend/rest/config.go @@ -4,7 +4,7 @@ import ( "net/url" "strings" - "github.com/pkg/errors" + "restic/errors" ) // Config contains all configuration necessary to connect to a REST server. 
diff --git a/src/restic/backend/rest/rest.go b/src/restic/backend/rest/rest.go index 040faac97..ce1d25db9 100644 --- a/src/restic/backend/rest/rest.go +++ b/src/restic/backend/rest/rest.go @@ -11,7 +11,7 @@ import ( "restic" "strings" - "github.com/pkg/errors" + "restic/errors" "restic/backend" ) diff --git a/src/restic/backend/rest/rest_test.go b/src/restic/backend/rest/rest_test.go index 81c64f480..2e7095b29 100644 --- a/src/restic/backend/rest/rest_test.go +++ b/src/restic/backend/rest/rest_test.go @@ -6,7 +6,7 @@ import ( "os" "restic" - "github.com/pkg/errors" + "restic/errors" "restic/backend/rest" "restic/backend/test" diff --git a/src/restic/backend/s3/config.go b/src/restic/backend/s3/config.go index 4eda2b0e8..2df02b58c 100644 --- a/src/restic/backend/s3/config.go +++ b/src/restic/backend/s3/config.go @@ -5,7 +5,7 @@ import ( "path" "strings" - "github.com/pkg/errors" + "restic/errors" ) // Config contains all configuration necessary to connect to an s3 compatible diff --git a/src/restic/backend/s3/s3.go b/src/restic/backend/s3/s3.go index 3af656456..b9f29b6b7 100644 --- a/src/restic/backend/s3/s3.go +++ b/src/restic/backend/s3/s3.go @@ -6,7 +6,7 @@ import ( "restic" "strings" - "github.com/pkg/errors" + "restic/errors" "github.com/minio/minio-go" diff --git a/src/restic/backend/s3/s3_test.go b/src/restic/backend/s3/s3_test.go index ab4cc855a..355352fa5 100644 --- a/src/restic/backend/s3/s3_test.go +++ b/src/restic/backend/s3/s3_test.go @@ -6,7 +6,7 @@ import ( "os" "restic" - "github.com/pkg/errors" + "restic/errors" "restic/backend/s3" "restic/backend/test" diff --git a/src/restic/backend/sftp/config.go b/src/restic/backend/sftp/config.go index d8e200491..abd8b0c2f 100644 --- a/src/restic/backend/sftp/config.go +++ b/src/restic/backend/sftp/config.go @@ -5,7 +5,7 @@ import ( "path" "strings" - "github.com/pkg/errors" + "restic/errors" ) // Config collects all information required to connect to an sftp server. diff --git a/src/restic/backend/sftp/sftp.go b/src/restic/backend/sftp/sftp.go index a4681142f..b323eb1b9 100644 --- a/src/restic/backend/sftp/sftp.go +++ b/src/restic/backend/sftp/sftp.go @@ -13,7 +13,7 @@ import ( "strings" "time" - "github.com/pkg/errors" + "restic/errors" "restic/backend" "restic/debug" diff --git a/src/restic/backend/sftp/sftp_backend_test.go b/src/restic/backend/sftp/sftp_backend_test.go index b7bcc2591..567b2cf94 100644 --- a/src/restic/backend/sftp/sftp_backend_test.go +++ b/src/restic/backend/sftp/sftp_backend_test.go @@ -7,7 +7,7 @@ import ( "restic" "strings" - "github.com/pkg/errors" + "restic/errors" "restic/backend/sftp" "restic/backend/test" diff --git a/src/restic/backend/test/tests.go b/src/restic/backend/test/tests.go index 8134eafbd..4171b0bd9 100644 --- a/src/restic/backend/test/tests.go +++ b/src/restic/backend/test/tests.go @@ -11,7 +11,7 @@ import ( "sort" "testing" - "github.com/pkg/errors" + "restic/errors" "restic/backend" . 
"restic/test" diff --git a/src/restic/backend/test/tests_test.go b/src/restic/backend/test/tests_test.go index 92c086440..04e9936e0 100644 --- a/src/restic/backend/test/tests_test.go +++ b/src/restic/backend/test/tests_test.go @@ -3,7 +3,7 @@ package test_test import ( "restic" - "github.com/pkg/errors" + "restic/errors" "restic/backend/mem" "restic/backend/test" diff --git a/src/restic/backend/utils.go b/src/restic/backend/utils.go index f060b6fca..82a899515 100644 --- a/src/restic/backend/utils.go +++ b/src/restic/backend/utils.go @@ -4,7 +4,7 @@ import ( "io" "restic" - "github.com/pkg/errors" + "restic/errors" ) // LoadAll reads all data stored in the backend for the handle. The buffer buf diff --git a/src/restic/backend_find.go b/src/restic/backend_find.go index d788e6797..193fd165b 100644 --- a/src/restic/backend_find.go +++ b/src/restic/backend_find.go @@ -1,6 +1,6 @@ package restic -import "github.com/pkg/errors" +import "restic/errors" // ErrNoIDPrefixFound is returned by Find() when no ID for the given prefix // could be found. diff --git a/src/restic/blob.go b/src/restic/blob.go index 56e478adc..6074b59b1 100644 --- a/src/restic/blob.go +++ b/src/restic/blob.go @@ -3,7 +3,7 @@ package restic import ( "fmt" - "github.com/pkg/errors" + "restic/errors" ) // Blob is one part of a file or a tree. diff --git a/src/restic/checker/checker.go b/src/restic/checker/checker.go index 9d673a94c..88a8eec8e 100644 --- a/src/restic/checker/checker.go +++ b/src/restic/checker/checker.go @@ -5,7 +5,7 @@ import ( "fmt" "sync" - "github.com/pkg/errors" + "restic/errors" "restic" "restic/backend" diff --git a/src/restic/config.go b/src/restic/config.go index 5d1699295..0afb5426b 100644 --- a/src/restic/config.go +++ b/src/restic/config.go @@ -3,7 +3,7 @@ package restic import ( "testing" - "github.com/pkg/errors" + "restic/errors" "restic/debug" diff --git a/src/restic/crypto/crypto.go b/src/restic/crypto/crypto.go index 33b9dfda8..2ebf5d31b 100644 --- a/src/restic/crypto/crypto.go +++ b/src/restic/crypto/crypto.go @@ -7,7 +7,7 @@ import ( "encoding/json" "fmt" - "github.com/pkg/errors" + "restic/errors" "golang.org/x/crypto/poly1305" ) diff --git a/src/restic/crypto/kdf.go b/src/restic/crypto/kdf.go index ea8be37b6..ccde35ace 100644 --- a/src/restic/crypto/kdf.go +++ b/src/restic/crypto/kdf.go @@ -5,8 +5,8 @@ import ( "time" sscrypt "github.com/elithrar/simple-scrypt" - "github.com/pkg/errors" "golang.org/x/crypto/scrypt" + "restic/errors" ) const saltLength = 64 diff --git a/src/restic/debug/debug.go b/src/restic/debug/debug.go index aeae376cd..b1ab2b38c 100644 --- a/src/restic/debug/debug.go +++ b/src/restic/debug/debug.go @@ -15,7 +15,7 @@ import ( "sync" "time" - "github.com/pkg/errors" + "restic/errors" ) type process struct { diff --git a/src/restic/errors/doc.go b/src/restic/errors/doc.go new file mode 100644 index 000000000..9f63cf958 --- /dev/null +++ b/src/restic/errors/doc.go @@ -0,0 +1,2 @@ +// Package errors provides custom error types used within restic. 
+package errors diff --git a/src/restic/errors.go b/src/restic/errors/fatal.go similarity index 98% rename from src/restic/errors.go rename to src/restic/errors/fatal.go index 1aa7e1fdc..dce3a92b0 100644 --- a/src/restic/errors.go +++ b/src/restic/errors/fatal.go @@ -1,4 +1,4 @@ -package restic +package errors import "fmt" diff --git a/src/restic/errors/wrap.go b/src/restic/errors/wrap.go new file mode 100644 index 000000000..65b48de8b --- /dev/null +++ b/src/restic/errors/wrap.go @@ -0,0 +1,23 @@ +package errors + +import "github.com/pkg/errors" + +// Cause returns the cause of an error. +func Cause(err error) error { + return errors.Cause(err) +} + +// New creates a new error based on message. +func New(message string) error { + return errors.New(message) +} + +// Errorf creates an error based on a format string and values. +func Errorf(format string, args ...interface{}) error { + return errors.Errorf(format, args...) +} + +// Wrap wraps an error retrieved from outside of restic. +func Wrap(err error, message string) error { + return errors.Wrap(err, message) +} diff --git a/src/restic/file.go b/src/restic/file.go index 166546f52..bfe44ad42 100644 --- a/src/restic/file.go +++ b/src/restic/file.go @@ -3,7 +3,7 @@ package restic import ( "fmt" - "github.com/pkg/errors" + "restic/errors" ) // FileType is the type of a file in the backend. diff --git a/src/restic/filter/filter.go b/src/restic/filter/filter.go index 48ce01fb8..bb483d31c 100644 --- a/src/restic/filter/filter.go +++ b/src/restic/filter/filter.go @@ -4,7 +4,7 @@ import ( "path/filepath" "strings" - "github.com/pkg/errors" + "restic/errors" ) // ErrBadString is returned when Match is called with the empty string as the diff --git a/src/restic/fs/file_linux.go b/src/restic/fs/file_linux.go index e3cdf9600..f02c6470d 100644 --- a/src/restic/fs/file_linux.go +++ b/src/restic/fs/file_linux.go @@ -6,7 +6,7 @@ import ( "os" "syscall" - "github.com/pkg/errors" + "restic/errors" "golang.org/x/sys/unix" ) diff --git a/src/restic/fuse/file.go b/src/restic/fuse/file.go index 83a173471..d5949ee86 100644 --- a/src/restic/fuse/file.go +++ b/src/restic/fuse/file.go @@ -6,7 +6,7 @@ package fuse import ( "sync" - "github.com/pkg/errors" + "restic/errors" "restic" "restic/debug" diff --git a/src/restic/fuse/file_test.go b/src/restic/fuse/file_test.go index bd7bfdca9..58e6b33ba 100644 --- a/src/restic/fuse/file_test.go +++ b/src/restic/fuse/file_test.go @@ -9,7 +9,7 @@ import ( "testing" "time" - "github.com/pkg/errors" + "restic/errors" "bazil.org/fuse" diff --git a/src/restic/id.go b/src/restic/id.go index 5a1f4ab6d..6d1f55de2 100644 --- a/src/restic/id.go +++ b/src/restic/id.go @@ -8,7 +8,7 @@ import ( "encoding/json" "io" - "github.com/pkg/errors" + "restic/errors" ) // Hash returns the ID for data. diff --git a/src/restic/index/index.go b/src/restic/index/index.go index e2f7f8317..9027e3fda 100644 --- a/src/restic/index/index.go +++ b/src/restic/index/index.go @@ -9,7 +9,7 @@ import ( "restic/list" "restic/worker" - "github.com/pkg/errors" + "restic/errors" ) // Pack contains information about the contents of a pack. 
diff --git a/src/restic/lock.go b/src/restic/lock.go index 2cb0a1134..f32df4f79 100644 --- a/src/restic/lock.go +++ b/src/restic/lock.go @@ -10,7 +10,7 @@ import ( "testing" "time" - "github.com/pkg/errors" + "restic/errors" "restic/debug" ) diff --git a/src/restic/lock_unix.go b/src/restic/lock_unix.go index 6b481ed26..d1b7fb0a3 100644 --- a/src/restic/lock_unix.go +++ b/src/restic/lock_unix.go @@ -8,7 +8,7 @@ import ( "strconv" "syscall" - "github.com/pkg/errors" + "restic/errors" "restic/debug" ) diff --git a/src/restic/mock/backend.go b/src/restic/mock/backend.go index 717f38bf3..5aadc849d 100644 --- a/src/restic/mock/backend.go +++ b/src/restic/mock/backend.go @@ -3,7 +3,7 @@ package mock import ( "restic" - "github.com/pkg/errors" + "restic/errors" ) // Backend implements a mock backend. diff --git a/src/restic/node.go b/src/restic/node.go index 842517d42..427ed6303 100644 --- a/src/restic/node.go +++ b/src/restic/node.go @@ -10,7 +10,7 @@ import ( "syscall" "time" - "github.com/pkg/errors" + "restic/errors" "runtime" diff --git a/src/restic/node_linux.go b/src/restic/node_linux.go index 57a5e5c47..7ebad89f3 100644 --- a/src/restic/node_linux.go +++ b/src/restic/node_linux.go @@ -6,7 +6,7 @@ import ( "golang.org/x/sys/unix" - "github.com/pkg/errors" + "restic/errors" "restic/fs" ) diff --git a/src/restic/node_windows.go b/src/restic/node_windows.go index 08a7f86a2..050de8f27 100644 --- a/src/restic/node_windows.go +++ b/src/restic/node_windows.go @@ -3,7 +3,7 @@ package restic import ( "syscall" - "github.com/pkg/errors" + "restic/errors" ) // mknod() creates a filesystem node (file, device diff --git a/src/restic/pack/pack.go b/src/restic/pack/pack.go index a1e62d543..40d10839b 100644 --- a/src/restic/pack/pack.go +++ b/src/restic/pack/pack.go @@ -8,7 +8,7 @@ import ( "restic" "sync" - "github.com/pkg/errors" + "restic/errors" "restic/crypto" ) diff --git a/src/restic/pipe/pipe.go b/src/restic/pipe/pipe.go index 48a83a362..1ed9b6162 100644 --- a/src/restic/pipe/pipe.go +++ b/src/restic/pipe/pipe.go @@ -6,7 +6,7 @@ import ( "path/filepath" "sort" - "github.com/pkg/errors" + "restic/errors" "restic/debug" "restic/fs" diff --git a/src/restic/rand_reader.go b/src/restic/rand_reader.go index cfe50222e..205fd6aba 100644 --- a/src/restic/rand_reader.go +++ b/src/restic/rand_reader.go @@ -4,7 +4,7 @@ import ( "io" "math/rand" - "github.com/pkg/errors" + "restic/errors" ) // RandReader allows reading from a rand.Rand. 
diff --git a/src/restic/repository/index.go b/src/restic/repository/index.go index f543a25ec..029374063 100644 --- a/src/restic/repository/index.go +++ b/src/restic/repository/index.go @@ -8,7 +8,7 @@ import ( "sync" "time" - "github.com/pkg/errors" + "restic/errors" "restic/crypto" "restic/debug" diff --git a/src/restic/repository/key.go b/src/restic/repository/key.go index 7b03ae108..b874dc644 100644 --- a/src/restic/repository/key.go +++ b/src/restic/repository/key.go @@ -8,7 +8,7 @@ import ( "restic" "time" - "github.com/pkg/errors" + "restic/errors" "restic/backend" "restic/crypto" diff --git a/src/restic/repository/master_index.go b/src/restic/repository/master_index.go index f82121fb1..a3489e53b 100644 --- a/src/restic/repository/master_index.go +++ b/src/restic/repository/master_index.go @@ -4,7 +4,7 @@ import ( "restic" "sync" - "github.com/pkg/errors" + "restic/errors" "restic/debug" ) diff --git a/src/restic/repository/packer_manager.go b/src/restic/repository/packer_manager.go index ea08a114c..85c5b186c 100644 --- a/src/restic/repository/packer_manager.go +++ b/src/restic/repository/packer_manager.go @@ -7,7 +7,7 @@ import ( "restic" "sync" - "github.com/pkg/errors" + "restic/errors" "restic/crypto" "restic/debug" diff --git a/src/restic/repository/parallel_test.go b/src/restic/repository/parallel_test.go index 30b0238bd..cfa384a01 100644 --- a/src/restic/repository/parallel_test.go +++ b/src/restic/repository/parallel_test.go @@ -6,7 +6,7 @@ import ( "testing" "time" - "github.com/pkg/errors" + "restic/errors" "restic/repository" . "restic/test" diff --git a/src/restic/repository/repack.go b/src/restic/repository/repack.go index 95e0eae07..274f2d320 100644 --- a/src/restic/repository/repack.go +++ b/src/restic/repository/repack.go @@ -8,7 +8,7 @@ import ( "restic/debug" "restic/pack" - "github.com/pkg/errors" + "restic/errors" ) // Repack takes a list of packs together with a list of blobs contained in diff --git a/src/restic/repository/repository.go b/src/restic/repository/repository.go index bc36e0507..a400b85e3 100644 --- a/src/restic/repository/repository.go +++ b/src/restic/repository/repository.go @@ -8,7 +8,7 @@ import ( "os" "restic" - "github.com/pkg/errors" + "restic/errors" "restic/backend" "restic/crypto" diff --git a/src/restic/restorer.go b/src/restic/restorer.go index 9784df8e9..7ea7f0123 100644 --- a/src/restic/restorer.go +++ b/src/restic/restorer.go @@ -4,7 +4,7 @@ import ( "os" "path/filepath" - "github.com/pkg/errors" + "restic/errors" "restic/debug" "restic/fs" diff --git a/src/restic/snapshot.go b/src/restic/snapshot.go index 4775cbd7b..dc351e8e4 100644 --- a/src/restic/snapshot.go +++ b/src/restic/snapshot.go @@ -7,7 +7,7 @@ import ( "path/filepath" "time" - "github.com/pkg/errors" + "restic/errors" ) // Snapshot is the state of a resource at one point in time. diff --git a/src/restic/testing.go b/src/restic/testing.go index 1be705bb4..e4fe6ddb3 100644 --- a/src/restic/testing.go +++ b/src/restic/testing.go @@ -8,8 +8,8 @@ import ( "testing" "time" - "github.com/pkg/errors" "github.com/restic/chunker" + "restic/errors" ) // fakeFile returns a reader which yields deterministic pseudo-random data. 
diff --git a/src/restic/tree.go b/src/restic/tree.go index 94e51b322..f27393fa8 100644 --- a/src/restic/tree.go +++ b/src/restic/tree.go @@ -4,7 +4,7 @@ import ( "fmt" "sort" - "github.com/pkg/errors" + "restic/errors" "restic/debug" ) diff --git a/src/restic/worker/pool_test.go b/src/restic/worker/pool_test.go index 329ce9a88..9d6159b89 100644 --- a/src/restic/worker/pool_test.go +++ b/src/restic/worker/pool_test.go @@ -3,7 +3,7 @@ package worker_test import ( "testing" - "github.com/pkg/errors" + "restic/errors" "restic/worker" ) From 714a5d1dc45ccd9a6234fcdbf65329e9edf8a9f6 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Thu, 1 Sep 2016 22:24:48 +0200 Subject: [PATCH 21/40] Move tree walker to restic/walk --- src/restic/archiver/archiver.go | 13 ++--- .../testdata/walktree-test-repo.tar.gz | Bin src/restic/{ => walk}/walk.go | 45 +++++++++--------- src/restic/{ => walk}/walk_test.go | 15 +++--- 4 files changed, 38 insertions(+), 35 deletions(-) rename src/restic/{ => walk}/testdata/walktree-test-repo.tar.gz (100%) rename src/restic/{ => walk}/walk.go (76%) rename src/restic/{ => walk}/walk_test.go (99%) diff --git a/src/restic/archiver/archiver.go b/src/restic/archiver/archiver.go index 99c91a394..30decd1d9 100644 --- a/src/restic/archiver/archiver.go +++ b/src/restic/archiver/archiver.go @@ -12,6 +12,7 @@ import ( "time" "restic/errors" + "restic/walk" "restic/debug" "restic/fs" @@ -421,7 +422,7 @@ func (arch *Archiver) dirWorker(wg *sync.WaitGroup, p *restic.Progress, done <-c } type archivePipe struct { - Old <-chan restic.WalkTreeJob + Old <-chan walk.TreeJob New <-chan pipe.Job } @@ -456,7 +457,7 @@ func copyJobs(done <-chan struct{}, in <-chan pipe.Job, out chan<- pipe.Job) { type archiveJob struct { hasOld bool - old restic.WalkTreeJob + old walk.TreeJob new pipe.Job } @@ -470,7 +471,7 @@ func (a *archivePipe) compare(done <-chan struct{}, out chan<- pipe.Job) { var ( loadOld, loadNew bool = true, true ok bool - oldJob restic.WalkTreeJob + oldJob walk.TreeJob newJob pipe.Job ) @@ -667,12 +668,12 @@ func (arch *Archiver) Snapshot(p *restic.Progress, paths []string, parentID *res } // start walker on old tree - ch := make(chan restic.WalkTreeJob) - go restic.WalkTree(arch.repo, *parent.Tree, done, ch) + ch := make(chan walk.TreeJob) + go walk.Tree(arch.repo, *parent.Tree, done, ch) jobs.Old = ch } else { // use closed channel - ch := make(chan restic.WalkTreeJob) + ch := make(chan walk.TreeJob) close(ch) jobs.Old = ch } diff --git a/src/restic/testdata/walktree-test-repo.tar.gz b/src/restic/walk/testdata/walktree-test-repo.tar.gz similarity index 100% rename from src/restic/testdata/walktree-test-repo.tar.gz rename to src/restic/walk/testdata/walktree-test-repo.tar.gz diff --git a/src/restic/walk.go b/src/restic/walk/walk.go similarity index 76% rename from src/restic/walk.go rename to src/restic/walk/walk.go index 1c8fa60b6..456c2b9be 100644 --- a/src/restic/walk.go +++ b/src/restic/walk/walk.go @@ -1,39 +1,40 @@ -package restic +package walk import ( "fmt" "os" "path/filepath" + "restic" "sync" "restic/debug" ) -// WalkTreeJob is a job sent from the tree walker. -type WalkTreeJob struct { +// TreeJob is a job sent from the tree walker. +type TreeJob struct { Path string Error error - Node *Node - Tree *Tree + Node *restic.Node + Tree *restic.Tree } // TreeWalker traverses a tree in the repository depth-first and sends a job // for each item (file or dir) that it encounters. 
type TreeWalker struct { ch chan<- loadTreeJob - out chan<- WalkTreeJob + out chan<- TreeJob } // NewTreeWalker uses ch to load trees from the repository and sends jobs to // out. -func NewTreeWalker(ch chan<- loadTreeJob, out chan<- WalkTreeJob) *TreeWalker { +func NewTreeWalker(ch chan<- loadTreeJob, out chan<- TreeJob) *TreeWalker { return &TreeWalker{ch: ch, out: out} } // Walk starts walking the tree given by id. When the channel done is closed, // processing stops. -func (tw *TreeWalker) Walk(path string, id ID, done chan struct{}) { +func (tw *TreeWalker) Walk(path string, id restic.ID, done chan struct{}) { debug.Log("TreeWalker.Walk", "starting on tree %v for %v", id.Str(), path) defer debug.Log("TreeWalker.Walk", "done walking tree %v for %v", id.Str(), path) @@ -46,7 +47,7 @@ func (tw *TreeWalker) Walk(path string, id ID, done chan struct{}) { res := <-resCh if res.err != nil { select { - case tw.out <- WalkTreeJob{Path: path, Error: res.err}: + case tw.out <- TreeJob{Path: path, Error: res.err}: case <-done: return } @@ -56,13 +57,13 @@ func (tw *TreeWalker) Walk(path string, id ID, done chan struct{}) { tw.walk(path, res.tree, done) select { - case tw.out <- WalkTreeJob{Path: path, Tree: res.tree}: + case tw.out <- TreeJob{Path: path, Tree: res.tree}: case <-done: return } } -func (tw *TreeWalker) walk(path string, tree *Tree, done chan struct{}) { +func (tw *TreeWalker) walk(path string, tree *restic.Tree, done chan struct{}) { debug.Log("TreeWalker.walk", "start on %q", path) defer debug.Log("TreeWalker.walk", "done for %q", path) @@ -84,7 +85,7 @@ func (tw *TreeWalker) walk(path string, tree *Tree, done chan struct{}) { for i, node := range tree.Nodes { p := filepath.Join(path, node.Name) - var job WalkTreeJob + var job TreeJob if node.Type == "dir" { if results[i] == nil { @@ -98,9 +99,9 @@ func (tw *TreeWalker) walk(path string, tree *Tree, done chan struct{}) { fmt.Fprintf(os.Stderr, "error loading tree: %v\n", res.err) } - job = WalkTreeJob{Path: p, Tree: res.tree, Error: res.err} + job = TreeJob{Path: p, Tree: res.tree, Error: res.err} } else { - job = WalkTreeJob{Path: p, Node: node} + job = TreeJob{Path: p, Node: node} } select { @@ -112,16 +113,16 @@ func (tw *TreeWalker) walk(path string, tree *Tree, done chan struct{}) { } type loadTreeResult struct { - tree *Tree + tree *restic.Tree err error } type loadTreeJob struct { - id ID + id restic.ID res chan<- loadTreeResult } -type treeLoader func(ID) (*Tree, error) +type treeLoader func(restic.ID) (*restic.Tree, error) func loadTreeWorker(wg *sync.WaitGroup, in <-chan loadTreeJob, load treeLoader, done <-chan struct{}) { debug.Log("loadTreeWorker", "start") @@ -157,15 +158,15 @@ func loadTreeWorker(wg *sync.WaitGroup, in <-chan loadTreeJob, load treeLoader, const loadTreeWorkers = 10 -// WalkTree walks the tree specified by id recursively and sends a job for each +// Tree walks the tree specified by id recursively and sends a job for each // file and directory it finds. When the channel done is closed, processing // stops. 
-func WalkTree(repo TreeLoader, id ID, done chan struct{}, jobCh chan<- WalkTreeJob) { +func Tree(repo restic.TreeLoader, id restic.ID, done chan struct{}, jobCh chan<- TreeJob) { debug.Log("WalkTree", "start on %v, start workers", id.Str()) - load := func(id ID) (*Tree, error) { - tree := &Tree{} - err := repo.LoadJSONPack(TreeBlob, id, tree) + load := func(id restic.ID) (*restic.Tree, error) { + tree := &restic.Tree{} + err := repo.LoadJSONPack(restic.TreeBlob, id, tree) if err != nil { return nil, err } diff --git a/src/restic/walk_test.go b/src/restic/walk/walk_test.go similarity index 99% rename from src/restic/walk_test.go rename to src/restic/walk/walk_test.go index 1edd052ba..1a33c0ca0 100644 --- a/src/restic/walk_test.go +++ b/src/restic/walk/walk_test.go @@ -1,4 +1,4 @@ -package restic_test +package walk_test import ( "os" @@ -12,6 +12,7 @@ import ( "restic/pipe" "restic/repository" . "restic/test" + "restic/walk" ) func TestWalkTree(t *testing.T) { @@ -32,8 +33,8 @@ func TestWalkTree(t *testing.T) { done := make(chan struct{}) // start tree walker - treeJobs := make(chan restic.WalkTreeJob) - go restic.WalkTree(repo, *sn.Tree, done, treeJobs) + treeJobs := make(chan walk.TreeJob) + go walk.Tree(repo, *sn.Tree, done, treeJobs) // start filesystem walker fsJobs := make(chan pipe.Job) @@ -1350,8 +1351,8 @@ func TestDelayedWalkTree(t *testing.T) { dr := delayRepo{repo, 100 * time.Millisecond} // start tree walker - treeJobs := make(chan restic.WalkTreeJob) - go restic.WalkTree(dr, root, nil, treeJobs) + treeJobs := make(chan walk.TreeJob) + go walk.Tree(dr, root, nil, treeJobs) i := 0 for job := range treeJobs { @@ -1382,8 +1383,8 @@ func BenchmarkDelayedWalkTree(t *testing.B) { for i := 0; i < t.N; i++ { // start tree walker - treeJobs := make(chan restic.WalkTreeJob) - go restic.WalkTree(dr, root, nil, treeJobs) + treeJobs := make(chan walk.TreeJob) + go walk.Tree(dr, root, nil, treeJobs) for _ = range treeJobs { } From 619939ccd90cef50b101d02ae8ce5f39e5790848 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Thu, 1 Sep 2016 22:42:12 +0200 Subject: [PATCH 22/40] Reorder methods in interface Repository --- src/restic/repository.go | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/src/restic/repository.go b/src/restic/repository.go index 060a8f3a9..c0567ec98 100644 --- a/src/restic/repository.go +++ b/src/restic/repository.go @@ -15,20 +15,9 @@ type Repository interface { Index() Index SaveFullIndex() error - - SaveJSON(BlobType, interface{}) (ID, error) - SaveUnpacked(FileType, []byte) (ID, error) - - Config() Config - - SaveAndEncrypt(BlobType, []byte, *ID) (ID, error) - SaveJSONUnpacked(FileType, interface{}) (ID, error) SaveIndex() error - LoadJSONPack(BlobType, ID, interface{}) error - LoadJSONUnpacked(FileType, ID, interface{}) error - LoadBlob(ID, BlobType, []byte) ([]byte, error) - LoadAndDecrypt(FileType, ID) ([]byte, error) + Config() Config LookupBlobSize(ID, BlobType) (uint, error) @@ -36,6 +25,16 @@ type Repository interface { ListPack(ID) ([]Blob, int64, error) Flush() error + + SaveJSON(BlobType, interface{}) (ID, error) + SaveUnpacked(FileType, []byte) (ID, error) + SaveAndEncrypt(BlobType, []byte, *ID) (ID, error) + SaveJSONUnpacked(FileType, interface{}) (ID, error) + + LoadJSONPack(BlobType, ID, interface{}) error + LoadJSONUnpacked(FileType, ID, interface{}) error + LoadBlob(ID, BlobType, []byte) ([]byte, error) + LoadAndDecrypt(FileType, ID) ([]byte, error) } // Deleter removes all data stored in a backend/repo. 
From 573410afabd9d7df60f55e86ea75fd43b01b5f7a Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Fri, 2 Sep 2016 22:17:02 +0200 Subject: [PATCH 23/40] Fix archiver test --- src/restic/archiver/archiver_int_test.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/restic/archiver/archiver_int_test.go b/src/restic/archiver/archiver_int_test.go index 3b5309a20..c4014f5b0 100644 --- a/src/restic/archiver/archiver_int_test.go +++ b/src/restic/archiver/archiver_int_test.go @@ -2,10 +2,10 @@ package archiver import ( "os" - "restic" "testing" "restic/pipe" + "restic/walk" ) var treeJobs = []string{ @@ -83,12 +83,12 @@ func (j testPipeJob) Error() error { return j.err } func (j testPipeJob) Info() os.FileInfo { return j.fi } func (j testPipeJob) Result() chan<- pipe.Result { return j.res } -func testTreeWalker(done <-chan struct{}, out chan<- restic.WalkTreeJob) { +func testTreeWalker(done <-chan struct{}, out chan<- walk.TreeJob) { for _, e := range treeJobs { select { case <-done: return - case out <- restic.WalkTreeJob{Path: e}: + case out <- walk.TreeJob{Path: e}: } } @@ -110,7 +110,7 @@ func testPipeWalker(done <-chan struct{}, out chan<- pipe.Job) { func TestArchivePipe(t *testing.T) { done := make(chan struct{}) - treeCh := make(chan restic.WalkTreeJob) + treeCh := make(chan walk.TreeJob) pipeCh := make(chan pipe.Job) go testTreeWalker(done, treeCh) From 84f95a09d7442f5cd2ceb01943b38ea84cf5b584 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sat, 3 Sep 2016 11:22:01 +0200 Subject: [PATCH 24/40] Introduce LoadTreeBlob and LoadDataBlob --- src/cmds/restic/cmd_find.go | 2 +- src/cmds/restic/cmd_ls.go | 2 +- src/restic/archiver/archive_reader_test.go | 2 +- src/restic/checker/checker.go | 2 +- src/restic/find.go | 2 +- src/restic/fuse/dir.go | 6 ++-- src/restic/node.go | 3 +- src/restic/repository.go | 5 +-- src/restic/repository/repository.go | 42 ++++++++++++++++++++++ src/restic/restorer.go | 2 +- src/restic/tree.go | 15 +------- src/restic/tree_test.go | 2 +- src/restic/walk/walk.go | 10 ++++-- src/restic/walk/walk_test.go | 4 +-- 14 files changed, 67 insertions(+), 32 deletions(-) diff --git a/src/cmds/restic/cmd_find.go b/src/cmds/restic/cmd_find.go index 24258d510..683adaa87 100644 --- a/src/cmds/restic/cmd_find.go +++ b/src/cmds/restic/cmd_find.go @@ -61,7 +61,7 @@ func parseTime(str string) (time.Time, error) { func (c CmdFind) findInTree(repo *repository.Repository, id restic.ID, path string) ([]findResult, error) { debug.Log("restic.find", "checking tree %v\n", id) - tree, err := restic.LoadTree(repo, id) + tree, err := repo.LoadTree(id) if err != nil { return nil, err } diff --git a/src/cmds/restic/cmd_ls.go b/src/cmds/restic/cmd_ls.go index 733f424b6..4e3b29e8a 100644 --- a/src/cmds/restic/cmd_ls.go +++ b/src/cmds/restic/cmd_ls.go @@ -47,7 +47,7 @@ func (cmd CmdLs) printNode(prefix string, n *restic.Node) string { } func (cmd CmdLs) printTree(prefix string, repo *repository.Repository, id restic.ID) error { - tree, err := restic.LoadTree(repo, id) + tree, err := repo.LoadTree(id) if err != nil { return err } diff --git a/src/restic/archiver/archive_reader_test.go b/src/restic/archiver/archive_reader_test.go index da46f030a..b402bc6d1 100644 --- a/src/restic/archiver/archive_reader_test.go +++ b/src/restic/archiver/archive_reader_test.go @@ -21,7 +21,7 @@ func loadBlob(t *testing.T, repo *repository.Repository, id restic.ID, buf []byt } func checkSavedFile(t *testing.T, repo *repository.Repository, treeID restic.ID, name string, rd io.Reader) { 
- tree, err := restic.LoadTree(repo, treeID) + tree, err := repo.LoadTree(treeID) if err != nil { t.Fatalf("LoadTree() returned error %v", err) } diff --git a/src/restic/checker/checker.go b/src/restic/checker/checker.go index 88a8eec8e..df879fdfd 100644 --- a/src/restic/checker/checker.go +++ b/src/restic/checker/checker.go @@ -376,7 +376,7 @@ func loadTreeWorker(repo restic.Repository, } debug.Log("checker.loadTreeWorker", "load tree %v", treeID.Str()) - tree, err := restic.LoadTree(repo, treeID) + tree, err := repo.LoadTree(treeID) debug.Log("checker.loadTreeWorker", "load tree %v (%v) returned err: %v", tree, treeID.Str(), err) job = treeJob{ID: treeID, error: err, Tree: tree} outCh = out diff --git a/src/restic/find.go b/src/restic/find.go index bfcdbb58f..dcc9d0251 100644 --- a/src/restic/find.go +++ b/src/restic/find.go @@ -6,7 +6,7 @@ package restic func FindUsedBlobs(repo Repository, treeID ID, blobs BlobSet, seen BlobSet) error { blobs.Insert(BlobHandle{ID: treeID, Type: TreeBlob}) - tree, err := LoadTree(repo, treeID) + tree, err := repo.LoadTree(treeID) if err != nil { return err } diff --git a/src/restic/fuse/dir.go b/src/restic/fuse/dir.go index a89617e5f..14f8c7f21 100644 --- a/src/restic/fuse/dir.go +++ b/src/restic/fuse/dir.go @@ -29,7 +29,7 @@ type dir struct { func newDir(repo *repository.Repository, node *restic.Node, ownerIsRoot bool) (*dir, error) { debug.Log("newDir", "new dir for %v (%v)", node.Name, node.Subtree.Str()) - tree, err := restic.LoadTree(repo, *node.Subtree) + tree, err := repo.LoadTree(*node.Subtree) if err != nil { debug.Log("newDir", " error loading tree %v: %v", node.Subtree.Str(), err) return nil, err @@ -59,7 +59,7 @@ func replaceSpecialNodes(repo *repository.Repository, node *restic.Node) ([]*res return []*restic.Node{node}, nil } - tree, err := restic.LoadTree(repo, *node.Subtree) + tree, err := repo.LoadTree(*node.Subtree) if err != nil { return nil, err } @@ -69,7 +69,7 @@ func replaceSpecialNodes(repo *repository.Repository, node *restic.Node) ([]*res func newDirFromSnapshot(repo *repository.Repository, snapshot SnapshotWithId, ownerIsRoot bool) (*dir, error) { debug.Log("newDirFromSnapshot", "new dir for snapshot %v (%v)", snapshot.ID.Str(), snapshot.Tree.Str()) - tree, err := restic.LoadTree(repo, *snapshot.Tree) + tree, err := repo.LoadTree(*snapshot.Tree) if err != nil { debug.Log("newDirFromSnapshot", " loadTree(%v) failed: %v", snapshot.ID.Str(), err) return nil, err diff --git a/src/restic/node.go b/src/restic/node.go index 427ed6303..c4cff8ca5 100644 --- a/src/restic/node.go +++ b/src/restic/node.go @@ -218,10 +218,11 @@ func (node Node) createFileAt(path string, repo Repository) error { buf = make([]byte, size) } - buf, err := repo.LoadBlob(id, DataBlob, buf) + n, err := repo.LoadDataBlob(id, buf) if err != nil { return err } + buf = buf[:n] _, err = f.Write(buf) if err != nil { diff --git a/src/restic/repository.go b/src/restic/repository.go index c0567ec98..5c1067f7b 100644 --- a/src/restic/repository.go +++ b/src/restic/repository.go @@ -31,10 +31,11 @@ type Repository interface { SaveAndEncrypt(BlobType, []byte, *ID) (ID, error) SaveJSONUnpacked(FileType, interface{}) (ID, error) - LoadJSONPack(BlobType, ID, interface{}) error LoadJSONUnpacked(FileType, ID, interface{}) error - LoadBlob(ID, BlobType, []byte) ([]byte, error) LoadAndDecrypt(FileType, ID) ([]byte, error) + + LoadTree(id ID) (*Tree, error) + LoadDataBlob(id ID, buf []byte) (int, error) } // Deleter removes all data stored in a backend/repo. 
diff --git a/src/restic/repository/repository.go b/src/restic/repository/repository.go index a400b85e3..87025425d 100644 --- a/src/restic/repository/repository.go +++ b/src/restic/repository/repository.go @@ -589,3 +589,45 @@ func (r *Repository) Delete() error { func (r *Repository) Close() error { return r.be.Close() } + +// LoadTree loads a tree from the repository. +func (r *Repository) LoadTree(id restic.ID) (*restic.Tree, error) { + size, err := r.idx.LookupSize(id, restic.TreeBlob) + if err != nil { + return nil, err + } + + buf := make([]byte, size) + + buf, err = r.LoadBlob(id, restic.TreeBlob, nil) + if err != nil { + return nil, err + } + + t := &restic.Tree{} + err = json.Unmarshal(buf, t) + if err != nil { + return nil, err + } + + return t, nil +} + +// LoadDataBlob loads a data blob from the repository to the buffer. +func (r *Repository) LoadDataBlob(id restic.ID, buf []byte) (int, error) { + size, err := r.idx.LookupSize(id, restic.DataBlob) + if err != nil { + return 0, err + } + + if len(buf) < int(size) { + return 0, errors.Errorf("buffer is too small for data blob (%d < %d)", len(buf), size) + } + + buf, err = r.LoadBlob(id, restic.DataBlob, buf) + if err != nil { + return 0, err + } + + return len(buf), err +} diff --git a/src/restic/restorer.go b/src/restic/restorer.go index 7ea7f0123..e3fceb67f 100644 --- a/src/restic/restorer.go +++ b/src/restic/restorer.go @@ -39,7 +39,7 @@ func NewRestorer(repo Repository, id ID) (*Restorer, error) { } func (res *Restorer) restoreTo(dst string, dir string, treeID ID) error { - tree, err := LoadTree(res.repo, treeID) + tree, err := res.repo.LoadTree(treeID) if err != nil { return res.Error(dir, nil, err) } diff --git a/src/restic/tree.go b/src/restic/tree.go index f27393fa8..f2c1c04a9 100644 --- a/src/restic/tree.go +++ b/src/restic/tree.go @@ -25,20 +25,6 @@ func (t Tree) String() string { return fmt.Sprintf("Tree<%d nodes>", len(t.Nodes)) } -type TreeLoader interface { - LoadJSONPack(BlobType, ID, interface{}) error -} - -func LoadTree(repo TreeLoader, id ID) (*Tree, error) { - tree := &Tree{} - err := repo.LoadJSONPack(TreeBlob, id, tree) - if err != nil { - return nil, err - } - - return tree, nil -} - // Equals returns true if t and other have exactly the same nodes. func (t Tree) Equals(other *Tree) bool { if len(t.Nodes) != len(other.Nodes) { @@ -85,6 +71,7 @@ func (t Tree) binarySearch(name string) (int, *Node, error) { return pos, nil, errors.New("named node not found") } +// Find returns a node with the given name. func (t Tree) Find(name string) (*Node, error) { _, node, err := t.binarySearch(name) return node, err diff --git a/src/restic/tree_test.go b/src/restic/tree_test.go index f8d632df9..3c581ec68 100644 --- a/src/restic/tree_test.go +++ b/src/restic/tree_test.go @@ -104,7 +104,7 @@ func TestLoadTree(t *testing.T) { OK(t, repo.Flush()) // load tree again - tree2, err := restic.LoadTree(repo, id) + tree2, err := repo.LoadTree(id) OK(t, err) Assert(t, tree.Equals(tree2), diff --git a/src/restic/walk/walk.go b/src/restic/walk/walk.go index 456c2b9be..fbe322f63 100644 --- a/src/restic/walk/walk.go +++ b/src/restic/walk/walk.go @@ -156,17 +156,21 @@ func loadTreeWorker(wg *sync.WaitGroup, in <-chan loadTreeJob, load treeLoader, } } +// TreeLoader loads tree objects. +type TreeLoader interface { + LoadTree(restic.ID) (*restic.Tree, error) +} + const loadTreeWorkers = 10 // Tree walks the tree specified by id recursively and sends a job for each // file and directory it finds. 
When the channel done is closed, processing // stops. -func Tree(repo restic.TreeLoader, id restic.ID, done chan struct{}, jobCh chan<- TreeJob) { +func Tree(repo TreeLoader, id restic.ID, done chan struct{}, jobCh chan<- TreeJob) { debug.Log("WalkTree", "start on %v, start workers", id.Str()) load := func(id restic.ID) (*restic.Tree, error) { - tree := &restic.Tree{} - err := repo.LoadJSONPack(restic.TreeBlob, id, tree) + tree, err := repo.LoadTree(id) if err != nil { return nil, err } diff --git a/src/restic/walk/walk_test.go b/src/restic/walk/walk_test.go index 1a33c0ca0..221f5df9c 100644 --- a/src/restic/walk/walk_test.go +++ b/src/restic/walk/walk_test.go @@ -95,9 +95,9 @@ type delayRepo struct { delay time.Duration } -func (d delayRepo) LoadJSONPack(t restic.BlobType, id restic.ID, dst interface{}) error { +func (d delayRepo) LoadTree(id restic.ID) (*restic.Tree, error) { time.Sleep(d.delay) - return d.repo.LoadJSONPack(t, id, dst) + return d.repo.LoadTree(id) } var repoFixture = filepath.Join("testdata", "walktree-test-repo.tar.gz") From ffbe05af9b9ce8a05fc5a9c5f35097d77de7d1b3 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sat, 3 Sep 2016 13:34:04 +0200 Subject: [PATCH 25/40] Rework crypto, use restic.Repository everywhere --- src/restic/archiver/archive_reader_test.go | 8 ++-- src/restic/archiver/archiver_test.go | 17 ++++++-- src/restic/checker/checker.go | 3 +- src/restic/checker/checker_test.go | 2 +- src/restic/checker/testing.go | 4 +- src/restic/crypto/crypto.go | 19 +++++---- src/restic/crypto/crypto_int_test.go | 12 ++++-- src/restic/crypto/crypto_test.go | 17 +++++--- src/restic/fuse/dir.go | 9 ++-- src/restic/fuse/file.go | 6 +-- src/restic/fuse/file_test.go | 10 ++--- src/restic/fuse/link.go | 3 +- src/restic/fuse/snapshot.go | 5 +-- src/restic/index/index_test.go | 6 +-- src/restic/pack/pack.go | 5 ++- src/restic/repository.go | 1 + src/restic/repository/key.go | 4 +- src/restic/repository/repack.go | 13 ++++-- src/restic/repository/repack_test.go | 16 ++++---- src/restic/repository/repository.go | 48 +++++++++------------- src/restic/repository/repository_test.go | 21 +++++----- src/restic/test/backend.go | 16 ++++---- src/restic/test/helpers.go | 4 +- src/restic/testing.go | 3 +- src/restic/walk/walk_test.go | 3 +- 25 files changed, 140 insertions(+), 115 deletions(-) diff --git a/src/restic/archiver/archive_reader_test.go b/src/restic/archiver/archive_reader_test.go index b402bc6d1..e7e88d6cd 100644 --- a/src/restic/archiver/archive_reader_test.go +++ b/src/restic/archiver/archive_reader_test.go @@ -11,16 +11,16 @@ import ( "github.com/restic/chunker" ) -func loadBlob(t *testing.T, repo *repository.Repository, id restic.ID, buf []byte) []byte { - buf, err := repo.LoadBlob(id, restic.DataBlob, buf) +func loadBlob(t *testing.T, repo restic.Repository, id restic.ID, buf []byte) []byte { + n, err := repo.LoadDataBlob(id, buf) if err != nil { t.Fatalf("LoadBlob(%v) returned error %v", id, err) } - return buf + return buf[:n] } -func checkSavedFile(t *testing.T, repo *repository.Repository, treeID restic.ID, name string, rd io.Reader) { +func checkSavedFile(t *testing.T, repo restic.Repository, treeID restic.ID, name string, rd io.Reader) { tree, err := repo.LoadTree(treeID) if err != nil { t.Fatalf("LoadTree() returned error %v", err) diff --git a/src/restic/archiver/archiver_test.go b/src/restic/archiver/archiver_test.go index 1d43254cb..a1fa47683 100644 --- a/src/restic/archiver/archiver_test.go +++ b/src/restic/archiver/archiver_test.go @@ -12,8 +12,9 @@ 
import ( "restic/crypto" . "restic/test" - "github.com/restic/chunker" "restic/errors" + + "github.com/restic/chunker" ) var testPol = chunker.Pol(0x3DA3358B4DC173) @@ -126,6 +127,14 @@ func BenchmarkArchiveDirectory(b *testing.B) { } } +func countPacks(repo restic.Repository, t restic.FileType) (n uint) { + for _ = range repo.Backend().List(t, nil) { + n++ + } + + return n +} + func archiveWithDedup(t testing.TB) { repo := SetupRepo() defer TeardownRepo(repo) @@ -145,7 +154,7 @@ func archiveWithDedup(t testing.TB) { t.Logf("archived snapshot %v", sn.ID().Str()) // get archive stats - cnt.before.packs = repo.Count(restic.DataFile) + cnt.before.packs = countPacks(repo, restic.DataFile) cnt.before.dataBlobs = repo.Index().Count(restic.DataBlob) cnt.before.treeBlobs = repo.Index().Count(restic.TreeBlob) t.Logf("packs %v, data blobs %v, tree blobs %v", @@ -156,7 +165,7 @@ func archiveWithDedup(t testing.TB) { t.Logf("archived snapshot %v", sn2.ID().Str()) // get archive stats again - cnt.after.packs = repo.Count(restic.DataFile) + cnt.after.packs = countPacks(repo, restic.DataFile) cnt.after.dataBlobs = repo.Index().Count(restic.DataBlob) cnt.after.treeBlobs = repo.Index().Count(restic.TreeBlob) t.Logf("packs %v, data blobs %v, tree blobs %v", @@ -173,7 +182,7 @@ func archiveWithDedup(t testing.TB) { t.Logf("archived snapshot %v, parent %v", sn3.ID().Str(), sn2.ID().Str()) // get archive stats again - cnt.after2.packs = repo.Count(restic.DataFile) + cnt.after2.packs = countPacks(repo, restic.DataFile) cnt.after2.dataBlobs = repo.Index().Count(restic.DataBlob) cnt.after2.treeBlobs = repo.Index().Count(restic.TreeBlob) t.Logf("packs %v, data blobs %v, tree blobs %v", diff --git a/src/restic/checker/checker.go b/src/restic/checker/checker.go index df879fdfd..ebb416938 100644 --- a/src/restic/checker/checker.go +++ b/src/restic/checker/checker.go @@ -684,12 +684,13 @@ func checkPack(r restic.Repository, id restic.ID) error { debug.Log("Checker.checkPack", " check blob %d: %v", i, blob.ID.Str()) plainBuf := make([]byte, blob.Length) - plainBuf, err = crypto.Decrypt(r.Key(), plainBuf, buf[blob.Offset:blob.Offset+blob.Length]) + n, err := crypto.Decrypt(r.Key(), plainBuf, buf[blob.Offset:blob.Offset+blob.Length]) if err != nil { debug.Log("Checker.checkPack", " error decrypting blob %v: %v", blob.ID.Str(), err) errs = append(errs, errors.Errorf("blob %v: %v", i, err)) continue } + plainBuf = plainBuf[:n] hash := restic.Hash(plainBuf) if !hash.Equal(blob.ID) { diff --git a/src/restic/checker/checker_test.go b/src/restic/checker/checker_test.go index 6e9f29d06..0037f0adb 100644 --- a/src/restic/checker/checker_test.go +++ b/src/restic/checker/checker_test.go @@ -17,7 +17,7 @@ import ( var checkerTestData = filepath.Join("testdata", "checker-test-repo.tar.gz") -func list(repo *repository.Repository, t restic.FileType) (IDs []string) { +func list(repo restic.Repository, t restic.FileType) (IDs []string) { done := make(chan struct{}) defer close(done) diff --git a/src/restic/checker/testing.go b/src/restic/checker/testing.go index 3bf9aa2ec..7b642dea1 100644 --- a/src/restic/checker/testing.go +++ b/src/restic/checker/testing.go @@ -1,12 +1,12 @@ package checker import ( - "restic/repository" + "restic" "testing" ) // TestCheckRepo runs the checker on repo. 
-func TestCheckRepo(t testing.TB, repo *repository.Repository) { +func TestCheckRepo(t testing.TB, repo restic.Repository) { chkr := New(repo) hints, errs := chkr.LoadIndex() diff --git a/src/restic/crypto/crypto.go b/src/restic/crypto/crypto.go index 2ebf5d31b..57fdd6230 100644 --- a/src/restic/crypto/crypto.go +++ b/src/restic/crypto/crypto.go @@ -274,9 +274,9 @@ func Encrypt(ks *Key, ciphertext []byte, plaintext []byte) ([]byte, error) { // Decrypt verifies and decrypts the ciphertext. Ciphertext must be in the form // IV || Ciphertext || MAC. plaintext and ciphertext may point to (exactly) the // same slice. -func Decrypt(ks *Key, plaintext []byte, ciphertextWithMac []byte) ([]byte, error) { +func Decrypt(ks *Key, plaintext []byte, ciphertextWithMac []byte) (int, error) { if !ks.Valid() { - return nil, errors.New("invalid key") + return 0, errors.New("invalid key") } // check for plausible length @@ -284,21 +284,26 @@ func Decrypt(ks *Key, plaintext []byte, ciphertextWithMac []byte) ([]byte, error panic("trying to decrypt invalid data: ciphertext too small") } + // check buffer length for plaintext + plaintextLength := len(ciphertextWithMac) - ivSize - macSize + if len(plaintext) < plaintextLength { + return 0, errors.Errorf("plaintext buffer too small, %d < %d", len(plaintext), plaintextLength) + } + // extract mac l := len(ciphertextWithMac) - macSize ciphertextWithIV, mac := ciphertextWithMac[:l], ciphertextWithMac[l:] // verify mac if !poly1305Verify(ciphertextWithIV[ivSize:], ciphertextWithIV[:ivSize], &ks.MAC, mac) { - return nil, ErrUnauthenticated + return 0, ErrUnauthenticated } // extract iv iv, ciphertext := ciphertextWithIV[:ivSize], ciphertextWithIV[ivSize:] - if cap(plaintext) < len(ciphertext) { - // extend plaintext - plaintext = append(plaintext, make([]byte, len(ciphertext)-cap(plaintext))...) + if len(ciphertext) != plaintextLength { + return 0, errors.Errorf("plaintext and ciphertext lengths do not match: %d != %d", len(ciphertext), plaintextLength) } // decrypt data @@ -312,7 +317,7 @@ func Decrypt(ks *Key, plaintext []byte, ciphertextWithMac []byte) ([]byte, error plaintext = plaintext[:len(ciphertext)] e.XORKeyStream(plaintext, ciphertext) - return plaintext, nil + return plaintextLength, nil } // Valid tests if the key is valid. 
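The reworked Decrypt follows the same buffer contract: instead of growing the plaintext slice it now requires a buffer that is already large enough and returns the plaintext length. Because the plaintext is shorter than the ciphertext by the IV and MAC overhead, a buffer of len(ciphertext) always suffices. A caller-side sketch mirroring what repository/key.go does further down (the helper name is invented for illustration):

    package example

    import "restic/crypto"

    // decryptToBuf allocates a buffer as large as the ciphertext, which is
    // guaranteed to hold the plaintext, and keeps only the n bytes that
    // Decrypt reports back.
    func decryptToBuf(k *crypto.Key, ciphertext []byte) ([]byte, error) {
        buf := make([]byte, len(ciphertext))

        n, err := crypto.Decrypt(k, buf, ciphertext)
        if err != nil {
            return nil, err
        }

        return buf[:n], nil
    }

Passing a too-small buffer is now an explicit error rather than a silent reallocation, which is what the new length check at the top of Decrypt enforces.
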
diff --git a/src/restic/crypto/crypto_int_test.go b/src/restic/crypto/crypto_int_test.go index 5fed6b54c..1dbc32623 100644 --- a/src/restic/crypto/crypto_int_test.go +++ b/src/restic/crypto/crypto_int_test.go @@ -100,15 +100,17 @@ func TestCrypto(t *testing.T) { } // decrypt message - _, err = Decrypt(k, []byte{}, msg) + buf := make([]byte, len(tv.plaintext)) + n, err := Decrypt(k, buf, msg) if err != nil { t.Fatal(err) } + buf = buf[:n] // change mac, this must fail msg[len(msg)-8] ^= 0x23 - if _, err = Decrypt(k, []byte{}, msg); err != ErrUnauthenticated { + if _, err = Decrypt(k, buf, msg); err != ErrUnauthenticated { t.Fatal("wrong MAC value not detected") } @@ -118,15 +120,17 @@ func TestCrypto(t *testing.T) { // tamper with message, this must fail msg[16+5] ^= 0x85 - if _, err = Decrypt(k, []byte{}, msg); err != ErrUnauthenticated { + if _, err = Decrypt(k, buf, msg); err != ErrUnauthenticated { t.Fatal("tampered message not detected") } // test decryption - p, err := Decrypt(k, []byte{}, tv.ciphertext) + p := make([]byte, len(tv.ciphertext)) + n, err = Decrypt(k, p, tv.ciphertext) if err != nil { t.Fatal(err) } + p = p[:n] if !bytes.Equal(p, tv.plaintext) { t.Fatalf("wrong plaintext: expected %q but got %q\n", tv.plaintext, p) diff --git a/src/restic/crypto/crypto_test.go b/src/restic/crypto/crypto_test.go index fe799da77..39c3cc169 100644 --- a/src/restic/crypto/crypto_test.go +++ b/src/restic/crypto/crypto_test.go @@ -32,8 +32,10 @@ func TestEncryptDecrypt(t *testing.T) { "ciphertext length does not match: want %d, got %d", len(data)+crypto.Extension, len(ciphertext)) - plaintext, err := crypto.Decrypt(k, nil, ciphertext) + plaintext := make([]byte, len(ciphertext)) + n, err := crypto.Decrypt(k, plaintext, ciphertext) OK(t, err) + plaintext = plaintext[:n] Assert(t, len(plaintext) == len(data), "plaintext length does not match: want %d, got %d", len(data), len(plaintext)) @@ -58,8 +60,10 @@ func TestSmallBuffer(t *testing.T) { cap(ciphertext)) // check for the correct plaintext - plaintext, err := crypto.Decrypt(k, nil, ciphertext) + plaintext := make([]byte, len(ciphertext)) + n, err := crypto.Decrypt(k, plaintext, ciphertext) OK(t, err) + plaintext = plaintext[:n] Assert(t, bytes.Equal(plaintext, data), "wrong plaintext returned") } @@ -78,8 +82,9 @@ func TestSameBuffer(t *testing.T) { OK(t, err) // use the same buffer for decryption - ciphertext, err = crypto.Decrypt(k, ciphertext, ciphertext) + n, err := crypto.Decrypt(k, ciphertext, ciphertext) OK(t, err) + ciphertext = ciphertext[:n] Assert(t, bytes.Equal(ciphertext, data), "wrong plaintext returned") } @@ -97,9 +102,9 @@ func TestCornerCases(t *testing.T) { len(c)) // this should decrypt to nil - p, err := crypto.Decrypt(k, nil, c) + n, err := crypto.Decrypt(k, nil, c) OK(t, err) - Equals(t, []byte(nil), p) + Equals(t, 0, n) // test encryption for same slice, this should return an error _, err = crypto.Encrypt(k, c, c) @@ -160,7 +165,7 @@ func BenchmarkDecrypt(b *testing.B) { b.SetBytes(int64(size)) for i := 0; i < b.N; i++ { - plaintext, err = crypto.Decrypt(k, plaintext, ciphertext) + _, err = crypto.Decrypt(k, plaintext, ciphertext) OK(b, err) } } diff --git a/src/restic/fuse/dir.go b/src/restic/fuse/dir.go index 14f8c7f21..004d02086 100644 --- a/src/restic/fuse/dir.go +++ b/src/restic/fuse/dir.go @@ -12,7 +12,6 @@ import ( "restic" "restic/debug" - "restic/repository" ) // Statically ensure that *dir implement those interface @@ -20,14 +19,14 @@ var _ = fs.HandleReadDirAller(&dir{}) var _ = fs.NodeStringLookuper(&dir{}) 
type dir struct { - repo *repository.Repository + repo restic.Repository items map[string]*restic.Node inode uint64 node *restic.Node ownerIsRoot bool } -func newDir(repo *repository.Repository, node *restic.Node, ownerIsRoot bool) (*dir, error) { +func newDir(repo restic.Repository, node *restic.Node, ownerIsRoot bool) (*dir, error) { debug.Log("newDir", "new dir for %v (%v)", node.Name, node.Subtree.Str()) tree, err := repo.LoadTree(*node.Subtree) if err != nil { @@ -50,7 +49,7 @@ func newDir(repo *repository.Repository, node *restic.Node, ownerIsRoot bool) (* // replaceSpecialNodes replaces nodes with name "." and "/" by their contents. // Otherwise, the node is returned. -func replaceSpecialNodes(repo *repository.Repository, node *restic.Node) ([]*restic.Node, error) { +func replaceSpecialNodes(repo restic.Repository, node *restic.Node) ([]*restic.Node, error) { if node.Type != "dir" || node.Subtree == nil { return []*restic.Node{node}, nil } @@ -67,7 +66,7 @@ func replaceSpecialNodes(repo *repository.Repository, node *restic.Node) ([]*res return tree.Nodes, nil } -func newDirFromSnapshot(repo *repository.Repository, snapshot SnapshotWithId, ownerIsRoot bool) (*dir, error) { +func newDirFromSnapshot(repo restic.Repository, snapshot SnapshotWithId, ownerIsRoot bool) (*dir, error) { debug.Log("newDirFromSnapshot", "new dir for snapshot %v (%v)", snapshot.ID.Str(), snapshot.Tree.Str()) tree, err := repo.LoadTree(*snapshot.Tree) if err != nil { diff --git a/src/restic/fuse/file.go b/src/restic/fuse/file.go index d5949ee86..ae1b90124 100644 --- a/src/restic/fuse/file.go +++ b/src/restic/fuse/file.go @@ -27,7 +27,7 @@ var _ = fs.HandleReleaser(&file{}) // for fuse operations. type BlobLoader interface { LookupBlobSize(restic.ID, restic.BlobType) (uint, error) - LoadBlob(restic.ID, restic.BlobType, []byte) ([]byte, error) + LoadDataBlob(restic.ID, []byte) (int, error) } type file struct { @@ -109,12 +109,12 @@ func (f *file) getBlobAt(i int) (blob []byte, err error) { buf = make([]byte, f.sizes[i]) } - blob, err = f.repo.LoadBlob(f.node.Content[i], restic.DataBlob, buf) + n, err := f.repo.LoadDataBlob(f.node.Content[i], buf) if err != nil { debug.Log("file.getBlobAt", "LoadBlob(%v, %v) failed: %v", f.node.Name, f.node.Content[i], err) return nil, err } - f.blobs[i] = blob + f.blobs[i] = buf[:n] return blob, nil } diff --git a/src/restic/fuse/file_test.go b/src/restic/fuse/file_test.go index 58e6b33ba..0101cadc9 100644 --- a/src/restic/fuse/file_test.go +++ b/src/restic/fuse/file_test.go @@ -34,19 +34,19 @@ func (m *MockRepo) LookupBlobSize(id restic.ID, t restic.BlobType) (uint, error) return uint(len(buf)), nil } -func (m *MockRepo) LoadBlob(id restic.ID, t restic.BlobType, buf []byte) ([]byte, error) { - size, err := m.LookupBlobSize(id, t) +func (m *MockRepo) LoadDataBlob(id restic.ID, buf []byte) (int, error) { + size, err := m.LookupBlobSize(id, restic.DataBlob) if err != nil { - return nil, err + return 0, err } if uint(cap(buf)) < size { - return nil, errors.New("buffer too small") + return 0, errors.New("buffer too small") } buf = buf[:size] copy(buf, m.blobs[id]) - return buf, nil + return int(size), nil } type MockContext struct{} diff --git a/src/restic/fuse/link.go b/src/restic/fuse/link.go index 732446a7a..43fb35020 100644 --- a/src/restic/fuse/link.go +++ b/src/restic/fuse/link.go @@ -5,7 +5,6 @@ package fuse import ( "restic" - "restic/repository" "bazil.org/fuse" "bazil.org/fuse/fs" @@ -20,7 +19,7 @@ type link struct { ownerIsRoot bool } -func newLink(repo 
*repository.Repository, node *restic.Node, ownerIsRoot bool) (*link, error) { +func newLink(repo restic.Repository, node *restic.Node, ownerIsRoot bool) (*link, error) { return &link{node: node, ownerIsRoot: ownerIsRoot}, nil } diff --git a/src/restic/fuse/snapshot.go b/src/restic/fuse/snapshot.go index 8d14823b0..b97e3ced9 100644 --- a/src/restic/fuse/snapshot.go +++ b/src/restic/fuse/snapshot.go @@ -13,7 +13,6 @@ import ( "restic" "restic/debug" - "restic/repository" "golang.org/x/net/context" ) @@ -30,7 +29,7 @@ var _ = fs.HandleReadDirAller(&SnapshotsDir{}) var _ = fs.NodeStringLookuper(&SnapshotsDir{}) type SnapshotsDir struct { - repo *repository.Repository + repo restic.Repository ownerIsRoot bool // knownSnapshots maps snapshot timestamp to the snapshot @@ -38,7 +37,7 @@ type SnapshotsDir struct { knownSnapshots map[string]SnapshotWithId } -func NewSnapshotsDir(repo *repository.Repository, ownerIsRoot bool) *SnapshotsDir { +func NewSnapshotsDir(repo restic.Repository, ownerIsRoot bool) *SnapshotsDir { debug.Log("NewSnapshotsDir", "fuse mount initiated") return &SnapshotsDir{ repo: repo, diff --git a/src/restic/index/index_test.go b/src/restic/index/index_test.go index 521d0c0b0..f1378531f 100644 --- a/src/restic/index/index_test.go +++ b/src/restic/index/index_test.go @@ -15,7 +15,7 @@ var ( depth = 3 ) -func createFilledRepo(t testing.TB, snapshots int, dup float32) (*repository.Repository, func()) { +func createFilledRepo(t testing.TB, snapshots int, dup float32) (restic.Repository, func()) { repo, cleanup := repository.TestRepository(t) for i := 0; i < 3; i++ { @@ -25,7 +25,7 @@ func createFilledRepo(t testing.TB, snapshots int, dup float32) (*repository.Rep return repo, cleanup } -func validateIndex(t testing.TB, repo *repository.Repository, idx *Index) { +func validateIndex(t testing.TB, repo restic.Repository, idx *Index) { for id := range repo.List(restic.DataFile, nil) { if _, ok := idx.Packs[id]; !ok { t.Errorf("pack %v missing from index", id.Str()) @@ -162,7 +162,7 @@ func TestIndexDuplicateBlobs(t *testing.T) { t.Logf("%d packs with duplicate blobs", len(packs)) } -func loadIndex(t testing.TB, repo *repository.Repository) *Index { +func loadIndex(t testing.TB, repo restic.Repository) *Index { idx, err := Load(repo, nil) if err != nil { t.Fatalf("Load() returned error %v", err) diff --git a/src/restic/pack/pack.go b/src/restic/pack/pack.go index 40d10839b..17f79b09a 100644 --- a/src/restic/pack/pack.go +++ b/src/restic/pack/pack.go @@ -225,12 +225,13 @@ func List(k *crypto.Key, rd io.ReaderAt, size int64) (entries []restic.Blob, err return nil, err } - hdr, err := crypto.Decrypt(k, buf, buf) + n, err := crypto.Decrypt(k, buf, buf) if err != nil { return nil, err } + buf = buf[:n] - hdrRd := bytes.NewReader(hdr) + hdrRd := bytes.NewReader(buf) pos := uint(0) for { diff --git a/src/restic/repository.go b/src/restic/repository.go index 5c1067f7b..c7e8ae170 100644 --- a/src/restic/repository.go +++ b/src/restic/repository.go @@ -16,6 +16,7 @@ type Repository interface { Index() Index SaveFullIndex() error SaveIndex() error + LoadIndex() error Config() Config diff --git a/src/restic/repository/key.go b/src/restic/repository/key.go index b874dc644..9a0a85c21 100644 --- a/src/restic/repository/key.go +++ b/src/restic/repository/key.go @@ -85,10 +85,12 @@ func OpenKey(s *Repository, name string, password string) (*Key, error) { } // decrypt master keys - buf, err := crypto.Decrypt(k.user, []byte{}, k.Data) + buf := make([]byte, len(k.Data)) + n, err := crypto.Decrypt(k.user, 
buf, k.Data) if err != nil { return nil, err } + buf = buf[:n] // restore json k.master = &crypto.Key{} diff --git a/src/restic/repository/repack.go b/src/restic/repository/repack.go index 274f2d320..2ce701ad9 100644 --- a/src/restic/repository/repack.go +++ b/src/restic/repository/repack.go @@ -15,7 +15,7 @@ import ( // these packs. Each pack is loaded and the blobs listed in keepBlobs is saved // into a new pack. Afterwards, the packs are removed. This operation requires // an exclusive lock on the repo. -func Repack(repo *Repository, packs restic.IDSet, keepBlobs restic.BlobSet) (err error) { +func Repack(repo restic.Repository, packs restic.IDSet, keepBlobs restic.BlobSet) (err error) { debug.Log("Repack", "repacking %d packs while keeping %d blobs", len(packs), len(keepBlobs)) buf := make([]byte, 0, maxPackSize) @@ -48,16 +48,21 @@ func Repack(repo *Repository, packs restic.IDSet, keepBlobs restic.BlobSet) (err continue } - ciphertext := buf[entry.Offset : entry.Offset+entry.Length] + debug.Log("Repack", " process blob %v", h) - if cap(plaintext) < len(ciphertext) { + ciphertext := buf[entry.Offset : entry.Offset+entry.Length] + plaintext = plaintext[:len(plaintext)] + if len(plaintext) < len(ciphertext) { plaintext = make([]byte, len(ciphertext)) } - plaintext, err = crypto.Decrypt(repo.Key(), plaintext, ciphertext) + debug.Log("Repack", " ciphertext %d, plaintext %d", len(plaintext), len(ciphertext)) + + n, err := crypto.Decrypt(repo.Key(), plaintext, ciphertext) if err != nil { return err } + plaintext = plaintext[:n] _, err = repo.SaveAndEncrypt(entry.Type, plaintext, &entry.ID) if err != nil { diff --git a/src/restic/repository/repack_test.go b/src/restic/repository/repack_test.go index 026e43cbc..9b118d7b4 100644 --- a/src/restic/repository/repack_test.go +++ b/src/restic/repository/repack_test.go @@ -23,7 +23,7 @@ func random(t testing.TB, length int) []byte { return buf } -func createRandomBlobs(t testing.TB, repo *repository.Repository, blobs int, pData float32) { +func createRandomBlobs(t testing.TB, repo restic.Repository, blobs int, pData float32) { for i := 0; i < blobs; i++ { var ( tpe restic.BlobType @@ -65,7 +65,7 @@ func createRandomBlobs(t testing.TB, repo *repository.Repository, blobs int, pDa // selectBlobs splits the list of all blobs randomly into two lists. A blob // will be contained in the firstone ith probability p. 
-func selectBlobs(t *testing.T, repo *repository.Repository, p float32) (list1, list2 restic.BlobSet) { +func selectBlobs(t *testing.T, repo restic.Repository, p float32) (list1, list2 restic.BlobSet) { done := make(chan struct{}) defer close(done) @@ -100,7 +100,7 @@ func selectBlobs(t *testing.T, repo *repository.Repository, p float32) (list1, l return list1, list2 } -func listPacks(t *testing.T, repo *repository.Repository) restic.IDSet { +func listPacks(t *testing.T, repo restic.Repository) restic.IDSet { done := make(chan struct{}) defer close(done) @@ -112,7 +112,7 @@ func listPacks(t *testing.T, repo *repository.Repository) restic.IDSet { return list } -func findPacksForBlobs(t *testing.T, repo *repository.Repository, blobs restic.BlobSet) restic.IDSet { +func findPacksForBlobs(t *testing.T, repo restic.Repository, blobs restic.BlobSet) restic.IDSet { packs := restic.NewIDSet() idx := repo.Index() @@ -130,26 +130,26 @@ func findPacksForBlobs(t *testing.T, repo *repository.Repository, blobs restic.B return packs } -func repack(t *testing.T, repo *repository.Repository, packs restic.IDSet, blobs restic.BlobSet) { +func repack(t *testing.T, repo restic.Repository, packs restic.IDSet, blobs restic.BlobSet) { err := repository.Repack(repo, packs, blobs) if err != nil { t.Fatal(err) } } -func saveIndex(t *testing.T, repo *repository.Repository) { +func saveIndex(t *testing.T, repo restic.Repository) { if err := repo.SaveIndex(); err != nil { t.Fatalf("repo.SaveIndex() %v", err) } } -func rebuildIndex(t *testing.T, repo *repository.Repository) { +func rebuildIndex(t *testing.T, repo restic.Repository) { if err := repository.RebuildIndex(repo); err != nil { t.Fatalf("error rebuilding index: %v", err) } } -func reloadIndex(t *testing.T, repo *repository.Repository) { +func reloadIndex(t *testing.T, repo restic.Repository) { repo.SetIndex(repository.NewMasterIndex()) if err := repo.LoadIndex(); err != nil { t.Fatalf("error loading new index: %v", err) diff --git a/src/restic/repository/repository.go b/src/restic/repository/repository.go index 87025425d..e0d3659c2 100644 --- a/src/restic/repository/repository.go +++ b/src/restic/repository/repository.go @@ -72,20 +72,22 @@ func (r *Repository) LoadAndDecrypt(t restic.FileType, id restic.ID) ([]byte, er return nil, errors.New("invalid data returned") } + plain := make([]byte, len(buf)) + // decrypt - plain, err := r.Decrypt(buf) + n, err := r.decryptTo(plain, buf) if err != nil { return nil, err } - return plain, nil + return plain[:n], nil } // LoadBlob tries to load and decrypt content identified by t and id from a // pack from the backend, the result is stored in plaintextBuf, which must be // large enough to hold the complete blob. 
func (r *Repository) LoadBlob(id restic.ID, t restic.BlobType, plaintextBuf []byte) ([]byte, error) { - debug.Log("Repo.LoadBlob", "load %v with id %v", t, id.Str()) + debug.Log("Repo.LoadBlob", "load %v with id %v (buf %d)", t, id.Str(), len(plaintextBuf)) // lookup plaintext size of blob size, err := r.idx.LookupSize(id, t) @@ -94,11 +96,8 @@ func (r *Repository) LoadBlob(id restic.ID, t restic.BlobType, plaintextBuf []by } // make sure the plaintext buffer is large enough, extend otherwise - plaintextBufSize := uint(cap(plaintextBuf)) - if size > plaintextBufSize { - debug.Log("Repo.LoadBlob", "need to expand buffer: want %d bytes, got %d", - size, plaintextBufSize) - plaintextBuf = make([]byte, size) + if len(plaintextBuf) < int(size) { + return nil, errors.Errorf("buffer is too small: %d < %d", len(plaintextBuf), size) } // lookup packs @@ -134,11 +133,12 @@ func (r *Repository) LoadBlob(id restic.ID, t restic.BlobType, plaintextBuf []by } // decrypt - plaintextBuf, err = r.decryptTo(plaintextBuf, ciphertextBuf) + n, err = r.decryptTo(plaintextBuf, ciphertextBuf) if err != nil { lastError = errors.Errorf("decrypting blob %v failed: %v", id, err) continue } + plaintextBuf = plaintextBuf[:n] // check hash if !restic.Hash(plaintextBuf).Equal(id) { @@ -403,7 +403,7 @@ func (r *Repository) LoadIndex() error { } // LoadIndex loads the index id from backend and returns it. -func LoadIndex(repo *Repository, id restic.ID) (*Index, error) { +func LoadIndex(repo restic.Repository, id restic.ID) (*Index, error) { idx, err := LoadIndexWithDecoder(repo, id, DecodeIndex) if err == nil { return idx, nil @@ -467,19 +467,14 @@ func (r *Repository) init(password string, cfg restic.Config) error { return err } -// Decrypt authenticates and decrypts ciphertext and returns the plaintext. -func (r *Repository) Decrypt(ciphertext []byte) ([]byte, error) { - return r.decryptTo(nil, ciphertext) -} - // decrypt authenticates and decrypts ciphertext and stores the result in // plaintext. -func (r *Repository) decryptTo(plaintext, ciphertext []byte) ([]byte, error) { +func (r *Repository) decryptTo(plaintext, ciphertext []byte) (int, error) { if r.key == nil { - return nil, errors.New("key for repository not set") + return 0, errors.New("key for repository not set") } - return crypto.Decrypt(r.key, nil, ciphertext) + return crypto.Decrypt(r.key, plaintext, ciphertext) } // Encrypt encrypts and authenticates the plaintext and saves the result in @@ -502,15 +497,6 @@ func (r *Repository) KeyName() string { return r.keyName } -// Count returns the number of blobs of a given type in the backend. -func (r *Repository) Count(t restic.FileType) (n uint) { - for _ = range r.be.List(t, nil) { - n++ - } - - return -} - func (r *Repository) list(t restic.FileType, done <-chan struct{}, out chan<- restic.ID) { defer close(out) in := r.be.List(t, done) @@ -592,14 +578,17 @@ func (r *Repository) Close() error { // LoadTree loads a tree from the repository. 
func (r *Repository) LoadTree(id restic.ID) (*restic.Tree, error) { + debug.Log("repo.LoadTree", "load tree %v", id.Str()) + size, err := r.idx.LookupSize(id, restic.TreeBlob) if err != nil { return nil, err } + debug.Log("repo.LoadTree", "size is %d, create buffer", size) buf := make([]byte, size) - buf, err = r.LoadBlob(id, restic.TreeBlob, nil) + buf, err = r.LoadBlob(id, restic.TreeBlob, buf) if err != nil { return nil, err } @@ -615,6 +604,7 @@ func (r *Repository) LoadTree(id restic.ID) (*restic.Tree, error) { // LoadDataBlob loads a data blob from the repository to the buffer. func (r *Repository) LoadDataBlob(id restic.ID, buf []byte) (int, error) { + debug.Log("repo.LoadDataBlob", "load blob %v into buf %p", id.Str(), buf) size, err := r.idx.LookupSize(id, restic.DataBlob) if err != nil { return 0, err @@ -629,5 +619,7 @@ func (r *Repository) LoadDataBlob(id restic.ID, buf []byte) (int, error) { return 0, err } + debug.Log("repo.LoadDataBlob", "loaded %d bytes into buf %p", len(buf), buf) + return len(buf), err } diff --git a/src/restic/repository/repository_test.go b/src/restic/repository/repository_test.go index 98b8edd84..644650c30 100644 --- a/src/restic/repository/repository_test.go +++ b/src/restic/repository/repository_test.go @@ -90,8 +90,10 @@ func TestSave(t *testing.T) { // OK(t, repo.SaveIndex()) // read back - buf, err := repo.LoadBlob(id, restic.DataBlob, make([]byte, size)) + buf := make([]byte, size) + n, err := repo.LoadDataBlob(id, buf) OK(t, err) + Equals(t, len(buf), n) Assert(t, len(buf) == len(data), "number of bytes read back does not match: expected %d, got %d", @@ -122,8 +124,10 @@ func TestSaveFrom(t *testing.T) { OK(t, repo.Flush()) // read back - buf, err := repo.LoadBlob(id, restic.DataBlob, make([]byte, size)) + buf := make([]byte, size) + n, err := repo.LoadDataBlob(id, buf) OK(t, err) + Equals(t, len(buf), n) Assert(t, len(buf) == len(data), "number of bytes read back does not match: expected %d, got %d", @@ -157,7 +161,7 @@ func BenchmarkSaveAndEncrypt(t *testing.B) { } } -func TestLoadJSONPack(t *testing.T) { +func TestLoadTree(t *testing.T) { repo := SetupRepo() defer TeardownRepo(repo) @@ -169,12 +173,11 @@ func TestLoadJSONPack(t *testing.T) { sn := SnapshotDir(t, repo, BenchArchiveDirectory, nil) OK(t, repo.Flush()) - tree := restic.NewTree() - err := repo.LoadJSONPack(restic.TreeBlob, *sn.Tree, &tree) + _, err := repo.LoadTree(*sn.Tree) OK(t, err) } -func BenchmarkLoadJSONPack(t *testing.B) { +func BenchmarkLoadTree(t *testing.B) { repo := SetupRepo() defer TeardownRepo(repo) @@ -186,12 +189,10 @@ func BenchmarkLoadJSONPack(t *testing.B) { sn := SnapshotDir(t, repo, BenchArchiveDirectory, nil) OK(t, repo.Flush()) - tree := restic.NewTree() - t.ResetTimer() for i := 0; i < t.N; i++ { - err := repo.LoadJSONPack(restic.TreeBlob, *sn.Tree, &tree) + _, err := repo.LoadTree(*sn.Tree) OK(t, err) } } @@ -244,7 +245,7 @@ func BenchmarkLoadIndex(b *testing.B) { } // saveRandomDataBlobs generates random data blobs and saves them to the repository. 
-func saveRandomDataBlobs(t testing.TB, repo *repository.Repository, num int, sizeMax int) { +func saveRandomDataBlobs(t testing.TB, repo restic.Repository, num int, sizeMax int) { for i := 0; i < num; i++ { size := mrand.Int() % sizeMax diff --git a/src/restic/test/backend.go b/src/restic/test/backend.go index d73f0d4fe..e8710b8c8 100644 --- a/src/restic/test/backend.go +++ b/src/restic/test/backend.go @@ -49,7 +49,7 @@ func getBoolVar(name string, defaultValue bool) bool { return defaultValue } -func SetupRepo() *repository.Repository { +func SetupRepo() restic.Repository { tempdir, err := ioutil.TempDir(TestTempDir, "restic-test-") if err != nil { panic(err) @@ -70,27 +70,29 @@ func SetupRepo() *repository.Repository { return repo } -func TeardownRepo(repo *repository.Repository) { +func TeardownRepo(repo restic.Repository) { if !TestCleanupTempDirs { l := repo.Backend().(*local.Local) fmt.Printf("leaving local backend at %s\n", l.Location()) return } - err := repo.Delete() - if err != nil { - panic(err) + if r, ok := repo.(restic.Deleter); ok { + err := r.Delete() + if err != nil { + panic(err) + } } } -func SnapshotDir(t testing.TB, repo *repository.Repository, path string, parent *restic.ID) *restic.Snapshot { +func SnapshotDir(t testing.TB, repo restic.Repository, path string, parent *restic.ID) *restic.Snapshot { arch := archiver.New(repo) sn, _, err := arch.Snapshot(nil, []string{path}, parent) OK(t, err) return sn } -func WithRepo(t testing.TB, f func(*repository.Repository)) { +func WithRepo(t testing.TB, f func(restic.Repository)) { repo := SetupRepo() f(repo) TeardownRepo(repo) diff --git a/src/restic/test/helpers.go b/src/restic/test/helpers.go index 6c7ee8de1..2fbdb83d6 100644 --- a/src/restic/test/helpers.go +++ b/src/restic/test/helpers.go @@ -34,7 +34,7 @@ func Assert(tb testing.TB, condition bool, msg string, v ...interface{}) { func OK(tb testing.TB, err error) { if err != nil { _, file, line, _ := runtime.Caller(1) - fmt.Printf("\033[31m%s:%d: unexpected error: %s\033[39m\n\n", filepath.Base(file), line, err.Error()) + fmt.Printf("\033[31m%s:%d: unexpected error: %+v\033[39m\n\n", filepath.Base(file), line, err) tb.FailNow() } } @@ -209,7 +209,7 @@ func WithTestEnvironment(t testing.TB, repoFixture string, f func(repodir string } // OpenLocalRepo opens the local repository located at dir. -func OpenLocalRepo(t testing.TB, dir string) *repository.Repository { +func OpenLocalRepo(t testing.TB, dir string) restic.Repository { be, err := local.Open(dir) OK(t, err) diff --git a/src/restic/testing.go b/src/restic/testing.go index e4fe6ddb3..9b6b627c8 100644 --- a/src/restic/testing.go +++ b/src/restic/testing.go @@ -8,8 +8,9 @@ import ( "testing" "time" - "github.com/restic/chunker" "restic/errors" + + "github.com/restic/chunker" ) // fakeFile returns a reader which yields deterministic pseudo-random data. diff --git a/src/restic/walk/walk_test.go b/src/restic/walk/walk_test.go index 221f5df9c..681c2f1e6 100644 --- a/src/restic/walk/walk_test.go +++ b/src/restic/walk/walk_test.go @@ -10,7 +10,6 @@ import ( "restic" "restic/archiver" "restic/pipe" - "restic/repository" . 
"restic/test" "restic/walk" ) @@ -91,7 +90,7 @@ func TestWalkTree(t *testing.T) { } type delayRepo struct { - repo *repository.Repository + repo restic.Repository delay time.Duration } From 2054e3c026b401e8d90ed3c9ac2d41b3f44f59fb Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sat, 3 Sep 2016 14:03:43 +0200 Subject: [PATCH 26/40] Fix tests --- src/restic/archiver/archive_reader_test.go | 21 +++++++++++++-------- src/restic/fuse/file_test.go | 16 ++++++++-------- 2 files changed, 21 insertions(+), 16 deletions(-) diff --git a/src/restic/archiver/archive_reader_test.go b/src/restic/archiver/archive_reader_test.go index e7e88d6cd..f9417cb51 100644 --- a/src/restic/archiver/archive_reader_test.go +++ b/src/restic/archiver/archive_reader_test.go @@ -7,17 +7,15 @@ import ( "restic" "restic/repository" "testing" - - "github.com/restic/chunker" ) -func loadBlob(t *testing.T, repo restic.Repository, id restic.ID, buf []byte) []byte { +func loadBlob(t *testing.T, repo restic.Repository, id restic.ID, buf []byte) int { n, err := repo.LoadDataBlob(id, buf) if err != nil { t.Fatalf("LoadBlob(%v) returned error %v", id, err) } - return buf[:n] + return n } func checkSavedFile(t *testing.T, repo restic.Repository, treeID restic.ID, name string, rd io.Reader) { @@ -40,12 +38,19 @@ func checkSavedFile(t *testing.T, repo restic.Repository, treeID restic.ID, name } // check blobs - buf := make([]byte, chunker.MaxSize) - buf2 := make([]byte, chunker.MaxSize) for i, id := range node.Content { - buf = loadBlob(t, repo, id, buf) + size, err := repo.LookupBlobSize(id, restic.DataBlob) + if err != nil { + t.Fatal(err) + } - buf2 = buf2[:len(buf)] + buf := make([]byte, int(size)) + n := loadBlob(t, repo, id, buf) + if n != len(buf) { + t.Errorf("wrong number of bytes read, want %d, got %d", len(buf), n) + } + + buf2 := make([]byte, int(size)) _, err = io.ReadFull(rd, buf2) if err != nil { t.Fatal(err) diff --git a/src/restic/fuse/file_test.go b/src/restic/fuse/file_test.go index 0101cadc9..1013a07b5 100644 --- a/src/restic/fuse/file_test.go +++ b/src/restic/fuse/file_test.go @@ -40,7 +40,7 @@ func (m *MockRepo) LoadDataBlob(id restic.ID, buf []byte) (int, error) { return 0, err } - if uint(cap(buf)) < size { + if uint(len(buf)) < size { return 0, errors.New("buffer too small") } @@ -81,7 +81,7 @@ func genTestContent() map[restic.ID][]byte { const maxBufSize = 20 * 1024 * 1024 -func testRead(t *testing.T, f *file, offset, length int, data []byte) []byte { +func testRead(t *testing.T, f *file, offset, length int, data []byte) { ctx := MockContext{} req := &fuse.ReadRequest{ @@ -92,8 +92,6 @@ func testRead(t *testing.T, f *file, offset, length int, data []byte) []byte { Data: make([]byte, length), } OK(t, f.Read(ctx, req, resp)) - - return resp.Data } var offsetReadsTests = []struct { @@ -135,8 +133,9 @@ func TestFuseFile(t *testing.T) { for i, test := range offsetReadsTests { b := memfile[test.offset : test.offset+test.length] - res := testRead(t, f, test.offset, test.length, b) - if !bytes.Equal(b, res) { + buf := make([]byte, test.length) + testRead(t, f, test.offset, test.length, buf) + if !bytes.Equal(b, buf) { t.Errorf("test %d failed, wrong data returned", i) } } @@ -150,8 +149,9 @@ func TestFuseFile(t *testing.T) { } b := memfile[offset : offset+length] - res := testRead(t, f, offset, length, b) - if !bytes.Equal(b, res) { + buf := make([]byte, length) + testRead(t, f, offset, length, buf) + if !bytes.Equal(b, buf) { t.Errorf("test %d failed (offset %d, length %d), wrong data returned", i, offset, 
length) } } From 5170c4898a4bc55e9fe85c148db9d2bdc14fe5b6 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sat, 3 Sep 2016 14:13:26 +0200 Subject: [PATCH 27/40] Address hound comments --- src/restic/fuse/dir.go | 2 +- src/restic/fuse/fuse.go | 2 +- src/restic/fuse/snapshot.go | 2 +- src/restic/node.go | 2 ++ src/restic/test/backend.go | 1 + 5 files changed, 6 insertions(+), 3 deletions(-) diff --git a/src/restic/fuse/dir.go b/src/restic/fuse/dir.go index 004d02086..de970d526 100644 --- a/src/restic/fuse/dir.go +++ b/src/restic/fuse/dir.go @@ -97,7 +97,7 @@ func newDirFromSnapshot(repo restic.Repository, snapshot SnapshotWithId, ownerIs Mode: os.ModeDir | 0555, }, items: items, - inode: inodeFromBackendId(snapshot.ID), + inode: inodeFromBackendID(snapshot.ID), ownerIsRoot: ownerIsRoot, }, nil } diff --git a/src/restic/fuse/fuse.go b/src/restic/fuse/fuse.go index 45bf3342d..e8e45c445 100644 --- a/src/restic/fuse/fuse.go +++ b/src/restic/fuse/fuse.go @@ -11,6 +11,6 @@ import ( // inodeFromBackendId returns a unique uint64 from a backend id. // Endianness has no specific meaning, it is just the simplest way to // transform a []byte to an uint64 -func inodeFromBackendId(id restic.ID) uint64 { +func inodeFromBackendID(id restic.ID) uint64 { return binary.BigEndian.Uint64(id[:8]) } diff --git a/src/restic/fuse/snapshot.go b/src/restic/fuse/snapshot.go index b97e3ced9..e318a79dd 100644 --- a/src/restic/fuse/snapshot.go +++ b/src/restic/fuse/snapshot.go @@ -94,7 +94,7 @@ func (sn *SnapshotsDir) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) { ret := make([]fuse.Dirent, 0) for _, snapshot := range sn.knownSnapshots { ret = append(ret, fuse.Dirent{ - Inode: inodeFromBackendId(snapshot.ID), + Inode: inodeFromBackendID(snapshot.ID), Type: fuse.DT_Dir, Name: snapshot.Time.Format(time.RFC3339), }) diff --git a/src/restic/node.go b/src/restic/node.go index c4cff8ca5..bc80835bf 100644 --- a/src/restic/node.go +++ b/src/restic/node.go @@ -59,6 +59,7 @@ func (node Node) String() string { return fmt.Sprintf("", node.Type, node.Name) } +// Tree returns this node's tree object. func (node Node) Tree() *Tree { return node.tree } @@ -371,6 +372,7 @@ func (node Node) sameContent(other Node) bool { return true } +// IsNewer returns true of the file has been updated since the last Stat(). func (node *Node) IsNewer(path string, fi os.FileInfo) bool { if node.Type != "file" { debug.Log("node.IsNewer", "node %v is newer: not file", path) diff --git a/src/restic/test/backend.go b/src/restic/test/backend.go index e8710b8c8..9ed9a2efe 100644 --- a/src/restic/test/backend.go +++ b/src/restic/test/backend.go @@ -85,6 +85,7 @@ func TeardownRepo(repo restic.Repository) { } } +// SnapshotDir creates a new snapshot of path. func SnapshotDir(t testing.TB, repo restic.Repository, path string, parent *restic.ID) *restic.Snapshot { arch := archiver.New(repo) sn, _, err := arch.Snapshot(nil, []string{path}, parent) From 878c1cd9361baa14b6a0ae8e536b7a286f0cf019 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sat, 3 Sep 2016 14:15:20 +0200 Subject: [PATCH 28/40] Add more comments --- src/restic/fuse/snapshot.go | 1 + src/restic/test/backend.go | 3 +++ 2 files changed, 4 insertions(+) diff --git a/src/restic/fuse/snapshot.go b/src/restic/fuse/snapshot.go index e318a79dd..d71adbc79 100644 --- a/src/restic/fuse/snapshot.go +++ b/src/restic/fuse/snapshot.go @@ -37,6 +37,7 @@ type SnapshotsDir struct { knownSnapshots map[string]SnapshotWithId } +// NewSnapshotsDir returns a new dir object for the snapshots. 
func NewSnapshotsDir(repo restic.Repository, ownerIsRoot bool) *SnapshotsDir { debug.Log("NewSnapshotsDir", "fuse mount initiated") return &SnapshotsDir{ diff --git a/src/restic/test/backend.go b/src/restic/test/backend.go index 9ed9a2efe..66567ab79 100644 --- a/src/restic/test/backend.go +++ b/src/restic/test/backend.go @@ -49,6 +49,7 @@ func getBoolVar(name string, defaultValue bool) bool { return defaultValue } +// SetupRepo returns a repo setup in a temp dir. func SetupRepo() restic.Repository { tempdir, err := ioutil.TempDir(TestTempDir, "restic-test-") if err != nil { @@ -70,6 +71,7 @@ func SetupRepo() restic.Repository { return repo } +// TeardownRepo removes a repository created by SetupRepo. func TeardownRepo(repo restic.Repository) { if !TestCleanupTempDirs { l := repo.Backend().(*local.Local) @@ -93,6 +95,7 @@ func SnapshotDir(t testing.TB, repo restic.Repository, path string, parent *rest return sn } +// WithRepo runs the function t with a repository that is removed after f returns. func WithRepo(t testing.TB, f func(restic.Repository)) { repo := SetupRepo() f(repo) From 1cc59010f508a63ebea08585e2f7d5863eee7462 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sat, 3 Sep 2016 14:23:49 +0200 Subject: [PATCH 29/40] Remove LoadJSONPack, un-export loadBlob --- src/cmds/restic/cmd_cat.go | 8 ++--- src/restic/repository/repository.go | 45 ++++++++++++----------------- 2 files changed, 22 insertions(+), 31 deletions(-) diff --git a/src/cmds/restic/cmd_cat.go b/src/cmds/restic/cmd_cat.go index 51fb2f69f..d5bae8b06 100644 --- a/src/cmds/restic/cmd_cat.go +++ b/src/cmds/restic/cmd_cat.go @@ -173,12 +173,13 @@ func (cmd CmdCat) Execute(args []string) error { blob := list[0] buf := make([]byte, blob.Length) - data, err := repo.LoadBlob(id, t, buf) + n, err := repo.LoadDataBlob(id, buf) if err != nil { return err } + buf = buf[:n] - _, err = os.Stdout.Write(data) + _, err = os.Stdout.Write(buf) return err } @@ -186,8 +187,7 @@ func (cmd CmdCat) Execute(args []string) error { case "tree": debug.Log("cat", "cat tree %v", id.Str()) - tree := restic.NewTree() - err = repo.LoadJSONPack(restic.TreeBlob, id, tree) + tree, err := repo.LoadTree(id) if err != nil { debug.Log("cat", "unable to load tree %v: %v", id.Str(), err) return err diff --git a/src/restic/repository/repository.go b/src/restic/repository/repository.go index e0d3659c2..5a6ee64f4 100644 --- a/src/restic/repository/repository.go +++ b/src/restic/repository/repository.go @@ -83,36 +83,36 @@ func (r *Repository) LoadAndDecrypt(t restic.FileType, id restic.ID) ([]byte, er return plain[:n], nil } -// LoadBlob tries to load and decrypt content identified by t and id from a +// loadBlob tries to load and decrypt content identified by t and id from a // pack from the backend, the result is stored in plaintextBuf, which must be // large enough to hold the complete blob. 
-func (r *Repository) LoadBlob(id restic.ID, t restic.BlobType, plaintextBuf []byte) ([]byte, error) { - debug.Log("Repo.LoadBlob", "load %v with id %v (buf %d)", t, id.Str(), len(plaintextBuf)) +func (r *Repository) loadBlob(id restic.ID, t restic.BlobType, plaintextBuf []byte) (int, error) { + debug.Log("Repo.loadBlob", "load %v with id %v (buf %d)", t, id.Str(), len(plaintextBuf)) // lookup plaintext size of blob size, err := r.idx.LookupSize(id, t) if err != nil { - return nil, err + return 0, err } // make sure the plaintext buffer is large enough, extend otherwise if len(plaintextBuf) < int(size) { - return nil, errors.Errorf("buffer is too small: %d < %d", len(plaintextBuf), size) + return 0, errors.Errorf("buffer is too small: %d < %d", len(plaintextBuf), size) } // lookup packs blobs, err := r.idx.Lookup(id, t) if err != nil { - debug.Log("Repo.LoadBlob", "id %v not found in index: %v", id.Str(), err) - return nil, err + debug.Log("Repo.loadBlob", "id %v not found in index: %v", id.Str(), err) + return 0, err } var lastError error for _, blob := range blobs { - debug.Log("Repo.LoadBlob", "id %v found: %v", id.Str(), blob) + debug.Log("Repo.loadBlob", "id %v found: %v", id.Str(), blob) if blob.Type != t { - debug.Log("Repo.LoadBlob", "blob %v has wrong block type, want %v", blob, t) + debug.Log("Repo.loadBlob", "blob %v has wrong block type, want %v", blob, t) } // load blob from pack @@ -120,7 +120,7 @@ func (r *Repository) LoadBlob(id restic.ID, t restic.BlobType, plaintextBuf []by ciphertextBuf := make([]byte, blob.Length) n, err := r.be.Load(h, ciphertextBuf, int64(blob.Offset)) if err != nil { - debug.Log("Repo.LoadBlob", "error loading blob %v: %v", blob, err) + debug.Log("Repo.loadBlob", "error loading blob %v: %v", blob, err) lastError = err continue } @@ -128,7 +128,7 @@ func (r *Repository) LoadBlob(id restic.ID, t restic.BlobType, plaintextBuf []by if uint(n) != blob.Length { lastError = errors.Errorf("error loading blob %v: wrong length returned, want %d, got %d", id.Str(), blob.Length, uint(n)) - debug.Log("Repo.LoadBlob", "lastError: %v", lastError) + debug.Log("Repo.loadBlob", "lastError: %v", lastError) continue } @@ -146,14 +146,14 @@ func (r *Repository) LoadBlob(id restic.ID, t restic.BlobType, plaintextBuf []by continue } - return plaintextBuf, nil + return len(plaintextBuf), nil } if lastError != nil { - return nil, lastError + return 0, lastError } - return nil, errors.Errorf("loading blob %v from %v packs failed", id.Str(), len(blobs)) + return 0, errors.Errorf("loading blob %v from %v packs failed", id.Str(), len(blobs)) } // closeOrErr calls cl.Close() and sets err to the returned error value if @@ -177,17 +177,6 @@ func (r *Repository) LoadJSONUnpacked(t restic.FileType, id restic.ID, item inte return json.Unmarshal(buf, item) } -// LoadJSONPack calls LoadBlob() to load a blob from the backend, decrypt the -// data and afterwards call json.Unmarshal on the item. -func (r *Repository) LoadJSONPack(t restic.BlobType, id restic.ID, item interface{}) (err error) { - buf, err := r.LoadBlob(id, t, nil) - if err != nil { - return err - } - - return json.Unmarshal(buf, item) -} - // LookupBlobSize returns the size of blob id. 
func (r *Repository) LookupBlobSize(id restic.ID, tpe restic.BlobType) (uint, error) { return r.idx.LookupSize(id, tpe) @@ -588,10 +577,11 @@ func (r *Repository) LoadTree(id restic.ID) (*restic.Tree, error) { debug.Log("repo.LoadTree", "size is %d, create buffer", size) buf := make([]byte, size) - buf, err = r.LoadBlob(id, restic.TreeBlob, buf) + n, err := r.loadBlob(id, restic.TreeBlob, buf) if err != nil { return nil, err } + buf = buf[:n] t := &restic.Tree{} err = json.Unmarshal(buf, t) @@ -614,10 +604,11 @@ func (r *Repository) LoadDataBlob(id restic.ID, buf []byte) (int, error) { return 0, errors.Errorf("buffer is too small for data blob (%d < %d)", len(buf), size) } - buf, err = r.LoadBlob(id, restic.DataBlob, buf) + n, err := r.loadBlob(id, restic.DataBlob, buf) if err != nil { return 0, err } + buf = buf[:n] debug.Log("repo.LoadDataBlob", "loaded %d bytes into buf %p", len(buf), buf) From fe8c12c798e8502c098d838b859980c3dc7cbf65 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sat, 3 Sep 2016 20:11:10 +0200 Subject: [PATCH 30/40] Replace repolitoy.SaveAndEncrypt to SaveBlob() --- src/restic/archiver/archive_reader.go | 5 +++-- src/restic/archiver/archiver.go | 2 +- src/restic/repository.go | 3 ++- src/restic/repository/repack.go | 2 +- src/restic/repository/repack_test.go | 2 +- src/restic/repository/repository.go | 10 ++++++++++ src/restic/repository/repository_test.go | 8 ++++---- src/restic/testing.go | 2 +- 8 files changed, 23 insertions(+), 11 deletions(-) diff --git a/src/restic/archiver/archive_reader.go b/src/restic/archiver/archive_reader.go index 2a184ee4b..1868ee0c1 100644 --- a/src/restic/archiver/archive_reader.go +++ b/src/restic/archiver/archive_reader.go @@ -7,8 +7,9 @@ import ( "restic/debug" "time" - "github.com/restic/chunker" "restic/errors" + + "github.com/restic/chunker" ) // saveTreeJSON stores a tree in the repository. @@ -58,7 +59,7 @@ func ArchiveReader(repo restic.Repository, p *restic.Progress, rd io.Reader, nam id := restic.Hash(chunk.Data) if !repo.Index().Has(id, restic.DataBlob) { - _, err := repo.SaveAndEncrypt(restic.DataBlob, chunk.Data, nil) + _, err := repo.SaveBlob(restic.DataBlob, chunk.Data, id) if err != nil { return nil, restic.ID{}, err } diff --git a/src/restic/archiver/archiver.go b/src/restic/archiver/archiver.go index 30decd1d9..6decc5fd5 100644 --- a/src/restic/archiver/archiver.go +++ b/src/restic/archiver/archiver.go @@ -98,7 +98,7 @@ func (arch *Archiver) Save(t restic.BlobType, data []byte, id restic.ID) error { return nil } - _, err := arch.repo.SaveAndEncrypt(t, data, &id) + _, err := arch.repo.SaveBlob(t, data, id) if err != nil { debug.Log("Archiver.Save", "Save(%v, %v): error %v\n", t, id.Str(), err) return err diff --git a/src/restic/repository.go b/src/restic/repository.go index c7e8ae170..bf8d31453 100644 --- a/src/restic/repository.go +++ b/src/restic/repository.go @@ -29,7 +29,6 @@ type Repository interface { SaveJSON(BlobType, interface{}) (ID, error) SaveUnpacked(FileType, []byte) (ID, error) - SaveAndEncrypt(BlobType, []byte, *ID) (ID, error) SaveJSONUnpacked(FileType, interface{}) (ID, error) LoadJSONUnpacked(FileType, ID, interface{}) error @@ -37,6 +36,8 @@ type Repository interface { LoadTree(id ID) (*Tree, error) LoadDataBlob(id ID, buf []byte) (int, error) + + SaveBlob(BlobType, []byte, ID) (ID, error) } // Deleter removes all data stored in a backend/repo. 
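SaveBlob takes the blob ID by value, and the zero value restic.ID{} means "compute the hash for me"; the wrapper that implements this convention is added to repository.go further down in this patch. A sketch of the two calling styles (helper names are illustrative only):

    package example

    import "restic"

    // saveIfNew passes the precomputed hash, as the archiver hunk above does,
    // and skips the save when the index already contains the blob.
    func saveIfNew(repo restic.Repository, data []byte) (restic.ID, error) {
        id := restic.Hash(data)
        if repo.Index().Has(id, restic.DataBlob) {
            return id, nil // already stored, deduplicated
        }
        return repo.SaveBlob(restic.DataBlob, data, id)
    }

    // saveAndHash hands the zero ID to SaveBlob and lets the repository
    // compute and return the hash, as the test helpers below do.
    func saveAndHash(repo restic.Repository, data []byte) (restic.ID, error) {
        return repo.SaveBlob(restic.DataBlob, data, restic.ID{})
    }
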
diff --git a/src/restic/repository/repack.go b/src/restic/repository/repack.go index 2ce701ad9..edb717efb 100644 --- a/src/restic/repository/repack.go +++ b/src/restic/repository/repack.go @@ -64,7 +64,7 @@ func Repack(repo restic.Repository, packs restic.IDSet, keepBlobs restic.BlobSet } plaintext = plaintext[:n] - _, err = repo.SaveAndEncrypt(entry.Type, plaintext, &entry.ID) + _, err = repo.SaveBlob(entry.Type, plaintext, entry.ID) if err != nil { return err } diff --git a/src/restic/repository/repack_test.go b/src/restic/repository/repack_test.go index 9b118d7b4..6d910c97b 100644 --- a/src/restic/repository/repack_test.go +++ b/src/restic/repository/repack_test.go @@ -46,7 +46,7 @@ func createRandomBlobs(t testing.TB, repo restic.Repository, blobs int, pData fl continue } - _, err := repo.SaveAndEncrypt(tpe, buf, &id) + _, err := repo.SaveBlob(tpe, buf, id) if err != nil { t.Fatalf("SaveFrom() error %v", err) } diff --git a/src/restic/repository/repository.go b/src/restic/repository/repository.go index 5a6ee64f4..55b9ea1be 100644 --- a/src/restic/repository/repository.go +++ b/src/restic/repository/repository.go @@ -614,3 +614,13 @@ func (r *Repository) LoadDataBlob(id restic.ID, buf []byte) (int, error) { return len(buf), err } + +// SaveBlob saves a blob of type t into the repository. If id is the null id, it +// will be computed and returned. +func (r *Repository) SaveBlob(t restic.BlobType, buf []byte, id restic.ID) (restic.ID, error) { + var i *restic.ID + if !id.IsNull() { + i = &id + } + return r.SaveAndEncrypt(t, buf, i) +} diff --git a/src/restic/repository/repository_test.go b/src/restic/repository/repository_test.go index 644650c30..1b93dc8b0 100644 --- a/src/restic/repository/repository_test.go +++ b/src/restic/repository/repository_test.go @@ -81,7 +81,7 @@ func TestSave(t *testing.T) { id := restic.Hash(data) // save - sid, err := repo.SaveAndEncrypt(restic.DataBlob, data, nil) + sid, err := repo.SaveBlob(restic.DataBlob, data, restic.ID{}) OK(t, err) Equals(t, id, sid) @@ -117,7 +117,7 @@ func TestSaveFrom(t *testing.T) { id := restic.Hash(data) // save - id2, err := repo.SaveAndEncrypt(restic.DataBlob, data, &id) + id2, err := repo.SaveBlob(restic.DataBlob, data, id) OK(t, err) Equals(t, id, id2) @@ -156,7 +156,7 @@ func BenchmarkSaveAndEncrypt(t *testing.B) { for i := 0; i < t.N; i++ { // save - _, err = repo.SaveAndEncrypt(restic.DataBlob, data, &id) + _, err = repo.SaveBlob(restic.DataBlob, data, id) OK(t, err) } } @@ -253,7 +253,7 @@ func saveRandomDataBlobs(t testing.TB, repo restic.Repository, num int, sizeMax _, err := io.ReadFull(rand.Reader, buf) OK(t, err) - _, err = repo.SaveAndEncrypt(restic.DataBlob, buf, nil) + _, err = repo.SaveBlob(restic.DataBlob, buf, restic.ID{}) OK(t, err) } } diff --git a/src/restic/testing.go b/src/restic/testing.go index 9b6b627c8..85cab0d69 100644 --- a/src/restic/testing.go +++ b/src/restic/testing.go @@ -43,7 +43,7 @@ func (fs fakeFileSystem) saveFile(rd io.Reader) (blobs IDs) { id := Hash(chunk.Data) if !fs.blobIsKnown(id, DataBlob) { - _, err := fs.repo.SaveAndEncrypt(DataBlob, chunk.Data, &id) + _, err := fs.repo.SaveBlob(DataBlob, chunk.Data, id) if err != nil { fs.t.Fatalf("error saving chunk: %v", err) } From 436332d5f2d47fada8f5877be79b8bed4685c019 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sat, 3 Sep 2016 20:20:14 +0200 Subject: [PATCH 31/40] LoadDataBlob -> LoadBlob --- src/cmds/restic/cmd_cat.go | 2 +- src/restic/archiver/archive_reader_test.go | 2 +- src/restic/fuse/file.go | 4 ++-- 
src/restic/fuse/file_test.go | 4 ++-- src/restic/node.go | 2 +- src/restic/repository.go | 4 ++-- src/restic/repository/repository.go | 12 ++++++------ src/restic/repository/repository_test.go | 4 ++-- 8 files changed, 17 insertions(+), 17 deletions(-) diff --git a/src/cmds/restic/cmd_cat.go b/src/cmds/restic/cmd_cat.go index d5bae8b06..802257066 100644 --- a/src/cmds/restic/cmd_cat.go +++ b/src/cmds/restic/cmd_cat.go @@ -173,7 +173,7 @@ func (cmd CmdCat) Execute(args []string) error { blob := list[0] buf := make([]byte, blob.Length) - n, err := repo.LoadDataBlob(id, buf) + n, err := repo.LoadBlob(restic.DataBlob, id, buf) if err != nil { return err } diff --git a/src/restic/archiver/archive_reader_test.go b/src/restic/archiver/archive_reader_test.go index f9417cb51..86c8a4ca8 100644 --- a/src/restic/archiver/archive_reader_test.go +++ b/src/restic/archiver/archive_reader_test.go @@ -10,7 +10,7 @@ import ( ) func loadBlob(t *testing.T, repo restic.Repository, id restic.ID, buf []byte) int { - n, err := repo.LoadDataBlob(id, buf) + n, err := repo.LoadBlob(restic.DataBlob, id, buf) if err != nil { t.Fatalf("LoadBlob(%v) returned error %v", id, err) } diff --git a/src/restic/fuse/file.go b/src/restic/fuse/file.go index ae1b90124..6590b2635 100644 --- a/src/restic/fuse/file.go +++ b/src/restic/fuse/file.go @@ -27,7 +27,7 @@ var _ = fs.HandleReleaser(&file{}) // for fuse operations. type BlobLoader interface { LookupBlobSize(restic.ID, restic.BlobType) (uint, error) - LoadDataBlob(restic.ID, []byte) (int, error) + LoadBlob(restic.BlobType, restic.ID, []byte) (int, error) } type file struct { @@ -109,7 +109,7 @@ func (f *file) getBlobAt(i int) (blob []byte, err error) { buf = make([]byte, f.sizes[i]) } - n, err := f.repo.LoadDataBlob(f.node.Content[i], buf) + n, err := f.repo.LoadBlob(restic.DataBlob, f.node.Content[i], buf) if err != nil { debug.Log("file.getBlobAt", "LoadBlob(%v, %v) failed: %v", f.node.Name, f.node.Content[i], err) return nil, err diff --git a/src/restic/fuse/file_test.go b/src/restic/fuse/file_test.go index 1013a07b5..090e43200 100644 --- a/src/restic/fuse/file_test.go +++ b/src/restic/fuse/file_test.go @@ -34,8 +34,8 @@ func (m *MockRepo) LookupBlobSize(id restic.ID, t restic.BlobType) (uint, error) return uint(len(buf)), nil } -func (m *MockRepo) LoadDataBlob(id restic.ID, buf []byte) (int, error) { - size, err := m.LookupBlobSize(id, restic.DataBlob) +func (m *MockRepo) LoadBlob(t restic.BlobType, id restic.ID, buf []byte) (int, error) { + size, err := m.LookupBlobSize(id, t) if err != nil { return 0, err } diff --git a/src/restic/node.go b/src/restic/node.go index bc80835bf..1d33aa6ad 100644 --- a/src/restic/node.go +++ b/src/restic/node.go @@ -219,7 +219,7 @@ func (node Node) createFileAt(path string, repo Repository) error { buf = make([]byte, size) } - n, err := repo.LoadDataBlob(id, buf) + n, err := repo.LoadBlob(DataBlob, id, buf) if err != nil { return err } diff --git a/src/restic/repository.go b/src/restic/repository.go index bf8d31453..c84ba4fd9 100644 --- a/src/restic/repository.go +++ b/src/restic/repository.go @@ -34,8 +34,8 @@ type Repository interface { LoadJSONUnpacked(FileType, ID, interface{}) error LoadAndDecrypt(FileType, ID) ([]byte, error) - LoadTree(id ID) (*Tree, error) - LoadDataBlob(id ID, buf []byte) (int, error) + LoadTree(ID) (*Tree, error) + LoadBlob(BlobType, ID, []byte) (int, error) SaveBlob(BlobType, []byte, ID) (ID, error) } diff --git a/src/restic/repository/repository.go b/src/restic/repository/repository.go index 55b9ea1be..9c2e90e31 
100644 --- a/src/restic/repository/repository.go +++ b/src/restic/repository/repository.go @@ -592,10 +592,10 @@ func (r *Repository) LoadTree(id restic.ID) (*restic.Tree, error) { return t, nil } -// LoadDataBlob loads a data blob from the repository to the buffer. -func (r *Repository) LoadDataBlob(id restic.ID, buf []byte) (int, error) { - debug.Log("repo.LoadDataBlob", "load blob %v into buf %p", id.Str(), buf) - size, err := r.idx.LookupSize(id, restic.DataBlob) +// LoadBlob loads a blob of type t from the repository to the buffer. +func (r *Repository) LoadBlob(t restic.BlobType, id restic.ID, buf []byte) (int, error) { + debug.Log("repo.LoadBlob", "load blob %v into buf %p", id.Str(), buf) + size, err := r.idx.LookupSize(id, t) if err != nil { return 0, err } @@ -604,13 +604,13 @@ func (r *Repository) LoadDataBlob(id restic.ID, buf []byte) (int, error) { return 0, errors.Errorf("buffer is too small for data blob (%d < %d)", len(buf), size) } - n, err := r.loadBlob(id, restic.DataBlob, buf) + n, err := r.loadBlob(id, t, buf) if err != nil { return 0, err } buf = buf[:n] - debug.Log("repo.LoadDataBlob", "loaded %d bytes into buf %p", len(buf), buf) + debug.Log("repo.LoadBlob", "loaded %d bytes into buf %p", len(buf), buf) return len(buf), err } diff --git a/src/restic/repository/repository_test.go b/src/restic/repository/repository_test.go index 1b93dc8b0..3910f57be 100644 --- a/src/restic/repository/repository_test.go +++ b/src/restic/repository/repository_test.go @@ -91,7 +91,7 @@ func TestSave(t *testing.T) { // read back buf := make([]byte, size) - n, err := repo.LoadDataBlob(id, buf) + n, err := repo.LoadBlob(restic.DataBlob, id, buf) OK(t, err) Equals(t, len(buf), n) @@ -125,7 +125,7 @@ func TestSaveFrom(t *testing.T) { // read back buf := make([]byte, size) - n, err := repo.LoadDataBlob(id, buf) + n, err := repo.LoadBlob(restic.DataBlob, id, buf) OK(t, err) Equals(t, len(buf), n) From 1fb80bf0e21b3c4e05a0c2aa329b5b8ca0f63c54 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sat, 3 Sep 2016 20:33:28 +0200 Subject: [PATCH 32/40] Fix fuse mount --- src/restic/fuse/file.go | 2 +- src/restic/repository/repository.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/restic/fuse/file.go b/src/restic/fuse/file.go index 6590b2635..d2fc12222 100644 --- a/src/restic/fuse/file.go +++ b/src/restic/fuse/file.go @@ -116,7 +116,7 @@ func (f *file) getBlobAt(i int) (blob []byte, err error) { } f.blobs[i] = buf[:n] - return blob, nil + return buf[:n], nil } func (f *file) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error { diff --git a/src/restic/repository/repository.go b/src/restic/repository/repository.go index 9c2e90e31..ca5fc92b6 100644 --- a/src/restic/repository/repository.go +++ b/src/restic/repository/repository.go @@ -87,7 +87,7 @@ func (r *Repository) LoadAndDecrypt(t restic.FileType, id restic.ID) ([]byte, er // pack from the backend, the result is stored in plaintextBuf, which must be // large enough to hold the complete blob. 
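A sketch of how callers size the destination buffer for the generalised LoadBlob; readBlob is a hypothetical helper, LookupBlobSize and LoadBlob are the repository calls shown above:

    // readBlob loads one blob of the given type. The buffer has to be at least
    // as large as the stored plaintext, so the size is looked up first.
    func readBlob(repo restic.Repository, t restic.BlobType, id restic.ID) ([]byte, error) {
        size, err := repo.LookupBlobSize(id, t)
        if err != nil {
            return nil, err
        }

        buf := make([]byte, size)
        n, err := repo.LoadBlob(t, id, buf)
        if err != nil {
            return nil, err
        }

        // LoadBlob reports how many plaintext bytes were written into buf.
        return buf[:n], nil
    }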
func (r *Repository) loadBlob(id restic.ID, t restic.BlobType, plaintextBuf []byte) (int, error) { - debug.Log("Repo.loadBlob", "load %v with id %v (buf %d)", t, id.Str(), len(plaintextBuf)) + debug.Log("Repo.loadBlob", "load %v with id %v (buf %p, len %d)", t, id.Str(), plaintextBuf, len(plaintextBuf)) // lookup plaintext size of blob size, err := r.idx.LookupSize(id, t) From b5b3c0eaf8eacd9d4695e5ef8a245fd3a899008a Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sat, 3 Sep 2016 20:55:22 +0200 Subject: [PATCH 33/40] Add repository.SaveTree --- src/restic/archiver/archive_reader.go | 20 +---- src/restic/archiver/archiver.go | 2 +- src/restic/repository.go | 6 +- src/restic/repository/repository.go | 95 ++++++++++++------------ src/restic/repository/repository_test.go | 53 ------------- src/restic/testing.go | 12 +-- src/restic/tree_test.go | 2 +- 7 files changed, 61 insertions(+), 129 deletions(-) diff --git a/src/restic/archiver/archive_reader.go b/src/restic/archiver/archive_reader.go index 1868ee0c1..fb3803e34 100644 --- a/src/restic/archiver/archive_reader.go +++ b/src/restic/archiver/archive_reader.go @@ -1,7 +1,6 @@ package archiver import ( - "encoding/json" "io" "restic" "restic/debug" @@ -12,23 +11,6 @@ import ( "github.com/restic/chunker" ) -// saveTreeJSON stores a tree in the repository. -func saveTreeJSON(repo restic.Repository, item interface{}) (restic.ID, error) { - data, err := json.Marshal(item) - if err != nil { - return restic.ID{}, errors.Wrap(err, "") - } - data = append(data, '\n') - - // check if tree has been saved before - id := restic.Hash(data) - if repo.Index().Has(id, restic.TreeBlob) { - return id, nil - } - - return repo.SaveJSON(restic.TreeBlob, item) -} - // ArchiveReader reads from the reader and archives the data. Returned is the // resulting snapshot and its ID. func ArchiveReader(repo restic.Repository, p *restic.Progress, rd io.Reader, name string) (*restic.Snapshot, restic.ID, error) { @@ -93,7 +75,7 @@ func ArchiveReader(repo restic.Repository, p *restic.Progress, rd io.Reader, nam }, } - treeID, err := saveTreeJSON(repo, tree) + treeID, err := repo.SaveTree(tree) if err != nil { return nil, restic.ID{}, err } diff --git a/src/restic/archiver/archiver.go b/src/restic/archiver/archiver.go index 6decc5fd5..3be272adc 100644 --- a/src/restic/archiver/archiver.go +++ b/src/restic/archiver/archiver.go @@ -122,7 +122,7 @@ func (arch *Archiver) SaveTreeJSON(item interface{}) (restic.ID, error) { return id, nil } - return arch.repo.SaveJSON(restic.TreeBlob, item) + return arch.repo.SaveBlob(restic.TreeBlob, data, id) } func (arch *Archiver) reloadFileIfChanged(node *restic.Node, file fs.File) (*restic.Node, error) { diff --git a/src/restic/repository.go b/src/restic/repository.go index c84ba4fd9..959c0bd3c 100644 --- a/src/restic/repository.go +++ b/src/restic/repository.go @@ -27,17 +27,17 @@ type Repository interface { Flush() error - SaveJSON(BlobType, interface{}) (ID, error) SaveUnpacked(FileType, []byte) (ID, error) SaveJSONUnpacked(FileType, interface{}) (ID, error) LoadJSONUnpacked(FileType, ID, interface{}) error LoadAndDecrypt(FileType, ID) ([]byte, error) - LoadTree(ID) (*Tree, error) LoadBlob(BlobType, ID, []byte) (int, error) - SaveBlob(BlobType, []byte, ID) (ID, error) + + LoadTree(ID) (*Tree, error) + SaveTree(t *Tree) (ID, error) } // Deleter removes all data stored in a backend/repo. 
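A small sketch of the tree-level pair the interface gains here; roundTripTree is a hypothetical helper, SaveTree, Flush and LoadTree come from this patch:

    // roundTripTree stores an empty tree and reads it back. SaveTree hashes the
    // JSON encoding and only stores the blob when the index does not know it yet.
    func roundTripTree(repo restic.Repository) (*restic.Tree, error) {
        id, err := repo.SaveTree(restic.NewTree())
        if err != nil {
            return nil, err
        }

        // flush pending packs so the tree blob can be loaded again
        if err := repo.Flush(); err != nil {
            return nil, err
        }

        return repo.LoadTree(id)
    }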
diff --git a/src/restic/repository/repository.go b/src/restic/repository/repository.go index ca5fc92b6..a7258e090 100644 --- a/src/restic/repository/repository.go +++ b/src/restic/repository/repository.go @@ -227,25 +227,6 @@ func (r *Repository) SaveAndEncrypt(t restic.BlobType, data []byte, id *restic.I return *id, r.savePacker(packer) } -// SaveJSON serialises item as JSON and encrypts and saves it in a pack in the -// backend as type t. -func (r *Repository) SaveJSON(t restic.BlobType, item interface{}) (restic.ID, error) { - debug.Log("Repo.SaveJSON", "save %v blob", t) - buf := getBuf()[:0] - defer freeBuf(buf) - - wr := bytes.NewBuffer(buf) - - enc := json.NewEncoder(wr) - err := enc.Encode(item) - if err != nil { - return restic.ID{}, errors.Errorf("json.Encode: %v", err) - } - - buf = wr.Bytes() - return r.SaveAndEncrypt(t, buf, nil) -} - // SaveJSONUnpacked serialises item as JSON and encrypts and saves it in the // backend as type t, without a pack. It returns the storage hash. func (r *Repository) SaveJSONUnpacked(t restic.FileType, item interface{}) (restic.ID, error) { @@ -565,33 +546,6 @@ func (r *Repository) Close() error { return r.be.Close() } -// LoadTree loads a tree from the repository. -func (r *Repository) LoadTree(id restic.ID) (*restic.Tree, error) { - debug.Log("repo.LoadTree", "load tree %v", id.Str()) - - size, err := r.idx.LookupSize(id, restic.TreeBlob) - if err != nil { - return nil, err - } - - debug.Log("repo.LoadTree", "size is %d, create buffer", size) - buf := make([]byte, size) - - n, err := r.loadBlob(id, restic.TreeBlob, buf) - if err != nil { - return nil, err - } - buf = buf[:n] - - t := &restic.Tree{} - err = json.Unmarshal(buf, t) - if err != nil { - return nil, err - } - - return t, nil -} - // LoadBlob loads a blob of type t from the repository to the buffer. func (r *Repository) LoadBlob(t restic.BlobType, id restic.ID, buf []byte) (int, error) { debug.Log("repo.LoadBlob", "load blob %v into buf %p", id.Str(), buf) @@ -624,3 +578,52 @@ func (r *Repository) SaveBlob(t restic.BlobType, buf []byte, id restic.ID) (rest } return r.SaveAndEncrypt(t, buf, i) } + +// LoadTree loads a tree from the repository. +func (r *Repository) LoadTree(id restic.ID) (*restic.Tree, error) { + debug.Log("repo.LoadTree", "load tree %v", id.Str()) + + size, err := r.idx.LookupSize(id, restic.TreeBlob) + if err != nil { + return nil, err + } + + debug.Log("repo.LoadTree", "size is %d, create buffer", size) + buf := make([]byte, size) + + n, err := r.loadBlob(id, restic.TreeBlob, buf) + if err != nil { + return nil, err + } + buf = buf[:n] + + t := &restic.Tree{} + err = json.Unmarshal(buf, t) + if err != nil { + return nil, err + } + + return t, nil +} + +// SaveTree stores a tree into the repository and returns the ID. The ID is +// checked against the index. The tree is only stored when the index does not +// contain the ID. 
+func (r *Repository) SaveTree(t *restic.Tree) (restic.ID, error) { + buf, err := json.Marshal(t) + if err != nil { + return restic.ID{}, errors.Wrap(err, "MarshalJSON") + } + + // append a newline so that the data is always consistent (json.Encoder + // adds a newline after each object) + buf = append(buf, '\n') + + id := restic.Hash(buf) + if r.idx.Has(id, restic.TreeBlob) { + return id, nil + } + + _, err = r.SaveBlob(restic.TreeBlob, buf, id) + return id, err +} diff --git a/src/restic/repository/repository_test.go b/src/restic/repository/repository_test.go index 3910f57be..3295ab5d9 100644 --- a/src/restic/repository/repository_test.go +++ b/src/restic/repository/repository_test.go @@ -4,7 +4,6 @@ import ( "bytes" "crypto/rand" "crypto/sha256" - "encoding/json" "io" mrand "math/rand" "path/filepath" @@ -15,58 +14,6 @@ import ( . "restic/test" ) -type testJSONStruct struct { - Foo uint32 - Bar string - Baz []byte -} - -var repoTests = []testJSONStruct{ - testJSONStruct{Foo: 23, Bar: "Teststring", Baz: []byte("xx")}, -} - -func TestSaveJSON(t *testing.T) { - repo := SetupRepo() - defer TeardownRepo(repo) - - for _, obj := range repoTests { - data, err := json.Marshal(obj) - OK(t, err) - data = append(data, '\n') - h := sha256.Sum256(data) - - id, err := repo.SaveJSON(restic.TreeBlob, obj) - OK(t, err) - - Assert(t, h == id, - "TestSaveJSON: wrong plaintext ID: expected %02x, got %02x", - h, id) - } -} - -func BenchmarkSaveJSON(t *testing.B) { - repo := SetupRepo() - defer TeardownRepo(repo) - - obj := repoTests[0] - - data, err := json.Marshal(obj) - OK(t, err) - data = append(data, '\n') - h := sha256.Sum256(data) - - t.ResetTimer() - - for i := 0; i < t.N; i++ { - id, err := repo.SaveJSON(restic.TreeBlob, obj) - OK(t, err) - - Assert(t, h == id, - "TestSaveJSON: wrong plaintext ID: expected %02x, got %02x", - h, id) - } -} - var testSizes = []int{5, 23, 2<<18 + 23, 1 << 20} func TestSave(t *testing.T) { diff --git a/src/restic/testing.go b/src/restic/testing.go index 85cab0d69..039b908f7 100644 --- a/src/restic/testing.go +++ b/src/restic/testing.go @@ -64,17 +64,16 @@ const ( maxNodes = 32 ) -func (fs fakeFileSystem) treeIsKnown(tree *Tree) (bool, ID) { +func (fs fakeFileSystem) treeIsKnown(tree *Tree) (bool, []byte, ID) { data, err := json.Marshal(tree) if err != nil { fs.t.Fatalf("json.Marshal(tree) returned error: %v", err) - return false, ID{} + return false, nil, ID{} } data = append(data, '\n') id := Hash(data) - return fs.blobIsKnown(id, TreeBlob), id - + return fs.blobIsKnown(id, TreeBlob), data, id } func (fs fakeFileSystem) blobIsKnown(id ID, t BlobType) bool { @@ -132,11 +131,12 @@ func (fs fakeFileSystem) saveTree(seed int64, depth int) ID { tree.Nodes = append(tree.Nodes, node) } - if known, id := fs.treeIsKnown(&tree); known { + known, buf, id := fs.treeIsKnown(&tree) + if known { return id } - id, err := fs.repo.SaveJSON(TreeBlob, tree) + _, err := fs.repo.SaveBlob(TreeBlob, buf, id) if err != nil { fs.t.Fatal(err) } diff --git a/src/restic/tree_test.go b/src/restic/tree_test.go index 3c581ec68..779842552 100644 --- a/src/restic/tree_test.go +++ b/src/restic/tree_test.go @@ -97,7 +97,7 @@ func TestLoadTree(t *testing.T) { // save tree tree := restic.NewTree() - id, err := repo.SaveJSON(restic.TreeBlob, tree) + id, err := repo.SaveTree(tree) OK(t, err) // save packs From bef5c4acb8e906f04a363762e884e125e8ddcb45 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sun, 4 Sep 2016 12:52:43 +0200 Subject: [PATCH 34/40] Add mock.Repository, Rework SetupRepo --- 
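The reworked test helpers below are meant to be combined roughly like this (a sketch, not part of the diff; the test name is made up, SetupRepo and BenchArchiveDirectory come from the dot-imported restic/test package and archiver.TestSnapshot is added in this patch):

    func TestArchiveTwice(t *testing.T) {
        repo, cleanup := SetupRepo(t)
        defer cleanup()

        // first snapshot without a parent, second one using the first as parent
        sn := archiver.TestSnapshot(t, repo, BenchArchiveDirectory, nil)
        _ = archiver.TestSnapshot(t, repo, BenchArchiveDirectory, sn.ID())
    }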
src/restic/archiver/archiver_test.go | 26 ++--- src/restic/archiver/testing.go | 16 +++ src/restic/lock_test.go | 36 +++--- src/restic/mock/repository.go | 141 +++++++++++++++++++++++ src/restic/repository/repository_test.go | 33 +++--- src/restic/test/backend.go | 55 +++------ src/restic/tree_test.go | 4 +- src/restic/walk/walk_test.go | 4 +- 8 files changed, 228 insertions(+), 87 deletions(-) create mode 100644 src/restic/archiver/testing.go create mode 100644 src/restic/mock/repository.go diff --git a/src/restic/archiver/archiver_test.go b/src/restic/archiver/archiver_test.go index a1fa47683..67a9ff8f6 100644 --- a/src/restic/archiver/archiver_test.go +++ b/src/restic/archiver/archiver_test.go @@ -47,8 +47,8 @@ func benchmarkChunkEncrypt(b testing.TB, buf, buf2 []byte, rd Rdr, key *crypto.K } func BenchmarkChunkEncrypt(b *testing.B) { - repo := SetupRepo() - defer TeardownRepo(repo) + repo, cleanup := SetupRepo(b) + defer cleanup() data := Random(23, 10<<20) // 10MiB rd := bytes.NewReader(data) @@ -79,8 +79,8 @@ func benchmarkChunkEncryptP(b *testing.PB, buf []byte, rd Rdr, key *crypto.Key) } func BenchmarkChunkEncryptParallel(b *testing.B) { - repo := SetupRepo() - defer TeardownRepo(repo) + repo, cleanup := SetupRepo(b) + defer cleanup() data := Random(23, 10<<20) // 10MiB @@ -98,8 +98,8 @@ func BenchmarkChunkEncryptParallel(b *testing.B) { } func archiveDirectory(b testing.TB) { - repo := SetupRepo() - defer TeardownRepo(repo) + repo, cleanup := SetupRepo(b) + defer cleanup() arch := archiver.New(repo) @@ -136,8 +136,8 @@ func countPacks(repo restic.Repository, t restic.FileType) (n uint) { } func archiveWithDedup(t testing.TB) { - repo := SetupRepo() - defer TeardownRepo(repo) + repo, cleanup := SetupRepo(t) + defer cleanup() if BenchArchiveDirectory == "" { t.Skip("benchdir not set, skipping TestArchiverDedup") @@ -150,7 +150,7 @@ func archiveWithDedup(t testing.TB) { } // archive a few files - sn := SnapshotDir(t, repo, BenchArchiveDirectory, nil) + sn := archiver.TestSnapshot(t, repo, BenchArchiveDirectory, nil) t.Logf("archived snapshot %v", sn.ID().Str()) // get archive stats @@ -161,7 +161,7 @@ func archiveWithDedup(t testing.TB) { cnt.before.packs, cnt.before.dataBlobs, cnt.before.treeBlobs) // archive the same files again, without parent snapshot - sn2 := SnapshotDir(t, repo, BenchArchiveDirectory, nil) + sn2 := archiver.TestSnapshot(t, repo, BenchArchiveDirectory, nil) t.Logf("archived snapshot %v", sn2.ID().Str()) // get archive stats again @@ -178,7 +178,7 @@ func archiveWithDedup(t testing.TB) { } // archive the same files again, with a parent snapshot - sn3 := SnapshotDir(t, repo, BenchArchiveDirectory, sn2.ID()) + sn3 := archiver.TestSnapshot(t, repo, BenchArchiveDirectory, sn2.ID()) t.Logf("archived snapshot %v, parent %v", sn3.ID().Str(), sn2.ID().Str()) // get archive stats again @@ -208,8 +208,8 @@ func TestParallelSaveWithDuplication(t *testing.T) { } func testParallelSaveWithDuplication(t *testing.T, seed int) { - repo := SetupRepo() - defer TeardownRepo(repo) + repo, cleanup := SetupRepo(t) + defer cleanup() dataSizeMb := 128 duplication := 7 diff --git a/src/restic/archiver/testing.go b/src/restic/archiver/testing.go new file mode 100644 index 000000000..b73f09dcd --- /dev/null +++ b/src/restic/archiver/testing.go @@ -0,0 +1,16 @@ +package archiver + +import ( + "restic" + "testing" +) + +// TestSnapshot creates a new snapshot of path. 
+func TestSnapshot(t testing.TB, repo restic.Repository, path string, parent *restic.ID) *restic.Snapshot { + arch := New(repo) + sn, _, err := arch.Snapshot(nil, []string{path}, parent) + if err != nil { + t.Fatal(err) + } + return sn +} diff --git a/src/restic/lock_test.go b/src/restic/lock_test.go index b97bc97a8..b60b9ea8a 100644 --- a/src/restic/lock_test.go +++ b/src/restic/lock_test.go @@ -10,8 +10,8 @@ import ( ) func TestLock(t *testing.T) { - repo := SetupRepo() - defer TeardownRepo(repo) + repo, cleanup := SetupRepo(t) + defer cleanup() lock, err := restic.NewLock(repo) OK(t, err) @@ -20,8 +20,8 @@ func TestLock(t *testing.T) { } func TestDoubleUnlock(t *testing.T) { - repo := SetupRepo() - defer TeardownRepo(repo) + repo, cleanup := SetupRepo(t) + defer cleanup() lock, err := restic.NewLock(repo) OK(t, err) @@ -34,8 +34,8 @@ func TestDoubleUnlock(t *testing.T) { } func TestMultipleLock(t *testing.T) { - repo := SetupRepo() - defer TeardownRepo(repo) + repo, cleanup := SetupRepo(t) + defer cleanup() lock1, err := restic.NewLock(repo) OK(t, err) @@ -48,8 +48,8 @@ func TestMultipleLock(t *testing.T) { } func TestLockExclusive(t *testing.T) { - repo := SetupRepo() - defer TeardownRepo(repo) + repo, cleanup := SetupRepo(t) + defer cleanup() elock, err := restic.NewExclusiveLock(repo) OK(t, err) @@ -57,8 +57,8 @@ func TestLockExclusive(t *testing.T) { } func TestLockOnExclusiveLockedRepo(t *testing.T) { - repo := SetupRepo() - defer TeardownRepo(repo) + repo, cleanup := SetupRepo(t) + defer cleanup() elock, err := restic.NewExclusiveLock(repo) OK(t, err) @@ -74,8 +74,8 @@ func TestLockOnExclusiveLockedRepo(t *testing.T) { } func TestExclusiveLockOnLockedRepo(t *testing.T) { - repo := SetupRepo() - defer TeardownRepo(repo) + repo, cleanup := SetupRepo(t) + defer cleanup() elock, err := restic.NewLock(repo) OK(t, err) @@ -168,8 +168,8 @@ func lockExists(repo restic.Repository, t testing.TB, id restic.ID) bool { } func TestLockWithStaleLock(t *testing.T) { - repo := SetupRepo() - defer TeardownRepo(repo) + repo, cleanup := SetupRepo(t) + defer cleanup() id1, err := createFakeLock(repo, time.Now().Add(-time.Hour), os.Getpid()) OK(t, err) @@ -193,8 +193,8 @@ func TestLockWithStaleLock(t *testing.T) { } func TestRemoveAllLocks(t *testing.T) { - repo := SetupRepo() - defer TeardownRepo(repo) + repo, cleanup := SetupRepo(t) + defer cleanup() id1, err := createFakeLock(repo, time.Now().Add(-time.Hour), os.Getpid()) OK(t, err) @@ -216,8 +216,8 @@ func TestRemoveAllLocks(t *testing.T) { } func TestLockRefresh(t *testing.T) { - repo := SetupRepo() - defer TeardownRepo(repo) + repo, cleanup := SetupRepo(t) + defer cleanup() lock, err := restic.NewLock(repo) OK(t, err) diff --git a/src/restic/mock/repository.go b/src/restic/mock/repository.go new file mode 100644 index 000000000..df109c5f3 --- /dev/null +++ b/src/restic/mock/repository.go @@ -0,0 +1,141 @@ +package mock + +import ( + "restic" + "restic/crypto" +) + +// Repository implements a mock Repository. 
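A sketch of how the stub type is meant to be used: a test fills in only the function fields it exercises, and any other method call hits a nil function and panics, which makes unexpected repository use visible. The test name and behaviour are made up; the field and method names match the type below:

    func TestUsesOnlySaveBlob(t *testing.T) {
        repo := &mock.Repository{
            SaveBlobFn: func(tpe restic.BlobType, buf []byte, id restic.ID) (restic.ID, error) {
                return restic.Hash(buf), nil
            },
        }

        id, err := repo.SaveBlob(restic.DataBlob, []byte("payload"), restic.ID{})
        if err != nil {
            t.Fatal(err)
        }
        t.Logf("mock stored blob %v", id.Str())
    }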
+type Repository struct { + BackendFn func() Backend + + KeyFn func() *crypto.Key + + SetIndexFn func(restic.Index) + + IndexFn func() restic.Index + SaveFullIndexFn func() error + SaveIndexFn func() error + LoadIndexFn func() error + + ConfigFn func() restic.Config + + LookupBlobSizeFn func(restic.ID, restic.BlobType) (uint, error) + + ListFn func(restic.FileType, <-chan struct{}) <-chan restic.ID + ListPackFn func(restic.ID) ([]restic.Blob, int64, error) + + FlushFn func() error + + SaveUnpackedFn func(restic.FileType, []byte) (restic.ID, error) + SaveJSONUnpackedFn func(restic.FileType, interface{}) (restic.ID, error) + + LoadJSONUnpackedFn func(restic.FileType, restic.ID, interface{}) error + LoadAndDecryptFn func(restic.FileType, restic.ID) ([]byte, error) + + LoadBlobFn func(restic.BlobType, restic.ID, []byte) (int, error) + SaveBlobFn func(restic.BlobType, []byte, restic.ID) (restic.ID, error) + + LoadTreeFn func(restic.ID) (*restic.Tree, error) + SaveTreeFn func(t *restic.Tree) (restic.ID, error) +} + +// Backend is a stub method. +func (repo *Repository) Backend() Backend { + return repo.BackendFn() +} + +// Key is a stub method. +func (repo *Repository) Key() *crypto.Key { + return repo.KeyFn() +} + +// SetIndex is a stub method. +func (repo *Repository) SetIndex(idx restic.Index) { + repo.SetIndexFn(idx) +} + +// Index is a stub method. +func (repo *Repository) Index() restic.Index { + return repo.IndexFn() +} + +// SaveFullIndex is a stub method. +func (repo *Repository) SaveFullIndex() error { + return repo.SaveFullIndexFn() +} + +// SaveIndex is a stub method. +func (repo *Repository) SaveIndex() error { + return repo.SaveIndexFn() +} + +// LoadIndex is a stub method. +func (repo *Repository) LoadIndex() error { + return repo.LoadIndexFn() +} + +// Config is a stub method. +func (repo *Repository) Config() restic.Config { + return repo.ConfigFn() +} + +// LookupBlobSize is a stub method. +func (repo *Repository) LookupBlobSize(id restic.ID, t restic.BlobType) (uint, error) { + return repo.LookupBlobSizeFn(id, t) +} + +// List is a stub method. +func (repo *Repository) List(t restic.FileType, done <-chan struct{}) <-chan restic.ID { + return repo.ListFn(t, done) +} + +// ListPack is a stub method. +func (repo *Repository) ListPack(id restic.ID) ([]restic.Blob, int64, error) { + return repo.ListPackFn(id) +} + +// Flush is a stub method. +func (repo *Repository) Flush() error { + return repo.FlushFn() +} + +// SaveUnpacked is a stub method. +func (repo *Repository) SaveUnpacked(t restic.FileType, buf []byte) (restic.ID, error) { + return repo.SaveUnpackedFn(t, buf) +} + +// SaveJSONUnpacked is a stub method. +func (repo *Repository) SaveJSONUnpacked(t restic.FileType, item interface{}) (restic.ID, error) { + return repo.SaveJSONUnpackedFn(t, item) +} + +// LoadJSONUnpacked is a stub method. +func (repo *Repository) LoadJSONUnpacked(t restic.FileType, id restic.ID, item interface{}) error { + return repo.LoadJSONUnpackedFn(t, id, item) +} + +// LoadAndDecrypt is a stub method. +func (repo *Repository) LoadAndDecrypt(t restic.FileType, id restic.ID) ([]byte, error) { + return repo.LoadAndDecryptFn(t, id) +} + +// LoadBlob is a stub method. +func (repo *Repository) LoadBlob(t restic.BlobType, id restic.ID, buf []byte) (int, error) { + return repo.LoadBlobFn(t, id, buf) +} + +// SaveBlob is a stub method. +func (repo *Repository) SaveBlob(t restic.BlobType, buf []byte, id restic.ID) (restic.ID, error) { + return repo.SaveBlobFn(t, buf, id) +} + +// LoadTree is a stub method. 
+func (repo *Repository) LoadTree(id restic.ID) (*restic.Tree, error) { + return repo.LoadTreeFn(id) +} + +// SaveTree is a stub method. +func (repo *Repository) SaveTree(t *restic.Tree) (restic.ID, error) { + return repo.SaveTreeFn(t) +} diff --git a/src/restic/repository/repository_test.go b/src/restic/repository/repository_test.go index 3295ab5d9..2a2d54aaa 100644 --- a/src/restic/repository/repository_test.go +++ b/src/restic/repository/repository_test.go @@ -10,6 +10,7 @@ import ( "testing" "restic" + "restic/archiver" "restic/repository" . "restic/test" ) @@ -17,8 +18,8 @@ import ( var testSizes = []int{5, 23, 2<<18 + 23, 1 << 20} func TestSave(t *testing.T) { - repo := SetupRepo() - defer TeardownRepo(repo) + repo, cleanup := SetupRepo(t) + defer cleanup() for _, size := range testSizes { data := make([]byte, size) @@ -53,8 +54,8 @@ func TestSave(t *testing.T) { } func TestSaveFrom(t *testing.T) { - repo := SetupRepo() - defer TeardownRepo(repo) + repo, cleanup := SetupRepo(t) + defer cleanup() for _, size := range testSizes { data := make([]byte, size) @@ -87,8 +88,8 @@ func TestSaveFrom(t *testing.T) { } func BenchmarkSaveAndEncrypt(t *testing.B) { - repo := SetupRepo() - defer TeardownRepo(repo) + repo, cleanup := SetupRepo(t) + defer cleanup() size := 4 << 20 // 4MiB @@ -109,15 +110,15 @@ func BenchmarkSaveAndEncrypt(t *testing.B) { } func TestLoadTree(t *testing.T) { - repo := SetupRepo() - defer TeardownRepo(repo) + repo, cleanup := SetupRepo(t) + defer cleanup() if BenchArchiveDirectory == "" { t.Skip("benchdir not set, skipping") } // archive a few files - sn := SnapshotDir(t, repo, BenchArchiveDirectory, nil) + sn := archiver.TestSnapshot(t, repo, BenchArchiveDirectory, nil) OK(t, repo.Flush()) _, err := repo.LoadTree(*sn.Tree) @@ -125,15 +126,15 @@ func TestLoadTree(t *testing.T) { } func BenchmarkLoadTree(t *testing.B) { - repo := SetupRepo() - defer TeardownRepo(repo) + repo, cleanup := SetupRepo(t) + defer cleanup() if BenchArchiveDirectory == "" { t.Skip("benchdir not set, skipping") } // archive a few files - sn := SnapshotDir(t, repo, BenchArchiveDirectory, nil) + sn := archiver.TestSnapshot(t, repo, BenchArchiveDirectory, nil) OK(t, repo.Flush()) t.ResetTimer() @@ -145,8 +146,8 @@ func BenchmarkLoadTree(t *testing.B) { } func TestLoadJSONUnpacked(t *testing.T) { - repo := SetupRepo() - defer TeardownRepo(repo) + repo, cleanup := SetupRepo(t) + defer cleanup() if BenchArchiveDirectory == "" { t.Skip("benchdir not set, skipping") @@ -206,8 +207,8 @@ func saveRandomDataBlobs(t testing.TB, repo restic.Repository, num int, sizeMax } func TestRepositoryIncrementalIndex(t *testing.T) { - repo := SetupRepo() - defer TeardownRepo(repo) + repo, cleanup := SetupRepo(t) + defer cleanup() repository.IndexFull = func(*repository.Index) bool { return true } diff --git a/src/restic/test/backend.go b/src/restic/test/backend.go index 66567ab79..055d7c10e 100644 --- a/src/restic/test/backend.go +++ b/src/restic/test/backend.go @@ -8,7 +8,6 @@ import ( "testing" "restic" - "restic/archiver" "restic/backend/local" "restic/repository" ) @@ -50,54 +49,38 @@ func getBoolVar(name string, defaultValue bool) bool { } // SetupRepo returns a repo setup in a temp dir. 
-func SetupRepo() restic.Repository { +func SetupRepo(t testing.TB) (repo restic.Repository, cleanup func()) { tempdir, err := ioutil.TempDir(TestTempDir, "restic-test-") if err != nil { - panic(err) + t.Fatal(err) } // create repository below temp dir b, err := local.Create(filepath.Join(tempdir, "repo")) if err != nil { - panic(err) + t.Fatal(err) } - repo := repository.New(b) - err = repo.Init(TestPassword) + r := repository.New(b) + err = r.Init(TestPassword) if err != nil { - panic(err) + t.Fatal(err) } + repo = r + cleanup = func() { + if !TestCleanupTempDirs { + l := repo.Backend().(*local.Local) + fmt.Printf("leaving local backend at %s\n", l.Location()) + return + } - return repo -} - -// TeardownRepo removes a repository created by SetupRepo. -func TeardownRepo(repo restic.Repository) { - if !TestCleanupTempDirs { - l := repo.Backend().(*local.Local) - fmt.Printf("leaving local backend at %s\n", l.Location()) - return - } - - if r, ok := repo.(restic.Deleter); ok { - err := r.Delete() - if err != nil { - panic(err) + if r, ok := repo.(restic.Deleter); ok { + err := r.Delete() + if err != nil { + t.Fatal(err) + } } } -} -// SnapshotDir creates a new snapshot of path. -func SnapshotDir(t testing.TB, repo restic.Repository, path string, parent *restic.ID) *restic.Snapshot { - arch := archiver.New(repo) - sn, _, err := arch.Snapshot(nil, []string{path}, parent) - OK(t, err) - return sn -} - -// WithRepo runs the function t with a repository that is removed after f returns. -func WithRepo(t testing.TB, f func(restic.Repository)) { - repo := SetupRepo() - f(repo) - TeardownRepo(repo) + return repo, cleanup } diff --git a/src/restic/tree_test.go b/src/restic/tree_test.go index 779842552..71c4441ed 100644 --- a/src/restic/tree_test.go +++ b/src/restic/tree_test.go @@ -92,8 +92,8 @@ func TestNodeComparison(t *testing.T) { } func TestLoadTree(t *testing.T) { - repo := SetupRepo() - defer TeardownRepo(repo) + repo, cleanup := SetupRepo(t) + defer cleanup() // save tree tree := restic.NewTree() diff --git a/src/restic/walk/walk_test.go b/src/restic/walk/walk_test.go index 681c2f1e6..19874c3b0 100644 --- a/src/restic/walk/walk_test.go +++ b/src/restic/walk/walk_test.go @@ -15,8 +15,8 @@ import ( ) func TestWalkTree(t *testing.T) { - repo := SetupRepo() - defer TeardownRepo(repo) + repo, cleanup := SetupRepo(t) + defer cleanup() dirs, err := filepath.Glob(TestWalkerPath) OK(t, err) From ea073f58cf924bcdbfd67039cedd6703491f9c02 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sun, 4 Sep 2016 13:08:05 +0200 Subject: [PATCH 35/40] Correct comment --- src/restic/backend/doc.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/restic/backend/doc.go b/src/restic/backend/doc.go index f82c3d671..daab2e2f8 100644 --- a/src/restic/backend/doc.go +++ b/src/restic/backend/doc.go @@ -1,5 +1,4 @@ // Package backend provides local and remote storage for restic repositories. -// All backends need to implement the Backend interface. There is a -// MockBackend, which can be used for mocking in tests, and a MemBackend, which -// stores all data in a hash internally. +// All backends need to implement the Backend interface. There is a MemBackend, +// which stores all data in a map internally and can be used for testing. 
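In tests, the in-memory backend is normally reached through the repository helpers rather than used directly; a sketch, assuming a testing.TB value t and repository.TestRepositoryWithBackend, which falls back to an in-memory backend when the backend argument is nil:

    // throwaway repository on the in-memory backend; cleanup releases it
    repo, cleanup := repository.TestRepositoryWithBackend(t, nil)
    defer cleanup()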
package backend From f5b9ee53a30540735496925d45b8879efee58452 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sun, 4 Sep 2016 13:18:25 +0200 Subject: [PATCH 36/40] Fix mock.Repository --- src/restic/mock/repository.go | 42 +++++++++++++++++------------------ 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/src/restic/mock/repository.go b/src/restic/mock/repository.go index df109c5f3..3143a8ceb 100644 --- a/src/restic/mock/repository.go +++ b/src/restic/mock/repository.go @@ -7,7 +7,7 @@ import ( // Repository implements a mock Repository. type Repository struct { - BackendFn func() Backend + BackendFn func() restic.Backend KeyFn func() *crypto.Key @@ -41,101 +41,101 @@ type Repository struct { } // Backend is a stub method. -func (repo *Repository) Backend() Backend { +func (repo Repository) Backend() restic.Backend { return repo.BackendFn() } // Key is a stub method. -func (repo *Repository) Key() *crypto.Key { +func (repo Repository) Key() *crypto.Key { return repo.KeyFn() } // SetIndex is a stub method. -func (repo *Repository) SetIndex(idx restic.Index) { +func (repo Repository) SetIndex(idx restic.Index) { repo.SetIndexFn(idx) } // Index is a stub method. -func (repo *Repository) Index() restic.Index { +func (repo Repository) Index() restic.Index { return repo.IndexFn() } // SaveFullIndex is a stub method. -func (repo *Repository) SaveFullIndex() error { +func (repo Repository) SaveFullIndex() error { return repo.SaveFullIndexFn() } // SaveIndex is a stub method. -func (repo *Repository) SaveIndex() error { +func (repo Repository) SaveIndex() error { return repo.SaveIndexFn() } // LoadIndex is a stub method. -func (repo *Repository) LoadIndex() error { +func (repo Repository) LoadIndex() error { return repo.LoadIndexFn() } // Config is a stub method. -func (repo *Repository) Config() restic.Config { +func (repo Repository) Config() restic.Config { return repo.ConfigFn() } // LookupBlobSize is a stub method. -func (repo *Repository) LookupBlobSize(id restic.ID, t restic.BlobType) (uint, error) { +func (repo Repository) LookupBlobSize(id restic.ID, t restic.BlobType) (uint, error) { return repo.LookupBlobSizeFn(id, t) } // List is a stub method. -func (repo *Repository) List(t restic.FileType, done <-chan struct{}) <-chan restic.ID { +func (repo Repository) List(t restic.FileType, done <-chan struct{}) <-chan restic.ID { return repo.ListFn(t, done) } // ListPack is a stub method. -func (repo *Repository) ListPack(id restic.ID) ([]restic.Blob, int64, error) { +func (repo Repository) ListPack(id restic.ID) ([]restic.Blob, int64, error) { return repo.ListPackFn(id) } // Flush is a stub method. -func (repo *Repository) Flush() error { +func (repo Repository) Flush() error { return repo.FlushFn() } // SaveUnpacked is a stub method. -func (repo *Repository) SaveUnpacked(t restic.FileType, buf []byte) (restic.ID, error) { +func (repo Repository) SaveUnpacked(t restic.FileType, buf []byte) (restic.ID, error) { return repo.SaveUnpackedFn(t, buf) } // SaveJSONUnpacked is a stub method. -func (repo *Repository) SaveJSONUnpacked(t restic.FileType, item interface{}) (restic.ID, error) { +func (repo Repository) SaveJSONUnpacked(t restic.FileType, item interface{}) (restic.ID, error) { return repo.SaveJSONUnpackedFn(t, item) } // LoadJSONUnpacked is a stub method. 
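The receiver change matters for interface satisfaction: with value receivers both mock.Repository and *mock.Repository carry the full method set, so either form can be passed where a restic.Repository is expected. A compile-time check makes that explicit (assuming the method set matches the interface, as intended here):

    var (
        _ restic.Repository = mock.Repository{}
        _ restic.Repository = &mock.Repository{}
    )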
-func (repo *Repository) LoadJSONUnpacked(t restic.FileType, id restic.ID, item interface{}) error { +func (repo Repository) LoadJSONUnpacked(t restic.FileType, id restic.ID, item interface{}) error { return repo.LoadJSONUnpackedFn(t, id, item) } // LoadAndDecrypt is a stub method. -func (repo *Repository) LoadAndDecrypt(t restic.FileType, id restic.ID) ([]byte, error) { +func (repo Repository) LoadAndDecrypt(t restic.FileType, id restic.ID) ([]byte, error) { return repo.LoadAndDecryptFn(t, id) } // LoadBlob is a stub method. -func (repo *Repository) LoadBlob(t restic.BlobType, id restic.ID, buf []byte) (int, error) { +func (repo Repository) LoadBlob(t restic.BlobType, id restic.ID, buf []byte) (int, error) { return repo.LoadBlobFn(t, id, buf) } // SaveBlob is a stub method. -func (repo *Repository) SaveBlob(t restic.BlobType, buf []byte, id restic.ID) (restic.ID, error) { +func (repo Repository) SaveBlob(t restic.BlobType, buf []byte, id restic.ID) (restic.ID, error) { return repo.SaveBlobFn(t, buf, id) } // LoadTree is a stub method. -func (repo *Repository) LoadTree(id restic.ID) (*restic.Tree, error) { +func (repo Repository) LoadTree(id restic.ID) (*restic.Tree, error) { return repo.LoadTreeFn(id) } // SaveTree is a stub method. -func (repo *Repository) SaveTree(t *restic.Tree) (restic.ID, error) { +func (repo Repository) SaveTree(t *restic.Tree) (restic.ID, error) { return repo.SaveTreeFn(t) } From 6ab425f13088eda65254c12c0c16c9c9e76c5c31 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sun, 4 Sep 2016 13:24:51 +0200 Subject: [PATCH 37/40] Remove SetupRepo --- src/restic/archiver/archiver_test.go | 11 +++--- src/restic/crypto/kdf.go | 3 +- src/restic/lock_test.go | 19 +++++----- src/restic/repository/repository_test.go | 14 ++++---- src/restic/test/{backend.go => vars.go} | 44 ------------------------ src/restic/tree_test.go | 3 +- src/restic/walk/walk_test.go | 3 +- 7 files changed, 29 insertions(+), 68 deletions(-) rename src/restic/test/{backend.go => vars.go} (57%) diff --git a/src/restic/archiver/archiver_test.go b/src/restic/archiver/archiver_test.go index 67a9ff8f6..6073b654d 100644 --- a/src/restic/archiver/archiver_test.go +++ b/src/restic/archiver/archiver_test.go @@ -10,6 +10,7 @@ import ( "restic/archiver" "restic/checker" "restic/crypto" + "restic/repository" . 
"restic/test" "restic/errors" @@ -47,7 +48,7 @@ func benchmarkChunkEncrypt(b testing.TB, buf, buf2 []byte, rd Rdr, key *crypto.K } func BenchmarkChunkEncrypt(b *testing.B) { - repo, cleanup := SetupRepo(b) + repo, cleanup := repository.TestRepository(b) defer cleanup() data := Random(23, 10<<20) // 10MiB @@ -79,7 +80,7 @@ func benchmarkChunkEncryptP(b *testing.PB, buf []byte, rd Rdr, key *crypto.Key) } func BenchmarkChunkEncryptParallel(b *testing.B) { - repo, cleanup := SetupRepo(b) + repo, cleanup := repository.TestRepository(b) defer cleanup() data := Random(23, 10<<20) // 10MiB @@ -98,7 +99,7 @@ func BenchmarkChunkEncryptParallel(b *testing.B) { } func archiveDirectory(b testing.TB) { - repo, cleanup := SetupRepo(b) + repo, cleanup := repository.TestRepository(b) defer cleanup() arch := archiver.New(repo) @@ -136,7 +137,7 @@ func countPacks(repo restic.Repository, t restic.FileType) (n uint) { } func archiveWithDedup(t testing.TB) { - repo, cleanup := SetupRepo(t) + repo, cleanup := repository.TestRepository(t) defer cleanup() if BenchArchiveDirectory == "" { @@ -208,7 +209,7 @@ func TestParallelSaveWithDuplication(t *testing.T) { } func testParallelSaveWithDuplication(t *testing.T, seed int) { - repo, cleanup := SetupRepo(t) + repo, cleanup := repository.TestRepository(t) defer cleanup() dataSizeMb := 128 diff --git a/src/restic/crypto/kdf.go b/src/restic/crypto/kdf.go index ccde35ace..158f462f1 100644 --- a/src/restic/crypto/kdf.go +++ b/src/restic/crypto/kdf.go @@ -4,9 +4,10 @@ import ( "crypto/rand" "time" + "restic/errors" + sscrypt "github.com/elithrar/simple-scrypt" "golang.org/x/crypto/scrypt" - "restic/errors" ) const saltLength = 64 diff --git a/src/restic/lock_test.go b/src/restic/lock_test.go index b60b9ea8a..a6854dbe6 100644 --- a/src/restic/lock_test.go +++ b/src/restic/lock_test.go @@ -6,11 +6,12 @@ import ( "time" "restic" + "restic/repository" . 
"restic/test" ) func TestLock(t *testing.T) { - repo, cleanup := SetupRepo(t) + repo, cleanup := repository.TestRepository(t) defer cleanup() lock, err := restic.NewLock(repo) @@ -20,7 +21,7 @@ func TestLock(t *testing.T) { } func TestDoubleUnlock(t *testing.T) { - repo, cleanup := SetupRepo(t) + repo, cleanup := repository.TestRepository(t) defer cleanup() lock, err := restic.NewLock(repo) @@ -34,7 +35,7 @@ func TestDoubleUnlock(t *testing.T) { } func TestMultipleLock(t *testing.T) { - repo, cleanup := SetupRepo(t) + repo, cleanup := repository.TestRepository(t) defer cleanup() lock1, err := restic.NewLock(repo) @@ -48,7 +49,7 @@ func TestMultipleLock(t *testing.T) { } func TestLockExclusive(t *testing.T) { - repo, cleanup := SetupRepo(t) + repo, cleanup := repository.TestRepository(t) defer cleanup() elock, err := restic.NewExclusiveLock(repo) @@ -57,7 +58,7 @@ func TestLockExclusive(t *testing.T) { } func TestLockOnExclusiveLockedRepo(t *testing.T) { - repo, cleanup := SetupRepo(t) + repo, cleanup := repository.TestRepository(t) defer cleanup() elock, err := restic.NewExclusiveLock(repo) @@ -74,7 +75,7 @@ func TestLockOnExclusiveLockedRepo(t *testing.T) { } func TestExclusiveLockOnLockedRepo(t *testing.T) { - repo, cleanup := SetupRepo(t) + repo, cleanup := repository.TestRepository(t) defer cleanup() elock, err := restic.NewLock(repo) @@ -168,7 +169,7 @@ func lockExists(repo restic.Repository, t testing.TB, id restic.ID) bool { } func TestLockWithStaleLock(t *testing.T) { - repo, cleanup := SetupRepo(t) + repo, cleanup := repository.TestRepository(t) defer cleanup() id1, err := createFakeLock(repo, time.Now().Add(-time.Hour), os.Getpid()) @@ -193,7 +194,7 @@ func TestLockWithStaleLock(t *testing.T) { } func TestRemoveAllLocks(t *testing.T) { - repo, cleanup := SetupRepo(t) + repo, cleanup := repository.TestRepository(t) defer cleanup() id1, err := createFakeLock(repo, time.Now().Add(-time.Hour), os.Getpid()) @@ -216,7 +217,7 @@ func TestRemoveAllLocks(t *testing.T) { } func TestLockRefresh(t *testing.T) { - repo, cleanup := SetupRepo(t) + repo, cleanup := repository.TestRepository(t) defer cleanup() lock, err := restic.NewLock(repo) diff --git a/src/restic/repository/repository_test.go b/src/restic/repository/repository_test.go index 2a2d54aaa..a161e6509 100644 --- a/src/restic/repository/repository_test.go +++ b/src/restic/repository/repository_test.go @@ -18,7 +18,7 @@ import ( var testSizes = []int{5, 23, 2<<18 + 23, 1 << 20} func TestSave(t *testing.T) { - repo, cleanup := SetupRepo(t) + repo, cleanup := repository.TestRepository(t) defer cleanup() for _, size := range testSizes { @@ -54,7 +54,7 @@ func TestSave(t *testing.T) { } func TestSaveFrom(t *testing.T) { - repo, cleanup := SetupRepo(t) + repo, cleanup := repository.TestRepository(t) defer cleanup() for _, size := range testSizes { @@ -88,7 +88,7 @@ func TestSaveFrom(t *testing.T) { } func BenchmarkSaveAndEncrypt(t *testing.B) { - repo, cleanup := SetupRepo(t) + repo, cleanup := repository.TestRepository(t) defer cleanup() size := 4 << 20 // 4MiB @@ -110,7 +110,7 @@ func BenchmarkSaveAndEncrypt(t *testing.B) { } func TestLoadTree(t *testing.T) { - repo, cleanup := SetupRepo(t) + repo, cleanup := repository.TestRepository(t) defer cleanup() if BenchArchiveDirectory == "" { @@ -126,7 +126,7 @@ func TestLoadTree(t *testing.T) { } func BenchmarkLoadTree(t *testing.B) { - repo, cleanup := SetupRepo(t) + repo, cleanup := repository.TestRepository(t) defer cleanup() if BenchArchiveDirectory == "" { @@ -146,7 +146,7 @@ func 
BenchmarkLoadTree(t *testing.B) { } func TestLoadJSONUnpacked(t *testing.T) { - repo, cleanup := SetupRepo(t) + repo, cleanup := repository.TestRepository(t) defer cleanup() if BenchArchiveDirectory == "" { @@ -207,7 +207,7 @@ func saveRandomDataBlobs(t testing.TB, repo restic.Repository, num int, sizeMax } func TestRepositoryIncrementalIndex(t *testing.T) { - repo, cleanup := SetupRepo(t) + repo, cleanup := repository.TestRepository(t) defer cleanup() repository.IndexFull = func(*repository.Index) bool { return true } diff --git a/src/restic/test/backend.go b/src/restic/test/vars.go similarity index 57% rename from src/restic/test/backend.go rename to src/restic/test/vars.go index 055d7c10e..cde5f94a4 100644 --- a/src/restic/test/backend.go +++ b/src/restic/test/vars.go @@ -2,14 +2,7 @@ package test_helper import ( "fmt" - "io/ioutil" "os" - "path/filepath" - "testing" - - "restic" - "restic/backend/local" - "restic/repository" ) var ( @@ -47,40 +40,3 @@ func getBoolVar(name string, defaultValue bool) bool { return defaultValue } - -// SetupRepo returns a repo setup in a temp dir. -func SetupRepo(t testing.TB) (repo restic.Repository, cleanup func()) { - tempdir, err := ioutil.TempDir(TestTempDir, "restic-test-") - if err != nil { - t.Fatal(err) - } - - // create repository below temp dir - b, err := local.Create(filepath.Join(tempdir, "repo")) - if err != nil { - t.Fatal(err) - } - - r := repository.New(b) - err = r.Init(TestPassword) - if err != nil { - t.Fatal(err) - } - repo = r - cleanup = func() { - if !TestCleanupTempDirs { - l := repo.Backend().(*local.Local) - fmt.Printf("leaving local backend at %s\n", l.Location()) - return - } - - if r, ok := repo.(restic.Deleter); ok { - err := r.Delete() - if err != nil { - t.Fatal(err) - } - } - } - - return repo, cleanup -} diff --git a/src/restic/tree_test.go b/src/restic/tree_test.go index 71c4441ed..1d23e9240 100644 --- a/src/restic/tree_test.go +++ b/src/restic/tree_test.go @@ -8,6 +8,7 @@ import ( "testing" "restic" + "restic/repository" . "restic/test" ) @@ -92,7 +93,7 @@ func TestNodeComparison(t *testing.T) { } func TestLoadTree(t *testing.T) { - repo, cleanup := SetupRepo(t) + repo, cleanup := repository.TestRepository(t) defer cleanup() // save tree diff --git a/src/restic/walk/walk_test.go b/src/restic/walk/walk_test.go index 19874c3b0..d8416a65d 100644 --- a/src/restic/walk/walk_test.go +++ b/src/restic/walk/walk_test.go @@ -10,12 +10,13 @@ import ( "restic" "restic/archiver" "restic/pipe" + "restic/repository" . 
"restic/test" "restic/walk" ) func TestWalkTree(t *testing.T) { - repo, cleanup := SetupRepo(t) + repo, cleanup := repository.TestRepository(t) defer cleanup() dirs, err := filepath.Glob(TestWalkerPath) From 512a92895ff4fe26ca15714793a6aba11f8ce894 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sun, 4 Sep 2016 14:29:04 +0200 Subject: [PATCH 38/40] Rename WithTestEnvironment -> Env --- src/restic/checker/checker_test.go | 228 ++++++++++++----------- src/restic/repository/repository_test.go | 26 +-- src/restic/repository/testing.go | 26 ++- src/restic/test/doc.go | 4 +- src/restic/test/helpers.go | 37 ++-- src/restic/test/vars.go | 2 +- src/restic/walk/walk_test.go | 84 +++++---- 7 files changed, 207 insertions(+), 200 deletions(-) diff --git a/src/restic/checker/checker_test.go b/src/restic/checker/checker_test.go index 0037f0adb..d900a1548 100644 --- a/src/restic/checker/checker_test.go +++ b/src/restic/checker/checker_test.go @@ -1,7 +1,6 @@ package checker_test import ( - "fmt" "math/rand" "path/filepath" "sort" @@ -12,7 +11,7 @@ import ( "restic/backend/mem" "restic/checker" "restic/repository" - . "restic/test" + "restic/test" ) var checkerTestData = filepath.Join("testdata", "checker-test-repo.tar.gz") @@ -60,154 +59,158 @@ func checkData(chkr *checker.Checker) []error { } func TestCheckRepo(t *testing.T) { - WithTestEnvironment(t, checkerTestData, func(repodir string) { - repo := OpenLocalRepo(t, repodir) + repodir, cleanup := test.Env(t, checkerTestData) + defer cleanup() - chkr := checker.New(repo) - hints, errs := chkr.LoadIndex() - if len(errs) > 0 { - t.Fatalf("expected no errors, got %v: %v", len(errs), errs) - } + repo := repository.TestOpenLocal(t, repodir) - if len(hints) > 0 { - t.Errorf("expected no hints, got %v: %v", len(hints), hints) - } + chkr := checker.New(repo) + hints, errs := chkr.LoadIndex() + if len(errs) > 0 { + t.Fatalf("expected no errors, got %v: %v", len(errs), errs) + } - OKs(t, checkPacks(chkr)) - OKs(t, checkStruct(chkr)) - }) + if len(hints) > 0 { + t.Errorf("expected no hints, got %v: %v", len(hints), hints) + } + + test.OKs(t, checkPacks(chkr)) + test.OKs(t, checkStruct(chkr)) } func TestMissingPack(t *testing.T) { - WithTestEnvironment(t, checkerTestData, func(repodir string) { - repo := OpenLocalRepo(t, repodir) + repodir, cleanup := test.Env(t, checkerTestData) + defer cleanup() - packID := "657f7fb64f6a854fff6fe9279998ee09034901eded4e6db9bcee0e59745bbce6" - OK(t, repo.Backend().Remove(restic.DataFile, packID)) + repo := repository.TestOpenLocal(t, repodir) - chkr := checker.New(repo) - hints, errs := chkr.LoadIndex() - if len(errs) > 0 { - t.Fatalf("expected no errors, got %v: %v", len(errs), errs) - } + packID := "657f7fb64f6a854fff6fe9279998ee09034901eded4e6db9bcee0e59745bbce6" + test.OK(t, repo.Backend().Remove(restic.DataFile, packID)) - if len(hints) > 0 { - t.Errorf("expected no hints, got %v: %v", len(hints), hints) - } + chkr := checker.New(repo) + hints, errs := chkr.LoadIndex() + if len(errs) > 0 { + t.Fatalf("expected no errors, got %v: %v", len(errs), errs) + } - errs = checkPacks(chkr) + if len(hints) > 0 { + t.Errorf("expected no hints, got %v: %v", len(hints), hints) + } - Assert(t, len(errs) == 1, - "expected exactly one error, got %v", len(errs)) + errs = checkPacks(chkr) - if err, ok := errs[0].(checker.PackError); ok { - Equals(t, packID, err.ID.String()) - } else { - t.Errorf("expected error returned by checker.Packs() to be PackError, got %v", err) - } - }) + test.Assert(t, len(errs) == 1, + "expected exactly one 
error, got %v", len(errs)) + + if err, ok := errs[0].(checker.PackError); ok { + test.Equals(t, packID, err.ID.String()) + } else { + t.Errorf("expected error returned by checker.Packs() to be PackError, got %v", err) + } } func TestUnreferencedPack(t *testing.T) { - WithTestEnvironment(t, checkerTestData, func(repodir string) { - repo := OpenLocalRepo(t, repodir) + repodir, cleanup := test.Env(t, checkerTestData) + defer cleanup() - // index 3f1a only references pack 60e0 - indexID := "3f1abfcb79c6f7d0a3be517d2c83c8562fba64ef2c8e9a3544b4edaf8b5e3b44" - packID := "60e0438dcb978ec6860cc1f8c43da648170ee9129af8f650f876bad19f8f788e" - OK(t, repo.Backend().Remove(restic.IndexFile, indexID)) + repo := repository.TestOpenLocal(t, repodir) - chkr := checker.New(repo) - hints, errs := chkr.LoadIndex() - if len(errs) > 0 { - t.Fatalf("expected no errors, got %v: %v", len(errs), errs) - } + // index 3f1a only references pack 60e0 + indexID := "3f1abfcb79c6f7d0a3be517d2c83c8562fba64ef2c8e9a3544b4edaf8b5e3b44" + packID := "60e0438dcb978ec6860cc1f8c43da648170ee9129af8f650f876bad19f8f788e" + test.OK(t, repo.Backend().Remove(restic.IndexFile, indexID)) - if len(hints) > 0 { - t.Errorf("expected no hints, got %v: %v", len(hints), hints) - } + chkr := checker.New(repo) + hints, errs := chkr.LoadIndex() + if len(errs) > 0 { + t.Fatalf("expected no errors, got %v: %v", len(errs), errs) + } - errs = checkPacks(chkr) + if len(hints) > 0 { + t.Errorf("expected no hints, got %v: %v", len(hints), hints) + } - Assert(t, len(errs) == 1, - "expected exactly one error, got %v", len(errs)) + errs = checkPacks(chkr) - if err, ok := errs[0].(checker.PackError); ok { - Equals(t, packID, err.ID.String()) - } else { - t.Errorf("expected error returned by checker.Packs() to be PackError, got %v", err) - } - }) + test.Assert(t, len(errs) == 1, + "expected exactly one error, got %v", len(errs)) + + if err, ok := errs[0].(checker.PackError); ok { + test.Equals(t, packID, err.ID.String()) + } else { + t.Errorf("expected error returned by checker.Packs() to be PackError, got %v", err) + } } func TestUnreferencedBlobs(t *testing.T) { - WithTestEnvironment(t, checkerTestData, func(repodir string) { - repo := OpenLocalRepo(t, repodir) + repodir, cleanup := test.Env(t, checkerTestData) + defer cleanup() - snID := "51d249d28815200d59e4be7b3f21a157b864dc343353df9d8e498220c2499b02" - OK(t, repo.Backend().Remove(restic.SnapshotFile, snID)) + repo := repository.TestOpenLocal(t, repodir) - unusedBlobsBySnapshot := restic.IDs{ - ParseID("58c748bbe2929fdf30c73262bd8313fe828f8925b05d1d4a87fe109082acb849"), - ParseID("988a272ab9768182abfd1fe7d7a7b68967825f0b861d3b36156795832c772235"), - ParseID("c01952de4d91da1b1b80bc6e06eaa4ec21523f4853b69dc8231708b9b7ec62d8"), - ParseID("bec3a53d7dc737f9a9bee68b107ec9e8ad722019f649b34d474b9982c3a3fec7"), - ParseID("2a6f01e5e92d8343c4c6b78b51c5a4dc9c39d42c04e26088c7614b13d8d0559d"), - ParseID("18b51b327df9391732ba7aaf841a4885f350d8a557b2da8352c9acf8898e3f10"), - } + snID := "51d249d28815200d59e4be7b3f21a157b864dc343353df9d8e498220c2499b02" + test.OK(t, repo.Backend().Remove(restic.SnapshotFile, snID)) - sort.Sort(unusedBlobsBySnapshot) + unusedBlobsBySnapshot := restic.IDs{ + test.ParseID("58c748bbe2929fdf30c73262bd8313fe828f8925b05d1d4a87fe109082acb849"), + test.ParseID("988a272ab9768182abfd1fe7d7a7b68967825f0b861d3b36156795832c772235"), + test.ParseID("c01952de4d91da1b1b80bc6e06eaa4ec21523f4853b69dc8231708b9b7ec62d8"), + test.ParseID("bec3a53d7dc737f9a9bee68b107ec9e8ad722019f649b34d474b9982c3a3fec7"), + 
test.ParseID("2a6f01e5e92d8343c4c6b78b51c5a4dc9c39d42c04e26088c7614b13d8d0559d"), + test.ParseID("18b51b327df9391732ba7aaf841a4885f350d8a557b2da8352c9acf8898e3f10"), + } - chkr := checker.New(repo) - hints, errs := chkr.LoadIndex() - if len(errs) > 0 { - t.Fatalf("expected no errors, got %v: %v", len(errs), errs) - } + sort.Sort(unusedBlobsBySnapshot) - if len(hints) > 0 { - t.Errorf("expected no hints, got %v: %v", len(hints), hints) - } + chkr := checker.New(repo) + hints, errs := chkr.LoadIndex() + if len(errs) > 0 { + t.Fatalf("expected no errors, got %v: %v", len(errs), errs) + } - OKs(t, checkPacks(chkr)) - OKs(t, checkStruct(chkr)) + if len(hints) > 0 { + t.Errorf("expected no hints, got %v: %v", len(hints), hints) + } - blobs := chkr.UnusedBlobs() - sort.Sort(blobs) + test.OKs(t, checkPacks(chkr)) + test.OKs(t, checkStruct(chkr)) - Equals(t, unusedBlobsBySnapshot, blobs) - }) + blobs := chkr.UnusedBlobs() + sort.Sort(blobs) + + test.Equals(t, unusedBlobsBySnapshot, blobs) } var checkerDuplicateIndexTestData = filepath.Join("testdata", "duplicate-packs-in-index-test-repo.tar.gz") func TestDuplicatePacksInIndex(t *testing.T) { - WithTestEnvironment(t, checkerDuplicateIndexTestData, func(repodir string) { - repo := OpenLocalRepo(t, repodir) + repodir, cleanup := test.Env(t, checkerDuplicateIndexTestData) + defer cleanup() - chkr := checker.New(repo) - hints, errs := chkr.LoadIndex() - if len(hints) == 0 { - t.Fatalf("did not get expected checker hints for duplicate packs in indexes") + repo := repository.TestOpenLocal(t, repodir) + + chkr := checker.New(repo) + hints, errs := chkr.LoadIndex() + if len(hints) == 0 { + t.Fatalf("did not get expected checker hints for duplicate packs in indexes") + } + + found := false + for _, hint := range hints { + if _, ok := hint.(checker.ErrDuplicatePacks); ok { + found = true + } else { + t.Errorf("got unexpected hint: %v", hint) } + } - found := false - for _, hint := range hints { - if _, ok := hint.(checker.ErrDuplicatePacks); ok { - found = true - } else { - t.Errorf("got unexpected hint: %v", hint) - } - } + if !found { + t.Fatalf("did not find hint ErrDuplicatePacks") + } - if !found { - t.Fatalf("did not find hint ErrDuplicatePacks") - } - - if len(errs) > 0 { - t.Errorf("expected no errors, got %v: %v", len(errs), errs) - } - - }) + if len(errs) > 0 { + t.Errorf("expected no errors, got %v: %v", len(errs), errs) + } } // errorBackend randomly modifies data after reading. 
@@ -217,7 +220,6 @@ type errorBackend struct { } func (b errorBackend) Load(h restic.Handle, p []byte, off int64) (int, error) { - fmt.Printf("load %v\n", h) n, err := b.Backend.Load(h, p, off) if b.ProduceErrors { @@ -242,16 +244,16 @@ func TestCheckerModifiedData(t *testing.T) { repository.TestUseLowSecurityKDFParameters(t) repo := repository.New(be) - OK(t, repo.Init(TestPassword)) + test.OK(t, repo.Init(test.TestPassword)) arch := archiver.New(repo) _, id, err := arch.Snapshot(nil, []string{"."}, nil) - OK(t, err) + test.OK(t, err) t.Logf("archived as %v", id.Str()) beError := &errorBackend{Backend: be} checkRepo := repository.New(beError) - OK(t, checkRepo.SearchKey(TestPassword, 5)) + test.OK(t, checkRepo.SearchKey(test.TestPassword, 5)) chkr := checker.New(checkRepo) diff --git a/src/restic/repository/repository_test.go b/src/restic/repository/repository_test.go index a161e6509..ce4fb68ed 100644 --- a/src/restic/repository/repository_test.go +++ b/src/restic/repository/repository_test.go @@ -174,22 +174,24 @@ func TestLoadJSONUnpacked(t *testing.T) { var repoFixture = filepath.Join("testdata", "test-repo.tar.gz") func TestRepositoryLoadIndex(t *testing.T) { - WithTestEnvironment(t, repoFixture, func(repodir string) { - repo := OpenLocalRepo(t, repodir) - OK(t, repo.LoadIndex()) - }) + repodir, cleanup := Env(t, repoFixture) + defer cleanup() + + repo := repository.TestOpenLocal(t, repodir) + OK(t, repo.LoadIndex()) } func BenchmarkLoadIndex(b *testing.B) { - WithTestEnvironment(b, repoFixture, func(repodir string) { - repo := OpenLocalRepo(b, repodir) - b.ResetTimer() + repodir, cleanup := Env(b, repoFixture) + defer cleanup() - for i := 0; i < b.N; i++ { - repo.SetIndex(repository.NewMasterIndex()) - OK(b, repo.LoadIndex()) - } - }) + repo := repository.TestOpenLocal(b, repodir) + b.ResetTimer() + + for i := 0; i < b.N; i++ { + repo.SetIndex(repository.NewMasterIndex()) + OK(b, repo.LoadIndex()) + } } // saveRandomDataBlobs generates random data blobs and saves them to the repository. diff --git a/src/restic/repository/testing.go b/src/restic/repository/testing.go index 2cf2f8308..79b9b00a7 100644 --- a/src/restic/repository/testing.go +++ b/src/restic/repository/testing.go @@ -37,7 +37,7 @@ const testChunkerPol = chunker.Pol(0x3DA3358B4DC173) // TestRepositoryWithBackend returns a repository initialized with a test // password. If be is nil, an in-memory backend is used. A constant polynomial // is used for the chunker and low-security test parameters. -func TestRepositoryWithBackend(t testing.TB, be restic.Backend) (r *Repository, cleanup func()) { +func TestRepositoryWithBackend(t testing.TB, be restic.Backend) (r restic.Repository, cleanup func()) { TestUseLowSecurityKDFParameters(t) var beCleanup func() @@ -45,15 +45,15 @@ func TestRepositoryWithBackend(t testing.TB, be restic.Backend) (r *Repository, be, beCleanup = TestBackend(t) } - r = New(be) + repo := New(be) cfg := restic.TestCreateConfig(t, testChunkerPol) - err := r.init(TestPassword, cfg) + err := repo.init(TestPassword, cfg) if err != nil { t.Fatalf("TestRepository(): initialize repo failed: %v", err) } - return r, func() { + return repo, func() { if beCleanup != nil { beCleanup() } @@ -64,7 +64,7 @@ func TestRepositoryWithBackend(t testing.TB, be restic.Backend) (r *Repository, // in-memory backend. When the environment variable RESTIC_TEST_REPO is set to // a non-existing directory, a local backend is created there and this is used // instead. The directory is not removed, but left there for inspection. 
-func TestRepository(t testing.TB) (r *Repository, cleanup func()) { +func TestRepository(t testing.TB) (r restic.Repository, cleanup func()) { dir := os.Getenv("RESTIC_TEST_REPO") if dir != "" { _, err := os.Stat(dir) @@ -83,3 +83,19 @@ func TestRepository(t testing.TB) (r *Repository, cleanup func()) { return TestRepositoryWithBackend(t, nil) } + +// TestOpenLocal opens a local repository. +func TestOpenLocal(t testing.TB, dir string) (r restic.Repository) { + be, err := local.Open(dir) + if err != nil { + t.Fatal(err) + } + + repo := New(be) + err = repo.SearchKey(TestPassword, 10) + if err != nil { + t.Fatal(err) + } + + return repo +} diff --git a/src/restic/test/doc.go b/src/restic/test/doc.go index 44183c141..060bad354 100644 --- a/src/restic/test/doc.go +++ b/src/restic/test/doc.go @@ -1,2 +1,2 @@ -// Package test_helper provides helper functions for writing tests for restic. -package test_helper +// Package test provides helper functions for writing tests for restic. +package test diff --git a/src/restic/test/helpers.go b/src/restic/test/helpers.go index 2fbdb83d6..53e50d432 100644 --- a/src/restic/test/helpers.go +++ b/src/restic/test/helpers.go @@ -1,4 +1,4 @@ -package test_helper +package test import ( "compress/bzip2" @@ -16,9 +16,6 @@ import ( "testing" mrand "math/rand" - - "restic/backend/local" - "restic/repository" ) // Assert fails the test if the condition is false. @@ -184,40 +181,28 @@ func SetupTarTestFixture(t testing.TB, outputDir, tarFile string) { OK(t, cmd.Run()) } -// WithTestEnvironment creates a test environment, extracts the repository -// fixture and and calls f with the repository dir. -func WithTestEnvironment(t testing.TB, repoFixture string, f func(repodir string)) { +// Env creates a test environment and extracts the repository fixture. +// Returned is the repo path and a cleanup function. +func Env(t testing.TB, repoFixture string) (repodir string, cleanup func()) { tempdir, err := ioutil.TempDir(TestTempDir, "restic-test-") OK(t, err) fd, err := os.Open(repoFixture) if err != nil { - panic(err) + t.Fatal(err) } OK(t, fd.Close()) SetupTarTestFixture(t, tempdir, repoFixture) - f(filepath.Join(tempdir, "repo")) + return filepath.Join(tempdir, "repo"), func() { + if !TestCleanupTempDirs { + t.Logf("leaving temporary directory %v used for test", tempdir) + return + } - if !TestCleanupTempDirs { - t.Logf("leaving temporary directory %v used for test", tempdir) - return + RemoveAll(t, tempdir) } - - RemoveAll(t, tempdir) -} - -// OpenLocalRepo opens the local repository located at dir. 
-func OpenLocalRepo(t testing.TB, dir string) restic.Repository { - be, err := local.Open(dir) - OK(t, err) - - repo := repository.New(be) - err = repo.SearchKey(TestPassword, 10) - OK(t, err) - - return repo } func isFile(fi os.FileInfo) bool { diff --git a/src/restic/test/vars.go b/src/restic/test/vars.go index cde5f94a4..bb9f6b13d 100644 --- a/src/restic/test/vars.go +++ b/src/restic/test/vars.go @@ -1,4 +1,4 @@ -package test_helper +package test import ( "fmt" diff --git a/src/restic/walk/walk_test.go b/src/restic/walk/walk_test.go index d8416a65d..d4643014e 100644 --- a/src/restic/walk/walk_test.go +++ b/src/restic/walk/walk_test.go @@ -1341,53 +1341,55 @@ var walktreeTestItems = []string{ } func TestDelayedWalkTree(t *testing.T) { - WithTestEnvironment(t, repoFixture, func(repodir string) { - repo := OpenLocalRepo(t, repodir) - OK(t, repo.LoadIndex()) + repodir, cleanup := Env(t, repoFixture) + defer cleanup() - root, err := restic.ParseID("937a2f64f736c64ee700c6ab06f840c68c94799c288146a0e81e07f4c94254da") - OK(t, err) + repo := repository.TestOpenLocal(t, repodir) + OK(t, repo.LoadIndex()) - dr := delayRepo{repo, 100 * time.Millisecond} + root, err := restic.ParseID("937a2f64f736c64ee700c6ab06f840c68c94799c288146a0e81e07f4c94254da") + OK(t, err) + dr := delayRepo{repo, 100 * time.Millisecond} + + // start tree walker + treeJobs := make(chan walk.TreeJob) + go walk.Tree(dr, root, nil, treeJobs) + + i := 0 + for job := range treeJobs { + expectedPath := filepath.Join(strings.Split(walktreeTestItems[i], "/")...) + if job.Path != expectedPath { + t.Fatalf("expected path %q (%v), got %q", walktreeTestItems[i], i, job.Path) + } + i++ + } + + if i != len(walktreeTestItems) { + t.Fatalf("got %d items, expected %v", i, len(walktreeTestItems)) + } +} + +func BenchmarkDelayedWalkTree(t *testing.B) { + repodir, cleanup := Env(t, repoFixture) + defer cleanup() + + repo := repository.TestOpenLocal(t, repodir) + OK(t, repo.LoadIndex()) + + root, err := restic.ParseID("937a2f64f736c64ee700c6ab06f840c68c94799c288146a0e81e07f4c94254da") + OK(t, err) + + dr := delayRepo{repo, 10 * time.Millisecond} + + t.ResetTimer() + + for i := 0; i < t.N; i++ { // start tree walker treeJobs := make(chan walk.TreeJob) go walk.Tree(dr, root, nil, treeJobs) - i := 0 - for job := range treeJobs { - expectedPath := filepath.Join(strings.Split(walktreeTestItems[i], "/")...) 
- if job.Path != expectedPath { - t.Fatalf("expected path %q (%v), got %q", walktreeTestItems[i], i, job.Path) - } - i++ + for _ = range treeJobs { } - - if i != len(walktreeTestItems) { - t.Fatalf("got %d items, expected %v", i, len(walktreeTestItems)) - } - }) -} - -func BenchmarkDelayedWalkTree(t *testing.B) { - WithTestEnvironment(t, repoFixture, func(repodir string) { - repo := OpenLocalRepo(t, repodir) - OK(t, repo.LoadIndex()) - - root, err := restic.ParseID("937a2f64f736c64ee700c6ab06f840c68c94799c288146a0e81e07f4c94254da") - OK(t, err) - - dr := delayRepo{repo, 10 * time.Millisecond} - - t.ResetTimer() - - for i := 0; i < t.N; i++ { - // start tree walker - treeJobs := make(chan walk.TreeJob) - go walk.Tree(dr, root, nil, treeJobs) - - for _ = range treeJobs { - } - } - }) + } } From dfc0cbf3a8b7b6552f8765e1c2c0f5979d603d5c Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sun, 4 Sep 2016 14:30:14 +0200 Subject: [PATCH 39/40] Use one test password --- src/restic/repository/testing.go | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/src/restic/repository/testing.go b/src/restic/repository/testing.go index 79b9b00a7..7650ad8b9 100644 --- a/src/restic/repository/testing.go +++ b/src/restic/repository/testing.go @@ -6,6 +6,7 @@ import ( "restic/backend/local" "restic/backend/mem" "restic/crypto" + "restic/test" "testing" "github.com/restic/chunker" @@ -29,9 +30,6 @@ func TestBackend(t testing.TB) (be restic.Backend, cleanup func()) { return mem.New(), func() {} } -// TestPassword is used for all repositories created by the Test* functions. -const TestPassword = "geheim" - const testChunkerPol = chunker.Pol(0x3DA3358B4DC173) // TestRepositoryWithBackend returns a repository initialized with a test @@ -48,7 +46,7 @@ func TestRepositoryWithBackend(t testing.TB, be restic.Backend) (r restic.Reposi repo := New(be) cfg := restic.TestCreateConfig(t, testChunkerPol) - err := repo.init(TestPassword, cfg) + err := repo.init(test.TestPassword, cfg) if err != nil { t.Fatalf("TestRepository(): initialize repo failed: %v", err) } @@ -92,7 +90,7 @@ func TestOpenLocal(t testing.TB, dir string) (r restic.Repository) { } repo := New(be) - err = repo.SearchKey(TestPassword, 10) + err = repo.SearchKey(test.TestPassword, 10) if err != nil { t.Fatal(err) } From b628bcee27e2626fb0e58747ee1c1d32c85e5dce Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sun, 4 Sep 2016 14:38:18 +0200 Subject: [PATCH 40/40] Remove redundant ParseID --- src/cmds/restic/integration_test.go | 6 +- src/restic/backend/test/tests.go | 106 ++++++++++++++-------------- src/restic/checker/checker_test.go | 12 ++-- src/restic/index/index_test.go | 3 +- src/restic/repository/index_test.go | 22 +++--- src/restic/test/helpers.go | 11 --- 6 files changed, 74 insertions(+), 86 deletions(-) diff --git a/src/cmds/restic/integration_test.go b/src/cmds/restic/integration_test.go index 4a737a6a9..0dd6c6165 100644 --- a/src/cmds/restic/integration_test.go +++ b/src/cmds/restic/integration_test.go @@ -815,7 +815,7 @@ var optimizeTests = []struct { }{ { filepath.Join("..", "..", "restic", "checker", "testdata", "checker-test-repo.tar.gz"), - restic.NewIDSet(ParseID("a13c11e582b77a693dd75ab4e3a3ba96538a056594a4b9076e4cacebe6e06d43")), + restic.NewIDSet(restic.TestParseID("a13c11e582b77a693dd75ab4e3a3ba96538a056594a4b9076e4cacebe6e06d43")), }, { filepath.Join("testdata", "old-index-repo.tar.gz"), @@ -824,8 +824,8 @@ var optimizeTests = []struct { { filepath.Join("testdata", "old-index-repo.tar.gz"), restic.NewIDSet( 
- ParseID("f7d83db709977178c9d1a09e4009355e534cde1a135b8186b8b118a3fc4fcd41"), - ParseID("51d249d28815200d59e4be7b3f21a157b864dc343353df9d8e498220c2499b02"), + restic.TestParseID("f7d83db709977178c9d1a09e4009355e534cde1a135b8186b8b118a3fc4fcd41"), + restic.TestParseID("51d249d28815200d59e4be7b3f21a157b864dc343353df9d8e498220c2499b02"), ), }, } diff --git a/src/restic/backend/test/tests.go b/src/restic/backend/test/tests.go index 4171b0bd9..e79fca366 100644 --- a/src/restic/backend/test/tests.go +++ b/src/restic/backend/test/tests.go @@ -12,9 +12,9 @@ import ( "testing" "restic/errors" + "restic/test" "restic/backend" - . "restic/test" ) // CreateFn is a function that creates a temporary repository for the tests. @@ -195,7 +195,7 @@ func TestLoad(t testing.TB) { length := rand.Intn(1<<24) + 2000 - data := Random(23, length) + data := test.Random(23, length) id := restic.Hash(data) handle := restic.Handle{Type: restic.DataFile, Name: id.String()} @@ -310,7 +310,7 @@ func TestLoad(t testing.TB) { t.Errorf("wrong error returned for larger buffer: want io.ErrUnexpectedEOF, got %#v", err) } - OK(t, b.Remove(restic.DataFile, id.String())) + test.OK(t, b.Remove(restic.DataFile, id.String())) } // TestLoadNegativeOffset tests the backend's Load function with negative offsets. @@ -320,7 +320,7 @@ func TestLoadNegativeOffset(t testing.TB) { length := rand.Intn(1<<24) + 2000 - data := Random(23, length) + data := test.Random(23, length) id := restic.Hash(data) handle := restic.Handle{Type: restic.DataFile, Name: id.String()} @@ -366,7 +366,7 @@ func TestLoadNegativeOffset(t testing.TB) { } - OK(t, b.Remove(restic.DataFile, id.String())) + test.OK(t, b.Remove(restic.DataFile, id.String())) } // TestSave tests saving data in the backend. @@ -377,7 +377,7 @@ func TestSave(t testing.TB) { for i := 0; i < 10; i++ { length := rand.Intn(1<<23) + 200000 - data := Random(23, length) + data := test.Random(23, length) // use the first 32 byte as the ID copy(id[:], data) @@ -386,10 +386,10 @@ func TestSave(t testing.TB) { Name: fmt.Sprintf("%s-%d", id, i), } err := b.Save(h, data) - OK(t, err) + test.OK(t, err) buf, err := backend.LoadAll(b, h, nil) - OK(t, err) + test.OK(t, err) if len(buf) != len(data) { t.Fatalf("number of bytes does not match, want %v, got %v", len(data), len(buf)) } @@ -399,7 +399,7 @@ func TestSave(t testing.TB) { } fi, err := b.Stat(h) - OK(t, err) + test.OK(t, err) if fi.Size != int64(len(data)) { t.Fatalf("Stat() returned different size, want %q, got %d", len(data), fi.Size) @@ -468,14 +468,14 @@ var testStrings = []struct { func store(t testing.TB, b restic.Backend, tpe restic.FileType, data []byte) { id := restic.Hash(data) err := b.Save(restic.Handle{Name: id.String(), Type: tpe}, data) - OK(t, err) + test.OK(t, err) } func read(t testing.TB, rd io.Reader, expectedData []byte) { buf, err := ioutil.ReadAll(rd) - OK(t, err) + test.OK(t, err) if expectedData != nil { - Equals(t, expectedData, buf) + test.Equals(t, expectedData, buf) } } @@ -489,85 +489,85 @@ func TestBackend(t testing.TB) { restic.SnapshotFile, restic.IndexFile, } { // detect non-existing files - for _, test := range testStrings { - id, err := restic.ParseID(test.id) - OK(t, err) + for _, ts := range testStrings { + id, err := restic.ParseID(ts.id) + test.OK(t, err) // test if blob is already in repository ret, err := b.Test(tpe, id.String()) - OK(t, err) - Assert(t, !ret, "blob was found to exist before creating") + test.OK(t, err) + test.Assert(t, !ret, "blob was found to exist before creating") // try to stat a not 
existing blob h := restic.Handle{Type: tpe, Name: id.String()} _, err = b.Stat(h) - Assert(t, err != nil, "blob data could be extracted before creation") + test.Assert(t, err != nil, "blob data could be extracted before creation") // try to read not existing blob _, err = b.Load(h, nil, 0) - Assert(t, err != nil, "blob reader could be obtained before creation") + test.Assert(t, err != nil, "blob reader could be obtained before creation") // try to get string out, should fail ret, err = b.Test(tpe, id.String()) - OK(t, err) - Assert(t, !ret, "id %q was found (but should not have)", test.id) + test.OK(t, err) + test.Assert(t, !ret, "id %q was found (but should not have)", ts.id) } // add files - for _, test := range testStrings { - store(t, b, tpe, []byte(test.data)) + for _, ts := range testStrings { + store(t, b, tpe, []byte(ts.data)) // test Load() - h := restic.Handle{Type: tpe, Name: test.id} + h := restic.Handle{Type: tpe, Name: ts.id} buf, err := backend.LoadAll(b, h, nil) - OK(t, err) - Equals(t, test.data, string(buf)) + test.OK(t, err) + test.Equals(t, ts.data, string(buf)) // try to read it out with an offset and a length start := 1 - end := len(test.data) - 2 + end := len(ts.data) - 2 length := end - start buf2 := make([]byte, length) n, err := b.Load(h, buf2, int64(start)) - OK(t, err) - Equals(t, length, n) - Equals(t, test.data[start:end], string(buf2)) + test.OK(t, err) + test.Equals(t, length, n) + test.Equals(t, ts.data[start:end], string(buf2)) } // test adding the first file again - test := testStrings[0] + ts := testStrings[0] // create blob - err := b.Save(restic.Handle{Type: tpe, Name: test.id}, []byte(test.data)) - Assert(t, err != nil, "expected error, got %v", err) + err := b.Save(restic.Handle{Type: tpe, Name: ts.id}, []byte(ts.data)) + test.Assert(t, err != nil, "expected error, got %v", err) // remove and recreate - err = b.Remove(tpe, test.id) - OK(t, err) + err = b.Remove(tpe, ts.id) + test.OK(t, err) // test that the blob is gone - ok, err := b.Test(tpe, test.id) - OK(t, err) - Assert(t, ok == false, "removed blob still present") + ok, err := b.Test(tpe, ts.id) + test.OK(t, err) + test.Assert(t, ok == false, "removed blob still present") // create blob - err = b.Save(restic.Handle{Type: tpe, Name: test.id}, []byte(test.data)) - OK(t, err) + err = b.Save(restic.Handle{Type: tpe, Name: ts.id}, []byte(ts.data)) + test.OK(t, err) // list items IDs := restic.IDs{} - for _, test := range testStrings { - id, err := restic.ParseID(test.id) - OK(t, err) + for _, ts := range testStrings { + id, err := restic.ParseID(ts.id) + test.OK(t, err) IDs = append(IDs, id) } list := restic.IDs{} for s := range b.List(tpe, nil) { - list = append(list, ParseID(s)) + list = append(list, restic.TestParseID(s)) } if len(IDs) != len(list) { @@ -582,19 +582,19 @@ func TestBackend(t testing.TB) { } // remove content if requested - if TestCleanupTempDirs { - for _, test := range testStrings { - id, err := restic.ParseID(test.id) - OK(t, err) + if test.TestCleanupTempDirs { + for _, ts := range testStrings { + id, err := restic.ParseID(ts.id) + test.OK(t, err) found, err := b.Test(tpe, id.String()) - OK(t, err) + test.OK(t, err) - OK(t, b.Remove(tpe, id.String())) + test.OK(t, b.Remove(tpe, id.String())) found, err = b.Test(tpe, id.String()) - OK(t, err) - Assert(t, !found, fmt.Sprintf("id %q not found after removal", id)) + test.OK(t, err) + test.Assert(t, !found, fmt.Sprintf("id %q not found after removal", id)) } } } @@ -623,7 +623,7 @@ func TestCleanup(t testing.TB) { return } - if 
!TestCleanupTempDirs { + if !test.TestCleanupTempDirs { t.Logf("not cleaning up backend") return } diff --git a/src/restic/checker/checker_test.go b/src/restic/checker/checker_test.go index d900a1548..b5cf3732c 100644 --- a/src/restic/checker/checker_test.go +++ b/src/restic/checker/checker_test.go @@ -152,12 +152,12 @@ func TestUnreferencedBlobs(t *testing.T) { test.OK(t, repo.Backend().Remove(restic.SnapshotFile, snID)) unusedBlobsBySnapshot := restic.IDs{ - test.ParseID("58c748bbe2929fdf30c73262bd8313fe828f8925b05d1d4a87fe109082acb849"), - test.ParseID("988a272ab9768182abfd1fe7d7a7b68967825f0b861d3b36156795832c772235"), - test.ParseID("c01952de4d91da1b1b80bc6e06eaa4ec21523f4853b69dc8231708b9b7ec62d8"), - test.ParseID("bec3a53d7dc737f9a9bee68b107ec9e8ad722019f649b34d474b9982c3a3fec7"), - test.ParseID("2a6f01e5e92d8343c4c6b78b51c5a4dc9c39d42c04e26088c7614b13d8d0559d"), - test.ParseID("18b51b327df9391732ba7aaf841a4885f350d8a557b2da8352c9acf8898e3f10"), + restic.TestParseID("58c748bbe2929fdf30c73262bd8313fe828f8925b05d1d4a87fe109082acb849"), + restic.TestParseID("988a272ab9768182abfd1fe7d7a7b68967825f0b861d3b36156795832c772235"), + restic.TestParseID("c01952de4d91da1b1b80bc6e06eaa4ec21523f4853b69dc8231708b9b7ec62d8"), + restic.TestParseID("bec3a53d7dc737f9a9bee68b107ec9e8ad722019f649b34d474b9982c3a3fec7"), + restic.TestParseID("2a6f01e5e92d8343c4c6b78b51c5a4dc9c39d42c04e26088c7614b13d8d0559d"), + restic.TestParseID("18b51b327df9391732ba7aaf841a4885f350d8a557b2da8352c9acf8898e3f10"), } sort.Sort(unusedBlobsBySnapshot) diff --git a/src/restic/index/index_test.go b/src/restic/index/index_test.go index f1378531f..608ff944a 100644 --- a/src/restic/index/index_test.go +++ b/src/restic/index/index_test.go @@ -4,7 +4,6 @@ import ( "math/rand" "restic" "restic/repository" - . 
"restic/test" "testing" "time" ) @@ -305,7 +304,7 @@ func TestIndexLoadDocReference(t *testing.T) { idx := loadIndex(t, repo) - blobID := ParseID("d3dc577b4ffd38cc4b32122cabf8655a0223ed22edfd93b353dc0c3f2b0fdf66") + blobID := restic.TestParseID("d3dc577b4ffd38cc4b32122cabf8655a0223ed22edfd93b353dc0c3f2b0fdf66") locs, err := idx.FindBlob(restic.BlobHandle{ID: blobID, Type: restic.DataBlob}) if err != nil { t.Errorf("FindBlob() returned error %v", err) diff --git a/src/restic/repository/index_test.go b/src/restic/repository/index_test.go index 243104547..986f9efc7 100644 --- a/src/restic/repository/index_test.go +++ b/src/restic/repository/index_test.go @@ -259,16 +259,16 @@ var exampleTests = []struct { offset, length uint }{ { - ParseID("3ec79977ef0cf5de7b08cd12b874cd0f62bbaf7f07f3497a5b1bbcc8cb39b1ce"), - ParseID("73d04e6125cf3c28a299cc2f3cca3b78ceac396e4fcf9575e34536b26782413c"), + restic.TestParseID("3ec79977ef0cf5de7b08cd12b874cd0f62bbaf7f07f3497a5b1bbcc8cb39b1ce"), + restic.TestParseID("73d04e6125cf3c28a299cc2f3cca3b78ceac396e4fcf9575e34536b26782413c"), restic.DataBlob, 0, 25, }, { - ParseID("9ccb846e60d90d4eb915848add7aa7ea1e4bbabfc60e573db9f7bfb2789afbae"), - ParseID("73d04e6125cf3c28a299cc2f3cca3b78ceac396e4fcf9575e34536b26782413c"), + restic.TestParseID("9ccb846e60d90d4eb915848add7aa7ea1e4bbabfc60e573db9f7bfb2789afbae"), + restic.TestParseID("73d04e6125cf3c28a299cc2f3cca3b78ceac396e4fcf9575e34536b26782413c"), restic.TreeBlob, 38, 100, }, { - ParseID("d3dc577b4ffd38cc4b32122cabf8655a0223ed22edfd93b353dc0c3f2b0fdf66"), - ParseID("73d04e6125cf3c28a299cc2f3cca3b78ceac396e4fcf9575e34536b26782413c"), + restic.TestParseID("d3dc577b4ffd38cc4b32122cabf8655a0223ed22edfd93b353dc0c3f2b0fdf66"), + restic.TestParseID("73d04e6125cf3c28a299cc2f3cca3b78ceac396e4fcf9575e34536b26782413c"), restic.DataBlob, 150, 123, }, } @@ -277,16 +277,16 @@ var exampleLookupTest = struct { packID restic.ID blobs map[restic.ID]restic.BlobType }{ - ParseID("73d04e6125cf3c28a299cc2f3cca3b78ceac396e4fcf9575e34536b26782413c"), + restic.TestParseID("73d04e6125cf3c28a299cc2f3cca3b78ceac396e4fcf9575e34536b26782413c"), map[restic.ID]restic.BlobType{ - ParseID("3ec79977ef0cf5de7b08cd12b874cd0f62bbaf7f07f3497a5b1bbcc8cb39b1ce"): restic.DataBlob, - ParseID("9ccb846e60d90d4eb915848add7aa7ea1e4bbabfc60e573db9f7bfb2789afbae"): restic.TreeBlob, - ParseID("d3dc577b4ffd38cc4b32122cabf8655a0223ed22edfd93b353dc0c3f2b0fdf66"): restic.DataBlob, + restic.TestParseID("3ec79977ef0cf5de7b08cd12b874cd0f62bbaf7f07f3497a5b1bbcc8cb39b1ce"): restic.DataBlob, + restic.TestParseID("9ccb846e60d90d4eb915848add7aa7ea1e4bbabfc60e573db9f7bfb2789afbae"): restic.TreeBlob, + restic.TestParseID("d3dc577b4ffd38cc4b32122cabf8655a0223ed22edfd93b353dc0c3f2b0fdf66"): restic.DataBlob, }, } func TestIndexUnserialize(t *testing.T) { - oldIdx := restic.IDs{ParseID("ed54ae36197f4745ebc4b54d10e0f623eaaaedd03013eb7ae90df881b7781452")} + oldIdx := restic.IDs{restic.TestParseID("ed54ae36197f4745ebc4b54d10e0f623eaaaedd03013eb7ae90df881b7781452")} idx, err := repository.DecodeIndex(bytes.NewReader(docExample)) OK(t, err) diff --git a/src/restic/test/helpers.go b/src/restic/test/helpers.go index 53e50d432..d363e09c9 100644 --- a/src/restic/test/helpers.go +++ b/src/restic/test/helpers.go @@ -11,7 +11,6 @@ import ( "os/exec" "path/filepath" "reflect" - "restic" "runtime" "testing" @@ -60,16 +59,6 @@ func Equals(tb testing.TB, exp, act interface{}) { } } -// ParseID parses s as a restic.ID and panics if that fails. 
-func ParseID(s string) restic.ID { - id, err := restic.ParseID(s) - if err != nil { - panic(err) - } - - return id -} - // Random returns size bytes of pseudo-random data derived from the seed. func Random(seed, count int) []byte { p := make([]byte, count)
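
Taken together, the patches above replace the callback-style WithTestEnvironment/OpenLocalRepo helpers with test.Env and repository.TestOpenLocal, move the shared TestPassword into restic/test, and swap the panicking test.ParseID helper for restic.TestParseID. Below is a minimal sketch of how a test might use the refactored helpers together; it is illustrative only, the fixture path and the ID literal are placeholders rather than values taken from the patches, and it assumes the package layout shown in the diffs (restic, restic/repository, restic/test).

package example_test

import (
	"path/filepath"
	"testing"

	"restic"
	"restic/repository"
	"restic/test"
)

// exampleFixture is a hypothetical repository fixture; real tests point this
// at a tar.gz under testdata/, as the tests changed above do.
var exampleFixture = filepath.Join("testdata", "example-repo.tar.gz")

func TestExampleUsage(t *testing.T) {
	// test.Env extracts the fixture into a temporary directory and returns the
	// repository path plus a cleanup function (the replacement for the removed
	// WithTestEnvironment callback helper).
	repodir, cleanup := test.Env(t, exampleFixture)
	defer cleanup()

	// repository.TestOpenLocal opens the extracted repository with the shared
	// test password (the replacement for test.OpenLocalRepo).
	repo := repository.TestOpenLocal(t, repodir)
	test.OK(t, repo.LoadIndex())

	// restic.TestParseID stands in for the removed test.ParseID helper and
	// parses a hex string into a restic.ID, failing on malformed input.
	id := restic.TestParseID("0000000000000000000000000000000000000000000000000000000000000000")
	_ = id
}

Returning a cleanup function instead of taking a callback keeps test bodies flat and works naturally with defer, and moving ParseID and TestPassword out of the test helper package removes that package's imports of restic, restic/repository and restic/backend/local, as the import deletions in helpers.go above show.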