forked from TrueCloudLab/restic

Refactor backends

This commit is contained in:
parent f51aba1510
commit 5e69788eac

31 changed files with 1106 additions and 1125 deletions

26 archiver.go
@ -73,14 +73,19 @@ func (arch *Archiver) Cache() *Cache {
// Preload loads all blobs for all cached snapshots.
|
||||
func (arch *Archiver) Preload() error {
|
||||
// list snapshots first
|
||||
snapshots, err := arch.s.List(backend.Snapshot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
done := make(chan struct{})
|
||||
defer close(done)
|
||||
|
||||
// list snapshots
|
||||
// TODO: track seen tree ids, load trees that aren't in the set
|
||||
for _, id := range snapshots {
|
||||
snapshots := 0
|
||||
for name := range arch.s.List(backend.Snapshot, done) {
|
||||
id, err := backend.ParseID(name)
|
||||
if err != nil {
|
||||
debug.Log("Archiver.Preload", "unable to parse name %v as id: %v", name, err)
|
||||
continue
|
||||
}
|
||||
|
||||
m, err := arch.c.LoadMap(arch.s, id)
|
||||
if err != nil {
|
||||
debug.Log("Archiver.Preload", "blobs for snapshot %v not cached: %v", id.Str(), err)
|
||||
|
@ -89,9 +94,10 @@ func (arch *Archiver) Preload() error {
|
|||
|
||||
arch.m.Merge(m)
|
||||
debug.Log("Archiver.Preload", "done loading cached blobs for snapshot %v", id.Str())
|
||||
snapshots++
|
||||
}
|
||||
|
||||
debug.Log("Archiver.Preload", "Loaded %v blobs from %v snapshots", arch.m.Len(), len(snapshots))
|
||||
debug.Log("Archiver.Preload", "Loaded %v blobs from %v snapshots", arch.m.Len(), snapshots)
|
||||
return nil
|
||||
}
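
The hunk above switches Preload from the old slice-returning List to the channel-based List that takes a done channel. A minimal, self-contained sketch of that consumer pattern follows; the lister type and the names in it are made up for illustration and are not part of restic.

package main

import "fmt"

// lister mirrors the channel-based List API introduced in this commit:
// names are streamed on a channel, and closing done tells the sender to stop.
type lister interface {
	list(done <-chan struct{}) <-chan string
}

type sliceLister []string

func (s sliceLister) list(done <-chan struct{}) <-chan string {
	ch := make(chan string)
	go func() {
		defer close(ch)
		for _, name := range s {
			select {
			case ch <- name:
			case <-done: // caller closed done: stop sending
				return
			}
		}
	}()
	return ch
}

func main() {
	be := sliceLister{"aaa", "bbb", "ccc", "ddd"}

	done := make(chan struct{})
	defer close(done) // lets the producer goroutine exit if we stop early

	seen := 0
	for name := range be.list(done) {
		fmt.Println("got", name)
		seen++
		if seen == 2 { // stop early, similar to BenchmarkLoadTree below
			break
		}
	}
}
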
|
||||
|
||||
|
@ -120,7 +126,7 @@ func (arch *Archiver) Save(t backend.Type, id backend.ID, length uint, rd io.Rea
|
|||
|
||||
// remove the blob again
|
||||
// TODO: implement a list of blobs in transport, so this doesn't happen so often
|
||||
err = arch.s.Remove(t, blob.Storage)
|
||||
err = arch.s.Remove(t, blob.Storage.String())
|
||||
if err != nil {
|
||||
return Blob{}, err
|
||||
}
|
||||
|
@ -295,7 +301,7 @@ func (arch *Archiver) saveTree(p *Progress, t *Tree) (Blob, error) {
|
|||
continue
|
||||
}
|
||||
|
||||
if ok, err := arch.s.Test(backend.Data, blob.Storage); !ok || err != nil {
|
||||
if ok, err := arch.s.Test(backend.Data, blob.Storage.String()); !ok || err != nil {
|
||||
debug.Log("Archiver.saveTree", "blob %v not in repository (error is %v)", blob, err)
|
||||
arch.Error(node.path, nil, fmt.Errorf("blob %v not in repository (error is %v)", blob.Storage.Str(), err))
|
||||
removeContent = true
|
||||
|
@ -419,7 +425,7 @@ func (arch *Archiver) fileWorker(wg *sync.WaitGroup, p *Progress, done <-chan st
|
|||
// check if all content is still available in the repository
|
||||
contentMissing := false
|
||||
for _, blob := range oldNode.blobs {
|
||||
if ok, err := arch.s.Test(backend.Data, blob.Storage); !ok || err != nil {
|
||||
if ok, err := arch.s.Test(backend.Data, blob.Storage.String()); !ok || err != nil {
|
||||
debug.Log("Archiver.fileWorker", " %v not using old data, %v (%v) is missing", e.Path(), blob.ID.Str(), blob.Storage.Str())
|
||||
contentMissing = true
|
||||
break
|
||||
|
|
|
@ -153,17 +153,7 @@ func snapshot(t testing.TB, server restic.Server, path string, parent backend.ID
|
|||
}
|
||||
|
||||
func countBlobs(t testing.TB, server restic.Server) (trees int, data int) {
|
||||
list, err := server.List(backend.Tree)
|
||||
ok(t, err)
|
||||
|
||||
trees = len(list)
|
||||
|
||||
list, err = server.List(backend.Data)
|
||||
ok(t, err)
|
||||
|
||||
data = len(list)
|
||||
|
||||
return
|
||||
return server.Count(backend.Tree), server.Count(backend.Data)
|
||||
}
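
countBlobs now delegates to server.Count, whose implementation is not shown in this hunk. A plausible sketch of such a counter on top of the channel-based List, assuming only the Lister interface from this commit (the helper name and package are hypothetical):

package restic_test

import "github.com/restic/restic/backend"

// count drains the channel returned by List and reports how many names were
// yielded. This is only a sketch; the real server.Count is not shown here.
func count(be backend.Lister, t backend.Type) int {
	done := make(chan struct{})
	defer close(done) // signal the listing goroutine to stop once we return

	n := 0
	for range be.List(t, done) {
		n++
	}
	return n
}
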
|
||||
|
||||
func archiveWithPreload(t testing.TB) {
|
||||
|
@ -262,15 +252,30 @@ func BenchmarkLoadTree(t *testing.B) {
|
|||
ok(t, err)
|
||||
t.Logf("archived snapshot %v", sn.ID())
|
||||
|
||||
list := make([]backend.ID, 0, 10)
|
||||
done := make(chan struct{})
|
||||
|
||||
for name := range server.List(backend.Tree, done) {
|
||||
id, err := backend.ParseID(name)
|
||||
if err != nil {
|
||||
t.Logf("invalid id for tree %v", name)
|
||||
continue
|
||||
}
|
||||
|
||||
list = append(list, id)
|
||||
if len(list) == cap(list) {
|
||||
close(done)
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// start benchmark
|
||||
t.ResetTimer()
|
||||
|
||||
list, err := server.List(backend.Tree)
|
||||
ok(t, err)
|
||||
list = list[:10]
|
||||
|
||||
for i := 0; i < t.N; i++ {
|
||||
_, err := restic.LoadTree(server, list[0])
|
||||
ok(t, err)
|
||||
for _, name := range list {
|
||||
_, err := restic.LoadTree(server, name)
|
||||
ok(t, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
119 backend/backend_test.go Normal file
|
@ -0,0 +1,119 @@
|
|||
package backend_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"sort"
|
||||
"testing"
|
||||
|
||||
"github.com/restic/restic/backend"
|
||||
)
|
||||
|
||||
func testBackend(b backend.Backend, t *testing.T) {
|
||||
for _, tpe := range []backend.Type{backend.Data, backend.Key, backend.Lock, backend.Snapshot, backend.Tree} {
|
||||
// detect non-existing files
|
||||
for _, test := range TestStrings {
|
||||
id, err := backend.ParseID(test.id)
|
||||
ok(t, err)
|
||||
|
||||
// test if blob is already in repository
|
||||
ret, err := b.Test(tpe, id.String())
|
||||
ok(t, err)
|
||||
assert(t, !ret, "blob was found to exist before creating")
|
||||
|
||||
// try to open not existing blob
|
||||
_, err = b.Get(tpe, id.String())
|
||||
assert(t, err != nil, "blob data could be extracted before creation")
|
||||
|
||||
// try to get string out, should fail
|
||||
ret, err = b.Test(tpe, id.String())
|
||||
ok(t, err)
|
||||
assert(t, !ret, "id %q was found (but should not have)", test.id)
|
||||
}
|
||||
|
||||
// add files
|
||||
for _, test := range TestStrings {
|
||||
// store string in backend
|
||||
blob, err := b.Create()
|
||||
ok(t, err)
|
||||
|
||||
_, err = blob.Write([]byte(test.data))
|
||||
ok(t, err)
|
||||
ok(t, blob.Finalize(tpe, test.id))
|
||||
|
||||
// try to get it out again
|
||||
rd, err := b.Get(tpe, test.id)
|
||||
ok(t, err)
|
||||
assert(t, rd != nil, "Get() returned nil")
|
||||
|
||||
buf, err := ioutil.ReadAll(rd)
|
||||
ok(t, err)
|
||||
equals(t, test.data, string(buf))
|
||||
|
||||
// compare content
|
||||
equals(t, test.data, string(buf))
|
||||
}
|
||||
|
||||
// test adding the first file again
|
||||
test := TestStrings[0]
|
||||
|
||||
// create blob
|
||||
blob, err := b.Create()
|
||||
ok(t, err)
|
||||
|
||||
_, err = blob.Write([]byte(test.data))
|
||||
ok(t, err)
|
||||
err = blob.Finalize(tpe, test.id)
|
||||
assert(t, err != nil, "expected error, got %v", err)
|
||||
|
||||
// remove and recreate
|
||||
err = b.Remove(tpe, test.id)
|
||||
ok(t, err)
|
||||
|
||||
// create blob
|
||||
blob, err = b.Create()
|
||||
ok(t, err)
|
||||
|
||||
_, err = io.Copy(blob, bytes.NewReader([]byte(test.data)))
|
||||
ok(t, err)
|
||||
ok(t, blob.Finalize(tpe, test.id))
|
||||
|
||||
// list items
|
||||
IDs := backend.IDs{}
|
||||
|
||||
for _, test := range TestStrings {
|
||||
id, err := backend.ParseID(test.id)
|
||||
ok(t, err)
|
||||
IDs = append(IDs, id)
|
||||
}
|
||||
|
||||
sort.Sort(IDs)
|
||||
|
||||
i := 0
|
||||
for s := range b.List(tpe, nil) {
|
||||
equals(t, IDs[i].String(), s)
|
||||
i++
|
||||
}
|
||||
|
||||
// remove content if requested
|
||||
if *testCleanup {
|
||||
for _, test := range TestStrings {
|
||||
id, err := backend.ParseID(test.id)
|
||||
ok(t, err)
|
||||
|
||||
found, err := b.Test(tpe, id.String())
|
||||
ok(t, err)
|
||||
assert(t, found, fmt.Sprintf("id %q was not found before removal", id))
|
||||
|
||||
ok(t, b.Remove(tpe, id.String()))
|
||||
|
||||
found, err = b.Test(tpe, id.String())
|
||||
ok(t, err)
|
||||
assert(t, !found, fmt.Sprintf("id %q not found after removal", id))
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
}
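
The generic harness above accepts any backend.Backend. A hypothetical driver, not part of this commit, showing how a concrete backend could be fed into it (the commit's own wiring uses setupLocalBackend/teardownLocalBackend further down in this diff):

package backend_test

import (
	"io/ioutil"
	"os"
	"testing"

	"github.com/restic/restic/backend/local"
)

func TestLocalBackendSketch(t *testing.T) {
	tempdir, err := ioutil.TempDir("", "restic-test-")
	ok(t, err)
	defer os.RemoveAll(tempdir)

	// local.Create is introduced by this commit and builds the repository layout.
	b, err := local.Create(tempdir)
	ok(t, err)
	defer b.Close()

	testBackend(b, t)
}
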
|
|
@ -1,15 +1,12 @@
|
|||
package backend
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"sort"
|
||||
)
|
||||
|
||||
const (
|
||||
MinPrefixLength = 4
|
||||
MinPrefixLength = 8
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -24,21 +21,6 @@ var (
|
|||
|
||||
const hashSize = sha256.Size
|
||||
|
||||
// Each lists all entries of type t in the backend and calls function f() with
|
||||
// the id.
|
||||
func EachID(be Lister, t Type, f func(ID)) error {
|
||||
ids, err := be.List(t)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, id := range ids {
|
||||
f(id)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Hash returns the ID for data.
|
||||
func Hash(data []byte) ID {
|
||||
h := hashData(data)
|
||||
|
@ -47,78 +29,67 @@ func Hash(data []byte) ID {
|
|||
return id
|
||||
}
|
||||
|
||||
// Find loads the list of all blobs of type t and searches for IDs which start
|
||||
// with prefix. If none is found, nil and ErrNoIDPrefixFound is returned. If
|
||||
// more than one is found, nil and ErrMultipleIDMatches is returned.
|
||||
func Find(be Lister, t Type, prefix string) (ID, error) {
|
||||
p, err := hex.DecodeString(prefix)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Find loads the list of all blobs of type t and searches for names which
|
||||
// start with prefix. If none is found, nil and ErrNoIDPrefixFound is returned.
|
||||
// If more than one is found, nil and ErrMultipleIDMatches is returned.
|
||||
func Find(be Lister, t Type, prefix string) (string, error) {
|
||||
done := make(chan struct{})
|
||||
defer close(done)
|
||||
|
||||
list, err := be.List(t)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
match := ID(nil)
|
||||
match := ""
|
||||
|
||||
// TODO: optimize by sorting list etc.
|
||||
for _, id := range list {
|
||||
if bytes.Equal(p, id[:len(p)]) {
|
||||
if match == nil {
|
||||
match = id
|
||||
for name := range be.List(t, done) {
|
||||
if prefix == name[:len(prefix)] {
|
||||
if match == "" {
|
||||
match = name
|
||||
} else {
|
||||
return nil, ErrMultipleIDMatches
|
||||
return "", ErrMultipleIDMatches
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if match != nil {
|
||||
if match != "" {
|
||||
return match, nil
|
||||
}
|
||||
|
||||
return nil, ErrNoIDPrefixFound
|
||||
return "", ErrNoIDPrefixFound
|
||||
}
|
||||
|
||||
// FindSnapshot takes a string and tries to find a snapshot whose ID matches
|
||||
// the string as closely as possible.
|
||||
func FindSnapshot(be Lister, s string) (ID, error) {
|
||||
// parse ID directly
|
||||
if id, err := ParseID(s); err == nil {
|
||||
return id, nil
|
||||
}
|
||||
|
||||
func FindSnapshot(be Lister, s string) (string, error) {
|
||||
// find snapshot id with prefix
|
||||
id, err := Find(be, Snapshot, s)
|
||||
name, err := Find(be, Snapshot, s)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return "", err
|
||||
}
|
||||
|
||||
return id, nil
|
||||
return name, nil
|
||||
}
|
||||
|
||||
// PrefixLength returns the number of bytes required so that all prefixes of
|
||||
// all IDs of type t are unique.
|
||||
// all names of type t are unique.
|
||||
func PrefixLength(be Lister, t Type) (int, error) {
|
||||
// load all IDs of the given type
|
||||
list, err := be.List(t)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
done := make(chan struct{})
|
||||
defer close(done)
|
||||
|
||||
sort.Sort(list)
|
||||
// load all IDs of the given type
|
||||
list := make([]string, 0, 100)
|
||||
for name := range be.List(t, done) {
|
||||
list = append(list, name)
|
||||
}
|
||||
|
||||
// select prefixes of length l, test if the last one is the same as the current one
|
||||
outer:
|
||||
for l := MinPrefixLength; l < IDSize; l++ {
|
||||
var last ID
|
||||
var last string
|
||||
|
||||
for _, id := range list {
|
||||
if bytes.Equal(last, id[:l]) {
|
||||
for _, name := range list {
|
||||
if last == name[:l] {
|
||||
continue outer
|
||||
}
|
||||
last = id[:l]
|
||||
last = name[:l]
|
||||
}
|
||||
|
||||
return l, nil
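
PrefixLength picks the shortest prefix length at which all listed names stay distinct; it relies on List yielding names in lexicographic order, so equal prefixes appear next to each other. A standalone sketch of that uniqueness check, using shortened, made-up hex names (the real code starts at MinPrefixLength, now 8):

package main

import (
	"fmt"
	"sort"
)

// uniquePrefixLen returns the smallest l >= min for which all names have
// distinct l-character prefixes.
func uniquePrefixLen(names []string, min int) int {
	sort.Strings(names) // equal prefixes become adjacent
outer:
	for l := min; ; l++ {
		last := ""
		for i, name := range names {
			if l > len(name) {
				return len(name) // cannot grow the prefix any further
			}
			if i > 0 && name[:l] == last {
				continue outer // collision: try a longer prefix
			}
			last = name[:l]
		}
		return l
	}
}

func main() {
	names := []string{"c3ab8ff1", "cc5d46bd", "248d6a61", "4e54d2c7"}
	fmt.Println(uniquePrefixLen(names, 2)) // 2: prefixes already differ at two characters
}
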
|
||||
|
|
|
@ -48,7 +48,7 @@ func str2id(s string) backend.ID {
|
|||
}
|
||||
|
||||
type mockBackend struct {
|
||||
list func(backend.Type) (backend.IDs, error)
|
||||
list func(backend.Type, <-chan struct{}) <-chan string
|
||||
get func(backend.Type, backend.ID) ([]byte, error)
|
||||
getReader func(backend.Type, backend.ID) (io.ReadCloser, error)
|
||||
create func(backend.Type, []byte) (backend.ID, error)
|
||||
|
@ -58,8 +58,8 @@ type mockBackend struct {
|
|||
close func() error
|
||||
}
|
||||
|
||||
func (m mockBackend) List(t backend.Type) (backend.IDs, error) {
|
||||
return m.list(t)
|
||||
func (m mockBackend) List(t backend.Type, done <-chan struct{}) <-chan string {
|
||||
return m.list(t, done)
|
||||
}
|
||||
|
||||
func (m mockBackend) Get(t backend.Type, id backend.ID) ([]byte, error) {
|
||||
|
@ -105,21 +105,32 @@ func TestPrefixLength(t *testing.T) {
|
|||
list := samples
|
||||
|
||||
m := mockBackend{}
|
||||
m.list = func(t backend.Type) (backend.IDs, error) {
|
||||
return list, nil
|
||||
m.list = func(t backend.Type, done <-chan struct{}) <-chan string {
|
||||
ch := make(chan string)
|
||||
go func() {
|
||||
defer close(ch)
|
||||
for _, id := range list {
|
||||
select {
|
||||
case ch <- id.String():
|
||||
case <-done:
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
return ch
|
||||
}
|
||||
|
||||
l, err := backend.PrefixLength(m, backend.Snapshot)
|
||||
ok(t, err)
|
||||
equals(t, 10, l)
|
||||
equals(t, 19, l)
|
||||
|
||||
list = samples[:3]
|
||||
l, err = backend.PrefixLength(m, backend.Snapshot)
|
||||
ok(t, err)
|
||||
equals(t, 10, l)
|
||||
equals(t, 19, l)
|
||||
|
||||
list = samples[3:]
|
||||
l, err = backend.PrefixLength(m, backend.Snapshot)
|
||||
ok(t, err)
|
||||
equals(t, 4, l)
|
||||
equals(t, 8, l)
|
||||
}
|
||||
|
|
|
@ -6,6 +6,16 @@ import (
|
|||
"github.com/restic/restic/backend"
|
||||
)
|
||||
|
||||
var TestStrings = []struct {
|
||||
id string
|
||||
data string
|
||||
}{
|
||||
{"c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2", "foobar"},
|
||||
{"248d6a61d20638b8e5c026930c3e6039a33ce45964ff2167f6ecedd419db06c1", "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq"},
|
||||
{"cc5d46bdb4991c6eae3eb739c9c8a7a46fe9654fab79c47b4fe48383b5b25e1c", "foo/bar"},
|
||||
{"4e54d2c721cbdb730f01b10b62dec622962b36966ec685880effa63d71c808f2", "foo/../../baz"},
|
||||
}
|
||||
|
||||
func TestID(t *testing.T) {
|
||||
for _, test := range TestStrings {
|
||||
id, err := backend.ParseID(test.id)
|
||||
|
|
|
@ -1,10 +1,8 @@
|
|||
package backend
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
)
|
||||
import "io"
|
||||
|
||||
// Type is the type of a Blob.
|
||||
type Type string
|
||||
|
||||
const (
|
||||
|
@ -16,62 +14,60 @@ const (
|
|||
)
|
||||
|
||||
const (
|
||||
BackendVersion = 1
|
||||
Version = 1
|
||||
)
|
||||
|
||||
var (
|
||||
ErrAlreadyPresent = errors.New("blob is already present in backend")
|
||||
)
|
||||
// A Backend manages blobs of data.
|
||||
type Backend interface {
|
||||
// Location returns a string that specifies the location of the repository,
|
||||
// like a URL.
|
||||
Location() string
|
||||
|
||||
type Blob interface {
|
||||
io.WriteCloser
|
||||
ID() (ID, error)
|
||||
Size() uint
|
||||
// Create creates a new Blob. The data is available only after Finalize()
|
||||
// has been called on the returned Blob.
|
||||
Create() (Blob, error)
|
||||
|
||||
// Get returns an io.ReadCloser for the Blob with the given name of type t.
|
||||
Get(t Type, name string) (io.ReadCloser, error)
|
||||
|
||||
// Test returns whether a Blob with the given name and type exists.
|
||||
Test(t Type, name string) (bool, error)
|
||||
|
||||
// Remove removes a Blob with type t and name.
|
||||
Remove(t Type, name string) error
|
||||
|
||||
// Close the backend
|
||||
Close() error
|
||||
|
||||
Identifier
|
||||
Lister
|
||||
}
|
||||
|
||||
type Identifier interface {
|
||||
// ID returns a unique ID for a specific repository. This means restic can
|
||||
// recognize repositories accessed via different methods (e.g. local file
|
||||
// access and sftp).
|
||||
ID() string
|
||||
}
|
||||
|
||||
type Lister interface {
|
||||
List(Type) (IDs, error)
|
||||
}
|
||||
|
||||
type Getter interface {
|
||||
Get(Type, ID) ([]byte, error)
|
||||
GetReader(Type, ID) (io.ReadCloser, error)
|
||||
}
|
||||
|
||||
type Creater interface {
|
||||
Create(Type) (Blob, error)
|
||||
}
|
||||
|
||||
type Tester interface {
|
||||
Test(Type, ID) (bool, error)
|
||||
}
|
||||
|
||||
type Remover interface {
|
||||
Remove(Type, ID) error
|
||||
}
|
||||
|
||||
type Closer interface {
|
||||
Close() error
|
||||
// List returns a channel that yields all names of blobs of type t in
|
||||
// lexicographic order. A goroutine is started for this. If the channel
|
||||
// done is closed, sending stops.
|
||||
List(t Type, done <-chan struct{}) <-chan string
|
||||
}
|
||||
|
||||
type Deleter interface {
|
||||
// Delete the complete repository.
|
||||
Delete() error
|
||||
}
|
||||
|
||||
type Locationer interface {
|
||||
Location() string
|
||||
}
|
||||
type Blob interface {
|
||||
io.Writer
|
||||
|
||||
type IDer interface {
|
||||
ID() ID
|
||||
}
|
||||
// Finalize moves the data blob to the final location for type and name.
|
||||
Finalize(t Type, name string) error
|
||||
|
||||
type Backend interface {
|
||||
Lister
|
||||
Getter
|
||||
Creater
|
||||
Tester
|
||||
Remover
|
||||
Closer
|
||||
IDer
|
||||
// Size returns the number of bytes written to the backend so far.
|
||||
Size() uint
|
||||
}
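
Under the new interface a blob is written via Create, Write and Finalize, and read back by name via Get. A sketch of that round trip using the local backend added in this commit; error handling is trimmed, and naming the blob by backend.Hash of its content mirrors how the rest of restic addresses blobs:

package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"os"

	"github.com/restic/restic/backend"
	"github.com/restic/restic/backend/local"
)

func main() {
	dir, err := ioutil.TempDir("", "restic-example-")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(dir)

	// create a fresh repository layout and open it
	be, err := local.Create(dir)
	if err != nil {
		log.Fatal(err)
	}
	defer be.Close()

	data := []byte("hello backend")
	name := backend.Hash(data).String() // blobs are addressed by name

	// write: Create, Write, then Finalize under the final type and name
	blob, err := be.Create()
	if err != nil {
		log.Fatal(err)
	}
	if _, err := blob.Write(data); err != nil {
		log.Fatal(err)
	}
	if err := blob.Finalize(backend.Data, name); err != nil {
		log.Fatal(err)
	}

	// read back: Get returns an io.ReadCloser for the stored blob
	rd, err := be.Get(backend.Data, name)
	if err != nil {
		log.Fatal(err)
	}
	defer rd.Close()

	buf, err := ioutil.ReadAll(rd)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("stored and read back %q\n", buf)
}
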
|
||||
|
|
456 backend/local.go
|
@ -1,456 +0,0 @@
|
|||
package backend
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"crypto/sha256"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
dirMode = 0700
|
||||
dataPath = "data"
|
||||
snapshotPath = "snapshots"
|
||||
treePath = "trees"
|
||||
lockPath = "locks"
|
||||
keyPath = "keys"
|
||||
tempPath = "tmp"
|
||||
versionFileName = "version"
|
||||
idFileName = "id"
|
||||
)
|
||||
|
||||
var ErrWrongData = errors.New("wrong data returned by backend, checksum does not match")
|
||||
|
||||
type Local struct {
|
||||
p string
|
||||
ver uint
|
||||
id ID
|
||||
}
|
||||
|
||||
// OpenLocal opens the local backend at dir.
|
||||
func OpenLocal(dir string) (*Local, error) {
|
||||
items := []string{
|
||||
dir,
|
||||
filepath.Join(dir, dataPath),
|
||||
filepath.Join(dir, snapshotPath),
|
||||
filepath.Join(dir, treePath),
|
||||
filepath.Join(dir, lockPath),
|
||||
filepath.Join(dir, keyPath),
|
||||
filepath.Join(dir, tempPath),
|
||||
}
|
||||
|
||||
// test if all necessary dirs and files are there
|
||||
for _, d := range items {
|
||||
if _, err := os.Stat(d); err != nil {
|
||||
return nil, fmt.Errorf("%s does not exist", d)
|
||||
}
|
||||
}
|
||||
|
||||
// read version file
|
||||
f, err := os.Open(filepath.Join(dir, versionFileName))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to read version file: %v\n", err)
|
||||
}
|
||||
|
||||
var version uint
|
||||
n, err := fmt.Fscanf(f, "%d", &version)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if n != 1 {
|
||||
return nil, errors.New("could not read version from file")
|
||||
}
|
||||
|
||||
err = f.Close()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// check version
|
||||
if version != BackendVersion {
|
||||
return nil, fmt.Errorf("wrong version %d", version)
|
||||
}
|
||||
|
||||
// read ID
|
||||
f, err = os.Open(filepath.Join(dir, idFileName))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
buf, err := ioutil.ReadAll(f)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = f.Close()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
id, err := ParseID(strings.TrimSpace(string(buf)))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &Local{p: dir, ver: version, id: id}, nil
|
||||
}
|
||||
|
||||
// CreateLocal creates all the necessary files and directories for a new local
|
||||
// backend at dir.
|
||||
func CreateLocal(dir string) (*Local, error) {
|
||||
versionFile := filepath.Join(dir, versionFileName)
|
||||
idFile := filepath.Join(dir, idFileName)
|
||||
dirs := []string{
|
||||
dir,
|
||||
filepath.Join(dir, dataPath),
|
||||
filepath.Join(dir, snapshotPath),
|
||||
filepath.Join(dir, treePath),
|
||||
filepath.Join(dir, lockPath),
|
||||
filepath.Join(dir, keyPath),
|
||||
filepath.Join(dir, tempPath),
|
||||
}
|
||||
|
||||
// test if files already exist
|
||||
_, err := os.Lstat(versionFile)
|
||||
if err == nil {
|
||||
return nil, errors.New("version file already exists")
|
||||
}
|
||||
|
||||
_, err = os.Lstat(idFile)
|
||||
if err == nil {
|
||||
return nil, errors.New("id file already exists")
|
||||
}
|
||||
|
||||
// test if directories already exist
|
||||
for _, d := range dirs[1:] {
|
||||
if _, err := os.Stat(d); err == nil {
|
||||
return nil, fmt.Errorf("dir %s already exists", d)
|
||||
}
|
||||
}
|
||||
|
||||
// create paths for data, refs and temp
|
||||
for _, d := range dirs {
|
||||
err := os.MkdirAll(d, dirMode)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// create version file
|
||||
f, err := os.Create(versionFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
_, err = fmt.Fprintf(f, "%d\n", BackendVersion)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = f.Close()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// create ID file
|
||||
id := make([]byte, sha256.Size)
|
||||
_, err = rand.Read(id)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
f, err = os.Create(idFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
_, err = fmt.Fprintf(f, "%s\n", ID(id).String())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = f.Close()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// open backend
|
||||
return OpenLocal(dir)
|
||||
}
|
||||
|
||||
// Location returns this backend's location (the directory name).
|
||||
func (b *Local) Location() string {
|
||||
return b.p
|
||||
}
|
||||
|
||||
// Return temp directory in correct directory for this backend.
|
||||
func (b *Local) tempFile() (*os.File, error) {
|
||||
return ioutil.TempFile(filepath.Join(b.p, tempPath), "temp-")
|
||||
}
|
||||
|
||||
// Rename temp file to final name according to type and ID.
|
||||
func (b *Local) renameFile(file *os.File, t Type, id ID) error {
|
||||
filename := b.filename(t, id)
|
||||
oldname := file.Name()
|
||||
|
||||
if t == Data || t == Tree {
|
||||
// create directories if necessary, ignore errors
|
||||
os.MkdirAll(filepath.Dir(filename), dirMode)
|
||||
}
|
||||
|
||||
err := os.Rename(oldname, filename)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// set mode to read-only
|
||||
fi, err := os.Stat(filename)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return os.Chmod(filename, fi.Mode()&os.FileMode(^uint32(0222)))
|
||||
}
|
||||
|
||||
// Construct directory for given Type.
|
||||
func (b *Local) dirname(t Type, id ID) string {
|
||||
var n string
|
||||
switch t {
|
||||
case Data:
|
||||
n = dataPath
|
||||
if id != nil {
|
||||
n = filepath.Join(dataPath, fmt.Sprintf("%02x", id[0]))
|
||||
}
|
||||
case Snapshot:
|
||||
n = snapshotPath
|
||||
case Tree:
|
||||
n = treePath
|
||||
if id != nil {
|
||||
n = filepath.Join(treePath, fmt.Sprintf("%02x", id[0]))
|
||||
}
|
||||
case Lock:
|
||||
n = lockPath
|
||||
case Key:
|
||||
n = keyPath
|
||||
}
|
||||
return filepath.Join(b.p, n)
|
||||
}
|
||||
|
||||
type localBlob struct {
|
||||
f *os.File
|
||||
hw *HashingWriter
|
||||
backend *Local
|
||||
tpe Type
|
||||
id ID
|
||||
size uint
|
||||
closed bool
|
||||
}
|
||||
|
||||
func (lb *localBlob) Close() error {
|
||||
if lb.closed {
|
||||
return errors.New("Close() called on closed file")
|
||||
|
||||
}
|
||||
lb.closed = true
|
||||
|
||||
err := lb.f.Close()
|
||||
if err != nil {
|
||||
return fmt.Errorf("local: file.Close: %v", err)
|
||||
}
|
||||
|
||||
// get ID
|
||||
lb.id = ID(lb.hw.Sum(nil))
|
||||
|
||||
// check for duplicate ID
|
||||
res, err := lb.backend.Test(lb.tpe, lb.id)
|
||||
if err != nil {
|
||||
return fmt.Errorf("testing presence of ID %v failed: %v", lb.id, err)
|
||||
}
|
||||
|
||||
if res {
|
||||
return ErrAlreadyPresent
|
||||
}
|
||||
|
||||
// rename file
|
||||
err = lb.backend.renameFile(lb.f, lb.tpe, lb.id)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (lb *localBlob) Write(p []byte) (int, error) {
|
||||
n, err := lb.hw.Write(p)
|
||||
lb.size += uint(n)
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (lb *localBlob) ID() (ID, error) {
|
||||
if lb.id == nil {
|
||||
return nil, errors.New("blob is not closed, ID unavailable")
|
||||
}
|
||||
|
||||
return lb.id, nil
|
||||
}
|
||||
|
||||
func (lb *localBlob) Size() uint {
|
||||
return lb.size
|
||||
}
|
||||
|
||||
// Create creates a new blob of type t. Blob implements io.WriteCloser. Once
|
||||
// Close() has been called, ID() can be used to retrieve the ID. If the blob is
|
||||
// already present, Close() returns ErrAlreadyPresent.
|
||||
func (b *Local) Create(t Type) (Blob, error) {
|
||||
// TODO: make sure that tempfile is removed upon error
|
||||
|
||||
// create tempfile in backend
|
||||
file, err := b.tempFile()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
hw := NewHashingWriter(file, newHash())
|
||||
blob := localBlob{
|
||||
hw: hw,
|
||||
f: file,
|
||||
backend: b,
|
||||
tpe: t,
|
||||
}
|
||||
|
||||
return &blob, nil
|
||||
}
|
||||
|
||||
// Construct path for given Type and ID.
|
||||
func (b *Local) filename(t Type, id ID) string {
|
||||
return filepath.Join(b.dirname(t, id), id.String())
|
||||
}
|
||||
|
||||
// Get returns the content stored under the given ID. If the data doesn't match
|
||||
// the requested ID, ErrWrongData is returned.
|
||||
func (b *Local) Get(t Type, id ID) ([]byte, error) {
|
||||
if id == nil {
|
||||
return nil, errors.New("unable to load nil ID")
|
||||
}
|
||||
|
||||
// try to open file
|
||||
file, err := os.Open(b.filename(t, id))
|
||||
defer file.Close()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// read all
|
||||
buf, err := ioutil.ReadAll(file)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// check id
|
||||
if !Hash(buf).Equal(id) {
|
||||
return nil, ErrWrongData
|
||||
}
|
||||
|
||||
return buf, nil
|
||||
}
|
||||
|
||||
// GetReader returns a reader that yields the content stored under the given
|
||||
// ID. The content is not verified. The reader should be closed after draining
|
||||
// it.
|
||||
func (b *Local) GetReader(t Type, id ID) (io.ReadCloser, error) {
|
||||
if id == nil {
|
||||
return nil, errors.New("unable to load nil ID")
|
||||
}
|
||||
|
||||
// try to open file
|
||||
file, err := os.Open(b.filename(t, id))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return file, nil
|
||||
}
|
||||
|
||||
// Test returns true if a blob of the given type and ID exists in the backend.
|
||||
func (b *Local) Test(t Type, id ID) (bool, error) {
|
||||
// try to open file
|
||||
file, err := os.Open(b.filename(t, id))
|
||||
defer func() {
|
||||
file.Close()
|
||||
}()
|
||||
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return false, nil
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// Remove removes the content stored at ID.
|
||||
func (b *Local) Remove(t Type, id ID) error {
|
||||
return os.Remove(b.filename(t, id))
|
||||
}
|
||||
|
||||
// List lists all objects of a given type.
|
||||
func (b *Local) List(t Type) (IDs, error) {
|
||||
// TODO: use os.Open() and d.Readdirnames() instead of Glob()
|
||||
var pattern string
|
||||
if t == Data || t == Tree {
|
||||
pattern = filepath.Join(b.dirname(t, nil), "*", "*")
|
||||
} else {
|
||||
pattern = filepath.Join(b.dirname(t, nil), "*")
|
||||
}
|
||||
|
||||
matches, err := filepath.Glob(pattern)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ids := make(IDs, 0, len(matches))
|
||||
|
||||
for _, m := range matches {
|
||||
base := filepath.Base(m)
|
||||
|
||||
if base == "" {
|
||||
continue
|
||||
}
|
||||
id, err := ParseID(base)
|
||||
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
ids = append(ids, id)
|
||||
}
|
||||
|
||||
return ids, nil
|
||||
}
|
||||
|
||||
// Version returns the version of this local backend.
|
||||
func (b *Local) Version() uint {
|
||||
return b.ver
|
||||
}
|
||||
|
||||
// ID returns the ID of this local backend.
|
||||
func (b *Local) ID() ID {
|
||||
return b.id
|
||||
}
|
||||
|
||||
// Close closes the backend
|
||||
func (b *Local) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Delete removes the repository and all files.
|
||||
func (b *Local) Delete() error {
|
||||
return os.RemoveAll(b.p)
|
||||
}
|
36 backend/local/generic_test.go Normal file
|
@ -0,0 +1,36 @@
|
|||
package local_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// assert fails the test if the condition is false.
|
||||
func assert(tb testing.TB, condition bool, msg string, v ...interface{}) {
|
||||
if !condition {
|
||||
_, file, line, _ := runtime.Caller(1)
|
||||
fmt.Printf("\033[31m%s:%d: "+msg+"\033[39m\n\n", append([]interface{}{filepath.Base(file), line}, v...)...)
|
||||
tb.FailNow()
|
||||
}
|
||||
}
|
||||
|
||||
// ok fails the test if an err is not nil.
|
||||
func ok(tb testing.TB, err error) {
|
||||
if err != nil {
|
||||
_, file, line, _ := runtime.Caller(1)
|
||||
fmt.Printf("\033[31m%s:%d: unexpected error: %s\033[39m\n\n", filepath.Base(file), line, err.Error())
|
||||
tb.FailNow()
|
||||
}
|
||||
}
|
||||
|
||||
// equals fails the test if exp is not equal to act.
|
||||
func equals(tb testing.TB, exp, act interface{}) {
|
||||
if !reflect.DeepEqual(exp, act) {
|
||||
_, file, line, _ := runtime.Caller(1)
|
||||
fmt.Printf("\033[31m%s:%d:\n\n\texp: %#v\n\n\tgot: %#v\033[39m\n\n", filepath.Base(file), line, exp, act)
|
||||
tb.FailNow()
|
||||
}
|
||||
}
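
A hypothetical test, not part of this commit, showing how these helpers read in use:

package local_test

import (
	"strconv"
	"testing"
)

func TestHelpersSketch(t *testing.T) {
	v, err := strconv.Atoi("42")
	ok(t, err)       // fails the test immediately on an unexpected error
	equals(t, 42, v) // fails with an exp/got dump when the values differ
	assert(t, v > 0, "expected %d to be positive", v)
}
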
|
377 backend/local/local.go Normal file
|
@ -0,0 +1,377 @@
|
|||
package local
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/restic/restic/backend"
|
||||
)
|
||||
|
||||
var ErrWrongData = errors.New("wrong data returned by backend, checksum does not match")
|
||||
|
||||
type Local struct {
|
||||
p string
|
||||
ver uint
|
||||
name string
|
||||
id string
|
||||
}
|
||||
|
||||
// Open opens the local backend at dir.
|
||||
func Open(dir string) (*Local, error) {
|
||||
items := []string{
|
||||
dir,
|
||||
filepath.Join(dir, backend.Paths.Data),
|
||||
filepath.Join(dir, backend.Paths.Snapshots),
|
||||
filepath.Join(dir, backend.Paths.Trees),
|
||||
filepath.Join(dir, backend.Paths.Locks),
|
||||
filepath.Join(dir, backend.Paths.Keys),
|
||||
filepath.Join(dir, backend.Paths.Temp),
|
||||
}
|
||||
|
||||
// test if all necessary dirs and files are there
|
||||
for _, d := range items {
|
||||
if _, err := os.Stat(d); err != nil {
|
||||
return nil, fmt.Errorf("%s does not exist", d)
|
||||
}
|
||||
}
|
||||
|
||||
// read version file
|
||||
f, err := os.Open(filepath.Join(dir, backend.Paths.Version))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to read version file: %v\n", err)
|
||||
}
|
||||
|
||||
var version uint
|
||||
n, err := fmt.Fscanf(f, "%d", &version)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if n != 1 {
|
||||
return nil, errors.New("could not read version from file")
|
||||
}
|
||||
|
||||
err = f.Close()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// check version
|
||||
if version != backend.Version {
|
||||
return nil, fmt.Errorf("wrong version %d", version)
|
||||
}
|
||||
|
||||
// read ID
|
||||
f, err = os.Open(filepath.Join(dir, backend.Paths.ID))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
buf, err := ioutil.ReadAll(f)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = f.Close()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
id := strings.TrimSpace(string(buf))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &Local{p: dir, ver: version, id: id}, nil
|
||||
}
|
||||
|
||||
// Create creates all the necessary files and directories for a new local
|
||||
// backend at dir.
|
||||
func Create(dir string) (*Local, error) {
|
||||
versionFile := filepath.Join(dir, backend.Paths.Version)
|
||||
idFile := filepath.Join(dir, backend.Paths.ID)
|
||||
dirs := []string{
|
||||
dir,
|
||||
filepath.Join(dir, backend.Paths.Data),
|
||||
filepath.Join(dir, backend.Paths.Snapshots),
|
||||
filepath.Join(dir, backend.Paths.Trees),
|
||||
filepath.Join(dir, backend.Paths.Locks),
|
||||
filepath.Join(dir, backend.Paths.Keys),
|
||||
filepath.Join(dir, backend.Paths.Temp),
|
||||
}
|
||||
|
||||
// test if files already exist
|
||||
_, err := os.Lstat(versionFile)
|
||||
if err == nil {
|
||||
return nil, errors.New("version file already exists")
|
||||
}
|
||||
|
||||
_, err = os.Lstat(idFile)
|
||||
if err == nil {
|
||||
return nil, errors.New("id file already exists")
|
||||
}
|
||||
|
||||
// test if directories already exist
|
||||
for _, d := range dirs[1:] {
|
||||
if _, err := os.Stat(d); err == nil {
|
||||
return nil, fmt.Errorf("dir %s already exists", d)
|
||||
}
|
||||
}
|
||||
|
||||
// create paths for data, refs and temp
|
||||
for _, d := range dirs {
|
||||
err := os.MkdirAll(d, backend.Modes.Dir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// create version file
|
||||
f, err := os.Create(versionFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
_, err = fmt.Fprintf(f, "%d\n", backend.Version)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = f.Close()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// create ID file
|
||||
id := make([]byte, sha256.Size)
|
||||
_, err = rand.Read(id)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
f, err = os.Create(idFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
_, err = fmt.Fprintln(f, hex.EncodeToString(id))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = f.Close()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// open backend
|
||||
return Open(dir)
|
||||
}
|
||||
|
||||
// Location returns this backend's location (the directory name).
|
||||
func (b *Local) Location() string {
|
||||
return b.p
|
||||
}
|
||||
|
||||
// Return temp directory in correct directory for this backend.
|
||||
func (b *Local) tempFile() (*os.File, error) {
|
||||
return ioutil.TempFile(filepath.Join(b.p, backend.Paths.Temp), "temp-")
|
||||
}
|
||||
|
||||
type localBlob struct {
|
||||
f *os.File
|
||||
size uint
|
||||
final bool
|
||||
basedir string
|
||||
}
|
||||
|
||||
func (lb *localBlob) Write(p []byte) (int, error) {
|
||||
if lb.final {
|
||||
return 0, errors.New("blob already closed")
|
||||
}
|
||||
|
||||
n, err := lb.f.Write(p)
|
||||
lb.size += uint(n)
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (lb *localBlob) Size() uint {
|
||||
return lb.size
|
||||
}
|
||||
|
||||
func (lb *localBlob) Finalize(t backend.Type, name string) error {
|
||||
if lb.final {
|
||||
return errors.New("Already finalized")
|
||||
}
|
||||
|
||||
lb.final = true
|
||||
|
||||
err := lb.f.Close()
|
||||
if err != nil {
|
||||
return fmt.Errorf("local: file.Close: %v", err)
|
||||
}
|
||||
|
||||
f := filename(lb.basedir, t, name)
|
||||
|
||||
// create directories if necessary, ignore errors
|
||||
if t == backend.Data || t == backend.Tree {
|
||||
os.MkdirAll(filepath.Dir(f), backend.Modes.Dir)
|
||||
}
|
||||
|
||||
// test if new path already exists
|
||||
if _, err := os.Stat(f); err == nil {
|
||||
return fmt.Errorf("Close(): file %v already exists", f)
|
||||
}
|
||||
|
||||
if err := os.Rename(lb.f.Name(), f); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// set mode to read-only
|
||||
fi, err := os.Stat(f)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return os.Chmod(f, fi.Mode()&os.FileMode(^uint32(0222)))
|
||||
}
|
||||
|
||||
// Create creates a new Blob. The data is available only after Finalize()
|
||||
// has been called on the returned Blob.
|
||||
func (b *Local) Create() (backend.Blob, error) {
|
||||
// TODO: make sure that tempfile is removed upon error
|
||||
|
||||
// create tempfile in backend
|
||||
file, err := b.tempFile()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
blob := localBlob{
|
||||
f: file,
|
||||
basedir: b.p,
|
||||
}
|
||||
|
||||
return &blob, nil
|
||||
}
|
||||
|
||||
// Construct path for given Type and name.
|
||||
func filename(base string, t backend.Type, name string) string {
|
||||
return filepath.Join(dirname(base, t, name), name)
|
||||
}
|
||||
|
||||
// Construct directory for given Type.
|
||||
func dirname(base string, t backend.Type, name string) string {
|
||||
var n string
|
||||
switch t {
|
||||
case backend.Data:
|
||||
n = backend.Paths.Data
|
||||
if len(name) > 2 {
|
||||
n = filepath.Join(n, name[:2])
|
||||
}
|
||||
case backend.Snapshot:
|
||||
n = backend.Paths.Snapshots
|
||||
case backend.Tree:
|
||||
n = backend.Paths.Trees
|
||||
if len(name) > 2 {
|
||||
n = filepath.Join(n, name[:2])
|
||||
}
|
||||
case backend.Lock:
|
||||
n = backend.Paths.Locks
|
||||
case backend.Key:
|
||||
n = backend.Paths.Keys
|
||||
}
|
||||
return filepath.Join(base, n)
|
||||
}
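
dirname and filename fan data and tree blobs out into a subdirectory named after the first two characters of the blob name, while the other types are stored flat. A tiny standalone illustration; the name below is a shortened, made-up example:

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	base := "/repo"
	name := "c3ab8ff13720"

	// data and tree blobs: <base>/data/<first two chars>/<name>
	dataFile := filepath.Join(base, "data", name[:2], name)
	// snapshots (and locks, keys): flat under their directory
	snapshotFile := filepath.Join(base, "snapshots", name)

	fmt.Println(dataFile)     // /repo/data/c3/c3ab8ff13720
	fmt.Println(snapshotFile) // /repo/snapshots/c3ab8ff13720
}
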
|
||||
|
||||
// Get returns a reader that yields the content stored under the given
|
||||
// name. The reader should be closed after draining it.
|
||||
func (b *Local) Get(t backend.Type, name string) (io.ReadCloser, error) {
|
||||
return os.Open(filename(b.p, t, name))
|
||||
}
|
||||
|
||||
// Test returns true if a blob of the given type and name exists in the backend.
|
||||
func (b *Local) Test(t backend.Type, name string) (bool, error) {
|
||||
_, err := os.Stat(filename(b.p, t, name))
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return false, nil
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// Remove removes the blob with the given name and type.
|
||||
func (b *Local) Remove(t backend.Type, name string) error {
|
||||
return os.Remove(filename(b.p, t, name))
|
||||
}
|
||||
|
||||
// List returns a channel that yields all names of blobs of type t. A
|
||||
// goroutine is started for this. If the channel done is closed, sending
|
||||
// stops.
|
||||
func (b *Local) List(t backend.Type, done <-chan struct{}) <-chan string {
|
||||
// TODO: use os.Open() and d.Readdirnames() instead of Glob()
|
||||
var pattern string
|
||||
if t == backend.Data || t == backend.Tree {
|
||||
pattern = filepath.Join(dirname(b.p, t, ""), "*", "*")
|
||||
} else {
|
||||
pattern = filepath.Join(dirname(b.p, t, ""), "*")
|
||||
}
|
||||
|
||||
ch := make(chan string)
|
||||
matches, err := filepath.Glob(pattern)
|
||||
if err != nil {
|
||||
close(ch)
|
||||
return ch
|
||||
}
|
||||
|
||||
for i := range matches {
|
||||
matches[i] = filepath.Base(matches[i])
|
||||
}
|
||||
|
||||
sort.Strings(matches)
|
||||
|
||||
go func() {
|
||||
defer close(ch)
|
||||
for _, m := range matches {
|
||||
if m == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
select {
|
||||
case ch <- m:
|
||||
case <-done:
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return ch
|
||||
}
|
||||
|
||||
// Version returns the version of this local backend.
|
||||
func (b *Local) Version() uint {
|
||||
return b.ver
|
||||
}
|
||||
|
||||
// ID returns the ID of this local backend.
|
||||
func (b *Local) ID() string {
|
||||
return b.id
|
||||
}
|
||||
|
||||
// Delete removes the repository and all files.
|
||||
func (b *Local) Delete() error { return os.RemoveAll(b.p) }
|
||||
|
||||
// Close does nothing
|
||||
func (b *Local) Close() error { return nil }
|
|
@ -1,34 +1,21 @@
|
|||
package backend_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"sort"
|
||||
"testing"
|
||||
|
||||
"github.com/restic/restic/backend"
|
||||
"github.com/restic/restic/backend/local"
|
||||
)
|
||||
|
||||
var testCleanup = flag.Bool("test.cleanup", true, "clean up after running tests (remove local backend directory with all content)")
|
||||
|
||||
var TestStrings = []struct {
|
||||
id string
|
||||
data string
|
||||
}{
|
||||
{"c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2", "foobar"},
|
||||
{"248d6a61d20638b8e5c026930c3e6039a33ce45964ff2167f6ecedd419db06c1", "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq"},
|
||||
{"cc5d46bdb4991c6eae3eb739c9c8a7a46fe9654fab79c47b4fe48383b5b25e1c", "foo/bar"},
|
||||
{"4e54d2c721cbdb730f01b10b62dec622962b36966ec685880effa63d71c808f2", "foo/../../baz"},
|
||||
}
|
||||
|
||||
func setupLocalBackend(t *testing.T) *backend.Local {
|
||||
func setupLocalBackend(t *testing.T) *local.Local {
|
||||
tempdir, err := ioutil.TempDir("", "restic-test-")
|
||||
ok(t, err)
|
||||
|
||||
b, err := backend.CreateLocal(tempdir)
|
||||
b, err := local.Create(tempdir)
|
||||
ok(t, err)
|
||||
|
||||
t.Logf("created local backend at %s", tempdir)
|
||||
|
@ -36,7 +23,7 @@ func setupLocalBackend(t *testing.T) *backend.Local {
|
|||
return b
|
||||
}
|
||||
|
||||
func teardownLocalBackend(t *testing.T, b *backend.Local) {
|
||||
func teardownLocalBackend(t *testing.T, b *local.Local) {
|
||||
if !*testCleanup {
|
||||
t.Logf("leaving local backend at %s\n", b.Location())
|
||||
return
|
||||
|
@ -45,137 +32,9 @@ func teardownLocalBackend(t *testing.T, b *backend.Local) {
|
|||
ok(t, b.Delete())
|
||||
}
|
||||
|
||||
func testBackend(b backend.Backend, t *testing.T) {
|
||||
for _, tpe := range []backend.Type{backend.Data, backend.Key, backend.Lock, backend.Snapshot, backend.Tree} {
|
||||
// detect non-existing files
|
||||
for _, test := range TestStrings {
|
||||
id, err := backend.ParseID(test.id)
|
||||
ok(t, err)
|
||||
|
||||
// test if blob is already in repository
|
||||
ret, err := b.Test(tpe, id)
|
||||
ok(t, err)
|
||||
assert(t, !ret, "blob was found to exist before creating")
|
||||
|
||||
// try to open not existing blob
|
||||
d, err := b.Get(tpe, id)
|
||||
assert(t, err != nil && d == nil, "blob data could be extracted befor creation")
|
||||
|
||||
// try to get string out, should fail
|
||||
ret, err = b.Test(tpe, id)
|
||||
ok(t, err)
|
||||
assert(t, !ret, fmt.Sprintf("id %q was found (but should not have)", test.id))
|
||||
}
|
||||
|
||||
// add files
|
||||
for _, test := range TestStrings {
|
||||
// store string in backend
|
||||
blob, err := b.Create(tpe)
|
||||
ok(t, err)
|
||||
|
||||
_, err = blob.Write([]byte(test.data))
|
||||
ok(t, err)
|
||||
ok(t, blob.Close())
|
||||
|
||||
id, err := blob.ID()
|
||||
ok(t, err)
|
||||
|
||||
equals(t, test.id, id.String())
|
||||
|
||||
// try to get it out again
|
||||
buf, err := b.Get(tpe, id)
|
||||
ok(t, err)
|
||||
assert(t, buf != nil, "Get() returned nil")
|
||||
|
||||
// compare content
|
||||
equals(t, test.data, string(buf))
|
||||
|
||||
// compare content again via stream function
|
||||
rd, err := b.GetReader(tpe, id)
|
||||
ok(t, err)
|
||||
buf, err = ioutil.ReadAll(rd)
|
||||
ok(t, err)
|
||||
equals(t, test.data, string(buf))
|
||||
}
|
||||
|
||||
// test adding the first file again
|
||||
test := TestStrings[0]
|
||||
id, err := backend.ParseID(test.id)
|
||||
ok(t, err)
|
||||
|
||||
// create blob
|
||||
blob, err := b.Create(tpe)
|
||||
ok(t, err)
|
||||
|
||||
_, err = io.Copy(blob, bytes.NewReader([]byte(test.data)))
|
||||
ok(t, err)
|
||||
err = blob.Close()
|
||||
assert(t, err == backend.ErrAlreadyPresent,
|
||||
"wrong error returned: expected %v, got %v",
|
||||
backend.ErrAlreadyPresent, err)
|
||||
|
||||
id2, err := blob.ID()
|
||||
ok(t, err)
|
||||
|
||||
assert(t, id.Equal(id2), "IDs do not match: expected %v, got %v", id, id2)
|
||||
|
||||
// remove and recreate
|
||||
err = b.Remove(tpe, id)
|
||||
ok(t, err)
|
||||
|
||||
// create blob
|
||||
blob, err = b.Create(tpe)
|
||||
ok(t, err)
|
||||
|
||||
_, err = io.Copy(blob, bytes.NewReader([]byte(test.data)))
|
||||
ok(t, err)
|
||||
err = blob.Close()
|
||||
ok(t, err)
|
||||
|
||||
id2, err = blob.ID()
|
||||
ok(t, err)
|
||||
assert(t, id.Equal(id2), "IDs do not match: expected %v, got %v", id, id2)
|
||||
|
||||
// list items
|
||||
IDs := backend.IDs{}
|
||||
|
||||
for _, test := range TestStrings {
|
||||
id, err := backend.ParseID(test.id)
|
||||
ok(t, err)
|
||||
IDs = append(IDs, id)
|
||||
}
|
||||
|
||||
ids, err := b.List(tpe)
|
||||
ok(t, err)
|
||||
|
||||
sort.Sort(ids)
|
||||
sort.Sort(IDs)
|
||||
equals(t, IDs, ids)
|
||||
|
||||
// remove content if requested
|
||||
if *testCleanup {
|
||||
for _, test := range TestStrings {
|
||||
id, err := backend.ParseID(test.id)
|
||||
ok(t, err)
|
||||
|
||||
found, err := b.Test(tpe, id)
|
||||
ok(t, err)
|
||||
assert(t, found, fmt.Sprintf("id %q was not found before removal", id))
|
||||
|
||||
ok(t, b.Remove(tpe, id))
|
||||
|
||||
found, err = b.Test(tpe, id)
|
||||
ok(t, err)
|
||||
assert(t, !found, fmt.Sprintf("id %q not found after removal", id))
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
func TestBackend(t *testing.T) {
|
||||
func TestLocalBackend(t *testing.T) {
|
||||
// test for non-existing backend
|
||||
b, err := backend.OpenLocal("/invalid-restic-test")
|
||||
b, err := local.Open("/invalid-restic-test")
|
||||
assert(t, err != nil, "opening invalid repository at /invalid-restic-test should have failed, but err is nil")
|
||||
assert(t, b == nil, fmt.Sprintf("opening invalid repository at /invalid-restic-test should have failed, but b is not nil: %v", b))
|
||||
|
||||
|
@ -190,10 +49,10 @@ func TestLocalBackendCreationFailures(t *testing.T) {
|
|||
defer teardownLocalBackend(t, b)
|
||||
|
||||
// test failure to create a new repository at the same location
|
||||
b2, err := backend.CreateLocal(b.Location())
|
||||
b2, err := local.Create(b.Location())
|
||||
assert(t, err != nil && b2 == nil, fmt.Sprintf("creating a repository at %s for the second time should have failed", b.Location()))
|
||||
|
||||
// test failure to create a new repository at the same location without a config file
|
||||
b2, err = backend.CreateLocal(b.Location())
|
||||
b2, err = local.Create(b.Location())
|
||||
assert(t, err != nil && b2 == nil, fmt.Sprintf("creating a repository at %s for the second time should have failed", b.Location()))
|
||||
}
|
||||
|
|
27 backend/paths.go Normal file
|
@ -0,0 +1,27 @@
|
|||
package backend
|
||||
|
||||
import "os"
|
||||
|
||||
// Default paths for file-based backends (e.g. local)
|
||||
var Paths = struct {
|
||||
Data string
|
||||
Snapshots string
|
||||
Trees string
|
||||
Locks string
|
||||
Keys string
|
||||
Temp string
|
||||
Version string
|
||||
ID string
|
||||
}{
|
||||
"data",
|
||||
"snapshots",
|
||||
"trees",
|
||||
"locks",
|
||||
"keys",
|
||||
"tmp",
|
||||
"version",
|
||||
"id",
|
||||
}
|
||||
|
||||
// Default modes for file-based backends
|
||||
var Modes = struct{ Dir, File os.FileMode }{0700, 0600}
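
These shared Paths and Modes values replace the per-backend dataPath/dirMode constants; both the local and the sftp backend in this commit build their directory layout from them. A minimal usage sketch:

package main

import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/restic/restic/backend"
)

func main() {
	repo := filepath.Join(os.TempDir(), "restic-example-repo")

	// create the data directory with the shared directory mode (0700)
	dataDir := filepath.Join(repo, backend.Paths.Data)
	if err := os.MkdirAll(dataDir, backend.Modes.Dir); err != nil {
		fmt.Println("mkdir failed:", err)
		return
	}
	fmt.Println("created", dataDir)
}
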
|
|
@ -1,4 +1,4 @@
|
|||
package backend
|
||||
package sftp
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
|
@ -12,9 +12,11 @@ import (
|
|||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/pkg/sftp"
|
||||
"github.com/restic/restic/backend"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -25,7 +27,7 @@ type SFTP struct {
|
|||
c *sftp.Client
|
||||
p string
|
||||
ver uint
|
||||
id ID
|
||||
id string
|
||||
|
||||
cmd *exec.Cmd
|
||||
}
|
||||
|
@ -62,10 +64,10 @@ func start_client(program string, args ...string) (*SFTP, error) {
|
|||
return &SFTP{c: client, cmd: cmd}, nil
|
||||
}
|
||||
|
||||
// OpenSFTP opens an sftp backend. When the command is started via
|
||||
// Open opens an sftp backend. When the command is started via
|
||||
// exec.Command, it is expected to speak sftp on stdin/stdout. The backend
|
||||
// is expected at the given path.
|
||||
func OpenSFTP(dir string, program string, args ...string) (*SFTP, error) {
|
||||
func Open(dir string, program string, args ...string) (*SFTP, error) {
|
||||
sftp, err := start_client(program, args...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -74,12 +76,13 @@ func OpenSFTP(dir string, program string, args ...string) (*SFTP, error) {
|
|||
// test if all necessary dirs and files are there
|
||||
items := []string{
|
||||
dir,
|
||||
filepath.Join(dir, dataPath),
|
||||
filepath.Join(dir, snapshotPath),
|
||||
filepath.Join(dir, treePath),
|
||||
filepath.Join(dir, lockPath),
|
||||
filepath.Join(dir, keyPath),
|
||||
filepath.Join(dir, tempPath),
|
||||
filepath.Join(dir, backend.Paths.Data),
|
||||
filepath.Join(dir, backend.Paths.Snapshots),
|
||||
filepath.Join(dir, backend.Paths.Trees),
|
||||
filepath.Join(dir, backend.Paths.Locks),
|
||||
filepath.Join(dir, backend.Paths.Keys),
|
||||
filepath.Join(dir, backend.Paths.Version),
|
||||
filepath.Join(dir, backend.Paths.ID),
|
||||
}
|
||||
for _, d := range items {
|
||||
if _, err := sftp.c.Lstat(d); err != nil {
|
||||
|
@ -88,7 +91,7 @@ func OpenSFTP(dir string, program string, args ...string) (*SFTP, error) {
|
|||
}
|
||||
|
||||
// read version file
|
||||
f, err := sftp.c.Open(filepath.Join(dir, versionFileName))
|
||||
f, err := sftp.c.Open(filepath.Join(dir, backend.Paths.Version))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to read version file: %v\n", err)
|
||||
}
|
||||
|
@ -109,12 +112,12 @@ func OpenSFTP(dir string, program string, args ...string) (*SFTP, error) {
|
|||
}
|
||||
|
||||
// check version
|
||||
if version != BackendVersion {
|
||||
if version != backend.Version {
|
||||
return nil, fmt.Errorf("wrong version %d", version)
|
||||
}
|
||||
|
||||
// read ID
|
||||
f, err = sftp.c.Open(filepath.Join(dir, idFileName))
|
||||
f, err = sftp.c.Open(filepath.Join(dir, backend.Paths.ID))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -129,35 +132,30 @@ func OpenSFTP(dir string, program string, args ...string) (*SFTP, error) {
|
|||
return nil, err
|
||||
}
|
||||
|
||||
id, err := ParseID(strings.TrimSpace(string(buf)))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
sftp.id = id
|
||||
sftp.id = strings.TrimSpace(string(buf))
|
||||
sftp.p = dir
|
||||
|
||||
return sftp, nil
|
||||
}
|
||||
|
||||
// CreateSFTP creates all the necessary files and directories for a new sftp
|
||||
// Create creates all the necessary files and directories for a new sftp
|
||||
// backend at dir.
|
||||
func CreateSFTP(dir string, program string, args ...string) (*SFTP, error) {
|
||||
func Create(dir string, program string, args ...string) (*SFTP, error) {
|
||||
sftp, err := start_client(program, args...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
versionFile := filepath.Join(dir, versionFileName)
|
||||
idFile := filepath.Join(dir, idFileName)
|
||||
versionFile := filepath.Join(dir, backend.Paths.Version)
|
||||
idFile := filepath.Join(dir, backend.Paths.ID)
|
||||
dirs := []string{
|
||||
dir,
|
||||
filepath.Join(dir, dataPath),
|
||||
filepath.Join(dir, snapshotPath),
|
||||
filepath.Join(dir, treePath),
|
||||
filepath.Join(dir, lockPath),
|
||||
filepath.Join(dir, keyPath),
|
||||
filepath.Join(dir, tempPath),
|
||||
filepath.Join(dir, backend.Paths.Data),
|
||||
filepath.Join(dir, backend.Paths.Snapshots),
|
||||
filepath.Join(dir, backend.Paths.Trees),
|
||||
filepath.Join(dir, backend.Paths.Locks),
|
||||
filepath.Join(dir, backend.Paths.Keys),
|
||||
filepath.Join(dir, backend.Paths.Temp),
|
||||
}
|
||||
|
||||
// test if files already exist
|
||||
|
@ -180,7 +178,7 @@ func CreateSFTP(dir string, program string, args ...string) (*SFTP, error) {
|
|||
|
||||
// create paths for data, refs and temp blobs
|
||||
for _, d := range dirs {
|
||||
err = sftp.mkdirAll(d, dirMode)
|
||||
err = sftp.mkdirAll(d, backend.Modes.Dir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -192,7 +190,7 @@ func CreateSFTP(dir string, program string, args ...string) (*SFTP, error) {
|
|||
return nil, err
|
||||
}
|
||||
|
||||
_, err = fmt.Fprintf(f, "%d\n", BackendVersion)
|
||||
_, err = fmt.Fprintf(f, "%d\n", backend.Version)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -214,7 +212,7 @@ func CreateSFTP(dir string, program string, args ...string) (*SFTP, error) {
|
|||
return nil, err
|
||||
}
|
||||
|
||||
_, err = fmt.Fprintf(f, "%s\n", ID(id).String())
|
||||
_, err = fmt.Fprintln(f, hex.EncodeToString(id))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -235,7 +233,7 @@ func CreateSFTP(dir string, program string, args ...string) (*SFTP, error) {
|
|||
}
|
||||
|
||||
// open backend
|
||||
return OpenSFTP(dir, program, args...)
|
||||
return Open(dir, program, args...)
|
||||
}
|
||||
|
||||
// Location returns this backend's location (the directory name).
|
||||
|
@ -257,7 +255,7 @@ func (r *SFTP) tempFile() (string, *sftp.File, error) {
|
|||
}
|
||||
|
||||
// construct tempfile name
|
||||
name := filepath.Join(r.p, tempPath, fmt.Sprintf("temp-%s", hex.EncodeToString(buf)))
|
||||
name := filepath.Join(r.p, backend.Paths.Temp, "temp-"+hex.EncodeToString(buf))
|
||||
|
||||
// create file in temp dir
|
||||
f, err := r.c.Create(name)
|
||||
|
@ -280,7 +278,7 @@ func (r *SFTP) mkdirAll(dir string, mode os.FileMode) error {
|
|||
}
|
||||
|
||||
// create parent directories
|
||||
errMkdirAll := r.mkdirAll(filepath.Dir(dir), dirMode)
|
||||
errMkdirAll := r.mkdirAll(filepath.Dir(dir), backend.Modes.Dir)
|
||||
|
||||
// create directory
|
||||
errMkdir := r.c.Mkdir(dir)
|
||||
|
@ -300,18 +298,23 @@ func (r *SFTP) mkdirAll(dir string, mode os.FileMode) error {
|
|||
return r.c.Chmod(dir, mode)
|
||||
}
|
||||
|
||||
// Rename temp file to final name according to type and ID.
|
||||
func (r *SFTP) renameFile(oldname string, t Type, id ID) error {
|
||||
filename := r.filename(t, id)
|
||||
// Rename temp file to final name according to type and name.
|
||||
func (r *SFTP) renameFile(oldname string, t backend.Type, name string) error {
|
||||
filename := r.filename(t, name)
|
||||
|
||||
// create directories if necessary
|
||||
if t == Data || t == Tree {
|
||||
err := r.mkdirAll(filepath.Dir(filename), dirMode)
|
||||
if t == backend.Data || t == backend.Tree {
|
||||
err := r.mkdirAll(filepath.Dir(filename), backend.Modes.Dir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// test if new file exists
|
||||
if _, err := r.c.Lstat(filename); err == nil {
|
||||
return fmt.Errorf("Close(): file %v already exists", filename)
|
||||
}
|
||||
|
||||
err := r.c.Rename(oldname, filename)
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -326,42 +329,15 @@ func (r *SFTP) renameFile(oldname string, t Type, id ID) error {
	return r.c.Chmod(filename, fi.Mode()&os.FileMode(^uint32(0222)))
}

// Construct directory for given Type.
func (r *SFTP) dirname(t Type, id ID) string {
	var n string
	switch t {
	case Data:
		n = dataPath
		if id != nil {
			n = filepath.Join(dataPath, fmt.Sprintf("%02x", id[0]))
		}
	case Snapshot:
		n = snapshotPath
	case Tree:
		n = treePath
		if id != nil {
			n = filepath.Join(treePath, fmt.Sprintf("%02x", id[0]))
		}
	case Lock:
		n = lockPath
	case Key:
		n = keyPath
	}
	return filepath.Join(r.p, n)
}

type sftpBlob struct {
	f       *sftp.File
	name    string
	hw      *HashingWriter
	backend *SFTP
	tpe     Type
	id      ID
	size    uint
	closed  bool
	f        *sftp.File
	tempname string
	size     uint
	closed   bool
	backend  *SFTP
}

func (sb *sftpBlob) Close() error {
func (sb *sftpBlob) Finalize(t backend.Type, name string) error {
	if sb.closed {
		return errors.New("Close() called on closed file")
	}

@ -372,30 +348,17 @@ func (sb *sftpBlob) Close() error {
		return fmt.Errorf("sftp: file.Close: %v", err)
	}

	// get ID
	sb.id = ID(sb.hw.Sum(nil))

	// check for duplicate ID
	res, err := sb.backend.Test(sb.tpe, sb.id)
	if err != nil {
		return fmt.Errorf("testing presence of ID %v failed: %v", sb.id, err)
	}

	if res {
		return ErrAlreadyPresent
	}

	// rename file
	err = sb.backend.renameFile(sb.name, sb.tpe, sb.id)
	err = sb.backend.renameFile(sb.tempname, t, name)
	if err != nil {
		return err
		return fmt.Errorf("sftp: renameFile: %v", err)
	}

	return nil
}

func (sb *sftpBlob) Write(p []byte) (int, error) {
	n, err := sb.hw.Write(p)
	n, err := sb.f.Write(p)
	sb.size += uint(n)
	return n, err
}
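
With this change a backend blob no longer knows its own ID: the caller writes the data, computes the name itself, and fixes type and name in a single Finalize() call. A minimal sketch of the new calling convention; the helper is illustrative and assumes an already-open backend.Backend plus the convention that the name is the hex SHA-256 of the written bytes:

package example

import (
	"crypto/sha256"
	"encoding/hex"

	"github.com/restic/restic/backend"
)

// saveRaw stores buf in the backend under the hex-encoded SHA-256 of its
// content and returns that name (illustrative helper, not part of the diff).
func saveRaw(be backend.Backend, t backend.Type, buf []byte) (string, error) {
	blob, err := be.Create()
	if err != nil {
		return "", err
	}

	if _, err := blob.Write(buf); err != nil {
		return "", err
	}

	// the final name is only fixed at Finalize() time
	sum := sha256.Sum256(buf)
	name := hex.EncodeToString(sum[:])

	if err := blob.Finalize(t, name); err != nil {
		return "", err
	}

	return name, nil
}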
@ -404,18 +367,9 @@ func (sb *sftpBlob) Size() uint {
	return sb.size
}

func (sb *sftpBlob) ID() (ID, error) {
	if sb.id == nil {
		return nil, errors.New("blob is not closed, ID unavailable")
	}

	return sb.id, nil
}

// Create creates a new blob of type t. Blob implements io.WriteCloser. Once
// Close() has been called, ID() can be used to retrieve the ID. If the blob is
// already present, Close() returns ErrAlreadyPresent.
func (r *SFTP) Create(t Type) (Blob, error) {
// Create creates a new Blob. The data is available only after Finalize()
// has been called on the returned Blob.
func (r *SFTP) Create() (backend.Blob, error) {
	// TODO: make sure that tempfile is removed upon error

	// create tempfile in backend

@ -425,64 +379,52 @@ func (r *SFTP) Create(t Type) (Blob, error) {
	}

	blob := sftpBlob{
		hw:      NewHashingWriter(file, newHash()),
		f:       file,
		name:    filename,
		backend: r,
		tpe:     t,
		f:        file,
		tempname: filename,
		backend:  r,
	}

	return &blob, nil
}

// Construct path for given Type and ID.
func (r *SFTP) filename(t Type, id ID) string {
	return filepath.Join(r.dirname(t, id), id.String())
// Construct path for given backend.Type and name.
func (r *SFTP) filename(t backend.Type, name string) string {
	return filepath.Join(r.dirname(t, name), name)
}
// Get returns the content stored under the given ID. If the data doesn't match
// the requested ID, ErrWrongData is returned.
func (r *SFTP) Get(t Type, id ID) ([]byte, error) {
	if id == nil {
		return nil, errors.New("unable to load nil ID")
	}

	// try to open file
	file, err := r.c.Open(r.filename(t, id))
	defer func() {
		// TODO: report bug against sftp client, ignore Close() for nil file
		if file != nil {
			file.Close()
		}
	}()
	if err != nil {
		return nil, err
	}

	// read all
	buf, err := ioutil.ReadAll(file)
	if err != nil {
		return nil, err
	}

	// check id
	if !Hash(buf).Equal(id) {
		return nil, ErrWrongData
	}

	return buf, nil
}

// Construct directory for given backend.Type.
func (r *SFTP) dirname(t backend.Type, name string) string {
	var n string
	switch t {
	case backend.Data:
		n = backend.Paths.Data
		if len(name) > 2 {
			n = filepath.Join(n, name[:2])
		}
	case backend.Snapshot:
		n = backend.Paths.Snapshots
	case backend.Tree:
		n = backend.Paths.Trees
		if len(name) > 2 {
			n = filepath.Join(n, name[:2])
		}
	case backend.Lock:
		n = backend.Paths.Locks
	case backend.Key:
		n = backend.Paths.Keys
	}

	return filepath.Join(r.p, n)
}
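
The new dirname fans Data and Tree blobs out into a subdirectory named after the first two characters of the blob name; the other types stay flat. A small illustration of the resulting path shape (the helper and the literal directory name are made up for the example, the real names come from backend.Paths):

package main

import (
	"fmt"
	"path/filepath"
)

// fanOut mirrors the fan-out idea of the new dirname: long names get a
// two-character subdirectory, short names do not.
func fanOut(top, name string) string {
	if len(name) > 2 {
		return filepath.Join(top, name[:2], name)
	}
	return filepath.Join(top, name)
}

func main() {
	fmt.Println(fanOut("data", "ab64c5d8e1f7")) // data/ab/ab64c5d8e1f7
	fmt.Println(fanOut("data", "ab"))           // data/ab
}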
// GetReader returns a reader that yields the content stored under the given
// ID. The content is not verified. The reader should be closed after draining
// it.
func (r *SFTP) GetReader(t Type, id ID) (io.ReadCloser, error) {
	if id == nil {
		return nil, errors.New("unable to load nil ID")
// Get returns a reader that yields the content stored under the given
// name. The reader should be closed after draining it.
func (r *SFTP) Get(t backend.Type, name string) (io.ReadCloser, error) {
	if name == "" {
		return nil, errors.New("unable to load empty name")
	}

	// try to open file
	file, err := r.c.Open(r.filename(t, id))
	file, err := r.c.Open(r.filename(t, name))
	if err != nil {
		return nil, err
	}

@ -490,15 +432,9 @@ func (r *SFTP) GetReader(t Type, id ID) (io.ReadCloser, error) {
	return file, nil
}

// Test returns true if a blob of the given type and ID exists in the backend.
func (r *SFTP) Test(t Type, id ID) (bool, error) {
	file, err := r.c.Open(r.filename(t, id))
	defer func() {
		if file != nil {
			file.Close()
		}
	}()

// Test returns true if a blob of the given type and name exists in the backend.
func (r *SFTP) Test(t backend.Type, name string) (bool, error) {
	_, err := r.c.Lstat(r.filename(t, name))
	if err != nil {
		if _, ok := err.(*sftp.StatusError); ok {
			return false, nil

@ -510,54 +446,83 @@ func (r *SFTP) Test(t Type, id ID) (bool, error) {
	return true, nil
}

// Remove removes the content stored at ID.
func (r *SFTP) Remove(t Type, id ID) error {
	return r.c.Remove(r.filename(t, id))
// Remove removes the content stored at name.
func (r *SFTP) Remove(t backend.Type, name string) error {
	return r.c.Remove(r.filename(t, name))
}
// List lists all objects of a given type.
func (r *SFTP) List(t Type) (IDs, error) {
	list := []os.FileInfo{}
	var err error

	if t == Data || t == Tree {
		// read first level
		basedir := r.dirname(t, nil)

		list1, err := r.c.ReadDir(basedir)
		if err != nil {
			return nil, err
		}

		// read files
		for _, dir := range list1 {
			entries, err := r.c.ReadDir(filepath.Join(basedir, dir.Name()))
			if err != nil {
				return nil, err
			}

			for _, entry := range entries {
				list = append(list, entry)
			}
		}
	} else {
		list, err = r.c.ReadDir(r.dirname(t, nil))
		if err != nil {
			return nil, err
		}
	}

	ids := make(IDs, 0, len(list))
	for _, item := range list {
		id, err := ParseID(item.Name())
		// ignore everything that does not parse as an ID
		if err != nil {
			continue
		}
		ids = append(ids, id)
	}

	return ids, nil
}

// List returns a channel that yields all names of blobs of type t. A
// goroutine is started for this. If the channel done is closed, sending
// stops.
func (r *SFTP) List(t backend.Type, done <-chan struct{}) <-chan string {
	ch := make(chan string)

	go func() {
		defer close(ch)

		if t == backend.Data || t == backend.Tree {
			// read first level
			basedir := r.dirname(t, "")

			list1, err := r.c.ReadDir(basedir)
			if err != nil {
				return
			}

			dirs := make([]string, 0, len(list1))
			for _, d := range list1 {
				dirs = append(dirs, d.Name())
			}

			sort.Strings(dirs)

			// read files
			for _, dir := range dirs {
				entries, err := r.c.ReadDir(filepath.Join(basedir, dir))
				if err != nil {
					continue
				}

				items := make([]string, 0, len(entries))
				for _, entry := range entries {
					items = append(items, entry.Name())
				}

				sort.Strings(items)

				for _, file := range items {
					select {
					case ch <- file:
					case <-done:
						return
					}
				}
			}
		} else {
			entries, err := r.c.ReadDir(r.dirname(t, ""))
			if err != nil {
				return
			}

			items := make([]string, 0, len(entries))
			for _, entry := range entries {
				items = append(items, entry.Name())
			}

			sort.Strings(items)

			for _, file := range items {
				select {
				case ch <- file:
				case <-done:
					return
				}
			}
		}
	}()

	return ch
}
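
A typical consumer of the channel-based List drains the channel and relies on defer close(done) to stop the listing goroutine when it returns early. Sketch only, assuming an already-open *sftp.SFTP:

package example

import (
	"fmt"

	"github.com/restic/restic/backend"
	"github.com/restic/restic/backend/sftp"
)

// printSnapshots prints the names of all snapshot blobs. Closing done tells
// the listing goroutine to stop sending if we bail out early.
func printSnapshots(be *sftp.SFTP) {
	done := make(chan struct{})
	defer close(done)

	for name := range be.List(backend.Snapshot, done) {
		fmt.Println(name)
	}
}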
// Version returns the version of this local backend.

@ -566,7 +531,7 @@ func (r *SFTP) Version() uint {
}

// ID returns the ID of this local backend.
func (r *SFTP) ID() ID {
func (r *SFTP) ID() string {
	return r.id
}

@ -6,16 +6,16 @@ import (
|
|||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/restic/restic/backend"
|
||||
"github.com/restic/restic/backend/sftp"
|
||||
)
|
||||
|
||||
var sftpPath = flag.String("test.sftppath", "", "sftp binary path (default: empty)")
|
||||
|
||||
func setupSFTPBackend(t *testing.T) *backend.SFTP {
|
||||
func setupSFTPBackend(t *testing.T) *sftp.SFTP {
|
||||
tempdir, err := ioutil.TempDir("", "restic-test-")
|
||||
ok(t, err)
|
||||
|
||||
b, err := backend.CreateSFTP(tempdir, *sftpPath)
|
||||
b, err := sftp.Create(tempdir, *sftpPath)
|
||||
ok(t, err)
|
||||
|
||||
t.Logf("created sftp backend locally at %s", tempdir)
|
||||
|
@ -23,7 +23,7 @@ func setupSFTPBackend(t *testing.T) *backend.SFTP {
|
|||
return b
|
||||
}
|
||||
|
||||
func teardownSFTPBackend(t *testing.T, b *backend.SFTP) {
|
||||
func teardownSFTPBackend(t *testing.T, b *sftp.SFTP) {
|
||||
if !*testCleanup {
|
||||
t.Logf("leaving backend at %s\n", b.Location())
|
||||
return
|
||||
|
|
20
cache.go
|
@ -17,7 +17,7 @@ type Cache struct {
|
|||
base string
|
||||
}
|
||||
|
||||
func NewCache(be backend.IDer) (c *Cache, err error) {
|
||||
func NewCache(be backend.Identifier) (c *Cache, err error) {
|
||||
// try to get explicit cache dir from environment
|
||||
dir := os.Getenv("RESTIC_CACHE")
|
||||
|
||||
|
@ -29,7 +29,7 @@ func NewCache(be backend.IDer) (c *Cache, err error) {
|
|||
}
|
||||
}
|
||||
|
||||
basedir := filepath.Join(dir, be.ID().String())
|
||||
basedir := filepath.Join(dir, be.ID())
|
||||
debug.Log("Cache.New", "opened cache at %v", basedir)
|
||||
|
||||
return &Cache{base: basedir}, nil
|
||||
|
@ -115,7 +115,7 @@ func (c *Cache) Clear(s backend.Backend) error {
|
|||
for _, entry := range list {
|
||||
debug.Log("Cache.Clear", "found entry %v", entry)
|
||||
|
||||
if ok, err := s.Test(backend.Snapshot, entry.ID); !ok || err != nil {
|
||||
if ok, err := s.Test(backend.Snapshot, entry.ID.String()); !ok || err != nil {
|
||||
debug.Log("Cache.Clear", "snapshot %v doesn't exist any more, removing %v", entry.ID, entry)
|
||||
|
||||
err = c.Purge(backend.Snapshot, entry.Subtype, entry.ID)
|
||||
|
@ -174,6 +174,7 @@ func (c *Cache) List(t backend.Type) ([]CacheEntry, error) {
|
|||
id, err := backend.ParseID(parts[0])
|
||||
// ignore invalid cache entries for now
|
||||
if err != nil {
|
||||
debug.Log("Cache.List", "unable to parse name %v as id: %v", parts[0], err)
|
||||
continue
|
||||
}
|
||||
|
||||
|
@ -220,13 +221,16 @@ func (c *Cache) RefreshSnapshots(s Server, p *Progress) error {
|
|||
}
|
||||
|
||||
// list snapshots first
|
||||
snapshots, err := s.List(backend.Snapshot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
done := make(chan struct{})
|
||||
defer close(done)
|
||||
|
||||
// check that snapshot blobs are cached
|
||||
for _, id := range snapshots {
|
||||
for name := range s.List(backend.Snapshot, done) {
|
||||
id, err := backend.ParseID(name)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// remove snapshot from list of entries
|
||||
for i, e := range entries {
|
||||
if e.ID.Equal(id) {
|
||||
|
|
|
@ -185,14 +185,22 @@ func (cmd CmdBackup) Execute(args []string) error {
|
|||
return err
|
||||
}
|
||||
|
||||
var parentSnapshotID backend.ID
|
||||
var (
|
||||
parentSnapshot string
|
||||
parentSnapshotID backend.ID
|
||||
)
|
||||
|
||||
if cmd.Parent != "" {
|
||||
parentSnapshotID, err = s.FindSnapshot(cmd.Parent)
|
||||
parentSnapshot, err = s.FindSnapshot(cmd.Parent)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid id %q: %v", cmd.Parent, err)
|
||||
}
|
||||
|
||||
parentSnapshotID, err = backend.ParseID(parentSnapshot)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid parent snapshot id %v", parentSnapshot)
|
||||
}
|
||||
|
||||
fmt.Printf("found parent snapshot %v\n", parentSnapshotID)
|
||||
}
|
||||
|
||||
|
|
|
@ -49,7 +49,12 @@ func (cmd CmdCat) Execute(args []string) error {
|
|||
}
|
||||
|
||||
// find snapshot id with prefix
|
||||
id, err = s.FindSnapshot(args[1])
|
||||
name, err := s.FindSnapshot(args[1])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
id, err = backend.ParseID(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -71,7 +76,7 @@ func (cmd CmdCat) Execute(args []string) error {
|
|||
case "tree":
|
||||
// try storage id
|
||||
tree := &restic.Tree{}
|
||||
err := s.LoadJSONID(backend.Tree, id, tree)
|
||||
err := s.LoadJSONID(backend.Tree, id.String(), tree)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -86,7 +91,7 @@ func (cmd CmdCat) Execute(args []string) error {
|
|||
return nil
|
||||
case "snapshot":
|
||||
sn := &restic.Snapshot{}
|
||||
err = s.LoadJSONID(backend.Snapshot, id, sn)
|
||||
err = s.LoadJSONID(backend.Snapshot, id.String(), sn)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -100,13 +105,15 @@ func (cmd CmdCat) Execute(args []string) error {
|
|||
|
||||
return nil
|
||||
case "key":
|
||||
data, err := s.Get(backend.Key, id)
|
||||
rd, err := s.Get(backend.Key, id.String())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
dec := json.NewDecoder(rd)
|
||||
|
||||
var key restic.Key
|
||||
err = json.Unmarshal(data, &key)
|
||||
err = dec.Decode(&key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -115,8 +115,13 @@ func (c CmdFind) findInTree(s restic.Server, blob restic.Blob, path string) ([]f
|
|||
return results, nil
|
||||
}
|
||||
|
||||
func (c CmdFind) findInSnapshot(s restic.Server, id backend.ID) error {
|
||||
debug.Log("restic.find", "searching in snapshot %s\n for entries within [%s %s]", id, c.oldest, c.newest)
|
||||
func (c CmdFind) findInSnapshot(s restic.Server, name string) error {
|
||||
debug.Log("restic.find", "searching in snapshot %s\n for entries within [%s %s]", name, c.oldest, c.newest)
|
||||
|
||||
id, err := backend.ParseID(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
sn, err := restic.LoadSnapshot(s, id)
|
||||
if err != nil {
|
||||
|
@ -182,12 +187,9 @@ func (c CmdFind) Execute(args []string) error {
|
|||
return c.findInSnapshot(s, snapshotID)
|
||||
}
|
||||
|
||||
list, err := s.List(backend.Snapshot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, snapshotID := range list {
|
||||
done := make(chan struct{})
|
||||
defer close(done)
|
||||
for snapshotID := range s.List(backend.Snapshot, done) {
|
||||
err := c.findInSnapshot(s, snapshotID)
|
||||
|
||||
if err != nil {
|
||||
|
|
|
@ -55,7 +55,7 @@ func fsckFile(opts CmdFsck, s restic.Server, m *restic.Map, IDs []backend.ID) (u
|
|||
}
|
||||
} else {
|
||||
// test if data blob is there
|
||||
ok, err := s.Test(backend.Data, blob.Storage)
|
||||
ok, err := s.Test(backend.Data, blob.Storage.String())
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
@ -201,14 +201,19 @@ func (cmd CmdFsck) Execute(args []string) error {
|
|||
}
|
||||
|
||||
if cmd.Snapshot != "" {
|
||||
snapshotID, err := s.FindSnapshot(cmd.Snapshot)
|
||||
name, err := s.FindSnapshot(cmd.Snapshot)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid id %q: %v", cmd.Snapshot, err)
|
||||
}
|
||||
|
||||
err = fsck_snapshot(cmd, s, snapshotID)
|
||||
id, err := backend.ParseID(name)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "check for snapshot %v failed\n", snapshotID)
|
||||
fmt.Fprintf(os.Stderr, "invalid snapshot id %v\n", name)
|
||||
}
|
||||
|
||||
err = fsck_snapshot(cmd, s, id)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "check for snapshot %v failed\n", id)
|
||||
}
|
||||
|
||||
return err
|
||||
|
@ -219,17 +224,20 @@ func (cmd CmdFsck) Execute(args []string) error {
|
|||
cmd.o_trees = backend.NewIDSet()
|
||||
}
|
||||
|
||||
list, err := s.List(backend.Snapshot)
|
||||
debug.Log("restic.fsck", "checking %d snapshots\n", len(list))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
done := make(chan struct{})
|
||||
defer close(done)
|
||||
|
||||
var firstErr error
|
||||
for _, snapshotID := range list {
|
||||
err := fsck_snapshot(cmd, s, snapshotID)
|
||||
for name := range s.List(backend.Snapshot, done) {
|
||||
id, err := backend.ParseID(name)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "check for snapshot %v failed\n", snapshotID)
|
||||
fmt.Fprintf(os.Stderr, "invalid snapshot id %v\n", name)
|
||||
continue
|
||||
}
|
||||
|
||||
err = fsck_snapshot(cmd, s, id)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "check for snapshot %v failed\n", id)
|
||||
firstErr = err
|
||||
}
|
||||
}
|
||||
|
@ -252,13 +260,16 @@ func (cmd CmdFsck) Execute(args []string) error {
|
|||
for _, d := range l {
|
||||
debug.Log("restic.fsck", "checking for orphaned %v\n", d.desc)
|
||||
|
||||
blobs, err := s.List(d.tpe)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
done := make(chan struct{})
|
||||
|
||||
for _, id := range blobs {
|
||||
err := d.set.Find(id)
|
||||
for name := range s.List(d.tpe, done) {
|
||||
id, err := backend.ParseID(name)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "invalid id for %v: %v\n", d.tpe, name)
|
||||
continue
|
||||
}
|
||||
|
||||
err = d.set.Find(id)
|
||||
if err != nil {
|
||||
if !cmd.RemoveOrphaned {
|
||||
fmt.Printf("orphaned %v %v\n", d.desc, id)
|
||||
|
@ -266,7 +277,7 @@ func (cmd CmdFsck) Execute(args []string) error {
|
|||
}
|
||||
|
||||
fmt.Printf("removing orphaned %v %v\n", d.desc, id)
|
||||
err := s.Remove(d.tpe, id)
|
||||
err := s.Remove(d.tpe, name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -31,22 +31,25 @@ func list_keys(s restic.Server) error {
|
|||
return err
|
||||
}
|
||||
|
||||
s.EachID(backend.Key, func(id backend.ID) {
|
||||
k, err := restic.LoadKey(s, id)
|
||||
done := make(chan struct{})
|
||||
defer close(done)
|
||||
|
||||
for name := range s.List(backend.Key, done) {
|
||||
k, err := restic.LoadKey(s, name)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "LoadKey() failed: %v\n", err)
|
||||
return
|
||||
continue
|
||||
}
|
||||
|
||||
var current string
|
||||
if id.Equal(s.Key().ID()) {
|
||||
if name == s.Key().Name() {
|
||||
current = "*"
|
||||
} else {
|
||||
current = " "
|
||||
}
|
||||
tab.Rows = append(tab.Rows, []interface{}{current, id[:plen],
|
||||
tab.Rows = append(tab.Rows, []interface{}{current, name[:plen],
|
||||
k.Username, k.Hostname, k.Created.Format(TimeFormat)})
|
||||
})
|
||||
}
|
||||
|
||||
tab.Write(os.Stdout)
|
||||
|
||||
|
@ -71,17 +74,17 @@ func add_key(s restic.Server) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func delete_key(s restic.Server, id backend.ID) error {
|
||||
if id.Equal(s.Key().ID()) {
|
||||
func delete_key(s restic.Server, name string) error {
|
||||
if name == s.Key().Name() {
|
||||
return errors.New("refusing to remove key currently used to access repository")
|
||||
}
|
||||
|
||||
err := s.Remove(backend.Key, id)
|
||||
err := s.Remove(backend.Key, name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Printf("removed key %v\n", id)
|
||||
fmt.Printf("removed key %v\n", name)
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -100,7 +103,7 @@ func change_password(s restic.Server) error {
|
|||
}
|
||||
|
||||
// remove old key
|
||||
err = s.Remove(backend.Key, s.Key().ID())
|
||||
err = s.Remove(backend.Key, s.Key().Name())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -49,7 +49,9 @@ func (cmd CmdList) Execute(args []string) error {
|
|||
return errors.New("invalid type")
|
||||
}
|
||||
|
||||
return s.EachID(t, func(id backend.ID) {
|
||||
for id := range s.List(t, nil) {
|
||||
fmt.Printf("%s\n", id)
|
||||
})
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -76,7 +76,12 @@ func (cmd CmdLs) Execute(args []string) error {
|
|||
return err
|
||||
}
|
||||
|
||||
id, err := backend.FindSnapshot(s, args[0])
|
||||
name, err := backend.FindSnapshot(s, args[0])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
id, err := backend.ParseID(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -35,7 +35,12 @@ func (cmd CmdRestore) Execute(args []string) error {
|
|||
return err
|
||||
}
|
||||
|
||||
id, err := backend.FindSnapshot(s, args[0])
|
||||
name, err := backend.FindSnapshot(s, args[0])
|
||||
if err != nil {
|
||||
errx(1, "invalid id %q: %v", args[0], err)
|
||||
}
|
||||
|
||||
id, err := backend.ParseID(name)
|
||||
if err != nil {
|
||||
errx(1, "invalid id %q: %v", args[0], err)
|
||||
}
|
||||
|
|
|
@ -101,12 +101,21 @@ func (cmd CmdSnapshots) Execute(args []string) error {
|
|||
tab.Header = fmt.Sprintf("%-8s %-19s %-10s %s", "ID", "Date", "Source", "Directory")
|
||||
tab.RowFormat = "%-8s %-19s %-10s %s"
|
||||
|
||||
done := make(chan struct{})
|
||||
defer close(done)
|
||||
|
||||
list := []*restic.Snapshot{}
|
||||
s.EachID(backend.Snapshot, func(id backend.ID) {
|
||||
for name := range s.List(backend.Snapshot, done) {
|
||||
id, err := backend.ParseID(name)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "error parsing id: %v", name)
|
||||
continue
|
||||
}
|
||||
|
||||
sn, err := restic.LoadSnapshot(s, id)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "error loading snapshot %s: %v\n", id, err)
|
||||
return
|
||||
continue
|
||||
}
|
||||
|
||||
pos := sort.Search(len(list), func(i int) bool {
|
||||
|
@ -120,7 +129,7 @@ func (cmd CmdSnapshots) Execute(args []string) error {
|
|||
} else {
|
||||
list = append(list, sn)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
plen, err := s.PrefixLength(backend.Snapshot)
|
||||
if err != nil {
|
||||
|
|
|
@ -12,6 +12,8 @@ import (
|
|||
"github.com/jessevdk/go-flags"
|
||||
"github.com/restic/restic"
|
||||
"github.com/restic/restic/backend"
|
||||
"github.com/restic/restic/backend/local"
|
||||
"github.com/restic/restic/backend/sftp"
|
||||
"github.com/restic/restic/debug"
|
||||
)
|
||||
|
||||
|
@ -79,7 +81,7 @@ func (cmd CmdInit) Execute(args []string) error {
|
|||
os.Exit(1)
|
||||
}
|
||||
|
||||
fmt.Printf("created restic backend %v at %s\n", s.ID().Str(), opts.Repo)
|
||||
fmt.Printf("created restic backend %v at %s\n", s.ID(), opts.Repo)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
@ -96,7 +98,7 @@ func open(u string) (backend.Backend, error) {
|
|||
}
|
||||
|
||||
if url.Scheme == "" {
|
||||
return backend.OpenLocal(url.Path)
|
||||
return local.Open(url.Path)
|
||||
}
|
||||
|
||||
args := []string{url.Host}
|
||||
|
@ -106,7 +108,7 @@ func open(u string) (backend.Backend, error) {
|
|||
}
|
||||
args = append(args, "-s")
|
||||
args = append(args, "sftp")
|
||||
return backend.OpenSFTP(url.Path[1:], "ssh", args...)
|
||||
return sftp.Open(url.Path[1:], "ssh", args...)
|
||||
}
|
||||
|
||||
// Create the backend specified by URI.
|
||||
|
@ -117,7 +119,7 @@ func create(u string) (backend.Backend, error) {
|
|||
}
|
||||
|
||||
if url.Scheme == "" {
|
||||
return backend.CreateLocal(url.Path)
|
||||
return local.Create(url.Path)
|
||||
}
|
||||
|
||||
args := []string{url.Host}
|
||||
|
@ -127,7 +129,7 @@ func create(u string) (backend.Backend, error) {
|
|||
}
|
||||
args = append(args, "-s")
|
||||
args = append(args, "sftp")
|
||||
return backend.CreateSFTP(url.Path[1:], "ssh", args...)
|
||||
return sftp.Create(url.Path[1:], "ssh", args...)
|
||||
}
|
||||
|
||||
func OpenRepo() (restic.Server, error) {
|
||||
|
|
57
key.go
|
@ -2,6 +2,7 @@ package restic
|
|||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"crypto/sha256"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
@ -57,7 +58,7 @@ type Key struct {
|
|||
user *MasterKeys
|
||||
master *MasterKeys
|
||||
|
||||
id backend.ID
|
||||
name string
|
||||
}
|
||||
|
||||
// MasterKeys holds signing and encryption keys for a repository. It is stored
|
||||
|
@ -74,9 +75,9 @@ func CreateKey(s Server, password string) (*Key, error) {
|
|||
return AddKey(s, password, nil)
|
||||
}
|
||||
|
||||
// OpenKey tries to decrypt the key specified by id with the given password.
func OpenKey(s Server, id backend.ID, password string) (*Key, error) {
	k, err := LoadKey(s, id)
// OpenKey tries to decrypt the key specified by name with the given password.
func OpenKey(s Server, name string, password string) (*Key, error) {
	k, err := LoadKey(s, name)
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -104,7 +105,7 @@ func OpenKey(s Server, id backend.ID, password string) (*Key, error) {
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
k.id = id
|
||||
k.name = name
|
||||
|
||||
return k, nil
|
||||
}
|
||||
|
@ -112,16 +113,11 @@ func OpenKey(s Server, id backend.ID, password string) (*Key, error) {
|
|||
// SearchKey tries to decrypt all keys in the backend with the given password.
// If none could be found, ErrNoKeyFound is returned.
func SearchKey(s Server, password string) (*Key, error) {
	// list all keys
	ids, err := s.List(backend.Key)
	if err != nil {
		panic(err)
	}

	// try all keys in repo
	var key *Key
	for _, id := range ids {
		key, err = OpenKey(s, id, password)
	done := make(chan struct{})
	defer close(done)
	for name := range s.List(backend.Key, done) {
		key, err := OpenKey(s, name, password)
		if err != nil {
			continue
		}
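
Because List stops sending once done is closed, SearchKey can return as soon as one key decrypts without leaking the listing goroutine. The call site stays a one-liner; a sketch with illustrative error wrapping:

package example

import (
	"fmt"

	"github.com/restic/restic"
)

// openRepo tries the password against every key in the repository and fails
// if none of them matches.
func openRepo(s restic.Server, password string) (*restic.Key, error) {
	key, err := restic.SearchKey(s, password)
	if err != nil {
		return nil, fmt.Errorf("unable to open repository: %v", err)
	}

	return key, nil
}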
@ -133,21 +129,23 @@ func SearchKey(s Server, password string) (*Key, error) {
|
|||
}
|
||||
|
||||
// LoadKey loads a key from the backend.
|
||||
func LoadKey(s Server, id backend.ID) (*Key, error) {
|
||||
func LoadKey(s Server, name string) (*Key, error) {
|
||||
// extract data from repo
|
||||
data, err := s.Get(backend.Key, id)
|
||||
rd, err := s.Get(backend.Key, name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rd.Close()
|
||||
|
||||
// restore json
|
||||
k := &Key{}
|
||||
err = json.Unmarshal(data, k)
|
||||
dec := json.NewDecoder(rd)
|
||||
k := Key{}
|
||||
err = dec.Decode(&k)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return k, err
|
||||
return &k, nil
|
||||
}
|
||||
|
||||
// AddKey adds a new key to an already existing repository.
|
||||
|
@ -209,27 +207,26 @@ func AddKey(s Server, password string, template *Key) (*Key, error) {
|
|||
}
|
||||
|
||||
// store in repository and return
|
||||
blob, err := s.Create(backend.Key)
|
||||
blob, err := s.Create()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
_, err = blob.Write(buf)
|
||||
plainhw := backend.NewHashingWriter(blob, sha256.New())
|
||||
|
||||
_, err = plainhw.Write(buf)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = blob.Close()
|
||||
name := backend.ID(plainhw.Sum(nil)).String()
|
||||
|
||||
err = blob.Finalize(backend.Key, name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
id, err := blob.ID()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
newkey.id = id
|
||||
newkey.name = name
|
||||
|
||||
FreeChunkBuf("key", newkey.Data)
|
||||
|
||||
|
@ -322,6 +319,6 @@ func (k *Key) String() string {
|
|||
return fmt.Sprintf("<Key of %s@%s, created on %s>", k.Username, k.Hostname, k.Created)
|
||||
}
|
||||
|
||||
func (k Key) ID() backend.ID {
|
||||
return k.id
|
||||
func (k Key) Name() string {
|
||||
return k.name
|
||||
}
|
||||
|
|
|
@ -8,7 +8,7 @@ import (
|
|||
"testing"
|
||||
|
||||
"github.com/restic/restic"
|
||||
"github.com/restic/restic/backend"
|
||||
"github.com/restic/restic/backend/local"
|
||||
)
|
||||
|
||||
var testPassword = "foobar"
|
||||
|
@ -21,7 +21,7 @@ func setupBackend(t testing.TB) restic.Server {
|
|||
ok(t, err)
|
||||
|
||||
// create repository below temp dir
|
||||
b, err := backend.CreateLocal(filepath.Join(tempdir, "repo"))
|
||||
b, err := local.Create(filepath.Join(tempdir, "repo"))
|
||||
ok(t, err)
|
||||
|
||||
// set cache dir
|
||||
|
@ -33,7 +33,7 @@ func setupBackend(t testing.TB) restic.Server {
|
|||
|
||||
func teardownBackend(t testing.TB, s restic.Server) {
|
||||
if !*testCleanup {
|
||||
l := s.Backend().(*backend.Local)
|
||||
l := s.Backend().(*local.Local)
|
||||
t.Logf("leaving local backend at %s\n", l.Location())
|
||||
return
|
||||
}
|
||||
|
|
143
server.go
|
@ -6,6 +6,7 @@ import (
|
|||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"sync"
|
||||
|
||||
"github.com/restic/restic/backend"
|
||||
|
@ -25,23 +26,17 @@ func NewServerWithKey(be backend.Backend, key *Key) Server {
|
|||
return Server{be: be, key: key}
|
||||
}
|
||||
|
||||
// Each lists all entries of type t in the backend and calls function f() with
|
||||
// the id.
|
||||
func (s Server) EachID(t backend.Type, f func(backend.ID)) error {
|
||||
return backend.EachID(s.be, t, f)
|
||||
}
|
||||
|
||||
// Find loads the list of all blobs of type t and searches for IDs which start
|
||||
// Find loads the list of all blobs of type t and searches for names which start
|
||||
// with prefix. If none is found, nil and ErrNoIDPrefixFound is returned. If
|
||||
// more than one is found, nil and ErrMultipleIDMatches is returned.
|
||||
func (s Server) Find(t backend.Type, prefix string) (backend.ID, error) {
|
||||
func (s Server) Find(t backend.Type, prefix string) (string, error) {
|
||||
return backend.Find(s.be, t, prefix)
|
||||
}
|
||||
|
||||
// FindSnapshot takes a string and tries to find a snapshot whose ID matches
|
||||
// the string as closely as possible.
|
||||
func (s Server) FindSnapshot(id string) (backend.ID, error) {
|
||||
return backend.FindSnapshot(s.be, id)
|
||||
func (s Server) FindSnapshot(name string) (string, error) {
|
||||
return backend.FindSnapshot(s.be, name)
|
||||
}
|
||||
|
||||
// PrefixLength returns the number of bytes required so that all prefixes of
|
||||
|
@ -53,11 +48,21 @@ func (s Server) PrefixLength(t backend.Type) (int, error) {
|
|||
// Load tries to load and decrypt content identified by t and blob from the backend.
|
||||
func (s Server) Load(t backend.Type, blob Blob) ([]byte, error) {
|
||||
// load data
|
||||
buf, err := s.Get(t, blob.Storage)
|
||||
rd, err := s.Get(t, blob.Storage.String())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
buf, err := ioutil.ReadAll(rd)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// check hash
|
||||
if !backend.Hash(buf).Equal(blob.Storage) {
|
||||
return nil, errors.New("invalid data returned")
|
||||
}
|
||||
|
||||
// check length
|
||||
if len(buf) != int(blob.StorageSize) {
|
||||
return nil, errors.New("Invalid storage length")
|
||||
|
@ -86,7 +91,12 @@ func (s Server) Load(t backend.Type, blob Blob) ([]byte, error) {
|
|||
// Load tries to load and decrypt content identified by t and id from the backend.
|
||||
func (s Server) LoadID(t backend.Type, storageID backend.ID) ([]byte, error) {
|
||||
// load data
|
||||
buf, err := s.Get(t, storageID)
|
||||
rd, err := s.Get(t, storageID.String())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
buf, err := ioutil.ReadAll(rd)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -103,14 +113,14 @@ func (s Server) LoadID(t backend.Type, storageID backend.ID) ([]byte, error) {
|
|||
// LoadJSON calls Load() to get content from the backend and afterwards calls
|
||||
// json.Unmarshal on the item.
|
||||
func (s Server) LoadJSON(t backend.Type, blob Blob, item interface{}) error {
|
||||
return s.LoadJSONID(t, blob.Storage, item)
|
||||
return s.LoadJSONID(t, blob.Storage.String(), item)
|
||||
}
|
||||
|
||||
// LoadJSONID calls Load() to get content from the backend and afterwards calls
|
||||
// json.Unmarshal on the item.
|
||||
func (s Server) LoadJSONID(t backend.Type, storageID backend.ID, item interface{}) error {
|
||||
func (s Server) LoadJSONID(t backend.Type, name string, item interface{}) error {
|
||||
// read
|
||||
rd, err := s.GetReader(t, storageID)
|
||||
rd, err := s.Get(t, name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -169,8 +179,11 @@ func (s Server) Save(t backend.Type, data []byte, id backend.ID) (Blob, error) {
|
|||
|
||||
ciphertext = ciphertext[:n]
|
||||
|
||||
// compute ciphertext hash
|
||||
sid := backend.Hash(ciphertext)
|
||||
|
||||
// save blob
|
||||
backendBlob, err := s.Create(t)
|
||||
backendBlob, err := s.Create()
|
||||
if err != nil {
|
||||
return Blob{}, err
|
||||
}
|
||||
|
@ -180,12 +193,7 @@ func (s Server) Save(t backend.Type, data []byte, id backend.ID) (Blob, error) {
|
|||
return Blob{}, err
|
||||
}
|
||||
|
||||
err = backendBlob.Close()
|
||||
if err != nil {
|
||||
return Blob{}, err
|
||||
}
|
||||
|
||||
sid, err := backendBlob.ID()
|
||||
err = backendBlob.Finalize(t, sid.String())
|
||||
if err != nil {
|
||||
return Blob{}, err
|
||||
}
|
||||
|
@ -202,12 +210,13 @@ func (s Server) SaveFrom(t backend.Type, id backend.ID, length uint, rd io.Reade
|
|||
return Blob{}, errors.New("id is nil")
|
||||
}
|
||||
|
||||
backendBlob, err := s.Create(t)
|
||||
backendBlob, err := s.Create()
|
||||
if err != nil {
|
||||
return Blob{}, err
|
||||
}
|
||||
|
||||
encWr := s.key.EncryptTo(backendBlob)
|
||||
hw := backend.NewHashingWriter(backendBlob, sha256.New())
|
||||
encWr := s.key.EncryptTo(hw)
|
||||
|
||||
_, err = io.Copy(encWr, rd)
|
||||
if err != nil {
|
||||
|
@ -221,20 +230,16 @@ func (s Server) SaveFrom(t backend.Type, id backend.ID, length uint, rd io.Reade
|
|||
}
|
||||
|
||||
// finish backend blob
|
||||
err = backendBlob.Close()
|
||||
sid := backend.ID(hw.Sum(nil))
|
||||
err = backendBlob.Finalize(t, sid.String())
|
||||
if err != nil {
|
||||
return Blob{}, fmt.Errorf("backend.Blob.Close(): %v", err)
|
||||
}
|
||||
|
||||
storageID, err := backendBlob.ID()
|
||||
if err != nil {
|
||||
return Blob{}, fmt.Errorf("backend.Blob.ID(): %v", err)
|
||||
}
|
||||
|
||||
return Blob{
|
||||
ID: id,
|
||||
Size: uint64(length),
|
||||
Storage: storageID,
|
||||
Storage: sid,
|
||||
StorageSize: uint64(backendBlob.Size()),
|
||||
}, nil
|
||||
}
|
||||
|
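
The storage ID is now computed on the fly: the plaintext is encrypted into a HashingWriter that wraps the backend blob, so the SHA-256 of the ciphertext is known the moment the copy finishes. The sketch below mirrors that idea with the standard library only; it is a stand-in for backend.NewHashingWriter, not its actual implementation:

package example

import (
	"crypto/sha256"
	"hash"
	"io"
)

// hashingWriter forwards every write to the underlying writer and feeds the
// same bytes into the hash.
type hashingWriter struct {
	w io.Writer
	h hash.Hash
}

func newHashingWriter(w io.Writer) *hashingWriter {
	return &hashingWriter{w: w, h: sha256.New()}
}

func (hw *hashingWriter) Write(p []byte) (int, error) {
	n, err := hw.w.Write(p)
	hw.h.Write(p[:n])
	return n, err
}

func (hw *hashingWriter) Sum(d []byte) []byte { return hw.h.Sum(d) }

// storeStream copies rd into dst and returns the SHA-256 of the bytes that
// actually reached dst; that sum can then serve as the storage name.
func storeStream(dst io.Writer, rd io.Reader) ([]byte, error) {
	hw := newHashingWriter(dst)
	if _, err := io.Copy(hw, rd); err != nil {
		return nil, err
	}
	return hw.Sum(nil), nil
}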
@ -242,15 +247,16 @@ func (s Server) SaveFrom(t backend.Type, id backend.ID, length uint, rd io.Reade
|
|||
// SaveJSON serialises item as JSON and encrypts and saves it in the backend as
|
||||
// type t.
|
||||
func (s Server) SaveJSON(t backend.Type, item interface{}) (Blob, error) {
|
||||
backendBlob, err := s.Create(t)
|
||||
backendBlob, err := s.Create()
|
||||
if err != nil {
|
||||
return Blob{}, fmt.Errorf("Create: %v", err)
|
||||
}
|
||||
|
||||
encWr := s.key.EncryptTo(backendBlob)
|
||||
hw := backend.NewHashingWriter(encWr, sha256.New())
|
||||
storagehw := backend.NewHashingWriter(backendBlob, sha256.New())
|
||||
encWr := s.key.EncryptTo(storagehw)
|
||||
plainhw := backend.NewHashingWriter(encWr, sha256.New())
|
||||
|
||||
enc := json.NewEncoder(hw)
|
||||
enc := json.NewEncoder(plainhw)
|
||||
err = enc.Encode(item)
|
||||
if err != nil {
|
||||
return Blob{}, fmt.Errorf("json.NewEncoder: %v", err)
|
||||
|
@ -263,21 +269,18 @@ func (s Server) SaveJSON(t backend.Type, item interface{}) (Blob, error) {
|
|||
}
|
||||
|
||||
// finish backend blob
|
||||
err = backendBlob.Close()
|
||||
sid := backend.ID(storagehw.Sum(nil))
|
||||
err = backendBlob.Finalize(t, sid.String())
|
||||
if err != nil {
|
||||
return Blob{}, fmt.Errorf("backend.Blob.Close(): %v", err)
|
||||
}
|
||||
|
||||
id := hw.Sum(nil)
|
||||
storageID, err := backendBlob.ID()
|
||||
if err != nil {
|
||||
return Blob{}, fmt.Errorf("backend.Blob.ID(): %v", err)
|
||||
}
|
||||
id := backend.ID(plainhw.Sum(nil))
|
||||
|
||||
return Blob{
|
||||
ID: id,
|
||||
Size: uint64(hw.Size()),
|
||||
Storage: storageID,
|
||||
Size: uint64(plainhw.Size()),
|
||||
Storage: sid,
|
||||
StorageSize: uint64(backendBlob.Size()),
|
||||
}, nil
|
||||
}
|
||||
|
@ -354,53 +357,55 @@ func (s Server) Stats() (ServerStats, error) {
|
|||
|
||||
// list ids
|
||||
trees := 0
|
||||
err := s.EachID(backend.Tree, func(id backend.ID) {
|
||||
done := make(chan struct{})
|
||||
defer close(done)
|
||||
for name := range s.List(backend.Tree, done) {
|
||||
trees++
|
||||
id, err := backend.ParseID(name)
|
||||
if err != nil {
|
||||
debug.Log("Server.Stats", "unable to parse name %v as id: %v", name, err)
|
||||
continue
|
||||
}
|
||||
idCh <- id
|
||||
})
|
||||
}
|
||||
|
||||
close(idCh)
|
||||
|
||||
// wait for workers
|
||||
wg.Wait()
|
||||
|
||||
return ServerStats{Blobs: uint(blobs.Len()), Trees: uint(trees)}, err
|
||||
return ServerStats{Blobs: uint(blobs.Len()), Trees: uint(trees)}, nil
|
||||
}
|
||||
|
||||
// Count counts the number of objects of type t in the backend.
func (s Server) Count(t backend.Type) (int, error) {
	l, err := s.be.List(t)
	if err != nil {
		return 0, err
	}

	return len(l), nil
}

// Count returns the number of blobs of a given type in the backend.
func (s Server) Count(t backend.Type) (n int) {
	for range s.List(t, nil) {
		n++
	}

	return
}
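
Count simply drains List with a nil done channel, so it blocks until the listing goroutine has sent every name. A trivial use, assuming an open restic.Server:

package example

import (
	"fmt"

	"github.com/restic/restic"
	"github.com/restic/restic/backend"
)

// printCounts reports how many tree and data blobs the repository holds.
func printCounts(s restic.Server) {
	fmt.Printf("trees: %d, data blobs: %d\n",
		s.Count(backend.Tree), s.Count(backend.Data))
}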
// Proxy methods to backend
|
||||
|
||||
func (s Server) List(t backend.Type) (backend.IDs, error) {
|
||||
return s.be.List(t)
|
||||
func (s Server) List(t backend.Type, done <-chan struct{}) <-chan string {
|
||||
return s.be.List(t, done)
|
||||
}
|
||||
|
||||
func (s Server) Get(t backend.Type, id backend.ID) ([]byte, error) {
|
||||
return s.be.Get(t, id)
|
||||
func (s Server) Get(t backend.Type, name string) (io.ReadCloser, error) {
|
||||
return s.be.Get(t, name)
|
||||
}
|
||||
|
||||
func (s Server) GetReader(t backend.Type, id backend.ID) (io.ReadCloser, error) {
|
||||
return s.be.GetReader(t, id)
|
||||
func (s Server) Create() (backend.Blob, error) {
|
||||
return s.be.Create()
|
||||
}
|
||||
|
||||
func (s Server) Create(t backend.Type) (backend.Blob, error) {
|
||||
return s.be.Create(t)
|
||||
func (s Server) Test(t backend.Type, name string) (bool, error) {
|
||||
return s.be.Test(t, name)
|
||||
}
|
||||
|
||||
func (s Server) Test(t backend.Type, id backend.ID) (bool, error) {
|
||||
return s.be.Test(t, id)
|
||||
}
|
||||
|
||||
func (s Server) Remove(t backend.Type, id backend.ID) error {
|
||||
return s.be.Remove(t, id)
|
||||
func (s Server) Remove(t backend.Type, name string) error {
|
||||
return s.be.Remove(t, name)
|
||||
}
|
||||
|
||||
func (s Server) Close() error {
|
||||
|
@ -415,6 +420,10 @@ func (s Server) Delete() error {
|
|||
return errors.New("Delete() called for backend that does not implement this method")
|
||||
}
|
||||
|
||||
func (s Server) ID() backend.ID {
|
||||
func (s Server) ID() string {
|
||||
return s.be.ID()
|
||||
}
|
||||
|
||||
func (s Server) Location() string {
|
||||
return s.be.Location()
|
||||
}
|
||||
|
|
|
@ -158,15 +158,13 @@ func TestLoadJSONID(t *testing.T) {
|
|||
t.Logf("archived snapshot %v", sn.ID())
|
||||
|
||||
// benchmark loading first tree
|
||||
list, err := server.List(backend.Tree)
|
||||
ok(t, err)
|
||||
assert(t, len(list) > 0,
|
||||
"no Trees in repository found")
|
||||
|
||||
treeID := list[0]
|
||||
done := make(chan struct{})
|
||||
first, found := <-server.List(backend.Tree, done)
|
||||
assert(t, found, "no Trees in repository found")
|
||||
close(done)
|
||||
|
||||
tree := restic.NewTree()
|
||||
err = server.LoadJSONID(backend.Tree, treeID, &tree)
|
||||
err := server.LoadJSONID(backend.Tree, first, &tree)
|
||||
ok(t, err)
|
||||
}
|
||||
|
||||
|
@ -184,19 +182,12 @@ func BenchmarkLoadJSONID(t *testing.B) {
|
|||
sn := snapshot(t, server, *benchArchiveDirectory, nil)
|
||||
t.Logf("archived snapshot %v", sn.ID())
|
||||
|
||||
// benchmark loading first tree
|
||||
list, err := server.List(backend.Tree)
|
||||
ok(t, err)
|
||||
assert(t, len(list) > 0,
|
||||
"no Trees in repository found")
|
||||
|
||||
t.ResetTimer()
|
||||
|
||||
tree := restic.NewTree()
|
||||
for i := 0; i < t.N; i++ {
|
||||
for _, treeID := range list {
|
||||
err = server.LoadJSONID(backend.Tree, treeID, &tree)
|
||||
ok(t, err)
|
||||
for treeID := range be.List(backend.Tree, nil) {
|
||||
ok(t, server.LoadJSONID(backend.Tree, treeID, &tree))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -51,7 +51,7 @@ func NewSnapshot(paths []string) (*Snapshot, error) {
|
|||
|
||||
func LoadSnapshot(s Server, id backend.ID) (*Snapshot, error) {
|
||||
sn := &Snapshot{id: id}
|
||||
err := s.LoadJSONID(backend.Snapshot, id, sn)
|
||||
err := s.LoadJSONID(backend.Snapshot, id.String(), sn)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
2
tree.go
|
@ -32,7 +32,7 @@ func (t Tree) String() string {
|
|||
|
||||
func LoadTree(s Server, id backend.ID) (*Tree, error) {
|
||||
tree := &Tree{}
|
||||
err := s.LoadJSONID(backend.Tree, id, tree)
|
||||
err := s.LoadJSONID(backend.Tree, id.String(), tree)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|