Merge pull request #395 from restic/rework-backend-interface

WIP: Rework backend interface
This commit is contained in:
Alexander Neumann 2016-01-27 22:11:20 +01:00
commit ce4a7f16ca
60 changed files with 2405 additions and 1928 deletions

View file

@ -41,12 +41,15 @@ ENV PATH $PATH:$GOPATH/bin
RUN mkdir -p $GOPATH/src/github.com/restic/restic RUN mkdir -p $GOPATH/src/github.com/restic/restic
# install tools # pre-install tools, this speeds up running the tests itself
RUN go get github.com/tools/godep
RUN go get golang.org/x/tools/cmd/cover RUN go get golang.org/x/tools/cmd/cover
RUN go get github.com/mattn/goveralls RUN go get github.com/mattn/goveralls
RUN go get github.com/mitchellh/gox RUN go get github.com/mitchellh/gox
RUN go get github.com/pierrre/gotestcover RUN go get github.com/pierrre/gotestcover
RUN GO15VENDOREXPERIMENT=1 go get github.com/minio/minio RUN mkdir $HOME/bin \
&& wget -q -O $HOME/bin/minio https://dl.minio.io/server/minio/release/linux-${GOARCH}/minio \
&& chmod +x $HOME/bin/minio
# set TRAVIS_BUILD_DIR for integration script # set TRAVIS_BUILD_DIR for integration script
ENV TRAVIS_BUILD_DIR $GOPATH/src/github.com/restic/restic ENV TRAVIS_BUILD_DIR $GOPATH/src/github.com/restic/restic

View file

@ -1,283 +0,0 @@
package backend_test
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"math/rand"
"sort"
"testing"
crand "crypto/rand"
"github.com/restic/restic/backend"
. "github.com/restic/restic/test"
)
// testBackendConfig stores a config blob in b and checks that it can be
// read back afterwards, no matter which name it is requested under.
func testBackendConfig(b backend.Backend, t *testing.T) {
	// reading the config before one has been written must fail
	_, err := b.Get(backend.Config, "")
	Assert(t, err != nil, "did not get expected error for non-existing config")

	// store a config blob
	wr, err := b.Create()
	OK(t, err)

	_, err = wr.Write([]byte("Config"))
	OK(t, err)
	OK(t, wr.Finalize(backend.Config, ""))

	// the name is not used to address the config, so every name must
	// yield the very same content
	names := []string{"", "foo", "bar", "0000000000000000000000000000000000000000000000000000000000000000"}
	for _, name := range names {
		rd, err := b.Get(backend.Config, name)
		Assert(t, err == nil, "unable to read config")

		content, err := ioutil.ReadAll(rd)
		OK(t, err)
		OK(t, rd.Close())
		Assert(t, string(content) == "Config", "wrong data returned for config")
	}
}
// testGetReader stores a random blob in b and reads parts of it back via
// GetReader using many random offset/length combinations, comparing each
// result against the corresponding slice of the original data. The blob is
// removed again at the end.
func testGetReader(b backend.Backend, t testing.TB) {
	length := rand.Intn(1<<24) + 2000

	data := make([]byte, length)
	_, err := io.ReadFull(crand.Reader, data)
	OK(t, err)

	blob, err := b.Create()
	OK(t, err)

	id := backend.Hash(data)

	// data is already a []byte; the previous []byte(data) conversion
	// copied the whole buffer for no reason.
	_, err = blob.Write(data)
	OK(t, err)
	OK(t, blob.Finalize(backend.Data, id.String()))

	for i := 0; i < 500; i++ {
		// offset and length may both exceed the blob size on purpose,
		// the expected slice is clamped below
		l := rand.Intn(length + 2000)
		o := rand.Intn(length + 2000)

		// compute the expected data for offset o and length l
		d := data
		if o < len(d) {
			d = d[o:]
		} else {
			o = len(d)
			d = d[:0]
		}

		if l > 0 && l < len(d) {
			d = d[:l]
		}

		rd, err := b.GetReader(backend.Data, id.String(), uint(o), uint(l))
		OK(t, err)
		buf, err := ioutil.ReadAll(rd)
		OK(t, err)

		if !bytes.Equal(buf, d) {
			t.Fatalf("data not equal")
		}
	}

	OK(t, b.Remove(backend.Data, id.String()))
}
// testWrite stores a random payload ten times, each time writing it in
// randomly sized chunks, and verifies that reading the blob back yields
// exactly the original content.
func testWrite(b backend.Backend, t testing.TB) {
	size := rand.Intn(1<<23) + 2000

	payload := make([]byte, size)
	_, err := io.ReadFull(crand.Reader, payload)
	OK(t, err)

	id := backend.Hash(payload)

	for i := 0; i < 10; i++ {
		wr, err := b.Create()
		OK(t, err)

		// write the payload in randomly sized chunks
		pos := 0
		for pos < len(payload) {
			chunk := rand.Intn(len(payload) - pos)
			if len(payload)-pos < 20 {
				chunk = len(payload) - pos
			}

			n, err := wr.Write(payload[pos : pos+chunk])
			OK(t, err)
			if n != chunk {
				t.Fatalf("wrong number of bytes written, want %v, got %v", chunk, n)
			}

			pos += chunk
		}

		name := fmt.Sprintf("%s-%d", id, i)
		OK(t, wr.Finalize(backend.Data, name))

		// read the blob back and compare
		rd, err := b.Get(backend.Data, name)
		OK(t, err)

		content, err := ioutil.ReadAll(rd)
		OK(t, err)

		if len(content) != len(payload) {
			t.Fatalf("number of bytes does not match, want %v, got %v", len(payload), len(content))
		}

		if !bytes.Equal(content, payload) {
			t.Fatalf("data not equal")
		}
	}
}
// store saves data in b under the given blob type, using the SHA-256 hash
// of the data as the blob name.
func store(t testing.TB, b backend.Backend, tpe backend.Type, data []byte) {
	id := backend.Hash(data)

	blob, err := b.Create()
	OK(t, err)

	// data is already a []byte; the previous []byte(data) conversion
	// copied the whole buffer for no reason.
	_, err = blob.Write(data)
	OK(t, err)
	OK(t, blob.Finalize(tpe, id.String()))
}
// read drains rd completely and, when expectedData is non-nil, asserts that
// the bytes read match it exactly.
func read(t testing.TB, rd io.Reader, expectedData []byte) {
	content, err := ioutil.ReadAll(rd)
	OK(t, err)

	if expectedData == nil {
		return
	}
	Equals(t, expectedData, content)
}
// testBackend runs the full integration test suite against b: config
// handling, non-existence checks, storing and reading blobs of every type,
// duplicate-name handling, removal, listing, and finally the reader and
// write tests. The steps are strictly order-dependent (blobs created in one
// phase are read and removed in later phases).
func testBackend(b backend.Backend, t *testing.T) {
	testBackendConfig(b, t)

	for _, tpe := range []backend.Type{
		backend.Data, backend.Key, backend.Lock,
		backend.Snapshot, backend.Index,
	} {
		// detect non-existing files
		for _, test := range TestStrings {
			id, err := backend.ParseID(test.id)
			OK(t, err)

			// test if blob is already in repository
			ret, err := b.Test(tpe, id.String())
			OK(t, err)
			Assert(t, !ret, "blob was found to exist before creating")

			// try to open not existing blob
			_, err = b.Get(tpe, id.String())
			Assert(t, err != nil, "blob data could be extracted before creation")

			// try to read not existing blob
			_, err = b.GetReader(tpe, id.String(), 0, 1)
			Assert(t, err != nil, "blob reader could be obtained before creation")

			// try to get string out, should fail
			ret, err = b.Test(tpe, id.String())
			OK(t, err)
			Assert(t, !ret, "id %q was found (but should not have)", test.id)
		}

		// add files
		for _, test := range TestStrings {
			store(t, b, tpe, []byte(test.data))

			// test Get()
			rd, err := b.Get(tpe, test.id)
			OK(t, err)
			Assert(t, rd != nil, "Get() returned nil")

			read(t, rd, []byte(test.data))
			OK(t, rd.Close())

			// test GetReader()
			rd, err = b.GetReader(tpe, test.id, 0, uint(len(test.data)))
			OK(t, err)
			Assert(t, rd != nil, "GetReader() returned nil")

			read(t, rd, []byte(test.data))
			OK(t, rd.Close())

			// try to read it out with an offset and a length
			start := 1
			end := len(test.data) - 2
			length := end - start

			rd, err = b.GetReader(tpe, test.id, uint(start), uint(length))
			OK(t, err)
			Assert(t, rd != nil, "GetReader() returned nil")

			read(t, rd, []byte(test.data[start:end]))
			OK(t, rd.Close())
		}

		// test adding the first file again: finalizing under an existing
		// name must fail
		test := TestStrings[0]

		// create blob
		blob, err := b.Create()
		OK(t, err)

		_, err = blob.Write([]byte(test.data))
		OK(t, err)
		err = blob.Finalize(tpe, test.id)
		Assert(t, err != nil, "expected error, got %v", err)

		// remove and recreate
		err = b.Remove(tpe, test.id)
		OK(t, err)

		// test that the blob is gone
		ok, err := b.Test(tpe, test.id)
		OK(t, err)
		Assert(t, ok == false, "removed blob still present")

		// create blob again, this time via io.Copy instead of Write
		blob, err = b.Create()
		OK(t, err)

		_, err = io.Copy(blob, bytes.NewReader([]byte(test.data)))
		OK(t, err)
		OK(t, blob.Finalize(tpe, test.id))

		// list items; List is expected to yield the IDs in sorted order
		// (the expected list is sorted below before comparing)
		IDs := backend.IDs{}

		for _, test := range TestStrings {
			id, err := backend.ParseID(test.id)
			OK(t, err)
			IDs = append(IDs, id)
		}

		sort.Sort(IDs)

		i := 0
		for s := range b.List(tpe, nil) {
			Equals(t, IDs[i].String(), s)
			i++
		}

		// remove content if requested
		if TestCleanup {
			for _, test := range TestStrings {
				id, err := backend.ParseID(test.id)
				OK(t, err)

				// NOTE(review): this first Test() result is never read;
				// only its error is checked before the blob is removed.
				found, err := b.Test(tpe, id.String())
				OK(t, err)

				OK(t, b.Remove(tpe, id.String()))

				found, err = b.Test(tpe, id.String())
				OK(t, err)
				// NOTE(review): the message text is inverted — it fires
				// when the id IS still found after removal.
				Assert(t, !found, fmt.Sprintf("id %q not found after removal", id))
			}
		}
	}

	testGetReader(b, t)
	testWrite(b, t)
}

View file

@ -1,2 +1,5 @@
// Package backend provides local and remote storage for restic repositories. // Package backend provides local and remote storage for restic repositories.
// All backends need to implement the Backend interface. There is a
// MockBackend, which can be used for mocking in tests, and a MemBackend, which
// stores all data in a hash internally.
package backend package backend

View file

@ -1,30 +1,14 @@
package backend package backend
import ( import "errors"
"crypto/sha256"
"errors"
"io"
)
const ( // ErrNoIDPrefixFound is returned by Find() when no ID for the given prefix
MinPrefixLength = 8 // could be found.
) var ErrNoIDPrefixFound = errors.New("no ID found")
var ( // ErrMultipleIDMatches is returned by Find() when multiple IDs with the given
ErrNoIDPrefixFound = errors.New("no ID found") // prefix are found.
ErrMultipleIDMatches = errors.New("multiple IDs with prefix found") var ErrMultipleIDMatches = errors.New("multiple IDs with prefix found")
)
var (
hashData = sha256.Sum256
)
const hashSize = sha256.Size
// Hash returns the ID for data.
func Hash(data []byte) ID {
return hashData(data)
}
// Find loads the list of all blobs of type t and searches for names which // Find loads the list of all blobs of type t and searches for names which
// start with prefix. If none is found, nil and ErrNoIDPrefixFound is returned. // start with prefix. If none is found, nil and ErrNoIDPrefixFound is returned.
@ -53,6 +37,8 @@ func Find(be Lister, t Type, prefix string) (string, error) {
return "", ErrNoIDPrefixFound return "", ErrNoIDPrefixFound
} }
const minPrefixLength = 8
// PrefixLength returns the number of bytes required so that all prefixes of // PrefixLength returns the number of bytes required so that all prefixes of
// all names of type t are unique. // all names of type t are unique.
func PrefixLength(be Lister, t Type) (int, error) { func PrefixLength(be Lister, t Type) (int, error) {
@ -67,7 +53,7 @@ func PrefixLength(be Lister, t Type) (int, error) {
// select prefixes of length l, test if the last one is the same as the current one // select prefixes of length l, test if the last one is the same as the current one
outer: outer:
for l := MinPrefixLength; l < IDSize; l++ { for l := minPrefixLength; l < IDSize; l++ {
var last string var last string
for _, name := range list { for _, name := range list {
@ -82,39 +68,3 @@ outer:
return IDSize, nil return IDSize, nil
} }
// wrap around io.LimitedReader that implements io.ReadCloser
type blobReader struct {
cl io.Closer
rd io.Reader
closed bool
}
func (l *blobReader) Read(p []byte) (int, error) {
n, err := l.rd.Read(p)
if err == io.EOF {
l.Close()
}
return n, err
}
func (l *blobReader) Close() error {
if l == nil {
return nil
}
if !l.closed {
err := l.cl.Close()
l.closed = true
return err
}
return nil
}
// LimitReadCloser returns a new reader wraps r in an io.LimitReader, but also
// implements the Close() method.
func LimitReadCloser(r io.ReadCloser, n int64) *blobReader {
return &blobReader{cl: r, rd: io.LimitReader(r, n)}
}

View file

@ -7,15 +7,6 @@ import (
. "github.com/restic/restic/test" . "github.com/restic/restic/test"
) )
func str2id(s string) backend.ID {
id, err := backend.ParseID(s)
if err != nil {
panic(err)
}
return id
}
type mockBackend struct { type mockBackend struct {
list func(backend.Type, <-chan struct{}) <-chan string list func(backend.Type, <-chan struct{}) <-chan string
} }
@ -25,14 +16,14 @@ func (m mockBackend) List(t backend.Type, done <-chan struct{}) <-chan string {
} }
var samples = backend.IDs{ var samples = backend.IDs{
str2id("20bdc1402a6fc9b633aaffffffffffffffffffffffffffffffffffffffffffff"), ParseID("20bdc1402a6fc9b633aaffffffffffffffffffffffffffffffffffffffffffff"),
str2id("20bdc1402a6fc9b633ccd578c4a92d0f4ef1a457fa2e16c596bc73fb409d6cc0"), ParseID("20bdc1402a6fc9b633ccd578c4a92d0f4ef1a457fa2e16c596bc73fb409d6cc0"),
str2id("20bdc1402a6fc9b633ffffffffffffffffffffffffffffffffffffffffffffff"), ParseID("20bdc1402a6fc9b633ffffffffffffffffffffffffffffffffffffffffffffff"),
str2id("20ff988befa5fc40350f00d531a767606efefe242c837aaccb80673f286be53d"), ParseID("20ff988befa5fc40350f00d531a767606efefe242c837aaccb80673f286be53d"),
str2id("326cb59dfe802304f96ee9b5b9af93bdee73a30f53981e5ec579aedb6f1d0f07"), ParseID("326cb59dfe802304f96ee9b5b9af93bdee73a30f53981e5ec579aedb6f1d0f07"),
str2id("86b60b9594d1d429c4aa98fa9562082cabf53b98c7dc083abe5dae31074dd15a"), ParseID("86b60b9594d1d429c4aa98fa9562082cabf53b98c7dc083abe5dae31074dd15a"),
str2id("96c8dbe225079e624b5ce509f5bd817d1453cd0a85d30d536d01b64a8669aeae"), ParseID("96c8dbe225079e624b5ce509f5bd817d1453cd0a85d30d536d01b64a8669aeae"),
str2id("fa31d65b87affcd167b119e9d3d2a27b8236ca4836cb077ed3e96fcbe209b792"), ParseID("fa31d65b87affcd167b119e9d3d2a27b8236ca4836cb077ed3e96fcbe209b792"),
} }
func TestPrefixLength(t *testing.T) { func TestPrefixLength(t *testing.T) {

48
backend/handle.go Normal file
View file

@ -0,0 +1,48 @@
package backend
import (
"errors"
"fmt"
)
// Handle is used to store and access data in a backend.
type Handle struct {
	Type Type   // blob type, one of Data, Key, Lock, Snapshot, Index, Config
	Name string // name of the blob; not checked by Valid() for the Config type
}
// String returns a short, human-readable representation of h. Names longer
// than 10 bytes are truncated.
func (h Handle) String() string {
	abbrev := h.Name
	if len(abbrev) > 10 {
		abbrev = abbrev[:10]
	}

	return fmt.Sprintf("<%s/%s>", h.Type, abbrev)
}
// Valid returns an error if h is not valid.
func (h Handle) Valid() error {
	if h.Type == "" {
		return errors.New("type is empty")
	}

	switch h.Type {
	case Data, Key, Lock, Snapshot, Index, Config:
		// known type, continue below
	default:
		return fmt.Errorf("invalid Type %q", h.Type)
	}

	// config blobs are addressed without a name, so an empty Name is fine
	if h.Type == Config {
		return nil
	}

	if h.Name == "" {
		return errors.New("invalid Name")
	}

	return nil
}

28
backend/handle_test.go Normal file
View file

@ -0,0 +1,28 @@
package backend
import "testing"
// handleTests lists Handle values together with the expected outcome of
// calling Valid() on them.
var handleTests = []struct {
	h     Handle
	valid bool
}{
	{Handle{Name: "foo"}, false},                     // Type missing
	{Handle{Type: "foobar"}, false},                  // unknown Type
	{Handle{Type: Config, Name: ""}, true},           // Config needs no Name
	{Handle{Type: Data, Name: ""}, false},            // other types require a Name
	{Handle{Type: "", Name: "x"}, false},             // empty Type
	{Handle{Type: Lock, Name: "010203040506"}, true}, // fully specified handle
}
// TestHandleValid checks Handle.Valid against the table of cases in
// handleTests.
func TestHandleValid(t *testing.T) {
	for i, tc := range handleTests {
		err := tc.h.Valid()
		switch {
		case tc.valid && err != nil:
			t.Errorf("test %v failed: error returned for valid handle: %v", i, err)
		case !tc.valid && err == nil:
			t.Errorf("test %v failed: expected error for invalid handle not found", i)
		}
	}
}

View file

@ -2,13 +2,19 @@ package backend
import ( import (
"bytes" "bytes"
"crypto/sha256"
"encoding/hex" "encoding/hex"
"encoding/json" "encoding/json"
"errors" "errors"
) )
// Hash returns the ID for data.
func Hash(data []byte) ID {
return sha256.Sum256(data)
}
// IDSize contains the size of an ID, in bytes. // IDSize contains the size of an ID, in bytes.
const IDSize = hashSize const IDSize = sha256.Size
// ID references content within a repository. // ID references content within a repository.
type ID [IDSize]byte type ID [IDSize]byte
@ -80,10 +86,12 @@ func (id ID) Compare(other ID) int {
return bytes.Compare(other[:], id[:]) return bytes.Compare(other[:], id[:])
} }
// MarshalJSON returns the JSON encoding of id.
func (id ID) MarshalJSON() ([]byte, error) { func (id ID) MarshalJSON() ([]byte, error) {
return json.Marshal(id.String()) return json.Marshal(id.String())
} }
// UnmarshalJSON parses the JSON-encoded data and stores the result in id.
func (id *ID) UnmarshalJSON(b []byte) error { func (id *ID) UnmarshalJSON(b []byte) error {
var s string var s string
err := json.Unmarshal(b, &s) err := json.Unmarshal(b, &s)
@ -98,7 +106,3 @@ func (id *ID) UnmarshalJSON(b []byte) error {
return nil return nil
} }
func IDFromData(d []byte) ID {
return hashData(d)
}

View file

@ -5,6 +5,7 @@ import (
"testing" "testing"
"github.com/restic/restic/backend" "github.com/restic/restic/backend"
. "github.com/restic/restic/test"
) )
var uniqTests = []struct { var uniqTests = []struct {
@ -12,37 +13,37 @@ var uniqTests = []struct {
}{ }{
{ {
backend.IDs{ backend.IDs{
str2id("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"),
str2id("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), ParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"),
str2id("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"),
}, },
backend.IDs{ backend.IDs{
str2id("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"),
str2id("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), ParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"),
}, },
}, },
{ {
backend.IDs{ backend.IDs{
str2id("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), ParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"),
str2id("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"),
str2id("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"),
}, },
backend.IDs{ backend.IDs{
str2id("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), ParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"),
str2id("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"),
}, },
}, },
{ {
backend.IDs{ backend.IDs{
str2id("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), ParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"),
str2id("f658198b405d7e80db5ace1980d125c8da62f636b586c46bf81dfb856a49d0c8"), ParseID("f658198b405d7e80db5ace1980d125c8da62f636b586c46bf81dfb856a49d0c8"),
str2id("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"),
str2id("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"),
}, },
backend.IDs{ backend.IDs{
str2id("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), ParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"),
str2id("f658198b405d7e80db5ace1980d125c8da62f636b586c46bf81dfb856a49d0c8"), ParseID("f658198b405d7e80db5ace1980d125c8da62f636b586c46bf81dfb856a49d0c8"),
str2id("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"),
}, },
}, },
} }

View file

@ -4,22 +4,23 @@ import (
"testing" "testing"
"github.com/restic/restic/backend" "github.com/restic/restic/backend"
. "github.com/restic/restic/test"
) )
var idsetTests = []struct { var idsetTests = []struct {
id backend.ID id backend.ID
seen bool seen bool
}{ }{
{str2id("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), false}, {ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), false},
{str2id("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), false}, {ParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), false},
{str2id("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), true}, {ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), true},
{str2id("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), true}, {ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), true},
{str2id("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), true}, {ParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), true},
{str2id("f658198b405d7e80db5ace1980d125c8da62f636b586c46bf81dfb856a49d0c8"), false}, {ParseID("f658198b405d7e80db5ace1980d125c8da62f636b586c46bf81dfb856a49d0c8"), false},
{str2id("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), true}, {ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), true},
{str2id("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), true}, {ParseID("1285b30394f3b74693cc29a758d9624996ae643157776fce8154aabd2f01515f"), true},
{str2id("f658198b405d7e80db5ace1980d125c8da62f636b586c46bf81dfb856a49d0c8"), true}, {ParseID("f658198b405d7e80db5ace1980d125c8da62f636b586c46bf81dfb856a49d0c8"), true},
{str2id("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), true}, {ParseID("7bb086db0d06285d831485da8031281e28336a56baa313539eaea1c73a2a1a40"), true},
} }
func TestIDSet(t *testing.T) { func TestIDSet(t *testing.T) {

View file

@ -1,10 +1,9 @@
package backend package backend
import "io"
// Type is the type of a Blob. // Type is the type of a Blob.
type Type string type Type string
// These are the different data types a backend can store.
const ( const (
Data Type = "data" Data Type = "data"
Key = "key" Key = "key"
@ -14,23 +13,12 @@ const (
Config = "config" Config = "config"
) )
// A Backend manages data stored somewhere. // Backend is used to store and access data.
type Backend interface { type Backend interface {
// Location returns a string that specifies the location of the repository, // Location returns a string that describes the type and location of the
// like a URL. // repository.
Location() string Location() string
// Create creates a new Blob. The data is available only after Finalize()
// has been called on the returned Blob.
Create() (Blob, error)
// Get returns an io.ReadCloser for the Blob with the given name of type t.
Get(t Type, name string) (io.ReadCloser, error)
// GetReader returns an io.ReadCloser for the Blob with the given name of
// type t at offset and length.
GetReader(t Type, name string, offset, length uint) (io.ReadCloser, error)
// Test a boolean value whether a Blob with the name and type exists. // Test a boolean value whether a Blob with the name and type exists.
Test(t Type, name string) (bool, error) Test(t Type, name string) (bool, error)
@ -41,26 +29,33 @@ type Backend interface {
Close() error Close() error
Lister Lister
// Load returns the data stored in the backend for h at the given offset
// and saves it in p. Load has the same semantics as io.ReaderAt.
Load(h Handle, p []byte, off int64) (int, error)
// Save stores the data in the backend under the given handle.
Save(h Handle, p []byte) error
// Stat returns information about the blob identified by h.
Stat(h Handle) (BlobInfo, error)
} }
// Lister implements listing data items stored in a backend.
type Lister interface { type Lister interface {
// List returns a channel that yields all names of blobs of type t in // List returns a channel that yields all names of blobs of type t in an
// lexicographic order. A goroutine is started for this. If the channel // arbitrary order. A goroutine is started for this. If the channel done is
// done is closed, sending stops. // closed, sending stops.
List(t Type, done <-chan struct{}) <-chan string List(t Type, done <-chan struct{}) <-chan string
} }
// Deleter are backends that allow to self-delete all content stored in them.
type Deleter interface { type Deleter interface {
// Delete the complete repository. // Delete the complete repository.
Delete() error Delete() error
} }
type Blob interface { // BlobInfo is returned by Stat() and contains information about a stored blob.
io.Writer type BlobInfo struct {
Size int64
// Finalize moves the data blob to the final location for type and name.
Finalize(t Type, name string) error
// Size returns the number of bytes written to the backend so far.
Size() uint
} }

View file

@ -0,0 +1,87 @@
// DO NOT EDIT, AUTOMATICALLY GENERATED

// NOTE(review): this file is generated; any manual change will be lost on
// the next generation run. Each test delegates to the shared backend test
// suite in backend/test, and all tests are skipped when SkipMessage is
// non-empty — presumably set by the package's test setup when the backend
// is not available (confirm against the setup code).
package local_test

import (
	"testing"

	"github.com/restic/restic/backend/test"
)

// SkipMessage, when non-empty, causes every test in this package to be
// skipped with this message.
var SkipMessage string

func TestLocalBackendCreate(t *testing.T) {
	if SkipMessage != "" {
		t.Skip(SkipMessage)
	}
	test.TestCreate(t)
}

func TestLocalBackendOpen(t *testing.T) {
	if SkipMessage != "" {
		t.Skip(SkipMessage)
	}
	test.TestOpen(t)
}

func TestLocalBackendCreateWithConfig(t *testing.T) {
	if SkipMessage != "" {
		t.Skip(SkipMessage)
	}
	test.TestCreateWithConfig(t)
}

func TestLocalBackendLocation(t *testing.T) {
	if SkipMessage != "" {
		t.Skip(SkipMessage)
	}
	test.TestLocation(t)
}

func TestLocalBackendConfig(t *testing.T) {
	if SkipMessage != "" {
		t.Skip(SkipMessage)
	}
	test.TestConfig(t)
}

func TestLocalBackendLoad(t *testing.T) {
	if SkipMessage != "" {
		t.Skip(SkipMessage)
	}
	test.TestLoad(t)
}

func TestLocalBackendSave(t *testing.T) {
	if SkipMessage != "" {
		t.Skip(SkipMessage)
	}
	test.TestSave(t)
}

func TestLocalBackendSaveFilenames(t *testing.T) {
	if SkipMessage != "" {
		t.Skip(SkipMessage)
	}
	test.TestSaveFilenames(t)
}

func TestLocalBackendBackend(t *testing.T) {
	if SkipMessage != "" {
		t.Skip(SkipMessage)
	}
	test.TestBackend(t)
}

func TestLocalBackendDelete(t *testing.T) {
	if SkipMessage != "" {
		t.Skip(SkipMessage)
	}
	test.TestDelete(t)
}

func TestLocalBackendCleanup(t *testing.T) {
	if SkipMessage != "" {
		t.Skip(SkipMessage)
	}
	test.TestCleanup(t)
}

View file

@ -7,23 +7,18 @@ import (
"io/ioutil" "io/ioutil"
"os" "os"
"path/filepath" "path/filepath"
"sort"
"sync"
"github.com/restic/restic/backend" "github.com/restic/restic/backend"
"github.com/restic/restic/debug"
) )
var ErrWrongData = errors.New("wrong data returned by backend, checksum does not match") // Local is a backend in a local directory.
type Local struct { type Local struct {
p string p string
mu sync.Mutex
open map[string][]*os.File // Contains open files. Guarded by 'mu'.
} }
// Open opens the local backend as specified by config. func paths(dir string) []string {
func Open(dir string) (*Local, error) { return []string{
items := []string{
dir, dir,
filepath.Join(dir, backend.Paths.Data), filepath.Join(dir, backend.Paths.Data),
filepath.Join(dir, backend.Paths.Snapshots), filepath.Join(dir, backend.Paths.Snapshots),
@ -32,30 +27,23 @@ func Open(dir string) (*Local, error) {
filepath.Join(dir, backend.Paths.Keys), filepath.Join(dir, backend.Paths.Keys),
filepath.Join(dir, backend.Paths.Temp), filepath.Join(dir, backend.Paths.Temp),
} }
}
// Open opens the local backend as specified by config.
func Open(dir string) (*Local, error) {
// test if all necessary dirs are there // test if all necessary dirs are there
for _, d := range items { for _, d := range paths(dir) {
if _, err := os.Stat(d); err != nil { if _, err := os.Stat(d); err != nil {
return nil, fmt.Errorf("%s does not exist", d) return nil, fmt.Errorf("%s does not exist", d)
} }
} }
return &Local{p: dir, open: make(map[string][]*os.File)}, nil return &Local{p: dir}, nil
} }
// Create creates all the necessary files and directories for a new local // Create creates all the necessary files and directories for a new local
// backend at dir. Afterwards a new config blob should be created. // backend at dir. Afterwards a new config blob should be created.
func Create(dir string) (*Local, error) { func Create(dir string) (*Local, error) {
dirs := []string{
dir,
filepath.Join(dir, backend.Paths.Data),
filepath.Join(dir, backend.Paths.Snapshots),
filepath.Join(dir, backend.Paths.Index),
filepath.Join(dir, backend.Paths.Locks),
filepath.Join(dir, backend.Paths.Keys),
filepath.Join(dir, backend.Paths.Temp),
}
// test if config file already exists // test if config file already exists
_, err := os.Lstat(filepath.Join(dir, backend.Paths.Config)) _, err := os.Lstat(filepath.Join(dir, backend.Paths.Config))
if err == nil { if err == nil {
@ -63,7 +51,7 @@ func Create(dir string) (*Local, error) {
} }
// create paths for data, refs and temp // create paths for data, refs and temp
for _, d := range dirs { for _, d := range paths(dir) {
err := os.MkdirAll(d, backend.Modes.Dir) err := os.MkdirAll(d, backend.Modes.Dir)
if err != nil { if err != nil {
return nil, err return nil, err
@ -79,93 +67,6 @@ func (b *Local) Location() string {
return b.p return b.p
} }
// Return temp directory in correct directory for this backend.
func (b *Local) tempFile() (*os.File, error) {
return ioutil.TempFile(filepath.Join(b.p, backend.Paths.Temp), "temp-")
}
type localBlob struct {
f *os.File
size uint
final bool
basedir string
}
func (lb *localBlob) Write(p []byte) (int, error) {
if lb.final {
return 0, errors.New("blob already closed")
}
n, err := lb.f.Write(p)
lb.size += uint(n)
return n, err
}
func (lb *localBlob) Size() uint {
return lb.size
}
func (lb *localBlob) Finalize(t backend.Type, name string) error {
if lb.final {
return errors.New("Already finalized")
}
lb.final = true
err := lb.f.Close()
if err != nil {
return fmt.Errorf("local: file.Close: %v", err)
}
f := filename(lb.basedir, t, name)
// create directories if necessary, ignore errors
if t == backend.Data {
os.MkdirAll(filepath.Dir(f), backend.Modes.Dir)
}
// test if new path already exists
if _, err := os.Stat(f); err == nil {
return fmt.Errorf("Close(): file %v already exists", f)
}
if err := os.Rename(lb.f.Name(), f); err != nil {
return err
}
// set mode to read-only
fi, err := os.Stat(f)
if err != nil {
return err
}
return setNewFileMode(f, fi)
}
// Create creates a new Blob. The data is available only after Finalize()
// has been called on the returned Blob.
func (b *Local) Create() (backend.Blob, error) {
// TODO: make sure that tempfile is removed upon error
// create tempfile in backend
file, err := b.tempFile()
if err != nil {
return nil, err
}
blob := localBlob{
f: file,
basedir: b.p,
}
b.mu.Lock()
open, _ := b.open["blobs"]
b.open["blobs"] = append(open, file)
b.mu.Unlock()
return &blob, nil
}
// Construct path for given Type and name. // Construct path for given Type and name.
func filename(base string, t backend.Type, name string) string { func filename(base string, t backend.Type, name string) string {
if t == backend.Config { if t == backend.Config {
@ -196,45 +97,116 @@ func dirname(base string, t backend.Type, name string) string {
return filepath.Join(base, n) return filepath.Join(base, n)
} }
// Get returns a reader that yields the content stored under the given // Load returns the data stored in the backend for h at the given offset
// name. The reader should be closed after draining it. // and saves it in p. Load has the same semantics as io.ReaderAt.
func (b *Local) Get(t backend.Type, name string) (io.ReadCloser, error) { func (b *Local) Load(h backend.Handle, p []byte, off int64) (n int, err error) {
file, err := os.Open(filename(b.p, t, name)) if err := h.Valid(); err != nil {
if err != nil { return 0, err
return nil, err
}
b.mu.Lock()
open, _ := b.open[filename(b.p, t, name)]
b.open[filename(b.p, t, name)] = append(open, file)
b.mu.Unlock()
return file, nil
}
// GetReader returns an io.ReadCloser for the Blob with the given name of
// type t at offset and length. If length is 0, the reader reads until EOF.
func (b *Local) GetReader(t backend.Type, name string, offset, length uint) (io.ReadCloser, error) {
f, err := os.Open(filename(b.p, t, name))
if err != nil {
return nil, err
} }
b.mu.Lock() f, err := os.Open(filename(b.p, h.Type, h.Name))
open, _ := b.open[filename(b.p, t, name)] if err != nil {
b.open[filename(b.p, t, name)] = append(open, f) return 0, err
b.mu.Unlock() }
if offset > 0 { defer func() {
_, err = f.Seek(int64(offset), 0) e := f.Close()
if err == nil && e != nil {
err = e
}
}()
if off > 0 {
_, err = f.Seek(off, 0)
if err != nil { if err != nil {
return nil, err return 0, err
} }
} }
if length == 0 { return io.ReadFull(f, p)
return f, nil }
// writeToTempfile saves p into a tempfile in tempdir.
func writeToTempfile(tempdir string, p []byte) (filename string, err error) {
tmpfile, err := ioutil.TempFile(tempdir, "temp-")
if err != nil {
return "", err
} }
return backend.LimitReadCloser(f, int64(length)), nil n, err := tmpfile.Write(p)
if err != nil {
return "", err
}
if n != len(p) {
return "", errors.New("not all bytes writen")
}
if err = tmpfile.Sync(); err != nil {
return "", err
}
err = tmpfile.Close()
if err != nil {
return "", err
}
return tmpfile.Name(), nil
}
// Save stores data in the backend at the handle.
func (b *Local) Save(h backend.Handle, p []byte) (err error) {
if err := h.Valid(); err != nil {
return err
}
tmpfile, err := writeToTempfile(filepath.Join(b.p, backend.Paths.Temp), p)
debug.Log("local.Save", "saved %v (%d bytes) to %v", h, len(p), tmpfile)
filename := filename(b.p, h.Type, h.Name)
// test if new path already exists
if _, err := os.Stat(filename); err == nil {
return fmt.Errorf("Rename(): file %v already exists", filename)
}
// create directories if necessary, ignore errors
if h.Type == backend.Data {
err = os.MkdirAll(filepath.Dir(filename), backend.Modes.Dir)
if err != nil {
return err
}
}
err = os.Rename(tmpfile, filename)
debug.Log("local.Save", "save %v: rename %v -> %v: %v",
h, filepath.Base(tmpfile), filepath.Base(filename), err)
if err != nil {
return err
}
// set mode to read-only
fi, err := os.Stat(filename)
if err != nil {
return err
}
return setNewFileMode(filename, fi)
}
// Stat returns information about a blob.
func (b *Local) Stat(h backend.Handle) (backend.BlobInfo, error) {
if err := h.Valid(); err != nil {
return backend.BlobInfo{}, err
}
fi, err := os.Stat(filename(b.p, h.Type, h.Name))
if err != nil {
return backend.BlobInfo{}, err
}
return backend.BlobInfo{Size: fi.Size()}, nil
} }
// Test returns true if a blob of the given type and name exists in the backend. // Test returns true if a blob of the given type and name exists in the backend.
@ -252,15 +224,7 @@ func (b *Local) Test(t backend.Type, name string) (bool, error) {
// Remove removes the blob with the given name and type. // Remove removes the blob with the given name and type.
func (b *Local) Remove(t backend.Type, name string) error { func (b *Local) Remove(t backend.Type, name string) error {
// close all open files we may have.
fn := filename(b.p, t, name) fn := filename(b.p, t, name)
b.mu.Lock()
open, _ := b.open[fn]
for _, file := range open {
file.Close()
}
b.open[fn] = nil
b.mu.Unlock()
// reset read-only flag // reset read-only flag
err := os.Chmod(fn, 0666) err := os.Chmod(fn, 0666)
@ -275,7 +239,6 @@ func (b *Local) Remove(t backend.Type, name string) error {
// goroutine is started for this. If the channel done is closed, sending // goroutine is started for this. If the channel done is closed, sending
// stops. // stops.
func (b *Local) List(t backend.Type, done <-chan struct{}) <-chan string { func (b *Local) List(t backend.Type, done <-chan struct{}) <-chan string {
// TODO: use os.Open() and d.Readdirnames() instead of Glob()
var pattern string var pattern string
if t == backend.Data { if t == backend.Data {
pattern = filepath.Join(dirname(b.p, t, ""), "*", "*") pattern = filepath.Join(dirname(b.p, t, ""), "*", "*")
@ -294,8 +257,6 @@ func (b *Local) List(t backend.Type, done <-chan struct{}) <-chan string {
matches[i] = filepath.Base(matches[i]) matches[i] = filepath.Base(matches[i])
} }
sort.Strings(matches)
go func() { go func() {
defer close(ch) defer close(ch)
for _, m := range matches { for _, m := range matches {
@ -316,21 +277,12 @@ func (b *Local) List(t backend.Type, done <-chan struct{}) <-chan string {
// Delete removes the repository and all files. // Delete removes the repository and all files.
func (b *Local) Delete() error { func (b *Local) Delete() error {
b.Close()
return os.RemoveAll(b.p) return os.RemoveAll(b.p)
} }
// Close closes all open files. // Close closes all open files.
// They may have been closed already,
// so we ignore all errors.
func (b *Local) Close() error { func (b *Local) Close() error {
b.mu.Lock() // this does not need to do anything, all open files are closed within the
for _, open := range b.open { // same function.
for _, file := range open {
file.Close()
}
}
b.open = make(map[string][]*os.File)
b.mu.Unlock()
return nil return nil
} }

View file

@ -0,0 +1,59 @@
package local_test
import (
"fmt"
"io/ioutil"
"os"
"github.com/restic/restic/backend"
"github.com/restic/restic/backend/local"
"github.com/restic/restic/backend/test"
)
// tempBackendDir is the directory holding the backend under test; it is
// created lazily by createTempdir and reused by subsequent calls.
var tempBackendDir string

//go:generate go run ../test/generate_backend_tests.go

// createTempdir ensures tempBackendDir points at a temporary directory,
// creating one on first use.
func createTempdir() error {
	if tempBackendDir != "" {
		return nil
	}

	dir, err := ioutil.TempDir("", "restic-local-test-")
	if err != nil {
		return err
	}

	fmt.Printf("created new test backend at %v\n", dir)
	tempBackendDir = dir
	return nil
}
// init wires the generic backend test suite (package test) to the local
// backend: Create/Open share one lazily created temporary directory,
// Cleanup removes it again.
func init() {
	test.CreateFn = func() (backend.Backend, error) {
		if err := createTempdir(); err != nil {
			return nil, err
		}
		return local.Create(tempBackendDir)
	}

	test.OpenFn = func() (backend.Backend, error) {
		if err := createTempdir(); err != nil {
			return nil, err
		}
		return local.Open(tempBackendDir)
	}

	test.CleanupFn = func() error {
		if tempBackendDir == "" {
			return nil
		}

		fmt.Printf("removing test backend at %v\n", tempBackendDir)
		err := os.RemoveAll(tempBackendDir)
		tempBackendDir = ""
		return err
	}
}

View file

@ -1,59 +0,0 @@
package backend_test
import (
"fmt"
"io/ioutil"
"testing"
"github.com/restic/restic/backend"
"github.com/restic/restic/backend/local"
. "github.com/restic/restic/test"
)
// setupLocalBackend creates a local backend in a fresh temporary directory,
// failing the test on any error.
func setupLocalBackend(t *testing.T) *local.Local {
	tempdir, err := ioutil.TempDir("", "restic-test-")
	OK(t, err)
	b, err := local.Create(tempdir)
	OK(t, err)
	t.Logf("created local backend at %s", tempdir)
	return b
}

// teardownLocalBackend deletes the backend, unless the global TestCleanup
// flag asks to keep it around for inspection.
func teardownLocalBackend(t *testing.T, b *local.Local) {
	if !TestCleanup {
		t.Logf("leaving local backend at %s\n", b.Location())
		return
	}
	OK(t, b.Delete())
}

// TestLocalBackend checks that opening a non-existing location fails, then
// runs the generic backend tests against a fresh local backend.
func TestLocalBackend(t *testing.T) {
	// test for non-existing backend
	b, err := local.Open("/invalid-restic-test")
	Assert(t, err != nil, "opening invalid repository at /invalid-restic-test should have failed, but err is nil")
	Assert(t, b == nil, fmt.Sprintf("opening invalid repository at /invalid-restic-test should have failed, but b is not nil: %v", b))
	s := setupLocalBackend(t)
	defer teardownLocalBackend(t, s)
	testBackend(s, t)
}

// TestLocalBackendCreationFailures verifies that a repository cannot be
// created a second time at a location that already holds a config.
func TestLocalBackendCreationFailures(t *testing.T) {
	b := setupLocalBackend(t)
	defer teardownLocalBackend(t, b)
	// create a fake config file
	blob, err := b.Create()
	OK(t, err)
	fmt.Fprintf(blob, "config\n")
	OK(t, blob.Finalize(backend.Config, ""))
	// test failure to create a new repository at the same location
	b2, err := local.Create(b.Location())
	Assert(t, err != nil && b2 == nil, fmt.Sprintf("creating a repository at %s for the second time should have failed", b.Location()))
}

View file

@ -0,0 +1,87 @@
// DO NOT EDIT, AUTOMATICALLY GENERATED
package mem_test
import (
"testing"
"github.com/restic/restic/backend/test"
)
// SkipMessage, when set to a non-empty string, causes every test in this
// file to be skipped with that message.
var SkipMessage string

// The functions below are generated wrappers that dispatch to the generic
// backend test suite in the test package; change the generator, not this
// file.

func TestMemBackendCreate(t *testing.T) {
	if SkipMessage != "" {
		t.Skip(SkipMessage)
	}
	test.TestCreate(t)
}

func TestMemBackendOpen(t *testing.T) {
	if SkipMessage != "" {
		t.Skip(SkipMessage)
	}
	test.TestOpen(t)
}

func TestMemBackendCreateWithConfig(t *testing.T) {
	if SkipMessage != "" {
		t.Skip(SkipMessage)
	}
	test.TestCreateWithConfig(t)
}

func TestMemBackendLocation(t *testing.T) {
	if SkipMessage != "" {
		t.Skip(SkipMessage)
	}
	test.TestLocation(t)
}

func TestMemBackendConfig(t *testing.T) {
	if SkipMessage != "" {
		t.Skip(SkipMessage)
	}
	test.TestConfig(t)
}

func TestMemBackendLoad(t *testing.T) {
	if SkipMessage != "" {
		t.Skip(SkipMessage)
	}
	test.TestLoad(t)
}

func TestMemBackendSave(t *testing.T) {
	if SkipMessage != "" {
		t.Skip(SkipMessage)
	}
	test.TestSave(t)
}

func TestMemBackendSaveFilenames(t *testing.T) {
	if SkipMessage != "" {
		t.Skip(SkipMessage)
	}
	test.TestSaveFilenames(t)
}

func TestMemBackendBackend(t *testing.T) {
	if SkipMessage != "" {
		t.Skip(SkipMessage)
	}
	test.TestBackend(t)
}

func TestMemBackendDelete(t *testing.T) {
	if SkipMessage != "" {
		t.Skip(SkipMessage)
	}
	test.TestDelete(t)
}

func TestMemBackendCleanup(t *testing.T) {
	if SkipMessage != "" {
		t.Skip(SkipMessage)
	}
	test.TestCleanup(t)
}

223
backend/mem/mem_backend.go Normal file
View file

@ -0,0 +1,223 @@
package mem
import (
"errors"
"io"
"sync"
"github.com/restic/restic/backend"
"github.com/restic/restic/debug"
)
// entry addresses a single stored blob by its type (data, index, config,
// ...) and its name.
type entry struct {
	Type backend.Type
	Name string
}

// memMap maps a blob address to its raw contents.
type memMap map[entry][]byte

// MemoryBackend is a mock backend that uses a map for storing all data in
// memory. This should only be used for tests.
type MemoryBackend struct {
	data memMap // protected by m
	m    sync.Mutex

	backend.MockBackend
}
// New returns a new backend that saves all data in a map in memory.
func New() *MemoryBackend {
	be := &MemoryBackend{data: make(memMap)}

	// Wire the embedded MockBackend's hooks to the map-based
	// implementations below; every closure captures be.
	be.MockBackend = backend.MockBackend{
		TestFn: func(t backend.Type, name string) (bool, error) {
			return memTest(be, t, name)
		},
		LoadFn: func(h backend.Handle, p []byte, off int64) (int, error) {
			return memLoad(be, h, p, off)
		},
		SaveFn: func(h backend.Handle, p []byte) error {
			return memSave(be, h, p)
		},
		StatFn: func(h backend.Handle) (backend.BlobInfo, error) {
			return memStat(be, h)
		},
		RemoveFn: func(t backend.Type, name string) error {
			return memRemove(be, t, name)
		},
		ListFn: func(t backend.Type, done <-chan struct{}) <-chan string {
			return memList(be, t, done)
		},
		DeleteFn: func() error {
			be.m.Lock()
			defer be.m.Unlock()
			be.data = make(memMap)
			return nil
		},
		LocationFn: func() string {
			return "Memory Backend"
		},
	}

	debug.Log("MemoryBackend.New", "created new memory backend")
	return be
}
// insert stores data under the given type and name, refusing to overwrite
// an entry that is already present.
func (be *MemoryBackend) insert(t backend.Type, name string, data []byte) error {
	be.m.Lock()
	defer be.m.Unlock()

	key := entry{t, name}
	if _, ok := be.data[key]; ok {
		return errors.New("already present")
	}

	be.data[key] = data
	return nil
}
// memTest reports whether a blob with the given type and name is stored.
func memTest(be *MemoryBackend, t backend.Type, name string) (bool, error) {
	be.m.Lock()
	defer be.m.Unlock()

	debug.Log("MemoryBackend.Test", "test %v %v", t, name)

	_, found := be.data[entry{t, name}]
	return found, nil
}
// memLoad copies data stored for h, starting at offset off, into p. It
// returns the number of bytes copied and io.ErrUnexpectedEOF when fewer
// than len(p) bytes were available (io.ReaderAt-like semantics).
func memLoad(be *MemoryBackend, h backend.Handle, p []byte, off int64) (int, error) {
	if err := h.Valid(); err != nil {
		return 0, err
	}

	be.m.Lock()
	defer be.m.Unlock()

	// the config blob is always stored under the empty name
	if h.Type == backend.Config {
		h.Name = ""
	}

	debug.Log("MemoryBackend.Load", "get %v offset %v len %v", h, off, len(p))

	// single comma-ok lookup instead of a test followed by a second lookup
	buf, ok := be.data[entry{h.Type, h.Name}]
	if !ok {
		return 0, errors.New("no such data")
	}

	if off > int64(len(buf)) {
		return 0, errors.New("offset beyond end of file")
	}

	buf = buf[off:]
	n := copy(p, buf)

	// short read: p could not be filled completely
	if len(p) > len(buf) {
		return n, io.ErrUnexpectedEOF
	}

	return n, nil
}
// memSave stores a copy of p under the handle h, refusing to overwrite an
// existing blob.
func memSave(be *MemoryBackend, h backend.Handle, p []byte) error {
	if err := h.Valid(); err != nil {
		return err
	}

	be.m.Lock()
	defer be.m.Unlock()

	// the config blob is always stored under the empty name
	if h.Type == backend.Config {
		h.Name = ""
	}

	key := entry{h.Type, h.Name}
	if _, ok := be.data[key]; ok {
		return errors.New("file already exists")
	}

	debug.Log("MemoryBackend.Save", "save %v bytes at %v", len(p), h)

	// keep a private copy so later modifications of p cannot change the
	// stored data
	be.data[key] = append([]byte{}, p...)

	return nil
}
// memStat returns information about the blob stored for h. The handle is
// validated before the lock is taken, consistent with memLoad and memSave.
func memStat(be *MemoryBackend, h backend.Handle) (backend.BlobInfo, error) {
	if err := h.Valid(); err != nil {
		return backend.BlobInfo{}, err
	}

	be.m.Lock()
	defer be.m.Unlock()

	// the config blob is always stored under the empty name
	if h.Type == backend.Config {
		h.Name = ""
	}

	debug.Log("MemoryBackend.Stat", "stat %v", h)

	e, ok := be.data[entry{h.Type, h.Name}]
	if !ok {
		return backend.BlobInfo{}, errors.New("no such data")
	}

	return backend.BlobInfo{Size: int64(len(e))}, nil
}
// memRemove deletes the blob with the given type and name; it is an error
// if no such blob exists.
func memRemove(be *MemoryBackend, t backend.Type, name string) error {
	be.m.Lock()
	defer be.m.Unlock()

	debug.Log("MemoryBackend.Remove", "get %v %v", t, name)

	key := entry{t, name}
	if _, ok := be.data[key]; !ok {
		return errors.New("no such data")
	}

	delete(be.data, key)
	return nil
}
// memList sends the names of all blobs of type t on the returned channel;
// sending stops early when done is closed.
func memList(be *MemoryBackend, t backend.Type, done <-chan struct{}) <-chan string {
	be.m.Lock()
	defer be.m.Unlock()

	ch := make(chan string)

	// collect the names under the lock, then send without holding it
	var ids []string
	for k := range be.data {
		if k.Type == t {
			ids = append(ids, k.Name)
		}
	}

	debug.Log("MemoryBackend.List", "list %v: %v", t, ids)

	go func() {
		defer close(ch)
		for _, id := range ids {
			select {
			case ch <- id:
			case <-done:
				return
			}
		}
	}()

	return ch
}

View file

@ -0,0 +1,38 @@
package mem_test
import (
"errors"
"github.com/restic/restic/backend"
"github.com/restic/restic/backend/mem"
"github.com/restic/restic/backend/test"
)
// be holds the backend instance under test; it is non-nil after CreateFn
// has run and reset to nil by CleanupFn.
var be backend.Backend

//go:generate go run ../test/generate_backend_tests.go

// init wires the generic backend test suite to the in-memory backend.
func init() {
	test.CreateFn = func() (backend.Backend, error) {
		if be != nil {
			return nil, errors.New("temporary memory backend dir already exists")
		}

		be = mem.New()
		return be, nil
	}

	test.OpenFn = func() (backend.Backend, error) {
		if be == nil {
			return nil, errors.New("repository not initialized")
		}

		return be, nil
	}

	test.CleanupFn = func() error {
		be = nil
		return nil
	}
}

View file

@ -1,240 +0,0 @@
package backend
import (
"bytes"
"errors"
"io"
"sort"
"sync"
"github.com/restic/restic/debug"
)
// entry addresses a single stored blob by its type and name.
type entry struct {
	Type Type
	Name string
}

// memMap maps a blob address to its raw contents.
type memMap map[entry][]byte

// MemoryBackend is a mock backend that uses a map for storing all data in
// memory. This should only be used for tests.
type MemoryBackend struct {
	data memMap // protected by m
	m    sync.Mutex

	MockBackend
}
// NewMemoryBackend returns a new backend that saves all data in a map in
// memory.
func NewMemoryBackend() *MemoryBackend {
	be := &MemoryBackend{
		data: make(memMap),
	}
	// Wire the embedded MockBackend's function hooks to the map-based
	// implementations below; each closure captures be.
	be.MockBackend.TestFn = func(t Type, name string) (bool, error) {
		return memTest(be, t, name)
	}
	be.MockBackend.CreateFn = func() (Blob, error) {
		return memCreate(be)
	}
	be.MockBackend.GetFn = func(t Type, name string) (io.ReadCloser, error) {
		return memGet(be, t, name)
	}
	be.MockBackend.GetReaderFn = func(t Type, name string, offset, length uint) (io.ReadCloser, error) {
		return memGetReader(be, t, name, offset, length)
	}
	be.MockBackend.RemoveFn = func(t Type, name string) error {
		return memRemove(be, t, name)
	}
	be.MockBackend.ListFn = func(t Type, done <-chan struct{}) <-chan string {
		return memList(be, t, done)
	}
	be.MockBackend.DeleteFn = func() error {
		be.m.Lock()
		defer be.m.Unlock()
		be.data = make(memMap)
		return nil
	}
	debug.Log("MemoryBackend.New", "created new memory backend")
	return be
}
// insert stores data under the given type and name; it refuses to
// overwrite an existing entry.
func (be *MemoryBackend) insert(t Type, name string, data []byte) error {
	be.m.Lock()
	defer be.m.Unlock()
	if _, ok := be.data[entry{t, name}]; ok {
		return errors.New("already present")
	}
	be.data[entry{t, name}] = data
	return nil
}

// memTest reports whether an entry with the given type and name exists.
func memTest(be *MemoryBackend, t Type, name string) (bool, error) {
	be.m.Lock()
	defer be.m.Unlock()
	debug.Log("MemoryBackend.Test", "test %v %v", t, name)
	if _, ok := be.data[entry{t, name}]; ok {
		return true, nil
	}
	return false, nil
}
// tempMemEntry temporarily holds data written to the memory backend before it
// is finalized.
type tempMemEntry struct {
	be   *MemoryBackend
	data bytes.Buffer
}

// Write appends p to the in-memory buffer.
func (e *tempMemEntry) Write(p []byte) (int, error) {
	return e.data.Write(p)
}

// Size returns the number of bytes buffered so far.
func (e *tempMemEntry) Size() uint {
	return uint(len(e.data.Bytes()))
}

// Finalize stores the buffered data in the backend under the given type and
// name; the config blob is always stored under the empty name.
func (e *tempMemEntry) Finalize(t Type, name string) error {
	if t == Config {
		name = ""
	}
	debug.Log("MemoryBackend", "save blob %p (%d bytes) as %v %v", e, len(e.data.Bytes()), t, name)
	return e.be.insert(t, name, e.data.Bytes())
}

// memCreate returns a new temporary blob which is stored in the backend
// once Finalize is called on it.
func memCreate(be *MemoryBackend) (Blob, error) {
	blob := &tempMemEntry{be: be}
	debug.Log("MemoryBackend.Create", "create new blob %p", blob)
	return blob, nil
}
// ReadCloser wraps a reader and adds a noop Close method if rd does not implement io.Closer.
func ReadCloser(rd io.Reader) io.ReadCloser {
	return readCloser{rd}
}

// readCloser wraps a reader and adds a noop Close method if rd does not implement io.Closer.
type readCloser struct {
	io.Reader
}

// Close forwards to the wrapped reader's Close when it implements
// io.Closer, otherwise it is a no-op.
func (rd readCloser) Close() error {
	if r, ok := rd.Reader.(io.Closer); ok {
		return r.Close()
	}
	return nil
}
// memGet returns a reader for the complete entry with the given type and
// name; the config blob is stored under the empty name.
func memGet(be *MemoryBackend, t Type, name string) (io.ReadCloser, error) {
	be.m.Lock()
	defer be.m.Unlock()
	if t == Config {
		name = ""
	}
	debug.Log("MemoryBackend.Get", "get %v %v", t, name)
	if _, ok := be.data[entry{t, name}]; !ok {
		return nil, errors.New("no such data")
	}
	return readCloser{bytes.NewReader(be.data[entry{t, name}])}, nil
}

// memGetReader returns a reader for a sub-range of the entry starting at
// offset; a length of 0 means "until the end of the data".
func memGetReader(be *MemoryBackend, t Type, name string, offset, length uint) (io.ReadCloser, error) {
	be.m.Lock()
	defer be.m.Unlock()
	if t == Config {
		name = ""
	}
	debug.Log("MemoryBackend.GetReader", "get %v %v offset %v len %v", t, name, offset, length)
	if _, ok := be.data[entry{t, name}]; !ok {
		return nil, errors.New("no such data")
	}
	buf := be.data[entry{t, name}]
	if offset > uint(len(buf)) {
		return nil, errors.New("offset beyond end of file")
	}
	buf = buf[offset:]
	if length > 0 {
		// clamp the requested length to the available data
		if length > uint(len(buf)) {
			length = uint(len(buf))
		}
		buf = buf[:length]
	}
	return readCloser{bytes.NewReader(buf)}, nil
}
// memRemove deletes the entry with the given type and name; it is an error
// if no such entry exists.
func memRemove(be *MemoryBackend, t Type, name string) error {
	be.m.Lock()
	defer be.m.Unlock()
	debug.Log("MemoryBackend.Remove", "get %v %v", t, name)
	if _, ok := be.data[entry{t, name}]; !ok {
		return errors.New("no such data")
	}
	delete(be.data, entry{t, name})
	return nil
}

// memList sends the sorted names of all entries of type t on the returned
// channel; sending stops when done is closed.
func memList(be *MemoryBackend, t Type, done <-chan struct{}) <-chan string {
	be.m.Lock()
	defer be.m.Unlock()
	ch := make(chan string)
	var ids []string
	for entry := range be.data {
		if entry.Type != t {
			continue
		}
		ids = append(ids, entry.Name)
	}
	sort.Strings(ids)
	debug.Log("MemoryBackend.List", "list %v: %v", t, ids)
	go func() {
		defer close(ch)
		for _, id := range ids {
			select {
			case ch <- id:
			case <-done:
				return
			}
		}
	}()
	return ch
}

View file

@ -1,12 +0,0 @@
package backend_test
import (
"testing"
"github.com/restic/restic/backend"
)
// TestMemoryBackend runs the generic backend tests against a fresh
// in-memory backend instance.
func TestMemoryBackend(t *testing.T) {
	be := backend.NewMemoryBackend()
	testBackend(be, t)
}

View file

@ -1,24 +1,22 @@
package backend package backend
import ( import "errors"
"errors"
"io"
)
// MockBackend implements a backend whose functions can be specified. This // MockBackend implements a backend whose functions can be specified. This
// should only be used for tests. // should only be used for tests.
type MockBackend struct { type MockBackend struct {
CloseFn func() error CloseFn func() error
CreateFn func() (Blob, error) LoadFn func(h Handle, p []byte, off int64) (int, error)
GetFn func(Type, string) (io.ReadCloser, error) SaveFn func(h Handle, p []byte) error
GetReaderFn func(Type, string, uint, uint) (io.ReadCloser, error) StatFn func(h Handle) (BlobInfo, error)
ListFn func(Type, <-chan struct{}) <-chan string ListFn func(Type, <-chan struct{}) <-chan string
RemoveFn func(Type, string) error RemoveFn func(Type, string) error
TestFn func(Type, string) (bool, error) TestFn func(Type, string) (bool, error)
DeleteFn func() error DeleteFn func() error
LocationFn func() string LocationFn func() string
} }
// Close the backend.
func (m *MockBackend) Close() error { func (m *MockBackend) Close() error {
if m.CloseFn == nil { if m.CloseFn == nil {
return nil return nil
@ -27,6 +25,7 @@ func (m *MockBackend) Close() error {
return m.CloseFn() return m.CloseFn()
} }
// Location returns a location string.
func (m *MockBackend) Location() string { func (m *MockBackend) Location() string {
if m.LocationFn == nil { if m.LocationFn == nil {
return "" return ""
@ -35,30 +34,34 @@ func (m *MockBackend) Location() string {
return m.LocationFn() return m.LocationFn()
} }
func (m *MockBackend) Create() (Blob, error) { // Load loads data from the backend.
if m.CreateFn == nil { func (m *MockBackend) Load(h Handle, p []byte, off int64) (int, error) {
return nil, errors.New("not implemented") if m.LoadFn == nil {
return 0, errors.New("not implemented")
} }
return m.CreateFn() return m.LoadFn(h, p, off)
} }
func (m *MockBackend) Get(t Type, name string) (io.ReadCloser, error) { // Save data in the backend.
if m.GetFn == nil { func (m *MockBackend) Save(h Handle, p []byte) error {
return nil, errors.New("not implemented") if m.SaveFn == nil {
return errors.New("not implemented")
} }
return m.GetFn(t, name) return m.SaveFn(h, p)
} }
func (m *MockBackend) GetReader(t Type, name string, offset, len uint) (io.ReadCloser, error) { // Stat an object in the backend.
if m.GetReaderFn == nil { func (m *MockBackend) Stat(h Handle) (BlobInfo, error) {
return nil, errors.New("not implemented") if m.StatFn == nil {
return BlobInfo{}, errors.New("not implemented")
} }
return m.GetReaderFn(t, name, offset, len) return m.StatFn(h)
} }
// List items of type t.
func (m *MockBackend) List(t Type, done <-chan struct{}) <-chan string { func (m *MockBackend) List(t Type, done <-chan struct{}) <-chan string {
if m.ListFn == nil { if m.ListFn == nil {
ch := make(chan string) ch := make(chan string)
@ -69,6 +72,7 @@ func (m *MockBackend) List(t Type, done <-chan struct{}) <-chan string {
return m.ListFn(t, done) return m.ListFn(t, done)
} }
// Remove data from the backend.
func (m *MockBackend) Remove(t Type, name string) error { func (m *MockBackend) Remove(t Type, name string) error {
if m.RemoveFn == nil { if m.RemoveFn == nil {
return errors.New("not implemented") return errors.New("not implemented")
@ -77,6 +81,7 @@ func (m *MockBackend) Remove(t Type, name string) error {
return m.RemoveFn(t, name) return m.RemoveFn(t, name)
} }
// Test for the existence of a specific item.
func (m *MockBackend) Test(t Type, name string) (bool, error) { func (m *MockBackend) Test(t Type, name string) (bool, error) {
if m.TestFn == nil { if m.TestFn == nil {
return false, errors.New("not implemented") return false, errors.New("not implemented")
@ -85,6 +90,7 @@ func (m *MockBackend) Test(t Type, name string) (bool, error) {
return m.TestFn(t, name) return m.TestFn(t, name)
} }
// Delete all data.
func (m *MockBackend) Delete() error { func (m *MockBackend) Delete() error {
if m.DeleteFn == nil { if m.DeleteFn == nil {
return errors.New("not implemented") return errors.New("not implemented")

View file

@ -2,7 +2,7 @@ package backend
import "os" import "os"
// Default paths for file-based backends (e.g. local) // Paths contains the default paths for file-based backends (e.g. local).
var Paths = struct { var Paths = struct {
Data string Data string
Snapshots string Snapshots string
@ -21,5 +21,6 @@ var Paths = struct {
"config", "config",
} }
// Default modes for file-based backends // Modes holds the default modes for directories and files for file-based
// backends.
var Modes = struct{ Dir, File os.FileMode }{0700, 0600} var Modes = struct{ Dir, File os.FileMode }{0700, 0600}

21
backend/readcloser.go Normal file
View file

@ -0,0 +1,21 @@
package backend
import "io"
// ReadCloser wraps a reader and adds a noop Close method if rd does not implement io.Closer.
func ReadCloser(rd io.Reader) io.ReadCloser {
return readCloser{rd}
}
// readCloser wraps a reader and adds a noop Close method if rd does not implement io.Closer.
type readCloser struct {
io.Reader
}
func (rd readCloser) Close() error {
if r, ok := rd.Reader.(io.Closer); ok {
return r.Close()
}
return nil
}

View file

@ -1,73 +0,0 @@
package backend
import (
"hash"
"io"
)
// HashAppendReader wraps an io.Reader; once the underlying reader returns
// io.EOF, the hash of everything read so far is appended to the stream
// before io.EOF is reported to the caller.
type HashAppendReader struct {
	r      io.Reader
	h      hash.Hash
	sum    []byte // filled with the final hash once the source is drained
	closed bool   // true after the underlying reader returned io.EOF
}

// NewHashAppendReader returns a HashAppendReader hashing with h.
func NewHashAppendReader(r io.Reader, h hash.Hash) *HashAppendReader {
	return &HashAppendReader{
		h:   h,
		r:   io.TeeReader(r, h),
		sum: make([]byte, 0, h.Size()),
	}
}

// Read reads from the underlying reader; after it is exhausted, the
// computed hash bytes are emitted, then io.EOF.
func (h *HashAppendReader) Read(p []byte) (n int, err error) {
	if !h.closed {
		n, err = h.r.Read(p)
		if err == io.EOF {
			// source drained: compute the final hash and start emitting it
			h.closed = true
			h.sum = h.h.Sum(h.sum)
		} else if err != nil {
			return
		}
	}
	if h.closed {
		// output hash
		r := len(p) - n
		if r > 0 {
			c := copy(p[n:], h.sum)
			h.sum = h.sum[c:]
			n += c
			err = nil
		}
		if len(h.sum) == 0 {
			// all hash bytes emitted, signal end of stream
			err = io.EOF
		}
	}
	return
}

// HashingReader hashes all data read through it; the current hash can be
// retrieved with Sum.
type HashingReader struct {
	r io.Reader
	h hash.Hash
}

// NewHashingReader returns a HashingReader hashing with h.
func NewHashingReader(r io.Reader, h hash.Hash) *HashingReader {
	return &HashingReader{
		h: h,
		r: io.TeeReader(r, h),
	}
}

// Read passes through to the wrapped reader; hashing happens via the
// TeeReader installed by the constructor.
func (h *HashingReader) Read(p []byte) (int, error) {
	return h.r.Read(p)
}

// Sum appends the current hash to d and returns it.
func (h *HashingReader) Sum(d []byte) []byte {
	return h.h.Sum(d)
}

View file

@ -1,81 +0,0 @@
package backend_test
import (
"bytes"
"crypto/rand"
"crypto/sha256"
"io"
"io/ioutil"
"testing"
"github.com/restic/restic/backend"
. "github.com/restic/restic/test"
)
// TestHashAppendReader checks that the reader appends exactly the SHA-256
// of the payload and returns io.EOF afterwards, for several payload sizes.
func TestHashAppendReader(t *testing.T) {
	tests := []int{5, 23, 2<<18 + 23, 1 << 20}
	for _, size := range tests {
		data := make([]byte, size)
		_, err := io.ReadFull(rand.Reader, data)
		if err != nil {
			t.Fatalf("ReadFull: %v", err)
		}
		expectedHash := sha256.Sum256(data)
		rd := backend.NewHashAppendReader(bytes.NewReader(data), sha256.New())
		target := bytes.NewBuffer(nil)
		n, err := io.Copy(target, rd)
		OK(t, err)
		Assert(t, n == int64(size)+int64(len(expectedHash)),
			"HashAppendReader: invalid number of bytes read: got %d, expected %d",
			n, size+len(expectedHash))
		r := target.Bytes()
		resultingHash := r[len(r)-len(expectedHash):]
		Assert(t, bytes.Equal(expectedHash[:], resultingHash),
			"HashAppendReader: hashes do not match: expected %02x, got %02x",
			expectedHash, resultingHash)
		// try to read again, must return io.EOF
		n2, err := rd.Read(make([]byte, 100))
		Assert(t, n2 == 0, "HashAppendReader returned %d additional bytes", n)
		Assert(t, err == io.EOF, "HashAppendReader returned %v instead of EOF", err)
	}
}

// TestHashingReader checks that reading through the reader leaves the data
// untouched and that Sum yields the SHA-256 of everything read.
func TestHashingReader(t *testing.T) {
	tests := []int{5, 23, 2<<18 + 23, 1 << 20}
	for _, size := range tests {
		data := make([]byte, size)
		_, err := io.ReadFull(rand.Reader, data)
		if err != nil {
			t.Fatalf("ReadFull: %v", err)
		}
		expectedHash := sha256.Sum256(data)
		rd := backend.NewHashingReader(bytes.NewReader(data), sha256.New())
		n, err := io.Copy(ioutil.Discard, rd)
		OK(t, err)
		Assert(t, n == int64(size),
			"HashAppendReader: invalid number of bytes read: got %d, expected %d",
			n, size)
		resultingHash := rd.Sum(nil)
		Assert(t, bytes.Equal(expectedHash[:], resultingHash),
			"HashAppendReader: hashes do not match: expected %02x, got %02x",
			expectedHash, resultingHash)
		// try to read again, must return io.EOF
		n2, err := rd.Read(make([]byte, 100))
		Assert(t, n2 == 0, "HashAppendReader returned %d additional bytes", n)
		Assert(t, err == io.EOF, "HashAppendReader returned %v instead of EOF", err)
	}
}

View file

@ -0,0 +1,87 @@
// DO NOT EDIT, AUTOMATICALLY GENERATED
package s3_test
import (
"testing"
"github.com/restic/restic/backend/test"
)
// SkipMessage, when set to a non-empty string, causes every test in this
// file to be skipped with that message.
var SkipMessage string

// The functions below are generated wrappers that dispatch to the generic
// backend test suite in the test package; change the generator, not this
// file.

func TestS3BackendCreate(t *testing.T) {
	if SkipMessage != "" {
		t.Skip(SkipMessage)
	}
	test.TestCreate(t)
}

func TestS3BackendOpen(t *testing.T) {
	if SkipMessage != "" {
		t.Skip(SkipMessage)
	}
	test.TestOpen(t)
}

func TestS3BackendCreateWithConfig(t *testing.T) {
	if SkipMessage != "" {
		t.Skip(SkipMessage)
	}
	test.TestCreateWithConfig(t)
}

func TestS3BackendLocation(t *testing.T) {
	if SkipMessage != "" {
		t.Skip(SkipMessage)
	}
	test.TestLocation(t)
}

func TestS3BackendConfig(t *testing.T) {
	if SkipMessage != "" {
		t.Skip(SkipMessage)
	}
	test.TestConfig(t)
}

func TestS3BackendLoad(t *testing.T) {
	if SkipMessage != "" {
		t.Skip(SkipMessage)
	}
	test.TestLoad(t)
}

func TestS3BackendSave(t *testing.T) {
	if SkipMessage != "" {
		t.Skip(SkipMessage)
	}
	test.TestSave(t)
}

func TestS3BackendSaveFilenames(t *testing.T) {
	if SkipMessage != "" {
		t.Skip(SkipMessage)
	}
	test.TestSaveFilenames(t)
}

func TestS3BackendBackend(t *testing.T) {
	if SkipMessage != "" {
		t.Skip(SkipMessage)
	}
	test.TestBackend(t)
}

func TestS3BackendDelete(t *testing.T) {
	if SkipMessage != "" {
		t.Skip(SkipMessage)
	}
	test.TestDelete(t)
}

func TestS3BackendCleanup(t *testing.T) {
	if SkipMessage != "" {
		t.Skip(SkipMessage)
	}
	test.TestCleanup(t)
}

View file

@ -1,16 +0,0 @@
package s3
import "io"
// ContinuousReader implements an io.Reader on top of an io.ReaderAt, advancing
// an offset.
type ContinuousReader struct {
	R      io.ReaderAt
	Offset int64 // position of the next Read
}

// Read fills p starting at the current offset and advances the offset by
// the number of bytes actually read.
func (c *ContinuousReader) Read(p []byte) (int, error) {
	n, err := c.R.ReadAt(p, c.Offset)
	c.Offset += int64(n)
	return n, err
}

View file

@ -12,7 +12,6 @@ import (
"github.com/restic/restic/debug" "github.com/restic/restic/debug"
) )
const maxKeysInList = 1000
const connLimit = 10 const connLimit = 10
const backendPrefix = "restic" const backendPrefix = "restic"
@ -23,7 +22,8 @@ func s3path(t backend.Type, name string) string {
return backendPrefix + "/" + string(t) + "/" + name return backendPrefix + "/" + string(t) + "/" + name
} }
type S3Backend struct { // s3 is a backend which stores the data on an S3 endpoint.
type s3 struct {
client minio.CloudStorageClient client minio.CloudStorageClient
connChan chan struct{} connChan chan struct{}
bucketname string bucketname string
@ -39,7 +39,7 @@ func Open(cfg Config) (backend.Backend, error) {
return nil, err return nil, err
} }
be := &S3Backend{client: client, bucketname: cfg.Bucket} be := &s3{client: client, bucketname: cfg.Bucket}
be.createConnections() be.createConnections()
if err := client.BucketExists(cfg.Bucket); err != nil { if err := client.BucketExists(cfg.Bucket); err != nil {
@ -56,7 +56,7 @@ func Open(cfg Config) (backend.Backend, error) {
return be, nil return be, nil
} }
func (be *S3Backend) createConnections() { func (be *s3) createConnections() {
be.connChan = make(chan struct{}, connLimit) be.connChan = make(chan struct{}, connLimit)
for i := 0; i < connLimit; i++ { for i := 0; i < connLimit; i++ {
be.connChan <- struct{}{} be.connChan <- struct{}{}
@ -64,127 +64,86 @@ func (be *S3Backend) createConnections() {
} }
// Location returns this backend's location (the bucket name). // Location returns this backend's location (the bucket name).
func (be *S3Backend) Location() string { func (be *s3) Location() string {
return be.bucketname return be.bucketname
} }
type s3Blob struct { // Load returns the data stored in the backend for h at the given offset
b *S3Backend // and saves it in p. Load has the same semantics as io.ReaderAt.
buf *bytes.Buffer func (be s3) Load(h backend.Handle, p []byte, off int64) (int, error) {
final bool debug.Log("s3.Load", "%v, offset %v, len %v", h, off, len(p))
} path := s3path(h.Type, h.Name)
func (bb *s3Blob) Write(p []byte) (int, error) {
if bb.final {
return 0, errors.New("blob already closed")
}
n, err := bb.buf.Write(p)
return n, err
}
func (bb *s3Blob) Read(p []byte) (int, error) {
return bb.buf.Read(p)
}
func (bb *s3Blob) Close() error {
bb.final = true
bb.buf.Reset()
return nil
}
func (bb *s3Blob) Size() uint {
return uint(bb.buf.Len())
}
func (bb *s3Blob) Finalize(t backend.Type, name string) error {
debug.Log("s3.blob.Finalize()", "bucket %v, finalize %v, %d bytes", bb.b.bucketname, name, bb.buf.Len())
if bb.final {
return errors.New("Already finalized")
}
bb.final = true
path := s3path(t, name)
// Check key does not already exist
_, err := bb.b.client.StatObject(bb.b.bucketname, path)
if err == nil {
debug.Log("s3.blob.Finalize()", "%v already exists", name)
return errors.New("key already exists")
}
expectedBytes := bb.buf.Len()
<-bb.b.connChan
debug.Log("s3.Finalize", "PutObject(%v, %v, %v, %v)",
bb.b.bucketname, path, int64(bb.buf.Len()), "binary/octet-stream")
n, err := bb.b.client.PutObject(bb.b.bucketname, path, bb.buf, "binary/octet-stream")
debug.Log("s3.Finalize", "finalized %v -> n %v, err %#v", path, n, err)
bb.b.connChan <- struct{}{}
if err != nil {
return err
}
if n != int64(expectedBytes) {
return errors.New("could not store all bytes")
}
return nil
}
// Create creates a new Blob. The data is available only after Finalize()
// has been called on the returned Blob.
func (be *S3Backend) Create() (backend.Blob, error) {
blob := s3Blob{
b: be,
buf: &bytes.Buffer{},
}
return &blob, nil
}
// Get returns a reader that yields the content stored under the given
// name. The reader should be closed after draining it.
func (be *S3Backend) Get(t backend.Type, name string) (io.ReadCloser, error) {
path := s3path(t, name)
rc, err := be.client.GetObject(be.bucketname, path)
debug.Log("s3.Get", "%v %v -> err %v", t, name, err)
if err != nil {
return nil, err
}
return rc, nil
}
// GetReader returns an io.ReadCloser for the Blob with the given name of
// type t at offset and length. If length is 0, the reader reads until EOF.
func (be *S3Backend) GetReader(t backend.Type, name string, offset, length uint) (io.ReadCloser, error) {
debug.Log("s3.GetReader", "%v %v, offset %v len %v", t, name, offset, length)
path := s3path(t, name)
obj, err := be.client.GetObject(be.bucketname, path) obj, err := be.client.GetObject(be.bucketname, path)
if err != nil { if err != nil {
debug.Log("s3.GetReader", " err %v", err) debug.Log("s3.GetReader", " err %v", err)
return nil, err return 0, err
} }
if offset > 0 { if off > 0 {
_, err = obj.Seek(int64(offset), 0) _, err = obj.Seek(off, 0)
if err != nil { if err != nil {
return nil, err return 0, err
} }
} }
if length == 0 { <-be.connChan
return obj, nil defer func() {
be.connChan <- struct{}{}
}()
return io.ReadFull(obj, p)
}
// Save stores data in the backend at the handle.
func (be s3) Save(h backend.Handle, p []byte) (err error) {
if err := h.Valid(); err != nil {
return err
} }
return backend.LimitReadCloser(obj, int64(length)), nil debug.Log("s3.Save", "%v bytes at %d", len(p), h)
path := s3path(h.Type, h.Name)
// Check key does not already exist
_, err = be.client.StatObject(be.bucketname, path)
if err == nil {
debug.Log("s3.blob.Finalize()", "%v already exists", h)
return errors.New("key already exists")
}
<-be.connChan
defer func() {
be.connChan <- struct{}{}
}()
debug.Log("s3.Save", "PutObject(%v, %v, %v, %v)",
be.bucketname, path, int64(len(p)), "binary/octet-stream")
n, err := be.client.PutObject(be.bucketname, path, bytes.NewReader(p), "binary/octet-stream")
debug.Log("s3.Save", "%v -> %v bytes, err %#v", path, n, err)
return err
}
// Stat returns information about a blob.
func (be s3) Stat(h backend.Handle) (backend.BlobInfo, error) {
debug.Log("s3.Stat", "%v")
path := s3path(h.Type, h.Name)
obj, err := be.client.GetObject(be.bucketname, path)
if err != nil {
debug.Log("s3.Stat", "GetObject() err %v", err)
return backend.BlobInfo{}, err
}
fi, err := obj.Stat()
if err != nil {
debug.Log("s3.Stat", "Stat() err %v", err)
return backend.BlobInfo{}, err
}
return backend.BlobInfo{Size: fi.Size}, nil
} }
// Test returns true if a blob of the given type and name exists in the backend. // Test returns true if a blob of the given type and name exists in the backend.
func (be *S3Backend) Test(t backend.Type, name string) (bool, error) { func (be *s3) Test(t backend.Type, name string) (bool, error) {
found := false found := false
path := s3path(t, name) path := s3path(t, name)
_, err := be.client.StatObject(be.bucketname, path) _, err := be.client.StatObject(be.bucketname, path)
@ -197,7 +156,7 @@ func (be *S3Backend) Test(t backend.Type, name string) (bool, error) {
} }
// Remove removes the blob with the given name and type. // Remove removes the blob with the given name and type.
func (be *S3Backend) Remove(t backend.Type, name string) error { func (be *s3) Remove(t backend.Type, name string) error {
path := s3path(t, name) path := s3path(t, name)
err := be.client.RemoveObject(be.bucketname, path) err := be.client.RemoveObject(be.bucketname, path)
debug.Log("s3.Remove", "%v %v -> err %v", t, name, err) debug.Log("s3.Remove", "%v %v -> err %v", t, name, err)
@ -207,7 +166,7 @@ func (be *S3Backend) Remove(t backend.Type, name string) error {
// List returns a channel that yields all names of blobs of type t. A // List returns a channel that yields all names of blobs of type t. A
// goroutine is started for this. If the channel done is closed, sending // goroutine is started for this. If the channel done is closed, sending
// stops. // stops.
func (be *S3Backend) List(t backend.Type, done <-chan struct{}) <-chan string { func (be *s3) List(t backend.Type, done <-chan struct{}) <-chan string {
debug.Log("s3.List", "listing %v", t) debug.Log("s3.List", "listing %v", t)
ch := make(chan string) ch := make(chan string)
@ -235,7 +194,7 @@ func (be *S3Backend) List(t backend.Type, done <-chan struct{}) <-chan string {
} }
// Remove keys for a specified backend type. // Remove keys for a specified backend type.
func (be *S3Backend) removeKeys(t backend.Type) error { func (be *s3) removeKeys(t backend.Type) error {
done := make(chan struct{}) done := make(chan struct{})
defer close(done) defer close(done)
for key := range be.List(backend.Data, done) { for key := range be.List(backend.Data, done) {
@ -249,7 +208,7 @@ func (be *S3Backend) removeKeys(t backend.Type) error {
} }
// Delete removes all restic keys in the bucket. It will not remove the bucket itself. // Delete removes all restic keys in the bucket. It will not remove the bucket itself.
func (be *S3Backend) Delete() error { func (be *s3) Delete() error {
alltypes := []backend.Type{ alltypes := []backend.Type{
backend.Data, backend.Data,
backend.Key, backend.Key,
@ -268,4 +227,4 @@ func (be *S3Backend) Delete() error {
} }
// Close does nothing // Close does nothing
func (be *S3Backend) Close() error { return nil } func (be *s3) Close() error { return nil }

View file

@ -1,7 +1,72 @@
package s3 package s3_test
import "testing" import (
"errors"
"fmt"
"net/url"
"os"
func TestGetReader(t *testing.T) { "github.com/restic/restic/backend"
"github.com/restic/restic/backend/s3"
"github.com/restic/restic/backend/test"
. "github.com/restic/restic/test"
)
//go:generate go run ../test/generate_backend_tests.go
func init() {
if TestS3Server == "" {
SkipMessage = "s3 test server not available"
return
}
url, err := url.Parse(TestS3Server)
if err != nil {
fmt.Fprintf(os.Stderr, "invalid url: %v\n", err)
return
}
cfg := s3.Config{
Endpoint: url.Host,
Bucket: "restictestbucket",
KeyID: os.Getenv("AWS_ACCESS_KEY_ID"),
Secret: os.Getenv("AWS_SECRET_ACCESS_KEY"),
}
if url.Scheme == "http" {
cfg.UseHTTP = true
}
test.CreateFn = func() (backend.Backend, error) {
be, err := s3.Open(cfg)
if err != nil {
return nil, err
}
exists, err := be.Test(backend.Config, "")
if err != nil {
return nil, err
}
if exists {
return nil, errors.New("config already exists")
}
return be, nil
}
test.OpenFn = func() (backend.Backend, error) {
return s3.Open(cfg)
}
// test.CleanupFn = func() error {
// if tempBackendDir == "" {
// return nil
// }
// fmt.Printf("removing test backend at %v\n", tempBackendDir)
// err := os.RemoveAll(tempBackendDir)
// tempBackendDir = ""
// return err
// }
} }

View file

@ -1,42 +0,0 @@
package backend_test
import (
"net/url"
"os"
"testing"
"github.com/restic/restic/backend/s3"
. "github.com/restic/restic/test"
)
type deleter interface {
Delete() error
}
func TestS3Backend(t *testing.T) {
if TestS3Server == "" {
t.Skip("s3 test server not available")
}
url, err := url.Parse(TestS3Server)
OK(t, err)
cfg := s3.Config{
Endpoint: url.Host,
Bucket: "restictestbucket",
KeyID: os.Getenv("AWS_ACCESS_KEY_ID"),
Secret: os.Getenv("AWS_SECRET_ACCESS_KEY"),
}
if url.Scheme == "http" {
cfg.UseHTTP = true
}
be, err := s3.Open(cfg)
OK(t, err)
testBackend(be, t)
del := be.(deleter)
OK(t, del.Delete())
}

View file

@ -0,0 +1,87 @@
// DO NOT EDIT, AUTOMATICALLY GENERATED
package sftp_test
import (
"testing"
"github.com/restic/restic/backend/test"
)
var SkipMessage string
func TestSftpBackendCreate(t *testing.T) {
if SkipMessage != "" {
t.Skip(SkipMessage)
}
test.TestCreate(t)
}
func TestSftpBackendOpen(t *testing.T) {
if SkipMessage != "" {
t.Skip(SkipMessage)
}
test.TestOpen(t)
}
func TestSftpBackendCreateWithConfig(t *testing.T) {
if SkipMessage != "" {
t.Skip(SkipMessage)
}
test.TestCreateWithConfig(t)
}
func TestSftpBackendLocation(t *testing.T) {
if SkipMessage != "" {
t.Skip(SkipMessage)
}
test.TestLocation(t)
}
func TestSftpBackendConfig(t *testing.T) {
if SkipMessage != "" {
t.Skip(SkipMessage)
}
test.TestConfig(t)
}
func TestSftpBackendLoad(t *testing.T) {
if SkipMessage != "" {
t.Skip(SkipMessage)
}
test.TestLoad(t)
}
func TestSftpBackendSave(t *testing.T) {
if SkipMessage != "" {
t.Skip(SkipMessage)
}
test.TestSave(t)
}
func TestSftpBackendSaveFilenames(t *testing.T) {
if SkipMessage != "" {
t.Skip(SkipMessage)
}
test.TestSaveFilenames(t)
}
func TestSftpBackendBackend(t *testing.T) {
if SkipMessage != "" {
t.Skip(SkipMessage)
}
test.TestBackend(t)
}
func TestSftpBackendDelete(t *testing.T) {
if SkipMessage != "" {
t.Skip(SkipMessage)
}
test.TestDelete(t)
}
func TestSftpBackendCleanup(t *testing.T) {
if SkipMessage != "" {
t.Skip(SkipMessage)
}
test.TestCleanup(t)
}

View file

@ -9,18 +9,19 @@ import (
"os" "os"
"os/exec" "os/exec"
"path/filepath" "path/filepath"
"sort"
"strings" "strings"
"github.com/juju/errors" "github.com/juju/errors"
"github.com/pkg/sftp" "github.com/pkg/sftp"
"github.com/restic/restic/backend" "github.com/restic/restic/backend"
"github.com/restic/restic/debug"
) )
const ( const (
tempfileRandomSuffixLength = 10 tempfileRandomSuffixLength = 10
) )
// SFTP is a backend in a directory accessed via SFTP.
type SFTP struct { type SFTP struct {
c *sftp.Client c *sftp.Client
p string p string
@ -63,6 +64,18 @@ func startClient(program string, args ...string) (*SFTP, error) {
return &SFTP{c: client, cmd: cmd}, nil return &SFTP{c: client, cmd: cmd}, nil
} }
func paths(dir string) []string {
return []string{
dir,
Join(dir, backend.Paths.Data),
Join(dir, backend.Paths.Snapshots),
Join(dir, backend.Paths.Index),
Join(dir, backend.Paths.Locks),
Join(dir, backend.Paths.Keys),
Join(dir, backend.Paths.Temp),
}
}
// Open opens an sftp backend. When the command is started via // Open opens an sftp backend. When the command is started via
// exec.Command, it is expected to speak sftp on stdin/stdout. The backend // exec.Command, it is expected to speak sftp on stdin/stdout. The backend
// is expected at the given path. // is expected at the given path.
@ -73,16 +86,7 @@ func Open(dir string, program string, args ...string) (*SFTP, error) {
} }
// test if all necessary dirs and files are there // test if all necessary dirs and files are there
items := []string{ for _, d := range paths(dir) {
dir,
Join(dir, backend.Paths.Data),
Join(dir, backend.Paths.Snapshots),
Join(dir, backend.Paths.Index),
Join(dir, backend.Paths.Locks),
Join(dir, backend.Paths.Keys),
Join(dir, backend.Paths.Temp),
}
for _, d := range items {
if _, err := sftp.c.Lstat(d); err != nil { if _, err := sftp.c.Lstat(d); err != nil {
return nil, fmt.Errorf("%s does not exist", d) return nil, fmt.Errorf("%s does not exist", d)
} }
@ -117,16 +121,6 @@ func Create(dir string, program string, args ...string) (*SFTP, error) {
return nil, err return nil, err
} }
dirs := []string{
dir,
Join(dir, backend.Paths.Data),
Join(dir, backend.Paths.Snapshots),
Join(dir, backend.Paths.Index),
Join(dir, backend.Paths.Locks),
Join(dir, backend.Paths.Keys),
Join(dir, backend.Paths.Temp),
}
// test if config file already exists // test if config file already exists
_, err = sftp.c.Lstat(Join(dir, backend.Paths.Config)) _, err = sftp.c.Lstat(Join(dir, backend.Paths.Config))
if err == nil { if err == nil {
@ -134,7 +128,7 @@ func Create(dir string, program string, args ...string) (*SFTP, error) {
} }
// create paths for data, refs and temp blobs // create paths for data, refs and temp blobs
for _, d := range dirs { for _, d := range paths(dir) {
err = sftp.mkdirAll(d, backend.Modes.Dir) err = sftp.mkdirAll(d, backend.Modes.Dir)
if err != nil { if err != nil {
return nil, err return nil, err
@ -252,64 +246,7 @@ func (r *SFTP) renameFile(oldname string, t backend.Type, name string) error {
return r.c.Chmod(filename, fi.Mode()&os.FileMode(^uint32(0222))) return r.c.Chmod(filename, fi.Mode()&os.FileMode(^uint32(0222)))
} }
type sftpBlob struct { // Join joins the given paths and cleans them afterwards.
f *sftp.File
tempname string
size uint
closed bool
backend *SFTP
}
func (sb *sftpBlob) Finalize(t backend.Type, name string) error {
if sb.closed {
return errors.New("Close() called on closed file")
}
sb.closed = true
err := sb.f.Close()
if err != nil {
return fmt.Errorf("sftp: file.Close: %v", err)
}
// rename file
err = sb.backend.renameFile(sb.tempname, t, name)
if err != nil {
return fmt.Errorf("sftp: renameFile: %v", err)
}
return nil
}
func (sb *sftpBlob) Write(p []byte) (int, error) {
n, err := sb.f.Write(p)
sb.size += uint(n)
return n, err
}
func (sb *sftpBlob) Size() uint {
return sb.size
}
// Create creates a new Blob. The data is available only after Finalize()
// has been called on the returned Blob.
func (r *SFTP) Create() (backend.Blob, error) {
// TODO: make sure that tempfile is removed upon error
// create tempfile in backend
filename, file, err := r.tempFile()
if err != nil {
return nil, errors.Annotate(err, "create tempfile")
}
blob := sftpBlob{
f: file,
tempname: filename,
backend: r,
}
return &blob, nil
}
func Join(parts ...string) string { func Join(parts ...string) string {
return filepath.Clean(strings.Join(parts, "/")) return filepath.Clean(strings.Join(parts, "/"))
} }
@ -344,38 +281,80 @@ func (r *SFTP) dirname(t backend.Type, name string) string {
return Join(r.p, n) return Join(r.p, n)
} }
// Get returns a reader that yields the content stored under the given // Load returns the data stored in the backend for h at the given offset
// name. The reader should be closed after draining it. // and saves it in p. Load has the same semantics as io.ReaderAt.
func (r *SFTP) Get(t backend.Type, name string) (io.ReadCloser, error) { func (r *SFTP) Load(h backend.Handle, p []byte, off int64) (n int, err error) {
// try to open file if err := h.Valid(); err != nil {
file, err := r.c.Open(r.filename(t, name)) return 0, err
if err != nil {
return nil, err
} }
return file, nil f, err := r.c.Open(r.filename(h.Type, h.Name))
}
// GetReader returns an io.ReadCloser for the Blob with the given name of
// type t at offset and length. If length is 0, the reader reads until EOF.
func (r *SFTP) GetReader(t backend.Type, name string, offset, length uint) (io.ReadCloser, error) {
f, err := r.c.Open(r.filename(t, name))
if err != nil { if err != nil {
return nil, err return 0, err
} }
if offset > 0 { defer func() {
_, err = f.Seek(int64(offset), 0) e := f.Close()
if err == nil && e != nil {
err = e
}
}()
if off > 0 {
_, err = f.Seek(off, 0)
if err != nil { if err != nil {
return nil, err return 0, err
} }
} }
if length == 0 { return io.ReadFull(f, p)
return f, nil }
// Save stores data in the backend at the handle.
func (r *SFTP) Save(h backend.Handle, p []byte) (err error) {
if err := h.Valid(); err != nil {
return err
} }
return backend.LimitReadCloser(f, int64(length)), nil filename, tmpfile, err := r.tempFile()
debug.Log("sftp.Save", "save %v (%d bytes) to %v", h, len(p), filename)
n, err := tmpfile.Write(p)
if err != nil {
return err
}
if n != len(p) {
return errors.New("not all bytes writen")
}
err = tmpfile.Close()
if err != nil {
return err
}
err = r.renameFile(filename, h.Type, h.Name)
debug.Log("sftp.Save", "save %v: rename %v: %v",
h, filepath.Base(filename), err)
if err != nil {
return fmt.Errorf("sftp: renameFile: %v", err)
}
return nil
}
// Stat returns information about a blob.
func (r *SFTP) Stat(h backend.Handle) (backend.BlobInfo, error) {
if err := h.Valid(); err != nil {
return backend.BlobInfo{}, err
}
fi, err := r.c.Lstat(r.filename(h.Type, h.Name))
if err != nil {
return backend.BlobInfo{}, err
}
return backend.BlobInfo{Size: fi.Size()}, nil
} }
// Test returns true if a blob of the given type and name exists in the backend. // Test returns true if a blob of the given type and name exists in the backend.
@ -420,8 +399,6 @@ func (r *SFTP) List(t backend.Type, done <-chan struct{}) <-chan string {
dirs = append(dirs, d.Name()) dirs = append(dirs, d.Name())
} }
sort.Strings(dirs)
// read files // read files
for _, dir := range dirs { for _, dir := range dirs {
entries, err := r.c.ReadDir(Join(basedir, dir)) entries, err := r.c.ReadDir(Join(basedir, dir))
@ -434,8 +411,6 @@ func (r *SFTP) List(t backend.Type, done <-chan struct{}) <-chan string {
items = append(items, entry.Name()) items = append(items, entry.Name())
} }
sort.Strings(items)
for _, file := range items { for _, file := range items {
select { select {
case ch <- file: case ch <- file:
@ -455,8 +430,6 @@ func (r *SFTP) List(t backend.Type, done <-chan struct{}) <-chan string {
items = append(items, entry.Name()) items = append(items, entry.Name())
} }
sort.Strings(items)
for _, file := range items { for _, file := range items {
select { select {
case ch <- file: case ch <- file:
@ -472,16 +445,17 @@ func (r *SFTP) List(t backend.Type, done <-chan struct{}) <-chan string {
} }
// Close closes the sftp connection and terminates the underlying command. // Close closes the sftp connection and terminates the underlying command.
func (s *SFTP) Close() error { func (r *SFTP) Close() error {
if s == nil { if r == nil {
return nil return nil
} }
s.c.Close() err := r.c.Close()
debug.Log("sftp.Close", "Close returned error %v", err)
if err := s.cmd.Process.Kill(); err != nil { if err := r.cmd.Process.Kill(); err != nil {
return err return err
} }
return s.cmd.Wait() return r.cmd.Wait()
} }

View file

@ -0,0 +1,80 @@
package sftp_test
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"github.com/restic/restic/backend"
"github.com/restic/restic/backend/sftp"
"github.com/restic/restic/backend/test"
. "github.com/restic/restic/test"
)
var tempBackendDir string
//go:generate go run ../test/generate_backend_tests.go
// createTempdir creates the temporary directory used as the sftp backend
// location and remembers it in tempBackendDir. Subsequent calls are no-ops
// so Create and Open share the same directory.
func createTempdir() error {
	if tempBackendDir != "" {
		return nil
	}

	tempdir, err := ioutil.TempDir("", "restic-local-test-")
	if err != nil {
		return err
	}

	fmt.Printf("created new test backend at %v\n", tempdir)
	tempBackendDir = tempdir
	return nil
}
// init locates a system sftp-server binary and wires up the generic backend
// test hooks (test.CreateFn/OpenFn/CleanupFn). If no binary is found,
// SkipMessage is set so the generated tests skip themselves.
func init() {
	sftpserver := ""

	// search for sftp-server in the candidate directories from TestSFTPPath
	// (a colon-separated list)
	for _, dir := range strings.Split(TestSFTPPath, ":") {
		testpath := filepath.Join(dir, "sftp-server")
		_, err := os.Stat(testpath)
		if !os.IsNotExist(err) {
			sftpserver = testpath
			break
		}
	}

	if sftpserver == "" {
		SkipMessage = "sftp server binary not found, skipping tests"
		return
	}

	test.CreateFn = func() (backend.Backend, error) {
		err := createTempdir()
		if err != nil {
			return nil, err
		}

		return sftp.Create(tempBackendDir, sftpserver)
	}

	test.OpenFn = func() (backend.Backend, error) {
		err := createTempdir()
		if err != nil {
			return nil, err
		}

		return sftp.Open(tempBackendDir, sftpserver)
	}

	test.CleanupFn = func() error {
		if tempBackendDir == "" {
			return nil
		}

		fmt.Printf("removing test backend at %v\n", tempBackendDir)
		err := os.RemoveAll(tempBackendDir)
		// reset so a later Create starts with a fresh directory
		tempBackendDir = ""
		return err
	}
}

View file

@ -1,65 +0,0 @@
package backend_test
import (
"io/ioutil"
"os"
"path/filepath"
"strings"
"testing"
"github.com/restic/restic/backend/sftp"
. "github.com/restic/restic/test"
)
func setupSFTPBackend(t *testing.T) *sftp.SFTP {
sftpserver := ""
for _, dir := range strings.Split(TestSFTPPath, ":") {
testpath := filepath.Join(dir, "sftp-server")
fd, err := os.Open(testpath)
fd.Close()
if !os.IsNotExist(err) {
sftpserver = testpath
break
}
}
if sftpserver == "" {
return nil
}
tempdir, err := ioutil.TempDir("", "restic-test-")
OK(t, err)
b, err := sftp.Create(tempdir, sftpserver)
OK(t, err)
t.Logf("created sftp backend locally at %s", tempdir)
return b
}
func teardownSFTPBackend(t *testing.T, b *sftp.SFTP) {
if !TestCleanup {
t.Logf("leaving backend at %s\n", b.Location())
return
}
err := os.RemoveAll(b.Location())
OK(t, err)
}
func TestSFTPBackend(t *testing.T) {
if !RunIntegrationTest {
t.Skip("integration tests disabled")
}
s := setupSFTPBackend(t)
if s == nil {
t.Skip("unable to find sftp-server binary")
return
}
defer teardownSFTPBackend(t, s)
testBackend(s, t)
}

View file

@ -0,0 +1,87 @@
// DO NOT EDIT, AUTOMATICALLY GENERATED
package test_test
import (
"testing"
"github.com/restic/restic/backend/test"
)
var SkipMessage string
func TestTestBackendCreate(t *testing.T) {
if SkipMessage != "" {
t.Skip(SkipMessage)
}
test.TestCreate(t)
}
func TestTestBackendOpen(t *testing.T) {
if SkipMessage != "" {
t.Skip(SkipMessage)
}
test.TestOpen(t)
}
func TestTestBackendCreateWithConfig(t *testing.T) {
if SkipMessage != "" {
t.Skip(SkipMessage)
}
test.TestCreateWithConfig(t)
}
func TestTestBackendLocation(t *testing.T) {
if SkipMessage != "" {
t.Skip(SkipMessage)
}
test.TestLocation(t)
}
func TestTestBackendConfig(t *testing.T) {
if SkipMessage != "" {
t.Skip(SkipMessage)
}
test.TestConfig(t)
}
func TestTestBackendLoad(t *testing.T) {
if SkipMessage != "" {
t.Skip(SkipMessage)
}
test.TestLoad(t)
}
func TestTestBackendSave(t *testing.T) {
if SkipMessage != "" {
t.Skip(SkipMessage)
}
test.TestSave(t)
}
func TestTestBackendSaveFilenames(t *testing.T) {
if SkipMessage != "" {
t.Skip(SkipMessage)
}
test.TestSaveFilenames(t)
}
func TestTestBackendBackend(t *testing.T) {
if SkipMessage != "" {
t.Skip(SkipMessage)
}
test.TestBackend(t)
}
func TestTestBackendDelete(t *testing.T) {
if SkipMessage != "" {
t.Skip(SkipMessage)
}
test.TestDelete(t)
}
func TestTestBackendCleanup(t *testing.T) {
if SkipMessage != "" {
t.Skip(SkipMessage)
}
test.TestCleanup(t)
}

View file

@ -0,0 +1,140 @@
// +build ignore
package main
import (
"bufio"
"flag"
"fmt"
"io"
"log"
"os"
"os/exec"
"path/filepath"
"regexp"
"text/template"
"unicode"
"unicode/utf8"
)
var data struct {
Package string
PackagePrefix string
Funcs []string
}
var testTemplate = `
// DO NOT EDIT, AUTOMATICALLY GENERATED
package {{ .Package }}
import (
"testing"
"github.com/restic/restic/backend/test"
)
var SkipMessage string
{{ $prefix := .PackagePrefix }}
{{ range $f := .Funcs }}
func Test{{ $prefix }}{{ $f }}(t *testing.T){
if SkipMessage != "" { t.Skip(SkipMessage) }
test.Test{{ $f }}(t)
}
{{ end }}
`
// Command-line flags controlling where test functions are discovered and
// where the generated test file is written.
var testFile = flag.String("testfile", "../test/tests.go", "file to search test functions in")
var outputFile = flag.String("output", "backend_test.go", "output file to write generated code to")
var packageName = flag.String("package", "", "the package name to use")
var prefix = flag.String("prefix", "", "test function prefix")
var quiet = flag.Bool("quiet", false, "be quiet")
// errx aborts the program with a message on stderr when err is non-nil;
// it is a no-op for a nil error.
func errx(err error) {
	if err != nil {
		fmt.Fprintf(os.Stderr, "error: %v\n", err)
		os.Exit(1)
	}
}
// funcRegex matches lines declaring a test function and captures the part
// of the name after the "Test" prefix.
var funcRegex = regexp.MustCompile(`^func\s+Test(.+)\s*\(`)

// findTestFunctions scans the file named by the -testfile flag and returns
// the captured names of all test functions found in it.
func findTestFunctions() (funcs []string) {
	f, err := os.Open(*testFile)
	errx(err)

	sc := bufio.NewScanner(f)
	for sc.Scan() {
		if m := funcRegex.FindStringSubmatch(sc.Text()); m != nil {
			funcs = append(funcs, m[1])
		}
	}
	if err := sc.Err(); err != nil {
		log.Fatalf("Error scanning file: %v", err)
	}

	errx(f.Close())
	return funcs
}
// generateOutput executes the test template with the given data and pipes
// the result through gofmt, writing the formatted source to wr. Any failure
// aborts the program via errx.
func generateOutput(wr io.Writer, data interface{}) {
	tmpl := template.Must(template.New("backendtest").Parse(testTemplate))

	gofmt := exec.Command("gofmt")
	gofmt.Stdout = wr

	stdin, err := gofmt.StdinPipe()
	errx(err)
	errx(gofmt.Start())
	errx(tmpl.Execute(stdin, data))
	errx(stdin.Close())
	errx(gofmt.Wait())
}
// packageTestFunctionPrefix returns pkg with its first rune upper-cased,
// suitable for building an exported Go test function name. The empty string
// is returned unchanged.
func packageTestFunctionPrefix(pkg string) string {
	if pkg == "" {
		return ""
	}

	first, size := utf8.DecodeRuneInString(pkg)
	return string(unicode.ToUpper(first)) + pkg[size:]
}
// main generates a backend test file for the package in the current
// directory (or the one given via -package) and writes it to the file named
// by -output. It is run via "go generate" from the backend packages.
func main() {
	// parse flags here rather than in init(): calling flag.Parse from init
	// is fragile (it runs before other packages' init may have registered
	// their flags) and is discouraged.
	flag.Parse()

	dir, err := os.Getwd()
	if err != nil {
		fmt.Fprintf(os.Stderr, "Getwd() %v\n", err)
		os.Exit(1)
	}

	// default the package name to the current directory's base name
	pkg := *packageName
	if pkg == "" {
		pkg = filepath.Base(dir)
	}

	f, err := os.Create(*outputFile)
	errx(err)

	data.Package = pkg + "_test"

	if *prefix != "" {
		data.PackagePrefix = *prefix
	} else {
		data.PackagePrefix = packageTestFunctionPrefix(pkg) + "Backend"
	}

	data.Funcs = findTestFunctions()

	generateOutput(f, data)
	errx(f.Close())

	if !*quiet {
		fmt.Printf("wrote backend tests for package %v to %v\n", data.Package, *outputFile)
	}
}

514
backend/test/tests.go Normal file
View file

@ -0,0 +1,514 @@
package test
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"math/rand"
"reflect"
"sort"
"testing"
"github.com/restic/restic/backend"
. "github.com/restic/restic/test"
)
// CreateFn is a function that creates a temporary repository for the tests.
var CreateFn func() (backend.Backend, error)

// OpenFn is a function that opens a previously created temporary repository.
var OpenFn func() (backend.Backend, error)

// CleanupFn removes temporary files and directories created during the tests.
var CleanupFn func() error

// but holds the shared backend instance the exported Test* functions run
// against; nil after close(t) until the next open(t).
var but backend.Backend // backendUnderTest

// butInitialized records whether CreateFn has been called successfully once.
var butInitialized bool
// open returns the backend under test. On first use the backend is created
// via CreateFn; if a previous test closed it (but == nil), it is re-opened
// via OpenFn.
func open(t testing.TB) backend.Backend {
	if OpenFn == nil {
		t.Fatal("OpenFn not set")
	}

	if CreateFn == nil {
		t.Fatalf("CreateFn not set")
	}

	// create the backend once per test binary run
	if !butInitialized {
		be, err := CreateFn()
		if err != nil {
			t.Fatalf("Create returned unexpected error: %v", err)
		}

		but = be
		butInitialized = true
	}

	// re-open after a previous close(t)
	if but == nil {
		var err error
		but, err = OpenFn()
		if err != nil {
			t.Fatalf("Open returned unexpected error: %v", err)
		}
	}

	return but
}
// close closes the backend under test and resets it so the next open(t)
// re-opens it via OpenFn.
// NOTE(review): this shadows the builtin close(); renaming would touch every
// caller in this package.
func close(t testing.TB) {
	if but == nil {
		t.Fatalf("trying to close non-existing backend")
	}

	err := but.Close()
	if err != nil {
		t.Fatalf("Close returned unexpected error: %v", err)
	}

	// force open to use OpenFn next time
	but = nil
}
// TestCreate creates a backend via CreateFn, closes it again and marks the
// backend as initialized for the remaining tests.
func TestCreate(t testing.TB) {
	if CreateFn == nil {
		t.Fatalf("CreateFn not set!")
	}

	be, err := CreateFn()
	if err != nil {
		// (removed leftover debug print "foo" that cluttered test output)
		t.Fatalf("Create returned error: %v", err)
	}

	// record that the backend exists so open() does not create it again
	butInitialized = true

	err = be.Close()
	if err != nil {
		t.Fatalf("Close returned error: %v", err)
	}
}
// TestOpen opens a previously created backend and closes it again.
func TestOpen(t testing.TB) {
	if OpenFn == nil {
		t.Fatalf("OpenFn not set!")
	}

	be, err := OpenFn()
	if err != nil {
		t.Fatalf("Open returned error: %v", err)
	}

	if err := be.Close(); err != nil {
		t.Fatalf("Close returned error: %v", err)
	}
}
// TestCreateWithConfig tests that creating a backend in a location which already
// has a config file fails.
func TestCreateWithConfig(t testing.TB) {
	if CreateFn == nil {
		t.Fatalf("CreateFn not set")
	}

	b := open(t)
	defer close(t)

	// save a config
	store(t, b, backend.Config, []byte("test config"))

	// now create the backend again, this must fail
	_, err := CreateFn()
	if err == nil {
		t.Fatalf("expected error not found for creating a backend with an existing config file")
	}

	// remove config so later tests start from a clean state
	err = b.Remove(backend.Config, "")
	if err != nil {
		t.Fatalf("unexpected error removing config: %v", err)
	}
}
// TestLocation tests that the backend returns a non-empty location string.
func TestLocation(t testing.TB) {
	b := open(t)
	defer close(t)

	if loc := b.Location(); loc == "" {
		t.Fatalf("invalid location string %q", loc)
	}
}
// TestConfig saves and loads a config from the backend. The config must be
// reachable regardless of the name used in the handle.
func TestConfig(t testing.TB) {
	b := open(t)
	defer close(t)

	var testString = "Config"

	// create config and read it back
	_, err := backend.LoadAll(b, backend.Handle{Type: backend.Config}, nil)
	if err == nil {
		t.Fatalf("did not get expected error for non-existing config")
	}

	err = b.Save(backend.Handle{Type: backend.Config}, []byte(testString))
	if err != nil {
		t.Fatalf("Save() error: %v", err)
	}

	// try accessing the config with different names, should all return the
	// same config
	for _, name := range []string{"", "foo", "bar", "0000000000000000000000000000000000000000000000000000000000000000"} {
		// BUG FIX: the name under test must actually be passed in the
		// handle, otherwise every iteration loads the same (empty) name
		// and the check is meaningless.
		h := backend.Handle{Type: backend.Config, Name: name}
		buf, err := backend.LoadAll(b, h, nil)
		if err != nil {
			t.Fatalf("unable to read config with name %q: %v", name, err)
		}

		if string(buf) != testString {
			t.Fatalf("wrong data returned, want %q, got %q", testString, string(buf))
		}
	}
}
// TestLoad tests the backend's Load function: invalid handles and missing
// blobs must return errors, and random (offset, length) reads of a stored
// blob must return exactly the expected bytes.
func TestLoad(t testing.TB) {
	b := open(t)
	defer close(t)

	// an invalid (zero) handle must be rejected
	_, err := b.Load(backend.Handle{}, nil, 0)
	if err == nil {
		t.Fatalf("Load() did not return an error for invalid handle")
	}

	// loading a non-existing blob must fail
	_, err = b.Load(backend.Handle{Type: backend.Data, Name: "foobar"}, nil, 0)
	if err == nil {
		t.Fatalf("Load() did not return an error for non-existing blob")
	}

	// store a blob of random content (deterministic, seed 23)
	length := rand.Intn(1<<24) + 2000
	data := Random(23, length)
	id := backend.Hash(data)

	handle := backend.Handle{Type: backend.Data, Name: id.String()}
	err = b.Save(handle, data)
	if err != nil {
		t.Fatalf("Save() error: %v", err)
	}

	// read random ranges, deliberately also ones reaching past the end of
	// the blob
	for i := 0; i < 50; i++ {
		l := rand.Intn(length + 2000)
		o := rand.Intn(length + 2000)

		// d is narrowed down to the bytes we expect for this (o, l) pair
		d := data
		if o < len(d) {
			d = d[o:]
		} else {
			// offset beyond the blob: expect zero bytes
			o = len(d)
			d = d[:0]
		}

		if l > 0 && l < len(d) {
			d = d[:l]
		}

		buf := make([]byte, l)
		n, err := b.Load(handle, buf, int64(o))

		// if we requested data beyond the end of the file, ignore
		// ErrUnexpectedEOF error
		if l > len(d) && err == io.ErrUnexpectedEOF {
			err = nil
			buf = buf[:len(d)]
		}

		if err != nil {
			t.Errorf("Load(%d, %d): unexpected error: %v", len(buf), int64(o), err)
			continue
		}

		if n != len(buf) {
			t.Errorf("Load(%d, %d): wrong length returned, want %d, got %d",
				len(buf), int64(o), len(buf), n)
			continue
		}

		buf = buf[:n]
		if !bytes.Equal(buf, d) {
			t.Errorf("Load(%d, %d) returned wrong bytes", len(buf), int64(o))
			continue
		}
	}

	OK(t, b.Remove(backend.Data, id.String()))
}
// TestSave stores several random blobs in the backend and verifies via
// LoadAll and Stat that content and size round-trip unchanged.
func TestSave(t testing.TB) {
	b := open(t)
	defer close(t)
	var id backend.ID

	for i := 0; i < 10; i++ {
		length := rand.Intn(1<<23) + 200000
		data := Random(23, length)
		// use the first 32 byte as the ID
		copy(id[:], data)

		h := backend.Handle{
			Type: backend.Data,
			Name: fmt.Sprintf("%s-%d", id, i),
		}
		err := b.Save(h, data)
		OK(t, err)

		buf, err := backend.LoadAll(b, h, nil)
		OK(t, err)
		if len(buf) != len(data) {
			t.Fatalf("number of bytes does not match, want %v, got %v", len(data), len(buf))
		}

		if !bytes.Equal(buf, data) {
			t.Fatalf("data not equal")
		}

		fi, err := b.Stat(h)
		OK(t, err)

		// BUG FIX: the format verb for the expected size was %q (quoted
		// character literal), which mangles the integer; use %d.
		if fi.Size != int64(len(data)) {
			t.Fatalf("Stat() returned different size, want %d, got %d", len(data), fi.Size)
		}

		err = b.Remove(h.Type, h.Name)
		if err != nil {
			t.Fatalf("error removing item: %v", err)
		}
	}
}
// filenameTests pairs blob names (both hash-shaped and arbitrary strings)
// with the content to store under that name, to check that backends accept
// unusual file names.
var filenameTests = []struct {
	name string
	data string
}{
	{"1dfc6bc0f06cb255889e9ea7860a5753e8eb9665c9a96627971171b444e3113e", "x"},
	{"foobar", "foobar"},
	{
		"1dfc6bc0f06cb255889e9ea7860a5753e8eb9665c9a96627971171b444e3113e4bf8f2d9144cc5420a80f04a4880ad6155fc58903a4fb6457c476c43541dcaa6-5",
		"foobar content of data blob",
	},
}
// TestSaveFilenames tests saving data with various file names in the backend.
// Failures are reported per test case; each case cleans up after itself.
func TestSaveFilenames(t testing.TB) {
	b := open(t)
	defer close(t)

	for i, test := range filenameTests {
		h := backend.Handle{Name: test.name, Type: backend.Data}
		err := b.Save(h, []byte(test.data))
		if err != nil {
			t.Errorf("test %d failed: Save() returned %v", i, err)
			continue
		}

		// read the data back and compare
		buf, err := backend.LoadAll(b, h, nil)
		if err != nil {
			t.Errorf("test %d failed: Load() returned %v", i, err)
			continue
		}

		if !bytes.Equal(buf, []byte(test.data)) {
			t.Errorf("test %d: returned wrong bytes", i)
		}

		err = b.Remove(h.Type, h.Name)
		if err != nil {
			t.Errorf("test %d failed: Remove() returned %v", i, err)
			continue
		}
	}
}
// testStrings pairs content with its hash (hex, as produced by backend.Hash)
// which is used as the blob name when storing.
var testStrings = []struct {
	id string
	data string
}{
	{"c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2", "foobar"},
	{"248d6a61d20638b8e5c026930c3e6039a33ce45964ff2167f6ecedd419db06c1", "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq"},
	{"cc5d46bdb4991c6eae3eb739c9c8a7a46fe9654fab79c47b4fe48383b5b25e1c", "foo/bar"},
	{"4e54d2c721cbdb730f01b10b62dec622962b36966ec685880effa63d71c808f2", "foo/../../baz"},
}

// store saves data under its hash as the name, with the given type, and
// fails the test on error.
func store(t testing.TB, b backend.Backend, tpe backend.Type, data []byte) {
	id := backend.Hash(data)
	err := b.Save(backend.Handle{Name: id.String(), Type: tpe}, data)
	OK(t, err)
}
// read drains rd completely and, when expectedData is non-nil, checks that
// the bytes read equal it.
// NOTE(review): not referenced in the visible part of this file — possibly
// used elsewhere; verify before removing.
func read(t testing.TB, rd io.Reader, expectedData []byte) {
	buf, err := ioutil.ReadAll(rd)
	OK(t, err)

	if expectedData != nil {
		Equals(t, expectedData, buf)
	}
}
// TestBackend exercises the full backend contract for every blob type:
// non-existing blobs are not found, stored blobs round-trip (fully and with
// offset/length), duplicate saves fail, removal works, and List returns
// exactly the stored names.
func TestBackend(t testing.TB) {
	b := open(t)
	defer close(t)

	for _, tpe := range []backend.Type{
		backend.Data, backend.Key, backend.Lock,
		backend.Snapshot, backend.Index,
	} {
		// detect non-existing files
		for _, test := range testStrings {
			id, err := backend.ParseID(test.id)
			OK(t, err)

			// test if blob is already in repository
			ret, err := b.Test(tpe, id.String())
			OK(t, err)
			Assert(t, !ret, "blob was found to exist before creating")

			// try to stat a not existing blob
			h := backend.Handle{Type: tpe, Name: id.String()}
			_, err = b.Stat(h)
			Assert(t, err != nil, "blob data could be extracted before creation")

			// try to read not existing blob
			_, err = b.Load(h, nil, 0)
			Assert(t, err != nil, "blob reader could be obtained before creation")

			// try to get string out, should fail
			ret, err = b.Test(tpe, id.String())
			OK(t, err)
			Assert(t, !ret, "id %q was found (but should not have)", test.id)
		}

		// add files
		for _, test := range testStrings {
			store(t, b, tpe, []byte(test.data))

			// test Load()
			h := backend.Handle{Type: tpe, Name: test.id}
			buf, err := backend.LoadAll(b, h, nil)
			OK(t, err)
			Equals(t, test.data, string(buf))

			// try to read it out with an offset and a length
			start := 1
			end := len(test.data) - 2
			length := end - start

			buf2 := make([]byte, length)
			n, err := b.Load(h, buf2, int64(start))
			OK(t, err)
			Equals(t, length, n)
			Equals(t, test.data[start:end], string(buf2))
		}

		// test adding the first file again
		test := testStrings[0]

		// saving over an existing blob must fail
		err := b.Save(backend.Handle{Type: tpe, Name: test.id}, []byte(test.data))
		Assert(t, err != nil, "expected error, got %v", err)

		// remove and recreate
		err = b.Remove(tpe, test.id)
		OK(t, err)

		// test that the blob is gone
		ok, err := b.Test(tpe, test.id)
		OK(t, err)
		Assert(t, ok == false, "removed blob still present")

		// create blob
		err = b.Save(backend.Handle{Type: tpe, Name: test.id}, []byte(test.data))
		OK(t, err)

		// list items
		IDs := backend.IDs{}

		for _, test := range testStrings {
			id, err := backend.ParseID(test.id)
			OK(t, err)
			IDs = append(IDs, id)
		}

		list := backend.IDs{}
		for s := range b.List(tpe, nil) {
			list = append(list, ParseID(s))
		}

		if len(IDs) != len(list) {
			t.Fatalf("wrong number of IDs returned: want %d, got %d", len(IDs), len(list))
		}

		sort.Sort(IDs)
		sort.Sort(list)

		if !reflect.DeepEqual(IDs, list) {
			t.Fatalf("lists aren't equal, want:\n  %v\n  got:\n%v\n", IDs, list)
		}

		// remove content if requested
		if TestCleanupTempDirs {
			for _, test := range testStrings {
				id, err := backend.ParseID(test.id)
				OK(t, err)

				// BUG FIX: the first Test() result was previously assigned
				// but never checked; assert the blob is present before
				// removal.
				found, err := b.Test(tpe, id.String())
				OK(t, err)
				Assert(t, found, "id %q not found before removal", id)

				OK(t, b.Remove(tpe, id.String()))

				found, err = b.Test(tpe, id.String())
				OK(t, err)
				// BUG FIX: the failure message was inverted ("not found
				// after removal" fired when the id WAS still found) and
				// needlessly wrapped in fmt.Sprintf although Assert takes
				// format arguments.
				Assert(t, !found, "id %q still found after removal", id)
			}
		}
	}
}
// TestDelete tests the Delete function.
func TestDelete(t testing.TB) {
	b := open(t)
	defer close(t)

	// Deletion is an optional capability; backends that do not implement
	// backend.Deleter are simply skipped.
	del, ok := b.(backend.Deleter)
	if !ok {
		return
	}

	if err := del.Delete(); err != nil {
		t.Fatalf("error deleting backend: %v", err)
	}
}
// TestCleanup runs the cleanup function after all tests are run.
func TestCleanup(t testing.TB) {
	// Nothing to do when no cleanup hook was registered, or when the
	// temporary data should be kept for inspection.
	switch {
	case CleanupFn == nil:
		t.Log("CleanupFn function not set")
		return
	case !TestCleanupTempDirs:
		t.Logf("not cleaning up backend")
		return
	}

	if err := CleanupFn(); err != nil {
		t.Fatalf("Cleanup returned error: %v", err)
	}
}

View file

@ -0,0 +1,38 @@
package test_test
import (
"errors"
"github.com/restic/restic/backend"
"github.com/restic/restic/backend/mem"
"github.com/restic/restic/backend/test"
)
var be backend.Backend
//go:generate go run ../test/generate_backend_tests.go
// init wires the in-memory backend into the generic backend test hooks.
func init() {
	test.CreateFn = func() (backend.Backend, error) {
		// Creating a second backend without a cleanup in between is an error.
		if be == nil {
			be = mem.New()
			return be, nil
		}
		return nil, errors.New("temporary memory backend dir already exists")
	}

	test.OpenFn = func() (backend.Backend, error) {
		// Opening requires a previous successful create.
		if be == nil {
			return nil, errors.New("repository not initialized")
		}
		return be, nil
	}

	test.CleanupFn = func() error {
		// Dropping the reference lets a later create start fresh.
		be = nil
		return nil
	}
}

18
backend/utils.go Normal file
View file

@ -0,0 +1,18 @@
package backend
// LoadAll reads all data stored in the backend for the handle. The buffer buf
// is resized to accommodate all data in the blob: it is grown when too small,
// and truncated to the blob size when larger, so a buffer from a previous
// call can safely be reused for blobs of different sizes.
func LoadAll(be Backend, h Handle, buf []byte) ([]byte, error) {
	fi, err := be.Stat(h)
	if err != nil {
		return nil, err
	}

	if fi.Size > int64(len(buf)) {
		// buffer too small, allocate a fresh one of the right size
		buf = make([]byte, int(fi.Size))
	} else {
		// shrink an oversized (reused) buffer so Load is not asked to read
		// past the end of the blob
		buf = buf[:fi.Size]
	}

	n, err := be.Load(h, buf, 0)
	buf = buf[:n]
	return buf, err
}

39
backend/utils_test.go Normal file
View file

@ -0,0 +1,39 @@
package backend_test
import (
"bytes"
"math/rand"
"testing"
"github.com/restic/restic/backend"
"github.com/restic/restic/backend/mem"
. "github.com/restic/restic/test"
)
// Binary size units used to pick random test blob sizes.
const KiB = 1 << 10
const MiB = 1 << 20
// TestLoadAll saves a number of random blobs into an in-memory backend and
// checks that LoadAll returns exactly the bytes that were stored.
func TestLoadAll(t *testing.T) {
	b := mem.New()

	for i := 0; i < 20; i++ {
		// random blob between 500 KiB and ~1.5 MiB
		data := Random(23+i, rand.Intn(MiB)+500*KiB)
		id := backend.Hash(data)

		OK(t, b.Save(backend.Handle{Name: id.String(), Type: backend.Data}, data))

		buf, err := backend.LoadAll(b, backend.Handle{Type: backend.Data, Name: id.String()}, nil)
		OK(t, err)

		if len(buf) != len(data) {
			t.Errorf("length of returned buffer does not match, want %d, got %d", len(data), len(buf))
			continue
		}

		if !bytes.Equal(buf, data) {
			t.Errorf("wrong data returned")
			continue
		}
	}
}

View file

@ -1,38 +0,0 @@
package backend
import (
"hash"
"io"
)
// HashingWriter wraps an io.Writer to hashes all data that is written to it.
type HashingWriter struct {
w io.Writer
h hash.Hash
size int
}
// NewHashAppendWriter wraps the writer w and feeds all data written to the hash h.
func NewHashingWriter(w io.Writer, h hash.Hash) *HashingWriter {
return &HashingWriter{
h: h,
w: io.MultiWriter(w, h),
}
}
// Write wraps the write method of the underlying writer and also hashes all data.
func (h *HashingWriter) Write(p []byte) (int, error) {
n, err := h.w.Write(p)
h.size += n
return n, err
}
// Sum returns the hash of all data written so far.
func (h *HashingWriter) Sum(d []byte) []byte {
return h.h.Sum(d)
}
// Size returns the number of bytes written to the underlying writer.
func (h *HashingWriter) Size() int {
return h.size
}

View file

@ -1,45 +0,0 @@
package backend_test
import (
"bytes"
"crypto/rand"
"crypto/sha256"
"io"
"io/ioutil"
"testing"
"github.com/restic/restic/backend"
. "github.com/restic/restic/test"
)
// TestHashingWriter checks that HashingWriter reports the same SHA-256 digest
// as hashing the data directly, and that Size returns the number of bytes
// written.
func TestHashingWriter(t *testing.T) {
	tests := []int{5, 23, 2<<18 + 23, 1 << 20}

	for _, size := range tests {
		data := make([]byte, size)
		_, err := io.ReadFull(rand.Reader, data)
		if err != nil {
			t.Fatalf("ReadFull: %v", err)
		}

		expectedHash := sha256.Sum256(data)

		wr := backend.NewHashingWriter(ioutil.Discard, sha256.New())

		n, err := io.Copy(wr, bytes.NewReader(data))
		OK(t, err)
		Assert(t, n == int64(size),
			"HashingWriter: invalid number of bytes written: got %d, expected %d",
			n, size)
		// BUG FIX: the format argument must be the call wr.Size(), not the
		// method value wr.Size, which would print a function reference.
		Assert(t, wr.Size() == size,
			"HashingWriter: invalid number of bytes returned: got %d, expected %d",
			wr.Size(), size)

		resultingHash := wr.Sum(nil)
		Assert(t, bytes.Equal(expectedHash[:], resultingHash),
			"HashingWriter: hashes do not match: expected %02x, got %02x",
			expectedHash, resultingHash)
	}
}

View file

@ -4,7 +4,6 @@ import (
"bytes" "bytes"
"errors" "errors"
"fmt" "fmt"
"io/ioutil"
"sync" "sync"
"github.com/restic/restic" "github.com/restic/restic"
@ -647,17 +646,8 @@ func (c *Checker) CountPacks() uint64 {
// checkPack reads a pack and checks the integrity of all blobs. // checkPack reads a pack and checks the integrity of all blobs.
func checkPack(r *repository.Repository, id backend.ID) error { func checkPack(r *repository.Repository, id backend.ID) error {
debug.Log("Checker.checkPack", "checking pack %v", id.Str()) debug.Log("Checker.checkPack", "checking pack %v", id.Str())
rd, err := r.Backend().Get(backend.Data, id.String()) h := backend.Handle{Type: backend.Data, Name: id.String()}
if err != nil { buf, err := backend.LoadAll(r.Backend(), h, nil)
return err
}
buf, err := ioutil.ReadAll(rd)
if err != nil {
return err
}
err = rd.Close()
if err != nil { if err != nil {
return err return err
} }

View file

@ -1,7 +1,7 @@
package checker_test package checker_test
import ( import (
"io" "fmt"
"math/rand" "math/rand"
"path/filepath" "path/filepath"
"sort" "sort"
@ -9,6 +9,7 @@ import (
"github.com/restic/restic" "github.com/restic/restic"
"github.com/restic/restic/backend" "github.com/restic/restic/backend"
"github.com/restic/restic/backend/mem"
"github.com/restic/restic/checker" "github.com/restic/restic/checker"
"github.com/restic/restic/repository" "github.com/restic/restic/repository"
. "github.com/restic/restic/test" . "github.com/restic/restic/test"
@ -212,37 +213,22 @@ func TestDuplicatePacksInIndex(t *testing.T) {
// errorBackend randomly modifies data after reading. // errorBackend randomly modifies data after reading.
type errorBackend struct { type errorBackend struct {
backend.Backend backend.Backend
ProduceErrors bool
} }
func (b errorBackend) Get(t backend.Type, name string) (io.ReadCloser, error) { func (b errorBackend) Load(h backend.Handle, p []byte, off int64) (int, error) {
rd, err := b.Backend.Get(t, name) fmt.Printf("load %v\n", h)
if err != nil { n, err := b.Backend.Load(h, p, off)
return rd, err
if b.ProduceErrors {
induceError(p)
} }
return n, err
if t != backend.Data {
return rd, err
}
return backend.ReadCloser(faultReader{rd}), nil
}
func (b errorBackend) GetReader(t backend.Type, name string, offset, length uint) (io.ReadCloser, error) {
rd, err := b.Backend.GetReader(t, name, offset, length)
if err != nil {
return rd, err
}
if t != backend.Data {
return rd, err
}
return backend.ReadCloser(faultReader{rd}), nil
} }
// induceError flips a bit in the slice. // induceError flips a bit in the slice.
func induceError(data []byte) { func induceError(data []byte) {
if rand.Float32() < 0.8 { if rand.Float32() < 0.2 {
return return
} }
@ -250,22 +236,8 @@ func induceError(data []byte) {
data[pos] ^= 1 data[pos] ^= 1
} }
// faultReader wraps a reader and randomly modifies data on read.
type faultReader struct {
rd io.Reader
}
func (f faultReader) Read(p []byte) (int, error) {
n, err := f.rd.Read(p)
if n > 0 {
induceError(p)
}
return n, err
}
func TestCheckerModifiedData(t *testing.T) { func TestCheckerModifiedData(t *testing.T) {
be := backend.NewMemoryBackend() be := mem.New()
repo := repository.New(be) repo := repository.New(be)
OK(t, repo.Init(TestPassword)) OK(t, repo.Init(TestPassword))
@ -275,7 +247,8 @@ func TestCheckerModifiedData(t *testing.T) {
OK(t, err) OK(t, err)
t.Logf("archived as %v", id.Str()) t.Logf("archived as %v", id.Str())
checkRepo := repository.New(errorBackend{be}) beError := &errorBackend{Backend: be}
checkRepo := repository.New(beError)
OK(t, checkRepo.SearchKey(TestPassword)) OK(t, checkRepo.SearchKey(TestPassword))
chkr := checker.New(checkRepo) chkr := checker.New(checkRepo)
@ -289,6 +262,7 @@ func TestCheckerModifiedData(t *testing.T) {
t.Errorf("expected no hints, got %v: %v", len(hints), hints) t.Errorf("expected no hints, got %v: %v", len(hints), hints)
} }
beError.ProduceErrors = true
errFound := false errFound := false
for _, err := range checkPacks(chkr) { for _, err := range checkPacks(chkr) {
t.Logf("pack error: %v", err) t.Logf("pack error: %v", err)

View file

@ -4,7 +4,6 @@ import (
"encoding/json" "encoding/json"
"errors" "errors"
"fmt" "fmt"
"io"
"os" "os"
"github.com/restic/restic" "github.com/restic/restic"
@ -101,20 +100,19 @@ func (cmd CmdCat) Execute(args []string) error {
return nil return nil
case "key": case "key":
rd, err := repo.Backend().Get(backend.Key, id.String()) h := backend.Handle{Type: backend.Key, Name: id.String()}
buf, err := backend.LoadAll(repo.Backend(), h, nil)
if err != nil { if err != nil {
return err return err
} }
dec := json.NewDecoder(rd) key := &repository.Key{}
err = json.Unmarshal(buf, key)
var key repository.Key
err = dec.Decode(&key)
if err != nil { if err != nil {
return err return err
} }
buf, err := json.MarshalIndent(&key, "", " ") buf, err = json.MarshalIndent(&key, "", " ")
if err != nil { if err != nil {
return err return err
} }
@ -153,12 +151,13 @@ func (cmd CmdCat) Execute(args []string) error {
switch tpe { switch tpe {
case "pack": case "pack":
rd, err := repo.Backend().Get(backend.Data, id.String()) h := backend.Handle{Type: backend.Data, Name: id.String()}
buf, err := backend.LoadAll(repo.Backend(), h, nil)
if err != nil { if err != nil {
return err return err
} }
_, err = io.Copy(os.Stdout, rd) _, err = os.Stdout.Write(buf)
return err return err
case "blob": case "blob":

View file

@ -2,8 +2,6 @@ package main
import ( import (
"bytes" "bytes"
"io"
"io/ioutil"
"github.com/restic/restic/backend" "github.com/restic/restic/backend"
"github.com/restic/restic/debug" "github.com/restic/restic/debug"
@ -126,6 +124,7 @@ func (cmd CmdRebuildIndex) RebuildIndex() error {
cmd.global.Printf("checking for additional packs\n") cmd.global.Printf("checking for additional packs\n")
newPacks := 0 newPacks := 0
var buf []byte
for packID := range cmd.repo.List(backend.Data, done) { for packID := range cmd.repo.List(backend.Data, done) {
if packsDone.Has(packID) { if packsDone.Has(packID) {
continue continue
@ -134,27 +133,12 @@ func (cmd CmdRebuildIndex) RebuildIndex() error {
debug.Log("RebuildIndex.RebuildIndex", "pack %v not indexed", packID.Str()) debug.Log("RebuildIndex.RebuildIndex", "pack %v not indexed", packID.Str())
newPacks++ newPacks++
rd, err := cmd.repo.Backend().GetReader(backend.Data, packID.String(), 0, 0) var err error
if err != nil {
debug.Log("RebuildIndex.RebuildIndex", "GetReader returned error: %v", err)
return err
}
var readSeeker io.ReadSeeker h := backend.Handle{Type: backend.Data, Name: packID.String()}
if r, ok := rd.(io.ReadSeeker); ok { buf, err = backend.LoadAll(cmd.repo.Backend(), h, buf)
debug.Log("RebuildIndex.RebuildIndex", "reader is seekable")
readSeeker = r
} else {
debug.Log("RebuildIndex.RebuildIndex", "reader is not seekable, loading contents to ram")
buf, err := ioutil.ReadAll(rd)
if err != nil {
return err
}
readSeeker = bytes.NewReader(buf) up, err := pack.NewUnpacker(cmd.repo.Key(), bytes.NewReader(buf))
}
up, err := pack.NewUnpacker(cmd.repo.Key(), readSeeker)
if err != nil { if err != nil {
debug.Log("RebuildIndex.RebuildIndex", "error while unpacking pack %v", packID.Str()) debug.Log("RebuildIndex.RebuildIndex", "error while unpacking pack %v", packID.Str())
return err return err
@ -171,9 +155,6 @@ func (cmd CmdRebuildIndex) RebuildIndex() error {
}) })
} }
err = rd.Close()
debug.Log("RebuildIndex.RebuildIndex", "error closing reader for pack %v: %v", packID.Str(), err)
if repository.IndexFull(combinedIndex) { if repository.IndexFull(combinedIndex) {
combinedIndex, err = cmd.storeIndex(combinedIndex) combinedIndex, err = cmd.storeIndex(combinedIndex)
if err != nil { if err != nil {

View file

@ -54,9 +54,13 @@ func waitForMount(dir string) error {
} }
func cmdMount(t testing.TB, global GlobalOptions, dir string, ready, done chan struct{}) { func cmdMount(t testing.TB, global GlobalOptions, dir string, ready, done chan struct{}) {
defer func() {
ready <- struct{}{}
}()
cmd := &CmdMount{global: &global, ready: ready, done: done} cmd := &CmdMount{global: &global, ready: ready, done: done}
OK(t, cmd.Execute([]string{dir})) OK(t, cmd.Execute([]string{dir}))
if TestCleanup { if TestCleanupTempDirs {
RemoveAll(t, dir) RemoveAll(t, dir)
} }
} }
@ -104,7 +108,7 @@ func TestMount(t *testing.T) {
// We remove the mountpoint now to check that cmdMount creates it // We remove the mountpoint now to check that cmdMount creates it
RemoveAll(t, mountpoint) RemoveAll(t, mountpoint)
ready := make(chan struct{}, 1) ready := make(chan struct{}, 2)
done := make(chan struct{}) done := make(chan struct{})
go cmdMount(t, global, mountpoint, ready, done) go cmdMount(t, global, mountpoint, ready, done)
<-ready <-ready

View file

@ -178,7 +178,7 @@ func configureRestic(t testing.TB, cache, repo string) GlobalOptions {
} }
func cleanupTempdir(t testing.TB, tempdir string) { func cleanupTempdir(t testing.TB, tempdir string) {
if !TestCleanup { if !TestCleanupTempDirs {
t.Logf("leaving temporary directory %v used for test", tempdir) t.Logf("leaving temporary directory %v used for test", tempdir)
return return
} }
@ -209,7 +209,7 @@ func withTestEnvironment(t testing.TB, f func(*testEnvironment, GlobalOptions))
f(&env, configureRestic(t, env.cache, env.repo)) f(&env, configureRestic(t, env.cache, env.repo))
if !TestCleanup { if !TestCleanupTempDirs {
t.Logf("leaving temporary directory %v used for test", tempdir) t.Logf("leaving temporary directory %v used for test", tempdir)
return return
} }

View file

@ -23,10 +23,7 @@ func TestEncryptDecrypt(t *testing.T) {
} }
for _, size := range tests { for _, size := range tests {
data := make([]byte, size) data := Random(42, size)
_, err := io.ReadFull(RandomReader(42, size), data)
OK(t, err)
buf := make([]byte, size+crypto.Extension) buf := make([]byte, size+crypto.Extension)
ciphertext, err := crypto.Encrypt(k, buf, data) ciphertext, err := crypto.Encrypt(k, buf, data)
@ -140,7 +137,7 @@ func BenchmarkEncryptWriter(b *testing.B) {
b.SetBytes(int64(size)) b.SetBytes(int64(size))
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
rd := RandomReader(23, size) rd := RandomLimitReader(23, size)
wr := crypto.EncryptTo(k, ioutil.Discard) wr := crypto.EncryptTo(k, ioutil.Discard)
n, err := io.Copy(wr, rd) n, err := io.Copy(wr, rd)
OK(b, err) OK(b, err)
@ -200,7 +197,7 @@ func BenchmarkEncryptDecryptReader(b *testing.B) {
buf := bytes.NewBuffer(nil) buf := bytes.NewBuffer(nil)
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
rd := RandomReader(23, size) rd := RandomLimitReader(23, size)
buf.Reset() buf.Reset()
wr := crypto.EncryptTo(k, buf) wr := crypto.EncryptTo(k, buf)
_, err := io.Copy(wr, rd) _, err := io.Copy(wr, rd)
@ -245,14 +242,12 @@ func TestEncryptStreamWriter(t *testing.T) {
} }
for _, size := range tests { for _, size := range tests {
data := make([]byte, size) data := Random(42, size)
_, err := io.ReadFull(RandomReader(42, size), data)
OK(t, err)
ciphertext := bytes.NewBuffer(nil) ciphertext := bytes.NewBuffer(nil)
wr := crypto.EncryptTo(k, ciphertext) wr := crypto.EncryptTo(k, ciphertext)
_, err = io.Copy(wr, bytes.NewReader(data)) _, err := io.Copy(wr, bytes.NewReader(data))
OK(t, err) OK(t, err)
OK(t, wr.Close()) OK(t, wr.Close())
@ -279,10 +274,8 @@ func TestDecryptStreamReader(t *testing.T) {
} }
for _, size := range tests { for _, size := range tests {
data := make([]byte, size) data := Random(42, size)
_, err := io.ReadFull(RandomReader(42, size), data) var err error
OK(t, err)
ciphertext := make([]byte, size+crypto.Extension) ciphertext := make([]byte, size+crypto.Extension)
// encrypt with default function // encrypt with default function
@ -313,14 +306,12 @@ func TestEncryptWriter(t *testing.T) {
} }
for _, size := range tests { for _, size := range tests {
data := make([]byte, size) data := Random(42, size)
_, err := io.ReadFull(RandomReader(42, size), data)
OK(t, err)
buf := bytes.NewBuffer(nil) buf := bytes.NewBuffer(nil)
wr := crypto.EncryptTo(k, buf) wr := crypto.EncryptTo(k, buf)
_, err = io.Copy(wr, bytes.NewReader(data)) _, err := io.Copy(wr, bytes.NewReader(data))
OK(t, err) OK(t, err)
OK(t, wr.Close()) OK(t, wr.Close())

View file

@ -145,7 +145,7 @@ func TestNodeRestoreAt(t *testing.T) {
OK(t, err) OK(t, err)
defer func() { defer func() {
if TestCleanup { if TestCleanupTempDirs {
RemoveAll(t, tempdir) RemoveAll(t, tempdir)
} else { } else {
t.Logf("leaving tempdir at %v", tempdir) t.Logf("leaving tempdir at %v", tempdir)

View file

@ -1,7 +1,7 @@
package pack package pack
import ( import (
"crypto/sha256" "bytes"
"encoding/binary" "encoding/binary"
"errors" "errors"
"fmt" "fmt"
@ -12,8 +12,10 @@ import (
"github.com/restic/restic/crypto" "github.com/restic/restic/crypto"
) )
// BlobType specifies what a blob stored in a pack is.
type BlobType uint8 type BlobType uint8
// These are the blob types that can be stored in a pack.
const ( const (
Data BlobType = 0 Data BlobType = 0
Tree = 1 Tree = 1
@ -30,6 +32,7 @@ func (t BlobType) String() string {
return fmt.Sprintf("<BlobType %d>", t) return fmt.Sprintf("<BlobType %d>", t)
} }
// MarshalJSON encodes the BlobType into JSON.
func (t BlobType) MarshalJSON() ([]byte, error) { func (t BlobType) MarshalJSON() ([]byte, error) {
switch t { switch t {
case Data: case Data:
@ -41,6 +44,7 @@ func (t BlobType) MarshalJSON() ([]byte, error) {
return nil, errors.New("unknown blob type") return nil, errors.New("unknown blob type")
} }
// UnmarshalJSON decodes the BlobType from JSON.
func (t *BlobType) UnmarshalJSON(buf []byte) error { func (t *BlobType) UnmarshalJSON(buf []byte) error {
switch string(buf) { switch string(buf) {
case `"data"`: case `"data"`:
@ -79,16 +83,15 @@ type Packer struct {
bytes uint bytes uint
k *crypto.Key k *crypto.Key
wr io.Writer buf *bytes.Buffer
hw *backend.HashingWriter
m sync.Mutex m sync.Mutex
} }
// NewPacker returns a new Packer that can be used to pack blobs // NewPacker returns a new Packer that can be used to pack blobs
// together. // together.
func NewPacker(k *crypto.Key, w io.Writer) *Packer { func NewPacker(k *crypto.Key, buf []byte) *Packer {
return &Packer{k: k, wr: w, hw: backend.NewHashingWriter(w, sha256.New())} return &Packer{k: k, buf: bytes.NewBuffer(buf)}
} }
// Add saves the data read from rd as a new blob to the packer. Returned is the // Add saves the data read from rd as a new blob to the packer. Returned is the
@ -99,7 +102,7 @@ func (p *Packer) Add(t BlobType, id backend.ID, rd io.Reader) (int64, error) {
c := Blob{Type: t, ID: id} c := Blob{Type: t, ID: id}
n, err := io.Copy(p.hw, rd) n, err := io.Copy(p.buf, rd)
c.Length = uint(n) c.Length = uint(n)
c.Offset = p.bytes c.Offset = p.bytes
p.bytes += uint(n) p.bytes += uint(n)
@ -118,45 +121,47 @@ type headerEntry struct {
} }
// Finalize writes the header for all added blobs and finalizes the pack. // Finalize writes the header for all added blobs and finalizes the pack.
// Returned are the complete number of bytes written, including the header. // Returned are all bytes written, including the header.
// After Finalize() has finished, the ID of this pack can be obtained by func (p *Packer) Finalize() ([]byte, error) {
// calling ID().
func (p *Packer) Finalize() (bytesWritten uint, err error) {
p.m.Lock() p.m.Lock()
defer p.m.Unlock() defer p.m.Unlock()
bytesWritten = p.bytes bytesWritten := p.bytes
// create writer to encrypt header hdrBuf := bytes.NewBuffer(nil)
wr := crypto.EncryptTo(p.k, p.hw) bytesHeader, err := p.writeHeader(hdrBuf)
bytesHeader, err := p.writeHeader(wr)
if err != nil { if err != nil {
wr.Close() return nil, err
return bytesWritten + bytesHeader, err
} }
bytesWritten += bytesHeader encryptedHeader, err := crypto.Encrypt(p.k, nil, hdrBuf.Bytes())
// finalize encrypted header
err = wr.Close()
if err != nil { if err != nil {
return bytesWritten, err return nil, err
} }
// account for crypto overhead // append the header
bytesWritten += crypto.Extension n, err := p.buf.Write(encryptedHeader)
if err != nil {
return nil, err
}
hdrBytes := bytesHeader + crypto.Extension
if uint(n) != hdrBytes {
return nil, errors.New("wrong number of bytes written")
}
bytesWritten += hdrBytes
// write length // write length
err = binary.Write(p.hw, binary.LittleEndian, uint32(uint(len(p.blobs))*entrySize+crypto.Extension)) err = binary.Write(p.buf, binary.LittleEndian, uint32(uint(len(p.blobs))*entrySize+crypto.Extension))
if err != nil { if err != nil {
return bytesWritten, err return nil, err
} }
bytesWritten += uint(binary.Size(uint32(0))) bytesWritten += uint(binary.Size(uint32(0)))
p.bytes = uint(bytesWritten) p.bytes = uint(bytesWritten)
return bytesWritten, nil return p.buf.Bytes(), nil
} }
// writeHeader constructs and writes the header to wr. // writeHeader constructs and writes the header to wr.
@ -179,18 +184,6 @@ func (p *Packer) writeHeader(wr io.Writer) (bytesWritten uint, err error) {
return return
} }
// ID returns the ID of all data written so far.
func (p *Packer) ID() backend.ID {
p.m.Lock()
defer p.m.Unlock()
hash := p.hw.Sum(nil)
id := backend.ID{}
copy(id[:], hash)
return id
}
// Size returns the number of bytes written so far. // Size returns the number of bytes written so far.
func (p *Packer) Size() uint { func (p *Packer) Size() uint {
p.m.Lock() p.m.Lock()
@ -215,11 +208,6 @@ func (p *Packer) Blobs() []Blob {
return p.blobs return p.blobs
} }
// Writer returns the underlying writer.
func (p *Packer) Writer() io.Writer {
return p.wr
}
func (p *Packer) String() string { func (p *Packer) String() string {
return fmt.Sprintf("<Packer %d blobs, %d bytes>", len(p.blobs), p.bytes) return fmt.Sprintf("<Packer %d blobs, %d bytes>", len(p.blobs), p.bytes)
} }

View file

@ -34,23 +34,19 @@ func TestCreatePack(t *testing.T) {
bufs = append(bufs, Buf{data: b, id: h}) bufs = append(bufs, Buf{data: b, id: h})
} }
file := bytes.NewBuffer(nil)
// create random keys // create random keys
k := crypto.NewRandomKey() k := crypto.NewRandomKey()
// pack blobs // pack blobs
p := pack.NewPacker(k, file) p := pack.NewPacker(k, nil)
for _, b := range bufs { for _, b := range bufs {
p.Add(pack.Tree, b.id, bytes.NewReader(b.data)) p.Add(pack.Tree, b.id, bytes.NewReader(b.data))
} }
// write file packData, err := p.Finalize()
n, err := p.Finalize()
OK(t, err) OK(t, err)
written := 0 written := 0
// data
for _, l := range lengths { for _, l := range lengths {
written += l written += l
} }
@ -62,11 +58,11 @@ func TestCreatePack(t *testing.T) {
written += crypto.Extension written += crypto.Extension
// check length // check length
Equals(t, uint(written), n) Equals(t, written, len(packData))
Equals(t, uint(written), p.Size()) Equals(t, uint(written), p.Size())
// read and parse it again // read and parse it again
rd := bytes.NewReader(file.Bytes()) rd := bytes.NewReader(packData)
np, err := pack.NewUnpacker(k, rd) np, err := pack.NewUnpacker(k, rd)
OK(t, err) OK(t, err)
Equals(t, len(np.Entries), len(bufs)) Equals(t, len(np.Entries), len(bufs))

View file

@ -1,6 +1,7 @@
package repository package repository
import ( import (
"bytes"
"encoding/json" "encoding/json"
"errors" "errors"
"fmt" "fmt"
@ -564,13 +565,12 @@ func LoadIndexWithDecoder(repo *Repository, id string, fn func(io.Reader) (*Inde
return nil, err return nil, err
} }
rd, err := repo.GetDecryptReader(backend.Index, idxID.String()) buf, err := repo.LoadAndDecrypt(backend.Index, idxID)
if err != nil { if err != nil {
return nil, err return nil, err
} }
defer closeOrErr(rd, &err)
idx, err = fn(rd) idx, err = fn(bytes.NewReader(buf))
if err != nil { if err != nil {
debug.Log("LoadIndexWithDecoder", "error while decoding index %v: %v", id, err) debug.Log("LoadIndexWithDecoder", "error while decoding index %v: %v", id, err)
return nil, err return nil, err
@ -594,33 +594,14 @@ func ConvertIndex(repo *Repository, id backend.ID) (backend.ID, error) {
return id, err return id, err
} }
blob, err := repo.CreateEncryptedBlob(backend.Index) buf := bytes.NewBuffer(nil)
if err != nil {
return id, err
}
idx.supersedes = backend.IDs{id} idx.supersedes = backend.IDs{id}
err = idx.Encode(blob) err = idx.Encode(buf)
if err != nil { if err != nil {
debug.Log("ConvertIndex", "oldIdx.Encode() returned error: %v", err) debug.Log("ConvertIndex", "oldIdx.Encode() returned error: %v", err)
return id, err return id, err
} }
err = blob.Close() return repo.SaveUnpacked(backend.Index, buf.Bytes())
if err != nil {
debug.Log("ConvertIndex", "blob.Close() returned error: %v", err)
return id, err
}
newID := blob.ID()
debug.Log("ConvertIndex", "index %v converted to new format as %v", id.Str(), newID.Str())
err = repo.be.Remove(backend.Index, id.String())
if err != nil {
debug.Log("ConvertIndex", "backend.Remove(%v) returned error: %v", id.Str(), err)
return id, err
}
return newID, nil
} }

View file

@ -2,8 +2,6 @@ package repository
import ( import (
"crypto/rand" "crypto/rand"
"crypto/sha256"
"encoding/hex"
"encoding/json" "encoding/json"
"errors" "errors"
"fmt" "fmt"
@ -119,17 +117,14 @@ func SearchKey(s *Repository, password string) (*Key, error) {
// LoadKey loads a key from the backend. // LoadKey loads a key from the backend.
func LoadKey(s *Repository, name string) (k *Key, err error) { func LoadKey(s *Repository, name string) (k *Key, err error) {
// extract data from repo h := backend.Handle{Type: backend.Key, Name: name}
rd, err := s.be.Get(backend.Key, name) data, err := backend.LoadAll(s.be, h, nil)
if err != nil { if err != nil {
return nil, err return nil, err
} }
defer closeOrErr(rd, &err)
// restore json k = &Key{}
dec := json.NewDecoder(rd) err = json.Unmarshal(data, k)
k = new(Key)
err = dec.Decode(k)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -194,26 +189,17 @@ func AddKey(s *Repository, password string, template *crypto.Key) (*Key, error)
} }
// store in repository and return // store in repository and return
blob, err := s.be.Create() h := backend.Handle{
Type: backend.Key,
Name: backend.Hash(buf).String(),
}
err = s.be.Save(h, buf)
if err != nil { if err != nil {
return nil, err return nil, err
} }
plainhw := backend.NewHashingWriter(blob, sha256.New()) newkey.name = h.Name
_, err = plainhw.Write(buf)
if err != nil {
return nil, err
}
name := hex.EncodeToString(plainhw.Sum(nil))
err = blob.Finalize(backend.Key, name)
if err != nil {
return nil, err
}
newkey.name = name
return newkey, nil return newkey, nil
} }
@ -225,6 +211,7 @@ func (k *Key) String() string {
return fmt.Sprintf("<Key of %s@%s, created on %s>", k.Username, k.Hostname, k.Created) return fmt.Sprintf("<Key of %s@%s, created on %s>", k.Username, k.Hostname, k.Created)
} }
// Name returns an identifier for the key.
func (k Key) Name() string { func (k Key) Name() string {
return k.name return k.name
} }

View file

@ -42,12 +42,8 @@ func (r *packerManager) findPacker(size uint) (*pack.Packer, error) {
} }
// no suitable packer found, return new // no suitable packer found, return new
blob, err := r.be.Create() debug.Log("Repo.findPacker", "create new pack for %d bytes", size)
if err != nil { return pack.NewPacker(r.key, nil), nil
return nil, err
}
debug.Log("Repo.findPacker", "create new pack %p for %d bytes", blob, size)
return pack.NewPacker(r.key, blob), nil
} }
// insertPacker appends p to s.packs. // insertPacker appends p to s.packs.
@ -62,28 +58,29 @@ func (r *packerManager) insertPacker(p *pack.Packer) {
// savePacker stores p in the backend. // savePacker stores p in the backend.
func (r *Repository) savePacker(p *pack.Packer) error { func (r *Repository) savePacker(p *pack.Packer) error {
debug.Log("Repo.savePacker", "save packer with %d blobs\n", p.Count()) debug.Log("Repo.savePacker", "save packer with %d blobs\n", p.Count())
_, err := p.Finalize() data, err := p.Finalize()
if err != nil { if err != nil {
return err return err
} }
// move file to the final location id := backend.Hash(data)
sid := p.ID() h := backend.Handle{Type: backend.Data, Name: id.String()}
err = p.Writer().(backend.Blob).Finalize(backend.Data, sid.String())
err = r.be.Save(h, data)
if err != nil { if err != nil {
debug.Log("Repo.savePacker", "blob Finalize() error: %v", err) debug.Log("Repo.savePacker", "Save(%v) error: %v", h, err)
return err return err
} }
debug.Log("Repo.savePacker", "saved as %v", sid.Str()) debug.Log("Repo.savePacker", "saved as %v", h)
// update blobs in the index // update blobs in the index
for _, b := range p.Blobs() { for _, b := range p.Blobs() {
debug.Log("Repo.savePacker", " updating blob %v to pack %v", b.ID.Str(), sid.Str()) debug.Log("Repo.savePacker", " updating blob %v to pack %v", b.ID.Str(), id.Str())
r.idx.Current().Store(PackedBlob{ r.idx.Current().Store(PackedBlob{
Type: b.Type, Type: b.Type,
ID: b.ID, ID: b.ID,
PackID: sid, PackID: id,
Offset: b.Offset, Offset: b.Offset,
Length: uint(b.Length), Length: uint(b.Length),
}) })

View file

@ -2,7 +2,6 @@ package repository
import ( import (
"bytes" "bytes"
"crypto/sha256"
"encoding/json" "encoding/json"
"errors" "errors"
"fmt" "fmt"
@ -56,24 +55,14 @@ func (r *Repository) PrefixLength(t backend.Type) (int, error) {
func (r *Repository) LoadAndDecrypt(t backend.Type, id backend.ID) ([]byte, error) { func (r *Repository) LoadAndDecrypt(t backend.Type, id backend.ID) ([]byte, error) {
debug.Log("Repo.Load", "load %v with id %v", t, id.Str()) debug.Log("Repo.Load", "load %v with id %v", t, id.Str())
rd, err := r.be.Get(t, id.String()) h := backend.Handle{Type: t, Name: id.String()}
buf, err := backend.LoadAll(r.be, h, nil)
if err != nil { if err != nil {
debug.Log("Repo.Load", "error loading %v: %v", id.Str(), err) debug.Log("Repo.Load", "error loading %v: %v", id.Str(), err)
return nil, err return nil, err
} }
buf, err := ioutil.ReadAll(rd) if t != backend.Config && !backend.Hash(buf).Equal(id) {
if err != nil {
return nil, err
}
err = rd.Close()
if err != nil {
return nil, err
}
// check hash
if !backend.Hash(buf).Equal(id) {
return nil, errors.New("invalid data returned") return nil, errors.New("invalid data returned")
} }
@ -100,7 +89,9 @@ func (r *Repository) LoadBlob(t pack.BlobType, id backend.ID, plaintextBuf []byt
plaintextBufSize := uint(cap(plaintextBuf)) plaintextBufSize := uint(cap(plaintextBuf))
if blob.PlaintextLength() > plaintextBufSize { if blob.PlaintextLength() > plaintextBufSize {
return nil, fmt.Errorf("buf is too small, need %d more bytes", blob.PlaintextLength()-plaintextBufSize) debug.Log("Repo.LoadBlob", "need to expand buffer: want %d bytes, got %d",
blob.PlaintextLength(), plaintextBufSize)
plaintextBuf = make([]byte, blob.PlaintextLength())
} }
if blob.Type != t { if blob.Type != t {
@ -111,22 +102,18 @@ func (r *Repository) LoadBlob(t pack.BlobType, id backend.ID, plaintextBuf []byt
debug.Log("Repo.LoadBlob", "id %v found: %v", id.Str(), blob) debug.Log("Repo.LoadBlob", "id %v found: %v", id.Str(), blob)
// load blob from pack // load blob from pack
rd, err := r.be.GetReader(backend.Data, blob.PackID.String(), blob.Offset, blob.Length) h := backend.Handle{Type: backend.Data, Name: blob.PackID.String()}
ciphertextBuf := make([]byte, blob.Length)
n, err := r.be.Load(h, ciphertextBuf, int64(blob.Offset))
if err != nil { if err != nil {
debug.Log("Repo.LoadBlob", "error loading blob %v: %v", blob, err) debug.Log("Repo.LoadBlob", "error loading blob %v: %v", blob, err)
return nil, err return nil, err
} }
// make buffer that is large enough for the complete blob if uint(n) != blob.Length {
ciphertextBuf := make([]byte, blob.Length) debug.Log("Repo.LoadBlob", "error loading blob %v: wrong length returned, want %d, got %d",
_, err = io.ReadFull(rd, ciphertextBuf) blob.Length, uint(n))
if err != nil { return nil, errors.New("wrong length returned")
return nil, err
}
err = rd.Close()
if err != nil {
return nil, err
} }
// decrypt // decrypt
@ -156,61 +143,23 @@ func closeOrErr(cl io.Closer, err *error) {
// LoadJSONUnpacked decrypts the data and afterwards calls json.Unmarshal on // LoadJSONUnpacked decrypts the data and afterwards calls json.Unmarshal on
// the item. // the item.
func (r *Repository) LoadJSONUnpacked(t backend.Type, id backend.ID, item interface{}) (err error) { func (r *Repository) LoadJSONUnpacked(t backend.Type, id backend.ID, item interface{}) (err error) {
// load blob from backend buf, err := r.LoadAndDecrypt(t, id)
rd, err := r.be.Get(t, id.String())
if err != nil {
return err
}
defer closeOrErr(rd, &err)
// decrypt
decryptRd, err := crypto.DecryptFrom(r.key, rd)
defer closeOrErr(decryptRd, &err)
if err != nil { if err != nil {
return err return err
} }
// decode return json.Unmarshal(buf, item)
decoder := json.NewDecoder(decryptRd)
err = decoder.Decode(item)
if err != nil {
return err
}
return nil
} }
// LoadJSONPack calls LoadBlob() to load a blob from the backend, decrypt the // LoadJSONPack calls LoadBlob() to load a blob from the backend, decrypt the
// data and afterwards call json.Unmarshal on the item. // data and afterwards call json.Unmarshal on the item.
func (r *Repository) LoadJSONPack(t pack.BlobType, id backend.ID, item interface{}) (err error) { func (r *Repository) LoadJSONPack(t pack.BlobType, id backend.ID, item interface{}) (err error) {
// lookup pack buf, err := r.LoadBlob(t, id, nil)
blob, err := r.idx.Lookup(id)
if err != nil { if err != nil {
return err return err
} }
// load blob from pack return json.Unmarshal(buf, item)
rd, err := r.be.GetReader(backend.Data, blob.PackID.String(), blob.Offset, blob.Length)
if err != nil {
return err
}
defer closeOrErr(rd, &err)
// decrypt
decryptRd, err := crypto.DecryptFrom(r.key, rd)
defer closeOrErr(decryptRd, &err)
if err != nil {
return err
}
// decode
decoder := json.NewDecoder(decryptRd)
err = decoder.Decode(item)
if err != nil {
return err
}
return nil
} }
// LookupBlobSize returns the size of blob id. // LookupBlobSize returns the size of blob id.
@ -315,44 +264,35 @@ func (r *Repository) SaveJSON(t pack.BlobType, item interface{}) (backend.ID, er
// SaveJSONUnpacked serialises item as JSON and encrypts and saves it in the // SaveJSONUnpacked serialises item as JSON and encrypts and saves it in the
// backend as type t, without a pack. It returns the storage hash. // backend as type t, without a pack. It returns the storage hash.
func (r *Repository) SaveJSONUnpacked(t backend.Type, item interface{}) (backend.ID, error) { func (r *Repository) SaveJSONUnpacked(t backend.Type, item interface{}) (backend.ID, error) {
// create file debug.Log("Repo.SaveJSONUnpacked", "save new blob %v", t)
blob, err := r.be.Create() plaintext, err := json.Marshal(item)
if err != nil {
return backend.ID{}, err
}
debug.Log("Repo.SaveJSONUnpacked", "create new blob %v", t)
// hash
hw := backend.NewHashingWriter(blob, sha256.New())
// encrypt blob
ewr := crypto.EncryptTo(r.key, hw)
enc := json.NewEncoder(ewr)
err = enc.Encode(item)
if err != nil { if err != nil {
return backend.ID{}, fmt.Errorf("json.Encode: %v", err) return backend.ID{}, fmt.Errorf("json.Encode: %v", err)
} }
err = ewr.Close() return r.SaveUnpacked(t, plaintext)
}
// SaveUnpacked encrypts data and stores it in the backend. Returned is the
// storage hash.
func (r *Repository) SaveUnpacked(t backend.Type, p []byte) (id backend.ID, err error) {
ciphertext := make([]byte, len(p)+crypto.Extension)
ciphertext, err = r.Encrypt(ciphertext, p)
if err != nil { if err != nil {
return backend.ID{}, err return backend.ID{}, err
} }
// finalize blob in the backend id = backend.Hash(ciphertext)
hash := hw.Sum(nil) h := backend.Handle{Type: t, Name: id.String()}
sid := backend.ID{}
copy(sid[:], hash)
err = blob.Finalize(t, sid.String()) err = r.be.Save(h, ciphertext)
if err != nil { if err != nil {
debug.Log("Repo.SaveJSONUnpacked", "error saving blob %v as %v: %v", t, sid, err) debug.Log("Repo.SaveJSONUnpacked", "error saving blob %v: %v", h, err)
return backend.ID{}, err return backend.ID{}, err
} }
debug.Log("Repo.SaveJSONUnpacked", "new blob %v saved as %v", t, sid) debug.Log("Repo.SaveJSONUnpacked", "blob %v saved", h)
return id, nil
return sid, nil
} }
// Flush saves all remaining packs. // Flush saves all remaining packs.
@ -388,80 +328,16 @@ func (r *Repository) SetIndex(i *MasterIndex) {
r.idx = i r.idx = i
} }
// BlobWriter encrypts and saves the data written to it in a backend. After // SaveIndex saves an index in the repository.
// Close() was called, ID() returns the backend.ID.
type BlobWriter struct {
id backend.ID
blob backend.Blob
hw *backend.HashingWriter
ewr io.WriteCloser
t backend.Type
closed bool
}
// CreateEncryptedBlob returns a BlobWriter that encrypts and saves the data
// written to it in the backend. After Close() was called, ID() returns the
// backend.ID.
func (r *Repository) CreateEncryptedBlob(t backend.Type) (*BlobWriter, error) {
blob, err := r.be.Create()
if err != nil {
return nil, err
}
// hash
hw := backend.NewHashingWriter(blob, sha256.New())
// encrypt blob
ewr := crypto.EncryptTo(r.key, hw)
return &BlobWriter{t: t, blob: blob, hw: hw, ewr: ewr}, nil
}
func (bw *BlobWriter) Write(buf []byte) (int, error) {
return bw.ewr.Write(buf)
}
// Close finalizes the blob in the backend, afterwards ID() can be used to retrieve the ID.
func (bw *BlobWriter) Close() error {
if bw.closed {
return errors.New("BlobWriter already closed")
}
bw.closed = true
err := bw.ewr.Close()
if err != nil {
return err
}
copy(bw.id[:], bw.hw.Sum(nil))
return bw.blob.Finalize(bw.t, bw.id.String())
}
// ID returns the Id the blob has been written to after Close() was called.
func (bw *BlobWriter) ID() backend.ID {
return bw.id
}
// SaveIndex saves an index to repo's backend.
func SaveIndex(repo *Repository, index *Index) (backend.ID, error) { func SaveIndex(repo *Repository, index *Index) (backend.ID, error) {
blob, err := repo.CreateEncryptedBlob(backend.Index) buf := bytes.NewBuffer(nil)
err := index.Finalize(buf)
if err != nil { if err != nil {
return backend.ID{}, err return backend.ID{}, err
} }
err = index.Finalize(blob) return repo.SaveUnpacked(backend.Index, buf.Bytes())
if err != nil {
return backend.ID{}, err
}
err = blob.Close()
if err != nil {
return backend.ID{}, err
}
sid := blob.ID()
err = index.SetID(sid)
return sid, err
} }
// saveIndex saves all indexes in the backend. // saveIndex saves all indexes in the backend.
@ -545,17 +421,6 @@ func LoadIndex(repo *Repository, id string) (*Index, error) {
return nil, err return nil, err
} }
// GetDecryptReader opens the file id stored in the backend and returns a
// reader that yields the decrypted content. The reader must be closed.
func (r *Repository) GetDecryptReader(t backend.Type, id string) (io.ReadCloser, error) {
rd, err := r.be.Get(t, id)
if err != nil {
return nil, err
}
return newDecryptReadCloser(r.key, rd)
}
// SearchKey finds a key with the supplied password, afterwards the config is // SearchKey finds a key with the supplied password, afterwards the config is
// read and parsed. // read and parsed.
func (r *Repository) SearchKey(password string) error { func (r *Repository) SearchKey(password string) error {

View file

@ -6,7 +6,9 @@ import (
"bytes" "bytes"
"flag" "flag"
"fmt" "fmt"
"io"
"io/ioutil" "io/ioutil"
"net/http"
"os" "os"
"os/exec" "os/exec"
"path/filepath" "path/filepath"
@ -17,6 +19,7 @@ import (
) )
var runCrossCompile = flag.Bool("cross-compile", true, "run cross compilation tests") var runCrossCompile = flag.Bool("cross-compile", true, "run cross compilation tests")
var minioServer = flag.String("minio", "", "path to the minio server binary")
func init() { func init() {
flag.Parse() flag.Parse()
@ -30,10 +33,57 @@ type CIEnvironment interface {
type TravisEnvironment struct { type TravisEnvironment struct {
goxArch []string goxArch []string
goxOS []string goxOS []string
minio string
} }
var envVendorExperiment = map[string]string{ func (env *TravisEnvironment) getMinio() {
"GO15VENDOREXPERIMENT": "1", if *minioServer != "" {
msg("using minio server at %q\n", *minioServer)
env.minio = *minioServer
return
}
tempfile, err := ioutil.TempFile("", "minio-server-")
if err != nil {
fmt.Fprintf(os.Stderr, "create tempfile failed: %v\n", err)
os.Exit(10)
}
url := fmt.Sprintf("https://dl.minio.io/server/minio/release/%s-%s/minio",
runtime.GOOS, runtime.GOARCH)
msg("downloading %v\n", url)
res, err := http.Get(url)
if err != nil {
msg("downloading minio failed: %v\n", err)
return
}
_, err = io.Copy(tempfile, res.Body)
if err != nil {
msg("downloading minio failed: %v\n", err)
return
}
err = res.Body.Close()
if err != nil {
msg("saving minio failed: %v\n", err)
return
}
err = tempfile.Close()
if err != nil {
msg("closing tempfile failed: %v\n", err)
return
}
err = os.Chmod(tempfile.Name(), 0755)
if err != nil {
msg("making minio server executable failed: %v\n", err)
return
}
msg("downloaded minio server to %v\n", tempfile.Name())
env.minio = tempfile.Name()
} }
func (env *TravisEnvironment) Prepare() { func (env *TravisEnvironment) Prepare() {
@ -42,7 +92,7 @@ func (env *TravisEnvironment) Prepare() {
run("go", "get", "golang.org/x/tools/cmd/cover") run("go", "get", "golang.org/x/tools/cmd/cover")
run("go", "get", "github.com/mattn/goveralls") run("go", "get", "github.com/mattn/goveralls")
run("go", "get", "github.com/pierrre/gotestcover") run("go", "get", "github.com/pierrre/gotestcover")
runWithEnv(envVendorExperiment, "go", "get", "github.com/minio/minio") env.getMinio()
if runtime.GOOS == "darwin" { if runtime.GOOS == "darwin" {
// install the libraries necessary for fuse // install the libraries necessary for fuse
@ -125,8 +175,8 @@ func (env *TravisEnvironment) RunTests() {
err error err error
) )
if goVersionAtLeast151() { if env.minio != "" {
srv, err = NewMinioServer() srv, err = NewMinioServer(env.minio)
if err != nil { if err != nil {
fmt.Fprintf(os.Stderr, "error running minio server: %v", err) fmt.Fprintf(os.Stderr, "error running minio server: %v", err)
os.Exit(8) os.Exit(8)
@ -273,7 +323,7 @@ var minioEnv = map[string]string{
// NewMinioServer prepares and runs a minio server for the s3 backend tests in // NewMinioServer prepares and runs a minio server for the s3 backend tests in
// a temporary directory. // a temporary directory.
func NewMinioServer() (*MinioServer, error) { func NewMinioServer(minio string) (*MinioServer, error) {
msg("running minio server\n") msg("running minio server\n")
cfgdir, err := ioutil.TempDir("", "minio-config-") cfgdir, err := ioutil.TempDir("", "minio-config-")
if err != nil { if err != nil {
@ -302,7 +352,7 @@ func NewMinioServer() (*MinioServer, error) {
out := bytes.NewBuffer(nil) out := bytes.NewBuffer(nil)
cmd := exec.Command("minio", cmd := exec.Command(minio,
"--config-folder", cfgdir, "--config-folder", cfgdir,
"--address", "127.0.0.1:9000", "--address", "127.0.0.1:9000",
"server", dir) "server", dir)

View file

@ -15,7 +15,7 @@ import (
var ( var (
TestPassword = getStringVar("RESTIC_TEST_PASSWORD", "geheim") TestPassword = getStringVar("RESTIC_TEST_PASSWORD", "geheim")
TestCleanup = getBoolVar("RESTIC_TEST_CLEANUP", true) TestCleanupTempDirs = getBoolVar("RESTIC_TEST_CLEANUP", true)
TestTempDir = getStringVar("RESTIC_TEST_TMPDIR", "") TestTempDir = getStringVar("RESTIC_TEST_TMPDIR", "")
RunIntegrationTest = getBoolVar("RESTIC_TEST_INTEGRATION", true) RunIntegrationTest = getBoolVar("RESTIC_TEST_INTEGRATION", true)
RunFuseTest = getBoolVar("RESTIC_TEST_FUSE", true) RunFuseTest = getBoolVar("RESTIC_TEST_FUSE", true)
@ -70,7 +70,7 @@ func SetupRepo() *repository.Repository {
} }
func TeardownRepo(repo *repository.Repository) { func TeardownRepo(repo *repository.Repository) {
if !TestCleanup { if !TestCleanupTempDirs {
l := repo.Backend().(*local.Local) l := repo.Backend().(*local.Local)
fmt.Printf("leaving local backend at %s\n", l.Location()) fmt.Printf("leaving local backend at %s\n", l.Location())
return return

View file

@ -75,14 +75,33 @@ func ParseID(s string) backend.ID {
// Random returns size bytes of pseudo-random data derived from the seed. // Random returns size bytes of pseudo-random data derived from the seed.
func Random(seed, count int) []byte { func Random(seed, count int) []byte {
buf := make([]byte, count) p := make([]byte, count)
rnd := mrand.New(mrand.NewSource(int64(seed))) rnd := mrand.New(mrand.NewSource(int64(seed)))
for i := 0; i < count; i++ {
buf[i] = byte(rnd.Uint32()) for i := 0; i < len(p); i += 8 {
val := rnd.Int63()
var data = []byte{
byte((val >> 0) & 0xff),
byte((val >> 8) & 0xff),
byte((val >> 16) & 0xff),
byte((val >> 24) & 0xff),
byte((val >> 32) & 0xff),
byte((val >> 40) & 0xff),
byte((val >> 48) & 0xff),
byte((val >> 56) & 0xff),
}
for j := range data {
cur := i + j
if len(p) >= cur {
break
}
p[cur] = data[j]
}
} }
return buf return p
} }
type rndReader struct { type rndReader struct {
@ -90,18 +109,41 @@ type rndReader struct {
} }
func (r *rndReader) Read(p []byte) (int, error) { func (r *rndReader) Read(p []byte) (int, error) {
for i := range p { for i := 0; i < len(p); i += 8 {
p[i] = byte(r.src.Uint32()) val := r.src.Int63()
var data = []byte{
byte((val >> 0) & 0xff),
byte((val >> 8) & 0xff),
byte((val >> 16) & 0xff),
byte((val >> 24) & 0xff),
byte((val >> 32) & 0xff),
byte((val >> 40) & 0xff),
byte((val >> 48) & 0xff),
byte((val >> 56) & 0xff),
}
for j := range data {
cur := i + j
if len(p) >= cur {
break
}
p[cur] = data[j]
}
} }
return len(p), nil return len(p), nil
} }
// RandomReader returns a reader that returns size bytes of pseudo-random data // RandomReader returns a reader that returns deterministic pseudo-random data
// derived from the seed. // derived from the seed.
func RandomReader(seed, size int) io.Reader { func RandomReader(seed int) io.Reader {
r := &rndReader{src: mrand.New(mrand.NewSource(int64(seed)))} return &rndReader{src: mrand.New(mrand.NewSource(int64(seed)))}
return io.LimitReader(r, int64(size)) }
// RandomLimitReader returns a reader that returns size bytes of deterministic
// pseudo-random data derived from the seed.
func RandomLimitReader(seed, size int) io.Reader {
return io.LimitReader(RandomReader(seed), int64(size))
} }
// GenRandom returns a []byte filled with up to 1000 random bytes. // GenRandom returns a []byte filled with up to 1000 random bytes.
@ -158,7 +200,7 @@ func WithTestEnvironment(t testing.TB, repoFixture string, f func(repodir string
f(filepath.Join(tempdir, "repo")) f(filepath.Join(tempdir, "repo"))
if !TestCleanup { if !TestCleanupTempDirs {
t.Logf("leaving temporary directory %v used for test", tempdir) t.Logf("leaving temporary directory %v used for test", tempdir)
return return
} }

View file

@ -49,7 +49,7 @@ func createTempDir(t *testing.T) string {
func TestTree(t *testing.T) { func TestTree(t *testing.T) {
dir := createTempDir(t) dir := createTempDir(t)
defer func() { defer func() {
if TestCleanup { if TestCleanupTempDirs {
RemoveAll(t, dir) RemoveAll(t, dir)
} }
}() }()