From 8be9e95d20db5c4c7fac31a726b13de0d4715268 Mon Sep 17 00:00:00 2001
From: Alexander Neumann <alexander@bumpern.de>
Date: Sat, 9 May 2015 13:21:28 +0200
Subject: [PATCH 1/6] Rename package 'server' to 'repo'

---
 archiver.go                     |  6 +++---
 cache.go                        |  6 +++---
 cmd/restic/cmd_cat.go           |  4 ++--
 cmd/restic/cmd_find.go          |  6 +++---
 cmd/restic/cmd_fsck.go          |  8 ++++----
 cmd/restic/cmd_key.go           | 16 ++++++++--------
 cmd/restic/cmd_ls.go            |  4 ++--
 cmd/restic/main.go              |  8 ++++----
 node.go                         |  8 ++++----
 {server => repo}/blob.go        |  2 +-
 repo/doc.go                     |  2 ++
 {server => repo}/index.go       |  2 +-
 {server => repo}/index_test.go  | 14 +++++++-------
 {server => repo}/key.go         |  2 +-
 {server => repo}/pool.go        |  2 +-
 {server => repo}/server.go      |  2 +-
 {server => repo}/server_test.go |  2 +-
 restorer.go                     |  6 +++---
 server/doc.go                   |  2 --
 snapshot.go                     |  4 ++--
 test/backend.go                 | 10 +++++-----
 tree.go                         |  4 ++--
 walk.go                         |  6 +++---
 23 files changed, 63 insertions(+), 63 deletions(-)
 rename {server => repo}/blob.go (98%)
 create mode 100644 repo/doc.go
 rename {server => repo}/index.go (99%)
 rename {server => repo}/index_test.go (95%)
 rename {server => repo}/key.go (99%)
 rename {server => repo}/pool.go (94%)
 rename {server => repo}/server.go (99%)
 rename {server => repo}/server_test.go (99%)
 delete mode 100644 server/doc.go

diff --git a/archiver.go b/archiver.go
index cc5d079b1..76108921a 100644
--- a/archiver.go
+++ b/archiver.go
@@ -15,7 +15,7 @@ import (
 	"github.com/restic/restic/debug"
 	"github.com/restic/restic/pack"
 	"github.com/restic/restic/pipe"
-	"github.com/restic/restic/server"
+	"github.com/restic/restic/repo"
 
 	"github.com/juju/errors"
 )
@@ -30,7 +30,7 @@ var archiverAllowAllFiles = func(string, os.FileInfo) bool { return true }
 
 // Archiver is used to backup a set of directories.
 type Archiver struct {
-	s *server.Server
+	s *repo.Server
 
 	blobToken chan struct{}
 
@@ -39,7 +39,7 @@ type Archiver struct {
 }
 
 // NewArchiver returns a new archiver.
-func NewArchiver(s *server.Server) *Archiver {
+func NewArchiver(s *repo.Server) *Archiver {
 	arch := &Archiver{
 		s:         s,
 		blobToken: make(chan struct{}, maxConcurrentBlobs),
diff --git a/cache.go b/cache.go
index a3c7b8953..83321a363 100644
--- a/cache.go
+++ b/cache.go
@@ -10,7 +10,7 @@ import (
 
 	"github.com/restic/restic/backend"
 	"github.com/restic/restic/debug"
-	"github.com/restic/restic/server"
+	"github.com/restic/restic/repo"
 )
 
 // Cache is used to locally cache items from a server.
@@ -18,7 +18,7 @@ type Cache struct {
 	base string
 }
 
-func NewCache(s *server.Server) (*Cache, error) {
+func NewCache(s *repo.Server) (*Cache, error) {
 	cacheDir, err := getCacheDir()
 	if err != nil {
 		return nil, err
@@ -106,7 +106,7 @@ func (c *Cache) purge(t backend.Type, subtype string, id backend.ID) error {
 }
 
 // Clear removes information from the cache that isn't present in the server any more.
-func (c *Cache) Clear(s *server.Server) error {
+func (c *Cache) Clear(s *repo.Server) error {
 	list, err := c.list(backend.Snapshot)
 	if err != nil {
 		return err
diff --git a/cmd/restic/cmd_cat.go b/cmd/restic/cmd_cat.go
index 8fa54ed27..b90d85d45 100644
--- a/cmd/restic/cmd_cat.go
+++ b/cmd/restic/cmd_cat.go
@@ -11,7 +11,7 @@ import (
 	"github.com/restic/restic/backend"
 	"github.com/restic/restic/debug"
 	"github.com/restic/restic/pack"
-	"github.com/restic/restic/server"
+	"github.com/restic/restic/repo"
 )
 
 type CmdCat struct{}
@@ -107,7 +107,7 @@ func (cmd CmdCat) Execute(args []string) error {
 
 		dec := json.NewDecoder(rd)
 
-		var key server.Key
+		var key repo.Key
 		err = dec.Decode(&key)
 		if err != nil {
 			return err
diff --git a/cmd/restic/cmd_find.go b/cmd/restic/cmd_find.go
index 077752001..43f0c45da 100644
--- a/cmd/restic/cmd_find.go
+++ b/cmd/restic/cmd_find.go
@@ -8,7 +8,7 @@ import (
 	"github.com/restic/restic"
 	"github.com/restic/restic/backend"
 	"github.com/restic/restic/debug"
-	"github.com/restic/restic/server"
+	"github.com/restic/restic/repo"
 )
 
 type findResult struct {
@@ -59,7 +59,7 @@ func parseTime(str string) (time.Time, error) {
 	return time.Time{}, fmt.Errorf("unable to parse time: %q", str)
 }
 
-func (c CmdFind) findInTree(s *server.Server, id backend.ID, path string) ([]findResult, error) {
+func (c CmdFind) findInTree(s *repo.Server, id backend.ID, path string) ([]findResult, error) {
 	debug.Log("restic.find", "checking tree %v\n", id)
 	tree, err := restic.LoadTree(s, id)
 	if err != nil {
@@ -105,7 +105,7 @@ func (c CmdFind) findInTree(s *server.Server, id backend.ID, path string) ([]fin
 	return results, nil
 }
 
-func (c CmdFind) findInSnapshot(s *server.Server, name string) error {
+func (c CmdFind) findInSnapshot(s *repo.Server, name string) error {
 	debug.Log("restic.find", "searching in snapshot %s\n  for entries within [%s %s]", name, c.oldest, c.newest)
 
 	id, err := backend.ParseID(name)
diff --git a/cmd/restic/cmd_fsck.go b/cmd/restic/cmd_fsck.go
index eac4c4030..714753014 100644
--- a/cmd/restic/cmd_fsck.go
+++ b/cmd/restic/cmd_fsck.go
@@ -10,7 +10,7 @@ import (
 	"github.com/restic/restic/crypto"
 	"github.com/restic/restic/debug"
 	"github.com/restic/restic/pack"
-	"github.com/restic/restic/server"
+	"github.com/restic/restic/repo"
 )
 
 type CmdFsck struct {
@@ -34,7 +34,7 @@ func init() {
 	}
 }
 
-func fsckFile(opts CmdFsck, s *server.Server, IDs []backend.ID) (uint64, error) {
+func fsckFile(opts CmdFsck, s *repo.Server, IDs []backend.ID) (uint64, error) {
 	debug.Log("restic.fsckFile", "checking file %v", IDs)
 	var bytes uint64
 
@@ -77,7 +77,7 @@ func fsckFile(opts CmdFsck, s *server.Server, IDs []backend.ID) (uint64, error)
 	return bytes, nil
 }
 
-func fsckTree(opts CmdFsck, s *server.Server, id backend.ID) error {
+func fsckTree(opts CmdFsck, s *repo.Server, id backend.ID) error {
 	debug.Log("restic.fsckTree", "checking tree %v", id.Str())
 
 	tree, err := restic.LoadTree(s, id)
@@ -157,7 +157,7 @@ func fsckTree(opts CmdFsck, s *server.Server, id backend.ID) error {
 	return firstErr
 }
 
-func fsckSnapshot(opts CmdFsck, s *server.Server, id backend.ID) error {
+func fsckSnapshot(opts CmdFsck, s *repo.Server, id backend.ID) error {
 	debug.Log("restic.fsck", "checking snapshot %v\n", id)
 
 	sn, err := restic.LoadSnapshot(s, id)
diff --git a/cmd/restic/cmd_key.go b/cmd/restic/cmd_key.go
index de58d50cb..1fbccc084 100644
--- a/cmd/restic/cmd_key.go
+++ b/cmd/restic/cmd_key.go
@@ -6,7 +6,7 @@ import (
 	"os"
 
 	"github.com/restic/restic/backend"
-	"github.com/restic/restic/server"
+	"github.com/restic/restic/repo"
 )
 
 type CmdKey struct{}
@@ -21,7 +21,7 @@ func init() {
 	}
 }
 
-func listKeys(s *server.Server) error {
+func listKeys(s *repo.Server) error {
 	tab := NewTable()
 	tab.Header = fmt.Sprintf(" %-10s  %-10s  %-10s  %s", "ID", "User", "Host", "Created")
 	tab.RowFormat = "%s%-10s  %-10s  %-10s  %s"
@@ -35,7 +35,7 @@ func listKeys(s *server.Server) error {
 	defer close(done)
 
 	for name := range s.List(backend.Key, done) {
-		k, err := server.LoadKey(s, name)
+		k, err := repo.LoadKey(s, name)
 		if err != nil {
 			fmt.Fprintf(os.Stderr, "LoadKey() failed: %v\n", err)
 			continue
@@ -56,7 +56,7 @@ func listKeys(s *server.Server) error {
 	return nil
 }
 
-func addKey(s *server.Server) error {
+func addKey(s *repo.Server) error {
 	pw := readPassword("RESTIC_NEWPASSWORD", "enter password for new key: ")
 	pw2 := readPassword("RESTIC_NEWPASSWORD", "enter password again: ")
 
@@ -64,7 +64,7 @@ func addKey(s *server.Server) error {
 		return errors.New("passwords do not match")
 	}
 
-	id, err := server.AddKey(s, pw, s.Key())
+	id, err := repo.AddKey(s, pw, s.Key())
 	if err != nil {
 		return fmt.Errorf("creating new key failed: %v\n", err)
 	}
@@ -74,7 +74,7 @@ func addKey(s *server.Server) error {
 	return nil
 }
 
-func deleteKey(s *server.Server, name string) error {
+func deleteKey(s *repo.Server, name string) error {
 	if name == s.KeyName() {
 		return errors.New("refusing to remove key currently used to access repository")
 	}
@@ -88,7 +88,7 @@ func deleteKey(s *server.Server, name string) error {
 	return nil
 }
 
-func changePassword(s *server.Server) error {
+func changePassword(s *repo.Server) error {
 	pw := readPassword("RESTIC_NEWPASSWORD", "enter password for new key: ")
 	pw2 := readPassword("RESTIC_NEWPASSWORD", "enter password again: ")
 
@@ -97,7 +97,7 @@ func changePassword(s *server.Server) error {
 	}
 
 	// add new key
-	id, err := server.AddKey(s, pw, s.Key())
+	id, err := repo.AddKey(s, pw, s.Key())
 	if err != nil {
 		return fmt.Errorf("creating new key failed: %v\n", err)
 	}
diff --git a/cmd/restic/cmd_ls.go b/cmd/restic/cmd_ls.go
index b65a02eb1..d31aaaf8c 100644
--- a/cmd/restic/cmd_ls.go
+++ b/cmd/restic/cmd_ls.go
@@ -7,7 +7,7 @@ import (
 
 	"github.com/restic/restic"
 	"github.com/restic/restic/backend"
-	"github.com/restic/restic/server"
+	"github.com/restic/restic/repo"
 )
 
 type CmdLs struct{}
@@ -38,7 +38,7 @@ func printNode(prefix string, n *restic.Node) string {
 	}
 }
 
-func printTree(prefix string, s *server.Server, id backend.ID) error {
+func printTree(prefix string, s *repo.Server, id backend.ID) error {
 	tree, err := restic.LoadTree(s, id)
 	if err != nil {
 		return err
diff --git a/cmd/restic/main.go b/cmd/restic/main.go
index 8b552ae26..d87f6aeec 100644
--- a/cmd/restic/main.go
+++ b/cmd/restic/main.go
@@ -14,7 +14,7 @@ import (
 	"github.com/restic/restic/backend/local"
 	"github.com/restic/restic/backend/sftp"
 	"github.com/restic/restic/debug"
-	"github.com/restic/restic/server"
+	"github.com/restic/restic/repo"
 )
 
 var version = "compiled manually"
@@ -72,7 +72,7 @@ func (cmd CmdInit) Execute(args []string) error {
 		os.Exit(1)
 	}
 
-	s := server.NewServer(be)
+	s := repo.NewServer(be)
 	err = s.Init(pw)
 	if err != nil {
 		fmt.Fprintf(os.Stderr, "creating key in backend at %s failed: %v\n", opts.Repo, err)
@@ -133,7 +133,7 @@ func create(u string) (backend.Backend, error) {
 	return sftp.Create(url.Path[1:], "ssh", args...)
 }
 
-func OpenRepo() (*server.Server, error) {
+func OpenRepo() (*repo.Server, error) {
 	if opts.Repo == "" {
 		return nil, errors.New("Please specify repository location (-r)")
 	}
@@ -143,7 +143,7 @@ func OpenRepo() (*server.Server, error) {
 		return nil, err
 	}
 
-	s := server.NewServer(be)
+	s := repo.NewServer(be)
 
 	err = s.SearchKey(readPassword("RESTIC_PASSWORD", "enter password for repository: "))
 	if err != nil {
diff --git a/node.go b/node.go
index c39bc4941..023ce4463 100644
--- a/node.go
+++ b/node.go
@@ -14,7 +14,7 @@ import (
 	"github.com/restic/restic/backend"
 	"github.com/restic/restic/debug"
 	"github.com/restic/restic/pack"
-	"github.com/restic/restic/server"
+	"github.com/restic/restic/repo"
 )
 
 // Node is a file, directory or other item in a backup.
@@ -43,7 +43,7 @@ type Node struct {
 
 	path  string
 	err   error
-	blobs server.Blobs
+	blobs repo.Blobs
 }
 
 func (node Node) String() string {
@@ -103,7 +103,7 @@ func nodeTypeFromFileInfo(fi os.FileInfo) string {
 }
 
 // CreateAt creates the node at the given path and restores all the meta data.
-func (node *Node) CreateAt(path string, s *server.Server) error {
+func (node *Node) CreateAt(path string, s *repo.Server) error {
 	switch node.Type {
 	case "dir":
 		if err := node.createDirAt(path); err != nil {
@@ -176,7 +176,7 @@ func (node Node) createDirAt(path string) error {
 	return nil
 }
 
-func (node Node) createFileAt(path string, s *server.Server) error {
+func (node Node) createFileAt(path string, s *repo.Server) error {
 	f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, 0600)
 	defer f.Close()
 
diff --git a/server/blob.go b/repo/blob.go
similarity index 98%
rename from server/blob.go
rename to repo/blob.go
index 093a1033a..81c8a629f 100644
--- a/server/blob.go
+++ b/repo/blob.go
@@ -1,4 +1,4 @@
-package server
+package repo
 
 import (
 	"bytes"
diff --git a/repo/doc.go b/repo/doc.go
new file mode 100644
index 000000000..bbc8a6f8e
--- /dev/null
+++ b/repo/doc.go
@@ -0,0 +1,2 @@
+// Package repo implements a restic repository on top of a backend.
+package repo
diff --git a/server/index.go b/repo/index.go
similarity index 99%
rename from server/index.go
rename to repo/index.go
index a6271ff69..2854859c1 100644
--- a/server/index.go
+++ b/repo/index.go
@@ -1,4 +1,4 @@
-package server
+package repo
 
 import (
 	"encoding/json"
diff --git a/server/index_test.go b/repo/index_test.go
similarity index 95%
rename from server/index_test.go
rename to repo/index_test.go
index 149fdc468..2cc4a971c 100644
--- a/server/index_test.go
+++ b/repo/index_test.go
@@ -1,4 +1,4 @@
-package server_test
+package repo_test
 
 import (
 	"bytes"
@@ -8,7 +8,7 @@ import (
 
 	"github.com/restic/restic/backend"
 	"github.com/restic/restic/pack"
-	"github.com/restic/restic/server"
+	"github.com/restic/restic/repo"
 	. "github.com/restic/restic/test"
 )
 
@@ -30,7 +30,7 @@ func TestIndexSerialize(t *testing.T) {
 	}
 	tests := []testEntry{}
 
-	idx := server.NewIndex()
+	idx := repo.NewIndex()
 
 	// create 50 packs with 20 blobs each
 	for i := 0; i < 50; i++ {
@@ -58,7 +58,7 @@ func TestIndexSerialize(t *testing.T) {
 	err := idx.Encode(wr)
 	OK(t, err)
 
-	idx2, err := server.DecodeIndex(wr)
+	idx2, err := repo.DecodeIndex(wr)
 	OK(t, err)
 	Assert(t, idx2 != nil,
 		"nil returned for decoded index")
@@ -113,7 +113,7 @@ func TestIndexSerialize(t *testing.T) {
 	err = idx2.Encode(wr3)
 	OK(t, err)
 
-	idx3, err := server.DecodeIndex(wr3)
+	idx3, err := repo.DecodeIndex(wr3)
 	OK(t, err)
 	Assert(t, idx3 != nil,
 		"nil returned for decoded index")
@@ -138,7 +138,7 @@ func TestIndexSerialize(t *testing.T) {
 }
 
 func TestIndexSize(t *testing.T) {
-	idx := server.NewIndex()
+	idx := repo.NewIndex()
 
 	packs := 200
 	blobs := 100
@@ -210,7 +210,7 @@ var exampleTests = []struct {
 }
 
 func TestIndexUnserialize(t *testing.T) {
-	idx, err := server.DecodeIndex(bytes.NewReader(docExample))
+	idx, err := repo.DecodeIndex(bytes.NewReader(docExample))
 	OK(t, err)
 
 	for _, test := range exampleTests {
diff --git a/server/key.go b/repo/key.go
similarity index 99%
rename from server/key.go
rename to repo/key.go
index 1b0965b37..7ea31407b 100644
--- a/server/key.go
+++ b/repo/key.go
@@ -1,4 +1,4 @@
-package server
+package repo
 
 import (
 	"crypto/rand"
diff --git a/server/pool.go b/repo/pool.go
similarity index 94%
rename from server/pool.go
rename to repo/pool.go
index 304d20e1b..32100a3bd 100644
--- a/server/pool.go
+++ b/repo/pool.go
@@ -1,4 +1,4 @@
-package server
+package repo
 
 import (
 	"sync"
diff --git a/server/server.go b/repo/server.go
similarity index 99%
rename from server/server.go
rename to repo/server.go
index c6fc9732c..9bafa01b7 100644
--- a/server/server.go
+++ b/repo/server.go
@@ -1,4 +1,4 @@
-package server
+package repo
 
 import (
 	"bytes"
diff --git a/server/server_test.go b/repo/server_test.go
similarity index 99%
rename from server/server_test.go
rename to repo/server_test.go
index 0f44def1c..99de35701 100644
--- a/server/server_test.go
+++ b/repo/server_test.go
@@ -1,4 +1,4 @@
-package server_test
+package repo_test
 
 import (
 	"bytes"
diff --git a/restorer.go b/restorer.go
index cebd9c894..26f605611 100644
--- a/restorer.go
+++ b/restorer.go
@@ -7,14 +7,14 @@ import (
 	"syscall"
 
 	"github.com/restic/restic/backend"
-	"github.com/restic/restic/server"
+	"github.com/restic/restic/repo"
 
 	"github.com/juju/errors"
 )
 
 // Restorer is used to restore a snapshot to a directory.
 type Restorer struct {
-	s  *server.Server
+	s  *repo.Server
 	sn *Snapshot
 
 	Error  func(dir string, node *Node, err error) error
@@ -24,7 +24,7 @@ type Restorer struct {
 var restorerAbortOnAllErrors = func(str string, node *Node, err error) error { return err }
 
 // NewRestorer creates a restorer preloaded with the content from the snapshot id.
-func NewRestorer(s *server.Server, id backend.ID) (*Restorer, error) {
+func NewRestorer(s *repo.Server, id backend.ID) (*Restorer, error) {
 	r := &Restorer{s: s, Error: restorerAbortOnAllErrors}
 
 	var err error
diff --git a/server/doc.go b/server/doc.go
deleted file mode 100644
index 9e1f227a2..000000000
--- a/server/doc.go
+++ /dev/null
@@ -1,2 +0,0 @@
-// Package server implements a restic repository on top of a backend.
-package server
diff --git a/snapshot.go b/snapshot.go
index a928e27ad..f32425f1f 100644
--- a/snapshot.go
+++ b/snapshot.go
@@ -9,7 +9,7 @@ import (
 	"time"
 
 	"github.com/restic/restic/backend"
-	"github.com/restic/restic/server"
+	"github.com/restic/restic/repo"
 )
 
 type Snapshot struct {
@@ -50,7 +50,7 @@ func NewSnapshot(paths []string) (*Snapshot, error) {
 	return sn, nil
 }
 
-func LoadSnapshot(s *server.Server, id backend.ID) (*Snapshot, error) {
+func LoadSnapshot(s *repo.Server, id backend.ID) (*Snapshot, error) {
 	sn := &Snapshot{id: id}
 	err := s.LoadJSONUnpacked(backend.Snapshot, id, sn)
 	if err != nil {
diff --git a/test/backend.go b/test/backend.go
index 5a2524425..31b824155 100644
--- a/test/backend.go
+++ b/test/backend.go
@@ -10,14 +10,14 @@ import (
 	"github.com/restic/restic"
 	"github.com/restic/restic/backend"
 	"github.com/restic/restic/backend/local"
-	"github.com/restic/restic/server"
+	"github.com/restic/restic/repo"
 )
 
 var TestPassword = flag.String("test.password", "", `use this password for repositories created during tests (default: "geheim")`)
 var TestCleanup = flag.Bool("test.cleanup", true, "clean up after running tests (remove local backend directory with all content)")
 var TestTempDir = flag.String("test.tempdir", "", "use this directory for temporary storage (default: system temp dir)")
 
-func SetupBackend(t testing.TB) *server.Server {
+func SetupBackend(t testing.TB) *repo.Server {
 	tempdir, err := ioutil.TempDir(*TestTempDir, "restic-test-")
 	OK(t, err)
 
@@ -29,12 +29,12 @@ func SetupBackend(t testing.TB) *server.Server {
 	err = os.Setenv("RESTIC_CACHE", filepath.Join(tempdir, "cache"))
 	OK(t, err)
 
-	s := server.NewServer(b)
+	s := repo.NewServer(b)
 	OK(t, s.Init(*TestPassword))
 	return s
 }
 
-func TeardownBackend(t testing.TB, s *server.Server) {
+func TeardownBackend(t testing.TB, s *repo.Server) {
 	if !*TestCleanup {
 		l := s.Backend().(*local.Local)
 		t.Logf("leaving local backend at %s\n", l.Location())
@@ -44,7 +44,7 @@ func TeardownBackend(t testing.TB, s *server.Server) {
 	OK(t, s.Delete())
 }
 
-func SnapshotDir(t testing.TB, server *server.Server, path string, parent backend.ID) *restic.Snapshot {
+func SnapshotDir(t testing.TB, server *repo.Server, path string, parent backend.ID) *restic.Snapshot {
 	arch := restic.NewArchiver(server)
 	sn, _, err := arch.Snapshot(nil, []string{path}, parent)
 	OK(t, err)
diff --git a/tree.go b/tree.go
index f059d67dd..d43a119e7 100644
--- a/tree.go
+++ b/tree.go
@@ -8,7 +8,7 @@ import (
 	"github.com/restic/restic/backend"
 	"github.com/restic/restic/debug"
 	"github.com/restic/restic/pack"
-	"github.com/restic/restic/server"
+	"github.com/restic/restic/repo"
 )
 
 type Tree struct {
@@ -30,7 +30,7 @@ func (t Tree) String() string {
 	return fmt.Sprintf("Tree<%d nodes>", len(t.Nodes))
 }
 
-func LoadTree(s *server.Server, id backend.ID) (*Tree, error) {
+func LoadTree(s *repo.Server, id backend.ID) (*Tree, error) {
 	tree := &Tree{}
 	err := s.LoadJSONPack(pack.Tree, id, tree)
 	if err != nil {
diff --git a/walk.go b/walk.go
index 2ed5df070..a0f1566da 100644
--- a/walk.go
+++ b/walk.go
@@ -5,7 +5,7 @@ import (
 
 	"github.com/restic/restic/backend"
 	"github.com/restic/restic/debug"
-	"github.com/restic/restic/server"
+	"github.com/restic/restic/repo"
 )
 
 type WalkTreeJob struct {
@@ -16,7 +16,7 @@ type WalkTreeJob struct {
 	Tree *Tree
 }
 
-func walkTree(s *server.Server, path string, treeID backend.ID, done chan struct{}, jobCh chan<- WalkTreeJob) {
+func walkTree(s *repo.Server, path string, treeID backend.ID, done chan struct{}, jobCh chan<- WalkTreeJob) {
 	debug.Log("walkTree", "start on %q (%v)", path, treeID.Str())
 
 	t, err := LoadTree(s, treeID)
@@ -41,7 +41,7 @@ func walkTree(s *server.Server, path string, treeID backend.ID, done chan struct
 // WalkTree walks the tree specified by id recursively and sends a job for each
 // file and directory it finds. When the channel done is closed, processing
 // stops.
-func WalkTree(server *server.Server, id backend.ID, done chan struct{}, jobCh chan<- WalkTreeJob) {
+func WalkTree(server *repo.Server, id backend.ID, done chan struct{}, jobCh chan<- WalkTreeJob) {
 	debug.Log("WalkTree", "start on %v", id.Str())
 	walkTree(server, "", id, done, jobCh)
 	close(jobCh)

From 87ebf129454877c82623281a5b4362e07bd1de43 Mon Sep 17 00:00:00 2001
From: Alexander Neumann <alexander@bumpern.de>
Date: Sat, 9 May 2015 13:25:52 +0200
Subject: [PATCH 2/6] Rename 'Server' to 'Repository'

---
 archiver.go            |   4 +-
 cache.go               |   4 +-
 cmd/restic/cmd_find.go |   4 +-
 cmd/restic/cmd_fsck.go |   6 +-
 cmd/restic/cmd_key.go  |   8 +--
 cmd/restic/cmd_ls.go   |   2 +-
 cmd/restic/main.go     |   6 +-
 node.go                |   4 +-
 repo/key.go            |  10 +--
 repo/server.go         | 150 ++++++++++++++++++++---------------------
 restorer.go            |   4 +-
 snapshot.go            |   2 +-
 test/backend.go        |   8 +--
 tree.go                |   2 +-
 walk.go                |   4 +-
 15 files changed, 109 insertions(+), 109 deletions(-)

diff --git a/archiver.go b/archiver.go
index 76108921a..a7c15fc60 100644
--- a/archiver.go
+++ b/archiver.go
@@ -30,7 +30,7 @@ var archiverAllowAllFiles = func(string, os.FileInfo) bool { return true }
 
 // Archiver is used to backup a set of directories.
 type Archiver struct {
-	s *repo.Server
+	s *repo.Repository
 
 	blobToken chan struct{}
 
@@ -39,7 +39,7 @@ type Archiver struct {
 }
 
 // NewArchiver returns a new archiver.
-func NewArchiver(s *repo.Server) *Archiver {
+func NewArchiver(s *repo.Repository) *Archiver {
 	arch := &Archiver{
 		s:         s,
 		blobToken: make(chan struct{}, maxConcurrentBlobs),
diff --git a/cache.go b/cache.go
index 83321a363..902aae2ea 100644
--- a/cache.go
+++ b/cache.go
@@ -18,7 +18,7 @@ type Cache struct {
 	base string
 }
 
-func NewCache(s *repo.Server) (*Cache, error) {
+func NewCache(s *repo.Repository) (*Cache, error) {
 	cacheDir, err := getCacheDir()
 	if err != nil {
 		return nil, err
@@ -106,7 +106,7 @@ func (c *Cache) purge(t backend.Type, subtype string, id backend.ID) error {
 }
 
 // Clear removes information from the cache that isn't present in the server any more.
-func (c *Cache) Clear(s *repo.Server) error {
+func (c *Cache) Clear(s *repo.Repository) error {
 	list, err := c.list(backend.Snapshot)
 	if err != nil {
 		return err
diff --git a/cmd/restic/cmd_find.go b/cmd/restic/cmd_find.go
index 43f0c45da..9ffae8bdf 100644
--- a/cmd/restic/cmd_find.go
+++ b/cmd/restic/cmd_find.go
@@ -59,7 +59,7 @@ func parseTime(str string) (time.Time, error) {
 	return time.Time{}, fmt.Errorf("unable to parse time: %q", str)
 }
 
-func (c CmdFind) findInTree(s *repo.Server, id backend.ID, path string) ([]findResult, error) {
+func (c CmdFind) findInTree(s *repo.Repository, id backend.ID, path string) ([]findResult, error) {
 	debug.Log("restic.find", "checking tree %v\n", id)
 	tree, err := restic.LoadTree(s, id)
 	if err != nil {
@@ -105,7 +105,7 @@ func (c CmdFind) findInTree(s *repo.Server, id backend.ID, path string) ([]findR
 	return results, nil
 }
 
-func (c CmdFind) findInSnapshot(s *repo.Server, name string) error {
+func (c CmdFind) findInSnapshot(s *repo.Repository, name string) error {
 	debug.Log("restic.find", "searching in snapshot %s\n  for entries within [%s %s]", name, c.oldest, c.newest)
 
 	id, err := backend.ParseID(name)
diff --git a/cmd/restic/cmd_fsck.go b/cmd/restic/cmd_fsck.go
index 714753014..5beedf190 100644
--- a/cmd/restic/cmd_fsck.go
+++ b/cmd/restic/cmd_fsck.go
@@ -34,7 +34,7 @@ func init() {
 	}
 }
 
-func fsckFile(opts CmdFsck, s *repo.Server, IDs []backend.ID) (uint64, error) {
+func fsckFile(opts CmdFsck, s *repo.Repository, IDs []backend.ID) (uint64, error) {
 	debug.Log("restic.fsckFile", "checking file %v", IDs)
 	var bytes uint64
 
@@ -77,7 +77,7 @@ func fsckFile(opts CmdFsck, s *repo.Server, IDs []backend.ID) (uint64, error) {
 	return bytes, nil
 }
 
-func fsckTree(opts CmdFsck, s *repo.Server, id backend.ID) error {
+func fsckTree(opts CmdFsck, s *repo.Repository, id backend.ID) error {
 	debug.Log("restic.fsckTree", "checking tree %v", id.Str())
 
 	tree, err := restic.LoadTree(s, id)
@@ -157,7 +157,7 @@ func fsckTree(opts CmdFsck, s *repo.Server, id backend.ID) error {
 	return firstErr
 }
 
-func fsckSnapshot(opts CmdFsck, s *repo.Server, id backend.ID) error {
+func fsckSnapshot(opts CmdFsck, s *repo.Repository, id backend.ID) error {
 	debug.Log("restic.fsck", "checking snapshot %v\n", id)
 
 	sn, err := restic.LoadSnapshot(s, id)
diff --git a/cmd/restic/cmd_key.go b/cmd/restic/cmd_key.go
index 1fbccc084..3337829ac 100644
--- a/cmd/restic/cmd_key.go
+++ b/cmd/restic/cmd_key.go
@@ -21,7 +21,7 @@ func init() {
 	}
 }
 
-func listKeys(s *repo.Server) error {
+func listKeys(s *repo.Repository) error {
 	tab := NewTable()
 	tab.Header = fmt.Sprintf(" %-10s  %-10s  %-10s  %s", "ID", "User", "Host", "Created")
 	tab.RowFormat = "%s%-10s  %-10s  %-10s  %s"
@@ -56,7 +56,7 @@ func listKeys(s *repo.Server) error {
 	return nil
 }
 
-func addKey(s *repo.Server) error {
+func addKey(s *repo.Repository) error {
 	pw := readPassword("RESTIC_NEWPASSWORD", "enter password for new key: ")
 	pw2 := readPassword("RESTIC_NEWPASSWORD", "enter password again: ")
 
@@ -74,7 +74,7 @@ func addKey(s *repo.Server) error {
 	return nil
 }
 
-func deleteKey(s *repo.Server, name string) error {
+func deleteKey(s *repo.Repository, name string) error {
 	if name == s.KeyName() {
 		return errors.New("refusing to remove key currently used to access repository")
 	}
@@ -88,7 +88,7 @@ func deleteKey(s *repo.Server, name string) error {
 	return nil
 }
 
-func changePassword(s *repo.Server) error {
+func changePassword(s *repo.Repository) error {
 	pw := readPassword("RESTIC_NEWPASSWORD", "enter password for new key: ")
 	pw2 := readPassword("RESTIC_NEWPASSWORD", "enter password again: ")
 
diff --git a/cmd/restic/cmd_ls.go b/cmd/restic/cmd_ls.go
index d31aaaf8c..28c9d37ab 100644
--- a/cmd/restic/cmd_ls.go
+++ b/cmd/restic/cmd_ls.go
@@ -38,7 +38,7 @@ func printNode(prefix string, n *restic.Node) string {
 	}
 }
 
-func printTree(prefix string, s *repo.Server, id backend.ID) error {
+func printTree(prefix string, s *repo.Repository, id backend.ID) error {
 	tree, err := restic.LoadTree(s, id)
 	if err != nil {
 		return err
diff --git a/cmd/restic/main.go b/cmd/restic/main.go
index d87f6aeec..22266b538 100644
--- a/cmd/restic/main.go
+++ b/cmd/restic/main.go
@@ -72,7 +72,7 @@ func (cmd CmdInit) Execute(args []string) error {
 		os.Exit(1)
 	}
 
-	s := repo.NewServer(be)
+	s := repo.New(be)
 	err = s.Init(pw)
 	if err != nil {
 		fmt.Fprintf(os.Stderr, "creating key in backend at %s failed: %v\n", opts.Repo, err)
@@ -133,7 +133,7 @@ func create(u string) (backend.Backend, error) {
 	return sftp.Create(url.Path[1:], "ssh", args...)
 }
 
-func OpenRepo() (*repo.Server, error) {
+func OpenRepo() (*repo.Repository, error) {
 	if opts.Repo == "" {
 		return nil, errors.New("Please specify repository location (-r)")
 	}
@@ -143,7 +143,7 @@ func OpenRepo() (*repo.Server, error) {
 		return nil, err
 	}
 
-	s := repo.NewServer(be)
+	s := repo.New(be)
 
 	err = s.SearchKey(readPassword("RESTIC_PASSWORD", "enter password for repository: "))
 	if err != nil {
diff --git a/node.go b/node.go
index 023ce4463..b11ecf60b 100644
--- a/node.go
+++ b/node.go
@@ -103,7 +103,7 @@ func nodeTypeFromFileInfo(fi os.FileInfo) string {
 }
 
 // CreateAt creates the node at the given path and restores all the meta data.
-func (node *Node) CreateAt(path string, s *repo.Server) error {
+func (node *Node) CreateAt(path string, s *repo.Repository) error {
 	switch node.Type {
 	case "dir":
 		if err := node.createDirAt(path); err != nil {
@@ -176,7 +176,7 @@ func (node Node) createDirAt(path string) error {
 	return nil
 }
 
-func (node Node) createFileAt(path string, s *repo.Server) error {
+func (node Node) createFileAt(path string, s *repo.Repository) error {
 	f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, 0600)
 	defer f.Close()
 
diff --git a/repo/key.go b/repo/key.go
index 7ea31407b..c60909f21 100644
--- a/repo/key.go
+++ b/repo/key.go
@@ -49,12 +49,12 @@ type Key struct {
 
 // createMasterKey creates a new master key in the given backend and encrypts
 // it with the password.
-func createMasterKey(s *Server, password string) (*Key, error) {
+func createMasterKey(s *Repository, password string) (*Key, error) {
 	return AddKey(s, password, nil)
 }
 
 // OpenKey tries do decrypt the key specified by name with the given password.
-func OpenKey(s *Server, name string, password string) (*Key, error) {
+func OpenKey(s *Repository, name string, password string) (*Key, error) {
 	k, err := LoadKey(s, name)
 	if err != nil {
 		return nil, err
@@ -94,7 +94,7 @@ func OpenKey(s *Server, name string, password string) (*Key, error) {
 
 // SearchKey tries to decrypt all keys in the backend with the given password.
 // If none could be found, ErrNoKeyFound is returned.
-func SearchKey(s *Server, password string) (*Key, error) {
+func SearchKey(s *Repository, password string) (*Key, error) {
 	// try all keys in repo
 	done := make(chan struct{})
 	defer close(done)
@@ -111,7 +111,7 @@ func SearchKey(s *Server, password string) (*Key, error) {
 }
 
 // LoadKey loads a key from the backend.
-func LoadKey(s *Server, name string) (*Key, error) {
+func LoadKey(s *Repository, name string) (*Key, error) {
 	// extract data from repo
 	rd, err := s.be.Get(backend.Key, name)
 	if err != nil {
@@ -131,7 +131,7 @@ func LoadKey(s *Server, name string) (*Key, error) {
 }
 
 // AddKey adds a new key to an already existing repository.
-func AddKey(s *Server, password string, template *crypto.Key) (*Key, error) {
+func AddKey(s *Repository, password string, template *crypto.Key) (*Key, error) {
 	// fill meta data about key
 	newkey := &Key{
 		Created: time.Now(),
diff --git a/repo/server.go b/repo/server.go
index 9bafa01b7..9f4d91622 100644
--- a/repo/server.go
+++ b/repo/server.go
@@ -26,8 +26,8 @@ type Config struct {
 	ChunkerPolynomial chunker.Pol `json:"chunker_polynomial"`
 }
 
-// Server is used to access a repository in a backend.
-type Server struct {
+// Repository is used to access a repository in a backend.
+type Repository struct {
 	be      backend.Backend
 	Config  Config
 	key     *crypto.Key
@@ -38,8 +38,8 @@ type Server struct {
 	packs []*pack.Packer
 }
 
-func NewServer(be backend.Backend) *Server {
-	return &Server{
+func New(be backend.Backend) *Repository {
+	return &Repository{
 		be:  be,
 		idx: NewIndex(),
 	}
@@ -48,31 +48,31 @@ func NewServer(be backend.Backend) *Server {
 // Find loads the list of all blobs of type t and searches for names which start
 // with prefix. If none is found, nil and ErrNoIDPrefixFound is returned. If
 // more than one is found, nil and ErrMultipleIDMatches is returned.
-func (s *Server) Find(t backend.Type, prefix string) (string, error) {
+func (s *Repository) Find(t backend.Type, prefix string) (string, error) {
 	return backend.Find(s.be, t, prefix)
 }
 
 // FindSnapshot takes a string and tries to find a snapshot whose ID matches
 // the string as closely as possible.
-func (s *Server) FindSnapshot(name string) (string, error) {
+func (s *Repository) FindSnapshot(name string) (string, error) {
 	return backend.FindSnapshot(s.be, name)
 }
 
 // PrefixLength returns the number of bytes required so that all prefixes of
 // all IDs of type t are unique.
-func (s *Server) PrefixLength(t backend.Type) (int, error) {
+func (s *Repository) PrefixLength(t backend.Type) (int, error) {
 	return backend.PrefixLength(s.be, t)
 }
 
 // Load tries to load and decrypt content identified by t and id from the
 // backend.
-func (s *Server) Load(t backend.Type, id backend.ID) ([]byte, error) {
-	debug.Log("Server.Load", "load %v with id %v", t, id.Str())
+func (s *Repository) Load(t backend.Type, id backend.ID) ([]byte, error) {
+	debug.Log("Repository.Load", "load %v with id %v", t, id.Str())
 
 	// load blob from pack
 	rd, err := s.be.Get(t, id.String())
 	if err != nil {
-		debug.Log("Server.Load", "error loading %v: %v", id.Str(), err)
+		debug.Log("Repository.Load", "error loading %v: %v", id.Str(), err)
 		return nil, err
 	}
 
@@ -102,26 +102,26 @@ func (s *Server) Load(t backend.Type, id backend.ID) ([]byte, error) {
 
 // LoadBlob tries to load and decrypt content identified by t and id from a
 // pack from the backend.
-func (s *Server) LoadBlob(t pack.BlobType, id backend.ID) ([]byte, error) {
-	debug.Log("Server.LoadBlob", "load %v with id %v", t, id.Str())
+func (s *Repository) LoadBlob(t pack.BlobType, id backend.ID) ([]byte, error) {
+	debug.Log("Repository.LoadBlob", "load %v with id %v", t, id.Str())
 	// lookup pack
 	packID, tpe, offset, length, err := s.idx.Lookup(id)
 	if err != nil {
-		debug.Log("Server.LoadBlob", "id %v not found in index: %v", id.Str(), err)
+		debug.Log("Repository.LoadBlob", "id %v not found in index: %v", id.Str(), err)
 		return nil, err
 	}
 
 	if tpe != t {
-		debug.Log("Server.LoadBlob", "wrong type returned for %v: wanted %v, got %v", id.Str(), t, tpe)
+		debug.Log("Repository.LoadBlob", "wrong type returned for %v: wanted %v, got %v", id.Str(), t, tpe)
 		return nil, fmt.Errorf("blob has wrong type %v (wanted: %v)", tpe, t)
 	}
 
-	debug.Log("Server.LoadBlob", "id %v found in pack %v at offset %v (length %d)", id.Str(), packID.Str(), offset, length)
+	debug.Log("Repository.LoadBlob", "id %v found in pack %v at offset %v (length %d)", id.Str(), packID.Str(), offset, length)
 
 	// load blob from pack
 	rd, err := s.be.GetReader(backend.Data, packID.String(), offset, length)
 	if err != nil {
-		debug.Log("Server.LoadBlob", "error loading pack %v for %v: %v", packID.Str(), id.Str(), err)
+		debug.Log("Repository.LoadBlob", "error loading pack %v for %v: %v", packID.Str(), id.Str(), err)
 		return nil, err
 	}
 
@@ -151,7 +151,7 @@ func (s *Server) LoadBlob(t pack.BlobType, id backend.ID) ([]byte, error) {
 
 // LoadJSONUnpacked decrypts the data and afterwards calls json.Unmarshal on
 // the item.
-func (s *Server) LoadJSONUnpacked(t backend.Type, id backend.ID, item interface{}) error {
+func (s *Repository) LoadJSONUnpacked(t backend.Type, id backend.ID, item interface{}) error {
 	// load blob from backend
 	rd, err := s.be.Get(t, id.String())
 	if err != nil {
@@ -178,7 +178,7 @@ func (s *Server) LoadJSONUnpacked(t backend.Type, id backend.ID, item interface{
 
 // LoadJSONPack calls LoadBlob() to load a blob from the backend, decrypt the
 // data and afterwards call json.Unmarshal on the item.
-func (s *Server) LoadJSONPack(t pack.BlobType, id backend.ID, item interface{}) error {
+func (s *Repository) LoadJSONPack(t pack.BlobType, id backend.ID, item interface{}) error {
 	// lookup pack
 	packID, _, offset, length, err := s.idx.Lookup(id)
 	if err != nil {
@@ -215,16 +215,16 @@ const maxPackers = 200
 
 // findPacker returns a packer for a new blob of size bytes. Either a new one is
 // created or one is returned that already has some blobs.
-func (s *Server) findPacker(size uint) (*pack.Packer, error) {
+func (s *Repository) findPacker(size uint) (*pack.Packer, error) {
 	s.pm.Lock()
 	defer s.pm.Unlock()
 
 	// search for a suitable packer
 	if len(s.packs) > 0 {
-		debug.Log("Server.findPacker", "searching packer for %d bytes\n", size)
+		debug.Log("Repository.findPacker", "searching packer for %d bytes\n", size)
 		for i, p := range s.packs {
 			if p.Size()+size < maxPackSize {
-				debug.Log("Server.findPacker", "found packer %v", p)
+				debug.Log("Repository.findPacker", "found packer %v", p)
 				// remove from list
 				s.packs = append(s.packs[:i], s.packs[i+1:]...)
 				return p, nil
@@ -237,22 +237,22 @@ func (s *Server) findPacker(size uint) (*pack.Packer, error) {
 	if err != nil {
 		return nil, err
 	}
-	debug.Log("Server.findPacker", "create new pack %p", blob)
+	debug.Log("Repository.findPacker", "create new pack %p", blob)
 	return pack.NewPacker(s.key, blob), nil
 }
 
 // insertPacker appends p to s.packs.
-func (s *Server) insertPacker(p *pack.Packer) {
+func (s *Repository) insertPacker(p *pack.Packer) {
 	s.pm.Lock()
 	defer s.pm.Unlock()
 
 	s.packs = append(s.packs, p)
-	debug.Log("Server.insertPacker", "%d packers\n", len(s.packs))
+	debug.Log("Repository.insertPacker", "%d packers\n", len(s.packs))
 }
 
 // savePacker stores p in the backend.
-func (s *Server) savePacker(p *pack.Packer) error {
-	debug.Log("Server.savePacker", "save packer with %d blobs\n", p.Count())
+func (s *Repository) savePacker(p *pack.Packer) error {
+	debug.Log("Repository.savePacker", "save packer with %d blobs\n", p.Count())
 	_, err := p.Finalize()
 	if err != nil {
 		return err
@@ -262,15 +262,15 @@ func (s *Server) savePacker(p *pack.Packer) error {
 	sid := p.ID()
 	err = p.Writer().(backend.Blob).Finalize(backend.Data, sid.String())
 	if err != nil {
-		debug.Log("Server.savePacker", "blob Finalize() error: %v", err)
+		debug.Log("Repository.savePacker", "blob Finalize() error: %v", err)
 		return err
 	}
 
-	debug.Log("Server.savePacker", "saved as %v", sid.Str())
+	debug.Log("Repository.savePacker", "saved as %v", sid.Str())
 
 	// update blobs in the index
 	for _, b := range p.Blobs() {
-		debug.Log("Server.savePacker", "  updating blob %v to pack %v", b.ID.Str(), sid.Str())
+		debug.Log("Repository.savePacker", "  updating blob %v to pack %v", b.ID.Str(), sid.Str())
 		s.idx.Store(b.Type, b.ID, sid, b.Offset, uint(b.Length))
 	}
 
@@ -278,7 +278,7 @@ func (s *Server) savePacker(p *pack.Packer) error {
 }
 
 // countPacker returns the number of open (unfinished) packers.
-func (s *Server) countPacker() int {
+func (s *Repository) countPacker() int {
 	s.pm.Lock()
 	defer s.pm.Unlock()
 
@@ -287,13 +287,13 @@ func (s *Server) countPacker() int {
 
 // Save encrypts data and stores it to the backend as type t. If data is small
 // enough, it will be packed together with other small blobs.
-func (s *Server) Save(t pack.BlobType, data []byte, id backend.ID) (backend.ID, error) {
+func (s *Repository) Save(t pack.BlobType, data []byte, id backend.ID) (backend.ID, error) {
 	if id == nil {
 		// compute plaintext hash
 		id = backend.Hash(data)
 	}
 
-	debug.Log("Server.Save", "save id %v (%v, %d bytes)", id.Str(), t, len(data))
+	debug.Log("Repository.Save", "save id %v (%v, %d bytes)", id.Str(), t, len(data))
 
 	// get buf from the pool
 	ciphertext := getBuf()
@@ -317,12 +317,12 @@ func (s *Server) Save(t pack.BlobType, data []byte, id backend.ID) (backend.ID,
 	// add this id to the index, although we don't know yet in which pack it
 	// will be saved, the entry will be updated when the pack is written.
 	s.idx.Store(t, id, nil, 0, 0)
-	debug.Log("Server.Save", "saving stub for %v (%v) in index", id.Str, t)
+	debug.Log("Repository.Save", "saving stub for %v (%v) in index", id.Str(), t)
 
 	// if the pack is not full enough and there are less than maxPackers
 	// packers, put back to the list
 	if packer.Size() < minPackSize && s.countPacker() < maxPackers {
-		debug.Log("Server.Save", "pack is not full enough (%d bytes)", packer.Size())
+		debug.Log("Repository.Save", "pack is not full enough (%d bytes)", packer.Size())
 		s.insertPacker(packer)
 		return id, nil
 	}
@@ -332,8 +332,8 @@ func (s *Server) Save(t pack.BlobType, data []byte, id backend.ID) (backend.ID,
 }
 
 // SaveFrom encrypts data read from rd and stores it in a pack in the backend as type t.
-func (s *Server) SaveFrom(t pack.BlobType, id backend.ID, length uint, rd io.Reader) error {
-	debug.Log("Server.SaveFrom", "save id %v (%v, %d bytes)", id.Str(), t, length)
+func (s *Repository) SaveFrom(t pack.BlobType, id backend.ID, length uint, rd io.Reader) error {
+	debug.Log("Repository.SaveFrom", "save id %v (%v, %d bytes)", id.Str(), t, length)
 	if id == nil {
 		return errors.New("id is nil")
 	}
@@ -353,8 +353,8 @@ func (s *Server) SaveFrom(t pack.BlobType, id backend.ID, length uint, rd io.Rea
 
 // SaveJSON serialises item as JSON and encrypts and saves it in a pack in the
 // backend as type t.
-func (s *Server) SaveJSON(t pack.BlobType, item interface{}) (backend.ID, error) {
-	debug.Log("Server.SaveJSON", "save %v blob", t)
+func (s *Repository) SaveJSON(t pack.BlobType, item interface{}) (backend.ID, error) {
+	debug.Log("Repository.SaveJSON", "save %v blob", t)
 	buf := getBuf()[:0]
 	defer freeBuf(buf)
 
@@ -372,13 +372,13 @@ func (s *Server) SaveJSON(t pack.BlobType, item interface{}) (backend.ID, error)
 
 // SaveJSONUnpacked serialises item as JSON and encrypts and saves it in the
 // backend as type t, without a pack. It returns the storage hash.
-func (s *Server) SaveJSONUnpacked(t backend.Type, item interface{}) (backend.ID, error) {
+func (s *Repository) SaveJSONUnpacked(t backend.Type, item interface{}) (backend.ID, error) {
 	// create file
 	blob, err := s.be.Create()
 	if err != nil {
 		return nil, err
 	}
-	debug.Log("Server.SaveJSONUnpacked", "create new file %p", blob)
+	debug.Log("Repository.SaveJSONUnpacked", "create new file %p", blob)
 
 	// hash
 	hw := backend.NewHashingWriter(blob, sha256.New())
@@ -409,11 +409,11 @@ func (s *Server) SaveJSONUnpacked(t backend.Type, item interface{}) (backend.ID,
 }
 
 // Flush saves all remaining packs.
-func (s *Server) Flush() error {
+func (s *Repository) Flush() error {
 	s.pm.Lock()
 	defer s.pm.Unlock()
 
-	debug.Log("Server.Flush", "manually flushing %d packs", len(s.packs))
+	debug.Log("Repository.Flush", "manually flushing %d packs", len(s.packs))
 
 	for _, p := range s.packs {
 		err := s.savePacker(p)
@@ -426,23 +426,23 @@ func (s *Server) Flush() error {
 	return nil
 }
 
-func (s *Server) Backend() backend.Backend {
+func (s *Repository) Backend() backend.Backend {
 	return s.be
 }
 
-func (s *Server) Index() *Index {
+func (s *Repository) Index() *Index {
 	return s.idx
 }
 
-// SetIndex instructs the server to use the given index.
-func (s *Server) SetIndex(i *Index) {
+// SetIndex instructs the repository to use the given index.
+func (s *Repository) SetIndex(i *Index) {
 	s.idx = i
 }
 
 // SaveIndex saves all new packs in the index in the backend, returned is the
 // storage ID.
-func (s *Server) SaveIndex() (backend.ID, error) {
-	debug.Log("Server.SaveIndex", "Saving index")
+func (s *Repository) SaveIndex() (backend.ID, error) {
+	debug.Log("Repository.SaveIndex", "Saving index")
 
 	// create blob
 	blob, err := s.be.Create()
@@ -450,7 +450,7 @@ func (s *Server) SaveIndex() (backend.ID, error) {
 		return nil, err
 	}
 
-	debug.Log("Server.SaveIndex", "create new pack %p", blob)
+	debug.Log("Repository.SaveIndex", "create new pack %p", blob)
 
 	// hash
 	hw := backend.NewHashingWriter(blob, sha256.New())
@@ -476,15 +476,15 @@ func (s *Server) SaveIndex() (backend.ID, error) {
 		return nil, err
 	}
 
-	debug.Log("Server.SaveIndex", "Saved index as %v", sid.Str())
+	debug.Log("Repository.SaveIndex", "Saved index as %v", sid.Str())
 
 	return sid, nil
 }
 
 // LoadIndex loads all index files from the backend and merges them with the
 // current index.
-func (s *Server) LoadIndex() error {
-	debug.Log("Server.LoadIndex", "Loading index")
+func (s *Repository) LoadIndex() error {
+	debug.Log("Repository.LoadIndex", "Loading index")
 	done := make(chan struct{})
 	defer close(done)
 
@@ -498,8 +498,8 @@ func (s *Server) LoadIndex() error {
 }
 
 // loadIndex loads the index id and merges it with the currently used index.
-func (s *Server) loadIndex(id string) error {
-	debug.Log("Server.loadIndex", "Loading index %v", id[:8])
+func (s *Repository) loadIndex(id string) error {
+	debug.Log("Repository.loadIndex", "Loading index %v", id[:8])
 	before := len(s.idx.pack)
 
 	rd, err := s.be.Get(backend.Index, id)
@@ -517,14 +517,14 @@ func (s *Server) loadIndex(id string) error {
 
 	idx, err := DecodeIndex(decryptRd)
 	if err != nil {
-		debug.Log("Server.loadIndex", "error while decoding index %v: %v", id, err)
+		debug.Log("Repository.loadIndex", "error while decoding index %v: %v", id, err)
 		return err
 	}
 
 	s.idx.Merge(idx)
 
 	after := len(s.idx.pack)
-	debug.Log("Server.loadIndex", "Loaded index %v, added %v blobs", id[:8], after-before)
+	debug.Log("Repository.loadIndex", "Loaded index %v, added %v blobs", id[:8], after-before)
 
 	return nil
 }
@@ -532,7 +532,7 @@ func (s *Server) loadIndex(id string) error {
 const repositoryIDSize = sha256.Size
 const RepositoryVersion = 1
 
-func createConfig(s *Server) (err error) {
+func createConfig(s *Repository) (err error) {
 	s.Config.ChunkerPolynomial, err = chunker.RandomPolynomial()
 	if err != nil {
 		return err
@@ -547,13 +547,13 @@ func createConfig(s *Server) (err error) {
 	s.Config.ID = hex.EncodeToString(newID)
 	s.Config.Version = RepositoryVersion
 
-	debug.Log("Server.createConfig", "New config: %#v", s.Config)
+	debug.Log("Repository.createConfig", "New config: %#v", s.Config)
 
 	_, err = s.SaveJSONUnpacked(backend.Config, s.Config)
 	return err
 }
 
-func (s *Server) loadConfig(cfg *Config) error {
+func (s *Repository) loadConfig(cfg *Config) error {
 	err := s.LoadJSONUnpacked(backend.Config, nil, cfg)
 	if err != nil {
 		return err
@@ -572,7 +572,7 @@ func (s *Server) loadConfig(cfg *Config) error {
 
 // SearchKey finds a key with the supplied password, afterwards the config is
 // read and parsed.
-func (s *Server) SearchKey(password string) error {
+func (s *Repository) SearchKey(password string) error {
 	key, err := SearchKey(s, password)
 	if err != nil {
 		return err
@@ -585,7 +585,7 @@ func (s *Server) SearchKey(password string) error {
 
 // Init creates a new master key with the supplied password and initializes the
 // repository config.
-func (s *Server) Init(password string) error {
+func (s *Repository) Init(password string) error {
 	has, err := s.Test(backend.Config, "")
 	if err != nil {
 		return err
@@ -604,32 +604,32 @@ func (s *Server) Init(password string) error {
 	return createConfig(s)
 }
 
-func (s *Server) Decrypt(ciphertext []byte) ([]byte, error) {
+func (s *Repository) Decrypt(ciphertext []byte) ([]byte, error) {
 	if s.key == nil {
-		return nil, errors.New("key for server not set")
+		return nil, errors.New("key for repository not set")
 	}
 
 	return crypto.Decrypt(s.key, nil, ciphertext)
 }
 
-func (s *Server) Encrypt(ciphertext, plaintext []byte) ([]byte, error) {
+func (s *Repository) Encrypt(ciphertext, plaintext []byte) ([]byte, error) {
 	if s.key == nil {
-		return nil, errors.New("key for server not set")
+		return nil, errors.New("key for repository not set")
 	}
 
 	return crypto.Encrypt(s.key, ciphertext, plaintext)
 }
 
-func (s *Server) Key() *crypto.Key {
+func (s *Repository) Key() *crypto.Key {
 	return s.key
 }
 
-func (s *Server) KeyName() string {
+func (s *Repository) KeyName() string {
 	return s.keyName
 }
 
 // Count returns the number of blobs of a given type in the backend.
-func (s *Server) Count(t backend.Type) (n uint) {
+func (s *Repository) Count(t backend.Type) (n uint) {
 	for _ = range s.be.List(t, nil) {
 		n++
 	}
@@ -639,27 +639,27 @@ func (s *Server) Count(t backend.Type) (n uint) {
 
 // Proxy methods to backend
 
-func (s *Server) Get(t backend.Type, name string) (io.ReadCloser, error) {
+func (s *Repository) Get(t backend.Type, name string) (io.ReadCloser, error) {
 	return s.be.Get(t, name)
 }
 
-func (s *Server) List(t backend.Type, done <-chan struct{}) <-chan string {
+func (s *Repository) List(t backend.Type, done <-chan struct{}) <-chan string {
 	return s.be.List(t, done)
 }
 
-func (s *Server) Test(t backend.Type, name string) (bool, error) {
+func (s *Repository) Test(t backend.Type, name string) (bool, error) {
 	return s.be.Test(t, name)
 }
 
-func (s *Server) Remove(t backend.Type, name string) error {
+func (s *Repository) Remove(t backend.Type, name string) error {
 	return s.be.Remove(t, name)
 }
 
-func (s *Server) Close() error {
+func (s *Repository) Close() error {
 	return s.be.Close()
 }
 
-func (s *Server) Delete() error {
+func (s *Repository) Delete() error {
 	if b, ok := s.be.(backend.Deleter); ok {
 		return b.Delete()
 	}
@@ -667,6 +667,6 @@ func (s *Server) Delete() error {
 	return errors.New("Delete() called for backend that does not implement this method")
 }
 
-func (s *Server) Location() string {
+func (s *Repository) Location() string {
 	return s.be.Location()
 }
diff --git a/restorer.go b/restorer.go
index 26f605611..b11cd8916 100644
--- a/restorer.go
+++ b/restorer.go
@@ -14,7 +14,7 @@ import (
 
 // Restorer is used to restore a snapshot to a directory.
 type Restorer struct {
-	s  *repo.Server
+	s  *repo.Repository
 	sn *Snapshot
 
 	Error  func(dir string, node *Node, err error) error
@@ -24,7 +24,7 @@ type Restorer struct {
 var restorerAbortOnAllErrors = func(str string, node *Node, err error) error { return err }
 
 // NewRestorer creates a restorer preloaded with the content from the snapshot id.
-func NewRestorer(s *repo.Server, id backend.ID) (*Restorer, error) {
+func NewRestorer(s *repo.Repository, id backend.ID) (*Restorer, error) {
 	r := &Restorer{s: s, Error: restorerAbortOnAllErrors}
 
 	var err error
diff --git a/snapshot.go b/snapshot.go
index f32425f1f..68beafa25 100644
--- a/snapshot.go
+++ b/snapshot.go
@@ -50,7 +50,7 @@ func NewSnapshot(paths []string) (*Snapshot, error) {
 	return sn, nil
 }
 
-func LoadSnapshot(s *repo.Server, id backend.ID) (*Snapshot, error) {
+func LoadSnapshot(s *repo.Repository, id backend.ID) (*Snapshot, error) {
 	sn := &Snapshot{id: id}
 	err := s.LoadJSONUnpacked(backend.Snapshot, id, sn)
 	if err != nil {
diff --git a/test/backend.go b/test/backend.go
index 31b824155..a9193b1ff 100644
--- a/test/backend.go
+++ b/test/backend.go
@@ -17,7 +17,7 @@ var TestPassword = flag.String("test.password", "", `use this password for repos
 var TestCleanup = flag.Bool("test.cleanup", true, "clean up after running tests (remove local backend directory with all content)")
 var TestTempDir = flag.String("test.tempdir", "", "use this directory for temporary storage (default: system temp dir)")
 
-func SetupBackend(t testing.TB) *repo.Server {
+func SetupBackend(t testing.TB) *repo.Repository {
 	tempdir, err := ioutil.TempDir(*TestTempDir, "restic-test-")
 	OK(t, err)
 
@@ -29,12 +29,12 @@ func SetupBackend(t testing.TB) *repo.Server {
 	err = os.Setenv("RESTIC_CACHE", filepath.Join(tempdir, "cache"))
 	OK(t, err)
 
-	s := repo.NewServer(b)
+	s := repo.New(b)
 	OK(t, s.Init(*TestPassword))
 	return s
 }
 
-func TeardownBackend(t testing.TB, s *repo.Server) {
+func TeardownBackend(t testing.TB, s *repo.Repository) {
 	if !*TestCleanup {
 		l := s.Backend().(*local.Local)
 		t.Logf("leaving local backend at %s\n", l.Location())
@@ -44,7 +44,7 @@ func TeardownBackend(t testing.TB, s *repo.Server) {
 	OK(t, s.Delete())
 }
 
-func SnapshotDir(t testing.TB, server *repo.Server, path string, parent backend.ID) *restic.Snapshot {
+func SnapshotDir(t testing.TB, server *repo.Repository, path string, parent backend.ID) *restic.Snapshot {
 	arch := restic.NewArchiver(server)
 	sn, _, err := arch.Snapshot(nil, []string{path}, parent)
 	OK(t, err)
diff --git a/tree.go b/tree.go
index d43a119e7..c64e521f3 100644
--- a/tree.go
+++ b/tree.go
@@ -30,7 +30,7 @@ func (t Tree) String() string {
 	return fmt.Sprintf("Tree<%d nodes>", len(t.Nodes))
 }
 
-func LoadTree(s *repo.Server, id backend.ID) (*Tree, error) {
+func LoadTree(s *repo.Repository, id backend.ID) (*Tree, error) {
 	tree := &Tree{}
 	err := s.LoadJSONPack(pack.Tree, id, tree)
 	if err != nil {
diff --git a/walk.go b/walk.go
index a0f1566da..79a4cf176 100644
--- a/walk.go
+++ b/walk.go
@@ -16,7 +16,7 @@ type WalkTreeJob struct {
 	Tree *Tree
 }
 
-func walkTree(s *repo.Server, path string, treeID backend.ID, done chan struct{}, jobCh chan<- WalkTreeJob) {
+func walkTree(s *repo.Repository, path string, treeID backend.ID, done chan struct{}, jobCh chan<- WalkTreeJob) {
 	debug.Log("walkTree", "start on %q (%v)", path, treeID.Str())
 
 	t, err := LoadTree(s, treeID)
@@ -41,7 +41,7 @@ func walkTree(s *repo.Server, path string, treeID backend.ID, done chan struct{}
 // WalkTree walks the tree specified by id recursively and sends a job for each
 // file and directory it finds. When the channel done is closed, processing
 // stops.
-func WalkTree(server *repo.Server, id backend.ID, done chan struct{}, jobCh chan<- WalkTreeJob) {
+func WalkTree(server *repo.Repository, id backend.ID, done chan struct{}, jobCh chan<- WalkTreeJob) {
 	debug.Log("WalkTree", "start on %v", id.Str())
 	walkTree(server, "", id, done, jobCh)
 	close(jobCh)

From 7ec674f3e89fc2f4d80b8b9a38ff4fb52e1c5fd4 Mon Sep 17 00:00:00 2001
From: Alexander Neumann <alexander@bumpern.de>
Date: Sat, 9 May 2015 13:37:02 +0200
Subject: [PATCH 3/6] Move repo/server.go -> repo/repository.go

---
 repo/{server.go => repository.go}           | 0
 repo/{server_test.go => repository_test.go} | 0
 2 files changed, 0 insertions(+), 0 deletions(-)
 rename repo/{server.go => repository.go} (100%)
 rename repo/{server_test.go => repository_test.go} (100%)

diff --git a/repo/server.go b/repo/repository.go
similarity index 100%
rename from repo/server.go
rename to repo/repository.go
diff --git a/repo/server_test.go b/repo/repository_test.go
similarity index 100%
rename from repo/server_test.go
rename to repo/repository_test.go

From d9b5832034f5c8a86852f906fc45abe48a144a91 Mon Sep 17 00:00:00 2001
From: Alexander Neumann <alexander@bumpern.de>
Date: Sat, 9 May 2015 13:32:52 +0200
Subject: [PATCH 4/6] Rename variables

---
 archiver.go             | 28 ++++++++---------
 archiver_test.go        | 46 ++++++++++++++--------------
 cache.go                |  8 ++---
 cache_test.go           |  8 ++---
 cmd/restic/cmd_find.go  | 12 ++++----
 cmd/restic/cmd_fsck.go  | 22 +++++++-------
 cmd/restic/cmd_key.go   |  6 ++--
 cmd/restic/cmd_ls.go    |  6 ++--
 node.go                 |  8 ++---
 repo/repository_test.go | 66 ++++++++++++++++++++---------------------
 restorer.go             | 16 +++++-----
 snapshot.go             |  4 +--
 snapshot_test.go        |  4 +--
 test/backend.go         | 18 +++++------
 tree.go                 |  4 +--
 tree_test.go            | 10 +++----
 walk.go                 | 10 +++----
 walk_test.go            | 12 ++++----
 18 files changed, 144 insertions(+), 144 deletions(-)

diff --git a/archiver.go b/archiver.go
index a7c15fc60..103eedcb9 100644
--- a/archiver.go
+++ b/archiver.go
@@ -30,7 +30,7 @@ var archiverAllowAllFiles = func(string, os.FileInfo) bool { return true }
 
 // Archiver is used to backup a set of directories.
 type Archiver struct {
-	s *repo.Repository
+	repo *repo.Repository
 
 	blobToken chan struct{}
 
@@ -39,9 +39,9 @@ type Archiver struct {
 }
 
 // NewArchiver returns a new archiver.
-func NewArchiver(s *repo.Repository) *Archiver {
+func NewArchiver(repo *repo.Repository) *Archiver {
 	arch := &Archiver{
-		s:         s,
+		repo:      repo,
 		blobToken: make(chan struct{}, maxConcurrentBlobs),
 	}
 
@@ -60,13 +60,13 @@ func (arch *Archiver) Save(t pack.BlobType, id backend.ID, length uint, rd io.Re
 	debug.Log("Archiver.Save", "Save(%v, %v)\n", t, id.Str())
 
 	// test if this blob is already known
-	if arch.s.Index().Has(id) {
+	if arch.repo.Index().Has(id) {
 		debug.Log("Archiver.Save", "(%v, %v) already saved\n", t, id.Str())
 		return nil
 	}
 
 	// otherwise save blob
-	err := arch.s.SaveFrom(t, id, length, rd)
+	err := arch.repo.SaveFrom(t, id, length, rd)
 	if err != nil {
 		debug.Log("Archiver.Save", "Save(%v, %v): error %v\n", t, id.Str(), err)
 		return err
@@ -86,11 +86,11 @@ func (arch *Archiver) SaveTreeJSON(item interface{}) (backend.ID, error) {
 
 	// check if tree has been saved before
 	id := backend.Hash(data)
-	if arch.s.Index().Has(id) {
+	if arch.repo.Index().Has(id) {
 		return id, nil
 	}
 
-	return arch.s.SaveJSON(pack.Tree, item)
+	return arch.repo.SaveJSON(pack.Tree, item)
 }
 
 func (arch *Archiver) reloadFileIfChanged(node *Node, file *os.File) (*Node, error) {
@@ -184,7 +184,7 @@ func (arch *Archiver) SaveFile(p *Progress, node *Node) error {
 		return err
 	}
 
-	chnker := chunker.New(file, arch.s.Config.ChunkerPolynomial, sha256.New())
+	chnker := chunker.New(file, arch.repo.Config.ChunkerPolynomial, sha256.New())
 	resultChannels := [](<-chan saveResult){}
 
 	for {
@@ -254,7 +254,7 @@ func (arch *Archiver) fileWorker(wg *sync.WaitGroup, p *Progress, done <-chan st
 				// check if all content is still available in the repository
 				contentMissing := false
 				for _, blob := range oldNode.blobs {
-					if ok, err := arch.s.Test(backend.Data, blob.Storage.String()); !ok || err != nil {
+					if ok, err := arch.repo.Test(backend.Data, blob.Storage.String()); !ok || err != nil {
 						debug.Log("Archiver.fileWorker", "   %v not using old data, %v (%v) is missing", e.Path(), blob.ID.Str(), blob.Storage.Str())
 						contentMissing = true
 						break
@@ -557,14 +557,14 @@ func (arch *Archiver) Snapshot(p *Progress, paths []string, parentID backend.ID)
 		sn.Parent = parentID
 
 		// load parent snapshot
-		parent, err := LoadSnapshot(arch.s, parentID)
+		parent, err := LoadSnapshot(arch.repo, parentID)
 		if err != nil {
 			return nil, nil, err
 		}
 
 		// start walker on old tree
 		ch := make(chan WalkTreeJob)
-		go WalkTree(arch.s, parent.Tree, done, ch)
+		go WalkTree(arch.repo, parent.Tree, done, ch)
 		jobs.Old = ch
 	} else {
 		// use closed channel
@@ -622,7 +622,7 @@ func (arch *Archiver) Snapshot(p *Progress, paths []string, parentID backend.ID)
 	sn.Tree = root.Subtree
 
 	// save snapshot
-	id, err := arch.s.SaveJSONUnpacked(backend.Snapshot, sn)
+	id, err := arch.repo.SaveJSONUnpacked(backend.Snapshot, sn)
 	if err != nil {
 		return nil, nil, err
 	}
@@ -632,13 +632,13 @@ func (arch *Archiver) Snapshot(p *Progress, paths []string, parentID backend.ID)
 	debug.Log("Archiver.Snapshot", "saved snapshot %v", id.Str())
 
 	// flush server
-	err = arch.s.Flush()
+	err = arch.repo.Flush()
 	if err != nil {
 		return nil, nil, err
 	}
 
 	// save index
-	indexID, err := arch.s.SaveIndex()
+	indexID, err := arch.repo.SaveIndex()
 	if err != nil {
 		debug.Log("Archiver.Snapshot", "error saving index: %v", err)
 		return nil, nil, err
diff --git a/archiver_test.go b/archiver_test.go
index f42bdd71a..9aa9bffb4 100644
--- a/archiver_test.go
+++ b/archiver_test.go
@@ -51,8 +51,8 @@ func BenchmarkChunkEncrypt(b *testing.B) {
 	data := Random(23, 10<<20) // 10MiB
 	rd := bytes.NewReader(data)
 
-	s := SetupBackend(b)
-	defer TeardownBackend(b, s)
+	s := SetupRepo(b)
+	defer TeardownRepo(b, s)
 
 	buf := make([]byte, chunker.MaxSize)
 	buf2 := make([]byte, chunker.MaxSize)
@@ -82,8 +82,8 @@ func benchmarkChunkEncryptP(b *testing.PB, buf []byte, rd Rdr, key *crypto.Key)
 }
 
 func BenchmarkChunkEncryptParallel(b *testing.B) {
-	s := SetupBackend(b)
-	defer TeardownBackend(b, s)
+	s := SetupRepo(b)
+	defer TeardownRepo(b, s)
 
 	data := Random(23, 10<<20) // 10MiB
 
@@ -101,10 +101,10 @@ func BenchmarkChunkEncryptParallel(b *testing.B) {
 }
 
 func archiveDirectory(b testing.TB) {
-	server := SetupBackend(b)
-	defer TeardownBackend(b, server)
+	repo := SetupRepo(b)
+	defer TeardownRepo(b, repo)
 
-	arch := restic.NewArchiver(server)
+	arch := restic.NewArchiver(repo)
 
 	_, id, err := arch.Snapshot(nil, []string{*benchArchiveDirectory}, nil)
 	OK(b, err)
@@ -135,8 +135,8 @@ func archiveWithDedup(t testing.TB) {
 		t.Skip("benchdir not set, skipping TestArchiverDedup")
 	}
 
-	server := SetupBackend(t)
-	defer TeardownBackend(t, server)
+	repo := SetupRepo(t)
+	defer TeardownRepo(t, repo)
 
 	var cnt struct {
 		before, after, after2 struct {
@@ -145,24 +145,24 @@ func archiveWithDedup(t testing.TB) {
 	}
 
 	// archive a few files
-	sn := SnapshotDir(t, server, *benchArchiveDirectory, nil)
+	sn := SnapshotDir(t, repo, *benchArchiveDirectory, nil)
 	t.Logf("archived snapshot %v", sn.ID().Str())
 
 	// get archive stats
-	cnt.before.packs = server.Count(backend.Data)
-	cnt.before.dataBlobs = server.Index().Count(pack.Data)
-	cnt.before.treeBlobs = server.Index().Count(pack.Tree)
+	cnt.before.packs = repo.Count(backend.Data)
+	cnt.before.dataBlobs = repo.Index().Count(pack.Data)
+	cnt.before.treeBlobs = repo.Index().Count(pack.Tree)
 	t.Logf("packs %v, data blobs %v, tree blobs %v",
 		cnt.before.packs, cnt.before.dataBlobs, cnt.before.treeBlobs)
 
 	// archive the same files again, without parent snapshot
-	sn2 := SnapshotDir(t, server, *benchArchiveDirectory, nil)
+	sn2 := SnapshotDir(t, repo, *benchArchiveDirectory, nil)
 	t.Logf("archived snapshot %v", sn2.ID().Str())
 
 	// get archive stats again
-	cnt.after.packs = server.Count(backend.Data)
-	cnt.after.dataBlobs = server.Index().Count(pack.Data)
-	cnt.after.treeBlobs = server.Index().Count(pack.Tree)
+	cnt.after.packs = repo.Count(backend.Data)
+	cnt.after.dataBlobs = repo.Index().Count(pack.Data)
+	cnt.after.treeBlobs = repo.Index().Count(pack.Tree)
 	t.Logf("packs %v, data blobs %v, tree blobs %v",
 		cnt.after.packs, cnt.after.dataBlobs, cnt.after.treeBlobs)
 
@@ -173,13 +173,13 @@ func archiveWithDedup(t testing.TB) {
 	}
 
 	// archive the same files again, with a parent snapshot
-	sn3 := SnapshotDir(t, server, *benchArchiveDirectory, sn2.ID())
+	sn3 := SnapshotDir(t, repo, *benchArchiveDirectory, sn2.ID())
 	t.Logf("archived snapshot %v, parent %v", sn3.ID().Str(), sn2.ID().Str())
 
 	// get archive stats again
-	cnt.after2.packs = server.Count(backend.Data)
-	cnt.after2.dataBlobs = server.Index().Count(pack.Data)
-	cnt.after2.treeBlobs = server.Index().Count(pack.Tree)
+	cnt.after2.packs = repo.Count(backend.Data)
+	cnt.after2.dataBlobs = repo.Index().Count(pack.Data)
+	cnt.after2.treeBlobs = repo.Index().Count(pack.Tree)
 	t.Logf("packs %v, data blobs %v, tree blobs %v",
 		cnt.after2.packs, cnt.after2.dataBlobs, cnt.after2.treeBlobs)
 
@@ -199,8 +199,8 @@ func BenchmarkLoadTree(t *testing.B) {
 		t.Skip("benchdir not set, skipping TestArchiverDedup")
 	}
 
-	s := SetupBackend(t)
-	defer TeardownBackend(t, s)
+	s := SetupRepo(t)
+	defer TeardownRepo(t, s)
 
 	// archive a few files
 	arch := restic.NewArchiver(s)
diff --git a/cache.go b/cache.go
index 902aae2ea..c1d3101dc 100644
--- a/cache.go
+++ b/cache.go
@@ -18,13 +18,13 @@ type Cache struct {
 	base string
 }
 
-func NewCache(s *repo.Repository) (*Cache, error) {
+func NewCache(repo *repo.Repository) (*Cache, error) {
 	cacheDir, err := getCacheDir()
 	if err != nil {
 		return nil, err
 	}
 
-	basedir := filepath.Join(cacheDir, s.Config.ID)
+	basedir := filepath.Join(cacheDir, repo.Config.ID)
 	debug.Log("Cache.New", "opened cache at %v", basedir)
 
 	return &Cache{base: basedir}, nil
@@ -106,7 +106,7 @@ func (c *Cache) purge(t backend.Type, subtype string, id backend.ID) error {
 }
 
 // Clear removes information from the cache that isn't present in the server any more.
-func (c *Cache) Clear(s *repo.Repository) error {
+func (c *Cache) Clear(repo *repo.Repository) error {
 	list, err := c.list(backend.Snapshot)
 	if err != nil {
 		return err
@@ -115,7 +115,7 @@ func (c *Cache) Clear(s *repo.Repository) error {
 	for _, entry := range list {
 		debug.Log("Cache.Clear", "found entry %v", entry)
 
-		if ok, err := s.Test(backend.Snapshot, entry.ID.String()); !ok || err != nil {
+		if ok, err := repo.Test(backend.Snapshot, entry.ID.String()); !ok || err != nil {
 			debug.Log("Cache.Clear", "snapshot %v doesn't exist any more, removing %v", entry.ID, entry)
 
 			err = c.purge(backend.Snapshot, entry.Subtype, entry.ID)
diff --git a/cache_test.go b/cache_test.go
index 8540e1839..47f5d9ad9 100644
--- a/cache_test.go
+++ b/cache_test.go
@@ -8,13 +8,13 @@ import (
 )
 
 func TestCache(t *testing.T) {
-	server := SetupBackend(t)
-	defer TeardownBackend(t, server)
+	repo := SetupRepo(t)
+	defer TeardownRepo(t, repo)
 
-	_, err := restic.NewCache(server)
+	_, err := restic.NewCache(repo)
 	OK(t, err)
 
-	arch := restic.NewArchiver(server)
+	arch := restic.NewArchiver(repo)
 
 	// archive some files, this should automatically cache all blobs from the snapshot
 	_, _, err = arch.Snapshot(nil, []string{*benchArchiveDirectory}, nil)
diff --git a/cmd/restic/cmd_find.go b/cmd/restic/cmd_find.go
index 9ffae8bdf..535280e00 100644
--- a/cmd/restic/cmd_find.go
+++ b/cmd/restic/cmd_find.go
@@ -59,9 +59,9 @@ func parseTime(str string) (time.Time, error) {
 	return time.Time{}, fmt.Errorf("unable to parse time: %q", str)
 }
 
-func (c CmdFind) findInTree(s *repo.Repository, id backend.ID, path string) ([]findResult, error) {
+func (c CmdFind) findInTree(repo *repo.Repository, id backend.ID, path string) ([]findResult, error) {
 	debug.Log("restic.find", "checking tree %v\n", id)
-	tree, err := restic.LoadTree(s, id)
+	tree, err := restic.LoadTree(repo, id)
 	if err != nil {
 		return nil, err
 	}
@@ -93,7 +93,7 @@ func (c CmdFind) findInTree(s *repo.Repository, id backend.ID, path string) ([]f
 		}
 
 		if node.Type == "dir" {
-			subdirResults, err := c.findInTree(s, id, filepath.Join(path, node.Name))
+			subdirResults, err := c.findInTree(repo, id, filepath.Join(path, node.Name))
 			if err != nil {
 				return nil, err
 			}
@@ -105,7 +105,7 @@ func (c CmdFind) findInTree(s *repo.Repository, id backend.ID, path string) ([]f
 	return results, nil
 }
 
-func (c CmdFind) findInSnapshot(s *repo.Repository, name string) error {
+func (c CmdFind) findInSnapshot(repo *repo.Repository, name string) error {
 	debug.Log("restic.find", "searching in snapshot %s\n  for entries within [%s %s]", name, c.oldest, c.newest)
 
 	id, err := backend.ParseID(name)
@@ -113,12 +113,12 @@ func (c CmdFind) findInSnapshot(s *repo.Repository, name string) error {
 		return err
 	}
 
-	sn, err := restic.LoadSnapshot(s, id)
+	sn, err := restic.LoadSnapshot(repo, id)
 	if err != nil {
 		return err
 	}
 
-	results, err := c.findInTree(s, sn.Tree, "")
+	results, err := c.findInTree(repo, sn.Tree, "")
 	if err != nil {
 		return err
 	}
diff --git a/cmd/restic/cmd_fsck.go b/cmd/restic/cmd_fsck.go
index 5beedf190..e8b113c42 100644
--- a/cmd/restic/cmd_fsck.go
+++ b/cmd/restic/cmd_fsck.go
@@ -34,7 +34,7 @@ func init() {
 	}
 }
 
-func fsckFile(opts CmdFsck, s *repo.Repository, IDs []backend.ID) (uint64, error) {
+func fsckFile(opts CmdFsck, repo *repo.Repository, IDs []backend.ID) (uint64, error) {
 	debug.Log("restic.fsckFile", "checking file %v", IDs)
 	var bytes uint64
 
@@ -42,7 +42,7 @@ func fsckFile(opts CmdFsck, s *repo.Repository, IDs []backend.ID) (uint64, error
 		debug.Log("restic.fsck", "  checking data blob %v\n", id)
 
 		// test if blob is in the index
-		packID, tpe, _, length, err := s.Index().Lookup(id)
+		packID, tpe, _, length, err := repo.Index().Lookup(id)
 		if err != nil {
 			return 0, fmt.Errorf("storage for blob %v (%v) not found", id, tpe)
 		}
@@ -52,13 +52,13 @@ func fsckFile(opts CmdFsck, s *repo.Repository, IDs []backend.ID) (uint64, error
 
 		if opts.CheckData {
 			// load content
-			_, err := s.LoadBlob(pack.Data, id)
+			_, err := repo.LoadBlob(pack.Data, id)
 			if err != nil {
 				return 0, err
 			}
 		} else {
 			// test if data blob is there
-			ok, err := s.Test(backend.Data, packID.String())
+			ok, err := repo.Test(backend.Data, packID.String())
 			if err != nil {
 				return 0, err
 			}
@@ -77,10 +77,10 @@ func fsckFile(opts CmdFsck, s *repo.Repository, IDs []backend.ID) (uint64, error
 	return bytes, nil
 }
 
-func fsckTree(opts CmdFsck, s *repo.Repository, id backend.ID) error {
+func fsckTree(opts CmdFsck, repo *repo.Repository, id backend.ID) error {
 	debug.Log("restic.fsckTree", "checking tree %v", id.Str())
 
-	tree, err := restic.LoadTree(s, id)
+	tree, err := restic.LoadTree(repo, id)
 	if err != nil {
 		return err
 	}
@@ -122,7 +122,7 @@ func fsckTree(opts CmdFsck, s *repo.Repository, id backend.ID) error {
 			}
 
 			debug.Log("restic.fsckTree", "check file %v (%v)", node.Name, id.Str())
-			bytes, err := fsckFile(opts, s, node.Content)
+			bytes, err := fsckFile(opts, repo, node.Content)
 			if err != nil {
 				return err
 			}
@@ -139,7 +139,7 @@ func fsckTree(opts CmdFsck, s *repo.Repository, id backend.ID) error {
 			// record id
 			seenIDs.Insert(node.Subtree)
 
-			err = fsckTree(opts, s, node.Subtree)
+			err = fsckTree(opts, repo, node.Subtree)
 			if err != nil {
 				firstErr = err
 				fmt.Fprintf(os.Stderr, "%v\n", err)
@@ -157,15 +157,15 @@ func fsckTree(opts CmdFsck, s *repo.Repository, id backend.ID) error {
 	return firstErr
 }
 
-func fsckSnapshot(opts CmdFsck, s *repo.Repository, id backend.ID) error {
+func fsckSnapshot(opts CmdFsck, repo *repo.Repository, id backend.ID) error {
 	debug.Log("restic.fsck", "checking snapshot %v\n", id)
 
-	sn, err := restic.LoadSnapshot(s, id)
+	sn, err := restic.LoadSnapshot(repo, id)
 	if err != nil {
 		return fmt.Errorf("loading snapshot %v failed: %v", id, err)
 	}
 
-	err = fsckTree(opts, s, sn.Tree)
+	err = fsckTree(opts, repo, sn.Tree)
 	if err != nil {
 		debug.Log("restic.fsck", "  checking tree %v for snapshot %v\n", sn.Tree, id)
 		fmt.Fprintf(os.Stderr, "snapshot %v:\n  error for tree %v:\n    %v\n", id, sn.Tree, err)
diff --git a/cmd/restic/cmd_key.go b/cmd/restic/cmd_key.go
index 3337829ac..7150e4e0d 100644
--- a/cmd/restic/cmd_key.go
+++ b/cmd/restic/cmd_key.go
@@ -74,12 +74,12 @@ func addKey(s *repo.Repository) error {
 	return nil
 }
 
-func deleteKey(s *repo.Repository, name string) error {
-	if name == s.KeyName() {
+func deleteKey(repo *repo.Repository, name string) error {
+	if name == repo.KeyName() {
 		return errors.New("refusing to remove key currently used to access repository")
 	}
 
-	err := s.Remove(backend.Key, name)
+	err := repo.Remove(backend.Key, name)
 	if err != nil {
 		return err
 	}
diff --git a/cmd/restic/cmd_ls.go b/cmd/restic/cmd_ls.go
index 28c9d37ab..1148ca51e 100644
--- a/cmd/restic/cmd_ls.go
+++ b/cmd/restic/cmd_ls.go
@@ -38,8 +38,8 @@ func printNode(prefix string, n *restic.Node) string {
 	}
 }
 
-func printTree(prefix string, s *repo.Repository, id backend.ID) error {
-	tree, err := restic.LoadTree(s, id)
+func printTree(prefix string, repo *repo.Repository, id backend.ID) error {
+	tree, err := restic.LoadTree(repo, id)
 	if err != nil {
 		return err
 	}
@@ -48,7 +48,7 @@ func printTree(prefix string, s *repo.Repository, id backend.ID) error {
 		fmt.Println(printNode(prefix, entry))
 
 		if entry.Type == "dir" && entry.Subtree != nil {
-			err = printTree(filepath.Join(prefix, entry.Name), s, entry.Subtree)
+			err = printTree(filepath.Join(prefix, entry.Name), repo, entry.Subtree)
 			if err != nil {
 				return err
 			}
diff --git a/node.go b/node.go
index b11ecf60b..8413acfb5 100644
--- a/node.go
+++ b/node.go
@@ -103,14 +103,14 @@ func nodeTypeFromFileInfo(fi os.FileInfo) string {
 }
 
 // CreateAt creates the node at the given path and restores all the meta data.
-func (node *Node) CreateAt(path string, s *repo.Repository) error {
+func (node *Node) CreateAt(path string, repo *repo.Repository) error {
 	switch node.Type {
 	case "dir":
 		if err := node.createDirAt(path); err != nil {
 			return errors.Annotate(err, "createDirAt")
 		}
 	case "file":
-		if err := node.createFileAt(path, s); err != nil {
+		if err := node.createFileAt(path, repo); err != nil {
 			return errors.Annotate(err, "createFileAt")
 		}
 	case "symlink":
@@ -176,7 +176,7 @@ func (node Node) createDirAt(path string) error {
 	return nil
 }
 
-func (node Node) createFileAt(path string, s *repo.Repository) error {
+func (node Node) createFileAt(path string, repo *repo.Repository) error {
 	f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, 0600)
 	defer f.Close()
 
@@ -185,7 +185,7 @@ func (node Node) createFileAt(path string, s *repo.Repository) error {
 	}
 
 	for _, id := range node.Content {
-		buf, err := s.LoadBlob(pack.Data, id)
+		buf, err := repo.LoadBlob(pack.Data, id)
 		if err != nil {
 			return errors.Annotate(err, "Load")
 		}
diff --git a/repo/repository_test.go b/repo/repository_test.go
index 99de35701..5620b923f 100644
--- a/repo/repository_test.go
+++ b/repo/repository_test.go
@@ -23,21 +23,21 @@ type testJSONStruct struct {
 	Baz []byte
 }
 
-var serverTests = []testJSONStruct{
+var repoTests = []testJSONStruct{
 	testJSONStruct{Foo: 23, Bar: "Teststring", Baz: []byte("xx")},
 }
 
 func TestSaveJSON(t *testing.T) {
-	server := SetupBackend(t)
-	defer TeardownBackend(t, server)
+	repo := SetupRepo(t)
+	defer TeardownRepo(t, repo)
 
-	for _, obj := range serverTests {
+	for _, obj := range repoTests {
 		data, err := json.Marshal(obj)
 		OK(t, err)
 		data = append(data, '\n')
 		h := sha256.Sum256(data)
 
-		id, err := server.SaveJSON(pack.Tree, obj)
+		id, err := repo.SaveJSON(pack.Tree, obj)
 		OK(t, err)
 
 		Assert(t, bytes.Equal(h[:], id),
@@ -47,10 +47,10 @@ func TestSaveJSON(t *testing.T) {
 }
 
 func BenchmarkSaveJSON(t *testing.B) {
-	server := SetupBackend(t)
-	defer TeardownBackend(t, server)
+	repo := SetupRepo(t)
+	defer TeardownRepo(t, repo)
 
-	obj := serverTests[0]
+	obj := repoTests[0]
 
 	data, err := json.Marshal(obj)
 	OK(t, err)
@@ -60,7 +60,7 @@ func BenchmarkSaveJSON(t *testing.B) {
 	t.ResetTimer()
 
 	for i := 0; i < t.N; i++ {
-		id, err := server.SaveJSON(pack.Tree, obj)
+		id, err := repo.SaveJSON(pack.Tree, obj)
 		OK(t, err)
 
 		Assert(t, bytes.Equal(h[:], id),
@@ -72,8 +72,8 @@ func BenchmarkSaveJSON(t *testing.B) {
 var testSizes = []int{5, 23, 2<<18 + 23, 1 << 20}
 
 func TestSave(t *testing.T) {
-	server := SetupBackend(t)
-	defer TeardownBackend(t, server)
+	repo := SetupRepo(t)
+	defer TeardownRepo(t, repo)
 
 	for _, size := range testSizes {
 		data := make([]byte, size)
@@ -83,15 +83,15 @@ func TestSave(t *testing.T) {
 		id := backend.Hash(data)
 
 		// save
-		sid, err := server.Save(pack.Data, data, nil)
+		sid, err := repo.Save(pack.Data, data, nil)
 		OK(t, err)
 
 		Equals(t, id, sid)
 
-		OK(t, server.Flush())
+		OK(t, repo.Flush())
 
 		// read back
-		buf, err := server.LoadBlob(pack.Data, id)
+		buf, err := repo.LoadBlob(pack.Data, id)
 
 		Assert(t, len(buf) == len(data),
 			"number of bytes read back does not match: expected %d, got %d",
@@ -104,8 +104,8 @@ func TestSave(t *testing.T) {
 }
 
 func TestSaveFrom(t *testing.T) {
-	server := SetupBackend(t)
-	defer TeardownBackend(t, server)
+	repo := SetupRepo(t)
+	defer TeardownRepo(t, repo)
 
 	for _, size := range testSizes {
 		data := make([]byte, size)
@@ -115,13 +115,13 @@ func TestSaveFrom(t *testing.T) {
 		id := backend.Hash(data)
 
 		// save
-		err = server.SaveFrom(pack.Data, id[:], uint(size), bytes.NewReader(data))
+		err = repo.SaveFrom(pack.Data, id[:], uint(size), bytes.NewReader(data))
 		OK(t, err)
 
-		OK(t, server.Flush())
+		OK(t, repo.Flush())
 
 		// read back
-		buf, err := server.LoadBlob(pack.Data, id[:])
+		buf, err := repo.LoadBlob(pack.Data, id[:])
 
 		Assert(t, len(buf) == len(data),
 			"number of bytes read back does not match: expected %d, got %d",
@@ -134,8 +134,8 @@ func TestSaveFrom(t *testing.T) {
 }
 
 func BenchmarkSaveFrom(t *testing.B) {
-	server := SetupBackend(t)
-	defer TeardownBackend(t, server)
+	repo := SetupRepo(t)
+	defer TeardownRepo(t, repo)
 
 	size := 4 << 20 // 4MiB
 
@@ -150,48 +150,48 @@ func BenchmarkSaveFrom(t *testing.B) {
 
 	for i := 0; i < t.N; i++ {
 		// save
-		err = server.SaveFrom(pack.Data, id[:], uint(size), bytes.NewReader(data))
+		err = repo.SaveFrom(pack.Data, id[:], uint(size), bytes.NewReader(data))
 		OK(t, err)
 	}
 }
 
 func TestLoadJSONPack(t *testing.T) {
 	if *benchTestDir == "" {
-		t.Skip("benchdir not set, skipping TestServerStats")
+		t.Skip("benchdir not set, skipping")
 	}
 
-	server := SetupBackend(t)
-	defer TeardownBackend(t, server)
+	repo := SetupRepo(t)
+	defer TeardownRepo(t, repo)
 
 	// archive a few files
-	sn := SnapshotDir(t, server, *benchTestDir, nil)
-	OK(t, server.Flush())
+	sn := SnapshotDir(t, repo, *benchTestDir, nil)
+	OK(t, repo.Flush())
 
 	tree := restic.NewTree()
-	err := server.LoadJSONPack(pack.Tree, sn.Tree, &tree)
+	err := repo.LoadJSONPack(pack.Tree, sn.Tree, &tree)
 	OK(t, err)
 }
 
 func TestLoadJSONUnpacked(t *testing.T) {
 	if *benchTestDir == "" {
-		t.Skip("benchdir not set, skipping TestServerStats")
+		t.Skip("benchdir not set, skipping")
 	}
 
-	server := SetupBackend(t)
-	defer TeardownBackend(t, server)
+	repo := SetupRepo(t)
+	defer TeardownRepo(t, repo)
 
 	// archive a snapshot
 	sn := restic.Snapshot{}
 	sn.Hostname = "foobar"
 	sn.Username = "test!"
 
-	id, err := server.SaveJSONUnpacked(backend.Snapshot, &sn)
+	id, err := repo.SaveJSONUnpacked(backend.Snapshot, &sn)
 	OK(t, err)
 
 	var sn2 restic.Snapshot
 
 	// restore
-	err = server.LoadJSONUnpacked(backend.Snapshot, id, &sn2)
+	err = repo.LoadJSONUnpacked(backend.Snapshot, id, &sn2)
 	OK(t, err)
 
 	Equals(t, sn.Hostname, sn2.Hostname)
diff --git a/restorer.go b/restorer.go
index b11cd8916..33f3ffc79 100644
--- a/restorer.go
+++ b/restorer.go
@@ -14,8 +14,8 @@ import (
 
 // Restorer is used to restore a snapshot to a directory.
 type Restorer struct {
-	s  *repo.Repository
-	sn *Snapshot
+	repo *repo.Repository
+	sn   *Snapshot
 
 	Error  func(dir string, node *Node, err error) error
 	Filter func(item string, dstpath string, node *Node) bool
@@ -24,12 +24,12 @@ type Restorer struct {
 var restorerAbortOnAllErrors = func(str string, node *Node, err error) error { return err }
 
 // NewRestorer creates a restorer preloaded with the content from the snapshot id.
-func NewRestorer(s *repo.Repository, id backend.ID) (*Restorer, error) {
-	r := &Restorer{s: s, Error: restorerAbortOnAllErrors}
+func NewRestorer(repo *repo.Repository, id backend.ID) (*Restorer, error) {
+	r := &Restorer{repo: repo, Error: restorerAbortOnAllErrors}
 
 	var err error
 
-	r.sn, err = LoadSnapshot(s, id)
+	r.sn, err = LoadSnapshot(repo, id)
 	if err != nil {
 		return nil, errors.Annotate(err, "load snapshot for restorer")
 	}
@@ -38,7 +38,7 @@ func NewRestorer(s *repo.Repository, id backend.ID) (*Restorer, error) {
 }
 
 func (res *Restorer) restoreTo(dst string, dir string, treeID backend.ID) error {
-	tree, err := LoadTree(res.s, treeID)
+	tree, err := LoadTree(res.repo, treeID)
 	if err != nil {
 		return res.Error(dir, nil, errors.Annotate(err, "LoadTree"))
 	}
@@ -74,7 +74,7 @@ func (res *Restorer) restoreNodeTo(node *Node, dir string, dst string) error {
 		return nil
 	}
 
-	err := node.CreateAt(dstPath, res.s)
+	err := node.CreateAt(dstPath, res.repo)
 
 	// Did it fail because of ENOENT?
 	if pe, ok := errors.Cause(err).(*os.PathError); ok {
@@ -83,7 +83,7 @@ func (res *Restorer) restoreNodeTo(node *Node, dir string, dst string) error {
 			// Create parent directories and retry
 			err = os.MkdirAll(filepath.Dir(dstPath), 0700)
 			if err == nil || err == os.ErrExist {
-				err = node.CreateAt(dstPath, res.s)
+				err = node.CreateAt(dstPath, res.repo)
 			}
 		}
 	}
diff --git a/snapshot.go b/snapshot.go
index 68beafa25..8fe382627 100644
--- a/snapshot.go
+++ b/snapshot.go
@@ -50,9 +50,9 @@ func NewSnapshot(paths []string) (*Snapshot, error) {
 	return sn, nil
 }
 
-func LoadSnapshot(s *repo.Repository, id backend.ID) (*Snapshot, error) {
+func LoadSnapshot(repo *repo.Repository, id backend.ID) (*Snapshot, error) {
 	sn := &Snapshot{id: id}
-	err := s.LoadJSONUnpacked(backend.Snapshot, id, sn)
+	err := repo.LoadJSONUnpacked(backend.Snapshot, id, sn)
 	if err != nil {
 		return nil, err
 	}
diff --git a/snapshot_test.go b/snapshot_test.go
index 007f2e082..00bb7a038 100644
--- a/snapshot_test.go
+++ b/snapshot_test.go
@@ -8,8 +8,8 @@ import (
 )
 
 func TestNewSnapshot(t *testing.T) {
-	s := SetupBackend(t)
-	defer TeardownBackend(t, s)
+	s := SetupRepo(t)
+	defer TeardownRepo(t, s)
 
 	paths := []string{"/home/foobar"}
 
diff --git a/test/backend.go b/test/backend.go
index a9193b1ff..6b9d9fc22 100644
--- a/test/backend.go
+++ b/test/backend.go
@@ -17,7 +17,7 @@ var TestPassword = flag.String("test.password", "", `use this password for repos
 var TestCleanup = flag.Bool("test.cleanup", true, "clean up after running tests (remove local backend directory with all content)")
 var TestTempDir = flag.String("test.tempdir", "", "use this directory for temporary storage (default: system temp dir)")
 
-func SetupBackend(t testing.TB) *repo.Repository {
+func SetupRepo(t testing.TB) *repo.Repository {
 	tempdir, err := ioutil.TempDir(*TestTempDir, "restic-test-")
 	OK(t, err)
 
@@ -29,23 +29,23 @@ func SetupBackend(t testing.TB) *repo.Repository {
 	err = os.Setenv("RESTIC_CACHE", filepath.Join(tempdir, "cache"))
 	OK(t, err)
 
-	s := repo.New(b)
-	OK(t, s.Init(*TestPassword))
-	return s
+	repo := repo.New(b)
+	OK(t, repo.Init(*TestPassword))
+	return repo
 }
 
-func TeardownBackend(t testing.TB, s *repo.Repository) {
+func TeardownRepo(t testing.TB, repo *repo.Repository) {
 	if !*TestCleanup {
-		l := s.Backend().(*local.Local)
+		l := repo.Backend().(*local.Local)
 		t.Logf("leaving local backend at %s\n", l.Location())
 		return
 	}
 
-	OK(t, s.Delete())
+	OK(t, repo.Delete())
 }
 
-func SnapshotDir(t testing.TB, server *repo.Repository, path string, parent backend.ID) *restic.Snapshot {
-	arch := restic.NewArchiver(server)
+func SnapshotDir(t testing.TB, repo *repo.Repository, path string, parent backend.ID) *restic.Snapshot {
+	arch := restic.NewArchiver(repo)
 	sn, _, err := arch.Snapshot(nil, []string{path}, parent)
 	OK(t, err)
 	return sn
diff --git a/tree.go b/tree.go
index c64e521f3..ffe903480 100644
--- a/tree.go
+++ b/tree.go
@@ -30,9 +30,9 @@ func (t Tree) String() string {
 	return fmt.Sprintf("Tree<%d nodes>", len(t.Nodes))
 }
 
-func LoadTree(s *repo.Repository, id backend.ID) (*Tree, error) {
+func LoadTree(repo *repo.Repository, id backend.ID) (*Tree, error) {
 	tree := &Tree{}
-	err := s.LoadJSONPack(pack.Tree, id, tree)
+	err := repo.LoadJSONPack(pack.Tree, id, tree)
 	if err != nil {
 		return nil, err
 	}
diff --git a/tree_test.go b/tree_test.go
index ecfefa620..6a1984d48 100644
--- a/tree_test.go
+++ b/tree_test.go
@@ -93,19 +93,19 @@ func TestNodeComparison(t *testing.T) {
 }
 
 func TestLoadTree(t *testing.T) {
-	server := SetupBackend(t)
-	defer TeardownBackend(t, server)
+	repo := SetupRepo(t)
+	defer TeardownRepo(t, repo)
 
 	// save tree
 	tree := restic.NewTree()
-	id, err := server.SaveJSON(pack.Tree, tree)
+	id, err := repo.SaveJSON(pack.Tree, tree)
 	OK(t, err)
 
 	// save packs
-	OK(t, server.Flush())
+	OK(t, repo.Flush())
 
 	// load tree again
-	tree2, err := restic.LoadTree(server, id)
+	tree2, err := restic.LoadTree(repo, id)
 	OK(t, err)
 
 	Assert(t, tree.Equals(tree2),
diff --git a/walk.go b/walk.go
index 79a4cf176..6c8c0d33f 100644
--- a/walk.go
+++ b/walk.go
@@ -16,10 +16,10 @@ type WalkTreeJob struct {
 	Tree *Tree
 }
 
-func walkTree(s *repo.Repository, path string, treeID backend.ID, done chan struct{}, jobCh chan<- WalkTreeJob) {
+func walkTree(repo *repo.Repository, path string, treeID backend.ID, done chan struct{}, jobCh chan<- WalkTreeJob) {
 	debug.Log("walkTree", "start on %q (%v)", path, treeID.Str())
 
-	t, err := LoadTree(s, treeID)
+	t, err := LoadTree(repo, treeID)
 	if err != nil {
 		jobCh <- WalkTreeJob{Path: path, Error: err}
 		return
@@ -28,7 +28,7 @@ func walkTree(s *repo.Repository, path string, treeID backend.ID, done chan stru
 	for _, node := range t.Nodes {
 		p := filepath.Join(path, node.Name)
 		if node.Type == "dir" {
-			walkTree(s, p, node.Subtree, done, jobCh)
+			walkTree(repo, p, node.Subtree, done, jobCh)
 		} else {
 			jobCh <- WalkTreeJob{Path: p, Node: node}
 		}
@@ -41,9 +41,9 @@ func walkTree(s *repo.Repository, path string, treeID backend.ID, done chan stru
 // WalkTree walks the tree specified by id recursively and sends a job for each
 // file and directory it finds. When the channel done is closed, processing
 // stops.
-func WalkTree(server *repo.Repository, id backend.ID, done chan struct{}, jobCh chan<- WalkTreeJob) {
+func WalkTree(repo *repo.Repository, id backend.ID, done chan struct{}, jobCh chan<- WalkTreeJob) {
 	debug.Log("WalkTree", "start on %v", id.Str())
-	walkTree(server, "", id, done, jobCh)
+	walkTree(repo, "", id, done, jobCh)
 	close(jobCh)
 	debug.Log("WalkTree", "done")
 }
diff --git a/walk_test.go b/walk_test.go
index 048af71c0..384e605df 100644
--- a/walk_test.go
+++ b/walk_test.go
@@ -16,22 +16,22 @@ func TestWalkTree(t *testing.T) {
 	dirs, err := filepath.Glob(*testWalkDirectory)
 	OK(t, err)
 
-	server := SetupBackend(t)
-	defer TeardownBackend(t, server)
+	repo := SetupRepo(t)
+	defer TeardownRepo(t, repo)
 
 	// archive a few files
-	arch := restic.NewArchiver(server)
+	arch := restic.NewArchiver(repo)
 	sn, _, err := arch.Snapshot(nil, dirs, nil)
 	OK(t, err)
 
-	// flush server, write all packs
-	OK(t, server.Flush())
+	// flush repo, write all packs
+	OK(t, repo.Flush())
 
 	done := make(chan struct{})
 
 	// start tree walker
 	treeJobs := make(chan restic.WalkTreeJob)
-	go restic.WalkTree(server, sn.Tree, done, treeJobs)
+	go restic.WalkTree(repo, sn.Tree, done, treeJobs)
 
 	// start filesystem walker
 	fsJobs := make(chan pipe.Job)

From 5fc1583acc595ccb0b4f1c63b0319b860314404d Mon Sep 17 00:00:00 2001
From: Alexander Neumann <alexander@bumpern.de>
Date: Sat, 9 May 2015 13:35:55 +0200
Subject: [PATCH 5/6] Fix comments

---
 archiver.go | 6 +++---
 cache.go    | 4 ++--
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/archiver.go b/archiver.go
index 103eedcb9..da0c5c4fe 100644
--- a/archiver.go
+++ b/archiver.go
@@ -55,7 +55,7 @@ func NewArchiver(repo *repo.Repository) *Archiver {
 	return arch
 }
 
-// Save stores a blob read from rd in the server.
+// Save stores a blob read from rd in the repository.
 func (arch *Archiver) Save(t pack.BlobType, id backend.ID, length uint, rd io.Reader) error {
 	debug.Log("Archiver.Save", "Save(%v, %v)\n", t, id.Str())
 
@@ -76,7 +76,7 @@ func (arch *Archiver) Save(t pack.BlobType, id backend.ID, length uint, rd io.Re
 	return nil
 }
 
-// SaveTreeJSON stores a tree in the server.
+// SaveTreeJSON stores a tree in the repository.
 func (arch *Archiver) SaveTreeJSON(item interface{}) (backend.ID, error) {
 	data, err := json.Marshal(item)
 	if err != nil {
@@ -631,7 +631,7 @@ func (arch *Archiver) Snapshot(p *Progress, paths []string, parentID backend.ID)
 	sn.id = id
 	debug.Log("Archiver.Snapshot", "saved snapshot %v", id.Str())
 
-	// flush server
+	// flush repository
 	err = arch.repo.Flush()
 	if err != nil {
 		return nil, nil, err
diff --git a/cache.go b/cache.go
index c1d3101dc..4084d09b0 100644
--- a/cache.go
+++ b/cache.go
@@ -13,7 +13,7 @@ import (
 	"github.com/restic/restic/repo"
 )
 
-// Cache is used to locally cache items from a server.
+// Cache is used to locally cache items from a repository.
 type Cache struct {
 	base string
 }
@@ -105,7 +105,7 @@ func (c *Cache) purge(t backend.Type, subtype string, id backend.ID) error {
 	return err
 }
 
-// Clear removes information from the cache that isn't present in the server any more.
+// Clear removes information from the cache that isn't present in the repository any more.
 func (c *Cache) Clear(repo *repo.Repository) error {
 	list, err := c.list(backend.Snapshot)
 	if err != nil {

From ae21938f3eeda5f4ef90672649d3ce2fa69772c3 Mon Sep 17 00:00:00 2001
From: Alexander Neumann <alexander@bumpern.de>
Date: Sat, 9 May 2015 17:41:28 +0200
Subject: [PATCH 6/6] Rename 'Repository' -> 'Repo'

---
 archiver.go            |   4 +-
 cache.go               |   4 +-
 cmd/restic/cmd_find.go |   4 +-
 cmd/restic/cmd_fsck.go |   6 +-
 cmd/restic/cmd_key.go  |   8 +--
 cmd/restic/cmd_ls.go   |   2 +-
 cmd/restic/main.go     |   2 +-
 node.go                |   4 +-
 repo/key.go            |  10 +--
 repo/repository.go     | 150 ++++++++++++++++++++---------------------
 restorer.go            |   4 +-
 snapshot.go            |   2 +-
 test/backend.go        |   6 +-
 tree.go                |   2 +-
 walk.go                |   4 +-
 15 files changed, 106 insertions(+), 106 deletions(-)

diff --git a/archiver.go b/archiver.go
index da0c5c4fe..f4f474563 100644
--- a/archiver.go
+++ b/archiver.go
@@ -30,7 +30,7 @@ var archiverAllowAllFiles = func(string, os.FileInfo) bool { return true }
 
 // Archiver is used to backup a set of directories.
 type Archiver struct {
-	repo *repo.Repository
+	repo *repo.Repo
 
 	blobToken chan struct{}
 
@@ -39,7 +39,7 @@ type Archiver struct {
 }
 
 // NewArchiver returns a new archiver.
-func NewArchiver(repo *repo.Repository) *Archiver {
+func NewArchiver(repo *repo.Repo) *Archiver {
 	arch := &Archiver{
 		repo:      repo,
 		blobToken: make(chan struct{}, maxConcurrentBlobs),
diff --git a/cache.go b/cache.go
index 4084d09b0..47bfbac76 100644
--- a/cache.go
+++ b/cache.go
@@ -18,7 +18,7 @@ type Cache struct {
 	base string
 }
 
-func NewCache(repo *repo.Repository) (*Cache, error) {
+func NewCache(repo *repo.Repo) (*Cache, error) {
 	cacheDir, err := getCacheDir()
 	if err != nil {
 		return nil, err
@@ -106,7 +106,7 @@ func (c *Cache) purge(t backend.Type, subtype string, id backend.ID) error {
 }
 
 // Clear removes information from the cache that isn't present in the repository any more.
-func (c *Cache) Clear(repo *repo.Repository) error {
+func (c *Cache) Clear(repo *repo.Repo) error {
 	list, err := c.list(backend.Snapshot)
 	if err != nil {
 		return err
diff --git a/cmd/restic/cmd_find.go b/cmd/restic/cmd_find.go
index 535280e00..cdb2b4c18 100644
--- a/cmd/restic/cmd_find.go
+++ b/cmd/restic/cmd_find.go
@@ -59,7 +59,7 @@ func parseTime(str string) (time.Time, error) {
 	return time.Time{}, fmt.Errorf("unable to parse time: %q", str)
 }
 
-func (c CmdFind) findInTree(repo *repo.Repository, id backend.ID, path string) ([]findResult, error) {
+func (c CmdFind) findInTree(repo *repo.Repo, id backend.ID, path string) ([]findResult, error) {
 	debug.Log("restic.find", "checking tree %v\n", id)
 	tree, err := restic.LoadTree(repo, id)
 	if err != nil {
@@ -105,7 +105,7 @@ func (c CmdFind) findInTree(repo *repo.Repository, id backend.ID, path string) (
 	return results, nil
 }
 
-func (c CmdFind) findInSnapshot(repo *repo.Repository, name string) error {
+func (c CmdFind) findInSnapshot(repo *repo.Repo, name string) error {
 	debug.Log("restic.find", "searching in snapshot %s\n  for entries within [%s %s]", name, c.oldest, c.newest)
 
 	id, err := backend.ParseID(name)
diff --git a/cmd/restic/cmd_fsck.go b/cmd/restic/cmd_fsck.go
index e8b113c42..6fbb4ddca 100644
--- a/cmd/restic/cmd_fsck.go
+++ b/cmd/restic/cmd_fsck.go
@@ -34,7 +34,7 @@ func init() {
 	}
 }
 
-func fsckFile(opts CmdFsck, repo *repo.Repository, IDs []backend.ID) (uint64, error) {
+func fsckFile(opts CmdFsck, repo *repo.Repo, IDs []backend.ID) (uint64, error) {
 	debug.Log("restic.fsckFile", "checking file %v", IDs)
 	var bytes uint64
 
@@ -77,7 +77,7 @@ func fsckFile(opts CmdFsck, repo *repo.Repository, IDs []backend.ID) (uint64, er
 	return bytes, nil
 }
 
-func fsckTree(opts CmdFsck, repo *repo.Repository, id backend.ID) error {
+func fsckTree(opts CmdFsck, repo *repo.Repo, id backend.ID) error {
 	debug.Log("restic.fsckTree", "checking tree %v", id.Str())
 
 	tree, err := restic.LoadTree(repo, id)
@@ -157,7 +157,7 @@ func fsckTree(opts CmdFsck, repo *repo.Repository, id backend.ID) error {
 	return firstErr
 }
 
-func fsckSnapshot(opts CmdFsck, repo *repo.Repository, id backend.ID) error {
+func fsckSnapshot(opts CmdFsck, repo *repo.Repo, id backend.ID) error {
 	debug.Log("restic.fsck", "checking snapshot %v\n", id)
 
 	sn, err := restic.LoadSnapshot(repo, id)
diff --git a/cmd/restic/cmd_key.go b/cmd/restic/cmd_key.go
index 7150e4e0d..b1abdad69 100644
--- a/cmd/restic/cmd_key.go
+++ b/cmd/restic/cmd_key.go
@@ -21,7 +21,7 @@ func init() {
 	}
 }
 
-func listKeys(s *repo.Repository) error {
+func listKeys(s *repo.Repo) error {
 	tab := NewTable()
 	tab.Header = fmt.Sprintf(" %-10s  %-10s  %-10s  %s", "ID", "User", "Host", "Created")
 	tab.RowFormat = "%s%-10s  %-10s  %-10s  %s"
@@ -56,7 +56,7 @@ func listKeys(s *repo.Repository) error {
 	return nil
 }
 
-func addKey(s *repo.Repository) error {
+func addKey(s *repo.Repo) error {
 	pw := readPassword("RESTIC_NEWPASSWORD", "enter password for new key: ")
 	pw2 := readPassword("RESTIC_NEWPASSWORD", "enter password again: ")
 
@@ -74,7 +74,7 @@ func addKey(s *repo.Repository) error {
 	return nil
 }
 
-func deleteKey(repo *repo.Repository, name string) error {
+func deleteKey(repo *repo.Repo, name string) error {
 	if name == repo.KeyName() {
 		return errors.New("refusing to remove key currently used to access repository")
 	}
@@ -88,7 +88,7 @@ func deleteKey(repo *repo.Repository, name string) error {
 	return nil
 }
 
-func changePassword(s *repo.Repository) error {
+func changePassword(s *repo.Repo) error {
 	pw := readPassword("RESTIC_NEWPASSWORD", "enter password for new key: ")
 	pw2 := readPassword("RESTIC_NEWPASSWORD", "enter password again: ")
 
diff --git a/cmd/restic/cmd_ls.go b/cmd/restic/cmd_ls.go
index 1148ca51e..c9d67ed43 100644
--- a/cmd/restic/cmd_ls.go
+++ b/cmd/restic/cmd_ls.go
@@ -38,7 +38,7 @@ func printNode(prefix string, n *restic.Node) string {
 	}
 }
 
-func printTree(prefix string, repo *repo.Repository, id backend.ID) error {
+func printTree(prefix string, repo *repo.Repo, id backend.ID) error {
 	tree, err := restic.LoadTree(repo, id)
 	if err != nil {
 		return err
diff --git a/cmd/restic/main.go b/cmd/restic/main.go
index 22266b538..f34ecd4ac 100644
--- a/cmd/restic/main.go
+++ b/cmd/restic/main.go
@@ -133,7 +133,7 @@ func create(u string) (backend.Backend, error) {
 	return sftp.Create(url.Path[1:], "ssh", args...)
 }
 
-func OpenRepo() (*repo.Repository, error) {
+func OpenRepo() (*repo.Repo, error) {
 	if opts.Repo == "" {
 		return nil, errors.New("Please specify repository location (-r)")
 	}
diff --git a/node.go b/node.go
index 8413acfb5..517cbb31b 100644
--- a/node.go
+++ b/node.go
@@ -103,7 +103,7 @@ func nodeTypeFromFileInfo(fi os.FileInfo) string {
 }
 
 // CreateAt creates the node at the given path and restores all the meta data.
-func (node *Node) CreateAt(path string, repo *repo.Repository) error {
+func (node *Node) CreateAt(path string, repo *repo.Repo) error {
 	switch node.Type {
 	case "dir":
 		if err := node.createDirAt(path); err != nil {
@@ -176,7 +176,7 @@ func (node Node) createDirAt(path string) error {
 	return nil
 }
 
-func (node Node) createFileAt(path string, repo *repo.Repository) error {
+func (node Node) createFileAt(path string, repo *repo.Repo) error {
 	f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, 0600)
 	defer f.Close()
 
diff --git a/repo/key.go b/repo/key.go
index c60909f21..d99a1408e 100644
--- a/repo/key.go
+++ b/repo/key.go
@@ -49,12 +49,12 @@ type Key struct {
 
 // createMasterKey creates a new master key in the given backend and encrypts
 // it with the password.
-func createMasterKey(s *Repository, password string) (*Key, error) {
+func createMasterKey(s *Repo, password string) (*Key, error) {
 	return AddKey(s, password, nil)
 }
 
 // OpenKey tries do decrypt the key specified by name with the given password.
-func OpenKey(s *Repository, name string, password string) (*Key, error) {
+func OpenKey(s *Repo, name string, password string) (*Key, error) {
 	k, err := LoadKey(s, name)
 	if err != nil {
 		return nil, err
@@ -94,7 +94,7 @@ func OpenKey(s *Repository, name string, password string) (*Key, error) {
 
 // SearchKey tries to decrypt all keys in the backend with the given password.
 // If none could be found, ErrNoKeyFound is returned.
-func SearchKey(s *Repository, password string) (*Key, error) {
+func SearchKey(s *Repo, password string) (*Key, error) {
 	// try all keys in repo
 	done := make(chan struct{})
 	defer close(done)
@@ -111,7 +111,7 @@ func SearchKey(s *Repository, password string) (*Key, error) {
 }
 
 // LoadKey loads a key from the backend.
-func LoadKey(s *Repository, name string) (*Key, error) {
+func LoadKey(s *Repo, name string) (*Key, error) {
 	// extract data from repo
 	rd, err := s.be.Get(backend.Key, name)
 	if err != nil {
@@ -131,7 +131,7 @@ func LoadKey(s *Repository, name string) (*Key, error) {
 }
 
 // AddKey adds a new key to an already existing repository.
-func AddKey(s *Repository, password string, template *crypto.Key) (*Key, error) {
+func AddKey(s *Repo, password string, template *crypto.Key) (*Key, error) {
 	// fill meta data about key
 	newkey := &Key{
 		Created: time.Now(),
diff --git a/repo/repository.go b/repo/repository.go
index 9f4d91622..80d392f89 100644
--- a/repo/repository.go
+++ b/repo/repository.go
@@ -26,8 +26,8 @@ type Config struct {
 	ChunkerPolynomial chunker.Pol `json:"chunker_polynomial"`
 }
 
-// Repository is used to access a repository in a backend.
-type Repository struct {
+// Repo is used to access a repository in a backend.
+type Repo struct {
 	be      backend.Backend
 	Config  Config
 	key     *crypto.Key
@@ -38,8 +38,8 @@ type Repository struct {
 	packs []*pack.Packer
 }
 
-func New(be backend.Backend) *Repository {
-	return &Repository{
+func New(be backend.Backend) *Repo {
+	return &Repo{
 		be:  be,
 		idx: NewIndex(),
 	}
@@ -48,31 +48,31 @@ func New(be backend.Backend) *Repository {
 // Find loads the list of all blobs of type t and searches for names which start
 // with prefix. If none is found, nil and ErrNoIDPrefixFound is returned. If
 // more than one is found, nil and ErrMultipleIDMatches is returned.
-func (s *Repository) Find(t backend.Type, prefix string) (string, error) {
+func (s *Repo) Find(t backend.Type, prefix string) (string, error) {
 	return backend.Find(s.be, t, prefix)
 }
 
 // FindSnapshot takes a string and tries to find a snapshot whose ID matches
 // the string as closely as possible.
-func (s *Repository) FindSnapshot(name string) (string, error) {
+func (s *Repo) FindSnapshot(name string) (string, error) {
 	return backend.FindSnapshot(s.be, name)
 }
 
 // PrefixLength returns the number of bytes required so that all prefixes of
 // all IDs of type t are unique.
-func (s *Repository) PrefixLength(t backend.Type) (int, error) {
+func (s *Repo) PrefixLength(t backend.Type) (int, error) {
 	return backend.PrefixLength(s.be, t)
 }
 
 // Load tries to load and decrypt content identified by t and id from the
 // backend.
-func (s *Repository) Load(t backend.Type, id backend.ID) ([]byte, error) {
-	debug.Log("Repository.Load", "load %v with id %v", t, id.Str())
+func (s *Repo) Load(t backend.Type, id backend.ID) ([]byte, error) {
+	debug.Log("Repo.Load", "load %v with id %v", t, id.Str())
 
 	// load blob from pack
 	rd, err := s.be.Get(t, id.String())
 	if err != nil {
-		debug.Log("Repository.Load", "error loading %v: %v", id.Str(), err)
+		debug.Log("Repo.Load", "error loading %v: %v", id.Str(), err)
 		return nil, err
 	}
 
@@ -102,26 +102,26 @@ func (s *Repository) Load(t backend.Type, id backend.ID) ([]byte, error) {
 
 // LoadBlob tries to load and decrypt content identified by t and id from a
 // pack from the backend.
-func (s *Repository) LoadBlob(t pack.BlobType, id backend.ID) ([]byte, error) {
-	debug.Log("Repository.LoadBlob", "load %v with id %v", t, id.Str())
+func (s *Repo) LoadBlob(t pack.BlobType, id backend.ID) ([]byte, error) {
+	debug.Log("Repo.LoadBlob", "load %v with id %v", t, id.Str())
 	// lookup pack
 	packID, tpe, offset, length, err := s.idx.Lookup(id)
 	if err != nil {
-		debug.Log("Repository.LoadBlob", "id %v not found in index: %v", id.Str(), err)
+		debug.Log("Repo.LoadBlob", "id %v not found in index: %v", id.Str(), err)
 		return nil, err
 	}
 
 	if tpe != t {
-		debug.Log("Repository.LoadBlob", "wrong type returned for %v: wanted %v, got %v", id.Str(), t, tpe)
+		debug.Log("Repo.LoadBlob", "wrong type returned for %v: wanted %v, got %v", id.Str(), t, tpe)
 		return nil, fmt.Errorf("blob has wrong type %v (wanted: %v)", tpe, t)
 	}
 
-	debug.Log("Repository.LoadBlob", "id %v found in pack %v at offset %v (length %d)", id.Str(), packID.Str(), offset, length)
+	debug.Log("Repo.LoadBlob", "id %v found in pack %v at offset %v (length %d)", id.Str(), packID.Str(), offset, length)
 
 	// load blob from pack
 	rd, err := s.be.GetReader(backend.Data, packID.String(), offset, length)
 	if err != nil {
-		debug.Log("Repository.LoadBlob", "error loading pack %v for %v: %v", packID.Str(), id.Str(), err)
+		debug.Log("Repo.LoadBlob", "error loading pack %v for %v: %v", packID.Str(), id.Str(), err)
 		return nil, err
 	}
 
@@ -151,7 +151,7 @@ func (s *Repository) LoadBlob(t pack.BlobType, id backend.ID) ([]byte, error) {
 
 // LoadJSONUnpacked decrypts the data and afterwards calls json.Unmarshal on
 // the item.
-func (s *Repository) LoadJSONUnpacked(t backend.Type, id backend.ID, item interface{}) error {
+func (s *Repo) LoadJSONUnpacked(t backend.Type, id backend.ID, item interface{}) error {
 	// load blob from backend
 	rd, err := s.be.Get(t, id.String())
 	if err != nil {
@@ -178,7 +178,7 @@ func (s *Repository) LoadJSONUnpacked(t backend.Type, id backend.ID, item interf
 
 // LoadJSONPack calls LoadBlob() to load a blob from the backend, decrypt the
 // data and afterwards call json.Unmarshal on the item.
-func (s *Repository) LoadJSONPack(t pack.BlobType, id backend.ID, item interface{}) error {
+func (s *Repo) LoadJSONPack(t pack.BlobType, id backend.ID, item interface{}) error {
 	// lookup pack
 	packID, _, offset, length, err := s.idx.Lookup(id)
 	if err != nil {
@@ -215,16 +215,16 @@ const maxPackers = 200
 
 // findPacker returns a packer for a new blob of size bytes. Either a new one is
 // created or one is returned that already has some blobs.
-func (s *Repository) findPacker(size uint) (*pack.Packer, error) {
+func (s *Repo) findPacker(size uint) (*pack.Packer, error) {
 	s.pm.Lock()
 	defer s.pm.Unlock()
 
 	// search for a suitable packer
 	if len(s.packs) > 0 {
-		debug.Log("Repository.findPacker", "searching packer for %d bytes\n", size)
+		debug.Log("Repo.findPacker", "searching packer for %d bytes\n", size)
 		for i, p := range s.packs {
 			if p.Size()+size < maxPackSize {
-				debug.Log("Repository.findPacker", "found packer %v", p)
+				debug.Log("Repo.findPacker", "found packer %v", p)
 				// remove from list
 				s.packs = append(s.packs[:i], s.packs[i+1:]...)
 				return p, nil
@@ -237,22 +237,22 @@ func (s *Repository) findPacker(size uint) (*pack.Packer, error) {
 	if err != nil {
 		return nil, err
 	}
-	debug.Log("Repository.findPacker", "create new pack %p", blob)
+	debug.Log("Repo.findPacker", "create new pack %p", blob)
 	return pack.NewPacker(s.key, blob), nil
 }
 
 // insertPacker appends p to s.packs.
-func (s *Repository) insertPacker(p *pack.Packer) {
+func (s *Repo) insertPacker(p *pack.Packer) {
 	s.pm.Lock()
 	defer s.pm.Unlock()
 
 	s.packs = append(s.packs, p)
-	debug.Log("Repository.insertPacker", "%d packers\n", len(s.packs))
+	debug.Log("Repo.insertPacker", "%d packers\n", len(s.packs))
 }
 
 // savePacker stores p in the backend.
-func (s *Repository) savePacker(p *pack.Packer) error {
-	debug.Log("Repository.savePacker", "save packer with %d blobs\n", p.Count())
+func (s *Repo) savePacker(p *pack.Packer) error {
+	debug.Log("Repo.savePacker", "save packer with %d blobs\n", p.Count())
 	_, err := p.Finalize()
 	if err != nil {
 		return err
@@ -262,15 +262,15 @@ func (s *Repository) savePacker(p *pack.Packer) error {
 	sid := p.ID()
 	err = p.Writer().(backend.Blob).Finalize(backend.Data, sid.String())
 	if err != nil {
-		debug.Log("Repository.savePacker", "blob Finalize() error: %v", err)
+		debug.Log("Repo.savePacker", "blob Finalize() error: %v", err)
 		return err
 	}
 
-	debug.Log("Repository.savePacker", "saved as %v", sid.Str())
+	debug.Log("Repo.savePacker", "saved as %v", sid.Str())
 
 	// update blobs in the index
 	for _, b := range p.Blobs() {
-		debug.Log("Repository.savePacker", "  updating blob %v to pack %v", b.ID.Str(), sid.Str())
+		debug.Log("Repo.savePacker", "  updating blob %v to pack %v", b.ID.Str(), sid.Str())
 		s.idx.Store(b.Type, b.ID, sid, b.Offset, uint(b.Length))
 	}
 
@@ -278,7 +278,7 @@ func (s *Repository) savePacker(p *pack.Packer) error {
 }
 
 // countPacker returns the number of open (unfinished) packers.
-func (s *Repository) countPacker() int {
+func (s *Repo) countPacker() int {
 	s.pm.Lock()
 	defer s.pm.Unlock()
 
@@ -287,13 +287,13 @@ func (s *Repository) countPacker() int {
 
 // Save encrypts data and stores it to the backend as type t. If data is small
 // enough, it will be packed together with other small blobs.
-func (s *Repository) Save(t pack.BlobType, data []byte, id backend.ID) (backend.ID, error) {
+func (s *Repo) Save(t pack.BlobType, data []byte, id backend.ID) (backend.ID, error) {
 	if id == nil {
 		// compute plaintext hash
 		id = backend.Hash(data)
 	}
 
-	debug.Log("Repository.Save", "save id %v (%v, %d bytes)", id.Str(), t, len(data))
+	debug.Log("Repo.Save", "save id %v (%v, %d bytes)", id.Str(), t, len(data))
 
 	// get buf from the pool
 	ciphertext := getBuf()
@@ -317,12 +317,12 @@ func (s *Repository) Save(t pack.BlobType, data []byte, id backend.ID) (backend.
 	// add this id to the index, although we don't know yet in which pack it
 	// will be saved, the entry will be updated when the pack is written.
 	s.idx.Store(t, id, nil, 0, 0)
-	debug.Log("Repository.Save", "saving stub for %v (%v) in index", id.Str, t)
+	debug.Log("Repo.Save", "saving stub for %v (%v) in index", id.Str(), t)
 
 	// if the pack is not full enough and there are less than maxPackers
 	// packers, put back to the list
 	if packer.Size() < minPackSize && s.countPacker() < maxPackers {
-		debug.Log("Repository.Save", "pack is not full enough (%d bytes)", packer.Size())
+		debug.Log("Repo.Save", "pack is not full enough (%d bytes)", packer.Size())
 		s.insertPacker(packer)
 		return id, nil
 	}
@@ -332,8 +332,8 @@ func (s *Repository) Save(t pack.BlobType, data []byte, id backend.ID) (backend.
 }
 
 // SaveFrom encrypts data read from rd and stores it in a pack in the backend as type t.
-func (s *Repository) SaveFrom(t pack.BlobType, id backend.ID, length uint, rd io.Reader) error {
-	debug.Log("Repository.SaveFrom", "save id %v (%v, %d bytes)", id.Str(), t, length)
+func (s *Repo) SaveFrom(t pack.BlobType, id backend.ID, length uint, rd io.Reader) error {
+	debug.Log("Repo.SaveFrom", "save id %v (%v, %d bytes)", id.Str(), t, length)
 	if id == nil {
 		return errors.New("id is nil")
 	}
@@ -353,8 +353,8 @@ func (s *Repository) SaveFrom(t pack.BlobType, id backend.ID, length uint, rd io
 
 // SaveJSON serialises item as JSON and encrypts and saves it in a pack in the
 // backend as type t.
-func (s *Repository) SaveJSON(t pack.BlobType, item interface{}) (backend.ID, error) {
-	debug.Log("Repository.SaveJSON", "save %v blob", t)
+func (s *Repo) SaveJSON(t pack.BlobType, item interface{}) (backend.ID, error) {
+	debug.Log("Repo.SaveJSON", "save %v blob", t)
 	buf := getBuf()[:0]
 	defer freeBuf(buf)
 
@@ -372,13 +372,13 @@ func (s *Repository) SaveJSON(t pack.BlobType, item interface{}) (backend.ID, er
 
 // SaveJSONUnpacked serialises item as JSON and encrypts and saves it in the
 // backend as type t, without a pack. It returns the storage hash.
-func (s *Repository) SaveJSONUnpacked(t backend.Type, item interface{}) (backend.ID, error) {
+func (s *Repo) SaveJSONUnpacked(t backend.Type, item interface{}) (backend.ID, error) {
 	// create file
 	blob, err := s.be.Create()
 	if err != nil {
 		return nil, err
 	}
-	debug.Log("Repository.SaveJSONUnpacked", "create new file %p", blob)
+	debug.Log("Repo.SaveJSONUnpacked", "create new file %p", blob)
 
 	// hash
 	hw := backend.NewHashingWriter(blob, sha256.New())
@@ -409,11 +409,11 @@ func (s *Repository) SaveJSONUnpacked(t backend.Type, item interface{}) (backend
 }
 
 // Flush saves all remaining packs.
-func (s *Repository) Flush() error {
+func (s *Repo) Flush() error {
 	s.pm.Lock()
 	defer s.pm.Unlock()
 
-	debug.Log("Repository.Flush", "manually flushing %d packs", len(s.packs))
+	debug.Log("Repo.Flush", "manually flushing %d packs", len(s.packs))
 
 	for _, p := range s.packs {
 		err := s.savePacker(p)
@@ -426,23 +426,23 @@ func (s *Repository) Flush() error {
 	return nil
 }
 
-func (s *Repository) Backend() backend.Backend {
+func (s *Repo) Backend() backend.Backend {
 	return s.be
 }
 
-func (s *Repository) Index() *Index {
+func (s *Repo) Index() *Index {
 	return s.idx
 }
 
 // SetIndex instructs the repository to use the given index.
-func (s *Repository) SetIndex(i *Index) {
+func (s *Repo) SetIndex(i *Index) {
 	s.idx = i
 }
 
 // SaveIndex saves all new packs in the index in the backend, returned is the
 // storage ID.
-func (s *Repository) SaveIndex() (backend.ID, error) {
-	debug.Log("Repository.SaveIndex", "Saving index")
+func (s *Repo) SaveIndex() (backend.ID, error) {
+	debug.Log("Repo.SaveIndex", "Saving index")
 
 	// create blob
 	blob, err := s.be.Create()
@@ -450,7 +450,7 @@ func (s *Repository) SaveIndex() (backend.ID, error) {
 		return nil, err
 	}
 
-	debug.Log("Repository.SaveIndex", "create new pack %p", blob)
+	debug.Log("Repo.SaveIndex", "create new pack %p", blob)
 
 	// hash
 	hw := backend.NewHashingWriter(blob, sha256.New())
@@ -476,15 +476,15 @@ func (s *Repository) SaveIndex() (backend.ID, error) {
 		return nil, err
 	}
 
-	debug.Log("Repository.SaveIndex", "Saved index as %v", sid.Str())
+	debug.Log("Repo.SaveIndex", "Saved index as %v", sid.Str())
 
 	return sid, nil
 }
 
 // LoadIndex loads all index files from the backend and merges them with the
 // current index.
-func (s *Repository) LoadIndex() error {
-	debug.Log("Repository.LoadIndex", "Loading index")
+func (s *Repo) LoadIndex() error {
+	debug.Log("Repo.LoadIndex", "Loading index")
 	done := make(chan struct{})
 	defer close(done)
 
@@ -498,8 +498,8 @@ func (s *Repository) LoadIndex() error {
 }
 
 // loadIndex loads the index id and merges it with the currently used index.
-func (s *Repository) loadIndex(id string) error {
-	debug.Log("Repository.loadIndex", "Loading index %v", id[:8])
+func (s *Repo) loadIndex(id string) error {
+	debug.Log("Repo.loadIndex", "Loading index %v", id[:8])
 	before := len(s.idx.pack)
 
 	rd, err := s.be.Get(backend.Index, id)
@@ -517,22 +517,22 @@ func (s *Repository) loadIndex(id string) error {
 
 	idx, err := DecodeIndex(decryptRd)
 	if err != nil {
-		debug.Log("Repository.loadIndex", "error while decoding index %v: %v", id, err)
+		debug.Log("Repo.loadIndex", "error while decoding index %v: %v", id, err)
 		return err
 	}
 
 	s.idx.Merge(idx)
 
 	after := len(s.idx.pack)
-	debug.Log("Repository.loadIndex", "Loaded index %v, added %v blobs", id[:8], after-before)
+	debug.Log("Repo.loadIndex", "Loaded index %v, added %v blobs", id[:8], after-before)
 
 	return nil
 }
 
 const repositoryIDSize = sha256.Size
-const RepositoryVersion = 1
+const RepoVersion = 1
 
-func createConfig(s *Repository) (err error) {
+func createConfig(s *Repo) (err error) {
 	s.Config.ChunkerPolynomial, err = chunker.RandomPolynomial()
 	if err != nil {
 		return err
@@ -545,21 +545,21 @@ func createConfig(s *Repository) (err error) {
 	}
 
 	s.Config.ID = hex.EncodeToString(newID)
-	s.Config.Version = RepositoryVersion
+	s.Config.Version = RepoVersion
 
-	debug.Log("Repository.createConfig", "New config: %#v", s.Config)
+	debug.Log("Repo.createConfig", "New config: %#v", s.Config)
 
 	_, err = s.SaveJSONUnpacked(backend.Config, s.Config)
 	return err
 }
 
-func (s *Repository) loadConfig(cfg *Config) error {
+func (s *Repo) loadConfig(cfg *Config) error {
 	err := s.LoadJSONUnpacked(backend.Config, nil, cfg)
 	if err != nil {
 		return err
 	}
 
-	if cfg.Version != RepositoryVersion {
+	if cfg.Version != RepoVersion {
 		return errors.New("unsupported repository version")
 	}
 
@@ -572,7 +572,7 @@ func (s *Repository) loadConfig(cfg *Config) error {
 
 // SearchKey finds a key with the supplied password, afterwards the config is
 // read and parsed.
-func (s *Repository) SearchKey(password string) error {
+func (s *Repo) SearchKey(password string) error {
 	key, err := SearchKey(s, password)
 	if err != nil {
 		return err
@@ -585,7 +585,7 @@ func (s *Repository) SearchKey(password string) error {
 
 // Init creates a new master key with the supplied password and initializes the
 // repository config.
-func (s *Repository) Init(password string) error {
+func (s *Repo) Init(password string) error {
 	has, err := s.Test(backend.Config, "")
 	if err != nil {
 		return err
@@ -604,7 +604,7 @@ func (s *Repository) Init(password string) error {
 	return createConfig(s)
 }
 
-func (s *Repository) Decrypt(ciphertext []byte) ([]byte, error) {
+func (s *Repo) Decrypt(ciphertext []byte) ([]byte, error) {
 	if s.key == nil {
 		return nil, errors.New("key for repository not set")
 	}
@@ -612,7 +612,7 @@ func (s *Repository) Decrypt(ciphertext []byte) ([]byte, error) {
 	return crypto.Decrypt(s.key, nil, ciphertext)
 }
 
-func (s *Repository) Encrypt(ciphertext, plaintext []byte) ([]byte, error) {
+func (s *Repo) Encrypt(ciphertext, plaintext []byte) ([]byte, error) {
 	if s.key == nil {
 		return nil, errors.New("key for repository not set")
 	}
@@ -620,16 +620,16 @@ func (s *Repository) Encrypt(ciphertext, plaintext []byte) ([]byte, error) {
 	return crypto.Encrypt(s.key, ciphertext, plaintext)
 }
 
-func (s *Repository) Key() *crypto.Key {
+func (s *Repo) Key() *crypto.Key {
 	return s.key
 }
 
-func (s *Repository) KeyName() string {
+func (s *Repo) KeyName() string {
 	return s.keyName
 }
 
 // Count returns the number of blobs of a given type in the backend.
-func (s *Repository) Count(t backend.Type) (n uint) {
+func (s *Repo) Count(t backend.Type) (n uint) {
 	for _ = range s.be.List(t, nil) {
 		n++
 	}
@@ -639,27 +639,27 @@ func (s *Repository) Count(t backend.Type) (n uint) {
 
 // Proxy methods to backend
 
-func (s *Repository) Get(t backend.Type, name string) (io.ReadCloser, error) {
+func (s *Repo) Get(t backend.Type, name string) (io.ReadCloser, error) {
 	return s.be.Get(t, name)
 }
 
-func (s *Repository) List(t backend.Type, done <-chan struct{}) <-chan string {
+func (s *Repo) List(t backend.Type, done <-chan struct{}) <-chan string {
 	return s.be.List(t, done)
 }
 
-func (s *Repository) Test(t backend.Type, name string) (bool, error) {
+func (s *Repo) Test(t backend.Type, name string) (bool, error) {
 	return s.be.Test(t, name)
 }
 
-func (s *Repository) Remove(t backend.Type, name string) error {
+func (s *Repo) Remove(t backend.Type, name string) error {
 	return s.be.Remove(t, name)
 }
 
-func (s *Repository) Close() error {
+func (s *Repo) Close() error {
 	return s.be.Close()
 }
 
-func (s *Repository) Delete() error {
+func (s *Repo) Delete() error {
 	if b, ok := s.be.(backend.Deleter); ok {
 		return b.Delete()
 	}
@@ -667,6 +667,6 @@ func (s *Repository) Delete() error {
 	return errors.New("Delete() called for backend that does not implement this method")
 }
 
-func (s *Repository) Location() string {
+func (s *Repo) Location() string {
 	return s.be.Location()
 }
diff --git a/restorer.go b/restorer.go
index 33f3ffc79..be0e6e347 100644
--- a/restorer.go
+++ b/restorer.go
@@ -14,7 +14,7 @@ import (
 
 // Restorer is used to restore a snapshot to a directory.
 type Restorer struct {
-	repo *repo.Repository
+	repo *repo.Repo
 	sn   *Snapshot
 
 	Error  func(dir string, node *Node, err error) error
@@ -24,7 +24,7 @@ type Restorer struct {
 var restorerAbortOnAllErrors = func(str string, node *Node, err error) error { return err }
 
 // NewRestorer creates a restorer preloaded with the content from the snapshot id.
-func NewRestorer(repo *repo.Repository, id backend.ID) (*Restorer, error) {
+func NewRestorer(repo *repo.Repo, id backend.ID) (*Restorer, error) {
 	r := &Restorer{repo: repo, Error: restorerAbortOnAllErrors}
 
 	var err error
diff --git a/snapshot.go b/snapshot.go
index 8fe382627..776e35790 100644
--- a/snapshot.go
+++ b/snapshot.go
@@ -50,7 +50,7 @@ func NewSnapshot(paths []string) (*Snapshot, error) {
 	return sn, nil
 }
 
-func LoadSnapshot(repo *repo.Repository, id backend.ID) (*Snapshot, error) {
+func LoadSnapshot(repo *repo.Repo, id backend.ID) (*Snapshot, error) {
 	sn := &Snapshot{id: id}
 	err := repo.LoadJSONUnpacked(backend.Snapshot, id, sn)
 	if err != nil {
diff --git a/test/backend.go b/test/backend.go
index 6b9d9fc22..f7c5f1034 100644
--- a/test/backend.go
+++ b/test/backend.go
@@ -17,7 +17,7 @@ var TestPassword = flag.String("test.password", "", `use this password for repos
 var TestCleanup = flag.Bool("test.cleanup", true, "clean up after running tests (remove local backend directory with all content)")
 var TestTempDir = flag.String("test.tempdir", "", "use this directory for temporary storage (default: system temp dir)")
 
-func SetupRepo(t testing.TB) *repo.Repository {
+func SetupRepo(t testing.TB) *repo.Repo {
 	tempdir, err := ioutil.TempDir(*TestTempDir, "restic-test-")
 	OK(t, err)
 
@@ -34,7 +34,7 @@ func SetupRepo(t testing.TB) *repo.Repository {
 	return repo
 }
 
-func TeardownRepo(t testing.TB, repo *repo.Repository) {
+func TeardownRepo(t testing.TB, repo *repo.Repo) {
 	if !*TestCleanup {
 		l := repo.Backend().(*local.Local)
 		t.Logf("leaving local backend at %s\n", l.Location())
@@ -44,7 +44,7 @@ func TeardownRepo(t testing.TB, repo *repo.Repository) {
 	OK(t, repo.Delete())
 }
 
-func SnapshotDir(t testing.TB, repo *repo.Repository, path string, parent backend.ID) *restic.Snapshot {
+func SnapshotDir(t testing.TB, repo *repo.Repo, path string, parent backend.ID) *restic.Snapshot {
 	arch := restic.NewArchiver(repo)
 	sn, _, err := arch.Snapshot(nil, []string{path}, parent)
 	OK(t, err)
diff --git a/tree.go b/tree.go
index ffe903480..0fccf5082 100644
--- a/tree.go
+++ b/tree.go
@@ -30,7 +30,7 @@ func (t Tree) String() string {
 	return fmt.Sprintf("Tree<%d nodes>", len(t.Nodes))
 }
 
-func LoadTree(repo *repo.Repository, id backend.ID) (*Tree, error) {
+func LoadTree(repo *repo.Repo, id backend.ID) (*Tree, error) {
 	tree := &Tree{}
 	err := repo.LoadJSONPack(pack.Tree, id, tree)
 	if err != nil {
diff --git a/walk.go b/walk.go
index 6c8c0d33f..f666d5d38 100644
--- a/walk.go
+++ b/walk.go
@@ -16,7 +16,7 @@ type WalkTreeJob struct {
 	Tree *Tree
 }
 
-func walkTree(repo *repo.Repository, path string, treeID backend.ID, done chan struct{}, jobCh chan<- WalkTreeJob) {
+func walkTree(repo *repo.Repo, path string, treeID backend.ID, done chan struct{}, jobCh chan<- WalkTreeJob) {
 	debug.Log("walkTree", "start on %q (%v)", path, treeID.Str())
 
 	t, err := LoadTree(repo, treeID)
@@ -41,7 +41,7 @@ func walkTree(repo *repo.Repository, path string, treeID backend.ID, done chan s
 // WalkTree walks the tree specified by id recursively and sends a job for each
 // file and directory it finds. When the channel done is closed, processing
 // stops.
-func WalkTree(repo *repo.Repository, id backend.ID, done chan struct{}, jobCh chan<- WalkTreeJob) {
+func WalkTree(repo *repo.Repo, id backend.ID, done chan struct{}, jobCh chan<- WalkTreeJob) {
 	debug.Log("WalkTree", "start on %v", id.Str())
 	walkTree(repo, "", id, done, jobCh)
 	close(jobCh)