Merge branch 'add_config'

commit c57c4f0b8f

22 changed files with 254 additions and 481 deletions
@@ -184,7 +184,7 @@ func (arch *Archiver) SaveFile(p *Progress, node *Node) error {
 	}
 
 	chnker := GetChunker("archiver.SaveFile")
-	chnker.Reset(file, arch.s.ChunkerPolynomial())
+	chnker.Reset(file, arch.s.Config.ChunkerPolynomial)
 	resultChannels := [](<-chan saveResult){}
 	defer FreeChunker("archiver.SaveFile", chnker)
 
@@ -9,8 +9,8 @@ import (
 	"github.com/restic/restic"
 	"github.com/restic/restic/backend"
 	"github.com/restic/restic/chunker"
+	"github.com/restic/restic/crypto"
 	"github.com/restic/restic/pack"
-	"github.com/restic/restic/server"
 	. "github.com/restic/restic/test"
 )
 
@@ -24,7 +24,7 @@ type Rdr interface {
 	io.ReaderAt
 }
 
-func benchmarkChunkEncrypt(b testing.TB, buf, buf2 []byte, rd Rdr, key *server.Key) {
+func benchmarkChunkEncrypt(b testing.TB, buf, buf2 []byte, rd Rdr, key *crypto.Key) {
 	ch := restic.GetChunker("BenchmarkChunkEncrypt")
 	rd.Seek(0, 0)
 	ch.Reset(rd, testPol)
@@ -44,7 +44,7 @@ func benchmarkChunkEncrypt(b testing.TB, buf, buf2 []byte, rd Rdr, key *server.K
 		OK(b, err)
 		Assert(b, uint(n) == chunk.Length, "invalid length: got %d, expected %d", n, chunk.Length)
 
-		_, err = key.Encrypt(buf2, buf)
+		_, err = crypto.Encrypt(key, buf2, buf)
 		OK(b, err)
 	}
 
@@ -55,9 +55,8 @@ func BenchmarkChunkEncrypt(b *testing.B) {
 	data := Random(23, 10<<20) // 10MiB
 	rd := bytes.NewReader(data)
 
-	be := SetupBackend(b)
-	defer TeardownBackend(b, be)
-	key := SetupKey(b, be, "geheim")
+	s := SetupBackend(b)
+	defer TeardownBackend(b, s)
 
 	buf := restic.GetChunkBuf("BenchmarkChunkEncrypt")
 	buf2 := restic.GetChunkBuf("BenchmarkChunkEncrypt")
@@ -66,14 +65,14 @@ func BenchmarkChunkEncrypt(b *testing.B) {
 	b.SetBytes(int64(len(data)))
 
 	for i := 0; i < b.N; i++ {
-		benchmarkChunkEncrypt(b, buf, buf2, rd, key)
+		benchmarkChunkEncrypt(b, buf, buf2, rd, s.Key())
 	}
 
 	restic.FreeChunkBuf("BenchmarkChunkEncrypt", buf)
 	restic.FreeChunkBuf("BenchmarkChunkEncrypt", buf2)
 }
 
-func benchmarkChunkEncryptP(b *testing.PB, buf []byte, rd Rdr, key *server.Key) {
+func benchmarkChunkEncryptP(b *testing.PB, buf []byte, rd Rdr, key *crypto.Key) {
 	ch := restic.GetChunker("BenchmarkChunkEncryptP")
 	rd.Seek(0, 0)
 	ch.Reset(rd, testPol)
@@ -87,16 +86,15 @@ func benchmarkChunkEncryptP(b *testing.PB, buf []byte, rd Rdr, key *server.Key)
 		// reduce length of chunkBuf
 		buf = buf[:chunk.Length]
 		io.ReadFull(chunk.Reader(rd), buf)
-		key.Encrypt(buf, buf)
+		crypto.Encrypt(key, buf, buf)
 	}
 
 	restic.FreeChunker("BenchmarkChunkEncryptP", ch)
 }
 
 func BenchmarkChunkEncryptParallel(b *testing.B) {
-	be := SetupBackend(b)
-	defer TeardownBackend(b, be)
-	key := SetupKey(b, be, "geheim")
+	s := SetupBackend(b)
+	defer TeardownBackend(b, s)
 
 	data := Random(23, 10<<20) // 10MiB
 
@@ -108,7 +106,7 @@ func BenchmarkChunkEncryptParallel(b *testing.B) {
 	b.RunParallel(func(pb *testing.PB) {
 		for pb.Next() {
 			rd := bytes.NewReader(data)
-			benchmarkChunkEncryptP(pb, buf, rd, key)
+			benchmarkChunkEncryptP(pb, buf, rd, s.Key())
 		}
 	})
 
@@ -118,8 +116,6 @@ func BenchmarkChunkEncryptParallel(b *testing.B) {
 func archiveDirectory(b testing.TB) {
 	server := SetupBackend(b)
 	defer TeardownBackend(b, server)
-	key := SetupKey(b, server, "geheim")
-	server.SetKey(key)
 
 	arch := restic.NewArchiver(server)
 
@@ -154,8 +150,6 @@ func archiveWithDedup(t testing.TB) {
 
 	server := SetupBackend(t)
 	defer TeardownBackend(t, server)
-	key := SetupKey(t, server, "geheim")
-	server.SetKey(key)
 
 	var cnt struct {
 		before, after, after2 struct {
@@ -220,8 +214,6 @@ func BenchmarkLoadTree(t *testing.B) {
 
 	s := SetupBackend(t)
 	defer TeardownBackend(t, s)
-	key := SetupKey(t, s, "geheim")
-	s.SetKey(key)
 
 	// archive a few files
 	arch := restic.NewArchiver(s)
 
@@ -11,10 +11,7 @@ const (
 	Lock = "lock"
 	Snapshot = "snapshot"
 	Index = "index"
-)
-
-const (
-	Version = 1
+	Config = "config"
 )
 
 // A Backend manages data stored somewhere.
@@ -43,17 +40,9 @@ type Backend interface {
 	// Close the backend
 	Close() error
 
-	Identifier
 	Lister
 }
 
-type Identifier interface {
-	// ID returns a unique ID for a specific repository. This means restic can
-	// recognize repositories accessed via different methods (e.g. local file
-	// access and sftp).
-	ID() string
-}
-
 type Lister interface {
 	// List returns a channel that yields all names of blobs of type t in
 	// lexicographic order. A goroutine is started for this. If the channel
@@ -1,9 +1,6 @@
 package local
 
 import (
-	"crypto/rand"
-	"crypto/sha256"
-	"encoding/hex"
 	"errors"
 	"fmt"
 	"io"
@@ -11,7 +8,6 @@ import (
 	"os"
 	"path/filepath"
 	"sort"
-	"strings"
 
 	"github.com/restic/restic/backend"
 )
@@ -19,9 +15,7 @@ import (
 var ErrWrongData = errors.New("wrong data returned by backend, checksum does not match")
 
 type Local struct {
 	p string
-	ver uint
-	id string
 }
 
 // Open opens the local backend at dir.
@@ -36,68 +30,19 @@ func Open(dir string) (*Local, error) {
 		filepath.Join(dir, backend.Paths.Temp),
 	}
 
-	// test if all necessary dirs and files are there
+	// test if all necessary dirs are there
 	for _, d := range items {
 		if _, err := os.Stat(d); err != nil {
 			return nil, fmt.Errorf("%s does not exist", d)
 		}
 	}
 
-	// read version file
-	f, err := os.Open(filepath.Join(dir, backend.Paths.Version))
-	if err != nil {
-		return nil, fmt.Errorf("unable to read version file: %v\n", err)
-	}
-
-	var version uint
-	n, err := fmt.Fscanf(f, "%d", &version)
-	if err != nil {
-		return nil, err
-	}
-
-	if n != 1 {
-		return nil, errors.New("could not read version from file")
-	}
-
-	err = f.Close()
-	if err != nil {
-		return nil, err
-	}
-
-	// check version
-	if version != backend.Version {
-		return nil, fmt.Errorf("wrong version %d", version)
-	}
-
-	// read ID
-	f, err = os.Open(filepath.Join(dir, backend.Paths.ID))
-	if err != nil {
-		return nil, err
-	}
-
-	buf, err := ioutil.ReadAll(f)
-	if err != nil {
-		return nil, err
-	}
-
-	err = f.Close()
-	if err != nil {
-		return nil, err
-	}
-
-	id := strings.TrimSpace(string(buf))
-	if err != nil {
-		return nil, err
-	}
-
-	return &Local{p: dir, ver: version, id: id}, nil
+	return &Local{p: dir}, nil
 }
 
 // Create creates all the necessary files and directories for a new local
-// backend at dir.
+// backend at dir. Afterwards a new config blob should be created.
 func Create(dir string) (*Local, error) {
-	versionFile := filepath.Join(dir, backend.Paths.Version)
-	idFile := filepath.Join(dir, backend.Paths.ID)
 	dirs := []string{
 		dir,
 		filepath.Join(dir, backend.Paths.Data),
@@ -108,15 +53,10 @@ func Create(dir string) (*Local, error) {
 		filepath.Join(dir, backend.Paths.Temp),
 	}
 
-	// test if files already exist
-	_, err := os.Lstat(versionFile)
+	// test if config file already exists
+	_, err := os.Lstat(backend.Paths.Config)
 	if err == nil {
-		return nil, errors.New("version file already exists")
-	}
-
-	_, err = os.Lstat(idFile)
-	if err == nil {
-		return nil, errors.New("id file already exists")
+		return nil, errors.New("config file already exists")
 	}
 
 	// test if directories already exist
@@ -134,44 +74,6 @@ func Create(dir string) (*Local, error) {
 		}
 	}
 
-	// create version file
-	f, err := os.Create(versionFile)
-	if err != nil {
-		return nil, err
-	}
-
-	_, err = fmt.Fprintf(f, "%d\n", backend.Version)
-	if err != nil {
-		return nil, err
-	}
-
-	err = f.Close()
-	if err != nil {
-		return nil, err
-	}
-
-	// create ID file
-	id := make([]byte, sha256.Size)
-	_, err = rand.Read(id)
-	if err != nil {
-		return nil, err
-	}
-
-	f, err = os.Create(idFile)
-	if err != nil {
-		return nil, err
-	}
-
-	_, err = fmt.Fprintln(f, hex.EncodeToString(id))
-	if err != nil {
-		return nil, err
-	}
-
-	err = f.Close()
-	if err != nil {
-		return nil, err
-	}
-
 	// open backend
 	return Open(dir)
 }
@@ -265,6 +167,10 @@ func (b *Local) Create() (backend.Blob, error) {
 
 // Construct path for given Type and name.
 func filename(base string, t backend.Type, name string) string {
+	if t == backend.Config {
+		return filepath.Join(base, "config")
+	}
+
 	return filepath.Join(dirname(base, t, name), name)
 }
 
@@ -376,16 +282,6 @@ func (b *Local) List(t backend.Type, done <-chan struct{}) <-chan string {
 	return ch
 }
 
-// Version returns the version of this local backend.
-func (b *Local) Version() uint {
-	return b.ver
-}
-
-// ID returns the ID of this local backend.
-func (b *Local) ID() string {
-	return b.id
-}
-
 // Delete removes the repository and all files.
 func (b *Local) Delete() error { return os.RemoveAll(b.p) }
 
@@ -10,8 +10,7 @@ var Paths = struct {
 	Locks string
 	Keys string
 	Temp string
-	Version string
-	ID string
+	Config string
 }{
 	"data",
 	"snapshots",
@@ -19,8 +18,7 @@ var Paths = struct {
 	"locks",
 	"keys",
 	"tmp",
-	"version",
-	"id",
+	"config",
 }
 
 // Default modes for file-based backends
@@ -2,17 +2,14 @@ package sftp
 
 import (
 	"crypto/rand"
-	"crypto/sha256"
 	"encoding/hex"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"log"
 	"os"
 	"os/exec"
 	"path/filepath"
 	"sort"
-	"strings"
 
 	"github.com/juju/errors"
 	"github.com/pkg/sftp"
@@ -24,10 +21,8 @@ const (
 )
 
 type SFTP struct {
 	c *sftp.Client
 	p string
-	ver uint
-	id string
 
 	cmd *exec.Cmd
 }
@@ -81,8 +76,7 @@ func Open(dir string, program string, args ...string) (*SFTP, error) {
 		filepath.Join(dir, backend.Paths.Index),
 		filepath.Join(dir, backend.Paths.Locks),
 		filepath.Join(dir, backend.Paths.Keys),
-		filepath.Join(dir, backend.Paths.Version),
-		filepath.Join(dir, backend.Paths.ID),
+		filepath.Join(dir, backend.Paths.Temp),
 	}
 	for _, d := range items {
 		if _, err := sftp.c.Lstat(d); err != nil {
@@ -90,64 +84,18 @@ func Open(dir string, program string, args ...string) (*SFTP, error) {
 		}
 	}
 
-	// read version file
-	f, err := sftp.c.Open(filepath.Join(dir, backend.Paths.Version))
-	if err != nil {
-		return nil, fmt.Errorf("unable to read version file: %v\n", err)
-	}
-
-	var version uint
-	n, err := fmt.Fscanf(f, "%d", &version)
-	if err != nil {
-		return nil, err
-	}
-
-	if n != 1 {
-		return nil, errors.New("could not read version from file")
-	}
-
-	err = f.Close()
-	if err != nil {
-		return nil, err
-	}
-
-	// check version
-	if version != backend.Version {
-		return nil, fmt.Errorf("wrong version %d", version)
-	}
-
-	// read ID
-	f, err = sftp.c.Open(filepath.Join(dir, backend.Paths.ID))
-	if err != nil {
-		return nil, err
-	}
-
-	buf, err := ioutil.ReadAll(f)
-	if err != nil {
-		return nil, err
-	}
-
-	err = f.Close()
-	if err != nil {
-		return nil, err
-	}
-
-	sftp.id = strings.TrimSpace(string(buf))
 	sftp.p = dir
 
 	return sftp, nil
 }
 
 // Create creates all the necessary files and directories for a new sftp
-// backend at dir.
+// backend at dir. Afterwards a new config blob should be created.
 func Create(dir string, program string, args ...string) (*SFTP, error) {
 	sftp, err := startClient(program, args...)
 	if err != nil {
 		return nil, err
 	}
 
-	versionFile := filepath.Join(dir, backend.Paths.Version)
-	idFile := filepath.Join(dir, backend.Paths.ID)
 	dirs := []string{
 		dir,
 		filepath.Join(dir, backend.Paths.Data),
@@ -158,15 +106,10 @@ func Create(dir string, program string, args ...string) (*SFTP, error) {
 		filepath.Join(dir, backend.Paths.Temp),
 	}
 
-	// test if files already exist
-	_, err = sftp.c.Lstat(versionFile)
+	// test if config file already exists
+	_, err = sftp.c.Lstat(backend.Paths.Config)
 	if err == nil {
-		return nil, errors.New("version file already exists")
-	}
-
-	_, err = sftp.c.Lstat(idFile)
-	if err == nil {
-		return nil, errors.New("id file already exists")
+		return nil, errors.New("config file already exists")
 	}
 
 	// test if directories already exist
@@ -184,44 +127,6 @@ func Create(dir string, program string, args ...string) (*SFTP, error) {
 		}
 	}
 
-	// create version file
-	f, err := sftp.c.Create(versionFile)
-	if err != nil {
-		return nil, err
-	}
-
-	_, err = fmt.Fprintf(f, "%d\n", backend.Version)
-	if err != nil {
-		return nil, err
-	}
-
-	err = f.Close()
-	if err != nil {
-		return nil, err
-	}
-
-	// create ID file
-	id := make([]byte, sha256.Size)
-	_, err = rand.Read(id)
-	if err != nil {
-		return nil, err
-	}
-
-	f, err = sftp.c.Create(idFile)
-	if err != nil {
-		return nil, err
-	}
-
-	_, err = fmt.Fprintln(f, hex.EncodeToString(id))
-	if err != nil {
-		return nil, err
-	}
-
-	err = f.Close()
-	if err != nil {
-		return nil, err
-	}
-
 	err = sftp.c.Close()
 	if err != nil {
 		return nil, err
@@ -387,6 +292,10 @@ func (r *SFTP) Create() (backend.Blob, error) {
 
 // Construct path for given backend.Type and name.
 func (r *SFTP) filename(t backend.Type, name string) string {
+	if t == backend.Config {
+		return filepath.Join(r.p, "config")
+	}
+
 	return filepath.Join(r.dirname(t, name), name)
 }
 
@@ -540,16 +449,6 @@ func (r *SFTP) List(t backend.Type, done <-chan struct{}) <-chan string {
 
 }
 
-// Version returns the version of this local backend.
-func (r *SFTP) Version() uint {
-	return r.ver
-}
-
-// ID returns the ID of this local backend.
-func (r *SFTP) ID() string {
-	return r.id
-}
-
 // Close closes the sftp connection and terminates the underlying command.
 func (s *SFTP) Close() error {
 	if s == nil {
cache.go (4 changes)

@@ -18,13 +18,13 @@ type Cache struct {
 	base string
 }
 
-func NewCache(be backend.Identifier) (*Cache, error) {
+func NewCache(s *server.Server) (*Cache, error) {
 	cacheDir, err := getCacheDir()
 	if err != nil {
 		return nil, err
 	}
 
-	basedir := filepath.Join(cacheDir, be.ID())
+	basedir := filepath.Join(cacheDir, s.Config.ID)
 	debug.Log("Cache.New", "opened cache at %v", basedir)
 
 	return &Cache{base: basedir}, nil
@@ -10,8 +10,6 @@ import (
 func TestCache(t *testing.T) {
 	server := SetupBackend(t)
 	defer TeardownBackend(t, server)
-	key := SetupKey(t, server, "geheim")
-	server.SetKey(key)
 
 	_, err := restic.NewCache(server)
 	OK(t, err)
@@ -276,6 +276,22 @@ func TestPolIrreducible(t *testing.T) {
 	}
 }
 
+func BenchmarkPolIrreducible(b *testing.B) {
+	// find first irreducible polynomial
+	var pol chunker.Pol
+	for _, test := range polIrredTests {
+		if test.irred {
+			pol = test.f
+			break
+		}
+	}
+
+	for i := 0; i < b.N; i++ {
+		Assert(b, pol.Irreducible(),
+			"Irreducibility test for Polynomial %v failed", pol)
+	}
+}
+
 var polGCDTests = []struct {
 	f1 chunker.Pol
 	f2 chunker.Pol
@@ -27,11 +27,11 @@ func init() {
 }
 
 func (cmd CmdCat) Usage() string {
-	return "[pack|blob|tree|snapshot|key|masterkey|lock] ID"
+	return "[pack|blob|tree|snapshot|key|masterkey|config|lock] ID"
 }
 
 func (cmd CmdCat) Execute(args []string) error {
-	if len(args) < 1 || (args[0] != "masterkey" && len(args) != 2) {
+	if len(args) < 1 || (args[0] != "masterkey" && args[0] != "config" && len(args) != 2) {
 		return fmt.Errorf("type or ID not specified, Usage: %s", cmd.Usage())
 	}
 
@@ -43,7 +43,7 @@ func (cmd CmdCat) Execute(args []string) error {
 	tpe := args[0]
 
 	var id backend.ID
-	if tpe != "masterkey" {
+	if tpe != "masterkey" && tpe != "config" {
 		id, err = backend.ParseID(args[1])
 		if err != nil {
 			id = nil
@@ -67,6 +67,14 @@ func (cmd CmdCat) Execute(args []string) error {
 
 	// handle all types that don't need an index
 	switch tpe {
+	case "config":
+		buf, err := json.MarshalIndent(s.Config, "", " ")
+		if err != nil {
+			return err
+		}
+
+		fmt.Println(string(buf))
+		return nil
 	case "index":
 		buf, err := s.Load(backend.Index, id)
 		if err != nil {
@@ -78,7 +86,7 @@ func (cmd CmdCat) Execute(args []string) error {
 
 	case "snapshot":
 		sn := &restic.Snapshot{}
-		err = s.LoadJSONEncrypted(backend.Snapshot, id, sn)
+		err = s.LoadJSONUnpacked(backend.Snapshot, id, sn)
 		if err != nil {
 			return err
 		}
@@ -113,7 +121,7 @@ func (cmd CmdCat) Execute(args []string) error {
 		fmt.Println(string(buf))
 		return nil
 	case "masterkey":
-		buf, err := json.MarshalIndent(s.Key().Master(), "", " ")
+		buf, err := json.MarshalIndent(s.Key(), "", " ")
 		if err != nil {
 			return err
 		}
@@ -42,7 +42,7 @@ func listKeys(s *server.Server) error {
 		}
 
 		var current string
-		if name == s.Key().Name() {
+		if name == s.KeyName() {
 			current = "*"
 		} else {
 			current = " "
@@ -75,7 +75,7 @@ func addKey(s *server.Server) error {
 }
 
 func deleteKey(s *server.Server, name string) error {
-	if name == s.Key().Name() {
+	if name == s.KeyName() {
 		return errors.New("refusing to remove key currently used to access repository")
 	}
 
@@ -103,7 +103,7 @@ func changePassword(s *server.Server) error {
 	}
 
 	// remove old key
-	err = s.Remove(backend.Key, s.Key().Name())
+	err = s.Remove(backend.Key, s.KeyName())
 	if err != nil {
 		return err
 	}
@@ -74,14 +74,13 @@ func (cmd CmdInit) Execute(args []string) error {
 	}
 
 	s := server.NewServer(be)
-
-	_, err = server.CreateKey(s, pw)
+	err = s.Init(pw)
 	if err != nil {
 		fmt.Fprintf(os.Stderr, "creating key in backend at %s failed: %v\n", opts.Repo, err)
 		os.Exit(1)
 	}
 
-	fmt.Printf("created restic backend %v at %s\n", s.ID()[:10], opts.Repo)
+	fmt.Printf("created restic backend %v at %s\n", s.Config.ID[:10], opts.Repo)
 
 	fmt.Println("Please note that knowledge of your password is required to access the repository.")
 	fmt.Println("Losing your password means that your data is irrecoverably lost.")
@@ -8,7 +8,6 @@ import (
 	"errors"
 	"fmt"
 
-	"github.com/restic/restic/chunker"
 	"golang.org/x/crypto/poly1305"
 	"golang.org/x/crypto/scrypt"
 )
@@ -35,12 +34,10 @@ var (
 
 // Key holds encryption and message authentication keys for a repository. It is stored
 // encrypted and authenticated as a JSON data structure in the Data field of the Key
-// structure. For the master key, the secret random polynomial used for content
-// defined chunking is included.
+// structure.
 type Key struct {
 	MAC MACKey `json:"mac"`
 	Encrypt EncryptionKey `json:"encrypt"`
-	ChunkerPolynomial chunker.Pol `json:"chunker_polynomial,omitempty"`
 }
 
 type EncryptionKey [32]byte
@@ -340,9 +337,5 @@ func KDF(N, R, P int, salt []byte, password string) (*Key, error) {
 
 // Valid tests if the key is valid.
 func (k *Key) Valid() bool {
-	if k.ChunkerPolynomial != 0 && !k.ChunkerPolynomial.Irreducible() {
-		return false
-	}
-
 	return k.Encrypt.Valid() && k.MAC.Valid()
 }
doc/Design.md (119 changes)

@@ -21,49 +21,64 @@ been backed up at some point in time. The state here means the content and meta
 data like the name and modification time for the file or the directory and its
 contents.
 
+*Storage ID*: A storage ID is the SHA-256 hash of the content stored in the
+repository. This ID is needed in order to load the file from the repository.
+
 Repository Format
 =================
 
 All data is stored in a restic repository. A repository is able to store data
-of several different types, which can later be requested based on an ID. The ID
-is the hash (SHA-256) of the content of a file. All files in a repository are
-only written once and never modified afterwards. This allows accessing and even
-writing to the repository with multiple clients in parallel. Only the delete
-operation changes data in the repository.
+of several different types, which can later be requested based on an ID. This
+so-called "storage ID" is the SHA-256 hash of the content of a file. All files
+in a repository are only written once and never modified afterwards. This
+allows accessing and even writing to the repository with multiple clients in
+parallel. Only the delete operation removes data from the repository.
 
 At the time of writing, the only implemented repository type is based on
 directories and files. Such repositories can be accessed locally on the same
 system or via the integrated SFTP client. The directory layout is the same for
 both access methods. This repository type is described in the following.
 
-Repositories consists of several directories and a file called `version`. This
-file contains the version number of the repository. At the moment, this file
-is expected to hold the string `1`, with an optional newline character.
-Additionally there is a file named `id` which contains 32 random bytes, encoded
-in hexadecimal. This uniquely identifies the repository, regardless if it is
-accessed via SFTP or locally.
+Repositories consist of several directories and a file called `config`. For
+all other files stored in the repository, the name for the file is the lower
+case hexadecimal representation of the storage ID, which is the SHA-256 hash of
+the file's contents. This allows easily checking all files for accidental
+modifications like disk read errors by simply running the program `sha256sum`
+and comparing its output to the file name. If the prefix of a filename is
+unique amongst all the other files in the same directory, the prefix may be
+used instead of the complete filename.
 
-For all other files stored in the repository, the name for the file is the
-lower case hexadecimal representation of the SHA-256 hash of the file's
-contents. This allows easily checking all files for accidental modifications
-like disk read errors by simply running the program `sha256sum` and comparing
-its output to the file name. If the prefix of a filename is unique amongst all
-the other files in the same directory, the prefix may be used instead of the
-complete filename.
-
-Apart from the files `version`, `id` and the files stored below the `keys`
-directory, all files are encrypted with AES-256 in counter mode (CTR). The
-integrity of the encrypted data is secured by a Poly1305-AES message
-authentication code (sometimes also referred to as a "signature").
+Apart from the files stored below the `keys` directory, all files are encrypted
+with AES-256 in counter mode (CTR). The integrity of the encrypted data is
+secured by a Poly1305-AES message authentication code (sometimes also referred
+to as a "signature").
 
 In the first 16 bytes of each encrypted file the initialisation vector (IV) is
 stored. It is followed by the encrypted data and completed by the 16 byte
 MAC. The format is: `IV || CIPHERTEXT || MAC`. The complete encryption
-overhead is 32 byte. For each file, a new random IV is selected.
+overhead is 32 bytes. For each file, a new random IV is selected.
 
-The basic layout of a sample restic repository is shown below:
+The file `config` is encrypted this way and contains a JSON document like the
+following:
+
+    {
+      "version": 1,
+      "id": "5956a3f67a6230d4a92cefb29529f10196c7d92582ec305fd71ff6d331d6271b",
+      "chunker_polynomial": "25b468838dcb75"
+    }
+
+After decryption, restic first checks that the version field contains a version
+number that it understands, otherwise it aborts. At the moment, the version is
+expected to be 1. The field `id` holds a unique ID which consists of 32
+random bytes, encoded in hexadecimal. This uniquely identifies the repository,
+regardless if it is accessed via SFTP or locally. The field
+`chunker_polynomial` contains a parameter that is used for splitting large
+files into smaller chunks (see below).
+
+The basic layout of a sample restic repository is shown here:
 
     /tmp/restic-repo
+    ├── config
     ├── data
     │   ├── 21
    │   │   └── 2159dd48f8a24f33c307b750592773f8b71ff8d11452132a7b2e2a6a01611be1
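A minimal sketch of reading the decrypted config document shown above. The struct mirrors the `Config` type added to `server/server.go` later in this diff; the parsing helper and its name are assumptions made for this illustration, and the chunker polynomial is kept as its plain hex string:

    package main

    import (
        "encoding/json"
        "errors"
        "fmt"
    )

    // Config mirrors the repository config introduced by this commit.
    type Config struct {
        Version           uint   `json:"version"`
        ID                string `json:"id"`
        ChunkerPolynomial string `json:"chunker_polynomial"`
    }

    // parseConfig decodes an already-decrypted config document and rejects
    // repository versions this sketch does not understand.
    func parseConfig(plaintext []byte) (Config, error) {
        var cfg Config
        if err := json.Unmarshal(plaintext, &cfg); err != nil {
            return Config{}, err
        }
        if cfg.Version != 1 {
            return Config{}, errors.New("unsupported repository version")
        }
        return cfg, nil
    }

    func main() {
        doc := []byte(`{
            "version": 1,
            "id": "5956a3f67a6230d4a92cefb29529f10196c7d92582ec305fd71ff6d331d6271b",
            "chunker_polynomial": "25b468838dcb75"
        }`)

        cfg, err := parseConfig(doc)
        if err != nil {
            panic(err)
        }
        fmt.Println(cfg.ID[:10], cfg.ChunkerPolynomial)
    }
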
@@ -74,7 +89,6 @@ The basic layout of a sample restic repository is shown below:
     │   ├── 73
     │   │   └── 73d04e6125cf3c28a299cc2f3cca3b78ceac396e4fcf9575e34536b26782413c
     │   [...]
-    ├── id
     ├── index
     │   ├── c38f5fb68307c6a3e3aa945d556e325dc38f5fb68307c6a3e3aa945d556e325d
     │   └── ca171b1b7394d90d330b265d90f506f9984043b342525f019788f97e745c71fd
|
||||||
├── locks
|
├── locks
|
||||||
├── snapshots
|
├── snapshots
|
||||||
│ └── 22a5af1bdc6e616f8a29579458c49627e01b32210d09adb288d1ecda7c5711ec
|
│ └── 22a5af1bdc6e616f8a29579458c49627e01b32210d09adb288d1ecda7c5711ec
|
||||||
├── tmp
|
└── tmp
|
||||||
└── version
|
|
||||||
|
|
||||||
A repository can be initialized with the `restic init` command, e.g.:
|
A repository can be initialized with the `restic init` command, e.g.:
|
||||||
|
|
||||||
|
@@ -93,21 +106,21 @@ A repository can be initialized with the `restic init` command, e.g.:
 Pack Format
 -----------
 
-All files in the repository except Key and Data files just contain raw data,
-stored as `IV || Ciphertext || MAC`. Data files may contain one or more Blobs
-of data. The format is described in the following.
+All files in the repository except Key and Pack files just contain raw data,
+stored as `IV || Ciphertext || MAC`. Pack files may contain one or more Blobs
+of data.
 
-The Pack's structure is as follows:
+A Pack's structure is as follows:
 
     EncryptedBlob1 || ... || EncryptedBlobN || EncryptedHeader || Header_Length
 
-At the end of the Pack is a header, which describes the content. The header is
-encrypted and authenticated. `Header_Length` is the length of the encrypted header
-encoded as a four byte integer in little-endian encoding. Placing the header at
-the end of a file allows writing the blobs in a continuous stream as soon as
-they are read during the backup phase. This reduces code complexity and avoids
-having to re-write a file once the pack is complete and the content and length
-of the header is known.
+At the end of the Pack file is a header, which describes the content. The
+header is encrypted and authenticated. `Header_Length` is the length of the
+encrypted header encoded as a four byte integer in little-endian encoding.
+Placing the header at the end of a file allows writing the blobs in a
+continuous stream as soon as they are read during the backup phase. This
+reduces code complexity and avoids having to re-write a file once the pack is
+complete and the content and length of the header is known.
 
 All the blobs (`EncryptedBlob1`, `EncryptedBlobN` etc.) are authenticated and
 encrypted independently. This enables repository reorganisation without having
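The header placement described above can be illustrated with a short sketch that reads the four-byte little-endian `Header_Length` field from the end of a Pack file; the file name and helper function are made up for this example:

    package main

    import (
        "encoding/binary"
        "errors"
        "fmt"
        "os"
    )

    // readHeaderLength returns the length of the encrypted header, stored as a
    // little-endian uint32 in the last four bytes of a pack file.
    func readHeaderLength(f *os.File) (uint32, error) {
        fi, err := f.Stat()
        if err != nil {
            return 0, err
        }
        if fi.Size() < 4 {
            return 0, errors.New("file too small to be a pack")
        }

        var buf [4]byte
        if _, err := f.ReadAt(buf[:], fi.Size()-4); err != nil {
            return 0, err
        }
        return binary.LittleEndian.Uint32(buf[:]), nil
    }

    func main() {
        f, err := os.Open("example.pack") // hypothetical pack file
        if err != nil {
            panic(err)
        }
        defer f.Close()

        n, err := readHeaderLength(f)
        if err != nil {
            panic(err)
        }
        fmt.Println("encrypted header length:", n)
    }
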
@@ -178,7 +191,7 @@ listed afterwards.
 
 There may be an arbitrary number of index files, containing information on
 non-disjoint sets of Packs. The number of packs described in a single file is
-chosen so that the file size is kep below 8 MiB.
+chosen so that the file size is kept below 8 MiB.
 
 Keys, Encryption and MAC
 ------------------------
@@ -230,9 +243,8 @@ tampered with, the computed MAC will not match the last 16 bytes of the data,
 and restic exits with an error. Otherwise, the data is decrypted with the
 encryption key derived from `scrypt`. This yields a JSON document which
 contains the master encryption and message authentication keys for this
-repository (encoded in Base64) and the polynomial that is used for CDC. The
-command `restic cat masterkey` can be used as follows to decrypt and
-pretty-print the master key:
+repository (encoded in Base64). The command `restic cat masterkey` can be used
+as follows to decrypt and pretty-print the master key:
 
     $ restic -r /tmp/restic-repo cat masterkey
     {
@@ -241,7 +253,6 @@ pretty-print the master key:
         "r": "E9eEDnSJZgqwTOkDtOp+Dw=="
       },
       "encrypt": "UQCqa0lKZ94PygPxMRqkePTZnHRYh1k1pX2k2lM2v3Q=",
-      "chunker_polynomial": "2f0797d9c2363f"
     }
 
 All data in the repository is encrypted and authenticated with these master keys.
@@ -257,9 +268,8 @@ Snapshots
 A snapshots represents a directory with all files and sub-directories at a
 given point in time. For each backup that is made, a new snapshot is created. A
 snapshot is a JSON document that is stored in an encrypted file below the
-directory `snapshots` in the repository. The filename is the SHA-256 hash of
-the (encrypted) contents. This string is unique and used within restic to
-uniquely identify a snapshot.
+directory `snapshots` in the repository. The filename is the storage ID. This
+string is unique and used within restic to uniquely identify a snapshot.
 
 The command `restic cat snapshot` can be used as follows to decrypt and
 pretty-print the contents of a snapshot file:
|
||||||
SHA-256 hashes of all Blobs are saved in an ordered list which then represents
|
SHA-256 hashes of all Blobs are saved in an ordered list which then represents
|
||||||
the content of the file.
|
the content of the file.
|
||||||
|
|
||||||
In order to relate these plain text hashes to the actual encrypted storage
|
In order to relate these plaintext hashes to the actual location within a Pack
|
||||||
hashes (which vary due to random IVs), an index is used. If the index is not
|
file , an index is used. If the index is not available, the header of all data
|
||||||
available, the header of all data Blobs can be read.
|
Blobs can be read.
|
||||||
|
|
||||||
Trees and Data
|
Trees and Data
|
||||||
--------------
|
--------------
|
||||||
|
@@ -355,9 +365,9 @@ This tree contains a file entry. This time, the `subtree` field is not present
 and the `content` field contains a list with one plain text SHA-256 hash.
 
 The command `restic cat data` can be used to extract and decrypt data given a
-storage hash, e.g. for the data mentioned above:
+plaintext ID, e.g. for the data mentioned above:
 
-    $ restic -r /tmp/restic-repo cat blob 00634c46e5f7c055c341acd1201cf8289cabe769f991d6e350f8cd8ce2a52ac3 | sha256sum
+    $ restic -r /tmp/restic-repo cat blob 50f77b3b4291e8411a027b9f9b9e64658181cc676ce6ba9958b95f268cb1109d | sha256sum
     enter password for repository:
     50f77b3b4291e8411a027b9f9b9e64658181cc676ce6ba9958b95f268cb1109d -
 
@@ -372,8 +382,9 @@ For creating a backup, restic scans the source directory for all files,
 sub-directories and other entries. The data from each file is split into
 variable length Blobs cut at offsets defined by a sliding window of 64 byte.
 The implementation uses Rabin Fingerprints for implementing this Content
-Defined Chunking (CDC). An irreducible polynomial is selected at random when a
-repository is initialized.
+Defined Chunking (CDC). An irreducible polynomial is selected at random and
+saved in the file `config` when a repository is initialized, so that watermark
+attacks are much harder.
 
 Files smaller than 512 KiB are not split, Blobs are of 512 KiB to 8 MiB in
 size. The implementation aims for 1 MiB Blob size on average.
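The polynomial selection is what the new `createConfig` at the end of this diff does once per repository; a small sketch of the same idea, using only the `chunker` package functions that appear elsewhere in this commit (`RandomPolynomial` and `Pol.Irreducible`):

    package main

    import (
        "fmt"

        "github.com/restic/restic/chunker"
    )

    func main() {
        // Select a random polynomial for content defined chunking, as done
        // once when a repository config is created.
        pol, err := chunker.RandomPolynomial()
        if err != nil {
            panic(err)
        }

        // The repository relies on the polynomial being irreducible; the
        // explicit check here is only for illustration.
        fmt.Printf("polynomial %v, irreducible: %v\n", pol, pol.Irreducible())
    }
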
@@ -6,15 +6,12 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
-	"io"
 	"os"
 	"os/user"
 	"time"
 
 	"github.com/restic/restic/backend"
-	"github.com/restic/restic/chunker"
 	"github.com/restic/restic/crypto"
-	"github.com/restic/restic/debug"
 )
 
 var (
@@ -50,9 +47,9 @@ type Key struct {
 	name string
 }
 
-// CreateKey initializes a master key in the given backend and encrypts it with
-// the password.
-func CreateKey(s *Server, password string) (*Key, error) {
+// createMasterKey creates a new master key in the given backend and encrypts
+// it with the password.
+func createMasterKey(s *Server, password string) (*Key, error) {
 	return AddKey(s, password, nil)
 }
 
@@ -92,13 +89,6 @@ func OpenKey(s *Server, name string, password string) (*Key, error) {
 		return nil, errors.New("Invalid key for repository")
 	}
 
-	// test if the chunker polynomial is present in the master key
-	if k.master.ChunkerPolynomial == 0 {
-		return nil, errors.New("Polynomial for content defined chunking is zero")
-	}
-
-	debug.Log("OpenKey", "Master keys loaded, polynomial %v", k.master.ChunkerPolynomial)
-
 	return k, nil
 }
 
@@ -141,7 +131,7 @@ func LoadKey(s *Server, name string) (*Key, error) {
 }
 
 // AddKey adds a new key to an already existing repository.
-func AddKey(s *Server, password string, template *Key) (*Key, error) {
+func AddKey(s *Server, password string, template *crypto.Key) (*Key, error) {
 	// fill meta data about key
 	newkey := &Key{
 		Created: time.Now(),
@@ -177,17 +167,9 @@ func AddKey(s *Server, password string, template *Key) (*Key, error) {
 	if template == nil {
 		// generate new random master keys
 		newkey.master = crypto.NewRandomKey()
-		// generate random polynomial for cdc
-		p, err := chunker.RandomPolynomial()
-		if err != nil {
-			debug.Log("AddKey", "error generating new polynomial for cdc: %v", err)
-			return nil, err
-		}
-		debug.Log("AddKey", "generated new polynomial for cdc: %v", p)
-		newkey.master.ChunkerPolynomial = p
 	} else {
 		// copy master keys from old key
-		newkey.master = template.master
+		newkey.master = template
 	}
 
 	// encrypt master keys (as json) with user key
@@ -229,46 +211,6 @@ func AddKey(s *Server, password string, template *Key) (*Key, error) {
 	return newkey, nil
 }
 
-// Encrypt encrypts and authenticates data with the master key. Stored in
-// ciphertext is IV || Ciphertext || MAC. Returns the ciphertext, which is
-// extended if necessary.
-func (k *Key) Encrypt(ciphertext, plaintext []byte) ([]byte, error) {
-	return crypto.Encrypt(k.master, ciphertext, plaintext)
-}
-
-// EncryptTo encrypts and authenticates data with the master key. The returned
-// io.Writer writes IV || Ciphertext || MAC.
-func (k *Key) EncryptTo(wr io.Writer) io.WriteCloser {
-	return crypto.EncryptTo(k.master, wr)
-}
-
-// Decrypt verifes and decrypts the ciphertext with the master key. Ciphertext
-// must be in the form IV || Ciphertext || MAC.
-func (k *Key) Decrypt(plaintext, ciphertext []byte) ([]byte, error) {
-	return crypto.Decrypt(k.master, plaintext, ciphertext)
-}
-
-// DecryptFrom verifies and decrypts the ciphertext read from rd and makes it
-// available on the returned Reader. Ciphertext must be in the form IV ||
-// Ciphertext || MAC. In order to correctly verify the ciphertext, rd is
-// drained, locally buffered and made available on the returned Reader
-// afterwards. If a MAC verification failure is observed, it is returned
-// immediately.
-func (k *Key) DecryptFrom(rd io.Reader) (io.ReadCloser, error) {
-	return crypto.DecryptFrom(k.master, rd)
-}
-
-// Master returns the master keys for this repository. Only included for
-// debug purposes.
-func (k *Key) Master() *crypto.Key {
-	return k.master
-}
-
-// User returns the user keys for this key. Only included for debug purposes.
-func (k *Key) User() *crypto.Key {
-	return k.user
-}
-
 func (k *Key) String() string {
 	if k == nil {
 		return "<Key nil>"
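With the wrapper methods above removed, callers use package-level functions from `crypto` and pass the key explicitly, as seen throughout this commit; a short sketch of the new call pattern (the helper names are made up for this illustration):

    package example

    import (
        "io"

        "github.com/restic/restic/crypto"
    )

    // encryptBlob stores IV || Ciphertext || MAC for plaintext into dst, which
    // is extended if necessary, using the repository master key.
    func encryptBlob(key *crypto.Key, dst, plaintext []byte) ([]byte, error) {
        return crypto.Encrypt(key, dst, plaintext)
    }

    // decryptFromReader verifies and decrypts IV || Ciphertext || MAC read
    // from rd and makes the plaintext available on the returned reader.
    func decryptFromReader(key *crypto.Key, rd io.Reader) (io.ReadCloser, error) {
        return crypto.DecryptFrom(key, rd)
    }
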
@@ -1,13 +0,0 @@
-package server_test
-
-import (
-	"testing"
-
-	. "github.com/restic/restic/test"
-)
-
-func TestRepo(t *testing.T) {
-	s := SetupBackend(t)
-	defer TeardownBackend(t, s)
-	_ = SetupKey(t, s, TestPassword)
-}
server/server.go (130 changes)

@@ -2,7 +2,9 @@ package server
 
 import (
 	"bytes"
+	"crypto/rand"
 	"crypto/sha256"
+	"encoding/hex"
 	"encoding/json"
 	"errors"
 	"fmt"
@@ -12,14 +14,25 @@ import (
 
 	"github.com/restic/restic/backend"
 	"github.com/restic/restic/chunker"
+	"github.com/restic/restic/crypto"
 	"github.com/restic/restic/debug"
 	"github.com/restic/restic/pack"
 )
 
+// Config contains the configuration for a repository.
+type Config struct {
+	Version uint `json:"version"`
+	ID string `json:"id"`
+	ChunkerPolynomial chunker.Pol `json:"chunker_polynomial"`
+}
+
+// Server is used to access a repository in a backend.
 type Server struct {
 	be backend.Backend
-	key *Key
-	idx *Index
+	Config Config
+	key *crypto.Key
+	keyName string
+	idx *Index
 
 	pm sync.Mutex
 	packs []*pack.Packer
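The `crypto/rand` and `encoding/hex` imports added above serve the repository ID described in doc/Design.md (32 random bytes, hex encoded). A hedged sketch of that ID generation, matching the `repositoryIDSize` constant and the `newID` buffer that the `createConfig` function at the end of this diff starts to build; the helper name is made up:

    package main

    import (
        "crypto/rand"
        "crypto/sha256"
        "encoding/hex"
        "fmt"
    )

    const repositoryIDSize = sha256.Size // 32 bytes

    // newRepositoryID returns a fresh random repository ID as a lower-case
    // hexadecimal string, matching the "id" field of the config document.
    func newRepositoryID() (string, error) {
        newID := make([]byte, repositoryIDSize)
        if _, err := rand.Read(newID); err != nil {
            return "", err
        }
        return hex.EncodeToString(newID), nil
    }

    func main() {
        id, err := newRepositoryID()
        if err != nil {
            panic(err)
        }
        fmt.Println(id)
    }
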
@ -32,15 +45,6 @@ func NewServer(be backend.Backend) *Server {
 	}
 }
 
-func (s *Server) SetKey(k *Key) {
-	s.key = k
-}
-
-// ChunkerPolynomial returns the secret polynomial used for content defined chunking.
-func (s *Server) ChunkerPolynomial() chunker.Pol {
-	return chunker.Pol(s.key.Master().ChunkerPolynomial)
-}
-
 // Find loads the list of all blobs of type t and searches for names which start
 // with prefix. If none is found, nil and ErrNoIDPrefixFound is returned. If
 // more than one is found, nil and ErrMultipleIDMatches is returned.
@ -145,9 +149,9 @@ func (s *Server) LoadBlob(t pack.BlobType, id backend.ID) ([]byte, error) {
 	return plain, nil
 }
 
-// LoadJSONEncrypted decrypts the data and afterwards calls json.Unmarshal on
+// LoadJSONUnpacked decrypts the data and afterwards calls json.Unmarshal on
 // the item.
-func (s *Server) LoadJSONEncrypted(t backend.Type, id backend.ID, item interface{}) error {
+func (s *Server) LoadJSONUnpacked(t backend.Type, id backend.ID, item interface{}) error {
 	// load blob from backend
 	rd, err := s.be.Get(t, id.String())
 	if err != nil {
@ -156,7 +160,7 @@ func (s *Server) LoadJSONEncrypted(t backend.Type, id backend.ID, item interface
 	defer rd.Close()
 
 	// decrypt
-	decryptRd, err := s.key.DecryptFrom(rd)
+	decryptRd, err := crypto.DecryptFrom(s.key, rd)
 	defer decryptRd.Close()
 	if err != nil {
 		return err
@ -189,7 +193,7 @@ func (s *Server) LoadJSONPack(t pack.BlobType, id backend.ID, item interface{})
 	defer rd.Close()
 
 	// decrypt
-	decryptRd, err := s.key.DecryptFrom(rd)
+	decryptRd, err := crypto.DecryptFrom(s.key, rd)
 	defer decryptRd.Close()
 	if err != nil {
 		return err
@ -234,7 +238,7 @@ func (s *Server) findPacker(size uint) (*pack.Packer, error) {
 		return nil, err
 	}
 	debug.Log("Server.findPacker", "create new pack %p", blob)
-	return pack.NewPacker(s.key.Master(), blob), nil
+	return pack.NewPacker(s.key, blob), nil
 }
 
 // insertPacker appends p to s.packs.
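A note on the read path used by LoadJSONUnpacked and LoadJSONPack: the raw backend reader is wrapped in a decrypting reader, and a json.Decoder then consumes that stream directly, so the item is never buffered in full. A standalone sketch of this composition using only the standard library; the pass-through decryptFrom stands in for crypto.DecryptFrom, whose shape is assumed from the calls above:

package main

import (
	"encoding/json"
	"fmt"
	"io"
	"strings"
)

// decryptFrom stands in for crypto.DecryptFrom: the real function would
// verify and decrypt the stream; here it just passes the bytes through.
func decryptFrom(rd io.Reader) (io.Reader, error) {
	return rd, nil
}

type snapshot struct {
	Hostname string `json:"hostname"`
}

func main() {
	// backendReader stands in for the reader returned by s.be.Get(t, id.String()).
	backendReader := strings.NewReader(`{"hostname":"example"}`)

	decRd, err := decryptFrom(backendReader)
	if err != nil {
		panic(err)
	}

	// Decode the item straight from the (decrypted) stream.
	var sn snapshot
	if err := json.NewDecoder(decRd).Decode(&sn); err != nil {
		panic(err)
	}
	fmt.Println(sn.Hostname) // example
}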
@ -369,18 +373,18 @@ func (s *Server) SaveJSON(t pack.BlobType, item interface{}) (backend.ID, error)
 // SaveJSONUnpacked serialises item as JSON and encrypts and saves it in the
 // backend as type t, without a pack. It returns the storage hash.
 func (s *Server) SaveJSONUnpacked(t backend.Type, item interface{}) (backend.ID, error) {
-	// create blob
+	// create file
 	blob, err := s.be.Create()
 	if err != nil {
 		return nil, err
 	}
-	debug.Log("Server.SaveJSONUnpacked", "create new pack %p", blob)
+	debug.Log("Server.SaveJSONUnpacked", "create new file %p", blob)
 
 	// hash
 	hw := backend.NewHashingWriter(blob, sha256.New())
 
 	// encrypt blob
-	ewr := s.key.EncryptTo(hw)
+	ewr := crypto.EncryptTo(s.key, hw)
 
 	enc := json.NewEncoder(ewr)
 	err = enc.Encode(item)
@ -452,7 +456,7 @@ func (s *Server) SaveIndex() (backend.ID, error) {
 	hw := backend.NewHashingWriter(blob, sha256.New())
 
 	// encrypt blob
-	ewr := s.key.EncryptTo(hw)
+	ewr := crypto.EncryptTo(s.key, hw)
 
 	err = s.idx.Encode(ewr)
 	if err != nil {
@ -505,7 +509,7 @@ func (s *Server) loadIndex(id string) error {
 	}
 
 	// decrypt
-	decryptRd, err := s.key.DecryptFrom(rd)
+	decryptRd, err := crypto.DecryptFrom(s.key, rd)
 	defer decryptRd.Close()
 	if err != nil {
 		return err
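The write path of SaveJSONUnpacked is the mirror image: the backend blob, a SHA-256 hashing writer, and an encrypting writer are stacked, json.Encoder writes into the top of the stack, and the hash of the ciphertext becomes the storage ID. A standalone sketch of that layering, with bytes.Buffer standing in for the backend blob, io.MultiWriter playing the role of backend.NewHashingWriter, and the encryption step left out:

package main

import (
	"bytes"
	"crypto/sha256"
	"encoding/hex"
	"encoding/json"
	"fmt"
	"io"
)

func main() {
	// blob stands in for the writer returned by s.be.Create().
	blob := &bytes.Buffer{}

	// hw mirrors backend.NewHashingWriter(blob, sha256.New()): everything
	// written to it reaches the blob and the hash at the same time.
	h := sha256.New()
	hw := io.MultiWriter(blob, h)

	// ewr stands in for crypto.EncryptTo(s.key, hw); the real writer would
	// encrypt on the way through, the point here is only the composition.
	ewr := hw

	item := map[string]string{"hostname": "example"}
	if err := json.NewEncoder(ewr).Encode(item); err != nil {
		panic(err)
	}

	// The hash of what actually reached the backend becomes the storage ID.
	fmt.Println("storage id: ", hex.EncodeToString(h.Sum(nil)))
	fmt.Println("stored data:", blob.String())
}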
@ -525,15 +529,79 @@ func (s *Server) loadIndex(id string) error {
 	return nil
 }
 
+const repositoryIDSize = sha256.Size
+const RepositoryVersion = 1
+
+func createConfig(s *Server) (err error) {
+	s.Config.ChunkerPolynomial, err = chunker.RandomPolynomial()
+	if err != nil {
+		return err
+	}
+
+	newID := make([]byte, repositoryIDSize)
+	_, err = io.ReadFull(rand.Reader, newID)
+	if err != nil {
+		return err
+	}
+
+	s.Config.ID = hex.EncodeToString(newID)
+	s.Config.Version = RepositoryVersion
+
+	debug.Log("Server.createConfig", "New config: %#v", s.Config)
+
+	_, err = s.SaveJSONUnpacked(backend.Config, s.Config)
+	return err
+}
+
+func (s *Server) loadConfig(cfg *Config) error {
+	err := s.LoadJSONUnpacked(backend.Config, nil, cfg)
+	if err != nil {
+		return err
+	}
+
+	if cfg.Version != RepositoryVersion {
+		return errors.New("unsupported repository version")
+	}
+
+	if !cfg.ChunkerPolynomial.Irreducible() {
+		return errors.New("invalid chunker polynomial")
+	}
+
+	return nil
+}
+
+// SearchKey finds a key with the supplied password, afterwards the config is
+// read and parsed.
 func (s *Server) SearchKey(password string) error {
 	key, err := SearchKey(s, password)
 	if err != nil {
 		return err
 	}
 
-	s.key = key
-
-	return nil
+	s.key = key.master
+	s.keyName = key.Name()
+	return s.loadConfig(&s.Config)
+}
+
+// Init creates a new master key with the supplied password and initializes the
+// repository config.
+func (s *Server) Init(password string) error {
+	has, err := s.Test(backend.Config, "")
+	if err != nil {
+		return err
+	}
+	if has {
+		return errors.New("repository master key and config already initialized")
+	}
+
+	key, err := createMasterKey(s, password)
+	if err != nil {
+		return err
+	}
+
+	s.key = key.master
+	s.keyName = key.Name()
+	return createConfig(s)
 }
 
 func (s *Server) Decrypt(ciphertext []byte) ([]byte, error) {
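Taken together, Init and SearchKey define the repository lifecycle introduced here: Init runs once to create the master key and write the config, SearchKey runs on every later open and re-reads the same config. A sketch of how a caller might drive this, assuming the local backend used by the test helpers, a made-up repository path, and minimal error handling; it is an illustration against the API shown in this change, not code from the commit:

package main

import (
	"fmt"
	"log"

	"github.com/restic/restic/backend/local"
	"github.com/restic/restic/server"
)

func main() {
	// Create a fresh local backend; "/tmp/restic-repo" is a made-up path and
	// creating over an existing repository is expected to fail.
	be, err := local.Create("/tmp/restic-repo")
	if err != nil {
		log.Fatal(err)
	}

	s := server.NewServer(be)

	// First use: derive the master key from the password and write the config.
	if err := s.Init("geheim"); err != nil {
		log.Fatal(err)
	}

	// On later opens one would call s.SearchKey("geheim") instead, which looks
	// up the key and loads the same config back into s.Config.

	fmt.Println("repository id:     ", s.Config.ID)
	fmt.Println("chunker polynomial:", s.Config.ChunkerPolynomial)
}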
@ -541,7 +609,7 @@ func (s *Server) Decrypt(ciphertext []byte) ([]byte, error) {
 		return nil, errors.New("key for server not set")
 	}
 
-	return s.key.Decrypt(nil, ciphertext)
+	return crypto.Decrypt(s.key, nil, ciphertext)
 }
 
 func (s *Server) Encrypt(ciphertext, plaintext []byte) ([]byte, error) {
@ -549,13 +617,17 @@ func (s *Server) Encrypt(ciphertext, plaintext []byte) ([]byte, error) {
 		return nil, errors.New("key for server not set")
 	}
 
-	return s.key.Encrypt(ciphertext, plaintext)
+	return crypto.Encrypt(s.key, ciphertext, plaintext)
 }
 
-func (s *Server) Key() *Key {
+func (s *Server) Key() *crypto.Key {
 	return s.key
 }
 
+func (s *Server) KeyName() string {
+	return s.keyName
+}
+
 // Count returns the number of blobs of a given type in the backend.
 func (s *Server) Count(t backend.Type) (n uint) {
 	for _ = range s.be.List(t, nil) {
@ -595,10 +667,6 @@ func (s *Server) Delete() error {
 	return errors.New("Delete() called for backend that does not implement this method")
 }
 
-func (s *Server) ID() string {
-	return s.be.ID()
-}
-
 func (s *Server) Location() string {
 	return s.be.Location()
 }
@ -30,8 +30,6 @@ var serverTests = []testJSONStruct{
 func TestSaveJSON(t *testing.T) {
 	server := SetupBackend(t)
 	defer TeardownBackend(t, server)
-	key := SetupKey(t, server, "geheim")
-	server.SetKey(key)
 
 	for _, obj := range serverTests {
 		data, err := json.Marshal(obj)
@ -51,8 +49,6 @@ func TestSaveJSON(t *testing.T) {
 func BenchmarkSaveJSON(t *testing.B) {
 	server := SetupBackend(t)
 	defer TeardownBackend(t, server)
-	key := SetupKey(t, server, "geheim")
-	server.SetKey(key)
 
 	obj := serverTests[0]
 
@ -78,8 +74,6 @@ var testSizes = []int{5, 23, 2<<18 + 23, 1 << 20}
 func TestSave(t *testing.T) {
 	server := SetupBackend(t)
 	defer TeardownBackend(t, server)
-	key := SetupKey(t, server, "geheim")
-	server.SetKey(key)
 
 	for _, size := range testSizes {
 		data := make([]byte, size)
@ -112,8 +106,6 @@ func TestSave(t *testing.T) {
 func TestSaveFrom(t *testing.T) {
 	server := SetupBackend(t)
 	defer TeardownBackend(t, server)
-	key := SetupKey(t, server, "geheim")
-	server.SetKey(key)
 
 	for _, size := range testSizes {
 		data := make([]byte, size)
@ -144,8 +136,6 @@ func TestSaveFrom(t *testing.T) {
 func BenchmarkSaveFrom(t *testing.B) {
 	server := SetupBackend(t)
 	defer TeardownBackend(t, server)
-	key := SetupKey(t, server, "geheim")
-	server.SetKey(key)
 
 	size := 4 << 20 // 4MiB
 
@ -172,8 +162,6 @@ func TestLoadJSONPack(t *testing.T) {
 
 	server := SetupBackend(t)
 	defer TeardownBackend(t, server)
-	key := SetupKey(t, server, "geheim")
-	server.SetKey(key)
 
 	// archive a few files
 	sn := SnapshotDir(t, server, *benchTestDir, nil)
@ -184,15 +172,13 @@ func TestLoadJSONPack(t *testing.T) {
 	OK(t, err)
 }
 
-func TestLoadJSONEncrypted(t *testing.T) {
+func TestLoadJSONUnpacked(t *testing.T) {
 	if *benchTestDir == "" {
 		t.Skip("benchdir not set, skipping TestServerStats")
 	}
 
 	server := SetupBackend(t)
 	defer TeardownBackend(t, server)
-	key := SetupKey(t, server, "geheim")
-	server.SetKey(key)
 
 	// archive a snapshot
 	sn := restic.Snapshot{}
@ -205,7 +191,7 @@ func TestLoadJSONEncrypted(t *testing.T) {
 	var sn2 restic.Snapshot
 
 	// restore
-	err = server.LoadJSONEncrypted(backend.Snapshot, id, &sn2)
+	err = server.LoadJSONUnpacked(backend.Snapshot, id, &sn2)
 	OK(t, err)
 
 	Equals(t, sn.Hostname, sn2.Hostname)
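TestLoadJSONUnpacked above amounts to a save/load round trip through the unpacked JSON path. Pulled out of the test harness it looks roughly like the sketch below; this is an illustration against the API shown in this change, not additional code from the commit:

package sketch

import (
	"github.com/restic/restic"
	"github.com/restic/restic/backend"
	"github.com/restic/restic/server"
)

// roundTrip saves a snapshot through the unpacked JSON path and reads it
// back. s must already be initialized (via Init or SearchKey).
func roundTrip(s *server.Server) (restic.Snapshot, error) {
	sn := restic.Snapshot{Hostname: "example"}

	// encrypt and store the snapshot as a single unpacked blob
	id, err := s.SaveJSONUnpacked(backend.Snapshot, &sn)
	if err != nil {
		return restic.Snapshot{}, err
	}

	// load and decrypt it again via the storage ID
	var sn2 restic.Snapshot
	err = s.LoadJSONUnpacked(backend.Snapshot, id, &sn2)
	return sn2, err // sn2.Hostname == "example" on success
}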
@ -52,7 +52,7 @@ func NewSnapshot(paths []string) (*Snapshot, error) {
 
 func LoadSnapshot(s *server.Server, id backend.ID) (*Snapshot, error) {
 	sn := &Snapshot{id: id}
-	err := s.LoadJSONEncrypted(backend.Snapshot, id, sn)
+	err := s.LoadJSONUnpacked(backend.Snapshot, id, sn)
 	if err != nil {
 		return nil, err
 	}
@ -13,7 +13,7 @@ import (
 	"github.com/restic/restic/server"
 )
 
-var TestPassword = "foobar"
+var TestPassword = flag.String("test.password", "", `use this password for repositories created during tests (default: "geheim")`)
 var TestCleanup = flag.Bool("test.cleanup", true, "clean up after running tests (remove local backend directory with all content)")
 var TestTempDir = flag.String("test.tempdir", "", "use this directory for temporary storage (default: system temp dir)")
 
@ -25,11 +25,13 @@ func SetupBackend(t testing.TB) *server.Server {
 	b, err := local.Create(filepath.Join(tempdir, "repo"))
 	OK(t, err)
 
-	// set cache dir
+	// set cache dir below temp dir
 	err = os.Setenv("RESTIC_CACHE", filepath.Join(tempdir, "cache"))
 	OK(t, err)
 
-	return server.NewServer(b)
+	s := server.NewServer(b)
+	OK(t, s.Init(*TestPassword))
+	return s
 }
 
 func TeardownBackend(t testing.TB, s *server.Server) {
@ -42,13 +44,6 @@ func TeardownBackend(t testing.TB, s *server.Server) {
 	OK(t, s.Delete())
 }
 
-func SetupKey(t testing.TB, s *server.Server, password string) *server.Key {
-	k, err := server.CreateKey(s, password)
-	OK(t, err)
-
-	return k
-}
-
 func SnapshotDir(t testing.TB, server *server.Server, path string, parent backend.ID) *restic.Snapshot {
 	arch := restic.NewArchiver(server)
 	sn, _, err := arch.Snapshot(nil, []string{path}, parent)
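With SetupBackend now calling Init itself, the per-test boilerplate shrinks to two lines, which is why the SetupKey/SetKey pair disappears from every test file in this commit, including the two below. A sketch of the resulting test skeleton; the package name and test body are placeholders:

package restic_test

import (
	"testing"

	. "github.com/restic/restic/test"
)

// TestSomething is a placeholder name; the point is the setup/teardown shape.
func TestSomething(t *testing.T) {
	server := SetupBackend(t) // creates a local backend and initializes the repository
	defer TeardownBackend(t, server)

	// ... exercise the fully initialized *server.Server here ...
}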
@ -95,8 +95,6 @@ func TestNodeComparison(t *testing.T) {
 func TestLoadTree(t *testing.T) {
 	server := SetupBackend(t)
 	defer TeardownBackend(t, server)
-	key := SetupKey(t, server, "geheim")
-	server.SetKey(key)
 
 	// save tree
 	tree := restic.NewTree()
@ -18,8 +18,6 @@ func TestWalkTree(t *testing.T) {
 
 	server := SetupBackend(t)
 	defer TeardownBackend(t, server)
-	key := SetupKey(t, server, "geheim")
-	server.SetKey(key)
 
 	// archive a few files
 	arch := restic.NewArchiver(server)