forked from TrueCloudLab/restic
commit f9fc8674eb
6 changed files with 111 additions and 79 deletions
@@ -272,7 +272,13 @@ func readBackupFromStdin(opts BackupOptions, gopts GlobalOptions, args []string)
 		return err
 	}

-	_, id, err := archiver.ArchiveReader(repo, newArchiveStdinProgress(gopts), os.Stdin, opts.StdinFilename, opts.Tags, opts.Hostname)
+	r := &archiver.Reader{
+		Repository: repo,
+		Tags:       opts.Tags,
+		Hostname:   opts.Hostname,
+	}
+
+	_, id, err := r.Archive(opts.StdinFilename, os.Stdin, newArchiveStdinProgress(gopts))
 	if err != nil {
 		return err
 	}
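The stdin backup path now builds an `archiver.Reader` once and calls its `Archive` method, instead of passing six positional arguments to the old `ArchiveReader` function. A minimal sketch of the new call pattern, assuming an already-opened repository; the function name, tags, hostname, and file name here are illustrative placeholders, not part of the diff:

```go
package main

import (
	"fmt"
	"io"

	"restic"
	"restic/archiver"
)

// archiveStream shows the new call pattern; illustrative only.
// repo must be an open restic.Repository; the progress arg may be nil.
func archiveStream(repo restic.Repository, rd io.Reader) error {
	r := &archiver.Reader{
		Repository: repo,
		Tags:       []string{"stdin"}, // optional snapshot tags
		Hostname:   "example-host",    // placeholder hostname
	}

	// Archive chunks rd, uploads the blobs, and writes a snapshot
	// containing a single file with the given name.
	_, id, err := r.Archive("stdin-data", rd, nil)
	if err != nil {
		return err
	}

	fmt.Printf("archived snapshot %v\n", id.Str())
	return nil
}
```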
@@ -1,7 +1,8 @@
 package main

 import (
-	"restic/repository"
+	"restic"
+	"restic/index"

 	"github.com/spf13/cobra"
 )
@@ -34,5 +35,47 @@ func runRebuildIndex(gopts GlobalOptions) error {
 		return err
 	}

-	return repository.RebuildIndex(repo)
+	done := make(chan struct{})
+	defer close(done)
+
+	Verbosef("counting files in repo\n")
+
+	var packs uint64
+	for _ = range repo.List(restic.DataFile, done) {
+		packs++
+	}
+
+	bar := newProgressMax(!gopts.Quiet, packs, "packs")
+	idx, err := index.New(repo, bar)
+	if err != nil {
+		return err
+	}
+
+	Verbosef("listing old index files\n")
+	var supersedes restic.IDs
+	for id := range repo.List(restic.IndexFile, done) {
+		supersedes = append(supersedes, id)
+	}
+
+	id, err := idx.Save(repo, supersedes)
+	if err != nil {
+		return err
+	}
+
+	Verbosef("saved new index as %v\n", id.Str())
+
+	Verbosef("remove %d old index files\n", len(supersedes))
+
+	for _, id := range supersedes {
+		err := repo.Backend().Remove(restic.Handle{
+			Type: restic.IndexFile,
+			Name: id.String(),
+		})
+
+		if err != nil {
+			Warnf("error deleting old index %v: %v\n", id.Str(), err)
+		}
+	}
+
+	return nil
 }
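The command body above first counts pack files so the progress bar can show a total, then builds a fresh index, saves it with the list of index files it supersedes, and finally deletes those superseded files. Condensed into one function, the core of the new flow looks like this; a hedged sketch using only calls from the diff, except that it returns on delete errors where the command merely warns:

```go
package main

import (
	"restic"
	"restic/index"
)

// rebuildIndex condenses the new command body above into one function.
// Illustrative only; assumes an already-opened restic.Repository.
func rebuildIndex(repo restic.Repository) error {
	done := make(chan struct{})
	defer close(done)

	// Scan every pack file and build a fresh in-memory index
	// (a nil progress bar disables progress reporting).
	idx, err := index.New(repo, nil)
	if err != nil {
		return err
	}

	// The new index supersedes every existing index file.
	var supersedes restic.IDs
	for id := range repo.List(restic.IndexFile, done) {
		supersedes = append(supersedes, id)
	}

	if _, err := idx.Save(repo, supersedes); err != nil {
		return err
	}

	// Delete the superseded index files from the backend.
	for _, id := range supersedes {
		h := restic.Handle{Type: restic.IndexFile, Name: id.String()}
		if err := repo.Backend().Remove(h); err != nil {
			return err
		}
	}

	return nil
}
```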
@@ -11,15 +11,22 @@ import (
 	"github.com/restic/chunker"
 )

-// ArchiveReader reads from the reader and archives the data. Returned is the
-// resulting snapshot and its ID.
-func ArchiveReader(repo restic.Repository, p *restic.Progress, rd io.Reader, name string, tags []string, hostname string) (*restic.Snapshot, restic.ID, error) {
+// Reader allows saving a stream of data to the repository.
+type Reader struct {
+	restic.Repository
+
+	Tags     []string
+	Hostname string
+}
+
+// Archive reads data from the reader and saves it to the repo.
+func (r *Reader) Archive(name string, rd io.Reader, p *restic.Progress) (*restic.Snapshot, restic.ID, error) {
 	if name == "" {
 		return nil, restic.ID{}, errors.New("no filename given")
 	}

 	debug.Log("start archiving %s", name)
-	sn, err := restic.NewSnapshot([]string{name}, tags, hostname)
+	sn, err := restic.NewSnapshot([]string{name}, r.Tags, r.Hostname)
 	if err != nil {
 		return nil, restic.ID{}, err
 	}
@@ -27,6 +34,7 @@ func ArchiveReader(repo restic.Repository, p *restic.Progress, rd io.Reader, nam
 	p.Start()
 	defer p.Done()

+	repo := r.Repository
 	chnker := chunker.New(rd, repo.Config().ChunkerPolynomial)

 	ids := restic.IDs{}
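A side note on the struct design: `Reader` embeds `restic.Repository` as an anonymous field, so the repository's methods are promoted onto `Reader`, while the added `repo := r.Repository` line keeps the rest of `Archive` unchanged. A self-contained toy example of that promotion; the types here are stand-ins, not restic's:

```go
package main

import "fmt"

// Repo is a stand-in for restic.Repository, just to show embedding.
type Repo struct{}

// Config mimics a method that Reader gains by promotion.
func (Repo) Config() string { return "chunker polynomial" }

// Reader embeds Repo as an anonymous field, exactly like the diff's
// Reader embeds restic.Repository.
type Reader struct {
	Repo
	Tags     []string
	Hostname string
}

func main() {
	r := &Reader{Repo: Repo{}}
	fmt.Println(r.Config())      // promoted from the embedded field
	fmt.Println(r.Repo.Config()) // the explicit equivalent
}
```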
@@ -79,7 +79,13 @@ func TestArchiveReader(t *testing.T) {

 	f := fakeFile(t, seed, size)

-	sn, id, err := ArchiveReader(repo, nil, f, "fakefile", []string{"test"}, "localhost")
+	r := &Reader{
+		Repository: repo,
+		Hostname:   "localhost",
+		Tags:       []string{"test"},
+	}
+
+	sn, id, err := r.Archive("fakefile", f, nil)
 	if err != nil {
 		t.Fatalf("ArchiveReader() returned error %v", err)
 	}
@@ -99,7 +105,13 @@ func TestArchiveReaderNull(t *testing.T) {
 	repo, cleanup := repository.TestRepository(t)
 	defer cleanup()

-	sn, id, err := ArchiveReader(repo, nil, bytes.NewReader(nil), "fakefile", nil, "localhost")
+	r := &Reader{
+		Repository: repo,
+		Hostname:   "localhost",
+		Tags:       []string{"test"},
+	}
+
+	sn, id, err := r.Archive("fakefile", bytes.NewReader(nil), nil)
 	if err != nil {
 		t.Fatalf("ArchiveReader() returned error %v", err)
 	}
@@ -134,7 +146,13 @@ func TestArchiveReaderError(t *testing.T) {
 	repo, cleanup := repository.TestRepository(t)
 	defer cleanup()

-	sn, id, err := ArchiveReader(repo, nil, errReader("error returned by reading stdin"), "fakefile", nil, "localhost")
+	r := &Reader{
+		Repository: repo,
+		Hostname:   "localhost",
+		Tags:       []string{"test"},
+	}
+
+	sn, id, err := r.Archive("fakefile", errReader("error returned by reading stdin"), nil)
 	if err == nil {
 		t.Errorf("expected error not returned")
 	}
@@ -167,11 +185,17 @@ func BenchmarkArchiveReader(t *testing.B) {
 		t.Fatal(err)
 	}

+	r := &Reader{
+		Repository: repo,
+		Hostname:   "localhost",
+		Tags:       []string{"test"},
+	}
+
 	t.SetBytes(size)
 	t.ResetTimer()

 	for i := 0; i < t.N; i++ {
-		_, _, err := ArchiveReader(repo, nil, bytes.NewReader(buf), "fakefile", []string{"test"}, "localhost")
+		_, _, err := r.Archive("fakefile", bytes.NewReader(buf), nil)
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -1,66 +0,0 @@
-package repository
-
-import (
-	"fmt"
-	"os"
-	"restic"
-	"restic/debug"
-	"restic/list"
-	"restic/worker"
-)
-
-// RebuildIndex lists all packs in the repo, writes a new index and removes all
-// old indexes. This operation should only be done with an exclusive lock in
-// place.
-func RebuildIndex(repo restic.Repository) error {
-	debug.Log("start rebuilding index")
-
-	done := make(chan struct{})
-	defer close(done)
-
-	ch := make(chan worker.Job)
-	go list.AllPacks(repo, ch, done)
-
-	idx := NewIndex()
-	for job := range ch {
-		id := job.Data.(restic.ID)
-
-		if job.Error != nil {
-			fmt.Fprintf(os.Stderr, "error for pack %v: %v\n", id, job.Error)
-			continue
-		}
-
-		res := job.Result.(list.Result)
-
-		for _, entry := range res.Entries() {
-			pb := restic.PackedBlob{
-				Blob:   entry,
-				PackID: res.PackID(),
-			}
-			idx.Store(pb)
-		}
-	}
-
-	oldIndexes := restic.NewIDSet()
-	for id := range repo.List(restic.IndexFile, done) {
-		idx.AddToSupersedes(id)
-		oldIndexes.Insert(id)
-	}
-
-	id, err := SaveIndex(repo, idx)
-	if err != nil {
-		debug.Log("error saving index: %v", err)
-		return err
-	}
-	debug.Log("new index saved as %v", id.Str())
-
-	for indexID := range oldIndexes {
-		h := restic.Handle{Type: restic.IndexFile, Name: indexID.String()}
-		err := repo.Backend().Remove(h)
-		if err != nil {
-			fmt.Fprintf(os.Stderr, "unable to remove index %v: %v\n", indexID.Str(), err)
-		}
-	}
-
-	return nil
-}
@@ -4,6 +4,7 @@ import (
 	"io"
 	"math/rand"
 	"restic"
+	"restic/index"
 	"restic/repository"
 	"testing"
 )
@@ -144,8 +145,24 @@ func saveIndex(t *testing.T, repo restic.Repository) {
 }

 func rebuildIndex(t *testing.T, repo restic.Repository) {
-	if err := repository.RebuildIndex(repo); err != nil {
-		t.Fatalf("error rebuilding index: %v", err)
+	idx, err := index.New(repo, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	for id := range repo.List(restic.IndexFile, nil) {
+		err = repo.Backend().Remove(restic.Handle{
+			Type: restic.IndexFile,
+			Name: id.String(),
+		})
+		if err != nil {
+			t.Fatal(err)
+		}
+	}
+
+	_, err = idx.Save(repo, nil)
+	if err != nil {
+		t.Fatal(err)
 	}
 }
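Note the asymmetry with the command: the test helper deletes the old index files before saving and passes `nil` for supersedes, so the saved index records no superseded IDs. One way this helper might be exercised, as a hedged sketch; the surrounding test and the single-index assertion are assumptions, not part of the diff:

```go
package main

import (
	"testing"

	"restic"
	"restic/repository"
)

// TestRebuildIndexHelper sketches a caller of the rebuildIndex helper
// above; illustrative only.
func TestRebuildIndexHelper(t *testing.T) {
	repo, cleanup := repository.TestRepository(t)
	defer cleanup()

	rebuildIndex(t, repo)

	// The helper removed every old index file and saved exactly one
	// new one, so a fresh listing should yield a single entry.
	count := 0
	for range repo.List(restic.IndexFile, nil) {
		count++
	}
	if count != 1 {
		t.Fatalf("expected 1 index file, got %d", count)
	}
}
```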