Merge pull request #798 from restic/fix-797

Checker: Propagate errors properly
Alexander Neumann 2017-02-11 14:55:50 +01:00
commit 05cae4911d
4 changed files with 63 additions and 17 deletions


@@ -80,6 +80,7 @@ func (c *Checker) LoadIndex() (hints []error, errs []error) {
 	debug.Log("Start")
 	type indexRes struct {
 		Index *repository.Index
+		err   error
 		ID    string
 	}
@@ -95,39 +96,40 @@ func (c *Checker) LoadIndex() (hints []error, errs []error) {
 			idx, err = repository.LoadIndexWithDecoder(c.repo, id, repository.DecodeOldIndex)
 		}
-		if err != nil {
-			return err
-		}
+		err = errors.Wrapf(err, "error loading index %v", id.Str())

 		select {
-		case indexCh <- indexRes{Index: idx, ID: id.String()}:
+		case indexCh <- indexRes{Index: idx, ID: id.String(), err: err}:
 		case <-done:
 		}

 		return nil
 	}

-	var perr error
 	go func() {
 		defer close(indexCh)
 		debug.Log("start loading indexes in parallel")
-		perr = repository.FilesInParallel(c.repo.Backend(), restic.IndexFile, defaultParallelism,
+		err := repository.FilesInParallel(c.repo.Backend(), restic.IndexFile, defaultParallelism,
 			repository.ParallelWorkFuncParseID(worker))
-		debug.Log("loading indexes finished, error: %v", perr)
+		debug.Log("loading indexes finished, error: %v", err)
+		if err != nil {
+			panic(err)
+		}
 	}()

 	done := make(chan struct{})
 	defer close(done)

-	if perr != nil {
-		errs = append(errs, perr)
-		return hints, errs
-	}
-
 	packToIndex := make(map[restic.ID]restic.IDSet)

 	for res := range indexCh {
-		debug.Log("process index %v", res.ID)
+		debug.Log("process index %v, err %v", res.ID, res.err)
+
+		if res.err != nil {
+			errs = append(errs, res.err)
+			continue
+		}
+
 		idxID, err := restic.ParseID(res.ID)
 		if err != nil {
 			errs = append(errs, errors.Errorf("unable to parse as index ID: %v", res.ID))
@@ -154,8 +156,6 @@ func (c *Checker) LoadIndex() (hints []error, errs []error) {
 		debug.Log("%d blobs processed", cnt)
 	}

-	debug.Log("done, error %v", perr)
-
 	debug.Log("checking for duplicate packs")
 	for packID := range c.packs {
 		debug.Log("  check pack %v: contained in %d indexes", packID.Str(), len(packToIndex[packID]))
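To see the pattern outside the diff: instead of returning on the first failed index load, each worker now attaches its error to the result it sends over the channel, and the consuming loop collects every error and keeps going. A minimal, self-contained Go sketch of that idea (all names here, such as result, load and loadAll, are invented for illustration and are not restic APIs):

    package main

    import (
        "fmt"
        "strings"
    )

    // result carries either a loaded value or the error that occurred while
    // loading it, mirroring the err field added to indexRes above.
    type result struct {
        id  string
        val string
        err error
    }

    // loadAll loads all IDs and collects every error instead of aborting on
    // the first one.
    func loadAll(ids []string) (vals []string, errs []error) {
        ch := make(chan result)

        go func() {
            defer close(ch)
            for _, id := range ids {
                val, err := load(id)
                // Do not return early; send the error along with the result.
                ch <- result{id: id, val: val, err: err}
            }
        }()

        for res := range ch {
            if res.err != nil {
                errs = append(errs, fmt.Errorf("load %v: %v", res.id, res.err))
                continue
            }
            vals = append(vals, res.val)
        }
        return vals, errs
    }

    // load is a stand-in for loading and decoding a single index file.
    func load(id string) (string, error) {
        if strings.HasPrefix(id, "bad") {
            return "", fmt.Errorf("invalid data returned")
        }
        return "index-" + id, nil
    }

    func main() {
        vals, errs := loadAll([]string{"aaa", "bad01", "bbb"})
        fmt.Println("loaded:", vals) // loaded: [index-aaa index-bbb]
        fmt.Println("errors:", errs) // errors: [load bad01: invalid data returned]
    }

This is the reason for the new err field in indexRes: the error travels with its result, so one unreadable index file no longer hides problems in the others.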


@@ -179,6 +179,48 @@ func TestUnreferencedBlobs(t *testing.T) {
 	test.Equals(t, unusedBlobsBySnapshot, blobs)
 }

+func TestModifiedIndex(t *testing.T) {
+	repodir, cleanup := test.Env(t, checkerTestData)
+	defer cleanup()
+
+	repo := repository.TestOpenLocal(t, repodir)
+
+	done := make(chan struct{})
+	defer close(done)
+
+	h := restic.Handle{
+		Type: restic.IndexFile,
+		Name: "90f838b4ac28735fda8644fe6a08dbc742e57aaf81b30977b4fefa357010eafd",
+	}
+	f, err := repo.Backend().Load(h, 0, 0)
+	test.OK(t, err)
+
+	// save the index again with a modified name so that the hash doesn't match
+	// the content any more
+	h2 := restic.Handle{
+		Type: restic.IndexFile,
+		Name: "80f838b4ac28735fda8644fe6a08dbc742e57aaf81b30977b4fefa357010eafd",
+	}
+	err = repo.Backend().Save(h2, f)
+	test.OK(t, err)
+
+	test.OK(t, f.Close())
+
+	chkr := checker.New(repo)
+
+	hints, errs := chkr.LoadIndex()
+	if len(errs) == 0 {
+		t.Fatalf("expected errors not found")
+	}
+
+	for _, err := range errs {
+		t.Logf("found expected error %v", err)
+	}
+
+	if len(hints) > 0 {
+		t.Errorf("expected no hints, got %v: %v", len(hints), hints)
+	}
+}
+
 var checkerDuplicateIndexTestData = filepath.Join("testdata", "duplicate-packs-in-index-test-repo.tar.gz")

 func TestDuplicatePacksInIndex(t *testing.T) {


@@ -18,3 +18,7 @@ var Errorf = errors.Errorf
 // Wrap wraps an error retrieved from outside of restic. Wrapped so that this
 // package does not appear in the stack trace.
 var Wrap = errors.Wrap
+
+// Wrapf returns an error annotating err with the format specifier. If err is
+// nil, Wrapf returns nil.
+var Wrapf = errors.Wrapf
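The nil pass-through documented here is what lets the checker above call errors.Wrapf(err, ...) unconditionally: wrapping a nil error yields nil, so the success path is unchanged. A small sketch against github.com/pkg/errors, which these variables presumably re-export (the index ID string is made up for the example):

    package main

    import (
        "fmt"

        "github.com/pkg/errors"
    )

    func main() {
        // Wrapping a nil error returns nil, so no "if err != nil" guard is needed.
        var err error
        fmt.Println(errors.Wrapf(err, "error loading index %v", "80f838b4")) // <nil>

        // Wrapping a real error annotates it with the formatted message.
        err = errors.New("invalid data returned")
        fmt.Println(errors.Wrapf(err, "error loading index %v", "80f838b4"))
        // error loading index 80f838b4: invalid data returned
    }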


@@ -56,12 +56,12 @@ func (r *Repository) LoadAndDecrypt(t restic.FileType, id restic.ID) ([]byte, er
 	h := restic.Handle{Type: t, Name: id.String()}
 	buf, err := backend.LoadAll(r.be, h)
 	if err != nil {
-		debug.Log("error loading %v: %v", id.Str(), err)
+		debug.Log("error loading %v: %v", h, err)
 		return nil, err
 	}

 	if t != restic.ConfigFile && !restic.Hash(buf).Equal(id) {
-		return nil, errors.New("invalid data returned")
+		return nil, errors.Errorf("load %v: invalid data returned", h)
 	}

 	// decrypt
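For background on the check that now produces the more descriptive message: restic addresses repository files by the SHA-256 hash of their content, and restic.Hash(buf).Equal(id) re-hashes the loaded buffer and compares it to the ID taken from the file name. A stand-alone sketch of that verification using only the standard library (the helper verify and the sample data are made up for illustration):

    package main

    import (
        "bytes"
        "crypto/sha256"
        "encoding/hex"
        "fmt"
    )

    // verify re-hashes a loaded buffer and compares it to the ID encoded in
    // the file name, the same idea as the restic.Hash(buf).Equal(id) check above.
    func verify(name string, buf []byte) error {
        want, err := hex.DecodeString(name)
        if err != nil {
            return fmt.Errorf("invalid file name %q: %v", name, err)
        }
        sum := sha256.Sum256(buf)
        if !bytes.Equal(sum[:], want) {
            return fmt.Errorf("load %v: invalid data returned", name)
        }
        return nil
    }

    func main() {
        buf := []byte("some index content")
        good := fmt.Sprintf("%x", sha256.Sum256(buf))

        fmt.Println(verify(good, buf))                     // <nil>
        fmt.Println(verify(good, []byte("tampered data"))) // load ...: invalid data returned
    }

Renaming the index file in TestModifiedIndex breaks exactly this invariant, which is why LoadIndex is now expected to report an error for it instead of silently dropping the index.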