forked from TrueCloudLab/restic

commit 3695ba5882 (parent 4c95d2cfdc)

    Tests pass for restic/

14 changed files with 156 additions and 195 deletions
@@ -11,9 +11,8 @@ import (
 	"github.com/pkg/errors"
 
 	"restic"
-	"restic/backend"
+	"restic/archiver"
 	"restic/mock"
-	"restic/pack"
 	"restic/repository"
 )
 
@@ -21,14 +20,14 @@ const parallelSaves = 50
 const testSaveIndexTime = 100 * time.Millisecond
 const testTimeout = 2 * time.Second
 
-var DupID backend.ID
+var DupID restic.ID
 
-func randomID() backend.ID {
+func randomID() restic.ID {
 	if mrand.Float32() < 0.5 {
 		return DupID
 	}
 
-	id := backend.ID{}
+	id := restic.ID{}
 	_, err := io.ReadFull(rand.Reader, id[:])
 	if err != nil {
 		panic(err)
@@ -52,8 +51,8 @@ func forgetfulBackend() restic.Backend {
 		return nil
 	}
 
-	be.StatFn = func(h restic.Handle) (restic.BlobInfo, error) {
-		return restic.BlobInfo{}, errors.New("not found")
+	be.StatFn = func(h restic.Handle) (restic.FileInfo, error) {
+		return restic.FileInfo{}, errors.New("not found")
 	}
 
 	be.RemoveFn = func(t restic.FileType, name string) error {
@@ -86,7 +85,7 @@ func testArchiverDuplication(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	arch := restic.NewArchiver(repo)
+	arch := archiver.New(repo)
 
 	wg := &sync.WaitGroup{}
 	done := make(chan struct{})
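The hunks above mechanically swap the test helpers from backend.ID to restic.ID. For reference, here is the refactored helper stitched back together from the hunks; the trailing return does not appear in the excerpt and is assumed from context:

	// DupID is returned for roughly half of all calls, so that concurrent
	// saves frequently collide on the same blob ID.
	var DupID restic.ID

	func randomID() restic.ID {
		if mrand.Float32() < 0.5 {
			return DupID
		}

		id := restic.ID{}
		_, err := io.ReadFull(rand.Reader, id[:])
		if err != nil {
			panic(err)
		}
		return id // assumed: not shown in the hunk
	}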
@@ -2,6 +2,7 @@ package archiver
 
 import (
 	"os"
+	"restic"
 	"testing"
 
 	"restic/pipe"
@@ -7,10 +7,9 @@ import (
 	"time"
 
 	"restic"
-	"restic/backend"
+	"restic/archiver"
 	"restic/checker"
 	"restic/crypto"
-	"restic/pack"
 	. "restic/test"
 
 	"github.com/pkg/errors"
@@ -101,7 +100,7 @@ func archiveDirectory(b testing.TB) {
 	repo := SetupRepo()
 	defer TeardownRepo(repo)
 
-	arch := restic.NewArchiver(repo)
+	arch := archiver.New(repo)
 
 	_, id, err := arch.Snapshot(nil, []string{BenchArchiveDirectory}, nil)
 	OK(b, err)
@@ -191,48 +190,6 @@ func TestArchiveDedup(t *testing.T) {
 	archiveWithDedup(t)
 }
 
-func BenchmarkLoadTree(t *testing.B) {
-	repo := SetupRepo()
-	defer TeardownRepo(repo)
-
-	if BenchArchiveDirectory == "" {
-		t.Skip("benchdir not set, skipping TestArchiverDedup")
-	}
-
-	// archive a few files
-	arch := restic.NewArchiver(repo)
-	sn, _, err := arch.Snapshot(nil, []string{BenchArchiveDirectory}, nil)
-	OK(t, err)
-	t.Logf("archived snapshot %v", sn.ID())
-
-	list := make([]backend.ID, 0, 10)
-	done := make(chan struct{})
-
-	for _, idx := range repo.Index().All() {
-		for blob := range idx.Each(done) {
-			if blob.Type != restic.TreeBlob {
-				continue
-			}
-
-			list = append(list, blob.ID)
-			if len(list) == cap(list) {
-				close(done)
-				break
-			}
-		}
-	}
-
-	// start benchmark
-	t.ResetTimer()
-
-	for i := 0; i < t.N; i++ {
-		for _, id := range list {
-			_, err := restic.LoadTree(repo, id)
-			OK(t, err)
-		}
-	}
-}
-
 // Saves several identical chunks concurrently and later checks that there are no
 // unreferenced packs in the repository. See also #292 and #358.
 func TestParallelSaveWithDuplication(t *testing.T) {
@@ -248,7 +205,7 @@ func testParallelSaveWithDuplication(t *testing.T, seed int) {
 	dataSizeMb := 128
 	duplication := 7
 
-	arch := restic.NewArchiver(repo)
+	arch := archiver.New(repo)
 	chunks := getRandomData(seed, dataSizeMb*1024*1024)
 
 	errChannels := [](<-chan error){}
@@ -265,7 +222,7 @@ func testParallelSaveWithDuplication(t *testing.T, seed int) {
 		go func(c chunker.Chunk, errChan chan<- error) {
 			barrier <- struct{}{}
 
-			id := backend.Hash(c.Data)
+			id := restic.Hash(c.Data)
 			time.Sleep(time.Duration(id[0]))
 			err := arch.Save(restic.DataBlob, c.Data, id)
 			<-barrier
@@ -301,7 +258,7 @@ func getRandomData(seed int, size int) []chunker.Chunk {
 	return chunks
 }
 
-func createAndInitChecker(t *testing.T, repo Repository) *checker.Checker {
+func createAndInitChecker(t *testing.T, repo restic.Repository) *checker.Checker {
	chkr := checker.New(repo)

	hints, errs := chkr.LoadIndex()
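The barrier send/receive pair in testParallelSaveWithDuplication is a counting semaphore built from a buffered channel: at most cap(barrier) goroutines can be between the send and the receive at any moment. A minimal self-contained sketch of the same pattern (the names and the capacity of 5 are illustrative, not taken from this commit):

	package main

	import (
		"fmt"
		"sync"
	)

	func main() {
		const maxParallel = 5 // assumed capacity; the test sizes its own channel
		barrier := make(chan struct{}, maxParallel)

		var wg sync.WaitGroup
		for i := 0; i < 20; i++ {
			wg.Add(1)
			go func(n int) {
				defer wg.Done()
				barrier <- struct{}{}        // acquire a slot; blocks once maxParallel goroutines run
				defer func() { <-barrier }() // release the slot
				fmt.Println("saving chunk", n)
			}(i)
		}
		wg.Wait()
	}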
@@ -32,11 +32,11 @@ type Checker struct {
 
 	masterIndex *repository.MasterIndex
 
-	repo *repository.Repository
+	repo restic.Repository
 }
 
 // New returns a new checker which runs on repo.
-func New(repo *repository.Repository) *Checker {
+func New(repo restic.Repository) *Checker {
 	c := &Checker{
 		packs: restic.NewIDSet(),
 		blobs: restic.NewIDSet(),
@@ -180,7 +180,7 @@ func (e PackError) Error() string {
 	return "pack " + e.ID.String() + ": " + e.Err.Error()
 }
 
-func packIDTester(repo *repository.Repository, inChan <-chan restic.ID, errChan chan<- error, wg *sync.WaitGroup, done <-chan struct{}) {
+func packIDTester(repo restic.Repository, inChan <-chan restic.ID, errChan chan<- error, wg *sync.WaitGroup, done <-chan struct{}) {
 	debug.Log("Checker.testPackID", "worker start")
 	defer debug.Log("Checker.testPackID", "worker done")
 
@@ -273,7 +273,7 @@ func (e Error) Error() string {
 	return e.Err.Error()
 }
 
-func loadTreeFromSnapshot(repo *repository.Repository, id restic.ID) (restic.ID, error) {
+func loadTreeFromSnapshot(repo restic.Repository, id restic.ID) (restic.ID, error) {
 	sn, err := restic.LoadSnapshot(repo, id)
 	if err != nil {
 		debug.Log("Checker.loadTreeFromSnapshot", "error loading snapshot %v: %v", id.Str(), err)
@@ -289,7 +289,7 @@ func loadTreeFromSnapshot(repo *repository.Repository, id restic.ID) (restic.ID,
 }
 
 // loadSnapshotTreeIDs loads all snapshots from backend and returns the tree IDs.
-func loadSnapshotTreeIDs(repo *repository.Repository) (restic.IDs, []error) {
+func loadSnapshotTreeIDs(repo restic.Repository) (restic.IDs, []error) {
 	var trees struct {
 		IDs restic.IDs
 		sync.Mutex
@@ -349,7 +349,7 @@ type treeJob struct {
 }
 
 // loadTreeWorker loads trees from repo and sends them to out.
-func loadTreeWorker(repo *repository.Repository,
+func loadTreeWorker(repo restic.Repository,
 	in <-chan restic.ID, out chan<- treeJob,
 	done <-chan struct{}, wg *sync.WaitGroup) {
 
@@ -660,7 +660,7 @@ func (c *Checker) CountPacks() uint64 {
 }
 
 // checkPack reads a pack and checks the integrity of all blobs.
-func checkPack(r *repository.Repository, id restic.ID) error {
+func checkPack(r restic.Repository, id restic.ID) error {
 	debug.Log("Checker.checkPack", "checking pack %v", id.Str())
 	h := restic.Handle{FileType: restic.DataFile, Name: id.String()}
 	buf, err := backend.LoadAll(r.Backend(), h, nil)
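Every function in checker.go that took the concrete *repository.Repository now takes the restic.Repository interface. The practical effect, sketched below under the assumption that *repository.Repository satisfies the new interface (which the unchanged call sites imply), is that the checker can run against any implementation, including in-memory test repositories:

	package example

	import (
		"restic"
		"restic/checker"
		"restic/repository"
	)

	// Illustrative compile-time assertion: existing callers keep working
	// because the concrete repository implements restic.Repository.
	var _ restic.Repository = (*repository.Repository)(nil)

	// newChecker accepts the interface, so test doubles work too.
	func newChecker(repo restic.Repository) *checker.Checker {
		return checker.New(repo)
	}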
@@ -8,7 +8,7 @@ import (
 	"testing"
 
 	"restic"
-	"restic/backend"
+	"restic/archiver"
 	"restic/backend/mem"
 	"restic/checker"
 	"restic/repository"
@@ -147,7 +147,7 @@ func TestUnreferencedBlobs(t *testing.T) {
 	snID := "51d249d28815200d59e4be7b3f21a157b864dc343353df9d8e498220c2499b02"
 	OK(t, repo.Backend().Remove(restic.SnapshotFile, snID))
 
-	unusedBlobsBySnapshot := backend.IDs{
+	unusedBlobsBySnapshot := restic.IDs{
 		ParseID("58c748bbe2929fdf30c73262bd8313fe828f8925b05d1d4a87fe109082acb849"),
 		ParseID("988a272ab9768182abfd1fe7d7a7b68967825f0b861d3b36156795832c772235"),
 		ParseID("c01952de4d91da1b1b80bc6e06eaa4ec21523f4853b69dc8231708b9b7ec62d8"),
@@ -212,7 +212,7 @@ func TestDuplicatePacksInIndex(t *testing.T) {
 
 // errorBackend randomly modifies data after reading.
 type errorBackend struct {
-	backend.Backend
+	restic.Backend
 	ProduceErrors bool
 }
 
@@ -244,7 +244,7 @@ func TestCheckerModifiedData(t *testing.T) {
 	repo := repository.New(be)
 	OK(t, repo.Init(TestPassword))
 
-	arch := restic.NewArchiver(repo)
+	arch := archiver.New(repo)
 	_, id, err := arch.Snapshot(nil, []string{"."}, nil)
 	OK(t, err)
 	t.Logf("archived as %v", id.Str())
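The errorBackend change is the classic Go embedding trick: the wrapper inherits every method of the embedded restic.Backend and overrides only what it needs. Only the struct definition appears in this diff; the Load override below is a sketch whose corruption logic is assumed from the comment "errorBackend randomly modifies data after reading":

	// errorBackend delegates everything to the wrapped backend and, when
	// ProduceErrors is set, corrupts loaded data so that the checker's
	// integrity verification must fail.
	type errorBackend struct {
		restic.Backend
		ProduceErrors bool
	}

	func (b errorBackend) Load(h restic.Handle, p []byte, off int64) (int, error) {
		n, err := b.Backend.Load(h, p, off) // call through to the real backend
		if b.ProduceErrors && n > 0 {
			p[0] ^= 1 // flip one bit in the returned data (assumed behavior)
		}
		return n, err
	}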
@@ -5,10 +5,8 @@ import (
 	"fmt"
 	"os"
 	"restic"
-	"restic/backend"
 	"restic/debug"
 	"restic/list"
-	"restic/pack"
 	"restic/worker"
 
 	"github.com/pkg/errors"
@@ -17,27 +15,27 @@ import (
 // Pack contains information about the contents of a pack.
 type Pack struct {
 	Size    int64
-	Entries []pack.Blob
+	Entries []restic.Blob
 }
 
 // Blob contains information about a blob.
 type Blob struct {
 	Size  int64
-	Packs backend.IDSet
+	Packs restic.IDSet
 }
 
 // Index contains information about blobs and packs stored in a repo.
 type Index struct {
-	Packs    map[backend.ID]Pack
-	Blobs    map[pack.Handle]Blob
-	IndexIDs backend.IDSet
+	Packs    map[restic.ID]Pack
+	Blobs    map[restic.BlobHandle]Blob
+	IndexIDs restic.IDSet
 }
 
 func newIndex() *Index {
 	return &Index{
-		Packs:    make(map[backend.ID]Pack),
-		Blobs:    make(map[pack.Handle]Blob),
-		IndexIDs: backend.NewIDSet(),
+		Packs:    make(map[restic.ID]Pack),
+		Blobs:    make(map[restic.BlobHandle]Blob),
+		IndexIDs: restic.NewIDSet(),
 	}
 }
 
@@ -57,7 +55,7 @@ func New(repo restic.Repository, p *restic.Progress) (*Index, error) {
 	for job := range ch {
 		p.Report(restic.Stat{Blobs: 1})
 
-		packID := job.Data.(backend.ID)
+		packID := job.Data.(restic.ID)
 		if job.Error != nil {
 			fmt.Fprintf(os.Stderr, "unable to list pack %v: %v\n", packID.Str(), job.Error)
 			continue
@@ -82,23 +80,23 @@ func New(repo restic.Repository, p *restic.Progress) (*Index, error) {
 const loadIndexParallelism = 20
 
 type packJSON struct {
-	ID    backend.ID `json:"id"`
+	ID    restic.ID  `json:"id"`
 	Blobs []blobJSON `json:"blobs"`
 }
 
 type blobJSON struct {
-	ID     backend.ID    `json:"id"`
-	Type   pack.BlobType `json:"type"`
+	ID     restic.ID       `json:"id"`
+	Type   restic.BlobType `json:"type"`
 	Offset uint `json:"offset"`
 	Length uint `json:"length"`
 }
 
 type indexJSON struct {
-	Supersedes backend.IDs `json:"supersedes,omitempty"`
+	Supersedes restic.IDs  `json:"supersedes,omitempty"`
 	Packs      []*packJSON `json:"packs"`
 }
 
-func loadIndexJSON(repo restic.Repository, id backend.ID) (*indexJSON, error) {
+func loadIndexJSON(repo restic.Repository, id restic.ID) (*indexJSON, error) {
 	debug.Log("index.loadIndexJSON", "process index %v\n", id.Str())
 
 	var idx indexJSON
@@ -120,8 +118,8 @@ func Load(repo restic.Repository, p *restic.Progress) (*Index, error) {
 	done := make(chan struct{})
 	defer close(done)
 
-	supersedes := make(map[backend.ID]backend.IDSet)
-	results := make(map[backend.ID]map[backend.ID]Pack)
+	supersedes := make(map[restic.ID]restic.IDSet)
+	results := make(map[restic.ID]map[restic.ID]Pack)
 
 	index := newIndex()
 
@@ -134,17 +132,17 @@ func Load(repo restic.Repository, p *restic.Progress) (*Index, error) {
 		return nil, err
 	}
 
-	res := make(map[backend.ID]Pack)
-	supersedes[id] = backend.NewIDSet()
+	res := make(map[restic.ID]Pack)
+	supersedes[id] = restic.NewIDSet()
 	for _, sid := range idx.Supersedes {
 		debug.Log("index.Load", "  index %v supersedes %v", id.Str(), sid)
 		supersedes[id].Insert(sid)
 	}
 
 	for _, jpack := range idx.Packs {
-		entries := make([]pack.Blob, 0, len(jpack.Blobs))
+		entries := make([]restic.Blob, 0, len(jpack.Blobs))
 		for _, blob := range jpack.Blobs {
-			entry := pack.Blob{
+			entry := restic.Blob{
 				ID:     blob.ID,
 				Type:   blob.Type,
 				Offset: blob.Offset,
@@ -178,7 +176,7 @@ func Load(repo restic.Repository, p *restic.Progress) (*Index, error) {
 
 // AddPack adds a pack to the index. If this pack is already in the index, an
 // error is returned.
-func (idx *Index) AddPack(id backend.ID, size int64, entries []pack.Blob) error {
+func (idx *Index) AddPack(id restic.ID, size int64, entries []restic.Blob) error {
 	if _, ok := idx.Packs[id]; ok {
 		return errors.Errorf("pack %v already present in the index", id.Str())
 	}
@@ -186,11 +184,11 @@ func (idx *Index) AddPack(id backend.ID, size int64, entries []pack.Blob) error
 	idx.Packs[id] = Pack{Size: size, Entries: entries}
 
 	for _, entry := range entries {
-		h := pack.Handle{ID: entry.ID, Type: entry.Type}
+		h := restic.BlobHandle{ID: entry.ID, Type: entry.Type}
 		if _, ok := idx.Blobs[h]; !ok {
 			idx.Blobs[h] = Blob{
 				Size:  int64(entry.Length),
-				Packs: backend.NewIDSet(),
+				Packs: restic.NewIDSet(),
 			}
 		}
 
@@ -201,13 +199,13 @@ func (idx *Index) AddPack(id backend.ID, size int64, entries []pack.Blob) error
 }
 
 // RemovePack deletes a pack from the index.
-func (idx *Index) RemovePack(id backend.ID) error {
+func (idx *Index) RemovePack(id restic.ID) error {
 	if _, ok := idx.Packs[id]; !ok {
 		return errors.Errorf("pack %v not found in the index", id.Str())
 	}
 
 	for _, blob := range idx.Packs[id].Entries {
-		h := pack.Handle{ID: blob.ID, Type: blob.Type}
+		h := restic.BlobHandle{ID: blob.ID, Type: blob.Type}
 		idx.Blobs[h].Packs.Delete(id)
 
 		if len(idx.Blobs[h].Packs) == 0 {
@@ -222,13 +220,13 @@ func (idx *Index) RemovePack(id backend.ID) error {
 
 // DuplicateBlobs returns a list of blobs that are stored more than once in the
 // repo.
-func (idx *Index) DuplicateBlobs() (dups pack.BlobSet) {
-	dups = pack.NewBlobSet()
-	seen := pack.NewBlobSet()
+func (idx *Index) DuplicateBlobs() (dups restic.BlobSet) {
+	dups = restic.NewBlobSet()
+	seen := restic.NewBlobSet()
 
 	for _, p := range idx.Packs {
 		for _, entry := range p.Entries {
-			h := pack.Handle{ID: entry.ID, Type: entry.Type}
+			h := restic.BlobHandle{ID: entry.ID, Type: entry.Type}
 			if seen.Has(h) {
 				dups.Insert(h)
 			}
@@ -240,8 +238,8 @@ func (idx *Index) DuplicateBlobs() (dups pack.BlobSet) {
 }
 
 // PacksForBlobs returns the set of packs in which the blobs are contained.
-func (idx *Index) PacksForBlobs(blobs pack.BlobSet) (packs backend.IDSet) {
-	packs = backend.NewIDSet()
+func (idx *Index) PacksForBlobs(blobs restic.BlobSet) (packs restic.IDSet) {
+	packs = restic.NewIDSet()
 
 	for h := range blobs {
 		blob, ok := idx.Blobs[h]
@@ -259,8 +257,8 @@ func (idx *Index) PacksForBlobs(blobs pack.BlobSet) (packs backend.IDSet) {
 
 // Location describes the location of a blob in a pack.
 type Location struct {
-	PackID backend.ID
-	pack.Blob
+	PackID restic.ID
+	restic.Blob
 }
 
 // ErrBlobNotFound is return by FindBlob when the blob could not be found in
@@ -268,7 +266,7 @@ type Location struct {
 var ErrBlobNotFound = errors.New("blob not found in index")
 
 // FindBlob returns a list of packs and positions the blob can be found in.
-func (idx *Index) FindBlob(h pack.Handle) ([]Location, error) {
+func (idx *Index) FindBlob(h restic.BlobHandle) ([]Location, error) {
 	blob, ok := idx.Blobs[h]
 	if !ok {
 		return nil, ErrBlobNotFound
@@ -299,8 +297,8 @@ func (idx *Index) FindBlob(h pack.Handle) ([]Location, error) {
 }
 
 // Save writes the complete index to the repo.
-func (idx *Index) Save(repo restic.Repository, supersedes backend.IDs) (backend.ID, error) {
-	packs := make(map[backend.ID][]pack.Blob, len(idx.Packs))
+func (idx *Index) Save(repo restic.Repository, supersedes restic.IDs) (restic.ID, error) {
+	packs := make(map[restic.ID][]restic.Blob, len(idx.Packs))
 	for id, p := range idx.Packs {
 		packs[id] = p.Entries
 	}
@@ -309,7 +307,7 @@ func (idx *Index) Save(repo restic.Repository, supersedes backend.IDs) (backend.
 }
 
 // Save writes a new index containing the given packs.
-func Save(repo restic.Repository, packs map[backend.ID][]pack.Blob, supersedes backend.IDs) (backend.ID, error) {
+func Save(repo restic.Repository, packs map[restic.ID][]restic.Blob, supersedes restic.IDs) (restic.ID, error) {
 	idx := &indexJSON{
 		Supersedes: supersedes,
 		Packs:      make([]*packJSON, 0, len(packs)),
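With blobs now keyed by restic.BlobHandle (an ID plus a blob type), the DuplicateBlobs method above reduces to a two-set scan. The same technique in standalone form, assuming restic.BlobSet keeps the usual set semantics (Has/Insert) the diff relies on:

	package index

	import "restic"

	// findDuplicates mirrors the DuplicateBlobs scan above: the first
	// occurrence of each handle lands in seen, every repeat lands in dups.
	func findDuplicates(handles []restic.BlobHandle) restic.BlobSet {
		dups := restic.NewBlobSet()
		seen := restic.NewBlobSet()
		for _, h := range handles {
			if seen.Has(h) {
				dups.Insert(h)
			}
			seen.Insert(h)
		}
		return dups
	}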
@@ -3,8 +3,6 @@ package index
 import (
 	"math/rand"
 	"restic"
-	"restic/backend"
-	"restic/pack"
 	"restic/repository"
 	. "restic/test"
 	"testing"
@@ -179,7 +177,7 @@ func TestIndexSave(t *testing.T) {
 
 	idx := loadIndex(t, repo)
 
-	packs := make(map[backend.ID][]pack.Blob)
+	packs := make(map[restic.ID][]restic.Blob)
 	for id := range idx.Packs {
 		if rand.Float32() < 0.5 {
 			packs[id] = idx.Packs[id].Entries
@@ -248,7 +246,7 @@ func TestIndexAddRemovePack(t *testing.T) {
 	}
 
 	for _, blob := range blobs {
-		h := pack.Handle{ID: blob.ID, Type: blob.Type}
+		h := restic.BlobHandle{ID: blob.ID, Type: blob.Type}
 		_, err := idx.FindBlob(h)
 		if err == nil {
 			t.Errorf("removed blob %v found in index", h)
@@ -308,7 +306,7 @@ func TestIndexLoadDocReference(t *testing.T) {
 	idx := loadIndex(t, repo)
 
 	blobID := ParseID("d3dc577b4ffd38cc4b32122cabf8655a0223ed22edfd93b353dc0c3f2b0fdf66")
-	locs, err := idx.FindBlob(pack.Handle{ID: blobID, Type: pack.Data})
+	locs, err := idx.FindBlob(restic.BlobHandle{ID: blobID, Type: restic.DataBlob})
 	if err != nil {
 		t.Errorf("FindBlob() returned error %v", err)
 	}
@@ -322,8 +320,8 @@ func TestIndexLoadDocReference(t *testing.T) {
 		t.Errorf("blob IDs are not equal: %v != %v", l.ID, blobID)
 	}
 
-	if l.Type != pack.Data {
-		t.Errorf("want type %v, got %v", pack.Data, l.Type)
+	if l.Type != restic.DataBlob {
+		t.Errorf("want type %v, got %v", restic.DataBlob, l.Type)
 	}
 
 	if l.Offset != 150 {
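The doc-reference test pins down the new lookup key: FindBlob now takes a restic.BlobHandle rather than a pack.Handle. A hedged usage sketch (printLocations is an illustrative helper; Index, Location, and ErrBlobNotFound are the types from this diff, with Location exposing PackID plus the embedded restic.Blob fields):

	package index

	import (
		"fmt"

		"restic"
	)

	// printLocations shows the refactored lookup: build a BlobHandle,
	// ask the index, and read pack/offset from each Location.
	func printLocations(idx *Index, blobID restic.ID) {
		locs, err := idx.FindBlob(restic.BlobHandle{ID: blobID, Type: restic.DataBlob})
		if err != nil {
			fmt.Println("blob not in index:", err) // ErrBlobNotFound for unknown blobs
			return
		}
		for _, l := range locs {
			fmt.Printf("blob %v in pack %v at offset %d\n", l.ID.Str(), l.PackID.Str(), l.Offset)
		}
	}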
@@ -10,7 +10,6 @@ import (
 	"restic"
 	"testing"
 
-	"restic/backend"
 	"restic/backend/mem"
 	"restic/crypto"
 	"restic/pack"
@@ -21,7 +20,7 @@ var testLens = []int{23, 31650, 25860, 10928, 13769, 19862, 5211, 127, 13690, 30
 
 type Buf struct {
 	data []byte
-	id   backend.ID
+	id   restic.ID
 }
 
 func newPack(t testing.TB, k *crypto.Key, lengths []int) ([]Buf, []byte, uint) {
@@ -38,7 +37,7 @@ func newPack(t testing.TB, k *crypto.Key, lengths []int) ([]Buf, []byte, uint) {
 	// pack blobs
 	p := pack.NewPacker(k, nil)
 	for _, b := range bufs {
-		p.Add(pack.Tree, b.id, b.data)
+		p.Add(restic.TreeBlob, b.id, b.data)
 	}
 
 	_, err := p.Finalize()
@@ -56,7 +55,7 @@ func verifyBlobs(t testing.TB, bufs []Buf, k *crypto.Key, rd io.ReaderAt, packSi
 	// header length
 	written += binary.Size(uint32(0))
 	// header
-	written += len(bufs) * (binary.Size(pack.BlobType(0)) + binary.Size(uint32(0)) + backend.IDSize)
+	written += len(bufs) * (binary.Size(restic.BlobType(0)) + binary.Size(uint32(0)) + restic.IDSize)
 	// header crypto
 	written += crypto.Extension
 
@@ -96,11 +95,11 @@ func TestCreatePack(t *testing.T) {
 }
 
 var blobTypeJSON = []struct {
-	t   pack.BlobType
+	t   restic.BlobType
 	res string
 }{
-	{pack.Data, `"data"`},
-	{pack.Tree, `"tree"`},
+	{restic.DataBlob, `"data"`},
+	{restic.TreeBlob, `"tree"`},
 }
 
 func TestBlobTypeJSON(t *testing.T) {
@@ -111,7 +110,7 @@ func TestBlobTypeJSON(t *testing.T) {
 	Equals(t, test.res, string(buf))
 
 	// test unserialize
-	var v pack.BlobType
+	var v restic.BlobType
 	err = json.Unmarshal([]byte(test.res), &v)
 	OK(t, err)
 	Equals(t, test.t, v)
@@ -125,9 +124,9 @@ func TestUnpackReadSeeker(t *testing.T) {
 	bufs, packData, packSize := newPack(t, k, testLens)
 
 	b := mem.New()
-	id := backend.Hash(packData)
+	id := restic.Hash(packData)
 
-	handle := restic.Handle{Type: restic.DataFile, Name: id.String()}
+	handle := restic.Handle{FileType: restic.DataFile, Name: id.String()}
 	OK(t, b.Save(handle, packData))
 	verifyBlobs(t, bufs, k, restic.ReaderAt(b, handle), packSize)
 }
@@ -138,9 +137,9 @@ func TestShortPack(t *testing.T) {
 	bufs, packData, packSize := newPack(t, k, []int{23})
 
 	b := mem.New()
-	id := backend.Hash(packData)
+	id := restic.Hash(packData)
 
-	handle := restic.Handle{Type: restic.DataFile, Name: id.String()}
+	handle := restic.Handle{FileType: restic.DataFile, Name: id.String()}
 	OK(t, b.Save(handle, packData))
-	verifyBlobs(t, bufs, k, backend.ReaderAt(b, handle), packSize)
+	verifyBlobs(t, bufs, k, restic.ReaderAt(b, handle), packSize)
 }
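verifyBlobs recomputes the expected size of a pack file, and the header arithmetic now uses restic types: each header entry stores the blob type, a uint32 length, and the blob ID. A small sketch of just that bookkeeping, assuming restic.IDSize and crypto.Extension as the hunk uses them:

	package pack

	import (
		"encoding/binary"

		"restic"
		"restic/crypto"
	)

	// headerSize mirrors the three lines in verifyBlobs above: the uint32
	// header-length field, one entry (type + length + ID) per blob, and
	// the encryption overhead for the sealed header.
	func headerSize(numBlobs int) int {
		size := binary.Size(uint32(0)) // header length field
		size += numBlobs * (binary.Size(restic.BlobType(0)) + binary.Size(uint32(0)) + restic.IDSize)
		size += crypto.Extension // crypto overhead of the encrypted header
		return size
	}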
@@ -1,5 +1,7 @@
 package restic
 
+import "restic/crypto"
+
 // Repository stores data in a backend. It provides high-level functions and
 // transparently encrypts/decrypts data.
 type Repository interface {
@@ -7,6 +9,8 @@ type Repository interface {
 	// Backend returns the backend used by the repository
 	Backend() Backend
 
+	Key() *crypto.Key
+
 	SetIndex(Index)
 
 	Index() Index
@@ -24,6 +28,7 @@ type Repository interface {
 	LoadJSONPack(BlobType, ID, interface{}) error
 	LoadJSONUnpacked(FileType, ID, interface{}) error
 	LoadBlob(ID, BlobType, []byte) ([]byte, error)
+	LoadAndDecrypt(FileType, ID) ([]byte, error)
 
 	LookupBlobSize(ID, BlobType) (uint, error)
 
@@ -47,4 +52,5 @@ type Lister interface {
 type Index interface {
 	Has(ID, BlobType) bool
 	Lookup(ID, BlobType) ([]PackedBlob, error)
+	Count(BlobType) uint
 }
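The additions to the Repository and Index interfaces (Key, LoadAndDecrypt, Count) are exactly what the checker and index packages in this commit start calling, so those packages can depend on the interface alone. An illustrative consumer using only methods declared above:

	package example

	import (
		"fmt"

		"restic"
	)

	// summarize depends only on the restic.Repository interface, so any
	// implementation (real repository or test double) can be passed in.
	func summarize(repo restic.Repository) {
		idx := repo.Index()
		fmt.Printf("%d data blobs, %d tree blobs\n",
			idx.Count(restic.DataBlob), idx.Count(restic.TreeBlob))
	}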
@@ -534,7 +534,7 @@ func DecodeOldIndex(rd io.Reader) (idx *Index, err error) {
 }
 
 // LoadIndexWithDecoder loads the index and decodes it with fn.
-func LoadIndexWithDecoder(repo *Repository, id restic.ID, fn func(io.Reader) (*Index, error)) (idx *Index, err error) {
+func LoadIndexWithDecoder(repo restic.Repository, id restic.ID, fn func(io.Reader) (*Index, error)) (idx *Index, err error) {
 	debug.Log("LoadIndexWithDecoder", "Loading index %v", id[:8])
 
 	buf, err := repo.LoadAndDecrypt(restic.IndexFile, id)
@@ -5,8 +5,6 @@ import (
 	"restic"
 	"testing"
 
-	"restic/backend"
-	"restic/pack"
 	"restic/repository"
 	. "restic/test"
 )
@@ -24,24 +22,26 @@ func TestIndexSerialize(t *testing.T) {
 
 	// create 50 packs with 20 blobs each
 	for i := 0; i < 50; i++ {
-		packID := backend.RandomID()
+		packID := restic.TestRandomID()
 
 		pos := uint(0)
 		for j := 0; j < 20; j++ {
-			id := backend.RandomID()
+			id := restic.TestRandomID()
 			length := uint(i*100 + j)
-			idx.Store(repository.PackedBlob{
-				Type:   pack.Data,
-				ID:     id,
+			idx.Store(restic.PackedBlob{
+				Blob: restic.Blob{
+					Type:   restic.DataBlob,
+					ID:     id,
+					Offset: pos,
+					Length: length,
+				},
 				PackID: packID,
-				Offset: pos,
-				Length: length,
 			})
 
 			tests = append(tests, testEntry{
 				id:     id,
 				pack:   packID,
-				tpe:    pack.Data,
+				tpe:    restic.DataBlob,
 				offset: pos,
 				length: length,
 			})
@@ -94,24 +94,26 @@ func TestIndexSerialize(t *testing.T) {
 	// add more blobs to idx
 	newtests := []testEntry{}
 	for i := 0; i < 10; i++ {
-		packID := backend.RandomID()
+		packID := restic.TestRandomID()
 
 		pos := uint(0)
 		for j := 0; j < 10; j++ {
-			id := backend.RandomID()
+			id := restic.TestRandomID()
 			length := uint(i*100 + j)
-			idx.Store(repository.PackedBlob{
-				Type:   pack.Data,
-				ID:     id,
+			idx.Store(restic.PackedBlob{
+				Blob: restic.Blob{
+					Type:   restic.DataBlob,
+					ID:     id,
+					Offset: pos,
+					Length: length,
+				},
 				PackID: packID,
-				Offset: pos,
-				Length: length,
 			})
 
 			newtests = append(newtests, testEntry{
 				id:     id,
 				pack:   packID,
-				tpe:    pack.Data,
+				tpe:    restic.DataBlob,
 				offset: pos,
 				length: length,
 			})
@@ -128,7 +130,7 @@ func TestIndexSerialize(t *testing.T) {
 	Assert(t, idx.Final(),
 		"index not final after encoding")
 
-	id := backend.RandomID()
+	id := restic.TestRandomID()
 	OK(t, idx.SetID(id))
 	id2, err := idx.ID()
 	Assert(t, id2.Equal(id),
@@ -165,18 +167,20 @@ func TestIndexSize(t *testing.T) {
 	packs := 200
 	blobs := 100
 	for i := 0; i < packs; i++ {
-		packID := backend.RandomID()
+		packID := restic.TestRandomID()
 
 		pos := uint(0)
 		for j := 0; j < blobs; j++ {
-			id := backend.RandomID()
+			id := restic.TestRandomID()
 			length := uint(i*100 + j)
-			idx.Store(repository.PackedBlob{
-				Type:   pack.Data,
-				ID:     id,
+			idx.Store(restic.PackedBlob{
+				Blob: restic.Blob{
+					Type:   restic.DataBlob,
+					ID:     id,
+					Offset: pos,
+					Length: length,
+				},
 				PackID: packID,
-				Offset: pos,
-				Length: length,
 			})
 
 			pos += length
@@ -257,15 +261,15 @@ var exampleTests = []struct {
 	{
 		ParseID("3ec79977ef0cf5de7b08cd12b874cd0f62bbaf7f07f3497a5b1bbcc8cb39b1ce"),
 		ParseID("73d04e6125cf3c28a299cc2f3cca3b78ceac396e4fcf9575e34536b26782413c"),
-		pack.Data, 0, 25,
+		restic.DataBlob, 0, 25,
 	}, {
 		ParseID("9ccb846e60d90d4eb915848add7aa7ea1e4bbabfc60e573db9f7bfb2789afbae"),
 		ParseID("73d04e6125cf3c28a299cc2f3cca3b78ceac396e4fcf9575e34536b26782413c"),
-		pack.Tree, 38, 100,
+		restic.TreeBlob, 38, 100,
 	}, {
 		ParseID("d3dc577b4ffd38cc4b32122cabf8655a0223ed22edfd93b353dc0c3f2b0fdf66"),
 		ParseID("73d04e6125cf3c28a299cc2f3cca3b78ceac396e4fcf9575e34536b26782413c"),
-		pack.Data, 150, 123,
+		restic.DataBlob, 150, 123,
 	},
 }
 
@@ -275,9 +279,9 @@ var exampleLookupTest = struct {
 }{
 	ParseID("73d04e6125cf3c28a299cc2f3cca3b78ceac396e4fcf9575e34536b26782413c"),
 	map[restic.ID]restic.BlobType{
-		ParseID("3ec79977ef0cf5de7b08cd12b874cd0f62bbaf7f07f3497a5b1bbcc8cb39b1ce"): pack.Data,
-		ParseID("9ccb846e60d90d4eb915848add7aa7ea1e4bbabfc60e573db9f7bfb2789afbae"): pack.Tree,
-		ParseID("d3dc577b4ffd38cc4b32122cabf8655a0223ed22edfd93b353dc0c3f2b0fdf66"): pack.Data,
+		ParseID("3ec79977ef0cf5de7b08cd12b874cd0f62bbaf7f07f3497a5b1bbcc8cb39b1ce"): restic.DataBlob,
+		ParseID("9ccb846e60d90d4eb915848add7aa7ea1e4bbabfc60e573db9f7bfb2789afbae"): restic.TreeBlob,
+		ParseID("d3dc577b4ffd38cc4b32122cabf8655a0223ed22edfd93b353dc0c3f2b0fdf66"): restic.DataBlob,
 	},
 }
 
@@ -349,13 +353,15 @@ func TestIndexPacks(t *testing.T) {
 	packs := restic.NewIDSet()
 
 	for i := 0; i < 20; i++ {
-		packID := backend.RandomID()
-		idx.Store(repository.PackedBlob{
-			Type:   pack.Data,
-			ID:     backend.RandomID(),
+		packID := restic.TestRandomID()
+		idx.Store(restic.PackedBlob{
+			Blob: restic.Blob{
+				Type:   restic.DataBlob,
+				ID:     restic.TestRandomID(),
+				Offset: 0,
+				Length: 23,
+			},
 			PackID: packID,
-			Offset: 0,
-			Length: 23,
 		})
 
 		packs.Insert(packID)
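The idx.Store rewrites in this file all follow one pattern: the flat PackedBlob literal becomes a nested one, which implies restic.PackedBlob now embeds a Blob and adds the containing PackID. The shape below is inferred from those literals (field types follow blobJSON's uint offset/length); it is not shown directly in the diff:

	// Blob: identity and position of a single blob inside a pack.
	type Blob struct {
		Type   BlobType
		ID     ID
		Offset uint
		Length uint
	}

	// PackedBlob: a blob plus the pack file that contains it.
	type PackedBlob struct {
		Blob
		PackID ID
	}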
@@ -7,7 +7,6 @@ import (
 	"restic"
 	"restic/backend/mem"
 	"restic/crypto"
-	"restic/pack"
 	"testing"
 )
 
@@ -64,7 +63,7 @@ func saveFile(t testing.TB, be Saver, filename string, n int) {
 		t.Fatal(err)
 	}
 
-	h := restic.Handle{Type: restic.DataFile, Name: restic.Hash(data).String()}
+	h := restic.Handle{FileType: restic.DataFile, Name: restic.Hash(data).String()}
 
 	err = be.Save(h, data)
 	if err != nil {
@@ -95,7 +94,7 @@ func fillPacks(t testing.TB, rnd *randReader, be Saver, pm *packerManager, buf [
 		t.Fatal(err)
 	}
 
-	n, err := packer.Add(pack.Data, id, buf)
+	n, err := packer.Add(restic.DataBlob, id, buf)
 	if n != l {
 		t.Errorf("Add() returned invalid number of bytes: want %v, got %v", n, l)
 	}
@@ -4,7 +4,6 @@ import (
 	"io"
 	"math/rand"
 	"restic"
-	"restic/pack"
 	"restic/repository"
 	"testing"
 )
@@ -32,18 +31,18 @@ func createRandomBlobs(t testing.TB, repo *repository.Repository, blobs int, pDa
 		)
 
 		if rand.Float32() < pData {
-			tpe = pack.Data
+			tpe = restic.DataBlob
 			length = randomSize(10*1024, 1024*1024) // 10KiB to 1MiB of data
 		} else {
-			tpe = pack.Tree
+			tpe = restic.TreeBlob
 			length = randomSize(1*1024, 20*1024) // 1KiB to 20KiB
 		}
 
 		buf := random(t, length)
 		id := restic.Hash(buf)
 
-		if repo.Index().Has(id, pack.Data) {
-			t.Errorf("duplicate blob %v/%v ignored", id, pack.Data)
+		if repo.Index().Has(id, restic.DataBlob) {
+			t.Errorf("duplicate blob %v/%v ignored", id, restic.DataBlob)
 			continue
 		}
 
@@ -66,14 +65,14 @@ func createRandomBlobs(t testing.TB, repo *repository.Repository, blobs int, pDa
 
 // selectBlobs splits the list of all blobs randomly into two lists. A blob
 // will be contained in the firstone ith probability p.
-func selectBlobs(t *testing.T, repo *repository.Repository, p float32) (list1, list2 pack.BlobSet) {
+func selectBlobs(t *testing.T, repo *repository.Repository, p float32) (list1, list2 restic.BlobSet) {
 	done := make(chan struct{})
 	defer close(done)
 
-	list1 = pack.NewBlobSet()
-	list2 = pack.NewBlobSet()
+	list1 = restic.NewBlobSet()
+	list2 = restic.NewBlobSet()
 
-	blobs := pack.NewBlobSet()
+	blobs := restic.NewBlobSet()
 
 	for id := range repo.List(restic.DataFile, done) {
 		entries, _, err := repo.ListPack(id)
@@ -82,7 +81,7 @@ func selectBlobs(t *testing.T, repo *repository.Repository, p float32) (list1, l
 		}
 
 		for _, entry := range entries {
-			h := pack.Handle{ID: entry.ID, Type: entry.Type}
+			h := restic.BlobHandle{ID: entry.ID, Type: entry.Type}
 			if blobs.Has(h) {
 				t.Errorf("ignoring duplicate blob %v", h)
 				continue
@@ -90,9 +89,9 @@ func selectBlobs(t *testing.T, repo *repository.Repository, p float32) (list1, l
 			blobs.Insert(h)
 
 			if rand.Float32() <= p {
-				list1.Insert(pack.Handle{ID: entry.ID, Type: entry.Type})
+				list1.Insert(restic.BlobHandle{ID: entry.ID, Type: entry.Type})
 			} else {
-				list2.Insert(pack.Handle{ID: entry.ID, Type: entry.Type})
+				list2.Insert(restic.BlobHandle{ID: entry.ID, Type: entry.Type})
 			}
 		}
@@ -113,7 +112,7 @@ func listPacks(t *testing.T, repo *repository.Repository) restic.IDSet {
 	return list
 }
 
-func findPacksForBlobs(t *testing.T, repo *repository.Repository, blobs pack.BlobSet) restic.IDSet {
+func findPacksForBlobs(t *testing.T, repo *repository.Repository, blobs restic.BlobSet) restic.IDSet {
 	packs := restic.NewIDSet()
 
 	idx := repo.Index()
@@ -131,7 +130,7 @@ func findPacksForBlobs(t *testing.T, repo *repository.Repository, blobs pack.Blo
 	return packs
 }
 
-func repack(t *testing.T, repo *repository.Repository, packs restic.IDSet, blobs pack.BlobSet) {
+func repack(t *testing.T, repo *repository.Repository, packs restic.IDSet, blobs restic.BlobSet) {
 	err := repository.Repack(repo, packs, blobs)
 	if err != nil {
 		t.Fatal(err)
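selectBlobs splits the repository's blobs into two sets, putting each blob into the first set with probability p; that gives the repack test a random "keep" set and its complement. The same partitioning technique in standalone form (partition is an illustrative name, not from the commit):

	package repack

	import (
		"math/rand"

		"restic"
	)

	// partition sends each handle to the first set with probability p
	// and to the second set otherwise, mirroring selectBlobs above.
	func partition(handles []restic.BlobHandle, p float32) (restic.BlobSet, restic.BlobSet) {
		list1 := restic.NewBlobSet()
		list2 := restic.NewBlobSet()
		for _, h := range handles {
			if rand.Float32() <= p {
				list1.Insert(h)
			} else {
				list2.Insert(h)
			}
		}
		return list1, list2
	}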
@@ -11,7 +11,6 @@ import (
 	"testing"
 
 	"restic"
-	"restic/pack"
 	"restic/repository"
 	. "restic/test"
 )
@@ -36,7 +35,7 @@ func TestSaveJSON(t *testing.T) {
 	data = append(data, '\n')
 	h := sha256.Sum256(data)
 
-	id, err := repo.SaveJSON(pack.Tree, obj)
+	id, err := repo.SaveJSON(restic.TreeBlob, obj)
 	OK(t, err)
 
 	Assert(t, h == id,
@@ -59,7 +58,7 @@ func BenchmarkSaveJSON(t *testing.B) {
 	t.ResetTimer()
 
 	for i := 0; i < t.N; i++ {
-		id, err := repo.SaveJSON(pack.Tree, obj)
+		id, err := repo.SaveJSON(restic.TreeBlob, obj)
 		OK(t, err)
 
 		Assert(t, h == id,
@@ -82,7 +81,7 @@ func TestSave(t *testing.T) {
 	id := restic.Hash(data)
 
 	// save
-	sid, err := repo.SaveAndEncrypt(pack.Data, data, nil)
+	sid, err := repo.SaveAndEncrypt(restic.DataBlob, data, nil)
 	OK(t, err)
 
 	Equals(t, id, sid)
@@ -91,7 +90,7 @@ func TestSave(t *testing.T) {
 	// OK(t, repo.SaveIndex())
 
 	// read back
-	buf, err := repo.LoadBlob(id, pack.Data, make([]byte, size))
+	buf, err := repo.LoadBlob(id, restic.DataBlob, make([]byte, size))
 	OK(t, err)
 
 	Assert(t, len(buf) == len(data),
@@ -116,14 +115,14 @@ func TestSaveFrom(t *testing.T) {
 	id := restic.Hash(data)
 
 	// save
-	id2, err := repo.SaveAndEncrypt(pack.Data, data, &id)
+	id2, err := repo.SaveAndEncrypt(restic.DataBlob, data, &id)
 	OK(t, err)
 	Equals(t, id, id2)
 
 	OK(t, repo.Flush())
 
 	// read back
-	buf, err := repo.LoadBlob(id, pack.Data, make([]byte, size))
+	buf, err := repo.LoadBlob(id, restic.DataBlob, make([]byte, size))
 	OK(t, err)
 
 	Assert(t, len(buf) == len(data),
@@ -153,7 +152,7 @@ func BenchmarkSaveAndEncrypt(t *testing.B) {
 
 	for i := 0; i < t.N; i++ {
 		// save
-		_, err = repo.SaveAndEncrypt(pack.Data, data, &id)
+		_, err = repo.SaveAndEncrypt(restic.DataBlob, data, &id)
 		OK(t, err)
 	}
 }
@@ -171,7 +170,7 @@ func TestLoadJSONPack(t *testing.T) {
 	OK(t, repo.Flush())
 
 	tree := restic.NewTree()
-	err := repo.LoadJSONPack(pack.Tree, *sn.Tree, &tree)
+	err := repo.LoadJSONPack(restic.TreeBlob, *sn.Tree, &tree)
 	OK(t, err)
 }
 
@@ -192,7 +191,7 @@ func BenchmarkLoadJSONPack(t *testing.B) {
 	t.ResetTimer()
 
 	for i := 0; i < t.N; i++ {
-		err := repo.LoadJSONPack(pack.Tree, *sn.Tree, &tree)
+		err := repo.LoadJSONPack(restic.TreeBlob, *sn.Tree, &tree)
 		OK(t, err)
 	}
 }
@@ -253,7 +252,7 @@ func saveRandomDataBlobs(t testing.TB, repo *repository.Repository, num int, siz
 		_, err := io.ReadFull(rand.Reader, buf)
 		OK(t, err)
 
-		_, err = repo.SaveAndEncrypt(pack.Data, buf, nil)
+		_, err = repo.SaveAndEncrypt(restic.DataBlob, buf, nil)
 		OK(t, err)
 	}
 }