forked from TrueCloudLab/restic

Fix TestCreateSnapshot, do not generate duplicate data

This commit is contained in:
parent 2de233fe8b
commit 628fb0fb72

3 changed files with 100 additions and 32 deletions
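The fix follows one pattern throughout: hash each piece of generated data before storing it and remember which IDs have already been written, so repeated seeds no longer produce duplicate blobs in the test repository. Below is a minimal standalone sketch of that idea in Go, using crypto/sha256 and a plain map as the known-ID set rather than restic's backend.IDSet and repository API (all names here are illustrative, not part of restic):

package main

import (
	"crypto/sha256"
	"fmt"
)

// dedupStore remembers which content hashes have already been stored.
type dedupStore struct {
	known map[[sha256.Size]byte]bool
}

// save computes the content ID first and only "stores" data whose ID is new,
// mirroring the knownBlobs check added in this commit.
func (s *dedupStore) save(data []byte) (id [sha256.Size]byte, stored bool) {
	id = sha256.Sum256(data)
	if s.known[id] {
		return id, false // already present, skip the duplicate write
	}
	// a real implementation would encrypt and upload the blob here
	s.known[id] = true
	return id, true
}

func main() {
	s := &dedupStore{known: make(map[[sha256.Size]byte]bool)}
	for _, chunk := range [][]byte{[]byte("foo"), []byte("bar"), []byte("foo")} {
		id, stored := s.save(chunk)
		fmt.Printf("%x stored=%v\n", id[:4], stored)
	}
}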
@@ -63,7 +63,7 @@ func TestArchiveReader(t *testing.T) {
 	size := int64(rand.Intn(50*1024*1024) + 50*1024*1024)
 	t.Logf("seed is 0x%016x, size is %v", seed, size)
 
-	f := fakeFile(t, seed, size)
+	f := fakeFile(seed, size)
 
 	sn, id, err := ArchiveReader(repo, nil, f, "fakefile")
 	if err != nil {
@@ -76,7 +76,7 @@ func TestArchiveReader(t *testing.T) {
 
 	t.Logf("snapshot saved as %v, tree is %v", id.Str(), sn.Tree.Str())
 
-	checkSavedFile(t, repo, *sn.Tree, "fakefile", fakeFile(t, seed, size))
+	checkSavedFile(t, repo, *sn.Tree, "fakefile", fakeFile(seed, size))
 }
 
 func BenchmarkArchiveReader(t *testing.B) {
@@ -86,7 +86,7 @@ func BenchmarkArchiveReader(t *testing.B) {
 	const size = 50 * 1024 * 1024
 
 	buf := make([]byte, size)
-	_, err := io.ReadFull(fakeFile(t, 23, size), buf)
+	_, err := io.ReadFull(fakeFile(23, size), buf)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -1,6 +1,7 @@
 package restic
 
 import (
+	"encoding/json"
 	"fmt"
 	"io"
 	"math/rand"
@@ -85,14 +86,21 @@ func (rd *randReader) Read(p []byte) (int, error) {
 }
 
 // fakeFile returns a reader which yields deterministic pseudo-random data.
-func fakeFile(t testing.TB, seed, size int64) io.Reader {
+func fakeFile(seed, size int64) io.Reader {
 	return io.LimitReader(newRandReader(rand.New(rand.NewSource(seed))), size)
 }
 
+type fakeTree struct {
+	t          testing.TB
+	repo       *repository.Repository
+	knownBlobs backend.IDSet
+}
+
 // saveFile reads from rd and saves the blobs in the repository. The list of
 // IDs is returned.
-func saveFile(t testing.TB, repo *repository.Repository, rd io.Reader) (blobs backend.IDs) {
-	ch := chunker.New(rd, repo.Config.ChunkerPolynomial)
+func (f fakeTree) saveFile(rd io.Reader) (blobs backend.IDs) {
+	blobs = backend.IDs{}
+	ch := chunker.New(rd, f.repo.Config.ChunkerPolynomial)
 
 	for {
 		chunk, err := ch.Next(getBuf())
@@ -101,47 +109,92 @@ func saveFile(t testing.TB, repo *repository.Repository, rd io.Reader) (blobs ba
 		}
 
 		if err != nil {
-			t.Fatalf("unabel to save chunk in repo: %v", err)
+			f.t.Fatalf("unable to save chunk in repo: %v", err)
 		}
 
-		id, err := repo.SaveAndEncrypt(pack.Data, chunk.Data, nil)
-		if err != nil {
-			t.Fatalf("error saving chunk: %v", err)
+		id := backend.Hash(chunk.Data)
+		if !f.knownBlobs.Has(id) {
+			_, err := f.repo.SaveAndEncrypt(pack.Data, chunk.Data, &id)
+			if err != nil {
+				f.t.Fatalf("error saving chunk: %v", err)
+			}
+			f.knownBlobs.Insert(id)
 		}
 
 		blobs = append(blobs, id)
 	}
 
 	return blobs
 }
 
-const maxFileSize = 1500000
-const maxSeed = 100
+const (
+	maxFileSize = 1500000
+	maxSeed     = 32
+	maxNodes    = 32
+)
 
-// saveTree saves a tree of fake files in the repo and returns the ID.
-func saveTree(t testing.TB, repo *repository.Repository, seed int64) backend.ID {
+func (f fakeTree) treeIsKnown(tree *Tree) (bool, backend.ID) {
+	data, err := json.Marshal(tree)
+	if err != nil {
+		f.t.Fatalf("json.Marshal(tree) returned error: %v", err)
+		return false, backend.ID{}
+	}
+	data = append(data, '\n')
+
+	// check if tree has been saved before
+	id := backend.Hash(data)
+	if f.knownBlobs.Has(id) {
+		return true, id
+	}
+
+	return false, id
+}
+
+// save stores a tree of fake files in the repo and returns the ID.
+func (f fakeTree) saveTree(seed int64, depth int) backend.ID {
 	rnd := rand.NewSource(seed)
-	numNodes := int(rnd.Int63() % 64)
-	t.Logf("create %v nodes", numNodes)
+	numNodes := int(rnd.Int63() % maxNodes)
 
 	var tree Tree
 	for i := 0; i < numNodes; i++ {
-		seed := rnd.Int63() % maxSeed
-		size := rnd.Int63() % maxFileSize
 
-		node := &Node{
-			Name: fmt.Sprintf("file-%v", seed),
-			Type: "file",
-			Mode: 0644,
-			Size: uint64(size),
+		// randomly select the type of the node, either tree (p = 1/4) or file (p = 3/4).
+		if depth > 1 && rnd.Int63()%4 == 0 {
+			treeSeed := rnd.Int63() % maxSeed
+			id := f.saveTree(treeSeed, depth-1)
+
+			node := &Node{
+				Name:    fmt.Sprintf("dir-%v", treeSeed),
+				Type:    "dir",
+				Mode:    0755,
+				Subtree: &id,
+			}
+
+			tree.Nodes = append(tree.Nodes, node)
+			continue
 		}
 
-		node.Content = saveFile(t, repo, fakeFile(t, seed, size))
+		fileSeed := rnd.Int63() % maxSeed
+		fileSize := (maxFileSize / maxSeed) * fileSeed
+
+		node := &Node{
+			Name: fmt.Sprintf("file-%v", fileSeed),
+			Type: "file",
+			Mode: 0644,
+			Size: uint64(fileSize),
+		}
+
+		node.Content = f.saveFile(fakeFile(fileSeed, fileSize))
 		tree.Nodes = append(tree.Nodes, node)
 	}
 
-	id, err := repo.SaveJSON(pack.Tree, tree)
+	if known, id := f.treeIsKnown(&tree); known {
+		return id
+	}
+
+	id, err := f.repo.SaveJSON(pack.Tree, tree)
 	if err != nil {
-		t.Fatal(err)
+		f.t.Fatal(err)
 	}
 
 	return id
@@ -149,8 +202,12 @@ func saveTree(t testing.TB, repo *repository.Repository, seed int64) backend.ID
 
 // TestCreateSnapshot creates a snapshot filled with fake data. The
 // fake data is generated deterministically from the timestamp `at`, which is
-// also used as the snapshot's timestamp.
-func TestCreateSnapshot(t testing.TB, repo *repository.Repository, at time.Time) backend.ID {
+// also used as the snapshot's timestamp. The tree's depth can be specified
+// with the parameter depth.
+func TestCreateSnapshot(t testing.TB, repo *repository.Repository, at time.Time, depth int) *Snapshot {
+	seed := at.Unix()
+	t.Logf("create fake snapshot at %s with seed %d", at, seed)
+
 	fakedir := fmt.Sprintf("fakedir-at-%v", at.Format("2006-01-02 15:04:05"))
 	snapshot, err := NewSnapshot([]string{fakedir})
 	if err != nil {
@@ -158,7 +215,13 @@ func TestCreateSnapshot(t testing.TB, repo *repository.Repository, at time.Time)
 	}
 	snapshot.Time = at
 
-	treeID := saveTree(t, repo, at.UnixNano())
+	f := fakeTree{
+		t:          t,
+		repo:       repo,
+		knownBlobs: backend.NewIDSet(),
+	}
+
+	treeID := f.saveTree(seed, depth)
 	snapshot.Tree = &treeID
 
 	id, err := repo.SaveJSONUnpacked(backend.Snapshot, snapshot)
@@ -166,6 +229,8 @@ func TestCreateSnapshot(t testing.TB, repo *repository.Repository, at time.Time)
 		t.Fatal(err)
 	}
 
+	snapshot.id = &id
+
 	t.Logf("saved snapshot %v", id.Str())
 
 	err = repo.Flush()
@@ -178,5 +243,5 @@ func TestCreateSnapshot(t testing.TB, repo *repository.Repository, at time.Time)
 		t.Fatal(err)
 	}
 
-	return id
+	return snapshot
 }
@@ -10,14 +10,17 @@ import (
 
 var testSnapshotTime = time.Unix(1460289341, 207401672)
 
-const testCreateSnapshots = 3
+const (
+	testCreateSnapshots = 3
+	testDepth           = 2
+)
 
 func TestCreateSnapshot(t *testing.T) {
 	repo, cleanup := repository.TestRepository(t)
 	defer cleanup()
 
 	for i := 0; i < testCreateSnapshots; i++ {
-		restic.TestCreateSnapshot(t, repo, testSnapshotTime.Add(time.Duration(i)*time.Second))
+		restic.TestCreateSnapshot(t, repo, testSnapshotTime.Add(time.Duration(i)*time.Second), testDepth)
 	}
 
 	snapshots, err := restic.LoadAllSnapshots(repo)
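With the new signature in place, tests pass a tree depth and get back the *Snapshot rather than just its ID, as the updated test above shows. A small illustrative test using the new call follows; the test name and import paths are assumptions, while repository.TestRepository, restic.TestCreateSnapshot, and sn.Tree are the names from the diff:

package restic_test

import (
	"testing"
	"time"

	"restic"            // placeholder import path for the restic package
	"restic/repository" // placeholder import path for the repository package
)

// TestCreateDeepSnapshot is a hypothetical example, not part of this commit.
func TestCreateDeepSnapshot(t *testing.T) {
	repo, cleanup := repository.TestRepository(t)
	defer cleanup()

	at := time.Unix(1460289341, 207401672)

	// A depth greater than 1 lets saveTree recurse, so the fake snapshot
	// contains subdirectories as well as files.
	sn := restic.TestCreateSnapshot(t, repo, at, 3)
	t.Logf("created fake snapshot, tree is %v", sn.Tree.Str())
}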