Revert "Fix TestCreateSnapshot, do not generate duplicate data"

This reverts commit 628fb0fb72.
This commit is contained in:
Alexander Neumann 2016-08-02 22:11:55 +02:00
parent 628fb0fb72
commit fc9b27c533
3 changed files with 29 additions and 97 deletions

View file

@@ -63,7 +63,7 @@ func TestArchiveReader(t *testing.T) {
size := int64(rand.Intn(50*1024*1024) + 50*1024*1024) size := int64(rand.Intn(50*1024*1024) + 50*1024*1024)
t.Logf("seed is 0x%016x, size is %v", seed, size) t.Logf("seed is 0x%016x, size is %v", seed, size)
f := fakeFile(seed, size) f := fakeFile(t, seed, size)
sn, id, err := ArchiveReader(repo, nil, f, "fakefile") sn, id, err := ArchiveReader(repo, nil, f, "fakefile")
if err != nil { if err != nil {
@@ -76,7 +76,7 @@ func TestArchiveReader(t *testing.T) {
t.Logf("snapshot saved as %v, tree is %v", id.Str(), sn.Tree.Str()) t.Logf("snapshot saved as %v, tree is %v", id.Str(), sn.Tree.Str())
checkSavedFile(t, repo, *sn.Tree, "fakefile", fakeFile(seed, size)) checkSavedFile(t, repo, *sn.Tree, "fakefile", fakeFile(t, seed, size))
} }
func BenchmarkArchiveReader(t *testing.B) { func BenchmarkArchiveReader(t *testing.B) {
@@ -86,7 +86,7 @@ func BenchmarkArchiveReader(t *testing.B) {
const size = 50 * 1024 * 1024 const size = 50 * 1024 * 1024
buf := make([]byte, size) buf := make([]byte, size)
_, err := io.ReadFull(fakeFile(23, size), buf) _, err := io.ReadFull(fakeFile(t, 23, size), buf)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }

View file

@@ -1,7 +1,6 @@
package restic package restic
import ( import (
"encoding/json"
"fmt" "fmt"
"io" "io"
"math/rand" "math/rand"
@@ -86,21 +85,14 @@ func (rd *randReader) Read(p []byte) (int, error) {
} }
// fakeFile returns a reader which yields deterministic pseudo-random data. // fakeFile returns a reader which yields deterministic pseudo-random data.
func fakeFile(seed, size int64) io.Reader { func fakeFile(t testing.TB, seed, size int64) io.Reader {
return io.LimitReader(newRandReader(rand.New(rand.NewSource(seed))), size) return io.LimitReader(newRandReader(rand.New(rand.NewSource(seed))), size)
} }
type fakeTree struct {
t testing.TB
repo *repository.Repository
knownBlobs backend.IDSet
}
// saveFile reads from rd and saves the blobs in the repository. The list of // saveFile reads from rd and saves the blobs in the repository. The list of
// IDs is returned. // IDs is returned.
func (f fakeTree) saveFile(rd io.Reader) (blobs backend.IDs) { func saveFile(t testing.TB, repo *repository.Repository, rd io.Reader) (blobs backend.IDs) {
blobs = backend.IDs{} ch := chunker.New(rd, repo.Config.ChunkerPolynomial)
ch := chunker.New(rd, f.repo.Config.ChunkerPolynomial)
for { for {
chunk, err := ch.Next(getBuf()) chunk, err := ch.Next(getBuf())
@@ -109,92 +101,47 @@ func (f fakeTree) saveFile(rd io.Reader) (blobs backend.IDs) {
} }
if err != nil { if err != nil {
f.t.Fatalf("unable to save chunk in repo: %v", err) t.Fatalf("unabel to save chunk in repo: %v", err)
} }
id := backend.Hash(chunk.Data) id, err := repo.SaveAndEncrypt(pack.Data, chunk.Data, nil)
if !f.knownBlobs.Has(id) { if err != nil {
_, err := f.repo.SaveAndEncrypt(pack.Data, chunk.Data, &id) t.Fatalf("error saving chunk: %v", err)
if err != nil {
f.t.Fatalf("error saving chunk: %v", err)
}
f.knownBlobs.Insert(id)
} }
blobs = append(blobs, id) blobs = append(blobs, id)
} }
return blobs return blobs
} }
const ( const maxFileSize = 1500000
maxFileSize = 1500000 const maxSeed = 100
maxSeed = 32
maxNodes = 32
)
func (f fakeTree) treeIsKnown(tree *Tree) (bool, backend.ID) { // saveTree saves a tree of fake files in the repo and returns the ID.
data, err := json.Marshal(tree) func saveTree(t testing.TB, repo *repository.Repository, seed int64) backend.ID {
if err != nil {
f.t.Fatalf("json.Marshal(tree) returned error: %v", err)
return false, backend.ID{}
}
data = append(data, '\n')
// check if tree has been saved before
id := backend.Hash(data)
if f.knownBlobs.Has(id) {
return true, id
}
return false, id
}
// save stores a tree of fake files in the repo and returns the ID.
func (f fakeTree) saveTree(seed int64, depth int) backend.ID {
rnd := rand.NewSource(seed) rnd := rand.NewSource(seed)
numNodes := int(rnd.Int63() % maxNodes) numNodes := int(rnd.Int63() % 64)
t.Logf("create %v nodes", numNodes)
var tree Tree var tree Tree
for i := 0; i < numNodes; i++ { for i := 0; i < numNodes; i++ {
seed := rnd.Int63() % maxSeed
// randomly select the type of the node, either tree (p = 1/4) or file (p = 3/4). size := rnd.Int63() % maxFileSize
if depth > 1 && rnd.Int63()%4 == 0 {
treeSeed := rnd.Int63() % maxSeed
id := f.saveTree(treeSeed, depth-1)
node := &Node{
Name: fmt.Sprintf("dir-%v", treeSeed),
Type: "dir",
Mode: 0755,
Subtree: &id,
}
tree.Nodes = append(tree.Nodes, node)
continue
}
fileSeed := rnd.Int63() % maxSeed
fileSize := (maxFileSize / maxSeed) * fileSeed
node := &Node{ node := &Node{
Name: fmt.Sprintf("file-%v", fileSeed), Name: fmt.Sprintf("file-%v", seed),
Type: "file", Type: "file",
Mode: 0644, Mode: 0644,
Size: uint64(fileSize), Size: uint64(size),
} }
node.Content = f.saveFile(fakeFile(fileSeed, fileSize)) node.Content = saveFile(t, repo, fakeFile(t, seed, size))
tree.Nodes = append(tree.Nodes, node) tree.Nodes = append(tree.Nodes, node)
} }
if known, id := f.treeIsKnown(&tree); known { id, err := repo.SaveJSON(pack.Tree, tree)
return id
}
id, err := f.repo.SaveJSON(pack.Tree, tree)
if err != nil { if err != nil {
f.t.Fatal(err) t.Fatal(err)
} }
return id return id
@@ -202,12 +149,8 @@ func (f fakeTree) saveTree(seed int64, depth int) backend.ID {
// TestCreateSnapshot creates a snapshot filled with fake data. The // TestCreateSnapshot creates a snapshot filled with fake data. The
// fake data is generated deterministically from the timestamp `at`, which is // fake data is generated deterministically from the timestamp `at`, which is
// also used as the snapshot's timestamp. The tree's depth can be specified // also used as the snapshot's timestamp.
// with the parameter depth. func TestCreateSnapshot(t testing.TB, repo *repository.Repository, at time.Time) backend.ID {
func TestCreateSnapshot(t testing.TB, repo *repository.Repository, at time.Time, depth int) *Snapshot {
seed := at.Unix()
t.Logf("create fake snapshot at %s with seed %d", at, seed)
fakedir := fmt.Sprintf("fakedir-at-%v", at.Format("2006-01-02 15:04:05")) fakedir := fmt.Sprintf("fakedir-at-%v", at.Format("2006-01-02 15:04:05"))
snapshot, err := NewSnapshot([]string{fakedir}) snapshot, err := NewSnapshot([]string{fakedir})
if err != nil { if err != nil {
@@ -215,13 +158,7 @@ func TestCreateSnapshot(t testing.TB, repo *repository.Repository, at time.Time,
} }
snapshot.Time = at snapshot.Time = at
f := fakeTree{ treeID := saveTree(t, repo, at.UnixNano())
t: t,
repo: repo,
knownBlobs: backend.NewIDSet(),
}
treeID := f.saveTree(seed, depth)
snapshot.Tree = &treeID snapshot.Tree = &treeID
id, err := repo.SaveJSONUnpacked(backend.Snapshot, snapshot) id, err := repo.SaveJSONUnpacked(backend.Snapshot, snapshot)
@@ -229,8 +166,6 @@ func TestCreateSnapshot(t testing.TB, repo *repository.Repository, at time.Time,
t.Fatal(err) t.Fatal(err)
} }
snapshot.id = &id
t.Logf("saved snapshot %v", id.Str()) t.Logf("saved snapshot %v", id.Str())
err = repo.Flush() err = repo.Flush()
@@ -243,5 +178,5 @@ func TestCreateSnapshot(t testing.TB, repo *repository.Repository, at time.Time,
t.Fatal(err) t.Fatal(err)
} }
return snapshot return id
} }

View file

@@ -10,17 +10,14 @@ import (
var testSnapshotTime = time.Unix(1460289341, 207401672) var testSnapshotTime = time.Unix(1460289341, 207401672)
const ( const testCreateSnapshots = 3
testCreateSnapshots = 3
testDepth = 2
)
func TestCreateSnapshot(t *testing.T) { func TestCreateSnapshot(t *testing.T) {
repo, cleanup := repository.TestRepository(t) repo, cleanup := repository.TestRepository(t)
defer cleanup() defer cleanup()
for i := 0; i < testCreateSnapshots; i++ { for i := 0; i < testCreateSnapshots; i++ {
restic.TestCreateSnapshot(t, repo, testSnapshotTime.Add(time.Duration(i)*time.Second), testDepth) restic.TestCreateSnapshot(t, repo, testSnapshotTime.Add(time.Duration(i)*time.Second))
} }
snapshots, err := restic.LoadAllSnapshots(repo) snapshots, err := restic.LoadAllSnapshots(repo)