forked from TrueCloudLab/restic

Add tests for Archiver.Preload() and a few more

parent 3bb2aba141 · commit f214dce87c

3 changed files with 132 additions and 11 deletions
@@ -143,7 +143,7 @@ func BenchmarkArchiveDirectory(b *testing.B) {
     b.Logf("snapshot archived as %v", id)
 }

-func snapshot(t *testing.T, server restic.Server, path string) *restic.Snapshot {
+func snapshot(t testing.TB, server restic.Server, path string) *restic.Snapshot {
     arch, err := restic.NewArchiver(server, nil)
     ok(t, err)
     ok(t, arch.Preload())
@@ -152,7 +152,7 @@ func snapshot(t *testing.T, server restic.Server, path string) *restic.Snapshot
     return sn
 }

-func countBlobs(t *testing.T, server restic.Server) int {
+func countBlobs(t testing.TB, server restic.Server) int {
     blobs := 0
     err := server.EachID(backend.Tree, func(id backend.ID) {
         tree, err := restic.LoadTree(server, id)
@@ -165,7 +165,7 @@ func countBlobs(t *testing.T, server restic.Server) int {
     return blobs
 }

-func TestArchiverPreload(t *testing.T) {
+func archiveWithPreload(t testing.TB) {
     if *benchArchiveDirectory == "" {
         t.Skip("benchdir not set, skipping TestArchiverPreload")
     }
@@ -177,7 +177,7 @@ func TestArchiverPreload(t *testing.T) {

     // archive a few files
     sn := snapshot(t, server, *benchArchiveDirectory)
-    t.Logf("archived snapshot %v", sn.ID)
+    t.Logf("archived snapshot %v", sn.ID())

     // get archive stats
     blobsBefore := countBlobs(t, server)
@@ -185,15 +185,51 @@ func TestArchiverPreload(t *testing.T) {

     // archive the same files again
     sn2 := snapshot(t, server, *benchArchiveDirectory)
-    t.Logf("archived snapshot %v", sn2.ID)
+    t.Logf("archived snapshot %v", sn2.ID())

     // get archive stats
     blobsAfter := countBlobs(t, server)
     t.Logf("found %v blobs", blobsAfter)

-    // if there are more than 10% more blobs, something is wrong
-    if blobsAfter > (blobsBefore + blobsBefore/10) {
+    // if there are more than 50% more blobs, something is wrong
+    if blobsAfter > (blobsBefore + blobsBefore/2) {
         t.Fatalf("TestArchiverPreload: too many blobs in repository: before %d, after %d, threshhold %d",
-            blobsBefore, blobsAfter, (blobsBefore + blobsBefore/10))
+            blobsBefore, blobsAfter, (blobsBefore + blobsBefore/2))
     }
 }
+
+func TestArchivePreload(t *testing.T) {
+    archiveWithPreload(t)
+}
+
+func BenchmarkArchivePreload(b *testing.B) {
+    archiveWithPreload(b)
+}
+
+func BenchmarkPreload(t *testing.B) {
+    if *benchArchiveDirectory == "" {
+        t.Skip("benchdir not set, skipping TestArchiverPreload")
+    }
+
+    be := setupBackend(t)
+    defer teardownBackend(t, be)
+    key := setupKey(t, be, "geheim")
+    server := restic.NewServerWithKey(be, key)
+
+    // archive a few files
+    arch, err := restic.NewArchiver(server, nil)
+    ok(t, err)
+    sn, _, err := arch.Snapshot(*benchArchiveDirectory, nil)
+    ok(t, err)
+    t.Logf("archived snapshot %v", sn.ID())
+
+    // start benchmark
+    t.ResetTimer()
+
+    for i := 0; i < t.N; i++ {
+        // create new archiver and preload
+        arch2, err := restic.NewArchiver(server, nil)
+        ok(t, err)
+        ok(t, arch2.Preload())
+    }
+}
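The archiver test helpers above now take testing.TB, the interface satisfied by both *testing.T and *testing.B, which is what lets the renamed archiveWithPreload drive the new TestArchivePreload and BenchmarkArchivePreload wrappers. The duplicate-blob check is also loosened from 10% to 50%: with 200 blobs after the first archive run, the second run may now add up to 100 more blobs before the test fails, rather than 20. A minimal sketch of the testing.TB pattern in isolation (doWork, TestWork, and BenchmarkWork are illustrative names, not part of restic):

// example_test.go — a minimal sketch of sharing a helper between tests and benchmarks
package example

import "testing"

// doWork stands in for a helper like snapshot() or archiveWithPreload():
// it accepts testing.TB so both tests and benchmarks can call it.
func doWork(t testing.TB) {
    t.Log("shared setup and assertions go here")
}

// A plain test passes its *testing.T.
func TestWork(t *testing.T) {
    doWork(t)
}

// A benchmark reuses the same helper with its *testing.B.
func BenchmarkWork(b *testing.B) {
    for i := 0; i < b.N; i++ {
        doWork(b)
    }
}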
server.go (31 changed lines)
@@ -1,6 +1,7 @@
 package restic

 import (
+    "bytes"
     "compress/zlib"
     "crypto/sha256"
     "encoding/json"
@@ -107,6 +108,28 @@ func (s Server) LoadJSON(t backend.Type, blob Blob, item interface{}) error {
     return s.LoadJSONID(t, blob.Storage, item)
 }

+var (
+    zEmptyString       = []byte("x\x9C\x03\x00\x00\x00\x00\x01")
+    zEmptyStringReader = bytes.NewReader(zEmptyString)
+)
+
+var zReaderPool = sync.Pool{
+    New: func() interface{} {
+        zEmptyStringReader.Seek(0, 0)
+        rd, err := zlib.NewReader(zEmptyStringReader)
+        if err != nil {
+            // shouldn't happen
+            panic(err)
+        }
+        return rd
+    },
+}
+
+type zReader interface {
+    io.ReadCloser
+    zlib.Resetter
+}
+
 // LoadJSONID calls Load() to get content from the backend and afterwards calls
 // json.Unmarshal on the item.
 func (s Server) LoadJSONID(t backend.Type, storageID backend.ID, item interface{}) error {
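zlib.NewReader has to parse a valid stream header before it returns, so the pool's New function above cannot create a reader from nothing: it seeds each new reader with zEmptyString, the zlib encoding of the empty string, and the reader is later re-pointed at real data with Reset (see the next hunk). A small sketch, not restic code, showing where those eight bytes come from using only the standard library:

// reproduce the zEmptyString bytes by compressing zero bytes
package main

import (
    "bytes"
    "compress/zlib"
    "fmt"
)

func main() {
    // Compressing nothing yields: the zlib header (0x78 0x9c at the default
    // level), one empty final deflate block (0x03 0x00), and the Adler-32
    // checksum of "" (which is 1, written big-endian as 00 00 00 01).
    var buf bytes.Buffer
    zw := zlib.NewWriter(&buf)
    zw.Close()
    fmt.Printf("%q\n", buf.Bytes()) // expected: "x\x9c\x03\x00\x00\x00\x00\x01"
}

The escapes print in lower case ("\x9c"), which is the same byte as the "\x9C" in the literal above.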
@@ -125,8 +148,12 @@ func (s Server) LoadJSONID(t backend.Type, storageID backend.ID, item interface{
     }

     // unzip
-    unzipRd, err := zlib.NewReader(decryptRd)
-    defer unzipRd.Close()
+    unzipRd := zReaderPool.Get().(zReader)
+    err = unzipRd.Reset(decryptRd, nil)
+    defer func() {
+        unzipRd.Close()
+        zReaderPool.Put(unzipRd)
+    }()
     if err != nil {
         return err
     }
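With the pool in place, LoadJSONID borrows a decompressor via Get, points it at the decrypting reader with Reset, and the deferred function closes it and returns it to the pool. The zReader interface exists because sync.Pool hands back an interface{} and the caller needs both io.ReadCloser and zlib.Resetter. A rough standalone sketch of the same borrow/reset/return pattern (identifiers here are illustrative, not restic's):

// pooled zlib readers: borrow, Reset onto a new source, return to the pool
package main

import (
    "bytes"
    "compress/zlib"
    "fmt"
    "io"
    "sync"
)

// resetReader is what the pool stores: a zlib reader that can be re-pointed
// at a new source without allocating fresh decompressor state.
type resetReader interface {
    io.ReadCloser
    zlib.Resetter
}

var seed = []byte("x\x9c\x03\x00\x00\x00\x00\x01") // zlib stream of ""

var pool = sync.Pool{
    New: func() interface{} {
        // A zlib reader can only be constructed from a valid stream,
        // so seed it with the compressed empty string.
        rd, err := zlib.NewReader(bytes.NewReader(seed))
        if err != nil {
            panic(err) // cannot happen for the fixed seed
        }
        return rd
    },
}

// decompress inflates src using a pooled reader.
func decompress(src io.Reader) ([]byte, error) {
    rd := pool.Get().(resetReader)
    err := rd.Reset(src, nil)
    defer func() {
        rd.Close()
        pool.Put(rd)
    }()
    if err != nil {
        return nil, err
    }
    return io.ReadAll(rd)
}

func main() {
    // Round-trip a small payload through zlib and the pooled reader.
    var buf bytes.Buffer
    zw := zlib.NewWriter(&buf)
    zw.Write([]byte("hello, pool"))
    zw.Close()

    out, err := decompress(&buf)
    if err != nil {
        panic(err)
    }
    fmt.Println(string(out)) // hello, pool
}

Reusing readers this way avoids allocating new zlib state on every LoadJSONID call, which is presumably the motivation for the change, given the BenchmarkLoadJSONID added in the next file.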
@@ -136,9 +136,67 @@ func TestServerStats(t *testing.T) {

     // archive a few files
     sn := snapshot(t, server, *benchArchiveDirectory)
-    t.Logf("archived snapshot %v", sn.ID)
+    t.Logf("archived snapshot %v", sn.ID())

     stats, err := server.Stats()
     ok(t, err)
     t.Logf("stats: %v", stats)
 }
+
+func TestLoadJSONID(t *testing.T) {
+    if *benchArchiveDirectory == "" {
+        t.Skip("benchdir not set, skipping TestServerStats")
+    }
+
+    be := setupBackend(t)
+    defer teardownBackend(t, be)
+    key := setupKey(t, be, "geheim")
+    server := restic.NewServerWithKey(be, key)
+
+    // archive a few files
+    sn := snapshot(t, server, *benchArchiveDirectory)
+    t.Logf("archived snapshot %v", sn.ID())
+
+    // benchmark loading first tree
+    list, err := server.List(backend.Tree)
+    ok(t, err)
+    assert(t, len(list) > 0,
+        "no Trees in repository found")
+
+    treeID := list[0]
+
+    tree := restic.NewTree()
+    err = server.LoadJSONID(backend.Tree, treeID, &tree)
+    ok(t, err)
+}
+
+func BenchmarkLoadJSONID(t *testing.B) {
+    if *benchArchiveDirectory == "" {
+        t.Skip("benchdir not set, skipping TestServerStats")
+    }
+
+    be := setupBackend(t)
+    defer teardownBackend(t, be)
+    key := setupKey(t, be, "geheim")
+    server := restic.NewServerWithKey(be, key)
+
+    // archive a few files
+    sn := snapshot(t, server, *benchArchiveDirectory)
+    t.Logf("archived snapshot %v", sn.ID())
+
+    // benchmark loading first tree
+    list, err := server.List(backend.Tree)
+    ok(t, err)
+    assert(t, len(list) > 0,
+        "no Trees in repository found")
+
+    t.ResetTimer()
+
+    tree := restic.NewTree()
+    for i := 0; i < t.N; i++ {
+        for _, treeID := range list {
+            err = server.LoadJSONID(backend.Tree, treeID, &tree)
+            ok(t, err)
+        }
+    }
+}
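Both BenchmarkPreload above and BenchmarkLoadJSONID here follow the usual go test benchmark shape: expensive setup once, b.ResetTimer() so the setup is excluded from the measurement, then the measured work repeated b.N times. They can be run with something like go test -bench 'Preload|LoadJSONID', together with whatever flag the test package registers for *benchArchiveDirectory. A generic skeleton of that shape, not restic code:

// a minimal sketch of the setup / ResetTimer / b.N benchmark structure
package example

import (
    "testing"
    "time"
)

// BenchmarkShape mirrors the structure of BenchmarkPreload and
// BenchmarkLoadJSONID: one-time setup, ResetTimer, then b.N iterations.
func BenchmarkShape(b *testing.B) {
    // stand-in for the expensive setup (backend, key, first snapshot)
    time.Sleep(10 * time.Millisecond)

    b.ResetTimer() // exclude the setup from the timing
    for i := 0; i < b.N; i++ {
        // stand-in for the operation under test (Preload / LoadJSONID)
        _ = make([]byte, 1<<10)
    }
}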