more cleanup

Florian Weingarten 2015-04-29 21:41:51 -04:00
parent 7af7c64403
commit 0d9360a815
7 changed files with 18 additions and 33 deletions

View file

@@ -24,10 +24,12 @@ const (
maxConcurrency = 10
maxConcurrencyPreload = 20
// chunkerBufSize is used in pool.go
chunkerBufSize = 512 * chunker.KiB
)
var archiverAbortOnAllErrors = func(str string, fi os.FileInfo, err error) error { return err }
var archiverAllowAllFiles = func(string, os.FileInfo) bool { return true }
type Archiver struct {
s *server.Server
@@ -37,24 +39,20 @@ type Archiver struct {
Filter func(item string, fi os.FileInfo) bool
}
func NewArchiver(s *server.Server) (*Archiver, error) {
var err error
func NewArchiver(s *server.Server) *Archiver {
arch := &Archiver{
s: s,
blobToken: make(chan struct{}, maxConcurrentBlobs),
}
// fill blob token
for i := 0; i < maxConcurrentBlobs; i++ {
arch.blobToken <- struct{}{}
}
// abort on all errors
arch.Error = func(string, os.FileInfo, error) error { return err }
// allow all files
arch.Filter = func(string, os.FileInfo) bool { return true }
arch.Error = archiverAbortOnAllErrors
arch.Filter = archiverAllowAllFiles
return arch, nil
return arch
}
func (arch *Archiver) Save(t pack.BlobType, id backend.ID, length uint, rd io.Reader) error {
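For readability, since this view has lost the +/- diff markers: the constructor as it reads after this change, reassembled from the two hunks above. The new package-level defaults replace the inline closures (the old inline error callback had unnamed parameters and so returned the enclosing err variable), and NewArchiver no longer returns an error.

    var archiverAbortOnAllErrors = func(str string, fi os.FileInfo, err error) error { return err }
    var archiverAllowAllFiles = func(string, os.FileInfo) bool { return true }

    // NewArchiver returns an Archiver that by default aborts on every error
    // and includes every file; callers can override Error and Filter.
    func NewArchiver(s *server.Server) *Archiver {
        arch := &Archiver{
            s:         s,
            blobToken: make(chan struct{}, maxConcurrentBlobs),
        }

        // fill blob token
        for i := 0; i < maxConcurrentBlobs; i++ {
            arch.blobToken <- struct{}{}
        }

        arch.Error = archiverAbortOnAllErrors
        arch.Filter = archiverAllowAllFiles

        return arch
    }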
@@ -78,22 +76,18 @@ func (arch *Archiver) Save(t pack.BlobType, id backend.ID, length uint, rd io.Reader) error {
}
func (arch *Archiver) SaveTreeJSON(item interface{}) (backend.ID, error) {
// convert to json
data, err := json.Marshal(item)
// append newline
data = append(data, '\n')
if err != nil {
return nil, err
}
data = append(data, '\n')
// check if tree has been saved before
id := backend.Hash(data)
if arch.s.Index().Has(id) {
return id, nil
}
// otherwise save the data
return arch.s.SaveJSON(pack.Tree, item)
}
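Likewise, SaveTreeJSON after the change: the error from json.Marshal is now checked before the trailing newline is appended, and the hash of the serialized tree is used to skip trees the index already knows about.

    func (arch *Archiver) SaveTreeJSON(item interface{}) (backend.ID, error) {
        data, err := json.Marshal(item)
        if err != nil {
            return nil, err
        }
        data = append(data, '\n')

        // check if tree has been saved before
        id := backend.Hash(data)
        if arch.s.Index().Has(id) {
            return id, nil
        }

        return arch.s.SaveJSON(pack.Tree, item)
    }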
@@ -106,7 +100,7 @@ func (arch *Archiver) SaveFile(p *Progress, node *Node) error {
return err
}
// check file again
// check file again, since it could have disappeared by now
fi, err := file.Stat()
if err != nil {
return err
@@ -116,14 +110,12 @@ func (arch *Archiver) SaveFile(p *Progress, node *Node) error {
e2 := arch.Error(node.path, fi, errors.New("file was updated, using new version"))
if e2 == nil {
// create new node
n, err := NodeFromFileInfo(node.path, fi)
if err != nil {
debug.Log("Archiver.SaveFile", "NodeFromFileInfo returned error for %v: %v", node.path, err)
return err
}
// copy node
*node = *n
}
}
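And the updated-file branch of SaveFile, reassembled from the hunk directly above: when the error callback returns nil, the node is rebuilt from the fresh os.FileInfo and copied over the stale one.

    e2 := arch.Error(node.path, fi, errors.New("file was updated, using new version"))
    if e2 == nil {
        n, err := NodeFromFileInfo(node.path, fi)
        if err != nil {
            debug.Log("Archiver.SaveFile", "NodeFromFileInfo returned error for %v: %v", node.path, err)
            return err
        }
        *node = *n
    }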

View file

@@ -121,10 +121,10 @@ func archiveDirectory(b testing.TB) {
key := SetupKey(b, server, "geheim")
server.SetKey(key)
arch, err := restic.NewArchiver(server)
OK(b, err)
arch := restic.NewArchiver(server)
_, id, err := arch.Snapshot(nil, []string{*benchArchiveDirectory}, nil)
OK(b, err)
b.Logf("snapshot archived as %v", id)
}
@@ -238,8 +238,7 @@ func BenchmarkLoadTree(t *testing.B) {
s.SetKey(key)
// archive a few files
arch, err := restic.NewArchiver(s)
OK(t, err)
arch := restic.NewArchiver(s)
sn, _, err := arch.Snapshot(nil, []string{*benchArchiveDirectory}, nil)
OK(t, err)
t.Logf("archived snapshot %v", sn.ID())

View file

@@ -16,8 +16,7 @@ func TestCache(t *testing.T) {
_, err := restic.NewCache(server)
OK(t, err)
arch, err := restic.NewArchiver(server)
OK(t, err)
arch := restic.NewArchiver(server)
// archive some files, this should automatically cache all blobs from the snapshot
_, _, err = arch.Snapshot(nil, []string{*benchArchiveDirectory}, nil)

View file

@@ -252,10 +252,7 @@ func (cmd CmdBackup) Execute(args []string) error {
// return true
// }
arch, err := restic.NewArchiver(s)
if err != nil {
fmt.Fprintf(os.Stderr, "err: %v\n", err)
}
arch := restic.NewArchiver(s)
arch.Error = func(dir string, fi os.FileInfo, err error) error {
// TODO: make ignoring errors configurable
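A minimal caller-side sketch of what this simplification enables: build the archiver without error handling, then optionally swap the default abort-on-error policy for a custom one. The logging policy below is hypothetical, not part of this commit.

    arch := restic.NewArchiver(s)

    // Hypothetical policy: report the error and keep going instead of aborting
    // (returning nil tells the archiver to ignore the error and continue).
    arch.Error = func(dir string, fi os.FileInfo, err error) error {
        fmt.Fprintf(os.Stderr, "error in %v: %v\n", dir, err)
        return nil
    }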

View file

@@ -19,11 +19,11 @@ type Restorer struct {
Filter func(item string, dstpath string, node *Node) bool
}
var abortOnAllErrors = func(str string, node *Node, err error) error { return err }
var restorerAbortOnAllErrors = func(str string, node *Node, err error) error { return err }
// NewRestorer creates a restorer preloaded with the content from the snapshot id.
func NewRestorer(s *server.Server, id backend.ID) (*Restorer, error) {
r := &Restorer{s: s, Error: abortOnAllErrors}
r := &Restorer{s: s, Error: restorerAbortOnAllErrors}
var err error

View file

@@ -50,8 +50,7 @@ func SetupKey(t testing.TB, s *server.Server, password string) *server.Key {
}
func SnapshotDir(t testing.TB, server *server.Server, path string, parent backend.ID) *restic.Snapshot {
arch, err := restic.NewArchiver(server)
OK(t, err)
arch := restic.NewArchiver(server)
sn, _, err := arch.Snapshot(nil, []string{path}, parent)
OK(t, err)
return sn

View file

@@ -22,8 +22,7 @@ func TestWalkTree(t *testing.T) {
server.SetKey(key)
// archive a few files
arch, err := restic.NewArchiver(server)
OK(t, err)
arch := restic.NewArchiver(server)
sn, _, err := arch.Snapshot(nil, dirs, nil)
OK(t, err)