Merge pull request #632 from restic/rework-debug

Rework debug message printing
This commit is contained in:
Alexander Neumann 2016-09-28 21:11:02 +02:00
commit 3114d41cb7
42 changed files with 491 additions and 462 deletions
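
At its core, the change drops the caller-supplied tag that used to be the
first argument of `debug.Log`. The debug package now derives function and
file names from the call site (via `runtime.Caller`, see `getPosition` in
the debug package diff below) and filters messages through the new
`DEBUG_FUNCS` and `DEBUG_FILES` environment variables. A typical call site
changes like this:

    // before: explicit tag as first argument
    debug.Log("unlockRepo", "unlocking repository")

    // after: tag dropped, location derived from the call site
    debug.Log("unlocking repository")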

View file

@ -544,6 +544,28 @@ If you suspect that there is a bug, you can have a look at the debug log.
Please be aware that the debug log might contain sensitive information such as
file and directory names.

The debug log will always contain all log messages restic generates. You can
also instruct restic to print some or all debug messages to stderr. These
messages can be limited to, for example, a list of source files or a list of
patterns for function names. The patterns are globbing patterns (see the
documentation for [`path.Glob`](https://golang.org/pkg/path/#Glob)); multiple
patterns are separated by commas. Patterns are case sensitive.

Printing all log messages to the console can be achieved by setting the file
filter to `*`:

    $ DEBUG_FILES=* restic check

If you want restic to print only the debug log messages from the files
`main.go` and `lock.go`, set the environment variable `DEBUG_FILES` like this:

    $ DEBUG_FILES=main.go,lock.go restic check

The following command line instructs restic to only print debug statements
originating in functions that match the pattern `*unlock*` (case sensitive):

    $ DEBUG_FUNCS=*unlock* restic check
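
Internally, these filters are plain comma-separated glob patterns matched with
Go's `path` package. The following standalone sketch (illustrative only, not
restic's actual code; the function name `matchesAny` is made up) shows the
matching semantics:

    package main

    import (
        "fmt"
        "path"
        "strings"
    )

    // matchesAny reports whether key matches at least one of the
    // comma-separated glob patterns, which is how the values of
    // DEBUG_FILES and DEBUG_FUNCS select debug messages.
    func matchesAny(patterns, key string) bool {
        for _, pat := range strings.Split(patterns, ",") {
            if ok, err := path.Match(strings.TrimSpace(pat), key); err == nil && ok {
                return true
            }
        }
        return false
    }

    func main() {
        fmt.Println(matchesAny("main.go,lock.go", "lock.go")) // true
        fmt.Println(matchesAny("*unlock*", "unlockRepo"))     // true
        fmt.Println(matchesAny("*unlock*", "lockRepository")) // false
    }
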
# Under the hood: Browse repository objects
Internally, a repository stores data of several different types described in the [design documentation](https://github.com/restic/restic/blob/master/doc/Design.md). You can `list` objects such as blobs, packs, index, snapshots, keys or locks with the following command:
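
For instance, an invocation like the following lists the IDs of all snapshots
(an illustrative command line; repository location and password options are
omitted here):

    $ restic list snapshots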

View file

@ -60,7 +60,7 @@ func RunCleanupHandlers() {
// CleanupHandler handles the SIGINT signal.
func CleanupHandler(c <-chan os.Signal) {
for s := range c {
debug.Log("CleanupHandler", "signal %v received, cleaning up", s)
debug.Log("signal %v received, cleaning up", s)
fmt.Printf("%sInterrupt received, cleaning up\n", ClearLine())
RunCleanupHandlers()
fmt.Println("exiting")

View file

@ -270,7 +270,7 @@ func runBackup(opts BackupOptions, gopts GlobalOptions, args []string) error {
if err != nil {
return err
}
debug.Log("backup.Execute", "allowed devices: %v\n", allowedDevs)
debug.Log("allowed devices: %v\n", allowedDevs)
}
repo, err := OpenRepository(gopts)
@ -342,7 +342,7 @@ func runBackup(opts BackupOptions, gopts GlobalOptions, args []string) error {
}
if matched {
debug.Log("backup.Execute", "path %q excluded by a filter", item)
debug.Log("path %q excluded by a filter", item)
return false
}
@ -358,7 +358,7 @@ func runBackup(opts BackupOptions, gopts GlobalOptions, args []string) error {
}
_, found := allowedDevs[id]
if !found {
debug.Log("backup.Execute", "path %q on disallowed device %d", item, id)
debug.Log("path %q on disallowed device %d", item, id)
return false
}

View file

@ -185,16 +185,16 @@ func runCat(gopts GlobalOptions, args []string) error {
return errors.Fatal("blob not found")
case "tree":
debug.Log("cat", "cat tree %v", id.Str())
debug.Log("cat tree %v", id.Str())
tree, err := repo.LoadTree(id)
if err != nil {
debug.Log("cat", "unable to load tree %v: %v", id.Str(), err)
debug.Log("unable to load tree %v: %v", id.Str(), err)
return err
}
buf, err := json.MarshalIndent(&tree, "", " ")
if err != nil {
debug.Log("cat", "error json.MarshalIndent(): %v", err)
debug.Log("error json.MarshalIndent(): %v", err)
return err
}

View file

@ -76,7 +76,7 @@ func parseTime(str string) (time.Time, error) {
}
func findInTree(repo *repository.Repository, pat findPattern, id restic.ID, path string) ([]findResult, error) {
debug.Log("restic.find", "checking tree %v\n", id)
debug.Log("checking tree %v\n", id)
tree, err := repo.LoadTree(id)
if err != nil {
return nil, err
@ -84,7 +84,7 @@ func findInTree(repo *repository.Repository, pat findPattern, id restic.ID, path
results := []findResult{}
for _, node := range tree.Nodes {
debug.Log("restic.find", " testing entry %q\n", node.Name)
debug.Log(" testing entry %q\n", node.Name)
m, err := filepath.Match(pat.pattern, node.Name)
if err != nil {
@ -92,20 +92,20 @@ func findInTree(repo *repository.Repository, pat findPattern, id restic.ID, path
}
if m {
debug.Log("restic.find", " pattern matches\n")
debug.Log(" pattern matches\n")
if !pat.oldest.IsZero() && node.ModTime.Before(pat.oldest) {
debug.Log("restic.find", " ModTime is older than %s\n", pat.oldest)
debug.Log(" ModTime is older than %s\n", pat.oldest)
continue
}
if !pat.newest.IsZero() && node.ModTime.After(pat.newest) {
debug.Log("restic.find", " ModTime is newer than %s\n", pat.newest)
debug.Log(" ModTime is newer than %s\n", pat.newest)
continue
}
results = append(results, findResult{node: node, path: path})
} else {
debug.Log("restic.find", " pattern does not match\n")
debug.Log(" pattern does not match\n")
}
if node.Type == "dir" {
@ -122,7 +122,7 @@ func findInTree(repo *repository.Repository, pat findPattern, id restic.ID, path
}
func findInSnapshot(repo *repository.Repository, pat findPattern, id restic.ID) error {
debug.Log("restic.find", "searching in snapshot %s\n for entries within [%s %s]", id.Str(), pat.oldest, pat.newest)
debug.Log("searching in snapshot %s\n for entries within [%s %s]", id.Str(), pat.oldest, pat.newest)
sn, err := restic.LoadSnapshot(repo, id)
if err != nil {

View file

@ -44,8 +44,8 @@ func init() {
}
func mount(opts MountOptions, gopts GlobalOptions, mountpoint string) error {
debug.Log("mount", "start mount")
defer debug.Log("mount", "finish mount")
debug.Log("start mount")
defer debug.Log("finish mount")
repo, err := OpenRepository(gopts)
if err != nil {
@ -79,7 +79,7 @@ func mount(opts MountOptions, gopts GlobalOptions, mountpoint string) error {
root := fs.Tree{}
root.Add("snapshots", fuse.NewSnapshotsDir(repo, opts.OwnerRoot))
debug.Log("mount", "serving mount at %v", mountpoint)
debug.Log("serving mount at %v", mountpoint)
err = fs.Serve(c, &root)
if err != nil {
return err
@ -101,7 +101,7 @@ func runMount(opts MountOptions, gopts GlobalOptions, args []string) error {
mountpoint := args[0]
AddCleanupHandler(func() error {
debug.Log("mount", "running umount cleanup handler for mount at %v", mountpoint)
debug.Log("running umount cleanup handler for mount at %v", mountpoint)
err := umount(mountpoint)
if err != nil {
Warnf("unable to umount (maybe already umounted?): %v\n", err)

View file

@ -147,14 +147,14 @@ func runPrune(gopts GlobalOptions) error {
bar = newProgressMax(!gopts.Quiet, uint64(len(snapshots)), "snapshots")
bar.Start()
for _, sn := range snapshots {
debug.Log("CmdPrune.Execute", "process snapshot %v", sn.ID().Str())
debug.Log("process snapshot %v", sn.ID().Str())
err = restic.FindUsedBlobs(repo, *sn.Tree, usedBlobs, seenBlobs)
if err != nil {
return err
}
debug.Log("CmdPrune.Execute", "found %v blobs for snapshot %v", sn.ID().Str())
debug.Log("found %v blobs for snapshot %v", sn.ID().Str())
bar.Report(restic.Stat{Blobs: 1})
}
bar.Done()

View file

@ -62,7 +62,7 @@ func runRestore(opts RestoreOptions, gopts GlobalOptions, args []string) error {
snapshotIDString := args[0]
debug.Log("restore", "restore %v to %v", snapshotIDString, opts.Target)
debug.Log("restore %v to %v", snapshotIDString, opts.Target)
repo, err := OpenRepository(gopts)
if err != nil {

View file

@ -302,7 +302,7 @@ func OpenRepository(opts GlobalOptions) (*repository.Repository, error) {
// Open the backend specified by a location config.
func open(s string) (restic.Backend, error) {
debug.Log("open", "parsing location %v", s)
debug.Log("parsing location %v", s)
loc, err := location.Parse(s)
if err != nil {
return nil, errors.Fatalf("parsing repository location failed: %v", err)
@ -312,10 +312,10 @@ func open(s string) (restic.Backend, error) {
switch loc.Scheme {
case "local":
debug.Log("open", "opening local repository at %#v", loc.Config)
debug.Log("opening local repository at %#v", loc.Config)
be, err = local.Open(loc.Config.(string))
case "sftp":
debug.Log("open", "opening sftp repository at %#v", loc.Config)
debug.Log("opening sftp repository at %#v", loc.Config)
be, err = sftp.OpenWithConfig(loc.Config.(sftp.Config))
case "s3":
cfg := loc.Config.(s3.Config)
@ -327,7 +327,7 @@ func open(s string) (restic.Backend, error) {
cfg.Secret = os.Getenv("AWS_SECRET_ACCESS_KEY")
}
debug.Log("open", "opening s3 repository at %#v", cfg)
debug.Log("opening s3 repository at %#v", cfg)
be, err = s3.Open(cfg)
case "rest":
be, err = rest.Open(loc.Config.(rest.Config))
@ -344,7 +344,7 @@ func open(s string) (restic.Backend, error) {
// Create the backend specified by URI.
func create(s string) (restic.Backend, error) {
debug.Log("open", "parsing location %v", s)
debug.Log("parsing location %v", s)
loc, err := location.Parse(s)
if err != nil {
return nil, err
@ -352,10 +352,10 @@ func create(s string) (restic.Backend, error) {
switch loc.Scheme {
case "local":
debug.Log("open", "create local repository at %#v", loc.Config)
debug.Log("create local repository at %#v", loc.Config)
return local.Create(loc.Config.(string))
case "sftp":
debug.Log("open", "create sftp repository at %#v", loc.Config)
debug.Log("create sftp repository at %#v", loc.Config)
return sftp.CreateWithConfig(loc.Config.(sftp.Config))
case "s3":
cfg := loc.Config.(s3.Config)
@ -367,12 +367,12 @@ func create(s string) (restic.Backend, error) {
cfg.Secret = os.Getenv("AWS_SECRET_ACCESS_KEY")
}
debug.Log("open", "create s3 repository at %#v", loc.Config)
debug.Log("create s3 repository at %#v", loc.Config)
return s3.Open(cfg)
case "rest":
return rest.Open(loc.Config.(rest.Config))
}
debug.Log("open", "invalid repository scheme: %v", s)
debug.Log("invalid repository scheme: %v", s)
return nil, errors.Fatalf("invalid scheme %q", loc.Scheme)
}

View file

@ -39,7 +39,7 @@ func lockRepository(repo *repository.Repository, exclusive bool) (*restic.Lock,
globalLocks.Lock()
if globalLocks.cancelRefresh == nil {
debug.Log("main.lockRepository", "start goroutine for lock refresh")
debug.Log("start goroutine for lock refresh")
globalLocks.cancelRefresh = make(chan struct{})
globalLocks.refreshWG = sync.WaitGroup{}
globalLocks.refreshWG.Add(1)
@ -55,7 +55,7 @@ func lockRepository(repo *repository.Repository, exclusive bool) (*restic.Lock,
var refreshInterval = 5 * time.Minute
func refreshLocks(wg *sync.WaitGroup, done <-chan struct{}) {
debug.Log("main.refreshLocks", "start")
debug.Log("start")
defer func() {
wg.Done()
globalLocks.Lock()
@ -68,10 +68,10 @@ func refreshLocks(wg *sync.WaitGroup, done <-chan struct{}) {
for {
select {
case <-done:
debug.Log("main.refreshLocks", "terminate")
debug.Log("terminate")
return
case <-ticker.C:
debug.Log("main.refreshLocks", "refreshing locks")
debug.Log("refreshing locks")
globalLocks.Lock()
for _, lock := range globalLocks.locks {
err := lock.Refresh()
@ -88,9 +88,9 @@ func unlockRepo(lock *restic.Lock) error {
globalLocks.Lock()
defer globalLocks.Unlock()
debug.Log("unlockRepo", "unlocking repository")
debug.Log("unlocking repository")
if err := lock.Unlock(); err != nil {
debug.Log("unlockRepo", "error while unlocking: %v", err)
debug.Log("error while unlocking: %v", err)
return err
}
@ -108,13 +108,13 @@ func unlockAll() error {
globalLocks.Lock()
defer globalLocks.Unlock()
debug.Log("unlockAll", "unlocking %d locks", len(globalLocks.locks))
debug.Log("unlocking %d locks", len(globalLocks.locks))
for _, lock := range globalLocks.locks {
if err := lock.Unlock(); err != nil {
debug.Log("unlockAll", "error while unlocking: %v", err)
debug.Log("error while unlocking: %v", err)
return err
}
debug.Log("unlockAll", "successfully removed lock")
debug.Log("successfully removed lock")
}
return nil

View file

@ -29,7 +29,7 @@ func init() {
// set GOMAXPROCS to number of CPUs
if runtime.Version() < "go1.5" {
gomaxprocs := os.Getenv("GOMAXPROCS")
debug.Log("restic", "read GOMAXPROCS from env variable, value: %s", gomaxprocs)
debug.Log("read GOMAXPROCS from env variable, value: %s", gomaxprocs)
if gomaxprocs == "" {
runtime.GOMAXPROCS(runtime.NumCPU())
}
@ -37,7 +37,7 @@ func init() {
}
func main() {
debug.Log("restic", "main %#v", os.Args)
debug.Log("main %#v", os.Args)
err := cmdRoot.Execute()
switch {

View file

@ -14,7 +14,7 @@ import (
// ArchiveReader reads from the reader and archives the data. Returned is the
// resulting snapshot and its ID.
func ArchiveReader(repo restic.Repository, p *restic.Progress, rd io.Reader, name string, tags []string) (*restic.Snapshot, restic.ID, error) {
debug.Log("ArchiveReader", "start archiving %s", name)
debug.Log("start archiving %s", name)
sn, err := restic.NewSnapshot([]string{name}, tags)
if err != nil {
return nil, restic.ID{}, err
@ -45,9 +45,9 @@ func ArchiveReader(repo restic.Repository, p *restic.Progress, rd io.Reader, nam
if err != nil {
return nil, restic.ID{}, err
}
debug.Log("ArchiveReader", "saved blob %v (%d bytes)\n", id.Str(), chunk.Length)
debug.Log("saved blob %v (%d bytes)\n", id.Str(), chunk.Length)
} else {
debug.Log("ArchiveReader", "blob %v already saved in the repo\n", id.Str())
debug.Log("blob %v already saved in the repo\n", id.Str())
}
freeBuf(chunk.Data)
@ -80,14 +80,14 @@ func ArchiveReader(repo restic.Repository, p *restic.Progress, rd io.Reader, nam
return nil, restic.ID{}, err
}
sn.Tree = &treeID
debug.Log("ArchiveReader", "tree saved as %v", treeID.Str())
debug.Log("tree saved as %v", treeID.Str())
id, err := repo.SaveJSONUnpacked(restic.SnapshotFile, sn)
if err != nil {
return nil, restic.ID{}, err
}
debug.Log("ArchiveReader", "snapshot saved as %v", id.Str())
debug.Log("snapshot saved as %v", id.Str())
err = repo.Flush()
if err != nil {

View file

@ -91,20 +91,20 @@ func (arch *Archiver) isKnownBlob(id restic.ID, t restic.BlobType) bool {
// Save stores a blob read from rd in the repository.
func (arch *Archiver) Save(t restic.BlobType, data []byte, id restic.ID) error {
debug.Log("Archiver.Save", "Save(%v, %v)\n", t, id.Str())
debug.Log("Save(%v, %v)\n", t, id.Str())
if arch.isKnownBlob(id, restic.DataBlob) {
debug.Log("Archiver.Save", "blob %v is known\n", id.Str())
debug.Log("blob %v is known\n", id.Str())
return nil
}
_, err := arch.repo.SaveBlob(t, data, id)
if err != nil {
debug.Log("Archiver.Save", "Save(%v, %v): error %v\n", t, id.Str(), err)
debug.Log("Save(%v, %v): error %v\n", t, id.Str(), err)
return err
}
debug.Log("Archiver.Save", "Save(%v, %v): new blob\n", t, id.Str())
debug.Log("Save(%v, %v): new blob\n", t, id.Str())
return nil
}
@ -142,7 +142,7 @@ func (arch *Archiver) reloadFileIfChanged(node *restic.Node, file fs.File) (*res
node, err = restic.NodeFromFileInfo(node.Path, fi)
if err != nil {
debug.Log("Archiver.SaveFile", "restic.NodeFromFileInfo returned error for %v: %v", node.Path, err)
debug.Log("restic.NodeFromFileInfo returned error for %v: %v", node.Path, err)
return nil, err
}
@ -184,7 +184,7 @@ func waitForResults(resultChannels [](<-chan saveResult)) ([]saveResult, error)
}
func updateNodeContent(node *restic.Node, results []saveResult) error {
debug.Log("Archiver.Save", "checking size for file %s", node.Path)
debug.Log("checking size for file %s", node.Path)
var bytes uint64
node.Content = make([]restic.ID, len(results))
@ -193,14 +193,14 @@ func updateNodeContent(node *restic.Node, results []saveResult) error {
node.Content[i] = b.id
bytes += b.bytes
debug.Log("Archiver.Save", " adding blob %s, %d bytes", b.id.Str(), b.bytes)
debug.Log(" adding blob %s, %d bytes", b.id.Str(), b.bytes)
}
if bytes != node.Size {
return errors.Errorf("errors saving node %q: saved %d bytes, wanted %d bytes", node.Path, bytes, node.Size)
}
debug.Log("Archiver.SaveFile", "SaveFile(%q): %v blobs\n", node.Path, len(results))
debug.Log("SaveFile(%q): %v blobs\n", node.Path, len(results))
return nil
}
@ -248,7 +248,7 @@ func (arch *Archiver) SaveFile(p *restic.Progress, node *restic.Node) error {
func (arch *Archiver) fileWorker(wg *sync.WaitGroup, p *restic.Progress, done <-chan struct{}, entCh <-chan pipe.Entry) {
defer func() {
debug.Log("Archiver.fileWorker", "done")
debug.Log("done")
wg.Done()
}()
for {
@ -259,11 +259,11 @@ func (arch *Archiver) fileWorker(wg *sync.WaitGroup, p *restic.Progress, done <-
return
}
debug.Log("Archiver.fileWorker", "got job %v", e)
debug.Log("got job %v", e)
// check for errors
if e.Error() != nil {
debug.Log("Archiver.fileWorker", "job %v has errors: %v", e.Path(), e.Error())
debug.Log("job %v has errors: %v", e.Path(), e.Error())
// TODO: integrate error reporting
fmt.Fprintf(os.Stderr, "error for %v: %v\n", e.Path(), e.Error())
// ignore this file
@ -275,7 +275,7 @@ func (arch *Archiver) fileWorker(wg *sync.WaitGroup, p *restic.Progress, done <-
node, err := restic.NodeFromFileInfo(e.Fullpath(), e.Info())
if err != nil {
// TODO: integrate error reporting
debug.Log("Archiver.fileWorker", "restic.NodeFromFileInfo returned error for %v: %v", node.Path, err)
debug.Log("restic.NodeFromFileInfo returned error for %v: %v", node.Path, err)
e.Result() <- nil
p.Report(restic.Stat{Errors: 1})
continue
@ -283,14 +283,14 @@ func (arch *Archiver) fileWorker(wg *sync.WaitGroup, p *restic.Progress, done <-
// try to use old node, if present
if e.Node != nil {
debug.Log("Archiver.fileWorker", " %v use old data", e.Path())
debug.Log(" %v use old data", e.Path())
oldNode := e.Node.(*restic.Node)
// check if all content is still available in the repository
contentMissing := false
for _, blob := range oldNode.Content {
if !arch.repo.Index().Has(blob, restic.DataBlob) {
debug.Log("Archiver.fileWorker", " %v not using old data, %v is missing", e.Path(), blob.Str())
debug.Log(" %v not using old data, %v is missing", e.Path(), blob.Str())
contentMissing = true
break
}
@ -298,15 +298,15 @@ func (arch *Archiver) fileWorker(wg *sync.WaitGroup, p *restic.Progress, done <-
if !contentMissing {
node.Content = oldNode.Content
debug.Log("Archiver.fileWorker", " %v content is complete", e.Path())
debug.Log(" %v content is complete", e.Path())
}
} else {
debug.Log("Archiver.fileWorker", " %v no old data", e.Path())
debug.Log(" %v no old data", e.Path())
}
// otherwise read file normally
if node.Type == "file" && len(node.Content) == 0 {
debug.Log("Archiver.fileWorker", " read and save %v, content: %v", e.Path(), node.Content)
debug.Log(" read and save %v, content: %v", e.Path(), node.Content)
err = arch.SaveFile(p, node)
if err != nil {
// TODO: integrate error reporting
@ -321,7 +321,7 @@ func (arch *Archiver) fileWorker(wg *sync.WaitGroup, p *restic.Progress, done <-
p.Report(restic.Stat{Bytes: node.Size})
}
debug.Log("Archiver.fileWorker", " processed %v, %d blobs", e.Path(), len(node.Content))
debug.Log(" processed %v, %d blobs", e.Path(), len(node.Content))
e.Result() <- node
p.Report(restic.Stat{Files: 1})
case <-done:
@ -332,9 +332,9 @@ func (arch *Archiver) fileWorker(wg *sync.WaitGroup, p *restic.Progress, done <-
}
func (arch *Archiver) dirWorker(wg *sync.WaitGroup, p *restic.Progress, done <-chan struct{}, dirCh <-chan pipe.Dir) {
debug.Log("Archiver.dirWorker", "start")
debug.Log("start")
defer func() {
debug.Log("Archiver.dirWorker", "done")
debug.Log("done")
wg.Done()
}()
for {
@ -344,7 +344,7 @@ func (arch *Archiver) dirWorker(wg *sync.WaitGroup, p *restic.Progress, done <-c
// channel is closed
return
}
debug.Log("Archiver.dirWorker", "save dir %v (%d entries), error %v\n", dir.Path(), len(dir.Entries), dir.Error())
debug.Log("save dir %v (%d entries), error %v\n", dir.Path(), len(dir.Entries), dir.Error())
// ignore dir nodes with errors
if dir.Error() != nil {
@ -358,13 +358,13 @@ func (arch *Archiver) dirWorker(wg *sync.WaitGroup, p *restic.Progress, done <-c
// wait for all content
for _, ch := range dir.Entries {
debug.Log("Archiver.dirWorker", "receiving result from %v", ch)
debug.Log("receiving result from %v", ch)
res := <-ch
// if we get a nil pointer here, an error has happened while
// processing this entry. Ignore it for now.
if res == nil {
debug.Log("Archiver.dirWorker", "got nil result?")
debug.Log("got nil result?")
continue
}
@ -373,7 +373,7 @@ func (arch *Archiver) dirWorker(wg *sync.WaitGroup, p *restic.Progress, done <-c
tree.Insert(node)
if node.Type == "dir" {
debug.Log("Archiver.dirWorker", "got tree node for %s: %v", node.Path, node.Subtree)
debug.Log("got tree node for %s: %v", node.Path, node.Subtree)
if node.Subtree.IsNull() {
panic("invalid null subtree restic.ID")
@ -401,14 +401,14 @@ func (arch *Archiver) dirWorker(wg *sync.WaitGroup, p *restic.Progress, done <-c
if err != nil {
panic(err)
}
debug.Log("Archiver.dirWorker", "save tree for %s: %v", dir.Path(), id.Str())
debug.Log("save tree for %s: %v", dir.Path(), id.Str())
if id.IsNull() {
panic("invalid null subtree restic.ID return from SaveTreeJSON()")
}
node.Subtree = &id
debug.Log("Archiver.dirWorker", "sending result to %v", dir.Result())
debug.Log("sending result to %v", dir.Result())
dir.Result() <- node
if dir.Path() != "" {
@ -443,7 +443,7 @@ func copyJobs(done <-chan struct{}, in <-chan pipe.Job, out chan<- pipe.Job) {
case job, ok = <-inCh:
if !ok {
// input channel closed, we're done
debug.Log("copyJobs", "input channel closed, we're done")
debug.Log("input channel closed, we're done")
return
}
inCh = nil
@ -464,10 +464,10 @@ type archiveJob struct {
func (a *archivePipe) compare(done <-chan struct{}, out chan<- pipe.Job) {
defer func() {
close(out)
debug.Log("ArchivePipe.compare", "done")
debug.Log("done")
}()
debug.Log("ArchivePipe.compare", "start")
debug.Log("start")
var (
loadOld, loadNew bool = true, true
ok bool
@ -480,7 +480,7 @@ func (a *archivePipe) compare(done <-chan struct{}, out chan<- pipe.Job) {
oldJob, ok = <-a.Old
// if the old channel is closed, just pass through the new jobs
if !ok {
debug.Log("ArchivePipe.compare", "old channel is closed, copy from new channel")
debug.Log("old channel is closed, copy from new channel")
// handle remaining newJob
if !loadNew {
@ -498,15 +498,15 @@ func (a *archivePipe) compare(done <-chan struct{}, out chan<- pipe.Job) {
newJob, ok = <-a.New
// if the new channel is closed, there are no more files in the current snapshot, return
if !ok {
debug.Log("ArchivePipe.compare", "new channel is closed, we're done")
debug.Log("new channel is closed, we're done")
return
}
loadNew = false
}
debug.Log("ArchivePipe.compare", "old job: %v", oldJob.Path)
debug.Log("ArchivePipe.compare", "new job: %v", newJob.Path())
debug.Log("old job: %v", oldJob.Path)
debug.Log("new job: %v", newJob.Path())
// at this point we have received an old job as well as a new job, compare paths
file1 := oldJob.Path
@ -516,7 +516,7 @@ func (a *archivePipe) compare(done <-chan struct{}, out chan<- pipe.Job) {
dir2 := filepath.Dir(file2)
if file1 == file2 {
debug.Log("ArchivePipe.compare", " same filename %q", file1)
debug.Log(" same filename %q", file1)
// send job
out <- archiveJob{hasOld: true, old: oldJob, new: newJob}.Copy()
@ -524,19 +524,19 @@ func (a *archivePipe) compare(done <-chan struct{}, out chan<- pipe.Job) {
loadNew = true
continue
} else if dir1 < dir2 {
debug.Log("ArchivePipe.compare", " %q < %q, file %q added", dir1, dir2, file2)
debug.Log(" %q < %q, file %q added", dir1, dir2, file2)
// file is new, send new job and load new
loadNew = true
out <- archiveJob{new: newJob}.Copy()
continue
} else if dir1 == dir2 {
if file1 < file2 {
debug.Log("ArchivePipe.compare", " %q < %q, file %q removed", file1, file2, file1)
debug.Log(" %q < %q, file %q removed", file1, file2, file1)
// file has been removed, load new old
loadOld = true
continue
} else {
debug.Log("ArchivePipe.compare", " %q > %q, file %q added", file1, file2, file2)
debug.Log(" %q > %q, file %q added", file1, file2, file2)
// file is new, send new job and load new
loadNew = true
out <- archiveJob{new: newJob}.Copy()
@ -544,7 +544,7 @@ func (a *archivePipe) compare(done <-chan struct{}, out chan<- pipe.Job) {
}
}
debug.Log("ArchivePipe.compare", " %q > %q, file %q removed", file1, file2, file1)
debug.Log(" %q > %q, file %q removed", file1, file2, file1)
// file has been removed, throw away old job and load new
loadOld = true
}
@ -557,7 +557,7 @@ func (j archiveJob) Copy() pipe.Job {
// handle files
if isRegularFile(j.new.Info()) {
debug.Log("archiveJob.Copy", " job %v is file", j.new.Path())
debug.Log(" job %v is file", j.new.Path())
// if type has changed, return new job directly
if j.old.Node == nil {
@ -566,11 +566,11 @@ func (j archiveJob) Copy() pipe.Job {
// if file is newer, return the new job
if j.old.Node.IsNewer(j.new.Fullpath(), j.new.Info()) {
debug.Log("archiveJob.Copy", " job %v is newer", j.new.Path())
debug.Log(" job %v is newer", j.new.Path())
return j.new
}
debug.Log("archiveJob.Copy", " job %v add old data", j.new.Path())
debug.Log(" job %v add old data", j.new.Path())
// otherwise annotate job with old data
e := j.new.(pipe.Entry)
e.Node = j.old.Node
@ -595,10 +595,10 @@ func (arch *Archiver) saveIndexes(wg *sync.WaitGroup, done <-chan struct{}) {
case <-done:
return
case <-ticker.C:
debug.Log("Archiver.saveIndexes", "saving full indexes")
debug.Log("saving full indexes")
err := arch.repo.SaveFullIndex()
if err != nil {
debug.Log("Archiver.saveIndexes", "save indexes returned an error: %v", err)
debug.Log("save indexes returned an error: %v", err)
fmt.Fprintf(os.Stderr, "error saving preliminary index: %v\n", err)
}
}
@ -637,7 +637,7 @@ func (arch *Archiver) Snapshot(p *restic.Progress, paths, tags []string, parentI
paths = unique(paths)
sort.Sort(baseNameSlice(paths))
debug.Log("Archiver.Snapshot", "start for %v", paths)
debug.Log("start for %v", paths)
debug.RunHook("Archiver.Snapshot", nil)
@ -683,7 +683,7 @@ func (arch *Archiver) Snapshot(p *restic.Progress, paths, tags []string, parentI
resCh := make(chan pipe.Result, 1)
go func() {
pipe.Walk(paths, arch.SelectFilter, done, pipeCh, resCh)
debug.Log("Archiver.Snapshot", "pipe.Walk done")
debug.Log("pipe.Walk done")
}()
jobs.New = pipeCh
@ -698,7 +698,7 @@ func (arch *Archiver) Snapshot(p *restic.Progress, paths, tags []string, parentI
wg.Add(1)
go func() {
pipe.Split(ch, dirCh, entCh)
debug.Log("Archiver.Snapshot", "split done")
debug.Log("split done")
close(dirCh)
close(entCh)
wg.Done()
@ -718,18 +718,18 @@ func (arch *Archiver) Snapshot(p *restic.Progress, paths, tags []string, parentI
go arch.saveIndexes(&wgIndexSaver, stopIndexSaver)
// wait for all workers to terminate
debug.Log("Archiver.Snapshot", "wait for workers")
debug.Log("wait for workers")
wg.Wait()
// stop index saver
close(stopIndexSaver)
wgIndexSaver.Wait()
debug.Log("Archiver.Snapshot", "workers terminated")
debug.Log("workers terminated")
// receive the top-level tree
root := (<-resCh).(*restic.Node)
debug.Log("Archiver.Snapshot", "root node received: %v", root.Subtree.Str())
debug.Log("root node received: %v", root.Subtree.Str())
sn.Tree = root.Subtree
// save snapshot
@ -738,7 +738,7 @@ func (arch *Archiver) Snapshot(p *restic.Progress, paths, tags []string, parentI
return nil, restic.ID{}, err
}
debug.Log("Archiver.Snapshot", "saved snapshot %v", id.Str())
debug.Log("saved snapshot %v", id.Str())
// flush repository
err = arch.repo.Flush()
@ -749,11 +749,11 @@ func (arch *Archiver) Snapshot(p *restic.Progress, paths, tags []string, parentI
// save index
err = arch.repo.SaveIndex()
if err != nil {
debug.Log("Archiver.Snapshot", "error saving index: %v", err)
debug.Log("error saving index: %v", err)
return nil, restic.ID{}, err
}
debug.Log("Archiver.Snapshot", "saved indexes")
debug.Log("saved indexes")
return sn, id, nil
}
@ -775,7 +775,7 @@ func Scan(dirs []string, filter pipe.SelectFunc, p *restic.Progress) (restic.Sta
var stat restic.Stat
for _, dir := range dirs {
debug.Log("Scan", "Start for %v", dir)
debug.Log("Start for %v", dir)
err := fs.Walk(dir, func(str string, fi os.FileInfo, err error) error {
// TODO: integrate error reporting
if err != nil {
@ -788,7 +788,7 @@ func Scan(dirs []string, filter pipe.SelectFunc, p *restic.Progress) (restic.Sta
}
if !filter(str, fi) {
debug.Log("Scan.Walk", "path %v excluded", str)
debug.Log("path %v excluded", str)
if fi.IsDir() {
return filepath.SkipDir
}
@ -813,7 +813,7 @@ func Scan(dirs []string, filter pipe.SelectFunc, p *restic.Progress) (restic.Sta
return nil
})
debug.Log("Scan", "Done for %v, err: %v", dir, err)
debug.Log("Done for %v, err: %v", dir, err)
if err != nil {
return restic.Stat{}, errors.Wrap(err, "fs.Walk")
}

View file

@ -106,7 +106,7 @@ func dirname(base string, t restic.FileType, name string) string {
// exception: when off is lower than zero, it is treated as an offset relative
// to the end of the file.
func (b *Local) Load(h restic.Handle, p []byte, off int64) (n int, err error) {
debug.Log("backend.local.Load", "Load %v, length %v at %v", h, len(p), off)
debug.Log("Load %v, length %v at %v", h, len(p), off)
if err := h.Valid(); err != nil {
return 0, err
}
@ -172,13 +172,13 @@ func writeToTempfile(tempdir string, p []byte) (filename string, err error) {
// Save stores data in the backend at the handle.
func (b *Local) Save(h restic.Handle, p []byte) (err error) {
debug.Log("backend.local.Save", "Save %v, length %v", h, len(p))
debug.Log("Save %v, length %v", h, len(p))
if err := h.Valid(); err != nil {
return err
}
tmpfile, err := writeToTempfile(filepath.Join(b.p, backend.Paths.Temp), p)
debug.Log("local.Save", "saved %v (%d bytes) to %v", h, len(p), tmpfile)
debug.Log("saved %v (%d bytes) to %v", h, len(p), tmpfile)
if err != nil {
return err
}
@ -199,7 +199,7 @@ func (b *Local) Save(h restic.Handle, p []byte) (err error) {
}
err = fs.Rename(tmpfile, filename)
debug.Log("local.Save", "save %v: rename %v -> %v: %v",
debug.Log("save %v: rename %v -> %v: %v",
h, filepath.Base(tmpfile), filepath.Base(filename), err)
if err != nil {
@ -217,7 +217,7 @@ func (b *Local) Save(h restic.Handle, p []byte) (err error) {
// Stat returns information about a blob.
func (b *Local) Stat(h restic.Handle) (restic.FileInfo, error) {
debug.Log("backend.local.Stat", "Stat %v", h)
debug.Log("Stat %v", h)
if err := h.Valid(); err != nil {
return restic.FileInfo{}, err
}
@ -232,7 +232,7 @@ func (b *Local) Stat(h restic.Handle) (restic.FileInfo, error) {
// Test returns true if a blob of the given type and name exists in the backend.
func (b *Local) Test(t restic.FileType, name string) (bool, error) {
debug.Log("backend.local.Test", "Test %v %v", t, name)
debug.Log("Test %v %v", t, name)
_, err := fs.Stat(filename(b.p, t, name))
if err != nil {
if os.IsNotExist(errors.Cause(err)) {
@ -246,7 +246,7 @@ func (b *Local) Test(t restic.FileType, name string) (bool, error) {
// Remove removes the blob with the given name and type.
func (b *Local) Remove(t restic.FileType, name string) error {
debug.Log("backend.local.Remove", "Remove %v %v", t, name)
debug.Log("Remove %v %v", t, name)
fn := filename(b.p, t, name)
// reset read-only flag
@ -321,7 +321,7 @@ func listDirs(dir string) (filenames []string, err error) {
// goroutine is started for this. If the channel done is closed, sending
// stops.
func (b *Local) List(t restic.FileType, done <-chan struct{}) <-chan string {
debug.Log("backend.local.List", "List %v", t)
debug.Log("List %v", t)
lister := listDir
if t == restic.DataFile {
lister = listDirs
@ -354,13 +354,13 @@ func (b *Local) List(t restic.FileType, done <-chan struct{}) <-chan string {
// Delete removes the repository and all files.
func (b *Local) Delete() error {
debug.Log("backend.local.Delete", "Delete()")
debug.Log("Delete()")
return fs.RemoveAll(b.p)
}
// Close closes all open files.
func (b *Local) Close() error {
debug.Log("backend.local.Close", "Close()")
debug.Log("Close()")
// this does not need to do anything, all open files are closed within the
// same function.
return nil

View file

@ -33,7 +33,7 @@ func New() *MemoryBackend {
data: make(memMap),
}
debug.Log("MemoryBackend.New", "created new memory backend")
debug.Log("created new memory backend")
return be
}
@ -43,7 +43,7 @@ func (be *MemoryBackend) Test(t restic.FileType, name string) (bool, error) {
be.m.Lock()
defer be.m.Unlock()
debug.Log("MemoryBackend.Test", "test %v %v", t, name)
debug.Log("test %v %v", t, name)
if _, ok := be.data[entry{t, name}]; ok {
return true, nil
@ -65,7 +65,7 @@ func (be *MemoryBackend) Load(h restic.Handle, p []byte, off int64) (int, error)
h.Name = ""
}
debug.Log("MemoryBackend.Load", "get %v offset %v len %v", h, off, len(p))
debug.Log("get %v offset %v len %v", h, off, len(p))
if _, ok := be.data[entry{h.Type, h.Name}]; !ok {
return 0, errors.New("no such data")
@ -109,7 +109,7 @@ func (be *MemoryBackend) Save(h restic.Handle, p []byte) error {
return errors.New("file already exists")
}
debug.Log("MemoryBackend.Save", "save %v bytes at %v", len(p), h)
debug.Log("save %v bytes at %v", len(p), h)
buf := make([]byte, len(p))
copy(buf, p)
be.data[entry{h.Type, h.Name}] = buf
@ -130,7 +130,7 @@ func (be *MemoryBackend) Stat(h restic.Handle) (restic.FileInfo, error) {
h.Name = ""
}
debug.Log("MemoryBackend.Stat", "stat %v", h)
debug.Log("stat %v", h)
e, ok := be.data[entry{h.Type, h.Name}]
if !ok {
@ -145,7 +145,7 @@ func (be *MemoryBackend) Remove(t restic.FileType, name string) error {
be.m.Lock()
defer be.m.Unlock()
debug.Log("MemoryBackend.Remove", "get %v %v", t, name)
debug.Log("get %v %v", t, name)
if _, ok := be.data[entry{t, name}]; !ok {
return errors.New("no such data")
@ -171,7 +171,7 @@ func (be *MemoryBackend) List(t restic.FileType, done <-chan struct{}) <-chan st
ids = append(ids, entry.Name)
}
debug.Log("MemoryBackend.List", "list %v: %v", t, ids)
debug.Log("list %v: %v", t, ids)
go func() {
defer close(ch)

View file

@ -26,7 +26,7 @@ type s3 struct {
// Open opens the S3 backend at bucket and region. The bucket is created if it
// does not exist yet.
func Open(cfg Config) (restic.Backend, error) {
debug.Log("s3.Open", "open, config %#v", cfg)
debug.Log("open, config %#v", cfg)
client, err := minio.New(cfg.Endpoint, cfg.KeyID, cfg.Secret, !cfg.UseHTTP)
if err != nil {
@ -38,7 +38,7 @@ func Open(cfg Config) (restic.Backend, error) {
ok, err := client.BucketExists(cfg.Bucket)
if err != nil {
debug.Log("s3.Open", "BucketExists(%v) returned err %v, trying to create the bucket", cfg.Bucket, err)
debug.Log("BucketExists(%v) returned err %v, trying to create the bucket", cfg.Bucket, err)
return nil, errors.Wrap(err, "client.BucketExists")
}
@ -84,7 +84,7 @@ func (be *s3) Location() string {
func (be s3) Load(h restic.Handle, p []byte, off int64) (n int, err error) {
var obj *minio.Object
debug.Log("s3.Load", "%v, offset %v, len %v", h, off, len(p))
debug.Log("%v, offset %v, len %v", h, off, len(p))
path := be.s3path(h.Type, h.Name)
<-be.connChan
@ -94,7 +94,7 @@ func (be s3) Load(h restic.Handle, p []byte, off int64) (n int, err error) {
obj, err = be.client.GetObject(be.bucketname, path)
if err != nil {
debug.Log("s3.Load", " err %v", err)
debug.Log(" err %v", err)
return 0, errors.Wrap(err, "client.GetObject")
}
@ -137,7 +137,7 @@ func (be s3) Load(h restic.Handle, p []byte, off int64) (n int, err error) {
nextError = io.ErrUnexpectedEOF
debug.Log("s3.Load", " capped buffer to %v byte", len(p))
debug.Log(" capped buffer to %v byte", len(p))
}
n, err = obj.ReadAt(p, off)
@ -158,14 +158,14 @@ func (be s3) Save(h restic.Handle, p []byte) (err error) {
return err
}
debug.Log("s3.Save", "%v with %d bytes", h, len(p))
debug.Log("%v with %d bytes", h, len(p))
path := be.s3path(h.Type, h.Name)
// Check key does not already exist
_, err = be.client.StatObject(be.bucketname, path)
if err == nil {
debug.Log("s3.blob.Finalize()", "%v already exists", h)
debug.Log("%v already exists", h)
return errors.New("key already exists")
}
@ -174,24 +174,24 @@ func (be s3) Save(h restic.Handle, p []byte) (err error) {
be.connChan <- struct{}{}
}()
debug.Log("s3.Save", "PutObject(%v, %v, %v, %v)",
debug.Log("PutObject(%v, %v, %v, %v)",
be.bucketname, path, int64(len(p)), "binary/octet-stream")
n, err := be.client.PutObject(be.bucketname, path, bytes.NewReader(p), "binary/octet-stream")
debug.Log("s3.Save", "%v -> %v bytes, err %#v", path, n, err)
debug.Log("%v -> %v bytes, err %#v", path, n, err)
return errors.Wrap(err, "client.PutObject")
}
// Stat returns information about a blob.
func (be s3) Stat(h restic.Handle) (bi restic.FileInfo, err error) {
debug.Log("s3.Stat", "%v", h)
debug.Log("%v", h)
path := be.s3path(h.Type, h.Name)
var obj *minio.Object
obj, err = be.client.GetObject(be.bucketname, path)
if err != nil {
debug.Log("s3.Stat", "GetObject() err %v", err)
debug.Log("GetObject() err %v", err)
return restic.FileInfo{}, errors.Wrap(err, "client.GetObject")
}
@ -205,7 +205,7 @@ func (be s3) Stat(h restic.Handle) (bi restic.FileInfo, err error) {
fi, err := obj.Stat()
if err != nil {
debug.Log("s3.Stat", "Stat() err %v", err)
debug.Log("Stat() err %v", err)
return restic.FileInfo{}, errors.Wrap(err, "Stat")
}
@ -229,7 +229,7 @@ func (be *s3) Test(t restic.FileType, name string) (bool, error) {
func (be *s3) Remove(t restic.FileType, name string) error {
path := be.s3path(t, name)
err := be.client.RemoveObject(be.bucketname, path)
debug.Log("s3.Remove", "%v %v -> err %v", t, name, err)
debug.Log("%v %v -> err %v", t, name, err)
return errors.Wrap(err, "client.RemoveObject")
}
@ -237,7 +237,7 @@ func (be *s3) Remove(t restic.FileType, name string) error {
// goroutine is started for this. If the channel done is closed, sending
// stops.
func (be *s3) List(t restic.FileType, done <-chan struct{}) <-chan string {
debug.Log("s3.List", "listing %v", t)
debug.Log("listing %v", t)
ch := make(chan string)
prefix := be.s3path(t, "")

View file

@ -76,7 +76,7 @@ func startClient(program string, args ...string) (*SFTP, error) {
ch := make(chan error, 1)
go func() {
err := cmd.Wait()
debug.Log("sftp.Wait", "ssh command exited, err %v", err)
debug.Log("ssh command exited, err %v", err)
ch <- errors.Wrap(err, "cmd.Wait")
}()
@ -106,7 +106,7 @@ func paths(dir string) []string {
func (r *SFTP) clientError() error {
select {
case err := <-r.result:
debug.Log("sftp.clientError", "client has exited with err %v", err)
debug.Log("client has exited with err %v", err)
return err
default:
}
@ -119,10 +119,10 @@ func (r *SFTP) clientError() error {
// is expected at the given path. `dir` must be delimited by forward slashes
// ("/"), which is required by sftp.
func Open(dir string, program string, args ...string) (*SFTP, error) {
debug.Log("sftp.Open", "open backend with program %v, %v at %v", program, args, dir)
debug.Log("open backend with program %v, %v at %v", program, args, dir)
sftp, err := startClient(program, args...)
if err != nil {
debug.Log("sftp.Open", "unable to start program: %v", err)
debug.Log("unable to start program: %v", err)
return nil, err
}
@ -155,7 +155,7 @@ func buildSSHCommand(cfg Config) []string {
// OpenWithConfig opens an sftp backend as described by the config by running
// "ssh" with the appropiate arguments.
func OpenWithConfig(cfg Config) (*SFTP, error) {
debug.Log("sftp.OpenWithConfig", "open with config %v", cfg)
debug.Log("open with config %v", cfg)
return Open(cfg.Dir, "ssh", buildSSHCommand(cfg)...)
}
@ -163,7 +163,7 @@ func OpenWithConfig(cfg Config) (*SFTP, error) {
// backend at dir. Afterwards a new config blob should be created. `dir` must
// be delimited by forward slashes ("/"), which is required by sftp.
func Create(dir string, program string, args ...string) (*SFTP, error) {
debug.Log("sftp.Create", "%v %v", program, args)
debug.Log("%v %v", program, args)
sftp, err := startClient(program, args...)
if err != nil {
return nil, err
@ -195,7 +195,7 @@ func Create(dir string, program string, args ...string) (*SFTP, error) {
// CreateWithConfig creates an sftp backend as described by the config by running
// "ssh" with the appropiate arguments.
func CreateWithConfig(cfg Config) (*SFTP, error) {
debug.Log("sftp.CreateWithConfig", "config %v", cfg)
debug.Log("config %v", cfg)
return Create(cfg.Dir, "ssh", buildSSHCommand(cfg)...)
}
@ -329,7 +329,7 @@ func (r *SFTP) dirname(t restic.FileType, name string) string {
// Load returns the data stored in the backend for h at the given offset
// and saves it in p. Load has the same semantics as io.ReaderAt.
func (r *SFTP) Load(h restic.Handle, p []byte, off int64) (n int, err error) {
debug.Log("sftp.Load", "load %v, %d bytes, offset %v", h, len(p), off)
debug.Log("load %v, %d bytes, offset %v", h, len(p), off)
if err := r.clientError(); err != nil {
return 0, err
}
@ -366,7 +366,7 @@ func (r *SFTP) Load(h restic.Handle, p []byte, off int64) (n int, err error) {
// Save stores data in the backend at the handle.
func (r *SFTP) Save(h restic.Handle, p []byte) (err error) {
debug.Log("sftp.Save", "save %v bytes to %v", h, len(p))
debug.Log("save %v bytes to %v", h, len(p))
if err := r.clientError(); err != nil {
return err
}
@ -380,7 +380,7 @@ func (r *SFTP) Save(h restic.Handle, p []byte) (err error) {
return err
}
debug.Log("sftp.Save", "save %v (%d bytes) to %v", h, len(p), filename)
debug.Log("save %v (%d bytes) to %v", h, len(p), filename)
n, err := tmpfile.Write(p)
if err != nil {
@ -397,14 +397,14 @@ func (r *SFTP) Save(h restic.Handle, p []byte) (err error) {
}
err = r.renameFile(filename, h.Type, h.Name)
debug.Log("sftp.Save", "save %v: rename %v: %v",
debug.Log("save %v: rename %v: %v",
h, path.Base(filename), err)
return err
}
// Stat returns information about a blob.
func (r *SFTP) Stat(h restic.Handle) (restic.FileInfo, error) {
debug.Log("sftp.Stat", "stat %v", h)
debug.Log("stat %v", h)
if err := r.clientError(); err != nil {
return restic.FileInfo{}, err
}
@ -423,7 +423,7 @@ func (r *SFTP) Stat(h restic.Handle) (restic.FileInfo, error) {
// Test returns true if a blob of the given type and name exists in the backend.
func (r *SFTP) Test(t restic.FileType, name string) (bool, error) {
debug.Log("sftp.Test", "type %v, name %v", t, name)
debug.Log("type %v, name %v", t, name)
if err := r.clientError(); err != nil {
return false, err
}
@ -442,7 +442,7 @@ func (r *SFTP) Test(t restic.FileType, name string) (bool, error) {
// Remove removes the content stored at name.
func (r *SFTP) Remove(t restic.FileType, name string) error {
debug.Log("sftp.Remove", "type %v, name %v", t, name)
debug.Log("type %v, name %v", t, name)
if err := r.clientError(); err != nil {
return err
}
@ -454,7 +454,7 @@ func (r *SFTP) Remove(t restic.FileType, name string) error {
// goroutine is started for this. If the channel done is closed, sending
// stops.
func (r *SFTP) List(t restic.FileType, done <-chan struct{}) <-chan string {
debug.Log("sftp.List", "list all %v", t)
debug.Log("list all %v", t)
ch := make(chan string)
go func() {
@ -523,13 +523,13 @@ var closeTimeout = 2 * time.Second
// Close closes the sftp connection and terminates the underlying command.
func (r *SFTP) Close() error {
debug.Log("sftp.Close", "")
debug.Log("")
if r == nil {
return nil
}
err := r.c.Close()
debug.Log("sftp.Close", "Close returned error %v", err)
debug.Log("Close returned error %v", err)
// wait for closeTimeout before killing the process
select {

View file

@ -74,7 +74,7 @@ func (err ErrOldIndexFormat) Error() string {
// LoadIndex loads all index files.
func (c *Checker) LoadIndex() (hints []error, errs []error) {
debug.Log("LoadIndex", "Start")
debug.Log("Start")
type indexRes struct {
Index *repository.Index
ID string
@ -83,10 +83,10 @@ func (c *Checker) LoadIndex() (hints []error, errs []error) {
indexCh := make(chan indexRes)
worker := func(id restic.ID, done <-chan struct{}) error {
debug.Log("LoadIndex", "worker got index %v", id)
debug.Log("worker got index %v", id)
idx, err := repository.LoadIndexWithDecoder(c.repo, id, repository.DecodeIndex)
if errors.Cause(err) == repository.ErrOldIndexFormat {
debug.Log("LoadIndex", "index %v has old format", id.Str())
debug.Log("index %v has old format", id.Str())
hints = append(hints, ErrOldIndexFormat{id})
idx, err = repository.LoadIndexWithDecoder(c.repo, id, repository.DecodeOldIndex)
@ -107,10 +107,10 @@ func (c *Checker) LoadIndex() (hints []error, errs []error) {
var perr error
go func() {
defer close(indexCh)
debug.Log("LoadIndex", "start loading indexes in parallel")
debug.Log("start loading indexes in parallel")
perr = repository.FilesInParallel(c.repo.Backend(), restic.IndexFile, defaultParallelism,
repository.ParallelWorkFuncParseID(worker))
debug.Log("LoadIndex", "loading indexes finished, error: %v", perr)
debug.Log("loading indexes finished, error: %v", perr)
}()
done := make(chan struct{})
@ -124,7 +124,7 @@ func (c *Checker) LoadIndex() (hints []error, errs []error) {
packToIndex := make(map[restic.ID]restic.IDSet)
for res := range indexCh {
debug.Log("LoadIndex", "process index %v", res.ID)
debug.Log("process index %v", res.ID)
idxID, err := restic.ParseID(res.ID)
if err != nil {
errs = append(errs, errors.Errorf("unable to parse as index ID: %v", res.ID))
@ -134,7 +134,7 @@ func (c *Checker) LoadIndex() (hints []error, errs []error) {
c.indexes[idxID] = res.Index
c.masterIndex.Insert(res.Index)
debug.Log("LoadIndex", "process blobs")
debug.Log("process blobs")
cnt := 0
for blob := range res.Index.Each(done) {
c.packs.Insert(blob.PackID)
@ -148,14 +148,14 @@ func (c *Checker) LoadIndex() (hints []error, errs []error) {
packToIndex[blob.PackID].Insert(idxID)
}
debug.Log("LoadIndex", "%d blobs processed", cnt)
debug.Log("%d blobs processed", cnt)
}
debug.Log("LoadIndex", "done, error %v", perr)
debug.Log("done, error %v", perr)
debug.Log("LoadIndex", "checking for duplicate packs")
debug.Log("checking for duplicate packs")
for packID := range c.packs {
debug.Log("LoadIndex", " check pack %v: contained in %d indexes", packID.Str(), len(packToIndex[packID]))
debug.Log(" check pack %v: contained in %d indexes", packID.Str(), len(packToIndex[packID]))
if len(packToIndex[packID]) > 1 {
hints = append(hints, ErrDuplicatePacks{
PackID: packID,
@ -181,8 +181,8 @@ func (e PackError) Error() string {
}
func packIDTester(repo restic.Repository, inChan <-chan restic.ID, errChan chan<- error, wg *sync.WaitGroup, done <-chan struct{}) {
debug.Log("Checker.testPackID", "worker start")
defer debug.Log("Checker.testPackID", "worker done")
debug.Log("worker start")
defer debug.Log("worker done")
defer wg.Done()
@ -197,7 +197,7 @@ func packIDTester(repo restic.Repository, inChan <-chan restic.ID, errChan chan<
}
if err != nil {
debug.Log("Checker.testPackID", "error checking for pack %s: %v", id.Str(), err)
debug.Log("error checking for pack %s: %v", id.Str(), err)
select {
case <-done:
return
@ -207,7 +207,7 @@ func packIDTester(repo restic.Repository, inChan <-chan restic.ID, errChan chan<
continue
}
debug.Log("Checker.testPackID", "pack %s exists", id.Str())
debug.Log("pack %s exists", id.Str())
}
}
@ -217,7 +217,7 @@ func packIDTester(repo restic.Repository, inChan <-chan restic.ID, errChan chan<
func (c *Checker) Packs(errChan chan<- error, done <-chan struct{}) {
defer close(errChan)
debug.Log("Checker.Packs", "checking for %d packs", len(c.packs))
debug.Log("checking for %d packs", len(c.packs))
seenPacks := restic.NewIDSet()
var workerWG sync.WaitGroup
@ -234,12 +234,12 @@ func (c *Checker) Packs(errChan chan<- error, done <-chan struct{}) {
}
close(IDChan)
debug.Log("Checker.Packs", "waiting for %d workers to terminate", defaultParallelism)
debug.Log("waiting for %d workers to terminate", defaultParallelism)
workerWG.Wait()
debug.Log("Checker.Packs", "workers terminated")
debug.Log("workers terminated")
for id := range c.repo.List(restic.DataFile, done) {
debug.Log("Checker.Packs", "check data blob %v", id.Str())
debug.Log("check data blob %v", id.Str())
if !seenPacks.Has(id) {
c.orphanedPacks = append(c.orphanedPacks, id)
select {
@ -276,12 +276,12 @@ func (e Error) Error() string {
func loadTreeFromSnapshot(repo restic.Repository, id restic.ID) (restic.ID, error) {
sn, err := restic.LoadSnapshot(repo, id)
if err != nil {
debug.Log("Checker.loadTreeFromSnapshot", "error loading snapshot %v: %v", id.Str(), err)
debug.Log("error loading snapshot %v: %v", id.Str(), err)
return restic.ID{}, err
}
if sn.Tree == nil {
debug.Log("Checker.loadTreeFromSnapshot", "snapshot %v has no tree", id.Str())
debug.Log("snapshot %v has no tree", id.Str())
return restic.ID{}, errors.Errorf("snapshot %v has no tree", id)
}
@ -306,7 +306,7 @@ func loadSnapshotTreeIDs(repo restic.Repository) (restic.IDs, []error) {
return err
}
debug.Log("Checker.Snaphots", "load snapshot %v", id.Str())
debug.Log("load snapshot %v", id.Str())
treeID, err := loadTreeFromSnapshot(repo, id)
if err != nil {
@ -316,7 +316,7 @@ func loadSnapshotTreeIDs(repo restic.Repository) (restic.IDs, []error) {
return nil
}
debug.Log("Checker.Snaphots", "snapshot %v has tree %v", id.Str(), treeID.Str())
debug.Log("snapshot %v has tree %v", id.Str(), treeID.Str())
trees.Lock()
trees.IDs = append(trees.IDs, treeID)
trees.Unlock()
@ -354,7 +354,7 @@ func loadTreeWorker(repo restic.Repository,
done <-chan struct{}, wg *sync.WaitGroup) {
defer func() {
debug.Log("checker.loadTreeWorker", "exiting")
debug.Log("exiting")
wg.Done()
}()
@ -374,16 +374,16 @@ func loadTreeWorker(repo restic.Repository,
if !ok {
return
}
debug.Log("checker.loadTreeWorker", "load tree %v", treeID.Str())
debug.Log("load tree %v", treeID.Str())
tree, err := repo.LoadTree(treeID)
debug.Log("checker.loadTreeWorker", "load tree %v (%v) returned err: %v", tree, treeID.Str(), err)
debug.Log("load tree %v (%v) returned err: %v", tree, treeID.Str(), err)
job = treeJob{ID: treeID, error: err, Tree: tree}
outCh = out
inCh = nil
case outCh <- job:
debug.Log("checker.loadTreeWorker", "sent tree %v", job.ID.Str())
debug.Log("sent tree %v", job.ID.Str())
outCh = nil
inCh = in
}
@ -393,7 +393,7 @@ func loadTreeWorker(repo restic.Repository,
// checkTreeWorker checks the trees received and sends out errors to errChan.
func (c *Checker) checkTreeWorker(in <-chan treeJob, out chan<- error, done <-chan struct{}, wg *sync.WaitGroup) {
defer func() {
debug.Log("checker.checkTreeWorker", "exiting")
debug.Log("exiting")
wg.Done()
}()
@ -407,12 +407,12 @@ func (c *Checker) checkTreeWorker(in <-chan treeJob, out chan<- error, done <-ch
for {
select {
case <-done:
debug.Log("checker.checkTreeWorker", "done channel closed, exiting")
debug.Log("done channel closed, exiting")
return
case job, ok := <-inCh:
if !ok {
debug.Log("checker.checkTreeWorker", "input channel closed, exiting")
debug.Log("input channel closed, exiting")
return
}
@ -423,14 +423,14 @@ func (c *Checker) checkTreeWorker(in <-chan treeJob, out chan<- error, done <-ch
alreadyChecked = true
}
c.blobRefs.M[id]++
debug.Log("checker.checkTreeWorker", "tree %v refcount %d", job.ID.Str(), c.blobRefs.M[id])
debug.Log("tree %v refcount %d", job.ID.Str(), c.blobRefs.M[id])
c.blobRefs.Unlock()
if alreadyChecked {
continue
}
debug.Log("checker.checkTreeWorker", "check tree %v (tree %v, err %v)", job.ID.Str(), job.Tree, job.error)
debug.Log("check tree %v (tree %v, err %v)", job.ID.Str(), job.Tree, job.error)
var errs []error
if job.error != nil {
@ -440,14 +440,14 @@ func (c *Checker) checkTreeWorker(in <-chan treeJob, out chan<- error, done <-ch
}
if len(errs) > 0 {
debug.Log("checker.checkTreeWorker", "checked tree %v: %v errors", job.ID.Str(), len(errs))
debug.Log("checked tree %v: %v errors", job.ID.Str(), len(errs))
treeError = TreeError{ID: job.ID, Errors: errs}
outCh = out
inCh = nil
}
case outCh <- treeError:
debug.Log("checker.checkTreeWorker", "tree %v: sent %d errors", treeError.ID, len(treeError.Errors))
debug.Log("tree %v: sent %d errors", treeError.ID, len(treeError.Errors))
outCh = nil
inCh = in
}
@ -456,7 +456,7 @@ func (c *Checker) checkTreeWorker(in <-chan treeJob, out chan<- error, done <-ch
func filterTrees(backlog restic.IDs, loaderChan chan<- restic.ID, in <-chan treeJob, out chan<- treeJob, done <-chan struct{}) {
defer func() {
debug.Log("checker.filterTrees", "closing output channels")
debug.Log("closing output channels")
close(loaderChan)
close(out)
}()
@ -480,7 +480,7 @@ func filterTrees(backlog restic.IDs, loaderChan chan<- restic.ID, in <-chan tree
}
if loadCh == nil && outCh == nil && outstandingLoadTreeJobs == 0 {
debug.Log("checker.filterTrees", "backlog is empty, all channels nil, exiting")
debug.Log("backlog is empty, all channels nil, exiting")
return
}
@ -494,7 +494,7 @@ func filterTrees(backlog restic.IDs, loaderChan chan<- restic.ID, in <-chan tree
case j, ok := <-inCh:
if !ok {
debug.Log("checker.filterTrees", "input channel closed")
debug.Log("input channel closed")
inCh = nil
in = nil
continue
@ -502,23 +502,23 @@ func filterTrees(backlog restic.IDs, loaderChan chan<- restic.ID, in <-chan tree
outstandingLoadTreeJobs--
debug.Log("checker.filterTrees", "input job tree %v", j.ID.Str())
debug.Log("input job tree %v", j.ID.Str())
var err error
if j.error != nil {
debug.Log("checker.filterTrees", "received job with error: %v (tree %v, ID %v)", j.error, j.Tree, j.ID.Str())
debug.Log("received job with error: %v (tree %v, ID %v)", j.error, j.Tree, j.ID.Str())
} else if j.Tree == nil {
debug.Log("checker.filterTrees", "received job with nil tree pointer: %v (ID %v)", j.error, j.ID.Str())
debug.Log("received job with nil tree pointer: %v (ID %v)", j.error, j.ID.Str())
err = errors.New("tree is nil and error is nil")
} else {
debug.Log("checker.filterTrees", "subtrees for tree %v: %v", j.ID.Str(), j.Tree.Subtrees())
debug.Log("subtrees for tree %v: %v", j.ID.Str(), j.Tree.Subtrees())
for _, id := range j.Tree.Subtrees() {
if id.IsNull() {
// We do not need to raise this error here, it is
// checked when the tree is checked. Just make sure
// that we do not add any null IDs to the backlog.
debug.Log("checker.filterTrees", "tree %v has nil subtree", j.ID.Str())
debug.Log("tree %v has nil subtree", j.ID.Str())
continue
}
backlog = append(backlog, id)
@ -535,7 +535,7 @@ func filterTrees(backlog restic.IDs, loaderChan chan<- restic.ID, in <-chan tree
inCh = nil
case outCh <- job:
debug.Log("checker.FilterTrees", "tree sent to check: %v", job.ID.Str())
debug.Log("tree sent to check: %v", job.ID.Str())
outCh = nil
inCh = in
}
@ -549,7 +549,7 @@ func (c *Checker) Structure(errChan chan<- error, done <-chan struct{}) {
defer close(errChan)
trees, errs := loadSnapshotTreeIDs(c.repo)
debug.Log("checker.Structure", "need to check %d trees from snapshots, %d errs returned", len(trees), len(errs))
debug.Log("need to check %d trees from snapshots, %d errs returned", len(trees), len(errs))
for _, err := range errs {
select {
@ -576,7 +576,7 @@ func (c *Checker) Structure(errChan chan<- error, done <-chan struct{}) {
}
func (c *Checker) checkTree(id restic.ID, tree *restic.Tree) (errs []error) {
debug.Log("Checker.checkTree", "checking tree %v", id.Str())
debug.Log("checking tree %v", id.Str())
var blobs []restic.ID
@ -620,11 +620,11 @@ func (c *Checker) checkTree(id restic.ID, tree *restic.Tree) (errs []error) {
for _, blobID := range blobs {
c.blobRefs.Lock()
c.blobRefs.M[blobID]++
debug.Log("Checker.checkTree", "blob %v refcount %d", blobID.Str(), c.blobRefs.M[blobID])
debug.Log("blob %v refcount %d", blobID.Str(), c.blobRefs.M[blobID])
c.blobRefs.Unlock()
if !c.blobs.Has(blobID) {
debug.Log("Checker.trees", "tree %v references blob %v which isn't contained in index", id.Str(), blobID.Str())
debug.Log("tree %v references blob %v which isn't contained in index", id.Str(), blobID.Str())
errs = append(errs, Error{TreeID: id, BlobID: blobID, Err: errors.New("not found in index")})
}
@ -638,10 +638,10 @@ func (c *Checker) UnusedBlobs() (blobs restic.IDs) {
c.blobRefs.Lock()
defer c.blobRefs.Unlock()
debug.Log("Checker.UnusedBlobs", "checking %d blobs", len(c.blobs))
debug.Log("checking %d blobs", len(c.blobs))
for id := range c.blobs {
if c.blobRefs.M[id] == 0 {
debug.Log("Checker.UnusedBlobs", "blob %v not referenced", id.Str())
debug.Log("blob %v not referenced", id.Str())
blobs = append(blobs, id)
}
}
@ -656,7 +656,7 @@ func (c *Checker) CountPacks() uint64 {
// checkPack reads a pack and checks the integrity of all blobs.
func checkPack(r restic.Repository, id restic.ID) error {
debug.Log("Checker.checkPack", "checking pack %v", id.Str())
debug.Log("checking pack %v", id.Str())
h := restic.Handle{Type: restic.DataFile, Name: id.String()}
buf, err := backend.LoadAll(r.Backend(), h, nil)
if err != nil {
@ -665,7 +665,7 @@ func checkPack(r restic.Repository, id restic.ID) error {
hash := restic.Hash(buf)
if !hash.Equal(id) {
debug.Log("Checker.checkPack", "Pack ID does not match, want %v, got %v", id.Str(), hash.Str())
debug.Log("Pack ID does not match, want %v, got %v", id.Str(), hash.Str())
return errors.Errorf("Pack ID does not match, want %v, got %v", id.Str(), hash.Str())
}
@ -676,12 +676,12 @@ func checkPack(r restic.Repository, id restic.ID) error {
var errs []error
for i, blob := range blobs {
debug.Log("Checker.checkPack", " check blob %d: %v", i, blob.ID.Str())
debug.Log(" check blob %d: %v", i, blob.ID.Str())
plainBuf := make([]byte, blob.Length)
n, err := crypto.Decrypt(r.Key(), plainBuf, buf[blob.Offset:blob.Offset+blob.Length])
if err != nil {
debug.Log("Checker.checkPack", " error decrypting blob %v: %v", blob.ID.Str(), err)
debug.Log(" error decrypting blob %v: %v", blob.ID.Str(), err)
errs = append(errs, errors.Errorf("blob %v: %v", i, err))
continue
}
@ -689,7 +689,7 @@ func checkPack(r restic.Repository, id restic.ID) error {
hash := restic.Hash(plainBuf)
if !hash.Equal(blob.ID) {
debug.Log("Checker.checkPack", " Blob ID does not match, want %v, got %v", blob.ID.Str(), hash.Str())
debug.Log(" Blob ID does not match, want %v, got %v", blob.ID.Str(), hash.Str())
errs = append(errs, errors.Errorf("Blob ID does not match, want %v, got %v", blob.ID.Str(), hash.Str()))
continue
}
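
checkPack verifies integrity on two levels: the pack file as a whole must hash to its own ID, and every decrypted blob must hash to the blob ID recorded in the pack. Both checks rest on content-addressed storage, where an ID is simply the SHA-256 of the bytes. A minimal, self-contained sketch of that comparison (the helper name `verifyID` is illustrative, not restic API):

    package main

    import (
        "bytes"
        "crypto/sha256"
        "fmt"
    )

    // verifyID re-computes the SHA-256 of buf and compares it with the
    // expected content ID, mirroring restic.Hash(buf).Equal(id).
    func verifyID(buf []byte, id [sha256.Size]byte) bool {
        sum := sha256.Sum256(buf)
        return bytes.Equal(sum[:], id[:])
    }

    func main() {
        data := []byte("example pack contents")
        id := sha256.Sum256(data)
        fmt.Println(verifyID(data, id))     // true
        fmt.Println(verifyID(data[:5], id)) // false: truncated data
    }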


@ -42,7 +42,7 @@ func CreateConfig() (Config, error) {
cfg.ID = NewRandomID().String()
cfg.Version = RepoVersion
debug.Log("Repo.CreateConfig", "New config: %#v", cfg)
debug.Log("New config: %#v", cfg)
return cfg, nil
}


@ -10,24 +10,15 @@ import (
"path/filepath"
"restic/fs"
"runtime"
"strconv"
"strings"
"sync"
"time"
"restic/errors"
)
type process struct {
tag string
goroutine int
}
var opts struct {
logger *log.Logger
tags map[string]bool
last map[process]time.Time
m sync.Mutex
funcs map[string]bool
files map[string]bool
}
// make sure that all the initialization happens before the init() functions
@ -73,23 +64,16 @@ func initDebugLogger() {
opts.logger = log.New(f, "", log.LstdFlags)
}
func initDebugTags() {
opts.tags = make(map[string]bool)
opts.last = make(map[process]time.Time)
func parseFilter(envname string, pad func(string) string) map[string]bool {
filter := make(map[string]bool)
// defaults
opts.tags["break"] = true
// initialize tags
env := os.Getenv("DEBUG_TAGS")
if len(env) == 0 {
return
env := os.Getenv(envname)
if env == "" {
return filter
}
tags := []string{}
for _, tag := range strings.Split(env, ",") {
t := strings.TrimSpace(tag)
for _, fn := range strings.Split(env, ",") {
t := pad(strings.TrimSpace(fn))
val := true
if t[0] == '-' {
val = false
@ -106,11 +90,39 @@ func initDebugTags() {
os.Exit(5)
}
opts.tags[t] = val
tags = append(tags, tag)
filter[t] = val
}
fmt.Fprintf(os.Stderr, "debug log enabled for: %v\n", tags)
return filter
}
func padFunc(s string) string {
if s == "all" {
return s
}
return s
}
func padFile(s string) string {
if s == "all" {
return s
}
if !strings.Contains(s, "/") {
s = "*/" + s
}
if !strings.Contains(s, ":") {
s = s + ":*"
}
return s
}
func initDebugTags() {
opts.funcs = parseFilter("DEBUG_FUNCS", padFunc)
opts.files = parseFilter("DEBUG_FILES", padFile)
}
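
padFile normalizes a bare `DEBUG_FILES` entry into a full position pattern: a name without a directory part gains a `*/` prefix and one without a line number gains a `:*` suffix, so `lock.go` effectively becomes `*/lock.go:*`, ready to be matched against positions such as `restic/lock.go:213`. A small sketch of that normalization, re-implemented locally for illustration:

    package main

    import (
        "fmt"
        "path"
        "strings"
    )

    // pad mirrors padFile above: add "*/" when the pattern has no
    // directory component and ":*" when it has no line number.
    func pad(s string) string {
        if !strings.Contains(s, "/") {
            s = "*/" + s
        }
        if !strings.Contains(s, ":") {
            s = s + ":*"
        }
        return s
    }

    func main() {
        p := pad("lock.go")
        ok, _ := path.Match(p, "restic/lock.go:213")
        fmt.Println(p, ok) // */lock.go:* true
    }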
// taken from https://github.com/VividCortex/trace
@ -124,40 +136,52 @@ func goroutineNum() int {
}
// taken from https://github.com/VividCortex/trace
func getPosition(goroutine int) string {
_, file, line, ok := runtime.Caller(2)
func getPosition() (fn, dir, file string, line int) {
pc, file, line, ok := runtime.Caller(2)
if !ok {
return ""
return "", "", "", 0
}
return fmt.Sprintf("%3d %s:%d", goroutine, filepath.Base(file), line)
dirname, filename := filepath.Base(filepath.Dir(file)), filepath.Base(file)
Func := runtime.FuncForPC(pc)
return path.Base(Func.Name()), dirname, filename, line
}
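
getPosition now derives the function name, directory, file, and line from the caller's program counter, which is what makes the explicit tag argument redundant. The same technique in isolation (skip count 1 here, because the helper reports its own caller rather than Log's caller):

    package main

    import (
        "fmt"
        "path"
        "path/filepath"
        "runtime"
    )

    // where reports the calling function and its dir/file:line position,
    // the same way the reworked getPosition does.
    func where() string {
        pc, file, line, ok := runtime.Caller(1) // 1 = caller of where()
        if !ok {
            return "unknown"
        }
        fn := path.Base(runtime.FuncForPC(pc).Name())
        dir := filepath.Base(filepath.Dir(file))
        return fmt.Sprintf("%s %s/%s:%d", fn, dir, filepath.Base(file), line)
    }

    func main() {
        fmt.Println(where()) // e.g. "main.main demo/main.go:24"
    }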
var maxTagLen = 10
func Log(tag string, f string, args ...interface{}) {
opts.m.Lock()
defer opts.m.Unlock()
goroutine := goroutineNum()
last, ok := opts.last[process{tag, goroutine}]
if !ok {
last = time.Now()
func checkFilter(filter map[string]bool, key string) bool {
// check if key is enabled directly
if v, ok := filter[key]; ok {
return v
}
current := time.Now()
opts.last[process{tag, goroutine}] = current
// check for globbing
for k, v := range filter {
if m, _ := path.Match(k, key); m {
return v
}
}
// check if tag "all" is enabled
if v, ok := filter["all"]; ok && v {
return true
}
return false
}
// Log prints a message to the debug log (if debug is enabled).
func Log(f string, args ...interface{}) {
fn, dir, file, line := getPosition()
goroutine := goroutineNum()
if len(f) == 0 || f[len(f)-1] != '\n' {
f += "\n"
}
if len(tag) > maxTagLen {
maxTagLen = len(tag)
}
pos := fmt.Sprintf("%s/%s:%d", dir, file, line)
formatStringTag := "%2.3f [%" + strconv.FormatInt(int64(maxTagLen), 10) + "s]"
formatString := fmt.Sprintf(formatStringTag+" %s %s", current.Sub(last).Seconds(), tag, getPosition(goroutine), f)
formatString := fmt.Sprintf("%s\t%s\t%d\t%s", pos, fn, goroutine, f)
dbgprint := func() {
fmt.Fprintf(os.Stderr, formatString, args...)
@ -167,26 +191,12 @@ func Log(tag string, f string, args ...interface{}) {
opts.logger.Printf(formatString, args...)
}
// check if tag is enabled directly
if v, ok := opts.tags[tag]; ok {
if v {
if checkFilter(opts.files, fmt.Sprintf("%s/%s:%d", dir, file, line)) {
dbgprint()
}
return
}
// check for globbing
for k, v := range opts.tags {
if m, _ := path.Match(k, tag); m {
if v {
dbgprint()
}
return
}
}
// check if tag "all" is enabled
if v, ok := opts.tags["all"]; ok && v {
if checkFilter(opts.funcs, fn) {
dbgprint()
}
}
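
checkFilter resolves a key in three steps: an exact entry wins first (which is how a leading `-` disable takes effect, since parseFilter stores it with a false value), then the first matching glob, then the catch-all `all`. A sketch of that precedence, assuming a filter as parseFilter might build from `DEBUG_FUNCS=*unlock*,-doUnlockAll` (the function names are made up):

    package main

    import (
        "fmt"
        "path"
    )

    // match follows the same precedence as checkFilter: exact entry,
    // then first glob hit, then the catch-all "all".
    func match(filter map[string]bool, key string) bool {
        if v, ok := filter[key]; ok {
            return v
        }
        for k, v := range filter {
            if m, _ := path.Match(k, key); m {
                return v
            }
        }
        return filter["all"]
    }

    func main() {
        filter := map[string]bool{"*unlock*": true, "doUnlockAll": false}
        fmt.Println(match(filter, "unlockRepo"))  // true, via the glob
        fmt.Println(match(filter, "doUnlockAll")) // false, exact disable wins
        fmt.Println(match(filter, "lockRepo"))    // false, no entry matches
    }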


@ -2,4 +2,5 @@
package debug
func Log(tag string, fmt string, args ...interface{}) {}
// Log prints a message to the debug log (if debug is enabled).
func Log(fmt string, args ...interface{}) {}


@ -27,10 +27,10 @@ type dir struct {
}
func newDir(repo restic.Repository, node *restic.Node, ownerIsRoot bool) (*dir, error) {
debug.Log("newDir", "new dir for %v (%v)", node.Name, node.Subtree.Str())
debug.Log("new dir for %v (%v)", node.Name, node.Subtree.Str())
tree, err := repo.LoadTree(*node.Subtree)
if err != nil {
debug.Log("newDir", " error loading tree %v: %v", node.Subtree.Str(), err)
debug.Log(" error loading tree %v: %v", node.Subtree.Str(), err)
return nil, err
}
items := make(map[string]*restic.Node)
@ -67,17 +67,17 @@ func replaceSpecialNodes(repo restic.Repository, node *restic.Node) ([]*restic.N
}
func newDirFromSnapshot(repo restic.Repository, snapshot SnapshotWithId, ownerIsRoot bool) (*dir, error) {
debug.Log("newDirFromSnapshot", "new dir for snapshot %v (%v)", snapshot.ID.Str(), snapshot.Tree.Str())
debug.Log("new dir for snapshot %v (%v)", snapshot.ID.Str(), snapshot.Tree.Str())
tree, err := repo.LoadTree(*snapshot.Tree)
if err != nil {
debug.Log("newDirFromSnapshot", " loadTree(%v) failed: %v", snapshot.ID.Str(), err)
debug.Log(" loadTree(%v) failed: %v", snapshot.ID.Str(), err)
return nil, err
}
items := make(map[string]*restic.Node)
for _, n := range tree.Nodes {
nodes, err := replaceSpecialNodes(repo, n)
if err != nil {
debug.Log("newDirFromSnapshot", " replaceSpecialNodes(%v) failed: %v", n, err)
debug.Log(" replaceSpecialNodes(%v) failed: %v", n, err)
return nil, err
}
@ -103,7 +103,7 @@ func newDirFromSnapshot(repo restic.Repository, snapshot SnapshotWithId, ownerIs
}
func (d *dir) Attr(ctx context.Context, a *fuse.Attr) error {
debug.Log("dir.Attr", "called")
debug.Log("called")
a.Inode = d.inode
a.Mode = os.ModeDir | d.node.Mode
@ -118,7 +118,7 @@ func (d *dir) Attr(ctx context.Context, a *fuse.Attr) error {
}
func (d *dir) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) {
debug.Log("dir.ReadDirAll", "called")
debug.Log("called")
ret := make([]fuse.Dirent, 0, len(d.items))
for _, node := range d.items {
@ -143,10 +143,10 @@ func (d *dir) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) {
}
func (d *dir) Lookup(ctx context.Context, name string) (fs.Node, error) {
debug.Log("dir.Lookup", "Lookup(%v)", name)
debug.Log("Lookup(%v)", name)
node, ok := d.items[name]
if !ok {
debug.Log("dir.Lookup", " Lookup(%v) -> not found", name)
debug.Log(" Lookup(%v) -> not found", name)
return nil, fuse.ENOENT
}
switch node.Type {
@ -157,7 +157,7 @@ func (d *dir) Lookup(ctx context.Context, name string) (fs.Node, error) {
case "symlink":
return newLink(d.repo, node, d.ownerIsRoot)
default:
debug.Log("dir.Lookup", " node %v has unknown type %v", name, node.Type)
debug.Log(" node %v has unknown type %v", name, node.Type)
return nil, fuse.ENOENT
}
}


@ -48,7 +48,7 @@ var blobPool = sync.Pool{
}
func newFile(repo BlobLoader, node *restic.Node, ownerIsRoot bool) (*file, error) {
debug.Log("newFile", "create new file for %v with %d blobs", node.Name, len(node.Content))
debug.Log("create new file for %v with %d blobs", node.Name, len(node.Content))
var bytes uint64
sizes := make([]uint, len(node.Content))
for i, id := range node.Content {
@ -62,7 +62,7 @@ func newFile(repo BlobLoader, node *restic.Node, ownerIsRoot bool) (*file, error
}
if bytes != node.Size {
debug.Log("newFile", "sizes do not match: node.Size %v != size %v, using real size", node.Size, bytes)
debug.Log("sizes do not match: node.Size %v != size %v, using real size", node.Size, bytes)
node.Size = bytes
}
@ -76,7 +76,7 @@ func newFile(repo BlobLoader, node *restic.Node, ownerIsRoot bool) (*file, error
}
func (f *file) Attr(ctx context.Context, a *fuse.Attr) error {
debug.Log("file.Attr", "Attr(%v)", f.node.Name)
debug.Log("Attr(%v)", f.node.Name)
a.Inode = f.node.Inode
a.Mode = f.node.Mode
a.Size = f.node.Size
@ -94,7 +94,7 @@ func (f *file) Attr(ctx context.Context, a *fuse.Attr) error {
}
func (f *file) getBlobAt(i int) (blob []byte, err error) {
debug.Log("file.getBlobAt", "getBlobAt(%v, %v)", f.node.Name, i)
debug.Log("getBlobAt(%v, %v)", f.node.Name, i)
if f.blobs[i] != nil {
return f.blobs[i], nil
}
@ -111,7 +111,7 @@ func (f *file) getBlobAt(i int) (blob []byte, err error) {
n, err := f.repo.LoadBlob(restic.DataBlob, f.node.Content[i], buf)
if err != nil {
debug.Log("file.getBlobAt", "LoadBlob(%v, %v) failed: %v", f.node.Name, f.node.Content[i], err)
debug.Log("LoadBlob(%v, %v) failed: %v", f.node.Name, f.node.Content[i], err)
return nil, err
}
f.blobs[i] = buf[:n]
@ -120,11 +120,11 @@ func (f *file) getBlobAt(i int) (blob []byte, err error) {
}
func (f *file) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error {
debug.Log("file.Read", "Read(%v, %v, %v), file size %v", f.node.Name, req.Size, req.Offset, f.node.Size)
debug.Log("Read(%v, %v, %v), file size %v", f.node.Name, req.Size, req.Offset, f.node.Size)
offset := req.Offset
if uint64(offset) > f.node.Size {
debug.Log("file.Read", "Read(%v): offset is greater than file size: %v > %v",
debug.Log("Read(%v): offset is greater than file size: %v > %v",
f.node.Name, req.Offset, f.node.Size)
return errors.New("offset greater than file size")
}
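
Besides rejecting reads that start past the end of the file, a read handler must also clamp the requested length to the bytes actually remaining. A generic sketch of that bounds logic, kept independent of the fuse types used above (`clampRead` is illustrative):

    package main

    import (
        "errors"
        "fmt"
    )

    // clampRead validates a read request against the file size and
    // returns how many bytes can actually be served.
    func clampRead(offset int64, size int, fileSize uint64) (int, error) {
        if offset < 0 || uint64(offset) > fileSize {
            return 0, errors.New("offset greater than file size")
        }
        if remaining := fileSize - uint64(offset); uint64(size) > remaining {
            size = int(remaining)
        }
        return size, nil
    }

    func main() {
        n, err := clampRead(90, 20, 100)
        fmt.Println(n, err) // 10 <nil>: only 10 bytes remain past offset 90
    }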


@ -41,7 +41,7 @@ type SnapshotsDir struct {
// NewSnapshotsDir returns a new dir object for the snapshots.
func NewSnapshotsDir(repo restic.Repository, ownerIsRoot bool) *SnapshotsDir {
debug.Log("NewSnapshotsDir", "fuse mount initiated")
debug.Log("fuse mount initiated")
return &SnapshotsDir{
repo: repo,
knownSnapshots: make(map[string]SnapshotWithId),
@ -58,22 +58,22 @@ func (sn *SnapshotsDir) Attr(ctx context.Context, attr *fuse.Attr) error {
attr.Uid = uint32(os.Getuid())
attr.Gid = uint32(os.Getgid())
}
debug.Log("SnapshotsDir.Attr", "attr is %v", attr)
debug.Log("attr is %v", attr)
return nil
}
func (sn *SnapshotsDir) updateCache(ctx context.Context) error {
debug.Log("SnapshotsDir.updateCache", "called")
debug.Log("called")
sn.Lock()
defer sn.Unlock()
for id := range sn.repo.List(restic.SnapshotFile, ctx.Done()) {
if sn.processed.Has(id) {
debug.Log("SnapshotsDir.List", "skipping snapshot %v, already in list", id.Str())
debug.Log("skipping snapshot %v, already in list", id.Str())
continue
}
debug.Log("SnapshotsDir.List", "found snapshot id %v", id.Str())
debug.Log("found snapshot id %v", id.Str())
snapshot, err := restic.LoadSnapshot(sn.repo, id)
if err != nil {
return err
@ -88,7 +88,7 @@ func (sn *SnapshotsDir) updateCache(ctx context.Context) error {
timestamp = fmt.Sprintf("%s-%d", snapshot.Time.Format(time.RFC3339), i)
}
debug.Log("SnapshotsDir.List", " add %v as dir %v", id.Str(), timestamp)
debug.Log(" add %v as dir %v", id.Str(), timestamp)
sn.knownSnapshots[timestamp] = SnapshotWithId{snapshot, id}
sn.processed.Insert(id)
}
@ -99,12 +99,12 @@ func (sn *SnapshotsDir) get(name string) (snapshot SnapshotWithId, ok bool) {
sn.RLock()
snapshot, ok = sn.knownSnapshots[name]
sn.RUnlock()
debug.Log("SnapshotsDir.get", "get(%s) -> %v %v", name, snapshot, ok)
debug.Log("get(%s) -> %v %v", name, snapshot, ok)
return snapshot, ok
}
func (sn *SnapshotsDir) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) {
debug.Log("SnapshotsDir.ReadDirAll", "called")
debug.Log("called")
err := sn.updateCache(ctx)
if err != nil {
return nil, err
@ -122,25 +122,25 @@ func (sn *SnapshotsDir) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) {
})
}
debug.Log("SnapshotsDir.ReadDirAll", " -> %d entries", len(ret))
debug.Log(" -> %d entries", len(ret))
return ret, nil
}
func (sn *SnapshotsDir) Lookup(ctx context.Context, name string) (fs.Node, error) {
debug.Log("SnapshotsDir.Lookup", "Lookup(%s)", name)
debug.Log("Lookup(%s)", name)
snapshot, ok := sn.get(name)
if !ok {
// We don't know about it, update the cache
err := sn.updateCache(ctx)
if err != nil {
debug.Log("SnapshotsDir.Lookup", " Lookup(%s) -> err %v", name, err)
debug.Log(" Lookup(%s) -> err %v", name, err)
return nil, err
}
snapshot, ok = sn.get(name)
if !ok {
// We still don't know about it, this time it really doesn't exist
debug.Log("SnapshotsDir.Lookup", " Lookup(%s) -> not found", name)
debug.Log(" Lookup(%s) -> not found", name)
return nil, fuse.ENOENT
}
}


@ -63,7 +63,7 @@ func New(repo restic.Repository, p *restic.Progress) (*Index, error) {
j := job.Result.(list.Result)
debug.Log("Index.New", "pack %v contains %d blobs", packID.Str(), len(j.Entries()))
debug.Log("pack %v contains %d blobs", packID.Str(), len(j.Entries()))
err := idx.AddPack(packID, j.Size(), j.Entries())
if err != nil {
@ -95,7 +95,7 @@ type indexJSON struct {
}
func loadIndexJSON(repo restic.Repository, id restic.ID) (*indexJSON, error) {
debug.Log("index.loadIndexJSON", "process index %v\n", id.Str())
debug.Log("process index %v\n", id.Str())
var idx indexJSON
err := repo.LoadJSONUnpacked(restic.IndexFile, id, &idx)
@ -108,7 +108,7 @@ func loadIndexJSON(repo restic.Repository, id restic.ID) (*indexJSON, error) {
// Load creates an index by loading all index files from the repo.
func Load(repo restic.Repository, p *restic.Progress) (*Index, error) {
debug.Log("index.Load", "loading indexes")
debug.Log("loading indexes")
p.Start()
defer p.Done()
@ -124,7 +124,7 @@ func Load(repo restic.Repository, p *restic.Progress) (*Index, error) {
for id := range repo.List(restic.IndexFile, done) {
p.Report(restic.Stat{Blobs: 1})
debug.Log("index.Load", "Load index %v", id.Str())
debug.Log("Load index %v", id.Str())
idx, err := loadIndexJSON(repo, id)
if err != nil {
return nil, err
@ -133,7 +133,7 @@ func Load(repo restic.Repository, p *restic.Progress) (*Index, error) {
res := make(map[restic.ID]Pack)
supersedes[id] = restic.NewIDSet()
for _, sid := range idx.Supersedes {
debug.Log("index.Load", " index %v supersedes %v", id.Str(), sid)
debug.Log(" index %v supersedes %v", id.Str(), sid)
supersedes[id].Insert(sid)
}
@ -163,7 +163,7 @@ func Load(repo restic.Repository, p *restic.Progress) (*Index, error) {
if _, ok := results[indexID]; !ok {
continue
}
debug.Log("index.Load", " removing index %v, superseded by %v", indexID.Str(), superID.Str())
debug.Log(" removing index %v, superseded by %v", indexID.Str(), superID.Str())
fmt.Fprintf(os.Stderr, "index %v can be removed, superseded by index %v\n", indexID.Str(), superID.Str())
delete(results, indexID)
}


@ -195,15 +195,15 @@ var staleTimeout = 30 * time.Minute
// older than 30 minutes or if it was created on the current machine and the
// process isn't alive any more.
func (l *Lock) Stale() bool {
debug.Log("Lock.Stale", "testing if lock %v for process %d is stale", l, l.PID)
debug.Log("testing if lock %v for process %d is stale", l, l.PID)
if time.Since(l.Time) > staleTimeout {
debug.Log("Lock.Stale", "lock is stale, timestamp is too old: %v\n", l.Time)
debug.Log("lock is stale, timestamp is too old: %v\n", l.Time)
return true
}
hn, err := os.Hostname()
if err != nil {
debug.Log("Lock.Stale", "unable to find current hostnanme: %v", err)
debug.Log("unable to find current hostnanme: %v", err)
// since we cannot find the current hostname, assume that the lock is
// not stale.
return false
@ -217,18 +217,18 @@ func (l *Lock) Stale() bool {
// check if we can reach the process retaining the lock
exists := l.processExists()
if !exists {
debug.Log("Lock.Stale", "could not reach process, %d, lock is probably stale\n", l.PID)
debug.Log("could not reach process, %d, lock is probably stale\n", l.PID)
return true
}
debug.Log("Lock.Stale", "lock not stale\n")
debug.Log("lock not stale\n")
return false
}
// Refresh refreshes the lock by creating a new file in the backend with a new
// timestamp. Afterwards the old lock is removed.
func (l *Lock) Refresh() error {
debug.Log("Lock.Refresh", "refreshing lock %v", l.lockID.Str())
debug.Log("refreshing lock %v", l.lockID.Str())
id, err := l.createLock()
if err != nil {
return err
@ -239,7 +239,7 @@ func (l *Lock) Refresh() error {
return err
}
debug.Log("Lock.Refresh", "new lock ID %v", id.Str())
debug.Log("new lock ID %v", id.Str())
l.lockID = &id
return nil
@ -251,10 +251,6 @@ func (l Lock) String() string {
l.Time.Format("2006-01-02 15:04:05"), time.Since(l.Time),
l.lockID.Str())
if l.Stale() {
text += " (stale)"
}
return text
}
@ -267,7 +263,7 @@ func init() {
c := make(chan os.Signal)
signal.Notify(c, syscall.SIGHUP)
for s := range c {
debug.Log("lock.ignoreSIGHUP", "Signal received: %v\n", s)
debug.Log("Signal received: %v\n", s)
}
}()
})


@ -35,15 +35,15 @@ func uidGidInt(u user.User) (uid, gid uint32, err error) {
func (l Lock) processExists() bool {
proc, err := os.FindProcess(l.PID)
if err != nil {
debug.Log("Lock.Stale", "error searching for process %d: %v\n", l.PID, err)
debug.Log("error searching for process %d: %v\n", l.PID, err)
return false
}
defer proc.Release()
debug.Log("Lock.Stale", "sending SIGHUP to process %d\n", l.PID)
debug.Log("sending SIGHUP to process %d\n", l.PID)
err = proc.Signal(syscall.SIGHUP)
if err != nil {
debug.Log("Lock.Stale", "signal error: %v, lock is probably stale\n", err)
debug.Log("signal error: %v, lock is probably stale\n", err)
return false
}
return true
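
`os.FindProcess` never fails on Unix, so the `Signal` call is what actually probes for the process. The diff sends SIGHUP, which is safe only because the `init` in lock.go above installs a handler that logs and discards it; the more conventional probe is signal 0, which performs the existence and permission checks without delivering anything. A hedged sketch of the signal-0 variant (Unix only):

    package main

    import (
        "fmt"
        "os"
        "syscall"
    )

    // alive probes a PID with signal 0: nothing is delivered, but the
    // kernel still reports whether the process exists. Note that an
    // EPERM error would also mean the process exists; that case is
    // ignored here for brevity.
    func alive(pid int) bool {
        proc, err := os.FindProcess(pid) // never fails on Unix
        if err != nil {
            return false
        }
        return proc.Signal(syscall.Signal(0)) == nil
    }

    func main() {
        fmt.Println(alive(os.Getpid())) // true: we are certainly running
        fmt.Println(alive(99999999))    // almost certainly false
    }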


@ -17,7 +17,7 @@ func uidGidInt(u user.User) (uid, gid uint32, err error) {
func (l Lock) processExists() bool {
proc, err := os.FindProcess(l.PID)
if err != nil {
debug.Log("Lock.Stale", "error searching for process %d: %v\n", l.PID, err)
debug.Log("error searching for process %d: %v\n", l.PID, err)
return false
}
proc.Release()


@ -98,7 +98,7 @@ func nodeTypeFromFileInfo(fi os.FileInfo) string {
// CreateAt creates the node at the given path and restores all the meta data.
func (node *Node) CreateAt(path string, repo Repository) error {
debug.Log("Node.CreateAt", "create node %v at %v", node.Name, path)
debug.Log("create node %v at %v", node.Name, path)
switch node.Type {
case "dir":
@ -133,7 +133,7 @@ func (node *Node) CreateAt(path string, repo Repository) error {
err := node.restoreMetadata(path)
if err != nil {
debug.Log("Node.CreateAt", "restoreMetadata(%s) error %v", path, err)
debug.Log("restoreMetadata(%s) error %v", path, err)
}
return err
@ -157,7 +157,7 @@ func (node Node) restoreMetadata(path string) error {
if node.Type != "dir" {
err = node.RestoreTimestamps(path)
if err != nil {
debug.Log("Node.restoreMetadata", "error restoring timestamps for dir %v: %v", path, err)
debug.Log("error restoring timestamps for dir %v: %v", path, err)
return err
}
}
@ -385,13 +385,13 @@ func (node Node) sameContent(other Node) bool {
// IsNewer returns true if the file has been updated since the last Stat().
func (node *Node) IsNewer(path string, fi os.FileInfo) bool {
if node.Type != "file" {
debug.Log("node.IsNewer", "node %v is newer: not file", path)
debug.Log("node %v is newer: not file", path)
return true
}
tpe := nodeTypeFromFileInfo(fi)
if node.Name != fi.Name() || node.Type != tpe {
debug.Log("node.IsNewer", "node %v is newer: name or type changed", path)
debug.Log("node %v is newer: name or type changed", path)
return true
}
@ -401,7 +401,7 @@ func (node *Node) IsNewer(path string, fi os.FileInfo) bool {
if !ok {
if node.ModTime != fi.ModTime() ||
node.Size != size {
debug.Log("node.IsNewer", "node %v is newer: timestamp or size changed", path)
debug.Log("node %v is newer: timestamp or size changed", path)
return true
}
return false
@ -413,11 +413,11 @@ func (node *Node) IsNewer(path string, fi os.FileInfo) bool {
node.ChangeTime != changeTime(extendedStat) ||
node.Inode != uint64(inode) ||
node.Size != size {
debug.Log("node.IsNewer", "node %v is newer: timestamp, size or inode changed", path)
debug.Log("node %v is newer: timestamp, size or inode changed", path)
return true
}
debug.Log("node.IsNewer", "node %v is not newer", path)
debug.Log("node %v is not newer", path)
return false
}
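
The rule implemented above: a different name or type, or any drift in timestamp, size, inode, or change time, marks the node as newer and forces a re-read. A reduced sketch of the fallback comparison used when no extended stat data is available (`stored` is an illustrative stand-in for a Node's saved metadata; `time.Time` is compared with `Equal` here rather than `!=`):

    package main

    import (
        "fmt"
        "os"
        "time"
    )

    // stored holds the subset of metadata this sketch compares.
    type stored struct {
        Name    string
        Size    uint64
        ModTime time.Time
    }

    // isNewer mirrors the fallback path of Node.IsNewer: when inode and
    // ctime are unavailable, name, modification time and size decide.
    func isNewer(n stored, fi os.FileInfo) bool {
        if n.Name != fi.Name() {
            return true
        }
        return !n.ModTime.Equal(fi.ModTime()) || n.Size != uint64(fi.Size())
    }

    func main() {
        fi, err := os.Stat(os.Args[0])
        if err != nil {
            panic(err)
        }
        old := stored{Name: fi.Name(), Size: uint64(fi.Size()), ModTime: fi.ModTime()}
        fmt.Println(isNewer(old, fi)) // false: nothing changed
    }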


@ -79,7 +79,7 @@ func readDirNames(dirname string) ([]string, error) {
type SelectFunc func(item string, fi os.FileInfo) bool
func walk(basedir, dir string, selectFunc SelectFunc, done <-chan struct{}, jobs chan<- Job, res chan<- Result) (excluded bool) {
debug.Log("pipe.walk", "start on %q, basedir %q", dir, basedir)
debug.Log("start on %q, basedir %q", dir, basedir)
relpath, err := filepath.Rel(basedir, dir)
if err != nil {
@ -89,7 +89,7 @@ func walk(basedir, dir string, selectFunc SelectFunc, done <-chan struct{}, jobs
info, err := fs.Lstat(dir)
if err != nil {
err = errors.Wrap(err, "Lstat")
debug.Log("pipe.walk", "error for %v: %v, res %p", dir, err, res)
debug.Log("error for %v: %v, res %p", dir, err, res)
select {
case jobs <- Dir{basedir: basedir, path: relpath, info: info, error: err, result: res}:
case <-done:
@ -98,13 +98,13 @@ func walk(basedir, dir string, selectFunc SelectFunc, done <-chan struct{}, jobs
}
if !selectFunc(dir, info) {
debug.Log("pipe.walk", "file %v excluded by filter, res %p", dir, res)
debug.Log("file %v excluded by filter, res %p", dir, res)
excluded = true
return
}
if !info.IsDir() {
debug.Log("pipe.walk", "sending file job for %v, res %p", dir, res)
debug.Log("sending file job for %v, res %p", dir, res)
select {
case jobs <- Entry{info: info, basedir: basedir, path: relpath, result: res}:
case <-done:
@ -115,7 +115,7 @@ func walk(basedir, dir string, selectFunc SelectFunc, done <-chan struct{}, jobs
debug.RunHook("pipe.readdirnames", dir)
names, err := readDirNames(dir)
if err != nil {
debug.Log("pipe.walk", "Readdirnames(%v) returned error: %v, res %p", dir, err, res)
debug.Log("Readdirnames(%v) returned error: %v, res %p", dir, err, res)
select {
case <-done:
case jobs <- Dir{basedir: basedir, path: relpath, info: info, error: err, result: res}:
@ -134,7 +134,7 @@ func walk(basedir, dir string, selectFunc SelectFunc, done <-chan struct{}, jobs
fi, statErr := fs.Lstat(subpath)
if !selectFunc(subpath, fi) {
debug.Log("pipe.walk", "file %v excluded by filter", subpath)
debug.Log("file %v excluded by filter", subpath)
continue
}
@ -143,7 +143,7 @@ func walk(basedir, dir string, selectFunc SelectFunc, done <-chan struct{}, jobs
if statErr != nil {
statErr = errors.Wrap(statErr, "Lstat")
debug.Log("pipe.walk", "sending file job for %v, err %v, res %p", subpath, err, res)
debug.Log("sending file job for %v, err %v, res %p", subpath, err, res)
select {
case jobs <- Entry{info: fi, error: statErr, basedir: basedir, path: filepath.Join(relpath, name), result: ch}:
case <-done:
@ -159,7 +159,7 @@ func walk(basedir, dir string, selectFunc SelectFunc, done <-chan struct{}, jobs
walk(basedir, subpath, selectFunc, done, jobs, ch)
}
debug.Log("pipe.walk", "sending dirjob for %q, basedir %q, res %p", dir, basedir, res)
debug.Log("sending dirjob for %q, basedir %q, res %p", dir, basedir, res)
select {
case jobs <- Dir{basedir: basedir, path: relpath, info: info, Entries: entries, result: res}:
case <-done:
@ -198,48 +198,48 @@ func Walk(walkPaths []string, selectFunc SelectFunc, done chan struct{}, jobs ch
ps, err := cleanupPath(p)
if err != nil {
fmt.Fprintf(os.Stderr, "Readdirnames(%v): %v, skipping\n", p, err)
debug.Log("pipe.Walk", "Readdirnames(%v) returned error: %v, skipping", p, err)
debug.Log("Readdirnames(%v) returned error: %v, skipping", p, err)
continue
}
paths = append(paths, ps...)
}
debug.Log("pipe.Walk", "start on %v", paths)
debug.Log("start on %v", paths)
defer func() {
debug.Log("pipe.Walk", "output channel closed")
debug.Log("output channel closed")
close(jobs)
}()
entries := make([]<-chan Result, 0, len(paths))
for _, path := range paths {
debug.Log("pipe.Walk", "start walker for %v", path)
debug.Log("start walker for %v", path)
ch := make(chan Result, 1)
excluded := walk(filepath.Dir(path), path, selectFunc, done, jobs, ch)
if excluded {
debug.Log("pipe.Walk", "walker for %v done, it was excluded by the filter", path)
debug.Log("walker for %v done, it was excluded by the filter", path)
continue
}
entries = append(entries, ch)
debug.Log("pipe.Walk", "walker for %v done", path)
debug.Log("walker for %v done", path)
}
debug.Log("pipe.Walk", "sending root node, res %p", res)
debug.Log("sending root node, res %p", res)
select {
case <-done:
return
case jobs <- Dir{Entries: entries, result: res}:
}
debug.Log("pipe.Walk", "walker done")
debug.Log("walker done")
}
// Split feeds all elements read from inChan to dirChan and entChan.
func Split(inChan <-chan Job, dirChan chan<- Dir, entChan chan<- Entry) {
debug.Log("pipe.Split", "start")
defer debug.Log("pipe.Split", "done")
debug.Log("start")
defer debug.Log("done")
inCh := inChan
dirCh := dirChan
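
Every channel send in walk is wrapped in a select against done, so shutting the pipeline down can never leave a goroutine blocked on a full channel. The pattern in isolation, as a minimal sketch:

    package main

    import "fmt"

    // produce sends jobs until done is closed, the same cancellation
    // shape walk uses around every send.
    func produce(jobs chan<- int, done <-chan struct{}) {
        for i := 0; ; i++ {
            select {
            case jobs <- i:
            case <-done:
                fmt.Println("producer: cancelled")
                return
            }
        }
    }

    func main() {
        jobs := make(chan int)
        done := make(chan struct{})
        exited := make(chan struct{})
        go func() { produce(jobs, done); close(exited) }()
        fmt.Println(<-jobs, <-jobs) // 0 1
        close(done)                 // unblocks the pending send
        <-exited
    }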


@ -15,7 +15,7 @@ func init() {
signal.Notify(c, syscall.SIGUSR1)
go func() {
for s := range c {
debug.Log("progress.handleSIGUSR1", "Signal received: %v\n", s)
debug.Log("Signal received: %v\n", s)
forceUpdateProgress <- true
}
}()


@ -70,27 +70,27 @@ var IndexFull = func(idx *Index) bool {
idx.m.Lock()
defer idx.m.Unlock()
debug.Log("Index.Full", "checking whether index %p is full", idx)
debug.Log("checking whether index %p is full", idx)
packs := len(idx.pack)
age := time.Now().Sub(idx.created)
if age > indexMaxAge {
debug.Log("Index.Full", "index %p is old enough", idx, age)
debug.Log("index %p is old enough", idx, age)
return true
}
if packs < indexMinBlobs || age < indexMinAge {
debug.Log("Index.Full", "index %p only has %d packs or is too young (%v)", idx, packs, age)
debug.Log("index %p only has %d packs or is too young (%v)", idx, packs, age)
return false
}
if packs > indexMaxBlobs {
debug.Log("Index.Full", "index %p has %d packs", idx, packs)
debug.Log("index %p has %d packs", idx, packs)
return true
}
debug.Log("Index.Full", "index %p is not full", idx)
debug.Log("index %p is not full", idx)
return false
}
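
Spelled out, the policy is: an index is flushed once it is older than indexMaxAge or holds more than indexMaxBlobs packs, but never while it is younger than indexMinAge or holds fewer than indexMinBlobs. A compact sketch of the same decision order, with illustrative threshold values (the real constants are defined elsewhere in this package and may differ):

    package main

    import (
        "fmt"
        "time"
    )

    // Illustrative thresholds; not the package's actual constants.
    const (
        indexMinBlobs = 20
        indexMaxBlobs = 2000
        indexMinAge   = 2 * time.Minute
        indexMaxAge   = 15 * time.Minute
    )

    // full reproduces the decision order of IndexFull: age first, then
    // the "too small or too young" guard, then the pack count.
    func full(packs int, age time.Duration) bool {
        if age > indexMaxAge {
            return true
        }
        if packs < indexMinBlobs || age < indexMinAge {
            return false
        }
        return packs > indexMaxBlobs
    }

    func main() {
        fmt.Println(full(5, 20*time.Minute))   // true: old enough
        fmt.Println(full(5000, time.Minute))   // false: still too young
        fmt.Println(full(3000, 5*time.Minute)) // true: too many packs
    }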
@ -104,7 +104,7 @@ func (idx *Index) Store(blob restic.PackedBlob) {
panic("store new item in finalized index")
}
debug.Log("Index.Store", "%v", blob)
debug.Log("%v", blob)
idx.store(blob)
}
@ -120,7 +120,7 @@ func (idx *Index) Lookup(id restic.ID, tpe restic.BlobType) (blobs []restic.Pack
blobs = make([]restic.PackedBlob, 0, len(packs))
for _, p := range packs {
debug.Log("Index.Lookup", "id %v found in pack %v at %d, length %d",
debug.Log("id %v found in pack %v at %d, length %d",
id.Str(), p.packID.Str(), p.offset, p.length)
blob := restic.PackedBlob{
@ -139,7 +139,7 @@ func (idx *Index) Lookup(id restic.ID, tpe restic.BlobType) (blobs []restic.Pack
return blobs, nil
}
debug.Log("Index.Lookup", "id %v not found", id.Str())
debug.Log("id %v not found", id.Str())
return nil, errors.Errorf("id %v not found in index", id)
}
@ -260,7 +260,7 @@ func (idx *Index) Packs() restic.IDSet {
// Count returns the number of blobs of type t in the index.
func (idx *Index) Count(t restic.BlobType) (n uint) {
debug.Log("Index.Count", "counting blobs of type %v", t)
debug.Log("counting blobs of type %v", t)
idx.m.Lock()
defer idx.m.Unlock()
@ -298,10 +298,10 @@ func (idx *Index) generatePackList() ([]*packJSON, error) {
panic("null pack id")
}
debug.Log("Index.generatePackList", "handle blob %v", h)
debug.Log("handle blob %v", h)
if blob.packID.IsNull() {
debug.Log("Index.generatePackList", "blob %v has no packID! (offset %v, length %v)",
debug.Log("blob %v has no packID! (offset %v, length %v)",
h, blob.offset, blob.length)
return nil, errors.Errorf("unable to serialize index: pack for blob %v hasn't been written yet", h)
}
@ -327,7 +327,7 @@ func (idx *Index) generatePackList() ([]*packJSON, error) {
}
}
debug.Log("Index.generatePackList", "done")
debug.Log("done")
return list, nil
}
@ -339,7 +339,7 @@ type jsonIndex struct {
// Encode writes the JSON serialization of the index to the writer w.
func (idx *Index) Encode(w io.Writer) error {
debug.Log("Index.Encode", "encoding index")
debug.Log("encoding index")
idx.m.Lock()
defer idx.m.Unlock()
@ -348,7 +348,7 @@ func (idx *Index) Encode(w io.Writer) error {
// encode writes the JSON serialization of the index to the writer w.
func (idx *Index) encode(w io.Writer) error {
debug.Log("Index.encode", "encoding index")
debug.Log("encoding index")
list, err := idx.generatePackList()
if err != nil {
@ -365,7 +365,7 @@ func (idx *Index) encode(w io.Writer) error {
// Finalize sets the index to final and writes the JSON serialization to w.
func (idx *Index) Finalize(w io.Writer) error {
debug.Log("Index.Encode", "encoding index")
debug.Log("encoding index")
idx.m.Lock()
defer idx.m.Unlock()
@ -401,7 +401,7 @@ func (idx *Index) SetID(id restic.ID) error {
return errors.New("ID already set")
}
debug.Log("Index.SetID", "ID set to %v", id.Str())
debug.Log("ID set to %v", id.Str())
idx.id = id
return nil
@ -409,7 +409,7 @@ func (idx *Index) SetID(id restic.ID) error {
// Dump writes the pretty-printed JSON representation of the index to w.
func (idx *Index) Dump(w io.Writer) error {
debug.Log("Index.Dump", "dumping index")
debug.Log("dumping index")
idx.m.Lock()
defer idx.m.Unlock()
@ -433,7 +433,7 @@ func (idx *Index) Dump(w io.Writer) error {
return errors.Wrap(err, "Write")
}
debug.Log("Index.Dump", "done")
debug.Log("done")
return nil
}
@ -453,16 +453,16 @@ var ErrOldIndexFormat = errors.New("index has old format")
// DecodeIndex loads and unserializes an index from rd.
func DecodeIndex(rd io.Reader) (idx *Index, err error) {
debug.Log("Index.DecodeIndex", "Start decoding index")
debug.Log("Start decoding index")
idxJSON := jsonIndex{}
dec := json.NewDecoder(rd)
err = dec.Decode(&idxJSON)
if err != nil {
debug.Log("Index.DecodeIndex", "Error %v", err)
debug.Log("Error %v", err)
if isErrOldIndex(err) {
debug.Log("Index.DecodeIndex", "index is probably old format, trying that")
debug.Log("index is probably old format, trying that")
err = ErrOldIndexFormat
}
@ -486,19 +486,19 @@ func DecodeIndex(rd io.Reader) (idx *Index, err error) {
idx.supersedes = idxJSON.Supersedes
idx.final = true
debug.Log("Index.DecodeIndex", "done")
debug.Log("done")
return idx, nil
}
// DecodeOldIndex loads and unserializes an index in the old format from rd.
func DecodeOldIndex(rd io.Reader) (idx *Index, err error) {
debug.Log("Index.DecodeOldIndex", "Start decoding old index")
debug.Log("Start decoding old index")
list := []*packJSON{}
dec := json.NewDecoder(rd)
err = dec.Decode(&list)
if err != nil {
debug.Log("Index.DecodeOldIndex", "Error %#v", err)
debug.Log("Error %#v", err)
return nil, errors.Wrap(err, "Decode")
}
@ -518,13 +518,13 @@ func DecodeOldIndex(rd io.Reader) (idx *Index, err error) {
}
idx.final = true
debug.Log("Index.DecodeOldIndex", "done")
debug.Log("done")
return idx, nil
}
// LoadIndexWithDecoder loads the index and decodes it with fn.
func LoadIndexWithDecoder(repo restic.Repository, id restic.ID, fn func(io.Reader) (*Index, error)) (idx *Index, err error) {
debug.Log("LoadIndexWithDecoder", "Loading index %v", id.Str())
debug.Log("Loading index %v", id.Str())
buf, err := repo.LoadAndDecrypt(restic.IndexFile, id)
if err != nil {
@ -533,7 +533,7 @@ func LoadIndexWithDecoder(repo restic.Repository, id restic.ID, fn func(io.Reade
idx, err = fn(bytes.NewReader(buf))
if err != nil {
debug.Log("LoadIndexWithDecoder", "error while decoding index %v: %v", id, err)
debug.Log("error while decoding index %v: %v", id, err)
return nil, err
}


@ -13,7 +13,7 @@ import (
// old indexes. This operation should only be done with an exclusive lock in
// place.
func RebuildIndex(repo restic.Repository) error {
debug.Log("RebuildIndex", "start rebuilding index")
debug.Log("start rebuilding index")
done := make(chan struct{})
defer close(done)
@ -49,10 +49,10 @@ func RebuildIndex(repo restic.Repository) error {
id, err := SaveIndex(repo, idx)
if err != nil {
debug.Log("RebuildIndex.RebuildIndex", "error saving index: %v", err)
debug.Log("error saving index: %v", err)
return err
}
debug.Log("RebuildIndex.RebuildIndex", "new index saved as %v", id.Str())
debug.Log("new index saved as %v", id.Str())
for indexID := range oldIndexes {
err := repo.Backend().Remove(restic.IndexFile, indexID.String())


@ -64,7 +64,7 @@ func createMasterKey(s *Repository, password string) (*Key, error) {
func OpenKey(s *Repository, name string, password string) (*Key, error) {
k, err := LoadKey(s, name)
if err != nil {
debug.Log("OpenKey", "LoadKey(%v) returned error %v", name[:12], err)
debug.Log("LoadKey(%v) returned error %v", name[:12], err)
return nil, err
}
@ -96,7 +96,7 @@ func OpenKey(s *Repository, name string, password string) (*Key, error) {
k.master = &crypto.Key{}
err = json.Unmarshal(buf, k.master)
if err != nil {
debug.Log("OpenKey", "Unmarshal() returned error %v", err)
debug.Log("Unmarshal() returned error %v", err)
return nil, errors.Wrap(err, "Unmarshal")
}
k.name = name
@ -123,10 +123,10 @@ func SearchKey(s *Repository, password string, maxKeys int) (*Key, error) {
return nil, ErrMaxKeysReached
}
debug.Log("SearchKey", "trying key %v", name[:12])
debug.Log("trying key %v", name[:12])
key, err := OpenKey(s, name, password)
if err != nil {
debug.Log("SearchKey", "key %v returned error %v", name[:12], err)
debug.Log("key %v returned error %v", name[:12], err)
// ErrUnauthenticated means the password is wrong, try the next key
if errors.Cause(err) == crypto.ErrUnauthenticated {
@ -136,7 +136,7 @@ func SearchKey(s *Repository, password string, maxKeys int) (*Key, error) {
return nil, err
}
debug.Log("SearchKey", "successfully opened key %v", name[:12])
debug.Log("successfully opened key %v", name[:12])
return key, nil
}
@ -170,7 +170,7 @@ func AddKey(s *Repository, password string, template *crypto.Key) (*Key, error)
}
KDFParams = &p
debug.Log("repository.AddKey", "calibrated KDF parameters are %v", p)
debug.Log("calibrated KDF parameters are %v", p)
}
// fill meta data about key


@ -25,7 +25,7 @@ func (mi *MasterIndex) Lookup(id restic.ID, tpe restic.BlobType) (blobs []restic
mi.idxMutex.RLock()
defer mi.idxMutex.RUnlock()
debug.Log("MasterIndex.Lookup", "looking up id %v, tpe %v", id.Str(), tpe)
debug.Log("looking up id %v, tpe %v", id.Str(), tpe)
for _, idx := range mi.idx {
blobs, err = idx.Lookup(id, tpe)
@ -36,7 +36,7 @@ func (mi *MasterIndex) Lookup(id restic.ID, tpe restic.BlobType) (blobs []restic
}
}
debug.Log("MasterIndex.Lookup", "id %v not found in any index", id.Str())
debug.Log("id %v not found in any index", id.Str())
return nil, errors.Errorf("id %v not found in any index", id)
}
@ -155,7 +155,7 @@ func (mi *MasterIndex) NotFinalIndexes() []*Index {
}
}
debug.Log("MasterIndex.NotFinalIndexes", "return %d indexes", len(list))
debug.Log("return %d indexes", len(list))
return list
}
@ -166,22 +166,22 @@ func (mi *MasterIndex) FullIndexes() []*Index {
var list []*Index
debug.Log("MasterIndex.FullIndexes", "checking %d indexes", len(mi.idx))
debug.Log("checking %d indexes", len(mi.idx))
for _, idx := range mi.idx {
if idx.Final() {
debug.Log("MasterIndex.FullIndexes", "index %p is final", idx)
debug.Log("index %p is final", idx)
continue
}
if IndexFull(idx) {
debug.Log("MasterIndex.FullIndexes", "index %p is full", idx)
debug.Log("index %p is full", idx)
list = append(list, idx)
} else {
debug.Log("MasterIndex.FullIndexes", "index %p not full", idx)
debug.Log("index %p not full", idx)
}
}
debug.Log("MasterIndex.FullIndexes", "return %d indexes", len(list))
debug.Log("return %d indexes", len(list))
return list
}
@ -200,14 +200,14 @@ func (mi *MasterIndex) RebuildIndex(packBlacklist restic.IDSet) (*Index, error)
mi.idxMutex.Lock()
defer mi.idxMutex.Unlock()
debug.Log("MasterIndex.RebuildIndex", "start rebuilding index of %d indexes, pack blacklist: %v", len(mi.idx), packBlacklist)
debug.Log("start rebuilding index of %d indexes, pack blacklist: %v", len(mi.idx), packBlacklist)
newIndex := NewIndex()
done := make(chan struct{})
defer close(done)
for i, idx := range mi.idx {
debug.Log("MasterIndex.RebuildIndex", "adding index %d", i)
debug.Log("adding index %d", i)
for pb := range idx.Each(done) {
if packBlacklist.Has(pb.PackID) {
@ -218,17 +218,17 @@ func (mi *MasterIndex) RebuildIndex(packBlacklist restic.IDSet) (*Index, error)
}
if !idx.Final() {
debug.Log("MasterIndex.RebuildIndex", "index %d isn't final, don't add to supersedes field", i)
debug.Log("index %d isn't final, don't add to supersedes field", i)
continue
}
id, err := idx.ID()
if err != nil {
debug.Log("MasterIndex.RebuildIndex", "index %d does not have an ID: %v", err)
debug.Log("index %d does not have an ID: %v", err)
return nil, err
}
debug.Log("MasterIndex.RebuildIndex", "adding index id %v to supersedes field", id.Str())
debug.Log("adding index id %v to supersedes field", id.Str())
err = newIndex.AddToSupersedes(id)
if err != nil {


@ -56,10 +56,10 @@ func (r *packerManager) findPacker(size uint) (packer *pack.Packer, err error) {
// search for a suitable packer
if len(r.packs) > 0 {
debug.Log("Repo.findPacker", "searching packer for %d bytes\n", size)
debug.Log("searching packer for %d bytes\n", size)
for i, p := range r.packs {
if p.Size()+size < maxPackSize {
debug.Log("Repo.findPacker", "found packer %v", p)
debug.Log("found packer %v", p)
// remove from list
r.packs = append(r.packs[:i], r.packs[i+1:]...)
return p, nil
@ -68,7 +68,7 @@ func (r *packerManager) findPacker(size uint) (packer *pack.Packer, err error) {
}
// no suitable packer found, return new
debug.Log("Repo.findPacker", "create new pack for %d bytes", size)
debug.Log("create new pack for %d bytes", size)
tmpfile, err := ioutil.TempFile("", "restic-temp-pack-")
if err != nil {
return nil, errors.Wrap(err, "ioutil.TempFile")
@ -83,12 +83,12 @@ func (r *packerManager) insertPacker(p *pack.Packer) {
defer r.pm.Unlock()
r.packs = append(r.packs, p)
debug.Log("Repo.insertPacker", "%d packers\n", len(r.packs))
debug.Log("%d packers\n", len(r.packs))
}
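
findPacker is a first-fit search: reuse the first open pack that still has room for the new blob, otherwise start a fresh one. The selection logic in isolation (`Packer` is a stand-in struct, not restic's pack.Packer, and the pack size limit is an assumed value):

    package main

    import "fmt"

    // Packer is an illustrative stand-in that tracks only its size.
    type Packer struct{ size uint }

    const maxPackSize = 4 * 1024 * 1024 // assumed limit

    // findPacker returns the first open packer with room for size more
    // bytes, removing it from the list, or else a fresh one.
    func findPacker(packs []*Packer, size uint) (*Packer, []*Packer) {
        for i, p := range packs {
            if p.size+size < maxPackSize {
                return p, append(packs[:i], packs[i+1:]...)
            }
        }
        return &Packer{}, packs // no fit: start a new pack
    }

    func main() {
        packs := []*Packer{{size: maxPackSize - 100}, {size: 1024}}
        p, rest := findPacker(packs, 500)
        fmt.Println(p.size, len(rest)) // 1024 1: the second pack had room
    }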
// savePacker stores p in the backend.
func (r *Repository) savePacker(p *pack.Packer) error {
debug.Log("Repo.savePacker", "save packer with %d blobs\n", p.Count())
debug.Log("save packer with %d blobs\n", p.Count())
n, err := p.Finalize()
if err != nil {
return err
@ -119,11 +119,11 @@ func (r *Repository) savePacker(p *pack.Packer) error {
err = r.be.Save(h, data)
if err != nil {
debug.Log("Repo.savePacker", "Save(%v) error: %v", h, err)
debug.Log("Save(%v) error: %v", h, err)
return err
}
debug.Log("Repo.savePacker", "saved as %v", h)
debug.Log("saved as %v", h)
err = fs.Remove(tmpfile.Name())
if err != nil {
@ -132,7 +132,7 @@ func (r *Repository) savePacker(p *pack.Packer) error {
// update blobs in the index
for _, b := range p.Blobs() {
debug.Log("Repo.savePacker", " updating blob %v to pack %v", b.ID.Str(), id.Str())
debug.Log(" updating blob %v to pack %v", b.ID.Str(), id.Str())
r.idx.Current().Store(restic.PackedBlob{
Blob: restic.Blob{
Type: b.Type,


@ -82,7 +82,7 @@ func ParallelWorkFuncParseID(f ParallelIDWorkFunc) ParallelWorkFunc {
return func(s string, done <-chan struct{}) error {
id, err := restic.ParseID(s)
if err != nil {
debug.Log("repository.ParallelWorkFuncParseID", "invalid ID %q: %v", id, err)
debug.Log("invalid ID %q: %v", id, err)
return err
}


@ -16,7 +16,7 @@ import (
// into a new pack. Afterwards, the packs are removed. This operation requires
// an exclusive lock on the repo.
func Repack(repo restic.Repository, packs restic.IDSet, keepBlobs restic.BlobSet) (err error) {
debug.Log("Repack", "repacking %d packs while keeping %d blobs", len(packs), len(keepBlobs))
debug.Log("repacking %d packs while keeping %d blobs", len(packs), len(keepBlobs))
buf := make([]byte, 0, maxPackSize)
for packID := range packs {
@ -33,14 +33,14 @@ func Repack(repo restic.Repository, packs restic.IDSet, keepBlobs restic.BlobSet
return err
}
debug.Log("Repack", "pack %v loaded (%d bytes)", packID.Str(), len(buf))
debug.Log("pack %v loaded (%d bytes)", packID.Str(), len(buf))
blobs, err := pack.List(repo.Key(), bytes.NewReader(buf), int64(len(buf)))
if err != nil {
return err
}
debug.Log("Repack", "processing pack %v, blobs: %v", packID.Str(), len(blobs))
debug.Log("processing pack %v, blobs: %v", packID.Str(), len(blobs))
var plaintext []byte
for _, entry := range blobs {
h := restic.BlobHandle{ID: entry.ID, Type: entry.Type}
@ -48,7 +48,7 @@ func Repack(repo restic.Repository, packs restic.IDSet, keepBlobs restic.BlobSet
continue
}
debug.Log("Repack", " process blob %v", h)
debug.Log(" process blob %v", h)
ciphertext := buf[entry.Offset : entry.Offset+entry.Length]
plaintext = plaintext[:len(plaintext)]
@ -56,7 +56,7 @@ func Repack(repo restic.Repository, packs restic.IDSet, keepBlobs restic.BlobSet
plaintext = make([]byte, len(ciphertext))
}
debug.Log("Repack", " ciphertext %d, plaintext %d", len(plaintext), len(ciphertext))
debug.Log(" ciphertext %d, plaintext %d", len(plaintext), len(ciphertext))
n, err := crypto.Decrypt(repo.Key(), plaintext, ciphertext)
if err != nil {
@ -69,7 +69,7 @@ func Repack(repo restic.Repository, packs restic.IDSet, keepBlobs restic.BlobSet
return err
}
debug.Log("Repack", " saved blob %v", entry.ID.Str())
debug.Log(" saved blob %v", entry.ID.Str())
keepBlobs.Delete(h)
}
@ -82,10 +82,10 @@ func Repack(repo restic.Repository, packs restic.IDSet, keepBlobs restic.BlobSet
for packID := range packs {
err := repo.Backend().Remove(restic.DataFile, packID.String())
if err != nil {
debug.Log("Repack", "error removing pack %v: %v", packID.Str(), err)
debug.Log("error removing pack %v: %v", packID.Str(), err)
return err
}
debug.Log("Repack", "removed pack %v", packID.Str())
debug.Log("removed pack %v", packID.Str())
}
return nil


@ -51,12 +51,12 @@ func (r *Repository) PrefixLength(t restic.FileType) (int, error) {
// LoadAndDecrypt loads and decrypts data identified by t and id from the
// backend.
func (r *Repository) LoadAndDecrypt(t restic.FileType, id restic.ID) ([]byte, error) {
debug.Log("Repo.Load", "load %v with id %v", t, id.Str())
debug.Log("load %v with id %v", t, id.Str())
h := restic.Handle{Type: t, Name: id.String()}
buf, err := backend.LoadAll(r.be, h, nil)
if err != nil {
debug.Log("Repo.Load", "error loading %v: %v", id.Str(), err)
debug.Log("error loading %v: %v", id.Str(), err)
return nil, err
}
@ -79,7 +79,7 @@ func (r *Repository) LoadAndDecrypt(t restic.FileType, id restic.ID) ([]byte, er
// pack from the backend, the result is stored in plaintextBuf, which must be
// large enough to hold the complete blob.
func (r *Repository) loadBlob(id restic.ID, t restic.BlobType, plaintextBuf []byte) (int, error) {
debug.Log("Repo.loadBlob", "load %v with id %v (buf %p, len %d)", t, id.Str(), plaintextBuf, len(plaintextBuf))
debug.Log("load %v with id %v (buf %p, len %d)", t, id.Str(), plaintextBuf, len(plaintextBuf))
// lookup plaintext size of blob
size, err := r.idx.LookupSize(id, t)
@ -95,16 +95,16 @@ func (r *Repository) loadBlob(id restic.ID, t restic.BlobType, plaintextBuf []by
// lookup packs
blobs, err := r.idx.Lookup(id, t)
if err != nil {
debug.Log("Repo.loadBlob", "id %v not found in index: %v", id.Str(), err)
debug.Log("id %v not found in index: %v", id.Str(), err)
return 0, err
}
var lastError error
for _, blob := range blobs {
debug.Log("Repo.loadBlob", "id %v found: %v", id.Str(), blob)
debug.Log("id %v found: %v", id.Str(), blob)
if blob.Type != t {
debug.Log("Repo.loadBlob", "blob %v has wrong block type, want %v", blob, t)
debug.Log("blob %v has wrong block type, want %v", blob, t)
}
// load blob from pack
@ -112,7 +112,7 @@ func (r *Repository) loadBlob(id restic.ID, t restic.BlobType, plaintextBuf []by
ciphertextBuf := make([]byte, blob.Length)
n, err := r.be.Load(h, ciphertextBuf, int64(blob.Offset))
if err != nil {
debug.Log("Repo.loadBlob", "error loading blob %v: %v", blob, err)
debug.Log("error loading blob %v: %v", blob, err)
lastError = err
continue
}
@ -120,7 +120,7 @@ func (r *Repository) loadBlob(id restic.ID, t restic.BlobType, plaintextBuf []by
if uint(n) != blob.Length {
lastError = errors.Errorf("error loading blob %v: wrong length returned, want %d, got %d",
id.Str(), blob.Length, uint(n))
debug.Log("Repo.loadBlob", "lastError: %v", lastError)
debug.Log("lastError: %v", lastError)
continue
}
@ -173,7 +173,7 @@ func (r *Repository) SaveAndEncrypt(t restic.BlobType, data []byte, id *restic.I
id = &hashedID
}
debug.Log("Repo.Save", "save id %v (%v, %d bytes)", id.Str(), t, len(data))
debug.Log("save id %v (%v, %d bytes)", id.Str(), t, len(data))
// get buf from the pool
ciphertext := getBuf()
@ -200,7 +200,7 @@ func (r *Repository) SaveAndEncrypt(t restic.BlobType, data []byte, id *restic.I
// if the pack is not full enough and there are fewer than maxPackers
// packers, put it back on the list
if packer.Size() < minPackSize && r.countPacker() < maxPackers {
debug.Log("Repo.Save", "pack is not full enough (%d bytes)", packer.Size())
debug.Log("pack is not full enough (%d bytes)", packer.Size())
r.insertPacker(packer)
return *id, nil
}
@ -212,7 +212,7 @@ func (r *Repository) SaveAndEncrypt(t restic.BlobType, data []byte, id *restic.I
// SaveJSONUnpacked serialises item as JSON and encrypts and saves it in the
// backend as type t, without a pack. It returns the storage hash.
func (r *Repository) SaveJSONUnpacked(t restic.FileType, item interface{}) (restic.ID, error) {
debug.Log("Repo.SaveJSONUnpacked", "save new blob %v", t)
debug.Log("save new blob %v", t)
plaintext, err := json.Marshal(item)
if err != nil {
return restic.ID{}, errors.Wrap(err, "json.Marshal")
@ -235,11 +235,11 @@ func (r *Repository) SaveUnpacked(t restic.FileType, p []byte) (id restic.ID, er
err = r.be.Save(h, ciphertext)
if err != nil {
debug.Log("Repo.SaveJSONUnpacked", "error saving blob %v: %v", h, err)
debug.Log("error saving blob %v: %v", h, err)
return restic.ID{}, err
}
debug.Log("Repo.SaveJSONUnpacked", "blob %v saved", h)
debug.Log("blob %v saved", h)
return id, nil
}
@ -248,7 +248,7 @@ func (r *Repository) Flush() error {
r.pm.Lock()
defer r.pm.Unlock()
debug.Log("Repo.Flush", "manually flushing %d packs", len(r.packs))
debug.Log("manually flushing %d packs", len(r.packs))
for _, p := range r.packs {
err := r.savePacker(p)
@ -290,14 +290,14 @@ func SaveIndex(repo restic.Repository, index *Index) (restic.ID, error) {
// saveIndex saves all indexes in the backend.
func (r *Repository) saveIndex(indexes ...*Index) error {
for i, idx := range indexes {
debug.Log("Repo.SaveIndex", "Saving index %d", i)
debug.Log("Saving index %d", i)
sid, err := SaveIndex(r, idx)
if err != nil {
return err
}
debug.Log("Repo.SaveIndex", "Saved index %d as %v", i, sid.Str())
debug.Log("Saved index %d as %v", i, sid.Str())
}
return nil
@ -318,7 +318,7 @@ const loadIndexParallelism = 20
// LoadIndex loads all index files from the backend in parallel and stores them
// in the master index. The first error that occurred is returned.
func (r *Repository) LoadIndex() error {
debug.Log("Repo.LoadIndex", "Loading index")
debug.Log("Loading index")
errCh := make(chan error, 1)
indexes := make(chan *Index)
@ -530,7 +530,7 @@ func (r *Repository) Close() error {
// LoadBlob loads a blob of type t from the repository to the buffer.
func (r *Repository) LoadBlob(t restic.BlobType, id restic.ID, buf []byte) (int, error) {
debug.Log("repo.LoadBlob", "load blob %v into buf %p", id.Str(), buf)
debug.Log("load blob %v into buf %p", id.Str(), buf)
size, err := r.idx.LookupSize(id, t)
if err != nil {
return 0, err
@ -546,7 +546,7 @@ func (r *Repository) LoadBlob(t restic.BlobType, id restic.ID, buf []byte) (int,
}
buf = buf[:n]
debug.Log("repo.LoadBlob", "loaded %d bytes into buf %p", len(buf), buf)
debug.Log("loaded %d bytes into buf %p", len(buf), buf)
return len(buf), err
}
@ -563,14 +563,14 @@ func (r *Repository) SaveBlob(t restic.BlobType, buf []byte, id restic.ID) (rest
// LoadTree loads a tree from the repository.
func (r *Repository) LoadTree(id restic.ID) (*restic.Tree, error) {
debug.Log("repo.LoadTree", "load tree %v", id.Str())
debug.Log("load tree %v", id.Str())
size, err := r.idx.LookupSize(id, restic.TreeBlob)
if err != nil {
return nil, err
}
debug.Log("repo.LoadTree", "size is %d, create buffer", size)
debug.Log("size is %d, create buffer", size)
buf := make([]byte, size)
n, err := r.loadBlob(id, restic.TreeBlob, buf)


@ -47,7 +47,7 @@ func (res *Restorer) restoreTo(dst string, dir string, treeID ID) error {
for _, node := range tree.Nodes {
selectedForRestore := res.SelectFilter(filepath.Join(dir, node.Name),
filepath.Join(dst, dir, node.Name), node)
debug.Log("Restorer.restoreNodeTo", "SelectForRestore returned %v", selectedForRestore)
debug.Log("SelectForRestore returned %v", selectedForRestore)
if selectedForRestore {
err := res.restoreNodeTo(node, dir, dst)
@ -84,17 +84,17 @@ func (res *Restorer) restoreTo(dst string, dir string, treeID ID) error {
}
func (res *Restorer) restoreNodeTo(node *Node, dir string, dst string) error {
debug.Log("Restorer.restoreNodeTo", "node %v, dir %v, dst %v", node.Name, dir, dst)
debug.Log("node %v, dir %v, dst %v", node.Name, dir, dst)
dstPath := filepath.Join(dst, dir, node.Name)
err := node.CreateAt(dstPath, res.repo)
if err != nil {
debug.Log("Restorer.restoreNodeTo", "node.CreateAt(%s) error %v", dstPath, err)
debug.Log("node.CreateAt(%s) error %v", dstPath, err)
}
// Did it fail because of ENOENT?
if err != nil && os.IsNotExist(errors.Cause(err)) {
debug.Log("Restorer.restoreNodeTo", "create intermediate paths")
debug.Log("create intermediate paths")
// Create parent directories and retry
err = fs.MkdirAll(filepath.Dir(dstPath), 0700)
@ -104,14 +104,14 @@ func (res *Restorer) restoreNodeTo(node *Node, dir string, dst string) error {
}
if err != nil {
debug.Log("Restorer.restoreNodeTo", "error %v", err)
debug.Log("error %v", err)
err = res.Error(dstPath, node, err)
if err != nil {
return err
}
}
debug.Log("Restorer.restoreNodeTo", "successfully restored %v", node.Name)
debug.Log("successfully restored %v", node.Name)
return nil
}


@ -28,15 +28,15 @@ func (t Tree) String() string {
// Equals returns true if t and other have exactly the same nodes.
func (t Tree) Equals(other *Tree) bool {
if len(t.Nodes) != len(other.Nodes) {
debug.Log("Tree.Equals", "tree.Equals(): trees have different number of nodes")
debug.Log("tree.Equals(): trees have different number of nodes")
return false
}
for i := 0; i < len(t.Nodes); i++ {
if !t.Nodes[i].Equals(*other.Nodes[i]) {
debug.Log("Tree.Equals", "tree.Equals(): node %d is different:", i)
debug.Log("Tree.Equals", " %#v", t.Nodes[i])
debug.Log("Tree.Equals", " %#v", other.Nodes[i])
debug.Log("tree.Equals(): node %d is different:", i)
debug.Log(" %#v", t.Nodes[i])
debug.Log(" %#v", other.Nodes[i])
return false
}
}


@ -35,8 +35,8 @@ func NewTreeWalker(ch chan<- loadTreeJob, out chan<- TreeJob) *TreeWalker {
// Walk starts walking the tree given by id. When the channel done is closed,
// processing stops.
func (tw *TreeWalker) Walk(path string, id restic.ID, done chan struct{}) {
debug.Log("TreeWalker.Walk", "starting on tree %v for %v", id.Str(), path)
defer debug.Log("TreeWalker.Walk", "done walking tree %v for %v", id.Str(), path)
debug.Log("starting on tree %v for %v", id.Str(), path)
defer debug.Log("done walking tree %v for %v", id.Str(), path)
resCh := make(chan loadTreeResult, 1)
tw.ch <- loadTreeJob{
@ -64,10 +64,10 @@ func (tw *TreeWalker) Walk(path string, id restic.ID, done chan struct{}) {
}
func (tw *TreeWalker) walk(path string, tree *restic.Tree, done chan struct{}) {
debug.Log("TreeWalker.walk", "start on %q", path)
defer debug.Log("TreeWalker.walk", "done for %q", path)
debug.Log("start on %q", path)
defer debug.Log("done for %q", path)
debug.Log("TreeWalker.walk", "tree %#v", tree)
debug.Log("tree %#v", tree)
// load all subtrees in parallel
results := make([]<-chan loadTreeResult, len(tree.Nodes))
@ -125,31 +125,31 @@ type loadTreeJob struct {
type treeLoader func(restic.ID) (*restic.Tree, error)
func loadTreeWorker(wg *sync.WaitGroup, in <-chan loadTreeJob, load treeLoader, done <-chan struct{}) {
debug.Log("loadTreeWorker", "start")
defer debug.Log("loadTreeWorker", "exit")
debug.Log("start")
defer debug.Log("exit")
defer wg.Done()
for {
select {
case <-done:
debug.Log("loadTreeWorker", "done channel closed")
debug.Log("done channel closed")
return
case job, ok := <-in:
if !ok {
debug.Log("loadTreeWorker", "input channel closed, exiting")
debug.Log("input channel closed, exiting")
return
}
debug.Log("loadTreeWorker", "received job to load tree %v", job.id.Str())
debug.Log("received job to load tree %v", job.id.Str())
tree, err := load(job.id)
debug.Log("loadTreeWorker", "tree %v loaded, error %v", job.id.Str(), err)
debug.Log("tree %v loaded, error %v", job.id.Str(), err)
select {
case job.res <- loadTreeResult{tree, err}:
debug.Log("loadTreeWorker", "job result sent")
debug.Log("job result sent")
case <-done:
debug.Log("loadTreeWorker", "done channel closed before result could be sent")
debug.Log("done channel closed before result could be sent")
return
}
}
@ -167,7 +167,7 @@ const loadTreeWorkers = 10
// file and directory it finds. When the channel done is closed, processing
// stops.
func Tree(repo TreeLoader, id restic.ID, done chan struct{}, jobCh chan<- TreeJob) {
debug.Log("WalkTree", "start on %v, start workers", id.Str())
debug.Log("start on %v, start workers", id.Str())
load := func(id restic.ID) (*restic.Tree, error) {
tree, err := repo.LoadTree(id)
@ -192,5 +192,5 @@ func Tree(repo TreeLoader, id restic.ID, done chan struct{}, jobCh chan<- TreeJo
close(ch)
wg.Wait()
debug.Log("WalkTree", "done")
debug.Log("done")
}
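
loadTreeWorker follows the standard Go worker-pool shape: several goroutines share one job channel, each job carries its own result channel, and a WaitGroup plus a done channel coordinate shutdown. A minimal sketch of that shape (the doubling in place of tree loading is a placeholder):

    package main

    import (
        "fmt"
        "sync"
    )

    type job struct {
        n   int
        res chan int
    }

    // worker drains jobs until the channel closes or done is closed,
    // the same loop shape as loadTreeWorker.
    func worker(wg *sync.WaitGroup, jobs <-chan job, done <-chan struct{}) {
        defer wg.Done()
        for {
            select {
            case <-done:
                return
            case j, ok := <-jobs:
                if !ok {
                    return
                }
                select {
                case j.res <- j.n * 2: // placeholder for loading a tree
                case <-done:
                    return
                }
            }
        }
    }

    func main() {
        jobs := make(chan job)
        done := make(chan struct{})
        var wg sync.WaitGroup
        for i := 0; i < 3; i++ {
            wg.Add(1)
            go worker(&wg, jobs, done)
        }
        res := make(chan int, 1)
        jobs <- job{n: 21, res: res}
        fmt.Println(<-res) // 42
        close(jobs) // idle workers exit
        wg.Wait()
        close(done)
    }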