Add new archiver code
parent 76b616451f
commit f279731168
12 changed files with 4767 additions and 0 deletions
internal/archiver/archiver.go (new file, 788 lines)
@@ -0,0 +1,788 @@
package archiver
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"os"
|
||||
"path"
|
||||
"runtime"
|
||||
"sort"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/restic/restic/internal/debug"
|
||||
"github.com/restic/restic/internal/errors"
|
||||
"github.com/restic/restic/internal/fs"
|
||||
"github.com/restic/restic/internal/restic"
|
||||
)
|
||||
|
||||
// SelectFunc returns true for all items that should be included (files and
|
||||
// dirs). If false is returned, files are ignored and dirs are not even walked.
|
||||
type SelectFunc func(item string, fi os.FileInfo) bool
|
||||
|
||||
// ErrorFunc is called when an error during archiving occurs. When nil is
|
||||
// returned, the archiver continues, otherwise it aborts and passes the error
|
||||
// up the call stack.
|
||||
type ErrorFunc func(file string, fi os.FileInfo, err error) error
|
||||
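For illustration only (not part of this commit), a SelectFunc that skips any item named ".cache" could look like the sketch below; the name is just an example and it assumes "path/filepath" is imported.

var skipCaches SelectFunc = func(item string, fi os.FileInfo) bool {
	// Returning false for a directory means it is not descended into
	// at all; for a file it is simply skipped.
	return filepath.Base(item) != ".cache"
}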
|
||||
// ItemStats collects some statistics about a particular file or directory.
|
||||
type ItemStats struct {
|
||||
DataBlobs int // number of new data blobs added for this item
|
||||
DataSize uint64 // sum of the sizes of all new data blobs
|
||||
TreeBlobs int // number of new tree blobs added for this item
|
||||
TreeSize uint64 // sum of the sizes of all new tree blobs
|
||||
}
|
||||
|
||||
// Add adds other to the current ItemStats.
|
||||
func (s *ItemStats) Add(other ItemStats) {
|
||||
s.DataBlobs += other.DataBlobs
|
||||
s.DataSize += other.DataSize
|
||||
s.TreeBlobs += other.TreeBlobs
|
||||
s.TreeSize += other.TreeSize
|
||||
}
|
||||
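As a small illustration (not part of the commit), per-item statistics can be accumulated into a running total like this:

var total ItemStats
total.Add(ItemStats{DataBlobs: 2, DataSize: 4096})
total.Add(ItemStats{TreeBlobs: 1, TreeSize: 512})
// total now counts 2 data blobs (4096 bytes) and 1 tree blob (512 bytes)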
|
||||
// Archiver saves a directory structure to the repo.
|
||||
type Archiver struct {
|
||||
Repo restic.Repository
|
||||
Select SelectFunc
|
||||
FS fs.FS
|
||||
Options Options
|
||||
|
||||
blobSaver *BlobSaver
|
||||
fileSaver *FileSaver
|
||||
|
||||
// Error is called for all errors that occur during backup.
|
||||
Error ErrorFunc
|
||||
|
||||
// CompleteItem is called for all files and dirs once they have been
|
||||
// processed successfully. The parameter item contains the path as it will
|
||||
// be in the snapshot after saving. s contains some statistics about this
|
||||
// particular file/dir.
|
||||
//
|
||||
// CompleteItem may be called asynchronously from several different
|
||||
// goroutines!
|
||||
CompleteItem func(item string, previous, current *restic.Node, s ItemStats, d time.Duration)
|
||||
|
||||
// StartFile is called when a file is being processed by a worker.
|
||||
StartFile func(filename string)
|
||||
|
||||
// CompleteBlob is called for all saved blobs for files.
|
||||
CompleteBlob func(filename string, bytes uint64)
|
||||
|
||||
// WithAtime configures if the access time for files and directories should
|
||||
// be saved. Enabling it may result in a lot of metadata, so it's off by
|
||||
// default.
|
||||
WithAtime bool
|
||||
}
|
||||
|
||||
// Options is used to configure the archiver.
|
||||
type Options struct {
|
||||
// FileReadConcurrency sets how many files are read in concurrently. If
|
||||
// it's set to zero, at most two files are read in concurrently (which
|
||||
// turned out to be a good default for most situations).
|
||||
FileReadConcurrency uint
|
||||
|
||||
// SaveBlobConcurrency sets how many blobs are hashed and saved
|
||||
// concurrently. If it's set to zero, the default is the number of CPUs
|
||||
// available in the system.
|
||||
SaveBlobConcurrency uint
|
||||
}
|
||||
|
||||
// ApplyDefaults returns a copy of o with the default options set for all unset
|
||||
// fields.
|
||||
func (o Options) ApplyDefaults() Options {
|
||||
if o.FileReadConcurrency == 0 {
|
||||
// two is a sweet spot for almost all situations. We've done some
|
||||
// experiments documented here:
|
||||
// https://github.com/borgbackup/borg/issues/3500
|
||||
o.FileReadConcurrency = 2
|
||||
}
|
||||
|
||||
if o.SaveBlobConcurrency == 0 {
|
||||
o.SaveBlobConcurrency = uint(runtime.NumCPU())
|
||||
}
|
||||
|
||||
return o
|
||||
}
|
||||
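A quick illustration (not from the commit): fields that are already set survive ApplyDefaults, only zero values are replaced.

opts := Options{SaveBlobConcurrency: 8}.ApplyDefaults()
// opts.FileReadConcurrency == 2   (default applied)
// opts.SaveBlobConcurrency == 8   (explicit value kept)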
|
||||
// New initializes a new archiver.
|
||||
func New(repo restic.Repository, fs fs.FS, opts Options) *Archiver {
|
||||
arch := &Archiver{
|
||||
Repo: repo,
|
||||
Select: func(string, os.FileInfo) bool { return true },
|
||||
FS: fs,
|
||||
Options: opts.ApplyDefaults(),
|
||||
|
||||
CompleteItem: func(string, *restic.Node, *restic.Node, ItemStats, time.Duration) {},
|
||||
StartFile: func(string) {},
|
||||
CompleteBlob: func(string, uint64) {},
|
||||
}
|
||||
|
||||
return arch
|
||||
}
|
||||
|
||||
// Valid returns an error if anything is missing.
|
||||
func (arch *Archiver) Valid() error {
|
||||
if arch.blobSaver == nil {
|
||||
return errors.New("blobSaver is nil")
|
||||
}
|
||||
|
||||
if arch.fileSaver == nil {
|
||||
return errors.New("fileSaver is nil")
|
||||
}
|
||||
|
||||
if arch.Repo == nil {
|
||||
return errors.New("repo is not set")
|
||||
}
|
||||
|
||||
if arch.Select == nil {
|
||||
return errors.New("Select is not set")
|
||||
}
|
||||
|
||||
if arch.FS == nil {
|
||||
return errors.New("FS is not set")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// error calls arch.Error if it is set.
|
||||
func (arch *Archiver) error(item string, fi os.FileInfo, err error) error {
|
||||
if arch.Error == nil || err == nil {
|
||||
return err
|
||||
}
|
||||
|
||||
errf := arch.Error(item, fi, err)
|
||||
if err != errf {
|
||||
debug.Log("item %v: error was filtered by handler, before: %q, after: %v", item, err, errf)
|
||||
}
|
||||
return errf
|
||||
}
|
||||
|
||||
// saveTree stores a tree in the repo. It checks the index and the known blobs
|
||||
// before saving anything.
|
||||
func (arch *Archiver) saveTree(ctx context.Context, t *restic.Tree) (restic.ID, ItemStats, error) {
|
||||
var s ItemStats
|
||||
buf, err := json.Marshal(t)
|
||||
if err != nil {
|
||||
return restic.ID{}, s, errors.Wrap(err, "MarshalJSON")
|
||||
}
|
||||
|
||||
// append a newline so that the data is always consistent (json.Encoder
|
||||
// adds a newline after each object)
|
||||
buf = append(buf, '\n')
|
||||
|
||||
b := Buffer{Data: buf}
|
||||
res := arch.blobSaver.Save(ctx, restic.TreeBlob, b)
|
||||
if res.Err() != nil {
|
||||
return restic.ID{}, s, res.Err()
|
||||
}
|
||||
|
||||
if !res.Known() {
|
||||
s.TreeBlobs++
|
||||
s.TreeSize += uint64(len(buf))
|
||||
}
|
||||
return res.ID(), s, nil
|
||||
}
|
||||
|
||||
// nodeFromFileInfo returns the restic node from an os.FileInfo.
|
||||
func (arch *Archiver) nodeFromFileInfo(filename string, fi os.FileInfo) (*restic.Node, error) {
|
||||
node, err := restic.NodeFromFileInfo(filename, fi)
|
||||
if !arch.WithAtime {
|
||||
node.AccessTime = node.ModTime
|
||||
}
|
||||
return node, errors.Wrap(err, "NodeFromFileInfo")
|
||||
}
|
||||
|
||||
// loadSubtree tries to load the subtree referenced by node. In case of an error, nil is returned.
|
||||
func (arch *Archiver) loadSubtree(ctx context.Context, node *restic.Node) *restic.Tree {
|
||||
if node == nil || node.Type != "dir" || node.Subtree == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
tree, err := arch.Repo.LoadTree(ctx, *node.Subtree)
|
||||
if err != nil {
|
||||
debug.Log("unable to load tree %v: %v", node.Subtree.Str(), err)
|
||||
// TODO: handle error
|
||||
return nil
|
||||
}
|
||||
|
||||
return tree
|
||||
}
|
||||
|
||||
// SaveDir stores a directory in the repo and returns the node. snPath is the
|
||||
// path within the current snapshot.
|
||||
func (arch *Archiver) SaveDir(ctx context.Context, snPath string, fi os.FileInfo, dir string, previous *restic.Tree) (*restic.Node, ItemStats, error) {
|
||||
debug.Log("%v %v", snPath, dir)
|
||||
|
||||
var s ItemStats
|
||||
|
||||
treeNode, err := arch.nodeFromFileInfo(dir, fi)
|
||||
if err != nil {
|
||||
return nil, s, err
|
||||
}
|
||||
|
||||
names, err := readdirnames(arch.FS, dir)
|
||||
if err != nil {
|
||||
return nil, s, err
|
||||
}
|
||||
|
||||
var futures []FutureNode
|
||||
|
||||
tree := restic.NewTree()
|
||||
|
||||
for _, name := range names {
|
||||
pathname := arch.FS.Join(dir, name)
|
||||
oldNode := previous.Find(name)
|
||||
snItem := join(snPath, name)
|
||||
fn, excluded, err := arch.Save(ctx, snItem, pathname, oldNode)
|
||||
|
||||
// return error early if possible
|
||||
if err != nil {
|
||||
err = arch.error(pathname, fi, err)
|
||||
if err == nil {
|
||||
// ignore error
|
||||
continue
|
||||
}
|
||||
|
||||
return nil, s, err
|
||||
}
|
||||
|
||||
if excluded {
|
||||
continue
|
||||
}
|
||||
|
||||
futures = append(futures, fn)
|
||||
}
|
||||
|
||||
for _, fn := range futures {
|
||||
fn.wait()
|
||||
|
||||
// return the error if it wasn't ignored
|
||||
if fn.err != nil {
|
||||
fn.err = arch.error(fn.target, fn.fi, fn.err)
|
||||
if fn.err == nil {
|
||||
// ignore error
|
||||
continue
|
||||
}
|
||||
|
||||
return nil, s, fn.err
|
||||
}
|
||||
|
||||
// when the error is ignored, the node could not be saved, so ignore it
|
||||
if fn.node == nil {
|
||||
debug.Log("%v excluded: %v", fn.snPath, fn.target)
|
||||
continue
|
||||
}
|
||||
|
||||
err := tree.Insert(fn.node)
|
||||
if err != nil {
|
||||
return nil, s, err
|
||||
}
|
||||
}
|
||||
|
||||
id, treeStats, err := arch.saveTree(ctx, tree)
|
||||
if err != nil {
|
||||
return nil, ItemStats{}, err
|
||||
}
|
||||
|
||||
s.Add(treeStats)
|
||||
|
||||
treeNode.Subtree = &id
|
||||
return treeNode, s, nil
|
||||
}
|
||||
|
||||
// FutureNode holds a reference to a node or a FutureFile.
|
||||
type FutureNode struct {
|
||||
snPath, target string
|
||||
|
||||
// kept to call the error callback function
|
||||
absTarget string
|
||||
fi os.FileInfo
|
||||
|
||||
node *restic.Node
|
||||
stats ItemStats
|
||||
err error
|
||||
|
||||
isFile bool
|
||||
file FutureFile
|
||||
}
|
||||
|
||||
func (fn *FutureNode) wait() {
|
||||
if fn.isFile {
|
||||
// wait for and collect the data for the file
|
||||
fn.node = fn.file.Node()
|
||||
fn.err = fn.file.Err()
|
||||
fn.stats = fn.file.Stats()
|
||||
}
|
||||
}
|
||||
|
||||
// Save saves a target (file or directory) to the repo. If the item is
|
||||
// excluded, this function returns a nil node and a nil error.
|
||||
//
|
||||
// Errors and completion need to be handled by the caller.
|
||||
//
|
||||
// snPath is the path within the current snapshot.
|
||||
func (arch *Archiver) Save(ctx context.Context, snPath, target string, previous *restic.Node) (fn FutureNode, excluded bool, err error) {
|
||||
fn = FutureNode{
|
||||
snPath: snPath,
|
||||
target: target,
|
||||
}
|
||||
|
||||
debug.Log("%v target %q, previous %v", snPath, target, previous)
|
||||
abstarget, err := arch.FS.Abs(target)
|
||||
if err != nil {
|
||||
return FutureNode{}, false, err
|
||||
}
|
||||
|
||||
fn.absTarget = abstarget
|
||||
|
||||
var fi os.FileInfo
|
||||
var errFI error
|
||||
|
||||
file, errOpen := arch.FS.OpenFile(target, fs.O_RDONLY|fs.O_NOFOLLOW, 0)
|
||||
if errOpen == nil {
|
||||
fi, errFI = file.Stat()
|
||||
}
|
||||
|
||||
if !arch.Select(abstarget, fi) {
|
||||
debug.Log("%v is excluded", target)
|
||||
if file != nil {
|
||||
_ = file.Close()
|
||||
}
|
||||
return FutureNode{}, true, nil
|
||||
}
|
||||
|
||||
if errOpen != nil {
|
||||
debug.Log(" open error %#v", errOpen)
|
||||
// test if the open failed because target is a symbolic link or a socket
|
||||
if e, ok := errOpen.(*os.PathError); ok && (e.Err == syscall.ELOOP || e.Err == syscall.ENXIO) {
|
||||
// in this case, redo the stat and carry on
|
||||
fi, errFI = arch.FS.Lstat(target)
|
||||
} else {
|
||||
return FutureNode{}, false, errors.Wrap(errOpen, "OpenFile")
|
||||
}
|
||||
}
|
||||
|
||||
if errFI != nil {
|
||||
_ = file.Close()
|
||||
return FutureNode{}, false, errors.Wrap(errFI, "Stat")
|
||||
}
|
||||
|
||||
switch {
|
||||
case fs.IsRegularFile(fi):
|
||||
debug.Log(" %v regular file", target)
|
||||
start := time.Now()
|
||||
|
||||
// use previous node if the file hasn't changed
|
||||
if previous != nil && !fileChanged(fi, previous) {
|
||||
debug.Log("%v hasn't changed, returning old node", target)
|
||||
arch.CompleteItem(snPath, previous, previous, ItemStats{}, time.Since(start))
|
||||
arch.CompleteBlob(snPath, previous.Size)
|
||||
fn.node = previous
|
||||
_ = file.Close()
|
||||
return fn, false, nil
|
||||
}
|
||||
|
||||
fn.isFile = true
|
||||
// Save will close the file, we don't need to do that
|
||||
fn.file = arch.fileSaver.Save(ctx, snPath, file, fi, func() {
|
||||
arch.StartFile(snPath)
|
||||
}, func(node *restic.Node, stats ItemStats) {
|
||||
arch.CompleteItem(snPath, previous, node, stats, time.Since(start))
|
||||
})
|
||||
|
||||
file = nil
|
||||
|
||||
case fi.IsDir():
|
||||
debug.Log(" %v dir", target)
|
||||
|
||||
snItem := snPath + "/"
|
||||
start := time.Now()
|
||||
oldSubtree := arch.loadSubtree(ctx, previous)
|
||||
fn.node, fn.stats, err = arch.SaveDir(ctx, snPath, fi, target, oldSubtree)
|
||||
if err == nil {
|
||||
arch.CompleteItem(snItem, previous, fn.node, fn.stats, time.Since(start))
|
||||
} else {
|
||||
_ = file.Close()
|
||||
return FutureNode{}, false, err
|
||||
}
|
||||
|
||||
case fi.Mode()&os.ModeSocket > 0:
|
||||
debug.Log(" %v is a socket, ignoring", target)
|
||||
return FutureNode{}, true, nil
|
||||
|
||||
default:
|
||||
debug.Log(" %v other", target)
|
||||
|
||||
fn.node, err = arch.nodeFromFileInfo(target, fi)
|
||||
if err != nil {
|
||||
_ = file.Close()
|
||||
return FutureNode{}, false, err
|
||||
}
|
||||
}
|
||||
|
||||
if file != nil {
|
||||
err = file.Close()
|
||||
if err != nil {
|
||||
return fn, false, errors.Wrap(err, "Close")
|
||||
}
|
||||
}
|
||||
|
||||
return fn, false, nil
|
||||
}
|
||||
|
||||
// fileChanged returns true if the file's content has changed since the node
|
||||
// was created.
|
||||
func fileChanged(fi os.FileInfo, node *restic.Node) bool {
|
||||
if node == nil {
|
||||
return true
|
||||
}
|
||||
|
||||
// check type change
|
||||
if node.Type != "file" {
|
||||
return true
|
||||
}
|
||||
|
||||
// check modification timestamp
|
||||
if !fi.ModTime().Equal(node.ModTime) {
|
||||
return true
|
||||
}
|
||||
|
||||
// check size
|
||||
extFI := fs.ExtendedStat(fi)
|
||||
if uint64(fi.Size()) != node.Size || uint64(extFI.Size) != node.Size {
|
||||
return true
|
||||
}
|
||||
|
||||
// check inode
|
||||
if node.Inode != extFI.Inode {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// join returns all elements separated with a forward slash.
|
||||
func join(elem ...string) string {
|
||||
return path.Join(elem...)
|
||||
}
|
||||
|
||||
// statDir returns the file info for the directory. Symbolic links are
|
||||
// resolved. If the target directory is not a directory, an error is returned.
|
||||
func (arch *Archiver) statDir(dir string) (os.FileInfo, error) {
|
||||
fi, err := arch.FS.Stat(dir)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Lstat")
|
||||
}
|
||||
|
||||
tpe := fi.Mode() & (os.ModeType | os.ModeCharDevice)
|
||||
if tpe != os.ModeDir {
|
||||
return fi, errors.Errorf("path is not a directory: %v", dir)
|
||||
}
|
||||
|
||||
return fi, nil
|
||||
}
|
||||
|
||||
// SaveTree stores a Tree in the repo and returns it. snPath is the path
|
||||
// within the current snapshot.
|
||||
func (arch *Archiver) SaveTree(ctx context.Context, snPath string, atree *Tree, previous *restic.Tree) (*restic.Tree, error) {
|
||||
debug.Log("%v (%v nodes), parent %v", snPath, len(atree.Nodes), previous)
|
||||
|
||||
tree := restic.NewTree()
|
||||
|
||||
futureNodes := make(map[string]FutureNode)
|
||||
|
||||
for name, subatree := range atree.Nodes {
|
||||
|
||||
// this is a leaf node
|
||||
if subatree.Path != "" {
|
||||
fn, excluded, err := arch.Save(ctx, join(snPath, name), subatree.Path, previous.Find(name))
|
||||
|
||||
if err != nil {
|
||||
err = arch.error(subatree.Path, fn.fi, err)
|
||||
if err == nil {
|
||||
// ignore error
|
||||
continue
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !excluded {
|
||||
futureNodes[name] = fn
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
snItem := join(snPath, name) + "/"
|
||||
start := time.Now()
|
||||
|
||||
oldNode := previous.Find(name)
|
||||
oldSubtree := arch.loadSubtree(ctx, oldNode)
|
||||
|
||||
// not a leaf node, archive subtree
|
||||
subtree, err := arch.SaveTree(ctx, join(snPath, name), &subatree, oldSubtree)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
id, nodeStats, err := arch.saveTree(ctx, subtree)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if subatree.FileInfoPath == "" {
|
||||
return nil, errors.Errorf("FileInfoPath for %v/%v is empty", snPath, name)
|
||||
}
|
||||
|
||||
debug.Log("%v, saved subtree %v as %v", snPath, subtree, id.Str())
|
||||
|
||||
fi, err := arch.statDir(subatree.FileInfoPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
debug.Log("%v, dir node data loaded from %v", snPath, subatree.FileInfoPath)
|
||||
|
||||
node, err := arch.nodeFromFileInfo(subatree.FileInfoPath, fi)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
node.Name = name
|
||||
node.Subtree = &id
|
||||
|
||||
err = tree.Insert(node)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
arch.CompleteItem(snItem, oldNode, node, nodeStats, time.Since(start))
|
||||
}
|
||||
|
||||
// process all futures
|
||||
for name, fn := range futureNodes {
|
||||
fn.wait()
|
||||
|
||||
// return the error, or ignore it
|
||||
if fn.err != nil {
|
||||
fn.err = arch.error(fn.target, fn.fi, fn.err)
|
||||
if fn.err == nil {
|
||||
// ignore error
|
||||
continue
|
||||
}
|
||||
|
||||
return nil, fn.err
|
||||
}
|
||||
|
||||
// when the error is ignored, the node could not be saved, so ignore it
|
||||
if fn.node == nil {
|
||||
debug.Log("%v excluded: %v", fn.snPath, fn.target)
|
||||
continue
|
||||
}
|
||||
|
||||
fn.node.Name = name
|
||||
|
||||
err := tree.Insert(fn.node)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return tree, nil
|
||||
}
|
||||
|
||||
type fileInfoSlice []os.FileInfo
|
||||
|
||||
func (fi fileInfoSlice) Len() int {
|
||||
return len(fi)
|
||||
}
|
||||
|
||||
func (fi fileInfoSlice) Swap(i, j int) {
|
||||
fi[i], fi[j] = fi[j], fi[i]
|
||||
}
|
||||
|
||||
func (fi fileInfoSlice) Less(i, j int) bool {
|
||||
return fi[i].Name() < fi[j].Name()
|
||||
}
|
||||
|
||||
func readdir(filesystem fs.FS, dir string) ([]os.FileInfo, error) {
|
||||
f, err := filesystem.OpenFile(dir, fs.O_RDONLY|fs.O_NOFOLLOW, 0)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Open")
|
||||
}
|
||||
|
||||
entries, err := f.Readdir(-1)
|
||||
if err != nil {
|
||||
_ = f.Close()
|
||||
return nil, errors.Wrap(err, "Readdir")
|
||||
}
|
||||
|
||||
err = f.Close()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
sort.Sort(fileInfoSlice(entries))
|
||||
return entries, nil
|
||||
}
|
||||
|
||||
func readdirnames(filesystem fs.FS, dir string) ([]string, error) {
|
||||
f, err := filesystem.OpenFile(dir, fs.O_RDONLY|fs.O_NOFOLLOW, 0)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Open")
|
||||
}
|
||||
|
||||
entries, err := f.Readdirnames(-1)
|
||||
if err != nil {
|
||||
_ = f.Close()
|
||||
return nil, errors.Wrap(err, "Readdirnames")
|
||||
}
|
||||
|
||||
err = f.Close()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
sort.Sort(sort.StringSlice(entries))
|
||||
return entries, nil
|
||||
}
|
||||
|
||||
// resolveRelativeTargets replaces targets that only contain relative
|
||||
// directories ("." or "../../") with the contents of the directory. Each
|
||||
// element of target is processed with fs.Clean().
|
||||
func resolveRelativeTargets(fs fs.FS, targets []string) ([]string, error) {
|
||||
debug.Log("targets before resolving: %v", targets)
|
||||
result := make([]string, 0, len(targets))
|
||||
for _, target := range targets {
|
||||
target = fs.Clean(target)
|
||||
pc, _ := pathComponents(fs, target, false)
|
||||
if len(pc) > 0 {
|
||||
result = append(result, target)
|
||||
continue
|
||||
}
|
||||
|
||||
debug.Log("replacing %q with readdir(%q)", target, target)
|
||||
entries, err := readdirnames(fs, target)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, name := range entries {
|
||||
result = append(result, fs.Join(target, name))
|
||||
}
|
||||
}
|
||||
|
||||
debug.Log("targets after resolving: %v", result)
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// SnapshotOptions collects attributes for a new snapshot.
|
||||
type SnapshotOptions struct {
|
||||
Tags []string
|
||||
Hostname string
|
||||
Excludes []string
|
||||
Time time.Time
|
||||
ParentSnapshot restic.ID
|
||||
}
|
||||
|
||||
// loadParentTree loads a tree referenced by snapshot id. If id is null, nil is returned.
|
||||
func (arch *Archiver) loadParentTree(ctx context.Context, snapshotID restic.ID) *restic.Tree {
|
||||
if snapshotID.IsNull() {
|
||||
return nil
|
||||
}
|
||||
|
||||
debug.Log("load parent snapshot %v", snapshotID)
|
||||
sn, err := restic.LoadSnapshot(ctx, arch.Repo, snapshotID)
|
||||
if err != nil {
|
||||
debug.Log("unable to load snapshot %v: %v", snapshotID, err)
|
||||
return nil
|
||||
}
|
||||
|
||||
if sn.Tree == nil {
|
||||
debug.Log("snapshot %v has empty tree %v", snapshotID)
|
||||
return nil
|
||||
}
|
||||
|
||||
debug.Log("load parent tree %v", *sn.Tree)
|
||||
tree, err := arch.Repo.LoadTree(ctx, *sn.Tree)
|
||||
if err != nil {
|
||||
debug.Log("unable to load tree %v: %v", *sn.Tree, err)
|
||||
return nil
|
||||
}
|
||||
return tree
|
||||
}
|
||||
|
||||
// runWorkers starts the worker pools, which are stopped when the context is cancelled.
|
||||
func (arch *Archiver) runWorkers(ctx context.Context) {
|
||||
arch.blobSaver = NewBlobSaver(ctx, arch.Repo, arch.Options.SaveBlobConcurrency)
|
||||
arch.fileSaver = NewFileSaver(ctx, arch.FS, arch.blobSaver, arch.Repo.Config().ChunkerPolynomial, arch.Options.FileReadConcurrency)
|
||||
arch.fileSaver.CompleteBlob = arch.CompleteBlob
|
||||
|
||||
arch.fileSaver.NodeFromFileInfo = arch.nodeFromFileInfo
|
||||
}
|
||||
|
||||
// Snapshot saves several targets and returns a snapshot.
|
||||
func (arch *Archiver) Snapshot(ctx context.Context, targets []string, opts SnapshotOptions) (*restic.Snapshot, restic.ID, error) {
|
||||
workerCtx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
|
||||
arch.runWorkers(workerCtx)
|
||||
|
||||
err := arch.Valid()
|
||||
if err != nil {
|
||||
return nil, restic.ID{}, err
|
||||
}
|
||||
|
||||
cleanTargets, err := resolveRelativeTargets(arch.FS, targets)
|
||||
if err != nil {
|
||||
return nil, restic.ID{}, err
|
||||
}
|
||||
|
||||
atree, err := NewTree(arch.FS, cleanTargets)
|
||||
if err != nil {
|
||||
return nil, restic.ID{}, err
|
||||
}
|
||||
|
||||
start := time.Now()
|
||||
tree, err := arch.SaveTree(ctx, "/", atree, arch.loadParentTree(ctx, opts.ParentSnapshot))
|
||||
if err != nil {
|
||||
return nil, restic.ID{}, err
|
||||
}
|
||||
|
||||
rootTreeID, stats, err := arch.saveTree(ctx, tree)
|
||||
if err != nil {
|
||||
return nil, restic.ID{}, err
|
||||
}
|
||||
|
||||
arch.CompleteItem("/", nil, nil, stats, time.Since(start))
|
||||
|
||||
err = arch.Repo.Flush(ctx)
|
||||
if err != nil {
|
||||
return nil, restic.ID{}, err
|
||||
}
|
||||
|
||||
err = arch.Repo.SaveIndex(ctx)
|
||||
if err != nil {
|
||||
return nil, restic.ID{}, err
|
||||
}
|
||||
|
||||
sn, err := restic.NewSnapshot(targets, opts.Tags, opts.Hostname, opts.Time)
|
||||
sn.Excludes = opts.Excludes
|
||||
if !opts.ParentSnapshot.IsNull() {
|
||||
id := opts.ParentSnapshot
|
||||
sn.Parent = &id
|
||||
}
|
||||
sn.Tree = &rootTreeID
|
||||
|
||||
id, err := arch.Repo.SaveJSONUnpacked(ctx, restic.SnapshotFile, sn)
|
||||
if err != nil {
|
||||
return nil, restic.ID{}, err
|
||||
}
|
||||
|
||||
return sn, id, nil
|
||||
}
|
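To show how the pieces above fit together, here is a minimal usage sketch. It is not part of this commit; it assumes repo is an already opened restic.Repository, uses the local filesystem, and the hostname is a placeholder.

func runBackup(ctx context.Context, repo restic.Repository, targets []string) error {
	arch := New(repo, fs.Local{}, Options{})

	// example policy: archive everything
	arch.Select = func(item string, fi os.FileInfo) bool { return true }

	opts := SnapshotOptions{
		Hostname: "example-host", // placeholder
		Time:     time.Now(),
		// ParentSnapshot is left as the null ID, so no parent is used
	}

	_, id, err := arch.Snapshot(ctx, targets, opts)
	if err != nil {
		return err
	}
	debug.Log("saved snapshot %v", id.Str())
	return nil
}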
internal/archiver/archiver_test.go (new file, 1569 lines)
File diff suppressed because it is too large
internal/archiver/blob_saver.go (new file, 158 lines)
@@ -0,0 +1,158 @@
package archiver
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
|
||||
"github.com/restic/restic/internal/restic"
|
||||
)
|
||||
|
||||
// Saver allows saving a blob.
|
||||
type Saver interface {
|
||||
SaveBlob(ctx context.Context, t restic.BlobType, data []byte, id restic.ID) (restic.ID, error)
|
||||
Index() restic.Index
|
||||
}
|
||||
|
||||
// BlobSaver concurrently saves incoming blobs to the repo.
|
||||
type BlobSaver struct {
|
||||
repo Saver
|
||||
|
||||
m sync.Mutex
|
||||
knownBlobs restic.BlobSet
|
||||
|
||||
ch chan<- saveBlobJob
|
||||
wg sync.WaitGroup
|
||||
}
|
||||
|
||||
// NewBlobSaver returns a new blob saver. A worker pool is started, it is stopped
|
||||
// when ctx is cancelled.
|
||||
func NewBlobSaver(ctx context.Context, repo Saver, workers uint) *BlobSaver {
|
||||
ch := make(chan saveBlobJob, 2*int(workers))
|
||||
s := &BlobSaver{
|
||||
repo: repo,
|
||||
knownBlobs: restic.NewBlobSet(),
|
||||
ch: ch,
|
||||
}
|
||||
|
||||
for i := uint(0); i < workers; i++ {
|
||||
s.wg.Add(1)
|
||||
go s.worker(ctx, &s.wg, ch)
|
||||
}
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
// Save stores a blob in the repo. It checks the index and the known blobs
|
||||
// before saving anything. The returned FutureBlob reports whether the blob
|
||||
// was previously known.
|
||||
func (s *BlobSaver) Save(ctx context.Context, t restic.BlobType, buf Buffer) FutureBlob {
|
||||
ch := make(chan saveBlobResponse, 1)
|
||||
s.ch <- saveBlobJob{BlobType: t, buf: buf, ch: ch}
|
||||
|
||||
return FutureBlob{ch: ch, length: len(buf.Data)}
|
||||
}
|
||||
|
||||
// FutureBlob is returned by Save and will return the data once it has been processed.
|
||||
type FutureBlob struct {
|
||||
ch <-chan saveBlobResponse
|
||||
length int
|
||||
res saveBlobResponse
|
||||
}
|
||||
|
||||
func (s *FutureBlob) wait() {
|
||||
res, ok := <-s.ch
|
||||
if ok {
|
||||
s.res = res
|
||||
}
|
||||
}
|
||||
|
||||
// ID returns the ID of the blob after it has been saved.
|
||||
func (s *FutureBlob) ID() restic.ID {
|
||||
s.wait()
|
||||
return s.res.id
|
||||
}
|
||||
|
||||
// Known returns whether or not the blob was already known.
|
||||
func (s *FutureBlob) Known() bool {
|
||||
s.wait()
|
||||
return s.res.known
|
||||
}
|
||||
|
||||
// Err returns the error which may have occurred during save.
|
||||
func (s *FutureBlob) Err() error {
|
||||
s.wait()
|
||||
return s.res.err
|
||||
}
|
||||
|
||||
// Length returns the length of the blob.
|
||||
func (s *FutureBlob) Length() int {
|
||||
return s.length
|
||||
}
|
||||
|
||||
type saveBlobJob struct {
|
||||
restic.BlobType
|
||||
buf Buffer
|
||||
ch chan<- saveBlobResponse
|
||||
}
|
||||
|
||||
type saveBlobResponse struct {
|
||||
id restic.ID
|
||||
known bool
|
||||
err error
|
||||
}
|
||||
|
||||
func (s *BlobSaver) saveBlob(ctx context.Context, t restic.BlobType, buf []byte) saveBlobResponse {
|
||||
id := restic.Hash(buf)
|
||||
h := restic.BlobHandle{ID: id, Type: t}
|
||||
|
||||
// check if another goroutine has already saved this blob
|
||||
known := false
|
||||
s.m.Lock()
|
||||
if s.knownBlobs.Has(h) {
|
||||
known = true
|
||||
} else {
|
||||
s.knownBlobs.Insert(h)
|
||||
known = false
|
||||
}
|
||||
s.m.Unlock()
|
||||
|
||||
// blob is already known, nothing to do
|
||||
if known {
|
||||
return saveBlobResponse{
|
||||
id: id,
|
||||
known: true,
|
||||
}
|
||||
}
|
||||
|
||||
// check if the repo knows this blob
|
||||
if s.repo.Index().Has(id, t) {
|
||||
return saveBlobResponse{
|
||||
id: id,
|
||||
known: true,
|
||||
}
|
||||
}
|
||||
|
||||
// otherwise we're responsible for saving it
|
||||
_, err := s.repo.SaveBlob(ctx, t, buf, id)
|
||||
return saveBlobResponse{
|
||||
id: id,
|
||||
known: false,
|
||||
err: err,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *BlobSaver) worker(ctx context.Context, wg *sync.WaitGroup, jobs <-chan saveBlobJob) {
|
||||
defer wg.Done()
|
||||
for {
|
||||
var job saveBlobJob
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case job = <-jobs:
|
||||
}
|
||||
|
||||
job.ch <- s.saveBlob(ctx, job.BlobType, job.buf.Data)
|
||||
close(job.ch)
|
||||
job.buf.Release()
|
||||
}
|
||||
}
|
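A brief usage sketch (illustration only, not from the commit): hand a buffer to the BlobSaver and read the outcome from the returned FutureBlob. It assumes repo satisfies the Saver interface above; the worker count is arbitrary.

func saveOneBlob(ctx context.Context, repo Saver, data []byte) (restic.ID, bool, error) {
	bs := NewBlobSaver(ctx, repo, 2)

	fb := bs.Save(ctx, restic.DataBlob, Buffer{Data: data})

	// the accessors block until a worker has processed the blob
	if err := fb.Err(); err != nil {
		return restic.ID{}, false, err
	}
	return fb.ID(), fb.Known(), nil
}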
internal/archiver/buffer.go (new file, 90 lines)
@@ -0,0 +1,90 @@
package archiver
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// Buffer is a reusable buffer. After the buffer has been used, Release should
|
||||
// be called so the underlying slice is put back into the pool.
|
||||
type Buffer struct {
|
||||
Data []byte
|
||||
Put func([]byte)
|
||||
}
|
||||
|
||||
// Release puts the buffer back into the pool it came from.
|
||||
func (b Buffer) Release() {
|
||||
if b.Put != nil {
|
||||
b.Put(b.Data)
|
||||
}
|
||||
}
|
||||
|
||||
// BufferPool implements a limited set of reusable buffers.
|
||||
type BufferPool struct {
|
||||
ch chan []byte
|
||||
chM sync.Mutex
|
||||
defaultSize int
|
||||
clearOnce sync.Once
|
||||
}
|
||||
|
||||
// NewBufferPool initializes a new buffer pool. When the context is cancelled,
|
||||
// all buffers are released. The pool stores at most max items. New buffers are
|
||||
// created with defaultSize, buffers that are larger are released and not put
|
||||
// back.
|
||||
func NewBufferPool(ctx context.Context, max int, defaultSize int) *BufferPool {
|
||||
b := &BufferPool{
|
||||
ch: make(chan []byte, max),
|
||||
defaultSize: defaultSize,
|
||||
}
|
||||
go func() {
|
||||
<-ctx.Done()
|
||||
b.clear()
|
||||
}()
|
||||
return b
|
||||
}
|
||||
|
||||
// Get returns a new buffer, either from the pool or newly allocated.
|
||||
func (pool *BufferPool) Get() Buffer {
|
||||
b := Buffer{Put: pool.put}
|
||||
|
||||
pool.chM.Lock()
|
||||
defer pool.chM.Unlock()
|
||||
select {
|
||||
case buf := <-pool.ch:
|
||||
b.Data = buf
|
||||
default:
|
||||
b.Data = make([]byte, pool.defaultSize)
|
||||
}
|
||||
|
||||
return b
|
||||
}
|
||||
|
||||
func (pool *BufferPool) put(b []byte) {
|
||||
pool.chM.Lock()
|
||||
defer pool.chM.Unlock()
|
||||
select {
|
||||
case pool.ch <- b:
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
// Put returns a buffer to the pool for reuse.
|
||||
func (pool *BufferPool) Put(b Buffer) {
|
||||
if cap(b.Data) > pool.defaultSize {
|
||||
return
|
||||
}
|
||||
pool.put(b.Data)
|
||||
}
|
||||
|
||||
// clear empties the pool so that all buffers can be garbage collected.
|
||||
func (pool *BufferPool) clear() {
|
||||
pool.clearOnce.Do(func() {
|
||||
ch := pool.ch
|
||||
pool.chM.Lock()
|
||||
pool.ch = nil
|
||||
pool.chM.Unlock()
|
||||
close(ch)
|
||||
for range ch {
|
||||
}
|
||||
})
|
||||
}
|
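Illustration only (not part of the commit): a typical get/use/release cycle; ctx is assumed to be an existing context and the sizes are arbitrary.

pool := NewBufferPool(ctx, 10, 1<<20) // keep at most 10 buffers of 1 MiB

buf := pool.Get()
copy(buf.Data, []byte("example data"))
// ... use buf.Data ...
buf.Release() // hands the slice back to the pool via the Put callback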
internal/archiver/file_saver.go (new file, 228 lines)
@@ -0,0 +1,228 @@
package archiver
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"os"
|
||||
"sync"
|
||||
|
||||
"github.com/restic/chunker"
|
||||
"github.com/restic/restic/internal/debug"
|
||||
"github.com/restic/restic/internal/errors"
|
||||
"github.com/restic/restic/internal/fs"
|
||||
"github.com/restic/restic/internal/restic"
|
||||
)
|
||||
|
||||
// FutureFile is returned by Save and will return the data once it
|
||||
// has been processed.
|
||||
type FutureFile struct {
|
||||
ch <-chan saveFileResponse
|
||||
res saveFileResponse
|
||||
}
|
||||
|
||||
func (s *FutureFile) wait() {
|
||||
res, ok := <-s.ch
|
||||
if ok {
|
||||
s.res = res
|
||||
}
|
||||
}
|
||||
|
||||
// Node returns the node once it is available.
|
||||
func (s *FutureFile) Node() *restic.Node {
|
||||
s.wait()
|
||||
return s.res.node
|
||||
}
|
||||
|
||||
// Stats returns the stats for the file once they are available.
|
||||
func (s *FutureFile) Stats() ItemStats {
|
||||
s.wait()
|
||||
return s.res.stats
|
||||
}
|
||||
|
||||
// Err returns the error in case an error occurred.
|
||||
func (s *FutureFile) Err() error {
|
||||
s.wait()
|
||||
return s.res.err
|
||||
}
|
||||
|
||||
// FileSaver concurrently saves incoming files to the repo.
|
||||
type FileSaver struct {
|
||||
fs fs.FS
|
||||
blobSaver *BlobSaver
|
||||
saveFilePool *BufferPool
|
||||
|
||||
pol chunker.Pol
|
||||
|
||||
ch chan<- saveFileJob
|
||||
wg sync.WaitGroup
|
||||
|
||||
CompleteBlob func(filename string, bytes uint64)
|
||||
|
||||
NodeFromFileInfo func(filename string, fi os.FileInfo) (*restic.Node, error)
|
||||
}
|
||||
|
||||
// NewFileSaver returns a new file saver. A worker pool with workers is
|
||||
// started, it is stopped when ctx is cancelled.
|
||||
func NewFileSaver(ctx context.Context, fs fs.FS, blobSaver *BlobSaver, pol chunker.Pol, workers uint) *FileSaver {
|
||||
ch := make(chan saveFileJob, workers)
|
||||
|
||||
s := &FileSaver{
|
||||
fs: fs,
|
||||
blobSaver: blobSaver,
|
||||
saveFilePool: NewBufferPool(ctx, 3*int(workers), chunker.MaxSize/4),
|
||||
pol: pol,
|
||||
ch: ch,
|
||||
|
||||
CompleteBlob: func(string, uint64) {},
|
||||
}
|
||||
|
||||
for i := uint(0); i < workers; i++ {
|
||||
s.wg.Add(1)
|
||||
go s.worker(ctx, &s.wg, ch)
|
||||
}
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
// CompleteFunc is called when the file has been saved.
|
||||
type CompleteFunc func(*restic.Node, ItemStats)
|
||||
|
||||
// Save stores the file f and returns the data once it has been completed. The
|
||||
// file is closed by Save.
|
||||
func (s *FileSaver) Save(ctx context.Context, snPath string, file fs.File, fi os.FileInfo, start func(), complete CompleteFunc) FutureFile {
|
||||
ch := make(chan saveFileResponse, 1)
|
||||
s.ch <- saveFileJob{
|
||||
snPath: snPath,
|
||||
file: file,
|
||||
fi: fi,
|
||||
start: start,
|
||||
complete: complete,
|
||||
ch: ch,
|
||||
}
|
||||
|
||||
return FutureFile{ch: ch}
|
||||
}
|
||||
|
||||
type saveFileJob struct {
|
||||
snPath string
|
||||
file fs.File
|
||||
fi os.FileInfo
|
||||
ch chan<- saveFileResponse
|
||||
complete CompleteFunc
|
||||
start func()
|
||||
}
|
||||
|
||||
type saveFileResponse struct {
|
||||
node *restic.Node
|
||||
stats ItemStats
|
||||
err error
|
||||
}
|
||||
|
||||
// saveFile stores the file f in the repo, then closes it.
|
||||
func (s *FileSaver) saveFile(ctx context.Context, chnker *chunker.Chunker, snPath string, f fs.File, fi os.FileInfo, start func()) saveFileResponse {
|
||||
start()
|
||||
|
||||
stats := ItemStats{}
|
||||
|
||||
debug.Log("%v", snPath)
|
||||
|
||||
node, err := s.NodeFromFileInfo(f.Name(), fi)
|
||||
if err != nil {
|
||||
_ = f.Close()
|
||||
return saveFileResponse{err: err}
|
||||
}
|
||||
|
||||
if node.Type != "file" {
|
||||
_ = f.Close()
|
||||
return saveFileResponse{err: errors.Errorf("node type %q is wrong", node.Type)}
|
||||
}
|
||||
|
||||
// reuse the chunker
|
||||
chnker.Reset(f, s.pol)
|
||||
|
||||
var results []FutureBlob
|
||||
|
||||
node.Content = []restic.ID{}
|
||||
var size uint64
|
||||
for {
|
||||
buf := s.saveFilePool.Get()
|
||||
chunk, err := chnker.Next(buf.Data)
|
||||
if errors.Cause(err) == io.EOF {
|
||||
buf.Release()
|
||||
break
|
||||
}
|
||||
buf.Data = chunk.Data
|
||||
|
||||
size += uint64(chunk.Length)
|
||||
|
||||
if err != nil {
|
||||
_ = f.Close()
|
||||
return saveFileResponse{err: err}
|
||||
}
|
||||
|
||||
// test if the context has been cancelled, return the error
|
||||
if ctx.Err() != nil {
|
||||
_ = f.Close()
|
||||
return saveFileResponse{err: ctx.Err()}
|
||||
}
|
||||
|
||||
res := s.blobSaver.Save(ctx, restic.DataBlob, buf)
|
||||
results = append(results, res)
|
||||
|
||||
// test if the context has been cancelled, return the error
|
||||
if ctx.Err() != nil {
|
||||
_ = f.Close()
|
||||
return saveFileResponse{err: ctx.Err()}
|
||||
}
|
||||
|
||||
s.CompleteBlob(f.Name(), uint64(len(chunk.Data)))
|
||||
}
|
||||
|
||||
err = f.Close()
|
||||
if err != nil {
|
||||
return saveFileResponse{err: err}
|
||||
}
|
||||
|
||||
for _, res := range results {
|
||||
// return the error if saving the blob failed
|
||||
if res.Err() != nil {
|
||||
return saveFileResponse{err: res.Err()}
|
||||
}
|
||||
|
||||
if !res.Known() {
|
||||
stats.DataBlobs++
|
||||
stats.DataSize += uint64(res.Length())
|
||||
}
|
||||
|
||||
node.Content = append(node.Content, res.ID())
|
||||
}
|
||||
|
||||
node.Size = size
|
||||
|
||||
return saveFileResponse{
|
||||
node: node,
|
||||
stats: stats,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *FileSaver) worker(ctx context.Context, wg *sync.WaitGroup, jobs <-chan saveFileJob) {
|
||||
// a worker has one chunker which is reused for each file (because it contains a rather large buffer)
|
||||
chnker := chunker.New(nil, s.pol)
|
||||
|
||||
defer wg.Done()
|
||||
for {
|
||||
var job saveFileJob
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case job = <-jobs:
|
||||
}
|
||||
|
||||
res := s.saveFile(ctx, chnker, job.snPath, job.file, job.fi, job.start)
|
||||
if job.complete != nil {
|
||||
job.complete(res.node, res.stats)
|
||||
}
|
||||
job.ch <- res
|
||||
close(job.ch)
|
||||
}
|
||||
}
|
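An illustrative wiring sketch, not part of the commit: open a file through the fs abstraction and feed it to a FileSaver backed by a BlobSaver. It assumes repo is an open repository providing the chunker polynomial; the concurrency values are arbitrary.

func saveOneFile(ctx context.Context, repo restic.Repository, filesystem fs.FS, path string) (*restic.Node, ItemStats, error) {
	blobSaver := NewBlobSaver(ctx, repo, 2)
	fileSaver := NewFileSaver(ctx, filesystem, blobSaver, repo.Config().ChunkerPolynomial, 2)
	fileSaver.NodeFromFileInfo = restic.NodeFromFileInfo

	f, err := filesystem.OpenFile(path, fs.O_RDONLY|fs.O_NOFOLLOW, 0)
	if err != nil {
		return nil, ItemStats{}, err
	}
	fi, err := f.Stat()
	if err != nil {
		_ = f.Close()
		return nil, ItemStats{}, err
	}

	// Save takes ownership of f and closes it itself
	ff := fileSaver.Save(ctx, path, f, fi, func() {}, nil)
	return ff.Node(), ff.Stats(), ff.Err()
}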
internal/archiver/index_uploader.go (new file, 53 lines)
@@ -0,0 +1,53 @@
package archiver
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/restic/restic/internal/debug"
|
||||
"github.com/restic/restic/internal/repository"
|
||||
"github.com/restic/restic/internal/restic"
|
||||
)
|
||||
|
||||
// IndexUploader polls the repo for full indexes and uploads them.
|
||||
type IndexUploader struct {
|
||||
restic.Repository
|
||||
|
||||
// Start is called when an index is to be uploaded.
|
||||
Start func()
|
||||
|
||||
// Complete is called when uploading an index has finished.
|
||||
Complete func(id restic.ID)
|
||||
}
|
||||
|
||||
// Upload periodically uploads full indexes to the repo. When shutdown is
|
||||
// cancelled, the last index upload will finish and then Upload returns.
|
||||
func (u IndexUploader) Upload(ctx, shutdown context.Context, interval time.Duration) error {
|
||||
ticker := time.NewTicker(interval)
|
||||
defer ticker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil
|
||||
case <-shutdown.Done():
|
||||
return nil
|
||||
case <-ticker.C:
|
||||
full := u.Repository.Index().(*repository.MasterIndex).FullIndexes()
|
||||
for _, idx := range full {
|
||||
if u.Start != nil {
|
||||
u.Start()
|
||||
}
|
||||
|
||||
id, err := repository.SaveIndex(ctx, u.Repository, idx)
|
||||
if err != nil {
|
||||
debug.Log("save indexes returned an error: %v", err)
|
||||
return err
|
||||
}
|
||||
if u.Complete != nil {
|
||||
u.Complete(id)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
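Illustration only (not from the commit): run the uploader in the background during a backup and stop it via a separate shutdown context. The interval is arbitrary; repo and ctx are assumed to exist.

uploadCtx, stopUpload := context.WithCancel(context.Background())

uploader := IndexUploader{
	Repository: repo,
	Complete: func(id restic.ID) {
		debug.Log("uploaded index %v", id.Str())
	},
}

go func() {
	if err := uploader.Upload(ctx, uploadCtx, 30*time.Second); err != nil {
		debug.Log("index upload failed: %v", err)
	}
}()

// ... run the backup ...

stopUpload() // the uploader finishes its current round and returns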
internal/archiver/scanner.go (new file, 112 lines)
@@ -0,0 +1,112 @@
package archiver
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/restic/restic/internal/fs"
|
||||
)
|
||||
|
||||
// Scanner traverses the targets and calls the function Result with cumulative
|
||||
// stats concerning the files and folders found. Select is used to decide which
|
||||
// items should be included. Error is called when an error occurs.
|
||||
type Scanner struct {
|
||||
FS fs.FS
|
||||
Select SelectFunc
|
||||
Error ErrorFunc
|
||||
Result func(item string, s ScanStats)
|
||||
}
|
||||
|
||||
// NewScanner initializes a new Scanner.
|
||||
func NewScanner(fs fs.FS) *Scanner {
|
||||
return &Scanner{
|
||||
FS: fs,
|
||||
Select: func(item string, fi os.FileInfo) bool {
|
||||
return true
|
||||
},
|
||||
Error: func(item string, fi os.FileInfo, err error) error {
|
||||
return err
|
||||
},
|
||||
Result: func(item string, s ScanStats) {},
|
||||
}
|
||||
}
|
||||
|
||||
// ScanStats collects statistics.
|
||||
type ScanStats struct {
|
||||
Files, Dirs, Others uint
|
||||
Bytes uint64
|
||||
}
|
||||
|
||||
// Scan traverses the targets. The function Result is called for each new item
|
||||
// found; the final cumulative result is passed to Result with an empty item name.
|
||||
func (s *Scanner) Scan(ctx context.Context, targets []string) error {
|
||||
var stats ScanStats
|
||||
for _, target := range targets {
|
||||
abstarget, err := s.FS.Abs(target)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
stats, err = s.scan(ctx, stats, abstarget)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if ctx.Err() != nil {
|
||||
return ctx.Err()
|
||||
}
|
||||
}
|
||||
|
||||
s.Result("", stats)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Scanner) scan(ctx context.Context, stats ScanStats, target string) (ScanStats, error) {
|
||||
if ctx.Err() != nil {
|
||||
return stats, ctx.Err()
|
||||
}
|
||||
|
||||
fi, err := s.FS.Lstat(target)
|
||||
if err != nil {
|
||||
// ignore error if the target is to be excluded anyway
|
||||
if !s.Select(target, nil) {
|
||||
return stats, nil
|
||||
}
|
||||
|
||||
// else return filtered error
|
||||
return stats, s.Error(target, fi, err)
|
||||
}
|
||||
|
||||
if !s.Select(target, fi) {
|
||||
return stats, nil
|
||||
}
|
||||
|
||||
switch {
|
||||
case fi.Mode().IsRegular():
|
||||
stats.Files++
|
||||
stats.Bytes += uint64(fi.Size())
|
||||
case fi.Mode().IsDir():
|
||||
if ctx.Err() != nil {
|
||||
return stats, ctx.Err()
|
||||
}
|
||||
|
||||
names, err := readdirnames(s.FS, target)
|
||||
if err != nil {
|
||||
return stats, s.Error(target, fi, err)
|
||||
}
|
||||
|
||||
for _, name := range names {
|
||||
stats, err = s.scan(ctx, stats, filepath.Join(target, name))
|
||||
if err != nil {
|
||||
return stats, err
|
||||
}
|
||||
}
|
||||
stats.Dirs++
|
||||
default:
|
||||
stats.Others++
|
||||
}
|
||||
|
||||
s.Result(target, stats)
|
||||
return stats, nil
|
||||
}
|
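Illustration only (not part of the commit): estimate the size of a backup set before archiving. The target path is a placeholder and ctx is assumed to exist; the final cumulative stats arrive in the Result call with an empty item name.

sc := NewScanner(fs.Local{})

var total ScanStats
sc.Result = func(item string, s ScanStats) {
	if item == "" {
		total = s // final cumulative result
	}
}

err := sc.Scan(ctx, []string{"/backup/src"})
if err != nil {
	// handle the error
}
debug.Log("scan found %d files, %d dirs, %d bytes", total.Files, total.Dirs, total.Bytes)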
internal/archiver/scanner_test.go (new file, 333 lines)
@@ -0,0 +1,333 @@
package archiver
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"testing"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/restic/restic/internal/fs"
|
||||
restictest "github.com/restic/restic/internal/test"
|
||||
)
|
||||
|
||||
func TestScanner(t *testing.T) {
|
||||
var tests = []struct {
|
||||
name string
|
||||
src TestDir
|
||||
want map[string]ScanStats
|
||||
selFn SelectFunc
|
||||
}{
|
||||
{
|
||||
name: "include-all",
|
||||
src: TestDir{
|
||||
"other": TestFile{Content: "another file"},
|
||||
"work": TestDir{
|
||||
"foo": TestFile{Content: "foo"},
|
||||
"foo.txt": TestFile{Content: "foo text file"},
|
||||
"subdir": TestDir{
|
||||
"other": TestFile{Content: "other in subdir"},
|
||||
"bar.txt": TestFile{Content: "bar.txt in subdir"},
|
||||
},
|
||||
},
|
||||
},
|
||||
want: map[string]ScanStats{
|
||||
filepath.FromSlash("other"): ScanStats{Files: 1, Bytes: 12},
|
||||
filepath.FromSlash("work/foo"): ScanStats{Files: 2, Bytes: 15},
|
||||
filepath.FromSlash("work/foo.txt"): ScanStats{Files: 3, Bytes: 28},
|
||||
filepath.FromSlash("work/subdir/bar.txt"): ScanStats{Files: 4, Bytes: 45},
|
||||
filepath.FromSlash("work/subdir/other"): ScanStats{Files: 5, Bytes: 60},
|
||||
filepath.FromSlash("work/subdir"): ScanStats{Files: 5, Dirs: 1, Bytes: 60},
|
||||
filepath.FromSlash("work"): ScanStats{Files: 5, Dirs: 2, Bytes: 60},
|
||||
filepath.FromSlash("."): ScanStats{Files: 5, Dirs: 3, Bytes: 60},
|
||||
filepath.FromSlash(""): ScanStats{Files: 5, Dirs: 3, Bytes: 60},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "select-txt",
|
||||
src: TestDir{
|
||||
"other": TestFile{Content: "another file"},
|
||||
"work": TestDir{
|
||||
"foo": TestFile{Content: "foo"},
|
||||
"foo.txt": TestFile{Content: "foo text file"},
|
||||
"subdir": TestDir{
|
||||
"other": TestFile{Content: "other in subdir"},
|
||||
"bar.txt": TestFile{Content: "bar.txt in subdir"},
|
||||
},
|
||||
},
|
||||
},
|
||||
selFn: func(item string, fi os.FileInfo) bool {
|
||||
if fi.IsDir() {
|
||||
return true
|
||||
}
|
||||
|
||||
if filepath.Ext(item) == ".txt" {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
},
|
||||
want: map[string]ScanStats{
|
||||
filepath.FromSlash("work/foo.txt"): ScanStats{Files: 1, Bytes: 13},
|
||||
filepath.FromSlash("work/subdir/bar.txt"): ScanStats{Files: 2, Bytes: 30},
|
||||
filepath.FromSlash("work/subdir"): ScanStats{Files: 2, Dirs: 1, Bytes: 30},
|
||||
filepath.FromSlash("work"): ScanStats{Files: 2, Dirs: 2, Bytes: 30},
|
||||
filepath.FromSlash("."): ScanStats{Files: 2, Dirs: 3, Bytes: 30},
|
||||
filepath.FromSlash(""): ScanStats{Files: 2, Dirs: 3, Bytes: 30},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
tempdir, cleanup := restictest.TempDir(t)
|
||||
defer cleanup()
|
||||
|
||||
TestCreateFiles(t, tempdir, test.src)
|
||||
|
||||
back := fs.TestChdir(t, tempdir)
|
||||
defer back()
|
||||
|
||||
cur, err := os.Getwd()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
sc := NewScanner(fs.Track{fs.Local{}})
|
||||
if test.selFn != nil {
|
||||
sc.Select = test.selFn
|
||||
}
|
||||
|
||||
results := make(map[string]ScanStats)
|
||||
sc.Result = func(item string, s ScanStats) {
|
||||
var p string
|
||||
var err error
|
||||
|
||||
if item != "" {
|
||||
p, err = filepath.Rel(cur, item)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
results[p] = s
|
||||
}
|
||||
|
||||
err = sc.Scan(ctx, []string{"."})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if !cmp.Equal(test.want, results) {
|
||||
t.Error(cmp.Diff(test.want, results))
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestScannerError(t *testing.T) {
|
||||
var tests = []struct {
|
||||
name string
|
||||
unix bool
|
||||
src TestDir
|
||||
result ScanStats
|
||||
selFn SelectFunc
|
||||
errFn func(t testing.TB, item string, fi os.FileInfo, err error) error
|
||||
resFn func(t testing.TB, item string, s ScanStats)
|
||||
prepare func(t testing.TB)
|
||||
}{
|
||||
{
|
||||
name: "no-error",
|
||||
src: TestDir{
|
||||
"other": TestFile{Content: "another file"},
|
||||
"work": TestDir{
|
||||
"foo": TestFile{Content: "foo"},
|
||||
"foo.txt": TestFile{Content: "foo text file"},
|
||||
"subdir": TestDir{
|
||||
"other": TestFile{Content: "other in subdir"},
|
||||
"bar.txt": TestFile{Content: "bar.txt in subdir"},
|
||||
},
|
||||
},
|
||||
},
|
||||
result: ScanStats{Files: 5, Dirs: 3, Bytes: 60},
|
||||
},
|
||||
{
|
||||
name: "unreadable-dir",
|
||||
unix: true,
|
||||
src: TestDir{
|
||||
"other": TestFile{Content: "another file"},
|
||||
"work": TestDir{
|
||||
"foo": TestFile{Content: "foo"},
|
||||
"foo.txt": TestFile{Content: "foo text file"},
|
||||
"subdir": TestDir{
|
||||
"other": TestFile{Content: "other in subdir"},
|
||||
"bar.txt": TestFile{Content: "bar.txt in subdir"},
|
||||
},
|
||||
},
|
||||
},
|
||||
result: ScanStats{Files: 3, Dirs: 2, Bytes: 28},
|
||||
prepare: func(t testing.TB) {
|
||||
err := os.Chmod(filepath.Join("work", "subdir"), 0000)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
},
|
||||
errFn: func(t testing.TB, item string, fi os.FileInfo, err error) error {
|
||||
if item == filepath.FromSlash("work/subdir") {
|
||||
return nil
|
||||
}
|
||||
|
||||
return err
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "removed-item",
|
||||
src: TestDir{
|
||||
"bar": TestFile{Content: "bar"},
|
||||
"baz": TestFile{Content: "baz"},
|
||||
"foo": TestFile{Content: "foo"},
|
||||
"other": TestFile{Content: "other"},
|
||||
},
|
||||
result: ScanStats{Files: 3, Dirs: 1, Bytes: 11},
|
||||
resFn: func(t testing.TB, item string, s ScanStats) {
|
||||
if item == "bar" {
|
||||
err := os.Remove("foo")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
},
|
||||
errFn: func(t testing.TB, item string, fi os.FileInfo, err error) error {
|
||||
if item == "foo" {
|
||||
t.Logf("ignoring error for %v: %v", item, err)
|
||||
return nil
|
||||
}
|
||||
|
||||
return err
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
if test.unix && runtime.GOOS == "windows" {
|
||||
t.Skipf("skip on windows")
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
tempdir, cleanup := restictest.TempDir(t)
|
||||
defer cleanup()
|
||||
|
||||
TestCreateFiles(t, tempdir, test.src)
|
||||
|
||||
back := fs.TestChdir(t, tempdir)
|
||||
defer back()
|
||||
|
||||
cur, err := os.Getwd()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if test.prepare != nil {
|
||||
test.prepare(t)
|
||||
}
|
||||
|
||||
sc := NewScanner(fs.Track{fs.Local{}})
|
||||
if test.selFn != nil {
|
||||
sc.Select = test.selFn
|
||||
}
|
||||
|
||||
var stats ScanStats
|
||||
|
||||
sc.Result = func(item string, s ScanStats) {
|
||||
if item == "" {
|
||||
stats = s
|
||||
return
|
||||
}
|
||||
|
||||
if test.resFn != nil {
|
||||
p, relErr := filepath.Rel(cur, item)
|
||||
if relErr != nil {
|
||||
panic(relErr)
|
||||
}
|
||||
test.resFn(t, p, s)
|
||||
}
|
||||
}
|
||||
if test.errFn != nil {
|
||||
sc.Error = func(item string, fi os.FileInfo, err error) error {
|
||||
p, relErr := filepath.Rel(cur, item)
|
||||
if relErr != nil {
|
||||
panic(relErr)
|
||||
}
|
||||
|
||||
return test.errFn(t, p, fi, err)
|
||||
}
|
||||
}
|
||||
|
||||
err = sc.Scan(ctx, []string{"."})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if stats != test.result {
|
||||
t.Errorf("wrong final result, want\n %#v\ngot:\n %#v", test.result, stats)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestScannerCancel(t *testing.T) {
|
||||
src := TestDir{
|
||||
"bar": TestFile{Content: "bar"},
|
||||
"baz": TestFile{Content: "baz"},
|
||||
"foo": TestFile{Content: "foo"},
|
||||
"other": TestFile{Content: "other"},
|
||||
}
|
||||
|
||||
result := ScanStats{Files: 2, Bytes: 6}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
tempdir, cleanup := restictest.TempDir(t)
|
||||
defer cleanup()
|
||||
|
||||
TestCreateFiles(t, tempdir, src)
|
||||
|
||||
back := fs.TestChdir(t, tempdir)
|
||||
defer back()
|
||||
|
||||
cur, err := os.Getwd()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
sc := NewScanner(fs.Track{fs.Local{}})
|
||||
var lastStats ScanStats
|
||||
sc.Result = func(item string, s ScanStats) {
|
||||
lastStats = s
|
||||
|
||||
if item == filepath.Join(cur, "baz") {
|
||||
t.Logf("found baz")
|
||||
cancel()
|
||||
}
|
||||
}
|
||||
|
||||
err = sc.Scan(ctx, []string{"."})
|
||||
if err == nil {
|
||||
t.Errorf("did not find expected error")
|
||||
}
|
||||
|
||||
if err != context.Canceled {
|
||||
t.Errorf("unexpected error found, want %v, got %v", context.Canceled, err)
|
||||
}
|
||||
|
||||
if lastStats != result {
|
||||
t.Errorf("wrong final result, want\n %#v\ngot:\n %#v", result, lastStats)
|
||||
}
|
||||
}
|
@@ -2,10 +2,19 @@ package archiver
|
||||
import (
|
||||
"context"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/restic/restic/internal/debug"
|
||||
"github.com/restic/restic/internal/fs"
|
||||
"github.com/restic/restic/internal/restic"
|
||||
"github.com/restic/restic/internal/test"
|
||||
)
|
||||
|
||||
// TestSnapshot creates a new snapshot of path.
|
||||
@@ -17,3 +26,310 @@ func TestSnapshot(t testing.TB, repo restic.Repository, path string, parent *res
}
|
||||
return sn
|
||||
}
|
||||
|
||||
// TestDir describes a directory structure to create for a test.
|
||||
type TestDir map[string]interface{}
|
||||
|
||||
func (d TestDir) String() string {
|
||||
return "<Dir>"
|
||||
}
|
||||
|
||||
// TestFile describes a file created for a test.
|
||||
type TestFile struct {
|
||||
Content string
|
||||
}
|
||||
|
||||
func (f TestFile) String() string {
|
||||
return "<File>"
|
||||
}
|
||||
|
||||
// TestSymlink describes a symlink created for a test.
|
||||
type TestSymlink struct {
|
||||
Target string
|
||||
}
|
||||
|
||||
func (s TestSymlink) String() string {
|
||||
return "<Symlink>"
|
||||
}
|
||||
|
||||
// TestCreateFiles creates a directory structure described by dir at target,
|
||||
// which must already exist. On Windows, symlinks aren't created.
|
||||
func TestCreateFiles(t testing.TB, target string, dir TestDir) {
|
||||
test.Helper(t).Helper()
|
||||
for name, item := range dir {
|
||||
targetPath := filepath.Join(target, name)
|
||||
|
||||
switch it := item.(type) {
|
||||
case TestFile:
|
||||
err := ioutil.WriteFile(targetPath, []byte(it.Content), 0644)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
case TestSymlink:
|
||||
if runtime.GOOS == "windows" {
|
||||
continue
|
||||
}
|
||||
|
||||
err := fs.Symlink(filepath.FromSlash(it.Target), targetPath)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
case TestDir:
|
||||
err := fs.Mkdir(targetPath, 0755)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
TestCreateFiles(t, targetPath, it)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestWalkFunc is used by TestWalkFiles to traverse the dir. When an error is
|
||||
// returned, traversal stops and the surrounding test is marked as failed.
|
||||
type TestWalkFunc func(path string, item interface{}) error
|
||||
|
||||
// TestWalkFiles runs fn for each file/directory in dir, the filename will be
|
||||
// constructed with target as the prefix. Symlinks on Windows are ignored.
|
||||
func TestWalkFiles(t testing.TB, target string, dir TestDir, fn TestWalkFunc) {
|
||||
test.Helper(t).Helper()
|
||||
for name, item := range dir {
|
||||
targetPath := filepath.Join(target, name)
|
||||
|
||||
err := fn(targetPath, item)
|
||||
if err != nil {
|
||||
t.Fatalf("TestWalkFunc returned error for %v: %v", targetPath, err)
|
||||
return
|
||||
}
|
||||
|
||||
if dir, ok := item.(TestDir); ok {
|
||||
TestWalkFiles(t, targetPath, dir, fn)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// fixpath removes the UNC prefix (`\\?`) from paths on Windows. On other systems, it's a no-op.
|
||||
func fixpath(item string) string {
|
||||
if runtime.GOOS != "windows" {
|
||||
return item
|
||||
}
|
||||
if strings.HasPrefix(item, `\\?`) {
|
||||
return item[4:]
|
||||
}
|
||||
return item
|
||||
}
|
||||
|
||||
// TestEnsureFiles tests if the directory structure at target is the same as
|
||||
// described in dir.
|
||||
func TestEnsureFiles(t testing.TB, target string, dir TestDir) {
|
||||
test.Helper(t).Helper()
|
||||
pathsChecked := make(map[string]struct{})
|
||||
|
||||
// first, test that all items are there
|
||||
TestWalkFiles(t, target, dir, func(path string, item interface{}) error {
|
||||
// ignore symlinks on Windows
|
||||
if _, ok := item.(TestSymlink); ok && runtime.GOOS == "windows" {
|
||||
// mark paths and parents as checked
|
||||
pathsChecked[path] = struct{}{}
|
||||
for parent := filepath.Dir(path); parent != target; parent = filepath.Dir(parent) {
|
||||
pathsChecked[parent] = struct{}{}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
fi, err := fs.Lstat(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
switch node := item.(type) {
|
||||
case TestDir:
|
||||
if !fi.IsDir() {
|
||||
t.Errorf("is not a directory: %v", path)
|
||||
}
|
||||
return nil
|
||||
case TestFile:
|
||||
if !fs.IsRegularFile(fi) {
|
||||
t.Errorf("is not a regular file: %v", path)
|
||||
return nil
|
||||
}
|
||||
|
||||
content, err := ioutil.ReadFile(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if string(content) != node.Content {
|
||||
t.Errorf("wrong content for %v, want %q, got %q", path, node.Content, content)
|
||||
}
|
||||
case TestSymlink:
|
||||
if fi.Mode()&os.ModeType != os.ModeSymlink {
|
||||
t.Errorf("is not a symlink: %v", path)
|
||||
return nil
|
||||
}
|
||||
|
||||
target, err := fs.Readlink(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if target != node.Target {
|
||||
t.Errorf("wrong target for %v, want %v, got %v", path, node.Target, target)
|
||||
}
|
||||
}
|
||||
|
||||
pathsChecked[path] = struct{}{}
|
||||
|
||||
for parent := filepath.Dir(path); parent != target; parent = filepath.Dir(parent) {
|
||||
pathsChecked[parent] = struct{}{}
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
// then, traverse the directory again, looking for additional files
|
||||
err := fs.Walk(target, func(path string, fi os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
path = fixpath(path)
|
||||
|
||||
if path == target {
|
||||
return nil
|
||||
}
|
||||
|
||||
_, ok := pathsChecked[path]
|
||||
if !ok {
|
||||
t.Errorf("additional item found: %v %v", path, fi.Mode())
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// TestEnsureFileContent checks that the file content stored in the repo for node is the same as in file.
|
||||
func TestEnsureFileContent(ctx context.Context, t testing.TB, repo restic.Repository, filename string, node *restic.Node, file TestFile) {
|
||||
if int(node.Size) != len(file.Content) {
|
||||
t.Fatalf("%v: wrong node size: want %d, got %d", filename, node.Size, len(file.Content))
|
||||
return
|
||||
}
|
||||
|
||||
content := make([]byte, restic.CiphertextLength(len(file.Content)))
|
||||
pos := 0
|
||||
for _, id := range node.Content {
|
||||
n, err := repo.LoadBlob(ctx, restic.DataBlob, id, content[pos:])
|
||||
if err != nil {
|
||||
t.Fatalf("error loading blob %v: %v", id.Str(), err)
|
||||
return
|
||||
}
|
||||
|
||||
pos += n
|
||||
}
|
||||
|
||||
content = content[:pos]
|
||||
|
||||
if string(content) != file.Content {
|
||||
t.Fatalf("%v: wrong content returned, want %q, got %q", filename, file.Content, content)
|
||||
}
|
||||
}
|
||||
|
||||
// TestEnsureTree checks that the tree ID in the repo matches dir. On Windows,
|
||||
// symlinks are ignored.
|
||||
func TestEnsureTree(ctx context.Context, t testing.TB, prefix string, repo restic.Repository, treeID restic.ID, dir TestDir) {
|
||||
test.Helper(t).Helper()
|
||||
|
||||
tree, err := repo.LoadTree(ctx, treeID)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
return
|
||||
}
|
||||
|
||||
var nodeNames []string
|
||||
for _, node := range tree.Nodes {
|
||||
nodeNames = append(nodeNames, node.Name)
|
||||
}
|
||||
debug.Log("%v (%v) %v", prefix, treeID.Str(), nodeNames)
|
||||
|
||||
checked := make(map[string]struct{})
|
||||
for _, node := range tree.Nodes {
|
||||
nodePrefix := path.Join(prefix, node.Name)
|
||||
|
||||
entry, ok := dir[node.Name]
|
||||
if !ok {
|
||||
t.Errorf("unexpected tree node %q found, want: %#v", node.Name, dir)
|
||||
return
|
||||
}
|
||||
|
||||
checked[node.Name] = struct{}{}
|
||||
|
||||
switch e := entry.(type) {
|
||||
case TestDir:
|
||||
if node.Type != "dir" {
|
||||
t.Errorf("tree node %v has wrong type %q, want %q", nodePrefix, node.Type, "dir")
|
||||
return
|
||||
}
|
||||
|
||||
if node.Subtree == nil {
|
||||
t.Errorf("tree node %v has nil subtree", nodePrefix)
|
||||
return
|
||||
}
|
||||
|
||||
TestEnsureTree(ctx, t, path.Join(prefix, node.Name), repo, *node.Subtree, e)
|
||||
case TestFile:
|
||||
if node.Type != "file" {
|
||||
t.Errorf("tree node %v has wrong type %q, want %q", nodePrefix, node.Type, "file")
|
||||
}
|
||||
TestEnsureFileContent(ctx, t, repo, nodePrefix, node, e)
|
||||
case TestSymlink:
|
||||
// skip symlinks on windows
|
||||
if runtime.GOOS == "windows" {
|
||||
continue
|
||||
}
|
||||
if node.Type != "symlink" {
|
||||
t.Errorf("tree node %v has wrong type %q, want %q", nodePrefix, node.Type, "file")
|
||||
}
|
||||
|
||||
if e.Target != node.LinkTarget {
|
||||
t.Errorf("symlink %v has wrong target, want %q, got %q", nodePrefix, e.Target, node.LinkTarget)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for name := range dir {
|
||||
// skip checking symlinks on Windows
|
||||
entry := dir[name]
|
||||
if _, ok := entry.(TestSymlink); ok && runtime.GOOS == "windows" {
|
||||
continue
|
||||
}
|
||||
|
||||
_, ok := checked[name]
|
||||
if !ok {
|
||||
t.Errorf("tree %v: expected node %q not found, has: %v", prefix, name, nodeNames)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestEnsureSnapshot tests if the snapshot in the repo has exactly the same
|
||||
// structure as dir. On Windows, symlinks are ignored.
|
||||
func TestEnsureSnapshot(t testing.TB, repo restic.Repository, snapshotID restic.ID, dir TestDir) {
|
||||
test.Helper(t).Helper()
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
sn, err := restic.LoadSnapshot(ctx, repo, snapshotID)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
return
|
||||
}
|
||||
|
||||
if sn.Tree == nil {
|
||||
t.Fatal("snapshot has nil tree ID")
|
||||
return
|
||||
}
|
||||
|
||||
TestEnsureTree(ctx, t, "/", repo, *sn.Tree, dir)
|
||||
}
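// Illustrative sketch (added for clarity, not part of this commit): a typical
// test creates a snapshot and then verifies it with TestEnsureSnapshot. The
// function name exampleVerifySnapshot and the fixture below are hypothetical;
// see testing_test.go for how these helpers are actually exercised.
func exampleVerifySnapshot(t testing.TB, repo restic.Repository, snapshotID restic.ID) {
	want := TestDir{
		"foo": TestFile{Content: "foo"},
		"subdir": TestDir{
			"subfile": TestFile{Content: "bar"},
		},
	}
	// fails the surrounding test if the snapshot's tree does not match want
	TestEnsureSnapshot(t, repo, snapshotID, want)
}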
|
||||
|
|
525
internal/archiver/testing_test.go
Normal file
|
@@ -0,0 +1,525 @@
|
|||
package archiver
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/restic/restic/internal/fs"
|
||||
"github.com/restic/restic/internal/repository"
|
||||
restictest "github.com/restic/restic/internal/test"
|
||||
)
|
||||
|
||||
// MockT passes through all logging functions from T, but catches Fail(),
|
||||
// Error/f() and Fatal/f(). It is used to test test helper functions.
|
||||
type MockT struct {
|
||||
*testing.T
|
||||
HasFailed bool
|
||||
}
|
||||
|
||||
// Fail marks the function as having failed but continues execution.
|
||||
func (t *MockT) Fail() {
|
||||
t.T.Log("MockT Fail() called")
|
||||
t.HasFailed = true
|
||||
}
|
||||
|
||||
// Fatal logs its arguments and marks the test as failed, but does not stop execution.
|
||||
func (t *MockT) Fatal(args ...interface{}) {
|
||||
t.T.Logf("MockT Fatal called with %v", args)
|
||||
t.HasFailed = true
|
||||
}
|
||||
|
||||
// Fatalf logs the formatted message and marks the test as failed, but does not stop execution.
|
||||
func (t *MockT) Fatalf(msg string, args ...interface{}) {
|
||||
t.T.Logf("MockT Fatal called: "+msg, args...)
|
||||
t.HasFailed = true
|
||||
}
|
||||
|
||||
// Error logs its arguments and marks the test as failed.
|
||||
func (t *MockT) Error(args ...interface{}) {
|
||||
t.T.Logf("MockT Error called with %v", args)
|
||||
t.HasFailed = true
|
||||
}
|
||||
|
||||
// Errorf logs the formatted message and marks the test as failed.
|
||||
func (t *MockT) Errorf(msg string, args ...interface{}) {
|
||||
t.T.Logf("MockT Error called: "+msg, args...)
|
||||
t.HasFailed = true
|
||||
}
|
||||
|
||||
func createFilesAt(t testing.TB, targetdir string, files map[string]interface{}) {
|
||||
for name, item := range files {
|
||||
target := filepath.Join(targetdir, filepath.FromSlash(name))
|
||||
err := fs.MkdirAll(filepath.Dir(target), 0700)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
switch it := item.(type) {
|
||||
case TestFile:
|
||||
err := ioutil.WriteFile(target, []byte(it.Content), 0600)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
case TestSymlink:
|
||||
// ignore symlinks on windows
|
||||
if runtime.GOOS == "windows" {
|
||||
continue
|
||||
}
|
||||
err := fs.Symlink(filepath.FromSlash(it.Target), target)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestTestCreateFiles(t *testing.T) {
|
||||
var tests = []struct {
|
||||
dir TestDir
|
||||
files map[string]interface{}
|
||||
}{
|
||||
{
|
||||
dir: TestDir{
|
||||
"foo": TestFile{Content: "foo"},
|
||||
"subdir": TestDir{
|
||||
"subfile": TestFile{Content: "bar"},
|
||||
},
|
||||
"sub": TestDir{
|
||||
"subsub": TestDir{
|
||||
"link": TestSymlink{Target: "x/y/z"},
|
||||
},
|
||||
},
|
||||
},
|
||||
files: map[string]interface{}{
|
||||
"foo": TestFile{Content: "foo"},
|
||||
"subdir": TestDir{},
|
||||
"subdir/subfile": TestFile{Content: "bar"},
|
||||
"sub/subsub/link": TestSymlink{Target: "x/y/z"},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for i, test := range tests {
|
||||
tempdir, cleanup := restictest.TempDir(t)
|
||||
defer cleanup()
|
||||
|
||||
t.Run("", func(t *testing.T) {
|
||||
tempdir := filepath.Join(tempdir, fmt.Sprintf("test-%d", i))
|
||||
err := fs.MkdirAll(tempdir, 0700)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
TestCreateFiles(t, tempdir, test.dir)
|
||||
|
||||
for name, item := range test.files {
|
||||
// don't check symlinks on windows
|
||||
if runtime.GOOS == "windows" {
|
||||
if _, ok := item.(TestSymlink); ok {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
targetPath := filepath.Join(tempdir, filepath.FromSlash(name))
|
||||
fi, err := fs.Lstat(targetPath)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
continue
|
||||
}
|
||||
|
||||
switch node := item.(type) {
|
||||
case TestFile:
|
||||
if !fs.IsRegularFile(fi) {
|
||||
t.Errorf("is not regular file: %v", name)
|
||||
continue
|
||||
}
|
||||
|
||||
content, err := ioutil.ReadFile(targetPath)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
continue
|
||||
}
|
||||
|
||||
if string(content) != node.Content {
|
||||
t.Errorf("wrong content for %v: want %q, got %q", name, node.Content, content)
|
||||
}
|
||||
case TestSymlink:
|
||||
if fi.Mode()&os.ModeType != os.ModeSymlink {
|
||||
t.Errorf("is not symlink: %v, %o != %o", name, fi.Mode(), os.ModeSymlink)
|
||||
continue
|
||||
}
|
||||
|
||||
target, err := fs.Readlink(targetPath)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
continue
|
||||
}
|
||||
|
||||
if target != node.Target {
|
||||
t.Errorf("wrong target for %v: want %q, got %q", name, node.Target, target)
|
||||
}
|
||||
case TestDir:
|
||||
if !fi.IsDir() {
|
||||
t.Errorf("is not directory: %v", name)
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestTestWalkFiles(t *testing.T) {
|
||||
var tests = []struct {
|
||||
dir TestDir
|
||||
want map[string]string
|
||||
}{
|
||||
{
|
||||
dir: TestDir{
|
||||
"foo": TestFile{Content: "foo"},
|
||||
"subdir": TestDir{
|
||||
"subfile": TestFile{Content: "bar"},
|
||||
},
|
||||
"x": TestDir{
|
||||
"y": TestDir{
|
||||
"link": TestSymlink{Target: filepath.FromSlash("../../foo")},
|
||||
},
|
||||
},
|
||||
},
|
||||
want: map[string]string{
|
||||
"foo": "<File>",
|
||||
"subdir": "<Dir>",
|
||||
filepath.FromSlash("subdir/subfile"): "<File>",
|
||||
"x": "<Dir>",
|
||||
filepath.FromSlash("x/y"): "<Dir>",
|
||||
filepath.FromSlash("x/y/link"): "<Symlink>",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run("", func(t *testing.T) {
|
||||
tempdir, cleanup := restictest.TempDir(t)
|
||||
defer cleanup()
|
||||
|
||||
got := make(map[string]string)
|
||||
|
||||
TestCreateFiles(t, tempdir, test.dir)
|
||||
TestWalkFiles(t, tempdir, test.dir, func(path string, item interface{}) error {
|
||||
p, err := filepath.Rel(tempdir, path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
got[p] = fmt.Sprintf("%v", item)
|
||||
return nil
|
||||
})
|
||||
|
||||
if !cmp.Equal(test.want, got) {
|
||||
t.Error(cmp.Diff(test.want, got))
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestTestEnsureFiles(t *testing.T) {
|
||||
var tests = []struct {
|
||||
expectFailure bool
|
||||
files map[string]interface{}
|
||||
want TestDir
|
||||
unixOnly bool
|
||||
}{
|
||||
{
|
||||
files: map[string]interface{}{
|
||||
"foo": TestFile{Content: "foo"},
|
||||
"subdir/subfile": TestFile{Content: "bar"},
|
||||
"x/y/link": TestSymlink{Target: "../../foo"},
|
||||
},
|
||||
want: TestDir{
|
||||
"foo": TestFile{Content: "foo"},
|
||||
"subdir": TestDir{
|
||||
"subfile": TestFile{Content: "bar"},
|
||||
},
|
||||
"x": TestDir{
|
||||
"y": TestDir{
|
||||
"link": TestSymlink{Target: "../../foo"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
expectFailure: true,
|
||||
files: map[string]interface{}{
|
||||
"foo": TestFile{Content: "foo"},
|
||||
},
|
||||
want: TestDir{
|
||||
"foo": TestFile{Content: "foo"},
|
||||
"subdir": TestDir{
|
||||
"subfile": TestFile{Content: "bar"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
expectFailure: true,
|
||||
files: map[string]interface{}{
|
||||
"foo": TestFile{Content: "foo"},
|
||||
"subdir/subfile": TestFile{Content: "bar"},
|
||||
},
|
||||
want: TestDir{
|
||||
"foo": TestFile{Content: "foo"},
|
||||
},
|
||||
},
|
||||
{
|
||||
expectFailure: true,
|
||||
files: map[string]interface{}{
|
||||
"foo": TestFile{Content: "xxx"},
|
||||
},
|
||||
want: TestDir{
|
||||
"foo": TestFile{Content: "foo"},
|
||||
},
|
||||
},
|
||||
{
|
||||
expectFailure: true,
|
||||
files: map[string]interface{}{
|
||||
"foo": TestSymlink{Target: "/xxx"},
|
||||
},
|
||||
want: TestDir{
|
||||
"foo": TestFile{Content: "foo"},
|
||||
},
|
||||
},
|
||||
{
|
||||
expectFailure: true,
|
||||
unixOnly: true,
|
||||
files: map[string]interface{}{
|
||||
"foo": TestFile{Content: "foo"},
|
||||
},
|
||||
want: TestDir{
|
||||
"foo": TestSymlink{Target: "/xxx"},
|
||||
},
|
||||
},
|
||||
{
|
||||
expectFailure: true,
|
||||
unixOnly: true,
|
||||
files: map[string]interface{}{
|
||||
"foo": TestSymlink{Target: "xxx"},
|
||||
},
|
||||
want: TestDir{
|
||||
"foo": TestSymlink{Target: "/yyy"},
|
||||
},
|
||||
},
|
||||
{
|
||||
expectFailure: true,
|
||||
files: map[string]interface{}{
|
||||
"foo": TestDir{
|
||||
"foo": TestFile{Content: "foo"},
|
||||
},
|
||||
},
|
||||
want: TestDir{
|
||||
"foo": TestFile{Content: "foo"},
|
||||
},
|
||||
},
|
||||
{
|
||||
expectFailure: true,
|
||||
files: map[string]interface{}{
|
||||
"foo": TestFile{Content: "foo"},
|
||||
},
|
||||
want: TestDir{
|
||||
"foo": TestDir{
|
||||
"foo": TestFile{Content: "foo"},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run("", func(t *testing.T) {
|
||||
if test.unixOnly && runtime.GOOS == "windows" {
|
||||
t.Skip("skip on Windows")
|
||||
return
|
||||
}
|
||||
|
||||
tempdir, cleanup := restictest.TempDir(t)
|
||||
defer cleanup()
|
||||
|
||||
createFilesAt(t, tempdir, test.files)
|
||||
|
||||
subtestT := testing.TB(t)
|
||||
if test.expectFailure {
|
||||
subtestT = &MockT{T: t}
|
||||
}
|
||||
|
||||
TestEnsureFiles(subtestT, tempdir, test.want)
|
||||
|
||||
if test.expectFailure && !subtestT.(*MockT).HasFailed {
|
||||
t.Fatal("expected failure of TestEnsureFiles not found")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestTestEnsureSnapshot(t *testing.T) {
|
||||
var tests = []struct {
|
||||
expectFailure bool
|
||||
files map[string]interface{}
|
||||
want TestDir
|
||||
unixOnly bool
|
||||
}{
|
||||
{
|
||||
files: map[string]interface{}{
|
||||
"foo": TestFile{Content: "foo"},
|
||||
filepath.FromSlash("subdir/subfile"): TestFile{Content: "bar"},
|
||||
filepath.FromSlash("x/y/link"): TestSymlink{Target: filepath.FromSlash("../../foo")},
|
||||
},
|
||||
want: TestDir{
|
||||
"target": TestDir{
|
||||
"foo": TestFile{Content: "foo"},
|
||||
"subdir": TestDir{
|
||||
"subfile": TestFile{Content: "bar"},
|
||||
},
|
||||
"x": TestDir{
|
||||
"y": TestDir{
|
||||
"link": TestSymlink{Target: filepath.FromSlash("../../foo")},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
expectFailure: true,
|
||||
files: map[string]interface{}{
|
||||
"foo": TestFile{Content: "foo"},
|
||||
},
|
||||
want: TestDir{
|
||||
"target": TestDir{
|
||||
"bar": TestFile{Content: "foo"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
expectFailure: true,
|
||||
files: map[string]interface{}{
|
||||
"foo": TestFile{Content: "foo"},
|
||||
"bar": TestFile{Content: "bar"},
|
||||
},
|
||||
want: TestDir{
|
||||
"target": TestDir{
|
||||
"foo": TestFile{Content: "foo"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
expectFailure: true,
|
||||
files: map[string]interface{}{
|
||||
"foo": TestFile{Content: "foo"},
|
||||
},
|
||||
want: TestDir{
|
||||
"target": TestDir{
|
||||
"foo": TestFile{Content: "foo"},
|
||||
"bar": TestFile{Content: "bar"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
expectFailure: true,
|
||||
files: map[string]interface{}{
|
||||
"foo": TestFile{Content: "foo"},
|
||||
},
|
||||
want: TestDir{
|
||||
"target": TestDir{
|
||||
"foo": TestDir{
|
||||
"foo": TestFile{Content: "foo"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
expectFailure: true,
|
||||
files: map[string]interface{}{
|
||||
"foo": TestSymlink{Target: filepath.FromSlash("x/y/z")},
|
||||
},
|
||||
want: TestDir{
|
||||
"target": TestDir{
|
||||
"foo": TestFile{Content: "foo"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
expectFailure: true,
|
||||
unixOnly: true,
|
||||
files: map[string]interface{}{
|
||||
"foo": TestSymlink{Target: filepath.FromSlash("x/y/z")},
|
||||
},
|
||||
want: TestDir{
|
||||
"target": TestDir{
|
||||
"foo": TestSymlink{Target: filepath.FromSlash("x/y/z2")},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
expectFailure: true,
|
||||
files: map[string]interface{}{
|
||||
"foo": TestFile{Content: "foo"},
|
||||
},
|
||||
want: TestDir{
|
||||
"target": TestDir{
|
||||
"foo": TestFile{Content: "xxx"},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run("", func(t *testing.T) {
|
||||
if test.unixOnly && runtime.GOOS == "windows" {
|
||||
t.Skip("skip on Windows")
|
||||
return
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
tempdir, cleanup := restictest.TempDir(t)
|
||||
defer cleanup()
|
||||
|
||||
targetDir := filepath.Join(tempdir, "target")
|
||||
err := fs.Mkdir(targetDir, 0700)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
createFilesAt(t, targetDir, test.files)
|
||||
|
||||
back := fs.TestChdir(t, targetDir)
|
||||
defer back()
|
||||
|
||||
repo, cleanup := repository.TestRepository(t)
|
||||
defer cleanup()
|
||||
|
||||
arch := New(repo)
|
||||
_, id, err := arch.Snapshot(ctx, nil, []string{"."}, nil, "hostname", nil, time.Now())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
t.Logf("snapshot saved as %v", id.Str())
|
||||
|
||||
subtestT := testing.TB(t)
|
||||
if test.expectFailure {
|
||||
subtestT = &MockT{T: t}
|
||||
}
|
||||
|
||||
TestEnsureSnapshot(subtestT, repo, id, test.want)
|
||||
|
||||
if test.expectFailure && !subtestT.(*MockT).HasFailed {
|
||||
t.Fatal("expected failure of TestEnsureSnapshot not found")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
254
internal/archiver/tree.go
Normal file
|
@@ -0,0 +1,254 @@
|
|||
package archiver
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/restic/restic/internal/debug"
|
||||
"github.com/restic/restic/internal/errors"
|
||||
"github.com/restic/restic/internal/fs"
|
||||
)
|
||||
|
||||
// Tree recursively defines what a snapshot should look like when
|
||||
// archived.
|
||||
//
|
||||
// When `Path` is set, this is a leaf node and the contents of `Path` should be
|
||||
// inserted at this point in the tree.
|
||||
//
|
||||
// The attribute `Root` is used to distinguish between files/dirs which have
|
||||
// the same name, but live in a separate directory on the local file system.
|
||||
//
|
||||
// `FileInfoPath` is used to extract metadata for intermediate (=non-leaf)
|
||||
// trees.
|
||||
type Tree struct {
|
||||
Nodes map[string]Tree
|
||||
Path string // where the files/dirs to be saved are found
|
||||
FileInfoPath string // dir whose metadata is used for this node; the dir itself is not included, only its subdirs
|
||||
Root string // parent directory of the tree
|
||||
}
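// Illustrative example (added for clarity, mirroring a case from tree_test.go):
// for the targets "foo/user1" and "foo/user2", NewTree produces roughly
//
//	Tree{Nodes: map[string]Tree{
//		"foo": {Root: ".", FileInfoPath: "foo", Nodes: map[string]Tree{
//			"user1": {Path: "foo/user1"},
//			"user2": {Path: "foo/user2"},
//		}},
//	}}
//
// i.e. "foo" becomes an intermediate node whose metadata is read from
// FileInfoPath, while the leaves carry the paths whose contents are archived.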
|
||||
|
||||
// pathComponents returns all path components of p. If a virtual directory
|
||||
// (volume name on Windows) is added, virtualPrefix is set to true. See the
|
||||
// tests for examples.
|
||||
func pathComponents(fs fs.FS, p string, includeRelative bool) (components []string, virtualPrefix bool) {
|
||||
volume := fs.VolumeName(p)
|
||||
|
||||
if !fs.IsAbs(p) {
|
||||
if !includeRelative {
|
||||
p = fs.Join(fs.Separator(), p)
|
||||
}
|
||||
}
|
||||
|
||||
p = fs.Clean(p)
|
||||
|
||||
for {
|
||||
dir, file := fs.Dir(p), fs.Base(p)
|
||||
|
||||
if p == dir {
|
||||
break
|
||||
}
|
||||
|
||||
components = append(components, file)
|
||||
p = dir
|
||||
}
|
||||
|
||||
// reverse components
|
||||
for i := len(components)/2 - 1; i >= 0; i-- {
|
||||
opp := len(components) - 1 - i
|
||||
components[i], components[opp] = components[opp], components[i]
|
||||
}
|
||||
|
||||
if volume != "" {
|
||||
// strip colon
|
||||
if len(volume) == 2 && volume[1] == ':' {
|
||||
volume = volume[:1]
|
||||
}
|
||||
|
||||
components = append([]string{volume}, components...)
|
||||
virtualPrefix = true
|
||||
}
|
||||
|
||||
return components, virtualPrefix
|
||||
}
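// For example (values taken from TestPathComponents; this note is added for
// clarity and is not part of the original change):
//
//	pathComponents(fs.Local{}, "/foo/bar/baz", false)      // -> ["foo", "bar", "baz"], false
//	pathComponents(fs.Local{}, "../foo/bar/baz", true)     // -> ["..", "foo", "bar", "baz"], false
//	pathComponents(fs.Local{}, `c:\foo\..\bar\baz`, false) // -> ["c", "bar", "baz"], true (Windows)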
|
||||
|
||||
// rootDirectory returns the directory which contains the first element of target.
|
||||
func rootDirectory(fs fs.FS, target string) string {
|
||||
if target == "" {
|
||||
return ""
|
||||
}
|
||||
|
||||
if fs.IsAbs(target) {
|
||||
return fs.Join(fs.VolumeName(target), fs.Separator())
|
||||
}
|
||||
|
||||
target = fs.Clean(target)
|
||||
pc, _ := pathComponents(fs, target, true)
|
||||
|
||||
rel := "."
|
||||
for _, c := range pc {
|
||||
if c == ".." {
|
||||
rel = fs.Join(rel, c)
|
||||
}
|
||||
}
|
||||
|
||||
return rel
|
||||
}
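// For example (values taken from TestRootDirectory; added for clarity only):
//
//	rootDirectory(fs.Local{}, "foo/bar/baz")    // -> "."
//	rootDirectory(fs.Local{}, "../foo/bar/baz") // -> ".."
//	rootDirectory(fs.Local{}, "/home/foo")      // -> "/" (on Unix)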
|
||||
|
||||
// Add adds a new file or directory to the tree.
|
||||
func (t *Tree) Add(fs fs.FS, path string) error {
|
||||
if path == "" {
|
||||
panic("invalid path (empty string)")
|
||||
}
|
||||
|
||||
if t.Nodes == nil {
|
||||
t.Nodes = make(map[string]Tree)
|
||||
}
|
||||
|
||||
pc, virtualPrefix := pathComponents(fs, path, false)
|
||||
if len(pc) == 0 {
|
||||
return errors.New("invalid path (no path components)")
|
||||
}
|
||||
|
||||
name := pc[0]
|
||||
root := rootDirectory(fs, path)
|
||||
tree := Tree{Root: root}
|
||||
|
||||
origName := name
|
||||
i := 0
|
||||
for {
|
||||
other, ok := t.Nodes[name]
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
|
||||
i++
|
||||
if other.Root == root {
|
||||
tree = other
|
||||
break
|
||||
}
|
||||
|
||||
// resolve conflict and try again
|
||||
name = fmt.Sprintf("%s-%d", origName, i)
|
||||
continue
|
||||
}
|
||||
|
||||
if len(pc) > 1 {
|
||||
subroot := fs.Join(root, origName)
|
||||
if virtualPrefix {
|
||||
// use the original root dir if this is a virtual directory (volume name on Windows)
|
||||
subroot = root
|
||||
}
|
||||
err := tree.add(fs, path, subroot, pc[1:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tree.FileInfoPath = subroot
|
||||
} else {
|
||||
tree.Path = path
|
||||
}
|
||||
|
||||
t.Nodes[name] = tree
|
||||
return nil
|
||||
}
|
||||
|
||||
// add adds a new target path into the tree.
|
||||
func (t *Tree) add(fs fs.FS, target, root string, pc []string) error {
|
||||
if len(pc) == 0 {
|
||||
return errors.Errorf("invalid path %q", target)
|
||||
}
|
||||
|
||||
if t.Nodes == nil {
|
||||
t.Nodes = make(map[string]Tree)
|
||||
}
|
||||
|
||||
name := pc[0]
|
||||
|
||||
if len(pc) == 1 {
|
||||
tree, ok := t.Nodes[name]
|
||||
|
||||
if !ok {
|
||||
t.Nodes[name] = Tree{Path: target}
|
||||
return nil
|
||||
}
|
||||
|
||||
if tree.Path != "" {
|
||||
return errors.Errorf("path is already set for target %v", target)
|
||||
}
|
||||
tree.Path = target
|
||||
t.Nodes[name] = tree
|
||||
return nil
|
||||
}
|
||||
|
||||
tree := Tree{}
|
||||
if other, ok := t.Nodes[name]; ok {
|
||||
tree = other
|
||||
}
|
||||
|
||||
subroot := fs.Join(root, name)
|
||||
tree.FileInfoPath = subroot
|
||||
|
||||
err := tree.add(fs, target, subroot, pc[1:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
t.Nodes[name] = tree
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t Tree) String() string {
|
||||
return formatTree(t, "")
|
||||
}
|
||||
|
||||
// formatTree returns a text representation of the tree t.
|
||||
func formatTree(t Tree, indent string) (s string) {
|
||||
for name, node := range t.Nodes {
|
||||
if node.Path != "" {
|
||||
s += fmt.Sprintf("%v/%v, src %q\n", indent, name, node.Path)
|
||||
continue
|
||||
}
|
||||
s += fmt.Sprintf("%v/%v, root %q, meta %q\n", indent, name, node.Root, node.FileInfoPath)
|
||||
s += formatTree(node, indent+" ")
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// prune removes sub-trees of leaf nodes.
|
||||
func prune(t *Tree) {
|
||||
// if the current tree is a leaf node (Path is set), remove all child nodes;
|
||||
// those are automatically included anyway.
|
||||
if t.Path != "" && len(t.Nodes) > 0 {
|
||||
t.FileInfoPath = ""
|
||||
t.Nodes = nil
|
||||
return
|
||||
}
|
||||
|
||||
for i, subtree := range t.Nodes {
|
||||
prune(&subtree)
|
||||
t.Nodes[i] = subtree
|
||||
}
|
||||
}
|
||||
|
||||
// NewTree creates a Tree from the target files/directories.
|
||||
func NewTree(fs fs.FS, targets []string) (*Tree, error) {
|
||||
debug.Log("targets: %v", targets)
|
||||
tree := &Tree{}
|
||||
seen := make(map[string]struct{})
|
||||
for _, target := range targets {
|
||||
target = fs.Clean(target)
|
||||
|
||||
// skip duplicate targets
|
||||
if _, ok := seen[target]; ok {
|
||||
continue
|
||||
}
|
||||
seen[target] = struct{}{}
|
||||
|
||||
err := tree.Add(fs, target)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
prune(tree)
|
||||
debug.Log("result:\n%v", tree)
|
||||
return tree, nil
|
||||
}
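// Illustrative usage (added for clarity; exampleNewTree is a hypothetical
// helper, not part of this commit): building a Tree for a set of backup
// targets and printing its layout via Tree.String.
func exampleNewTree() {
	tree, err := NewTree(fs.Local{}, []string{"foo/user1", "foo/user2", "../work/other"})
	if err != nil {
		panic(err)
	}
	// prints one line per node, e.g. `/foo, root ".", meta "foo"` for
	// intermediate nodes and `/user1, src "foo/user1"` for leaves
	fmt.Println(tree)
}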
|
341
internal/archiver/tree_test.go
Normal file
|
@@ -0,0 +1,341 @@
|
|||
package archiver
|
||||
|
||||
import (
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"testing"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/restic/restic/internal/fs"
|
||||
)
|
||||
|
||||
func TestPathComponents(t *testing.T) {
|
||||
var tests = []struct {
|
||||
p string
|
||||
c []string
|
||||
virtual bool
|
||||
rel bool
|
||||
win bool
|
||||
}{
|
||||
{
|
||||
p: "/foo/bar/baz",
|
||||
c: []string{"foo", "bar", "baz"},
|
||||
},
|
||||
{
|
||||
p: "/foo/bar/baz",
|
||||
c: []string{"foo", "bar", "baz"},
|
||||
rel: true,
|
||||
},
|
||||
{
|
||||
p: "foo/bar/baz",
|
||||
c: []string{"foo", "bar", "baz"},
|
||||
},
|
||||
{
|
||||
p: "foo/bar/baz",
|
||||
c: []string{"foo", "bar", "baz"},
|
||||
rel: true,
|
||||
},
|
||||
{
|
||||
p: "../foo/bar/baz",
|
||||
c: []string{"foo", "bar", "baz"},
|
||||
},
|
||||
{
|
||||
p: "../foo/bar/baz",
|
||||
c: []string{"..", "foo", "bar", "baz"},
|
||||
rel: true,
|
||||
},
|
||||
{
|
||||
p: "c:/foo/bar/baz",
|
||||
c: []string{"c", "foo", "bar", "baz"},
|
||||
virtual: true,
|
||||
rel: true,
|
||||
win: true,
|
||||
},
|
||||
{
|
||||
p: "c:/foo/../bar/baz",
|
||||
c: []string{"c", "bar", "baz"},
|
||||
virtual: true,
|
||||
win: true,
|
||||
},
|
||||
{
|
||||
p: `c:\foo\..\bar\baz`,
|
||||
c: []string{"c", "bar", "baz"},
|
||||
virtual: true,
|
||||
win: true,
|
||||
},
|
||||
{
|
||||
p: "c:/foo/../bar/baz",
|
||||
c: []string{"c", "bar", "baz"},
|
||||
virtual: true,
|
||||
rel: true,
|
||||
win: true,
|
||||
},
|
||||
{
|
||||
p: `c:\foo\..\bar\baz`,
|
||||
c: []string{"c", "bar", "baz"},
|
||||
virtual: true,
|
||||
rel: true,
|
||||
win: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run("", func(t *testing.T) {
|
||||
if test.win && runtime.GOOS != "windows" {
|
||||
t.Skip("skip test on unix")
|
||||
}
|
||||
|
||||
c, v := pathComponents(fs.Local{}, filepath.FromSlash(test.p), test.rel)
|
||||
if !cmp.Equal(test.c, c) {
|
||||
t.Error(test.c, c)
|
||||
}
|
||||
|
||||
if v != test.virtual {
|
||||
t.Errorf("unexpected virtual prefix count returned, want %v, got %v", test.virtual, v)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestRootDirectory(t *testing.T) {
|
||||
var tests = []struct {
|
||||
target string
|
||||
root string
|
||||
unix bool
|
||||
win bool
|
||||
}{
|
||||
{target: ".", root: "."},
|
||||
{target: "foo/bar/baz", root: "."},
|
||||
{target: "../foo/bar/baz", root: ".."},
|
||||
{target: "..", root: ".."},
|
||||
{target: "../../..", root: "../../.."},
|
||||
{target: "/home/foo", root: "/", unix: true},
|
||||
{target: "c:/home/foo", root: "c:/", win: true},
|
||||
{target: `c:\home\foo`, root: `c:\`, win: true},
|
||||
{target: "//host/share/foo", root: "//host/share/", win: true},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run("", func(t *testing.T) {
|
||||
if test.unix && runtime.GOOS == "windows" {
|
||||
t.Skip("skip test on windows")
|
||||
}
|
||||
if test.win && runtime.GOOS != "windows" {
|
||||
t.Skip("skip test on unix")
|
||||
}
|
||||
|
||||
root := rootDirectory(fs.Local{}, filepath.FromSlash(test.target))
|
||||
want := filepath.FromSlash(test.root)
|
||||
if root != want {
|
||||
t.Fatalf("wrong root directory, want %v, got %v", want, root)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestTree(t *testing.T) {
|
||||
var tests = []struct {
|
||||
targets []string
|
||||
want Tree
|
||||
unix bool
|
||||
win bool
|
||||
mustError bool
|
||||
}{
|
||||
{
|
||||
targets: []string{"foo"},
|
||||
want: Tree{Nodes: map[string]Tree{
|
||||
"foo": Tree{Path: "foo", Root: "."},
|
||||
}},
|
||||
},
|
||||
{
|
||||
targets: []string{"foo", "bar", "baz"},
|
||||
want: Tree{Nodes: map[string]Tree{
|
||||
"foo": Tree{Path: "foo", Root: "."},
|
||||
"bar": Tree{Path: "bar", Root: "."},
|
||||
"baz": Tree{Path: "baz", Root: "."},
|
||||
}},
|
||||
},
|
||||
{
|
||||
targets: []string{"foo/user1", "foo/user2", "foo/other"},
|
||||
want: Tree{Nodes: map[string]Tree{
|
||||
"foo": Tree{Root: ".", FileInfoPath: "foo", Nodes: map[string]Tree{
|
||||
"user1": Tree{Path: filepath.FromSlash("foo/user1")},
|
||||
"user2": Tree{Path: filepath.FromSlash("foo/user2")},
|
||||
"other": Tree{Path: filepath.FromSlash("foo/other")},
|
||||
}},
|
||||
}},
|
||||
},
|
||||
{
|
||||
targets: []string{"foo/work/user1", "foo/work/user2"},
|
||||
want: Tree{Nodes: map[string]Tree{
|
||||
"foo": Tree{Root: ".", FileInfoPath: "foo", Nodes: map[string]Tree{
|
||||
"work": Tree{FileInfoPath: filepath.FromSlash("foo/work"), Nodes: map[string]Tree{
|
||||
"user1": Tree{Path: filepath.FromSlash("foo/work/user1")},
|
||||
"user2": Tree{Path: filepath.FromSlash("foo/work/user2")},
|
||||
}},
|
||||
}},
|
||||
}},
|
||||
},
|
||||
{
|
||||
targets: []string{"foo/user1", "bar/user1", "foo/other"},
|
||||
want: Tree{Nodes: map[string]Tree{
|
||||
"foo": Tree{Root: ".", FileInfoPath: "foo", Nodes: map[string]Tree{
|
||||
"user1": Tree{Path: filepath.FromSlash("foo/user1")},
|
||||
"other": Tree{Path: filepath.FromSlash("foo/other")},
|
||||
}},
|
||||
"bar": Tree{Root: ".", FileInfoPath: "bar", Nodes: map[string]Tree{
|
||||
"user1": Tree{Path: filepath.FromSlash("bar/user1")},
|
||||
}},
|
||||
}},
|
||||
},
|
||||
{
|
||||
targets: []string{"../work"},
|
||||
want: Tree{Nodes: map[string]Tree{
|
||||
"work": Tree{Root: "..", Path: filepath.FromSlash("../work")},
|
||||
}},
|
||||
},
|
||||
{
|
||||
targets: []string{"../work/other"},
|
||||
want: Tree{Nodes: map[string]Tree{
|
||||
"work": Tree{Root: "..", FileInfoPath: filepath.FromSlash("../work"), Nodes: map[string]Tree{
|
||||
"other": Tree{Path: filepath.FromSlash("../work/other")},
|
||||
}},
|
||||
}},
|
||||
},
|
||||
{
|
||||
targets: []string{"foo/user1", "../work/other", "foo/user2"},
|
||||
want: Tree{Nodes: map[string]Tree{
|
||||
"foo": Tree{Root: ".", FileInfoPath: "foo", Nodes: map[string]Tree{
|
||||
"user1": Tree{Path: filepath.FromSlash("foo/user1")},
|
||||
"user2": Tree{Path: filepath.FromSlash("foo/user2")},
|
||||
}},
|
||||
"work": Tree{Root: "..", FileInfoPath: filepath.FromSlash("../work"), Nodes: map[string]Tree{
|
||||
"other": Tree{Path: filepath.FromSlash("../work/other")},
|
||||
}},
|
||||
}},
|
||||
},
|
||||
{
|
||||
targets: []string{"foo/user1", "../foo/other", "foo/user2"},
|
||||
want: Tree{Nodes: map[string]Tree{
|
||||
"foo": Tree{Root: ".", FileInfoPath: "foo", Nodes: map[string]Tree{
|
||||
"user1": Tree{Path: filepath.FromSlash("foo/user1")},
|
||||
"user2": Tree{Path: filepath.FromSlash("foo/user2")},
|
||||
}},
|
||||
"foo-1": Tree{Root: "..", FileInfoPath: filepath.FromSlash("../foo"), Nodes: map[string]Tree{
|
||||
"other": Tree{Path: filepath.FromSlash("../foo/other")},
|
||||
}},
|
||||
}},
|
||||
},
|
||||
{
|
||||
targets: []string{"foo/work", "foo/work/user2"},
|
||||
want: Tree{Nodes: map[string]Tree{
|
||||
"foo": Tree{Root: ".", FileInfoPath: "foo", Nodes: map[string]Tree{
|
||||
"work": Tree{
|
||||
Path: filepath.FromSlash("foo/work"),
|
||||
},
|
||||
}},
|
||||
}},
|
||||
},
|
||||
{
|
||||
targets: []string{"foo/work/user2", "foo/work"},
|
||||
want: Tree{Nodes: map[string]Tree{
|
||||
"foo": Tree{Root: ".", FileInfoPath: "foo", Nodes: map[string]Tree{
|
||||
"work": Tree{
|
||||
Path: filepath.FromSlash("foo/work"),
|
||||
},
|
||||
}},
|
||||
}},
|
||||
},
|
||||
{
|
||||
targets: []string{"foo/work/user2/data/secret", "foo"},
|
||||
want: Tree{Nodes: map[string]Tree{
|
||||
"foo": Tree{Root: ".", Path: "foo"},
|
||||
}},
|
||||
},
|
||||
{
|
||||
unix: true,
|
||||
targets: []string{"/mnt/driveA", "/mnt/driveA/work/driveB"},
|
||||
want: Tree{Nodes: map[string]Tree{
|
||||
"mnt": Tree{Root: "/", FileInfoPath: filepath.FromSlash("/mnt"), Nodes: map[string]Tree{
|
||||
"driveA": Tree{
|
||||
Path: filepath.FromSlash("/mnt/driveA"),
|
||||
},
|
||||
}},
|
||||
}},
|
||||
},
|
||||
{
|
||||
targets: []string{"foo/work/user", "foo/work/user"},
|
||||
want: Tree{Nodes: map[string]Tree{
|
||||
"foo": Tree{Root: ".", FileInfoPath: "foo", Nodes: map[string]Tree{
|
||||
"work": Tree{FileInfoPath: filepath.FromSlash("foo/work"), Nodes: map[string]Tree{
|
||||
"user": Tree{Path: filepath.FromSlash("foo/work/user")},
|
||||
}},
|
||||
}},
|
||||
}},
|
||||
},
|
||||
{
|
||||
targets: []string{"./foo/work/user", "foo/work/user"},
|
||||
want: Tree{Nodes: map[string]Tree{
|
||||
"foo": Tree{Root: ".", FileInfoPath: "foo", Nodes: map[string]Tree{
|
||||
"work": Tree{FileInfoPath: filepath.FromSlash("foo/work"), Nodes: map[string]Tree{
|
||||
"user": Tree{Path: filepath.FromSlash("foo/work/user")},
|
||||
}},
|
||||
}},
|
||||
}},
|
||||
},
|
||||
{
|
||||
win: true,
|
||||
targets: []string{`c:\users\foobar\temp`},
|
||||
want: Tree{Nodes: map[string]Tree{
|
||||
"c": Tree{Root: `c:\`, FileInfoPath: `c:\`, Nodes: map[string]Tree{
|
||||
"users": Tree{FileInfoPath: `c:\users`, Nodes: map[string]Tree{
|
||||
"foobar": Tree{FileInfoPath: `c:\users\foobar`, Nodes: map[string]Tree{
|
||||
"temp": Tree{Path: `c:\users\foobar\temp`},
|
||||
}},
|
||||
}},
|
||||
}},
|
||||
}},
|
||||
},
|
||||
{
|
||||
targets: []string{"."},
|
||||
mustError: true,
|
||||
},
|
||||
{
|
||||
targets: []string{".."},
|
||||
mustError: true,
|
||||
},
|
||||
{
|
||||
targets: []string{"../.."},
|
||||
mustError: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run("", func(t *testing.T) {
|
||||
if test.unix && runtime.GOOS == "windows" {
|
||||
t.Skip("skip test on windows")
|
||||
}
|
||||
|
||||
if test.win && runtime.GOOS != "windows" {
|
||||
t.Skip("skip test on unix")
|
||||
}
|
||||
|
||||
tree, err := NewTree(fs.Local{}, test.targets)
|
||||
if test.mustError {
|
||||
if err == nil {
|
||||
t.Fatal("expected error, got nil")
|
||||
}
|
||||
t.Logf("found expected error: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if !cmp.Equal(&test.want, tree) {
|
||||
t.Error(cmp.Diff(&test.want, tree))
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|