Merge pull request #1494 from restic/new-archiver

New archiver code
Alexander Neumann 2018-04-28 22:24:39 +02:00
commit 56e394ac33
142 changed files with 16847 additions and 4832 deletions

Gopkg.lock generated

@ -67,6 +67,12 @@
revision = "925541529c1fa6821df4e44ce2723319eb2be768"
version = "v1.0.0"
[[projects]]
name = "github.com/google/go-cmp"
packages = ["cmp","cmp/internal/diff","cmp/internal/function","cmp/internal/value"]
revision = "8099a9787ce5dc5984ed879a3bda47dc730a8e97"
version = "v0.1.0"
[[projects]]
name = "github.com/inconshreveable/mousetrap"
packages = ["."]
@ -97,6 +103,12 @@
revision = "8bd9a64bf37eb297b492a4101fb28e80ac0b290f"
version = "v1.1.0"
[[projects]]
name = "github.com/mattn/go-isatty"
packages = ["."]
revision = "0360b2af4f38e8d38c7fce2a9f4e702702d73a39"
version = "v0.0.3"
[[projects]]
name = "github.com/minio/minio-go"
packages = [".","pkg/credentials","pkg/encrypt","pkg/policy","pkg/s3signer","pkg/s3utils","pkg/set"]
@ -223,6 +235,12 @@
revision = "150dc57a1b433e64154302bdc40b6bb8aefa313a"
version = "v1.0.0"
[[projects]]
branch = "v2"
name = "gopkg.in/tomb.v2"
packages = ["."]
revision = "d5d1b5820637886def9eef33e03a27a9f166942c"
[[projects]]
name = "gopkg.in/yaml.v2"
packages = ["."]
@ -232,6 +250,6 @@
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "d3d59414a33bb8ecc6d88a681c782a87244a565cc9d0f85615cfa0704c02800a"
inputs-digest = "44a8f2ed127a6eaa38c1449b97d298fc703c961617bd93565b89bcc6c9a41483"
solver-name = "gps-cdcl"
solver-version = 1


@ -0,0 +1,36 @@
Enhancement: Rework archiver code
The core archiver code and the complementary code for the `backup` command were
rewritten completely. This resolves very annoying issues such as issue 549.
Basically, with the old code, restic took the last path component of each
to-be-saved file or directory as the top-level file/directory within the
snapshot. This meant that when called as `restic backup /home/user/foo`, the
snapshot would contain the files in the directory `/home/user/foo` as `/foo`.
This is not the case any more with the new archiver code. Now, restic works
very similarly to what `tar` does: when restic is called with an absolute path
to save, it preserves the directory structure within the snapshot. For the
example above, the snapshot would contain the files of the directory
`/home/user/foo` under the full path `/home/user/foo`. For relative
directories, it only preserves the relative path components, so
`restic backup user/foo` will save the files as `/user/foo` in the snapshot.
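
To illustrate, here is a minimal, hypothetical Go sketch of the path mapping
described above (the real logic lives in restic's `internal/archiver` package
and handles many more cases):

package main

import (
	"fmt"
	"path/filepath"
)

// snapshotPath shows, in simplified form, where a target argument ends up
// inside a snapshot: absolute targets keep their full directory structure,
// relative targets keep only their relative path components.
func snapshotPath(target string) string {
	if filepath.IsAbs(target) {
		return filepath.ToSlash(target) // /home/user/foo stays /home/user/foo
	}
	return "/" + filepath.ToSlash(filepath.Clean(target)) // user/foo becomes /user/foo
}

func main() {
	fmt.Println(snapshotPath("/home/user/foo")) // /home/user/foo
	fmt.Println(snapshotPath("user/foo"))       // /user/foo
}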
While we were at it, the status display and notification system were completely
rewritten. By default, restic now shows which files are currently being read
(unless `--quiet` is specified) in a multi-line status display.
The `backup` command also gained a new option: `--verbose`. It can be specified
once (which prints a bit more detail about what restic is doing) or twice (which
prints a line for each file/directory restic encountered, together with some
statistics).
https://github.com/restic/restic/issues/549
https://github.com/restic/restic/issues/1286
https://github.com/restic/restic/issues/446
https://github.com/restic/restic/issues/1344
https://github.com/restic/restic/issues/1416
https://github.com/restic/restic/issues/1456
https://github.com/restic/restic/issues/1145
https://github.com/restic/restic/issues/1160
https://github.com/restic/restic/pull/1494


@ -1,9 +0,0 @@
// +build !linux
package main
// IsProcessBackground should return true if it is running in the background or false if not
func IsProcessBackground() bool {
// TODO: check whether the process is running in the background on OSes other than Linux
return false
}


@ -2,21 +2,24 @@ package main
import (
"bufio"
"fmt"
"context"
"io"
"os"
"path"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/spf13/cobra"
tomb "gopkg.in/tomb.v2"
"github.com/restic/restic/internal/archiver"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/fs"
"github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/ui"
"github.com/restic/restic/internal/ui/termstatus"
)
var cmdBackup = &cobra.Command{
@ -42,11 +45,16 @@ given as the arguments.
return errors.Fatal("cannot use both `--stdin` and `--files-from -`")
}
if backupOptions.Stdin {
return readBackupFromStdin(backupOptions, globalOptions, args)
}
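// the status terminal runs in its own goroutine, tracked by the tomb, so it
// can be shut down cleanly once the backup has finished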
var t tomb.Tomb
term := termstatus.New(globalOptions.stdout, globalOptions.stderr)
t.Go(func() error { term.Run(t.Context(globalOptions.ctx)); return nil })
return runBackup(backupOptions, globalOptions, args)
err := runBackup(backupOptions, globalOptions, term, args)
if err != nil {
return err
}
t.Kill(nil)
return t.Wait()
},
}
@ -90,127 +98,6 @@ func init() {
f.BoolVar(&backupOptions.WithAtime, "with-atime", false, "store the atime for all files and directories")
}
func newScanProgress(gopts GlobalOptions) *restic.Progress {
if gopts.Quiet {
return nil
}
p := restic.NewProgress()
p.OnUpdate = func(s restic.Stat, d time.Duration, ticker bool) {
if IsProcessBackground() {
return
}
PrintProgress("[%s] %d directories, %d files, %s", formatDuration(d), s.Dirs, s.Files, formatBytes(s.Bytes))
}
p.OnDone = func(s restic.Stat, d time.Duration, ticker bool) {
PrintProgress("scanned %d directories, %d files in %s\n", s.Dirs, s.Files, formatDuration(d))
}
return p
}
func newArchiveProgress(gopts GlobalOptions, todo restic.Stat) *restic.Progress {
if gopts.Quiet {
return nil
}
archiveProgress := restic.NewProgress()
var bps, eta uint64
itemsTodo := todo.Files + todo.Dirs
archiveProgress.OnUpdate = func(s restic.Stat, d time.Duration, ticker bool) {
if IsProcessBackground() {
return
}
sec := uint64(d / time.Second)
if todo.Bytes > 0 && sec > 0 && ticker {
bps = s.Bytes / sec
if s.Bytes >= todo.Bytes {
eta = 0
} else if bps > 0 {
eta = (todo.Bytes - s.Bytes) / bps
}
}
itemsDone := s.Files + s.Dirs
status1 := fmt.Sprintf("[%s] %s %s / %s %d / %d items %d errors ",
formatDuration(d),
formatPercent(s.Bytes, todo.Bytes),
formatBytes(s.Bytes), formatBytes(todo.Bytes),
itemsDone, itemsTodo,
s.Errors)
status2 := fmt.Sprintf("ETA %s ", formatSeconds(eta))
if w := stdoutTerminalWidth(); w > 0 {
maxlen := w - len(status2) - 1
if maxlen < 4 {
status1 = ""
} else if len(status1) > maxlen {
status1 = status1[:maxlen-4]
status1 += "... "
}
}
PrintProgress("%s%s", status1, status2)
}
archiveProgress.OnDone = func(s restic.Stat, d time.Duration, ticker bool) {
fmt.Printf("\nduration: %s\n", formatDuration(d))
}
return archiveProgress
}
func newArchiveStdinProgress(gopts GlobalOptions) *restic.Progress {
if gopts.Quiet {
return nil
}
archiveProgress := restic.NewProgress()
var bps uint64
archiveProgress.OnUpdate = func(s restic.Stat, d time.Duration, ticker bool) {
if IsProcessBackground() {
return
}
sec := uint64(d / time.Second)
if s.Bytes > 0 && sec > 0 && ticker {
bps = s.Bytes / sec
}
status1 := fmt.Sprintf("[%s] %s %s/s", formatDuration(d),
formatBytes(s.Bytes),
formatBytes(bps))
if w := stdoutTerminalWidth(); w > 0 {
maxlen := w - len(status1)
if maxlen < 4 {
status1 = ""
} else if len(status1) > maxlen {
status1 = status1[:maxlen-4]
status1 += "... "
}
}
PrintProgress("%s", status1)
}
archiveProgress.OnDone = func(s restic.Stat, d time.Duration, ticker bool) {
fmt.Printf("\nduration: %s\n", formatDuration(d))
}
return archiveProgress
}
// filterExisting returns a slice of all existing items, or an error if no
// items exist at all.
func filterExisting(items []string) (result []string, err error) {
@ -231,72 +118,10 @@ func filterExisting(items []string) (result []string, err error) {
return
}
func readBackupFromStdin(opts BackupOptions, gopts GlobalOptions, args []string) error {
if len(args) != 0 {
return errors.Fatal("when reading from stdin, no additional files can be specified")
}
fn := opts.StdinFilename
if fn == "" {
return errors.Fatal("filename for backup from stdin must not be empty")
}
if filepath.Base(fn) != fn || path.Base(fn) != fn {
return errors.Fatal("filename is invalid (may not contain a directory, slash or backslash)")
}
var t time.Time
if opts.TimeStamp != "" {
parsedT, err := time.Parse("2006-01-02 15:04:05", opts.TimeStamp)
if err != nil {
return err
}
t = parsedT
} else {
t = time.Now()
}
if gopts.password == "" {
return errors.Fatal("unable to read password from stdin when data is to be read from stdin, use --password-file or $RESTIC_PASSWORD")
}
repo, err := OpenRepository(gopts)
if err != nil {
return err
}
lock, err := lockRepo(repo)
defer unlockRepo(lock)
if err != nil {
return err
}
err = repo.LoadIndex(gopts.ctx)
if err != nil {
return err
}
r := &archiver.Reader{
Repository: repo,
Tags: opts.Tags,
Hostname: opts.Hostname,
TimeStamp: t,
}
_, id, err := r.Archive(gopts.ctx, fn, os.Stdin, newArchiveStdinProgress(gopts))
if err != nil {
return err
}
Verbosef("archived as %v\n", id.Str())
return nil
}
// readFromFile will read all lines from the given filename and write them to a
// string array. If filename is empty, readFromFile returns an empty string
// array. If filename is a dash (-), readFromFile will read the lines from
// the standard input.
// readLinesFromFile reads all lines from the given filename and returns them as
// a string array. If filename is empty, readLinesFromFile returns an empty string
// array. If filename is a dash (-), it reads the lines from the
// standard input.
func readLinesFromFile(filename string) ([]string, error) {
if filename == "" {
return nil, nil
@ -335,47 +160,45 @@ func readLinesFromFile(filename string) ([]string, error) {
return lines, nil
}
func runBackup(opts BackupOptions, gopts GlobalOptions, args []string) error {
// Check returns an error when an invalid combination of options was set.
func (opts BackupOptions) Check(gopts GlobalOptions, args []string) error {
if opts.FilesFrom == "-" && gopts.password == "" {
return errors.Fatal("unable to read password from stdin when data is to be read from stdin, use --password-file or $RESTIC_PASSWORD")
}
fromfile, err := readLinesFromFile(opts.FilesFrom)
if err != nil {
return err
}
// merge files from files-from into normal args so we can reuse the normal
// args checks and have the ability to use both files-from and args at the
// same time
args = append(args, fromfile...)
if len(args) == 0 {
return errors.Fatal("nothing to backup, please specify target files/dirs")
}
target := make([]string, 0, len(args))
for _, d := range args {
if a, err := filepath.Abs(d); err == nil {
d = a
if opts.Stdin {
if opts.FilesFrom != "" {
return errors.Fatal("--stdin and --files-from cannot be used together")
}
if len(args) > 0 {
return errors.Fatal("--stdin was specified and files/dirs were listed as arguments")
}
target = append(target, d)
}
target, err = filterExisting(target)
if err != nil {
return err
}
// rejectFuncs collect functions that can reject items from the backup
var rejectFuncs []RejectFunc
return nil
}
// collectRejectFuncs returns a list of all functions which may reject data
// from being saved in a snapshot
func collectRejectFuncs(opts BackupOptions, repo *repository.Repository, targets []string) (fs []RejectFunc, err error) {
// allowed devices
if opts.ExcludeOtherFS {
f, err := rejectByDevice(target)
f, err := rejectByDevice(targets)
if err != nil {
return err
return nil, err
}
rejectFuncs = append(rejectFuncs, f)
fs = append(fs, f)
}
// exclude restic cache
if repo.Cache != nil {
f, err := rejectResticCache(repo)
if err != nil {
return nil, err
}
fs = append(fs, f)
}
// add patterns from file
@ -384,7 +207,7 @@ func runBackup(opts BackupOptions, gopts GlobalOptions, args []string) error {
}
if len(opts.Excludes) > 0 {
rejectFuncs = append(rejectFuncs, rejectByPattern(opts.Excludes))
fs = append(fs, rejectByPattern(opts.Excludes))
}
if opts.ExcludeCaches {
@ -394,111 +217,17 @@ func runBackup(opts BackupOptions, gopts GlobalOptions, args []string) error {
for _, spec := range opts.ExcludeIfPresent {
f, err := rejectIfPresent(spec)
if err != nil {
return err
return nil, err
}
rejectFuncs = append(rejectFuncs, f)
fs = append(fs, f)
}
repo, err := OpenRepository(gopts)
if err != nil {
return err
}
lock, err := lockRepo(repo)
defer unlockRepo(lock)
if err != nil {
return err
}
// exclude restic cache
if repo.Cache != nil {
f, err := rejectResticCache(repo)
if err != nil {
return err
}
rejectFuncs = append(rejectFuncs, f)
}
err = repo.LoadIndex(gopts.ctx)
if err != nil {
return err
}
var parentSnapshotID *restic.ID
// Force using a parent
if !opts.Force && opts.Parent != "" {
id, err := restic.FindSnapshot(repo, opts.Parent)
if err != nil {
return errors.Fatalf("invalid id %q: %v", opts.Parent, err)
}
parentSnapshotID = &id
}
// Find last snapshot to set it as parent, if not already set
if !opts.Force && parentSnapshotID == nil {
id, err := restic.FindLatestSnapshot(gopts.ctx, repo, target, []restic.TagList{}, opts.Hostname)
if err == nil {
parentSnapshotID = &id
} else if err != restic.ErrNoSnapshotFound {
return err
}
}
if parentSnapshotID != nil {
Verbosef("using parent snapshot %v\n", parentSnapshotID.Str())
}
Verbosef("scan %v\n", target)
selectFilter := func(item string, fi os.FileInfo) bool {
for _, reject := range rejectFuncs {
if reject(item, fi) {
return false
}
}
return true
}
var stat restic.Stat
if !gopts.Quiet {
stat, err = archiver.Scan(target, selectFilter, newScanProgress(gopts))
if err != nil {
return err
}
}
arch := archiver.New(repo)
arch.Excludes = opts.Excludes
arch.SelectFilter = selectFilter
arch.WithAccessTime = opts.WithAtime
arch.Warn = func(dir string, fi os.FileInfo, err error) {
// TODO: make ignoring errors configurable
Warnf("%s\rwarning for %s: %v\n", ClearLine(), dir, err)
}
timeStamp := time.Now()
if opts.TimeStamp != "" {
timeStamp, err = time.Parse(TimeFormat, opts.TimeStamp)
if err != nil {
return errors.Fatalf("error in time option: %v\n", err)
}
}
_, id, err := arch.Snapshot(gopts.ctx, newArchiveProgress(gopts, stat), target, opts.Tags, opts.Hostname, parentSnapshotID, timeStamp)
if err != nil {
return err
}
Verbosef("snapshot %s saved\n", id.Str())
return nil
return fs, nil
}
// readExcludePatternsFromFiles reads all exclude files and returns the list of
// exclude patterns.
func readExcludePatternsFromFiles(excludeFiles []string) []string {
var excludes []string
for _, filename := range excludeFiles {
@ -540,3 +269,217 @@ func readExcludePatternsFromFiles(excludeFiles []string) []string {
}
return excludes
}
// collectTargets returns a list of target files/dirs from several sources.
func collectTargets(opts BackupOptions, args []string) (targets []string, err error) {
if opts.Stdin {
return nil, nil
}
fromfile, err := readLinesFromFile(opts.FilesFrom)
if err != nil {
return nil, err
}
// merge files from files-from into normal args so we can reuse the normal
// args checks and have the ability to use both files-from and args at the
// same time
args = append(args, fromfile...)
if len(args) == 0 && !opts.Stdin {
return nil, errors.Fatal("nothing to backup, please specify target files/dirs")
}
targets = args
targets, err = filterExisting(targets)
if err != nil {
return nil, err
}
return targets, nil
}
// findParentSnapshot returns the ID of the parent snapshot. If there is none,
// nil is returned.
func findParentSnapshot(ctx context.Context, repo restic.Repository, opts BackupOptions, targets []string) (parentID *restic.ID, err error) {
// Force using a parent
if !opts.Force && opts.Parent != "" {
id, err := restic.FindSnapshot(repo, opts.Parent)
if err != nil {
return nil, errors.Fatalf("invalid id %q: %v", opts.Parent, err)
}
parentID = &id
}
// Find last snapshot to set it as parent, if not already set
if !opts.Force && parentID == nil {
id, err := restic.FindLatestSnapshot(ctx, repo, targets, []restic.TagList{}, opts.Hostname)
if err == nil {
parentID = &id
} else if err != restic.ErrNoSnapshotFound {
return nil, err
}
}
return parentID, nil
}
func runBackup(opts BackupOptions, gopts GlobalOptions, term *termstatus.Terminal, args []string) error {
err := opts.Check(gopts, args)
if err != nil {
return err
}
targets, err := collectTargets(opts, args)
if err != nil {
return err
}
var t tomb.Tomb
p := ui.NewBackup(term, gopts.verbosity)
// use the terminal for stdout/stderr
prevStdout, prevStderr := gopts.stdout, gopts.stderr
defer func() {
gopts.stdout, gopts.stderr = prevStdout, prevStderr
}()
gopts.stdout, gopts.stderr = p.Stdout(), p.Stderr()
if s, ok := os.LookupEnv("RESTIC_PROGRESS_FPS"); ok {
fps, err := strconv.Atoi(s)
if err == nil && fps >= 1 {
if fps > 60 {
fps = 60
}
p.MinUpdatePause = time.Second / time.Duration(fps)
}
}
t.Go(func() error { return p.Run(t.Context(gopts.ctx)) })
p.V("open repository")
repo, err := OpenRepository(gopts)
if err != nil {
return err
}
p.V("lock repository")
lock, err := lockRepo(repo)
defer unlockRepo(lock)
if err != nil {
return err
}
// rejectFuncs collect functions that can reject items from the backup
rejectFuncs, err := collectRejectFuncs(opts, repo, targets)
if err != nil {
return err
}
p.V("load index files")
err = repo.LoadIndex(gopts.ctx)
if err != nil {
return err
}
parentSnapshotID, err := findParentSnapshot(gopts.ctx, repo, opts, targets)
if err != nil {
return err
}
if parentSnapshotID != nil {
p.V("using parent snapshot %v\n", parentSnapshotID.Str())
}
selectFilter := func(item string, fi os.FileInfo) bool {
for _, reject := range rejectFuncs {
if reject(item, fi) {
return false
}
}
return true
}
timeStamp := time.Now()
if opts.TimeStamp != "" {
timeStamp, err = time.Parse(TimeFormat, opts.TimeStamp)
if err != nil {
return errors.Fatalf("error in time option: %v\n", err)
}
}
var targetFS fs.FS = fs.Local{}
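// when reading from stdin, present the data as a synthetic filesystem that
// contains exactly one file, so the regular archiver code path can be reused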
if opts.Stdin {
p.V("read data from stdin")
targetFS = &fs.Reader{
ModTime: timeStamp,
Name: opts.StdinFilename,
Mode: 0644,
ReadCloser: os.Stdin,
}
targets = []string{opts.StdinFilename}
}
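// the scanner walks the targets concurrently with the backup itself and
// reports the expected totals, which the status display uses to show progress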
sc := archiver.NewScanner(targetFS)
sc.Select = selectFilter
sc.Error = p.ScannerError
sc.Result = p.ReportTotal
p.V("start scan")
t.Go(func() error { return sc.Scan(t.Context(gopts.ctx), targets) })
arch := archiver.New(repo, targetFS, archiver.Options{})
arch.Select = selectFilter
arch.WithAtime = opts.WithAtime
arch.Error = p.Error
arch.CompleteItem = p.CompleteItemFn
arch.StartFile = p.StartFile
arch.CompleteBlob = p.CompleteBlob
if parentSnapshotID == nil {
parentSnapshotID = &restic.ID{}
}
snapshotOpts := archiver.SnapshotOptions{
Excludes: opts.Excludes,
Tags: opts.Tags,
Time: timeStamp,
Hostname: opts.Hostname,
ParentSnapshot: *parentSnapshotID,
}
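// upload intermediate index files in the background (here every 30 seconds),
// so that a long-running backup leaves usable index data behind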
uploader := archiver.IndexUploader{
Repository: repo,
Start: func() {
p.VV("uploading intermediate index")
},
Complete: func(id restic.ID) {
p.V("uploaded intermediate index %v", id.Str())
},
}
t.Go(func() error {
return uploader.Upload(gopts.ctx, t.Context(gopts.ctx), 30*time.Second)
})
p.V("start backup")
_, id, err := arch.Snapshot(gopts.ctx, targets, snapshotOpts)
if err != nil {
return err
}
p.Finish()
p.P("snapshot %s saved\n", id.Str())
// cleanly shutdown all running goroutines
t.Kill(nil)
// let's see if one returned an error
err = t.Wait()
if err != nil {
return err
}
return nil
}


@ -56,7 +56,8 @@ func printTree(ctx context.Context, repo *repository.Repository, id *restic.ID,
Printf("%s\n", formatNode(prefix, entry, lsOptions.ListLong))
if entry.Type == "dir" && entry.Subtree != nil {
if err = printTree(ctx, repo, entry.Subtree, filepath.Join(prefix, entry.Name)); err != nil {
entryPath := prefix + string(filepath.Separator) + entry.Name
if err = printTree(ctx, repo, entry.Subtree, entryPath); err != nil {
return err
}
}
@ -84,7 +85,7 @@ func runLs(opts LsOptions, gopts GlobalOptions, args []string) error {
for sn := range FindFilteredSnapshots(ctx, repo, opts.Host, opts.Tags, opts.Paths, args) {
Verbosef("snapshot %s of %v at %s):\n", sn.ID().Str(), sn.Paths, sn.Time)
if err = printTree(gopts.ctx, repo, sn.Tree, string(filepath.Separator)); err != nil {
if err = printTree(gopts.ctx, repo, sn.Tree, ""); err != nil {
return err
}
}


@ -64,8 +64,9 @@ func formatDuration(d time.Duration) string {
}
func formatNode(prefix string, n *restic.Node, long bool) string {
nodepath := prefix + string(filepath.Separator) + n.Name
if !long {
return filepath.Join(prefix, n.Name)
return nodepath
}
var mode os.FileMode
@ -91,6 +92,6 @@ func formatNode(prefix string, n *restic.Node, long bool) string {
return fmt.Sprintf("%s %5d %5d %6d %s %s%s",
mode|n.Mode, n.UID, n.GID, n.Size,
n.ModTime.Format(TimeFormat), filepath.Join(prefix, n.Name),
n.ModTime.Format(TimeFormat), nodepath,
target)
}


@ -43,6 +43,7 @@ type GlobalOptions struct {
Repo string
PasswordFile string
Quiet bool
Verbose int
NoLock bool
JSON bool
CacheDir string
@ -59,6 +60,13 @@ type GlobalOptions struct {
stdout io.Writer
stderr io.Writer
// verbosity is set as follows:
// 0 means: don't print any messages except errors, this is used when --quiet is specified
// 1 is the default: print essential messages
// 2 means: print more messages, report minor things, this is used when --verbose is specified
// 3 means: print very detailed debug messages, this is used when --debug is specified
verbosity uint
Options []string
extended options.Options
@ -81,6 +89,7 @@ func init() {
f.StringVarP(&globalOptions.Repo, "repo", "r", os.Getenv("RESTIC_REPOSITORY"), "repository to backup to or restore from (default: $RESTIC_REPOSITORY)")
f.StringVarP(&globalOptions.PasswordFile, "password-file", "p", os.Getenv("RESTIC_PASSWORD_FILE"), "read the repository password from a file (default: $RESTIC_PASSWORD_FILE)")
f.BoolVarP(&globalOptions.Quiet, "quiet", "q", false, "do not output comprehensive progress report")
f.CountVarP(&globalOptions.Verbose, "verbose", "v", "be verbose (specify --verbose multiple times or level `n`)")
f.BoolVar(&globalOptions.NoLock, "no-lock", false, "do not lock the repo, this allows some operations on read-only repos")
f.BoolVarP(&globalOptions.JSON, "json", "", false, "set output mode to JSON for commands that support it")
f.StringVar(&globalOptions.CacheDir, "cache-dir", "", "set the cache directory")
@ -173,11 +182,9 @@ func Printf(format string, args ...interface{}) {
// Verbosef calls Printf to write the message when the verbose flag is set.
func Verbosef(format string, args ...interface{}) {
if globalOptions.Quiet {
return
if globalOptions.verbosity >= 1 {
Printf(format, args...)
}
Printf(format, args...)
}
// PrintProgress wraps fmt.Printf to handle the difference in writing progress


@ -18,6 +18,7 @@ var (
listenMemoryProfile string
memProfilePath string
cpuProfilePath string
traceProfilePath string
insecure bool
)
@ -26,6 +27,7 @@ func init() {
f.StringVar(&listenMemoryProfile, "listen-profile", "", "listen on this `address:port` for memory profiling")
f.StringVar(&memProfilePath, "mem-profile", "", "write memory profile to `dir`")
f.StringVar(&cpuProfilePath, "cpu-profile", "", "write cpu profile to `dir`")
f.StringVar(&traceProfilePath, "trace-profile", "", "write trace to `dir`")
f.BoolVar(&insecure, "insecure-kdf", false, "use insecure KDF settings")
}
@ -46,7 +48,18 @@ func runDebug() error {
}()
}
if memProfilePath != "" && cpuProfilePath != "" {
profilesEnabled := 0
if memProfilePath != "" {
profilesEnabled++
}
if cpuProfilePath != "" {
profilesEnabled++
}
if traceProfilePath != "" {
profilesEnabled++
}
if profilesEnabled > 1 {
return errors.Fatal("only one profile (memory or CPU) may be activated at the same time")
}
@ -58,6 +71,8 @@ func runDebug() error {
prof = profile.Start(profile.Quiet, profile.NoShutdownHook, profile.MemProfile, profile.ProfilePath(memProfilePath))
} else if cpuProfilePath != "" {
prof = profile.Start(profile.Quiet, profile.NoShutdownHook, profile.CPUProfile, profile.ProfilePath(cpuProfilePath))
} else if traceProfilePath != "" {
prof = profile.Start(profile.Quiet, profile.NoShutdownHook, profile.TraceProfile, profile.ProfilePath(traceProfilePath))
}
if prof != nil {


@ -171,7 +171,7 @@ func TestMount(t *testing.T) {
rtest.SetupTarTestFixture(t, env.testdata, filepath.Join("testdata", "backup-data.tar.gz"))
// first backup
testRunBackup(t, []string{env.testdata}, BackupOptions{}, env.gopts)
testRunBackup(t, "", []string{env.testdata}, BackupOptions{}, env.gopts)
snapshotIDs := testRunList(t, "snapshots", env.gopts)
rtest.Assert(t, len(snapshotIDs) == 1,
"expected one snapshot, got %v", snapshotIDs)
@ -179,7 +179,7 @@ func TestMount(t *testing.T) {
checkSnapshots(t, env.gopts, repo, env.mountpoint, env.repo, snapshotIDs, 2)
// second backup, implicit incremental
testRunBackup(t, []string{env.testdata}, BackupOptions{}, env.gopts)
testRunBackup(t, "", []string{env.testdata}, BackupOptions{}, env.gopts)
snapshotIDs = testRunList(t, "snapshots", env.gopts)
rtest.Assert(t, len(snapshotIDs) == 2,
"expected two snapshots, got %v", snapshotIDs)
@ -188,7 +188,7 @@ func TestMount(t *testing.T) {
// third backup, explicit incremental
bopts := BackupOptions{Parent: snapshotIDs[0].String()}
testRunBackup(t, []string{env.testdata}, bopts, env.gopts)
testRunBackup(t, "", []string{env.testdata}, bopts, env.gopts)
snapshotIDs = testRunList(t, "snapshots", env.gopts)
rtest.Assert(t, len(snapshotIDs) == 3,
"expected three snapshots, got %v", snapshotIDs)


@ -3,6 +3,7 @@ package main
import (
"bufio"
"bytes"
"context"
"crypto/rand"
"encoding/json"
"fmt"
@ -17,12 +18,14 @@ import (
"testing"
"time"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/filter"
"github.com/restic/restic/internal/fs"
"github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/restic"
rtest "github.com/restic/restic/internal/test"
"github.com/restic/restic/internal/ui/termstatus"
"golang.org/x/sync/errgroup"
)
func parseIDsFromReader(t testing.TB, rd io.Reader) restic.IDs {
@ -51,9 +54,29 @@ func testRunInit(t testing.TB, opts GlobalOptions) {
t.Logf("repository initialized at %v", opts.Repo)
}
func testRunBackup(t testing.TB, target []string, opts BackupOptions, gopts GlobalOptions) {
t.Logf("backing up %v", target)
rtest.OK(t, runBackup(opts, gopts, target))
func testRunBackup(t testing.TB, dir string, target []string, opts BackupOptions, gopts GlobalOptions) {
ctx, cancel := context.WithCancel(gopts.ctx)
defer cancel()
var wg errgroup.Group
term := termstatus.New(gopts.stdout, gopts.stderr)
wg.Go(func() error { term.Run(ctx); return nil })
gopts.stdout = ioutil.Discard
t.Logf("backing up %v in %v", target, dir)
if dir != "" {
cleanup := fs.TestChdir(t, dir)
defer cleanup()
}
rtest.OK(t, runBackup(opts, gopts, term, target))
cancel()
err := wg.Wait()
if err != nil {
t.Fatal(err)
}
}
func testRunList(t testing.TB, tpe string, opts GlobalOptions) restic.IDs {
@ -219,7 +242,7 @@ func TestBackup(t *testing.T) {
opts := BackupOptions{}
// first backup
testRunBackup(t, []string{env.testdata}, opts, env.gopts)
testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
snapshotIDs := testRunList(t, "snapshots", env.gopts)
rtest.Assert(t, len(snapshotIDs) == 1,
"expected one snapshot, got %v", snapshotIDs)
@ -228,7 +251,7 @@ func TestBackup(t *testing.T) {
stat1 := dirStats(env.repo)
// second backup, implicit incremental
testRunBackup(t, []string{env.testdata}, opts, env.gopts)
testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
snapshotIDs = testRunList(t, "snapshots", env.gopts)
rtest.Assert(t, len(snapshotIDs) == 2,
"expected two snapshots, got %v", snapshotIDs)
@ -242,7 +265,7 @@ func TestBackup(t *testing.T) {
testRunCheck(t, env.gopts)
// third backup, explicit incremental
opts.Parent = snapshotIDs[0].String()
testRunBackup(t, []string{env.testdata}, opts, env.gopts)
testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
snapshotIDs = testRunList(t, "snapshots", env.gopts)
rtest.Assert(t, len(snapshotIDs) == 3,
"expected three snapshots, got %v", snapshotIDs)
@ -296,198 +319,7 @@ func TestBackupNonExistingFile(t *testing.T) {
opts := BackupOptions{}
testRunBackup(t, dirs, opts, env.gopts)
}
func TestBackupMissingFile1(t *testing.T) {
env, cleanup := withTestEnvironment(t)
defer cleanup()
datafile := filepath.Join("testdata", "backup-data.tar.gz")
fd, err := os.Open(datafile)
if os.IsNotExist(errors.Cause(err)) {
t.Skipf("unable to find data file %q, skipping", datafile)
return
}
rtest.OK(t, err)
rtest.OK(t, fd.Close())
rtest.SetupTarTestFixture(t, env.testdata, datafile)
testRunInit(t, env.gopts)
globalOptions.stderr = ioutil.Discard
defer func() {
globalOptions.stderr = os.Stderr
}()
ranHook := false
debug.Hook("pipe.walk1", func(context interface{}) {
pathname := context.(string)
if pathname != filepath.Join("testdata", "0", "0", "9") {
return
}
t.Logf("in hook, removing test file testdata/0/0/9/37")
ranHook = true
rtest.OK(t, os.Remove(filepath.Join(env.testdata, "0", "0", "9", "37")))
})
opts := BackupOptions{}
testRunBackup(t, []string{env.testdata}, opts, env.gopts)
testRunCheck(t, env.gopts)
rtest.Assert(t, ranHook, "hook did not run")
debug.RemoveHook("pipe.walk1")
}
func TestBackupMissingFile2(t *testing.T) {
env, cleanup := withTestEnvironment(t)
defer cleanup()
datafile := filepath.Join("testdata", "backup-data.tar.gz")
fd, err := os.Open(datafile)
if os.IsNotExist(errors.Cause(err)) {
t.Skipf("unable to find data file %q, skipping", datafile)
return
}
rtest.OK(t, err)
rtest.OK(t, fd.Close())
rtest.SetupTarTestFixture(t, env.testdata, datafile)
testRunInit(t, env.gopts)
globalOptions.stderr = ioutil.Discard
defer func() {
globalOptions.stderr = os.Stderr
}()
ranHook := false
debug.Hook("pipe.walk2", func(context interface{}) {
pathname := context.(string)
if pathname != filepath.Join("testdata", "0", "0", "9", "37") {
return
}
t.Logf("in hook, removing test file testdata/0/0/9/37")
ranHook = true
rtest.OK(t, os.Remove(filepath.Join(env.testdata, "0", "0", "9", "37")))
})
opts := BackupOptions{}
testRunBackup(t, []string{env.testdata}, opts, env.gopts)
testRunCheck(t, env.gopts)
rtest.Assert(t, ranHook, "hook did not run")
debug.RemoveHook("pipe.walk2")
}
func TestBackupChangedFile(t *testing.T) {
env, cleanup := withTestEnvironment(t)
defer cleanup()
datafile := filepath.Join("testdata", "backup-data.tar.gz")
fd, err := os.Open(datafile)
if os.IsNotExist(errors.Cause(err)) {
t.Skipf("unable to find data file %q, skipping", datafile)
return
}
rtest.OK(t, err)
rtest.OK(t, fd.Close())
rtest.SetupTarTestFixture(t, env.testdata, datafile)
testRunInit(t, env.gopts)
globalOptions.stderr = ioutil.Discard
defer func() {
globalOptions.stderr = os.Stderr
}()
modFile := filepath.Join(env.testdata, "0", "0", "9", "18")
ranHook := false
debug.Hook("archiver.SaveFile", func(context interface{}) {
pathname := context.(string)
if pathname != modFile {
return
}
t.Logf("in hook, modifying test file %v", modFile)
ranHook = true
rtest.OK(t, ioutil.WriteFile(modFile, []byte("modified"), 0600))
})
opts := BackupOptions{}
testRunBackup(t, []string{env.testdata}, opts, env.gopts)
testRunCheck(t, env.gopts)
rtest.Assert(t, ranHook, "hook did not run")
debug.RemoveHook("archiver.SaveFile")
}
func TestBackupDirectoryError(t *testing.T) {
env, cleanup := withTestEnvironment(t)
defer cleanup()
datafile := filepath.Join("testdata", "backup-data.tar.gz")
fd, err := os.Open(datafile)
if os.IsNotExist(errors.Cause(err)) {
t.Skipf("unable to find data file %q, skipping", datafile)
return
}
rtest.OK(t, err)
rtest.OK(t, fd.Close())
rtest.SetupTarTestFixture(t, env.testdata, datafile)
testRunInit(t, env.gopts)
globalOptions.stderr = ioutil.Discard
defer func() {
globalOptions.stderr = os.Stderr
}()
ranHook := false
testdir := filepath.Join(env.testdata, "0", "0", "9")
// install hook that removes the dir right before readdirnames()
debug.Hook("pipe.readdirnames", func(context interface{}) {
path := context.(string)
if path != testdir {
return
}
t.Logf("in hook, removing test file %v", testdir)
ranHook = true
rtest.OK(t, os.RemoveAll(testdir))
})
testRunBackup(t, []string{filepath.Join(env.testdata, "0", "0")}, BackupOptions{}, env.gopts)
testRunCheck(t, env.gopts)
rtest.Assert(t, ranHook, "hook did not run")
debug.RemoveHook("pipe.walk2")
snapshots := testRunList(t, "snapshots", env.gopts)
rtest.Assert(t, len(snapshots) > 0,
"no snapshots found in repo (%v)", datafile)
files := testRunLs(t, env.gopts, snapshots[0].String())
rtest.Assert(t, len(files) > 1, "snapshot is empty")
testRunBackup(t, "", dirs, opts, env.gopts)
}
func includes(haystack []string, needle string) bool {
@ -552,21 +384,21 @@ func TestBackupExclude(t *testing.T) {
opts := BackupOptions{}
testRunBackup(t, []string{datadir}, opts, env.gopts)
testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
snapshots, snapshotID := lastSnapshot(snapshots, loadSnapshotMap(t, env.gopts))
files := testRunLs(t, env.gopts, snapshotID)
rtest.Assert(t, includes(files, filepath.Join(string(filepath.Separator), "testdata", "foo.tar.gz")),
"expected file %q in first snapshot, but it's not included", "foo.tar.gz")
opts.Excludes = []string{"*.tar.gz"}
testRunBackup(t, []string{datadir}, opts, env.gopts)
testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
snapshots, snapshotID = lastSnapshot(snapshots, loadSnapshotMap(t, env.gopts))
files = testRunLs(t, env.gopts, snapshotID)
rtest.Assert(t, !includes(files, filepath.Join(string(filepath.Separator), "testdata", "foo.tar.gz")),
"expected file %q not in first snapshot, but it's included", "foo.tar.gz")
opts.Excludes = []string{"*.tar.gz", "private/secret"}
testRunBackup(t, []string{datadir}, opts, env.gopts)
testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts)
_, snapshotID = lastSnapshot(snapshots, loadSnapshotMap(t, env.gopts))
files = testRunLs(t, env.gopts, snapshotID)
rtest.Assert(t, !includes(files, filepath.Join(string(filepath.Separator), "testdata", "foo.tar.gz")),
@ -616,13 +448,13 @@ func TestIncrementalBackup(t *testing.T) {
opts := BackupOptions{}
testRunBackup(t, []string{datadir}, opts, env.gopts)
testRunBackup(t, "", []string{datadir}, opts, env.gopts)
testRunCheck(t, env.gopts)
stat1 := dirStats(env.repo)
rtest.OK(t, appendRandomData(testfile, incrementalSecondWrite))
testRunBackup(t, []string{datadir}, opts, env.gopts)
testRunBackup(t, "", []string{datadir}, opts, env.gopts)
testRunCheck(t, env.gopts)
stat2 := dirStats(env.repo)
if stat2.size-stat1.size > incrementalFirstWrite {
@ -632,7 +464,7 @@ func TestIncrementalBackup(t *testing.T) {
rtest.OK(t, appendRandomData(testfile, incrementalThirdWrite))
testRunBackup(t, []string{datadir}, opts, env.gopts)
testRunBackup(t, "", []string{datadir}, opts, env.gopts)
testRunCheck(t, env.gopts)
stat3 := dirStats(env.repo)
if stat3.size-stat2.size > incrementalFirstWrite {
@ -651,7 +483,7 @@ func TestBackupTags(t *testing.T) {
opts := BackupOptions{}
testRunBackup(t, []string{env.testdata}, opts, env.gopts)
testRunBackup(t, "", []string{env.testdata}, opts, env.gopts)
testRunCheck(t, env.gopts)
newest, _ := testRunSnapshots(t, env.gopts)
rtest.Assert(t, newest != nil, "expected a new backup, got nil")
@ -660,7 +492,7 @@ func TestBackupTags(t *testing.T) {
parent := newest
opts.Tags = []string{"NL"}
testRunBackup(t, []string{env.testdata}, opts, env.gopts)
testRunBackup(t, "", []string{env.testdata}, opts, env.gopts)
testRunCheck(t, env.gopts)
newest, _ = testRunSnapshots(t, env.gopts)
rtest.Assert(t, newest != nil, "expected a new backup, got nil")
@ -683,7 +515,7 @@ func TestTag(t *testing.T) {
testRunInit(t, env.gopts)
rtest.SetupTarTestFixture(t, env.testdata, datafile)
testRunBackup(t, []string{env.testdata}, BackupOptions{}, env.gopts)
testRunBackup(t, "", []string{env.testdata}, BackupOptions{}, env.gopts)
testRunCheck(t, env.gopts)
newest, _ := testRunSnapshots(t, env.gopts)
rtest.Assert(t, newest != nil, "expected a new backup, got nil")
@ -859,7 +691,7 @@ func TestRestoreFilter(t *testing.T) {
opts := BackupOptions{}
testRunBackup(t, []string{env.testdata}, opts, env.gopts)
testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts)
testRunCheck(t, env.gopts)
snapshotID := testRunList(t, "snapshots", env.gopts)[0]
@ -899,7 +731,7 @@ func TestRestore(t *testing.T) {
opts := BackupOptions{}
testRunBackup(t, []string{env.testdata}, opts, env.gopts)
testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts)
testRunCheck(t, env.gopts)
// Restore latest without any filters
@ -922,12 +754,22 @@ func TestRestoreLatest(t *testing.T) {
opts := BackupOptions{}
testRunBackup(t, []string{env.testdata}, opts, env.gopts)
// chdir manually here so we can get the current directory. This is not the
// same as the temp dir returned by ioutil.TempDir() on darwin.
back := fs.TestChdir(t, filepath.Dir(env.testdata))
defer back()
curdir, err := os.Getwd()
if err != nil {
t.Fatal(err)
}
testRunBackup(t, "", []string{filepath.Base(env.testdata)}, opts, env.gopts)
testRunCheck(t, env.gopts)
os.Remove(p)
rtest.OK(t, appendRandomData(p, 101))
testRunBackup(t, []string{env.testdata}, opts, env.gopts)
testRunBackup(t, "", []string{filepath.Base(env.testdata)}, opts, env.gopts)
testRunCheck(t, env.gopts)
// Restore latest without any filters
@ -935,16 +777,18 @@ func TestRestoreLatest(t *testing.T) {
rtest.OK(t, testFileSize(filepath.Join(env.base, "restore0", "testdata", "testfile.c"), int64(101)))
// Setup test files in different directories backed up in different snapshots
p1 := filepath.Join(env.testdata, "p1/testfile.c")
p1 := filepath.Join(curdir, filepath.FromSlash("p1/testfile.c"))
rtest.OK(t, os.MkdirAll(filepath.Dir(p1), 0755))
rtest.OK(t, appendRandomData(p1, 102))
testRunBackup(t, []string{filepath.Dir(p1)}, opts, env.gopts)
testRunBackup(t, "", []string{"p1"}, opts, env.gopts)
testRunCheck(t, env.gopts)
p2 := filepath.Join(env.testdata, "p2/testfile.c")
p2 := filepath.Join(curdir, filepath.FromSlash("p2/testfile.c"))
rtest.OK(t, os.MkdirAll(filepath.Dir(p2), 0755))
rtest.OK(t, appendRandomData(p2, 103))
testRunBackup(t, []string{filepath.Dir(p2)}, opts, env.gopts)
testRunBackup(t, "", []string{"p2"}, opts, env.gopts)
testRunCheck(t, env.gopts)
p1rAbs := filepath.Join(env.base, "restore1", "p1/testfile.c")
@ -1017,7 +861,7 @@ func TestRestoreNoMetadataOnIgnoredIntermediateDirs(t *testing.T) {
opts := BackupOptions{}
testRunBackup(t, []string{env.testdata}, opts, env.gopts)
testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts)
testRunCheck(t, env.gopts)
snapshotID := testRunList(t, "snapshots", env.gopts)[0]
@ -1055,7 +899,7 @@ func TestFind(t *testing.T) {
opts := BackupOptions{}
testRunBackup(t, []string{env.testdata}, opts, env.gopts)
testRunBackup(t, "", []string{env.testdata}, opts, env.gopts)
testRunCheck(t, env.gopts)
results := testRunFind(t, false, env.gopts, "unexistingfile")
@ -1095,7 +939,7 @@ func TestFindJSON(t *testing.T) {
opts := BackupOptions{}
testRunBackup(t, []string{env.testdata}, opts, env.gopts)
testRunBackup(t, "", []string{env.testdata}, opts, env.gopts)
testRunCheck(t, env.gopts)
results := testRunFind(t, true, env.gopts, "unexistingfile")
@ -1198,13 +1042,13 @@ func TestPrune(t *testing.T) {
rtest.SetupTarTestFixture(t, env.testdata, datafile)
opts := BackupOptions{}
testRunBackup(t, []string{filepath.Join(env.testdata, "0", "0", "9")}, opts, env.gopts)
testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9")}, opts, env.gopts)
firstSnapshot := testRunList(t, "snapshots", env.gopts)
rtest.Assert(t, len(firstSnapshot) == 1,
"expected one snapshot, got %v", firstSnapshot)
testRunBackup(t, []string{filepath.Join(env.testdata, "0", "0", "9", "2")}, opts, env.gopts)
testRunBackup(t, []string{filepath.Join(env.testdata, "0", "0", "9", "3")}, opts, env.gopts)
testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "2")}, opts, env.gopts)
testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "3")}, opts, env.gopts)
snapshotIDs := testRunList(t, "snapshots", env.gopts)
rtest.Assert(t, len(snapshotIDs) == 3,
@ -1238,7 +1082,7 @@ func TestHardLink(t *testing.T) {
opts := BackupOptions{}
// first backup
testRunBackup(t, []string{env.testdata}, opts, env.gopts)
testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts)
snapshotIDs := testRunList(t, "snapshots", env.gopts)
rtest.Assert(t, len(snapshotIDs) == 1,
"expected one snapshot, got %v", snapshotIDs)
@ -1332,7 +1176,7 @@ func TestQuietBackup(t *testing.T) {
opts := BackupOptions{}
env.gopts.Quiet = false
testRunBackup(t, []string{env.testdata}, opts, env.gopts)
testRunBackup(t, "", []string{env.testdata}, opts, env.gopts)
snapshotIDs := testRunList(t, "snapshots", env.gopts)
rtest.Assert(t, len(snapshotIDs) == 1,
"expected one snapshot, got %v", snapshotIDs)
@ -1340,7 +1184,7 @@ func TestQuietBackup(t *testing.T) {
testRunCheck(t, env.gopts)
env.gopts.Quiet = true
testRunBackup(t, []string{env.testdata}, opts, env.gopts)
testRunBackup(t, "", []string{env.testdata}, opts, env.gopts)
snapshotIDs = testRunList(t, "snapshots", env.gopts)
rtest.Assert(t, len(snapshotIDs) == 2,
"expected two snapshots, got %v", snapshotIDs)


@ -30,6 +30,21 @@ directories in an encrypted repository stored on different backends.
DisableAutoGenTag: true,
PersistentPreRunE: func(c *cobra.Command, args []string) error {
// set verbosity, default is one
globalOptions.verbosity = 1
if globalOptions.Quiet && (globalOptions.Verbose > 1) {
return errors.Fatal("--quiet and --verbose cannot be specified at the same time")
}
switch {
case globalOptions.Verbose >= 2:
globalOptions.verbosity = 3
case globalOptions.Verbose > 0:
globalOptions.verbosity = 2
case globalOptions.Quiet:
globalOptions.verbosity = 0
}
// parse extended options
opts, err := options.Parse(globalOptions.Options)
if err != nil {


@ -14,3 +14,6 @@
Introduction
############
Restic is a fast and secure backup program. In the following sections, we will
present typical workflows, starting with installing, preparing a new
repository, and making the first backup.


@ -145,9 +145,17 @@ Admin rights.
Docker Container
****************
We're maintaining a bare docker container with just a few files and the restic
binary; you can get it with `docker pull` like this:
.. code-block:: console
$ docker pull restic/restic
.. note::
| A docker container is available as a contribution (Thank you!).
| You can find it at https://github.com/Lobaro/restic-backup-docker
| Another docker container which offers more configuration options is
| available as a contribution (Thank you!). You can find it at
| https://github.com/Lobaro/restic-backup-docker
From Source
***********
@ -173,7 +181,7 @@ You can easily cross-compile restic for all supported platforms, just
supply the target OS and platform via the command-line options like this
(for Windows and FreeBSD respectively):
::
.. code-block:: console
$ go run build.go --goos windows --goarch amd64


@ -15,20 +15,24 @@ Preparing a new repository
##########################
The place where your backups will be saved is called a "repository".
This chapter explains how to create ("init") such a repository.
This chapter explains how to create ("init") such a repository. The repository
can be stored locally, or on some remote server or service. We'll first cover
using a local repository, the remaining sections of this chapter cover all the
other options. You can skip to the next chapter once you've read the relevant
section here.
Local
*****
In order to create a repository at ``/tmp/backup``, run the following
In order to create a repository at ``/srv/restic-repo``, run the following
command and enter the same password twice:
.. code-block:: console
$ restic init --repo /tmp/backup
$ restic init --repo /srv/restic-repo
enter password for new backend:
enter password again:
created restic backend 085b3c76b9 at /tmp/backup
created restic backend 085b3c76b9 at /srv/restic-repo
Please note that knowledge of your password is required to access the repository.
Losing your password means that your data is irrecoverably lost.
@ -55,10 +59,10 @@ simply be achieved by changing the URL scheme in the ``init`` command:
.. code-block:: console
$ restic -r sftp:user@host:/tmp/backup init
$ restic -r sftp:user@host:/srv/restic-repo init
enter password for new backend:
enter password again:
created restic backend f1c6108821 at sftp:user@host:/tmp/backup
created restic backend f1c6108821 at sftp:user@host:/srv/restic-repo
Please note that knowledge of your password is required to access the repository.
Losing your password means that your data is irrecoverably lost.
@ -87,7 +91,7 @@ specify the user name in this case):
::
$ restic -r sftp:foo:/tmp/backup init
$ restic -r sftp:foo:/srv/restic-repo init
You can also add an entry with a special host name which does not exist,
just for use with restic, and use the ``Hostname`` option to set the
@ -104,7 +108,7 @@ Then use it in the backend specification:
::
$ restic -r sftp:restic-backup-host:/tmp/backup init
$ restic -r sftp:restic-backup-host:/srv/restic-repo init
Last, if you'd like to use an entirely different program to create the
SFTP connection, you can specify the command to be run with the option
@ -509,5 +513,5 @@ On MSYS2, you can install ``winpty`` as follows:
.. code-block:: console
$ pacman -S winpty
$ winpty restic -r /tmp/backup init
$ winpty restic -r /srv/restic-repo init


@ -21,43 +21,88 @@ again:
.. code-block:: console
$ restic -r /tmp/backup backup ~/work
$ restic -r /srv/restic-repo --verbose backup ~/work
open repository
enter password for repository:
scan [/home/user/work]
scanned 764 directories, 1816 files in 0:00
[0:29] 100.00% 54.732 MiB/s 1.582 GiB / 1.582 GiB 2580 / 2580 items 0 errors ETA 0:00
duration: 0:29, 54.47MiB/s
password is correct
lock repository
load index files
start scan
start backup
scan finished in 1.837s
processed 1.720 GiB in 0:12
Files: 5307 new, 0 changed, 0 unmodified
Dirs: 1867 new, 0 changed, 0 unmodified
Added: 1.700 GiB
snapshot 40dc1520 saved
As you can see, restic created a backup of the directory and was pretty
fast! The specific snapshot just created is identified by a sequence of
hexadecimal characters, ``40dc1520`` in this case.
If you don't pass the ``--verbose`` option, restic will print less data (but
you'll still get a nice live status display).
If you run the command again, restic will create another snapshot of
your data, but this time it's even faster. This is de-duplication at
work!
.. code-block:: console
$ restic -r /tmp/backup backup ~/work
$ restic -r /srv/restic-repo backup --verbose ~/work
open repository
enter password for repository:
using parent snapshot 40dc1520aa6a07b7b3ae561786770a01951245d2367241e71e9485f18ae8228c
scan [/home/user/work]
scanned 764 directories, 1816 files in 0:00
[0:00] 100.00% 0B/s 1.582 GiB / 1.582 GiB 2580 / 2580 items 0 errors ETA 0:00
duration: 0:00, 6572.38MiB/s
password is correct
lock repository
load index files
using parent snapshot d875ae93
start scan
start backup
scan finished in 1.881s
processed 1.720 GiB in 0:03
Files: 0 new, 0 changed, 5307 unmodified
Dirs: 0 new, 0 changed, 1867 unmodified
Added: 0 B
snapshot 79766175 saved
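
The speedup comes from content-addressed storage: restic splits file contents
into chunks and uploads a chunk only if its ID (a cryptographic hash of its
content) is not already present in the repository. The following is a minimal
Go sketch of the idea (assuming SHA-256 chunk IDs; restic's real implementation
uses content-defined chunking and encrypts all data):

.. code-block:: go

package main

import (
	"crypto/sha256"
	"fmt"
)

func main() {
	// a chunk is identified by the hash of its content
	seen := map[[sha256.Size]byte]bool{}
	store := func(chunk []byte) {
		id := sha256.Sum256(chunk)
		if seen[id] {
			fmt.Println("duplicate chunk, nothing to upload")
			return
		}
		seen[id] = true
		fmt.Println("new chunk, uploading")
	}
	store([]byte("some file data")) // new chunk, uploading
	store([]byte("some file data")) // duplicate chunk, nothing to upload
}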
You can even backup individual files in the same repository.
You can even backup individual files in the same repository (not passing
``--verbose`` means less output):
.. code-block:: console
$ restic -r /tmp/backup backup ~/work.txt
scan [/home/user/work.txt]
scanned 0 directories, 1 files in 0:00
[0:00] 100.00% 0B/s 220B / 220B 1 / 1 items 0 errors ETA 0:00
duration: 0:00, 0.03MiB/s
snapshot 31f7bd63 saved
$ restic -r /srv/restic-repo backup ~/work.txt
enter password for repository:
password is correct
snapshot 249d0210 saved
If you're interested in what restic does, pass ``--verbose`` twice (or
``--verbose 2``) to display detailed information about each file and directory
restic encounters:
.. code-block:: console
$ echo 'more data foo bar' >> ~/work.txt
$ restic -r /srv/restic-repo backup --verbose --verbose ~/work.txt
open repository
enter password for repository:
password is correct
lock repository
load index files
using parent snapshot f3f8d56b
start scan
start backup
scan finished in 2.115s
modified /home/user/work.txt, saved in 0.007s (22 B added)
modified /home/user/, saved in 0.008s (0 B added, 378 B metadata)
modified /home/, saved in 0.009s (0 B added, 375 B metadata)
processed 22 B in 0:02
Files: 0 new, 1 changed, 0 unmodified
Dirs: 0 new, 2 changed, 0 unmodified
Data Blobs: 1 new
Tree Blobs: 3 new
Added: 1.116 KiB
snapshot 8dc503fc saved
In fact, several hosts may use the same repository to backup directories
and files, leading to greater de-duplication.
@ -87,33 +132,53 @@ the exclude options are:
- ``--exclude-if-present`` Specified one or more times to exclude a folder's content
if it contains a given file (optionally having a given header)
Basic example:
Let's say we have a file called ``excludes.txt`` with the following content:
.. code-block:: console
$ cat exclude
::
# exclude go-files
*.go
# exclude foo/x/y/z/bar foo/x/bar foo/bar
foo/**/bar
$ restic -r /tmp/backup backup ~/work --exclude="*.c" --exclude-file=exclude
It can be used like this:
.. code-block:: console
$ restic -r /srv/restic-repo backup ~/work --exclude="*.c" --exclude-file=excludes.txt
This instructs restic to exclude files matching the following criteria:
* All files matching ``*.go`` (second line in ``excludes.txt``)
* All files and sub-directories named ``bar`` which reside somewhere below a directory called ``foo`` (fourth line in ``excludes.txt``)
* All files matching ``*.c`` (parameter ``--exclude``)
Please see ``restic help backup`` for more specific information about each exclude option.
Patterns use `filepath.Glob <https://golang.org/pkg/path/filepath/#Glob>`__ internally,
see `filepath.Match <https://golang.org/pkg/path/filepath/#Match>`__ for syntax.
Patterns are tested against the full path of a file/dir to be saved, not only
against the relative path below the argument given to restic backup.
Patterns need to match on complete path components. (``foo`` matches
``/dir1/foo/dir2/file`` and ``/dir/foo`` but does not match ``/dir/foobar`` or
``barfoo``.) A trailing ``/`` is ignored. A leading ``/`` anchors the
pattern at the root directory. (``/bin`` matches ``/bin/bash`` but does not
match ``/usr/bin/restic``.) Regular wildcards cannot be used to match over the
directory separator ``/``. (``b*ash`` matches ``/bin/bash`` but does not match
``/bin/ash``.) However ``**`` matches arbitrary subdirectories. (``foo/**/bar``
matches ``/dir1/foo/dir2/bar/file``, ``/foo/bar/file`` and ``/tmp/foo/bar``.)
Environment-variables in exclude-files are expanded with
`os.ExpandEnv <https://golang.org/pkg/os/#ExpandEnv>`__.
see `filepath.Match <https://golang.org/pkg/path/filepath/#Match>`__ for
syntax. Patterns are tested against the full path of a file/dir to be saved,
even if restic is passed a relative path to save. Environment variables in
exclude files are expanded with `os.ExpandEnv <https://golang.org/pkg/os/#ExpandEnv>`__.
Patterns need to match on complete path components. For example, the pattern ``foo``:
* matches ``/dir1/foo/dir2/file`` and ``/dir/foo``
* does not match ``/dir/foobar`` or ``barfoo``
A trailing ``/`` is ignored, while a leading ``/`` anchors the
pattern at the root directory. This means ``/bin`` matches ``/bin/bash`` but
does not match ``/usr/bin/restic``.
Regular wildcards cannot be used to match over the
directory separator ``/``. For example: ``b*ash`` matches ``/bin/bash`` but does not match
``/bin/ash``.
For this, the special wildcard ``**`` can be used to match arbitrary
sub-directories: The pattern ``foo/**/bar`` matches:
* ``/dir1/foo/dir2/bar/file``
* ``/foo/bar/file``
* ``/tmp/foo/bar``
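
To see the component-wise matching in action, here is a small Go sketch using
the standard library's ``filepath.Match`` (for illustration only; restic's
``**`` support comes from its internal ``filter`` package, which plain
``filepath.Match`` does not provide):

.. code-block:: go

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// regular wildcards never cross the / separator
	m, _ := filepath.Match("b*ash", "bash")
	fmt.Println(m) // true
	m, _ = filepath.Match("b*ash", "bin/bash")
	fmt.Println(m) // false: * does not match /
	// patterns must match complete path components
	m, _ = filepath.Match("foo", "foobar")
	fmt.Println(m) // false
}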
By specifying the option ``--one-file-system`` you can instruct restic
to only backup files from the file systems the initially specified files
@ -122,15 +187,15 @@ backup ``/sys`` or ``/dev`` on a Linux system:
.. code-block:: console
$ restic -r /tmp/backup backup --one-file-system /
$ restic -r /srv/restic-repo backup --one-file-system /
By using the ``--files-from`` option you can read the files you want to
backup from a file. This is especially useful if a lot of files that are
not in the same folder have to be backed up, or if the list was pre-filtered
by other software.
For example maybe you want to backup files that have a certain filename
in them:
For example, maybe you want to backup files which have a name that matches a
certain pattern:
.. code-block:: console
@ -140,14 +205,14 @@ You can then use restic to backup the filtered files:
.. code-block:: console
$ restic -r /tmp/backup backup --files-from /tmp/files_to_backup
$ restic -r /srv/restic-repo backup --files-from /tmp/files_to_backup
Incidentally you can also combine ``--files-from`` with the normal files
args:
.. code-block:: console
$ restic -r /tmp/backup backup --files-from /tmp/files_to_backup /tmp/some_additional_file
$ restic -r /srv/restic-repo backup --files-from /tmp/files_to_backup /tmp/some_additional_file
Paths in the listing file can be absolute or relative.
@ -159,7 +224,7 @@ and displays a small statistic, just pass the command two snapshot IDs:
.. code-block:: console
$ restic -r /tmp/backup diff 5845b002 2ab627a6
$ restic -r /srv/restic-repo diff 5845b002 2ab627a6
password is correct
comparing snapshot ea657ce5 to 2ab627a6:
@ -206,7 +271,7 @@ this mode of operation, just supply the option ``--stdin`` to the
.. code-block:: console
$ mysqldump [...] | restic -r /tmp/backup backup --stdin
$ mysqldump [...] | restic -r /srv/restic-repo backup --stdin
This creates a new snapshot of the output of ``mysqldump``. You can then
use e.g. the fuse mounting option (see below) to mount the repository
@ -217,7 +282,7 @@ specified with ``--stdin-filename``, e.g. like this:
.. code-block:: console
$ mysqldump [...] | restic -r /tmp/backup backup --stdin --stdin-filename production.sql
$ mysqldump [...] | restic -r /srv/restic-repo backup --stdin --stdin-filename production.sql
Tags for backup
***************
@ -227,7 +292,7 @@ information. Just specify the tags for a snapshot one by one with ``--tag``:
.. code-block:: console
$ restic -r /tmp/backup backup --tag projectX --tag foo --tag bar ~/work
$ restic -r /srv/restic-repo backup --tag projectX --tag foo --tag bar ~/work
[...]
The tags can later be used to keep (or forget) snapshots with the ``forget``


@ -22,7 +22,7 @@ Now, you can list all the snapshots stored in the repository:
.. code-block:: console
$ restic -r /tmp/backup snapshots
$ restic -r /srv/restic-repo snapshots
enter password for repository:
ID Date Host Tags Directory
----------------------------------------------------------------------
@ -36,7 +36,7 @@ You can filter the listing by directory path:
.. code-block:: console
$ restic -r /tmp/backup snapshots --path="/srv"
$ restic -r /srv/restic-repo snapshots --path="/srv"
enter password for repository:
ID Date Host Tags Directory
----------------------------------------------------------------------
@ -47,7 +47,7 @@ Or filter by host:
.. code-block:: console
$ restic -r /tmp/backup snapshots --host luigi
$ restic -r /srv/restic-repo snapshots --host luigi
enter password for repository:
ID Date Host Tags Directory
----------------------------------------------------------------------
@ -74,7 +74,7 @@ backup data is consistent and the integrity is unharmed:
.. code-block:: console
$ restic -r /tmp/backup check
$ restic -r /srv/restic-repo check
Load indexes
ciphertext verification failed
@ -83,7 +83,7 @@ yield the same error:
.. code-block:: console
$ restic -r /tmp/backup restore 79766175 --target /tmp/restore-work
$ restic -r /srv/restic-repo restore 79766175 --target /tmp/restore-work
Load indexes
ciphertext verification failed
@ -93,7 +93,7 @@ data files:
.. code-block:: console
$ restic -r /tmp/backup check --read-data
$ restic -r /srv/restic-repo check --read-data
load indexes
check all packs
check snapshots, trees and blobs
@ -107,9 +107,9 @@ commands check all repository data files over 5 separate invocations:
.. code-block:: console
$ restic -r /tmp/backup check --read-data-subset=1/5
$ restic -r /tmp/backup check --read-data-subset=2/5
$ restic -r /tmp/backup check --read-data-subset=3/5
$ restic -r /tmp/backup check --read-data-subset=4/5
$ restic -r /tmp/backup check --read-data-subset=5/5
$ restic -r /srv/restic-repo check --read-data-subset=1/5
$ restic -r /srv/restic-repo check --read-data-subset=2/5
$ restic -r /srv/restic-repo check --read-data-subset=3/5
$ restic -r /srv/restic-repo check --read-data-subset=4/5
$ restic -r /srv/restic-repo check --read-data-subset=5/5

View file

@ -23,7 +23,7 @@ command to restore the contents of the latest snapshot to
.. code-block:: console
$ restic -r /tmp/backup restore 79766175 --target /tmp/restore-work
$ restic -r /srv/restic-repo restore 79766175 --target /tmp/restore-work
enter password for repository:
restoring <Snapshot of [/home/user/work] at 2015-05-08 21:40:19.884408621 +0200 CEST> to /tmp/restore-work
@ -33,7 +33,7 @@ backup for a specific host, path or both.
.. code-block:: console
$ restic -r /tmp/backup restore latest --target /tmp/restore-art --path "/home/art" --host luigi
$ restic -r /srv/restic-repo restore latest --target /tmp/restore-art --path "/home/art" --host luigi
enter password for repository:
restoring <Snapshot of [/home/art] at 2015-05-08 21:45:17.884408621 +0200 CEST> to /tmp/restore-art
@ -42,7 +42,7 @@ files in the snapshot. For example, to restore a single file:
.. code-block:: console
$ restic -r /tmp/backup restore 79766175 --target /tmp/restore-work --include /work/foo
$ restic -r /srv/restic-repo restore 79766175 --target /tmp/restore-work --include /work/foo
enter password for repository:
restoring <Snapshot of [/home/user/work] at 2015-05-08 21:40:19.884408621 +0200 CEST> to /tmp/restore-work
@ -58,9 +58,9 @@ command to serve the repository with FUSE:
.. code-block:: console
$ mkdir /mnt/restic
$ restic -r /tmp/backup mount /mnt/restic
$ restic -r /srv/restic-repo mount /mnt/restic
enter password for repository:
Now serving /tmp/backup at /mnt/restic
Now serving /srv/restic-repo at /mnt/restic
Don't forget to umount after quitting!
Mounting repositories via FUSE is not possible on OpenBSD, Solaris/illumos
@ -80,4 +80,4 @@ the data directly. This can be achieved by using the `dump` command, like this:
.. code-block:: console
$ restic -r /tmp/backup dump latest production.sql | mysql
$ restic -r /srv/restic-repo dump latest production.sql | mysql

View file

@ -35,7 +35,7 @@ repository like this:
.. code-block:: console
$ restic -r /tmp/backup snapshots
$ restic -r /srv/restic-repo snapshots
enter password for repository:
ID Date Host Tags Directory
----------------------------------------------------------------------
@ -50,7 +50,7 @@ command and specify the snapshot ID on the command line:
.. code-block:: console
$ restic -r /tmp/backup forget bdbd3439
$ restic -r /srv/restic-repo forget bdbd3439
enter password for repository:
removed snapshot d3f01f63
@ -58,7 +58,7 @@ Afterwards this snapshot is removed:
.. code-block:: console
$ restic -r /tmp/backup snapshots
$ restic -r /srv/restic-repo snapshots
enter password for repository:
ID Date Host Tags Directory
----------------------------------------------------------------------
@ -73,7 +73,7 @@ command must be run:
.. code-block:: console
$ restic -r /tmp/backup prune
$ restic -r /srv/restic-repo prune
enter password for repository:
counting files in repo

View file

@ -16,8 +16,8 @@ Encryption
*"The design might not be perfect, but its good. Encryption is a first-class feature,
the implementation looks sane and I guess the deduplication trade-off is worth it. So… Im going to use restic for
my personal backups.*" `Filippo Valsorda`_
the implementation looks sane and I guess the deduplication trade-off is worth
it. So… Im going to use restic for my personal backups.*" `Filippo Valsorda`_
.. _Filippo Valsorda: https://blog.filippo.io/restic-cryptography/
@ -31,19 +31,19 @@ per repository. In fact, you can use the ``list``, ``add``, ``remove``, and
.. code-block:: console
$ restic -r /tmp/backup key list
$ restic -r /srv/restic-repo key list
enter password for repository:
ID User Host Created
----------------------------------------------------------------------
*eb78040b username kasimir 2015-08-12 13:29:57
$ restic -r /tmp/backup key add
$ restic -r /srv/restic-repo key add
enter password for repository:
enter password for new key:
enter password again:
saved new key as <Key of username@kasimir, created on 2015-08-12 13:35:05.316831933 +0200 CEST>
$ restic -r backup key list
$ restic -r /srv/restic-repo key list
enter password for repository:
ID User Host Created
----------------------------------------------------------------------

View file

@ -26,10 +26,10 @@ times. The command ``snapshots`` may be used for this purpose:
.. code-block:: console
$ restic -r /tmp/backup snapshots
Fatal: unable to open config file: Stat: stat /tmp/backup/config: no such file or directory
$ restic -r /srv/restic-repo snapshots
Fatal: unable to open config file: Stat: stat /srv/restic-repo/config: no such file or directory
Is there a repository at the following location?
/tmp/backup
/srv/restic-repo
If a repository does not exist, restic will return a non-zero exit code
and print an error message. Note that restic will also return a non-zero

View file

@ -625,14 +625,15 @@ are deleted, the particular snapshot vanishes and all snapshots
depending on data that has been added in the snapshot cannot be restored
completely. Restic is not designed to detect this attack.
******
Local Cache
===========
******
In order to speed up certain operations, restic manages a local cache of data.
This document describes the data structures for the local cache with version 1.
Versions
--------
========
The cache directory is selected according to the `XDG base dir specification
<http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html>`__.
@ -646,12 +647,21 @@ a lower version number is found the cache is recreated with the current
version. If a higher version number is found the cache is ignored and left as
is.
Snapshots and Indexes
---------------------
Snapshots, Data and Indexes
===========================
Snapshot, Data and Index files are cached in the sub-directories ``snapshots``,
``data`` and ``index``, as read from the repository.
Expiry
======
Whenever a cache directory for a repo is used, that directory's modification
timestamp is updated to the current time. By looking at the modification
timestamps of the repo cache directories it is easy to decide which directories
are old and haven't been used in a long time. Those are probably stale and can
be removed.
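
For illustration, such an expiry check only needs the modification timestamps.
The following Go sketch is hypothetical (``staleCacheDirs`` and the base path
are invented for this example) and not part of restic:

.. code-block:: go

    package main

    import (
        "fmt"
        "io/ioutil"
        "path/filepath"
        "time"
    )

    // staleCacheDirs returns the repo cache directories under base whose
    // modification timestamp is older than maxAge.
    func staleCacheDirs(base string, maxAge time.Duration) ([]string, error) {
        entries, err := ioutil.ReadDir(base)
        if err != nil {
            return nil, err
        }
        var stale []string
        for _, fi := range entries {
            if fi.IsDir() && time.Since(fi.ModTime()) > maxAge {
                stale = append(stale, filepath.Join(base, fi.Name()))
            }
        }
        return stale, nil
    }

    func main() {
        dirs, err := staleCacheDirs("/home/user/.cache/restic", 30*24*time.Hour)
        if err != nil {
            panic(err)
        }
        fmt.Println("stale cache dirs:", dirs)
    }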
************
REST Backend
@ -798,24 +808,3 @@ Returns "200 OK" if the blob with the given name and type has been
deleted from the repository, an HTTP error otherwise.
*****
Talks
*****
The following talks will be or have been given about restic:
- 2016-01-31: Lightning Talk at the Go Devroom at FOSDEM 2016,
Brussels, Belgium
- 2016-01-29: `restic - Backups mal
richtig <https://media.ccc.de/v/c4.openchaos.2016.01.restic>`__:
Public lecture in German at `CCC Cologne
e.V. <https://koeln.ccc.de>`__ in Cologne, Germany
- 2015-08-23: `A Solution to the Backup
Inconvenience <https://programm.froscon.de/2015/events/1515.html>`__:
Lecture at `FROSCON 2015 <https://www.froscon.de>`__ in Bonn, Germany
- 2015-02-01: `Lightning Talk at FOSDEM
2015 <https://www.youtube.com/watch?v=oM-MfeflUZ8&t=11m40s>`__: A
short introduction (with slightly outdated command line)
- 2015-01-27: `Talk about restic at CCC
Aachen <https://videoag.fsmpi.rwth-aachen.de/?view=player&lectureid=4442#content>`__
(in German)

34
doc/110_talks.rst Normal file
View file

@ -0,0 +1,34 @@
..
Normally, there are no heading levels assigned to certain characters as the structure is
determined from the succession of headings. However, this convention is used in Python's
Style Guide for documenting, which you may follow:
# with overline, for parts
* for chapters
= for sections
- for subsections
^ for subsubsections
" for paragraphs
#####
Talks
#####
The following talks will be or have been given about restic:
- 2016-01-31: Lightning Talk at the Go Devroom at FOSDEM 2016,
Brussels, Belgium
- 2016-01-29: `restic - Backups mal
richtig <https://media.ccc.de/v/c4.openchaos.2016.01.restic>`__:
Public lecture in German at `CCC Cologne
e.V. <https://koeln.ccc.de>`__ in Cologne, Germany
- 2015-08-23: `A Solution to the Backup
Inconvenience <https://programm.froscon.de/2015/events/1515.html>`__:
Lecture at `FROSCON 2015 <https://www.froscon.de>`__ in Bonn, Germany
- 2015-02-01: `Lightning Talk at FOSDEM
2015 <https://www.youtube.com/watch?v=oM-MfeflUZ8&t=11m40s>`__: A
short introduction (with slightly outdated command line)
- 2015-01-27: `Talk about restic at CCC
Aachen <https://videoag.fsmpi.rwth-aachen.de/?view=player&lectureid=4442#content>`__
(in German)

View file

@ -1,36 +0,0 @@
Local Cache
===========
In order to speed up certain operations, restic manages a local cache of data.
This document describes the data structures for the local cache with version 1.
Versions
--------
The cache directory is selected according to the `XDG base dir specification
<http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html>`__.
Each repository has its own cache sub-directory, consisting of the repository ID
which is chosen at ``init``. All cache directories for different repos are
independent of each other.
The cache dir for a repo contains a file named ``version``, which contains a
single ASCII integer line that stands for the current version of the cache. If
a lower version number is found the cache is recreated with the current
version. If a higher version number is found the cache is ignored and left as
is.
Snapshots, Data and Indexes
---------------------------
Snapshot, Data and Index files are cached in the sub-directories ``snapshots``,
``data`` and ``index``, as read from the repository.
Expiry
------
Whenever a cache directory for a repo is used, that directory's modification
timestamp is updated to the current time. By looking at the modification
timestamps of the repo cache directories it is easy to decide which directories
are old and haven't been used in a long time. Those are probably stale and can
be removed.

View file

@ -35,7 +35,7 @@ master_doc = 'index'
# General information about the project.
project = 'restic'
copyright = '2017, restic authors'
copyright = '2018, restic authors'
author = 'fd0'
# The version info for the project you're documenting, acts as replacement for

View file

@ -16,5 +16,6 @@ Restic Documentation
080_examples
090_participating
100_references
110_talks
faq
manual_rest

View file

@ -19,6 +19,7 @@ Usage help is available:
backup Create a new backup of files and/or directories
cat Print internal objects to stdout
check Check the repository for errors
diff Show differences between two snapshots
dump Print a backed-up file to stdout
find Find a file or directory
forget Remove snapshots from the repository
@ -39,24 +40,24 @@ Usage help is available:
version Print version information
Flags:
--cacert stringSlice path to load root certificates from (default: use system certificates)
--cache-dir string set the cache directory
-h, --help help for restic
--json set output mode to JSON for commands that support it
--limit-download int limits downloads to a maximum rate in KiB/s. (default: unlimited)
--limit-upload int limits uploads to a maximum rate in KiB/s. (default: unlimited)
--no-cache do not use a local cache
--no-lock do not lock the repo, this allows some operations on read-only repos
-o, --option key=value set extended option (key=value, can be specified multiple times)
-p, --password-file string read the repository password from a file (default: $RESTIC_PASSWORD_FILE)
-q, --quiet do not output comprehensive progress report
-r, --repo string repository to backup to or restore from (default: $RESTIC_REPOSITORY)
--cacert stringSlice path to load root certificates from (default: use system certificates)
--cache-dir string set the cache directory
--cleanup-cache auto remove old cache directories
-h, --help help for restic
--json set output mode to JSON for commands that support it
--limit-download int limits downloads to a maximum rate in KiB/s. (default: unlimited)
--limit-upload int limits uploads to a maximum rate in KiB/s. (default: unlimited)
--no-cache do not use a local cache
--no-lock do not lock the repo, this allows some operations on read-only repos
-o, --option key=value set extended option (key=value, can be specified multiple times)
-p, --password-file string read the repository password from a file (default: $RESTIC_PASSWORD_FILE)
-q, --quiet do not output comprehensive progress report
-r, --repo string repository to backup to or restore from (default: $RESTIC_REPOSITORY)
--tls-client-cert string path to a file containing PEM encoded TLS client certificate and private key
-v, --verbose count[=-1] be verbose (can be specified multiple times)
Use "restic [command] --help" for more information about a command.
Similar to programs such as ``git``, restic has a number of
sub-commands. You can see these commands in the listing above. Each
sub-command may have its own command-line options, and there is a help
@ -87,21 +88,23 @@ command:
--stdin-filename string file name to use when reading from stdin (default "stdin")
--tag tag add a tag for the new snapshot (can be specified multiple times)
--time string time of the backup (ex. '2012-11-01 22:08:41') (default: now)
--with-atime store the atime for all files and directories
Global Flags:
--cacert stringSlice path to load root certificates from (default: use system certificates)
--cache-dir string set the cache directory
--json set output mode to JSON for commands that support it
--limit-download int limits downloads to a maximum rate in KiB/s. (default: unlimited)
--limit-upload int limits uploads to a maximum rate in KiB/s. (default: unlimited)
--no-cache do not use a local cache
--no-lock do not lock the repo, this allows some operations on read-only repos
-o, --option key=value set extended option (key=value, can be specified multiple times)
-p, --password-file string read the repository password from a file (default: $RESTIC_PASSWORD_FILE)
-q, --quiet do not output comprehensive progress report
-r, --repo string repository to backup to or restore from (default: $RESTIC_REPOSITORY)
--tls-client-cert string path to a TLS client certificate
--tls-client-key string path to a TLS client certificate key
--cacert stringSlice path to load root certificates from (default: use system certificates)
--cache-dir string set the cache directory
--cleanup-cache auto remove old cache directories
--json set output mode to JSON for commands that support it
--limit-download int limits downloads to a maximum rate in KiB/s. (default: unlimited)
--limit-upload int limits uploads to a maximum rate in KiB/s. (default: unlimited)
--no-cache do not use a local cache
--no-lock do not lock the repo, this allows some operations on read-only repos
-o, --option key=value set extended option (key=value, can be specified multiple times)
-p, --password-file string read the repository password from a file (default: $RESTIC_PASSWORD_FILE)
-q, --quiet do not output comprehensive progress report
-r, --repo string repository to backup to or restore from (default: $RESTIC_REPOSITORY)
--tls-client-cert string path to a file containing PEM encoded TLS client certificate and private key
-v, --verbose n[=-1] be verbose (specify --verbose multiple times or level n)
Subcommands that support showing progress information such as ``backup``,
``check`` and ``prune`` will do so unless the quiet flag ``-q`` or
@ -128,7 +131,7 @@ command does that:
.. code-block:: console
$ restic -r /tmp/backup tag --set NL --set CH 590c8fc8
$ restic -r /srv/restic-repo tag --set NL --set CH 590c8fc8
create exclusive lock for repository
modified tags on 1 snapshots
@ -141,19 +144,19 @@ So we can add and remove tags incrementally like this:
.. code-block:: console
$ restic -r /tmp/backup tag --tag NL --remove CH
$ restic -r /srv/restic-repo tag --tag NL --remove CH
create exclusive lock for repository
modified tags on 1 snapshots
$ restic -r /tmp/backup tag --tag NL --add UK
$ restic -r /srv/restic-repo tag --tag NL --add UK
create exclusive lock for repository
modified tags on 1 snapshots
$ restic -r /tmp/backup tag --tag NL --remove NL
$ restic -r /srv/restic-repo tag --tag NL --remove NL
create exclusive lock for repository
modified tags on 1 snapshots
$ restic -r /tmp/backup tag --tag NL --add SOMETHING
$ restic -r /srv/restic-repo tag --tag NL --add SOMETHING
no snapshots were modified
Under the hood
@ -170,7 +173,7 @@ locks with the following command:
.. code-block:: console
$ restic -r /tmp/backup list snapshots
$ restic -r /srv/restic-repo list snapshots
d369ccc7d126594950bf74f0a348d5d98d9e99f3215082eb69bf02dc9b3e464c
The ``find`` command searches for a given
@ -191,7 +194,7 @@ objects or their raw content.
.. code-block:: console
$ restic -r /tmp/backup cat snapshot d369ccc7d126594950bf74f0a348d5d98d9e99f3215082eb69bf02dc9b3e464c
$ restic -r /srv/restic-repo cat snapshot d369ccc7d126594950bf74f0a348d5d98d9e99f3215082eb69bf02dc9b3e464c
enter password for repository:
{
"time": "2015-08-12T12:52:44.091448856+02:00",
@ -242,7 +245,7 @@ lists all snapshots as JSON and uses ``jq`` to pretty-print the result:
.. code-block:: console
$ restic -r /tmp/backup snapshots --json | jq .
$ restic -r /srv/restic-repo snapshots --json | jq .
[
{
"time": "2017-03-11T09:57:43.26630619+01:00",
@ -283,7 +286,7 @@ instead of the default, set the environment variable like this:
.. code-block:: console
$ export TMPDIR=/var/tmp/restic-tmp
$ restic -r /tmp/backup backup ~/work
$ restic -r /srv/restic-repo backup ~/work

View file

@ -1,117 +0,0 @@
package archiver
import (
"context"
"io"
"time"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/errors"
"github.com/restic/chunker"
)
// Reader allows saving a stream of data to the repository.
type Reader struct {
restic.Repository
Tags []string
Hostname string
TimeStamp time.Time
}
// Archive reads data from the reader and saves it to the repo.
func (r *Reader) Archive(ctx context.Context, name string, rd io.Reader, p *restic.Progress) (*restic.Snapshot, restic.ID, error) {
if name == "" {
return nil, restic.ID{}, errors.New("no filename given")
}
debug.Log("start archiving %s", name)
sn, err := restic.NewSnapshot([]string{name}, r.Tags, r.Hostname, r.TimeStamp)
if err != nil {
return nil, restic.ID{}, err
}
p.Start()
defer p.Done()
repo := r.Repository
chnker := chunker.New(rd, repo.Config().ChunkerPolynomial)
ids := restic.IDs{}
var fileSize uint64
for {
chunk, err := chnker.Next(getBuf())
if errors.Cause(err) == io.EOF {
break
}
if err != nil {
return nil, restic.ID{}, errors.Wrap(err, "chunker.Next()")
}
id := restic.Hash(chunk.Data)
if !repo.Index().Has(id, restic.DataBlob) {
_, err := repo.SaveBlob(ctx, restic.DataBlob, chunk.Data, id)
if err != nil {
return nil, restic.ID{}, err
}
debug.Log("saved blob %v (%d bytes)\n", id, chunk.Length)
} else {
debug.Log("blob %v already saved in the repo\n", id)
}
freeBuf(chunk.Data)
ids = append(ids, id)
p.Report(restic.Stat{Bytes: uint64(chunk.Length)})
fileSize += uint64(chunk.Length)
}
tree := &restic.Tree{
Nodes: []*restic.Node{
{
Name: name,
AccessTime: time.Now(),
ModTime: time.Now(),
Type: "file",
Mode: 0644,
Size: fileSize,
UID: sn.UID,
GID: sn.GID,
User: sn.Username,
Content: ids,
},
},
}
treeID, err := repo.SaveTree(ctx, tree)
if err != nil {
return nil, restic.ID{}, err
}
sn.Tree = &treeID
debug.Log("tree saved as %v", treeID)
id, err := repo.SaveJSONUnpacked(ctx, restic.SnapshotFile, sn)
if err != nil {
return nil, restic.ID{}, err
}
debug.Log("snapshot saved as %v", id)
err = repo.Flush(ctx)
if err != nil {
return nil, restic.ID{}, err
}
err = repo.SaveIndex(ctx)
if err != nil {
return nil, restic.ID{}, err
}
return sn, id, nil
}

View file

@ -1,206 +0,0 @@
package archiver
import (
"bytes"
"context"
"errors"
"io"
"math/rand"
"testing"
"github.com/restic/restic/internal/checker"
"github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/restic"
)
func loadBlob(t *testing.T, repo restic.Repository, id restic.ID, buf []byte) int {
n, err := repo.LoadBlob(context.TODO(), restic.DataBlob, id, buf)
if err != nil {
t.Fatalf("LoadBlob(%v) returned error %v", id, err)
}
return n
}
func checkSavedFile(t *testing.T, repo restic.Repository, treeID restic.ID, name string, rd io.Reader) {
tree, err := repo.LoadTree(context.TODO(), treeID)
if err != nil {
t.Fatalf("LoadTree() returned error %v", err)
}
if len(tree.Nodes) != 1 {
t.Fatalf("wrong number of nodes for tree, want %v, got %v", 1, len(tree.Nodes))
}
node := tree.Nodes[0]
if node.Name != "fakefile" {
t.Fatalf("wrong filename, want %v, got %v", "fakefile", node.Name)
}
if len(node.Content) == 0 {
t.Fatalf("node.Content has length 0")
}
// check blobs
for i, id := range node.Content {
size, found := repo.LookupBlobSize(id, restic.DataBlob)
if !found {
t.Fatal("Failed to find blob", id.Str())
}
buf := restic.NewBlobBuffer(int(size))
n := loadBlob(t, repo, id, buf)
if n != len(buf) {
t.Errorf("wrong number of bytes read, want %d, got %d", len(buf), n)
}
buf2 := make([]byte, int(size))
_, err := io.ReadFull(rd, buf2)
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(buf, buf2) {
t.Fatalf("blob %d (%v) is wrong", i, id.Str())
}
}
}
// fakeFile returns a reader which yields deterministic pseudo-random data.
func fakeFile(t testing.TB, seed, size int64) io.Reader {
return io.LimitReader(restic.NewRandReader(rand.New(rand.NewSource(seed))), size)
}
func TestArchiveReader(t *testing.T) {
repo, cleanup := repository.TestRepository(t)
defer cleanup()
seed := rand.Int63()
size := int64(rand.Intn(50*1024*1024) + 50*1024*1024)
t.Logf("seed is 0x%016x, size is %v", seed, size)
f := fakeFile(t, seed, size)
r := &Reader{
Repository: repo,
Hostname: "localhost",
Tags: []string{"test"},
}
sn, id, err := r.Archive(context.TODO(), "fakefile", f, nil)
if err != nil {
t.Fatalf("ArchiveReader() returned error %v", err)
}
if id.IsNull() {
t.Fatalf("ArchiveReader() returned null ID")
}
t.Logf("snapshot saved as %v, tree is %v", id.Str(), sn.Tree.Str())
checkSavedFile(t, repo, *sn.Tree, "fakefile", fakeFile(t, seed, size))
checker.TestCheckRepo(t, repo)
}
func TestArchiveReaderNull(t *testing.T) {
repo, cleanup := repository.TestRepository(t)
defer cleanup()
r := &Reader{
Repository: repo,
Hostname: "localhost",
Tags: []string{"test"},
}
sn, id, err := r.Archive(context.TODO(), "fakefile", bytes.NewReader(nil), nil)
if err != nil {
t.Fatalf("ArchiveReader() returned error %v", err)
}
if id.IsNull() {
t.Fatalf("ArchiveReader() returned null ID")
}
t.Logf("snapshot saved as %v, tree is %v", id.Str(), sn.Tree.Str())
checker.TestCheckRepo(t, repo)
}
type errReader string
func (e errReader) Read([]byte) (int, error) {
return 0, errors.New(string(e))
}
func countSnapshots(t testing.TB, repo restic.Repository) int {
snapshots := 0
err := repo.List(context.TODO(), restic.SnapshotFile, func(id restic.ID, size int64) error {
snapshots++
return nil
})
if err != nil {
t.Fatal(err)
}
return snapshots
}
func TestArchiveReaderError(t *testing.T) {
repo, cleanup := repository.TestRepository(t)
defer cleanup()
r := &Reader{
Repository: repo,
Hostname: "localhost",
Tags: []string{"test"},
}
sn, id, err := r.Archive(context.TODO(), "fakefile", errReader("error returned by reading stdin"), nil)
if err == nil {
t.Errorf("expected error not returned")
}
if sn != nil {
t.Errorf("Snapshot should be nil, but isn't")
}
if !id.IsNull() {
t.Errorf("id should be null, but %v returned", id.Str())
}
n := countSnapshots(t, repo)
if n > 0 {
t.Errorf("expected zero snapshots, but got %d", n)
}
checker.TestCheckRepo(t, repo)
}
func BenchmarkArchiveReader(t *testing.B) {
repo, cleanup := repository.TestRepository(t)
defer cleanup()
const size = 50 * 1024 * 1024
buf := make([]byte, size)
_, err := io.ReadFull(fakeFile(t, 23, size), buf)
if err != nil {
t.Fatal(err)
}
r := &Reader{
Repository: repo,
Hostname: "localhost",
Tags: []string{"test"},
}
t.SetBytes(size)
t.ResetTimer()
for i := 0; i < t.N; i++ {
_, _, err := r.Archive(context.TODO(), "fakefile", bytes.NewReader(buf), nil)
if err != nil {
t.Fatal(err)
}
}
}

File diff suppressed because it is too large Load diff

View file

@ -1,145 +0,0 @@
package archiver
import (
"context"
"os"
"testing"
"github.com/restic/restic/internal/pipe"
"github.com/restic/restic/internal/walk"
)
var treeJobs = []string{
"foo/baz/subdir",
"foo/baz",
"foo",
"quu/bar/file1",
"quu/bar/file2",
"quu/foo/file1",
"quu/foo/file2",
"quu/foo/file3",
"quu/foo",
"quu/fooz",
"quu",
"yy/a",
"yy/b",
"yy",
}
var pipeJobs = []string{
"foo/baz/subdir",
"foo/baz/subdir2", // subdir2 added
"foo/baz",
"foo",
"quu/bar/.file1.swp", // file with . added
"quu/bar/file1",
"quu/bar/file2",
"quu/foo/file1", // file2 removed
"quu/foo/file3",
"quu/foo",
"quu",
"quv/file1", // files added and removed
"quv/file2",
"quv",
"yy",
"zz/file1", // files removed and added at the end
"zz/file2",
"zz",
}
var resultJobs = []struct {
path string
action string
}{
{"foo/baz/subdir", "same, not a file"},
{"foo/baz/subdir2", "new, no old job"},
{"foo/baz", "same, not a file"},
{"foo", "same, not a file"},
{"quu/bar/.file1.swp", "new, no old job"},
{"quu/bar/file1", "same, not a file"},
{"quu/bar/file2", "same, not a file"},
{"quu/foo/file1", "same, not a file"},
{"quu/foo/file3", "same, not a file"},
{"quu/foo", "same, not a file"},
{"quu", "same, not a file"},
{"quv/file1", "new, no old job"},
{"quv/file2", "new, no old job"},
{"quv", "new, no old job"},
{"yy", "same, not a file"},
{"zz/file1", "testPipeJob"},
{"zz/file2", "testPipeJob"},
{"zz", "testPipeJob"},
}
type testPipeJob struct {
path string
err error
fi os.FileInfo
res chan<- pipe.Result
}
func (j testPipeJob) Path() string { return j.path }
func (j testPipeJob) Fullpath() string { return j.path }
func (j testPipeJob) Error() error { return j.err }
func (j testPipeJob) Info() os.FileInfo { return j.fi }
func (j testPipeJob) Result() chan<- pipe.Result { return j.res }
func testTreeWalker(ctx context.Context, out chan<- walk.TreeJob) {
for _, e := range treeJobs {
select {
case <-ctx.Done():
return
case out <- walk.TreeJob{Path: e}:
}
}
close(out)
}
func testPipeWalker(ctx context.Context, out chan<- pipe.Job) {
for _, e := range pipeJobs {
select {
case <-ctx.Done():
return
case out <- testPipeJob{path: e}:
}
}
close(out)
}
func TestArchivePipe(t *testing.T) {
ctx := context.TODO()
treeCh := make(chan walk.TreeJob)
pipeCh := make(chan pipe.Job)
go testTreeWalker(ctx, treeCh)
go testPipeWalker(ctx, pipeCh)
p := archivePipe{Old: treeCh, New: pipeCh}
ch := make(chan pipe.Job)
go p.compare(ctx, ch)
i := 0
for job := range ch {
if job.Path() != resultJobs[i].path {
t.Fatalf("wrong job received: wanted %v, got %v", resultJobs[i], job)
}
// switch j := job.(type) {
// case archivePipeJob:
// if j.action != resultJobs[i].action {
// t.Fatalf("wrong action for %v detected: wanted %q, got %q", job.Path(), resultJobs[i].action, j.action)
// }
// case testPipeJob:
// if resultJobs[i].action != "testPipeJob" {
// t.Fatalf("unexpected testPipeJob, expected %q: %v", resultJobs[i].action, j)
// }
// }
i++
}
}

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,158 @@
package archiver
import (
"context"
"sync"
"github.com/restic/restic/internal/restic"
)
// Saver allows saving a blob.
type Saver interface {
SaveBlob(ctx context.Context, t restic.BlobType, data []byte, id restic.ID) (restic.ID, error)
Index() restic.Index
}
// BlobSaver concurrently saves incoming blobs to the repo.
type BlobSaver struct {
repo Saver
m sync.Mutex
knownBlobs restic.BlobSet
ch chan<- saveBlobJob
wg sync.WaitGroup
}
// NewBlobSaver returns a new blob saver. A worker pool is started; it is
// stopped when ctx is cancelled.
func NewBlobSaver(ctx context.Context, repo Saver, workers uint) *BlobSaver {
ch := make(chan saveBlobJob, 2*int(workers))
s := &BlobSaver{
repo: repo,
knownBlobs: restic.NewBlobSet(),
ch: ch,
}
for i := uint(0); i < workers; i++ {
s.wg.Add(1)
go s.worker(ctx, &s.wg, ch)
}
return s
}
// Save stores a blob in the repo. It checks the index and the known blobs
// before saving anything. The returned FutureBlob reports whether the blob
// was already known.
func (s *BlobSaver) Save(ctx context.Context, t restic.BlobType, buf Buffer) FutureBlob {
ch := make(chan saveBlobResponse, 1)
s.ch <- saveBlobJob{BlobType: t, buf: buf, ch: ch}
return FutureBlob{ch: ch, length: len(buf.Data)}
}
// FutureBlob is returned by Save and will return the data once it has been processed.
type FutureBlob struct {
ch <-chan saveBlobResponse
length int
res saveBlobResponse
}
func (s *FutureBlob) wait() {
res, ok := <-s.ch
if ok {
s.res = res
}
}
// ID returns the ID of the blob after it has been saved.
func (s *FutureBlob) ID() restic.ID {
s.wait()
return s.res.id
}
// Known returns whether or not the blob was already known.
func (s *FutureBlob) Known() bool {
s.wait()
return s.res.known
}
// Err returns the error which may have occurred during save.
func (s *FutureBlob) Err() error {
s.wait()
return s.res.err
}
// Length returns the length of the blob.
func (s *FutureBlob) Length() int {
return s.length
}
type saveBlobJob struct {
restic.BlobType
buf Buffer
ch chan<- saveBlobResponse
}
type saveBlobResponse struct {
id restic.ID
known bool
err error
}
func (s *BlobSaver) saveBlob(ctx context.Context, t restic.BlobType, buf []byte) saveBlobResponse {
id := restic.Hash(buf)
h := restic.BlobHandle{ID: id, Type: t}
// check if another goroutine has already saved this blob
known := false
s.m.Lock()
if s.knownBlobs.Has(h) {
known = true
} else {
s.knownBlobs.Insert(h)
known = false
}
s.m.Unlock()
// blob is already known, nothing to do
if known {
return saveBlobResponse{
id: id,
known: true,
}
}
// check if the repo knows this blob
if s.repo.Index().Has(id, t) {
return saveBlobResponse{
id: id,
known: true,
}
}
// otherwise we're responsible for saving it
_, err := s.repo.SaveBlob(ctx, t, buf, id)
return saveBlobResponse{
id: id,
known: false,
err: err,
}
}
func (s *BlobSaver) worker(ctx context.Context, wg *sync.WaitGroup, jobs <-chan saveBlobJob) {
defer wg.Done()
for {
var job saveBlobJob
select {
case <-ctx.Done():
return
case job = <-jobs:
}
job.ch <- s.saveBlob(ctx, job.BlobType, job.buf.Data)
close(job.ch)
job.buf.Release()
}
}
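
The enqueue-first, wait-later flow is easiest to see in isolation. Below is a
minimal standalone sketch of the same worker-pool-plus-future pattern that
BlobSaver implements; all names in it are invented for illustration and are
not restic API:

package main

import (
	"context"
	"crypto/sha256"
	"fmt"
	"sync"
)

type saveJob struct {
	data []byte
	ch   chan<- saveResult
}

type saveResult struct {
	id    [32]byte
	known bool
}

// future caches the single result sent on ch, like FutureBlob.wait does.
type future struct {
	ch  <-chan saveResult
	res saveResult
	ok  bool
}

func (f *future) result() saveResult {
	if !f.ok {
		f.res, f.ok = <-f.ch, true
	}
	return f.res
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	jobs := make(chan saveJob)
	var mu sync.Mutex
	seen := make(map[[32]byte]bool)

	// worker pool, stopped when ctx is cancelled
	for i := 0; i < 2; i++ {
		go func() {
			for {
				select {
				case <-ctx.Done():
					return
				case job := <-jobs:
					id := sha256.Sum256(job.data)
					mu.Lock()
					known := seen[id]
					seen[id] = true
					mu.Unlock()
					job.ch <- saveResult{id: id, known: known}
					close(job.ch)
				}
			}
		}()
	}

	// enqueue everything first, block on the results later
	var futures []*future
	for _, s := range []string{"foo", "bar", "foo"} {
		ch := make(chan saveResult, 1)
		jobs <- saveJob{data: []byte(s), ch: ch}
		futures = append(futures, &future{ch: ch})
	}
	for _, f := range futures {
		res := f.result()
		fmt.Printf("known=%v id=%x\n", res.known, res.id[:4])
	}
}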

View file

@ -0,0 +1,90 @@
package archiver
import (
"context"
"sync"
)
// Buffer is a reusable buffer. After the buffer has been used, Release should
// be called so the underlying slice is put back into the pool.
type Buffer struct {
Data []byte
Put func([]byte)
}
// Release puts the buffer back into the pool it came from.
func (b Buffer) Release() {
if b.Put != nil {
b.Put(b.Data)
}
}
// BufferPool implements a limited set of reusable buffers.
type BufferPool struct {
ch chan []byte
chM sync.Mutex
defaultSize int
clearOnce sync.Once
}
// NewBufferPool initializes a new buffer pool. When the context is cancelled,
// all buffers are released. The pool stores at most max items. New buffers are
// created with defaultSize, buffers that are larger are released and not put
// back.
func NewBufferPool(ctx context.Context, max int, defaultSize int) *BufferPool {
b := &BufferPool{
ch: make(chan []byte, max),
defaultSize: defaultSize,
}
go func() {
<-ctx.Done()
b.clear()
}()
return b
}
// Get returns a new buffer, either from the pool or newly allocated.
func (pool *BufferPool) Get() Buffer {
b := Buffer{Put: pool.put}
pool.chM.Lock()
defer pool.chM.Unlock()
select {
case buf := <-pool.ch:
b.Data = buf
default:
b.Data = make([]byte, pool.defaultSize)
}
return b
}
func (pool *BufferPool) put(b []byte) {
pool.chM.Lock()
defer pool.chM.Unlock()
select {
case pool.ch <- b:
default:
}
}
// Put returns a buffer to the pool for reuse.
func (pool *BufferPool) Put(b Buffer) {
if cap(b.Data) > pool.defaultSize {
return
}
pool.put(b.Data)
}
// clear empties the pool so that all buffers can be garbage collected.
func (pool *BufferPool) clear() {
pool.clearOnce.Do(func() {
ch := pool.ch
pool.chM.Lock()
pool.ch = nil
pool.chM.Unlock()
close(ch)
for range ch {
}
})
}
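
A short usage sketch, written here as a hypothetical test in the same package
(it is not part of this commit), shows the intended Get/Release cycle:

package archiver

import (
	"context"
	"testing"
)

// TestBufferPoolSketch is illustrative only.
func TestBufferPoolSketch(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	pool := NewBufferPool(ctx, 4, 1024) // at most 4 pooled buffers of 1 KiB

	buf := pool.Get()         // from the pool, or freshly allocated
	copy(buf.Data, "payload") // use the buffer
	buf.Release()             // hand the slice back to the pool

	// A buffer that grew beyond defaultSize is dropped by Put.
	big := pool.Get()
	big.Data = make([]byte, 4096)
	pool.Put(big) // silently discarded, since cap(big.Data) > defaultSize
}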

View file

@ -1,21 +0,0 @@
package archiver
import (
"sync"
"github.com/restic/chunker"
)
var bufPool = sync.Pool{
New: func() interface{} {
return make([]byte, chunker.MinSize)
},
}
func getBuf() []byte {
return bufPool.Get().([]byte)
}
func freeBuf(data []byte) {
bufPool.Put(data)
}

View file

@ -0,0 +1,228 @@
package archiver
import (
"context"
"io"
"os"
"sync"
"github.com/restic/chunker"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/fs"
"github.com/restic/restic/internal/restic"
)
// FutureFile is returned by Save and will return the data once it
// has been processed.
type FutureFile struct {
ch <-chan saveFileResponse
res saveFileResponse
}
func (s *FutureFile) wait() {
res, ok := <-s.ch
if ok {
s.res = res
}
}
// Node returns the node once it is available.
func (s *FutureFile) Node() *restic.Node {
s.wait()
return s.res.node
}
// Stats returns the stats for the file once they are available.
func (s *FutureFile) Stats() ItemStats {
s.wait()
return s.res.stats
}
// Err returns the error in case an error occurred.
func (s *FutureFile) Err() error {
s.wait()
return s.res.err
}
// FileSaver concurrently saves incoming files to the repo.
type FileSaver struct {
fs fs.FS
blobSaver *BlobSaver
saveFilePool *BufferPool
pol chunker.Pol
ch chan<- saveFileJob
wg sync.WaitGroup
CompleteBlob func(filename string, bytes uint64)
NodeFromFileInfo func(filename string, fi os.FileInfo) (*restic.Node, error)
}
// NewFileSaver returns a new file saver. A worker pool with the given number
// of workers is started; it is stopped when ctx is cancelled.
func NewFileSaver(ctx context.Context, fs fs.FS, blobSaver *BlobSaver, pol chunker.Pol, workers uint) *FileSaver {
ch := make(chan saveFileJob, workers)
s := &FileSaver{
fs: fs,
blobSaver: blobSaver,
saveFilePool: NewBufferPool(ctx, 3*int(workers), chunker.MaxSize/4),
pol: pol,
ch: ch,
CompleteBlob: func(string, uint64) {},
}
for i := uint(0); i < workers; i++ {
s.wg.Add(1)
go s.worker(ctx, &s.wg, ch)
}
return s
}
// CompleteFunc is called when the file has been saved.
type CompleteFunc func(*restic.Node, ItemStats)
// Save stores the file f and returns the data once it has been completed. The
// file is closed by Save.
func (s *FileSaver) Save(ctx context.Context, snPath string, file fs.File, fi os.FileInfo, start func(), complete CompleteFunc) FutureFile {
ch := make(chan saveFileResponse, 1)
s.ch <- saveFileJob{
snPath: snPath,
file: file,
fi: fi,
start: start,
complete: complete,
ch: ch,
}
return FutureFile{ch: ch}
}
type saveFileJob struct {
snPath string
file fs.File
fi os.FileInfo
ch chan<- saveFileResponse
complete CompleteFunc
start func()
}
type saveFileResponse struct {
node *restic.Node
stats ItemStats
err error
}
// saveFile stores the file f in the repo, then closes it.
func (s *FileSaver) saveFile(ctx context.Context, chnker *chunker.Chunker, snPath string, f fs.File, fi os.FileInfo, start func()) saveFileResponse {
start()
stats := ItemStats{}
debug.Log("%v", snPath)
node, err := s.NodeFromFileInfo(f.Name(), fi)
if err != nil {
_ = f.Close()
return saveFileResponse{err: err}
}
if node.Type != "file" {
_ = f.Close()
return saveFileResponse{err: errors.Errorf("node type %q is wrong", node.Type)}
}
// reuse the chunker
chnker.Reset(f, s.pol)
var results []FutureBlob
node.Content = []restic.ID{}
var size uint64
for {
buf := s.saveFilePool.Get()
chunk, err := chnker.Next(buf.Data)
if errors.Cause(err) == io.EOF {
buf.Release()
break
}
buf.Data = chunk.Data
size += uint64(chunk.Length)
if err != nil {
_ = f.Close()
return saveFileResponse{err: err}
}
// test if the context has been cancelled, return the error
if ctx.Err() != nil {
_ = f.Close()
return saveFileResponse{err: ctx.Err()}
}
res := s.blobSaver.Save(ctx, restic.DataBlob, buf)
results = append(results, res)
// test if the context has been cancelled, return the error
if ctx.Err() != nil {
_ = f.Close()
return saveFileResponse{err: ctx.Err()}
}
s.CompleteBlob(f.Name(), uint64(len(chunk.Data)))
}
err = f.Close()
if err != nil {
return saveFileResponse{err: err}
}
for _, res := range results {
// return the error if saving the blob failed
if res.Err() != nil {
return saveFileResponse{err: res.Err()}
}
if !res.Known() {
stats.DataBlobs++
stats.DataSize += uint64(res.Length())
}
node.Content = append(node.Content, res.ID())
}
node.Size = size
return saveFileResponse{
node: node,
stats: stats,
}
}
func (s *FileSaver) worker(ctx context.Context, wg *sync.WaitGroup, jobs <-chan saveFileJob) {
// a worker has one chunker which is reused for each file (because it contains a rather large buffer)
chnker := chunker.New(nil, s.pol)
defer wg.Done()
for {
var job saveFileJob
select {
case <-ctx.Done():
return
case job = <-jobs:
}
res := s.saveFile(ctx, chnker, job.snPath, job.file, job.fi, job.start)
if job.complete != nil {
job.complete(res.node, res.stats)
}
job.ch <- res
close(job.ch)
}
}
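
To show how the pieces are meant to be wired together, here is an illustrative
sketch (not part of this commit): it assumes repo satisfies Saver, that
fs.Local provides Open and Lstat as used elsewhere in this package, and that
restic.NodeFromFileInfo is a suitable node constructor.

package archiver

import (
	"context"

	"github.com/restic/restic/internal/fs"
	"github.com/restic/restic/internal/restic"
)

// saveOneFile stores a single file via a BlobSaver/FileSaver pair.
func saveOneFile(ctx context.Context, repo restic.Repository, filename string) (*restic.Node, error) {
	blobSaver := NewBlobSaver(ctx, repo, 2)
	fileSaver := NewFileSaver(ctx, fs.Local{}, blobSaver, repo.Config().ChunkerPolynomial, 2)
	// NodeFromFileInfo must be provided by the caller.
	fileSaver.NodeFromFileInfo = restic.NodeFromFileInfo

	f, err := fs.Local{}.Open(filename)
	if err != nil {
		return nil, err
	}
	fi, err := fs.Local{}.Lstat(filename)
	if err != nil {
		_ = f.Close()
		return nil, err
	}

	// enqueue the file; Save closes f and hands back a future
	future := fileSaver.Save(ctx, filename, f, fi, func() {}, nil)
	return future.Node(), future.Err()
}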

View file

@ -0,0 +1,53 @@
package archiver
import (
"context"
"time"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/restic"
)
// IndexUploader polls the repo for full indexes and uploads them.
type IndexUploader struct {
restic.Repository
// Start is called when an index is to be uploaded.
Start func()
// Complete is called when uploading an index has finished.
Complete func(id restic.ID)
}
// Upload periodically uploads full indexes to the repo. When shutdown is
// cancelled, the last index upload will finish and then Upload returns.
func (u IndexUploader) Upload(ctx, shutdown context.Context, interval time.Duration) error {
ticker := time.NewTicker(interval)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
return nil
case <-shutdown.Done():
return nil
case <-ticker.C:
full := u.Repository.Index().(*repository.MasterIndex).FullIndexes()
for _, idx := range full {
if u.Start != nil {
u.Start()
}
id, err := repository.SaveIndex(ctx, u.Repository, idx)
if err != nil {
debug.Log("save indexes returned an error: %v", err)
return err
}
if u.Complete != nil {
u.Complete(id)
}
}
}
}
}
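
A hypothetical caller (not part of this commit) could run the uploader next to
a backup and cancel shutdown once the backup has finished:

package archiver

import (
	"context"
	"fmt"
	"time"

	"github.com/restic/restic/internal/restic"
)

// runIndexUploader uploads full indexes every 30 seconds until shutdown is
// cancelled; illustrative only.
func runIndexUploader(ctx, shutdown context.Context, repo restic.Repository) error {
	u := IndexUploader{
		Repository: repo,
		Start:      func() { fmt.Println("uploading index") },
		Complete:   func(id restic.ID) { fmt.Println("saved index", id.Str()) },
	}
	return u.Upload(ctx, shutdown, 30*time.Second)
}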

View file

@ -0,0 +1,112 @@
package archiver
import (
"context"
"os"
"path/filepath"
"github.com/restic/restic/internal/fs"
)
// Scanner traverses the targets and calls the function Result with cumulative
// stats concerning the files and folders found. Select is used to decide which
// items should be included. Error is called when an error occurs.
type Scanner struct {
FS fs.FS
Select SelectFunc
Error ErrorFunc
Result func(item string, s ScanStats)
}
// NewScanner initializes a new Scanner.
func NewScanner(fs fs.FS) *Scanner {
return &Scanner{
FS: fs,
Select: func(item string, fi os.FileInfo) bool {
return true
},
Error: func(item string, fi os.FileInfo, err error) error {
return err
},
Result: func(item string, s ScanStats) {},
}
}
// ScanStats collects statistics.
type ScanStats struct {
Files, Dirs, Others uint
Bytes uint64
}
// Scan traverses the targets. The function Result is called for each new item
// found; after the traversal, the cumulative result is passed to Result with
// an empty item name.
func (s *Scanner) Scan(ctx context.Context, targets []string) error {
var stats ScanStats
for _, target := range targets {
abstarget, err := s.FS.Abs(target)
if err != nil {
return err
}
stats, err = s.scan(ctx, stats, abstarget)
if err != nil {
return err
}
if ctx.Err() != nil {
return ctx.Err()
}
}
s.Result("", stats)
return nil
}
func (s *Scanner) scan(ctx context.Context, stats ScanStats, target string) (ScanStats, error) {
if ctx.Err() != nil {
return stats, ctx.Err()
}
fi, err := s.FS.Lstat(target)
if err != nil {
// ignore error if the target is to be excluded anyway
if !s.Select(target, nil) {
return stats, nil
}
// else return filtered error
return stats, s.Error(target, fi, err)
}
if !s.Select(target, fi) {
return stats, nil
}
switch {
case fi.Mode().IsRegular():
stats.Files++
stats.Bytes += uint64(fi.Size())
case fi.Mode().IsDir():
if ctx.Err() != nil {
return stats, ctx.Err()
}
names, err := readdirnames(s.FS, target)
if err != nil {
return stats, s.Error(target, fi, err)
}
for _, name := range names {
stats, err = s.scan(ctx, stats, filepath.Join(target, name))
if err != nil {
return stats, err
}
}
stats.Dirs++
default:
stats.Others++
}
s.Result(target, stats)
return stats, nil
}
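
As an illustration (not part of this commit), a caller could count only .txt
files, reporting unreadable items instead of aborting; the function name and
the selection policy below are invented for this sketch:

package archiver

import (
	"context"
	"fmt"
	"os"
	"path/filepath"

	"github.com/restic/restic/internal/fs"
)

func scanTxtFiles(ctx context.Context, targets []string) error {
	sc := NewScanner(fs.Local{})
	// note: Select may be called with fi == nil when Lstat failed
	sc.Select = func(item string, fi os.FileInfo) bool {
		return fi == nil || fi.IsDir() || filepath.Ext(item) == ".txt"
	}
	sc.Error = func(item string, fi os.FileInfo, err error) error {
		fmt.Fprintf(os.Stderr, "skipping %v: %v\n", item, err)
		return nil // swallow the error and continue scanning
	}
	sc.Result = func(item string, s ScanStats) {
		if item == "" { // final cumulative result
			fmt.Printf("%d files, %d dirs, %d bytes\n", s.Files, s.Dirs, s.Bytes)
		}
	}
	return sc.Scan(ctx, targets)
}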

View file

@ -0,0 +1,333 @@
package archiver
import (
"context"
"os"
"path/filepath"
"runtime"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/restic/restic/internal/fs"
restictest "github.com/restic/restic/internal/test"
)
func TestScanner(t *testing.T) {
var tests = []struct {
name string
src TestDir
want map[string]ScanStats
selFn SelectFunc
}{
{
name: "include-all",
src: TestDir{
"other": TestFile{Content: "another file"},
"work": TestDir{
"foo": TestFile{Content: "foo"},
"foo.txt": TestFile{Content: "foo text file"},
"subdir": TestDir{
"other": TestFile{Content: "other in subdir"},
"bar.txt": TestFile{Content: "bar.txt in subdir"},
},
},
},
want: map[string]ScanStats{
filepath.FromSlash("other"): ScanStats{Files: 1, Bytes: 12},
filepath.FromSlash("work/foo"): ScanStats{Files: 2, Bytes: 15},
filepath.FromSlash("work/foo.txt"): ScanStats{Files: 3, Bytes: 28},
filepath.FromSlash("work/subdir/bar.txt"): ScanStats{Files: 4, Bytes: 45},
filepath.FromSlash("work/subdir/other"): ScanStats{Files: 5, Bytes: 60},
filepath.FromSlash("work/subdir"): ScanStats{Files: 5, Dirs: 1, Bytes: 60},
filepath.FromSlash("work"): ScanStats{Files: 5, Dirs: 2, Bytes: 60},
filepath.FromSlash("."): ScanStats{Files: 5, Dirs: 3, Bytes: 60},
filepath.FromSlash(""): ScanStats{Files: 5, Dirs: 3, Bytes: 60},
},
},
{
name: "select-txt",
src: TestDir{
"other": TestFile{Content: "another file"},
"work": TestDir{
"foo": TestFile{Content: "foo"},
"foo.txt": TestFile{Content: "foo text file"},
"subdir": TestDir{
"other": TestFile{Content: "other in subdir"},
"bar.txt": TestFile{Content: "bar.txt in subdir"},
},
},
},
selFn: func(item string, fi os.FileInfo) bool {
if fi.IsDir() {
return true
}
if filepath.Ext(item) == ".txt" {
return true
}
return false
},
want: map[string]ScanStats{
filepath.FromSlash("work/foo.txt"): ScanStats{Files: 1, Bytes: 13},
filepath.FromSlash("work/subdir/bar.txt"): ScanStats{Files: 2, Bytes: 30},
filepath.FromSlash("work/subdir"): ScanStats{Files: 2, Dirs: 1, Bytes: 30},
filepath.FromSlash("work"): ScanStats{Files: 2, Dirs: 2, Bytes: 30},
filepath.FromSlash("."): ScanStats{Files: 2, Dirs: 3, Bytes: 30},
filepath.FromSlash(""): ScanStats{Files: 2, Dirs: 3, Bytes: 30},
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
tempdir, cleanup := restictest.TempDir(t)
defer cleanup()
TestCreateFiles(t, tempdir, test.src)
back := fs.TestChdir(t, tempdir)
defer back()
cur, err := os.Getwd()
if err != nil {
t.Fatal(err)
}
sc := NewScanner(fs.Track{fs.Local{}})
if test.selFn != nil {
sc.Select = test.selFn
}
results := make(map[string]ScanStats)
sc.Result = func(item string, s ScanStats) {
var p string
var err error
if item != "" {
p, err = filepath.Rel(cur, item)
if err != nil {
panic(err)
}
}
results[p] = s
}
err = sc.Scan(ctx, []string{"."})
if err != nil {
t.Fatal(err)
}
if !cmp.Equal(test.want, results) {
t.Error(cmp.Diff(test.want, results))
}
})
}
}
func TestScannerError(t *testing.T) {
var tests = []struct {
name string
unix bool
src TestDir
result ScanStats
selFn SelectFunc
errFn func(t testing.TB, item string, fi os.FileInfo, err error) error
resFn func(t testing.TB, item string, s ScanStats)
prepare func(t testing.TB)
}{
{
name: "no-error",
src: TestDir{
"other": TestFile{Content: "another file"},
"work": TestDir{
"foo": TestFile{Content: "foo"},
"foo.txt": TestFile{Content: "foo text file"},
"subdir": TestDir{
"other": TestFile{Content: "other in subdir"},
"bar.txt": TestFile{Content: "bar.txt in subdir"},
},
},
},
result: ScanStats{Files: 5, Dirs: 3, Bytes: 60},
},
{
name: "unreadable-dir",
unix: true,
src: TestDir{
"other": TestFile{Content: "another file"},
"work": TestDir{
"foo": TestFile{Content: "foo"},
"foo.txt": TestFile{Content: "foo text file"},
"subdir": TestDir{
"other": TestFile{Content: "other in subdir"},
"bar.txt": TestFile{Content: "bar.txt in subdir"},
},
},
},
result: ScanStats{Files: 3, Dirs: 2, Bytes: 28},
prepare: func(t testing.TB) {
err := os.Chmod(filepath.Join("work", "subdir"), 0000)
if err != nil {
t.Fatal(err)
}
},
errFn: func(t testing.TB, item string, fi os.FileInfo, err error) error {
if item == filepath.FromSlash("work/subdir") {
return nil
}
return err
},
},
{
name: "removed-item",
src: TestDir{
"bar": TestFile{Content: "bar"},
"baz": TestFile{Content: "baz"},
"foo": TestFile{Content: "foo"},
"other": TestFile{Content: "other"},
},
result: ScanStats{Files: 3, Dirs: 1, Bytes: 11},
resFn: func(t testing.TB, item string, s ScanStats) {
if item == "bar" {
err := os.Remove("foo")
if err != nil {
t.Fatal(err)
}
}
},
errFn: func(t testing.TB, item string, fi os.FileInfo, err error) error {
if item == "foo" {
t.Logf("ignoring error for %v: %v", item, err)
return nil
}
return err
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
if test.unix && runtime.GOOS == "windows" {
t.Skipf("skip on windows")
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
tempdir, cleanup := restictest.TempDir(t)
defer cleanup()
TestCreateFiles(t, tempdir, test.src)
back := fs.TestChdir(t, tempdir)
defer back()
cur, err := os.Getwd()
if err != nil {
t.Fatal(err)
}
if test.prepare != nil {
test.prepare(t)
}
sc := NewScanner(fs.Track{fs.Local{}})
if test.selFn != nil {
sc.Select = test.selFn
}
var stats ScanStats
sc.Result = func(item string, s ScanStats) {
if item == "" {
stats = s
return
}
if test.resFn != nil {
p, relErr := filepath.Rel(cur, item)
if relErr != nil {
panic(relErr)
}
test.resFn(t, p, s)
}
}
if test.errFn != nil {
sc.Error = func(item string, fi os.FileInfo, err error) error {
p, relErr := filepath.Rel(cur, item)
if relErr != nil {
panic(relErr)
}
return test.errFn(t, p, fi, err)
}
}
err = sc.Scan(ctx, []string{"."})
if err != nil {
t.Fatal(err)
}
if stats != test.result {
t.Errorf("wrong final result, want\n %#v\ngot:\n %#v", test.result, stats)
}
})
}
}
func TestScannerCancel(t *testing.T) {
src := TestDir{
"bar": TestFile{Content: "bar"},
"baz": TestFile{Content: "baz"},
"foo": TestFile{Content: "foo"},
"other": TestFile{Content: "other"},
}
result := ScanStats{Files: 2, Bytes: 6}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
tempdir, cleanup := restictest.TempDir(t)
defer cleanup()
TestCreateFiles(t, tempdir, src)
back := fs.TestChdir(t, tempdir)
defer back()
cur, err := os.Getwd()
if err != nil {
t.Fatal(err)
}
sc := NewScanner(fs.Track{fs.Local{}})
var lastStats ScanStats
sc.Result = func(item string, s ScanStats) {
lastStats = s
if item == filepath.Join(cur, "baz") {
t.Logf("found baz")
cancel()
}
}
err = sc.Scan(ctx, []string{"."})
if err == nil {
t.Errorf("did not find expected error")
}
if err != context.Canceled {
t.Errorf("unexpected error found, want %v, got %v", context.Canceled, err)
}
if lastStats != result {
t.Errorf("wrong final result, want\n %#v\ngot:\n %#v", result, lastStats)
}
}

View file

@ -2,18 +2,342 @@ package archiver
import (
"context"
"io/ioutil"
"os"
"path"
"path/filepath"
"runtime"
"strings"
"testing"
"time"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/fs"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/test"
)
// TestSnapshot creates a new snapshot of path.
func TestSnapshot(t testing.TB, repo restic.Repository, path string, parent *restic.ID) *restic.Snapshot {
arch := New(repo)
sn, _, err := arch.Snapshot(context.TODO(), nil, []string{path}, []string{"test"}, "localhost", parent, time.Now())
arch := New(repo, fs.Local{}, Options{})
opts := SnapshotOptions{
Time: time.Now(),
Hostname: "localhost",
Tags: []string{"test"},
}
if parent != nil {
opts.ParentSnapshot = *parent
}
sn, _, err := arch.Snapshot(context.TODO(), []string{path}, opts)
if err != nil {
t.Fatal(err)
}
return sn
}
// TestDir describes a directory structure to create for a test.
type TestDir map[string]interface{}
func (d TestDir) String() string {
return "<Dir>"
}
// TestFile describes a file created for a test.
type TestFile struct {
Content string
}
func (f TestFile) String() string {
return "<File>"
}
// TestSymlink describes a symlink created for a test.
type TestSymlink struct {
Target string
}
func (s TestSymlink) String() string {
return "<Symlink>"
}
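
For orientation, a complete fixture built from these types could look like the
following (hypothetical example, not part of this commit):

package archiver

// exampleTree is an illustrative fixture combining all three item types.
var exampleTree = TestDir{
	"config":  TestFile{Content: "key = value\n"},
	"current": TestSymlink{Target: "releases/v1"},
	"releases": TestDir{
		"v1": TestDir{
			"app": TestFile{Content: "binary"},
		},
	},
}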
// TestCreateFiles creates a directory structure described by dir at target,
// which must already exist. On Windows, symlinks aren't created.
func TestCreateFiles(t testing.TB, target string, dir TestDir) {
test.Helper(t).Helper()
for name, item := range dir {
targetPath := filepath.Join(target, name)
switch it := item.(type) {
case TestFile:
err := ioutil.WriteFile(targetPath, []byte(it.Content), 0644)
if err != nil {
t.Fatal(err)
}
case TestSymlink:
if runtime.GOOS == "windows" {
continue
}
err := fs.Symlink(filepath.FromSlash(it.Target), targetPath)
if err != nil {
t.Fatal(err)
}
case TestDir:
err := fs.Mkdir(targetPath, 0755)
if err != nil {
t.Fatal(err)
}
TestCreateFiles(t, targetPath, it)
}
}
}
// TestWalkFunc is used by TestWalkFiles to traverse the dir. When an error is
// returned, traversal stops and the surrounding test is marked as failed.
type TestWalkFunc func(path string, item interface{}) error
// TestWalkFiles runs fn for each file/directory in dir; the filename is
// constructed with target as the prefix. Symlinks on Windows are ignored.
func TestWalkFiles(t testing.TB, target string, dir TestDir, fn TestWalkFunc) {
test.Helper(t).Helper()
for name, item := range dir {
targetPath := filepath.Join(target, name)
err := fn(targetPath, item)
if err != nil {
t.Fatalf("TestWalkFunc returned error for %v: %v", targetPath, err)
return
}
if dir, ok := item.(TestDir); ok {
TestWalkFiles(t, targetPath, dir, fn)
}
}
}
// fixpath strips the UNC prefix (starting with `\\?`) on Windows. On other systems, it's a noop.
func fixpath(item string) string {
if runtime.GOOS != "windows" {
return item
}
if strings.HasPrefix(item, `\\?`) {
return item[4:]
}
return item
}
// TestEnsureFiles tests if the directory structure at target is the same as
// described in dir.
func TestEnsureFiles(t testing.TB, target string, dir TestDir) {
test.Helper(t).Helper()
pathsChecked := make(map[string]struct{})
// first, test that all items are there
TestWalkFiles(t, target, dir, func(path string, item interface{}) error {
// ignore symlinks on Windows
if _, ok := item.(TestSymlink); ok && runtime.GOOS == "windows" {
// mark paths and parents as checked
pathsChecked[path] = struct{}{}
for parent := filepath.Dir(path); parent != target; parent = filepath.Dir(parent) {
pathsChecked[parent] = struct{}{}
}
return nil
}
fi, err := fs.Lstat(path)
if err != nil {
return err
}
switch node := item.(type) {
case TestDir:
if !fi.IsDir() {
t.Errorf("is not a directory: %v", path)
}
return nil
case TestFile:
if !fs.IsRegularFile(fi) {
t.Errorf("is not a regular file: %v", path)
return nil
}
content, err := ioutil.ReadFile(path)
if err != nil {
return err
}
if string(content) != node.Content {
t.Errorf("wrong content for %v, want %q, got %q", path, node.Content, content)
}
case TestSymlink:
if fi.Mode()&os.ModeType != os.ModeSymlink {
t.Errorf("is not a symlink: %v", path)
return nil
}
target, err := fs.Readlink(path)
if err != nil {
return err
}
if target != node.Target {
t.Errorf("wrong target for %v, want %v, got %v", path, node.Target, target)
}
}
pathsChecked[path] = struct{}{}
for parent := filepath.Dir(path); parent != target; parent = filepath.Dir(parent) {
pathsChecked[parent] = struct{}{}
}
return nil
})
// then, traverse the directory again, looking for additional files
err := fs.Walk(target, func(path string, fi os.FileInfo, err error) error {
if err != nil {
return err
}
path = fixpath(path)
if path == target {
return nil
}
_, ok := pathsChecked[path]
if !ok {
t.Errorf("additional item found: %v %v", path, fi.Mode())
}
return nil
})
if err != nil {
t.Fatal(err)
}
}
// TestEnsureFileContent checks if the file in the repo is the same as file.
func TestEnsureFileContent(ctx context.Context, t testing.TB, repo restic.Repository, filename string, node *restic.Node, file TestFile) {
if int(node.Size) != len(file.Content) {
t.Fatalf("%v: wrong node size: want %d, got %d", filename, node.Size, len(file.Content))
return
}
content := make([]byte, restic.CiphertextLength(len(file.Content)))
pos := 0
for _, id := range node.Content {
n, err := repo.LoadBlob(ctx, restic.DataBlob, id, content[pos:])
if err != nil {
t.Fatalf("error loading blob %v: %v", id.Str(), err)
return
}
pos += n
}
content = content[:pos]
if string(content) != file.Content {
t.Fatalf("%v: wrong content returned, want %q, got %q", filename, file.Content, content)
}
}
// TestEnsureTree checks that the tree ID in the repo matches dir. On Windows,
// symlinks are ignored.
func TestEnsureTree(ctx context.Context, t testing.TB, prefix string, repo restic.Repository, treeID restic.ID, dir TestDir) {
test.Helper(t).Helper()
tree, err := repo.LoadTree(ctx, treeID)
if err != nil {
t.Fatal(err)
return
}
var nodeNames []string
for _, node := range tree.Nodes {
nodeNames = append(nodeNames, node.Name)
}
debug.Log("%v (%v) %v", prefix, treeID.Str(), nodeNames)
checked := make(map[string]struct{})
for _, node := range tree.Nodes {
nodePrefix := path.Join(prefix, node.Name)
entry, ok := dir[node.Name]
if !ok {
t.Errorf("unexpected tree node %q found, want: %#v", node.Name, dir)
return
}
checked[node.Name] = struct{}{}
switch e := entry.(type) {
case TestDir:
if node.Type != "dir" {
t.Errorf("tree node %v has wrong type %q, want %q", nodePrefix, node.Type, "dir")
return
}
if node.Subtree == nil {
t.Errorf("tree node %v has nil subtree", nodePrefix)
return
}
TestEnsureTree(ctx, t, path.Join(prefix, node.Name), repo, *node.Subtree, e)
case TestFile:
if node.Type != "file" {
t.Errorf("tree node %v has wrong type %q, want %q", nodePrefix, node.Type, "file")
}
TestEnsureFileContent(ctx, t, repo, nodePrefix, node, e)
case TestSymlink:
// skip symlinks on windows
if runtime.GOOS == "windows" {
continue
}
if node.Type != "symlink" {
t.Errorf("tree node %v has wrong type %q, want %q", nodePrefix, node.Type, "file")
}
if e.Target != node.LinkTarget {
t.Errorf("symlink %v has wrong target, want %q, got %q", nodePrefix, e.Target, node.LinkTarget)
}
}
}
for name := range dir {
// skip checking symlinks on Windows
entry := dir[name]
if _, ok := entry.(TestSymlink); ok && runtime.GOOS == "windows" {
continue
}
_, ok := checked[name]
if !ok {
t.Errorf("tree %v: expected node %q not found, has: %v", prefix, name, nodeNames)
}
}
}
// TestEnsureSnapshot tests if the snapshot in the repo has exactly the same
// structure as dir. On Windows, symlinks are ignored.
func TestEnsureSnapshot(t testing.TB, repo restic.Repository, snapshotID restic.ID, dir TestDir) {
test.Helper(t).Helper()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
sn, err := restic.LoadSnapshot(ctx, repo, snapshotID)
if err != nil {
t.Fatal(err)
return
}
if sn.Tree == nil {
t.Fatal("snapshot has nil tree ID")
return
}
TestEnsureTree(ctx, t, "/", repo, *sn.Tree, dir)
}

View file

@ -0,0 +1,530 @@
package archiver
import (
"context"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/restic/restic/internal/fs"
"github.com/restic/restic/internal/repository"
restictest "github.com/restic/restic/internal/test"
)
// MockT passes through all logging functions from T, but catches Fail(),
// Error/f() and Fatal/f(). It is used to test test helper functions.
type MockT struct {
*testing.T
HasFailed bool
}
// Fail marks the function as having failed but continues execution.
func (t *MockT) Fail() {
t.T.Log("MockT Fail() called")
t.HasFailed = true
}
// Fatal logs its arguments and records the failure, but does not stop execution.
func (t *MockT) Fatal(args ...interface{}) {
t.T.Logf("MockT Fatal called with %v", args)
t.HasFailed = true
}
// Fatalf logs the formatted message and records the failure, but does not stop execution.
func (t *MockT) Fatalf(msg string, args ...interface{}) {
t.T.Logf("MockT Fatal called: "+msg, args...)
t.HasFailed = true
}
// Error logs its arguments and records the failure.
func (t *MockT) Error(args ...interface{}) {
t.T.Logf("MockT Error called with %v", args)
t.HasFailed = true
}
// Errorf logs the formatted message and records the failure.
func (t *MockT) Errorf(msg string, args ...interface{}) {
t.T.Logf("MockT Error called: "+msg, args...)
t.HasFailed = true
}
func createFilesAt(t testing.TB, targetdir string, files map[string]interface{}) {
for name, item := range files {
target := filepath.Join(targetdir, filepath.FromSlash(name))
err := fs.MkdirAll(filepath.Dir(target), 0700)
if err != nil {
t.Fatal(err)
}
switch it := item.(type) {
case TestFile:
err := ioutil.WriteFile(target, []byte(it.Content), 0600)
if err != nil {
t.Fatal(err)
}
case TestSymlink:
// ignore symlinks on windows
if runtime.GOOS == "windows" {
continue
}
err := fs.Symlink(filepath.FromSlash(it.Target), target)
if err != nil {
t.Fatal(err)
}
}
}
}
func TestTestCreateFiles(t *testing.T) {
var tests = []struct {
dir TestDir
files map[string]interface{}
}{
{
dir: TestDir{
"foo": TestFile{Content: "foo"},
"subdir": TestDir{
"subfile": TestFile{Content: "bar"},
},
"sub": TestDir{
"subsub": TestDir{
"link": TestSymlink{Target: "x/y/z"},
},
},
},
files: map[string]interface{}{
"foo": TestFile{Content: "foo"},
"subdir": TestDir{},
"subdir/subfile": TestFile{Content: "bar"},
"sub/subsub/link": TestSymlink{Target: "x/y/z"},
},
},
}
for i, test := range tests {
tempdir, cleanup := restictest.TempDir(t)
defer cleanup()
t.Run("", func(t *testing.T) {
tempdir := filepath.Join(tempdir, fmt.Sprintf("test-%d", i))
err := fs.MkdirAll(tempdir, 0700)
if err != nil {
t.Fatal(err)
}
TestCreateFiles(t, tempdir, test.dir)
for name, item := range test.files {
// don't check symlinks on windows
if runtime.GOOS == "windows" {
if _, ok := item.(TestSymlink); ok {
continue
}
}
targetPath := filepath.Join(tempdir, filepath.FromSlash(name))
fi, err := fs.Lstat(targetPath)
if err != nil {
t.Error(err)
continue
}
switch node := item.(type) {
case TestFile:
if !fs.IsRegularFile(fi) {
t.Errorf("is not regular file: %v", name)
continue
}
content, err := ioutil.ReadFile(targetPath)
if err != nil {
t.Error(err)
continue
}
if string(content) != node.Content {
t.Errorf("wrong content for %v: want %q, got %q", name, node.Content, content)
}
case TestSymlink:
if fi.Mode()&os.ModeType != os.ModeSymlink {
t.Errorf("is not symlink: %v, %o != %o", name, fi.Mode(), os.ModeSymlink)
continue
}
target, err := fs.Readlink(targetPath)
if err != nil {
t.Error(err)
continue
}
if target != node.Target {
t.Errorf("wrong target for %v: want %q, got %q", name, node.Target, target)
}
case TestDir:
if !fi.IsDir() {
t.Errorf("is not directory: %v", name)
}
}
}
})
}
}
func TestTestWalkFiles(t *testing.T) {
var tests = []struct {
dir TestDir
want map[string]string
}{
{
dir: TestDir{
"foo": TestFile{Content: "foo"},
"subdir": TestDir{
"subfile": TestFile{Content: "bar"},
},
"x": TestDir{
"y": TestDir{
"link": TestSymlink{Target: filepath.FromSlash("../../foo")},
},
},
},
want: map[string]string{
"foo": "<File>",
"subdir": "<Dir>",
filepath.FromSlash("subdir/subfile"): "<File>",
"x": "<Dir>",
filepath.FromSlash("x/y"): "<Dir>",
filepath.FromSlash("x/y/link"): "<Symlink>",
},
},
}
for _, test := range tests {
t.Run("", func(t *testing.T) {
tempdir, cleanup := restictest.TempDir(t)
defer cleanup()
got := make(map[string]string)
TestCreateFiles(t, tempdir, test.dir)
TestWalkFiles(t, tempdir, test.dir, func(path string, item interface{}) error {
p, err := filepath.Rel(tempdir, path)
if err != nil {
return err
}
got[p] = fmt.Sprintf("%v", item)
return nil
})
if !cmp.Equal(test.want, got) {
t.Error(cmp.Diff(test.want, got))
}
})
}
}
func TestTestEnsureFiles(t *testing.T) {
var tests = []struct {
expectFailure bool
files map[string]interface{}
want TestDir
unixOnly bool
}{
{
files: map[string]interface{}{
"foo": TestFile{Content: "foo"},
"subdir/subfile": TestFile{Content: "bar"},
"x/y/link": TestSymlink{Target: "../../foo"},
},
want: TestDir{
"foo": TestFile{Content: "foo"},
"subdir": TestDir{
"subfile": TestFile{Content: "bar"},
},
"x": TestDir{
"y": TestDir{
"link": TestSymlink{Target: "../../foo"},
},
},
},
},
{
expectFailure: true,
files: map[string]interface{}{
"foo": TestFile{Content: "foo"},
},
want: TestDir{
"foo": TestFile{Content: "foo"},
"subdir": TestDir{
"subfile": TestFile{Content: "bar"},
},
},
},
{
expectFailure: true,
files: map[string]interface{}{
"foo": TestFile{Content: "foo"},
"subdir/subfile": TestFile{Content: "bar"},
},
want: TestDir{
"foo": TestFile{Content: "foo"},
},
},
{
expectFailure: true,
files: map[string]interface{}{
"foo": TestFile{Content: "xxx"},
},
want: TestDir{
"foo": TestFile{Content: "foo"},
},
},
{
expectFailure: true,
files: map[string]interface{}{
"foo": TestSymlink{Target: "/xxx"},
},
want: TestDir{
"foo": TestFile{Content: "foo"},
},
},
{
expectFailure: true,
unixOnly: true,
files: map[string]interface{}{
"foo": TestFile{Content: "foo"},
},
want: TestDir{
"foo": TestSymlink{Target: "/xxx"},
},
},
{
expectFailure: true,
unixOnly: true,
files: map[string]interface{}{
"foo": TestSymlink{Target: "xxx"},
},
want: TestDir{
"foo": TestSymlink{Target: "/yyy"},
},
},
{
expectFailure: true,
files: map[string]interface{}{
"foo": TestDir{
"foo": TestFile{Content: "foo"},
},
},
want: TestDir{
"foo": TestFile{Content: "foo"},
},
},
{
expectFailure: true,
files: map[string]interface{}{
"foo": TestFile{Content: "foo"},
},
want: TestDir{
"foo": TestDir{
"foo": TestFile{Content: "foo"},
},
},
},
}
for _, test := range tests {
t.Run("", func(t *testing.T) {
if test.unixOnly && runtime.GOOS == "windows" {
t.Skip("skip on Windows")
return
}
tempdir, cleanup := restictest.TempDir(t)
defer cleanup()
createFilesAt(t, tempdir, test.files)
subtestT := testing.TB(t)
if test.expectFailure {
subtestT = &MockT{T: t}
}
TestEnsureFiles(subtestT, tempdir, test.want)
if test.expectFailure && !subtestT.(*MockT).HasFailed {
t.Fatal("expected failure of TestEnsureFiles not found")
}
})
}
}
func TestTestEnsureSnapshot(t *testing.T) {
var tests = []struct {
expectFailure bool
files map[string]interface{}
want TestDir
unixOnly bool
}{
{
files: map[string]interface{}{
"foo": TestFile{Content: "foo"},
filepath.FromSlash("subdir/subfile"): TestFile{Content: "bar"},
filepath.FromSlash("x/y/link"): TestSymlink{Target: filepath.FromSlash("../../foo")},
},
want: TestDir{
"target": TestDir{
"foo": TestFile{Content: "foo"},
"subdir": TestDir{
"subfile": TestFile{Content: "bar"},
},
"x": TestDir{
"y": TestDir{
"link": TestSymlink{Target: filepath.FromSlash("../../foo")},
},
},
},
},
},
{
expectFailure: true,
files: map[string]interface{}{
"foo": TestFile{Content: "foo"},
},
want: TestDir{
"target": TestDir{
"bar": TestFile{Content: "foo"},
},
},
},
{
expectFailure: true,
files: map[string]interface{}{
"foo": TestFile{Content: "foo"},
"bar": TestFile{Content: "bar"},
},
want: TestDir{
"target": TestDir{
"foo": TestFile{Content: "foo"},
},
},
},
{
expectFailure: true,
files: map[string]interface{}{
"foo": TestFile{Content: "foo"},
},
want: TestDir{
"target": TestDir{
"foo": TestFile{Content: "foo"},
"bar": TestFile{Content: "bar"},
},
},
},
{
expectFailure: true,
files: map[string]interface{}{
"foo": TestFile{Content: "foo"},
},
want: TestDir{
"target": TestDir{
"foo": TestDir{
"foo": TestFile{Content: "foo"},
},
},
},
},
{
expectFailure: true,
files: map[string]interface{}{
"foo": TestSymlink{Target: filepath.FromSlash("x/y/z")},
},
want: TestDir{
"target": TestDir{
"foo": TestFile{Content: "foo"},
},
},
},
{
expectFailure: true,
unixOnly: true,
files: map[string]interface{}{
"foo": TestSymlink{Target: filepath.FromSlash("x/y/z")},
},
want: TestDir{
"target": TestDir{
"foo": TestSymlink{Target: filepath.FromSlash("x/y/z2")},
},
},
},
{
expectFailure: true,
files: map[string]interface{}{
"foo": TestFile{Content: "foo"},
},
want: TestDir{
"target": TestDir{
"foo": TestFile{Content: "xxx"},
},
},
},
}
for _, test := range tests {
t.Run("", func(t *testing.T) {
if test.unixOnly && runtime.GOOS == "windows" {
t.Skip("skip on Windows")
return
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
tempdir, cleanup := restictest.TempDir(t)
defer cleanup()
targetDir := filepath.Join(tempdir, "target")
err := fs.Mkdir(targetDir, 0700)
if err != nil {
t.Fatal(err)
}
createFilesAt(t, targetDir, test.files)
back := fs.TestChdir(t, tempdir)
defer back()
repo, cleanup := repository.TestRepository(t)
defer cleanup()
arch := New(repo, fs.Local{}, Options{})
opts := SnapshotOptions{
Time: time.Now(),
Hostname: "localhost",
Tags: []string{"test"},
}
_, id, err := arch.Snapshot(ctx, []string{"."}, opts)
if err != nil {
t.Fatal(err)
}
t.Logf("snapshot saved as %v", id.Str())
subtestT := testing.TB(t)
if test.expectFailure {
subtestT = &MockT{T: t}
}
TestEnsureSnapshot(subtestT, repo, id, test.want)
if test.expectFailure && !subtestT.(*MockT).HasFailed {
t.Fatal("expected failure of TestEnsureSnapshot not found")
}
})
}
}

254
internal/archiver/tree.go Normal file
View file

@ -0,0 +1,254 @@
package archiver
import (
"fmt"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/fs"
)
// Tree recursively defines what a snapshot should look like when
// it is archived.
//
// When `Path` is set, this is a leaf node and the contents of `Path` should be
// inserted at this point in the tree.
//
// The attribute `Root` is used to distinguish between files/dirs which have
// the same name, but live in a separate directory on the local file system.
//
// `FileInfoPath` is used to extract metadata for intermediate (=non-leaf)
// trees.
type Tree struct {
Nodes map[string]Tree
Path string // where the files/dirs to be saved are found
FileInfoPath string // directory that is not itself included, but whose metadata is used for this node
Root string // parent directory of the tree
}
// pathComponents returns all path components of p. If a virtual directory
// (volume name on Windows) is added, virtualPrefix is set to true. See the
// tests for examples.
func pathComponents(fs fs.FS, p string, includeRelative bool) (components []string, virtualPrefix bool) {
volume := fs.VolumeName(p)
if !fs.IsAbs(p) {
if !includeRelative {
p = fs.Join(fs.Separator(), p)
}
}
p = fs.Clean(p)
for {
dir, file := fs.Dir(p), fs.Base(p)
if p == dir {
break
}
components = append(components, file)
p = dir
}
// reverse components
for i := len(components)/2 - 1; i >= 0; i-- {
opp := len(components) - 1 - i
components[i], components[opp] = components[opp], components[i]
}
if volume != "" {
// strip colon
if len(volume) == 2 && volume[1] == ':' {
volume = volume[:1]
}
components = append([]string{volume}, components...)
virtualPrefix = true
}
return components, virtualPrefix
}
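// For illustration (values taken from the cases in the tree tests):
//
//	pathComponents(fs.Local{}, "/foo/bar/baz", false)  -> ["foo", "bar", "baz"], false
//	pathComponents(fs.Local{}, "../foo/bar/baz", true) -> ["..", "foo", "bar", "baz"], false
//	pathComponents(fs.Local{}, `c:\foo\bar`, false)    -> ["c", "foo", "bar"], true (Windows)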
// rootDirectory returns the directory which contains the first element of target.
func rootDirectory(fs fs.FS, target string) string {
if target == "" {
return ""
}
if fs.IsAbs(target) {
return fs.Join(fs.VolumeName(target), fs.Separator())
}
target = fs.Clean(target)
pc, _ := pathComponents(fs, target, true)
rel := "."
for _, c := range pc {
if c == ".." {
rel = fs.Join(rel, c)
}
}
return rel
}
// Add adds a new file or directory to the tree.
func (t *Tree) Add(fs fs.FS, path string) error {
if path == "" {
panic("invalid path (empty string)")
}
if t.Nodes == nil {
t.Nodes = make(map[string]Tree)
}
pc, virtualPrefix := pathComponents(fs, path, false)
if len(pc) == 0 {
return errors.New("invalid path (no path components)")
}
name := pc[0]
root := rootDirectory(fs, path)
tree := Tree{Root: root}
origName := name
i := 0
for {
other, ok := t.Nodes[name]
if !ok {
break
}
i++
if other.Root == root {
tree = other
break
}
// resolve conflict and try again
name = fmt.Sprintf("%s-%d", origName, i)
continue
}
if len(pc) > 1 {
subroot := fs.Join(root, origName)
if virtualPrefix {
// use the original root dir if this is a virtual directory (volume name on Windows)
subroot = root
}
err := tree.add(fs, path, subroot, pc[1:])
if err != nil {
return err
}
tree.FileInfoPath = subroot
} else {
tree.Path = path
}
t.Nodes[name] = tree
return nil
}
// add adds a new target path into the tree.
func (t *Tree) add(fs fs.FS, target, root string, pc []string) error {
if len(pc) == 0 {
return errors.Errorf("invalid path %q", target)
}
if t.Nodes == nil {
t.Nodes = make(map[string]Tree)
}
name := pc[0]
if len(pc) == 1 {
tree, ok := t.Nodes[name]
if !ok {
t.Nodes[name] = Tree{Path: target}
return nil
}
if tree.Path != "" {
return errors.Errorf("path is already set for target %v", target)
}
tree.Path = target
t.Nodes[name] = tree
return nil
}
tree := Tree{}
if other, ok := t.Nodes[name]; ok {
tree = other
}
subroot := fs.Join(root, name)
tree.FileInfoPath = subroot
err := tree.add(fs, target, subroot, pc[1:])
if err != nil {
return err
}
t.Nodes[name] = tree
return nil
}
func (t Tree) String() string {
return formatTree(t, "")
}
// formatTree returns a text representation of the tree t.
func formatTree(t Tree, indent string) (s string) {
for name, node := range t.Nodes {
if node.Path != "" {
s += fmt.Sprintf("%v/%v, src %q\n", indent, name, node.Path)
continue
}
s += fmt.Sprintf("%v/%v, root %q, meta %q\n", indent, name, node.Root, node.FileInfoPath)
s += formatTree(node, indent+" ")
}
return s
}
// prune removes sub-trees of leaf nodes.
func prune(t *Tree) {
// if the current tree is a leaf node (Path is set), remove all child
// nodes; they are included automatically anyway.
if t.Path != "" && len(t.Nodes) > 0 {
t.FileInfoPath = ""
t.Nodes = nil
return
}
for i, subtree := range t.Nodes {
prune(&subtree)
t.Nodes[i] = subtree
}
}
// NewTree creates a Tree from the target files/directories.
func NewTree(fs fs.FS, targets []string) (*Tree, error) {
debug.Log("targets: %v", targets)
tree := &Tree{}
seen := make(map[string]struct{})
for _, target := range targets {
target = fs.Clean(target)
// skip duplicate targets
if _, ok := seen[target]; ok {
continue
}
seen[target] = struct{}{}
err := tree.Add(fs, target)
if err != nil {
return nil, err
}
}
prune(tree)
debug.Log("result:\n%v", tree)
return tree, nil
}
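To make the resulting structure concrete, here is a minimal usage sketch (not part of the commit; the printed output follows the String method above and is abbreviated):

package main

import (
	"fmt"

	"github.com/restic/restic/internal/archiver"
	"github.com/restic/restic/internal/fs"
)

func main() {
	// Two targets sharing the parent "foo" yield one intermediate node with
	// FileInfoPath "foo" and two leaves carrying the target paths.
	tree, err := archiver.NewTree(fs.Local{}, []string{"foo/user1", "foo/user2"})
	if err != nil {
		panic(err)
	}
	fmt.Print(tree)
	// Approximate output:
	//   /foo, root ".", meta "foo"
	//     /user1, src "foo/user1"
	//     /user2, src "foo/user2"
}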

View file

@ -0,0 +1,341 @@
package archiver
import (
"path/filepath"
"runtime"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/restic/restic/internal/fs"
)
func TestPathComponents(t *testing.T) {
var tests = []struct {
p string
c []string
virtual bool
rel bool
win bool
}{
{
p: "/foo/bar/baz",
c: []string{"foo", "bar", "baz"},
},
{
p: "/foo/bar/baz",
c: []string{"foo", "bar", "baz"},
rel: true,
},
{
p: "foo/bar/baz",
c: []string{"foo", "bar", "baz"},
},
{
p: "foo/bar/baz",
c: []string{"foo", "bar", "baz"},
rel: true,
},
{
p: "../foo/bar/baz",
c: []string{"foo", "bar", "baz"},
},
{
p: "../foo/bar/baz",
c: []string{"..", "foo", "bar", "baz"},
rel: true,
},
{
p: "c:/foo/bar/baz",
c: []string{"c", "foo", "bar", "baz"},
virtual: true,
rel: true,
win: true,
},
{
p: "c:/foo/../bar/baz",
c: []string{"c", "bar", "baz"},
virtual: true,
win: true,
},
{
p: `c:\foo\..\bar\baz`,
c: []string{"c", "bar", "baz"},
virtual: true,
win: true,
},
{
p: "c:/foo/../bar/baz",
c: []string{"c", "bar", "baz"},
virtual: true,
rel: true,
win: true,
},
{
p: `c:\foo\..\bar\baz`,
c: []string{"c", "bar", "baz"},
virtual: true,
rel: true,
win: true,
},
}
for _, test := range tests {
t.Run("", func(t *testing.T) {
if test.win && runtime.GOOS != "windows" {
t.Skip("skip test on unix")
}
c, v := pathComponents(fs.Local{}, filepath.FromSlash(test.p), test.rel)
if !cmp.Equal(test.c, c) {
t.Error(test.c, c)
}
if v != test.virtual {
t.Errorf("unexpected virtual prefix count returned, want %v, got %v", test.virtual, v)
}
})
}
}
func TestRootDirectory(t *testing.T) {
var tests = []struct {
target string
root string
unix bool
win bool
}{
{target: ".", root: "."},
{target: "foo/bar/baz", root: "."},
{target: "../foo/bar/baz", root: ".."},
{target: "..", root: ".."},
{target: "../../..", root: "../../.."},
{target: "/home/foo", root: "/", unix: true},
{target: "c:/home/foo", root: "c:/", win: true},
{target: `c:\home\foo`, root: `c:\`, win: true},
{target: "//host/share/foo", root: "//host/share/", win: true},
}
for _, test := range tests {
t.Run("", func(t *testing.T) {
if test.unix && runtime.GOOS == "windows" {
t.Skip("skip test on windows")
}
if test.win && runtime.GOOS != "windows" {
t.Skip("skip test on unix")
}
root := rootDirectory(fs.Local{}, filepath.FromSlash(test.target))
want := filepath.FromSlash(test.root)
if root != want {
t.Fatalf("wrong root directory, want %v, got %v", want, root)
}
})
}
}
func TestTree(t *testing.T) {
var tests = []struct {
targets []string
want Tree
unix bool
win bool
mustError bool
}{
{
targets: []string{"foo"},
want: Tree{Nodes: map[string]Tree{
"foo": Tree{Path: "foo", Root: "."},
}},
},
{
targets: []string{"foo", "bar", "baz"},
want: Tree{Nodes: map[string]Tree{
"foo": Tree{Path: "foo", Root: "."},
"bar": Tree{Path: "bar", Root: "."},
"baz": Tree{Path: "baz", Root: "."},
}},
},
{
targets: []string{"foo/user1", "foo/user2", "foo/other"},
want: Tree{Nodes: map[string]Tree{
"foo": Tree{Root: ".", FileInfoPath: "foo", Nodes: map[string]Tree{
"user1": Tree{Path: filepath.FromSlash("foo/user1")},
"user2": Tree{Path: filepath.FromSlash("foo/user2")},
"other": Tree{Path: filepath.FromSlash("foo/other")},
}},
}},
},
{
targets: []string{"foo/work/user1", "foo/work/user2"},
want: Tree{Nodes: map[string]Tree{
"foo": Tree{Root: ".", FileInfoPath: "foo", Nodes: map[string]Tree{
"work": Tree{FileInfoPath: filepath.FromSlash("foo/work"), Nodes: map[string]Tree{
"user1": Tree{Path: filepath.FromSlash("foo/work/user1")},
"user2": Tree{Path: filepath.FromSlash("foo/work/user2")},
}},
}},
}},
},
{
targets: []string{"foo/user1", "bar/user1", "foo/other"},
want: Tree{Nodes: map[string]Tree{
"foo": Tree{Root: ".", FileInfoPath: "foo", Nodes: map[string]Tree{
"user1": Tree{Path: filepath.FromSlash("foo/user1")},
"other": Tree{Path: filepath.FromSlash("foo/other")},
}},
"bar": Tree{Root: ".", FileInfoPath: "bar", Nodes: map[string]Tree{
"user1": Tree{Path: filepath.FromSlash("bar/user1")},
}},
}},
},
{
targets: []string{"../work"},
want: Tree{Nodes: map[string]Tree{
"work": Tree{Root: "..", Path: filepath.FromSlash("../work")},
}},
},
{
targets: []string{"../work/other"},
want: Tree{Nodes: map[string]Tree{
"work": Tree{Root: "..", FileInfoPath: filepath.FromSlash("../work"), Nodes: map[string]Tree{
"other": Tree{Path: filepath.FromSlash("../work/other")},
}},
}},
},
{
targets: []string{"foo/user1", "../work/other", "foo/user2"},
want: Tree{Nodes: map[string]Tree{
"foo": Tree{Root: ".", FileInfoPath: "foo", Nodes: map[string]Tree{
"user1": Tree{Path: filepath.FromSlash("foo/user1")},
"user2": Tree{Path: filepath.FromSlash("foo/user2")},
}},
"work": Tree{Root: "..", FileInfoPath: filepath.FromSlash("../work"), Nodes: map[string]Tree{
"other": Tree{Path: filepath.FromSlash("../work/other")},
}},
}},
},
{
targets: []string{"foo/user1", "../foo/other", "foo/user2"},
want: Tree{Nodes: map[string]Tree{
"foo": Tree{Root: ".", FileInfoPath: "foo", Nodes: map[string]Tree{
"user1": Tree{Path: filepath.FromSlash("foo/user1")},
"user2": Tree{Path: filepath.FromSlash("foo/user2")},
}},
"foo-1": Tree{Root: "..", FileInfoPath: filepath.FromSlash("../foo"), Nodes: map[string]Tree{
"other": Tree{Path: filepath.FromSlash("../foo/other")},
}},
}},
},
{
targets: []string{"foo/work", "foo/work/user2"},
want: Tree{Nodes: map[string]Tree{
"foo": Tree{Root: ".", FileInfoPath: "foo", Nodes: map[string]Tree{
"work": Tree{
Path: filepath.FromSlash("foo/work"),
},
}},
}},
},
{
targets: []string{"foo/work/user2", "foo/work"},
want: Tree{Nodes: map[string]Tree{
"foo": Tree{Root: ".", FileInfoPath: "foo", Nodes: map[string]Tree{
"work": Tree{
Path: filepath.FromSlash("foo/work"),
},
}},
}},
},
{
targets: []string{"foo/work/user2/data/secret", "foo"},
want: Tree{Nodes: map[string]Tree{
"foo": Tree{Root: ".", Path: "foo"},
}},
},
{
unix: true,
targets: []string{"/mnt/driveA", "/mnt/driveA/work/driveB"},
want: Tree{Nodes: map[string]Tree{
"mnt": Tree{Root: "/", FileInfoPath: filepath.FromSlash("/mnt"), Nodes: map[string]Tree{
"driveA": Tree{
Path: filepath.FromSlash("/mnt/driveA"),
},
}},
}},
},
{
targets: []string{"foo/work/user", "foo/work/user"},
want: Tree{Nodes: map[string]Tree{
"foo": Tree{Root: ".", FileInfoPath: "foo", Nodes: map[string]Tree{
"work": Tree{FileInfoPath: filepath.FromSlash("foo/work"), Nodes: map[string]Tree{
"user": Tree{Path: filepath.FromSlash("foo/work/user")},
}},
}},
}},
},
{
targets: []string{"./foo/work/user", "foo/work/user"},
want: Tree{Nodes: map[string]Tree{
"foo": Tree{Root: ".", FileInfoPath: "foo", Nodes: map[string]Tree{
"work": Tree{FileInfoPath: filepath.FromSlash("foo/work"), Nodes: map[string]Tree{
"user": Tree{Path: filepath.FromSlash("foo/work/user")},
}},
}},
}},
},
{
win: true,
targets: []string{`c:\users\foobar\temp`},
want: Tree{Nodes: map[string]Tree{
"c": Tree{Root: `c:\`, FileInfoPath: `c:\`, Nodes: map[string]Tree{
"users": Tree{FileInfoPath: `c:\users`, Nodes: map[string]Tree{
"foobar": Tree{FileInfoPath: `c:\users\foobar`, Nodes: map[string]Tree{
"temp": Tree{Path: `c:\users\foobar\temp`},
}},
}},
}},
}},
},
{
targets: []string{"."},
mustError: true,
},
{
targets: []string{".."},
mustError: true,
},
{
targets: []string{"../.."},
mustError: true,
},
}
for _, test := range tests {
t.Run("", func(t *testing.T) {
if test.unix && runtime.GOOS == "windows" {
t.Skip("skip test on windows")
}
if test.win && runtime.GOOS != "windows" {
t.Skip("skip test on unix")
}
tree, err := NewTree(fs.Local{}, test.targets)
if test.mustError {
if err == nil {
t.Fatal("expected error, got nil")
}
t.Logf("found expected error: %v", err)
return
}
if err != nil {
t.Fatal(err)
}
if !cmp.Equal(&test.want, tree) {
t.Error(cmp.Diff(&test.want, tree))
}
})
}
}

View file

@ -569,12 +569,24 @@ func (c *Checker) checkTree(id restic.ID, tree *restic.Tree) (errs []error) {
errs = append(errs, Error{TreeID: id, Err: errors.Errorf("file %q has nil blob list", node.Name)})
}
var size uint64
for b, blobID := range node.Content {
if blobID.IsNull() {
errs = append(errs, Error{TreeID: id, Err: errors.Errorf("file %q blob %d has null ID", node.Name, b)})
continue
}
blobs = append(blobs, blobID)
blobSize, found := c.repo.LookupBlobSize(blobID, restic.DataBlob)
if !found {
errs = append(errs, Error{TreeID: id, Err: errors.Errorf("file %q blob %d size could not be found", node.Name, b)})
}
size += uint64(blobSize)
}
if size != node.Size {
errs = append(errs, Error{
TreeID: id,
Err: errors.Errorf("file %q: metadata size (%v) and sum of blob sizes (%v) do not match", node.Name, node.Size, size),
})
}
case "dir":
if node.Subtree == nil {

View file

@ -9,7 +9,6 @@ import (
"path/filepath"
"sort"
"testing"
"time"
"github.com/restic/restic/internal/archiver"
"github.com/restic/restic/internal/checker"
@ -326,10 +325,8 @@ func TestCheckerModifiedData(t *testing.T) {
repo, cleanup := repository.TestRepository(t)
defer cleanup()
arch := archiver.New(repo)
_, id, err := arch.Snapshot(context.TODO(), nil, []string{"."}, nil, "localhost", nil, time.Now())
test.OK(t, err)
t.Logf("archived as %v", id.Str())
sn := archiver.TestSnapshot(t, repo, ".", nil)
t.Logf("archived as %v", sn.ID().Str())
beError := &errorBackend{Backend: repo.Backend()}
checkRepo := repository.New(beError)

16
internal/fs/const.go Normal file
View file

@ -0,0 +1,16 @@
package fs
import "syscall"
// Flags to OpenFile wrapping those of the underlying system. Not all flags may
// be implemented on a given system.
const (
O_RDONLY int = syscall.O_RDONLY // open the file read-only.
O_WRONLY int = syscall.O_WRONLY // open the file write-only.
O_RDWR int = syscall.O_RDWR // open the file read-write.
O_APPEND int = syscall.O_APPEND // append data to the file when writing.
O_CREATE int = syscall.O_CREAT // create a new file if none exists.
O_EXCL int = syscall.O_EXCL // used with O_CREATE, file must not exist
O_SYNC int = syscall.O_SYNC // open for synchronous I/O.
O_TRUNC int = syscall.O_TRUNC // if possible, truncate file when opened.
)

View file

@ -0,0 +1,8 @@
// +build !windows
package fs
import "syscall"
// O_NOFOLLOW instructs the kernel to not follow symlinks when opening a file.
const O_NOFOLLOW int = syscall.O_NOFOLLOW

View file

@ -0,0 +1,6 @@
// +build windows
package fs
// O_NOFOLLOW is a no-op on Windows.
const O_NOFOLLOW int = 0
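A short sketch of how these constants are meant to be combined (assumed usage; openNoFollow is a hypothetical helper, not part of the commit):

package fsutil

import "github.com/restic/restic/internal/fs"

// openNoFollow opens a file read-only without following symlinks. On
// Windows, O_NOFOLLOW is 0, so this degrades to a plain read-only open.
func openNoFollow(name string) (fs.File, error) {
	return fs.Local{}.OpenFile(name, fs.O_RDONLY|fs.O_NOFOLLOW, 0)
}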

View file

@ -1,25 +1,11 @@
package fs
import (
"io"
"os"
"path/filepath"
"time"
)
// File is an open file on a file system.
type File interface {
io.Reader
io.Writer
io.Closer
Fd() uintptr
Readdirnames(n int) ([]string, error)
Readdir(int) ([]os.FileInfo, error)
Seek(int64, int) (int64, error)
Stat() (os.FileInfo, error)
}
// Mkdir creates a new directory with the specified name and permission bits.
// If there is an error, it will be of type *PathError.
func Mkdir(name string, perm os.FileMode) error {

96
internal/fs/fs_local.go Normal file
View file

@ -0,0 +1,96 @@
package fs
import (
"os"
"path/filepath"
)
// Local is the local file system. Most methods are just passed on to the stdlib.
type Local struct{}
// statically ensure that Local implements FS.
var _ FS = &Local{}
// VolumeName returns leading volume name. Given "C:\foo\bar" it returns "C:"
// on Windows. Given "\\host\share\foo" it returns "\\host\share". On other
// platforms it returns "".
func (fs Local) VolumeName(path string) string {
return filepath.VolumeName(path)
}
// Open opens a file for reading.
func (fs Local) Open(name string) (File, error) {
f, err := os.Open(fixpath(name))
if err != nil {
return nil, err
}
return f, nil
}
// OpenFile is the generalized open call; most users will use Open
// or Create instead. It opens the named file with specified flag
// (O_RDONLY etc.) and perm, (0666 etc.) if applicable. If successful,
// methods on the returned File can be used for I/O.
// If there is an error, it will be of type *PathError.
func (fs Local) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
f, err := os.OpenFile(fixpath(name), flag, perm)
if err != nil {
return nil, err
}
return f, nil
}
// Stat returns a FileInfo describing the named file. If there is an error, it
// will be of type *PathError.
func (fs Local) Stat(name string) (os.FileInfo, error) {
return os.Stat(fixpath(name))
}
// Lstat returns the FileInfo structure describing the named file.
// If the file is a symbolic link, the returned FileInfo
// describes the symbolic link. Lstat makes no attempt to follow the link.
// If there is an error, it will be of type *PathError.
func (fs Local) Lstat(name string) (os.FileInfo, error) {
return os.Lstat(fixpath(name))
}
// Join joins any number of path elements into a single path, adding a
// Separator if necessary. Join calls Clean on the result; in particular, all
// empty strings are ignored. On Windows, the result is a UNC path if and only
// if the first path element is a UNC path.
func (fs Local) Join(elem ...string) string {
return filepath.Join(elem...)
}
// Separator returns the OS and FS dependent separator for dirs/subdirs/files.
func (fs Local) Separator() string {
return string(filepath.Separator)
}
// IsAbs reports whether the path is absolute.
func (fs Local) IsAbs(path string) bool {
return filepath.IsAbs(path)
}
// Abs returns an absolute representation of path. If the path is not absolute
// it will be joined with the current working directory to turn it into an
// absolute path. The absolute path name for a given file is not guaranteed to
// be unique. Abs calls Clean on the result.
func (fs Local) Abs(path string) (string, error) {
return filepath.Abs(path)
}
// Clean returns the cleaned path. For details, see filepath.Clean.
func (fs Local) Clean(p string) string {
return filepath.Clean(p)
}
// Base returns the last element of path.
func (fs Local) Base(path string) string {
return filepath.Base(path)
}
// Dir returns path without the last element.
func (fs Local) Dir(path string) string {
return filepath.Dir(path)
}

289
internal/fs/fs_reader.go Normal file
View file

@ -0,0 +1,289 @@
package fs
import (
"io"
"os"
"path"
"sync"
"syscall"
"time"
"github.com/restic/restic/internal/errors"
)
// Reader is a file system which provides a directory with a single file. When
// this file is opened for reading, the reader is passed through. The file can
// be opened exactly once; all subsequent open calls return syscall.EIO. For Lstat(),
// the provided FileInfo is returned.
type Reader struct {
Name string
io.ReadCloser
Mode os.FileMode
ModTime time.Time
Size int64
open sync.Once
}
// statically ensure that Reader implements FS.
var _ FS = &Reader{}
// VolumeName returns the leading volume name; for the Reader file system it
// is always the empty string.
func (fs *Reader) VolumeName(path string) string {
return ""
}
// Open opens a file for reading.
func (fs *Reader) Open(name string) (f File, err error) {
switch name {
case fs.Name:
fs.open.Do(func() {
f = newReaderFile(fs.ReadCloser, fs.fi())
})
if f == nil {
return nil, syscall.EIO
}
return f, nil
case "/", ".":
f = fakeDir{
entries: []os.FileInfo{fs.fi()},
}
return f, nil
}
return nil, syscall.ENOENT
}
func (fs *Reader) fi() os.FileInfo {
return fakeFileInfo{
name: fs.Name,
size: fs.Size,
mode: fs.Mode,
modtime: fs.ModTime,
}
}
// OpenFile is the generalized open call; most users will use Open
// or Create instead. It opens the named file with specified flag
// (O_RDONLY etc.) and perm, (0666 etc.) if applicable. If successful,
// methods on the returned File can be used for I/O.
// If there is an error, it will be of type *PathError.
func (fs *Reader) OpenFile(name string, flag int, perm os.FileMode) (f File, err error) {
if flag & ^(O_RDONLY|O_NOFOLLOW) != 0 {
return nil, errors.Errorf("invalid combination of flags 0x%x", flag)
}
fs.open.Do(func() {
f = newReaderFile(fs.ReadCloser, fs.fi())
})
if f == nil {
return nil, syscall.EIO
}
return f, nil
}
// Stat returns a FileInfo describing the named file. If there is an error, it
// will be of type *PathError.
func (fs *Reader) Stat(name string) (os.FileInfo, error) {
return fs.Lstat(name)
}
// Lstat returns the FileInfo structure describing the named file.
// If the file is a symbolic link, the returned FileInfo
// describes the symbolic link. Lstat makes no attempt to follow the link.
// If there is an error, it will be of type *PathError.
func (fs *Reader) Lstat(name string) (os.FileInfo, error) {
switch name {
case fs.Name:
return fs.fi(), nil
case "/", ".":
fi := fakeFileInfo{
name: name,
size: 0,
mode: 0755,
modtime: time.Now(),
}
return fi, nil
}
return nil, os.ErrNotExist
}
// Join joins any number of path elements into a single path, adding a
// Separator if necessary. Join calls Clean on the result; in particular, all
// empty strings are ignored. On Windows, the result is a UNC path if and only
// if the first path element is a UNC path.
func (fs *Reader) Join(elem ...string) string {
return path.Join(elem...)
}
// Separator returns the OS and FS dependent separator for dirs/subdirs/files.
func (fs *Reader) Separator() string {
return "/"
}
// IsAbs reports whether the path is absolute. For the Reader, this is always the case.
func (fs *Reader) IsAbs(p string) bool {
return true
}
// Abs returns an absolute representation of path. If the path is not absolute
// it will be joined with the current working directory to turn it into an
// absolute path. The absolute path name for a given file is not guaranteed to
// be unique. Abs calls Clean on the result.
//
// For the Reader, all paths are absolute.
func (fs *Reader) Abs(p string) (string, error) {
return path.Clean(p), nil
}
// Clean returns the cleaned path. For details, see filepath.Clean.
func (fs *Reader) Clean(p string) string {
return path.Clean(p)
}
// Base returns the last element of p.
func (fs *Reader) Base(p string) string {
return path.Base(p)
}
// Dir returns p without the last element.
func (fs *Reader) Dir(p string) string {
return path.Dir(p)
}
func newReaderFile(rd io.ReadCloser, fi os.FileInfo) readerFile {
return readerFile{
ReadCloser: rd,
fakeFile: fakeFile{
FileInfo: fi,
name: fi.Name(),
},
}
}
type readerFile struct {
io.ReadCloser
fakeFile
}
func (r readerFile) Read(p []byte) (int, error) {
return r.ReadCloser.Read(p)
}
func (r readerFile) Close() error {
return r.ReadCloser.Close()
}
// ensure that readerFile implements File
var _ File = readerFile{}
// fakeFile implements all File methods, but only returns errors for anything
// except Stat() and Name().
type fakeFile struct {
name string
os.FileInfo
}
// ensure that fakeFile implements File
var _ File = fakeFile{}
func (f fakeFile) Fd() uintptr {
return 0
}
func (f fakeFile) Readdirnames(n int) ([]string, error) {
return nil, os.ErrInvalid
}
func (f fakeFile) Readdir(n int) ([]os.FileInfo, error) {
return nil, os.ErrInvalid
}
func (f fakeFile) Seek(int64, int) (int64, error) {
return 0, os.ErrInvalid
}
func (f fakeFile) Write(p []byte) (int, error) {
return 0, os.ErrInvalid
}
func (f fakeFile) Read(p []byte) (int, error) {
return 0, os.ErrInvalid
}
func (f fakeFile) Close() error {
return nil
}
func (f fakeFile) Stat() (os.FileInfo, error) {
return f.FileInfo, nil
}
func (f fakeFile) Name() string {
return f.name
}
// fakeDir implements Readdirnames and Readdir, everything else is delegated to fakeFile.
type fakeDir struct {
entries []os.FileInfo
fakeFile
}
func (d fakeDir) Readdirnames(n int) ([]string, error) {
if n >= 0 {
return nil, errors.New("not implemented")
}
names := make([]string, 0, len(d.entries))
for _, entry := range d.entries {
names = append(names, entry.Name())
}
return names, nil
}
func (d fakeDir) Readdir(n int) ([]os.FileInfo, error) {
if n >= 0 {
return nil, errors.New("not implemented")
}
return d.entries, nil
}
// fakeFileInfo implements the bare minimum of os.FileInfo.
type fakeFileInfo struct {
name string
size int64
mode os.FileMode
modtime time.Time
sys interface{}
}
func (fi fakeFileInfo) Name() string {
return fi.name
}
func (fi fakeFileInfo) Size() int64 {
return fi.size
}
func (fi fakeFileInfo) Mode() os.FileMode {
return fi.mode
}
func (fi fakeFileInfo) ModTime() time.Time {
return fi.modtime
}
func (fi fakeFileInfo) IsDir() bool {
return fi.mode&os.ModeDir > 0
}
func (fi fakeFileInfo) Sys() interface{} {
return fi.sys
}
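As a usage sketch (mirroring the fields above and the test setup below; not part of the commit):

package main

import (
	"io/ioutil"
	"strings"
	"time"

	"github.com/restic/restic/internal/fs"
)

func main() {
	rd := &fs.Reader{
		Name:       "stdin",
		ReadCloser: ioutil.NopCloser(strings.NewReader("hello")),
		Mode:       0644,
		ModTime:    time.Now(),
		Size:       5,
	}
	f, err := rd.Open("stdin") // succeeds exactly once
	if err != nil {
		panic(err)
	}
	defer f.Close()
	// A second rd.Open("stdin") would now return syscall.EIO.
}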

View file

@ -0,0 +1,319 @@
package fs
import (
"bytes"
"io/ioutil"
"os"
"sort"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/restic/restic/internal/test"
)
func verifyFileContentOpen(t testing.TB, fs FS, filename string, want []byte) {
f, err := fs.Open(filename)
if err != nil {
t.Fatal(err)
}
buf, err := ioutil.ReadAll(f)
if err != nil {
t.Fatal(err)
}
err = f.Close()
if err != nil {
t.Fatal(err)
}
if !cmp.Equal(want, buf) {
t.Error(cmp.Diff(want, buf))
}
}
func verifyFileContentOpenFile(t testing.TB, fs FS, filename string, want []byte) {
f, err := fs.OpenFile(filename, O_RDONLY, 0)
if err != nil {
t.Fatal(err)
}
buf, err := ioutil.ReadAll(f)
if err != nil {
t.Fatal(err)
}
err = f.Close()
if err != nil {
t.Fatal(err)
}
if !cmp.Equal(want, buf) {
t.Error(cmp.Diff(want, buf))
}
}
func verifyDirectoryContents(t testing.TB, fs FS, dir string, want []string) {
f, err := fs.Open(dir)
if err != nil {
t.Fatal(err)
}
entries, err := f.Readdirnames(-1)
if err != nil {
t.Fatal(err)
}
err = f.Close()
if err != nil {
t.Fatal(err)
}
sort.Strings(want)
sort.Strings(entries)
if !cmp.Equal(want, entries) {
t.Error(cmp.Diff(want, entries))
}
}
type fiSlice []os.FileInfo
func (s fiSlice) Len() int {
return len(s)
}
func (s fiSlice) Less(i, j int) bool {
return s[i].Name() < s[j].Name()
}
func (s fiSlice) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
func verifyDirectoryContentsFI(t testing.TB, fs FS, dir string, want []os.FileInfo) {
f, err := fs.Open(dir)
if err != nil {
t.Fatal(err)
}
entries, err := f.Readdir(-1)
if err != nil {
t.Fatal(err)
}
err = f.Close()
if err != nil {
t.Fatal(err)
}
sort.Sort(fiSlice(want))
sort.Sort(fiSlice(entries))
if len(want) != len(entries) {
t.Errorf("wrong number of entries returned, want %d, got %d", len(want), len(entries))
}
max := len(want)
if len(entries) < max {
max = len(entries)
}
for i := 0; i < max; i++ {
fi1 := want[i]
fi2 := entries[i]
if fi1.Name() != fi2.Name() {
t.Errorf("entry %d: wrong value for Name: want %q, got %q", i, fi1.Name(), fi2.Name())
}
if fi1.IsDir() != fi2.IsDir() {
t.Errorf("entry %d: wrong value for IsDir: want %v, got %v", i, fi1.IsDir(), fi2.IsDir())
}
if fi1.Mode() != fi2.Mode() {
t.Errorf("entry %d: wrong value for Mode: want %v, got %v", i, fi1.Mode(), fi2.Mode())
}
if fi1.ModTime() != fi2.ModTime() {
t.Errorf("entry %d: wrong value for ModTime: want %v, got %v", i, fi1.ModTime(), fi2.ModTime())
}
if fi1.Size() != fi2.Size() {
t.Errorf("entry %d: wrong value for Size: want %v, got %v", i, fi1.Size(), fi2.Size())
}
if fi1.Sys() != fi2.Sys() {
t.Errorf("entry %d: wrong value for Sys: want %v, got %v", i, fi1.Sys(), fi2.Sys())
}
}
}
func checkFileInfo(t testing.TB, fi os.FileInfo, filename string, modtime time.Time, mode os.FileMode, isdir bool) {
if fi.IsDir() != isdir {
t.Errorf("IsDir returned %v, want %v", fi.IsDir(), isdir)
}
if fi.Mode() != mode {
t.Errorf("Mode() returned wrong value, want 0%o, got 0%o", mode, fi.Mode())
}
if !modtime.Equal(time.Time{}) && !fi.ModTime().Equal(modtime) {
t.Errorf("ModTime() returned wrong value, want %v, got %v", modtime, fi.ModTime())
}
if fi.Name() != filename {
t.Errorf("Name() returned wrong value, want %q, got %q", filename, fi.Name())
}
}
func TestFSReader(t *testing.T) {
data := test.Random(55, 1<<18+588)
now := time.Now()
filename := "foobar"
var tests = []struct {
name string
f func(t *testing.T, fs FS)
}{
{
name: "Readdirnames-slash",
f: func(t *testing.T, fs FS) {
verifyDirectoryContents(t, fs, "/", []string{filename})
},
},
{
name: "Readdirnames-current",
f: func(t *testing.T, fs FS) {
verifyDirectoryContents(t, fs, ".", []string{filename})
},
},
{
name: "Readdir-slash",
f: func(t *testing.T, fs FS) {
fi := fakeFileInfo{
mode: 0644,
modtime: now,
name: filename,
size: int64(len(data)),
}
verifyDirectoryContentsFI(t, fs, "/", []os.FileInfo{fi})
},
},
{
name: "Readdir-current",
f: func(t *testing.T, fs FS) {
fi := fakeFileInfo{
mode: 0644,
modtime: now,
name: filename,
size: int64(len(data)),
}
verifyDirectoryContentsFI(t, fs, ".", []os.FileInfo{fi})
},
},
{
name: "file/Open",
f: func(t *testing.T, fs FS) {
verifyFileContentOpen(t, fs, filename, data)
},
},
{
name: "file/OpenFile",
f: func(t *testing.T, fs FS) {
verifyFileContentOpenFile(t, fs, filename, data)
},
},
{
name: "file/Lstat",
f: func(t *testing.T, fs FS) {
fi, err := fs.Lstat(filename)
if err != nil {
t.Fatal(err)
}
checkFileInfo(t, fi, filename, now, 0644, false)
},
},
{
name: "file/Stat",
f: func(t *testing.T, fs FS) {
f, err := fs.Open(filename)
if err != nil {
t.Fatal(err)
}
fi, err := f.Stat()
if err != nil {
t.Fatal(err)
}
err = f.Close()
if err != nil {
t.Fatal(err)
}
checkFileInfo(t, fi, filename, now, 0644, false)
},
},
{
name: "dir/Lstat-slash",
f: func(t *testing.T, fs FS) {
fi, err := fs.Lstat("/")
if err != nil {
t.Fatal(err)
}
checkFileInfo(t, fi, "/", time.Time{}, 0755, false)
},
},
{
name: "dir/Lstat-current",
f: func(t *testing.T, fs FS) {
fi, err := fs.Lstat(".")
if err != nil {
t.Fatal(err)
}
checkFileInfo(t, fi, ".", time.Time{}, 0755, false)
},
},
{
name: "dir/Open-slash",
f: func(t *testing.T, fs FS) {
fi, err := fs.Lstat("/")
if err != nil {
t.Fatal(err)
}
checkFileInfo(t, fi, "/", time.Time{}, 0755, false)
},
},
{
name: "dir/Open-current",
f: func(t *testing.T, fs FS) {
fi, err := fs.Lstat(".")
if err != nil {
t.Fatal(err)
}
checkFileInfo(t, fi, ".", time.Time{}, 0755, false)
},
},
}
for _, test := range tests {
fs := &Reader{
Name: filename,
ReadCloser: ioutil.NopCloser(bytes.NewReader(data)),
Mode: 0644,
Size: int64(len(data)),
ModTime: now,
}
t.Run(test.name, func(t *testing.T) {
test.f(t, fs)
})
}
}

54
internal/fs/fs_track.go Normal file
View file

@ -0,0 +1,54 @@
package fs
import (
"fmt"
"os"
"runtime"
"runtime/debug"
)
// Track is a wrapper around another file system. It installs a finalizer on
// each open file; the finalizer panics when the garbage collector releases a
// file that has not been closed. This can be used to find resource leaks via
// open files.
type Track struct {
FS
}
// Open wraps the Open method of the underlying file system.
func (fs Track) Open(name string) (File, error) {
f, err := fs.FS.Open(fixpath(name))
if err != nil {
return nil, err
}
return newTrackFile(debug.Stack(), name, f), nil
}
// OpenFile wraps the OpenFile method of the underlying file system.
func (fs Track) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
f, err := fs.FS.OpenFile(fixpath(name), flag, perm)
if err != nil {
return nil, err
}
return newTrackFile(debug.Stack(), name, f), nil
}
type trackFile struct {
File
}
func newTrackFile(stack []byte, filename string, file File) *trackFile {
f := &trackFile{file}
runtime.SetFinalizer(f, func(f *trackFile) {
fmt.Fprintf(os.Stderr, "file %s not closed\n\nStacktrack:\n%s\n", filename, stack)
panic("file " + filename + " not closed")
})
return f
}
func (f *trackFile) Close() error {
runtime.SetFinalizer(f, nil)
return f.File.Close()
}
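A sketch of using Track in a test to surface leaked handles (assumed usage; the testdata path is a placeholder):

func TestNoLeakedFiles(t *testing.T) {
	tfs := fs.Track{FS: fs.Local{}}
	f, err := tfs.Open("testdata/file")
	if err != nil {
		t.Fatal(err)
	}
	// Without this Close, the finalizer eventually panics with
	// "file testdata/file not closed" once the GC collects f.
	if err := f.Close(); err != nil {
		t.Fatal(err)
	}
}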

43
internal/fs/helpers.go Normal file
View file

@ -0,0 +1,43 @@
package fs
import (
"os"
"testing"
"github.com/restic/restic/internal/test"
)
// IsRegularFile returns true if fi belongs to a normal file. If fi is nil,
// false is returned.
func IsRegularFile(fi os.FileInfo) bool {
if fi == nil {
return false
}
return fi.Mode()&(os.ModeType|os.ModeCharDevice) == 0
}
// TestChdir changes the current directory to dest; the returned function back restores the previous directory.
func TestChdir(t testing.TB, dest string) (back func()) {
test.Helper(t).Helper()
prev, err := os.Getwd()
if err != nil {
t.Fatal(err)
}
t.Logf("chdir to %v", dest)
err = os.Chdir(dest)
if err != nil {
t.Fatal(err)
}
return func() {
test.Helper(t).Helper()
t.Logf("chdir back to %v", prev)
err = os.Chdir(prev)
if err != nil {
t.Fatal(err)
}
}
}
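For example, the archiver tests elsewhere in this commit use TestChdir like this (sketch; restictest is the internal/test package):

tempdir, cleanup := restictest.TempDir(t)
defer cleanup()
back := fs.TestChdir(t, tempdir)
defer back() // restore the previous working directory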

38
internal/fs/interface.go Normal file
View file

@ -0,0 +1,38 @@
package fs
import (
"io"
"os"
)
// FS bundles all methods needed for a file system.
type FS interface {
Open(name string) (File, error)
OpenFile(name string, flag int, perm os.FileMode) (File, error)
Stat(name string) (os.FileInfo, error)
Lstat(name string) (os.FileInfo, error)
Join(elem ...string) string
Separator() string
Abs(path string) (string, error)
Clean(path string) string
VolumeName(path string) string
IsAbs(path string) bool
Dir(path string) string
Base(path string) string
}
// File is an open file on a file system.
type File interface {
io.Reader
io.Writer
io.Closer
Fd() uintptr
Readdirnames(n int) ([]string, error)
Readdir(int) ([]os.FileInfo, error)
Seek(int64, int) (int64, error)
Stat() (os.FileInfo, error)
Name() string
}

34
internal/fs/stat.go Normal file
View file

@ -0,0 +1,34 @@
package fs
import (
"os"
"time"
)
// ExtendedFileInfo is an extended stat_t, filled with attributes that are
// supported by most operating systems. The original FileInfo is embedded.
type ExtendedFileInfo struct {
os.FileInfo
DeviceID uint64 // ID of device containing the file
Inode uint64 // Inode number
Links uint64 // Number of hard links
UID uint32 // owner user ID
GID uint32 // owner group ID
Device uint64 // Device ID (if this is a device file)
BlockSize int64 // block size for filesystem IO
Blocks int64 // number of allocated filesystem blocks
Size int64 // file size in bytes
AccessTime time.Time // last access time stamp
ModTime time.Time // last (content) modification time stamp
}
// ExtendedStat returns an ExtendedFileInfo constructed from the os.FileInfo.
func ExtendedStat(fi os.FileInfo) ExtendedFileInfo {
if fi == nil {
panic("os.FileInfo is nil")
}
return extendedStat(fi)
}
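A small usage sketch (not part of the commit):

package main

import (
	"fmt"
	"os"

	"github.com/restic/restic/internal/fs"
)

func main() {
	fi, err := os.Lstat("/etc/hosts")
	if err != nil {
		panic(err)
	}
	ext := fs.ExtendedStat(fi)
	fmt.Printf("inode %d, %d links, uid %d, gid %d\n",
		ext.Inode, ext.Links, ext.UID, ext.GID)
}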

36
internal/fs/stat_bsd.go Normal file
View file

@ -0,0 +1,36 @@
// +build freebsd darwin
package fs
import (
"fmt"
"os"
"syscall"
"time"
)
// extendedStat extracts info into an ExtendedFileInfo for Unix-based operating systems.
func extendedStat(fi os.FileInfo) ExtendedFileInfo {
s, ok := fi.Sys().(*syscall.Stat_t)
if !ok {
panic(fmt.Sprintf("conversion to syscall.Stat_t failed, type is %T", fi.Sys()))
}
extFI := ExtendedFileInfo{
FileInfo: fi,
DeviceID: uint64(s.Dev),
Inode: uint64(s.Ino),
Links: uint64(s.Nlink),
UID: s.Uid,
GID: s.Gid,
Device: uint64(s.Rdev),
BlockSize: int64(s.Blksize),
Blocks: s.Blocks,
Size: s.Size,
AccessTime: time.Unix(s.Atimespec.Unix()),
ModTime: time.Unix(s.Mtimespec.Unix()),
}
return extFI
}

31
internal/fs/stat_test.go Normal file
View file

@ -0,0 +1,31 @@
package fs
import (
"io/ioutil"
"path/filepath"
"testing"
restictest "github.com/restic/restic/internal/test"
)
func TestExtendedStat(t *testing.T) {
tempdir, cleanup := restictest.TempDir(t)
defer cleanup()
filename := filepath.Join(tempdir, "file")
err := ioutil.WriteFile(filename, []byte("foobar"), 0640)
if err != nil {
t.Fatal(err)
}
fi, err := Lstat(filename)
if err != nil {
t.Fatal(err)
}
extFI := ExtendedStat(fi)
if !extFI.ModTime.Equal(fi.ModTime()) {
t.Errorf("extFI.ModTime does not match, want %v, got %v", fi.ModTime(), extFI.ModTime)
}
}

36
internal/fs/stat_unix.go Normal file
View file

@ -0,0 +1,36 @@
// +build !windows,!darwin,!freebsd
package fs
import (
"fmt"
"os"
"syscall"
"time"
)
// extendedStat extracts info into an ExtendedFileInfo for Unix-based operating systems.
func extendedStat(fi os.FileInfo) ExtendedFileInfo {
s, ok := fi.Sys().(*syscall.Stat_t)
if !ok {
panic(fmt.Sprintf("conversion to syscall.Stat_t failed, type is %T", fi.Sys()))
}
extFI := ExtendedFileInfo{
FileInfo: fi,
DeviceID: uint64(s.Dev),
Inode: s.Ino,
Links: uint64(s.Nlink),
UID: s.Uid,
GID: s.Gid,
Device: uint64(s.Rdev),
BlockSize: int64(s.Blksize),
Blocks: s.Blocks,
Size: s.Size,
AccessTime: time.Unix(s.Atim.Unix()),
ModTime: time.Unix(s.Mtim.Unix()),
}
return extFI
}

View file

@ -0,0 +1,31 @@
// +build windows
package fs
import (
"fmt"
"os"
"syscall"
"time"
)
// extendedStat extracts info into an ExtendedFileInfo for Windows.
func extendedStat(fi os.FileInfo) ExtendedFileInfo {
s, ok := fi.Sys().(*syscall.Win32FileAttributeData)
if !ok {
panic(fmt.Sprintf("conversion to syscall.Win32FileAttributeData failed, type is %T", fi.Sys()))
}
extFI := ExtendedFileInfo{
FileInfo: fi,
Size: int64(s.FileSizeLow) + int64(s.FileSizeHigh)<<32,
}
atime := syscall.NsecToTimespec(s.LastAccessTime.Nanoseconds())
extFI.AccessTime = time.Unix(atime.Unix())
mtime := syscall.NsecToTimespec(s.LastWriteTime.Nanoseconds())
extFI.ModTime = time.Unix(mtime.Unix())
return extFI
}

View file

@ -1,2 +0,0 @@
// Package pipe implements walking a directory in a deterministic order.
package pipe

View file

@ -1,292 +0,0 @@
package pipe
import (
"context"
"fmt"
"os"
"path/filepath"
"sort"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/fs"
)
type Result interface{}
type Job interface {
Path() string
Fullpath() string
Error() error
Info() os.FileInfo
Result() chan<- Result
}
type Entry struct {
basedir string
path string
info os.FileInfo
error error
result chan<- Result
// points to the old node if available, interface{} is used to prevent
// circular import
Node interface{}
}
func (e Entry) Path() string { return e.path }
func (e Entry) Fullpath() string { return filepath.Join(e.basedir, e.path) }
func (e Entry) Error() error { return e.error }
func (e Entry) Info() os.FileInfo { return e.info }
func (e Entry) Result() chan<- Result { return e.result }
type Dir struct {
basedir string
path string
error error
info os.FileInfo
Entries [](<-chan Result)
result chan<- Result
}
func (e Dir) Path() string { return e.path }
func (e Dir) Fullpath() string { return filepath.Join(e.basedir, e.path) }
func (e Dir) Error() error { return e.error }
func (e Dir) Info() os.FileInfo { return e.info }
func (e Dir) Result() chan<- Result { return e.result }
// readDirNames reads the directory named by dirname and returns
// a sorted list of directory entries.
// Taken from filepath/path.go.
func readDirNames(dirname string) ([]string, error) {
f, err := fs.Open(dirname)
if err != nil {
return nil, errors.Wrap(err, "Open")
}
names, err := f.Readdirnames(-1)
_ = f.Close()
if err != nil {
return nil, errors.Wrap(err, "Readdirnames")
}
sort.Strings(names)
return names, nil
}
// SelectFunc returns true for all items that should be included (files and
// dirs). If false is returned, files are ignored and dirs are not even walked.
type SelectFunc func(item string, fi os.FileInfo) bool
func walk(ctx context.Context, basedir, dir string, selectFunc SelectFunc, jobs chan<- Job, res chan<- Result) (excluded bool) {
debug.Log("start on %q, basedir %q", dir, basedir)
relpath, err := filepath.Rel(basedir, dir)
if err != nil {
panic(err)
}
info, err := fs.Lstat(dir)
if err != nil {
err = errors.Wrap(err, "Lstat")
debug.Log("error for %v: %v, res %p", dir, err, res)
select {
case jobs <- Dir{basedir: basedir, path: relpath, info: info, error: err, result: res}:
case <-ctx.Done():
}
return
}
if !selectFunc(dir, info) {
debug.Log("file %v excluded by filter, res %p", dir, res)
excluded = true
return
}
if !info.IsDir() {
debug.Log("sending file job for %v, res %p", dir, res)
select {
case jobs <- Entry{info: info, basedir: basedir, path: relpath, result: res}:
case <-ctx.Done():
}
return
}
debug.RunHook("pipe.readdirnames", dir)
names, err := readDirNames(dir)
if err != nil {
debug.Log("Readdirnames(%v) returned error: %v, res %p", dir, err, res)
select {
case <-ctx.Done():
case jobs <- Dir{basedir: basedir, path: relpath, info: info, error: err, result: res}:
}
return
}
// Insert breakpoint to allow testing behaviour with vanishing files
// between Readdir() and lstat()
debug.RunHook("pipe.walk1", relpath)
entries := make([]<-chan Result, 0, len(names))
for _, name := range names {
subpath := filepath.Join(dir, name)
fi, statErr := fs.Lstat(subpath)
if !selectFunc(subpath, fi) {
debug.Log("file %v excluded by filter", subpath)
continue
}
ch := make(chan Result, 1)
entries = append(entries, ch)
if statErr != nil {
statErr = errors.Wrap(statErr, "Lstat")
debug.Log("sending file job for %v, err %v, res %p", subpath, err, res)
select {
case jobs <- Entry{info: fi, error: statErr, basedir: basedir, path: filepath.Join(relpath, name), result: ch}:
case <-ctx.Done():
return
}
continue
}
// Insert breakpoint to allow testing behaviour with vanishing files
// between walk and open
debug.RunHook("pipe.walk2", filepath.Join(relpath, name))
walk(ctx, basedir, subpath, selectFunc, jobs, ch)
}
debug.Log("sending dirjob for %q, basedir %q, res %p", dir, basedir, res)
select {
case jobs <- Dir{basedir: basedir, path: relpath, info: info, Entries: entries, result: res}:
case <-ctx.Done():
}
return
}
// cleanupPath is used to clean a path. For a normal path, a slice with just
// the path is returned. For special cases such as "." and "/" the list of
// names within those paths is returned.
func cleanupPath(path string) ([]string, error) {
path = filepath.Clean(path)
if filepath.Dir(path) != path {
return []string{path}, nil
}
paths, err := readDirNames(path)
if err != nil {
return nil, err
}
for i, p := range paths {
paths[i] = filepath.Join(path, p)
}
return paths, nil
}
// Walk sends a Job for each file and directory it finds below the paths. When
// the channel done is closed, processing stops.
func Walk(ctx context.Context, walkPaths []string, selectFunc SelectFunc, jobs chan<- Job, res chan<- Result) {
var paths []string
for _, p := range walkPaths {
ps, err := cleanupPath(p)
if err != nil {
fmt.Fprintf(os.Stderr, "Readdirnames(%v): %v, skipping\n", p, err)
debug.Log("Readdirnames(%v) returned error: %v, skipping", p, err)
continue
}
paths = append(paths, ps...)
}
debug.Log("start on %v", paths)
defer func() {
debug.Log("output channel closed")
close(jobs)
}()
entries := make([]<-chan Result, 0, len(paths))
for _, path := range paths {
debug.Log("start walker for %v", path)
ch := make(chan Result, 1)
excluded := walk(ctx, filepath.Dir(path), path, selectFunc, jobs, ch)
if excluded {
debug.Log("walker for %v done, it was excluded by the filter", path)
continue
}
entries = append(entries, ch)
debug.Log("walker for %v done", path)
}
debug.Log("sending root node, res %p", res)
select {
case <-ctx.Done():
return
case jobs <- Dir{Entries: entries, result: res}:
}
debug.Log("walker done")
}
// Split feeds all elements read from inChan to dirChan and entChan.
func Split(inChan <-chan Job, dirChan chan<- Dir, entChan chan<- Entry) {
debug.Log("start")
defer debug.Log("done")
inCh := inChan
dirCh := dirChan
entCh := entChan
var (
dir Dir
ent Entry
)
// deactivate sending until we received at least one job
dirCh = nil
entCh = nil
for {
select {
case job, ok := <-inCh:
if !ok {
// channel is closed
return
}
if job == nil {
panic("nil job received")
}
// disable receiving until the current job has been sent
inCh = nil
switch j := job.(type) {
case Dir:
dir = j
dirCh = dirChan
case Entry:
ent = j
entCh = entChan
default:
panic(fmt.Sprintf("unknown job type %v", j))
}
case dirCh <- dir:
// disable sending, re-enable receiving
dirCh = nil
inCh = inChan
case entCh <- ent:
// disable sending, re-enable receiving
entCh = nil
inCh = inChan
}
}
}

View file

@ -1,600 +0,0 @@
package pipe_test
import (
"context"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"sync"
"testing"
"time"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/pipe"
rtest "github.com/restic/restic/internal/test"
)
type stats struct {
dirs, files int
}
func acceptAll(string, os.FileInfo) bool {
return true
}
func statPath(path string) (stats, error) {
var s stats
// count files and directories with filepath.Walk()
err := filepath.Walk(path, func(p string, fi os.FileInfo, err error) error {
if fi == nil {
return err
}
if fi.IsDir() {
s.dirs++
} else {
s.files++
}
return err
})
return s, err
}
const maxWorkers = 100
func TestPipelineWalkerWithSplit(t *testing.T) {
if rtest.TestWalkerPath == "" {
t.Skipf("walkerpath not set, skipping TestPipelineWalker")
}
var err error
if !filepath.IsAbs(rtest.TestWalkerPath) {
rtest.TestWalkerPath, err = filepath.Abs(rtest.TestWalkerPath)
rtest.OK(t, err)
}
before, err := statPath(rtest.TestWalkerPath)
rtest.OK(t, err)
t.Logf("walking path %s with %d dirs, %d files", rtest.TestWalkerPath,
before.dirs, before.files)
// account for top level dir
before.dirs++
after := stats{}
m := sync.Mutex{}
worker := func(wg *sync.WaitGroup, done <-chan struct{}, entCh <-chan pipe.Entry, dirCh <-chan pipe.Dir) {
defer wg.Done()
for {
select {
case e, ok := <-entCh:
if !ok {
// channel is closed
return
}
m.Lock()
after.files++
m.Unlock()
e.Result() <- true
case dir, ok := <-dirCh:
if !ok {
// channel is closed
return
}
// wait for all content
for _, ch := range dir.Entries {
<-ch
}
m.Lock()
after.dirs++
m.Unlock()
dir.Result() <- true
case <-done:
// pipeline was cancelled
return
}
}
}
var wg sync.WaitGroup
done := make(chan struct{})
entCh := make(chan pipe.Entry)
dirCh := make(chan pipe.Dir)
for i := 0; i < maxWorkers; i++ {
wg.Add(1)
go worker(&wg, done, entCh, dirCh)
}
jobs := make(chan pipe.Job, 200)
wg.Add(1)
go func() {
pipe.Split(jobs, dirCh, entCh)
close(entCh)
close(dirCh)
wg.Done()
}()
resCh := make(chan pipe.Result, 1)
pipe.Walk(context.TODO(), []string{rtest.TestWalkerPath}, acceptAll, jobs, resCh)
// wait for all workers to terminate
wg.Wait()
// wait for top-level blob
<-resCh
t.Logf("walked path %s with %d dirs, %d files", rtest.TestWalkerPath,
after.dirs, after.files)
rtest.Assert(t, before == after, "stats do not match, expected %v, got %v", before, after)
}
func TestPipelineWalker(t *testing.T) {
if rtest.TestWalkerPath == "" {
t.Skipf("walkerpath not set, skipping TestPipelineWalker")
}
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
var err error
if !filepath.IsAbs(rtest.TestWalkerPath) {
rtest.TestWalkerPath, err = filepath.Abs(rtest.TestWalkerPath)
rtest.OK(t, err)
}
before, err := statPath(rtest.TestWalkerPath)
rtest.OK(t, err)
t.Logf("walking path %s with %d dirs, %d files", rtest.TestWalkerPath,
before.dirs, before.files)
// account for top level dir
before.dirs++
after := stats{}
m := sync.Mutex{}
worker := func(ctx context.Context, wg *sync.WaitGroup, jobs <-chan pipe.Job) {
defer wg.Done()
for {
select {
case job, ok := <-jobs:
if !ok {
// channel is closed
return
}
rtest.Assert(t, job != nil, "job is nil")
switch j := job.(type) {
case pipe.Dir:
// wait for all content
for _, ch := range j.Entries {
<-ch
}
m.Lock()
after.dirs++
m.Unlock()
j.Result() <- true
case pipe.Entry:
m.Lock()
after.files++
m.Unlock()
j.Result() <- true
}
case <-ctx.Done():
// pipeline was cancelled
return
}
}
}
var wg sync.WaitGroup
jobs := make(chan pipe.Job)
for i := 0; i < maxWorkers; i++ {
wg.Add(1)
go worker(ctx, &wg, jobs)
}
resCh := make(chan pipe.Result, 1)
pipe.Walk(ctx, []string{rtest.TestWalkerPath}, acceptAll, jobs, resCh)
// wait for all workers to terminate
wg.Wait()
// wait for top-level blob
<-resCh
t.Logf("walked path %s with %d dirs, %d files", rtest.TestWalkerPath,
after.dirs, after.files)
rtest.Assert(t, before == after, "stats do not match, expected %v, got %v", before, after)
}
func createFile(filename, data string) error {
f, err := os.Create(filename)
if err != nil {
return err
}
defer f.Close()
_, err = f.Write([]byte(data))
if err != nil {
return err
}
return nil
}
func TestPipeWalkerError(t *testing.T) {
dir, err := ioutil.TempDir("", "restic-test-")
rtest.OK(t, err)
base := filepath.Base(dir)
var testjobs = []struct {
path []string
err bool
}{
{[]string{base, "a", "file_a"}, false},
{[]string{base, "a"}, false},
{[]string{base, "b"}, true},
{[]string{base, "c", "file_c"}, false},
{[]string{base, "c"}, false},
{[]string{base}, false},
{[]string{}, false},
}
rtest.OK(t, os.Mkdir(filepath.Join(dir, "a"), 0755))
rtest.OK(t, os.Mkdir(filepath.Join(dir, "b"), 0755))
rtest.OK(t, os.Mkdir(filepath.Join(dir, "c"), 0755))
rtest.OK(t, createFile(filepath.Join(dir, "a", "file_a"), "file a"))
rtest.OK(t, createFile(filepath.Join(dir, "b", "file_b"), "file b"))
rtest.OK(t, createFile(filepath.Join(dir, "c", "file_c"), "file c"))
ranHook := false
testdir := filepath.Join(dir, "b")
// install hook that removes the dir right before readdirnames()
debug.Hook("pipe.readdirnames", func(context interface{}) {
path := context.(string)
if path != testdir {
return
}
t.Logf("in hook, removing test file %v", testdir)
ranHook = true
rtest.OK(t, os.RemoveAll(testdir))
})
ctx, cancel := context.WithCancel(context.TODO())
ch := make(chan pipe.Job)
resCh := make(chan pipe.Result, 1)
go pipe.Walk(ctx, []string{dir}, acceptAll, ch, resCh)
i := 0
for job := range ch {
if i == len(testjobs) {
t.Errorf("too many jobs received")
break
}
p := filepath.Join(testjobs[i].path...)
if p != job.Path() {
t.Errorf("job %d has wrong path: expected %q, got %q", i, p, job.Path())
}
if testjobs[i].err {
if job.Error() == nil {
t.Errorf("job %d expected error but got nil", i)
}
} else {
if job.Error() != nil {
t.Errorf("job %d expected no error but got %v", i, job.Error())
}
}
i++
}
if i != len(testjobs) {
t.Errorf("expected %d jobs, got %d", len(testjobs), i)
}
cancel()
rtest.Assert(t, ranHook, "hook did not run")
rtest.OK(t, os.RemoveAll(dir))
}
func BenchmarkPipelineWalker(b *testing.B) {
if rtest.TestWalkerPath == "" {
b.Skipf("walkerpath not set, skipping BenchPipelineWalker")
}
var max time.Duration
m := sync.Mutex{}
fileWorker := func(ctx context.Context, wg *sync.WaitGroup, ch <-chan pipe.Entry) {
defer wg.Done()
for {
select {
case e, ok := <-ch:
if !ok {
// channel is closed
return
}
// simulate backup
//time.Sleep(10 * time.Millisecond)
e.Result() <- true
case <-ctx.Done():
// pipeline was cancelled
return
}
}
}
dirWorker := func(ctx context.Context, wg *sync.WaitGroup, ch <-chan pipe.Dir) {
defer wg.Done()
for {
select {
case dir, ok := <-ch:
if !ok {
// channel is closed
return
}
start := time.Now()
// wait for all content
for _, ch := range dir.Entries {
<-ch
}
d := time.Since(start)
m.Lock()
if d > max {
max = d
}
m.Unlock()
dir.Result() <- true
case <-ctx.Done():
// pipeline was cancelled
return
}
}
}
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
for i := 0; i < b.N; i++ {
max = 0
entCh := make(chan pipe.Entry, 200)
dirCh := make(chan pipe.Dir, 200)
var wg sync.WaitGroup
b.Logf("starting %d workers", maxWorkers)
for i := 0; i < maxWorkers; i++ {
wg.Add(2)
go dirWorker(ctx, &wg, dirCh)
go fileWorker(ctx, &wg, entCh)
}
jobs := make(chan pipe.Job, 200)
wg.Add(1)
go func() {
pipe.Split(jobs, dirCh, entCh)
close(entCh)
close(dirCh)
wg.Done()
}()
resCh := make(chan pipe.Result, 1)
pipe.Walk(ctx, []string{rtest.TestWalkerPath}, acceptAll, jobs, resCh)
// wait for all workers to terminate
wg.Wait()
// wait for final result
<-resCh
b.Logf("max duration for a dir: %v", max)
}
}
func TestPipelineWalkerMultiple(t *testing.T) {
if rtest.TestWalkerPath == "" {
t.Skipf("walkerpath not set, skipping TestPipelineWalker")
}
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
paths, err := filepath.Glob(filepath.Join(rtest.TestWalkerPath, "*"))
rtest.OK(t, err)
before, err := statPath(rtest.TestWalkerPath)
rtest.OK(t, err)
t.Logf("walking paths %v with %d dirs, %d files", paths,
before.dirs, before.files)
after := stats{}
m := sync.Mutex{}
worker := func(ctx context.Context, wg *sync.WaitGroup, jobs <-chan pipe.Job) {
defer wg.Done()
for {
select {
case job, ok := <-jobs:
if !ok {
// channel is closed
return
}
rtest.Assert(t, job != nil, "job is nil")
switch j := job.(type) {
case pipe.Dir:
// wait for all content
for _, ch := range j.Entries {
<-ch
}
m.Lock()
after.dirs++
m.Unlock()
j.Result() <- true
case pipe.Entry:
m.Lock()
after.files++
m.Unlock()
j.Result() <- true
}
case <-ctx.Done():
// pipeline was cancelled
return
}
}
}
var wg sync.WaitGroup
jobs := make(chan pipe.Job)
for i := 0; i < maxWorkers; i++ {
wg.Add(1)
go worker(ctx, &wg, jobs)
}
resCh := make(chan pipe.Result, 1)
pipe.Walk(ctx, paths, acceptAll, jobs, resCh)
// wait for all workers to terminate
wg.Wait()
// wait for top-level blob
<-resCh
t.Logf("walked %d paths with %d dirs, %d files", len(paths), after.dirs, after.files)
rtest.Assert(t, before == after, "stats do not match, expected %v, got %v", before, after)
}
func dirsInPath(path string) int {
if path == "/" || path == "." || path == "" {
return 0
}
n := 0
for dir := path; dir != "/" && dir != "."; dir = filepath.Dir(dir) {
n++
}
return n
}
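// A quick hedged example of the counting above: dirsInPath("a/b/c") returns 3
// (it counts "a/b/c", "a/b" and "a"), while dirsInPath("/"), dirsInPath(".")
// and dirsInPath("") all return 0.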
func TestPipeWalkerRoot(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skipf("not running TestPipeWalkerRoot on %s", runtime.GOOS)
return
}
cwd, err := os.Getwd()
rtest.OK(t, err)
testPaths := []string{
string(filepath.Separator),
".",
cwd,
}
for _, path := range testPaths {
testPipeWalkerRootWithPath(path, t)
}
}
func testPipeWalkerRootWithPath(path string, t *testing.T) {
pattern := filepath.Join(path, "*")
rootPaths, err := filepath.Glob(pattern)
rtest.OK(t, err)
for i, p := range rootPaths {
rootPaths[i], err = filepath.Rel(path, p)
rtest.OK(t, err)
}
t.Logf("paths in %v (pattern %q) expanded to %v items", path, pattern, len(rootPaths))
jobCh := make(chan pipe.Job)
var jobs []pipe.Job
worker := func(wg *sync.WaitGroup) {
defer wg.Done()
for job := range jobCh {
jobs = append(jobs, job)
}
}
var wg sync.WaitGroup
wg.Add(1)
go worker(&wg)
filter := func(p string, fi os.FileInfo) bool {
p, err := filepath.Rel(path, p)
rtest.OK(t, err)
return dirsInPath(p) <= 1
}
resCh := make(chan pipe.Result, 1)
pipe.Walk(context.TODO(), []string{path}, filter, jobCh, resCh)
wg.Wait()
t.Logf("received %d jobs", len(jobs))
for i, job := range jobs[:len(jobs)-1] {
path := job.Path()
if path == "." || path == ".." || path == string(filepath.Separator) {
t.Errorf("job %v has invalid path %q", i, path)
}
}
lastPath := jobs[len(jobs)-1].Path()
if lastPath != "" {
t.Errorf("last job has non-empty path %q", lastPath)
}
if len(jobs) < len(rootPaths) {
t.Errorf("want at least %v jobs, got %v for path %v\n", len(rootPaths), len(jobs), path)
}
}


@ -8,7 +8,7 @@ import (
var bufPool = sync.Pool{
New: func() interface{} {
return make([]byte, chunker.MinSize)
return make([]byte, chunker.MaxSize/3)
},
}


@ -214,11 +214,11 @@ func (r *Repository) SaveAndEncrypt(ctx context.Context, t restic.BlobType, data
// get buf from the pool
ciphertext := getBuf()
defer freeBuf(ciphertext)
ciphertext = ciphertext[:0]
nonce := crypto.NewRandomNonce()
ciphertext = append(ciphertext, nonce...)
defer freeBuf(ciphertext)
// encrypt blob
ciphertext = r.key.Seal(ciphertext, nonce, data, nil)


@ -42,6 +42,7 @@ const testChunkerPol = chunker.Pol(0x3DA3358B4DC173)
// password. If be is nil, an in-memory backend is used. A constant polynomial
// is used for the chunker and low-security test parameters.
func TestRepositoryWithBackend(t testing.TB, be restic.Backend) (r restic.Repository, cleanup func()) {
test.Helper(t).Helper()
TestUseLowSecurityKDFParameters(t)
restic.TestDisableCheckPolynomial(t)
@ -70,6 +71,7 @@ func TestRepositoryWithBackend(t testing.TB, be restic.Backend) (r restic.Reposi
// a non-existing directory, a local backend is created there and this is used
// instead. The directory is not removed, but left there for inspection.
func TestRepository(t testing.TB) (r restic.Repository, cleanup func()) {
test.Helper(t).Helper()
dir := os.Getenv("RESTIC_TEST_REPO")
if dir != "" {
_, err := os.Stat(dir)


@ -517,45 +517,6 @@ func (node Node) sameExtendedAttributes(other Node) bool {
return true
}
// IsNewer returns true if the file has been updated since the last Stat().
func (node *Node) IsNewer(path string, fi os.FileInfo) bool {
if node.Type != "file" {
debug.Log("node %v is newer: not file", path)
return true
}
tpe := nodeTypeFromFileInfo(fi)
if node.Name != fi.Name() || node.Type != tpe {
debug.Log("node %v is newer: name or type changed", path)
return true
}
size := uint64(fi.Size())
extendedStat, ok := toStatT(fi.Sys())
if !ok {
if !node.ModTime.Equal(fi.ModTime()) ||
node.Size != size {
debug.Log("node %v is newer: timestamp or size changed", path)
return true
}
return false
}
inode := extendedStat.ino()
if !node.ModTime.Equal(fi.ModTime()) ||
!node.ChangeTime.Equal(changeTime(extendedStat)) ||
node.Inode != uint64(inode) ||
node.Size != size {
debug.Log("node %v is newer: timestamp, size or inode changed", path)
return true
}
debug.Log("node %v is not newer", path)
return false
}
func (node *Node) fillUser(stat statT) error {
node.UID = stat.uid()
node.GID = stat.gid()
@ -635,6 +596,10 @@ func lookupGroup(gid string) (string, error) {
func (node *Node) fillExtra(path string, fi os.FileInfo) error {
stat, ok := toStatT(fi.Sys())
if !ok {
// fill minimal info with current values for uid, gid
node.UID = uint32(os.Getuid())
node.GID = uint32(os.Getgid())
node.ChangeTime = node.ModTime
return nil
}


@ -346,27 +346,6 @@ func TestRestorer(t *testing.T) {
}
}
func chdir(t testing.TB, target string) func() {
prev, err := os.Getwd()
if err != nil {
t.Fatal(err)
}
t.Logf("chdir to %v", target)
err = os.Chdir(target)
if err != nil {
t.Fatal(err)
}
return func() {
t.Logf("chdir back to %v", prev)
err = os.Chdir(prev)
if err != nil {
t.Fatal(err)
}
}
}
func TestRestorerRelative(t *testing.T) {
var tests = []struct {
Snapshot
@ -406,7 +385,7 @@ func TestRestorerRelative(t *testing.T) {
tempdir, cleanup := rtest.TempDir(t)
defer cleanup()
cleanup = chdir(t, tempdir)
cleanup = fs.TestChdir(t, tempdir)
defer cleanup()
errors := make(map[string]string)


@ -30,14 +30,18 @@ type Snapshot struct {
// NewSnapshot returns an initialized snapshot struct for the current user and
// time.
func NewSnapshot(paths []string, tags []string, hostname string, time time.Time) (*Snapshot, error) {
for i, path := range paths {
if p, err := filepath.Abs(path); err != nil {
paths[i] = p
absPaths := make([]string, 0, len(paths))
for _, path := range paths {
p, err := filepath.Abs(path)
if err == nil {
absPaths = append(absPaths, p)
} else {
absPaths = append(absPaths, path)
}
}
sn := &Snapshot{
Paths: paths,
Paths: absPaths,
Time: time,
Tags: tags,
Hostname: hostname,


@ -4,6 +4,7 @@ import (
"context"
"fmt"
"os"
"path/filepath"
"time"
"github.com/restic/restic/internal/errors"
@ -14,13 +15,25 @@ var ErrNoSnapshotFound = errors.New("no snapshot found")
// FindLatestSnapshot finds latest snapshot with optional target/directory, tags and hostname filters.
func FindLatestSnapshot(ctx context.Context, repo Repository, targets []string, tagLists []TagList, hostname string) (ID, error) {
var err error
absTargets := make([]string, 0, len(targets))
for _, target := range targets {
if !filepath.IsAbs(target) {
target, err = filepath.Abs(target)
if err != nil {
return ID{}, errors.Wrap(err, "Abs")
}
}
absTargets = append(absTargets, target)
}
var (
latest time.Time
latestID ID
found bool
)
err := repo.List(ctx, SnapshotFile, func(snapshotID ID, size int64) error {
err = repo.List(ctx, SnapshotFile, func(snapshotID ID, size int64) error {
snapshot, err := LoadSnapshot(ctx, repo, snapshotID)
if err != nil {
return errors.Errorf("Error loading snapshot %v: %v", snapshotID.Str(), err)
@ -33,7 +46,7 @@ func FindLatestSnapshot(ctx context.Context, repo Repository, targets []string,
return nil
}
if !snapshot.HasPaths(targets) {
if !snapshot.HasPaths(absTargets) {
return nil
}


@ -21,12 +21,12 @@ func NewTree() *Tree {
}
}
func (t Tree) String() string {
func (t *Tree) String() string {
return fmt.Sprintf("Tree<%d nodes>", len(t.Nodes))
}
// Equals returns true if t and other have exactly the same nodes.
func (t Tree) Equals(other *Tree) bool {
func (t *Tree) Equals(other *Tree) bool {
if len(t.Nodes) != len(other.Nodes) {
debug.Log("tree.Equals(): trees have different number of nodes")
return false
@ -46,9 +46,9 @@ func (t Tree) Equals(other *Tree) bool {
// Insert adds a new node at the correct place in the tree.
func (t *Tree) Insert(node *Node) error {
pos, _, err := t.binarySearch(node.Name)
if err == nil {
return errors.New("node already present")
pos, found := t.find(node.Name)
if found != nil {
return errors.Errorf("node %q already present", node.Name)
}
// https://code.google.com/p/go-wiki/wiki/SliceTricks
@ -59,16 +59,26 @@ func (t *Tree) Insert(node *Node) error {
return nil
}
func (t Tree) binarySearch(name string) (int, *Node, error) {
func (t *Tree) find(name string) (int, *Node) {
pos := sort.Search(len(t.Nodes), func(i int) bool {
return t.Nodes[i].Name >= name
})
if pos < len(t.Nodes) && t.Nodes[pos].Name == name {
return pos, t.Nodes[pos], nil
return pos, t.Nodes[pos]
}
return pos, nil, errors.New("named node not found")
return pos, nil
}
// Find returns a node with the given name, or nil if none could be found.
func (t *Tree) Find(name string) *Node {
if t == nil {
return nil
}
_, node := t.find(name)
return node
}
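// Example (a sketch with hypothetical node names): for a tree whose sorted
// Nodes contain "bar" and "foo", Find("foo") returns that node and
// Find("baz") returns nil. Since find uses sort.Search, Nodes must be kept
// sorted by name (see Insert and Sort).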
// Sort sorts the nodes by name.
@ -79,7 +89,7 @@ func (t *Tree) Sort() {
}
// Subtrees returns a slice of all subtree IDs of the tree.
func (t Tree) Subtrees() (trees IDs) {
func (t *Tree) Subtrees() (trees IDs) {
for _, node := range t.Nodes {
if node.Type == "dir" && node.Subtree != nil {
trees = append(trees, *node.Subtree)

internal/test/helper.go Normal file

@ -0,0 +1,15 @@
// +build go1.9
package test
import "testing"
// Helperer marks the current function as a test helper.
type Helperer interface {
Helper()
}
// Helper returns t as a Helperer, so callers can mark the current function as a test helper.
func Helper(t testing.TB) Helperer {
return t
}


@ -0,0 +1,19 @@
// +build !go1.9
package test
import "testing"
// Helperer marks the current function as a test helper.
type Helperer interface {
Helper()
}
type fakeHelper struct{}
func (fakeHelper) Helper() {}
// Helper returns a no-op Helperer, since testing.TB has no Helper method before Go 1.9.
func Helper(t testing.TB) Helperer {
return fakeHelper{}
}

internal/ui/backup.go Normal file

@ -0,0 +1,343 @@
package ui
import (
"context"
"fmt"
"os"
"sort"
"sync"
"time"
"github.com/restic/restic/internal/archiver"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/ui/termstatus"
)
type counter struct {
Files, Dirs uint
Bytes uint64
}
type fileWorkerMessage struct {
filename string
done bool
}
// Backup reports progress for the `backup` command.
type Backup struct {
*Message
*StdioWrapper
MinUpdatePause time.Duration
term *termstatus.Terminal
v uint
start time.Time
totalBytes uint64
totalCh chan counter
processedCh chan counter
errCh chan struct{}
workerCh chan fileWorkerMessage
summary struct {
sync.Mutex
Files, Dirs struct {
New uint
Changed uint
Unchanged uint
}
archiver.ItemStats
}
}
// NewBackup returns a new backup progress reporter.
func NewBackup(term *termstatus.Terminal, verbosity uint) *Backup {
return &Backup{
Message: NewMessage(term, verbosity),
StdioWrapper: NewStdioWrapper(term),
term: term,
v: verbosity,
start: time.Now(),
// limit to 60fps by default
MinUpdatePause: time.Second / 60,
totalCh: make(chan counter),
processedCh: make(chan counter),
errCh: make(chan struct{}),
workerCh: make(chan fileWorkerMessage),
}
}
// Run regularly updates the status lines. It should be called in a separate
// goroutine.
func (b *Backup) Run(ctx context.Context) error {
var (
lastUpdate time.Time
total, processed counter
errors uint
started bool
currentFiles = make(map[string]struct{})
secondsRemaining uint64
)
t := time.NewTicker(time.Second)
defer t.Stop()
for {
select {
case <-ctx.Done():
return nil
case t, ok := <-b.totalCh:
if ok {
total = t
started = true
} else {
// scan has finished
b.totalCh = nil
b.totalBytes = total.Bytes
}
case s := <-b.processedCh:
processed.Files += s.Files
processed.Dirs += s.Dirs
processed.Bytes += s.Bytes
started = true
case <-b.errCh:
errors++
started = true
case m := <-b.workerCh:
if m.done {
delete(currentFiles, m.filename)
} else {
currentFiles[m.filename] = struct{}{}
}
case <-t.C:
if !started {
continue
}
if b.totalCh == nil {
secs := float64(time.Since(b.start) / time.Second)
todo := float64(total.Bytes - processed.Bytes)
secondsRemaining = uint64(secs / float64(processed.Bytes) * todo)
}
}
// limit update frequency
if time.Since(lastUpdate) < b.MinUpdatePause {
continue
}
lastUpdate = time.Now()
b.update(total, processed, errors, currentFiles, secondsRemaining)
}
}
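// A minimal usage sketch (assuming the caller owns a cancellable context ctx
// and a termstatus.Terminal named term):
//
//	b := NewBackup(term, 1)
//	go b.Run(ctx)
//	// pass b's callbacks (StartFile, CompleteBlob, CompleteItemFn, ...) to
//	// the archiver; once the backup is done, print the summary:
//	b.Finish()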
// update updates the status lines.
func (b *Backup) update(total, processed counter, errors uint, currentFiles map[string]struct{}, secs uint64) {
var status string
if total.Files == 0 && total.Dirs == 0 {
// no total count available yet
status = fmt.Sprintf("[%s] %v files, %s, %d errors",
formatDuration(time.Since(b.start)),
processed.Files, formatBytes(processed.Bytes), errors,
)
} else {
var eta string
if secs > 0 {
eta = fmt.Sprintf(" ETA %s", formatSeconds(secs))
}
// include totals
status = fmt.Sprintf("[%s] %s %v files %s, total %v files %v, %d errors%s",
formatDuration(time.Since(b.start)),
formatPercent(processed.Bytes, total.Bytes),
processed.Files,
formatBytes(processed.Bytes),
total.Files,
formatBytes(total.Bytes),
errors,
eta,
)
}
lines := make([]string, 0, len(currentFiles)+1)
for filename := range currentFiles {
lines = append(lines, filename)
}
sort.Sort(sort.StringSlice(lines))
lines = append([]string{status}, lines...)
b.term.SetStatus(lines)
}
// ScannerError is the error callback function for the scanner; it prints the
// error in verbose mode and returns nil.
func (b *Backup) ScannerError(item string, fi os.FileInfo, err error) error {
b.V("scan: %v\n", err)
return nil
}
// Error is the error callback function for the archiver; it prints the error and returns nil.
func (b *Backup) Error(item string, fi os.FileInfo, err error) error {
b.E("error: %v\n", err)
b.errCh <- struct{}{}
return nil
}
// StartFile is called when a file is being processed by a worker.
func (b *Backup) StartFile(filename string) {
b.workerCh <- fileWorkerMessage{
filename: filename,
}
}
// CompleteBlob is called for all saved blobs for files.
func (b *Backup) CompleteBlob(filename string, bytes uint64) {
b.processedCh <- counter{Bytes: bytes}
}
func formatPercent(numerator uint64, denominator uint64) string {
if denominator == 0 {
return ""
}
percent := 100.0 * float64(numerator) / float64(denominator)
if percent > 100 {
percent = 100
}
return fmt.Sprintf("%3.2f%%", percent)
}
func formatSeconds(sec uint64) string {
hours := sec / 3600
sec -= hours * 3600
min := sec / 60
sec -= min * 60
if hours > 0 {
return fmt.Sprintf("%d:%02d:%02d", hours, min, sec)
}
return fmt.Sprintf("%d:%02d", min, sec)
}
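// For example, formatSeconds(3661) returns "1:01:01" and formatSeconds(75)
// returns "1:15".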
func formatDuration(d time.Duration) string {
sec := uint64(d / time.Second)
return formatSeconds(sec)
}
func formatBytes(c uint64) string {
b := float64(c)
switch {
case c > 1<<40:
return fmt.Sprintf("%.3f TiB", b/(1<<40))
case c > 1<<30:
return fmt.Sprintf("%.3f GiB", b/(1<<30))
case c > 1<<20:
return fmt.Sprintf("%.3f MiB", b/(1<<20))
case c > 1<<10:
return fmt.Sprintf("%.3f KiB", b/(1<<10))
default:
return fmt.Sprintf("%d B", c)
}
}
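// For example, formatBytes(1536) returns "1.500 KiB". The cases use > rather
// than >=, so a value of exactly 1<<20 bytes formats as "1024.000 KiB".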
// CompleteItemFn is the status callback function for the archiver when a
// file/dir has been saved successfully.
func (b *Backup) CompleteItemFn(item string, previous, current *restic.Node, s archiver.ItemStats, d time.Duration) {
b.summary.Lock()
b.summary.ItemStats.Add(s)
b.summary.Unlock()
if current == nil {
return
}
switch current.Type {
case "file":
b.processedCh <- counter{Files: 1}
b.workerCh <- fileWorkerMessage{
filename: item,
done: true,
}
case "dir":
b.processedCh <- counter{Dirs: 1}
}
if current.Type == "dir" {
if previous == nil {
b.VV("new %v, saved in %.3fs (%v added, %v metadata)", item, d.Seconds(), formatBytes(s.DataSize), formatBytes(s.TreeSize))
b.summary.Lock()
b.summary.Dirs.New++
b.summary.Unlock()
return
}
if previous.Equals(*current) {
b.VV("unchanged %v", item)
b.summary.Lock()
b.summary.Dirs.Unchanged++
b.summary.Unlock()
} else {
b.VV("modified %v, saved in %.3fs (%v added, %v metadata)", item, d.Seconds(), formatBytes(s.DataSize), formatBytes(s.TreeSize))
b.summary.Lock()
b.summary.Dirs.Changed++
b.summary.Unlock()
}
} else if current.Type == "file" {
b.workerCh <- fileWorkerMessage{
done: true,
filename: item,
}
if previous == nil {
b.VV("new %v, saved in %.3fs (%v added)", item, d.Seconds(), formatBytes(s.DataSize))
b.summary.Lock()
b.summary.Files.New++
b.summary.Unlock()
return
}
if previous.Equals(*current) {
b.VV("unchanged %v", item)
b.summary.Lock()
b.summary.Files.Unchanged++
b.summary.Unlock()
} else {
b.VV("modified %v, saved in %.3fs (%v added)", item, d.Seconds(), formatBytes(s.DataSize))
b.summary.Lock()
b.summary.Files.Changed++
b.summary.Unlock()
}
}
}
// ReportTotal sets the total stats up to now
func (b *Backup) ReportTotal(item string, s archiver.ScanStats) {
b.totalCh <- counter{Files: s.Files, Dirs: s.Dirs, Bytes: s.Bytes}
if item == "" {
b.V("scan finished in %.3fs", time.Since(b.start).Seconds())
close(b.totalCh)
return
}
}
// Finish prints the finishing messages.
func (b *Backup) Finish() {
b.V("processed %s in %s", formatBytes(b.totalBytes), formatDuration(time.Since(b.start)))
b.V("\n")
b.V("Files: %5d new, %5d changed, %5d unmodified\n", b.summary.Files.New, b.summary.Files.Changed, b.summary.Files.Unchanged)
b.V("Dirs: %5d new, %5d changed, %5d unmodified\n", b.summary.Dirs.New, b.summary.Dirs.Changed, b.summary.Dirs.Unchanged)
b.VV("Data Blobs: %5d new\n", b.summary.ItemStats.DataBlobs)
b.VV("Tree Blobs: %5d new\n", b.summary.ItemStats.TreeBlobs)
b.V("Added: %-5s\n", formatBytes(b.summary.ItemStats.DataSize+b.summary.ItemStats.TreeSize))
b.V("\n")
}

internal/ui/message.go Normal file

@ -0,0 +1,45 @@
package ui
import "github.com/restic/restic/internal/ui/termstatus"
// Message reports progress with messages of different verbosity.
type Message struct {
term *termstatus.Terminal
v uint
}
// NewMessage returns a message progress reporter with underlying terminal
// term.
func NewMessage(term *termstatus.Terminal, verbosity uint) *Message {
return &Message{
term: term,
v: verbosity,
}
}
// E reports an error
func (m *Message) E(msg string, args ...interface{}) {
m.term.Errorf(msg, args...)
}
// P prints a message if verbosity >= 1; this is used for normal messages which
// are not errors.
func (m *Message) P(msg string, args ...interface{}) {
if m.v >= 1 {
m.term.Printf(msg, args...)
}
}
// V prints a message if verbosity >= 2; this is used for verbose messages.
func (m *Message) V(msg string, args ...interface{}) {
if m.v >= 2 {
m.term.Printf(msg, args...)
}
}
// VV prints a message if verbosity >= 3; this is used for debug messages.
func (m *Message) VV(msg string, args ...interface{}) {
if m.v >= 3 {
m.term.Printf(msg, args...)
}
}
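// A short usage sketch (verbosity 2 chosen as an example):
//
//	m := NewMessage(term, 2)
//	m.P("printed, since verbosity >= 1")
//	m.V("printed, since verbosity >= 2")
//	m.VV("suppressed, since verbosity < 3")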


@ -0,0 +1,86 @@
package ui
import (
"bytes"
"io"
"github.com/restic/restic/internal/ui/termstatus"
)
// StdioWrapper provides stdout and stderr integration with termstatus.
type StdioWrapper struct {
stdout *lineWriter
stderr *lineWriter
}
// NewStdioWrapper initializes a new stdio wrapper that can be used in place of
// os.Stdout or os.Stderr.
func NewStdioWrapper(term *termstatus.Terminal) *StdioWrapper {
return &StdioWrapper{
stdout: newLineWriter(term.Print),
stderr: newLineWriter(term.Error),
}
}
// Stdout returns a writer that is line buffered and can be used in place of
// os.Stdout. On Close(), the remaining bytes are written, followed by a line
// break.
func (w *StdioWrapper) Stdout() io.WriteCloser {
return w.stdout
}
// Stderr returns a writer that is line buffered and can be used in place of
// os.Stderr. On Close(), the remaining bytes are written, followed by a line
// break.
func (w *StdioWrapper) Stderr() io.WriteCloser {
return w.stderr
}
type lineWriter struct {
buf *bytes.Buffer
print func(string)
}
var _ io.WriteCloser = &lineWriter{}
func newLineWriter(print func(string)) *lineWriter {
return &lineWriter{buf: bytes.NewBuffer(nil), print: print}
}
func (w *lineWriter) Write(data []byte) (n int, err error) {
n, err = w.buf.Write(data)
if err != nil {
return n, err
}
// look for line breaks
buf := w.buf.Bytes()
skip := 0
for i := 0; i < len(buf); {
if buf[i] == '\n' {
// found line
w.print(string(buf[:i+1]))
buf = buf[i+1:]
skip += i + 1
i = 0
continue
}
i++
}
_ = w.buf.Next(skip)
return n, err
}
func (w *lineWriter) Flush() error {
if w.buf.Len() > 0 {
w.print(string(append(w.buf.Bytes(), '\n')))
}
return nil
}
func (w *lineWriter) Close() error {
return w.Flush()
}


@ -0,0 +1,95 @@
package ui
import (
"testing"
"github.com/google/go-cmp/cmp"
)
func TestStdioWrapper(t *testing.T) {
var tests = []struct {
inputs [][]byte
outputs []string
}{
{
inputs: [][]byte{
[]byte("foo"),
},
outputs: []string{
"foo\n",
},
},
{
inputs: [][]byte{
[]byte("foo"),
[]byte("bar"),
[]byte("\n"),
[]byte("baz"),
},
outputs: []string{
"foobar\n",
"baz\n",
},
},
{
inputs: [][]byte{
[]byte("foo"),
[]byte("bar\nbaz\n"),
[]byte("bump\n"),
},
outputs: []string{
"foobar\n",
"baz\n",
"bump\n",
},
},
{
inputs: [][]byte{
[]byte("foo"),
[]byte("bar\nbaz\n"),
[]byte("bum"),
[]byte("p\nx"),
[]byte("x"),
[]byte("x"),
[]byte("z"),
},
outputs: []string{
"foobar\n",
"baz\n",
"bump\n",
"xxxz\n",
},
},
}
for _, test := range tests {
t.Run("", func(t *testing.T) {
var lines []string
print := func(s string) {
lines = append(lines, s)
}
w := newLineWriter(print)
for _, data := range test.inputs {
n, err := w.Write(data)
if err != nil {
t.Fatal(err)
}
if n != len(data) {
t.Errorf("invalid length returned by Write, want %d, got %d", len(data), n)
}
}
err := w.Close()
if err != nil {
t.Fatal(err)
}
if !cmp.Equal(test.outputs, lines) {
t.Error(cmp.Diff(test.outputs, lines))
}
})
}
}


@ -0,0 +1,9 @@
// +build !linux
package termstatus
// IsProcessBackground reports whether the current process is running in the
// background. Not implemented for this platform.
func IsProcessBackground() bool {
return false
}


@ -1,4 +1,4 @@
package main
package termstatus
import (
"syscall"
@ -7,7 +7,7 @@ import (
"github.com/restic/restic/internal/debug"
)
// IsProcessBackground returns true if it is running in the background or false if not
// IsProcessBackground reports whether the current process is running in the background.
func IsProcessBackground() bool {
var pid int
_, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(syscall.Stdin), syscall.TIOCGPGRP, uintptr(unsafe.Pointer(&pid)))


@ -0,0 +1,293 @@
package termstatus
import (
"bufio"
"bytes"
"context"
"fmt"
"io"
"os"
"strings"
)
// Terminal is used to write messages and display status lines which can be
// updated. When the output is redirected to a file, the status lines are not
// printed.
type Terminal struct {
wr *bufio.Writer
fd uintptr
errWriter io.Writer
buf *bytes.Buffer
msg chan message
status chan status
canUpdateStatus bool
clearLines clearLinesFunc
}
type clearLinesFunc func(wr io.Writer, fd uintptr, n int)
type message struct {
line string
err bool
}
type status struct {
lines []string
}
type fder interface {
Fd() uintptr
}
// New returns a new Terminal for wr. The terminal is updated by its Run
// method, which should be started in a separate goroutine and terminates when
// ctx is cancelled. When wr is redirected to
// a file (e.g. via shell output redirection) or is just an io.Writer (not the
// open *os.File for stdout), no status lines are printed. The status lines and
// normal output (via Print/Printf) are written to wr, error messages are
// written to errWriter.
func New(wr io.Writer, errWriter io.Writer) *Terminal {
t := &Terminal{
wr: bufio.NewWriter(wr),
errWriter: errWriter,
buf: bytes.NewBuffer(nil),
msg: make(chan message),
status: make(chan status),
}
if d, ok := wr.(fder); ok && canUpdateStatus(d.Fd()) {
// only use the fancy status code when we're running on a real terminal.
t.canUpdateStatus = true
t.fd = d.Fd()
t.clearLines = clearLines(wr, t.fd)
}
return t
}
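// A minimal usage sketch (assuming the caller owns a cancellable context ctx):
//
//	term := New(os.Stdout, os.Stderr)
//	go term.Run(ctx)
//	term.SetStatus([]string{"first status line", "second status line"})
//	term.Printf("%d items done", 5)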
// Run updates the screen. It should be run in a separate goroutine. When
// ctx is cancelled, the status lines are cleanly removed.
func (t *Terminal) Run(ctx context.Context) {
if t.canUpdateStatus {
t.run(ctx)
return
}
t.runWithoutStatus(ctx)
}
func countLines(buf []byte) int {
lines := 0
sc := bufio.NewScanner(bytes.NewReader(buf))
for sc.Scan() {
lines++
}
return lines
}
type stringWriter interface {
WriteString(string) (int, error)
}
// run listens on the channels and updates the terminal screen.
func (t *Terminal) run(ctx context.Context) {
statusBuf := bytes.NewBuffer(nil)
statusLines := 0
for {
select {
case <-ctx.Done():
if IsProcessBackground() {
// ignore all messages, do nothing, we are in the background process group
continue
}
t.undoStatus(statusLines)
err := t.wr.Flush()
if err != nil {
fmt.Fprintf(os.Stderr, "flush failed: %v\n", err)
}
return
case msg := <-t.msg:
if IsProcessBackground() {
// ignore all messages, do nothing, we are in the background process group
continue
}
t.undoStatus(statusLines)
var dst io.Writer
if msg.err {
dst = t.errWriter
// assume t.wr and t.errWriter are different, so we need to
// flush the removal of the status lines first.
err := t.wr.Flush()
if err != nil {
fmt.Fprintf(os.Stderr, "flush failed: %v\n", err)
}
} else {
dst = t.wr
}
var err error
if w, ok := dst.(stringWriter); ok {
_, err = w.WriteString(msg.line)
} else {
_, err = dst.Write([]byte(msg.line))
}
if err != nil {
fmt.Fprintf(os.Stderr, "write failed: %v\n", err)
continue
}
_, err = t.wr.Write(statusBuf.Bytes())
if err != nil {
fmt.Fprintf(os.Stderr, "write failed: %v\n", err)
}
err = t.wr.Flush()
if err != nil {
fmt.Fprintf(os.Stderr, "flush failed: %v\n", err)
}
case stat := <-t.status:
if IsProcessBackground() {
// ignore all messages, do nothing, we are in the background process group
continue
}
t.undoStatus(statusLines)
statusBuf.Reset()
for _, line := range stat.lines {
statusBuf.WriteString(line)
}
statusLines = len(stat.lines)
_, err := t.wr.Write(statusBuf.Bytes())
if err != nil {
fmt.Fprintf(os.Stderr, "write failed: %v\n", err)
}
err = t.wr.Flush()
if err != nil {
fmt.Fprintf(os.Stderr, "flush failed: %v\n", err)
}
}
}
}
// runWithoutStatus listens on the channels and just prints out the messages,
// without status lines.
func (t *Terminal) runWithoutStatus(ctx context.Context) {
for {
select {
case <-ctx.Done():
return
case msg := <-t.msg:
var err error
var flush func() error
var dst io.Writer
if msg.err {
dst = t.errWriter
} else {
dst = t.wr
flush = t.wr.Flush
}
if w, ok := dst.(stringWriter); ok {
_, err = w.WriteString(msg.line)
} else {
_, err = dst.Write([]byte(msg.line))
}
if err != nil {
fmt.Fprintf(os.Stderr, "write failed: %v\n", err)
}
if flush == nil {
continue
}
err = flush()
if err != nil {
fmt.Fprintf(os.Stderr, "flush failed: %v\n", err)
}
case <-t.status:
// discard status lines
}
}
}
func (t *Terminal) undoStatus(lines int) {
if lines == 0 {
return
}
lines--
t.clearLines(t.wr, t.fd, lines)
}
// Print writes a line to the terminal.
func (t *Terminal) Print(line string) {
// make sure the line ends with a line break
if line == "" || line[len(line)-1] != '\n' {
line += "\n"
}
t.msg <- message{line: line}
}
// Printf uses fmt.Sprintf to write a line to the terminal.
func (t *Terminal) Printf(msg string, args ...interface{}) {
s := fmt.Sprintf(msg, args...)
t.Print(s)
}
// Error writes an error to the terminal.
func (t *Terminal) Error(line string) {
// make sure the line ends with a line break
if line == "" || line[len(line)-1] != '\n' {
line += "\n"
}
t.msg <- message{line: line, err: true}
}
// Errorf uses fmt.Sprintf to write an error line to the terminal.
func (t *Terminal) Errorf(msg string, args ...interface{}) {
s := fmt.Sprintf(msg, args...)
t.Error(s)
}
// SetStatus updates the status lines.
func (t *Terminal) SetStatus(lines []string) {
if len(lines) == 0 {
return
}
width, _, err := getTermSize(t.fd)
if err != nil || width < 0 {
// use 80 columns by default
width = 80
}
// make sure that all lines have a line break and are not too long
for i, line := range lines {
line = strings.TrimRight(line, "\n")
if len(line) >= width-2 {
line = line[:width-2]
}
line += "\n"
lines[i] = line
}
// make sure the last line does not have a line break
last := len(lines) - 1
lines[last] = strings.TrimRight(lines[last], "\n")
t.status <- status{lines: lines}
}


@ -0,0 +1,33 @@
package termstatus
import (
"fmt"
"io"
"os"
)
const (
posixMoveCursorHome = "\r"
posixMoveCursorUp = "\x1b[1A"
posixClearLine = "\x1b[2K"
)
// posixClearLines will clear the current line and the n lines above.
// Afterwards the cursor is positioned at the start of the first cleared line.
func posixClearLines(wr io.Writer, fd uintptr, n int) {
// clear current line
_, err := wr.Write([]byte(posixMoveCursorHome + posixClearLine))
if err != nil {
fmt.Fprintf(os.Stderr, "write failed: %v\n", err)
return
}
for ; n > 0; n-- {
// clear current line and move on line up
_, err := wr.Write([]byte(posixMoveCursorUp + posixClearLine))
if err != nil {
fmt.Fprintf(os.Stderr, "write failed: %v\n", err)
return
}
}
}
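// For n == 2 this writes "\r\x1b[2K" once and then "\x1b[1A\x1b[2K" twice:
// the current line is cleared in place, then the cursor moves up and clears
// each of the two lines above it.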


@ -0,0 +1,34 @@
// +build !windows
package termstatus
import (
"io"
"syscall"
"unsafe"
isatty "github.com/mattn/go-isatty"
)
// clearLines will clear the current line and the n lines above. Afterwards the
// cursor is positioned at the start of the first cleared line.
func clearLines(wr io.Writer, fd uintptr) clearLinesFunc {
return posixClearLines
}
// canUpdateStatus returns true if status lines can be printed, i.e. when the
// process output is not redirected to a file or pipe.
func canUpdateStatus(fd uintptr) bool {
return isatty.IsTerminal(fd)
}
// getTermSize returns the dimensions of the given terminal.
// the code is taken from "golang.org/x/crypto/ssh/terminal"
func getTermSize(fd uintptr) (width, height int, err error) {
var dimensions [4]uint16
if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(&dimensions)), 0, 0, 0); err != 0 {
return -1, -1, err
}
return int(dimensions[1]), int(dimensions[0]), nil
}


@ -0,0 +1,131 @@
// +build windows
package termstatus
import (
"io"
"syscall"
"unsafe"
)
// clearLines clears the current line and n lines above it.
func clearLines(wr io.Writer, fd uintptr) clearLinesFunc {
// easy case, the terminal is cmd or psh, without redirection
if isWindowsTerminal(fd) {
return windowsClearLines
}
// check if the output file type is a pipe (0x0003)
if getFileType(fd) != fileTypePipe {
// return a no-op func; status updates are not possible on this terminal
return func(io.Writer, uintptr, int) {}
}
// assume we're running in mintty/cygwin
return posixClearLines
}
var kernel32 = syscall.NewLazyDLL("kernel32.dll")
var (
procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo")
procSetConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition")
procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW")
procFillConsoleOutputAttribute = kernel32.NewProc("FillConsoleOutputAttribute")
procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
procGetFileType = kernel32.NewProc("GetFileType")
)
type (
short int16
word uint16
dword uint32
coord struct {
x short
y short
}
smallRect struct {
left short
top short
right short
bottom short
}
consoleScreenBufferInfo struct {
size coord
cursorPosition coord
attributes word
window smallRect
maximumWindowSize coord
}
)
// windowsClearLines clears the current line and n lines above it.
func windowsClearLines(wr io.Writer, fd uintptr, n int) {
var info consoleScreenBufferInfo
procGetConsoleScreenBufferInfo.Call(fd, uintptr(unsafe.Pointer(&info)))
for i := 0; i <= n; i++ {
// clear the line
cursor := coord{
x: info.window.left,
y: info.cursorPosition.y - short(i),
}
var count, w dword
count = dword(info.size.x)
procFillConsoleOutputAttribute.Call(fd, uintptr(info.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&w)))
procFillConsoleOutputCharacter.Call(fd, uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&w)))
}
// move cursor up by n lines and to the first column
info.cursorPosition.y -= short(n)
info.cursorPosition.x = 0
procSetConsoleCursorPosition.Call(fd, uintptr(*(*int32)(unsafe.Pointer(&info.cursorPosition))))
}
// getTermSize returns the dimensions of the given terminal.
// the code is taken from "golang.org/x/crypto/ssh/terminal"
func getTermSize(fd uintptr) (width, height int, err error) {
var info consoleScreenBufferInfo
_, _, e := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, fd, uintptr(unsafe.Pointer(&info)), 0)
if e != 0 {
return 0, 0, error(e)
}
return int(info.size.x), int(info.size.y), nil
}
// isWindowsTerminal returns true if the file descriptor is a windows terminal (cmd, psh).
func isWindowsTerminal(fd uintptr) bool {
var st uint32
r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0)
return r != 0 && e == 0
}
const fileTypePipe = 0x0003
// getFileType returns the file type for the given fd.
// https://msdn.microsoft.com/de-de/library/windows/desktop/aa364960(v=vs.85).aspx
func getFileType(fd uintptr) int {
r, _, e := syscall.Syscall(procGetFileType.Addr(), 1, fd, 0, 0)
if e != 0 {
return 0
}
return int(r)
}
// canUpdateStatus returns true if status lines can be printed, i.e. when the
// process output is not redirected to a file or pipe.
func canUpdateStatus(fd uintptr) bool {
// easy case, the terminal is cmd or psh, without redirection
if isWindowsTerminal(fd) {
return true
}
// check if the output file type is a pipe (0x0003)
if getFileType(fd) != fileTypePipe {
return false
}
// assume we're running in mintty/cygwin
return true
}

Binary file not shown.


@ -1,197 +0,0 @@
package walk
import (
"context"
"fmt"
"os"
"path/filepath"
"sync"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/restic"
)
// TreeJob is a job sent from the tree walker.
type TreeJob struct {
Path string
Error error
Node *restic.Node
Tree *restic.Tree
}
// TreeWalker traverses a tree in the repository depth-first and sends a job
// for each item (file or dir) that it encounters.
type TreeWalker struct {
ch chan<- loadTreeJob
out chan<- TreeJob
}
// NewTreeWalker uses ch to load trees from the repository and sends jobs to
// out.
func NewTreeWalker(ch chan<- loadTreeJob, out chan<- TreeJob) *TreeWalker {
return &TreeWalker{ch: ch, out: out}
}
// Walk starts walking the tree given by id. When the channel done is closed,
// processing stops.
func (tw *TreeWalker) Walk(ctx context.Context, path string, id restic.ID) {
debug.Log("starting on tree %v for %v", id, path)
defer debug.Log("done walking tree %v for %v", id, path)
resCh := make(chan loadTreeResult, 1)
tw.ch <- loadTreeJob{
id: id,
res: resCh,
}
res := <-resCh
if res.err != nil {
select {
case tw.out <- TreeJob{Path: path, Error: res.err}:
case <-ctx.Done():
return
}
return
}
tw.walk(ctx, path, res.tree)
select {
case tw.out <- TreeJob{Path: path, Tree: res.tree}:
case <-ctx.Done():
return
}
}
func (tw *TreeWalker) walk(ctx context.Context, path string, tree *restic.Tree) {
debug.Log("start on %q", path)
defer debug.Log("done for %q", path)
debug.Log("tree %#v", tree)
// load all subtrees in parallel
results := make([]<-chan loadTreeResult, len(tree.Nodes))
for i, node := range tree.Nodes {
if node.Type == "dir" {
resCh := make(chan loadTreeResult, 1)
tw.ch <- loadTreeJob{
id: *node.Subtree,
res: resCh,
}
results[i] = resCh
}
}
for i, node := range tree.Nodes {
p := filepath.Join(path, node.Name)
var job TreeJob
if node.Type == "dir" {
if results[i] == nil {
panic("result chan should not be nil")
}
res := <-results[i]
if res.err == nil {
tw.walk(ctx, p, res.tree)
} else {
fmt.Fprintf(os.Stderr, "error loading tree: %v\n", res.err)
}
job = TreeJob{Path: p, Tree: res.tree, Error: res.err}
} else {
job = TreeJob{Path: p, Node: node}
}
select {
case tw.out <- job:
case <-ctx.Done():
return
}
}
}
type loadTreeResult struct {
tree *restic.Tree
err error
}
type loadTreeJob struct {
id restic.ID
res chan<- loadTreeResult
}
type treeLoader func(restic.ID) (*restic.Tree, error)
func loadTreeWorker(ctx context.Context, wg *sync.WaitGroup, in <-chan loadTreeJob, load treeLoader) {
debug.Log("start")
defer debug.Log("exit")
defer wg.Done()
for {
select {
case <-ctx.Done():
debug.Log("done channel closed")
return
case job, ok := <-in:
if !ok {
debug.Log("input channel closed, exiting")
return
}
debug.Log("received job to load tree %v", job.id)
tree, err := load(job.id)
debug.Log("tree %v loaded, error %v", job.id, err)
select {
case job.res <- loadTreeResult{tree, err}:
debug.Log("job result sent")
case <-ctx.Done():
debug.Log("done channel closed before result could be sent")
return
}
}
}
}
// TreeLoader loads tree objects.
type TreeLoader interface {
LoadTree(context.Context, restic.ID) (*restic.Tree, error)
}
const loadTreeWorkers = 10
// Tree walks the tree specified by id recursively and sends a job for each
// file and directory it finds. When the channel done is closed, processing
// stops.
func Tree(ctx context.Context, repo TreeLoader, id restic.ID, jobCh chan<- TreeJob) {
debug.Log("start on %v, start workers", id)
load := func(id restic.ID) (*restic.Tree, error) {
tree, err := repo.LoadTree(ctx, id)
if err != nil {
return nil, err
}
return tree, nil
}
ch := make(chan loadTreeJob)
var wg sync.WaitGroup
for i := 0; i < loadTreeWorkers; i++ {
wg.Add(1)
go loadTreeWorker(ctx, &wg, ch, load)
}
tw := NewTreeWalker(ch, jobCh)
tw.Walk(ctx, "", id)
close(jobCh)
close(ch)
wg.Wait()
debug.Log("done")
}

File diff suppressed because it is too large

vendor/github.com/google/go-cmp/.travis.yml generated vendored Normal file

@ -0,0 +1,18 @@
sudo: false
language: go
go:
- 1.x
- master
matrix:
include:
- go: 1.6.x
script: go test -v -race ./...
allow_failures:
- go: master
fast_finish: true
install:
- # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (it is intended for this package to have no dependencies other than the standard library).
script:
- diff -u <(echo -n) <(gofmt -d -s .)
- go tool vet .
- go test -v -race ./...

vendor/github.com/google/go-cmp/CONTRIBUTING.md generated vendored Normal file

@ -0,0 +1,23 @@
# How to Contribute
We'd love to accept your patches and contributions to this project. There are
just a few small guidelines you need to follow.
## Contributor License Agreement
Contributions to this project must be accompanied by a Contributor License
Agreement. You (or your employer) retain the copyright to your contribution,
this simply gives us permission to use and redistribute your contributions as
part of the project. Head over to <https://cla.developers.google.com/> to see
your current agreements on file or to sign a new one.
You generally only need to submit a CLA once, so if you've already submitted one
(even if it was for a different project), you probably don't need to do it
again.
## Code reviews
All submissions, including submissions by project members, require review. We
use GitHub pull requests for this purpose. Consult
[GitHub Help](https://help.github.com/articles/about-pull-requests/) for more
information on using pull requests.

vendor/github.com/google/go-cmp/LICENSE generated vendored Normal file

@ -0,0 +1,27 @@
Copyright (c) 2017 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

vendor/github.com/google/go-cmp/README.md generated vendored Normal file

@ -0,0 +1,44 @@
# Package for equality of Go values
[![GoDoc](https://godoc.org/github.com/google/go-cmp/cmp?status.svg)][godoc]
[![Build Status](https://travis-ci.org/google/go-cmp.svg?branch=master)][travis]
This package is intended to be a more powerful and safer alternative to
`reflect.DeepEqual` for comparing whether two values are semantically equal.
The primary features of `cmp` are:
* When the default behavior of equality does not suit the needs of the test,
custom equality functions can override the equality operation.
For example, an equality function may report floats as equal so long as they
are within some tolerance of each other.
* Types that have an `Equal` method may use that method to determine equality.
This allows package authors to determine the equality operation for the types
that they define.
* If no custom equality functions are used and no `Equal` method is defined,
equality is determined by recursively comparing the primitive kinds on both
values, much like `reflect.DeepEqual`. Unlike `reflect.DeepEqual`, unexported
fields are not compared by default; they result in panics unless suppressed
by using an `Ignore` option (see `cmpopts.IgnoreUnexported`) or explicitly
compared using the `AllowUnexported` option.
See the [GoDoc documentation][godoc] for more information.
This is not an official Google product.
[godoc]: https://godoc.org/github.com/google/go-cmp/cmp
[travis]: https://travis-ci.org/google/go-cmp
## Install
```
go get -u github.com/google/go-cmp/cmp
```
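A minimal sketch of the default behavior (the `point` type below is
hypothetical):

```
package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

type point struct{ X, Y int }

func main() {
	a := point{X: 1, Y: 2}
	b := point{X: 1, Y: 3}
	fmt.Println(cmp.Equal(a, b)) // false
	fmt.Println(cmp.Diff(a, b))  // reports the differing Y field
}
```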
## License
BSD - See [LICENSE][license] file
[license]: https://github.com/google/go-cmp/blob/master/LICENSE

vendor/github.com/google/go-cmp/cmp/cmpopts/equate.go generated vendored Normal file

@ -0,0 +1,89 @@
// Copyright 2017, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.
// Package cmpopts provides common options for the cmp package.
package cmpopts
import (
"math"
"reflect"
"github.com/google/go-cmp/cmp"
)
func equateAlways(_, _ interface{}) bool { return true }
// EquateEmpty returns a Comparer option that determines all maps and slices
// with a length of zero to be equal, regardless of whether they are nil.
//
// EquateEmpty can be used in conjunction with SortSlices and SortMaps.
func EquateEmpty() cmp.Option {
return cmp.FilterValues(isEmpty, cmp.Comparer(equateAlways))
}
func isEmpty(x, y interface{}) bool {
vx, vy := reflect.ValueOf(x), reflect.ValueOf(y)
return (x != nil && y != nil && vx.Type() == vy.Type()) &&
(vx.Kind() == reflect.Slice || vx.Kind() == reflect.Map) &&
(vx.Len() == 0 && vy.Len() == 0)
}
// EquateApprox returns a Comparer option that determines float32 or float64
// values to be equal if they are within a relative fraction or absolute margin.
// This option is not used when either x or y is NaN or infinite.
//
// The fraction determines that the difference of two values must be within the
// smaller fraction of the two values, while the margin determines that the two
// values must be within some absolute margin.
// To express only a fraction or only a margin, use 0 for the other parameter.
// The fraction and margin must be non-negative.
//
// The mathematical expression used is equivalent to:
// |x-y| ≤ max(fraction*min(|x|, |y|), margin)
//
// EquateApprox can be used in conjunction with EquateNaNs.
func EquateApprox(fraction, margin float64) cmp.Option {
if margin < 0 || fraction < 0 || math.IsNaN(margin) || math.IsNaN(fraction) {
panic("margin or fraction must be a non-negative number")
}
a := approximator{fraction, margin}
return cmp.Options{
cmp.FilterValues(areRealF64s, cmp.Comparer(a.compareF64)),
cmp.FilterValues(areRealF32s, cmp.Comparer(a.compareF32)),
}
}
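// For example (a sketch): cmp.Equal(1.00, 1.01, EquateApprox(0, 0.02))
// reports true, since |1.00-1.01| = 0.01 <= max(0*min(|1.00|, |1.01|), 0.02).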
type approximator struct{ frac, marg float64 }
func areRealF64s(x, y float64) bool {
return !math.IsNaN(x) && !math.IsNaN(y) && !math.IsInf(x, 0) && !math.IsInf(y, 0)
}
func areRealF32s(x, y float32) bool {
return areRealF64s(float64(x), float64(y))
}
func (a approximator) compareF64(x, y float64) bool {
relMarg := a.frac * math.Min(math.Abs(x), math.Abs(y))
return math.Abs(x-y) <= math.Max(a.marg, relMarg)
}
func (a approximator) compareF32(x, y float32) bool {
return a.compareF64(float64(x), float64(y))
}
// EquateNaNs returns a Comparer option that determines float32 and float64
// NaN values to be equal.
//
// EquateNaNs can be used in conjunction with EquateApprox.
func EquateNaNs() cmp.Option {
return cmp.Options{
cmp.FilterValues(areNaNsF64s, cmp.Comparer(equateAlways)),
cmp.FilterValues(areNaNsF32s, cmp.Comparer(equateAlways)),
}
}
func areNaNsF64s(x, y float64) bool {
return math.IsNaN(x) && math.IsNaN(y)
}
func areNaNsF32s(x, y float32) bool {
return areNaNsF64s(float64(x), float64(y))
}

vendor/github.com/google/go-cmp/cmp/cmpopts/ignore.go generated vendored Normal file

@ -0,0 +1,148 @@
// Copyright 2017, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.
package cmpopts
import (
"fmt"
"reflect"
"unicode"
"unicode/utf8"
"github.com/google/go-cmp/cmp"
)
// IgnoreFields returns an Option that ignores exported fields of the
// given names on a single struct type.
// The struct type is specified by passing in a value of that type.
//
// The name may be a dot-delimited string (e.g., "Foo.Bar") to ignore a
// specific sub-field that is embedded or nested within the parent struct.
//
// This does not handle unexported fields; use IgnoreUnexported instead.
func IgnoreFields(typ interface{}, names ...string) cmp.Option {
sf := newStructFilter(typ, names...)
return cmp.FilterPath(sf.filter, cmp.Ignore())
}
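// For example (a sketch with a hypothetical point struct):
// cmp.Equal(point{1, 2}, point{1, 3}, IgnoreFields(point{}, "Y")) reports
// true, because the differing Y field is ignored.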
// IgnoreTypes returns an Option that ignores all values assignable to
// certain types, which are specified by passing in a value of each type.
func IgnoreTypes(typs ...interface{}) cmp.Option {
tf := newTypeFilter(typs...)
return cmp.FilterPath(tf.filter, cmp.Ignore())
}
type typeFilter []reflect.Type
func newTypeFilter(typs ...interface{}) (tf typeFilter) {
for _, typ := range typs {
t := reflect.TypeOf(typ)
if t == nil {
// This occurs if someone tries to pass in sync.Locker(nil)
panic("cannot determine type; consider using IgnoreInterfaces")
}
tf = append(tf, t)
}
return tf
}
func (tf typeFilter) filter(p cmp.Path) bool {
if len(p) < 1 {
return false
}
t := p[len(p)-1].Type()
for _, ti := range tf {
if t.AssignableTo(ti) {
return true
}
}
return false
}
// IgnoreInterfaces returns an Option that ignores all values or references of
// values assignable to certain interface types. These interfaces are specified
// by passing in an anonymous struct with the interface types embedded in it.
// For example, to ignore sync.Locker, pass in struct{sync.Locker}{}.
func IgnoreInterfaces(ifaces interface{}) cmp.Option {
tf := newIfaceFilter(ifaces)
return cmp.FilterPath(tf.filter, cmp.Ignore())
}
type ifaceFilter []reflect.Type
func newIfaceFilter(ifaces interface{}) (tf ifaceFilter) {
t := reflect.TypeOf(ifaces)
if ifaces == nil || t.Name() != "" || t.Kind() != reflect.Struct {
panic("input must be an anonymous struct")
}
for i := 0; i < t.NumField(); i++ {
fi := t.Field(i)
switch {
case !fi.Anonymous:
panic("struct cannot have named fields")
case fi.Type.Kind() != reflect.Interface:
panic("embedded field must be an interface type")
case fi.Type.NumMethod() == 0:
// This matches everything; why would you ever want this?
panic("cannot ignore empty interface")
default:
tf = append(tf, fi.Type)
}
}
return tf
}
func (tf ifaceFilter) filter(p cmp.Path) bool {
if len(p) < 1 {
return false
}
t := p[len(p)-1].Type()
for _, ti := range tf {
if t.AssignableTo(ti) {
return true
}
if t.Kind() != reflect.Ptr && reflect.PtrTo(t).AssignableTo(ti) {
return true
}
}
return false
}
// IgnoreUnexported returns an Option that only ignores the immediate unexported
// fields of a struct, including anonymous fields of unexported types.
// In particular, unexported fields within the struct's exported fields
// of struct types, including anonymous fields, will not be ignored unless the
// type of the field itself is also passed to IgnoreUnexported.
func IgnoreUnexported(typs ...interface{}) cmp.Option {
ux := newUnexportedFilter(typs...)
return cmp.FilterPath(ux.filter, cmp.Ignore())
}
type unexportedFilter struct{ m map[reflect.Type]bool }
func newUnexportedFilter(typs ...interface{}) unexportedFilter {
ux := unexportedFilter{m: make(map[reflect.Type]bool)}
for _, typ := range typs {
t := reflect.TypeOf(typ)
if t == nil || t.Kind() != reflect.Struct {
panic(fmt.Sprintf("invalid struct type: %T", typ))
}
ux.m[t] = true
}
return ux
}
func (xf unexportedFilter) filter(p cmp.Path) bool {
if len(p) < 2 {
return false
}
sf, ok := p[len(p)-1].(cmp.StructField)
if !ok {
return false
}
return xf.m[p[len(p)-2].Type()] && !isExported(sf.Name())
}
// isExported reports whether the identifier is exported.
func isExported(id string) bool {
r, _ := utf8.DecodeRuneInString(id)
return unicode.IsUpper(r)
}

vendor/github.com/google/go-cmp/cmp/cmpopts/sort.go generated vendored Normal file

@ -0,0 +1,146 @@
// Copyright 2017, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.
package cmpopts
import (
"fmt"
"reflect"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/internal/function"
)
// SortSlices returns a Transformer option that sorts all []V.
// The less function must be of the form "func(T, T) bool" which is used to
// sort any slice with element type V that is assignable to T.
//
// The less function must be:
// • Deterministic: less(x, y) == less(x, y)
// • Irreflexive: !less(x, x)
// • Transitive: if !less(x, y) and !less(y, z), then !less(x, z)
//
// The less function does not have to be "total". That is, if !less(x, y) and
// !less(y, x) for two elements x and y, their relative order is maintained.
//
// SortSlices can be used in conjunction with EquateEmpty.
func SortSlices(less interface{}) cmp.Option {
vf := reflect.ValueOf(less)
if !function.IsType(vf.Type(), function.Less) || vf.IsNil() {
panic(fmt.Sprintf("invalid less function: %T", less))
}
ss := sliceSorter{vf.Type().In(0), vf}
return cmp.FilterValues(ss.filter, cmp.Transformer("Sort", ss.sort))
}
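// For example (a sketch): cmp.Equal([]int{3, 1, 2}, []int{1, 2, 3},
// SortSlices(func(x, y int) bool { return x < y })) reports true, because
// both slices are equal once sorted.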
type sliceSorter struct {
in reflect.Type // T
fnc reflect.Value // func(T, T) bool
}
func (ss sliceSorter) filter(x, y interface{}) bool {
vx, vy := reflect.ValueOf(x), reflect.ValueOf(y)
if !(x != nil && y != nil && vx.Type() == vy.Type()) ||
!(vx.Kind() == reflect.Slice && vx.Type().Elem().AssignableTo(ss.in)) ||
(vx.Len() <= 1 && vy.Len() <= 1) {
return false
}
// Check whether the slices are already sorted to avoid an infinite
// recursion cycle applying the same transform to itself.
ok1 := sliceIsSorted(x, func(i, j int) bool { return ss.less(vx, i, j) })
ok2 := sliceIsSorted(y, func(i, j int) bool { return ss.less(vy, i, j) })
return !ok1 || !ok2
}
func (ss sliceSorter) sort(x interface{}) interface{} {
src := reflect.ValueOf(x)
dst := reflect.MakeSlice(src.Type(), src.Len(), src.Len())
for i := 0; i < src.Len(); i++ {
dst.Index(i).Set(src.Index(i))
}
sortSliceStable(dst.Interface(), func(i, j int) bool { return ss.less(dst, i, j) })
ss.checkSort(dst)
return dst.Interface()
}
func (ss sliceSorter) checkSort(v reflect.Value) {
start := -1 // Start of a sequence of equal elements.
for i := 1; i < v.Len(); i++ {
if ss.less(v, i-1, i) {
// Check that first and last elements in v[start:i] are equal.
if start >= 0 && (ss.less(v, start, i-1) || ss.less(v, i-1, start)) {
panic(fmt.Sprintf("incomparable values detected: want equal elements: %v", v.Slice(start, i)))
}
start = -1
} else if start == -1 {
start = i
}
}
}
func (ss sliceSorter) less(v reflect.Value, i, j int) bool {
vx, vy := v.Index(i), v.Index(j)
return ss.fnc.Call([]reflect.Value{vx, vy})[0].Bool()
}
// SortMaps returns a Transformer option that flattens map[K]V types to be a
// sorted []struct{K, V}. The less function must be of the form
// "func(T, T) bool" which is used to sort any map with key K that is
// assignable to T.
//
// Flattening the map into a slice has the property that cmp.Equal is able to
// use Comparers on K or the K.Equal method if it exists.
//
// The less function must be:
// • Deterministic: less(x, y) == less(x, y)
// • Irreflexive: !less(x, x)
// • Transitive: if !less(x, y) and !less(y, z), then !less(x, z)
// • Total: if x != y, then either less(x, y) or less(y, x)
//
// SortMaps can be used in conjunction with EquateEmpty.
func SortMaps(less interface{}) cmp.Option {
vf := reflect.ValueOf(less)
if !function.IsType(vf.Type(), function.Less) || vf.IsNil() {
panic(fmt.Sprintf("invalid less function: %T", less))
}
ms := mapSorter{vf.Type().In(0), vf}
return cmp.FilterValues(ms.filter, cmp.Transformer("Sort", ms.sort))
}
type mapSorter struct {
in reflect.Type // T
fnc reflect.Value // func(T, T) bool
}
func (ms mapSorter) filter(x, y interface{}) bool {
vx, vy := reflect.ValueOf(x), reflect.ValueOf(y)
return (x != nil && y != nil && vx.Type() == vy.Type()) &&
(vx.Kind() == reflect.Map && vx.Type().Key().AssignableTo(ms.in)) &&
(vx.Len() != 0 || vy.Len() != 0)
}
func (ms mapSorter) sort(x interface{}) interface{} {
src := reflect.ValueOf(x)
outType := mapEntryType(src.Type())
dst := reflect.MakeSlice(reflect.SliceOf(outType), src.Len(), src.Len())
for i, k := range src.MapKeys() {
v := reflect.New(outType).Elem()
v.Field(0).Set(k)
v.Field(1).Set(src.MapIndex(k))
dst.Index(i).Set(v)
}
sortSlice(dst.Interface(), func(i, j int) bool { return ms.less(dst, i, j) })
ms.checkSort(dst)
return dst.Interface()
}
func (ms mapSorter) checkSort(v reflect.Value) {
for i := 1; i < v.Len(); i++ {
if !ms.less(v, i-1, i) {
panic(fmt.Sprintf("partial order detected: want %v < %v", v.Index(i-1), v.Index(i)))
}
}
}
func (ms mapSorter) less(v reflect.Value, i, j int) bool {
vx, vy := v.Index(i).Field(0), v.Index(j).Field(0)
if !hasReflectStructOf {
vx, vy = vx.Elem(), vy.Elem()
}
return ms.fnc.Call([]reflect.Value{vx, vy})[0].Bool()
}
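
As a usage sketch of both options (the slice/map values and less functions here are illustrative, not taken from this diff):

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
)

func main() {
	// SortSlices: slices differing only in element order compare equal.
	x := []int{3, 1, 2}
	y := []int{1, 2, 3}
	lessInt := func(a, b int) bool { return a < b }
	fmt.Println(cmp.Equal(x, y, cmpopts.SortSlices(lessInt))) // true

	// SortMaps: maps are flattened to a sorted []struct{K, V}, which lets
	// Comparer options and Equal methods apply to the keys during comparison.
	m1 := map[string]int{"a": 1, "b": 2}
	m2 := map[string]int{"b": 2, "a": 1}
	lessStr := func(a, b string) bool { return a < b }
	fmt.Println(cmp.Equal(m1, m2, cmpopts.SortMaps(lessStr))) // true
}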


@ -0,0 +1,46 @@
// Copyright 2017, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.
// +build !go1.8
package cmpopts
import (
"reflect"
"sort"
)
const hasReflectStructOf = false
func mapEntryType(reflect.Type) reflect.Type {
return reflect.TypeOf(struct{ K, V interface{} }{})
}
func sliceIsSorted(slice interface{}, less func(i, j int) bool) bool {
return sort.IsSorted(reflectSliceSorter{reflect.ValueOf(slice), less})
}
func sortSlice(slice interface{}, less func(i, j int) bool) {
sort.Sort(reflectSliceSorter{reflect.ValueOf(slice), less})
}
func sortSliceStable(slice interface{}, less func(i, j int) bool) {
sort.Stable(reflectSliceSorter{reflect.ValueOf(slice), less})
}
type reflectSliceSorter struct {
slice reflect.Value
less func(i, j int) bool
}
func (ss reflectSliceSorter) Len() int {
return ss.slice.Len()
}
func (ss reflectSliceSorter) Less(i, j int) bool {
return ss.less(i, j)
}
func (ss reflectSliceSorter) Swap(i, j int) {
vi := ss.slice.Index(i).Interface()
vj := ss.slice.Index(j).Interface()
ss.slice.Index(i).Set(reflect.ValueOf(vj))
ss.slice.Index(j).Set(reflect.ValueOf(vi))
}
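
The !go1.8 fallback above drives sort.Sort through sort.Interface because sort.Slice does not exist yet on those toolchains. A standalone sketch of the same adapter pattern (valueSorter is a local name for this sketch, not part of the diff):

package main

import (
	"fmt"
	"reflect"
	"sort"
)

// valueSorter adapts a reflect.Value holding a slice to sort.Interface,
// mirroring the reflectSliceSorter fallback above.
type valueSorter struct {
	slice reflect.Value
	less  func(i, j int) bool
}

func (s valueSorter) Len() int           { return s.slice.Len() }
func (s valueSorter) Less(i, j int) bool { return s.less(i, j) }
func (s valueSorter) Swap(i, j int) {
	vi := s.slice.Index(i).Interface()
	vj := s.slice.Index(j).Interface()
	s.slice.Index(i).Set(reflect.ValueOf(vj))
	s.slice.Index(j).Set(reflect.ValueOf(vi))
}

func main() {
	xs := []int{3, 1, 2}
	v := reflect.ValueOf(xs)
	sort.Sort(valueSorter{v, func(i, j int) bool {
		return v.Index(i).Int() < v.Index(j).Int()
	}})
	fmt.Println(xs) // [1 2 3]
}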


@ -0,0 +1,31 @@
// Copyright 2017, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.
// +build go1.8
package cmpopts
import (
"reflect"
"sort"
)
const hasReflectStructOf = true
func mapEntryType(t reflect.Type) reflect.Type {
return reflect.StructOf([]reflect.StructField{
{Name: "K", Type: t.Key()},
{Name: "V", Type: t.Elem()},
})
}
func sliceIsSorted(slice interface{}, less func(i, j int) bool) bool {
return sort.SliceIsSorted(slice, less)
}
func sortSlice(slice interface{}, less func(i, j int) bool) {
sort.Slice(slice, less)
}
func sortSliceStable(slice interface{}, less func(i, j int) bool) {
sort.SliceStable(slice, less)
}
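
On go1.8 and newer, mapEntryType can build the struct{ K; V } entry type at runtime with reflect.StructOf. A small self-contained sketch of what it produces for a map[string]int:

package main

import (
	"fmt"
	"reflect"
)

func main() {
	t := reflect.TypeOf(map[string]int(nil))
	// Construct struct{ K string; V int } dynamically, as mapEntryType does.
	entry := reflect.StructOf([]reflect.StructField{
		{Name: "K", Type: t.Key()},
		{Name: "V", Type: t.Elem()},
	})
	fmt.Println(entry) // struct { K string; V int }
}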


@ -0,0 +1,182 @@
// Copyright 2017, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.
package cmpopts
import (
"fmt"
"reflect"
"strings"
"github.com/google/go-cmp/cmp"
)
// filterField returns a new Option where opt is only evaluated on paths that
// include a specific exported field on a single struct type.
// The struct type is specified by passing in a value of that type.
//
// The name may be a dot-delimited string (e.g., "Foo.Bar") to select a
// specific sub-field that is embedded or nested within the parent struct.
func filterField(typ interface{}, name string, opt cmp.Option) cmp.Option {
// TODO: This is currently unexported over concerns of how helper filters
// can be composed together easily.
// TODO: Add tests for FilterField.
sf := newStructFilter(typ, name)
return cmp.FilterPath(sf.filter, opt)
}
type structFilter struct {
t reflect.Type // The root struct type to match on
ft fieldTree // Tree of fields to match on
}
func newStructFilter(typ interface{}, names ...string) structFilter {
// TODO: Perhaps allow * as a special identifier to allow ignoring any
// number of path steps until the next field match?
// This could be useful when a concrete struct gets transformed into
// an anonymous struct where it is not possible to specify that by type,
// but the transformer happens to provide guarantees about the names of
// the transformed fields.
t := reflect.TypeOf(typ)
if t == nil || t.Kind() != reflect.Struct {
panic(fmt.Sprintf("%T must be a struct", typ))
}
var ft fieldTree
for _, name := range names {
cname, err := canonicalName(t, name)
if err != nil {
panic(fmt.Sprintf("%s: %v", strings.Join(cname, "."), err))
}
ft.insert(cname)
}
return structFilter{t, ft}
}
func (sf structFilter) filter(p cmp.Path) bool {
for i, ps := range p {
if ps.Type().AssignableTo(sf.t) && sf.ft.matchPrefix(p[i+1:]) {
return true
}
}
return false
}
// fieldTree represents a set of dot-separated identifiers.
//
// For example, inserting the following selectors:
// Foo
// Foo.Bar.Baz
// Foo.Buzz
// Nuka.Cola.Quantum
//
// Results in a tree of the form:
// {sub: {
// "Foo": {ok: true, sub: {
// "Bar": {sub: {
// "Baz": {ok: true},
// }},
// "Buzz": {ok: true},
// }},
// "Nuka": {sub: {
// "Cola": {sub: {
// "Quantum": {ok: true},
// }},
// }},
// }}
type fieldTree struct {
ok bool // Whether this is a specified node
sub map[string]fieldTree // The sub-tree of fields under this node
}
// insert inserts a sequence of field accesses into the tree.
func (ft *fieldTree) insert(cname []string) {
if ft.sub == nil {
ft.sub = make(map[string]fieldTree)
}
if len(cname) == 0 {
ft.ok = true
return
}
sub := ft.sub[cname[0]]
sub.insert(cname[1:])
ft.sub[cname[0]] = sub
}
// matchPrefix reports whether any selector in the fieldTree matches
// the start of path p.
func (ft fieldTree) matchPrefix(p cmp.Path) bool {
for _, ps := range p {
switch ps := ps.(type) {
case cmp.StructField:
ft = ft.sub[ps.Name()]
if ft.ok {
return true
}
if len(ft.sub) == 0 {
return false
}
case cmp.Indirect:
default:
return false
}
}
return false
}
// canonicalName returns a list of identifiers where any struct field access
// through an embedded field is expanded to include the names of the embedded
// types themselves.
//
// For example, suppose field "Foo" is not directly in the parent struct,
// but actually from an embedded struct of type "Bar". Then, the canonical name
// of "Foo" is actually "Bar.Foo".
//
// Suppose field "Foo" is not directly in the parent struct, but actually
// a field in two different embedded structs of types "Bar" and "Baz".
// Then the selector "Foo" causes a panic since it is ambiguous which one it
// refers to. The user must specify either "Bar.Foo" or "Baz.Foo".
func canonicalName(t reflect.Type, sel string) ([]string, error) {
var name string
sel = strings.TrimPrefix(sel, ".")
if sel == "" {
return nil, fmt.Errorf("name must not be empty")
}
if i := strings.IndexByte(sel, '.'); i < 0 {
name, sel = sel, ""
} else {
name, sel = sel[:i], sel[i:]
}
// Type must be a struct or pointer to struct.
if t.Kind() == reflect.Ptr {
t = t.Elem()
}
if t.Kind() != reflect.Struct {
return nil, fmt.Errorf("%v must be a struct", t)
}
// Find the canonical name for this current field name.
// If the field exists in an embedded struct, then it will be expanded.
if !isExported(name) {
// Disallow unexported fields:
// * To discourage people from actually touching unexported fields
// * FieldByName is buggy (https://golang.org/issue/4876)
return []string{name}, fmt.Errorf("name must be exported")
}
sf, ok := t.FieldByName(name)
if !ok {
return []string{name}, fmt.Errorf("does not exist")
}
var ss []string
for i := range sf.Index {
ss = append(ss, t.FieldByIndex(sf.Index[:i+1]).Name)
}
if sel == "" {
return ss, nil
}
ssPost, err := canonicalName(sf.Type, sel)
return append(ss, ssPost...), err
}
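
canonicalName is what lets IgnoreFields accept a short selector for a field promoted through embedding. A hedged sketch (Inner and Outer are illustrative types, not from this diff):

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
)

type Inner struct{ Alpha int }
type Outer struct{ Inner } // Alpha is promoted through the embedded Inner

func main() {
	x := Outer{Inner{Alpha: 1}}
	y := Outer{Inner{Alpha: 2}}
	// The selector "Alpha" is canonicalized to "Inner.Alpha" before matching.
	fmt.Println(cmp.Equal(x, y, cmpopts.IgnoreFields(Outer{}, "Alpha"))) // true
}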


@ -0,0 +1,996 @@
// Copyright 2017, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.
package cmpopts
import (
"bytes"
"fmt"
"io"
"math"
"reflect"
"strings"
"sync"
"testing"
"time"
"github.com/google/go-cmp/cmp"
)
type (
MyInt int
MyFloat float32
MyTime struct{ time.Time }
MyStruct struct {
A, B []int
C, D map[time.Time]string
}
Foo1 struct{ Alpha, Bravo, Charlie int }
Foo2 struct{ *Foo1 }
Foo3 struct{ *Foo2 }
Bar1 struct{ Foo3 }
Bar2 struct {
Bar1
*Foo3
Bravo float32
}
Bar3 struct {
Bar1
Bravo *Bar2
Delta struct{ Echo Foo1 }
*Foo3
Alpha string
}
privateStruct struct{ Public, private int }
PublicStruct struct{ Public, private int }
ParentStruct struct {
*privateStruct
*PublicStruct
Public int
private int
}
Everything struct {
MyInt
MyFloat
MyTime
MyStruct
Bar3
ParentStruct
}
EmptyInterface interface{}
)
func TestOptions(t *testing.T) {
createBar3X := func() *Bar3 {
return &Bar3{
Bar1: Bar1{Foo3{&Foo2{&Foo1{Bravo: 2}}}},
Bravo: &Bar2{
Bar1: Bar1{Foo3{&Foo2{&Foo1{Charlie: 7}}}},
Foo3: &Foo3{&Foo2{&Foo1{Bravo: 5}}},
Bravo: 4,
},
Delta: struct{ Echo Foo1 }{Foo1{Charlie: 3}},
Foo3: &Foo3{&Foo2{&Foo1{Alpha: 1}}},
Alpha: "alpha",
}
}
createBar3Y := func() *Bar3 {
return &Bar3{
Bar1: Bar1{Foo3{&Foo2{&Foo1{Bravo: 3}}}},
Bravo: &Bar2{
Bar1: Bar1{Foo3{&Foo2{&Foo1{Charlie: 8}}}},
Foo3: &Foo3{&Foo2{&Foo1{Bravo: 6}}},
Bravo: 5,
},
Delta: struct{ Echo Foo1 }{Foo1{Charlie: 4}},
Foo3: &Foo3{&Foo2{&Foo1{Alpha: 2}}},
Alpha: "ALPHA",
}
}
tests := []struct {
label string // Test name
x, y interface{} // Input values to compare
opts []cmp.Option // Input options
wantEqual bool // Whether the inputs are equal
wantPanic bool // Whether Equal should panic
reason string // The reason for the expected outcome
}{{
label: "EquateEmpty",
x: []int{},
y: []int(nil),
wantEqual: false,
reason: "not equal because empty non-nil and nil slice differ",
}, {
label: "EquateEmpty",
x: []int{},
y: []int(nil),
opts: []cmp.Option{EquateEmpty()},
wantEqual: true,
reason: "equal because EquateEmpty equates empty slices",
}, {
label: "SortSlices",
x: []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
y: []int{1, 0, 5, 2, 8, 9, 4, 3, 6, 7},
wantEqual: false,
reason: "not equal because element order differs",
}, {
label: "SortSlices",
x: []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
y: []int{1, 0, 5, 2, 8, 9, 4, 3, 6, 7},
opts: []cmp.Option{SortSlices(func(x, y int) bool { return x < y })},
wantEqual: true,
reason: "equal because SortSlices sorts the slices",
}, {
label: "SortSlices",
x: []MyInt{0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
y: []MyInt{1, 0, 5, 2, 8, 9, 4, 3, 6, 7},
opts: []cmp.Option{SortSlices(func(x, y int) bool { return x < y })},
wantEqual: false,
reason: "not equal because MyInt is not the same type as int",
}, {
label: "SortSlices",
x: []float64{0, 1, 1, 2, 2, 2},
y: []float64{2, 0, 2, 1, 2, 1},
opts: []cmp.Option{SortSlices(func(x, y float64) bool { return x < y })},
wantEqual: true,
reason: "equal even when sorted with duplicate elements",
}, {
label: "SortSlices",
x: []float64{0, 1, 1, 2, 2, 2, math.NaN(), 3, 3, 3, 3, 4, 4, 4, 4},
y: []float64{2, 0, 4, 4, 3, math.NaN(), 4, 1, 3, 2, 3, 3, 4, 1, 2},
opts: []cmp.Option{SortSlices(func(x, y float64) bool { return x < y })},
wantPanic: true,
reason: "panics because SortSlices used with non-transitive less function",
}, {
label: "SortSlices",
x: []float64{0, 1, 1, 2, 2, 2, math.NaN(), 3, 3, 3, 3, 4, 4, 4, 4},
y: []float64{2, 0, 4, 4, 3, math.NaN(), 4, 1, 3, 2, 3, 3, 4, 1, 2},
opts: []cmp.Option{SortSlices(func(x, y float64) bool {
return (!math.IsNaN(x) && math.IsNaN(y)) || x < y
})},
wantEqual: false,
reason: "no panics because SortSlices used with valid less function; not equal because NaN != NaN",
}, {
label: "SortSlices+EquateNaNs",
x: []float64{0, 1, 1, 2, 2, 2, math.NaN(), 3, 3, 3, math.NaN(), 3, 4, 4, 4, 4},
y: []float64{2, 0, 4, 4, 3, math.NaN(), 4, 1, 3, 2, 3, 3, 4, 1, math.NaN(), 2},
opts: []cmp.Option{
EquateNaNs(),
SortSlices(func(x, y float64) bool {
return (!math.IsNaN(x) && math.IsNaN(y)) || x < y
}),
},
wantEqual: true,
reason: "no panics because SortSlices used with valid less function; equal because EquateNaNs is used",
}, {
label: "SortMaps",
x: map[time.Time]string{
time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC): "0th birthday",
time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC): "1st birthday",
time.Date(2011, time.November, 10, 23, 0, 0, 0, time.UTC): "2nd birthday",
},
y: map[time.Time]string{
time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).In(time.Local): "0th birthday",
time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC).In(time.Local): "1st birthday",
time.Date(2011, time.November, 10, 23, 0, 0, 0, time.UTC).In(time.Local): "2nd birthday",
},
wantEqual: false,
reason: "not equal because timezones differ",
}, {
label: "SortMaps",
x: map[time.Time]string{
time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC): "0th birthday",
time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC): "1st birthday",
time.Date(2011, time.November, 10, 23, 0, 0, 0, time.UTC): "2nd birthday",
},
y: map[time.Time]string{
time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).In(time.Local): "0th birthday",
time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC).In(time.Local): "1st birthday",
time.Date(2011, time.November, 10, 23, 0, 0, 0, time.UTC).In(time.Local): "2nd birthday",
},
opts: []cmp.Option{SortMaps(func(x, y time.Time) bool { return x.Before(y) })},
wantEqual: true,
reason: "equal because SortMaps flattens to a slice where Time.Equal can be used",
}, {
label: "SortMaps",
x: map[MyTime]string{
{time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC)}: "0th birthday",
{time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC)}: "1st birthday",
{time.Date(2011, time.November, 10, 23, 0, 0, 0, time.UTC)}: "2nd birthday",
},
y: map[MyTime]string{
{time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).In(time.Local)}: "0th birthday",
{time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC).In(time.Local)}: "1st birthday",
{time.Date(2011, time.November, 10, 23, 0, 0, 0, time.UTC).In(time.Local)}: "2nd birthday",
},
opts: []cmp.Option{SortMaps(func(x, y time.Time) bool { return x.Before(y) })},
wantEqual: false,
reason: "not equal because MyTime is not assignable to time.Time",
}, {
label: "SortMaps",
x: map[int]string{-3: "", -2: "", -1: "", 0: "", 1: "", 2: "", 3: ""},
// => {0, 1, 2, 3, -1, -2, -3},
y: map[int]string{300: "", 200: "", 100: "", 0: "", 1: "", 2: "", 3: ""},
// => {0, 1, 2, 3, 100, 200, 300},
opts: []cmp.Option{SortMaps(func(a, b int) bool {
if -10 < a && a <= 0 {
a *= -100
}
if -10 < b && b <= 0 {
b *= -100
}
return a < b
})},
wantEqual: false,
reason: "not equal because values differ even though SortMap provides valid ordering",
}, {
label: "SortMaps",
x: map[int]string{-3: "", -2: "", -1: "", 0: "", 1: "", 2: "", 3: ""},
// => {0, 1, 2, 3, -1, -2, -3},
y: map[int]string{300: "", 200: "", 100: "", 0: "", 1: "", 2: "", 3: ""},
// => {0, 1, 2, 3, 100, 200, 300},
opts: []cmp.Option{
SortMaps(func(x, y int) bool {
if -10 < x && x <= 0 {
x *= -100
}
if -10 < y && y <= 0 {
y *= -100
}
return x < y
}),
cmp.Comparer(func(x, y int) bool {
if -10 < x && x <= 0 {
x *= -100
}
if -10 < y && y <= 0 {
y *= -100
}
return x == y
}),
},
wantEqual: true,
reason: "equal because Comparer used to equate differences",
}, {
label: "SortMaps",
x: map[int]string{-3: "", -2: "", -1: "", 0: "", 1: "", 2: "", 3: ""},
y: map[int]string{},
opts: []cmp.Option{SortMaps(func(x, y int) bool {
return x < y && x >= 0 && y >= 0
})},
wantPanic: true,
reason: "panics because SortMaps used with non-transitive less function",
}, {
label: "SortMaps",
x: map[int]string{-3: "", -2: "", -1: "", 0: "", 1: "", 2: "", 3: ""},
y: map[int]string{},
opts: []cmp.Option{SortMaps(func(x, y int) bool {
return math.Abs(float64(x)) < math.Abs(float64(y))
})},
wantPanic: true,
reason: "panics because SortMaps used with partial less function",
}, {
label: "EquateEmpty+SortSlices+SortMaps",
x: MyStruct{
A: []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
C: map[time.Time]string{
time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC): "0th birthday",
time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC): "1st birthday",
},
D: map[time.Time]string{},
},
y: MyStruct{
A: []int{1, 0, 5, 2, 8, 9, 4, 3, 6, 7},
B: []int{},
C: map[time.Time]string{
time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).In(time.Local): "0th birthday",
time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC).In(time.Local): "1st birthday",
},
},
opts: []cmp.Option{
EquateEmpty(),
SortSlices(func(x, y int) bool { return x < y }),
SortMaps(func(x, y time.Time) bool { return x.Before(y) }),
},
wantEqual: true,
reason: "no panics because EquateEmpty should compose with the sort options",
}, {
label: "EquateApprox",
x: 3.09,
y: 3.10,
wantEqual: false,
reason: "not equal because floats do not exactly matches",
}, {
label: "EquateApprox",
x: 3.09,
y: 3.10,
opts: []cmp.Option{EquateApprox(0, 0)},
wantEqual: false,
reason: "not equal because EquateApprox(0 ,0) is equivalent to using ==",
}, {
label: "EquateApprox",
x: 3.09,
y: 3.10,
opts: []cmp.Option{EquateApprox(0.003, 0.009)},
wantEqual: false,
reason: "not equal because EquateApprox is too strict",
}, {
label: "EquateApprox",
x: 3.09,
y: 3.10,
opts: []cmp.Option{EquateApprox(0, 0.011)},
wantEqual: true,
reason: "equal because margin is loose enough to match",
}, {
label: "EquateApprox",
x: 3.09,
y: 3.10,
opts: []cmp.Option{EquateApprox(0.004, 0)},
wantEqual: true,
reason: "equal because fraction is loose enough to match",
}, {
label: "EquateApprox",
x: 3.09,
y: 3.10,
opts: []cmp.Option{EquateApprox(0.004, 0.011)},
wantEqual: true,
reason: "equal because both the margin and fraction are loose enough to match",
}, {
label: "EquateApprox",
x: float32(3.09),
y: float64(3.10),
opts: []cmp.Option{EquateApprox(0.004, 0)},
wantEqual: false,
reason: "not equal because the types differ",
}, {
label: "EquateApprox",
x: float32(3.09),
y: float32(3.10),
opts: []cmp.Option{EquateApprox(0.004, 0)},
wantEqual: true,
reason: "equal because EquateApprox also applies on float32s",
}, {
label: "EquateApprox",
x: []float64{math.Inf(+1), math.Inf(-1)},
y: []float64{math.Inf(+1), math.Inf(-1)},
opts: []cmp.Option{EquateApprox(0, 1)},
wantEqual: true,
reason: "equal because we fall back on == which matches Inf (EquateApprox does not apply on Inf) ",
}, {
label: "EquateApprox",
x: []float64{math.Inf(+1), -1e100},
y: []float64{+1e100, math.Inf(-1)},
opts: []cmp.Option{EquateApprox(0, 1)},
wantEqual: false,
reason: "not equal because we fall back on == where Inf != 1e100 (EquateApprox does not apply on Inf)",
}, {
label: "EquateApprox",
x: float64(+1e100),
y: float64(-1e100),
opts: []cmp.Option{EquateApprox(math.Inf(+1), 0)},
wantEqual: true,
reason: "equal because infinite fraction matches everything",
}, {
label: "EquateApprox",
x: float64(+1e100),
y: float64(-1e100),
opts: []cmp.Option{EquateApprox(0, math.Inf(+1))},
wantEqual: true,
reason: "equal because infinite margin matches everything",
}, {
label: "EquateApprox",
x: math.Pi,
y: math.Pi,
opts: []cmp.Option{EquateApprox(0, 0)},
wantEqual: true,
reason: "equal because EquateApprox(0, 0) is equivalent to ==",
}, {
label: "EquateApprox",
x: math.Pi,
y: math.Nextafter(math.Pi, math.Inf(+1)),
opts: []cmp.Option{EquateApprox(0, 0)},
wantEqual: false,
reason: "not equal because EquateApprox(0, 0) is equivalent to ==",
}, {
label: "EquateNaNs",
x: []float64{1.0, math.NaN(), math.E, -0.0, +0.0, math.Inf(+1), math.Inf(-1)},
y: []float64{1.0, math.NaN(), math.E, -0.0, +0.0, math.Inf(+1), math.Inf(-1)},
wantEqual: false,
reason: "not equal because NaN != NaN",
}, {
label: "EquateNaNs",
x: []float64{1.0, math.NaN(), math.E, -0.0, +0.0, math.Inf(+1), math.Inf(-1)},
y: []float64{1.0, math.NaN(), math.E, -0.0, +0.0, math.Inf(+1), math.Inf(-1)},
opts: []cmp.Option{EquateNaNs()},
wantEqual: true,
reason: "equal because EquateNaNs allows NaN == NaN",
}, {
label: "EquateNaNs",
x: []float32{1.0, float32(math.NaN()), math.E, -0.0, +0.0},
y: []float32{1.0, float32(math.NaN()), math.E, -0.0, +0.0},
opts: []cmp.Option{EquateNaNs()},
wantEqual: true,
reason: "equal because EquateNaNs operates on float32",
}, {
label: "EquateApprox+EquateNaNs",
x: []float64{1.0, math.NaN(), math.E, -0.0, +0.0, math.Inf(+1), math.Inf(-1), 1.01, 5001},
y: []float64{1.0, math.NaN(), math.E, -0.0, +0.0, math.Inf(+1), math.Inf(-1), 1.02, 5002},
opts: []cmp.Option{
EquateNaNs(),
EquateApprox(0.01, 0),
},
wantEqual: true,
reason: "equal because EquateNaNs and EquateApprox compose together",
}, {
label: "EquateApprox+EquateNaNs",
x: []MyFloat{1.0, MyFloat(math.NaN()), MyFloat(math.E), -0.0, +0.0, MyFloat(math.Inf(+1)), MyFloat(math.Inf(-1)), 1.01, 5001},
y: []MyFloat{1.0, MyFloat(math.NaN()), MyFloat(math.E), -0.0, +0.0, MyFloat(math.Inf(+1)), MyFloat(math.Inf(-1)), 1.02, 5002},
opts: []cmp.Option{
EquateNaNs(),
EquateApprox(0.01, 0),
},
wantEqual: false,
reason: "not equal because EquateApprox and EquateNaNs do not apply on a named type",
}, {
label: "EquateApprox+EquateNaNs+Transform",
x: []MyFloat{1.0, MyFloat(math.NaN()), MyFloat(math.E), -0.0, +0.0, MyFloat(math.Inf(+1)), MyFloat(math.Inf(-1)), 1.01, 5001},
y: []MyFloat{1.0, MyFloat(math.NaN()), MyFloat(math.E), -0.0, +0.0, MyFloat(math.Inf(+1)), MyFloat(math.Inf(-1)), 1.02, 5002},
opts: []cmp.Option{
cmp.Transformer("", func(x MyFloat) float64 { return float64(x) }),
EquateNaNs(),
EquateApprox(0.01, 0),
},
wantEqual: true,
reason: "equal because named type is transformed to float64",
}, {
label: "IgnoreFields",
x: Bar1{Foo3{&Foo2{&Foo1{Alpha: 5}}}},
y: Bar1{Foo3{&Foo2{&Foo1{Alpha: 6}}}},
wantEqual: false,
reason: "not equal because values do not match in deeply embedded field",
}, {
label: "IgnoreFields",
x: Bar1{Foo3{&Foo2{&Foo1{Alpha: 5}}}},
y: Bar1{Foo3{&Foo2{&Foo1{Alpha: 6}}}},
opts: []cmp.Option{IgnoreFields(Bar1{}, "Alpha")},
wantEqual: true,
reason: "equal because IgnoreField ignores deeply embedded field: Alpha",
}, {
label: "IgnoreFields",
x: Bar1{Foo3{&Foo2{&Foo1{Alpha: 5}}}},
y: Bar1{Foo3{&Foo2{&Foo1{Alpha: 6}}}},
opts: []cmp.Option{IgnoreFields(Bar1{}, "Foo1.Alpha")},
wantEqual: true,
reason: "equal because IgnoreField ignores deeply embedded field: Foo1.Alpha",
}, {
label: "IgnoreFields",
x: Bar1{Foo3{&Foo2{&Foo1{Alpha: 5}}}},
y: Bar1{Foo3{&Foo2{&Foo1{Alpha: 6}}}},
opts: []cmp.Option{IgnoreFields(Bar1{}, "Foo2.Alpha")},
wantEqual: true,
reason: "equal because IgnoreField ignores deeply embedded field: Foo2.Alpha",
}, {
label: "IgnoreFields",
x: Bar1{Foo3{&Foo2{&Foo1{Alpha: 5}}}},
y: Bar1{Foo3{&Foo2{&Foo1{Alpha: 6}}}},
opts: []cmp.Option{IgnoreFields(Bar1{}, "Foo3.Alpha")},
wantEqual: true,
reason: "equal because IgnoreField ignores deeply embedded field: Foo3.Alpha",
}, {
label: "IgnoreFields",
x: Bar1{Foo3{&Foo2{&Foo1{Alpha: 5}}}},
y: Bar1{Foo3{&Foo2{&Foo1{Alpha: 6}}}},
opts: []cmp.Option{IgnoreFields(Bar1{}, "Foo3.Foo2.Alpha")},
wantEqual: true,
reason: "equal because IgnoreField ignores deeply embedded field: Foo3.Foo2.Alpha",
}, {
label: "IgnoreFields",
x: createBar3X(),
y: createBar3Y(),
wantEqual: false,
reason: "not equal because many deeply nested or embedded fields differ",
}, {
label: "IgnoreFields",
x: createBar3X(),
y: createBar3Y(),
opts: []cmp.Option{IgnoreFields(Bar3{}, "Bar1", "Bravo", "Delta", "Foo3", "Alpha")},
wantEqual: true,
reason: "equal because IgnoreFields ignores fields at the highest levels",
}, {
label: "IgnoreFields",
x: createBar3X(),
y: createBar3Y(),
opts: []cmp.Option{
IgnoreFields(Bar3{},
"Bar1.Foo3.Bravo",
"Bravo.Bar1.Foo3.Foo2.Foo1.Charlie",
"Bravo.Foo3.Foo2.Foo1.Bravo",
"Bravo.Bravo",
"Delta.Echo.Charlie",
"Foo3.Foo2.Foo1.Alpha",
"Alpha",
),
},
wantEqual: true,
reason: "equal because IgnoreFields ignores fields using fully-qualified field",
}, {
label: "IgnoreFields",
x: createBar3X(),
y: createBar3Y(),
opts: []cmp.Option{
IgnoreFields(Bar3{},
"Bar1.Foo3.Bravo",
"Bravo.Foo3.Foo2.Foo1.Bravo",
"Bravo.Bravo",
"Delta.Echo.Charlie",
"Foo3.Foo2.Foo1.Alpha",
"Alpha",
),
},
wantEqual: false,
reason: "not equal because one fully-qualified field is not ignored: Bravo.Bar1.Foo3.Foo2.Foo1.Charlie",
}, {
label: "IgnoreFields",
x: createBar3X(),
y: createBar3Y(),
opts: []cmp.Option{IgnoreFields(Bar3{}, "Bar1", "Bravo", "Delta", "Alpha")},
wantEqual: false,
reason: "not equal because highest-level field is not ignored: Foo3",
}, {
label: "IgnoreTypes",
x: []interface{}{5, "same"},
y: []interface{}{6, "same"},
wantEqual: false,
reason: "not equal because 5 != 6",
}, {
label: "IgnoreTypes",
x: []interface{}{5, "same"},
y: []interface{}{6, "same"},
opts: []cmp.Option{IgnoreTypes(0)},
wantEqual: true,
reason: "equal because ints are ignored",
}, {
label: "IgnoreTypes+IgnoreInterfaces",
x: []interface{}{5, "same", new(bytes.Buffer)},
y: []interface{}{6, "same", new(bytes.Buffer)},
opts: []cmp.Option{IgnoreTypes(0)},
wantPanic: true,
reason: "panics because bytes.Buffer has unexported fields",
}, {
label: "IgnoreTypes+IgnoreInterfaces",
x: []interface{}{5, "same", new(bytes.Buffer)},
y: []interface{}{6, "diff", new(bytes.Buffer)},
opts: []cmp.Option{
IgnoreTypes(0, ""),
IgnoreInterfaces(struct{ io.Reader }{}),
},
wantEqual: true,
reason: "equal because bytes.Buffer is ignored by match on interface type",
}, {
label: "IgnoreTypes+IgnoreInterfaces",
x: []interface{}{5, "same", new(bytes.Buffer)},
y: []interface{}{6, "same", new(bytes.Buffer)},
opts: []cmp.Option{
IgnoreTypes(0, ""),
IgnoreInterfaces(struct {
io.Reader
io.Writer
fmt.Stringer
}{}),
},
wantEqual: true,
reason: "equal because bytes.Buffer is ignored by match on multiple interface types",
}, {
label: "IgnoreInterfaces",
x: struct{ mu sync.Mutex }{},
y: struct{ mu sync.Mutex }{},
wantPanic: true,
reason: "panics because sync.Mutex has unexported fields",
}, {
label: "IgnoreInterfaces",
x: struct{ mu sync.Mutex }{},
y: struct{ mu sync.Mutex }{},
opts: []cmp.Option{IgnoreInterfaces(struct{ sync.Locker }{})},
wantEqual: true,
reason: "equal because IgnoreInterfaces applies on values (with pointer receiver)",
}, {
label: "IgnoreInterfaces",
x: struct{ mu *sync.Mutex }{},
y: struct{ mu *sync.Mutex }{},
opts: []cmp.Option{IgnoreInterfaces(struct{ sync.Locker }{})},
wantEqual: true,
reason: "equal because IgnoreInterfaces applies on pointers",
}, {
label: "IgnoreUnexported",
x: ParentStruct{Public: 1, private: 2},
y: ParentStruct{Public: 1, private: -2},
opts: []cmp.Option{cmp.AllowUnexported(ParentStruct{})},
wantEqual: false,
reason: "not equal because ParentStruct.private differs with AllowUnexported",
}, {
label: "IgnoreUnexported",
x: ParentStruct{Public: 1, private: 2},
y: ParentStruct{Public: 1, private: -2},
opts: []cmp.Option{IgnoreUnexported(ParentStruct{})},
wantEqual: true,
reason: "equal because IgnoreUnexported ignored ParentStruct.private",
}, {
label: "IgnoreUnexported",
x: ParentStruct{Public: 1, private: 2, PublicStruct: &PublicStruct{Public: 3, private: 4}},
y: ParentStruct{Public: 1, private: -2, PublicStruct: &PublicStruct{Public: 3, private: 4}},
opts: []cmp.Option{
cmp.AllowUnexported(PublicStruct{}),
IgnoreUnexported(ParentStruct{}),
},
wantEqual: true,
reason: "equal because ParentStruct.private is ignored",
}, {
label: "IgnoreUnexported",
x: ParentStruct{Public: 1, private: 2, PublicStruct: &PublicStruct{Public: 3, private: 4}},
y: ParentStruct{Public: 1, private: -2, PublicStruct: &PublicStruct{Public: 3, private: -4}},
opts: []cmp.Option{
cmp.AllowUnexported(PublicStruct{}),
IgnoreUnexported(ParentStruct{}),
},
wantEqual: false,
reason: "not equal because ParentStruct.PublicStruct.private differs and not ignored by IgnoreUnexported(ParentStruct{})",
}, {
label: "IgnoreUnexported",
x: ParentStruct{Public: 1, private: 2, PublicStruct: &PublicStruct{Public: 3, private: 4}},
y: ParentStruct{Public: 1, private: -2, PublicStruct: &PublicStruct{Public: 3, private: -4}},
opts: []cmp.Option{
IgnoreUnexported(ParentStruct{}, PublicStruct{}),
},
wantEqual: true,
reason: "equal because both ParentStruct.PublicStruct and ParentStruct.PublicStruct.private are ignored",
}, {
label: "IgnoreUnexported",
x: ParentStruct{Public: 1, private: 2, privateStruct: &privateStruct{Public: 3, private: 4}},
y: ParentStruct{Public: 1, private: 2, privateStruct: &privateStruct{Public: -3, private: -4}},
opts: []cmp.Option{
cmp.AllowUnexported(privateStruct{}, PublicStruct{}, ParentStruct{}),
},
wantEqual: false,
reason: "not equal since ParentStruct.privateStruct differs",
}, {
label: "IgnoreUnexported",
x: ParentStruct{Public: 1, private: 2, privateStruct: &privateStruct{Public: 3, private: 4}},
y: ParentStruct{Public: 1, private: 2, privateStruct: &privateStruct{Public: -3, private: -4}},
opts: []cmp.Option{
cmp.AllowUnexported(privateStruct{}, PublicStruct{}),
IgnoreUnexported(ParentStruct{}),
},
wantEqual: true,
reason: "equal because ParentStruct.privateStruct ignored by IgnoreUnexported(ParentStruct{})",
}, {
label: "IgnoreUnexported",
x: ParentStruct{Public: 1, private: 2, privateStruct: &privateStruct{Public: 3, private: 4}},
y: ParentStruct{Public: 1, private: 2, privateStruct: &privateStruct{Public: 3, private: -4}},
opts: []cmp.Option{
cmp.AllowUnexported(PublicStruct{}, ParentStruct{}),
IgnoreUnexported(privateStruct{}),
},
wantEqual: true,
reason: "equal because privateStruct.private ignored by IgnoreUnexported(privateStruct{})",
}, {
label: "IgnoreUnexported",
x: ParentStruct{Public: 1, private: 2, privateStruct: &privateStruct{Public: 3, private: 4}},
y: ParentStruct{Public: 1, private: 2, privateStruct: &privateStruct{Public: -3, private: -4}},
opts: []cmp.Option{
cmp.AllowUnexported(PublicStruct{}, ParentStruct{}),
IgnoreUnexported(privateStruct{}),
},
wantEqual: false,
reason: "not equal because privateStruct.Public differs and not ignored by IgnoreUnexported(privateStruct{})",
}, {
label: "IgnoreFields+IgnoreTypes+IgnoreUnexported",
x: &Everything{
MyInt: 5,
MyFloat: 3.3,
MyTime: MyTime{time.Now()},
Bar3: *createBar3X(),
ParentStruct: ParentStruct{
Public: 1, private: 2, PublicStruct: &PublicStruct{Public: 3, private: 4},
},
},
y: &Everything{
MyInt: -5,
MyFloat: 3.3,
MyTime: MyTime{time.Now()},
Bar3: *createBar3Y(),
ParentStruct: ParentStruct{
Public: 1, private: -2, PublicStruct: &PublicStruct{Public: -3, private: -4},
},
},
opts: []cmp.Option{
IgnoreFields(Everything{}, "MyTime", "Bar3.Foo3"),
IgnoreFields(Bar3{}, "Bar1", "Bravo", "Delta", "Alpha"),
IgnoreTypes(MyInt(0), PublicStruct{}),
IgnoreUnexported(ParentStruct{}),
},
wantEqual: true,
reason: "equal because all Ignore options can be composed together",
}}
for _, tt := range tests {
tRun(t, tt.label, func(t *testing.T) {
var gotEqual bool
var gotPanic string
func() {
defer func() {
if ex := recover(); ex != nil {
gotPanic = fmt.Sprint(ex)
}
}()
gotEqual = cmp.Equal(tt.x, tt.y, tt.opts...)
}()
switch {
case gotPanic == "" && tt.wantPanic:
t.Errorf("expected Equal panic\nreason: %s", tt.reason)
case gotPanic != "" && !tt.wantPanic:
t.Errorf("unexpected Equal panic: got %v\nreason: %v", gotPanic, tt.reason)
case gotEqual != tt.wantEqual:
t.Errorf("Equal = %v, want %v\nreason: %v", gotEqual, tt.wantEqual, tt.reason)
}
})
}
}
func TestPanic(t *testing.T) {
args := func(x ...interface{}) []interface{} { return x }
tests := []struct {
label string // Test name
fnc interface{} // Option function to call
args []interface{} // Arguments to pass in
wantPanic string // Expected panic message
reason string // The reason for the expected outcome
}{{
label: "EquateApprox",
fnc: EquateApprox,
args: args(0.0, 0.0),
reason: "zero margin and fraction is equivalent to exact equality",
}, {
label: "EquateApprox",
fnc: EquateApprox,
args: args(-0.1, 0.0),
wantPanic: "margin or fraction must be a non-negative number",
reason: "negative inputs are invalid",
}, {
label: "EquateApprox",
fnc: EquateApprox,
args: args(0.0, -0.1),
wantPanic: "margin or fraction must be a non-negative number",
reason: "negative inputs are invalid",
}, {
label: "EquateApprox",
fnc: EquateApprox,
args: args(math.NaN(), 0.0),
wantPanic: "margin or fraction must be a non-negative number",
reason: "NaN inputs are invalid",
}, {
label: "EquateApprox",
fnc: EquateApprox,
args: args(1.0, 0.0),
reason: "fraction of 1.0 or greater is valid",
}, {
label: "EquateApprox",
fnc: EquateApprox,
args: args(0.0, math.Inf(+1)),
reason: "margin of infinity is valid",
}, {
label: "SortSlices",
fnc: SortSlices,
args: args(strings.Compare),
wantPanic: "invalid less function",
reason: "func(x, y string) int is wrong signature for less",
}, {
label: "SortSlices",
fnc: SortSlices,
args: args((func(_, _ int) bool)(nil)),
wantPanic: "invalid less function",
reason: "nil value is not valid",
}, {
label: "SortMaps",
fnc: SortMaps,
args: args(strings.Compare),
wantPanic: "invalid less function",
reason: "func(x, y string) int is wrong signature for less",
}, {
label: "SortMaps",
fnc: SortMaps,
args: args((func(_, _ int) bool)(nil)),
wantPanic: "invalid less function",
reason: "nil value is not valid",
}, {
label: "IgnoreFields",
fnc: IgnoreFields,
args: args(Foo1{}, ""),
wantPanic: "name must not be empty",
reason: "empty selector is invalid",
}, {
label: "IgnoreFields",
fnc: IgnoreFields,
args: args(Foo1{}, "."),
wantPanic: "name must not be empty",
reason: "single dot selector is invalid",
}, {
label: "IgnoreFields",
fnc: IgnoreFields,
args: args(Foo1{}, ".Alpha"),
reason: "dot-prefix is okay since Foo1.Alpha reads naturally",
}, {
label: "IgnoreFields",
fnc: IgnoreFields,
args: args(Foo1{}, "Alpha."),
wantPanic: "name must not be empty",
reason: "dot-suffix is invalid",
}, {
label: "IgnoreFields",
fnc: IgnoreFields,
args: args(Foo1{}, "Alpha "),
wantPanic: "does not exist",
reason: "identifiers must not have spaces",
}, {
label: "IgnoreFields",
fnc: IgnoreFields,
args: args(Foo1{}, "Zulu"),
wantPanic: "does not exist",
reason: "name of non-existent field is invalid",
}, {
label: "IgnoreFields",
fnc: IgnoreFields,
args: args(Foo1{}, "Alpha.NoExist"),
wantPanic: "must be a struct",
reason: "cannot select into a non-struct",
}, {
label: "IgnoreFields",
fnc: IgnoreFields,
args: args(&Foo1{}, "Alpha"),
wantPanic: "must be a struct",
reason: "the type must be a struct (not pointer to a struct)",
}, {
label: "IgnoreFields",
fnc: IgnoreFields,
args: args(Foo1{}, "unexported"),
wantPanic: "name must be exported",
reason: "unexported fields must not be specified",
}, {
label: "IgnoreTypes",
fnc: IgnoreTypes,
reason: "empty input is valid",
}, {
label: "IgnoreTypes",
fnc: IgnoreTypes,
args: args(nil),
wantPanic: "cannot determine type",
reason: "input must not be nil value",
}, {
label: "IgnoreTypes",
fnc: IgnoreTypes,
args: args(0, 0, 0),
reason: "duplicate inputs of the same type is valid",
}, {
label: "IgnoreInterfaces",
fnc: IgnoreInterfaces,
args: args(nil),
wantPanic: "input must be an anonymous struct",
reason: "input must not be nil value",
}, {
label: "IgnoreInterfaces",
fnc: IgnoreInterfaces,
args: args(Foo1{}),
wantPanic: "input must be an anonymous struct",
reason: "input must not be a named struct type",
}, {
label: "IgnoreInterfaces",
fnc: IgnoreInterfaces,
args: args(struct{ _ io.Reader }{}),
wantPanic: "struct cannot have named fields",
reason: "input must not have named fields",
}, {
label: "IgnoreInterfaces",
fnc: IgnoreInterfaces,
args: args(struct{ Foo1 }{}),
wantPanic: "embedded field must be an interface type",
reason: "field types must be interfaces",
}, {
label: "IgnoreInterfaces",
fnc: IgnoreInterfaces,
args: args(struct{ EmptyInterface }{}),
wantPanic: "cannot ignore empty interface",
reason: "field types must not be the empty interface",
}, {
label: "IgnoreInterfaces",
fnc: IgnoreInterfaces,
args: args(struct {
io.Reader
io.Writer
io.Closer
io.ReadWriteCloser
}{}),
reason: "multiple interfaces may be specified, even if they overlap",
}, {
label: "IgnoreUnexported",
fnc: IgnoreUnexported,
reason: "empty input is valid",
}, {
label: "IgnoreUnexported",
fnc: IgnoreUnexported,
args: args(nil),
wantPanic: "invalid struct type",
reason: "input must not be nil value",
}, {
label: "IgnoreUnexported",
fnc: IgnoreUnexported,
args: args(&Foo1{}),
wantPanic: "invalid struct type",
reason: "input must be a struct type (not a pointer to a struct)",
}, {
label: "IgnoreUnexported",
fnc: IgnoreUnexported,
args: args(Foo1{}, struct{ x, X int }{}),
reason: "input may be named or unnamed structs",
}}
for _, tt := range tests {
tRun(t, tt.label, func(t *testing.T) {
// Prepare function arguments.
vf := reflect.ValueOf(tt.fnc)
var vargs []reflect.Value
for i, arg := range tt.args {
if arg == nil {
tf := vf.Type()
if i == tf.NumIn()-1 && tf.IsVariadic() {
vargs = append(vargs, reflect.Zero(tf.In(i).Elem()))
} else {
vargs = append(vargs, reflect.Zero(tf.In(i)))
}
} else {
vargs = append(vargs, reflect.ValueOf(arg))
}
}
// Call the function and capture any panics.
var gotPanic string
func() {
defer func() {
if ex := recover(); ex != nil {
if s, ok := ex.(string); ok {
gotPanic = s
} else {
panic(ex)
}
}
}()
vf.Call(vargs)
}()
switch {
case tt.wantPanic == "" && gotPanic != "":
t.Errorf("unexpected panic message: %s\nreason: %s", gotPanic, tt.reason)
case tt.wantPanic != "" && !strings.Contains(gotPanic, tt.wantPanic):
t.Errorf("panic message:\ngot: %s\nwant: %s\nreason: %s", gotPanic, tt.wantPanic, tt.reason)
}
})
}
}
// TODO: Delete this hack when we drop Go1.6 support.
func tRun(t *testing.T, name string, f func(t *testing.T)) {
type runner interface {
Run(string, func(t *testing.T)) bool
}
var ti interface{} = t
if r, ok := ti.(runner); ok {
r.Run(name, f)
} else {
t.Logf("Test: %s", name)
f(t)
}
}
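
Both test functions above capture expected panics with the same defer/recover idiom. Isolated into a helper (a generic sketch, not code from this diff), the pattern looks like this:

package main

import "fmt"

// callAndRecover runs f and returns the panic message, or "" if f returns normally.
func callAndRecover(f func()) (panicMsg string) {
	defer func() {
		if ex := recover(); ex != nil {
			panicMsg = fmt.Sprint(ex)
		}
	}()
	f()
	return ""
}

func main() {
	fmt.Println(callAndRecover(func() { panic("boom") })) // boom
	fmt.Println(callAndRecover(func() {}))                // (empty)
}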

529
vendor/github.com/google/go-cmp/cmp/compare.go generated vendored Normal file

@ -0,0 +1,529 @@
// Copyright 2017, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.
// Package cmp determines equality of values.
//
// This package is intended to be a more powerful and safer alternative to
// reflect.DeepEqual for comparing whether two values are semantically equal.
//
// The primary features of cmp are:
//
// • When the default behavior of equality does not suit the needs of the test,
// custom equality functions can override the equality operation.
// For example, an equality function may report floats as equal so long as they
// are within some tolerance of each other.
//
// • Types that have an Equal method may use that method to determine equality.
// This allows package authors to determine the equality operation for the types
// that they define.
//
// • If no custom equality functions are used and no Equal method is defined,
// equality is determined by recursively comparing the primitive kinds on both
// values, much like reflect.DeepEqual. Unlike reflect.DeepEqual, unexported
// fields are not compared by default; they result in panics unless suppressed
// by using an Ignore option (see cmpopts.IgnoreUnexported) or explicitly compared
// using the AllowUnexported option.
package cmp
import (
"fmt"
"reflect"
"github.com/google/go-cmp/cmp/internal/diff"
"github.com/google/go-cmp/cmp/internal/function"
"github.com/google/go-cmp/cmp/internal/value"
)
// BUG: Maps with keys containing NaN values cannot be properly compared due to
// the reflection package's inability to retrieve such entries. Equal will panic
// anytime it comes across a NaN key, but this behavior may change.
//
// See https://golang.org/issue/11104 for more details.
var nothing = reflect.Value{}
// Equal reports whether x and y are equal by recursively applying the
// following rules in the given order to x and y and all of their sub-values:
//
// • If two values are not of the same type, then they are never equal
// and the overall result is false.
//
// • Let S be the set of all Ignore, Transformer, and Comparer options that
// remain after applying all path filters, value filters, and type filters.
// If at least one Ignore exists in S, then the comparison is ignored.
// If the number of Transformer and Comparer options in S is greater than one,
// then Equal panics because it is ambiguous which option to use.
// If S contains a single Transformer, then use that to transform the current
// values and recursively call Equal on the output values.
// If S contains a single Comparer, then use that to compare the current values.
// Otherwise, evaluation proceeds to the next rule.
//
// • If the values have an Equal method of the form "(T) Equal(T) bool" or
// "(T) Equal(I) bool" where T is assignable to I, then use the result of
// x.Equal(y). Otherwise, no such method exists and evaluation proceeds to
// the next rule.
//
// • Lastly, try to compare x and y based on their basic kinds.
// Simple kinds like booleans, integers, floats, complex numbers, strings, and
// channels are compared using the equivalent of the == operator in Go.
// Functions are only equal if they are both nil, otherwise they are unequal.
// Pointers are equal if the underlying values they point to are also equal.
// Interfaces are equal if their underlying concrete values are also equal.
//
// Structs are equal if all of their fields are equal. If a struct contains
// unexported fields, Equal panics unless the AllowUnexported option is used or
// an Ignore option (e.g., cmpopts.IgnoreUnexported) ignores that field.
//
// Arrays, slices, and maps are equal if they are both nil or both non-nil
// with the same length and the elements at each index or key are equal.
// Note that a non-nil empty slice and a nil slice are not equal.
// To equate empty slices and maps, consider using cmpopts.EquateEmpty.
// Map keys are equal according to the == operator.
// To use custom comparisons for map keys, consider using cmpopts.SortMaps.
func Equal(x, y interface{}, opts ...Option) bool {
s := newState(opts)
s.compareAny(reflect.ValueOf(x), reflect.ValueOf(y))
return s.result.Equal()
}
// Diff returns a human-readable report of the differences between two values.
// It returns an empty string if and only if Equal returns true for the same
// input values and options. The output string will use the "-" symbol to
// indicate elements removed from x, and the "+" symbol to indicate elements
// added to y.
//
// Do not depend on this output being stable.
func Diff(x, y interface{}, opts ...Option) string {
r := new(defaultReporter)
opts = Options{Options(opts), r}
eq := Equal(x, y, opts...)
d := r.String()
if (d == "") != eq {
panic("inconsistent difference and equality results")
}
return d
}
type state struct {
// These fields represent the "comparison state".
// Calling statelessCompare must not result in observable changes to these.
result diff.Result // The current result of comparison
curPath Path // The current path in the value tree
reporter reporter // Optional reporter used for difference formatting
// dynChecker triggers pseudo-random checks for option correctness.
// It is safe for statelessCompare to mutate this value.
dynChecker dynChecker
// These fields, once set by processOption, will not change.
exporters map[reflect.Type]bool // Set of structs with unexported field visibility
opts Options // List of all fundamental and filter options
}
func newState(opts []Option) *state {
s := new(state)
for _, opt := range opts {
s.processOption(opt)
}
return s
}
func (s *state) processOption(opt Option) {
switch opt := opt.(type) {
case nil:
case Options:
for _, o := range opt {
s.processOption(o)
}
case coreOption:
type filtered interface {
isFiltered() bool
}
if fopt, ok := opt.(filtered); ok && !fopt.isFiltered() {
panic(fmt.Sprintf("cannot use an unfiltered option: %v", opt))
}
s.opts = append(s.opts, opt)
case visibleStructs:
if s.exporters == nil {
s.exporters = make(map[reflect.Type]bool)
}
for t := range opt {
s.exporters[t] = true
}
case reporter:
if s.reporter != nil {
panic("difference reporter already registered")
}
s.reporter = opt
default:
panic(fmt.Sprintf("unknown option %T", opt))
}
}
// statelessCompare compares two values and returns the result.
// This function is stateless in that it does not alter the current result,
// or output to any registered reporters.
func (s *state) statelessCompare(vx, vy reflect.Value) diff.Result {
// We do not save and restore the curPath because all of the compareX
// methods should properly push and pop from the path.
// It is an implementation bug if the contents of curPath differs from
// when calling this function to when returning from it.
oldResult, oldReporter := s.result, s.reporter
s.result = diff.Result{} // Reset result
s.reporter = nil // Remove reporter to avoid spurious printouts
s.compareAny(vx, vy)
res := s.result
s.result, s.reporter = oldResult, oldReporter
return res
}
func (s *state) compareAny(vx, vy reflect.Value) {
// TODO: Support cyclic data structures.
// Rule 0: Differing types are never equal.
if !vx.IsValid() || !vy.IsValid() {
s.report(vx.IsValid() == vy.IsValid(), vx, vy)
return
}
if vx.Type() != vy.Type() {
s.report(false, vx, vy) // Possible for path to be empty
return
}
t := vx.Type()
if len(s.curPath) == 0 {
s.curPath.push(&pathStep{typ: t})
defer s.curPath.pop()
}
vx, vy = s.tryExporting(vx, vy)
// Rule 1: Check whether an option applies on this node in the value tree.
if s.tryOptions(vx, vy, t) {
return
}
// Rule 2: Check whether the type has a valid Equal method.
if s.tryMethod(vx, vy, t) {
return
}
// Rule 3: Recursively descend into each value's underlying kind.
switch t.Kind() {
case reflect.Bool:
s.report(vx.Bool() == vy.Bool(), vx, vy)
return
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
s.report(vx.Int() == vy.Int(), vx, vy)
return
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
s.report(vx.Uint() == vy.Uint(), vx, vy)
return
case reflect.Float32, reflect.Float64:
s.report(vx.Float() == vy.Float(), vx, vy)
return
case reflect.Complex64, reflect.Complex128:
s.report(vx.Complex() == vy.Complex(), vx, vy)
return
case reflect.String:
s.report(vx.String() == vy.String(), vx, vy)
return
case reflect.Chan, reflect.UnsafePointer:
s.report(vx.Pointer() == vy.Pointer(), vx, vy)
return
case reflect.Func:
s.report(vx.IsNil() && vy.IsNil(), vx, vy)
return
case reflect.Ptr:
if vx.IsNil() || vy.IsNil() {
s.report(vx.IsNil() && vy.IsNil(), vx, vy)
return
}
s.curPath.push(&indirect{pathStep{t.Elem()}})
defer s.curPath.pop()
s.compareAny(vx.Elem(), vy.Elem())
return
case reflect.Interface:
if vx.IsNil() || vy.IsNil() {
s.report(vx.IsNil() && vy.IsNil(), vx, vy)
return
}
if vx.Elem().Type() != vy.Elem().Type() {
s.report(false, vx.Elem(), vy.Elem())
return
}
s.curPath.push(&typeAssertion{pathStep{vx.Elem().Type()}})
defer s.curPath.pop()
s.compareAny(vx.Elem(), vy.Elem())
return
case reflect.Slice:
if vx.IsNil() || vy.IsNil() {
s.report(vx.IsNil() && vy.IsNil(), vx, vy)
return
}
fallthrough
case reflect.Array:
s.compareArray(vx, vy, t)
return
case reflect.Map:
s.compareMap(vx, vy, t)
return
case reflect.Struct:
s.compareStruct(vx, vy, t)
return
default:
panic(fmt.Sprintf("%v kind not handled", t.Kind()))
}
}
func (s *state) tryExporting(vx, vy reflect.Value) (reflect.Value, reflect.Value) {
if sf, ok := s.curPath[len(s.curPath)-1].(*structField); ok && sf.unexported {
if sf.force {
// Use unsafe pointer arithmetic to get read-write access to an
// unexported field in the struct.
vx = unsafeRetrieveField(sf.pvx, sf.field)
vy = unsafeRetrieveField(sf.pvy, sf.field)
} else {
// We are not allowed to export the value, so invalidate them
// so that tryOptions can panic later if not explicitly ignored.
vx = nothing
vy = nothing
}
}
return vx, vy
}
func (s *state) tryOptions(vx, vy reflect.Value, t reflect.Type) bool {
// If there were no FilterValues, we will not detect invalid inputs,
// so manually check for them and append invalid if necessary.
// We still evaluate the options since an ignore can override invalid.
opts := s.opts
if !vx.IsValid() || !vy.IsValid() {
opts = Options{opts, invalid{}}
}
// Evaluate all filters and apply the remaining options.
if opt := opts.filter(s, vx, vy, t); opt != nil {
return opt.apply(s, vx, vy)
}
return false
}
func (s *state) tryMethod(vx, vy reflect.Value, t reflect.Type) bool {
// Check if this type even has an Equal method.
m, ok := t.MethodByName("Equal")
if !ok || !function.IsType(m.Type, function.EqualAssignable) {
return false
}
eq := s.callTTBFunc(m.Func, vx, vy)
s.report(eq, vx, vy)
return true
}
func (s *state) callTRFunc(f, v reflect.Value) reflect.Value {
if !s.dynChecker.Next() {
return f.Call([]reflect.Value{v})[0]
}
// Run the function twice and ensure that we get the same results back.
// We run in goroutines so that the race detector (if enabled) can detect
// unsafe mutations to the input.
c := make(chan reflect.Value)
go detectRaces(c, f, v)
want := f.Call([]reflect.Value{v})[0]
if got := <-c; !s.statelessCompare(got, want).Equal() {
// To avoid false-positives with non-reflexive equality operations,
// we sanity check whether a value is equal to itself.
if !s.statelessCompare(want, want).Equal() {
return want
}
fn := getFuncName(f.Pointer())
panic(fmt.Sprintf("non-deterministic function detected: %s", fn))
}
return want
}
func (s *state) callTTBFunc(f, x, y reflect.Value) bool {
if !s.dynChecker.Next() {
return f.Call([]reflect.Value{x, y})[0].Bool()
}
// Swapping the input arguments is sufficient to check that
// f is symmetric and deterministic.
// We run in goroutines so that the race detector (if enabled) can detect
// unsafe mutations to the input.
c := make(chan reflect.Value)
go detectRaces(c, f, y, x)
want := f.Call([]reflect.Value{x, y})[0].Bool()
if got := <-c; !got.IsValid() || got.Bool() != want {
fn := getFuncName(f.Pointer())
panic(fmt.Sprintf("non-deterministic or non-symmetric function detected: %s", fn))
}
return want
}
func detectRaces(c chan<- reflect.Value, f reflect.Value, vs ...reflect.Value) {
var ret reflect.Value
defer func() {
recover() // Ignore panics, let the other call to f panic instead
c <- ret
}()
ret = f.Call(vs)[0]
}
func (s *state) compareArray(vx, vy reflect.Value, t reflect.Type) {
step := &sliceIndex{pathStep{t.Elem()}, 0, 0}
s.curPath.push(step)
// Compute an edit-script for slices vx and vy.
eq, es := diff.Difference(vx.Len(), vy.Len(), func(ix, iy int) diff.Result {
step.xkey, step.ykey = ix, iy
return s.statelessCompare(vx.Index(ix), vy.Index(iy))
})
// Equal or no edit-script, so report entire slices as is.
if eq || es == nil {
s.curPath.pop() // Pop first since we are reporting the whole slice
s.report(eq, vx, vy)
return
}
// Replay the edit-script.
var ix, iy int
for _, e := range es {
switch e {
case diff.UniqueX:
step.xkey, step.ykey = ix, -1
s.report(false, vx.Index(ix), nothing)
ix++
case diff.UniqueY:
step.xkey, step.ykey = -1, iy
s.report(false, nothing, vy.Index(iy))
iy++
default:
step.xkey, step.ykey = ix, iy
if e == diff.Identity {
s.report(true, vx.Index(ix), vy.Index(iy))
} else {
s.compareAny(vx.Index(ix), vy.Index(iy))
}
ix++
iy++
}
}
s.curPath.pop()
return
}
func (s *state) compareMap(vx, vy reflect.Value, t reflect.Type) {
if vx.IsNil() || vy.IsNil() {
s.report(vx.IsNil() && vy.IsNil(), vx, vy)
return
}
// We combine and sort the two map keys so that we can perform the
// comparisons in a deterministic order.
step := &mapIndex{pathStep: pathStep{t.Elem()}}
s.curPath.push(step)
defer s.curPath.pop()
for _, k := range value.SortKeys(append(vx.MapKeys(), vy.MapKeys()...)) {
step.key = k
vvx := vx.MapIndex(k)
vvy := vy.MapIndex(k)
switch {
case vvx.IsValid() && vvy.IsValid():
s.compareAny(vvx, vvy)
case vvx.IsValid() && !vvy.IsValid():
s.report(false, vvx, nothing)
case !vvx.IsValid() && vvy.IsValid():
s.report(false, nothing, vvy)
default:
// It is possible for both vvx and vvy to be invalid if the
// key contained a NaN value in it. There is no way in
// reflection to be able to retrieve these values.
// See https://golang.org/issue/11104
panic(fmt.Sprintf("%#v has map key with NaNs", s.curPath))
}
}
}
func (s *state) compareStruct(vx, vy reflect.Value, t reflect.Type) {
var vax, vay reflect.Value // Addressable versions of vx and vy
step := &structField{}
s.curPath.push(step)
defer s.curPath.pop()
for i := 0; i < t.NumField(); i++ {
vvx := vx.Field(i)
vvy := vy.Field(i)
step.typ = t.Field(i).Type
step.name = t.Field(i).Name
step.idx = i
step.unexported = !isExported(step.name)
if step.unexported {
// Defer checking of unexported fields until later to give an
// Ignore a chance to ignore the field.
if !vax.IsValid() || !vay.IsValid() {
// For unsafeRetrieveField to work, the parent struct must
// be addressable. Create a new copy of the values if
// necessary to make them addressable.
vax = makeAddressable(vx)
vay = makeAddressable(vy)
}
step.force = s.exporters[t]
step.pvx = vax
step.pvy = vay
step.field = t.Field(i)
}
s.compareAny(vvx, vvy)
}
}
// report records the result of a single comparison.
// It also calls Report if any reporter is registered.
func (s *state) report(eq bool, vx, vy reflect.Value) {
if eq {
s.result.NSame++
} else {
s.result.NDiff++
}
if s.reporter != nil {
s.reporter.Report(vx, vy, eq, s.curPath)
}
}
// dynChecker tracks the state needed to periodically perform checks that
// user provided functions are symmetric and deterministic.
// The zero value is safe for immediate use.
type dynChecker struct{ curr, next int }
// Next increments the state and reports whether a check should be performed.
//
// Checks occur every Nth function call, where N is a triangular number:
// 0 1 3 6 10 15 21 28 36 45 55 66 78 91 105 120 136 153 171 190 ...
// See https://en.wikipedia.org/wiki/Triangular_number
//
// This sequence ensures that the cost of checks drops significantly as
// the number of function calls grows larger.
func (dc *dynChecker) Next() bool {
ok := dc.curr == dc.next
if ok {
dc.curr = 0
dc.next++
}
dc.curr++
return ok
}
// makeAddressable returns a value that is always addressable.
// It returns the input verbatim if it is already addressable,
// otherwise it creates a new value and returns an addressable copy.
func makeAddressable(v reflect.Value) reflect.Value {
if v.CanAddr() {
return v
}
vc := reflect.New(v.Type()).Elem()
vc.Set(v)
return vc
}
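
Tying the rules above together, a minimal sketch of Equal and Diff with a custom Comparer (the Point type and tolerance are illustrative, not part of this diff):

package main

import (
	"fmt"
	"math"

	"github.com/google/go-cmp/cmp"
)

type Point struct{ X, Y float64 }

func main() {
	// Rule 1: a Comparer overrides == for float64 values.
	approx := cmp.Comparer(func(a, b float64) bool {
		return math.Abs(a-b) < 1e-9
	})
	p := Point{X: 1, Y: 2}
	q := Point{X: 1, Y: 2 + 1e-12}
	fmt.Println(cmp.Equal(p, q, approx)) // true
	fmt.Println(cmp.Diff(p, q, approx))  // "" (empty exactly when Equal is true)
}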

1795
vendor/github.com/google/go-cmp/cmp/compare_test.go generated vendored Normal file

File diff suppressed because it is too large

374
vendor/github.com/google/go-cmp/cmp/example_test.go generated vendored Normal file

@ -0,0 +1,374 @@
// Copyright 2017, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.
package cmp_test
import (
"fmt"
"math"
"reflect"
"sort"
"strings"
"github.com/google/go-cmp/cmp"
)
// TODO: Re-write these examples in terms of how you actually use the
// fundamental options and filters and not in terms of what cool things you can
// do with them since that overlaps with cmp/cmpopts.
// Use Diff for printing out human-readable errors for test cases comparing
// nested or structured data.
func ExampleDiff_testing() {
// Code under test:
type ShipManifest struct {
Name string
Crew map[string]string
Androids int
Stolen bool
}
// AddCrew tries to add the given crewmember to the manifest.
AddCrew := func(m *ShipManifest, name, title string) {
if m.Crew == nil {
m.Crew = make(map[string]string)
}
m.Crew[title] = name
}
// Test function:
tests := []struct {
desc string
before *ShipManifest
name, title string
after *ShipManifest
}{
{
desc: "add to empty",
before: &ShipManifest{},
name: "Zaphod Beeblebrox",
title: "Galactic President",
after: &ShipManifest{
Crew: map[string]string{
"Zaphod Beeblebrox": "Galactic President",
},
},
},
{
desc: "add another",
before: &ShipManifest{
Crew: map[string]string{
"Zaphod Beeblebrox": "Galactic President",
},
},
name: "Trillian",
title: "Human",
after: &ShipManifest{
Crew: map[string]string{
"Zaphod Beeblebrox": "Galactic President",
"Trillian": "Human",
},
},
},
{
desc: "overwrite",
before: &ShipManifest{
Crew: map[string]string{
"Zaphod Beeblebrox": "Galactic President",
},
},
name: "Zaphod Beeblebrox",
title: "Just this guy, you know?",
after: &ShipManifest{
Crew: map[string]string{
"Zaphod Beeblebrox": "Just this guy, you know?",
},
},
},
}
var t fakeT
for _, test := range tests {
AddCrew(test.before, test.name, test.title)
if diff := cmp.Diff(test.before, test.after); diff != "" {
t.Errorf("%s: after AddCrew, manifest differs: (-got +want)\n%s", test.desc, diff)
}
}
// Output:
// add to empty: after AddCrew, manifest differs: (-got +want)
// {*cmp_test.ShipManifest}.Crew["Galactic President"]:
// -: "Zaphod Beeblebrox"
// +: <non-existent>
// {*cmp_test.ShipManifest}.Crew["Zaphod Beeblebrox"]:
// -: <non-existent>
// +: "Galactic President"
//
// add another: after AddCrew, manifest differs: (-got +want)
// {*cmp_test.ShipManifest}.Crew["Human"]:
// -: "Trillian"
// +: <non-existent>
// {*cmp_test.ShipManifest}.Crew["Trillian"]:
// -: <non-existent>
// +: "Human"
//
// overwrite: after AddCrew, manifest differs: (-got +want)
// {*cmp_test.ShipManifest}.Crew["Just this guy, you know?"]:
// -: "Zaphod Beeblebrox"
// +: <non-existent>
// {*cmp_test.ShipManifest}.Crew["Zaphod Beeblebrox"]:
// -: "Galactic President"
// +: "Just this guy, you know?"
}
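// Note (our annotation, not part of the vendored example): every test case
// above fails because AddCrew stores m.Crew[title] = name while the expected
// manifests are keyed by name. The swapped key/value pair is exactly what the
// Diff output pinpoints, which appears to be the point of the demonstration.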
// Approximate equality for floats can be handled by defining a custom
// comparer on floats that determines two values to be equal if they are within
// some range of each other.
//
// This example is for demonstrative purposes; use cmpopts.EquateApprox instead.
func ExampleOption_approximateFloats() {
// This Comparer only operates on float64.
// To handle float32s, either define a similar function for that type
// or use a Transformer to convert float32s into float64s.
opt := cmp.Comparer(func(x, y float64) bool {
delta := math.Abs(x - y)
mean := math.Abs(x+y) / 2.0
return delta/mean < 0.00001
})
x := []float64{1.0, 1.1, 1.2, math.Pi}
y := []float64{1.0, 1.1, 1.2, 3.14159265359} // Accurate enough to Pi
z := []float64{1.0, 1.1, 1.2, 3.1415} // Diverges too far from Pi
fmt.Println(cmp.Equal(x, y, opt))
fmt.Println(cmp.Equal(y, z, opt))
fmt.Println(cmp.Equal(z, x, opt))
// Output:
// true
// false
// false
}
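// Caveat (our annotation, not part of the vendored example): delta/mean is a
// purely relative tolerance, so it misbehaves at zero. For x == y == 0 the
// expression is 0/0 == NaN, and NaN < 0.00001 is false, so this Comparer
// reports two zeros as unequal:
//
//	cmp.Equal(0.0, 0.0, opt) // false, perhaps surprisingly
//
// cmpopts.EquateApprox, recommended above, also accepts an absolute margin
// that covers values near zero.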
// Normal floating-point arithmetic defines == to be false when comparing
// NaN with itself. In certain cases, this is not the desired property.
//
// This example is for demonstrative purposes; use cmpopts.EquateNaNs instead.
func ExampleOption_equalNaNs() {
// This Comparer only operates on float64.
// To handle float32s, either define a similar function for that type
// or use a Transformer to convert float32s into float64s.
opt := cmp.Comparer(func(x, y float64) bool {
return (math.IsNaN(x) && math.IsNaN(y)) || x == y
})
x := []float64{1.0, math.NaN(), math.E, -0.0, +0.0}
y := []float64{1.0, math.NaN(), math.E, -0.0, +0.0}
z := []float64{1.0, math.NaN(), math.Pi, -0.0, +0.0} // Pi constant instead of E
fmt.Println(cmp.Equal(x, y, opt))
fmt.Println(cmp.Equal(y, z, opt))
fmt.Println(cmp.Equal(z, x, opt))
// Output:
// true
// false
// false
}
// To have floating-point comparisons combine both properties of NaN being
// equal to itself and also approximate equality of values, filters are needed
// to restrict the scope of the comparison so that they are composable.
//
// This example is for demonstrative purposes;
// use cmpopts.EquateNaNs and cmpopts.EquateApprox instead.
func ExampleOption_equalNaNsAndApproximateFloats() {
alwaysEqual := cmp.Comparer(func(_, _ interface{}) bool { return true })
opts := cmp.Options{
// This option declares that a float64 comparison is equal only if
// both inputs are NaN.
cmp.FilterValues(func(x, y float64) bool {
return math.IsNaN(x) && math.IsNaN(y)
}, alwaysEqual),
// This option declares approximate equality on float64s only if
// both inputs are not NaN.
cmp.FilterValues(func(x, y float64) bool {
return !math.IsNaN(x) && !math.IsNaN(y)
}, cmp.Comparer(func(x, y float64) bool {
delta := math.Abs(x - y)
mean := math.Abs(x+y) / 2.0
return delta/mean < 0.00001
})),
}
x := []float64{math.NaN(), 1.0, 1.1, 1.2, math.Pi}
y := []float64{math.NaN(), 1.0, 1.1, 1.2, 3.14159265359} // Accurate enough to Pi
z := []float64{math.NaN(), 1.0, 1.1, 1.2, 3.1415} // Diverges too far from Pi
fmt.Println(cmp.Equal(x, y, opts))
fmt.Println(cmp.Equal(y, z, opts))
fmt.Println(cmp.Equal(z, x, opts))
// Output:
// true
// false
// false
}
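// Note (our annotation, not part of the vendored example): the two filters do
// not cover every pair of inputs. When exactly one value is NaN, neither
// predicate matches, so cmp falls back to the default == comparison, which
// correctly reports a NaN as unequal to any number.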
// Sometimes, a nil map or slice is considered equal to an allocated one
// of zero length.
//
// This example is for demonstrative purposes; use cmpopts.EquateEmpty instead.
func ExampleOption_equalEmpty() {
alwaysEqual := cmp.Comparer(func(_, _ interface{}) bool { return true })
// This option handles slices and maps of any type.
opt := cmp.FilterValues(func(x, y interface{}) bool {
vx, vy := reflect.ValueOf(x), reflect.ValueOf(y)
return (vx.IsValid() && vy.IsValid() && vx.Type() == vy.Type()) &&
(vx.Kind() == reflect.Slice || vx.Kind() == reflect.Map) &&
(vx.Len() == 0 && vy.Len() == 0)
}, alwaysEqual)
type S struct {
A []int
B map[string]bool
}
x := S{nil, make(map[string]bool, 100)}
y := S{make([]int, 0, 200), nil}
z := S{[]int{0}, nil} // []int has a single element (i.e., not empty)
fmt.Println(cmp.Equal(x, y, opt))
fmt.Println(cmp.Equal(y, z, opt))
fmt.Println(cmp.Equal(z, x, opt))
// Output:
// true
// false
// false
}
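// Note (our annotation, not part of the vendored example): the filter requires
// both values to share one slice or map type and to have length zero, so a nil
// slice and a non-nil zero-length slice compare equal (capacity is ignored).
// In z, field A has length 1, so the filter does not match and the ordinary
// element-wise comparison reports a difference against both x and y.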
// Two slices may be considered equal if they have the same elements,
// regardless of the order that they appear in. Transformations can be used
// to sort the slice.
//
// This example is for demonstrative purposes; use cmpopts.SortSlices instead.
func ExampleOption_sortedSlice() {
// This Transformer sorts a []int.
// Since the transformer transforms []int into []int, there is a problem where
// it could be recursively applied forever. To prevent this, use a FilterValues
// to first check for the condition upon which the transformer ought to apply.
trans := cmp.FilterValues(func(x, y []int) bool {
return !sort.IntsAreSorted(x) || !sort.IntsAreSorted(y)
}, cmp.Transformer("Sort", func(in []int) []int {
out := append([]int(nil), in...) // Copy input to avoid mutating it
sort.Ints(out)
return out
}))
x := struct{ Ints []int }{[]int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}}
y := struct{ Ints []int }{[]int{2, 8, 0, 9, 6, 1, 4, 7, 3, 5}}
z := struct{ Ints []int }{[]int{0, 0, 1, 2, 3, 4, 5, 6, 7, 8}}
fmt.Println(cmp.Equal(x, y, trans))
fmt.Println(cmp.Equal(y, z, trans))
fmt.Println(cmp.Equal(z, x, trans))
// Output:
// true
// false
// false
}
type otherString string
func (x otherString) Equal(y otherString) bool {
return strings.ToLower(string(x)) == strings.ToLower(string(y))
}
// If the Equal method defined on a type is not suitable, the type can be
// dynamically transformed to be stripped of the Equal method (or any method
// for that matter).
func ExampleOption_avoidEqualMethod() {
// Suppose otherString.Equal performs a case-insensitive equality,
// which is too loose for our needs.
// We can avoid the methods of otherString by declaring a new type.
type myString otherString
// This transformer converts otherString to myString, allowing Equal to use
// other Options to determine equality.
trans := cmp.Transformer("", func(in otherString) myString {
return myString(in)
})
x := []otherString{"foo", "bar", "baz"}
y := []otherString{"fOO", "bAr", "Baz"} // Same as before, but with different case
fmt.Println(cmp.Equal(x, y)) // Equal because of case-insensitivity
fmt.Println(cmp.Equal(x, y, trans)) // Not equal because of more exact equality
// Output:
// true
// false
}
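// Why this works (our annotation, not part of the vendored example): a defined
// type such as myString starts with an empty method set, so otherString's
// Equal method does not carry over. With no Equal method and no other options,
// cmp compares the underlying string values with ==, which is case-sensitive
// and therefore reports "foo" != "fOO".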
func roundF64(z float64) float64 {
if z < 0 {
return math.Ceil(z - 0.5)
}
return math.Floor(z + 0.5)
}
// The complex numbers complex64 and complex128 can really just be decomposed
// into a pair of float32 or float64 values. It would be convenient to be able
// to define only a single comparator on float64 and have float32, complex64, and
// complex128 all be able to use that comparator. Transformations can be used
// to handle this.
func ExampleOption_transformComplex() {
opts := []cmp.Option{
// This transformer decomposes complex128 into a pair of float64s.
cmp.Transformer("T1", func(in complex128) (out struct{ Real, Imag float64 }) {
out.Real, out.Imag = real(in), imag(in)
return out
}),
// This transformer converts complex64 to complex128 to allow the
// above transform to take effect.
cmp.Transformer("T2", func(in complex64) complex128 {
return complex128(in)
}),
// This transformer converts float32 to float64.
cmp.Transformer("T3", func(in float32) float64 {
return float64(in)
}),
// This equality function compares float64s as rounded integers.
cmp.Comparer(func(x, y float64) bool {
return roundF64(x) == roundF64(y)
}),
}
x := []interface{}{
complex128(3.0), complex64(5.1 + 2.9i), float32(-1.2), float64(12.3),
}
y := []interface{}{
complex128(3.1), complex64(4.9 + 3.1i), float32(-1.3), float64(11.7),
}
z := []interface{}{
complex128(3.8), complex64(4.9 + 3.1i), float32(-1.3), float64(11.7),
}
fmt.Println(cmp.Equal(x, y, opts...))
fmt.Println(cmp.Equal(y, z, opts...))
fmt.Println(cmp.Equal(z, x, opts...))
// Output:
// true
// false
// false
}
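// Tracing one element (our annotation, not part of the vendored example):
// complex64(5.1+2.9i) in x is widened by T2 to complex128, split by T1 into
// {Real: 5.1, Imag: 2.9}, and each float64 is then compared after rounding.
// Against y's 4.9+3.1i this checks round(5.1) == round(4.9) (5 == 5) and
// round(2.9) == round(3.1) (3 == 3), so x equals y, while z's complex128(3.8)
// rounds to 4 and differs from the 3 in both x and y.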
type fakeT struct{}
func (t fakeT) Errorf(format string, args ...interface{}) { fmt.Printf(format+"\n", args...) }

View file

@ -0,0 +1,17 @@
// Copyright 2017, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.
// +build !debug
package diff
var debug debugger
type debugger struct{}
func (debugger) Begin(_, _ int, f EqualFunc, _, _ *EditScript) EqualFunc {
return f
}
func (debugger) Update() {}
func (debugger) Finish() {}
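// Note (our annotation, not part of the vendored file): under the !debug build
// tag this debugger is a no-op whose Begin returns the EqualFunc unchanged, so
// the diff search runs uninstrumented. A counterpart file built under the
// debug tag presumably swaps in an implementation that visualizes the edit
// script as it is computed.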

Some files were not shown because too many files have changed in this diff