diff --git a/Gopkg.lock b/Gopkg.lock
index 49e3a8518..0426c622b 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -67,6 +67,12 @@
   revision = "925541529c1fa6821df4e44ce2723319eb2be768"
   version = "v1.0.0"
 
+[[projects]]
+  name = "github.com/google/go-cmp"
+  packages = ["cmp","cmp/internal/diff","cmp/internal/function","cmp/internal/value"]
+  revision = "8099a9787ce5dc5984ed879a3bda47dc730a8e97"
+  version = "v0.1.0"
+
 [[projects]]
   name = "github.com/inconshreveable/mousetrap"
   packages = ["."]
@@ -97,6 +103,12 @@
   revision = "8bd9a64bf37eb297b492a4101fb28e80ac0b290f"
   version = "v1.1.0"
 
+[[projects]]
+  name = "github.com/mattn/go-isatty"
+  packages = ["."]
+  revision = "0360b2af4f38e8d38c7fce2a9f4e702702d73a39"
+  version = "v0.0.3"
+
 [[projects]]
   name = "github.com/minio/minio-go"
   packages = [".","pkg/credentials","pkg/encrypt","pkg/policy","pkg/s3signer","pkg/s3utils","pkg/set"]
@@ -223,6 +235,12 @@
   revision = "150dc57a1b433e64154302bdc40b6bb8aefa313a"
   version = "v1.0.0"
 
+[[projects]]
+  branch = "v2"
+  name = "gopkg.in/tomb.v2"
+  packages = ["."]
+  revision = "d5d1b5820637886def9eef33e03a27a9f166942c"
+
 [[projects]]
   name = "gopkg.in/yaml.v2"
   packages = ["."]
@@ -232,6 +250,6 @@
 [solve-meta]
   analyzer-name = "dep"
   analyzer-version = 1
-  inputs-digest = "d3d59414a33bb8ecc6d88a681c782a87244a565cc9d0f85615cfa0704c02800a"
+  inputs-digest = "44a8f2ed127a6eaa38c1449b97d298fc703c961617bd93565b89bcc6c9a41483"
   solver-name = "gps-cdcl"
   solver-version = 1
diff --git a/changelog/unreleased/issue-549 b/changelog/unreleased/issue-549
new file mode 100644
index 000000000..01cb38144
--- /dev/null
+++ b/changelog/unreleased/issue-549
@@ -0,0 +1,36 @@
+Enhancement: Rework archiver code
+
+The core archiver code and the complementary code for the `backup` command
+were rewritten completely. This resolves very annoying issues such as #549.
+
+Basically, with the old code, restic took the last path component of each
+to-be-saved file or directory as the top-level file/directory within the
+snapshot. This meant that when called as `restic backup /home/user/foo`, the
+snapshot would contain the files in the directory `/home/user/foo` as `/foo`.
+
+This is not the case any more with the new archiver code. Now, restic works
+very similarly to what `tar` does: when restic is called with an absolute path
+to save, it preserves the directory structure within the snapshot. For the
+example above, the snapshot would contain the files in the directory
+`/home/user/foo` as `/home/user/foo`. For relative directories, only the
+relative path components are preserved. So `restic backup user/foo` will save
+the files as `/user/foo` in the snapshot.
+
+While we were at it, the status display and notification system were also
+completely rewritten. By default, restic now shows which files are currently
+read (unless `--quiet` is specified) in a multi-line status display.
+
+The `backup` command also gained a new option: `--verbose`. It can be
+specified once (which prints a bit more detail about what restic is doing) or
+twice (which prints a line for each file/directory restic encounters, together
+with some statistics).
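The tar-like path mapping this changelog entry describes can be illustrated
with a short Go sketch. The helper below is purely hypothetical (it exists
only to illustrate the mapping and is not part of the restic code base):

.. code-block:: go

    package main

    import (
        "fmt"
        "path/filepath"
    )

    // snapshotPath is a hypothetical illustration of the mapping described
    // above: absolute targets keep their full path inside the snapshot,
    // while relative targets keep only their relative components.
    func snapshotPath(target string) string {
        target = filepath.ToSlash(filepath.Clean(target))
        if filepath.IsAbs(target) {
            return target // /home/user/foo stays /home/user/foo
        }
        return "/" + target // user/foo becomes /user/foo
    }

    func main() {
        fmt.Println(snapshotPath("/home/user/foo")) // /home/user/foo
        fmt.Println(snapshotPath("user/foo"))       // /user/foo
    }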
+ +https://github.com/restic/restic/issues/549 +https://github.com/restic/restic/issues/1286 +https://github.com/restic/restic/issues/446 +https://github.com/restic/restic/issues/1344 +https://github.com/restic/restic/issues/1416 +https://github.com/restic/restic/issues/1456 +https://github.com/restic/restic/issues/1145 +https://github.com/restic/restic/issues/1160 +https://github.com/restic/restic/pull/1494 diff --git a/cmd/restic/background.go b/cmd/restic/background.go deleted file mode 100644 index 2f115adfd..000000000 --- a/cmd/restic/background.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build !linux - -package main - -// IsProcessBackground should return true if it is running in the background or false if not -func IsProcessBackground() bool { - //TODO: Check if the process are running in the background in other OS than linux - return false -} diff --git a/cmd/restic/cmd_backup.go b/cmd/restic/cmd_backup.go index 5620e88e9..cf1bb3b1b 100644 --- a/cmd/restic/cmd_backup.go +++ b/cmd/restic/cmd_backup.go @@ -2,21 +2,24 @@ package main import ( "bufio" - "fmt" + "context" "io" "os" - "path" - "path/filepath" + "strconv" "strings" "time" "github.com/spf13/cobra" + tomb "gopkg.in/tomb.v2" "github.com/restic/restic/internal/archiver" "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/fs" + "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" + "github.com/restic/restic/internal/ui" + "github.com/restic/restic/internal/ui/termstatus" ) var cmdBackup = &cobra.Command{ @@ -42,11 +45,16 @@ given as the arguments. return errors.Fatal("cannot use both `--stdin` and `--files-from -`") } - if backupOptions.Stdin { - return readBackupFromStdin(backupOptions, globalOptions, args) - } + var t tomb.Tomb + term := termstatus.New(globalOptions.stdout, globalOptions.stderr) + t.Go(func() error { term.Run(t.Context(globalOptions.ctx)); return nil }) - return runBackup(backupOptions, globalOptions, args) + err := runBackup(backupOptions, globalOptions, term, args) + if err != nil { + return err + } + t.Kill(nil) + return t.Wait() }, } @@ -90,127 +98,6 @@ func init() { f.BoolVar(&backupOptions.WithAtime, "with-atime", false, "store the atime for all files and directories") } -func newScanProgress(gopts GlobalOptions) *restic.Progress { - if gopts.Quiet { - return nil - } - - p := restic.NewProgress() - p.OnUpdate = func(s restic.Stat, d time.Duration, ticker bool) { - if IsProcessBackground() { - return - } - - PrintProgress("[%s] %d directories, %d files, %s", formatDuration(d), s.Dirs, s.Files, formatBytes(s.Bytes)) - } - - p.OnDone = func(s restic.Stat, d time.Duration, ticker bool) { - PrintProgress("scanned %d directories, %d files in %s\n", s.Dirs, s.Files, formatDuration(d)) - } - - return p -} - -func newArchiveProgress(gopts GlobalOptions, todo restic.Stat) *restic.Progress { - if gopts.Quiet { - return nil - } - - archiveProgress := restic.NewProgress() - - var bps, eta uint64 - itemsTodo := todo.Files + todo.Dirs - - archiveProgress.OnUpdate = func(s restic.Stat, d time.Duration, ticker bool) { - if IsProcessBackground() { - return - } - - sec := uint64(d / time.Second) - if todo.Bytes > 0 && sec > 0 && ticker { - bps = s.Bytes / sec - if s.Bytes >= todo.Bytes { - eta = 0 - } else if bps > 0 { - eta = (todo.Bytes - s.Bytes) / bps - } - } - - itemsDone := s.Files + s.Dirs - - status1 := fmt.Sprintf("[%s] %s %s / %s %d / %d items %d errors ", - formatDuration(d), - formatPercent(s.Bytes, todo.Bytes), 
-			formatBytes(s.Bytes), formatBytes(todo.Bytes),
-			itemsDone, itemsTodo,
-			s.Errors)
-		status2 := fmt.Sprintf("ETA %s ", formatSeconds(eta))
-
-		if w := stdoutTerminalWidth(); w > 0 {
-			maxlen := w - len(status2) - 1
-
-			if maxlen < 4 {
-				status1 = ""
-			} else if len(status1) > maxlen {
-				status1 = status1[:maxlen-4]
-				status1 += "... "
-			}
-		}
-
-		PrintProgress("%s%s", status1, status2)
-	}
-
-	archiveProgress.OnDone = func(s restic.Stat, d time.Duration, ticker bool) {
-		fmt.Printf("\nduration: %s\n", formatDuration(d))
-	}
-
-	return archiveProgress
-}
-
-func newArchiveStdinProgress(gopts GlobalOptions) *restic.Progress {
-	if gopts.Quiet {
-		return nil
-	}
-
-	archiveProgress := restic.NewProgress()
-
-	var bps uint64
-
-	archiveProgress.OnUpdate = func(s restic.Stat, d time.Duration, ticker bool) {
-		if IsProcessBackground() {
-			return
-		}
-
-		sec := uint64(d / time.Second)
-		if s.Bytes > 0 && sec > 0 && ticker {
-			bps = s.Bytes / sec
-		}
-
-		status1 := fmt.Sprintf("[%s] %s %s/s", formatDuration(d),
-			formatBytes(s.Bytes),
-			formatBytes(bps))
-
-		if w := stdoutTerminalWidth(); w > 0 {
-			maxlen := w - len(status1)
-
-			if maxlen < 4 {
-				status1 = ""
-			} else if len(status1) > maxlen {
-				status1 = status1[:maxlen-4]
-				status1 += "... "
-			}
-		}
-
-		PrintProgress("%s", status1)
-	}
-
-	archiveProgress.OnDone = func(s restic.Stat, d time.Duration, ticker bool) {
-		fmt.Printf("\nduration: %s\n", formatDuration(d))
-	}
-
-	return archiveProgress
-}
-
 // filterExisting returns a slice of all existing items, or an error if no
 // items exist at all.
 func filterExisting(items []string) (result []string, err error) {
@@ -231,72 +118,10 @@ func filterExisting(items []string) (result []string, err error) {
 	return
 }
 
-func readBackupFromStdin(opts BackupOptions, gopts GlobalOptions, args []string) error {
-	if len(args) != 0 {
-		return errors.Fatal("when reading from stdin, no additional files can be specified")
-	}
-
-	fn := opts.StdinFilename
-
-	if fn == "" {
-		return errors.Fatal("filename for backup from stdin must not be empty")
-	}
-
-	if filepath.Base(fn) != fn || path.Base(fn) != fn {
-		return errors.Fatal("filename is invalid (may not contain a directory, slash or backslash)")
-	}
-
-	var t time.Time
-	if opts.TimeStamp != "" {
-		parsedT, err := time.Parse("2006-01-02 15:04:05", opts.TimeStamp)
-		if err != nil {
-			return err
-		}
-		t = parsedT
-	} else {
-		t = time.Now()
-	}
-
-	if gopts.password == "" {
-		return errors.Fatal("unable to read password from stdin when data is to be read from stdin, use --password-file or $RESTIC_PASSWORD")
-	}
-
-	repo, err := OpenRepository(gopts)
-	if err != nil {
-		return err
-	}
-
-	lock, err := lockRepo(repo)
-	defer unlockRepo(lock)
-	if err != nil {
-		return err
-	}
-
-	err = repo.LoadIndex(gopts.ctx)
-	if err != nil {
-		return err
-	}
-
-	r := &archiver.Reader{
-		Repository: repo,
-		Tags:       opts.Tags,
-		Hostname:   opts.Hostname,
-		TimeStamp:  t,
-	}
-
-	_, id, err := r.Archive(gopts.ctx, fn, os.Stdin, newArchiveStdinProgress(gopts))
-	if err != nil {
-		return err
-	}
-
-	Verbosef("archived as %v\n", id.Str())
-	return nil
-}
-
-// readFromFile will read all lines from the given filename and write them to a
-// string array, if filename is empty readFromFile returns and empty string
-// array. If filename is a dash (-), readFromFile will read the lines from
-// the standard input.
+// readLinesFromFile will read all lines from the given filename and return
+// them as a string array. If filename is empty, readLinesFromFile returns an
+// empty string array.
+// If filename is a dash (-), readLinesFromFile will read the lines from
+// standard input.
 func readLinesFromFile(filename string) ([]string, error) {
 	if filename == "" {
 		return nil, nil
@@ -335,47 +160,45 @@ func readLinesFromFile(filename string) ([]string, error) {
 	return lines, nil
 }
 
-func runBackup(opts BackupOptions, gopts GlobalOptions, args []string) error {
+// Check returns an error when an invalid combination of options was set.
+func (opts BackupOptions) Check(gopts GlobalOptions, args []string) error {
 	if opts.FilesFrom == "-" && gopts.password == "" {
 		return errors.Fatal("unable to read password from stdin when data is to be read from stdin, use --password-file or $RESTIC_PASSWORD")
 	}
 
-	fromfile, err := readLinesFromFile(opts.FilesFrom)
-	if err != nil {
-		return err
-	}
-
-	// merge files from files-from into normal args so we can reuse the normal
-	// args checks and have the ability to use both files-from and args at the
-	// same time
-	args = append(args, fromfile...)
-	if len(args) == 0 {
-		return errors.Fatal("nothing to backup, please specify target files/dirs")
-	}
-
-	target := make([]string, 0, len(args))
-	for _, d := range args {
-		if a, err := filepath.Abs(d); err == nil {
-			d = a
+	if opts.Stdin {
+		if opts.FilesFrom != "" {
+			return errors.Fatal("--stdin and --files-from cannot be used together")
+		}
+
+		if len(args) > 0 {
+			return errors.Fatal("--stdin was specified and files/dirs were listed as arguments")
 		}
-		target = append(target, d)
 	}
 
-	target, err = filterExisting(target)
-	if err != nil {
-		return err
-	}
-
-	// rejectFuncs collect functions that can reject items from the backup
-	var rejectFuncs []RejectFunc
+	return nil
+}
 
+// collectRejectFuncs returns a list of all functions that may reject data
+// from being saved in a snapshot.
+func collectRejectFuncs(opts BackupOptions, repo *repository.Repository, targets []string) (fs []RejectFunc, err error) {
 	// allowed devices
 	if opts.ExcludeOtherFS {
-		f, err := rejectByDevice(target)
+		f, err := rejectByDevice(targets)
 		if err != nil {
-			return err
+			return nil, err
 		}
-		rejectFuncs = append(rejectFuncs, f)
+		fs = append(fs, f)
+	}
+
+	// exclude restic cache
+	if repo.Cache != nil {
+		f, err := rejectResticCache(repo)
+		if err != nil {
+			return nil, err
+		}
+
+		fs = append(fs, f)
 	}
 
 	// add patterns from file
@@ -384,7 +207,7 @@ func runBackup(opts BackupOptions, gopts GlobalOptions, args []string) error {
 	}
 
 	if len(opts.Excludes) > 0 {
-		rejectFuncs = append(rejectFuncs, rejectByPattern(opts.Excludes))
+		fs = append(fs, rejectByPattern(opts.Excludes))
 	}
 
 	if opts.ExcludeCaches {
@@ -394,111 +217,17 @@ func runBackup(opts BackupOptions, gopts GlobalOptions, args []string) error {
 	for _, spec := range opts.ExcludeIfPresent {
 		f, err := rejectIfPresent(spec)
 		if err != nil {
-			return err
+			return nil, err
 		}
 
-		rejectFuncs = append(rejectFuncs, f)
+		fs = append(fs, f)
 	}
 
-	repo, err := OpenRepository(gopts)
-	if err != nil {
-		return err
-	}
-
-	lock, err := lockRepo(repo)
-	defer unlockRepo(lock)
-	if err != nil {
-		return err
-	}
-
-	// exclude restic cache
-	if repo.Cache != nil {
-		f, err := rejectResticCache(repo)
-		if err != nil {
-			return err
-		}
-
-		rejectFuncs = append(rejectFuncs, f)
-	}
-
-	err = repo.LoadIndex(gopts.ctx)
-	if err != nil {
-		return err
-	}
-
-	var parentSnapshotID *restic.ID
-
-	// Force using a parent
-	if !opts.Force && opts.Parent != "" {
-		id, err := restic.FindSnapshot(repo, opts.Parent)
-		if err != nil {
-			return errors.Fatalf("invalid id %q: %v", opts.Parent, err)
-		}
-
-		parentSnapshotID = &id
-	}
-
-	// Find last snapshot to set it as parent, if not already set
-	if !opts.Force && parentSnapshotID == nil {
-		id, err := restic.FindLatestSnapshot(gopts.ctx, repo, target, []restic.TagList{}, opts.Hostname)
-		if err == nil {
-			parentSnapshotID = &id
-		} else if err != restic.ErrNoSnapshotFound {
-			return err
-		}
-	}
-
-	if parentSnapshotID != nil {
-		Verbosef("using parent snapshot %v\n", parentSnapshotID.Str())
-	}
-
-	Verbosef("scan %v\n", target)
-
-	selectFilter := func(item string, fi os.FileInfo) bool {
-		for _, reject := range rejectFuncs {
-			if reject(item, fi) {
-				return false
-			}
-		}
-		return true
-	}
-
-	var stat restic.Stat
-	if !gopts.Quiet {
-		stat, err = archiver.Scan(target, selectFilter, newScanProgress(gopts))
-		if err != nil {
-			return err
-		}
-	}
-
-	arch := archiver.New(repo)
-	arch.Excludes = opts.Excludes
-	arch.SelectFilter = selectFilter
-	arch.WithAccessTime = opts.WithAtime
-
-	arch.Warn = func(dir string, fi os.FileInfo, err error) {
-		// TODO: make ignoring errors configurable
-		Warnf("%s\rwarning for %s: %v\n", ClearLine(), dir, err)
-	}
-
-	timeStamp := time.Now()
-	if opts.TimeStamp != "" {
-		timeStamp, err = time.Parse(TimeFormat, opts.TimeStamp)
-		if err != nil {
-			return errors.Fatalf("error in time option: %v\n", err)
-		}
-	}
-
-	_, id, err := arch.Snapshot(gopts.ctx, newArchiveProgress(gopts, stat), target, opts.Tags, opts.Hostname, parentSnapshotID, timeStamp)
-	if err != nil {
-		return err
-	}
-
-	Verbosef("snapshot %s saved\n", id.Str())
-
-	return nil
+	return fs, nil
 }
 
+// readExcludePatternsFromFiles reads all exclude files and returns the list of
+// exclude patterns.
 func readExcludePatternsFromFiles(excludeFiles []string) []string {
 	var excludes []string
 	for _, filename := range excludeFiles {
@@ -540,3 +269,217 @@ func readExcludePatternsFromFiles(excludeFiles []string) []string {
 	}
 	return excludes
 }
+
+// collectTargets returns a list of target files/dirs from several sources.
+func collectTargets(opts BackupOptions, args []string) (targets []string, err error) {
+	if opts.Stdin {
+		return nil, nil
+	}
+
+	fromfile, err := readLinesFromFile(opts.FilesFrom)
+	if err != nil {
+		return nil, err
+	}
+
+	// merge files from files-from into normal args so we can reuse the normal
+	// args checks and have the ability to use both files-from and args at the
+	// same time
+	args = append(args, fromfile...)
+	if len(args) == 0 && !opts.Stdin {
+		return nil, errors.Fatal("nothing to backup, please specify target files/dirs")
+	}
+
+	targets = args
+	targets, err = filterExisting(targets)
+	if err != nil {
+		return nil, err
+	}
+
+	return targets, nil
+}
+
+// findParentSnapshot returns the ID of the parent snapshot. If there is none,
+// nil is returned.
+func findParentSnapshot(ctx context.Context, repo restic.Repository, opts BackupOptions, targets []string) (parentID *restic.ID, err error) { + // Force using a parent + if !opts.Force && opts.Parent != "" { + id, err := restic.FindSnapshot(repo, opts.Parent) + if err != nil { + return nil, errors.Fatalf("invalid id %q: %v", opts.Parent, err) + } + + parentID = &id + } + + // Find last snapshot to set it as parent, if not already set + if !opts.Force && parentID == nil { + id, err := restic.FindLatestSnapshot(ctx, repo, targets, []restic.TagList{}, opts.Hostname) + if err == nil { + parentID = &id + } else if err != restic.ErrNoSnapshotFound { + return nil, err + } + } + + return parentID, nil +} + +func runBackup(opts BackupOptions, gopts GlobalOptions, term *termstatus.Terminal, args []string) error { + err := opts.Check(gopts, args) + if err != nil { + return err + } + + targets, err := collectTargets(opts, args) + if err != nil { + return err + } + + var t tomb.Tomb + + p := ui.NewBackup(term, gopts.verbosity) + + // use the terminal for stdout/stderr + prevStdout, prevStderr := gopts.stdout, gopts.stderr + defer func() { + gopts.stdout, gopts.stderr = prevStdout, prevStderr + }() + gopts.stdout, gopts.stderr = p.Stdout(), p.Stderr() + + if s, ok := os.LookupEnv("RESTIC_PROGRESS_FPS"); ok { + fps, err := strconv.Atoi(s) + if err == nil && fps >= 1 { + if fps > 60 { + fps = 60 + } + p.MinUpdatePause = time.Second / time.Duration(fps) + } + } + + t.Go(func() error { return p.Run(t.Context(gopts.ctx)) }) + + p.V("open repository") + repo, err := OpenRepository(gopts) + if err != nil { + return err + } + + p.V("lock repository") + lock, err := lockRepo(repo) + defer unlockRepo(lock) + if err != nil { + return err + } + + // rejectFuncs collect functions that can reject items from the backup + rejectFuncs, err := collectRejectFuncs(opts, repo, targets) + if err != nil { + return err + } + + p.V("load index files") + err = repo.LoadIndex(gopts.ctx) + if err != nil { + return err + } + + parentSnapshotID, err := findParentSnapshot(gopts.ctx, repo, opts, targets) + if err != nil { + return err + } + + if parentSnapshotID != nil { + p.V("using parent snapshot %v\n", parentSnapshotID.Str()) + } + + selectFilter := func(item string, fi os.FileInfo) bool { + for _, reject := range rejectFuncs { + if reject(item, fi) { + return false + } + } + return true + } + + timeStamp := time.Now() + if opts.TimeStamp != "" { + timeStamp, err = time.Parse(TimeFormat, opts.TimeStamp) + if err != nil { + return errors.Fatalf("error in time option: %v\n", err) + } + } + + var targetFS fs.FS = fs.Local{} + if opts.Stdin { + p.V("read data from stdin") + targetFS = &fs.Reader{ + ModTime: timeStamp, + Name: opts.StdinFilename, + Mode: 0644, + ReadCloser: os.Stdin, + } + targets = []string{opts.StdinFilename} + } + + sc := archiver.NewScanner(targetFS) + sc.Select = selectFilter + sc.Error = p.ScannerError + sc.Result = p.ReportTotal + + p.V("start scan") + t.Go(func() error { return sc.Scan(t.Context(gopts.ctx), targets) }) + + arch := archiver.New(repo, targetFS, archiver.Options{}) + arch.Select = selectFilter + arch.WithAtime = opts.WithAtime + arch.Error = p.Error + arch.CompleteItem = p.CompleteItemFn + arch.StartFile = p.StartFile + arch.CompleteBlob = p.CompleteBlob + + if parentSnapshotID == nil { + parentSnapshotID = &restic.ID{} + } + + snapshotOpts := archiver.SnapshotOptions{ + Excludes: opts.Excludes, + Tags: opts.Tags, + Time: timeStamp, + Hostname: opts.Hostname, + ParentSnapshot: *parentSnapshotID, 
+ } + + uploader := archiver.IndexUploader{ + Repository: repo, + Start: func() { + p.VV("uploading intermediate index") + }, + Complete: func(id restic.ID) { + p.V("uploaded intermediate index %v", id.Str()) + }, + } + + t.Go(func() error { + return uploader.Upload(gopts.ctx, t.Context(gopts.ctx), 30*time.Second) + }) + + p.V("start backup") + _, id, err := arch.Snapshot(gopts.ctx, targets, snapshotOpts) + if err != nil { + return err + } + + p.Finish() + p.P("snapshot %s saved\n", id.Str()) + + // cleanly shutdown all running goroutines + t.Kill(nil) + + // let's see if one returned an error + err = t.Wait() + if err != nil { + return err + } + + return nil +} diff --git a/cmd/restic/cmd_ls.go b/cmd/restic/cmd_ls.go index 4a046b7ef..d4a768d70 100644 --- a/cmd/restic/cmd_ls.go +++ b/cmd/restic/cmd_ls.go @@ -56,7 +56,8 @@ func printTree(ctx context.Context, repo *repository.Repository, id *restic.ID, Printf("%s\n", formatNode(prefix, entry, lsOptions.ListLong)) if entry.Type == "dir" && entry.Subtree != nil { - if err = printTree(ctx, repo, entry.Subtree, filepath.Join(prefix, entry.Name)); err != nil { + entryPath := prefix + string(filepath.Separator) + entry.Name + if err = printTree(ctx, repo, entry.Subtree, entryPath); err != nil { return err } } @@ -84,7 +85,7 @@ func runLs(opts LsOptions, gopts GlobalOptions, args []string) error { for sn := range FindFilteredSnapshots(ctx, repo, opts.Host, opts.Tags, opts.Paths, args) { Verbosef("snapshot %s of %v at %s):\n", sn.ID().Str(), sn.Paths, sn.Time) - if err = printTree(gopts.ctx, repo, sn.Tree, string(filepath.Separator)); err != nil { + if err = printTree(gopts.ctx, repo, sn.Tree, ""); err != nil { return err } } diff --git a/cmd/restic/format.go b/cmd/restic/format.go index 9f66d1c1d..1f8ab366e 100644 --- a/cmd/restic/format.go +++ b/cmd/restic/format.go @@ -64,8 +64,9 @@ func formatDuration(d time.Duration) string { } func formatNode(prefix string, n *restic.Node, long bool) string { + nodepath := prefix + string(filepath.Separator) + n.Name if !long { - return filepath.Join(prefix, n.Name) + return nodepath } var mode os.FileMode @@ -91,6 +92,6 @@ func formatNode(prefix string, n *restic.Node, long bool) string { return fmt.Sprintf("%s %5d %5d %6d %s %s%s", mode|n.Mode, n.UID, n.GID, n.Size, - n.ModTime.Format(TimeFormat), filepath.Join(prefix, n.Name), + n.ModTime.Format(TimeFormat), nodepath, target) } diff --git a/cmd/restic/global.go b/cmd/restic/global.go index 0c3d805b2..8c89a4d80 100644 --- a/cmd/restic/global.go +++ b/cmd/restic/global.go @@ -43,6 +43,7 @@ type GlobalOptions struct { Repo string PasswordFile string Quiet bool + Verbose int NoLock bool JSON bool CacheDir string @@ -59,6 +60,13 @@ type GlobalOptions struct { stdout io.Writer stderr io.Writer + // verbosity is set as follows: + // 0 means: don't print any messages except errors, this is used when --quiet is specified + // 1 is the default: print essential messages + // 2 means: print more messages, report minor things, this is used when --verbose is specified + // 3 means: print very detailed debug messages, this is used when --debug is specified + verbosity uint + Options []string extended options.Options @@ -81,6 +89,7 @@ func init() { f.StringVarP(&globalOptions.Repo, "repo", "r", os.Getenv("RESTIC_REPOSITORY"), "repository to backup to or restore from (default: $RESTIC_REPOSITORY)") f.StringVarP(&globalOptions.PasswordFile, "password-file", "p", os.Getenv("RESTIC_PASSWORD_FILE"), "read the repository password from a file (default: $RESTIC_PASSWORD_FILE)") 
f.BoolVarP(&globalOptions.Quiet, "quiet", "q", false, "do not output comprehensive progress report") + f.CountVarP(&globalOptions.Verbose, "verbose", "v", "be verbose (specify --verbose multiple times or level `n`)") f.BoolVar(&globalOptions.NoLock, "no-lock", false, "do not lock the repo, this allows some operations on read-only repos") f.BoolVarP(&globalOptions.JSON, "json", "", false, "set output mode to JSON for commands that support it") f.StringVar(&globalOptions.CacheDir, "cache-dir", "", "set the cache directory") @@ -173,11 +182,9 @@ func Printf(format string, args ...interface{}) { // Verbosef calls Printf to write the message when the verbose flag is set. func Verbosef(format string, args ...interface{}) { - if globalOptions.Quiet { - return + if globalOptions.verbosity >= 1 { + Printf(format, args...) } - - Printf(format, args...) } // PrintProgress wraps fmt.Printf to handle the difference in writing progress diff --git a/cmd/restic/global_debug.go b/cmd/restic/global_debug.go index 7cad172f6..cb7dac10a 100644 --- a/cmd/restic/global_debug.go +++ b/cmd/restic/global_debug.go @@ -18,6 +18,7 @@ var ( listenMemoryProfile string memProfilePath string cpuProfilePath string + traceProfilePath string insecure bool ) @@ -26,6 +27,7 @@ func init() { f.StringVar(&listenMemoryProfile, "listen-profile", "", "listen on this `address:port` for memory profiling") f.StringVar(&memProfilePath, "mem-profile", "", "write memory profile to `dir`") f.StringVar(&cpuProfilePath, "cpu-profile", "", "write cpu profile to `dir`") + f.StringVar(&traceProfilePath, "trace-profile", "", "write trace to `dir`") f.BoolVar(&insecure, "insecure-kdf", false, "use insecure KDF settings") } @@ -46,7 +48,18 @@ func runDebug() error { }() } - if memProfilePath != "" && cpuProfilePath != "" { + profilesEnabled := 0 + if memProfilePath != "" { + profilesEnabled++ + } + if cpuProfilePath != "" { + profilesEnabled++ + } + if traceProfilePath != "" { + profilesEnabled++ + } + + if profilesEnabled > 1 { return errors.Fatal("only one profile (memory or CPU) may be activated at the same time") } @@ -58,6 +71,8 @@ func runDebug() error { prof = profile.Start(profile.Quiet, profile.NoShutdownHook, profile.MemProfile, profile.ProfilePath(memProfilePath)) } else if cpuProfilePath != "" { prof = profile.Start(profile.Quiet, profile.NoShutdownHook, profile.CPUProfile, profile.ProfilePath(cpuProfilePath)) + } else if traceProfilePath != "" { + prof = profile.Start(profile.Quiet, profile.NoShutdownHook, profile.TraceProfile, profile.ProfilePath(traceProfilePath)) } if prof != nil { diff --git a/cmd/restic/integration_fuse_test.go b/cmd/restic/integration_fuse_test.go index a341ff4e6..45a9d4eb0 100644 --- a/cmd/restic/integration_fuse_test.go +++ b/cmd/restic/integration_fuse_test.go @@ -171,7 +171,7 @@ func TestMount(t *testing.T) { rtest.SetupTarTestFixture(t, env.testdata, filepath.Join("testdata", "backup-data.tar.gz")) // first backup - testRunBackup(t, []string{env.testdata}, BackupOptions{}, env.gopts) + testRunBackup(t, "", []string{env.testdata}, BackupOptions{}, env.gopts) snapshotIDs := testRunList(t, "snapshots", env.gopts) rtest.Assert(t, len(snapshotIDs) == 1, "expected one snapshot, got %v", snapshotIDs) @@ -179,7 +179,7 @@ func TestMount(t *testing.T) { checkSnapshots(t, env.gopts, repo, env.mountpoint, env.repo, snapshotIDs, 2) // second backup, implicit incremental - testRunBackup(t, []string{env.testdata}, BackupOptions{}, env.gopts) + testRunBackup(t, "", []string{env.testdata}, BackupOptions{}, env.gopts) 
snapshotIDs = testRunList(t, "snapshots", env.gopts) rtest.Assert(t, len(snapshotIDs) == 2, "expected two snapshots, got %v", snapshotIDs) @@ -188,7 +188,7 @@ func TestMount(t *testing.T) { // third backup, explicit incremental bopts := BackupOptions{Parent: snapshotIDs[0].String()} - testRunBackup(t, []string{env.testdata}, bopts, env.gopts) + testRunBackup(t, "", []string{env.testdata}, bopts, env.gopts) snapshotIDs = testRunList(t, "snapshots", env.gopts) rtest.Assert(t, len(snapshotIDs) == 3, "expected three snapshots, got %v", snapshotIDs) diff --git a/cmd/restic/integration_test.go b/cmd/restic/integration_test.go index 5b4e67e17..36a7670b1 100644 --- a/cmd/restic/integration_test.go +++ b/cmd/restic/integration_test.go @@ -3,6 +3,7 @@ package main import ( "bufio" "bytes" + "context" "crypto/rand" "encoding/json" "fmt" @@ -17,12 +18,14 @@ import ( "testing" "time" - "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/filter" + "github.com/restic/restic/internal/fs" "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" + "github.com/restic/restic/internal/ui/termstatus" + "golang.org/x/sync/errgroup" ) func parseIDsFromReader(t testing.TB, rd io.Reader) restic.IDs { @@ -51,9 +54,29 @@ func testRunInit(t testing.TB, opts GlobalOptions) { t.Logf("repository initialized at %v", opts.Repo) } -func testRunBackup(t testing.TB, target []string, opts BackupOptions, gopts GlobalOptions) { - t.Logf("backing up %v", target) - rtest.OK(t, runBackup(opts, gopts, target)) +func testRunBackup(t testing.TB, dir string, target []string, opts BackupOptions, gopts GlobalOptions) { + ctx, cancel := context.WithCancel(gopts.ctx) + defer cancel() + + var wg errgroup.Group + term := termstatus.New(gopts.stdout, gopts.stderr) + wg.Go(func() error { term.Run(ctx); return nil }) + + gopts.stdout = ioutil.Discard + t.Logf("backing up %v in %v", target, dir) + if dir != "" { + cleanup := fs.TestChdir(t, dir) + defer cleanup() + } + + rtest.OK(t, runBackup(opts, gopts, term, target)) + + cancel() + + err := wg.Wait() + if err != nil { + t.Fatal(err) + } } func testRunList(t testing.TB, tpe string, opts GlobalOptions) restic.IDs { @@ -219,7 +242,7 @@ func TestBackup(t *testing.T) { opts := BackupOptions{} // first backup - testRunBackup(t, []string{env.testdata}, opts, env.gopts) + testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts) snapshotIDs := testRunList(t, "snapshots", env.gopts) rtest.Assert(t, len(snapshotIDs) == 1, "expected one snapshot, got %v", snapshotIDs) @@ -228,7 +251,7 @@ func TestBackup(t *testing.T) { stat1 := dirStats(env.repo) // second backup, implicit incremental - testRunBackup(t, []string{env.testdata}, opts, env.gopts) + testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts) snapshotIDs = testRunList(t, "snapshots", env.gopts) rtest.Assert(t, len(snapshotIDs) == 2, "expected two snapshots, got %v", snapshotIDs) @@ -242,7 +265,7 @@ func TestBackup(t *testing.T) { testRunCheck(t, env.gopts) // third backup, explicit incremental opts.Parent = snapshotIDs[0].String() - testRunBackup(t, []string{env.testdata}, opts, env.gopts) + testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts) snapshotIDs = testRunList(t, "snapshots", env.gopts) rtest.Assert(t, len(snapshotIDs) == 3, "expected three snapshots, got %v", snapshotIDs) @@ -296,198 +319,7 @@ func 
TestBackupNonExistingFile(t *testing.T) { opts := BackupOptions{} - testRunBackup(t, dirs, opts, env.gopts) -} - -func TestBackupMissingFile1(t *testing.T) { - env, cleanup := withTestEnvironment(t) - defer cleanup() - - datafile := filepath.Join("testdata", "backup-data.tar.gz") - fd, err := os.Open(datafile) - if os.IsNotExist(errors.Cause(err)) { - t.Skipf("unable to find data file %q, skipping", datafile) - return - } - rtest.OK(t, err) - rtest.OK(t, fd.Close()) - - rtest.SetupTarTestFixture(t, env.testdata, datafile) - - testRunInit(t, env.gopts) - globalOptions.stderr = ioutil.Discard - defer func() { - globalOptions.stderr = os.Stderr - }() - - ranHook := false - debug.Hook("pipe.walk1", func(context interface{}) { - pathname := context.(string) - - if pathname != filepath.Join("testdata", "0", "0", "9") { - return - } - - t.Logf("in hook, removing test file testdata/0/0/9/37") - ranHook = true - - rtest.OK(t, os.Remove(filepath.Join(env.testdata, "0", "0", "9", "37"))) - }) - - opts := BackupOptions{} - - testRunBackup(t, []string{env.testdata}, opts, env.gopts) - testRunCheck(t, env.gopts) - - rtest.Assert(t, ranHook, "hook did not run") - debug.RemoveHook("pipe.walk1") -} - -func TestBackupMissingFile2(t *testing.T) { - env, cleanup := withTestEnvironment(t) - defer cleanup() - - datafile := filepath.Join("testdata", "backup-data.tar.gz") - fd, err := os.Open(datafile) - if os.IsNotExist(errors.Cause(err)) { - t.Skipf("unable to find data file %q, skipping", datafile) - return - } - rtest.OK(t, err) - rtest.OK(t, fd.Close()) - - rtest.SetupTarTestFixture(t, env.testdata, datafile) - - testRunInit(t, env.gopts) - - globalOptions.stderr = ioutil.Discard - defer func() { - globalOptions.stderr = os.Stderr - }() - - ranHook := false - debug.Hook("pipe.walk2", func(context interface{}) { - pathname := context.(string) - - if pathname != filepath.Join("testdata", "0", "0", "9", "37") { - return - } - - t.Logf("in hook, removing test file testdata/0/0/9/37") - ranHook = true - - rtest.OK(t, os.Remove(filepath.Join(env.testdata, "0", "0", "9", "37"))) - }) - - opts := BackupOptions{} - - testRunBackup(t, []string{env.testdata}, opts, env.gopts) - testRunCheck(t, env.gopts) - - rtest.Assert(t, ranHook, "hook did not run") - debug.RemoveHook("pipe.walk2") -} - -func TestBackupChangedFile(t *testing.T) { - env, cleanup := withTestEnvironment(t) - defer cleanup() - - datafile := filepath.Join("testdata", "backup-data.tar.gz") - fd, err := os.Open(datafile) - if os.IsNotExist(errors.Cause(err)) { - t.Skipf("unable to find data file %q, skipping", datafile) - return - } - rtest.OK(t, err) - rtest.OK(t, fd.Close()) - - rtest.SetupTarTestFixture(t, env.testdata, datafile) - - testRunInit(t, env.gopts) - - globalOptions.stderr = ioutil.Discard - defer func() { - globalOptions.stderr = os.Stderr - }() - - modFile := filepath.Join(env.testdata, "0", "0", "9", "18") - - ranHook := false - debug.Hook("archiver.SaveFile", func(context interface{}) { - pathname := context.(string) - - if pathname != modFile { - return - } - - t.Logf("in hook, modifying test file %v", modFile) - ranHook = true - - rtest.OK(t, ioutil.WriteFile(modFile, []byte("modified"), 0600)) - }) - - opts := BackupOptions{} - - testRunBackup(t, []string{env.testdata}, opts, env.gopts) - testRunCheck(t, env.gopts) - - rtest.Assert(t, ranHook, "hook did not run") - debug.RemoveHook("archiver.SaveFile") -} - -func TestBackupDirectoryError(t *testing.T) { - env, cleanup := withTestEnvironment(t) - defer cleanup() - - datafile := 
filepath.Join("testdata", "backup-data.tar.gz") - fd, err := os.Open(datafile) - if os.IsNotExist(errors.Cause(err)) { - t.Skipf("unable to find data file %q, skipping", datafile) - return - } - rtest.OK(t, err) - rtest.OK(t, fd.Close()) - - rtest.SetupTarTestFixture(t, env.testdata, datafile) - - testRunInit(t, env.gopts) - - globalOptions.stderr = ioutil.Discard - defer func() { - globalOptions.stderr = os.Stderr - }() - - ranHook := false - - testdir := filepath.Join(env.testdata, "0", "0", "9") - - // install hook that removes the dir right before readdirnames() - debug.Hook("pipe.readdirnames", func(context interface{}) { - path := context.(string) - - if path != testdir { - return - } - - t.Logf("in hook, removing test file %v", testdir) - ranHook = true - - rtest.OK(t, os.RemoveAll(testdir)) - }) - - testRunBackup(t, []string{filepath.Join(env.testdata, "0", "0")}, BackupOptions{}, env.gopts) - testRunCheck(t, env.gopts) - - rtest.Assert(t, ranHook, "hook did not run") - debug.RemoveHook("pipe.walk2") - - snapshots := testRunList(t, "snapshots", env.gopts) - rtest.Assert(t, len(snapshots) > 0, - "no snapshots found in repo (%v)", datafile) - - files := testRunLs(t, env.gopts, snapshots[0].String()) - - rtest.Assert(t, len(files) > 1, "snapshot is empty") + testRunBackup(t, "", dirs, opts, env.gopts) } func includes(haystack []string, needle string) bool { @@ -552,21 +384,21 @@ func TestBackupExclude(t *testing.T) { opts := BackupOptions{} - testRunBackup(t, []string{datadir}, opts, env.gopts) + testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts) snapshots, snapshotID := lastSnapshot(snapshots, loadSnapshotMap(t, env.gopts)) files := testRunLs(t, env.gopts, snapshotID) rtest.Assert(t, includes(files, filepath.Join(string(filepath.Separator), "testdata", "foo.tar.gz")), "expected file %q in first snapshot, but it's not included", "foo.tar.gz") opts.Excludes = []string{"*.tar.gz"} - testRunBackup(t, []string{datadir}, opts, env.gopts) + testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts) snapshots, snapshotID = lastSnapshot(snapshots, loadSnapshotMap(t, env.gopts)) files = testRunLs(t, env.gopts, snapshotID) rtest.Assert(t, !includes(files, filepath.Join(string(filepath.Separator), "testdata", "foo.tar.gz")), "expected file %q not in first snapshot, but it's included", "foo.tar.gz") opts.Excludes = []string{"*.tar.gz", "private/secret"} - testRunBackup(t, []string{datadir}, opts, env.gopts) + testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts) _, snapshotID = lastSnapshot(snapshots, loadSnapshotMap(t, env.gopts)) files = testRunLs(t, env.gopts, snapshotID) rtest.Assert(t, !includes(files, filepath.Join(string(filepath.Separator), "testdata", "foo.tar.gz")), @@ -616,13 +448,13 @@ func TestIncrementalBackup(t *testing.T) { opts := BackupOptions{} - testRunBackup(t, []string{datadir}, opts, env.gopts) + testRunBackup(t, "", []string{datadir}, opts, env.gopts) testRunCheck(t, env.gopts) stat1 := dirStats(env.repo) rtest.OK(t, appendRandomData(testfile, incrementalSecondWrite)) - testRunBackup(t, []string{datadir}, opts, env.gopts) + testRunBackup(t, "", []string{datadir}, opts, env.gopts) testRunCheck(t, env.gopts) stat2 := dirStats(env.repo) if stat2.size-stat1.size > incrementalFirstWrite { @@ -632,7 +464,7 @@ func TestIncrementalBackup(t *testing.T) { rtest.OK(t, appendRandomData(testfile, incrementalThirdWrite)) - testRunBackup(t, []string{datadir}, opts, env.gopts) + testRunBackup(t, "", 
[]string{datadir}, opts, env.gopts) testRunCheck(t, env.gopts) stat3 := dirStats(env.repo) if stat3.size-stat2.size > incrementalFirstWrite { @@ -651,7 +483,7 @@ func TestBackupTags(t *testing.T) { opts := BackupOptions{} - testRunBackup(t, []string{env.testdata}, opts, env.gopts) + testRunBackup(t, "", []string{env.testdata}, opts, env.gopts) testRunCheck(t, env.gopts) newest, _ := testRunSnapshots(t, env.gopts) rtest.Assert(t, newest != nil, "expected a new backup, got nil") @@ -660,7 +492,7 @@ func TestBackupTags(t *testing.T) { parent := newest opts.Tags = []string{"NL"} - testRunBackup(t, []string{env.testdata}, opts, env.gopts) + testRunBackup(t, "", []string{env.testdata}, opts, env.gopts) testRunCheck(t, env.gopts) newest, _ = testRunSnapshots(t, env.gopts) rtest.Assert(t, newest != nil, "expected a new backup, got nil") @@ -683,7 +515,7 @@ func TestTag(t *testing.T) { testRunInit(t, env.gopts) rtest.SetupTarTestFixture(t, env.testdata, datafile) - testRunBackup(t, []string{env.testdata}, BackupOptions{}, env.gopts) + testRunBackup(t, "", []string{env.testdata}, BackupOptions{}, env.gopts) testRunCheck(t, env.gopts) newest, _ := testRunSnapshots(t, env.gopts) rtest.Assert(t, newest != nil, "expected a new backup, got nil") @@ -859,7 +691,7 @@ func TestRestoreFilter(t *testing.T) { opts := BackupOptions{} - testRunBackup(t, []string{env.testdata}, opts, env.gopts) + testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts) testRunCheck(t, env.gopts) snapshotID := testRunList(t, "snapshots", env.gopts)[0] @@ -899,7 +731,7 @@ func TestRestore(t *testing.T) { opts := BackupOptions{} - testRunBackup(t, []string{env.testdata}, opts, env.gopts) + testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts) testRunCheck(t, env.gopts) // Restore latest without any filters @@ -922,12 +754,22 @@ func TestRestoreLatest(t *testing.T) { opts := BackupOptions{} - testRunBackup(t, []string{env.testdata}, opts, env.gopts) + // chdir manually here so we can get the current directory. This is not the + // same as the temp dir returned by ioutil.TempDir() on darwin. 
+ back := fs.TestChdir(t, filepath.Dir(env.testdata)) + defer back() + + curdir, err := os.Getwd() + if err != nil { + t.Fatal(err) + } + + testRunBackup(t, "", []string{filepath.Base(env.testdata)}, opts, env.gopts) testRunCheck(t, env.gopts) os.Remove(p) rtest.OK(t, appendRandomData(p, 101)) - testRunBackup(t, []string{env.testdata}, opts, env.gopts) + testRunBackup(t, "", []string{filepath.Base(env.testdata)}, opts, env.gopts) testRunCheck(t, env.gopts) // Restore latest without any filters @@ -935,16 +777,18 @@ func TestRestoreLatest(t *testing.T) { rtest.OK(t, testFileSize(filepath.Join(env.base, "restore0", "testdata", "testfile.c"), int64(101))) // Setup test files in different directories backed up in different snapshots - p1 := filepath.Join(env.testdata, "p1/testfile.c") + p1 := filepath.Join(curdir, filepath.FromSlash("p1/testfile.c")) + rtest.OK(t, os.MkdirAll(filepath.Dir(p1), 0755)) rtest.OK(t, appendRandomData(p1, 102)) - testRunBackup(t, []string{filepath.Dir(p1)}, opts, env.gopts) + testRunBackup(t, "", []string{"p1"}, opts, env.gopts) testRunCheck(t, env.gopts) - p2 := filepath.Join(env.testdata, "p2/testfile.c") + p2 := filepath.Join(curdir, filepath.FromSlash("p2/testfile.c")) + rtest.OK(t, os.MkdirAll(filepath.Dir(p2), 0755)) rtest.OK(t, appendRandomData(p2, 103)) - testRunBackup(t, []string{filepath.Dir(p2)}, opts, env.gopts) + testRunBackup(t, "", []string{"p2"}, opts, env.gopts) testRunCheck(t, env.gopts) p1rAbs := filepath.Join(env.base, "restore1", "p1/testfile.c") @@ -1017,7 +861,7 @@ func TestRestoreNoMetadataOnIgnoredIntermediateDirs(t *testing.T) { opts := BackupOptions{} - testRunBackup(t, []string{env.testdata}, opts, env.gopts) + testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts) testRunCheck(t, env.gopts) snapshotID := testRunList(t, "snapshots", env.gopts)[0] @@ -1055,7 +899,7 @@ func TestFind(t *testing.T) { opts := BackupOptions{} - testRunBackup(t, []string{env.testdata}, opts, env.gopts) + testRunBackup(t, "", []string{env.testdata}, opts, env.gopts) testRunCheck(t, env.gopts) results := testRunFind(t, false, env.gopts, "unexistingfile") @@ -1095,7 +939,7 @@ func TestFindJSON(t *testing.T) { opts := BackupOptions{} - testRunBackup(t, []string{env.testdata}, opts, env.gopts) + testRunBackup(t, "", []string{env.testdata}, opts, env.gopts) testRunCheck(t, env.gopts) results := testRunFind(t, true, env.gopts, "unexistingfile") @@ -1198,13 +1042,13 @@ func TestPrune(t *testing.T) { rtest.SetupTarTestFixture(t, env.testdata, datafile) opts := BackupOptions{} - testRunBackup(t, []string{filepath.Join(env.testdata, "0", "0", "9")}, opts, env.gopts) + testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9")}, opts, env.gopts) firstSnapshot := testRunList(t, "snapshots", env.gopts) rtest.Assert(t, len(firstSnapshot) == 1, "expected one snapshot, got %v", firstSnapshot) - testRunBackup(t, []string{filepath.Join(env.testdata, "0", "0", "9", "2")}, opts, env.gopts) - testRunBackup(t, []string{filepath.Join(env.testdata, "0", "0", "9", "3")}, opts, env.gopts) + testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "2")}, opts, env.gopts) + testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "3")}, opts, env.gopts) snapshotIDs := testRunList(t, "snapshots", env.gopts) rtest.Assert(t, len(snapshotIDs) == 3, @@ -1238,7 +1082,7 @@ func TestHardLink(t *testing.T) { opts := BackupOptions{} // first backup - testRunBackup(t, []string{env.testdata}, opts, env.gopts) + 
testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts) snapshotIDs := testRunList(t, "snapshots", env.gopts) rtest.Assert(t, len(snapshotIDs) == 1, "expected one snapshot, got %v", snapshotIDs) @@ -1332,7 +1176,7 @@ func TestQuietBackup(t *testing.T) { opts := BackupOptions{} env.gopts.Quiet = false - testRunBackup(t, []string{env.testdata}, opts, env.gopts) + testRunBackup(t, "", []string{env.testdata}, opts, env.gopts) snapshotIDs := testRunList(t, "snapshots", env.gopts) rtest.Assert(t, len(snapshotIDs) == 1, "expected one snapshot, got %v", snapshotIDs) @@ -1340,7 +1184,7 @@ func TestQuietBackup(t *testing.T) { testRunCheck(t, env.gopts) env.gopts.Quiet = true - testRunBackup(t, []string{env.testdata}, opts, env.gopts) + testRunBackup(t, "", []string{env.testdata}, opts, env.gopts) snapshotIDs = testRunList(t, "snapshots", env.gopts) rtest.Assert(t, len(snapshotIDs) == 2, "expected two snapshots, got %v", snapshotIDs) diff --git a/cmd/restic/main.go b/cmd/restic/main.go index ca1067cda..01a902b1d 100644 --- a/cmd/restic/main.go +++ b/cmd/restic/main.go @@ -30,6 +30,21 @@ directories in an encrypted repository stored on different backends. DisableAutoGenTag: true, PersistentPreRunE: func(c *cobra.Command, args []string) error { + // set verbosity, default is one + globalOptions.verbosity = 1 + if globalOptions.Quiet && (globalOptions.Verbose > 1) { + return errors.Fatal("--quiet and --verbose cannot be specified at the same time") + } + + switch { + case globalOptions.Verbose >= 2: + globalOptions.verbosity = 3 + case globalOptions.Verbose > 0: + globalOptions.verbosity = 2 + case globalOptions.Quiet: + globalOptions.verbosity = 0 + } + // parse extended options opts, err := options.Parse(globalOptions.Options) if err != nil { diff --git a/doc/010_introduction.rst b/doc/010_introduction.rst index 6128c6662..5c213f6cd 100644 --- a/doc/010_introduction.rst +++ b/doc/010_introduction.rst @@ -14,3 +14,6 @@ Introduction ############ +Restic is a fast and secure backup program. In the following sections, we will +present typical workflows, starting with installing, preparing a new +repository, and making the first backup. diff --git a/doc/020_installation.rst b/doc/020_installation.rst index df10fcb84..6cab0c9a1 100644 --- a/doc/020_installation.rst +++ b/doc/020_installation.rst @@ -145,9 +145,17 @@ Admin rights. Docker Container **************** +We're maintaining a bare docker container with just a few files and the restic +binary, you can get it with `docker pull` like this: + +.. code-block:: console + + $ docker pull restic/restic + .. note:: - | A docker container is available as a contribution (Thank you!). - | You can find it at https://github.com/Lobaro/restic-backup-docker + | Another docker container which offers more configuration options is + | available as a contribution (Thank you!). You can find it at + | https://github.com/Lobaro/restic-backup-docker From Source *********** @@ -173,7 +181,7 @@ You can easily cross-compile restic for all supported platforms, just supply the target OS and platform via the command-line options like this (for Windows and FreeBSD respectively): -:: +.. 
code-block:: console $ go run build.go --goos windows --goarch amd64 diff --git a/doc/030_preparing_a_new_repo.rst b/doc/030_preparing_a_new_repo.rst index 7d865d9a5..2133ec7c2 100644 --- a/doc/030_preparing_a_new_repo.rst +++ b/doc/030_preparing_a_new_repo.rst @@ -15,20 +15,24 @@ Preparing a new repository ########################## The place where your backups will be saved at is called a "repository". -This chapter explains how to create ("init") such a repository. +This chapter explains how to create ("init") such a repository. The repository +can be stored locally, or on some remote server or service. We'll first cover +using a local repository, the remaining sections of this chapter cover all the +other options. You can skip to the next chapter once you've read the relevant +section here. Local ***** -In order to create a repository at ``/tmp/backup``, run the following +In order to create a repository at ``/srv/restic-repo``, run the following command and enter the same password twice: .. code-block:: console - $ restic init --repo /tmp/backup + $ restic init --repo /srv/restic-repo enter password for new backend: enter password again: - created restic backend 085b3c76b9 at /tmp/backup + created restic backend 085b3c76b9 at /srv/restic-repo Please note that knowledge of your password is required to access the repository. Losing your password means that your data is irrecoverably lost. @@ -55,10 +59,10 @@ simply be achieved by changing the URL scheme in the ``init`` command: .. code-block:: console - $ restic -r sftp:user@host:/tmp/backup init + $ restic -r sftp:user@host:/srv/restic-repo init enter password for new backend: enter password again: - created restic backend f1c6108821 at sftp:user@host:/tmp/backup + created restic backend f1c6108821 at sftp:user@host:/srv/restic-repo Please note that knowledge of your password is required to access the repository. Losing your password means that your data is irrecoverably lost. @@ -87,7 +91,7 @@ specify the user name in this case): :: - $ restic -r sftp:foo:/tmp/backup init + $ restic -r sftp:foo:/srv/restic-repo init You can also add an entry with a special host name which does not exist, just for use with restic, and use the ``Hostname`` option to set the @@ -104,7 +108,7 @@ Then use it in the backend specification: :: - $ restic -r sftp:restic-backup-host:/tmp/backup init + $ restic -r sftp:restic-backup-host:/srv/restic-repo init Last, if you'd like to use an entirely different program to create the SFTP connection, you can specify the command to be run with the option @@ -509,5 +513,5 @@ On MSYS2, you can install ``winpty`` as follows: .. code-block:: console $ pacman -S winpty - $ winpty restic -r /tmp/backup init + $ winpty restic -r /srv/restic-repo init diff --git a/doc/040_backup.rst b/doc/040_backup.rst index a181879d5..badc836ba 100644 --- a/doc/040_backup.rst +++ b/doc/040_backup.rst @@ -21,43 +21,88 @@ again: .. 
code-block:: console - $ restic -r /tmp/backup backup ~/work + $ restic -r /srv/restic-repo --verbose backup ~/work + open repository enter password for repository: - scan [/home/user/work] - scanned 764 directories, 1816 files in 0:00 - [0:29] 100.00% 54.732 MiB/s 1.582 GiB / 1.582 GiB 2580 / 2580 items 0 errors ETA 0:00 - duration: 0:29, 54.47MiB/s + password is correct + lock repository + load index files + start scan + start backup + scan finished in 1.837s + processed 1.720 GiB in 0:12 + Files: 5307 new, 0 changed, 0 unmodified + Dirs: 1867 new, 0 changed, 0 unmodified + Added: 1.700 GiB snapshot 40dc1520 saved As you can see, restic created a backup of the directory and was pretty fast! The specific snapshot just created is identified by a sequence of hexadecimal characters, ``40dc1520`` in this case. +If you don't pass the ``--verbose`` option, restic will print less data (but +you'll still get a nice live status display). + If you run the command again, restic will create another snapshot of your data, but this time it's even faster. This is de-duplication at work! .. code-block:: console - $ restic -r /tmp/backup backup ~/work + $ restic -r /srv/restic-repo backup --verbose ~/work + open repository enter password for repository: - using parent snapshot 40dc1520aa6a07b7b3ae561786770a01951245d2367241e71e9485f18ae8228c - scan [/home/user/work] - scanned 764 directories, 1816 files in 0:00 - [0:00] 100.00% 0B/s 1.582 GiB / 1.582 GiB 2580 / 2580 items 0 errors ETA 0:00 - duration: 0:00, 6572.38MiB/s + password is correct + lock repository + load index files + using parent snapshot d875ae93 + start scan + start backup + scan finished in 1.881s + processed 1.720 GiB in 0:03 + Files: 0 new, 0 changed, 5307 unmodified + Dirs: 0 new, 0 changed, 1867 unmodified + Added: 0 B snapshot 79766175 saved -You can even backup individual files in the same repository. +You can even backup individual files in the same repository (not passing +``--verbose`` means less output): .. code-block:: console - $ restic -r /tmp/backup backup ~/work.txt - scan [/home/user/work.txt] - scanned 0 directories, 1 files in 0:00 - [0:00] 100.00% 0B/s 220B / 220B 1 / 1 items 0 errors ETA 0:00 - duration: 0:00, 0.03MiB/s - snapshot 31f7bd63 saved + $ restic -r /srv/restic-repo backup ~/work.txt + enter password for repository: + password is correct + snapshot 249d0210 saved + +If you're interested in what restic does, pass ``--verbose`` twice (or +``--verbose 2``) to display detailed information about each file and directory +restic encounters: + +.. code-block:: console + + $ echo 'more data foo bar' >> ~/work.txt + + $ restic -r /srv/restic-repo backup --verbose --verbose ~/work.txt + open repository + enter password for repository: + password is correct + lock repository + load index files + using parent snapshot f3f8d56b + start scan + start backup + scan finished in 2.115s + modified /home/user/work.txt, saved in 0.007s (22 B added) + modified /home/user/, saved in 0.008s (0 B added, 378 B metadata) + modified /home/, saved in 0.009s (0 B added, 375 B metadata) + processed 22 B in 0:02 + Files: 0 new, 1 changed, 0 unmodified + Dirs: 0 new, 2 changed, 0 unmodified + Data Blobs: 1 new + Tree Blobs: 3 new + Added: 1.116 KiB + snapshot 8dc503fc saved In fact several hosts may use the same repository to backup directories and files leading to a greater de-duplication. 
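The de-duplication at work here can be conveyed with a small, self-contained
Go sketch: a toy content-addressable store that keys chunks by their SHA-256
digest, so identical content is kept only once. This is only meant to
illustrate the principle; it is not restic's implementation, which splits
files with content-defined chunking and encrypts every blob:

.. code-block:: go

    package main

    import (
        "crypto/sha256"
        "fmt"
    )

    // store is a toy content-addressable store: a chunk is keyed by its
    // SHA-256 digest, so storing the same content twice keeps one copy.
    type store map[[32]byte][]byte

    // add saves chunk if it is unknown and reports whether it was new.
    func (s store) add(chunk []byte) (id [32]byte, isNew bool) {
        id = sha256.Sum256(chunk)
        if _, ok := s[id]; ok {
            return id, false // duplicate content, nothing new to store
        }
        s[id] = append([]byte(nil), chunk...)
        return id, true
    }

    func main() {
        s := store{}
        for _, c := range []string{"foo", "bar", "foo"} {
            id, isNew := s.add([]byte(c))
            fmt.Printf("chunk %x new=%v\n", id[:4], isNew)
        }
    }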
@@ -87,33 +132,53 @@ the exclude options are:
 
 - ``--exclude-if-present`` Specified one or more times to exclude a folders content
   if it contains a given file (optionally having a given header)
 
-Basic example:
+Let's say we have a file called ``excludes.txt`` with the following content:
 
-.. code-block:: console
-
-    $ cat exclude
+::
 
     # exclude go-files
     *.go
     # exclude foo/x/y/z/bar foo/x/bar foo/bar
     foo/**/bar
-    $ restic -r /tmp/backup backup ~/work --exclude="*.c" --exclude-file=exclude
+
+It can be used like this:
+
+.. code-block:: console
+
+    $ restic -r /srv/restic-repo backup ~/work --exclude="*.c" --exclude-file=excludes.txt
+
+This instructs restic to exclude files matching the following criteria:
+
+ * All files matching ``*.go`` (second line in ``excludes.txt``)
+ * All files and sub-directories named ``bar`` which reside somewhere below a directory called ``foo`` (fourth line in ``excludes.txt``)
+ * All files matching ``*.c`` (parameter ``--exclude``)
 
 Please see ``restic help backup`` for more specific information about each
 exclude option.
 
 Patterns use `filepath.Glob <https://golang.org/pkg/path/filepath/#Glob>`__ internally,
-see `filepath.Match <https://golang.org/pkg/path/filepath/#Match>`__ for syntax.
-Patterns are tested against the full path of a file/dir to be saved, not only
-against the relative path below the argument given to restic backup.
-Patterns need to match on complete path components. (``foo`` matches
-``/dir1/foo/dir2/file`` and ``/dir/foo`` but does not match ``/dir/foobar`` or
-``barfoo``.) A trailing ``/`` is ignored. A leading ``/`` anchors the
-pattern at the root directory. (``/bin`` matches ``/bin/bash`` but does not
-match ``/usr/bin/restic``.) Regular wildcards cannot be used to match over the
-directory separator ``/``. (``b*ash`` matches ``/bin/bash`` but does not match
-``/bin/ash``.) However ``**`` matches arbitrary subdirectories. (``foo/**/bar``
-matches ``/dir1/foo/dir2/bar/file``, ``/foo/bar/file`` and ``/tmp/foo/bar``.)
-Environment-variables in exclude-files are expanded with
-`os.ExpandEnv <https://golang.org/pkg/os/#ExpandEnv>`__.
+see `filepath.Match <https://golang.org/pkg/path/filepath/#Match>`__ for
+syntax. Patterns are tested against the full path of a file/dir to be saved,
+even if restic is passed a relative path to save. Environment-variables in
+exclude-files are expanded with `os.ExpandEnv <https://golang.org/pkg/os/#ExpandEnv>`__.
+
+Patterns need to match on complete path components. For example, the pattern ``foo``:
+
+ * matches ``/dir1/foo/dir2/file`` and ``/dir/foo``
+ * does not match ``/dir/foobar`` or ``barfoo``
+
+A trailing ``/`` is ignored, and a leading ``/`` anchors the
+pattern at the root directory. This means ``/bin`` matches ``/bin/bash`` but
+does not match ``/usr/bin/restic``.
+
+Regular wildcards cannot be used to match over the directory separator ``/``.
+For example: ``b*ash`` matches ``/bin/bash`` but does not match ``/bin/ash``.
+
+For this, the special wildcard ``**`` can be used to match arbitrary
+sub-directories: the pattern ``foo/**/bar`` matches:
+
+ * ``/dir1/foo/dir2/bar/file``
+ * ``/foo/bar/file``
+ * ``/tmp/foo/bar``
+
+A short code sketch illustrating these wildcard rules follows at the end of
+this section.
 
 By specifying the option ``--one-file-system`` you can instruct restic to
 only backup files from the file systems the initially specified files
@@ -122,15 +187,15 @@ backup ``/sys`` or ``/dev`` on a Linux system:
 
 .. code-block:: console
 
-    $ restic -r /tmp/backup backup --one-file-system /
+    $ restic -r /srv/restic-repo backup --one-file-system /
 
 By using the ``--files-from`` option you can read the files you want to
 backup from a file. This is especially useful if a lot of files have to be
 backed up that are not in the same folder or are maybe pre-filtered by other
 software.
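The single-component wildcard rules described above can be experimented with
in a few lines of Go. This sketch uses the standard library's
`filepath.Match`, whose syntax the documentation references; note that the
``**`` wildcard is a restic extension handled by restic's own filter package
and is not understood by `filepath.Match`:

.. code-block:: go

    package main

    import (
        "fmt"
        "path/filepath"
    )

    func main() {
        // Each case mirrors one of the rules above. filepath.Match is used
        // only to illustrate the wildcard syntax; restic applies patterns
        // per path component with its own filter package.
        cases := []struct{ pattern, name string }{
            {"b*ash", "bash"},     // * matches within one component
            {"b*ash", "ash"},      // no leading b, no match
            {"b*ash", "bin/bash"}, // * never crosses the separator
        }
        for _, c := range cases {
            ok, err := filepath.Match(c.pattern, c.name)
            if err != nil {
                panic(err)
            }
            fmt.Printf("%-9q vs %-11q -> %v\n", c.pattern, c.name, ok)
        }
    }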
-For example maybe you want to backup files that have a certain filename -in them: +For example maybe you want to backup files which have a name that matches a +certain pattern: .. code-block:: console @@ -140,14 +205,14 @@ You can then use restic to backup the filtered files: .. code-block:: console - $ restic -r /tmp/backup backup --files-from /tmp/files_to_backup + $ restic -r /srv/restic-repo backup --files-from /tmp/files_to_backup Incidentally you can also combine ``--files-from`` with the normal files args: .. code-block:: console - $ restic -r /tmp/backup backup --files-from /tmp/files_to_backup /tmp/some_additional_file + $ restic -r /srv/restic-repo backup --files-from /tmp/files_to_backup /tmp/some_additional_file Paths in the listing file can be absolute or relative. @@ -159,7 +224,7 @@ and displays a small statistic, just pass the command two snapshot IDs: .. code-block:: console - $ restic -r /tmp/backup diff 5845b002 2ab627a6 + $ restic -r /srv/restic-repo diff 5845b002 2ab627a6 password is correct comparing snapshot ea657ce5 to 2ab627a6: @@ -206,7 +271,7 @@ this mode of operation, just supply the option ``--stdin`` to the .. code-block:: console - $ mysqldump [...] | restic -r /tmp/backup backup --stdin + $ mysqldump [...] | restic -r /srv/restic-repo backup --stdin This creates a new snapshot of the output of ``mysqldump``. You can then use e.g. the fuse mounting option (see below) to mount the repository @@ -217,7 +282,7 @@ specified with ``--stdin-filename``, e.g. like this: .. code-block:: console - $ mysqldump [...] | restic -r /tmp/backup backup --stdin --stdin-filename production.sql + $ mysqldump [...] | restic -r /srv/restic-repo backup --stdin --stdin-filename production.sql Tags for backup *************** @@ -227,7 +292,7 @@ information. Just specify the tags for a snapshot one by one with ``--tag``: .. code-block:: console - $ restic -r /tmp/backup backup --tag projectX --tag foo --tag bar ~/work + $ restic -r /srv/restic-repo backup --tag projectX --tag foo --tag bar ~/work [...] The tags can later be used to keep (or forget) snapshots with the ``forget`` diff --git a/doc/045_working_with_repos.rst b/doc/045_working_with_repos.rst index 5ee39ea26..773234ca0 100644 --- a/doc/045_working_with_repos.rst +++ b/doc/045_working_with_repos.rst @@ -22,7 +22,7 @@ Now, you can list all the snapshots stored in the repository: .. code-block:: console - $ restic -r /tmp/backup snapshots + $ restic -r /srv/restic-repo snapshots enter password for repository: ID Date Host Tags Directory ---------------------------------------------------------------------- @@ -36,7 +36,7 @@ You can filter the listing by directory path: .. code-block:: console - $ restic -r /tmp/backup snapshots --path="/srv" + $ restic -r /srv/restic-repo snapshots --path="/srv" enter password for repository: ID Date Host Tags Directory ---------------------------------------------------------------------- @@ -47,7 +47,7 @@ Or filter by host: .. code-block:: console - $ restic -r /tmp/backup snapshots --host luigi + $ restic -r /srv/restic-repo snapshots --host luigi enter password for repository: ID Date Host Tags Directory ---------------------------------------------------------------------- @@ -74,7 +74,7 @@ backup data is consistent and the integrity is unharmed: .. code-block:: console - $ restic -r /tmp/backup check + $ restic -r /srv/restic-repo check Load indexes ciphertext verification failed @@ -83,7 +83,7 @@ yield the same error: .. 
code-block:: console - $ restic -r /tmp/backup restore 79766175 --target /tmp/restore-work + $ restic -r /srv/restic-repo restore 79766175 --target /tmp/restore-work Load indexes ciphertext verification failed @@ -93,7 +93,7 @@ data files: .. code-block:: console - $ restic -r /tmp/backup check --read-data + $ restic -r /srv/restic-repo check --read-data load indexes check all packs check snapshots, trees and blobs @@ -107,9 +107,9 @@ commands check all repository data files over 5 separate invocations: .. code-block:: console - $ restic -r /tmp/backup check --read-data-subset=1/5 - $ restic -r /tmp/backup check --read-data-subset=2/5 - $ restic -r /tmp/backup check --read-data-subset=3/5 - $ restic -r /tmp/backup check --read-data-subset=4/5 - $ restic -r /tmp/backup check --read-data-subset=5/5 + $ restic -r /srv/restic-repo check --read-data-subset=1/5 + $ restic -r /srv/restic-repo check --read-data-subset=2/5 + $ restic -r /srv/restic-repo check --read-data-subset=3/5 + $ restic -r /srv/restic-repo check --read-data-subset=4/5 + $ restic -r /srv/restic-repo check --read-data-subset=5/5 diff --git a/doc/050_restore.rst b/doc/050_restore.rst index 50c02c760..35e27e730 100644 --- a/doc/050_restore.rst +++ b/doc/050_restore.rst @@ -23,7 +23,7 @@ command to restore the contents of the latest snapshot to .. code-block:: console - $ restic -r /tmp/backup restore 79766175 --target /tmp/restore-work + $ restic -r /srv/restic-repo restore 79766175 --target /tmp/restore-work enter password for repository: restoring to /tmp/restore-work @@ -33,7 +33,7 @@ backup for a specific host, path or both. .. code-block:: console - $ restic -r /tmp/backup restore latest --target /tmp/restore-art --path "/home/art" --host luigi + $ restic -r /srv/restic-repo restore latest --target /tmp/restore-art --path "/home/art" --host luigi enter password for repository: restoring to /tmp/restore-art @@ -42,7 +42,7 @@ files in the snapshot. For example, to restore a single file: .. code-block:: console - $ restic -r /tmp/backup restore 79766175 --target /tmp/restore-work --include /work/foo + $ restic -r /srv/restic-repo restore 79766175 --target /tmp/restore-work --include /work/foo enter password for repository: restoring to /tmp/restore-work @@ -58,9 +58,9 @@ command to serve the repository with FUSE: .. code-block:: console $ mkdir /mnt/restic - $ restic -r /tmp/backup mount /mnt/restic + $ restic -r /srv/restic-repo mount /mnt/restic enter password for repository: - Now serving /tmp/backup at /mnt/restic + Now serving /srv/restic-repo at /mnt/restic Don't forget to umount after quitting! Mounting repositories via FUSE is not possible on OpenBSD, Solaris/illumos @@ -80,4 +80,4 @@ the data directly. This can be achieved by using the `dump` command, like this: .. code-block:: console - $ restic -r /tmp/backup dump latest production.sql | mysql + $ restic -r /srv/restic-repo dump latest production.sql | mysql diff --git a/doc/060_forget.rst b/doc/060_forget.rst index 2a3595952..ab5274758 100644 --- a/doc/060_forget.rst +++ b/doc/060_forget.rst @@ -35,7 +35,7 @@ repository like this: .. code-block:: console - $ restic -r /tmp/backup snapshots + $ restic -r /srv/restic-repo snapshots enter password for repository: ID Date Host Tags Directory ---------------------------------------------------------------------- @@ -50,7 +50,7 @@ command and specify the snapshot ID on the command line: .. 
code-block:: console - $ restic -r /tmp/backup forget bdbd3439 + $ restic -r /srv/restic-repo forget bdbd3439 enter password for repository: removed snapshot bdbd3439 @@ -58,7 +58,7 @@ Afterwards this snapshot is removed: .. code-block:: console - $ restic -r /tmp/backup snapshots + $ restic -r /srv/restic-repo snapshots enter password for repository: ID Date Host Tags Directory ---------------------------------------------------------------------- @@ -73,7 +73,7 @@ command must be run: .. code-block:: console - $ restic -r /tmp/backup prune + $ restic -r /srv/restic-repo prune enter password for repository: counting files in repo diff --git a/doc/070_encryption.rst b/doc/070_encryption.rst index c0889b852..a7b8716ac 100644 --- a/doc/070_encryption.rst +++ b/doc/070_encryption.rst @@ -16,8 +16,8 @@ Encryption *"The design might not be perfect, but it’s good. Encryption is a first-class feature, -the implementation looks sane and I guess the deduplication trade-off is worth it. So… I’m going to use restic for -my personal backups.*" `Filippo Valsorda`_ +the implementation looks sane and I guess the deduplication trade-off is worth +it. So… I’m going to use restic for my personal backups.*" `Filippo Valsorda`_ .. _Filippo Valsorda: https://blog.filippo.io/restic-cryptography/ @@ -31,19 +31,19 @@ per repository. In fact, you can use the ``list``, ``add``, ``remove``, and .. code-block:: console - $ restic -r /tmp/backup key list + $ restic -r /srv/restic-repo key list enter password for repository: ID User Host Created ---------------------------------------------------------------------- *eb78040b username kasimir 2015-08-12 13:29:57 - $ restic -r /tmp/backup key add + $ restic -r /srv/restic-repo key add enter password for repository: enter password for new key: enter password again: saved new key as - $ restic -r backup key list + $ restic -r /srv/restic-repo key list enter password for repository: ID User Host Created ---------------------------------------------------------------------- diff --git a/doc/075_scripting.rst b/doc/075_scripting.rst index c0a73d9b5..712a70244 100644 --- a/doc/075_scripting.rst +++ b/doc/075_scripting.rst @@ -26,10 +26,10 @@ times. The command ``snapshots`` may be used for this purpose: .. code-block:: console - $ restic -r /tmp/backup snapshots - Fatal: unable to open config file: Stat: stat /tmp/backup/config: no such file or directory + $ restic -r /srv/restic-repo snapshots + Fatal: unable to open config file: Stat: stat /srv/restic-repo/config: no such file or directory Is there a repository at the following location? - /tmp/backup + /srv/restic-repo If a repository does not exist, restic will return a non-zero exit code and print an error message. Note that restic will also return a non-zero diff --git a/doc/100_references.rst b/doc/100_references.rst index be22defa0..23ae2956e 100644 --- a/doc/100_references.rst +++ b/doc/100_references.rst @@ -625,14 +625,15 @@ are deleted, the particular snapshot vanishes and all snapshots depending on data that has been added in the snapshot cannot be restored completely. Restic is not designed to detect this attack. +*********** Local Cache -=========== +*********** In order to speed up certain operations, restic manages a local cache of data. This document describes the data structures for the local cache with version 1. Versions --------- +======== The cache directory is selected according to the `XDG base dir specification <https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html>`__.
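To illustrate the lookup, a per-repository cache path following the XDG rules could be derived as in the sketch below (a minimal illustration using only Go's standard library, not restic's actual cache code; the repository ID shown is hypothetical):

.. code-block:: go

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
    )

    func main() {
        // Per the XDG base dir spec: use $XDG_CACHE_HOME if set,
        // otherwise fall back to $HOME/.cache.
        base := os.Getenv("XDG_CACHE_HOME")
        if base == "" {
            base = filepath.Join(os.Getenv("HOME"), ".cache")
        }

        repoID := "3c5e3b0d" // hypothetical (shortened) repository ID, chosen at init
        fmt.Println(filepath.Join(base, "restic", repoID))
    }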
@@ -646,12 +647,21 @@ a lower version number is found the cache is recreated with the current version. If a higher version number is found the cache is ignored and left as is. -Snapshots and Indexes ---------------------- +Snapshots, Data and Indexes +=========================== Snapshot, Data and Index files are cached in the sub-directories ``snapshots``, ``data`` and ``index``, as read from the repository. +Expiry +====== + +Whenever a cache directory for a repo is used, that directory's modification +timestamp is updated to the current time. By looking at the modification +timestamps of the repo cache directories it is easy to decide which directories +are old and haven't been used in a long time. Those are probably stale and can +be removed. + ************ REST Backend @@ -798,24 +808,3 @@ Returns "200 OK" if the blob with the given name and type has been deleted from the repository, an HTTP error otherwise. -***** -Talks -***** - -The following talks will be or have been given about restic: - -- 2016-01-31: Lightning Talk at the Go Devroom at FOSDEM 2016, - Brussels, Belgium -- 2016-01-29: `restic - Backups mal - richtig `__: - Public lecture in German at `CCC Cologne - e.V. `__ in Cologne, Germany -- 2015-08-23: `A Solution to the Backup - Inconvenience `__: - Lecture at `FROSCON 2015 `__ in Bonn, Germany -- 2015-02-01: `Lightning Talk at FOSDEM - 2015 `__: A - short introduction (with slightly outdated command line) -- 2015-01-27: `Talk about restic at CCC - Aachen `__ - (in German) diff --git a/doc/110_talks.rst b/doc/110_talks.rst new file mode 100644 index 000000000..06952896f --- /dev/null +++ b/doc/110_talks.rst @@ -0,0 +1,34 @@ +.. + Normally, there are no heading levels assigned to certain characters as the structure is + determined from the succession of headings. However, this convention is used in Python’s + Style Guide for documentation, which you may follow: + + # with overline, for parts + * for chapters + = for sections + - for subsections + ^ for subsubsections + " for paragraphs + + +##### +Talks +##### + +The following talks will be or have been given about restic: + +- 2016-01-31: Lightning Talk at the Go Devroom at FOSDEM 2016, + Brussels, Belgium +- 2016-01-29: `restic - Backups mal + richtig `__: + Public lecture in German at `CCC Cologne + e.V. `__ in Cologne, Germany +- 2015-08-23: `A Solution to the Backup + Inconvenience `__: + Lecture at `FROSCON 2015 `__ in Bonn, Germany +- 2015-02-01: `Lightning Talk at FOSDEM + 2015 `__: A + short introduction (with slightly outdated command line) +- 2015-01-27: `Talk about restic at CCC + Aachen `__ + (in German) diff --git a/doc/cache.rst b/doc/cache.rst deleted file mode 100644 index a39a1e76c..000000000 --- a/doc/cache.rst +++ /dev/null @@ -1,36 +0,0 @@ -Local Cache -=========== - -In order to speed up certain operations, restic manages a local cache of data. -This document describes the data structures for the local cache with version 1. - -Versions -------- - -The cache directory is selected according to the `XDG base dir specification -`__. -Each repository has its own cache sub-directory, consting of the repository ID -which is chosen at ``init``. All cache directories for different repos are -independent of each other. - -The cache dir for a repo contains a file named ``version``, which contains a -single ASCII integer line that stands for the current version of the cache. If -a lower version number is found the cache is recreated with the current -version.
If a higher version number is found the cache is ignored and left as -is. - -Snapshots, Data and Indexes ---------------------------- - -Snapshot, Data and Index files are cached in the sub-directories ``snapshots``, -``data`` and ``index``, as read from the repository. - -Expiry ------- - -Whenever a cache directory for a repo is used, that directory's modification -timestamp is updated to the current time. By looking at the modification -timestamps of the repo cache directories it is easy to decide which directories -are old and haven't been used in a long time. Those are probably stale and can -be removed. - diff --git a/doc/conf.py b/doc/conf.py index 3f7c66158..3c0af927b 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -35,7 +35,7 @@ master_doc = 'index' # General information about the project. project = 'restic' -copyright = '2017, restic authors' +copyright = '2018, restic authors' author = 'fd0' # The version info for the project you're documenting, acts as replacement for diff --git a/doc/index.rst b/doc/index.rst index a5f82e284..68f86c398 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -16,5 +16,6 @@ Restic Documentation 080_examples 090_participating 100_references + 110_talks faq manual_rest diff --git a/doc/manual_rest.rst b/doc/manual_rest.rst index a53e34869..540a6d60a 100644 --- a/doc/manual_rest.rst +++ b/doc/manual_rest.rst @@ -19,6 +19,7 @@ Usage help is available: backup Create a new backup of files and/or directories cat Print internal objects to stdout check Check the repository for errors + diff Show differences between two snapshots dump Print a backed-up file to stdout find Find a file or directory forget Remove snapshots from the repository @@ -39,24 +40,24 @@ Usage help is available: version Print version information Flags: - --cacert stringSlice path to load root certificates from (default: use system certificates) - --cache-dir string set the cache directory - -h, --help help for restic - --json set output mode to JSON for commands that support it - --limit-download int limits downloads to a maximum rate in KiB/s. (default: unlimited) - --limit-upload int limits uploads to a maximum rate in KiB/s. (default: unlimited) - --no-cache do not use a local cache - --no-lock do not lock the repo, this allows some operations on read-only repos - -o, --option key=value set extended option (key=value, can be specified multiple times) - -p, --password-file string read the repository password from a file (default: $RESTIC_PASSWORD_FILE) - -q, --quiet do not output comprehensive progress report - -r, --repo string repository to backup to or restore from (default: $RESTIC_REPOSITORY) + --cacert stringSlice path to load root certificates from (default: use system certificates) + --cache-dir string set the cache directory + --cleanup-cache auto remove old cache directories + -h, --help help for restic + --json set output mode to JSON for commands that support it + --limit-download int limits downloads to a maximum rate in KiB/s. (default: unlimited) + --limit-upload int limits uploads to a maximum rate in KiB/s. 
(default: unlimited) + --no-cache do not use a local cache + --no-lock do not lock the repo, this allows some operations on read-only repos + -o, --option key=value set extended option (key=value, can be specified multiple times) + -p, --password-file string read the repository password from a file (default: $RESTIC_PASSWORD_FILE) + -q, --quiet do not output comprehensive progress report + -r, --repo string repository to backup to or restore from (default: $RESTIC_REPOSITORY) --tls-client-cert string path to a file containing PEM encoded TLS client certificate and private key - + -v, --verbose count[=-1] be verbose (can be specified multiple times) Use "restic [command] --help" for more information about a command. - Similar to programs such as ``git``, restic has a number of sub-commands. You can see these commands in the listing above. Each sub-command may have its own command-line options, and there is a help @@ -87,21 +88,23 @@ command: --stdin-filename string file name to use when reading from stdin (default "stdin") --tag tag add a tag for the new snapshot (can be specified multiple times) --time string time of the backup (ex. '2012-11-01 22:08:41') (default: now) + --with-atime store the atime for all files and directories Global Flags: - --cacert stringSlice path to load root certificates from (default: use system certificates) - --cache-dir string set the cache directory - --json set output mode to JSON for commands that support it - --limit-download int limits downloads to a maximum rate in KiB/s. (default: unlimited) - --limit-upload int limits uploads to a maximum rate in KiB/s. (default: unlimited) - --no-cache do not use a local cache - --no-lock do not lock the repo, this allows some operations on read-only repos - -o, --option key=value set extended option (key=value, can be specified multiple times) - -p, --password-file string read the repository password from a file (default: $RESTIC_PASSWORD_FILE) - -q, --quiet do not output comprehensive progress report - -r, --repo string repository to backup to or restore from (default: $RESTIC_REPOSITORY) - --tls-client-cert string path to a TLS client certificate - --tls-client-key string path to a TLS client certificate key + --cacert stringSlice path to load root certificates from (default: use system certificates) + --cache-dir string set the cache directory + --cleanup-cache auto remove old cache directories + --json set output mode to JSON for commands that support it + --limit-download int limits downloads to a maximum rate in KiB/s. (default: unlimited) + --limit-upload int limits uploads to a maximum rate in KiB/s. (default: unlimited) + --no-cache do not use a local cache + --no-lock do not lock the repo, this allows some operations on read-only repos + -o, --option key=value set extended option (key=value, can be specified multiple times) + -p, --password-file string read the repository password from a file (default: $RESTIC_PASSWORD_FILE) + -q, --quiet do not output comprehensive progress report + -r, --repo string repository to backup to or restore from (default: $RESTIC_REPOSITORY) + --tls-client-cert string path to a file containing PEM encoded TLS client certificate and private key + -v, --verbose n[=-1] be verbose (specify --verbose multiple times or level n) Subcommands that support showing progress information such as ``backup``, ``check`` and ``prune`` will do so unless the quiet flag ``-q`` or @@ -128,7 +131,7 @@ command does that: ..
code-block:: console - $ restic -r /tmp/backup tag --set NL --set CH 590c8fc8 + $ restic -r /srv/restic-repo tag --set NL --set CH 590c8fc8 create exclusive lock for repository modified tags on 1 snapshots @@ -141,19 +144,19 @@ So we can add and remove tags incrementally like this: .. code-block:: console - $ restic -r /tmp/backup tag --tag NL --remove CH + $ restic -r /srv/restic-repo tag --tag NL --remove CH create exclusive lock for repository modified tags on 1 snapshots - $ restic -r /tmp/backup tag --tag NL --add UK + $ restic -r /srv/restic-repo tag --tag NL --add UK create exclusive lock for repository modified tags on 1 snapshots - $ restic -r /tmp/backup tag --tag NL --remove NL + $ restic -r /srv/restic-repo tag --tag NL --remove NL create exclusive lock for repository modified tags on 1 snapshots - $ restic -r /tmp/backup tag --tag NL --add SOMETHING + $ restic -r /srv/restic-repo tag --tag NL --add SOMETHING no snapshots were modified Under the hood @@ -170,7 +173,7 @@ locks with the following command: .. code-block:: console - $ restic -r /tmp/backup list snapshots + $ restic -r /srv/restic-repo list snapshots d369ccc7d126594950bf74f0a348d5d98d9e99f3215082eb69bf02dc9b3e464c The ``find`` command searches for a given @@ -191,7 +194,7 @@ objects or their raw content. .. code-block:: console - $ restic -r /tmp/backup cat snapshot d369ccc7d126594950bf74f0a348d5d98d9e99f3215082eb69bf02dc9b3e464c + $ restic -r /srv/restic-repo cat snapshot d369ccc7d126594950bf74f0a348d5d98d9e99f3215082eb69bf02dc9b3e464c enter password for repository: { "time": "2015-08-12T12:52:44.091448856+02:00", @@ -242,7 +245,7 @@ lists all snapshots as JSON and uses ``jq`` to pretty-print the result: .. code-block:: console - $ restic -r /tmp/backup snapshots --json | jq . + $ restic -r /srv/restic-repo snapshots --json | jq . [ { "time": "2017-03-11T09:57:43.26630619+01:00", @@ -283,7 +286,7 @@ instead of the default, set the environment variable like this: .. code-block:: console $ export TMPDIR=/var/tmp/restic-tmp - $ restic -r /tmp/backup backup ~/work + $ restic -r /srv/restic-repo backup ~/work diff --git a/internal/archiver/archive_reader.go b/internal/archiver/archive_reader.go deleted file mode 100644 index fa00a7406..000000000 --- a/internal/archiver/archive_reader.go +++ /dev/null @@ -1,117 +0,0 @@ -package archiver - -import ( - "context" - "io" - "time" - - "github.com/restic/restic/internal/debug" - "github.com/restic/restic/internal/restic" - - "github.com/restic/restic/internal/errors" - - "github.com/restic/chunker" -) - -// Reader allows saving a stream of data to the repository. -type Reader struct { - restic.Repository - - Tags []string - Hostname string - TimeStamp time.Time -} - -// Archive reads data from the reader and saves it to the repo. 
-func (r *Reader) Archive(ctx context.Context, name string, rd io.Reader, p *restic.Progress) (*restic.Snapshot, restic.ID, error) { - if name == "" { - return nil, restic.ID{}, errors.New("no filename given") - } - debug.Log("start archiving %s", name) - sn, err := restic.NewSnapshot([]string{name}, r.Tags, r.Hostname, r.TimeStamp) - if err != nil { - return nil, restic.ID{}, err - } - - p.Start() - defer p.Done() - - repo := r.Repository - chnker := chunker.New(rd, repo.Config().ChunkerPolynomial) - - ids := restic.IDs{} - var fileSize uint64 - - for { - chunk, err := chnker.Next(getBuf()) - if errors.Cause(err) == io.EOF { - break - } - - if err != nil { - return nil, restic.ID{}, errors.Wrap(err, "chunker.Next()") - } - - id := restic.Hash(chunk.Data) - - if !repo.Index().Has(id, restic.DataBlob) { - _, err := repo.SaveBlob(ctx, restic.DataBlob, chunk.Data, id) - if err != nil { - return nil, restic.ID{}, err - } - debug.Log("saved blob %v (%d bytes)\n", id, chunk.Length) - } else { - debug.Log("blob %v already saved in the repo\n", id) - } - - freeBuf(chunk.Data) - - ids = append(ids, id) - - p.Report(restic.Stat{Bytes: uint64(chunk.Length)}) - fileSize += uint64(chunk.Length) - } - - tree := &restic.Tree{ - Nodes: []*restic.Node{ - { - Name: name, - AccessTime: time.Now(), - ModTime: time.Now(), - Type: "file", - Mode: 0644, - Size: fileSize, - UID: sn.UID, - GID: sn.GID, - User: sn.Username, - Content: ids, - }, - }, - } - - treeID, err := repo.SaveTree(ctx, tree) - if err != nil { - return nil, restic.ID{}, err - } - sn.Tree = &treeID - debug.Log("tree saved as %v", treeID) - - id, err := repo.SaveJSONUnpacked(ctx, restic.SnapshotFile, sn) - if err != nil { - return nil, restic.ID{}, err - } - - debug.Log("snapshot saved as %v", id) - - err = repo.Flush(ctx) - if err != nil { - return nil, restic.ID{}, err - } - - err = repo.SaveIndex(ctx) - if err != nil { - return nil, restic.ID{}, err - } - - return sn, id, nil -} diff --git a/internal/archiver/archive_reader_test.go b/internal/archiver/archive_reader_test.go deleted file mode 100644 index d0fdb06cf..000000000 --- a/internal/archiver/archive_reader_test.go +++ /dev/null @@ -1,206 +0,0 @@ -package archiver - -import ( - "bytes" - "context" - "errors" - "io" - "math/rand" - "testing" - - "github.com/restic/restic/internal/checker" - "github.com/restic/restic/internal/repository" - "github.com/restic/restic/internal/restic" -) - -func loadBlob(t *testing.T, repo restic.Repository, id restic.ID, buf []byte) int { - n, err := repo.LoadBlob(context.TODO(), restic.DataBlob, id, buf) - if err != nil { - t.Fatalf("LoadBlob(%v) returned error %v", id, err) - } - - return n -} - -func checkSavedFile(t *testing.T, repo restic.Repository, treeID restic.ID, name string, rd io.Reader) { - tree, err := repo.LoadTree(context.TODO(), treeID) - if err != nil { - t.Fatalf("LoadTree() returned error %v", err) - } - - if len(tree.Nodes) != 1 { - t.Fatalf("wrong number of nodes for tree, want %v, got %v", 1, len(tree.Nodes)) - } - - node := tree.Nodes[0] - if node.Name != "fakefile" { - t.Fatalf("wrong filename, want %v, got %v", "fakefile", node.Name) - } - - if len(node.Content) == 0 { - t.Fatalf("node.Content has length 0") - } - - // check blobs - for i, id := range node.Content { - size, found := repo.LookupBlobSize(id, restic.DataBlob) - if !found { - t.Fatal("Failed to find blob", id.Str()) - } - - buf := restic.NewBlobBuffer(int(size)) - n := loadBlob(t, repo, id, buf) - if n != len(buf) { - t.Errorf("wrong number of bytes read, want %d, got 
%d", len(buf), n) - } - - buf2 := make([]byte, int(size)) - _, err := io.ReadFull(rd, buf2) - if err != nil { - t.Fatal(err) - } - - if !bytes.Equal(buf, buf2) { - t.Fatalf("blob %d (%v) is wrong", i, id.Str()) - } - } -} - -// fakeFile returns a reader which yields deterministic pseudo-random data. -func fakeFile(t testing.TB, seed, size int64) io.Reader { - return io.LimitReader(restic.NewRandReader(rand.New(rand.NewSource(seed))), size) -} - -func TestArchiveReader(t *testing.T) { - repo, cleanup := repository.TestRepository(t) - defer cleanup() - - seed := rand.Int63() - size := int64(rand.Intn(50*1024*1024) + 50*1024*1024) - t.Logf("seed is 0x%016x, size is %v", seed, size) - - f := fakeFile(t, seed, size) - - r := &Reader{ - Repository: repo, - Hostname: "localhost", - Tags: []string{"test"}, - } - - sn, id, err := r.Archive(context.TODO(), "fakefile", f, nil) - if err != nil { - t.Fatalf("ArchiveReader() returned error %v", err) - } - - if id.IsNull() { - t.Fatalf("ArchiveReader() returned null ID") - } - - t.Logf("snapshot saved as %v, tree is %v", id.Str(), sn.Tree.Str()) - - checkSavedFile(t, repo, *sn.Tree, "fakefile", fakeFile(t, seed, size)) - - checker.TestCheckRepo(t, repo) -} - -func TestArchiveReaderNull(t *testing.T) { - repo, cleanup := repository.TestRepository(t) - defer cleanup() - - r := &Reader{ - Repository: repo, - Hostname: "localhost", - Tags: []string{"test"}, - } - - sn, id, err := r.Archive(context.TODO(), "fakefile", bytes.NewReader(nil), nil) - if err != nil { - t.Fatalf("ArchiveReader() returned error %v", err) - } - - if id.IsNull() { - t.Fatalf("ArchiveReader() returned null ID") - } - - t.Logf("snapshot saved as %v, tree is %v", id.Str(), sn.Tree.Str()) - - checker.TestCheckRepo(t, repo) -} - -type errReader string - -func (e errReader) Read([]byte) (int, error) { - return 0, errors.New(string(e)) -} - -func countSnapshots(t testing.TB, repo restic.Repository) int { - snapshots := 0 - err := repo.List(context.TODO(), restic.SnapshotFile, func(id restic.ID, size int64) error { - snapshots++ - return nil - }) - if err != nil { - t.Fatal(err) - } - return snapshots -} - -func TestArchiveReaderError(t *testing.T) { - repo, cleanup := repository.TestRepository(t) - defer cleanup() - - r := &Reader{ - Repository: repo, - Hostname: "localhost", - Tags: []string{"test"}, - } - - sn, id, err := r.Archive(context.TODO(), "fakefile", errReader("error returned by reading stdin"), nil) - if err == nil { - t.Errorf("expected error not returned") - } - - if sn != nil { - t.Errorf("Snapshot should be nil, but isn't") - } - - if !id.IsNull() { - t.Errorf("id should be null, but %v returned", id.Str()) - } - - n := countSnapshots(t, repo) - if n > 0 { - t.Errorf("expected zero snapshots, but got %d", n) - } - - checker.TestCheckRepo(t, repo) -} - -func BenchmarkArchiveReader(t *testing.B) { - repo, cleanup := repository.TestRepository(t) - defer cleanup() - - const size = 50 * 1024 * 1024 - - buf := make([]byte, size) - _, err := io.ReadFull(fakeFile(t, 23, size), buf) - if err != nil { - t.Fatal(err) - } - - r := &Reader{ - Repository: repo, - Hostname: "localhost", - Tags: []string{"test"}, - } - - t.SetBytes(size) - t.ResetTimer() - - for i := 0; i < t.N; i++ { - _, _, err := r.Archive(context.TODO(), "fakefile", bytes.NewReader(buf), nil) - if err != nil { - t.Fatal(err) - } - } -} diff --git a/internal/archiver/archiver.go b/internal/archiver/archiver.go index 9f2e029fb..143c81e34 100644 --- a/internal/archiver/archiver.go +++ b/internal/archiver/archiver.go @@ 
-3,868 +3,786 @@ package archiver import ( "context" "encoding/json" - "fmt" - "io" "os" - "path/filepath" + "path" + "runtime" "sort" - "sync" + "syscall" "time" - "github.com/restic/restic/internal/errors" - "github.com/restic/restic/internal/restic" - "github.com/restic/restic/internal/walk" - "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/fs" - "github.com/restic/restic/internal/pipe" - - "github.com/restic/chunker" + "github.com/restic/restic/internal/restic" ) -const ( - maxConcurrentBlobs = 32 - maxConcurrency = 10 -) +// SelectFunc returns true for all items that should be included (files and +// dirs). If false is returned, files are ignored and dirs are not even walked. +type SelectFunc func(item string, fi os.FileInfo) bool -var archiverPrintWarnings = func(path string, fi os.FileInfo, err error) { - fmt.Fprintf(os.Stderr, "warning for %v: %v", path, err) +// ErrorFunc is called when an error during archiving occurs. When nil is +// returned, the archiver continues, otherwise it aborts and passes the error +// up the call stack. +type ErrorFunc func(file string, fi os.FileInfo, err error) error + +// ItemStats collects some statistics about a particular file or directory. +type ItemStats struct { + DataBlobs int // number of new data blobs added for this item + DataSize uint64 // sum of the sizes of all new data blobs + TreeBlobs int // number of new tree blobs added for this item + TreeSize uint64 // sum of the sizes of all new tree blobs } -var archiverAllowAllFiles = func(string, os.FileInfo) bool { return true } -// Archiver is used to backup a set of directories. +// Add adds other to the current ItemStats. +func (s *ItemStats) Add(other ItemStats) { + s.DataBlobs += other.DataBlobs + s.DataSize += other.DataSize + s.TreeBlobs += other.TreeBlobs + s.TreeSize += other.TreeSize +} + +// Archiver saves a directory structure to the repo. type Archiver struct { - repo restic.Repository - knownBlobs struct { - restic.IDSet - sync.Mutex - } + Repo restic.Repository + Select SelectFunc + FS fs.FS + Options Options - blobToken chan struct{} + blobSaver *BlobSaver + fileSaver *FileSaver - Warn func(dir string, fi os.FileInfo, err error) - SelectFilter pipe.SelectFunc - Excludes []string + // Error is called for all errors that occur during backup. + Error ErrorFunc - WithAccessTime bool + // CompleteItem is called for all files and dirs once they have been + // processed successfully. The parameter item contains the path as it will + // be in the snapshot after saving. s contains some statistics about this + // particular file/dir. + // + // CompleteItem may be called asynchronously from several different + // goroutines! + CompleteItem func(item string, previous, current *restic.Node, s ItemStats, d time.Duration) + + // StartFile is called when a file is being processed by a worker. + StartFile func(filename string) + + // CompleteBlob is called for all saved blobs for files. + CompleteBlob func(filename string, bytes uint64) + + // WithAtime configures if the access time for files and directories should + // be saved. Enabling it may result in much metadata, so it's off by + // default. + WithAtime bool } -// New returns a new archiver. -func New(repo restic.Repository) *Archiver { +// Options is used to configure the archiver. +type Options struct { + // FileReadConcurrency sets how many files are read in concurrently. 
If + // it's set to zero, at most two files are read in concurrently (which + // turned out to be a good default for most situations). + FileReadConcurrency uint + + // SaveBlobConcurrency sets how many blobs are hashed and saved + // concurrently. If it's set to zero, the default is the number of CPUs + // available in the system. + SaveBlobConcurrency uint +} + +// ApplyDefaults returns a copy of o with the default options set for all unset +// fields. +func (o Options) ApplyDefaults() Options { + if o.FileReadConcurrency == 0 { + // two is a sweet spot for almost all situations. We've done some + // experiments documented here: + // https://github.com/borgbackup/borg/issues/3500 + o.FileReadConcurrency = 2 + } + + if o.SaveBlobConcurrency == 0 { + o.SaveBlobConcurrency = uint(runtime.NumCPU()) + } + + return o +} + +// New initializes a new archiver. +func New(repo restic.Repository, fs fs.FS, opts Options) *Archiver { arch := &Archiver{ - repo: repo, - blobToken: make(chan struct{}, maxConcurrentBlobs), - knownBlobs: struct { - restic.IDSet - sync.Mutex - }{ - IDSet: restic.NewIDSet(), - }, - } + Repo: repo, + Select: func(string, os.FileInfo) bool { return true }, + FS: fs, + Options: opts.ApplyDefaults(), - for i := 0; i < maxConcurrentBlobs; i++ { - arch.blobToken <- struct{}{} + CompleteItem: func(string, *restic.Node, *restic.Node, ItemStats, time.Duration) {}, + StartFile: func(string) {}, + CompleteBlob: func(string, uint64) {}, } - arch.Warn = archiverPrintWarnings - arch.SelectFilter = archiverAllowAllFiles - return arch } -// isKnownBlob returns true iff the blob is not yet in the list of known blobs. -// When the blob is not known, false is returned and the blob is added to the -// list. This means that the caller false is returned to is responsible to save -// the blob to the backend. -func (arch *Archiver) isKnownBlob(id restic.ID, t restic.BlobType) bool { - arch.knownBlobs.Lock() - defer arch.knownBlobs.Unlock() +// Valid returns an error if anything is missing. +func (arch *Archiver) Valid() error { + if arch.blobSaver == nil { + return errors.New("blobSaver is nil") + } - if arch.knownBlobs.Has(id) { + if arch.fileSaver == nil { + return errors.New("fileSaver is nil") + } + + if arch.Repo == nil { + return errors.New("repo is not set") + } + + if arch.Select == nil { + return errors.New("Select is not set") + } + + if arch.FS == nil { + return errors.New("FS is not set") + } + + return nil +} + +// error calls arch.Error if it is set. +func (arch *Archiver) error(item string, fi os.FileInfo, err error) error { + if arch.Error == nil || err == nil { + return err + } + + errf := arch.Error(item, fi, err) + if err != errf { + debug.Log("item %v: error was filtered by handler, before: %q, after: %v", item, err, errf) + } + return errf +} + +// saveTree stores a tree in the repo. It checks the index and the known blobs +// before saving anything. 
+func (arch *Archiver) saveTree(ctx context.Context, t *restic.Tree) (restic.ID, ItemStats, error) { + var s ItemStats + buf, err := json.Marshal(t) + if err != nil { + return restic.ID{}, s, errors.Wrap(err, "MarshalJSON") + } + + // append a newline so that the data is always consistent (json.Encoder + // adds a newline after each object) + buf = append(buf, '\n') + + b := Buffer{Data: buf} + res := arch.blobSaver.Save(ctx, restic.TreeBlob, b) + if res.Err() != nil { + return restic.ID{}, s, res.Err() + } + + if !res.Known() { + s.TreeBlobs++ + s.TreeSize += uint64(len(buf)) + } + return res.ID(), s, nil +} + +// nodeFromFileInfo returns the restic node from a os.FileInfo. +func (arch *Archiver) nodeFromFileInfo(filename string, fi os.FileInfo) (*restic.Node, error) { + node, err := restic.NodeFromFileInfo(filename, fi) + if !arch.WithAtime { + node.AccessTime = node.ModTime + } + return node, errors.Wrap(err, "NodeFromFileInfo") +} + +// loadSubtree tries to load the subtree referenced by node. In case of an error, nil is returned. +func (arch *Archiver) loadSubtree(ctx context.Context, node *restic.Node) *restic.Tree { + if node == nil || node.Type != "dir" || node.Subtree == nil { + return nil + } + + tree, err := arch.Repo.LoadTree(ctx, *node.Subtree) + if err != nil { + debug.Log("unable to load tree %v: %v", node.Subtree.Str(), err) + // TODO: handle error + return nil + } + + return tree +} + +// SaveDir stores a directory in the repo and returns the node. snPath is the +// path within the current snapshot. +func (arch *Archiver) SaveDir(ctx context.Context, snPath string, fi os.FileInfo, dir string, previous *restic.Tree) (*restic.Node, ItemStats, error) { + debug.Log("%v %v", snPath, dir) + + var s ItemStats + + treeNode, err := arch.nodeFromFileInfo(dir, fi) + if err != nil { + return nil, s, err + } + + names, err := readdirnames(arch.FS, dir) + if err != nil { + return nil, s, err + } + + var futures []FutureNode + + tree := restic.NewTree() + + for _, name := range names { + pathname := arch.FS.Join(dir, name) + oldNode := previous.Find(name) + snItem := join(snPath, name) + fn, excluded, err := arch.Save(ctx, snItem, pathname, oldNode) + + // return error early if possible + if err != nil { + err = arch.error(pathname, fi, err) + if err == nil { + // ignore error + continue + } + + return nil, s, err + } + + if excluded { + continue + } + + futures = append(futures, fn) + } + + for _, fn := range futures { + fn.wait() + + // return the error if it wasn't ignored + if fn.err != nil { + fn.err = arch.error(fn.target, fn.fi, fn.err) + if fn.err == nil { + // ignore error + continue + } + + return nil, s, fn.err + } + + // when the error is ignored, the node could not be saved, so ignore it + if fn.node == nil { + debug.Log("%v excluded: %v", fn.snPath, fn.target) + continue + } + + err := tree.Insert(fn.node) + if err != nil { + return nil, s, err + } + } + + id, treeStats, err := arch.saveTree(ctx, tree) + if err != nil { + return nil, ItemStats{}, err + } + + s.Add(treeStats) + + treeNode.Subtree = &id + return treeNode, s, nil +} + +// FutureNode holds a reference to a node or a FutureFile. 
+type FutureNode struct { + snPath, target string + + // kept to call the error callback function + absTarget string + fi os.FileInfo + + node *restic.Node + stats ItemStats + err error + + isFile bool + file FutureFile +} + +func (fn *FutureNode) wait() { + if fn.isFile { + // wait for and collect the data for the file + fn.node = fn.file.Node() + fn.err = fn.file.Err() + fn.stats = fn.file.Stats() + } +} + +// Save saves a target (file or directory) to the repo. If the item is +// excluded, this function returns a nil node and error. +// +// Errors and completion need to be handled by the caller. +// +// snPath is the path within the current snapshot. +func (arch *Archiver) Save(ctx context.Context, snPath, target string, previous *restic.Node) (fn FutureNode, excluded bool, err error) { + fn = FutureNode{ + snPath: snPath, + target: target, + } + + debug.Log("%v target %q, previous %v", snPath, target, previous) + abstarget, err := arch.FS.Abs(target) + if err != nil { + return FutureNode{}, false, err + } + + fn.absTarget = abstarget + + var fi os.FileInfo + var errFI error + + file, errOpen := arch.FS.OpenFile(target, fs.O_RDONLY|fs.O_NOFOLLOW, 0) + if errOpen == nil { + fi, errFI = file.Stat() + } + + if !arch.Select(abstarget, fi) { + debug.Log("%v is excluded", target) + if file != nil { + _ = file.Close() + } + return FutureNode{}, true, nil + } + + if errOpen != nil { + debug.Log(" open error %#v", errOpen) + // test if the open failed because target is a symbolic link or a socket + if e, ok := errOpen.(*os.PathError); ok && (e.Err == syscall.ELOOP || e.Err == syscall.ENXIO) { + // in this case, redo the stat and carry on + fi, errFI = arch.FS.Lstat(target) + } else { + return FutureNode{}, false, errors.Wrap(errOpen, "OpenFile") + } + } + + if errFI != nil { + _ = file.Close() + return FutureNode{}, false, errors.Wrap(errFI, "Stat") + } + + switch { + case fs.IsRegularFile(fi): + debug.Log(" %v regular file", target) + start := time.Now() + + // use previous node if the file hasn't changed + if previous != nil && !fileChanged(fi, previous) { + debug.Log("%v hasn't changed, returning old node", target) + arch.CompleteItem(snPath, previous, previous, ItemStats{}, time.Since(start)) + arch.CompleteBlob(snPath, previous.Size) + fn.node = previous + _ = file.Close() + return fn, false, nil + } + + fn.isFile = true + // Save will close the file, we don't need to do that + fn.file = arch.fileSaver.Save(ctx, snPath, file, fi, func() { + arch.StartFile(snPath) + }, func(node *restic.Node, stats ItemStats) { + arch.CompleteItem(snPath, previous, node, stats, time.Since(start)) + }) + + file = nil + + case fi.IsDir(): + debug.Log(" %v dir", target) + + snItem := snPath + "/" + start := time.Now() + oldSubtree := arch.loadSubtree(ctx, previous) + fn.node, fn.stats, err = arch.SaveDir(ctx, snPath, fi, target, oldSubtree) + if err == nil { + arch.CompleteItem(snItem, previous, fn.node, fn.stats, time.Since(start)) + } else { + _ = file.Close() + return FutureNode{}, false, err + } + + case fi.Mode()&os.ModeSocket > 0: + debug.Log(" %v is a socket, ignoring", target) + return FutureNode{}, true, nil + + default: + debug.Log(" %v other", target) + + fn.node, err = arch.nodeFromFileInfo(target, fi) + if err != nil { + _ = file.Close() + return FutureNode{}, false, err + } + } + + if file != nil { + err = file.Close() + if err != nil { + return fn, false, errors.Wrap(err, "Close") + } + } + + return fn, false, nil +} + +// fileChanged returns true if the file's content has changed since the
node +// was created. +func fileChanged(fi os.FileInfo, node *restic.Node) bool { + if node == nil { return true } - arch.knownBlobs.Insert(id) + // check type change + if node.Type != "file" { + return true + } - if arch.repo.Index().Has(id, t) { + // check modification timestamp + if !fi.ModTime().Equal(node.ModTime) { + return true + } + + // check size + extFI := fs.ExtendedStat(fi) + if uint64(fi.Size()) != node.Size || uint64(extFI.Size) != node.Size { + return true + } + + // check inode + if node.Inode != extFI.Inode { return true } return false } -// Save stores a blob read from rd in the repository. -func (arch *Archiver) Save(ctx context.Context, t restic.BlobType, data []byte, id restic.ID) error { - debug.Log("Save(%v, %v)\n", t, id) +// join returns all elements separated with a forward slash. +func join(elem ...string) string { + return path.Join(elem...) +} - if arch.isKnownBlob(id, restic.DataBlob) { - debug.Log("blob %v is known\n", id) +// statDir returns the file info for the directory. Symbolic links are +// resolved. If the target directory is not a directory, an error is returned. +func (arch *Archiver) statDir(dir string) (os.FileInfo, error) { + fi, err := arch.FS.Stat(dir) + if err != nil { + return nil, errors.Wrap(err, "Stat") + } + + tpe := fi.Mode() & (os.ModeType | os.ModeCharDevice) + if tpe != os.ModeDir { + return fi, errors.Errorf("path is not a directory: %v", dir) + } + + return fi, nil +} + +// SaveTree stores a Tree in the repo and returns the tree. snPath is the path +// within the current snapshot. +func (arch *Archiver) SaveTree(ctx context.Context, snPath string, atree *Tree, previous *restic.Tree) (*restic.Tree, error) { + debug.Log("%v (%v nodes), parent %v", snPath, len(atree.Nodes), previous) + + tree := restic.NewTree() + + futureNodes := make(map[string]FutureNode) + + for name, subatree := range atree.Nodes { + + // this is a leaf node + if subatree.Path != "" { + fn, excluded, err := arch.Save(ctx, join(snPath, name), subatree.Path, previous.Find(name)) + + if err != nil { + err = arch.error(subatree.Path, fn.fi, err) + if err == nil { + // ignore error + continue + } + return nil, err + } + + if !excluded { + futureNodes[name] = fn + } + continue + } + + snItem := join(snPath, name) + "/" + start := time.Now() + + oldNode := previous.Find(name) + oldSubtree := arch.loadSubtree(ctx, oldNode) + + // not a leaf node, archive subtree + subtree, err := arch.SaveTree(ctx, join(snPath, name), &subatree, oldSubtree) + if err != nil { + return nil, err + } + + id, nodeStats, err := arch.saveTree(ctx, subtree) + if err != nil { + return nil, err + } + + if subatree.FileInfoPath == "" { + return nil, errors.Errorf("FileInfoPath for %v/%v is empty", snPath, name) + } + + debug.Log("%v, saved subtree %v as %v", snPath, subtree, id.Str()) + + fi, err := arch.statDir(subatree.FileInfoPath) + if err != nil { + return nil, err + } + + debug.Log("%v, dir node data loaded from %v", snPath, subatree.FileInfoPath) + + node, err := arch.nodeFromFileInfo(subatree.FileInfoPath, fi) + if err != nil { + return nil, err + } + + node.Name = name + node.Subtree = &id + + err = tree.Insert(node) + if err != nil { + return nil, err + } + + arch.CompleteItem(snItem, oldNode, node, nodeStats, time.Since(start)) + } + + // process all futures + for name, fn := range futureNodes { + fn.wait() + + // return the error, or ignore it + if fn.err != nil { + fn.err = arch.error(fn.target, fn.fi, fn.err) + if fn.err == nil { + // ignore
error + continue + } + + return nil, fn.err + } + + // when the error is ignored, the node could not be saved, so ignore it + if fn.node == nil { + debug.Log("%v excluded: %v", fn.snPath, fn.target) + continue + } + + fn.node.Name = name + + err := tree.Insert(fn.node) + if err != nil { + return nil, err + } + } + + return tree, nil +} + +type fileInfoSlice []os.FileInfo + +func (fi fileInfoSlice) Len() int { + return len(fi) +} + +func (fi fileInfoSlice) Swap(i, j int) { + fi[i], fi[j] = fi[j], fi[i] +} + +func (fi fileInfoSlice) Less(i, j int) bool { + return fi[i].Name() < fi[j].Name() +} + +func readdir(filesystem fs.FS, dir string) ([]os.FileInfo, error) { + f, err := filesystem.OpenFile(dir, fs.O_RDONLY|fs.O_NOFOLLOW, 0) + if err != nil { + return nil, errors.Wrap(err, "Open") + } + + entries, err := f.Readdir(-1) + if err != nil { + _ = f.Close() + return nil, errors.Wrap(err, "Readdir") + } + + err = f.Close() + if err != nil { + return nil, err + } + + sort.Sort(fileInfoSlice(entries)) + return entries, nil +} + +func readdirnames(filesystem fs.FS, dir string) ([]string, error) { + f, err := filesystem.OpenFile(dir, fs.O_RDONLY|fs.O_NOFOLLOW, 0) + if err != nil { + return nil, errors.Wrap(err, "Open") + } + + entries, err := f.Readdirnames(-1) + if err != nil { + _ = f.Close() + return nil, errors.Wrap(err, "Readdirnames") + } + + err = f.Close() + if err != nil { + return nil, err + } + + sort.Sort(sort.StringSlice(entries)) + return entries, nil +} + +// resolveRelativeTargets replaces targets that only contain relative +// directories ("." or "../../") with the contents of the directory. Each +// element of target is processed with fs.Clean(). +func resolveRelativeTargets(fs fs.FS, targets []string) ([]string, error) { + debug.Log("targets before resolving: %v", targets) + result := make([]string, 0, len(targets)) + for _, target := range targets { + target = fs.Clean(target) + pc, _ := pathComponents(fs, target, false) + if len(pc) > 0 { + result = append(result, target) + continue + } + + debug.Log("replacing %q with readdir(%q)", target, target) + entries, err := readdirnames(fs, target) + if err != nil { + return nil, err + } + + for _, name := range entries { + result = append(result, fs.Join(target, name)) + } + } + + debug.Log("targets after resolving: %v", result) + return result, nil +} + +// SnapshotOptions collects attributes for a new snapshot. +type SnapshotOptions struct { + Tags []string + Hostname string + Excludes []string + Time time.Time + ParentSnapshot restic.ID +} + +// loadParentTree loads a tree referenced by snapshot id. If id is null, nil is returned. +func (arch *Archiver) loadParentTree(ctx context.Context, snapshotID restic.ID) *restic.Tree { + if snapshotID.IsNull() { return nil } - _, err := arch.repo.SaveBlob(ctx, t, data, id) + debug.Log("load parent snapshot %v", snapshotID) + sn, err := restic.LoadSnapshot(ctx, arch.Repo, snapshotID) if err != nil { - debug.Log("Save(%v, %v): error %v\n", t, id, err) - return err + debug.Log("unable to load snapshot %v: %v", snapshotID, err) + return nil } - debug.Log("Save(%v, %v): new blob\n", t, id) - return nil -} + if sn.Tree == nil { + debug.Log("snapshot %v has empty tree", snapshotID) + return nil + } -// SaveTreeJSON stores a tree in the repository.
-func (arch *Archiver) SaveTreeJSON(ctx context.Context, tree *restic.Tree) (restic.ID, error) { - data, err := json.Marshal(tree) + debug.Log("load parent tree %v", *sn.Tree) + tree, err := arch.Repo.LoadTree(ctx, *sn.Tree) if err != nil { - return restic.ID{}, errors.Wrap(err, "Marshal") + debug.Log("unable to load tree %v: %v", *sn.Tree, err) + return nil } - data = append(data, '\n') - - // check if tree has been saved before - id := restic.Hash(data) - if arch.isKnownBlob(id, restic.TreeBlob) { - return id, nil - } - - return arch.repo.SaveBlob(ctx, restic.TreeBlob, data, id) + return tree } -func (arch *Archiver) reloadFileIfChanged(node *restic.Node, file fs.File) (*restic.Node, error) { - if !arch.WithAccessTime { - node.AccessTime = node.ModTime - } +// runWorkers starts the worker pools, which are stopped when the context is cancelled. +func (arch *Archiver) runWorkers(ctx context.Context) { + arch.blobSaver = NewBlobSaver(ctx, arch.Repo, arch.Options.SaveBlobConcurrency) + arch.fileSaver = NewFileSaver(ctx, arch.FS, arch.blobSaver, arch.Repo.Config().ChunkerPolynomial, arch.Options.FileReadConcurrency) + arch.fileSaver.CompleteBlob = arch.CompleteBlob - fi, err := file.Stat() - if err != nil { - return nil, errors.Wrap(err, "restic.Stat") - } - - if fi.ModTime().Equal(node.ModTime) { - return node, nil - } - - arch.Warn(node.Path, fi, errors.New("file has changed")) - - node, err = restic.NodeFromFileInfo(node.Path, fi) - if err != nil { - debug.Log("restic.NodeFromFileInfo returned error for %v: %v", node.Path, err) - arch.Warn(node.Path, fi, err) - } - - if !arch.WithAccessTime { - node.AccessTime = node.ModTime - } - - return node, nil + arch.fileSaver.NodeFromFileInfo = arch.nodeFromFileInfo } -type saveResult struct { - id restic.ID - bytes uint64 -}
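For orientation, driving the new archiver from a caller might look roughly like the sketch below. This is a hypothetical fragment, not code from this commit: it assumes ``fs.Local`` from ``internal/fs`` implements the ``FS`` interface, that ``repo`` is an already opened and indexed ``restic.Repository``, and it leaves all status callbacks (``CompleteItem`` etc.) at their defaults.

    package main

    import (
        "context"
        "fmt"
        "time"

        "github.com/restic/restic/internal/archiver"
        "github.com/restic/restic/internal/fs"
        "github.com/restic/restic/internal/restic"
    )

    // backupWork is a hypothetical driver function.
    func backupWork(ctx context.Context, repo restic.Repository) error {
        arch := archiver.New(repo, fs.Local{}, archiver.Options{})

        opts := archiver.SnapshotOptions{
            Tags:     []string{"example"},
            Hostname: "localhost",
            Time:     time.Now(),
        }

        // Snapshot resolves the targets, walks and saves them, and
        // finally flushes the snapshot and index data to the repo.
        sn, id, err := arch.Snapshot(ctx, []string{"/home/user/work"}, opts)
        if err != nil {
            return err
        }

        fmt.Printf("saved snapshot %v (tree %v)\n", id.Str(), sn.Tree.Str())
        return nil
    }

+// Snapshot saves several targets and returns a snapshot.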
+func (arch *Archiver) Snapshot(ctx context.Context, targets []string, opts SnapshotOptions) (*restic.Snapshot, restic.ID, error) { + workerCtx, cancel := context.WithCancel(ctx) + defer cancel() -func (arch *Archiver) saveChunk(ctx context.Context, chunk chunker.Chunk, p *restic.Progress, token struct{}, file fs.File, resultChannel chan<- saveResult) { - defer freeBuf(chunk.Data) + arch.runWorkers(workerCtx) - id := restic.Hash(chunk.Data) - err := arch.Save(ctx, restic.DataBlob, chunk.Data, id) - // TODO handle error - if err != nil { - debug.Log("Save(%v) failed: %v", id, err) - fmt.Printf("\nerror while saving data to the repo: %+v\n", err) - panic(err) - } - - p.Report(restic.Stat{Bytes: uint64(chunk.Length)}) - arch.blobToken <- token - resultChannel <- saveResult{id: id, bytes: uint64(chunk.Length)} -} - -func waitForResults(resultChannels [](<-chan saveResult)) ([]saveResult, error) { - results := []saveResult{} - - for _, ch := range resultChannels { - results = append(results, <-ch) - } - - if len(results) != len(resultChannels) { - return nil, errors.Errorf("chunker returned %v chunks, but only %v blobs saved", len(resultChannels), len(results)) - } - - return results, nil -} - -func updateNodeContent(node *restic.Node, results []saveResult) error { - debug.Log("checking size for file %s", node.Path) - - var bytes uint64 - node.Content = make([]restic.ID, len(results)) - - for i, b := range results { - node.Content[i] = b.id - bytes += b.bytes - - debug.Log(" adding blob %s, %d bytes", b.id, b.bytes) - } - - if bytes != node.Size { - fmt.Fprintf(os.Stderr, "warning for %v: expected %d bytes, saved %d bytes\n", node.Path, node.Size, bytes) - } - - debug.Log("SaveFile(%q): %v blobs\n", node.Path, len(results)) - - return nil -} - -// SaveFile stores the content of the file on the backend as a Blob by calling -// Save for each chunk. 
-func (arch *Archiver) SaveFile(ctx context.Context, p *restic.Progress, node *restic.Node) (*restic.Node, error) { - file, err := fs.Open(node.Path) - if err != nil { - return node, errors.Wrap(err, "Open") - } - defer file.Close() - - debug.RunHook("archiver.SaveFile", node.Path) - - node, err = arch.reloadFileIfChanged(node, file) - if err != nil { - return node, err - } - - chnker := chunker.New(file, arch.repo.Config().ChunkerPolynomial) - resultChannels := [](<-chan saveResult){} - - for { - chunk, err := chnker.Next(getBuf()) - if errors.Cause(err) == io.EOF { - break - } - - if err != nil { - return node, errors.Wrap(err, "chunker.Next") - } - - resCh := make(chan saveResult, 1) - go arch.saveChunk(ctx, chunk, p, <-arch.blobToken, file, resCh) - resultChannels = append(resultChannels, resCh) - } - - results, err := waitForResults(resultChannels) - if err != nil { - return node, err - } - err = updateNodeContent(node, results) - - return node, err -} - -func (arch *Archiver) fileWorker(ctx context.Context, wg *sync.WaitGroup, p *restic.Progress, entCh <-chan pipe.Entry) { - defer func() { - debug.Log("done") - wg.Done() - }() - for { - select { - case e, ok := <-entCh: - if !ok { - // channel is closed - return - } - - debug.Log("got job %v", e) - - // check for errors - if e.Error() != nil { - debug.Log("job %v has errors: %v", e.Path(), e.Error()) - // TODO: integrate error reporting - fmt.Fprintf(os.Stderr, "error for %v: %v\n", e.Path(), e.Error()) - // ignore this file - e.Result() <- nil - p.Report(restic.Stat{Errors: 1}) - continue - } - - node, err := restic.NodeFromFileInfo(e.Fullpath(), e.Info()) - if err != nil { - debug.Log("restic.NodeFromFileInfo returned error for %v: %v", node.Path, err) - arch.Warn(e.Fullpath(), e.Info(), err) - } - - if !arch.WithAccessTime { - node.AccessTime = node.ModTime - } - - // try to use old node, if present - if e.Node != nil { - debug.Log(" %v use old data", e.Path()) - - oldNode := e.Node.(*restic.Node) - // check if all content is still available in the repository - contentMissing := false - for _, blob := range oldNode.Content { - if !arch.repo.Index().Has(blob, restic.DataBlob) { - debug.Log(" %v not using old data, %v is missing", e.Path(), blob) - contentMissing = true - break - } - } - - if !contentMissing { - node.Content = oldNode.Content - debug.Log(" %v content is complete", e.Path()) - } - } else { - debug.Log(" %v no old data", e.Path()) - } - - // otherwise read file normally - if node.Type == "file" && len(node.Content) == 0 { - debug.Log(" read and save %v", e.Path()) - node, err = arch.SaveFile(ctx, p, node) - if err != nil { - fmt.Fprintf(os.Stderr, "error for %v: %v\n", node.Path, err) - arch.Warn(e.Path(), nil, err) - // ignore this file - e.Result() <- nil - p.Report(restic.Stat{Errors: 1}) - continue - } - } else { - // report old data size - p.Report(restic.Stat{Bytes: node.Size}) - } - - debug.Log(" processed %v, %d blobs", e.Path(), len(node.Content)) - e.Result() <- node - p.Report(restic.Stat{Files: 1}) - case <-ctx.Done(): - // pipeline was cancelled - return - } - } -} - -func (arch *Archiver) dirWorker(ctx context.Context, wg *sync.WaitGroup, p *restic.Progress, dirCh <-chan pipe.Dir) { - debug.Log("start") - defer func() { - debug.Log("done") - wg.Done() - }() - for { - select { - case dir, ok := <-dirCh: - if !ok { - // channel is closed - return - } - debug.Log("save dir %v (%d entries), error %v\n", dir.Path(), len(dir.Entries), dir.Error()) - - // ignore dir nodes with errors - if dir.Error() != nil { - 
fmt.Fprintf(os.Stderr, "error walking dir %v: %v\n", dir.Path(), dir.Error()) - dir.Result() <- nil - p.Report(restic.Stat{Errors: 1}) - continue - } - - tree := restic.NewTree() - - // wait for all content - for _, ch := range dir.Entries { - debug.Log("receiving result from %v", ch) - res := <-ch - - // if we get a nil pointer here, an error has happened while - // processing this entry. Ignore it for now. - if res == nil { - debug.Log("got nil result?") - continue - } - - // else insert node - node := res.(*restic.Node) - - if node.Type == "dir" { - debug.Log("got tree node for %s: %v", node.Path, node.Subtree) - - if node.Subtree == nil { - debug.Log("subtree is nil for node %v", node.Path) - continue - } - - if node.Subtree.IsNull() { - panic("invalid null subtree restic.ID") - } - } - - // insert node into tree, resolve name collisions - name := node.Name - i := 0 - for { - i++ - err := tree.Insert(node) - if err == nil { - break - } - - newName := fmt.Sprintf("%v-%d", name, i) - fmt.Fprintf(os.Stderr, "%v: name collision for %q, renaming to %q\n", filepath.Dir(node.Path), node.Name, newName) - node.Name = newName - } - - } - - node := &restic.Node{} - - if dir.Path() != "" && dir.Info() != nil { - n, err := restic.NodeFromFileInfo(dir.Fullpath(), dir.Info()) - if err != nil { - arch.Warn(dir.Path(), dir.Info(), err) - } - node = n - - if !arch.WithAccessTime { - node.AccessTime = node.ModTime - } - } - - if err := dir.Error(); err != nil { - node.Error = err.Error() - } - - id, err := arch.SaveTreeJSON(ctx, tree) - if err != nil { - panic(err) - } - debug.Log("save tree for %s: %v", dir.Path(), id) - if id.IsNull() { - panic("invalid null subtree restic.ID return from SaveTreeJSON()") - } - - node.Subtree = &id - - debug.Log("sending result to %v", dir.Result()) - - dir.Result() <- node - if dir.Path() != "" { - p.Report(restic.Stat{Dirs: 1}) - } - case <-ctx.Done(): - // pipeline was cancelled - return - } - } -} - -type archivePipe struct { - Old <-chan walk.TreeJob - New <-chan pipe.Job -} - -func copyJobs(ctx context.Context, in <-chan pipe.Job, out chan<- pipe.Job) { - var ( - // disable sending on the outCh until we received a job - outCh chan<- pipe.Job - // enable receiving from in - inCh = in - job pipe.Job - ok bool - ) - - for { - select { - case <-ctx.Done(): - return - case job, ok = <-inCh: - if !ok { - // input channel closed, we're done - debug.Log("input channel closed, we're done") - return - } - inCh = nil - outCh = out - case outCh <- job: - outCh = nil - inCh = in - } - } -} - -type archiveJob struct { - hasOld bool - old walk.TreeJob - new pipe.Job -} - -func (a *archivePipe) compare(ctx context.Context, out chan<- pipe.Job) { - defer func() { - close(out) - debug.Log("done") - }() - - debug.Log("start") - var ( - loadOld, loadNew bool = true, true - ok bool - oldJob walk.TreeJob - newJob pipe.Job - ) - - for { - if loadOld { - oldJob, ok = <-a.Old - // if the old channel is closed, just pass through the new jobs - if !ok { - debug.Log("old channel is closed, copy from new channel") - - // handle remaining newJob - if !loadNew { - out <- archiveJob{new: newJob}.Copy() - } - - copyJobs(ctx, a.New, out) - return - } - - loadOld = false - } - - if loadNew { - newJob, ok = <-a.New - // if the new channel is closed, there are no more files in the current snapshot, return - if !ok { - debug.Log("new channel is closed, we're done") - return - } - - loadNew = false - } - - debug.Log("old job: %v", oldJob.Path) - debug.Log("new job: %v", newJob.Path()) - - // at this 
point we have received an old job as well as a new job, compare paths - file1 := oldJob.Path - file2 := newJob.Path() - - dir1 := filepath.Dir(file1) - dir2 := filepath.Dir(file2) - - if file1 == file2 { - debug.Log(" same filename %q", file1) - - // send job - out <- archiveJob{hasOld: true, old: oldJob, new: newJob}.Copy() - loadOld = true - loadNew = true - continue - } else if dir1 < dir2 { - debug.Log(" %q < %q, file %q added", dir1, dir2, file2) - // file is new, send new job and load new - loadNew = true - out <- archiveJob{new: newJob}.Copy() - continue - } else if dir1 == dir2 { - if file1 < file2 { - debug.Log(" %q < %q, file %q removed", file1, file2, file1) - // file has been removed, load new old - loadOld = true - continue - } else { - debug.Log(" %q > %q, file %q added", file1, file2, file2) - // file is new, send new job and load new - loadNew = true - out <- archiveJob{new: newJob}.Copy() - continue - } - } - - debug.Log(" %q > %q, file %q removed", file1, file2, file1) - // file has been removed, throw away old job and load new - loadOld = true - } -} - -func (j archiveJob) Copy() pipe.Job { - if !j.hasOld { - return j.new - } - - // handle files - if isRegularFile(j.new.Info()) { - debug.Log(" job %v is file", j.new.Path()) - - // if type has changed, return new job directly - if j.old.Node == nil { - return j.new - } - - // if file is newer, return the new job - if j.old.Node.IsNewer(j.new.Fullpath(), j.new.Info()) { - debug.Log(" job %v is newer", j.new.Path()) - return j.new - } - - debug.Log(" job %v add old data", j.new.Path()) - // otherwise annotate job with old data - e := j.new.(pipe.Entry) - e.Node = j.old.Node - return e - } - - // dirs and other types are just returned - return j.new -} - -const saveIndexTime = 30 * time.Second - -// saveIndexes regularly queries the master index for full indexes and saves them. -func (arch *Archiver) saveIndexes(saveCtx, shutdownCtx context.Context, wg *sync.WaitGroup) { - defer wg.Done() - - ticker := time.NewTicker(saveIndexTime) - defer ticker.Stop() - - for { - select { - case <-saveCtx.Done(): - return - case <-shutdownCtx.Done(): - return - case <-ticker.C: - debug.Log("saving full indexes") - err := arch.repo.SaveFullIndex(saveCtx) - if err != nil { - debug.Log("save indexes returned an error: %v", err) - fmt.Fprintf(os.Stderr, "error saving preliminary index: %v\n", err) - } - } - } -} - -// unique returns a slice that only contains unique strings. -func unique(items []string) []string { - seen := make(map[string]struct{}) - for _, item := range items { - seen[item] = struct{}{} - } - - items = items[:0] - for item := range seen { - items = append(items, item) - } - return items -} - -// baseNameSlice allows sorting paths by basename. -// -// Snapshots have contents sorted by basename, but we receive full paths. -// For the archivePipe to advance them in pairs, we traverse the given -// paths in the same order as the snapshot. -type baseNameSlice []string - -func (p baseNameSlice) Len() int { return len(p) } -func (p baseNameSlice) Less(i, j int) bool { return filepath.Base(p[i]) < filepath.Base(p[j]) } -func (p baseNameSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -// Snapshot creates a snapshot of the given paths. If parentrestic.ID is set, this is -// used to compare the files to the ones archived at the time this snapshot was -// taken. 
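The deleted copyJobs above relies on the nil-channel idiom: a select case on a nil channel can never proceed, so toggling the receive and send channels between nil and live alternates the loop between taking one job and passing it on, without any extra buffering. The same idiom in isolation, over plain ints (a sketch, not restic code):

```go
package main

import (
	"context"
	"fmt"
)

// forward copies values from in to out one at a time. Swapping recvCh and
// sendCh between nil and the live channel enables exactly one of the two
// select cases at any moment.
func forward(ctx context.Context, in <-chan int, out chan<- int) {
	var (
		recvCh = in       // receiving enabled
		sendCh chan<- int // nil: sending disabled until we hold a value
		v      int
	)
	for {
		select {
		case <-ctx.Done():
			return
		case x, ok := <-recvCh:
			if !ok {
				return // input closed, we're done
			}
			v = x
			recvCh, sendCh = nil, out // now try to send
		case sendCh <- v:
			recvCh, sendCh = in, nil // sent, go back to receiving
		}
	}
}

func main() {
	in, out := make(chan int), make(chan int)
	go func() {
		for i := 1; i <= 3; i++ {
			in <- i
		}
		close(in)
	}()
	go forward(context.Background(), in, out)
	for i := 0; i < 3; i++ {
		fmt.Println(<-out)
	}
}
```

compare itself is a merge of two ordered streams: the walk of the parent snapshot's tree and the walk of the live filesystem. Equal paths are paired so the old node can be reused, everything else is classified as added or removed. Simplified over string slices, ignoring the separate directory-level comparison and the channel plumbing:

```go
package main

import "fmt"

// diffSorted walks two sorted name lists in lockstep, the way
// archivePipe.compare pairs the parent snapshot with the new walk.
func diffSorted(prev, cur []string) {
	i, j := 0, 0
	for i < len(prev) && j < len(cur) {
		switch {
		case prev[i] == cur[j]:
			fmt.Println("same   ", cur[j]) // pair old and new job, node may be reused
			i++
			j++
		case prev[i] < cur[j]:
			fmt.Println("removed", prev[i]) // only in the parent snapshot
			i++
		default:
			fmt.Println("added  ", cur[j]) // only on disk, must be saved
			j++
		}
	}
	for ; i < len(prev); i++ {
		fmt.Println("removed", prev[i])
	}
	for ; j < len(cur); j++ {
		fmt.Println("added  ", cur[j])
	}
}

func main() {
	diffSorted(
		[]string{"a", "b", "d"},
		[]string{"a", "c", "d", "e"},
	)
}
```

This is also why the baseNameSlice sort above exists: the merge only works when both sides deliver entries in the same order, and snapshot contents are sorted by basename.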
-func (arch *Archiver) Snapshot(ctx context.Context, p *restic.Progress, paths, tags []string, hostname string, parentID *restic.ID, time time.Time) (*restic.Snapshot, restic.ID, error) { - paths = unique(paths) - sort.Sort(baseNameSlice(paths)) - - debug.Log("start for %v", paths) - - debug.RunHook("Archiver.Snapshot", nil) - - // signal the whole pipeline to stop - var err error - - p.Start() - defer p.Done() - - // create new snapshot - sn, err := restic.NewSnapshot(paths, tags, hostname, time) - if err != nil { - return nil, restic.ID{}, err - } - sn.Excludes = arch.Excludes - - jobs := archivePipe{} - - // use parent snapshot (if some was given) - if parentID != nil { - sn.Parent = parentID - - // load parent snapshot - parent, err := restic.LoadSnapshot(ctx, arch.repo, *parentID) - if err != nil { - return nil, restic.ID{}, err - } - - // start walker on old tree - ch := make(chan walk.TreeJob) - go walk.Tree(ctx, arch.repo, *parent.Tree, ch) - jobs.Old = ch - } else { - // use closed channel - ch := make(chan walk.TreeJob) - close(ch) - jobs.Old = ch - } - - // start walker - pipeCh := make(chan pipe.Job) - resCh := make(chan pipe.Result, 1) - go func() { - pipe.Walk(ctx, paths, arch.SelectFilter, pipeCh, resCh) - debug.Log("pipe.Walk done") - }() - jobs.New = pipeCh - - ch := make(chan pipe.Job) - go jobs.compare(ctx, ch) - - var wg sync.WaitGroup - entCh := make(chan pipe.Entry) - dirCh := make(chan pipe.Dir) - - // split - wg.Add(1) - go func() { - pipe.Split(ch, dirCh, entCh) - debug.Log("split done") - close(dirCh) - close(entCh) - wg.Done() - }() - - // run workers - for i := 0; i < maxConcurrency; i++ { - wg.Add(2) - go arch.fileWorker(ctx, &wg, p, entCh) - go arch.dirWorker(ctx, &wg, p, dirCh) - } - - // run index saver - var wgIndexSaver sync.WaitGroup - shutdownCtx, indexShutdown := context.WithCancel(ctx) - wgIndexSaver.Add(1) - go arch.saveIndexes(ctx, shutdownCtx, &wgIndexSaver) - - // wait for all workers to terminate - debug.Log("wait for workers") - wg.Wait() - - // stop index saver - indexShutdown() - wgIndexSaver.Wait() - - debug.Log("workers terminated") - - // flush repository - err = arch.repo.Flush(ctx) + err := arch.Valid() if err != nil { return nil, restic.ID{}, err } - // receive the top-level tree - root := (<-resCh).(*restic.Node) - debug.Log("root node received: %v", root.Subtree) - sn.Tree = root.Subtree - - // load top-level tree again to see if it is empty - toptree, err := arch.repo.LoadTree(ctx, *root.Subtree) + cleanTargets, err := resolveRelativeTargets(arch.FS, targets) if err != nil { return nil, restic.ID{}, err } - if len(toptree.Nodes) == 0 { - return nil, restic.ID{}, errors.Fatal("no files/dirs saved, refusing to create empty snapshot") - } - - // save index - err = arch.repo.SaveIndex(ctx) - if err != nil { - debug.Log("error saving index: %v", err) - return nil, restic.ID{}, err - } - - debug.Log("saved indexes") - - // save snapshot - id, err := arch.repo.SaveJSONUnpacked(ctx, restic.SnapshotFile, sn) + atree, err := NewTree(arch.FS, cleanTargets) if err != nil { return nil, restic.ID{}, err } - debug.Log("saved snapshot %v", id) + start := time.Now() + tree, err := arch.SaveTree(ctx, "/", atree, arch.loadParentTree(ctx, opts.ParentSnapshot)) + if err != nil { + return nil, restic.ID{}, err + } + + rootTreeID, stats, err := arch.saveTree(ctx, tree) + if err != nil { + return nil, restic.ID{}, err + } + + arch.CompleteItem("/", nil, nil, stats, time.Since(start)) + + err = arch.Repo.Flush(ctx) + if err != nil { + return nil, restic.ID{}, 
err + } + + err = arch.Repo.SaveIndex(ctx) + if err != nil { + return nil, restic.ID{}, err + } + + sn, err := restic.NewSnapshot(targets, opts.Tags, opts.Hostname, opts.Time) + sn.Excludes = opts.Excludes + if !opts.ParentSnapshot.IsNull() { + id := opts.ParentSnapshot + sn.Parent = &id + } + sn.Tree = &rootTreeID + + id, err := arch.Repo.SaveJSONUnpacked(ctx, restic.SnapshotFile, sn) + if err != nil { + return nil, restic.ID{}, err + } return sn, id, nil } - -func isRegularFile(fi os.FileInfo) bool { - if fi == nil { - return false - } - - return fi.Mode()&(os.ModeType|os.ModeCharDevice) == 0 -} - -// Scan traverses the dirs to collect restic.Stat information while emitting progress -// information with p. -func Scan(dirs []string, filter pipe.SelectFunc, p *restic.Progress) (restic.Stat, error) { - p.Start() - defer p.Done() - - var stat restic.Stat - - for _, dir := range dirs { - debug.Log("Start for %v", dir) - err := fs.Walk(dir, func(str string, fi os.FileInfo, err error) error { - // TODO: integrate error reporting - if err != nil { - fmt.Fprintf(os.Stderr, "error for %v: %v\n", str, err) - return nil - } - if fi == nil { - fmt.Fprintf(os.Stderr, "error for %v: FileInfo is nil\n", str) - return nil - } - - if !filter(str, fi) { - debug.Log("path %v excluded", str) - if fi.IsDir() { - return filepath.SkipDir - } - return nil - } - - s := restic.Stat{} - if fi.IsDir() { - s.Dirs++ - } else { - s.Files++ - - if isRegularFile(fi) { - s.Bytes += uint64(fi.Size()) - } - } - - p.Report(s) - stat.Add(s) - - // TODO: handle error? - return nil - }) - - debug.Log("Done for %v, err: %v", dir, err) - if err != nil { - return restic.Stat{}, errors.Wrap(err, "fs.Walk") - } - } - - return stat, nil -} diff --git a/internal/archiver/archiver_int_test.go b/internal/archiver/archiver_int_test.go deleted file mode 100644 index b1273fee9..000000000 --- a/internal/archiver/archiver_int_test.go +++ /dev/null @@ -1,145 +0,0 @@ -package archiver - -import ( - "context" - "os" - "testing" - - "github.com/restic/restic/internal/pipe" - "github.com/restic/restic/internal/walk" -) - -var treeJobs = []string{ - "foo/baz/subdir", - "foo/baz", - "foo", - "quu/bar/file1", - "quu/bar/file2", - "quu/foo/file1", - "quu/foo/file2", - "quu/foo/file3", - "quu/foo", - "quu/fooz", - "quu", - "yy/a", - "yy/b", - "yy", -} - -var pipeJobs = []string{ - "foo/baz/subdir", - "foo/baz/subdir2", // subdir2 added - "foo/baz", - "foo", - "quu/bar/.file1.swp", // file with . 
added - "quu/bar/file1", - "quu/bar/file2", - "quu/foo/file1", // file2 removed - "quu/foo/file3", - "quu/foo", - "quu", - "quv/file1", // files added and removed - "quv/file2", - "quv", - "yy", - "zz/file1", // files removed and added at the end - "zz/file2", - "zz", -} - -var resultJobs = []struct { - path string - action string -}{ - {"foo/baz/subdir", "same, not a file"}, - {"foo/baz/subdir2", "new, no old job"}, - {"foo/baz", "same, not a file"}, - {"foo", "same, not a file"}, - {"quu/bar/.file1.swp", "new, no old job"}, - {"quu/bar/file1", "same, not a file"}, - {"quu/bar/file2", "same, not a file"}, - {"quu/foo/file1", "same, not a file"}, - {"quu/foo/file3", "same, not a file"}, - {"quu/foo", "same, not a file"}, - {"quu", "same, not a file"}, - {"quv/file1", "new, no old job"}, - {"quv/file2", "new, no old job"}, - {"quv", "new, no old job"}, - {"yy", "same, not a file"}, - {"zz/file1", "testPipeJob"}, - {"zz/file2", "testPipeJob"}, - {"zz", "testPipeJob"}, -} - -type testPipeJob struct { - path string - err error - fi os.FileInfo - res chan<- pipe.Result -} - -func (j testPipeJob) Path() string { return j.path } -func (j testPipeJob) Fullpath() string { return j.path } -func (j testPipeJob) Error() error { return j.err } -func (j testPipeJob) Info() os.FileInfo { return j.fi } -func (j testPipeJob) Result() chan<- pipe.Result { return j.res } - -func testTreeWalker(ctx context.Context, out chan<- walk.TreeJob) { - for _, e := range treeJobs { - select { - case <-ctx.Done(): - return - case out <- walk.TreeJob{Path: e}: - } - } - - close(out) -} - -func testPipeWalker(ctx context.Context, out chan<- pipe.Job) { - for _, e := range pipeJobs { - select { - case <-ctx.Done(): - return - case out <- testPipeJob{path: e}: - } - } - - close(out) -} - -func TestArchivePipe(t *testing.T) { - ctx := context.TODO() - - treeCh := make(chan walk.TreeJob) - pipeCh := make(chan pipe.Job) - - go testTreeWalker(ctx, treeCh) - go testPipeWalker(ctx, pipeCh) - - p := archivePipe{Old: treeCh, New: pipeCh} - - ch := make(chan pipe.Job) - - go p.compare(ctx, ch) - - i := 0 - for job := range ch { - if job.Path() != resultJobs[i].path { - t.Fatalf("wrong job received: wanted %v, got %v", resultJobs[i], job) - } - - // switch j := job.(type) { - // case archivePipeJob: - // if j.action != resultJobs[i].action { - // t.Fatalf("wrong action for %v detected: wanted %q, got %q", job.Path(), resultJobs[i].action, j.action) - // } - // case testPipeJob: - // if resultJobs[i].action != "testPipeJob" { - // t.Fatalf("unexpected testPipeJob, expected %q: %v", resultJobs[i].action, j) - // } - // } - - i++ - } -} diff --git a/internal/archiver/archiver_test.go b/internal/archiver/archiver_test.go index 035355a32..a8557ef2a 100644 --- a/internal/archiver/archiver_test.go +++ b/internal/archiver/archiver_test.go @@ -1,278 +1,1567 @@ -package archiver_test +package archiver import ( - "bytes" "context" - "io" "io/ioutil" "os" "path/filepath" + "runtime" + "strings" + "sync" + "syscall" "testing" "time" - "github.com/restic/restic/internal/archiver" - "github.com/restic/restic/internal/crypto" + "github.com/restic/restic/internal/checker" + "github.com/restic/restic/internal/fs" "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" - rtest "github.com/restic/restic/internal/test" - - "github.com/restic/restic/internal/errors" - - "github.com/restic/chunker" + restictest "github.com/restic/restic/internal/test" ) -var testPol = chunker.Pol(0x3DA3358B4DC173) +func 
prepareTempdirRepoSrc(t testing.TB, src TestDir) (tempdir string, repo restic.Repository, cleanup func()) { + tempdir, removeTempdir := restictest.TempDir(t) + repo, removeRepository := repository.TestRepository(t) -type Rdr interface { - io.ReadSeeker - io.ReaderAt + TestCreateFiles(t, tempdir, src) + + cleanup = func() { + removeRepository() + removeTempdir() + } + + return tempdir, repo, cleanup } -func benchmarkChunkEncrypt(b testing.TB, buf, buf2 []byte, rd Rdr, key *crypto.Key) { - rd.Seek(0, 0) - ch := chunker.New(rd, testPol) - nonce := crypto.NewRandomNonce() +func saveFile(t testing.TB, repo restic.Repository, filename string, filesystem fs.FS) (*restic.Node, ItemStats) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - for { - chunk, err := ch.Next(buf) + arch := New(repo, filesystem, Options{}) + arch.runWorkers(ctx) - if errors.Cause(err) == io.EOF { - break - } + var ( + completeCallbackNode *restic.Node + completeCallbackStats ItemStats + completeCallback bool - rtest.OK(b, err) + startCallback bool + ) - rtest.Assert(b, uint(len(chunk.Data)) == chunk.Length, - "invalid length: got %d, expected %d", len(chunk.Data), chunk.Length) + complete := func(node *restic.Node, stats ItemStats) { + completeCallback = true + completeCallbackNode = node + completeCallbackStats = stats + } - _ = key.Seal(buf2[:0], nonce, chunk.Data, nil) + start := func() { + startCallback = true + } + + file, err := arch.FS.OpenFile(filename, fs.O_RDONLY|fs.O_NOFOLLOW, 0) + if err != nil { + t.Fatal(err) + } + + fi, err := file.Stat() + if err != nil { + t.Fatal(err) + } + + res := arch.fileSaver.Save(ctx, "/", file, fi, start, complete) + if res.Err() != nil { + t.Fatal(res.Err()) + } + + err = repo.Flush(ctx) + if err != nil { + t.Fatal(err) + } + + err = repo.SaveIndex(ctx) + if err != nil { + t.Fatal(err) + } + + if !startCallback { + t.Errorf("start callback did not happen") + } + + if !completeCallback { + t.Errorf("complete callback did not happen") + } + + if completeCallbackNode == nil { + t.Errorf("no node returned for complete callback") + } + + if completeCallbackNode != nil && !res.Node().Equals(*completeCallbackNode) { + t.Errorf("different node returned for complete callback") + } + + if completeCallbackStats != res.Stats() { + t.Errorf("different stats return for complete callback, want:\n %v\ngot:\n %v", res.Stats(), completeCallbackStats) + } + + return res.Node(), res.Stats() +} + +func TestArchiverSaveFile(t *testing.T) { + var tests = []TestFile{ + TestFile{Content: ""}, + TestFile{Content: "foo"}, + TestFile{Content: string(restictest.Random(23, 12*1024*1024+1287898))}, + } + + for _, testfile := range tests { + t.Run("", func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + tempdir, repo, cleanup := prepareTempdirRepoSrc(t, TestDir{"file": testfile}) + defer cleanup() + + node, stats := saveFile(t, repo, filepath.Join(tempdir, "file"), fs.Track{fs.Local{}}) + + TestEnsureFileContent(ctx, t, repo, "file", node, testfile) + if stats.DataSize != uint64(len(testfile.Content)) { + t.Errorf("wrong stats returned in DataSize, want %d, got %d", len(testfile.Content), stats.DataSize) + } + if stats.DataBlobs <= 0 && len(testfile.Content) > 0 { + t.Errorf("wrong stats returned in DataBlobs, want > 0, got %d", stats.DataBlobs) + } + if stats.TreeSize != 0 { + t.Errorf("wrong stats returned in TreeSize, want 0, got %d", stats.TreeSize) + } + if stats.TreeBlobs != 0 { + t.Errorf("wrong stats returned in DataBlobs, want 0, 
got %d", stats.DataBlobs) + } + }) } } -func BenchmarkChunkEncrypt(b *testing.B) { - repo, cleanup := repository.TestRepository(b) - defer cleanup() +func TestArchiverSaveFileReaderFS(t *testing.T) { + var tests = []struct { + Data string + }{ + {Data: ""}, + {Data: "foo"}, + {Data: string(restictest.Random(23, 12*1024*1024+1287898))}, + } - data := rtest.Random(23, 10<<20) // 10MiB - rd := bytes.NewReader(data) + for _, test := range tests { + t.Run("", func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - buf := make([]byte, chunker.MaxSize) - buf2 := make([]byte, chunker.MaxSize) + repo, cleanup := repository.TestRepository(t) + defer cleanup() - b.ResetTimer() - b.SetBytes(int64(len(data))) + ts := time.Now() + filename := "xx" + readerFs := &fs.Reader{ + ModTime: ts, + Mode: 0123, + Name: filename, + ReadCloser: ioutil.NopCloser(strings.NewReader(test.Data)), + } + + node, stats := saveFile(t, repo, filename, readerFs) + + TestEnsureFileContent(ctx, t, repo, "file", node, TestFile{Content: test.Data}) + if stats.DataSize != uint64(len(test.Data)) { + t.Errorf("wrong stats returned in DataSize, want %d, got %d", len(test.Data), stats.DataSize) + } + if stats.DataBlobs <= 0 && len(test.Data) > 0 { + t.Errorf("wrong stats returned in DataBlobs, want > 0, got %d", stats.DataBlobs) + } + if stats.TreeSize != 0 { + t.Errorf("wrong stats returned in TreeSize, want 0, got %d", stats.TreeSize) + } + if stats.TreeBlobs != 0 { + t.Errorf("wrong stats returned in DataBlobs, want 0, got %d", stats.DataBlobs) + } + }) + } +} + +func BenchmarkArchiverSaveFileSmall(b *testing.B) { + const fileSize = 4 * 1024 + d := TestDir{"file": TestFile{ + Content: string(restictest.Random(23, fileSize)), + }} + + b.SetBytes(fileSize) for i := 0; i < b.N; i++ { - benchmarkChunkEncrypt(b, buf, buf2, rd, repo.Key()) - } -} + b.StopTimer() + tempdir, repo, cleanup := prepareTempdirRepoSrc(b, d) + b.StartTimer() -func benchmarkChunkEncryptP(b *testing.PB, buf []byte, rd Rdr, key *crypto.Key) { - ch := chunker.New(rd, testPol) - nonce := crypto.NewRandomNonce() + _, stats := saveFile(b, repo, filepath.Join(tempdir, "file"), fs.Track{fs.Local{}}) - for { - chunk, err := ch.Next(buf) - if errors.Cause(err) == io.EOF { - break + b.StopTimer() + if stats.DataSize != fileSize { + b.Errorf("wrong stats returned in DataSize, want %d, got %d", fileSize, stats.DataSize) } - - _ = key.Seal(chunk.Data[:0], nonce, chunk.Data, nil) + if stats.DataBlobs <= 0 { + b.Errorf("wrong stats returned in DataBlobs, want > 0, got %d", stats.DataBlobs) + } + if stats.TreeSize != 0 { + b.Errorf("wrong stats returned in TreeSize, want 0, got %d", stats.TreeSize) + } + if stats.TreeBlobs != 0 { + b.Errorf("wrong stats returned in DataBlobs, want 0, got %d", stats.DataBlobs) + } + cleanup() + b.StartTimer() } } -func BenchmarkChunkEncryptParallel(b *testing.B) { - repo, cleanup := repository.TestRepository(b) +func BenchmarkArchiverSaveFileLarge(b *testing.B) { + const fileSize = 40*1024*1024 + 1287898 + d := TestDir{"file": TestFile{ + Content: string(restictest.Random(23, fileSize)), + }} + + b.SetBytes(fileSize) + + for i := 0; i < b.N; i++ { + b.StopTimer() + tempdir, repo, cleanup := prepareTempdirRepoSrc(b, d) + b.StartTimer() + + _, stats := saveFile(b, repo, filepath.Join(tempdir, "file"), fs.Track{fs.Local{}}) + + b.StopTimer() + if stats.DataSize != fileSize { + b.Errorf("wrong stats returned in DataSize, want %d, got %d", fileSize, stats.DataSize) + } + if stats.DataBlobs <= 0 { + 
b.Errorf("wrong stats returned in DataBlobs, want > 0, got %d", stats.DataBlobs) + } + if stats.TreeSize != 0 { + b.Errorf("wrong stats returned in TreeSize, want 0, got %d", stats.TreeSize) + } + if stats.TreeBlobs != 0 { + b.Errorf("wrong stats returned in DataBlobs, want 0, got %d", stats.DataBlobs) + } + cleanup() + b.StartTimer() + } +} + +type blobCountingRepo struct { + restic.Repository + + m sync.Mutex + saved map[restic.BlobHandle]uint +} + +func (repo *blobCountingRepo) SaveBlob(ctx context.Context, t restic.BlobType, buf []byte, id restic.ID) (restic.ID, error) { + id, err := repo.Repository.SaveBlob(ctx, t, buf, id) + h := restic.BlobHandle{ID: id, Type: t} + repo.m.Lock() + repo.saved[h]++ + repo.m.Unlock() + return id, err +} + +func (repo *blobCountingRepo) SaveTree(ctx context.Context, t *restic.Tree) (restic.ID, error) { + id, err := repo.Repository.SaveTree(ctx, t) + h := restic.BlobHandle{ID: id, Type: restic.TreeBlob} + repo.m.Lock() + repo.saved[h]++ + repo.m.Unlock() + return id, err +} + +func appendToFile(t testing.TB, filename string, data []byte) { + f, err := os.OpenFile(filename, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0644) + if err != nil { + t.Fatal(err) + } + + _, err = f.Write(data) + if err != nil { + _ = f.Close() + t.Fatal(err) + } + + err = f.Close() + if err != nil { + t.Fatal(err) + } +} + +func TestArchiverSaveFileIncremental(t *testing.T) { + tempdir, removeTempdir := restictest.TempDir(t) + defer removeTempdir() + + testRepo, removeRepository := repository.TestRepository(t) + defer removeRepository() + + repo := &blobCountingRepo{ + Repository: testRepo, + saved: make(map[restic.BlobHandle]uint), + } + + data := restictest.Random(23, 512*1024+887898) + testfile := filepath.Join(tempdir, "testfile") + + for i := 0; i < 3; i++ { + appendToFile(t, testfile, data) + node, _ := saveFile(t, repo, testfile, fs.Track{fs.Local{}}) + + t.Logf("node blobs: %v", node.Content) + + for h, n := range repo.saved { + if n > 1 { + t.Errorf("iteration %v: blob %v saved more than once (%d times)", i, h, n) + } + } + } +} + +func save(t testing.TB, filename string, data []byte) { + f, err := os.Create(filename) + if err != nil { + t.Fatal(err) + } + + _, err = f.Write(data) + if err != nil { + t.Fatal(err) + } + + err = f.Sync() + if err != nil { + t.Fatal(err) + } + + err = f.Close() + if err != nil { + t.Fatal(err) + } +} + +func lstat(t testing.TB, name string) os.FileInfo { + fi, err := os.Lstat(name) + if err != nil { + t.Fatal(err) + } + + return fi +} + +func setTimestamp(t testing.TB, filename string, atime, mtime time.Time) { + var utimes = [...]syscall.Timespec{ + syscall.NsecToTimespec(atime.UnixNano()), + syscall.NsecToTimespec(mtime.UnixNano()), + } + + err := syscall.UtimesNano(filename, utimes[:]) + if err != nil { + t.Fatal(err) + } +} + +func remove(t testing.TB, filename string) { + err := os.Remove(filename) + if err != nil { + t.Fatal(err) + } +} + +func nodeFromFI(t testing.TB, filename string, fi os.FileInfo) *restic.Node { + node, err := restic.NodeFromFileInfo(filename, fi) + if err != nil { + t.Fatal(err) + } + + return node +} + +func TestFileChanged(t *testing.T) { + var defaultContent = []byte("foobar") + + var d = 50 * time.Millisecond + if runtime.GOOS == "darwin" { + // on older darwin instances the file system only supports one second + // granularity + d = time.Second + } + + sleep := func() { + time.Sleep(d) + } + + var tests = []struct { + Name string + Content []byte + Modify func(t testing.TB, filename string) + }{ + { + Name: 
"same-content-new-file", + Modify: func(t testing.TB, filename string) { + remove(t, filename) + sleep() + save(t, filename, defaultContent) + }, + }, + { + Name: "same-content-new-timestamp", + Modify: func(t testing.TB, filename string) { + sleep() + save(t, filename, defaultContent) + }, + }, + { + Name: "other-content", + Modify: func(t testing.TB, filename string) { + remove(t, filename) + sleep() + save(t, filename, []byte("xxxxxx")) + }, + }, + { + Name: "longer-content", + Modify: func(t testing.TB, filename string) { + save(t, filename, []byte("xxxxxxxxxxxxxxxxxxxxxx")) + }, + }, + { + Name: "new-file", + Modify: func(t testing.TB, filename string) { + remove(t, filename) + sleep() + save(t, filename, defaultContent) + }, + }, + } + + for _, test := range tests { + t.Run(test.Name, func(t *testing.T) { + tempdir, cleanup := restictest.TempDir(t) + defer cleanup() + + filename := filepath.Join(tempdir, "file") + content := defaultContent + if test.Content != nil { + content = test.Content + } + save(t, filename, content) + + fiBefore := lstat(t, filename) + node := nodeFromFI(t, filename, fiBefore) + + if fileChanged(fiBefore, node) { + t.Fatalf("unchanged file detected as changed") + } + + test.Modify(t, filename) + + fiAfter := lstat(t, filename) + if !fileChanged(fiAfter, node) { + t.Fatalf("modified file detected as unchanged") + } + }) + } +} + +func TestFilChangedSpecialCases(t *testing.T) { + tempdir, cleanup := restictest.TempDir(t) defer cleanup() - data := rtest.Random(23, 10<<20) // 10MiB + filename := filepath.Join(tempdir, "file") + content := []byte("foobar") + save(t, filename, content) - buf := make([]byte, chunker.MaxSize) + t.Run("nil-node", func(t *testing.T) { + fi := lstat(t, filename) + if !fileChanged(fi, nil) { + t.Fatal("nil node detected as unchanged") + } + }) - b.ResetTimer() - b.SetBytes(int64(len(data))) - - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - rd := bytes.NewReader(data) - benchmarkChunkEncryptP(pb, buf, rd, repo.Key()) + t.Run("type-change", func(t *testing.T) { + fi := lstat(t, filename) + node := nodeFromFI(t, filename, fi) + node.Type = "symlink" + if !fileChanged(fi, node) { + t.Fatal("node with changed type detected as unchanged") } }) } -func archiveDirectory(b testing.TB) { - repo, cleanup := repository.TestRepository(b) - defer cleanup() +func TestArchiverSaveDir(t *testing.T) { + const targetNodeName = "targetdir" - arch := archiver.New(repo) - - _, id, err := arch.Snapshot(context.TODO(), nil, []string{rtest.BenchArchiveDirectory}, nil, "localhost", nil, time.Now()) - rtest.OK(b, err) - - b.Logf("snapshot archived as %v", id) -} - -func TestArchiveDirectory(t *testing.T) { - if rtest.BenchArchiveDirectory == "" { - t.Skip("benchdir not set, skipping TestArchiveDirectory") + var tests = []struct { + src TestDir + chdir string + target string + want TestDir + }{ + { + src: TestDir{ + "targetfile": TestFile{Content: string(restictest.Random(888, 2*1024*1024+5000))}, + }, + target: ".", + want: TestDir{ + "targetdir": TestDir{ + "targetfile": TestFile{Content: string(restictest.Random(888, 2*1024*1024+5000))}, + }, + }, + }, + { + src: TestDir{ + "targetdir": TestDir{ + "foo": TestFile{Content: "foo"}, + "emptyfile": TestFile{Content: ""}, + "bar": TestFile{Content: "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"}, + "largefile": TestFile{Content: string(restictest.Random(888, 2*1024*1024+5000))}, + "largerfile": TestFile{Content: string(restictest.Random(234, 5*1024*1024+5000))}, + }, + }, + target: "targetdir", + }, + { + src: TestDir{ + 
"foo": TestFile{Content: "foo"}, + "emptyfile": TestFile{Content: ""}, + "bar": TestFile{Content: "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"}, + }, + target: ".", + want: TestDir{ + "targetdir": TestDir{ + "foo": TestFile{Content: "foo"}, + "emptyfile": TestFile{Content: ""}, + "bar": TestFile{Content: "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"}, + }, + }, + }, + { + src: TestDir{ + "foo": TestDir{ + "subdir": TestDir{ + "x": TestFile{Content: "xxx"}, + "y": TestFile{Content: "yyyyyyyyyyyyyyyy"}, + "z": TestFile{Content: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"}, + }, + "file": TestFile{Content: "just a test"}, + }, + }, + chdir: "foo/subdir", + target: "../../", + want: TestDir{ + "targetdir": TestDir{ + "foo": TestDir{ + "subdir": TestDir{ + "x": TestFile{Content: "xxx"}, + "y": TestFile{Content: "yyyyyyyyyyyyyyyy"}, + "z": TestFile{Content: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"}, + }, + "file": TestFile{Content: "just a test"}, + }, + }, + }, + }, + { + src: TestDir{ + "foo": TestDir{ + "file": TestFile{Content: "just a test"}, + "file2": TestFile{Content: "again"}, + }, + }, + target: "./foo", + want: TestDir{ + "targetdir": TestDir{ + "file": TestFile{Content: "just a test"}, + "file2": TestFile{Content: "again"}, + }, + }, + }, } - archiveDirectory(t) -} + for _, test := range tests { + t.Run("", func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() -func BenchmarkArchiveDirectory(b *testing.B) { - if rtest.BenchArchiveDirectory == "" { - b.Skip("benchdir not set, skipping BenchmarkArchiveDirectory") - } + tempdir, repo, cleanup := prepareTempdirRepoSrc(t, test.src) + defer cleanup() - for i := 0; i < b.N; i++ { - archiveDirectory(b) + arch := New(repo, fs.Track{fs.Local{}}, Options{}) + arch.runWorkers(ctx) + + chdir := tempdir + if test.chdir != "" { + chdir = filepath.Join(chdir, test.chdir) + } + + back := fs.TestChdir(t, chdir) + defer back() + + fi, err := fs.Lstat(test.target) + if err != nil { + t.Fatal(err) + } + + node, stats, err := arch.SaveDir(ctx, "/", fi, test.target, nil) + if err != nil { + t.Fatal(err) + } + + t.Logf("stats: %v", stats) + if stats.DataSize != 0 { + t.Errorf("wrong stats returned in DataSize, want 0, got %d", stats.DataSize) + } + if stats.DataBlobs != 0 { + t.Errorf("wrong stats returned in DataBlobs, want 0, got %d", stats.DataBlobs) + } + if stats.TreeSize <= 0 { + t.Errorf("wrong stats returned in TreeSize, want > 0, got %d", stats.TreeSize) + } + if stats.TreeBlobs <= 0 { + t.Errorf("wrong stats returned in TreeBlobs, want > 0, got %d", stats.TreeBlobs) + } + + node.Name = targetNodeName + tree := &restic.Tree{Nodes: []*restic.Node{node}} + treeID, err := repo.SaveTree(ctx, tree) + if err != nil { + t.Fatal(err) + } + + err = repo.Flush(ctx) + if err != nil { + t.Fatal(err) + } + + err = repo.SaveIndex(ctx) + if err != nil { + t.Fatal(err) + } + + want := test.want + if want == nil { + want = test.src + } + TestEnsureTree(ctx, t, "/", repo, treeID, want) + }) } } -func countPacks(t testing.TB, repo restic.Repository, tpe restic.FileType) (n uint) { - err := repo.Backend().List(context.TODO(), tpe, func(restic.FileInfo) error { - n++ - return nil - }) - if err != nil { - t.Fatal(err) +func TestArchiverSaveDirIncremental(t *testing.T) { + tempdir, removeTempdir := restictest.TempDir(t) + defer removeTempdir() + + testRepo, removeRepository := repository.TestRepository(t) + defer removeRepository() + + repo := &blobCountingRepo{ + Repository: testRepo, + saved: make(map[restic.BlobHandle]uint), } - return n -} + appendToFile(t, 
filepath.Join(tempdir, "testfile"), []byte("foobar")) -func archiveWithDedup(t testing.TB) { - repo, cleanup := repository.TestRepository(t) - defer cleanup() + // save the empty directory several times in a row, then have a look if the + // archiver did save the same tree several times + for i := 0; i < 5; i++ { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - if rtest.BenchArchiveDirectory == "" { - t.Skip("benchdir not set, skipping TestArchiverDedup") - } + arch := New(repo, fs.Track{fs.Local{}}, Options{}) + arch.runWorkers(ctx) - var cnt struct { - before, after, after2 struct { - packs, dataBlobs, treeBlobs uint - } - } - - // archive a few files - sn := archiver.TestSnapshot(t, repo, rtest.BenchArchiveDirectory, nil) - t.Logf("archived snapshot %v", sn.ID().Str()) - - // get archive stats - cnt.before.packs = countPacks(t, repo, restic.DataFile) - cnt.before.dataBlobs = repo.Index().Count(restic.DataBlob) - cnt.before.treeBlobs = repo.Index().Count(restic.TreeBlob) - t.Logf("packs %v, data blobs %v, tree blobs %v", - cnt.before.packs, cnt.before.dataBlobs, cnt.before.treeBlobs) - - // archive the same files again, without parent snapshot - sn2 := archiver.TestSnapshot(t, repo, rtest.BenchArchiveDirectory, nil) - t.Logf("archived snapshot %v", sn2.ID().Str()) - - // get archive stats again - cnt.after.packs = countPacks(t, repo, restic.DataFile) - cnt.after.dataBlobs = repo.Index().Count(restic.DataBlob) - cnt.after.treeBlobs = repo.Index().Count(restic.TreeBlob) - t.Logf("packs %v, data blobs %v, tree blobs %v", - cnt.after.packs, cnt.after.dataBlobs, cnt.after.treeBlobs) - - // if there are more data blobs, something is wrong - if cnt.after.dataBlobs > cnt.before.dataBlobs { - t.Fatalf("TestArchiverDedup: too many data blobs in repository: before %d, after %d", - cnt.before.dataBlobs, cnt.after.dataBlobs) - } - - // archive the same files again, with a parent snapshot - sn3 := archiver.TestSnapshot(t, repo, rtest.BenchArchiveDirectory, sn2.ID()) - t.Logf("archived snapshot %v, parent %v", sn3.ID().Str(), sn2.ID().Str()) - - // get archive stats again - cnt.after2.packs = countPacks(t, repo, restic.DataFile) - cnt.after2.dataBlobs = repo.Index().Count(restic.DataBlob) - cnt.after2.treeBlobs = repo.Index().Count(restic.TreeBlob) - t.Logf("packs %v, data blobs %v, tree blobs %v", - cnt.after2.packs, cnt.after2.dataBlobs, cnt.after2.treeBlobs) - - // if there are more data blobs, something is wrong - if cnt.after2.dataBlobs > cnt.before.dataBlobs { - t.Fatalf("TestArchiverDedup: too many data blobs in repository: before %d, after %d", - cnt.before.dataBlobs, cnt.after2.dataBlobs) - } -} - -func TestArchiveDedup(t *testing.T) { - archiveWithDedup(t) -} - -func TestArchiveEmptySnapshot(t *testing.T) { - repo, cleanup := repository.TestRepository(t) - defer cleanup() - - arch := archiver.New(repo) - - sn, id, err := arch.Snapshot(context.TODO(), nil, []string{"file-does-not-exist-123123213123", "file2-does-not-exist-too-123123123"}, nil, "localhost", nil, time.Now()) - if err == nil { - t.Errorf("expected error for empty snapshot, got nil") - } - - if !id.IsNull() { - t.Errorf("expected null ID for empty snapshot, got %v", id.Str()) - } - - if sn != nil { - t.Errorf("expected null snapshot for empty snapshot, got %v", sn) - } -} - -func chdir(t testing.TB, target string) (cleanup func()) { - curdir, err := os.Getwd() - if err != nil { - t.Fatal(err) - } - - t.Logf("chdir to %v", target) - err = os.Chdir(target) - if err != nil { - t.Fatal(err) - } - - return 
func() { - t.Logf("chdir back to %v", curdir) - err := os.Chdir(curdir) + fi, err := fs.Lstat(tempdir) if err != nil { t.Fatal(err) } + + node, stats, err := arch.SaveDir(ctx, "/", fi, tempdir, nil) + if err != nil { + t.Fatal(err) + } + + if i == 0 { + // operation must have added new tree data + if stats.DataSize != 0 { + t.Errorf("wrong stats returned in DataSize, want 0, got %d", stats.DataSize) + } + if stats.DataBlobs != 0 { + t.Errorf("wrong stats returned in DataBlobs, want 0, got %d", stats.DataBlobs) + } + if stats.TreeSize <= 0 { + t.Errorf("wrong stats returned in TreeSize, want > 0, got %d", stats.TreeSize) + } + if stats.TreeBlobs <= 0 { + t.Errorf("wrong stats returned in TreeBlobs, want > 0, got %d", stats.TreeBlobs) + } + } else { + // operation must not have added any new data + if stats.DataSize != 0 { + t.Errorf("wrong stats returned in DataSize, want 0, got %d", stats.DataSize) + } + if stats.DataBlobs != 0 { + t.Errorf("wrong stats returned in DataBlobs, want 0, got %d", stats.DataBlobs) + } + if stats.TreeSize != 0 { + t.Errorf("wrong stats returned in TreeSize, want 0, got %d", stats.TreeSize) + } + if stats.TreeBlobs != 0 { + t.Errorf("wrong stats returned in TreeBlobs, want 0, got %d", stats.TreeBlobs) + } + } + + t.Logf("node subtree %v", node.Subtree) + + err = repo.Flush(ctx) + if err != nil { + t.Fatal(err) + } + + err = repo.SaveIndex(ctx) + if err != nil { + t.Fatal(err) + } + + for h, n := range repo.saved { + if n > 1 { + t.Errorf("iteration %v: blob %v saved more than once (%d times)", i, h, n) + } + } } } -func TestArchiveNameCollision(t *testing.T) { - repo, cleanup := repository.TestRepository(t) - defer cleanup() +func TestArchiverSaveTree(t *testing.T) { + symlink := func(from, to string) func(t testing.TB) { + return func(t testing.TB) { + err := os.Symlink(from, to) + if err != nil { + t.Fatal(err) + } + } + } - dir, cleanup := rtest.TempDir(t) - defer cleanup() + var tests = []struct { + src TestDir + prepare func(t testing.TB) + targets []string + want TestDir + }{ + { + src: TestDir{ + "targetfile": TestFile{Content: string("foobar")}, + }, + targets: []string{"targetfile"}, + want: TestDir{ + "targetfile": TestFile{Content: string("foobar")}, + }, + }, + { + src: TestDir{ + "targetfile": TestFile{Content: string("foobar")}, + }, + prepare: symlink("targetfile", "filesymlink"), + targets: []string{"targetfile", "filesymlink"}, + want: TestDir{ + "targetfile": TestFile{Content: string("foobar")}, + "filesymlink": TestSymlink{Target: "targetfile"}, + }, + }, + { + src: TestDir{ + "dir": TestDir{ + "subdir": TestDir{ + "subsubdir": TestDir{ + "targetfile": TestFile{Content: string("foobar")}, + }, + }, + "otherfile": TestFile{Content: string("xxx")}, + }, + }, + prepare: symlink("subdir", filepath.FromSlash("dir/symlink")), + targets: []string{filepath.FromSlash("dir/symlink")}, + want: TestDir{ + "dir": TestDir{ + "symlink": TestSymlink{Target: "subdir"}, + }, + }, + }, + { + src: TestDir{ + "dir": TestDir{ + "subdir": TestDir{ + "subsubdir": TestDir{ + "targetfile": TestFile{Content: string("foobar")}, + }, + }, + "otherfile": TestFile{Content: string("xxx")}, + }, + }, + prepare: symlink("subdir", filepath.FromSlash("dir/symlink")), + targets: []string{filepath.FromSlash("dir/symlink/subsubdir")}, + want: TestDir{ + "dir": TestDir{ + "symlink": TestDir{ + "subsubdir": TestDir{ + "targetfile": TestFile{Content: string("foobar")}, + }, + }, + }, + }, + }, + } - root := filepath.Join(dir, "root") - rtest.OK(t, os.MkdirAll(root, 0755)) + for _, test 
:= range tests { + t.Run("", func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - rtest.OK(t, ioutil.WriteFile(filepath.Join(dir, "testfile"), []byte("testfile1"), 0644)) - rtest.OK(t, ioutil.WriteFile(filepath.Join(dir, "root", "testfile"), []byte("testfile2"), 0644)) + tempdir, repo, cleanup := prepareTempdirRepoSrc(t, test.src) + defer cleanup() - defer chdir(t, root)() + testFS := fs.Track{fs.Local{}} - arch := archiver.New(repo) + arch := New(repo, testFS, Options{}) + arch.runWorkers(ctx) - sn, id, err := arch.Snapshot(context.TODO(), nil, []string{"testfile", filepath.Join("..", "testfile")}, nil, "localhost", nil, time.Now()) - rtest.OK(t, err) + back := fs.TestChdir(t, tempdir) + defer back() - t.Logf("snapshot archived as %v", id) + if test.prepare != nil { + test.prepare(t) + } - tree, err := repo.LoadTree(context.TODO(), *sn.Tree) - rtest.OK(t, err) + atree, err := NewTree(testFS, test.targets) + if err != nil { + t.Fatal(err) + } - if len(tree.Nodes) != 2 { - t.Fatalf("tree has %d nodes, wanted 2: %v", len(tree.Nodes), tree.Nodes) + tree, err := arch.SaveTree(ctx, "/", atree, nil) + if err != nil { + t.Fatal(err) + } + + treeID, err := repo.SaveTree(ctx, tree) + if err != nil { + t.Fatal(err) + } + + err = repo.Flush(ctx) + if err != nil { + t.Fatal(err) + } + + err = repo.SaveIndex(ctx) + if err != nil { + t.Fatal(err) + } + + want := test.want + if want == nil { + want = test.src + } + TestEnsureTree(ctx, t, "/", repo, treeID, want) + }) + } +} + +func TestArchiverSnapshot(t *testing.T) { + var tests = []struct { + name string + src TestDir + want TestDir + chdir string + targets []string + }{ + { + name: "single-file", + src: TestDir{ + "foo": TestFile{Content: "foo"}, + }, + targets: []string{"foo"}, + }, + { + name: "file-current-dir", + src: TestDir{ + "foo": TestFile{Content: "foo"}, + }, + targets: []string{"./foo"}, + }, + { + name: "dir", + src: TestDir{ + "target": TestDir{ + "foo": TestFile{Content: "foo"}, + }, + }, + targets: []string{"target"}, + }, + { + name: "dir-current-dir", + src: TestDir{ + "target": TestDir{ + "foo": TestFile{Content: "foo"}, + }, + }, + targets: []string{"./target"}, + }, + { + name: "content-dir-current-dir", + src: TestDir{ + "target": TestDir{ + "foo": TestFile{Content: "foo"}, + }, + }, + targets: []string{"./target/."}, + }, + { + name: "current-dir", + src: TestDir{ + "target": TestDir{ + "foo": TestFile{Content: "foo"}, + }, + }, + targets: []string{"."}, + }, + { + name: "subdir", + src: TestDir{ + "subdir": TestDir{ + "foo": TestFile{Content: "foo"}, + "subsubdir": TestDir{ + "foo": TestFile{Content: "foo in subsubdir"}, + }, + }, + "other": TestFile{Content: "another file"}, + }, + targets: []string{"subdir"}, + want: TestDir{ + "subdir": TestDir{ + "foo": TestFile{Content: "foo"}, + "subsubdir": TestDir{ + "foo": TestFile{Content: "foo in subsubdir"}, + }, + }, + }, + }, + { + name: "subsubdir", + src: TestDir{ + "subdir": TestDir{ + "foo": TestFile{Content: "foo"}, + "subsubdir": TestDir{ + "foo": TestFile{Content: "foo in subsubdir"}, + }, + }, + "other": TestFile{Content: "another file"}, + }, + targets: []string{"subdir/subsubdir"}, + want: TestDir{ + "subdir": TestDir{ + "subsubdir": TestDir{ + "foo": TestFile{Content: "foo in subsubdir"}, + }, + }, + }, + }, + { + name: "parent-dir", + src: TestDir{ + "subdir": TestDir{ + "foo": TestFile{Content: "foo"}, + }, + "other": TestFile{Content: "another file"}, + }, + chdir: "subdir", + targets: []string{".."}, + }, + { + name: 
"parent-parent-dir", + src: TestDir{ + "subdir": TestDir{ + "foo": TestFile{Content: "foo"}, + "subsubdir": TestDir{ + "empty": TestFile{Content: ""}, + }, + }, + "other": TestFile{Content: "another file"}, + }, + chdir: "subdir/subsubdir", + targets: []string{"../.."}, + }, + { + name: "parent-parent-dir-slash", + src: TestDir{ + "subdir": TestDir{ + "subsubdir": TestDir{ + "foo": TestFile{Content: "foo"}, + }, + }, + "other": TestFile{Content: "another file"}, + }, + chdir: "subdir/subsubdir", + targets: []string{"../../"}, + want: TestDir{ + "subdir": TestDir{ + "subsubdir": TestDir{ + "foo": TestFile{Content: "foo"}, + }, + }, + "other": TestFile{Content: "another file"}, + }, + }, + { + name: "parent-subdir", + src: TestDir{ + "subdir": TestDir{ + "foo": TestFile{Content: "foo"}, + }, + "other": TestFile{Content: "another file"}, + }, + chdir: "subdir", + targets: []string{"../subdir"}, + want: TestDir{ + "subdir": TestDir{ + "foo": TestFile{Content: "foo"}, + }, + }, + }, + { + name: "parent-parent-dir-subdir", + src: TestDir{ + "subdir": TestDir{ + "subsubdir": TestDir{ + "foo": TestFile{Content: "foo"}, + }, + }, + "other": TestFile{Content: "another file"}, + }, + chdir: "subdir/subsubdir", + targets: []string{"../../subdir/subsubdir"}, + want: TestDir{ + "subdir": TestDir{ + "subsubdir": TestDir{ + "foo": TestFile{Content: "foo"}, + }, + }, + }, + }, + { + name: "included-multiple1", + src: TestDir{ + "subdir": TestDir{ + "subsubdir": TestDir{ + "foo": TestFile{Content: "foo"}, + }, + "other": TestFile{Content: "another file"}, + }, + }, + targets: []string{"subdir", "subdir/subsubdir"}, + }, + { + name: "included-multiple2", + src: TestDir{ + "subdir": TestDir{ + "subsubdir": TestDir{ + "foo": TestFile{Content: "foo"}, + }, + "other": TestFile{Content: "another file"}, + }, + }, + targets: []string{"subdir/subsubdir", "subdir"}, + }, + { + name: "collision", + src: TestDir{ + "subdir": TestDir{ + "foo": TestFile{Content: "foo in subdir"}, + "subsubdir": TestDir{ + "foo": TestFile{Content: "foo in subsubdir"}, + }, + }, + "foo": TestFile{Content: "another file"}, + }, + chdir: "subdir", + targets: []string{".", "../foo"}, + want: TestDir{ + + "foo": TestFile{Content: "foo in subdir"}, + "subsubdir": TestDir{ + "foo": TestFile{Content: "foo in subsubdir"}, + }, + "foo-1": TestFile{Content: "another file"}, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + tempdir, repo, cleanup := prepareTempdirRepoSrc(t, test.src) + defer cleanup() + + arch := New(repo, fs.Track{fs.Local{}}, Options{}) + + chdir := tempdir + if test.chdir != "" { + chdir = filepath.Join(chdir, filepath.FromSlash(test.chdir)) + } + + back := fs.TestChdir(t, chdir) + defer back() + + var targets []string + for _, target := range test.targets { + targets = append(targets, os.ExpandEnv(target)) + } + + t.Logf("targets: %v", targets) + sn, snapshotID, err := arch.Snapshot(ctx, targets, SnapshotOptions{Time: time.Now()}) + if err != nil { + t.Fatal(err) + } + + t.Logf("saved as %v", snapshotID.Str()) + + want := test.want + if want == nil { + want = test.src + } + TestEnsureSnapshot(t, repo, snapshotID, want) + + checker.TestCheckRepo(t, repo) + + // check that the snapshot contains the targets with absolute paths + for i, target := range sn.Paths { + atarget, err := filepath.Abs(test.targets[i]) + if err != nil { + t.Fatal(err) + } + + if target != atarget { + t.Errorf("wrong path in snapshot: want %v, 
got %v", atarget, target) + } + } + }) + } +} + +func TestArchiverSnapshotSelect(t *testing.T) { + var tests = []struct { + name string + src TestDir + want TestDir + selFn SelectFunc + }{ + { + name: "include-all", + src: TestDir{ + "work": TestDir{ + "foo": TestFile{Content: "foo"}, + "foo.txt": TestFile{Content: "foo text file"}, + "subdir": TestDir{ + "other": TestFile{Content: "other in subdir"}, + "bar.txt": TestFile{Content: "bar.txt in subdir"}, + }, + }, + "other": TestFile{Content: "another file"}, + }, + selFn: func(item string, fi os.FileInfo) bool { + return true + }, + }, + { + name: "exclude-all", + src: TestDir{ + "work": TestDir{ + "foo": TestFile{Content: "foo"}, + "foo.txt": TestFile{Content: "foo text file"}, + "subdir": TestDir{ + "other": TestFile{Content: "other in subdir"}, + "bar.txt": TestFile{Content: "bar.txt in subdir"}, + }, + }, + "other": TestFile{Content: "another file"}, + }, + selFn: func(item string, fi os.FileInfo) bool { + return false + }, + want: TestDir{}, + }, + { + name: "exclude-txt-files", + src: TestDir{ + "work": TestDir{ + "foo": TestFile{Content: "foo"}, + "foo.txt": TestFile{Content: "foo text file"}, + "subdir": TestDir{ + "other": TestFile{Content: "other in subdir"}, + "bar.txt": TestFile{Content: "bar.txt in subdir"}, + }, + }, + "other": TestFile{Content: "another file"}, + }, + want: TestDir{ + "work": TestDir{ + "foo": TestFile{Content: "foo"}, + "subdir": TestDir{ + "other": TestFile{Content: "other in subdir"}, + }, + }, + "other": TestFile{Content: "another file"}, + }, + selFn: func(item string, fi os.FileInfo) bool { + if filepath.Ext(item) == ".txt" { + return false + } + return true + }, + }, + { + name: "exclude-dir", + src: TestDir{ + "work": TestDir{ + "foo": TestFile{Content: "foo"}, + "foo.txt": TestFile{Content: "foo text file"}, + "subdir": TestDir{ + "other": TestFile{Content: "other in subdir"}, + "bar.txt": TestFile{Content: "bar.txt in subdir"}, + }, + }, + "other": TestFile{Content: "another file"}, + }, + want: TestDir{ + "work": TestDir{ + "foo": TestFile{Content: "foo"}, + "foo.txt": TestFile{Content: "foo text file"}, + }, + "other": TestFile{Content: "another file"}, + }, + selFn: func(item string, fi os.FileInfo) bool { + if filepath.Base(item) == "subdir" { + return false + } + return true + }, + }, + { + name: "select-absolute-paths", + src: TestDir{ + "foo": TestFile{Content: "foo"}, + }, + selFn: func(item string, fi os.FileInfo) bool { + return filepath.IsAbs(item) + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + tempdir, repo, cleanup := prepareTempdirRepoSrc(t, test.src) + defer cleanup() + + arch := New(repo, fs.Track{fs.Local{}}, Options{}) + arch.Select = test.selFn + + back := fs.TestChdir(t, tempdir) + defer back() + + targets := []string{"."} + _, snapshotID, err := arch.Snapshot(ctx, targets, SnapshotOptions{Time: time.Now()}) + if err != nil { + t.Fatal(err) + } + + t.Logf("saved as %v", snapshotID.Str()) + + want := test.want + if want == nil { + want = test.src + } + TestEnsureSnapshot(t, repo, snapshotID, want) + + checker.TestCheckRepo(t, repo) + }) + } +} + +// MockFS keeps track which files are read. 
+type MockFS struct { + fs.FS + + m sync.Mutex + bytesRead map[string]int // tracks bytes read from all opened files +} + +func (m *MockFS) Open(name string) (fs.File, error) { + f, err := m.FS.Open(name) + if err != nil { + return f, err + } + + return MockFile{File: f, fs: m, filename: name}, nil +} + +func (m *MockFS) OpenFile(name string, flag int, perm os.FileMode) (fs.File, error) { + f, err := m.FS.OpenFile(name, flag, perm) + if err != nil { + return f, err + } + + return MockFile{File: f, fs: m, filename: name}, nil +} + +type MockFile struct { + fs.File + filename string + + fs *MockFS +} + +func (f MockFile) Read(p []byte) (int, error) { + n, err := f.File.Read(p) + if n > 0 { + f.fs.m.Lock() + f.fs.bytesRead[f.filename] += n + f.fs.m.Unlock() + } + return n, err +} + +func TestArchiverParent(t *testing.T) { + var tests = []struct { + src TestDir + read map[string]int // tracks number of times a file must have been read + }{ + { + src: TestDir{ + "targetfile": TestFile{Content: string(restictest.Random(888, 2*1024*1024+5000))}, + }, + read: map[string]int{ + "targetfile": 1, + }, + }, + } + + for _, test := range tests { + t.Run("", func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + tempdir, repo, cleanup := prepareTempdirRepoSrc(t, test.src) + defer cleanup() + + testFS := &MockFS{ + FS: fs.Track{fs.Local{}}, + bytesRead: make(map[string]int), + } + + arch := New(repo, testFS, Options{}) + + back := fs.TestChdir(t, tempdir) + defer back() + + _, firstSnapshotID, err := arch.Snapshot(ctx, []string{"."}, SnapshotOptions{Time: time.Now()}) + if err != nil { + t.Fatal(err) + } + + t.Logf("first backup saved as %v", firstSnapshotID.Str()) + t.Logf("testfs: %v", testFS) + + // check that all files have been read exactly once + TestWalkFiles(t, ".", test.src, func(filename string, item interface{}) error { + file, ok := item.(TestFile) + if !ok { + return nil + } + + n, ok := testFS.bytesRead[filename] + if !ok { + t.Fatalf("file %v was not read at all", filename) + } + + if n != len(file.Content) { + t.Fatalf("file %v: read %v bytes, wanted %v bytes", filename, n, len(file.Content)) + } + return nil + }) + + opts := SnapshotOptions{ + Time: time.Now(), + ParentSnapshot: firstSnapshotID, + } + _, secondSnapshotID, err := arch.Snapshot(ctx, []string{"."}, opts) + if err != nil { + t.Fatal(err) + } + + // check that all files still been read exactly once + TestWalkFiles(t, ".", test.src, func(filename string, item interface{}) error { + file, ok := item.(TestFile) + if !ok { + return nil + } + + n, ok := testFS.bytesRead[filename] + if !ok { + t.Fatalf("file %v was not read at all", filename) + } + + if n != len(file.Content) { + t.Fatalf("file %v: read %v bytes, wanted %v bytes", filename, n, len(file.Content)) + } + return nil + }) + + t.Logf("second backup saved as %v", secondSnapshotID.Str()) + t.Logf("testfs: %v", testFS) + + checker.TestCheckRepo(t, repo) + }) + } +} + +func TestArchiverErrorReporting(t *testing.T) { + ignoreErrorForBasename := func(basename string) ErrorFunc { + return func(item string, fi os.FileInfo, err error) error { + if filepath.Base(item) == "targetfile" { + t.Logf("ignoring error for targetfile: %v", err) + return nil + } + + t.Errorf("error handler called for unexpected file %v: %v", item, err) + return err + } + } + + chmodUnreadable := func(filename string) func(testing.TB) { + return func(t testing.TB) { + if runtime.GOOS == "windows" { + t.Skip("Skipping this test for windows") + } + + err := 
os.Chmod(filepath.FromSlash(filename), 0004) + if err != nil { + t.Fatal(err) + } + } + } + + var tests = []struct { + name string + src TestDir + want TestDir + prepare func(t testing.TB) + errFn ErrorFunc + mustError bool + }{ + { + name: "no-error", + src: TestDir{ + "targetfile": TestFile{Content: "foobar"}, + }, + }, + { + name: "file-unreadable", + src: TestDir{ + "targetfile": TestFile{Content: "foobar"}, + }, + prepare: chmodUnreadable("targetfile"), + mustError: true, + }, + { + name: "file-unreadable-ignore-error", + src: TestDir{ + "targetfile": TestFile{Content: "foobar"}, + "other": TestFile{Content: "xxx"}, + }, + want: TestDir{ + "other": TestFile{Content: "xxx"}, + }, + prepare: chmodUnreadable("targetfile"), + errFn: ignoreErrorForBasename("targetfile"), + }, + { + name: "file-subdir-unreadable", + src: TestDir{ + "subdir": TestDir{ + "targetfile": TestFile{Content: "foobar"}, + }, + }, + prepare: chmodUnreadable("subdir/targetfile"), + mustError: true, + }, + { + name: "file-subdir-unreadable-ignore-error", + src: TestDir{ + "subdir": TestDir{ + "targetfile": TestFile{Content: "foobar"}, + "other": TestFile{Content: "xxx"}, + }, + }, + want: TestDir{ + "subdir": TestDir{ + "other": TestFile{Content: "xxx"}, + }, + }, + prepare: chmodUnreadable("subdir/targetfile"), + errFn: ignoreErrorForBasename("targetfile"), + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + tempdir, repo, cleanup := prepareTempdirRepoSrc(t, test.src) + defer cleanup() + + back := fs.TestChdir(t, tempdir) + defer back() + + if test.prepare != nil { + test.prepare(t) + } + + arch := New(repo, fs.Track{fs.Local{}}, Options{}) + arch.Error = test.errFn + + _, snapshotID, err := arch.Snapshot(ctx, []string{"."}, SnapshotOptions{Time: time.Now()}) + if test.mustError { + if err != nil { + t.Logf("found expected error (%v), skipping further checks", err) + return + } + + t.Fatalf("expected error not returned by archiver") + return + } + + if err != nil { + t.Fatalf("unexpected error of type %T found: %v", err, err) + } + + t.Logf("saved as %v", snapshotID.Str()) + + want := test.want + if want == nil { + want = test.src + } + TestEnsureSnapshot(t, repo, snapshotID, want) + + checker.TestCheckRepo(t, repo) + }) } } diff --git a/internal/archiver/blob_saver.go b/internal/archiver/blob_saver.go new file mode 100644 index 000000000..5e45d7175 --- /dev/null +++ b/internal/archiver/blob_saver.go @@ -0,0 +1,158 @@ +package archiver + +import ( + "context" + "sync" + + "github.com/restic/restic/internal/restic" +) + +// Saver allows saving a blob. +type Saver interface { + SaveBlob(ctx context.Context, t restic.BlobType, data []byte, id restic.ID) (restic.ID, error) + Index() restic.Index +} + +// BlobSaver concurrently saves incoming blobs to the repo. +type BlobSaver struct { + repo Saver + + m sync.Mutex + knownBlobs restic.BlobSet + + ch chan<- saveBlobJob + wg sync.WaitGroup +} + +// NewBlobSaver returns a new blob. A worker pool is started, it is stopped +// when ctx is cancelled. +func NewBlobSaver(ctx context.Context, repo Saver, workers uint) *BlobSaver { + ch := make(chan saveBlobJob, 2*int(workers)) + s := &BlobSaver{ + repo: repo, + knownBlobs: restic.NewBlobSet(), + ch: ch, + } + + for i := uint(0); i < workers; i++ { + s.wg.Add(1) + go s.worker(ctx, &s.wg, ch) + } + + return s +} + +// Save stores a blob in the repo. It checks the index and the known blobs +// before saving anything. 
The second return parameter is true if the blob was +// previously unknown. +func (s *BlobSaver) Save(ctx context.Context, t restic.BlobType, buf Buffer) FutureBlob { + ch := make(chan saveBlobResponse, 1) + s.ch <- saveBlobJob{BlobType: t, buf: buf, ch: ch} + + return FutureBlob{ch: ch, length: len(buf.Data)} +} + +// FutureBlob is returned by SaveBlob and will return the data once it has been processed. +type FutureBlob struct { + ch <-chan saveBlobResponse + length int + res saveBlobResponse +} + +func (s *FutureBlob) wait() { + res, ok := <-s.ch + if ok { + s.res = res + } +} + +// ID returns the ID of the blob after it has been saved. +func (s *FutureBlob) ID() restic.ID { + s.wait() + return s.res.id +} + +// Known returns whether or not the blob was already known. +func (s *FutureBlob) Known() bool { + s.wait() + return s.res.known +} + +// Err returns the error which may have occurred during save. +func (s *FutureBlob) Err() error { + s.wait() + return s.res.err +} + +// Length returns the length of the blob. +func (s *FutureBlob) Length() int { + return s.length +} + +type saveBlobJob struct { + restic.BlobType + buf Buffer + ch chan<- saveBlobResponse +} + +type saveBlobResponse struct { + id restic.ID + known bool + err error +} + +func (s *BlobSaver) saveBlob(ctx context.Context, t restic.BlobType, buf []byte) saveBlobResponse { + id := restic.Hash(buf) + h := restic.BlobHandle{ID: id, Type: t} + + // check if another goroutine has already saved this blob + known := false + s.m.Lock() + if s.knownBlobs.Has(h) { + known = true + } else { + s.knownBlobs.Insert(h) + known = false + } + s.m.Unlock() + + // blob is already known, nothing to do + if known { + return saveBlobResponse{ + id: id, + known: true, + } + } + + // check if the repo knows this blob + if s.repo.Index().Has(id, t) { + return saveBlobResponse{ + id: id, + known: true, + } + } + + // otherwise we're responsible for saving it + _, err := s.repo.SaveBlob(ctx, t, buf, id) + return saveBlobResponse{ + id: id, + known: false, + err: err, + } +} + +func (s *BlobSaver) worker(ctx context.Context, wg *sync.WaitGroup, jobs <-chan saveBlobJob) { + defer wg.Done() + for { + var job saveBlobJob + select { + case <-ctx.Done(): + return + case job = <-jobs: + } + + job.ch <- s.saveBlob(ctx, job.BlobType, job.buf.Data) + close(job.ch) + job.buf.Release() + } +} diff --git a/internal/archiver/buffer.go b/internal/archiver/buffer.go new file mode 100644 index 000000000..c97d990cf --- /dev/null +++ b/internal/archiver/buffer.go @@ -0,0 +1,90 @@ +package archiver + +import ( + "context" + "sync" +) + +// Buffer is a reusable buffer. After the buffer has been used, Release should +// be called so the underlying slice is put back into the pool. +type Buffer struct { + Data []byte + Put func([]byte) +} + +// Release puts the buffer back into the pool it came from. +func (b Buffer) Release() { + if b.Put != nil { + b.Put(b.Data) + } +} + +// BufferPool implements a limited set of reusable buffers. +type BufferPool struct { + ch chan []byte + chM sync.Mutex + defaultSize int + clearOnce sync.Once +} + +// NewBufferPool initializes a new buffer pool. When the context is cancelled, +// all buffers are released. The pool stores at most max items. New buffers are +// created with defaultSize, buffers that are larger are released and not put +// back. 
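NewBufferPool below (with Get and put further down) bounds reuse with a buffered channel: Get falls back to allocating when the channel is empty, put silently drops the buffer when it is full, and the default cases keep both operations non-blocking. The idiom in isolation, with illustrative names:

```go
package main

import "fmt"

type pool struct {
	ch   chan []byte
	size int
}

// get returns a pooled buffer if one is available, otherwise allocates a
// fresh one; the default case means it never blocks.
func (p *pool) get() []byte {
	select {
	case b := <-p.ch:
		return b
	default:
		return make([]byte, p.size)
	}
}

// put returns a buffer to the pool, silently dropping it when the pool is
// already full so callers never block either.
func (p *pool) put(b []byte) {
	select {
	case p.ch <- b:
	default:
	}
}

func main() {
	p := &pool{ch: make(chan []byte, 2), size: 4}
	b := p.get() // allocated: pool is empty
	p.put(b)
	fmt.Println(len(p.get())) // 4, reused from the pool
}
```

The mutex in the real pool exists so that clear can swap the channel out for nil once the context is cancelled; receiving from or sending on a nil channel never proceeds, so Get and put simply fall through to their default cases afterwards.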
+func NewBufferPool(ctx context.Context, max int, defaultSize int) *BufferPool { + b := &BufferPool{ + ch: make(chan []byte, max), + defaultSize: defaultSize, + } + go func() { + <-ctx.Done() + b.clear() + }() + return b +} + +// Get returns a new buffer, either from the pool or newly allocated. +func (pool *BufferPool) Get() Buffer { + b := Buffer{Put: pool.put} + + pool.chM.Lock() + defer pool.chM.Unlock() + select { + case buf := <-pool.ch: + b.Data = buf + default: + b.Data = make([]byte, pool.defaultSize) + } + + return b +} + +func (pool *BufferPool) put(b []byte) { + pool.chM.Lock() + defer pool.chM.Unlock() + select { + case pool.ch <- b: + default: + } +} + +// Put returns a buffer to the pool for reuse. +func (pool *BufferPool) Put(b Buffer) { + if cap(b.Data) > pool.defaultSize { + return + } + pool.put(b.Data) +} + +// clear empties the buffer so that all items can be garbage collected. +func (pool *BufferPool) clear() { + pool.clearOnce.Do(func() { + ch := pool.ch + pool.chM.Lock() + pool.ch = nil + pool.chM.Unlock() + close(ch) + for range ch { + } + }) +} diff --git a/internal/archiver/buffer_pool.go b/internal/archiver/buffer_pool.go deleted file mode 100644 index 32df5ab7b..000000000 --- a/internal/archiver/buffer_pool.go +++ /dev/null @@ -1,21 +0,0 @@ -package archiver - -import ( - "sync" - - "github.com/restic/chunker" -) - -var bufPool = sync.Pool{ - New: func() interface{} { - return make([]byte, chunker.MinSize) - }, -} - -func getBuf() []byte { - return bufPool.Get().([]byte) -} - -func freeBuf(data []byte) { - bufPool.Put(data) -} diff --git a/internal/archiver/file_saver.go b/internal/archiver/file_saver.go new file mode 100644 index 000000000..9a923c6c7 --- /dev/null +++ b/internal/archiver/file_saver.go @@ -0,0 +1,228 @@ +package archiver + +import ( + "context" + "io" + "os" + "sync" + + "github.com/restic/chunker" + "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/fs" + "github.com/restic/restic/internal/restic" +) + +// FutureFile is returned by SaveFile and will return the data once it +// has been processed. +type FutureFile struct { + ch <-chan saveFileResponse + res saveFileResponse +} + +func (s *FutureFile) wait() { + res, ok := <-s.ch + if ok { + s.res = res + } +} + +// Node returns the node once it is available. +func (s *FutureFile) Node() *restic.Node { + s.wait() + return s.res.node +} + +// Stats returns the stats for the file once they are available. +func (s *FutureFile) Stats() ItemStats { + s.wait() + return s.res.stats +} + +// Err returns the error in case an error occurred. +func (s *FutureFile) Err() error { + s.wait() + return s.res.err +} + +// FileSaver concurrently saves incoming files to the repo. +type FileSaver struct { + fs fs.FS + blobSaver *BlobSaver + saveFilePool *BufferPool + + pol chunker.Pol + + ch chan<- saveFileJob + wg sync.WaitGroup + + CompleteBlob func(filename string, bytes uint64) + + NodeFromFileInfo func(filename string, fi os.FileInfo) (*restic.Node, error) +} + +// NewFileSaver returns a new file saver. A worker pool with workers is +// started, it is stopped when ctx is cancelled. 
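+//
+// An illustrative sketch (hypothetical: ctx, repo and the chunker polynomial
+// pol are assumed to be set up elsewhere, and NodeFromFileInfo must be
+// assigned before the first Save; restic.NodeFromFileInfo is one candidate):
+//
+//	bs := NewBlobSaver(ctx, repo, 2)
+//	fsaver := NewFileSaver(ctx, fs.Local{}, bs, pol, 2)
+//	fsaver.NodeFromFileInfo = restic.NodeFromFileInfo
+//	f, _ := fs.Local{}.Open("data.bin")
+//	fi, _ := f.Stat()
+//	fut := fsaver.Save(ctx, "data.bin", f, fi, func() {}, nil)
+//	node, err := fut.Node(), fut.Err() // f is closed by Save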
+func NewFileSaver(ctx context.Context, fs fs.FS, blobSaver *BlobSaver, pol chunker.Pol, workers uint) *FileSaver { + ch := make(chan saveFileJob, workers) + + s := &FileSaver{ + fs: fs, + blobSaver: blobSaver, + saveFilePool: NewBufferPool(ctx, 3*int(workers), chunker.MaxSize/4), + pol: pol, + ch: ch, + + CompleteBlob: func(string, uint64) {}, + } + + for i := uint(0); i < workers; i++ { + s.wg.Add(1) + go s.worker(ctx, &s.wg, ch) + } + + return s +} + +// CompleteFunc is called when the file has been saved. +type CompleteFunc func(*restic.Node, ItemStats) + +// Save stores the file f and returns the data once it has been completed. The +// file is closed by Save. +func (s *FileSaver) Save(ctx context.Context, snPath string, file fs.File, fi os.FileInfo, start func(), complete CompleteFunc) FutureFile { + ch := make(chan saveFileResponse, 1) + s.ch <- saveFileJob{ + snPath: snPath, + file: file, + fi: fi, + start: start, + complete: complete, + ch: ch, + } + + return FutureFile{ch: ch} +} + +type saveFileJob struct { + snPath string + file fs.File + fi os.FileInfo + ch chan<- saveFileResponse + complete CompleteFunc + start func() +} + +type saveFileResponse struct { + node *restic.Node + stats ItemStats + err error +} + +// saveFile stores the file f in the repo, then closes it. +func (s *FileSaver) saveFile(ctx context.Context, chnker *chunker.Chunker, snPath string, f fs.File, fi os.FileInfo, start func()) saveFileResponse { + start() + + stats := ItemStats{} + + debug.Log("%v", snPath) + + node, err := s.NodeFromFileInfo(f.Name(), fi) + if err != nil { + _ = f.Close() + return saveFileResponse{err: err} + } + + if node.Type != "file" { + _ = f.Close() + return saveFileResponse{err: errors.Errorf("node type %q is wrong", node.Type)} + } + + // reuse the chunker + chnker.Reset(f, s.pol) + + var results []FutureBlob + + node.Content = []restic.ID{} + var size uint64 + for { + buf := s.saveFilePool.Get() + chunk, err := chnker.Next(buf.Data) + if errors.Cause(err) == io.EOF { + buf.Release() + break + } + buf.Data = chunk.Data + + size += uint64(chunk.Length) + + if err != nil { + _ = f.Close() + return saveFileResponse{err: err} + } + + // test if the context has been cancelled, return the error + if ctx.Err() != nil { + _ = f.Close() + return saveFileResponse{err: ctx.Err()} + } + + res := s.blobSaver.Save(ctx, restic.DataBlob, buf) + results = append(results, res) + + // test if the context has been cancelled, return the error + if ctx.Err() != nil { + _ = f.Close() + return saveFileResponse{err: ctx.Err()} + } + + s.CompleteBlob(f.Name(), uint64(len(chunk.Data))) + } + + err = f.Close() + if err != nil { + return saveFileResponse{err: err} + } + + for _, res := range results { + // test if the context has been cancelled, return the error + if res.Err() != nil { + return saveFileResponse{err: ctx.Err()} + } + + if !res.Known() { + stats.DataBlobs++ + stats.DataSize += uint64(res.Length()) + } + + node.Content = append(node.Content, res.ID()) + } + + node.Size = size + + return saveFileResponse{ + node: node, + stats: stats, + } +} + +func (s *FileSaver) worker(ctx context.Context, wg *sync.WaitGroup, jobs <-chan saveFileJob) { + // a worker has one chunker which is reused for each file (because it contains a rather large buffer) + chnker := chunker.New(nil, s.pol) + + defer wg.Done() + for { + var job saveFileJob + select { + case <-ctx.Done(): + return + case job = <-jobs: + } + + res := s.saveFile(ctx, chnker, job.snPath, job.file, job.fi, job.start) + if job.complete != nil { + 
job.complete(res.node, res.stats) + } + job.ch <- res + close(job.ch) + } +} diff --git a/internal/archiver/index_uploader.go b/internal/archiver/index_uploader.go new file mode 100644 index 000000000..c6edb7a01 --- /dev/null +++ b/internal/archiver/index_uploader.go @@ -0,0 +1,53 @@ +package archiver + +import ( + "context" + "time" + + "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/repository" + "github.com/restic/restic/internal/restic" +) + +// IndexUploader polls the repo for full indexes and uploads them. +type IndexUploader struct { + restic.Repository + + // Start is called when an index is to be uploaded. + Start func() + + // Complete is called when uploading an index has finished. + Complete func(id restic.ID) +} + +// Upload periodically uploads full indexes to the repo. When shutdown is +// cancelled, the last index upload will finish and then Upload returns. +func (u IndexUploader) Upload(ctx, shutdown context.Context, interval time.Duration) error { + ticker := time.NewTicker(interval) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + return nil + case <-shutdown.Done(): + return nil + case <-ticker.C: + full := u.Repository.Index().(*repository.MasterIndex).FullIndexes() + for _, idx := range full { + if u.Start != nil { + u.Start() + } + + id, err := repository.SaveIndex(ctx, u.Repository, idx) + if err != nil { + debug.Log("save indexes returned an error: %v", err) + return err + } + if u.Complete != nil { + u.Complete(id) + } + } + } + } +} diff --git a/internal/archiver/scanner.go b/internal/archiver/scanner.go new file mode 100644 index 000000000..000d2d875 --- /dev/null +++ b/internal/archiver/scanner.go @@ -0,0 +1,112 @@ +package archiver + +import ( + "context" + "os" + "path/filepath" + + "github.com/restic/restic/internal/fs" +) + +// Scanner traverses the targets and calls the function Result with cumulated +// stats concerning the files and folders found. Select is used to decide which +// items should be included. Error is called when an error occurs. +type Scanner struct { + FS fs.FS + Select SelectFunc + Error ErrorFunc + Result func(item string, s ScanStats) +} + +// NewScanner initializes a new Scanner. +func NewScanner(fs fs.FS) *Scanner { + return &Scanner{ + FS: fs, + Select: func(item string, fi os.FileInfo) bool { + return true + }, + Error: func(item string, fi os.FileInfo, err error) error { + return err + }, + Result: func(item string, s ScanStats) {}, + } +} + +// ScanStats collect statistics. +type ScanStats struct { + Files, Dirs, Others uint + Bytes uint64 +} + +// Scan traverses the targets. The function Result is called for each new item +// found, the complete result is also returned by Scan. 
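+// More precisely, the cumulative ScanStats are passed to Result with an
+// empty item string once traversal finishes. An illustrative sketch:
+//
+//	sc := NewScanner(fs.Local{})
+//	sc.Result = func(item string, s ScanStats) {
+//		if item == "" {
+//			fmt.Printf("scanned %d files, %d dirs, %d bytes\n", s.Files, s.Dirs, s.Bytes)
+//		}
+//	}
+//	err := sc.Scan(context.Background(), []string{"."})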
+func (s *Scanner) Scan(ctx context.Context, targets []string) error { + var stats ScanStats + for _, target := range targets { + abstarget, err := s.FS.Abs(target) + if err != nil { + return err + } + + stats, err = s.scan(ctx, stats, abstarget) + if err != nil { + return err + } + + if ctx.Err() != nil { + return ctx.Err() + } + } + + s.Result("", stats) + return nil +} + +func (s *Scanner) scan(ctx context.Context, stats ScanStats, target string) (ScanStats, error) { + if ctx.Err() != nil { + return stats, ctx.Err() + } + + fi, err := s.FS.Lstat(target) + if err != nil { + // ignore error if the target is to be excluded anyway + if !s.Select(target, nil) { + return stats, nil + } + + // else return filtered error + return stats, s.Error(target, fi, err) + } + + if !s.Select(target, fi) { + return stats, nil + } + + switch { + case fi.Mode().IsRegular(): + stats.Files++ + stats.Bytes += uint64(fi.Size()) + case fi.Mode().IsDir(): + if ctx.Err() != nil { + return stats, ctx.Err() + } + + names, err := readdirnames(s.FS, target) + if err != nil { + return stats, s.Error(target, fi, err) + } + + for _, name := range names { + stats, err = s.scan(ctx, stats, filepath.Join(target, name)) + if err != nil { + return stats, err + } + } + stats.Dirs++ + default: + stats.Others++ + } + + s.Result(target, stats) + return stats, nil +} diff --git a/internal/archiver/scanner_test.go b/internal/archiver/scanner_test.go new file mode 100644 index 000000000..91b8d7f63 --- /dev/null +++ b/internal/archiver/scanner_test.go @@ -0,0 +1,333 @@ +package archiver + +import ( + "context" + "os" + "path/filepath" + "runtime" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/restic/restic/internal/fs" + restictest "github.com/restic/restic/internal/test" +) + +func TestScanner(t *testing.T) { + var tests = []struct { + name string + src TestDir + want map[string]ScanStats + selFn SelectFunc + }{ + { + name: "include-all", + src: TestDir{ + "other": TestFile{Content: "another file"}, + "work": TestDir{ + "foo": TestFile{Content: "foo"}, + "foo.txt": TestFile{Content: "foo text file"}, + "subdir": TestDir{ + "other": TestFile{Content: "other in subdir"}, + "bar.txt": TestFile{Content: "bar.txt in subdir"}, + }, + }, + }, + want: map[string]ScanStats{ + filepath.FromSlash("other"): ScanStats{Files: 1, Bytes: 12}, + filepath.FromSlash("work/foo"): ScanStats{Files: 2, Bytes: 15}, + filepath.FromSlash("work/foo.txt"): ScanStats{Files: 3, Bytes: 28}, + filepath.FromSlash("work/subdir/bar.txt"): ScanStats{Files: 4, Bytes: 45}, + filepath.FromSlash("work/subdir/other"): ScanStats{Files: 5, Bytes: 60}, + filepath.FromSlash("work/subdir"): ScanStats{Files: 5, Dirs: 1, Bytes: 60}, + filepath.FromSlash("work"): ScanStats{Files: 5, Dirs: 2, Bytes: 60}, + filepath.FromSlash("."): ScanStats{Files: 5, Dirs: 3, Bytes: 60}, + filepath.FromSlash(""): ScanStats{Files: 5, Dirs: 3, Bytes: 60}, + }, + }, + { + name: "select-txt", + src: TestDir{ + "other": TestFile{Content: "another file"}, + "work": TestDir{ + "foo": TestFile{Content: "foo"}, + "foo.txt": TestFile{Content: "foo text file"}, + "subdir": TestDir{ + "other": TestFile{Content: "other in subdir"}, + "bar.txt": TestFile{Content: "bar.txt in subdir"}, + }, + }, + }, + selFn: func(item string, fi os.FileInfo) bool { + if fi.IsDir() { + return true + } + + if filepath.Ext(item) == ".txt" { + return true + } + return false + }, + want: map[string]ScanStats{ + filepath.FromSlash("work/foo.txt"): ScanStats{Files: 1, Bytes: 13}, + 
filepath.FromSlash("work/subdir/bar.txt"): ScanStats{Files: 2, Bytes: 30}, + filepath.FromSlash("work/subdir"): ScanStats{Files: 2, Dirs: 1, Bytes: 30}, + filepath.FromSlash("work"): ScanStats{Files: 2, Dirs: 2, Bytes: 30}, + filepath.FromSlash("."): ScanStats{Files: 2, Dirs: 3, Bytes: 30}, + filepath.FromSlash(""): ScanStats{Files: 2, Dirs: 3, Bytes: 30}, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + tempdir, cleanup := restictest.TempDir(t) + defer cleanup() + + TestCreateFiles(t, tempdir, test.src) + + back := fs.TestChdir(t, tempdir) + defer back() + + cur, err := os.Getwd() + if err != nil { + t.Fatal(err) + } + + sc := NewScanner(fs.Track{fs.Local{}}) + if test.selFn != nil { + sc.Select = test.selFn + } + + results := make(map[string]ScanStats) + sc.Result = func(item string, s ScanStats) { + var p string + var err error + + if item != "" { + p, err = filepath.Rel(cur, item) + if err != nil { + panic(err) + } + } + + results[p] = s + } + + err = sc.Scan(ctx, []string{"."}) + if err != nil { + t.Fatal(err) + } + + if !cmp.Equal(test.want, results) { + t.Error(cmp.Diff(test.want, results)) + } + }) + } +} + +func TestScannerError(t *testing.T) { + var tests = []struct { + name string + unix bool + src TestDir + result ScanStats + selFn SelectFunc + errFn func(t testing.TB, item string, fi os.FileInfo, err error) error + resFn func(t testing.TB, item string, s ScanStats) + prepare func(t testing.TB) + }{ + { + name: "no-error", + src: TestDir{ + "other": TestFile{Content: "another file"}, + "work": TestDir{ + "foo": TestFile{Content: "foo"}, + "foo.txt": TestFile{Content: "foo text file"}, + "subdir": TestDir{ + "other": TestFile{Content: "other in subdir"}, + "bar.txt": TestFile{Content: "bar.txt in subdir"}, + }, + }, + }, + result: ScanStats{Files: 5, Dirs: 3, Bytes: 60}, + }, + { + name: "unreadable-dir", + unix: true, + src: TestDir{ + "other": TestFile{Content: "another file"}, + "work": TestDir{ + "foo": TestFile{Content: "foo"}, + "foo.txt": TestFile{Content: "foo text file"}, + "subdir": TestDir{ + "other": TestFile{Content: "other in subdir"}, + "bar.txt": TestFile{Content: "bar.txt in subdir"}, + }, + }, + }, + result: ScanStats{Files: 3, Dirs: 2, Bytes: 28}, + prepare: func(t testing.TB) { + err := os.Chmod(filepath.Join("work", "subdir"), 0000) + if err != nil { + t.Fatal(err) + } + }, + errFn: func(t testing.TB, item string, fi os.FileInfo, err error) error { + if item == filepath.FromSlash("work/subdir") { + return nil + } + + return err + }, + }, + { + name: "removed-item", + src: TestDir{ + "bar": TestFile{Content: "bar"}, + "baz": TestFile{Content: "baz"}, + "foo": TestFile{Content: "foo"}, + "other": TestFile{Content: "other"}, + }, + result: ScanStats{Files: 3, Dirs: 1, Bytes: 11}, + resFn: func(t testing.TB, item string, s ScanStats) { + if item == "bar" { + err := os.Remove("foo") + if err != nil { + t.Fatal(err) + } + } + }, + errFn: func(t testing.TB, item string, fi os.FileInfo, err error) error { + if item == "foo" { + t.Logf("ignoring error for %v: %v", item, err) + return nil + } + + return err + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if test.unix && runtime.GOOS == "windows" { + t.Skipf("skip on windows") + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + tempdir, cleanup := restictest.TempDir(t) + defer cleanup() + + TestCreateFiles(t, tempdir, test.src) + + 
back := fs.TestChdir(t, tempdir) + defer back() + + cur, err := os.Getwd() + if err != nil { + t.Fatal(err) + } + + if test.prepare != nil { + test.prepare(t) + } + + sc := NewScanner(fs.Track{fs.Local{}}) + if test.selFn != nil { + sc.Select = test.selFn + } + + var stats ScanStats + + sc.Result = func(item string, s ScanStats) { + if item == "" { + stats = s + return + } + + if test.resFn != nil { + p, relErr := filepath.Rel(cur, item) + if relErr != nil { + panic(relErr) + } + test.resFn(t, p, s) + } + } + if test.errFn != nil { + sc.Error = func(item string, fi os.FileInfo, err error) error { + p, relErr := filepath.Rel(cur, item) + if relErr != nil { + panic(relErr) + } + + return test.errFn(t, p, fi, err) + } + } + + err = sc.Scan(ctx, []string{"."}) + if err != nil { + t.Fatal(err) + } + + if stats != test.result { + t.Errorf("wrong final result, want\n %#v\ngot:\n %#v", test.result, stats) + } + }) + } +} + +func TestScannerCancel(t *testing.T) { + src := TestDir{ + "bar": TestFile{Content: "bar"}, + "baz": TestFile{Content: "baz"}, + "foo": TestFile{Content: "foo"}, + "other": TestFile{Content: "other"}, + } + + result := ScanStats{Files: 2, Bytes: 6} + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + tempdir, cleanup := restictest.TempDir(t) + defer cleanup() + + TestCreateFiles(t, tempdir, src) + + back := fs.TestChdir(t, tempdir) + defer back() + + cur, err := os.Getwd() + if err != nil { + t.Fatal(err) + } + + sc := NewScanner(fs.Track{fs.Local{}}) + var lastStats ScanStats + sc.Result = func(item string, s ScanStats) { + lastStats = s + + if item == filepath.Join(cur, "baz") { + t.Logf("found baz") + cancel() + } + } + + err = sc.Scan(ctx, []string{"."}) + if err == nil { + t.Errorf("did not find expected error") + } + + if err != context.Canceled { + t.Errorf("unexpected error found, want %v, got %v", context.Canceled, err) + } + + if lastStats != result { + t.Errorf("wrong final result, want\n %#v\ngot:\n %#v", result, lastStats) + } +} diff --git a/internal/archiver/testing.go b/internal/archiver/testing.go index d700135b4..bdb122d69 100644 --- a/internal/archiver/testing.go +++ b/internal/archiver/testing.go @@ -2,18 +2,342 @@ package archiver import ( "context" + "io/ioutil" + "os" + "path" + "path/filepath" + "runtime" + "strings" "testing" "time" + "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/fs" "github.com/restic/restic/internal/restic" + "github.com/restic/restic/internal/test" ) // TestSnapshot creates a new snapshot of path. func TestSnapshot(t testing.TB, repo restic.Repository, path string, parent *restic.ID) *restic.Snapshot { - arch := New(repo) - sn, _, err := arch.Snapshot(context.TODO(), nil, []string{path}, []string{"test"}, "localhost", parent, time.Now()) + arch := New(repo, fs.Local{}, Options{}) + opts := SnapshotOptions{ + Time: time.Now(), + Hostname: "localhost", + Tags: []string{"test"}, + } + if parent != nil { + opts.ParentSnapshot = *parent + } + sn, _, err := arch.Snapshot(context.TODO(), []string{path}, opts) if err != nil { t.Fatal(err) } return sn } + +// TestDir describes a directory structure to create for a test. +type TestDir map[string]interface{} + +func (d TestDir) String() string { + return "" +} + +// TestFile describes a file created for a test. +type TestFile struct { + Content string +} + +func (f TestFile) String() string { + return "" +} + +// TestSymlink describes a symlink created for a test. 
+type TestSymlink struct { + Target string +} + +func (s TestSymlink) String() string { + return "" +} + +// TestCreateFiles creates a directory structure described by dir at target, +// which must already exist. On Windows, symlinks aren't created. +func TestCreateFiles(t testing.TB, target string, dir TestDir) { + test.Helper(t).Helper() + for name, item := range dir { + targetPath := filepath.Join(target, name) + + switch it := item.(type) { + case TestFile: + err := ioutil.WriteFile(targetPath, []byte(it.Content), 0644) + if err != nil { + t.Fatal(err) + } + case TestSymlink: + if runtime.GOOS == "windows" { + continue + } + + err := fs.Symlink(filepath.FromSlash(it.Target), targetPath) + if err != nil { + t.Fatal(err) + } + case TestDir: + err := fs.Mkdir(targetPath, 0755) + if err != nil { + t.Fatal(err) + } + + TestCreateFiles(t, targetPath, it) + } + } +} + +// TestWalkFunc is used by TestWalkFiles to traverse the dir. When an error is +// returned, traversal stops and the surrounding test is marked as failed. +type TestWalkFunc func(path string, item interface{}) error + +// TestWalkFiles runs fn for each file/directory in dir, the filename will be +// constructed with target as the prefix. Symlinks on Windows are ignored. +func TestWalkFiles(t testing.TB, target string, dir TestDir, fn TestWalkFunc) { + test.Helper(t).Helper() + for name, item := range dir { + targetPath := filepath.Join(target, name) + + err := fn(targetPath, item) + if err != nil { + t.Fatalf("TestWalkFunc returned error for %v: %v", targetPath, err) + return + } + + if dir, ok := item.(TestDir); ok { + TestWalkFiles(t, targetPath, dir, fn) + } + } +} + +// fixpath removes UNC paths (starting with `\\?`) on windows. On Linux, it's a noop. +func fixpath(item string) string { + if runtime.GOOS != "windows" { + return item + } + if strings.HasPrefix(item, `\\?`) { + return item[4:] + } + return item +} + +// TestEnsureFiles tests if the directory structure at target is the same as +// described in dir. 
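+//
+// An illustrative round trip (hypothetical test body): create the structure,
+// then verify it against the same description:
+//
+//	dir := TestDir{
+//		"config": TestFile{Content: "foo"},
+//		"subdir": TestDir{
+//			"data": TestFile{Content: "bar"},
+//		},
+//	}
+//	TestCreateFiles(t, tempdir, dir)
+//	TestEnsureFiles(t, tempdir, dir)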
+func TestEnsureFiles(t testing.TB, target string, dir TestDir) { + test.Helper(t).Helper() + pathsChecked := make(map[string]struct{}) + + // first, test that all items are there + TestWalkFiles(t, target, dir, func(path string, item interface{}) error { + // ignore symlinks on Windows + if _, ok := item.(TestSymlink); ok && runtime.GOOS == "windows" { + // mark paths and parents as checked + pathsChecked[path] = struct{}{} + for parent := filepath.Dir(path); parent != target; parent = filepath.Dir(parent) { + pathsChecked[parent] = struct{}{} + } + return nil + } + + fi, err := fs.Lstat(path) + if err != nil { + return err + } + + switch node := item.(type) { + case TestDir: + if !fi.IsDir() { + t.Errorf("is not a directory: %v", path) + } + return nil + case TestFile: + if !fs.IsRegularFile(fi) { + t.Errorf("is not a regular file: %v", path) + return nil + } + + content, err := ioutil.ReadFile(path) + if err != nil { + return err + } + + if string(content) != node.Content { + t.Errorf("wrong content for %v, want %q, got %q", path, node.Content, content) + } + case TestSymlink: + if fi.Mode()&os.ModeType != os.ModeSymlink { + t.Errorf("is not a symlink: %v", path) + return nil + } + + target, err := fs.Readlink(path) + if err != nil { + return err + } + + if target != node.Target { + t.Errorf("wrong target for %v, want %v, got %v", path, node.Target, target) + } + } + + pathsChecked[path] = struct{}{} + + for parent := filepath.Dir(path); parent != target; parent = filepath.Dir(parent) { + pathsChecked[parent] = struct{}{} + } + + return nil + }) + + // then, traverse the directory again, looking for additional files + err := fs.Walk(target, func(path string, fi os.FileInfo, err error) error { + if err != nil { + return err + } + + path = fixpath(path) + + if path == target { + return nil + } + + _, ok := pathsChecked[path] + if !ok { + t.Errorf("additional item found: %v %v", path, fi.Mode()) + } + + return nil + }) + if err != nil { + t.Fatal(err) + } +} + +// TestEnsureFileContent checks if the file in the repo is the same as file. +func TestEnsureFileContent(ctx context.Context, t testing.TB, repo restic.Repository, filename string, node *restic.Node, file TestFile) { + if int(node.Size) != len(file.Content) { + t.Fatalf("%v: wrong node size: want %d, got %d", filename, node.Size, len(file.Content)) + return + } + + content := make([]byte, restic.CiphertextLength(len(file.Content))) + pos := 0 + for _, id := range node.Content { + n, err := repo.LoadBlob(ctx, restic.DataBlob, id, content[pos:]) + if err != nil { + t.Fatalf("error loading blob %v: %v", id.Str(), err) + return + } + + pos += n + } + + content = content[:pos] + + if string(content) != file.Content { + t.Fatalf("%v: wrong content returned, want %q, got %q", filename, file.Content, content) + } +} + +// TestEnsureTree checks that the tree ID in the repo matches dir. On Windows, +// Symlinks are ignored. 
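+//
+// An illustrative sketch (hypothetical): after archiving a TestDir, the
+// stored tree can be checked against the original description:
+//
+//	sn := TestSnapshot(t, repo, ".", nil)
+//	TestEnsureTree(ctx, t, "/", repo, *sn.Tree, dir)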
+func TestEnsureTree(ctx context.Context, t testing.TB, prefix string, repo restic.Repository, treeID restic.ID, dir TestDir) { + test.Helper(t).Helper() + + tree, err := repo.LoadTree(ctx, treeID) + if err != nil { + t.Fatal(err) + return + } + + var nodeNames []string + for _, node := range tree.Nodes { + nodeNames = append(nodeNames, node.Name) + } + debug.Log("%v (%v) %v", prefix, treeID.Str(), nodeNames) + + checked := make(map[string]struct{}) + for _, node := range tree.Nodes { + nodePrefix := path.Join(prefix, node.Name) + + entry, ok := dir[node.Name] + if !ok { + t.Errorf("unexpected tree node %q found, want: %#v", node.Name, dir) + return + } + + checked[node.Name] = struct{}{} + + switch e := entry.(type) { + case TestDir: + if node.Type != "dir" { + t.Errorf("tree node %v has wrong type %q, want %q", nodePrefix, node.Type, "dir") + return + } + + if node.Subtree == nil { + t.Errorf("tree node %v has nil subtree", nodePrefix) + return + } + + TestEnsureTree(ctx, t, path.Join(prefix, node.Name), repo, *node.Subtree, e) + case TestFile: + if node.Type != "file" { + t.Errorf("tree node %v has wrong type %q, want %q", nodePrefix, node.Type, "file") + } + TestEnsureFileContent(ctx, t, repo, nodePrefix, node, e) + case TestSymlink: + // skip symlinks on windows + if runtime.GOOS == "windows" { + continue + } + if node.Type != "symlink" { + t.Errorf("tree node %v has wrong type %q, want %q", nodePrefix, node.Type, "file") + } + + if e.Target != node.LinkTarget { + t.Errorf("symlink %v has wrong target, want %q, got %q", nodePrefix, e.Target, node.LinkTarget) + } + } + } + + for name := range dir { + // skip checking symlinks on Windows + entry := dir[name] + if _, ok := entry.(TestSymlink); ok && runtime.GOOS == "windows" { + continue + } + + _, ok := checked[name] + if !ok { + t.Errorf("tree %v: expected node %q not found, has: %v", prefix, name, nodeNames) + } + } +} + +// TestEnsureSnapshot tests if the snapshot in the repo has exactly the same +// structure as dir. On Windows, Symlinks are ignored. +func TestEnsureSnapshot(t testing.TB, repo restic.Repository, snapshotID restic.ID, dir TestDir) { + test.Helper(t).Helper() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + sn, err := restic.LoadSnapshot(ctx, repo, snapshotID) + if err != nil { + t.Fatal(err) + return + } + + if sn.Tree == nil { + t.Fatal("snapshot has nil tree ID") + return + } + + TestEnsureTree(ctx, t, "/", repo, *sn.Tree, dir) +} diff --git a/internal/archiver/testing_test.go b/internal/archiver/testing_test.go new file mode 100644 index 000000000..2f0a5f5d8 --- /dev/null +++ b/internal/archiver/testing_test.go @@ -0,0 +1,530 @@ +package archiver + +import ( + "context" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/restic/restic/internal/fs" + "github.com/restic/restic/internal/repository" + restictest "github.com/restic/restic/internal/test" +) + +// MockT passes through all logging functions from T, but catches Fail(), +// Error/f() and Fatal/f(). It is used to test test helper functions. +type MockT struct { + *testing.T + HasFailed bool +} + +// Fail marks the function as having failed but continues execution. +func (t *MockT) Fail() { + t.T.Log("MockT Fail() called") + t.HasFailed = true +} + +// Fatal is equivalent to Log followed by FailNow. 
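+// For MockT it only logs and records the failure instead of aborting, so the
+// helper under test keeps running, e.g. (illustrative):
+//
+//	mock := &MockT{T: t}
+//	TestEnsureFiles(mock, tempdir, want)
+//	if !mock.HasFailed {
+//		t.Fatal("expected failure of TestEnsureFiles not found")
+//	}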
+func (t *MockT) Fatal(args ...interface{}) { + t.T.Logf("MockT Fatal called with %v", args) + t.HasFailed = true +} + +// Fatalf is equivalent to Logf followed by FailNow. +func (t *MockT) Fatalf(msg string, args ...interface{}) { + t.T.Logf("MockT Fatal called: "+msg, args...) + t.HasFailed = true +} + +// Error is equivalent to Log followed by Fail. +func (t *MockT) Error(args ...interface{}) { + t.T.Logf("MockT Error called with %v", args) + t.HasFailed = true +} + +// Errorf is equivalent to Logf followed by Fail. +func (t *MockT) Errorf(msg string, args ...interface{}) { + t.T.Logf("MockT Error called: "+msg, args...) + t.HasFailed = true +} + +func createFilesAt(t testing.TB, targetdir string, files map[string]interface{}) { + for name, item := range files { + target := filepath.Join(targetdir, filepath.FromSlash(name)) + err := fs.MkdirAll(filepath.Dir(target), 0700) + if err != nil { + t.Fatal(err) + } + + switch it := item.(type) { + case TestFile: + err := ioutil.WriteFile(target, []byte(it.Content), 0600) + if err != nil { + t.Fatal(err) + } + case TestSymlink: + // ignore symlinks on windows + if runtime.GOOS == "windows" { + continue + } + err := fs.Symlink(filepath.FromSlash(it.Target), target) + if err != nil { + t.Fatal(err) + } + } + } +} + +func TestTestCreateFiles(t *testing.T) { + var tests = []struct { + dir TestDir + files map[string]interface{} + }{ + { + dir: TestDir{ + "foo": TestFile{Content: "foo"}, + "subdir": TestDir{ + "subfile": TestFile{Content: "bar"}, + }, + "sub": TestDir{ + "subsub": TestDir{ + "link": TestSymlink{Target: "x/y/z"}, + }, + }, + }, + files: map[string]interface{}{ + "foo": TestFile{Content: "foo"}, + "subdir": TestDir{}, + "subdir/subfile": TestFile{Content: "bar"}, + "sub/subsub/link": TestSymlink{Target: "x/y/z"}, + }, + }, + } + + for i, test := range tests { + tempdir, cleanup := restictest.TempDir(t) + defer cleanup() + + t.Run("", func(t *testing.T) { + tempdir := filepath.Join(tempdir, fmt.Sprintf("test-%d", i)) + err := fs.MkdirAll(tempdir, 0700) + if err != nil { + t.Fatal(err) + } + + TestCreateFiles(t, tempdir, test.dir) + + for name, item := range test.files { + // don't check symlinks on windows + if runtime.GOOS == "windows" { + if _, ok := item.(TestSymlink); ok { + continue + } + continue + } + + targetPath := filepath.Join(tempdir, filepath.FromSlash(name)) + fi, err := fs.Lstat(targetPath) + if err != nil { + t.Error(err) + continue + } + + switch node := item.(type) { + case TestFile: + if !fs.IsRegularFile(fi) { + t.Errorf("is not regular file: %v", name) + continue + } + + content, err := ioutil.ReadFile(targetPath) + if err != nil { + t.Error(err) + continue + } + + if string(content) != node.Content { + t.Errorf("wrong content for %v: want %q, got %q", name, node.Content, content) + } + case TestSymlink: + if fi.Mode()&os.ModeType != os.ModeSymlink { + t.Errorf("is not symlink: %v, %o != %o", name, fi.Mode(), os.ModeSymlink) + continue + } + + target, err := fs.Readlink(targetPath) + if err != nil { + t.Error(err) + continue + } + + if target != node.Target { + t.Errorf("wrong target for %v: want %q, got %q", name, node.Target, target) + } + case TestDir: + if !fi.IsDir() { + t.Errorf("is not directory: %v", name) + } + } + } + }) + } +} + +func TestTestWalkFiles(t *testing.T) { + var tests = []struct { + dir TestDir + want map[string]string + }{ + { + dir: TestDir{ + "foo": TestFile{Content: "foo"}, + "subdir": TestDir{ + "subfile": TestFile{Content: "bar"}, + }, + "x": TestDir{ + "y": TestDir{ + "link": 
TestSymlink{Target: filepath.FromSlash("../../foo")}, + }, + }, + }, + want: map[string]string{ + "foo": "", + "subdir": "", + filepath.FromSlash("subdir/subfile"): "", + "x": "", + filepath.FromSlash("x/y"): "", + filepath.FromSlash("x/y/link"): "", + }, + }, + } + + for _, test := range tests { + t.Run("", func(t *testing.T) { + tempdir, cleanup := restictest.TempDir(t) + defer cleanup() + + got := make(map[string]string) + + TestCreateFiles(t, tempdir, test.dir) + TestWalkFiles(t, tempdir, test.dir, func(path string, item interface{}) error { + p, err := filepath.Rel(tempdir, path) + if err != nil { + return err + } + + got[p] = fmt.Sprintf("%v", item) + return nil + }) + + if !cmp.Equal(test.want, got) { + t.Error(cmp.Diff(test.want, got)) + } + }) + } +} + +func TestTestEnsureFiles(t *testing.T) { + var tests = []struct { + expectFailure bool + files map[string]interface{} + want TestDir + unixOnly bool + }{ + { + files: map[string]interface{}{ + "foo": TestFile{Content: "foo"}, + "subdir/subfile": TestFile{Content: "bar"}, + "x/y/link": TestSymlink{Target: "../../foo"}, + }, + want: TestDir{ + "foo": TestFile{Content: "foo"}, + "subdir": TestDir{ + "subfile": TestFile{Content: "bar"}, + }, + "x": TestDir{ + "y": TestDir{ + "link": TestSymlink{Target: "../../foo"}, + }, + }, + }, + }, + { + expectFailure: true, + files: map[string]interface{}{ + "foo": TestFile{Content: "foo"}, + }, + want: TestDir{ + "foo": TestFile{Content: "foo"}, + "subdir": TestDir{ + "subfile": TestFile{Content: "bar"}, + }, + }, + }, + { + expectFailure: true, + files: map[string]interface{}{ + "foo": TestFile{Content: "foo"}, + "subdir/subfile": TestFile{Content: "bar"}, + }, + want: TestDir{ + "foo": TestFile{Content: "foo"}, + }, + }, + { + expectFailure: true, + files: map[string]interface{}{ + "foo": TestFile{Content: "xxx"}, + }, + want: TestDir{ + "foo": TestFile{Content: "foo"}, + }, + }, + { + expectFailure: true, + files: map[string]interface{}{ + "foo": TestSymlink{Target: "/xxx"}, + }, + want: TestDir{ + "foo": TestFile{Content: "foo"}, + }, + }, + { + expectFailure: true, + unixOnly: true, + files: map[string]interface{}{ + "foo": TestFile{Content: "foo"}, + }, + want: TestDir{ + "foo": TestSymlink{Target: "/xxx"}, + }, + }, + { + expectFailure: true, + unixOnly: true, + files: map[string]interface{}{ + "foo": TestSymlink{Target: "xxx"}, + }, + want: TestDir{ + "foo": TestSymlink{Target: "/yyy"}, + }, + }, + { + expectFailure: true, + files: map[string]interface{}{ + "foo": TestDir{ + "foo": TestFile{Content: "foo"}, + }, + }, + want: TestDir{ + "foo": TestFile{Content: "foo"}, + }, + }, + { + expectFailure: true, + files: map[string]interface{}{ + "foo": TestFile{Content: "foo"}, + }, + want: TestDir{ + "foo": TestDir{ + "foo": TestFile{Content: "foo"}, + }, + }, + }, + } + + for _, test := range tests { + t.Run("", func(t *testing.T) { + if test.unixOnly && runtime.GOOS == "windows" { + t.Skip("skip on Windows") + return + } + + tempdir, cleanup := restictest.TempDir(t) + defer cleanup() + + createFilesAt(t, tempdir, test.files) + + subtestT := testing.TB(t) + if test.expectFailure { + subtestT = &MockT{T: t} + } + + TestEnsureFiles(subtestT, tempdir, test.want) + + if test.expectFailure && !subtestT.(*MockT).HasFailed { + t.Fatal("expected failure of TestEnsureFiles not found") + } + }) + } +} + +func TestTestEnsureSnapshot(t *testing.T) { + var tests = []struct { + expectFailure bool + files map[string]interface{} + want TestDir + unixOnly bool + }{ + { + files: map[string]interface{}{ + "foo": 
TestFile{Content: "foo"}, + filepath.FromSlash("subdir/subfile"): TestFile{Content: "bar"}, + filepath.FromSlash("x/y/link"): TestSymlink{Target: filepath.FromSlash("../../foo")}, + }, + want: TestDir{ + "target": TestDir{ + "foo": TestFile{Content: "foo"}, + "subdir": TestDir{ + "subfile": TestFile{Content: "bar"}, + }, + "x": TestDir{ + "y": TestDir{ + "link": TestSymlink{Target: filepath.FromSlash("../../foo")}, + }, + }, + }, + }, + }, + { + expectFailure: true, + files: map[string]interface{}{ + "foo": TestFile{Content: "foo"}, + }, + want: TestDir{ + "target": TestDir{ + "bar": TestFile{Content: "foo"}, + }, + }, + }, + { + expectFailure: true, + files: map[string]interface{}{ + "foo": TestFile{Content: "foo"}, + "bar": TestFile{Content: "bar"}, + }, + want: TestDir{ + "target": TestDir{ + "foo": TestFile{Content: "foo"}, + }, + }, + }, + { + expectFailure: true, + files: map[string]interface{}{ + "foo": TestFile{Content: "foo"}, + }, + want: TestDir{ + "target": TestDir{ + "foo": TestFile{Content: "foo"}, + "bar": TestFile{Content: "bar"}, + }, + }, + }, + { + expectFailure: true, + files: map[string]interface{}{ + "foo": TestFile{Content: "foo"}, + }, + want: TestDir{ + "target": TestDir{ + "foo": TestDir{ + "foo": TestFile{Content: "foo"}, + }, + }, + }, + }, + { + expectFailure: true, + files: map[string]interface{}{ + "foo": TestSymlink{Target: filepath.FromSlash("x/y/z")}, + }, + want: TestDir{ + "target": TestDir{ + "foo": TestFile{Content: "foo"}, + }, + }, + }, + { + expectFailure: true, + unixOnly: true, + files: map[string]interface{}{ + "foo": TestSymlink{Target: filepath.FromSlash("x/y/z")}, + }, + want: TestDir{ + "target": TestDir{ + "foo": TestSymlink{Target: filepath.FromSlash("x/y/z2")}, + }, + }, + }, + { + expectFailure: true, + files: map[string]interface{}{ + "foo": TestFile{Content: "foo"}, + }, + want: TestDir{ + "target": TestDir{ + "foo": TestFile{Content: "xxx"}, + }, + }, + }, + } + + for _, test := range tests { + t.Run("", func(t *testing.T) { + if test.unixOnly && runtime.GOOS == "windows" { + t.Skip("skip on Windows") + return + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + tempdir, cleanup := restictest.TempDir(t) + defer cleanup() + + targetDir := filepath.Join(tempdir, "target") + err := fs.Mkdir(targetDir, 0700) + if err != nil { + t.Fatal(err) + } + + createFilesAt(t, targetDir, test.files) + + back := fs.TestChdir(t, tempdir) + defer back() + + repo, cleanup := repository.TestRepository(t) + defer cleanup() + + arch := New(repo, fs.Local{}, Options{}) + opts := SnapshotOptions{ + Time: time.Now(), + Hostname: "localhost", + Tags: []string{"test"}, + } + _, id, err := arch.Snapshot(ctx, []string{"."}, opts) + if err != nil { + t.Fatal(err) + } + + t.Logf("snapshot saved as %v", id.Str()) + + subtestT := testing.TB(t) + if test.expectFailure { + subtestT = &MockT{T: t} + } + + TestEnsureSnapshot(subtestT, repo, id, test.want) + + if test.expectFailure && !subtestT.(*MockT).HasFailed { + t.Fatal("expected failure of TestEnsureSnapshot not found") + } + }) + } +} diff --git a/internal/archiver/tree.go b/internal/archiver/tree.go new file mode 100644 index 000000000..8adca2cc3 --- /dev/null +++ b/internal/archiver/tree.go @@ -0,0 +1,254 @@ +package archiver + +import ( + "fmt" + + "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/fs" +) + +// Tree recursively defines how a snapshot should look like when +// archived. 
+// +// When `Path` is set, this is a leaf node and the contents of `Path` should be +// inserted at this point in the tree. +// +// The attribute `Root` is used to distinguish between files/dirs which have +// the same name, but live in a separate directory on the local file system. +// +// `FileInfoPath` is used to extract metadata for intermediate (=non-leaf) +// trees. +type Tree struct { + Nodes map[string]Tree + Path string // where the files/dirs to be saved are found + FileInfoPath string // where the dir can be found that is not included itself, but its subdirs + Root string // parent directory of the tree +} + +// pathComponents returns all path components of p. If a virtual directory +// (volume name on Windows) is added, virtualPrefix is set to true. See the +// tests for examples. +func pathComponents(fs fs.FS, p string, includeRelative bool) (components []string, virtualPrefix bool) { + volume := fs.VolumeName(p) + + if !fs.IsAbs(p) { + if !includeRelative { + p = fs.Join(fs.Separator(), p) + } + } + + p = fs.Clean(p) + + for { + dir, file := fs.Dir(p), fs.Base(p) + + if p == dir { + break + } + + components = append(components, file) + p = dir + } + + // reverse components + for i := len(components)/2 - 1; i >= 0; i-- { + opp := len(components) - 1 - i + components[i], components[opp] = components[opp], components[i] + } + + if volume != "" { + // strip colon + if len(volume) == 2 && volume[1] == ':' { + volume = volume[:1] + } + + components = append([]string{volume}, components...) + virtualPrefix = true + } + + return components, virtualPrefix +} + +// rootDirectory returns the directory which contains the first element of target. +func rootDirectory(fs fs.FS, target string) string { + if target == "" { + return "" + } + + if fs.IsAbs(target) { + return fs.Join(fs.VolumeName(target), fs.Separator()) + } + + target = fs.Clean(target) + pc, _ := pathComponents(fs, target, true) + + rel := "." + for _, c := range pc { + if c == ".." { + rel = fs.Join(rel, c) + } + } + + return rel +} + +// Add adds a new file or directory to the tree. +func (t *Tree) Add(fs fs.FS, path string) error { + if path == "" { + panic("invalid path (empty string)") + } + + if t.Nodes == nil { + t.Nodes = make(map[string]Tree) + } + + pc, virtualPrefix := pathComponents(fs, path, false) + if len(pc) == 0 { + return errors.New("invalid path (no path components)") + } + + name := pc[0] + root := rootDirectory(fs, path) + tree := Tree{Root: root} + + origName := name + i := 0 + for { + other, ok := t.Nodes[name] + if !ok { + break + } + + i++ + if other.Root == root { + tree = other + break + } + + // resolve conflict and try again + name = fmt.Sprintf("%s-%d", origName, i) + continue + } + + if len(pc) > 1 { + subroot := fs.Join(root, origName) + if virtualPrefix { + // use the original root dir if this is a virtual directory (volume name on Windows) + subroot = root + } + err := tree.add(fs, path, subroot, pc[1:]) + if err != nil { + return err + } + tree.FileInfoPath = subroot + } else { + tree.Path = path + } + + t.Nodes[name] = tree + return nil +} + +// add adds a new target path into the tree. 
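+//
+// For example (illustrative, matching the tests below), adding the targets
+// "foo/user1" and "foo/user2" produces an intermediate node "foo" with
+// FileInfoPath "foo" and two leaf nodes whose Path fields are "foo/user1"
+// and "foo/user2". Name collisions between different roots are resolved with
+// a numeric suffix such as "foo-1".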
+func (t *Tree) add(fs fs.FS, target, root string, pc []string) error { + if len(pc) == 0 { + return errors.Errorf("invalid path %q", target) + } + + if t.Nodes == nil { + t.Nodes = make(map[string]Tree) + } + + name := pc[0] + + if len(pc) == 1 { + tree, ok := t.Nodes[name] + + if !ok { + t.Nodes[name] = Tree{Path: target} + return nil + } + + if tree.Path != "" { + return errors.Errorf("path is already set for target %v", target) + } + tree.Path = target + t.Nodes[name] = tree + return nil + } + + tree := Tree{} + if other, ok := t.Nodes[name]; ok { + tree = other + } + + subroot := fs.Join(root, name) + tree.FileInfoPath = subroot + + err := tree.add(fs, target, subroot, pc[1:]) + if err != nil { + return err + } + t.Nodes[name] = tree + + return nil +} + +func (t Tree) String() string { + return formatTree(t, "") +} + +// formatTree returns a text representation of the tree t. +func formatTree(t Tree, indent string) (s string) { + for name, node := range t.Nodes { + if node.Path != "" { + s += fmt.Sprintf("%v/%v, src %q\n", indent, name, node.Path) + continue + } + s += fmt.Sprintf("%v/%v, root %q, meta %q\n", indent, name, node.Root, node.FileInfoPath) + s += formatTree(node, indent+" ") + } + return s +} + +// prune removes sub-trees of leaf nodes. +func prune(t *Tree) { + // if the current tree is a leaf node (Path is set), remove all nodes, + // those are automatically included anyway. + if t.Path != "" && len(t.Nodes) > 0 { + t.FileInfoPath = "" + t.Nodes = nil + return + } + + for i, subtree := range t.Nodes { + prune(&subtree) + t.Nodes[i] = subtree + } +} + +// NewTree creates a Tree from the target files/directories. +func NewTree(fs fs.FS, targets []string) (*Tree, error) { + debug.Log("targets: %v", targets) + tree := &Tree{} + seen := make(map[string]struct{}) + for _, target := range targets { + target = fs.Clean(target) + + // skip duplicate targets + if _, ok := seen[target]; ok { + continue + } + seen[target] = struct{}{} + + err := tree.Add(fs, target) + if err != nil { + return nil, err + } + } + + prune(tree) + debug.Log("result:\n%v", tree) + return tree, nil +} diff --git a/internal/archiver/tree_test.go b/internal/archiver/tree_test.go new file mode 100644 index 000000000..f50bb510f --- /dev/null +++ b/internal/archiver/tree_test.go @@ -0,0 +1,341 @@ +package archiver + +import ( + "path/filepath" + "runtime" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/restic/restic/internal/fs" +) + +func TestPathComponents(t *testing.T) { + var tests = []struct { + p string + c []string + virtual bool + rel bool + win bool + }{ + { + p: "/foo/bar/baz", + c: []string{"foo", "bar", "baz"}, + }, + { + p: "/foo/bar/baz", + c: []string{"foo", "bar", "baz"}, + rel: true, + }, + { + p: "foo/bar/baz", + c: []string{"foo", "bar", "baz"}, + }, + { + p: "foo/bar/baz", + c: []string{"foo", "bar", "baz"}, + rel: true, + }, + { + p: "../foo/bar/baz", + c: []string{"foo", "bar", "baz"}, + }, + { + p: "../foo/bar/baz", + c: []string{"..", "foo", "bar", "baz"}, + rel: true, + }, + { + p: "c:/foo/bar/baz", + c: []string{"c", "foo", "bar", "baz"}, + virtual: true, + rel: true, + win: true, + }, + { + p: "c:/foo/../bar/baz", + c: []string{"c", "bar", "baz"}, + virtual: true, + win: true, + }, + { + p: `c:\foo\..\bar\baz`, + c: []string{"c", "bar", "baz"}, + virtual: true, + win: true, + }, + { + p: "c:/foo/../bar/baz", + c: []string{"c", "bar", "baz"}, + virtual: true, + rel: true, + win: true, + }, + { + p: `c:\foo\..\bar\baz`, + c: []string{"c", "bar", "baz"}, + virtual: true, + 
rel: true, + win: true, + }, + } + + for _, test := range tests { + t.Run("", func(t *testing.T) { + if test.win && runtime.GOOS != "windows" { + t.Skip("skip test on unix") + } + + c, v := pathComponents(fs.Local{}, filepath.FromSlash(test.p), test.rel) + if !cmp.Equal(test.c, c) { + t.Error(test.c, c) + } + + if v != test.virtual { + t.Errorf("unexpected virtual prefix count returned, want %v, got %v", test.virtual, v) + } + }) + } +} + +func TestRootDirectory(t *testing.T) { + var tests = []struct { + target string + root string + unix bool + win bool + }{ + {target: ".", root: "."}, + {target: "foo/bar/baz", root: "."}, + {target: "../foo/bar/baz", root: ".."}, + {target: "..", root: ".."}, + {target: "../../..", root: "../../.."}, + {target: "/home/foo", root: "/", unix: true}, + {target: "c:/home/foo", root: "c:/", win: true}, + {target: `c:\home\foo`, root: `c:\`, win: true}, + {target: "//host/share/foo", root: "//host/share/", win: true}, + } + + for _, test := range tests { + t.Run("", func(t *testing.T) { + if test.unix && runtime.GOOS == "windows" { + t.Skip("skip test on windows") + } + if test.win && runtime.GOOS != "windows" { + t.Skip("skip test on unix") + } + + root := rootDirectory(fs.Local{}, filepath.FromSlash(test.target)) + want := filepath.FromSlash(test.root) + if root != want { + t.Fatalf("wrong root directory, want %v, got %v", want, root) + } + }) + } +} + +func TestTree(t *testing.T) { + var tests = []struct { + targets []string + want Tree + unix bool + win bool + mustError bool + }{ + { + targets: []string{"foo"}, + want: Tree{Nodes: map[string]Tree{ + "foo": Tree{Path: "foo", Root: "."}, + }}, + }, + { + targets: []string{"foo", "bar", "baz"}, + want: Tree{Nodes: map[string]Tree{ + "foo": Tree{Path: "foo", Root: "."}, + "bar": Tree{Path: "bar", Root: "."}, + "baz": Tree{Path: "baz", Root: "."}, + }}, + }, + { + targets: []string{"foo/user1", "foo/user2", "foo/other"}, + want: Tree{Nodes: map[string]Tree{ + "foo": Tree{Root: ".", FileInfoPath: "foo", Nodes: map[string]Tree{ + "user1": Tree{Path: filepath.FromSlash("foo/user1")}, + "user2": Tree{Path: filepath.FromSlash("foo/user2")}, + "other": Tree{Path: filepath.FromSlash("foo/other")}, + }}, + }}, + }, + { + targets: []string{"foo/work/user1", "foo/work/user2"}, + want: Tree{Nodes: map[string]Tree{ + "foo": Tree{Root: ".", FileInfoPath: "foo", Nodes: map[string]Tree{ + "work": Tree{FileInfoPath: filepath.FromSlash("foo/work"), Nodes: map[string]Tree{ + "user1": Tree{Path: filepath.FromSlash("foo/work/user1")}, + "user2": Tree{Path: filepath.FromSlash("foo/work/user2")}, + }}, + }}, + }}, + }, + { + targets: []string{"foo/user1", "bar/user1", "foo/other"}, + want: Tree{Nodes: map[string]Tree{ + "foo": Tree{Root: ".", FileInfoPath: "foo", Nodes: map[string]Tree{ + "user1": Tree{Path: filepath.FromSlash("foo/user1")}, + "other": Tree{Path: filepath.FromSlash("foo/other")}, + }}, + "bar": Tree{Root: ".", FileInfoPath: "bar", Nodes: map[string]Tree{ + "user1": Tree{Path: filepath.FromSlash("bar/user1")}, + }}, + }}, + }, + { + targets: []string{"../work"}, + want: Tree{Nodes: map[string]Tree{ + "work": Tree{Root: "..", Path: filepath.FromSlash("../work")}, + }}, + }, + { + targets: []string{"../work/other"}, + want: Tree{Nodes: map[string]Tree{ + "work": Tree{Root: "..", FileInfoPath: filepath.FromSlash("../work"), Nodes: map[string]Tree{ + "other": Tree{Path: filepath.FromSlash("../work/other")}, + }}, + }}, + }, + { + targets: []string{"foo/user1", "../work/other", "foo/user2"}, + want: Tree{Nodes: 
map[string]Tree{ + "foo": Tree{Root: ".", FileInfoPath: "foo", Nodes: map[string]Tree{ + "user1": Tree{Path: filepath.FromSlash("foo/user1")}, + "user2": Tree{Path: filepath.FromSlash("foo/user2")}, + }}, + "work": Tree{Root: "..", FileInfoPath: filepath.FromSlash("../work"), Nodes: map[string]Tree{ + "other": Tree{Path: filepath.FromSlash("../work/other")}, + }}, + }}, + }, + { + targets: []string{"foo/user1", "../foo/other", "foo/user2"}, + want: Tree{Nodes: map[string]Tree{ + "foo": Tree{Root: ".", FileInfoPath: "foo", Nodes: map[string]Tree{ + "user1": Tree{Path: filepath.FromSlash("foo/user1")}, + "user2": Tree{Path: filepath.FromSlash("foo/user2")}, + }}, + "foo-1": Tree{Root: "..", FileInfoPath: filepath.FromSlash("../foo"), Nodes: map[string]Tree{ + "other": Tree{Path: filepath.FromSlash("../foo/other")}, + }}, + }}, + }, + { + targets: []string{"foo/work", "foo/work/user2"}, + want: Tree{Nodes: map[string]Tree{ + "foo": Tree{Root: ".", FileInfoPath: "foo", Nodes: map[string]Tree{ + "work": Tree{ + Path: filepath.FromSlash("foo/work"), + }, + }}, + }}, + }, + { + targets: []string{"foo/work/user2", "foo/work"}, + want: Tree{Nodes: map[string]Tree{ + "foo": Tree{Root: ".", FileInfoPath: "foo", Nodes: map[string]Tree{ + "work": Tree{ + Path: filepath.FromSlash("foo/work"), + }, + }}, + }}, + }, + { + targets: []string{"foo/work/user2/data/secret", "foo"}, + want: Tree{Nodes: map[string]Tree{ + "foo": Tree{Root: ".", Path: "foo"}, + }}, + }, + { + unix: true, + targets: []string{"/mnt/driveA", "/mnt/driveA/work/driveB"}, + want: Tree{Nodes: map[string]Tree{ + "mnt": Tree{Root: "/", FileInfoPath: filepath.FromSlash("/mnt"), Nodes: map[string]Tree{ + "driveA": Tree{ + Path: filepath.FromSlash("/mnt/driveA"), + }, + }}, + }}, + }, + { + targets: []string{"foo/work/user", "foo/work/user"}, + want: Tree{Nodes: map[string]Tree{ + "foo": Tree{Root: ".", FileInfoPath: "foo", Nodes: map[string]Tree{ + "work": Tree{FileInfoPath: filepath.FromSlash("foo/work"), Nodes: map[string]Tree{ + "user": Tree{Path: filepath.FromSlash("foo/work/user")}, + }}, + }}, + }}, + }, + { + targets: []string{"./foo/work/user", "foo/work/user"}, + want: Tree{Nodes: map[string]Tree{ + "foo": Tree{Root: ".", FileInfoPath: "foo", Nodes: map[string]Tree{ + "work": Tree{FileInfoPath: filepath.FromSlash("foo/work"), Nodes: map[string]Tree{ + "user": Tree{Path: filepath.FromSlash("foo/work/user")}, + }}, + }}, + }}, + }, + { + win: true, + targets: []string{`c:\users\foobar\temp`}, + want: Tree{Nodes: map[string]Tree{ + "c": Tree{Root: `c:\`, FileInfoPath: `c:\`, Nodes: map[string]Tree{ + "users": Tree{FileInfoPath: `c:\users`, Nodes: map[string]Tree{ + "foobar": Tree{FileInfoPath: `c:\users\foobar`, Nodes: map[string]Tree{ + "temp": Tree{Path: `c:\users\foobar\temp`}, + }}, + }}, + }}, + }}, + }, + { + targets: []string{"."}, + mustError: true, + }, + { + targets: []string{".."}, + mustError: true, + }, + { + targets: []string{"../.."}, + mustError: true, + }, + } + + for _, test := range tests { + t.Run("", func(t *testing.T) { + if test.unix && runtime.GOOS == "windows" { + t.Skip("skip test on windows") + } + + if test.win && runtime.GOOS != "windows" { + t.Skip("skip test on unix") + } + + tree, err := NewTree(fs.Local{}, test.targets) + if test.mustError { + if err == nil { + t.Fatal("expected error, got nil") + } + t.Logf("found expected error: %v", err) + return + } + + if err != nil { + t.Fatal(err) + } + + if !cmp.Equal(&test.want, tree) { + t.Error(cmp.Diff(&test.want, tree)) + } + }) + } +} diff --git 
a/internal/checker/checker.go b/internal/checker/checker.go index 0b645caa1..432bfa742 100644 --- a/internal/checker/checker.go +++ b/internal/checker/checker.go @@ -569,12 +569,24 @@ func (c *Checker) checkTree(id restic.ID, tree *restic.Tree) (errs []error) { errs = append(errs, Error{TreeID: id, Err: errors.Errorf("file %q has nil blob list", node.Name)}) } + var size uint64 for b, blobID := range node.Content { if blobID.IsNull() { errs = append(errs, Error{TreeID: id, Err: errors.Errorf("file %q blob %d has null ID", node.Name, b)}) continue } blobs = append(blobs, blobID) + blobSize, found := c.repo.LookupBlobSize(blobID, restic.DataBlob) + if !found { + errs = append(errs, Error{TreeID: id, Err: errors.Errorf("file %q blob %d size could not be found", node.Name, b)}) + } + size += uint64(blobSize) + } + if size != node.Size { + errs = append(errs, Error{ + TreeID: id, + Err: errors.Errorf("file %q: metadata size (%v) and sum of blob sizes (%v) do not match", node.Name, node.Size, size), + }) } case "dir": if node.Subtree == nil { diff --git a/internal/checker/checker_test.go b/internal/checker/checker_test.go index 601407636..09ff15a10 100644 --- a/internal/checker/checker_test.go +++ b/internal/checker/checker_test.go @@ -9,7 +9,6 @@ import ( "path/filepath" "sort" "testing" - "time" "github.com/restic/restic/internal/archiver" "github.com/restic/restic/internal/checker" @@ -326,10 +325,8 @@ func TestCheckerModifiedData(t *testing.T) { repo, cleanup := repository.TestRepository(t) defer cleanup() - arch := archiver.New(repo) - _, id, err := arch.Snapshot(context.TODO(), nil, []string{"."}, nil, "localhost", nil, time.Now()) - test.OK(t, err) - t.Logf("archived as %v", id.Str()) + sn := archiver.TestSnapshot(t, repo, ".", nil) + t.Logf("archived as %v", sn.ID().Str()) beError := &errorBackend{Backend: repo.Backend()} checkRepo := repository.New(beError) diff --git a/internal/fs/const.go b/internal/fs/const.go new file mode 100644 index 000000000..dfa6ad5f0 --- /dev/null +++ b/internal/fs/const.go @@ -0,0 +1,16 @@ +package fs + +import "syscall" + +// Flags to OpenFile wrapping those of the underlying system. Not all flags may +// be implemented on a given system. +const ( + O_RDONLY int = syscall.O_RDONLY // open the file read-only. + O_WRONLY int = syscall.O_WRONLY // open the file write-only. + O_RDWR int = syscall.O_RDWR // open the file read-write. + O_APPEND int = syscall.O_APPEND // append data to the file when writing. + O_CREATE int = syscall.O_CREAT // create a new file if none exists. + O_EXCL int = syscall.O_EXCL // used with O_CREATE, file must not exist + O_SYNC int = syscall.O_SYNC // open for synchronous I/O. + O_TRUNC int = syscall.O_TRUNC // if possible, truncate file when opened. +) diff --git a/internal/fs/const_unix.go b/internal/fs/const_unix.go new file mode 100644 index 000000000..a90d171b1 --- /dev/null +++ b/internal/fs/const_unix.go @@ -0,0 +1,8 @@ +// +build !windows + +package fs + +import "syscall" + +// O_NOFOLLOW instructs the kernel to not follow symlinks when opening a file. +const O_NOFOLLOW int = syscall.O_NOFOLLOW diff --git a/internal/fs/const_windows.go b/internal/fs/const_windows.go new file mode 100644 index 000000000..18c89c27e --- /dev/null +++ b/internal/fs/const_windows.go @@ -0,0 +1,6 @@ +// +build windows + +package fs + +// O_NOFOLLOW is a noop on Windows. 
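+// Callers can therefore pass the flag unconditionally on all platforms, e.g.
+// (illustrative):
+//
+//	f, err := fs.Local{}.OpenFile(name, fs.O_RDONLY|fs.O_NOFOLLOW, 0)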
+const O_NOFOLLOW int = 0 diff --git a/internal/fs/file.go b/internal/fs/file.go index d055107b4..86c519aff 100644 --- a/internal/fs/file.go +++ b/internal/fs/file.go @@ -1,25 +1,11 @@ package fs import ( - "io" "os" "path/filepath" "time" ) -// File is an open file on a file system. -type File interface { - io.Reader - io.Writer - io.Closer - - Fd() uintptr - Readdirnames(n int) ([]string, error) - Readdir(int) ([]os.FileInfo, error) - Seek(int64, int) (int64, error) - Stat() (os.FileInfo, error) -} - // Mkdir creates a new directory with the specified name and permission bits. // If there is an error, it will be of type *PathError. func Mkdir(name string, perm os.FileMode) error { diff --git a/internal/fs/fs_local.go b/internal/fs/fs_local.go new file mode 100644 index 000000000..dd1faafa0 --- /dev/null +++ b/internal/fs/fs_local.go @@ -0,0 +1,96 @@ +package fs + +import ( + "os" + "path/filepath" +) + +// Local is the local file system. Most methods are just passed on to the stdlib. +type Local struct{} + +// statically ensure that Local implements FS. +var _ FS = &Local{} + +// VolumeName returns leading volume name. Given "C:\foo\bar" it returns "C:" +// on Windows. Given "\\host\share\foo" it returns "\\host\share". On other +// platforms it returns "". +func (fs Local) VolumeName(path string) string { + return filepath.VolumeName(path) +} + +// Open opens a file for reading. +func (fs Local) Open(name string) (File, error) { + f, err := os.Open(fixpath(name)) + if err != nil { + return nil, err + } + return f, nil +} + +// OpenFile is the generalized open call; most users will use Open +// or Create instead. It opens the named file with specified flag +// (O_RDONLY etc.) and perm, (0666 etc.) if applicable. If successful, +// methods on the returned File can be used for I/O. +// If there is an error, it will be of type *PathError. +func (fs Local) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + f, err := os.OpenFile(fixpath(name), flag, perm) + if err != nil { + return nil, err + } + return f, nil +} + +// Stat returns a FileInfo describing the named file. If there is an error, it +// will be of type *PathError. +func (fs Local) Stat(name string) (os.FileInfo, error) { + return os.Stat(fixpath(name)) +} + +// Lstat returns the FileInfo structure describing the named file. +// If the file is a symbolic link, the returned FileInfo +// describes the symbolic link. Lstat makes no attempt to follow the link. +// If there is an error, it will be of type *PathError. +func (fs Local) Lstat(name string) (os.FileInfo, error) { + return os.Lstat(fixpath(name)) +} + +// Join joins any number of path elements into a single path, adding a +// Separator if necessary. Join calls Clean on the result; in particular, all +// empty strings are ignored. On Windows, the result is a UNC path if and only +// if the first path element is a UNC path. +func (fs Local) Join(elem ...string) string { + return filepath.Join(elem...) +} + +// Separator returns the OS and FS dependent separator for dirs/subdirs/files. +func (fs Local) Separator() string { + return string(filepath.Separator) +} + +// IsAbs reports whether the path is absolute. +func (fs Local) IsAbs(path string) bool { + return filepath.IsAbs(path) +} + +// Abs returns an absolute representation of path. If the path is not absolute +// it will be joined with the current working directory to turn it into an +// absolute path. The absolute path name for a given file is not guaranteed to +// be unique. Abs calls Clean on the result. 
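+//
+// Both Local and the single-file Reader file system (below) satisfy the FS
+// interface, so callers can stay generic, e.g. (illustrative):
+//
+//	var fsys FS = Local{}
+//	abs, err := fsys.Abs("work/data")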
+func (fs Local) Abs(path string) (string, error) {
+	return filepath.Abs(path)
+}
+
+// Clean returns the cleaned path. For details, see filepath.Clean.
+func (fs Local) Clean(p string) string {
+	return filepath.Clean(p)
+}
+
+// Base returns the last element of path.
+func (fs Local) Base(path string) string {
+	return filepath.Base(path)
+}
+
+// Dir returns path without the last element.
+func (fs Local) Dir(path string) string {
+	return filepath.Dir(path)
+}
diff --git a/internal/fs/fs_reader.go b/internal/fs/fs_reader.go
new file mode 100644
index 000000000..385c8f92b
--- /dev/null
+++ b/internal/fs/fs_reader.go
@@ -0,0 +1,289 @@
+package fs
+
+import (
+	"io"
+	"os"
+	"path"
+	"sync"
+	"syscall"
+	"time"
+
+	"github.com/restic/restic/internal/errors"
+)
+
+// Reader is a file system which provides a directory containing a single
+// file. When this file is opened for reading, the reader is passed through.
+// The file can be opened only once; all subsequent open calls return
+// syscall.EIO. For Lstat(), the provided FileInfo is returned.
+type Reader struct {
+	Name string
+	io.ReadCloser
+
+	Mode    os.FileMode
+	ModTime time.Time
+	Size    int64
+
+	open sync.Once
+}
+
+// statically ensure that Reader implements FS.
+var _ FS = &Reader{}
+
+// VolumeName returns the leading volume name; for the Reader file system it
+// is always the empty string.
+func (fs *Reader) VolumeName(path string) string {
+	return ""
+}
+
+// Open opens a file for reading.
+func (fs *Reader) Open(name string) (f File, err error) {
+	switch name {
+	case fs.Name:
+		fs.open.Do(func() {
+			f = newReaderFile(fs.ReadCloser, fs.fi())
+		})
+
+		if f == nil {
+			return nil, syscall.EIO
+		}
+
+		return f, nil
+	case "/", ".":
+		f = fakeDir{
+			entries: []os.FileInfo{fs.fi()},
+		}
+		return f, nil
+	}
+
+	return nil, syscall.ENOENT
+}
+
+func (fs *Reader) fi() os.FileInfo {
+	return fakeFileInfo{
+		name:    fs.Name,
+		size:    fs.Size,
+		mode:    fs.Mode,
+		modtime: fs.ModTime,
+	}
+}
+
+// OpenFile is the generalized open call; most users will use Open
+// or Create instead. It opens the named file with specified flag
+// (O_RDONLY etc.) and perm, (0666 etc.) if applicable. If successful,
+// methods on the returned File can be used for I/O.
+// If there is an error, it will be of type *PathError.
+func (fs *Reader) OpenFile(name string, flag int, perm os.FileMode) (f File, err error) {
+	if flag & ^(O_RDONLY|O_NOFOLLOW) != 0 {
+		return nil, errors.Errorf("invalid combination of flags 0x%x", flag)
+	}
+
+	fs.open.Do(func() {
+		f = newReaderFile(fs.ReadCloser, fs.fi())
+	})
+
+	if f == nil {
+		return nil, syscall.EIO
+	}
+
+	return f, nil
+}
+
+// Stat returns a FileInfo describing the named file. If there is an error, it
+// will be of type *PathError.
+func (fs *Reader) Stat(name string) (os.FileInfo, error) {
+	return fs.Lstat(name)
+}
+
+// Lstat returns the FileInfo structure describing the named file.
+// If the file is a symbolic link, the returned FileInfo
+// describes the symbolic link. Lstat makes no attempt to follow the link.
+// If there is an error, it will be of type *PathError.
+func (fs *Reader) Lstat(name string) (os.FileInfo, error) {
+	switch name {
+	case fs.Name:
+		return fs.fi(), nil
+	case "/", ".":
+		fi := fakeFileInfo{
+			name:    name,
+			size:    0,
+			mode:    0755,
+			modtime: time.Now(),
+		}
+		return fi, nil
+	}
+
+	return nil, os.ErrNotExist
+}
+
+// Join joins any number of path elements into a single path, adding a
+// Separator if necessary. Join calls Clean on the result; in particular, all
+// empty strings are ignored. 
On Windows, the result is a UNC path if and only +// if the first path element is a UNC path. +func (fs *Reader) Join(elem ...string) string { + return path.Join(elem...) +} + +// Separator returns the OS and FS dependent separator for dirs/subdirs/files. +func (fs *Reader) Separator() string { + return "/" +} + +// IsAbs reports whether the path is absolute. For the Reader, this is always the case. +func (fs *Reader) IsAbs(p string) bool { + return true +} + +// Abs returns an absolute representation of path. If the path is not absolute +// it will be joined with the current working directory to turn it into an +// absolute path. The absolute path name for a given file is not guaranteed to +// be unique. Abs calls Clean on the result. +// +// For the Reader, all paths are absolute. +func (fs *Reader) Abs(p string) (string, error) { + return path.Clean(p), nil +} + +// Clean returns the cleaned path. For details, see filepath.Clean. +func (fs *Reader) Clean(p string) string { + return path.Clean(p) +} + +// Base returns the last element of p. +func (fs *Reader) Base(p string) string { + return path.Base(p) +} + +// Dir returns p without the last element. +func (fs *Reader) Dir(p string) string { + return path.Dir(p) +} + +func newReaderFile(rd io.ReadCloser, fi os.FileInfo) readerFile { + return readerFile{ + ReadCloser: rd, + fakeFile: fakeFile{ + FileInfo: fi, + name: fi.Name(), + }, + } +} + +type readerFile struct { + io.ReadCloser + fakeFile +} + +func (r readerFile) Read(p []byte) (int, error) { + return r.ReadCloser.Read(p) +} + +func (r readerFile) Close() error { + return r.ReadCloser.Close() +} + +// ensure that readerFile implements File +var _ File = readerFile{} + +// fakeFile implements all File methods, but only returns errors for anything +// except Stat() and Name(). +type fakeFile struct { + name string + os.FileInfo +} + +// ensure that fakeFile implements File +var _ File = fakeFile{} + +func (f fakeFile) Fd() uintptr { + return 0 +} + +func (f fakeFile) Readdirnames(n int) ([]string, error) { + return nil, os.ErrInvalid +} + +func (f fakeFile) Readdir(n int) ([]os.FileInfo, error) { + return nil, os.ErrInvalid +} + +func (f fakeFile) Seek(int64, int) (int64, error) { + return 0, os.ErrInvalid +} + +func (f fakeFile) Write(p []byte) (int, error) { + return 0, os.ErrInvalid +} + +func (f fakeFile) Read(p []byte) (int, error) { + return 0, os.ErrInvalid +} + +func (f fakeFile) Close() error { + return nil +} + +func (f fakeFile) Stat() (os.FileInfo, error) { + return f.FileInfo, nil +} + +func (f fakeFile) Name() string { + return f.name +} + +// fakeDir implements Readdirnames and Readdir, everything else is delegated to fakeFile. +type fakeDir struct { + entries []os.FileInfo + fakeFile +} + +func (d fakeDir) Readdirnames(n int) ([]string, error) { + if n >= 0 { + return nil, errors.New("not implemented") + } + names := make([]string, 0, len(d.entries)) + for _, entry := range d.entries { + names = append(names, entry.Name()) + } + + return names, nil +} + +func (d fakeDir) Readdir(n int) ([]os.FileInfo, error) { + if n >= 0 { + return nil, errors.New("not implemented") + } + return d.entries, nil +} + +// fakeFileInfo implements the bare minimum of os.FileInfo. 
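Before the fake `FileInfo`/file/dir types that back it are defined, here is a test-style sketch of the `Reader` contract described above: the single file can be opened exactly once, and any further `Open` returns `syscall.EIO`. This is a hypothetical test, not part of the change:

    package fs

    import (
    	"io/ioutil"
    	"strings"
    	"syscall"
    	"testing"
    	"time"
    )

    func TestReaderOpenOnce(t *testing.T) {
    	fsys := &Reader{
    		Name:       "stdin-file",
    		ReadCloser: ioutil.NopCloser(strings.NewReader("hello")),
    		Mode:       0644,
    		ModTime:    time.Now(),
    		Size:       5,
    	}

    	f, err := fsys.Open("stdin-file")
    	if err != nil {
    		t.Fatal(err)
    	}
    	_ = f.Close()

    	// the sync.Once has fired, so a second Open must fail
    	if _, err := fsys.Open("stdin-file"); err != syscall.EIO {
    		t.Fatalf("want syscall.EIO, got %v", err)
    	}
    }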
+type fakeFileInfo struct { + name string + size int64 + mode os.FileMode + modtime time.Time + sys interface{} +} + +func (fi fakeFileInfo) Name() string { + return fi.name +} + +func (fi fakeFileInfo) Size() int64 { + return fi.size +} + +func (fi fakeFileInfo) Mode() os.FileMode { + return fi.mode +} + +func (fi fakeFileInfo) ModTime() time.Time { + return fi.modtime +} + +func (fi fakeFileInfo) IsDir() bool { + return fi.mode&os.ModeDir > 0 +} + +func (fi fakeFileInfo) Sys() interface{} { + return fi.sys +} diff --git a/internal/fs/fs_reader_test.go b/internal/fs/fs_reader_test.go new file mode 100644 index 000000000..f4cb2bb34 --- /dev/null +++ b/internal/fs/fs_reader_test.go @@ -0,0 +1,319 @@ +package fs + +import ( + "bytes" + "io/ioutil" + "os" + "sort" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/restic/restic/internal/test" +) + +func verifyFileContentOpen(t testing.TB, fs FS, filename string, want []byte) { + f, err := fs.Open(filename) + if err != nil { + t.Fatal(err) + } + + buf, err := ioutil.ReadAll(f) + if err != nil { + t.Fatal(err) + } + + err = f.Close() + if err != nil { + t.Fatal(err) + } + + if !cmp.Equal(want, buf) { + t.Error(cmp.Diff(want, buf)) + } +} + +func verifyFileContentOpenFile(t testing.TB, fs FS, filename string, want []byte) { + f, err := fs.OpenFile(filename, O_RDONLY, 0) + if err != nil { + t.Fatal(err) + } + + buf, err := ioutil.ReadAll(f) + if err != nil { + t.Fatal(err) + } + + err = f.Close() + if err != nil { + t.Fatal(err) + } + + if !cmp.Equal(want, buf) { + t.Error(cmp.Diff(want, buf)) + } +} + +func verifyDirectoryContents(t testing.TB, fs FS, dir string, want []string) { + f, err := fs.Open(dir) + if err != nil { + t.Fatal(err) + } + + entries, err := f.Readdirnames(-1) + if err != nil { + t.Fatal(err) + } + + err = f.Close() + if err != nil { + t.Fatal(err) + } + + sort.Sort(sort.StringSlice(want)) + sort.Sort(sort.StringSlice(entries)) + + if !cmp.Equal(want, entries) { + t.Error(cmp.Diff(want, entries)) + } +} + +type fiSlice []os.FileInfo + +func (s fiSlice) Len() int { + return len(s) +} + +func (s fiSlice) Less(i, j int) bool { + return s[i].Name() < s[j].Name() +} + +func (s fiSlice) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func verifyDirectoryContentsFI(t testing.TB, fs FS, dir string, want []os.FileInfo) { + f, err := fs.Open(dir) + if err != nil { + t.Fatal(err) + } + + entries, err := f.Readdir(-1) + if err != nil { + t.Fatal(err) + } + + err = f.Close() + if err != nil { + t.Fatal(err) + } + + sort.Sort(fiSlice(want)) + sort.Sort(fiSlice(entries)) + + if len(want) != len(entries) { + t.Errorf("wrong number of entries returned, want %d, got %d", len(want), len(entries)) + } + max := len(want) + if len(entries) < max { + max = len(entries) + } + + for i := 0; i < max; i++ { + fi1 := want[i] + fi2 := entries[i] + + if fi1.Name() != fi2.Name() { + t.Errorf("entry %d: wrong value for Name: want %q, got %q", i, fi1.Name(), fi2.Name()) + } + + if fi1.IsDir() != fi2.IsDir() { + t.Errorf("entry %d: wrong value for IsDir: want %v, got %v", i, fi1.IsDir(), fi2.IsDir()) + } + + if fi1.Mode() != fi2.Mode() { + t.Errorf("entry %d: wrong value for Mode: want %v, got %v", i, fi1.Mode(), fi2.Mode()) + } + + if fi1.ModTime() != fi2.ModTime() { + t.Errorf("entry %d: wrong value for ModTime: want %v, got %v", i, fi1.ModTime(), fi2.ModTime()) + } + + if fi1.Size() != fi2.Size() { + t.Errorf("entry %d: wrong value for Size: want %v, got %v", i, fi1.Size(), fi2.Size()) + } + + if fi1.Sys() != fi2.Sys() { + 
t.Errorf("entry %d: wrong value for Sys: want %v, got %v", i, fi1.Sys(), fi2.Sys()) + } + } +} + +func checkFileInfo(t testing.TB, fi os.FileInfo, filename string, modtime time.Time, mode os.FileMode, isdir bool) { + if fi.IsDir() { + t.Errorf("IsDir returned true, want false") + } + + if fi.Mode() != mode { + t.Errorf("Mode() returned wrong value, want 0%o, got 0%o", mode, fi.Mode()) + } + + if !modtime.Equal(time.Time{}) && !fi.ModTime().Equal(modtime) { + t.Errorf("ModTime() returned wrong value, want %v, got %v", modtime, fi.ModTime()) + } + + if fi.Name() != filename { + t.Errorf("Name() returned wrong value, want %q, got %q", filename, fi.Name()) + } +} + +func TestFSReader(t *testing.T) { + data := test.Random(55, 1<<18+588) + now := time.Now() + filename := "foobar" + + var tests = []struct { + name string + f func(t *testing.T, fs FS) + }{ + { + name: "Readdirnames-slash", + f: func(t *testing.T, fs FS) { + verifyDirectoryContents(t, fs, "/", []string{filename}) + }, + }, + { + name: "Readdirnames-current", + f: func(t *testing.T, fs FS) { + verifyDirectoryContents(t, fs, ".", []string{filename}) + }, + }, + { + name: "Readdir-slash", + f: func(t *testing.T, fs FS) { + fi := fakeFileInfo{ + mode: 0644, + modtime: now, + name: filename, + size: int64(len(data)), + } + verifyDirectoryContentsFI(t, fs, "/", []os.FileInfo{fi}) + }, + }, + { + name: "Readdir-current", + f: func(t *testing.T, fs FS) { + fi := fakeFileInfo{ + mode: 0644, + modtime: now, + name: filename, + size: int64(len(data)), + } + verifyDirectoryContentsFI(t, fs, ".", []os.FileInfo{fi}) + }, + }, + { + name: "file/Open", + f: func(t *testing.T, fs FS) { + verifyFileContentOpen(t, fs, filename, data) + }, + }, + { + name: "file/OpenFile", + f: func(t *testing.T, fs FS) { + verifyFileContentOpenFile(t, fs, filename, data) + }, + }, + { + name: "file/Lstat", + f: func(t *testing.T, fs FS) { + fi, err := fs.Lstat(filename) + if err != nil { + t.Fatal(err) + } + + checkFileInfo(t, fi, filename, now, 0644, false) + }, + }, + { + name: "file/Stat", + f: func(t *testing.T, fs FS) { + f, err := fs.Open(filename) + if err != nil { + t.Fatal(err) + } + + fi, err := f.Stat() + if err != nil { + t.Fatal(err) + } + + err = f.Close() + if err != nil { + t.Fatal(err) + } + + checkFileInfo(t, fi, filename, now, 0644, false) + }, + }, + { + name: "dir/Lstat-slash", + f: func(t *testing.T, fs FS) { + fi, err := fs.Lstat("/") + if err != nil { + t.Fatal(err) + } + + checkFileInfo(t, fi, "/", time.Time{}, 0755, false) + }, + }, + { + name: "dir/Lstat-current", + f: func(t *testing.T, fs FS) { + fi, err := fs.Lstat(".") + if err != nil { + t.Fatal(err) + } + + checkFileInfo(t, fi, ".", time.Time{}, 0755, false) + }, + }, + { + name: "dir/Open-slash", + f: func(t *testing.T, fs FS) { + fi, err := fs.Lstat("/") + if err != nil { + t.Fatal(err) + } + + checkFileInfo(t, fi, "/", time.Time{}, 0755, false) + }, + }, + { + name: "dir/Open-current", + f: func(t *testing.T, fs FS) { + fi, err := fs.Lstat(".") + if err != nil { + t.Fatal(err) + } + + checkFileInfo(t, fi, ".", time.Time{}, 0755, false) + }, + }, + } + + for _, test := range tests { + fs := &Reader{ + Name: filename, + ReadCloser: ioutil.NopCloser(bytes.NewReader(data)), + + Mode: 0644, + Size: int64(len(data)), + ModTime: now, + } + + t.Run(test.name, func(t *testing.T) { + test.f(t, fs) + }) + } +} diff --git a/internal/fs/fs_track.go b/internal/fs/fs_track.go new file mode 100644 index 000000000..319fbfaff --- /dev/null +++ b/internal/fs/fs_track.go @@ -0,0 +1,54 @@ +package fs + 
+import (
+	"fmt"
+	"os"
+	"runtime"
+	"runtime/debug"
+)
+
+// Track is a wrapper around another file system. It installs a finalizer for
+// each open file; the finalizer panics if the file has not been closed by the
+// time the garbage collector releases it. This can be used to find resource
+// leaks via open files.
+type Track struct {
+	FS
+}
+
+// Open wraps the Open method of the underlying file system.
+func (fs Track) Open(name string) (File, error) {
+	f, err := fs.FS.Open(fixpath(name))
+	if err != nil {
+		return nil, err
+	}
+
+	return newTrackFile(debug.Stack(), name, f), nil
+}
+
+// OpenFile wraps the OpenFile method of the underlying file system.
+func (fs Track) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
+	f, err := fs.FS.OpenFile(fixpath(name), flag, perm)
+	if err != nil {
+		return nil, err
+	}
+
+	return newTrackFile(debug.Stack(), name, f), nil
+}
+
+type trackFile struct {
+	File
+}
+
+func newTrackFile(stack []byte, filename string, file File) *trackFile {
+	f := &trackFile{file}
+	runtime.SetFinalizer(f, func(f *trackFile) {
+		fmt.Fprintf(os.Stderr, "file %s not closed\n\nStacktrace:\n%s\n", filename, stack)
+		panic("file " + filename + " not closed")
+	})
+	return f
+}
+
+func (f *trackFile) Close() error {
+	runtime.SetFinalizer(f, nil)
+	return f.File.Close()
+}
diff --git a/internal/fs/helpers.go b/internal/fs/helpers.go
new file mode 100644
index 000000000..7235834ae
--- /dev/null
+++ b/internal/fs/helpers.go
@@ -0,0 +1,43 @@
+package fs
+
+import (
+	"os"
+	"testing"
+
+	"github.com/restic/restic/internal/test"
+)
+
+// IsRegularFile returns true if fi belongs to a normal file. If fi is nil,
+// false is returned.
+func IsRegularFile(fi os.FileInfo) bool {
+	if fi == nil {
+		return false
+	}
+
+	return fi.Mode()&(os.ModeType|os.ModeCharDevice) == 0
+}
+
+// TestChdir changes the current directory to dest; the returned function back restores the previous directory.
+func TestChdir(t testing.TB, dest string) (back func()) {
+	test.Helper(t).Helper()
+
+	prev, err := os.Getwd()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	t.Logf("chdir to %v", dest)
+	err = os.Chdir(dest)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	return func() {
+		test.Helper(t).Helper()
+		t.Logf("chdir back to %v", prev)
+		err = os.Chdir(prev)
+		if err != nil {
+			t.Fatal(err)
+		}
+	}
+}
diff --git a/internal/fs/interface.go b/internal/fs/interface.go
new file mode 100644
index 000000000..1c2260215
--- /dev/null
+++ b/internal/fs/interface.go
@@ -0,0 +1,38 @@
+package fs
+
+import (
+	"io"
+	"os"
+)
+
+// FS bundles all methods needed for a file system.
+type FS interface {
+	Open(name string) (File, error)
+	OpenFile(name string, flag int, perm os.FileMode) (File, error)
+	Stat(name string) (os.FileInfo, error)
+	Lstat(name string) (os.FileInfo, error)
+
+	Join(elem ...string) string
+	Separator() string
+	Abs(path string) (string, error)
+	Clean(path string) string
+	VolumeName(path string) string
+	IsAbs(path string) bool
+
+	Dir(path string) string
+	Base(path string) string
+}
+
+// File is an open file on a file system. 
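Because `Track` embeds the wrapped `FS`, composing it is a one-liner. A sketch of how a leak-hunting debug build might wire it up, assuming a caller inside the restic tree; note that Go does not guarantee when, or whether, a finalizer runs:

    func scanOne(name string) error {
    	// wrap the real file system; a file that becomes unreachable
    	// without having been closed makes the finalizer print the stack
    	// captured at Open time and panic
    	var fsys fs.FS = fs.Track{FS: fs.Local{}}

    	f, err := fsys.Open(name)
    	if err != nil {
    		return err
    	}
    	// dropping this Close is exactly the bug Track exists to catch
    	return f.Close()
    }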
+type File interface { + io.Reader + io.Writer + io.Closer + + Fd() uintptr + Readdirnames(n int) ([]string, error) + Readdir(int) ([]os.FileInfo, error) + Seek(int64, int) (int64, error) + Stat() (os.FileInfo, error) + Name() string +} diff --git a/internal/fs/stat.go b/internal/fs/stat.go new file mode 100644 index 000000000..d37d12942 --- /dev/null +++ b/internal/fs/stat.go @@ -0,0 +1,34 @@ +package fs + +import ( + "os" + "time" +) + +// ExtendedFileInfo is an extended stat_t, filled with attributes that are +// supported by most operating systems. The original FileInfo is embedded. +type ExtendedFileInfo struct { + os.FileInfo + + DeviceID uint64 // ID of device containing the file + Inode uint64 // Inode number + Links uint64 // Number of hard links + UID uint32 // owner user ID + GID uint32 // owner group ID + Device uint64 // Device ID (if this is a device file) + BlockSize int64 // block size for filesystem IO + Blocks int64 // number of allocated filesystem blocks + Size int64 // file size in byte + + AccessTime time.Time // last access time stamp + ModTime time.Time // last (content) modification time stamp +} + +// ExtendedStat returns an ExtendedFileInfo constructed from the os.FileInfo. +func ExtendedStat(fi os.FileInfo) ExtendedFileInfo { + if fi == nil { + panic("os.FileInfo is nil") + } + + return extendedStat(fi) +} diff --git a/internal/fs/stat_bsd.go b/internal/fs/stat_bsd.go new file mode 100644 index 000000000..97c03bedc --- /dev/null +++ b/internal/fs/stat_bsd.go @@ -0,0 +1,36 @@ +// +build freebsd darwin + +package fs + +import ( + "fmt" + "os" + "syscall" + "time" +) + +// extendedStat extracts info into an ExtendedFileInfo for unix based operating systems. +func extendedStat(fi os.FileInfo) ExtendedFileInfo { + s, ok := fi.Sys().(*syscall.Stat_t) + if !ok { + panic(fmt.Sprintf("conversion to syscall.Stat_t failed, type is %T", fi.Sys())) + } + + extFI := ExtendedFileInfo{ + FileInfo: fi, + DeviceID: uint64(s.Dev), + Inode: uint64(s.Ino), + Links: uint64(s.Nlink), + UID: s.Uid, + GID: s.Gid, + Device: uint64(s.Rdev), + BlockSize: int64(s.Blksize), + Blocks: s.Blocks, + Size: s.Size, + + AccessTime: time.Unix(s.Atimespec.Unix()), + ModTime: time.Unix(s.Mtimespec.Unix()), + } + + return extFI +} diff --git a/internal/fs/stat_test.go b/internal/fs/stat_test.go new file mode 100644 index 000000000..43e514047 --- /dev/null +++ b/internal/fs/stat_test.go @@ -0,0 +1,31 @@ +package fs + +import ( + "io/ioutil" + "path/filepath" + "testing" + + restictest "github.com/restic/restic/internal/test" +) + +func TestExtendedStat(t *testing.T) { + tempdir, cleanup := restictest.TempDir(t) + defer cleanup() + + filename := filepath.Join(tempdir, "file") + err := ioutil.WriteFile(filename, []byte("foobar"), 0640) + if err != nil { + t.Fatal(err) + } + + fi, err := Lstat(filename) + if err != nil { + t.Fatal(err) + } + + extFI := ExtendedStat(fi) + + if !extFI.ModTime.Equal(fi.ModTime()) { + t.Errorf("extFI.ModTime does not match, want %v, got %v", fi.ModTime(), extFI.ModTime) + } +} diff --git a/internal/fs/stat_unix.go b/internal/fs/stat_unix.go new file mode 100644 index 000000000..612898566 --- /dev/null +++ b/internal/fs/stat_unix.go @@ -0,0 +1,36 @@ +// +build !windows,!darwin,!freebsd + +package fs + +import ( + "fmt" + "os" + "syscall" + "time" +) + +// extendedStat extracts info into an ExtendedFileInfo for unix based operating systems. 
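`ExtendedStat` is the portable entry point for the platform-specific `extendedStat` implementations that follow. A short usage sketch (hypothetical helper; the fields are the ones declared in `stat.go` above):

    func printInode(name string) error {
    	fi, err := os.Lstat(name)
    	if err != nil {
    		return err
    	}
    	// ExtendedStat panics on a nil FileInfo, so check the error first
    	ext := fs.ExtendedStat(fi)
    	fmt.Printf("inode %d, %d links, atime %v\n", ext.Inode, ext.Links, ext.AccessTime)
    	return nil
    }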
+func extendedStat(fi os.FileInfo) ExtendedFileInfo { + s, ok := fi.Sys().(*syscall.Stat_t) + if !ok { + panic(fmt.Sprintf("conversion to syscall.Stat_t failed, type is %T", fi.Sys())) + } + + extFI := ExtendedFileInfo{ + FileInfo: fi, + DeviceID: uint64(s.Dev), + Inode: s.Ino, + Links: uint64(s.Nlink), + UID: s.Uid, + GID: s.Gid, + Device: uint64(s.Rdev), + BlockSize: int64(s.Blksize), + Blocks: s.Blocks, + Size: s.Size, + + AccessTime: time.Unix(s.Atim.Unix()), + ModTime: time.Unix(s.Mtim.Unix()), + } + + return extFI +} diff --git a/internal/fs/stat_windows.go b/internal/fs/stat_windows.go new file mode 100644 index 000000000..16f9fe0eb --- /dev/null +++ b/internal/fs/stat_windows.go @@ -0,0 +1,31 @@ +// +build windows + +package fs + +import ( + "fmt" + "os" + "syscall" + "time" +) + +// extendedStat extracts info into an ExtendedFileInfo for Windows. +func extendedStat(fi os.FileInfo) ExtendedFileInfo { + s, ok := fi.Sys().(*syscall.Win32FileAttributeData) + if !ok { + panic(fmt.Sprintf("conversion to syscall.Win32FileAttributeData failed, type is %T", fi.Sys())) + } + + extFI := ExtendedFileInfo{ + FileInfo: fi, + Size: int64(s.FileSizeLow) + int64(s.FileSizeHigh)<<32, + } + + atime := syscall.NsecToTimespec(s.LastAccessTime.Nanoseconds()) + extFI.AccessTime = time.Unix(atime.Unix()) + + mtime := syscall.NsecToTimespec(s.LastWriteTime.Nanoseconds()) + extFI.ModTime = time.Unix(mtime.Unix()) + + return extFI +} diff --git a/internal/pipe/doc.go b/internal/pipe/doc.go deleted file mode 100644 index ba5fc04ae..000000000 --- a/internal/pipe/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package pipe implements walking a directory in a deterministic order. -package pipe diff --git a/internal/pipe/pipe.go b/internal/pipe/pipe.go deleted file mode 100644 index 1cf7ff6bd..000000000 --- a/internal/pipe/pipe.go +++ /dev/null @@ -1,292 +0,0 @@ -package pipe - -import ( - "context" - "fmt" - "os" - "path/filepath" - "sort" - - "github.com/restic/restic/internal/errors" - - "github.com/restic/restic/internal/debug" - "github.com/restic/restic/internal/fs" -) - -type Result interface{} - -type Job interface { - Path() string - Fullpath() string - Error() error - Info() os.FileInfo - - Result() chan<- Result -} - -type Entry struct { - basedir string - path string - info os.FileInfo - error error - result chan<- Result - - // points to the old node if available, interface{} is used to prevent - // circular import - Node interface{} -} - -func (e Entry) Path() string { return e.path } -func (e Entry) Fullpath() string { return filepath.Join(e.basedir, e.path) } -func (e Entry) Error() error { return e.error } -func (e Entry) Info() os.FileInfo { return e.info } -func (e Entry) Result() chan<- Result { return e.result } - -type Dir struct { - basedir string - path string - error error - info os.FileInfo - - Entries [](<-chan Result) - result chan<- Result -} - -func (e Dir) Path() string { return e.path } -func (e Dir) Fullpath() string { return filepath.Join(e.basedir, e.path) } -func (e Dir) Error() error { return e.error } -func (e Dir) Info() os.FileInfo { return e.info } -func (e Dir) Result() chan<- Result { return e.result } - -// readDirNames reads the directory named by dirname and returns -// a sorted list of directory entries. 
-// taken from filepath/path.go -func readDirNames(dirname string) ([]string, error) { - f, err := fs.Open(dirname) - if err != nil { - return nil, errors.Wrap(err, "Open") - } - names, err := f.Readdirnames(-1) - _ = f.Close() - if err != nil { - return nil, errors.Wrap(err, "Readdirnames") - } - sort.Strings(names) - return names, nil -} - -// SelectFunc returns true for all items that should be included (files and -// dirs). If false is returned, files are ignored and dirs are not even walked. -type SelectFunc func(item string, fi os.FileInfo) bool - -func walk(ctx context.Context, basedir, dir string, selectFunc SelectFunc, jobs chan<- Job, res chan<- Result) (excluded bool) { - debug.Log("start on %q, basedir %q", dir, basedir) - - relpath, err := filepath.Rel(basedir, dir) - if err != nil { - panic(err) - } - - info, err := fs.Lstat(dir) - if err != nil { - err = errors.Wrap(err, "Lstat") - debug.Log("error for %v: %v, res %p", dir, err, res) - select { - case jobs <- Dir{basedir: basedir, path: relpath, info: info, error: err, result: res}: - case <-ctx.Done(): - } - return - } - - if !selectFunc(dir, info) { - debug.Log("file %v excluded by filter, res %p", dir, res) - excluded = true - return - } - - if !info.IsDir() { - debug.Log("sending file job for %v, res %p", dir, res) - select { - case jobs <- Entry{info: info, basedir: basedir, path: relpath, result: res}: - case <-ctx.Done(): - } - return - } - - debug.RunHook("pipe.readdirnames", dir) - names, err := readDirNames(dir) - if err != nil { - debug.Log("Readdirnames(%v) returned error: %v, res %p", dir, err, res) - select { - case <-ctx.Done(): - case jobs <- Dir{basedir: basedir, path: relpath, info: info, error: err, result: res}: - } - return - } - - // Insert breakpoint to allow testing behaviour with vanishing files - // between Readdir() and lstat() - debug.RunHook("pipe.walk1", relpath) - - entries := make([]<-chan Result, 0, len(names)) - - for _, name := range names { - subpath := filepath.Join(dir, name) - - fi, statErr := fs.Lstat(subpath) - if !selectFunc(subpath, fi) { - debug.Log("file %v excluded by filter", subpath) - continue - } - - ch := make(chan Result, 1) - entries = append(entries, ch) - - if statErr != nil { - statErr = errors.Wrap(statErr, "Lstat") - debug.Log("sending file job for %v, err %v, res %p", subpath, err, res) - select { - case jobs <- Entry{info: fi, error: statErr, basedir: basedir, path: filepath.Join(relpath, name), result: ch}: - case <-ctx.Done(): - return - } - continue - } - - // Insert breakpoint to allow testing behaviour with vanishing files - // between walk and open - debug.RunHook("pipe.walk2", filepath.Join(relpath, name)) - - walk(ctx, basedir, subpath, selectFunc, jobs, ch) - } - - debug.Log("sending dirjob for %q, basedir %q, res %p", dir, basedir, res) - select { - case jobs <- Dir{basedir: basedir, path: relpath, info: info, Entries: entries, result: res}: - case <-ctx.Done(): - } - - return -} - -// cleanupPath is used to clean a path. For a normal path, a slice with just -// the path is returned. For special cases such as "." and "/" the list of -// names within those paths is returned. 
-func cleanupPath(path string) ([]string, error) { - path = filepath.Clean(path) - if filepath.Dir(path) != path { - return []string{path}, nil - } - - paths, err := readDirNames(path) - if err != nil { - return nil, err - } - - for i, p := range paths { - paths[i] = filepath.Join(path, p) - } - - return paths, nil -} - -// Walk sends a Job for each file and directory it finds below the paths. When -// the channel done is closed, processing stops. -func Walk(ctx context.Context, walkPaths []string, selectFunc SelectFunc, jobs chan<- Job, res chan<- Result) { - var paths []string - - for _, p := range walkPaths { - ps, err := cleanupPath(p) - if err != nil { - fmt.Fprintf(os.Stderr, "Readdirnames(%v): %v, skipping\n", p, err) - debug.Log("Readdirnames(%v) returned error: %v, skipping", p, err) - continue - } - - paths = append(paths, ps...) - } - - debug.Log("start on %v", paths) - defer func() { - debug.Log("output channel closed") - close(jobs) - }() - - entries := make([]<-chan Result, 0, len(paths)) - for _, path := range paths { - debug.Log("start walker for %v", path) - ch := make(chan Result, 1) - excluded := walk(ctx, filepath.Dir(path), path, selectFunc, jobs, ch) - - if excluded { - debug.Log("walker for %v done, it was excluded by the filter", path) - continue - } - - entries = append(entries, ch) - debug.Log("walker for %v done", path) - } - - debug.Log("sending root node, res %p", res) - select { - case <-ctx.Done(): - return - case jobs <- Dir{Entries: entries, result: res}: - } - - debug.Log("walker done") -} - -// Split feeds all elements read from inChan to dirChan and entChan. -func Split(inChan <-chan Job, dirChan chan<- Dir, entChan chan<- Entry) { - debug.Log("start") - defer debug.Log("done") - - inCh := inChan - dirCh := dirChan - entCh := entChan - - var ( - dir Dir - ent Entry - ) - - // deactivate sending until we received at least one job - dirCh = nil - entCh = nil - for { - select { - case job, ok := <-inCh: - if !ok { - // channel is closed - return - } - - if job == nil { - panic("nil job received") - } - - // disable receiving until the current job has been sent - inCh = nil - - switch j := job.(type) { - case Dir: - dir = j - dirCh = dirChan - case Entry: - ent = j - entCh = entChan - default: - panic(fmt.Sprintf("unknown job type %v", j)) - } - case dirCh <- dir: - // disable sending, re-enable receiving - dirCh = nil - inCh = inChan - case entCh <- ent: - // disable sending, re-enable receiving - entCh = nil - inCh = inChan - } - } -} diff --git a/internal/pipe/pipe_test.go b/internal/pipe/pipe_test.go deleted file mode 100644 index 1612987e7..000000000 --- a/internal/pipe/pipe_test.go +++ /dev/null @@ -1,600 +0,0 @@ -package pipe_test - -import ( - "context" - "io/ioutil" - "os" - "path/filepath" - "runtime" - "sync" - "testing" - "time" - - "github.com/restic/restic/internal/debug" - "github.com/restic/restic/internal/pipe" - rtest "github.com/restic/restic/internal/test" -) - -type stats struct { - dirs, files int -} - -func acceptAll(string, os.FileInfo) bool { - return true -} - -func statPath(path string) (stats, error) { - var s stats - - // count files and directories with filepath.Walk() - err := filepath.Walk(rtest.TestWalkerPath, func(p string, fi os.FileInfo, err error) error { - if fi == nil { - return err - } - - if fi.IsDir() { - s.dirs++ - } else { - s.files++ - } - - return err - }) - - return s, err -} - -const maxWorkers = 100 - -func TestPipelineWalkerWithSplit(t *testing.T) { - if rtest.TestWalkerPath == "" { - t.Skipf("walkerpath not 
set, skipping TestPipelineWalker") - } - - var err error - if !filepath.IsAbs(rtest.TestWalkerPath) { - rtest.TestWalkerPath, err = filepath.Abs(rtest.TestWalkerPath) - rtest.OK(t, err) - } - - before, err := statPath(rtest.TestWalkerPath) - rtest.OK(t, err) - - t.Logf("walking path %s with %d dirs, %d files", rtest.TestWalkerPath, - before.dirs, before.files) - - // account for top level dir - before.dirs++ - - after := stats{} - m := sync.Mutex{} - - worker := func(wg *sync.WaitGroup, done <-chan struct{}, entCh <-chan pipe.Entry, dirCh <-chan pipe.Dir) { - defer wg.Done() - for { - select { - case e, ok := <-entCh: - if !ok { - // channel is closed - return - } - - m.Lock() - after.files++ - m.Unlock() - - e.Result() <- true - - case dir, ok := <-dirCh: - if !ok { - // channel is closed - return - } - - // wait for all content - for _, ch := range dir.Entries { - <-ch - } - - m.Lock() - after.dirs++ - m.Unlock() - - dir.Result() <- true - case <-done: - // pipeline was cancelled - return - } - } - } - - var wg sync.WaitGroup - done := make(chan struct{}) - entCh := make(chan pipe.Entry) - dirCh := make(chan pipe.Dir) - - for i := 0; i < maxWorkers; i++ { - wg.Add(1) - go worker(&wg, done, entCh, dirCh) - } - - jobs := make(chan pipe.Job, 200) - wg.Add(1) - go func() { - pipe.Split(jobs, dirCh, entCh) - close(entCh) - close(dirCh) - wg.Done() - }() - - resCh := make(chan pipe.Result, 1) - pipe.Walk(context.TODO(), []string{rtest.TestWalkerPath}, acceptAll, jobs, resCh) - - // wait for all workers to terminate - wg.Wait() - - // wait for top-level blob - <-resCh - - t.Logf("walked path %s with %d dirs, %d files", rtest.TestWalkerPath, - after.dirs, after.files) - - rtest.Assert(t, before == after, "stats do not match, expected %v, got %v", before, after) -} - -func TestPipelineWalker(t *testing.T) { - if rtest.TestWalkerPath == "" { - t.Skipf("walkerpath not set, skipping TestPipelineWalker") - } - - ctx, cancel := context.WithCancel(context.TODO()) - defer cancel() - - var err error - if !filepath.IsAbs(rtest.TestWalkerPath) { - rtest.TestWalkerPath, err = filepath.Abs(rtest.TestWalkerPath) - rtest.OK(t, err) - } - - before, err := statPath(rtest.TestWalkerPath) - rtest.OK(t, err) - - t.Logf("walking path %s with %d dirs, %d files", rtest.TestWalkerPath, - before.dirs, before.files) - - // account for top level dir - before.dirs++ - - after := stats{} - m := sync.Mutex{} - - worker := func(ctx context.Context, wg *sync.WaitGroup, jobs <-chan pipe.Job) { - defer wg.Done() - for { - select { - case job, ok := <-jobs: - if !ok { - // channel is closed - return - } - rtest.Assert(t, job != nil, "job is nil") - - switch j := job.(type) { - case pipe.Dir: - // wait for all content - for _, ch := range j.Entries { - <-ch - } - - m.Lock() - after.dirs++ - m.Unlock() - - j.Result() <- true - case pipe.Entry: - m.Lock() - after.files++ - m.Unlock() - - j.Result() <- true - } - - case <-ctx.Done(): - // pipeline was cancelled - return - } - } - } - - var wg sync.WaitGroup - jobs := make(chan pipe.Job) - - for i := 0; i < maxWorkers; i++ { - wg.Add(1) - go worker(ctx, &wg, jobs) - } - - resCh := make(chan pipe.Result, 1) - pipe.Walk(ctx, []string{rtest.TestWalkerPath}, acceptAll, jobs, resCh) - - // wait for all workers to terminate - wg.Wait() - - // wait for top-level blob - <-resCh - - t.Logf("walked path %s with %d dirs, %d files", rtest.TestWalkerPath, - after.dirs, after.files) - - rtest.Assert(t, before == after, "stats do not match, expected %v, got %v", before, after) -} - -func 
createFile(filename, data string) error { - f, err := os.Create(filename) - if err != nil { - return err - } - - defer f.Close() - - _, err = f.Write([]byte(data)) - if err != nil { - return err - } - - return nil -} - -func TestPipeWalkerError(t *testing.T) { - dir, err := ioutil.TempDir("", "restic-test-") - rtest.OK(t, err) - - base := filepath.Base(dir) - - var testjobs = []struct { - path []string - err bool - }{ - {[]string{base, "a", "file_a"}, false}, - {[]string{base, "a"}, false}, - {[]string{base, "b"}, true}, - {[]string{base, "c", "file_c"}, false}, - {[]string{base, "c"}, false}, - {[]string{base}, false}, - {[]string{}, false}, - } - - rtest.OK(t, os.Mkdir(filepath.Join(dir, "a"), 0755)) - rtest.OK(t, os.Mkdir(filepath.Join(dir, "b"), 0755)) - rtest.OK(t, os.Mkdir(filepath.Join(dir, "c"), 0755)) - - rtest.OK(t, createFile(filepath.Join(dir, "a", "file_a"), "file a")) - rtest.OK(t, createFile(filepath.Join(dir, "b", "file_b"), "file b")) - rtest.OK(t, createFile(filepath.Join(dir, "c", "file_c"), "file c")) - - ranHook := false - testdir := filepath.Join(dir, "b") - - // install hook that removes the dir right before readdirnames() - debug.Hook("pipe.readdirnames", func(context interface{}) { - path := context.(string) - - if path != testdir { - return - } - - t.Logf("in hook, removing test file %v", testdir) - ranHook = true - - rtest.OK(t, os.RemoveAll(testdir)) - }) - - ctx, cancel := context.WithCancel(context.TODO()) - - ch := make(chan pipe.Job) - resCh := make(chan pipe.Result, 1) - - go pipe.Walk(ctx, []string{dir}, acceptAll, ch, resCh) - - i := 0 - for job := range ch { - if i == len(testjobs) { - t.Errorf("too many jobs received") - break - } - - p := filepath.Join(testjobs[i].path...) - if p != job.Path() { - t.Errorf("job %d has wrong path: expected %q, got %q", i, p, job.Path()) - } - - if testjobs[i].err { - if job.Error() == nil { - t.Errorf("job %d expected error but got nil", i) - } - } else { - if job.Error() != nil { - t.Errorf("job %d expected no error but got %v", i, job.Error()) - } - } - - i++ - } - - if i != len(testjobs) { - t.Errorf("expected %d jobs, got %d", len(testjobs), i) - } - - cancel() - - rtest.Assert(t, ranHook, "hook did not run") - rtest.OK(t, os.RemoveAll(dir)) -} - -func BenchmarkPipelineWalker(b *testing.B) { - if rtest.TestWalkerPath == "" { - b.Skipf("walkerpath not set, skipping BenchPipelineWalker") - } - - var max time.Duration - m := sync.Mutex{} - - fileWorker := func(ctx context.Context, wg *sync.WaitGroup, ch <-chan pipe.Entry) { - defer wg.Done() - for { - select { - case e, ok := <-ch: - if !ok { - // channel is closed - return - } - - // simulate backup - //time.Sleep(10 * time.Millisecond) - - e.Result() <- true - case <-ctx.Done(): - // pipeline was cancelled - return - } - } - } - - dirWorker := func(ctx context.Context, wg *sync.WaitGroup, ch <-chan pipe.Dir) { - defer wg.Done() - for { - select { - case dir, ok := <-ch: - if !ok { - // channel is closed - return - } - - start := time.Now() - - // wait for all content - for _, ch := range dir.Entries { - <-ch - } - - d := time.Since(start) - m.Lock() - if d > max { - max = d - } - m.Unlock() - - dir.Result() <- true - case <-ctx.Done(): - // pipeline was cancelled - return - } - } - } - - ctx, cancel := context.WithCancel(context.TODO()) - defer cancel() - - for i := 0; i < b.N; i++ { - max = 0 - entCh := make(chan pipe.Entry, 200) - dirCh := make(chan pipe.Dir, 200) - - var wg sync.WaitGroup - b.Logf("starting %d workers", maxWorkers) - for i := 0; i < maxWorkers; 
i++ { - wg.Add(2) - go dirWorker(ctx, &wg, dirCh) - go fileWorker(ctx, &wg, entCh) - } - - jobs := make(chan pipe.Job, 200) - wg.Add(1) - go func() { - pipe.Split(jobs, dirCh, entCh) - close(entCh) - close(dirCh) - wg.Done() - }() - - resCh := make(chan pipe.Result, 1) - pipe.Walk(ctx, []string{rtest.TestWalkerPath}, acceptAll, jobs, resCh) - - // wait for all workers to terminate - wg.Wait() - - // wait for final result - <-resCh - - b.Logf("max duration for a dir: %v", max) - } -} - -func TestPipelineWalkerMultiple(t *testing.T) { - if rtest.TestWalkerPath == "" { - t.Skipf("walkerpath not set, skipping TestPipelineWalker") - } - - ctx, cancel := context.WithCancel(context.TODO()) - defer cancel() - - paths, err := filepath.Glob(filepath.Join(rtest.TestWalkerPath, "*")) - rtest.OK(t, err) - - before, err := statPath(rtest.TestWalkerPath) - rtest.OK(t, err) - - t.Logf("walking paths %v with %d dirs, %d files", paths, - before.dirs, before.files) - - after := stats{} - m := sync.Mutex{} - - worker := func(ctx context.Context, wg *sync.WaitGroup, jobs <-chan pipe.Job) { - defer wg.Done() - for { - select { - case job, ok := <-jobs: - if !ok { - // channel is closed - return - } - rtest.Assert(t, job != nil, "job is nil") - - switch j := job.(type) { - case pipe.Dir: - // wait for all content - for _, ch := range j.Entries { - <-ch - } - - m.Lock() - after.dirs++ - m.Unlock() - - j.Result() <- true - case pipe.Entry: - m.Lock() - after.files++ - m.Unlock() - - j.Result() <- true - } - - case <-ctx.Done(): - // pipeline was cancelled - return - } - } - } - - var wg sync.WaitGroup - jobs := make(chan pipe.Job) - - for i := 0; i < maxWorkers; i++ { - wg.Add(1) - go worker(ctx, &wg, jobs) - } - - resCh := make(chan pipe.Result, 1) - pipe.Walk(ctx, paths, acceptAll, jobs, resCh) - - // wait for all workers to terminate - wg.Wait() - - // wait for top-level blob - <-resCh - - t.Logf("walked %d paths with %d dirs, %d files", len(paths), after.dirs, after.files) - - rtest.Assert(t, before == after, "stats do not match, expected %v, got %v", before, after) -} - -func dirsInPath(path string) int { - if path == "/" || path == "." 
|| path == "" { - return 0 - } - - n := 0 - for dir := path; dir != "/" && dir != "."; dir = filepath.Dir(dir) { - n++ - } - - return n -} - -func TestPipeWalkerRoot(t *testing.T) { - if runtime.GOOS == "windows" { - t.Skipf("not running TestPipeWalkerRoot on %s", runtime.GOOS) - return - } - - cwd, err := os.Getwd() - rtest.OK(t, err) - - testPaths := []string{ - string(filepath.Separator), - ".", - cwd, - } - - for _, path := range testPaths { - testPipeWalkerRootWithPath(path, t) - } -} - -func testPipeWalkerRootWithPath(path string, t *testing.T) { - pattern := filepath.Join(path, "*") - rootPaths, err := filepath.Glob(pattern) - rtest.OK(t, err) - - for i, p := range rootPaths { - rootPaths[i], err = filepath.Rel(path, p) - rtest.OK(t, err) - } - - t.Logf("paths in %v (pattern %q) expanded to %v items", path, pattern, len(rootPaths)) - - jobCh := make(chan pipe.Job) - var jobs []pipe.Job - - worker := func(wg *sync.WaitGroup) { - defer wg.Done() - for job := range jobCh { - jobs = append(jobs, job) - } - } - - var wg sync.WaitGroup - wg.Add(1) - go worker(&wg) - - filter := func(p string, fi os.FileInfo) bool { - p, err := filepath.Rel(path, p) - rtest.OK(t, err) - return dirsInPath(p) <= 1 - } - - resCh := make(chan pipe.Result, 1) - pipe.Walk(context.TODO(), []string{path}, filter, jobCh, resCh) - - wg.Wait() - - t.Logf("received %d jobs", len(jobs)) - - for i, job := range jobs[:len(jobs)-1] { - path := job.Path() - if path == "." || path == ".." || path == string(filepath.Separator) { - t.Errorf("job %v has invalid path %q", i, path) - } - } - - lastPath := jobs[len(jobs)-1].Path() - if lastPath != "" { - t.Errorf("last job has non-empty path %q", lastPath) - } - - if len(jobs) < len(rootPaths) { - t.Errorf("want at least %v jobs, got %v for path %v\n", len(rootPaths), len(jobs), path) - } -} diff --git a/internal/repository/pool.go b/internal/repository/pool.go index 9c7450d5c..b87791f14 100644 --- a/internal/repository/pool.go +++ b/internal/repository/pool.go @@ -8,7 +8,7 @@ import ( var bufPool = sync.Pool{ New: func() interface{} { - return make([]byte, chunker.MinSize) + return make([]byte, chunker.MaxSize/3) }, } diff --git a/internal/repository/repository.go b/internal/repository/repository.go index 3a1ce40c2..4a76f4025 100644 --- a/internal/repository/repository.go +++ b/internal/repository/repository.go @@ -214,11 +214,11 @@ func (r *Repository) SaveAndEncrypt(ctx context.Context, t restic.BlobType, data // get buf from the pool ciphertext := getBuf() - defer freeBuf(ciphertext) ciphertext = ciphertext[:0] nonce := crypto.NewRandomNonce() ciphertext = append(ciphertext, nonce...) + defer freeBuf(ciphertext) // encrypt blob ciphertext = r.key.Seal(ciphertext, nonce, data, nil) diff --git a/internal/repository/testing.go b/internal/repository/testing.go index a49072335..739aa4d62 100644 --- a/internal/repository/testing.go +++ b/internal/repository/testing.go @@ -42,6 +42,7 @@ const testChunkerPol = chunker.Pol(0x3DA3358B4DC173) // password. If be is nil, an in-memory backend is used. A constant polynomial // is used for the chunker and low-security test parameters. func TestRepositoryWithBackend(t testing.TB, be restic.Backend) (r restic.Repository, cleanup func()) { + test.Helper(t).Helper() TestUseLowSecurityKDFParameters(t) restic.TestDisableCheckPolynomial(t) @@ -70,6 +71,7 @@ func TestRepositoryWithBackend(t testing.TB, be restic.Backend) (r restic.Reposi // a non-existing directory, a local backend is created there and this is used // instead. 
The directory is not removed, but left there for inspection. func TestRepository(t testing.TB) (r restic.Repository, cleanup func()) { + test.Helper(t).Helper() dir := os.Getenv("RESTIC_TEST_REPO") if dir != "" { _, err := os.Stat(dir) diff --git a/internal/restic/node.go b/internal/restic/node.go index 33e5285a8..d1adf39eb 100644 --- a/internal/restic/node.go +++ b/internal/restic/node.go @@ -517,45 +517,6 @@ func (node Node) sameExtendedAttributes(other Node) bool { return true } -// IsNewer returns true of the file has been updated since the last Stat(). -func (node *Node) IsNewer(path string, fi os.FileInfo) bool { - if node.Type != "file" { - debug.Log("node %v is newer: not file", path) - return true - } - - tpe := nodeTypeFromFileInfo(fi) - if node.Name != fi.Name() || node.Type != tpe { - debug.Log("node %v is newer: name or type changed", path) - return true - } - - size := uint64(fi.Size()) - - extendedStat, ok := toStatT(fi.Sys()) - if !ok { - if !node.ModTime.Equal(fi.ModTime()) || - node.Size != size { - debug.Log("node %v is newer: timestamp or size changed", path) - return true - } - return false - } - - inode := extendedStat.ino() - - if !node.ModTime.Equal(fi.ModTime()) || - !node.ChangeTime.Equal(changeTime(extendedStat)) || - node.Inode != uint64(inode) || - node.Size != size { - debug.Log("node %v is newer: timestamp, size or inode changed", path) - return true - } - - debug.Log("node %v is not newer", path) - return false -} - func (node *Node) fillUser(stat statT) error { node.UID = stat.uid() node.GID = stat.gid() @@ -635,6 +596,10 @@ func lookupGroup(gid string) (string, error) { func (node *Node) fillExtra(path string, fi os.FileInfo) error { stat, ok := toStatT(fi.Sys()) if !ok { + // fill minimal info with current values for uid, gid + node.UID = uint32(os.Getuid()) + node.GID = uint32(os.Getgid()) + node.ChangeTime = node.ModTime return nil } diff --git a/internal/restic/restorer_test.go b/internal/restic/restorer_test.go index ec3282e5d..9b1758dc9 100644 --- a/internal/restic/restorer_test.go +++ b/internal/restic/restorer_test.go @@ -346,27 +346,6 @@ func TestRestorer(t *testing.T) { } } -func chdir(t testing.TB, target string) func() { - prev, err := os.Getwd() - if err != nil { - t.Fatal(err) - } - - t.Logf("chdir to %v", target) - err = os.Chdir(target) - if err != nil { - t.Fatal(err) - } - - return func() { - t.Logf("chdir back to %v", prev) - err = os.Chdir(prev) - if err != nil { - t.Fatal(err) - } - } -} - func TestRestorerRelative(t *testing.T) { var tests = []struct { Snapshot @@ -406,7 +385,7 @@ func TestRestorerRelative(t *testing.T) { tempdir, cleanup := rtest.TempDir(t) defer cleanup() - cleanup = chdir(t, tempdir) + cleanup = fs.TestChdir(t, tempdir) defer cleanup() errors := make(map[string]string) diff --git a/internal/restic/snapshot.go b/internal/restic/snapshot.go index 4622bb530..61467013a 100644 --- a/internal/restic/snapshot.go +++ b/internal/restic/snapshot.go @@ -30,14 +30,18 @@ type Snapshot struct { // NewSnapshot returns an initialized snapshot struct for the current user and // time. 
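The `NewSnapshot` hunk below fixes an inverted error check: the old loop assigned the result of `filepath.Abs` only when the call failed, so successfully resolved paths were silently left relative. With the fix, for a process whose working directory is (hypothetically) `/home/user`:

    paths := []string{"work/data", "/var/log"}
    sn, err := restic.NewSnapshot(paths, nil, "host", time.Now())
    // err == nil; sn.Paths == []string{"/home/user/work/data", "/var/log"}

This matters for the `FindLatestSnapshot` change further down, which applies the same normalization to the filter targets so the two path lists compare equal.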
func NewSnapshot(paths []string, tags []string, hostname string, time time.Time) (*Snapshot, error) { - for i, path := range paths { - if p, err := filepath.Abs(path); err != nil { - paths[i] = p + absPaths := make([]string, 0, len(paths)) + for _, path := range paths { + p, err := filepath.Abs(path) + if err == nil { + absPaths = append(absPaths, p) + } else { + absPaths = append(absPaths, path) } } sn := &Snapshot{ - Paths: paths, + Paths: absPaths, Time: time, Tags: tags, Hostname: hostname, diff --git a/internal/restic/snapshot_find.go b/internal/restic/snapshot_find.go index b5d0a8276..f16b91e3d 100644 --- a/internal/restic/snapshot_find.go +++ b/internal/restic/snapshot_find.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "os" + "path/filepath" "time" "github.com/restic/restic/internal/errors" @@ -14,13 +15,25 @@ var ErrNoSnapshotFound = errors.New("no snapshot found") // FindLatestSnapshot finds latest snapshot with optional target/directory, tags and hostname filters. func FindLatestSnapshot(ctx context.Context, repo Repository, targets []string, tagLists []TagList, hostname string) (ID, error) { + var err error + absTargets := make([]string, 0, len(targets)) + for _, target := range targets { + if !filepath.IsAbs(target) { + target, err = filepath.Abs(target) + if err != nil { + return ID{}, errors.Wrap(err, "Abs") + } + } + absTargets = append(absTargets, target) + } + var ( latest time.Time latestID ID found bool ) - err := repo.List(ctx, SnapshotFile, func(snapshotID ID, size int64) error { + err = repo.List(ctx, SnapshotFile, func(snapshotID ID, size int64) error { snapshot, err := LoadSnapshot(ctx, repo, snapshotID) if err != nil { return errors.Errorf("Error loading snapshot %v: %v", snapshotID.Str(), err) @@ -33,7 +46,7 @@ func FindLatestSnapshot(ctx context.Context, repo Repository, targets []string, return nil } - if !snapshot.HasPaths(targets) { + if !snapshot.HasPaths(absTargets) { return nil } diff --git a/internal/restic/tree.go b/internal/restic/tree.go index c2cb3b27b..81650105a 100644 --- a/internal/restic/tree.go +++ b/internal/restic/tree.go @@ -21,12 +21,12 @@ func NewTree() *Tree { } } -func (t Tree) String() string { +func (t *Tree) String() string { return fmt.Sprintf("Tree<%d nodes>", len(t.Nodes)) } // Equals returns true if t and other have exactly the same nodes. -func (t Tree) Equals(other *Tree) bool { +func (t *Tree) Equals(other *Tree) bool { if len(t.Nodes) != len(other.Nodes) { debug.Log("tree.Equals(): trees have different number of nodes") return false @@ -46,9 +46,9 @@ func (t Tree) Equals(other *Tree) bool { // Insert adds a new node at the correct place in the tree. func (t *Tree) Insert(node *Node) error { - pos, _, err := t.binarySearch(node.Name) - if err == nil { - return errors.New("node already present") + pos, found := t.find(node.Name) + if found != nil { + return errors.Errorf("node %q already present", node.Name) } // https://code.google.com/p/go-wiki/wiki/SliceTricks @@ -59,16 +59,26 @@ func (t *Tree) Insert(node *Node) error { return nil } -func (t Tree) binarySearch(name string) (int, *Node, error) { +func (t *Tree) find(name string) (int, *Node) { pos := sort.Search(len(t.Nodes), func(i int) bool { return t.Nodes[i].Name >= name }) if pos < len(t.Nodes) && t.Nodes[pos].Name == name { - return pos, t.Nodes[pos], nil + return pos, t.Nodes[pos] } - return pos, nil, errors.New("named node not found") + return pos, nil +} + +// Find returns a node with the given name, or nil if none could be found. 
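`Find` (directly below) goes through `find`, which relies on the invariant that `Nodes` stays sorted by name; `Insert` maintains that order, so lookups are a plain binary search. The mechanics of `sort.Search`, with hypothetical data:

    names := []string{"bar", "baz", "foo"} // sorted, like Tree.Nodes

    // sort.Search returns the smallest index for which the predicate
    // holds; the entry exists iff that index is in range and matches
    pos := sort.Search(len(names), func(i int) bool {
    	return names[i] >= "baz"
    })
    found := pos < len(names) && names[pos] == "baz"
    // pos == 1, found == true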
+func (t *Tree) Find(name string) *Node { + if t == nil { + return nil + } + + _, node := t.find(name) + return node } // Sort sorts the nodes by name. @@ -79,7 +89,7 @@ func (t *Tree) Sort() { } // Subtrees returns a slice of all subtree IDs of the tree. -func (t Tree) Subtrees() (trees IDs) { +func (t *Tree) Subtrees() (trees IDs) { for _, node := range t.Nodes { if node.Type == "dir" && node.Subtree != nil { trees = append(trees, *node.Subtree) diff --git a/internal/test/helper.go b/internal/test/helper.go new file mode 100644 index 000000000..f0fc1f61b --- /dev/null +++ b/internal/test/helper.go @@ -0,0 +1,15 @@ +// +build go1.9 + +package test + +import "testing" + +// Helperer marks the current function as a test helper. +type Helperer interface { + Helper() +} + +// Helper returns a function that marks the current function as a helper function. +func Helper(t testing.TB) Helperer { + return t +} diff --git a/internal/test/helper_go18.go b/internal/test/helper_go18.go new file mode 100644 index 000000000..d4f8b8de6 --- /dev/null +++ b/internal/test/helper_go18.go @@ -0,0 +1,19 @@ +// +build !go1.9 + +package test + +import "testing" + +// Helperer marks the current function as a test helper. +type Helperer interface { + Helper() +} + +type fakeHelper struct{} + +func (fakeHelper) Helper() {} + +// Helper returns a function that marks the current function as a helper function. +func Helper(t testing.TB) Helperer { + return fakeHelper{} +} diff --git a/internal/ui/backup.go b/internal/ui/backup.go new file mode 100644 index 000000000..ebd56b8bc --- /dev/null +++ b/internal/ui/backup.go @@ -0,0 +1,343 @@ +package ui + +import ( + "context" + "fmt" + "os" + "sort" + "sync" + "time" + + "github.com/restic/restic/internal/archiver" + "github.com/restic/restic/internal/restic" + "github.com/restic/restic/internal/ui/termstatus" +) + +type counter struct { + Files, Dirs uint + Bytes uint64 +} + +type fileWorkerMessage struct { + filename string + done bool +} + +// Backup reports progress for the `backup` command. +type Backup struct { + *Message + *StdioWrapper + + MinUpdatePause time.Duration + + term *termstatus.Terminal + v uint + start time.Time + + totalBytes uint64 + + totalCh chan counter + processedCh chan counter + errCh chan struct{} + workerCh chan fileWorkerMessage + + summary struct { + sync.Mutex + Files, Dirs struct { + New uint + Changed uint + Unchanged uint + } + archiver.ItemStats + } +} + +// NewBackup returns a new backup progress reporter. +func NewBackup(term *termstatus.Terminal, verbosity uint) *Backup { + return &Backup{ + Message: NewMessage(term, verbosity), + StdioWrapper: NewStdioWrapper(term), + term: term, + v: verbosity, + start: time.Now(), + + // limit to 60fps by default + MinUpdatePause: time.Second / 60, + + totalCh: make(chan counter), + processedCh: make(chan counter), + errCh: make(chan struct{}), + workerCh: make(chan fileWorkerMessage), + } +} + +// Run regularly updates the status lines. It should be called in a separate +// goroutine. 
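`Run` (below) is the consumer side of the channels declared in `Backup`; the producers are the callback methods further down, invoked by the archiver. A rough wiring sketch, hedged on the `termstatus` API exactly as this PR uses it elsewhere (`New` taking the two output streams, `Terminal.Run` taking a context):

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    term := termstatus.New(os.Stdout, os.Stderr)
    go term.Run(ctx)

    progress := ui.NewBackup(term, 2) // verbosity 2 enables V-level messages
    go progress.Run(ctx)

    // the archiver would invoke these as it works:
    progress.StartFile("big.iso")
    progress.CompleteBlob("big.iso", 1<<20)

Both `Run` loops exit when the context is cancelled.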
+func (b *Backup) Run(ctx context.Context) error { + var ( + lastUpdate time.Time + total, processed counter + errors uint + started bool + currentFiles = make(map[string]struct{}) + secondsRemaining uint64 + ) + + t := time.NewTicker(time.Second) + defer t.Stop() + + for { + select { + case <-ctx.Done(): + return nil + case t, ok := <-b.totalCh: + if ok { + total = t + started = true + } else { + // scan has finished + b.totalCh = nil + b.totalBytes = total.Bytes + } + case s := <-b.processedCh: + processed.Files += s.Files + processed.Dirs += s.Dirs + processed.Bytes += s.Bytes + started = true + case <-b.errCh: + errors++ + started = true + case m := <-b.workerCh: + if m.done { + delete(currentFiles, m.filename) + } else { + currentFiles[m.filename] = struct{}{} + } + case <-t.C: + if !started { + continue + } + + if b.totalCh == nil { + secs := float64(time.Since(b.start) / time.Second) + todo := float64(total.Bytes - processed.Bytes) + secondsRemaining = uint64(secs / float64(processed.Bytes) * todo) + } + } + + // limit update frequency + if time.Since(lastUpdate) < b.MinUpdatePause { + continue + } + lastUpdate = time.Now() + + b.update(total, processed, errors, currentFiles, secondsRemaining) + } +} + +// update updates the status lines. +func (b *Backup) update(total, processed counter, errors uint, currentFiles map[string]struct{}, secs uint64) { + var status string + if total.Files == 0 && total.Dirs == 0 { + // no total count available yet + status = fmt.Sprintf("[%s] %v files, %s, %d errors", + formatDuration(time.Since(b.start)), + processed.Files, formatBytes(processed.Bytes), errors, + ) + } else { + var eta string + + if secs > 0 { + eta = fmt.Sprintf(" ETA %s", formatSeconds(secs)) + } + + // include totals + status = fmt.Sprintf("[%s] %s %v files %s, total %v files %v, %d errors%s", + formatDuration(time.Since(b.start)), + formatPercent(processed.Bytes, total.Bytes), + processed.Files, + formatBytes(processed.Bytes), + total.Files, + formatBytes(total.Bytes), + errors, + eta, + ) + } + + lines := make([]string, 0, len(currentFiles)+1) + for filename := range currentFiles { + lines = append(lines, filename) + } + sort.Sort(sort.StringSlice(lines)) + lines = append([]string{status}, lines...) + + b.term.SetStatus(lines) +} + +// ScannerError is the error callback function for the scanner, it prints the +// error in verbose mode and returns nil. +func (b *Backup) ScannerError(item string, fi os.FileInfo, err error) error { + b.V("scan: %v\n", err) + return nil +} + +// Error is the error callback function for the archiver, it prints the error and returns nil. +func (b *Backup) Error(item string, fi os.FileInfo, err error) error { + b.E("error: %v\n", err) + b.errCh <- struct{}{} + return nil +} + +// StartFile is called when a file is being processed by a worker. +func (b *Backup) StartFile(filename string) { + b.workerCh <- fileWorkerMessage{ + filename: filename, + } +} + +// CompleteBlob is called for all saved blobs for files. 
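The ETA in `Run` above is a straight-line extrapolation of the average throughput so far, remaining = elapsed / processedBytes * remainingBytes, computed only once the scanner total is known. Numerically, with hypothetical figures:

    elapsed := 120.0              // seconds since the backup started
    processed := float64(2 << 30) // 2 GiB stored so far
    total := float64(6 << 30)     // 6 GiB found by the scanner

    eta := elapsed / processed * (total - processed)
    // eta == 240: 2 GiB took 120 s, so the remaining 4 GiB need about 240 s

`CompleteBlob` (next) is one of the producers feeding `processed.Bytes`.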
+func (b *Backup) CompleteBlob(filename string, bytes uint64) { + b.processedCh <- counter{Bytes: bytes} +} + +func formatPercent(numerator uint64, denominator uint64) string { + if denominator == 0 { + return "" + } + + percent := 100.0 * float64(numerator) / float64(denominator) + + if percent > 100 { + percent = 100 + } + + return fmt.Sprintf("%3.2f%%", percent) +} + +func formatSeconds(sec uint64) string { + hours := sec / 3600 + sec -= hours * 3600 + min := sec / 60 + sec -= min * 60 + if hours > 0 { + return fmt.Sprintf("%d:%02d:%02d", hours, min, sec) + } + + return fmt.Sprintf("%d:%02d", min, sec) +} + +func formatDuration(d time.Duration) string { + sec := uint64(d / time.Second) + return formatSeconds(sec) +} + +func formatBytes(c uint64) string { + b := float64(c) + switch { + case c > 1<<40: + return fmt.Sprintf("%.3f TiB", b/(1<<40)) + case c > 1<<30: + return fmt.Sprintf("%.3f GiB", b/(1<<30)) + case c > 1<<20: + return fmt.Sprintf("%.3f MiB", b/(1<<20)) + case c > 1<<10: + return fmt.Sprintf("%.3f KiB", b/(1<<10)) + default: + return fmt.Sprintf("%d B", c) + } +} + +// CompleteItemFn is the status callback function for the archiver when a +// file/dir has been saved successfully. +func (b *Backup) CompleteItemFn(item string, previous, current *restic.Node, s archiver.ItemStats, d time.Duration) { + b.summary.Lock() + b.summary.ItemStats.Add(s) + b.summary.Unlock() + + if current == nil { + return + } + + switch current.Type { + case "file": + b.processedCh <- counter{Files: 1} + b.workerCh <- fileWorkerMessage{ + filename: item, + done: true, + } + case "dir": + b.processedCh <- counter{Dirs: 1} + } + + if current.Type == "dir" { + if previous == nil { + b.VV("new %v, saved in %.3fs (%v added, %v metadata)", item, d.Seconds(), formatBytes(s.DataSize), formatBytes(s.TreeSize)) + b.summary.Lock() + b.summary.Dirs.New++ + b.summary.Unlock() + return + } + + if previous.Equals(*current) { + b.VV("unchanged %v", item) + b.summary.Lock() + b.summary.Dirs.Unchanged++ + b.summary.Unlock() + } else { + b.VV("modified %v, saved in %.3fs (%v added, %v metadata)", item, d.Seconds(), formatBytes(s.DataSize), formatBytes(s.TreeSize)) + b.summary.Lock() + b.summary.Dirs.Changed++ + b.summary.Unlock() + } + + } else if current.Type == "file" { + + b.workerCh <- fileWorkerMessage{ + done: true, + filename: item, + } + + if previous == nil { + b.VV("new %v, saved in %.3fs (%v added)", item, d.Seconds(), formatBytes(s.DataSize)) + b.summary.Lock() + b.summary.Files.New++ + b.summary.Unlock() + return + } + + if previous.Equals(*current) { + b.VV("unchanged %v", item) + b.summary.Lock() + b.summary.Files.Unchanged++ + b.summary.Unlock() + } else { + b.VV("modified %v, saved in %.3fs (%v added)", item, d.Seconds(), formatBytes(s.DataSize)) + b.summary.Lock() + b.summary.Files.Changed++ + b.summary.Unlock() + } + } +} + +// ReportTotal sets the total stats up to now +func (b *Backup) ReportTotal(item string, s archiver.ScanStats) { + b.totalCh <- counter{Files: s.Files, Dirs: s.Dirs, Bytes: s.Bytes} + + if item == "" { + b.V("scan finished in %.3fs", time.Since(b.start).Seconds()) + close(b.totalCh) + return + } +} + +// Finish prints the finishing messages. 
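`Finish` (below) renders its summary with the helpers defined above, which use binary (1024-based) units and h:mm:ss-style durations. A few sample values, derived directly from the definitions, sketched as a package-internal test:

    func TestFormatHelpers(t *testing.T) {
    	if got := formatBytes(2048); got != "2.000 KiB" {
    		t.Errorf("formatBytes: %q", got)
    	}
    	if got := formatSeconds(3661); got != "1:01:01" {
    		t.Errorf("formatSeconds: %q", got)
    	}
    	if got := formatPercent(512, 2048); got != "25.00%" {
    		t.Errorf("formatPercent: %q", got)
    	}
    }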
+func (b *Backup) Finish() { + b.V("processed %s in %s", formatBytes(b.totalBytes), formatDuration(time.Since(b.start))) + b.V("\n") + b.V("Files: %5d new, %5d changed, %5d unmodified\n", b.summary.Files.New, b.summary.Files.Changed, b.summary.Files.Unchanged) + b.V("Dirs: %5d new, %5d changed, %5d unmodified\n", b.summary.Dirs.New, b.summary.Dirs.Changed, b.summary.Dirs.Unchanged) + b.VV("Data Blobs: %5d new\n", b.summary.ItemStats.DataBlobs) + b.VV("Tree Blobs: %5d new\n", b.summary.ItemStats.TreeBlobs) + b.V("Added: %-5s\n", formatBytes(b.summary.ItemStats.DataSize+b.summary.ItemStats.TreeSize)) + b.V("\n") +} diff --git a/internal/ui/message.go b/internal/ui/message.go new file mode 100644 index 000000000..75e54b019 --- /dev/null +++ b/internal/ui/message.go @@ -0,0 +1,45 @@ +package ui + +import "github.com/restic/restic/internal/ui/termstatus" + +// Message reports progress with messages of different verbosity. +type Message struct { + term *termstatus.Terminal + v uint +} + +// NewMessage returns a message progress reporter with underlying terminal +// term. +func NewMessage(term *termstatus.Terminal, verbosity uint) *Message { + return &Message{ + term: term, + v: verbosity, + } +} + +// E reports an error +func (m *Message) E(msg string, args ...interface{}) { + m.term.Errorf(msg, args...) +} + +// P prints a message if verbosity >= 1, this is used for normal messages which +// are not errors. +func (m *Message) P(msg string, args ...interface{}) { + if m.v >= 1 { + m.term.Printf(msg, args...) + } +} + +// V prints a message if verbosity >= 2, this is used for verbose messages. +func (m *Message) V(msg string, args ...interface{}) { + if m.v >= 2 { + m.term.Printf(msg, args...) + } +} + +// VV prints a message if verbosity >= 3, this is used for debug messages. +func (m *Message) VV(msg string, args ...interface{}) { + if m.v >= 3 { + m.term.Printf(msg, args...) + } +} diff --git a/internal/ui/stdio_wrapper.go b/internal/ui/stdio_wrapper.go new file mode 100644 index 000000000..eccaefb7b --- /dev/null +++ b/internal/ui/stdio_wrapper.go @@ -0,0 +1,86 @@ +package ui + +import ( + "bytes" + "io" + + "github.com/restic/restic/internal/ui/termstatus" +) + +// StdioWrapper provides stdout and stderr integration with termstatus. +type StdioWrapper struct { + stdout *lineWriter + stderr *lineWriter +} + +// NewStdioWrapper initializes a new stdio wrapper that can be used in place of +// os.Stdout or os.Stderr. +func NewStdioWrapper(term *termstatus.Terminal) *StdioWrapper { + return &StdioWrapper{ + stdout: newLineWriter(term.Print), + stderr: newLineWriter(term.Error), + } +} + +// Stdout returns a writer that is line buffered and can be used in place of +// os.Stdout. On Close(), the remaining bytes are written, followed by a line +// break. +func (w *StdioWrapper) Stdout() io.WriteCloser { + return w.stdout +} + +// Stderr returns a writer that is line buffered and can be used in place of +// os.Stderr. On Close(), the remaining bytes are written, followed by a line +// break. 
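+//
+// A short usage sketch (assuming an initialized *termstatus.Terminal named
+// term):
+//
+//    w := NewStdioWrapper(term)
+//    fmt.Fprint(w.Stderr(), "some error text")
+//    _ = w.Stderr().Close() // flushes "some error text\n" via term.Error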
+func (w *StdioWrapper) Stderr() io.WriteCloser { + return w.stderr +} + +type lineWriter struct { + buf *bytes.Buffer + print func(string) +} + +var _ io.WriteCloser = &lineWriter{} + +func newLineWriter(print func(string)) *lineWriter { + return &lineWriter{buf: bytes.NewBuffer(nil), print: print} +} + +func (w *lineWriter) Write(data []byte) (n int, err error) { + n, err = w.buf.Write(data) + if err != nil { + return n, err + } + + // look for line breaks + buf := w.buf.Bytes() + skip := 0 + for i := 0; i < len(buf); { + if buf[i] == '\n' { + // found line + w.print(string(buf[:i+1])) + buf = buf[i+1:] + skip += i + 1 + i = 0 + continue + } + + i++ + } + + _ = w.buf.Next(skip) + + return n, err +} + +func (w *lineWriter) Flush() error { + if w.buf.Len() > 0 { + w.print(string(append(w.buf.Bytes(), '\n'))) + } + return nil +} + +func (w *lineWriter) Close() error { + return w.Flush() +} diff --git a/internal/ui/stdio_wrapper_test.go b/internal/ui/stdio_wrapper_test.go new file mode 100644 index 000000000..fc071f992 --- /dev/null +++ b/internal/ui/stdio_wrapper_test.go @@ -0,0 +1,95 @@ +package ui + +import ( + "testing" + + "github.com/google/go-cmp/cmp" +) + +func TestStdioWrapper(t *testing.T) { + var tests = []struct { + inputs [][]byte + outputs []string + }{ + { + inputs: [][]byte{ + []byte("foo"), + }, + outputs: []string{ + "foo\n", + }, + }, + { + inputs: [][]byte{ + []byte("foo"), + []byte("bar"), + []byte("\n"), + []byte("baz"), + }, + outputs: []string{ + "foobar\n", + "baz\n", + }, + }, + { + inputs: [][]byte{ + []byte("foo"), + []byte("bar\nbaz\n"), + []byte("bump\n"), + }, + outputs: []string{ + "foobar\n", + "baz\n", + "bump\n", + }, + }, + { + inputs: [][]byte{ + []byte("foo"), + []byte("bar\nbaz\n"), + []byte("bum"), + []byte("p\nx"), + []byte("x"), + []byte("x"), + []byte("z"), + }, + outputs: []string{ + "foobar\n", + "baz\n", + "bump\n", + "xxxz\n", + }, + }, + } + + for _, test := range tests { + t.Run("", func(t *testing.T) { + var lines []string + print := func(s string) { + lines = append(lines, s) + } + + w := newLineWriter(print) + + for _, data := range test.inputs { + n, err := w.Write(data) + if err != nil { + t.Fatal(err) + } + + if n != len(data) { + t.Errorf("invalid length returned by Write, want %d, got %d", len(data), n) + } + } + + err := w.Close() + if err != nil { + t.Fatal(err) + } + + if !cmp.Equal(test.outputs, lines) { + t.Error(cmp.Diff(test.outputs, lines)) + } + }) + } +} diff --git a/internal/ui/termstatus/background.go b/internal/ui/termstatus/background.go new file mode 100644 index 000000000..e371c18df --- /dev/null +++ b/internal/ui/termstatus/background.go @@ -0,0 +1,9 @@ +// +build !linux + +package termstatus + +// IsProcessBackground reports whether the current process is running in the +// background. Not implemented for this platform. +func IsProcessBackground() bool { + return false +} diff --git a/cmd/restic/background_linux.go b/internal/ui/termstatus/background_linux.go similarity index 79% rename from cmd/restic/background_linux.go rename to internal/ui/termstatus/background_linux.go index b9a2a2f00..f99091128 100644 --- a/cmd/restic/background_linux.go +++ b/internal/ui/termstatus/background_linux.go @@ -1,4 +1,4 @@ -package main +package termstatus import ( "syscall" @@ -7,7 +7,7 @@ import ( "github.com/restic/restic/internal/debug" ) -// IsProcessBackground returns true if it is running in the background or false if not +// IsProcessBackground reports whether the current process is running in the background. 
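+// It does so by querying the foreground process group of the controlling
+// terminal via the TIOCGPGRP ioctl on stdin and comparing it to the process'
+// own group.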
func IsProcessBackground() bool { var pid int _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(syscall.Stdin), syscall.TIOCGPGRP, uintptr(unsafe.Pointer(&pid))) diff --git a/internal/ui/termstatus/status.go b/internal/ui/termstatus/status.go new file mode 100644 index 000000000..6682430e8 --- /dev/null +++ b/internal/ui/termstatus/status.go @@ -0,0 +1,293 @@ +package termstatus + +import ( + "bufio" + "bytes" + "context" + "fmt" + "io" + "os" + "strings" +) + +// Terminal is used to write messages and display status lines which can be +// updated. When the output is redirected to a file, the status lines are not +// printed. +type Terminal struct { + wr *bufio.Writer + fd uintptr + errWriter io.Writer + buf *bytes.Buffer + msg chan message + status chan status + canUpdateStatus bool + clearLines clearLinesFunc +} + +type clearLinesFunc func(wr io.Writer, fd uintptr, n int) + +type message struct { + line string + err bool +} + +type status struct { + lines []string +} + +type fder interface { + Fd() uintptr +} + +// New returns a new Terminal for wr. A goroutine is started to update the +// terminal. It is terminated when ctx is cancelled. When wr is redirected to +// a file (e.g. via shell output redirection) or is just an io.Writer (not the +// open *os.File for stdout), no status lines are printed. The status lines and +// normal output (via Print/Printf) are written to wr, error messages are +// written to errWriter. +func New(wr io.Writer, errWriter io.Writer) *Terminal { + t := &Terminal{ + wr: bufio.NewWriter(wr), + errWriter: errWriter, + buf: bytes.NewBuffer(nil), + msg: make(chan message), + status: make(chan status), + } + + if d, ok := wr.(fder); ok && canUpdateStatus(d.Fd()) { + // only use the fancy status code when we're running on a real terminal. + t.canUpdateStatus = true + t.fd = d.Fd() + t.clearLines = clearLines(wr, t.fd) + } + + return t +} + +// Run updates the screen. It should be run in a separate goroutine. When +// ctx is cancelled, the status lines are cleanly removed. +func (t *Terminal) Run(ctx context.Context) { + if t.canUpdateStatus { + t.run(ctx) + return + } + + t.runWithoutStatus(ctx) +} + +func countLines(buf []byte) int { + lines := 0 + sc := bufio.NewScanner(bytes.NewReader(buf)) + for sc.Scan() { + lines++ + } + return lines +} + +type stringWriter interface { + WriteString(string) (int, error) +} + +// run listens on the channels and updates the terminal screen. +func (t *Terminal) run(ctx context.Context) { + statusBuf := bytes.NewBuffer(nil) + statusLines := 0 + for { + select { + case <-ctx.Done(): + if IsProcessBackground() { + // ignore all messages, do nothing, we are in the background process group + continue + } + t.undoStatus(statusLines) + + err := t.wr.Flush() + if err != nil { + fmt.Fprintf(os.Stderr, "flush failed: %v\n", err) + } + + return + + case msg := <-t.msg: + if IsProcessBackground() { + // ignore all messages, do nothing, we are in the background process group + continue + } + t.undoStatus(statusLines) + + var dst io.Writer + if msg.err { + dst = t.errWriter + + // assume t.wr and t.errWriter are different, so we need to + // flush the removal of the status lines first. 
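+    // Otherwise the unbuffered error text could reach the
+    // terminal before the buffered clear sequence does.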
+ err := t.wr.Flush() + if err != nil { + fmt.Fprintf(os.Stderr, "flush failed: %v\n", err) + } + } else { + dst = t.wr + } + + var err error + if w, ok := dst.(stringWriter); ok { + _, err = w.WriteString(msg.line) + } else { + _, err = dst.Write([]byte(msg.line)) + } + + if err != nil { + fmt.Fprintf(os.Stderr, "write failed: %v\n", err) + continue + } + + _, err = t.wr.Write(statusBuf.Bytes()) + if err != nil { + fmt.Fprintf(os.Stderr, "write failed: %v\n", err) + } + + err = t.wr.Flush() + if err != nil { + fmt.Fprintf(os.Stderr, "flush failed: %v\n", err) + } + + case stat := <-t.status: + if IsProcessBackground() { + // ignore all messages, do nothing, we are in the background process group + continue + } + t.undoStatus(statusLines) + + statusBuf.Reset() + for _, line := range stat.lines { + statusBuf.WriteString(line) + } + statusLines = len(stat.lines) + + _, err := t.wr.Write(statusBuf.Bytes()) + if err != nil { + fmt.Fprintf(os.Stderr, "write failed: %v\n", err) + } + + err = t.wr.Flush() + if err != nil { + fmt.Fprintf(os.Stderr, "flush failed: %v\n", err) + } + } + } +} + +// runWithoutStatus listens on the channels and just prints out the messages, +// without status lines. +func (t *Terminal) runWithoutStatus(ctx context.Context) { + for { + select { + case <-ctx.Done(): + return + case msg := <-t.msg: + var err error + var flush func() error + + var dst io.Writer + if msg.err { + dst = t.errWriter + } else { + dst = t.wr + flush = t.wr.Flush + } + + if w, ok := dst.(stringWriter); ok { + _, err = w.WriteString(msg.line) + } else { + _, err = dst.Write([]byte(msg.line)) + } + + if err != nil { + fmt.Fprintf(os.Stderr, "write failed: %v\n", err) + } + + if flush == nil { + continue + } + + err = flush() + if err != nil { + fmt.Fprintf(os.Stderr, "flush failed: %v\n", err) + } + + case _ = <-t.status: + // discard status lines + } + } +} + +func (t *Terminal) undoStatus(lines int) { + if lines == 0 { + return + } + + lines-- + t.clearLines(t.wr, t.fd, lines) +} + +// Print writes a line to the terminal. +func (t *Terminal) Print(line string) { + // make sure the line ends with a line break + if line[len(line)-1] != '\n' { + line += "\n" + } + + t.msg <- message{line: line} +} + +// Printf uses fmt.Sprintf to write a line to the terminal. +func (t *Terminal) Printf(msg string, args ...interface{}) { + s := fmt.Sprintf(msg, args...) + t.Print(s) +} + +// Error writes an error to the terminal. +func (t *Terminal) Error(line string) { + // make sure the line ends with a line break + if line[len(line)-1] != '\n' { + line += "\n" + } + + t.msg <- message{line: line, err: true} +} + +// Errorf uses fmt.Sprintf to write an error line to the terminal. +func (t *Terminal) Errorf(msg string, args ...interface{}) { + s := fmt.Sprintf(msg, args...) + t.Error(s) +} + +// SetStatus updates the status lines. 
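+//
+// Lines are truncated to the terminal width; every line but the last is
+// terminated with a line break. A sketch of a typical call:
+//
+//    term.SetStatus([]string{
+//        "[0:05] 12.34% 42 files 1.2 MiB, total 300 files 9.8 MiB, 0 errors",
+//        "current/file.txt",
+//    })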
+func (t *Terminal) SetStatus(lines []string) {
+    if len(lines) == 0 {
+        return
+    }
+
+    width, _, err := getTermSize(t.fd)
+    if err != nil || width <= 0 {
+        // use 80 columns by default (a width of zero would make the
+        // truncation below panic)
+        width = 80
+    }
+
+    // make sure that all lines have a line break and are not too long
+    for i, line := range lines {
+        line = strings.TrimRight(line, "\n")
+
+        if len(line) >= width-2 {
+            line = line[:width-2]
+        }
+        line += "\n"
+        lines[i] = line
+    }
+
+    // make sure the last line does not have a line break
+    last := len(lines) - 1
+    lines[last] = strings.TrimRight(lines[last], "\n")
+
+    t.status <- status{lines: lines}
+}
diff --git a/internal/ui/termstatus/terminal_posix.go b/internal/ui/termstatus/terminal_posix.go
new file mode 100644
index 000000000..6b86e0d43
--- /dev/null
+++ b/internal/ui/termstatus/terminal_posix.go
@@ -0,0 +1,33 @@
+package termstatus
+
+import (
+    "fmt"
+    "io"
+    "os"
+)
+
+const (
+    posixMoveCursorHome = "\r"
+    posixMoveCursorUp   = "\x1b[1A"
+    posixClearLine      = "\x1b[2K"
+)
+
+// posixClearLines will clear the current line and the n lines above.
+// Afterwards the cursor is positioned at the start of the first cleared line.
+func posixClearLines(wr io.Writer, fd uintptr, n int) {
+    // clear current line
+    _, err := wr.Write([]byte(posixMoveCursorHome + posixClearLine))
+    if err != nil {
+        fmt.Fprintf(os.Stderr, "write failed: %v\n", err)
+        return
+    }
+
+    for ; n > 0; n-- {
+        // clear current line and move one line up
+        _, err := wr.Write([]byte(posixMoveCursorUp + posixClearLine))
+        if err != nil {
+            fmt.Fprintf(os.Stderr, "write failed: %v\n", err)
+            return
+        }
+    }
+}
diff --git a/internal/ui/termstatus/terminal_unix.go b/internal/ui/termstatus/terminal_unix.go
new file mode 100644
index 000000000..52db49a17
--- /dev/null
+++ b/internal/ui/termstatus/terminal_unix.go
@@ -0,0 +1,34 @@
+// +build !windows
+
+package termstatus
+
+import (
+    "io"
+    "syscall"
+    "unsafe"
+
+    isatty "github.com/mattn/go-isatty"
+)
+
+// clearLines returns the function used to clear the current line and the n
+// lines above it.
+func clearLines(wr io.Writer, fd uintptr) clearLinesFunc {
+    return posixClearLines
+}
+
+// canUpdateStatus returns true if status lines can be printed, i.e. the
+// process output is not redirected to a file or pipe.
+func canUpdateStatus(fd uintptr) bool {
+    return isatty.IsTerminal(fd)
+}
+
+// getTermSize returns the dimensions of the given terminal.
+// The code is taken from "golang.org/x/crypto/ssh/terminal".
+func getTermSize(fd uintptr) (width, height int, err error) {
+    var dimensions [4]uint16
+
+    if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(&dimensions)), 0, 0, 0); err != 0 {
+        return -1, -1, err
+    }
+    return int(dimensions[1]), int(dimensions[0]), nil
+}
diff --git a/internal/ui/termstatus/terminal_windows.go b/internal/ui/termstatus/terminal_windows.go
new file mode 100644
index 000000000..56910c67e
--- /dev/null
+++ b/internal/ui/termstatus/terminal_windows.go
@@ -0,0 +1,131 @@
+// +build windows
+
+package termstatus
+
+import (
+    "io"
+    "syscall"
+    "unsafe"
+)
+
+// clearLines returns the function used to clear the current line and the n
+// lines above it.
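+// On Windows the strategy depends on what fd refers to: the console API is
+// used for a classic Windows console (cmd, powershell), ANSI escape sequences
+// when the output is a pipe (assumed to be a mintty/cygwin terminal), and a
+// no-op function otherwise.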
+func clearLines(wr io.Writer, fd uintptr) clearLinesFunc { + // easy case, the terminal is cmd or psh, without redirection + if isWindowsTerminal(fd) { + return windowsClearLines + } + + // check if the output file type is a pipe (0x0003) + if getFileType(fd) != fileTypePipe { + // return empty func, update state is not possible on this terminal + return func(io.Writer, uintptr, int) {} + } + + // assume we're running in mintty/cygwin + return posixClearLines +} + +var kernel32 = syscall.NewLazyDLL("kernel32.dll") + +var ( + procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo") + procSetConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition") + procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW") + procFillConsoleOutputAttribute = kernel32.NewProc("FillConsoleOutputAttribute") + procGetConsoleMode = kernel32.NewProc("GetConsoleMode") + procGetFileType = kernel32.NewProc("GetFileType") +) + +type ( + short int16 + word uint16 + dword uint32 + + coord struct { + x short + y short + } + smallRect struct { + left short + top short + right short + bottom short + } + consoleScreenBufferInfo struct { + size coord + cursorPosition coord + attributes word + window smallRect + maximumWindowSize coord + } +) + +// windowsClearLines clears the current line and n lines above it. +func windowsClearLines(wr io.Writer, fd uintptr, n int) { + var info consoleScreenBufferInfo + procGetConsoleScreenBufferInfo.Call(fd, uintptr(unsafe.Pointer(&info))) + + for i := 0; i <= n; i++ { + // clear the line + cursor := coord{ + x: info.window.left, + y: info.cursorPosition.y - short(i), + } + var count, w dword + count = dword(info.size.x) + procFillConsoleOutputAttribute.Call(fd, uintptr(info.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&w))) + procFillConsoleOutputCharacter.Call(fd, uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&w))) + } + + // move cursor up by n lines and to the first column + info.cursorPosition.y -= short(n) + info.cursorPosition.x = 0 + procSetConsoleCursorPosition.Call(fd, uintptr(*(*int32)(unsafe.Pointer(&info.cursorPosition)))) +} + +// getTermSize returns the dimensions of the given terminal. +// the code is taken from "golang.org/x/crypto/ssh/terminal" +func getTermSize(fd uintptr) (width, height int, err error) { + var info consoleScreenBufferInfo + _, _, e := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, fd, uintptr(unsafe.Pointer(&info)), 0) + if e != 0 { + return 0, 0, error(e) + } + return int(info.size.x), int(info.size.y), nil +} + +// isWindowsTerminal return true if the file descriptor is a windows terminal (cmd, psh). +func isWindowsTerminal(fd uintptr) bool { + var st uint32 + r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0) + return r != 0 && e == 0 +} + +const fileTypePipe = 0x0003 + +// getFileType returns the file type for the given fd. +// https://msdn.microsoft.com/de-de/library/windows/desktop/aa364960(v=vs.85).aspx +func getFileType(fd uintptr) int { + r, _, e := syscall.Syscall(procGetFileType.Addr(), 1, fd, 0, 0) + if e != 0 { + return 0 + } + return int(r) +} + +// canUpdateStatus returns true if status lines can be printed, the process +// output is not redirected to a file or pipe. 
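+// On Windows, however, a pipe is still accepted, because mintty/cygwin
+// terminals present themselves to the process as pipes.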
+func canUpdateStatus(fd uintptr) bool { + // easy case, the terminal is cmd or psh, without redirection + if isWindowsTerminal(fd) { + return true + } + + // check if the output file type is a pipe (0x0003) + if getFileType(fd) != fileTypePipe { + return false + } + + // assume we're running in mintty/cygwin + return true +} diff --git a/internal/walk/testdata/walktree-test-repo.tar.gz b/internal/walk/testdata/walktree-test-repo.tar.gz deleted file mode 100644 index 5f3d19c10..000000000 Binary files a/internal/walk/testdata/walktree-test-repo.tar.gz and /dev/null differ diff --git a/internal/walk/walk.go b/internal/walk/walk.go deleted file mode 100644 index 41618830a..000000000 --- a/internal/walk/walk.go +++ /dev/null @@ -1,197 +0,0 @@ -package walk - -import ( - "context" - "fmt" - "os" - "path/filepath" - "sync" - - "github.com/restic/restic/internal/debug" - "github.com/restic/restic/internal/restic" -) - -// TreeJob is a job sent from the tree walker. -type TreeJob struct { - Path string - Error error - - Node *restic.Node - Tree *restic.Tree -} - -// TreeWalker traverses a tree in the repository depth-first and sends a job -// for each item (file or dir) that it encounters. -type TreeWalker struct { - ch chan<- loadTreeJob - out chan<- TreeJob -} - -// NewTreeWalker uses ch to load trees from the repository and sends jobs to -// out. -func NewTreeWalker(ch chan<- loadTreeJob, out chan<- TreeJob) *TreeWalker { - return &TreeWalker{ch: ch, out: out} -} - -// Walk starts walking the tree given by id. When the channel done is closed, -// processing stops. -func (tw *TreeWalker) Walk(ctx context.Context, path string, id restic.ID) { - debug.Log("starting on tree %v for %v", id, path) - defer debug.Log("done walking tree %v for %v", id, path) - - resCh := make(chan loadTreeResult, 1) - tw.ch <- loadTreeJob{ - id: id, - res: resCh, - } - - res := <-resCh - if res.err != nil { - select { - case tw.out <- TreeJob{Path: path, Error: res.err}: - case <-ctx.Done(): - return - } - return - } - - tw.walk(ctx, path, res.tree) - - select { - case tw.out <- TreeJob{Path: path, Tree: res.tree}: - case <-ctx.Done(): - return - } -} - -func (tw *TreeWalker) walk(ctx context.Context, path string, tree *restic.Tree) { - debug.Log("start on %q", path) - defer debug.Log("done for %q", path) - - debug.Log("tree %#v", tree) - - // load all subtrees in parallel - results := make([]<-chan loadTreeResult, len(tree.Nodes)) - for i, node := range tree.Nodes { - if node.Type == "dir" { - resCh := make(chan loadTreeResult, 1) - tw.ch <- loadTreeJob{ - id: *node.Subtree, - res: resCh, - } - - results[i] = resCh - } - } - - for i, node := range tree.Nodes { - p := filepath.Join(path, node.Name) - var job TreeJob - - if node.Type == "dir" { - if results[i] == nil { - panic("result chan should not be nil") - } - - res := <-results[i] - if res.err == nil { - tw.walk(ctx, p, res.tree) - } else { - fmt.Fprintf(os.Stderr, "error loading tree: %v\n", res.err) - } - - job = TreeJob{Path: p, Tree: res.tree, Error: res.err} - } else { - job = TreeJob{Path: p, Node: node} - } - - select { - case tw.out <- job: - case <-ctx.Done(): - return - } - } -} - -type loadTreeResult struct { - tree *restic.Tree - err error -} - -type loadTreeJob struct { - id restic.ID - res chan<- loadTreeResult -} - -type treeLoader func(restic.ID) (*restic.Tree, error) - -func loadTreeWorker(ctx context.Context, wg *sync.WaitGroup, in <-chan loadTreeJob, load treeLoader) { - debug.Log("start") - defer debug.Log("exit") - defer wg.Done() - - for { - 
select { - case <-ctx.Done(): - debug.Log("done channel closed") - return - case job, ok := <-in: - if !ok { - debug.Log("input channel closed, exiting") - return - } - - debug.Log("received job to load tree %v", job.id) - tree, err := load(job.id) - - debug.Log("tree %v loaded, error %v", job.id, err) - - select { - case job.res <- loadTreeResult{tree, err}: - debug.Log("job result sent") - case <-ctx.Done(): - debug.Log("done channel closed before result could be sent") - return - } - } - } -} - -// TreeLoader loads tree objects. -type TreeLoader interface { - LoadTree(context.Context, restic.ID) (*restic.Tree, error) -} - -const loadTreeWorkers = 10 - -// Tree walks the tree specified by id recursively and sends a job for each -// file and directory it finds. When the channel done is closed, processing -// stops. -func Tree(ctx context.Context, repo TreeLoader, id restic.ID, jobCh chan<- TreeJob) { - debug.Log("start on %v, start workers", id) - - load := func(id restic.ID) (*restic.Tree, error) { - tree, err := repo.LoadTree(ctx, id) - if err != nil { - return nil, err - } - return tree, nil - } - - ch := make(chan loadTreeJob) - - var wg sync.WaitGroup - for i := 0; i < loadTreeWorkers; i++ { - wg.Add(1) - go loadTreeWorker(ctx, &wg, ch, load) - } - - tw := NewTreeWalker(ch, jobCh) - tw.Walk(ctx, "", id) - close(jobCh) - - close(ch) - wg.Wait() - - debug.Log("done") -} diff --git a/internal/walk/walk_test.go b/internal/walk/walk_test.go deleted file mode 100644 index b67ae9151..000000000 --- a/internal/walk/walk_test.go +++ /dev/null @@ -1,1394 +0,0 @@ -package walk_test - -import ( - "context" - "os" - "path/filepath" - "strings" - "testing" - "time" - - "github.com/restic/restic/internal/archiver" - "github.com/restic/restic/internal/pipe" - "github.com/restic/restic/internal/repository" - "github.com/restic/restic/internal/restic" - rtest "github.com/restic/restic/internal/test" - "github.com/restic/restic/internal/walk" -) - -func TestWalkTree(t *testing.T) { - repo, cleanup := repository.TestRepository(t) - defer cleanup() - - dirs, err := filepath.Glob(rtest.TestWalkerPath) - rtest.OK(t, err) - - // archive a few files - arch := archiver.New(repo) - sn, _, err := arch.Snapshot(context.TODO(), nil, dirs, nil, "localhost", nil, time.Now()) - rtest.OK(t, err) - - // flush repo, write all packs - rtest.OK(t, repo.Flush(context.Background())) - - // start tree walker - treeJobs := make(chan walk.TreeJob) - go walk.Tree(context.TODO(), repo, *sn.Tree, treeJobs) - - // start filesystem walker - fsJobs := make(chan pipe.Job) - resCh := make(chan pipe.Result, 1) - - f := func(string, os.FileInfo) bool { - return true - } - go pipe.Walk(context.TODO(), dirs, f, fsJobs, resCh) - - for { - // receive fs job - fsJob, fsChOpen := <-fsJobs - rtest.Assert(t, !fsChOpen || fsJob != nil, - "received nil job from filesystem: %v %v", fsJob, fsChOpen) - if fsJob != nil { - rtest.OK(t, fsJob.Error()) - } - - var path string - fsEntries := 1 - switch j := fsJob.(type) { - case pipe.Dir: - path = j.Path() - fsEntries = len(j.Entries) - case pipe.Entry: - path = j.Path() - } - - // receive tree job - treeJob, treeChOpen := <-treeJobs - treeEntries := 1 - - rtest.OK(t, treeJob.Error) - - if treeJob.Tree != nil { - treeEntries = len(treeJob.Tree.Nodes) - } - - rtest.Assert(t, fsChOpen == treeChOpen, - "one channel closed too early: fsChOpen %v, treeChOpen %v", - fsChOpen, treeChOpen) - - if !fsChOpen || !treeChOpen { - break - } - - rtest.Assert(t, filepath.Base(path) == filepath.Base(treeJob.Path), - 
"paths do not match: %q != %q", filepath.Base(path), filepath.Base(treeJob.Path)) - - rtest.Assert(t, fsEntries == treeEntries, - "wrong number of entries: %v != %v", fsEntries, treeEntries) - } -} - -type delayRepo struct { - repo restic.Repository - delay time.Duration -} - -func (d delayRepo) LoadTree(ctx context.Context, id restic.ID) (*restic.Tree, error) { - time.Sleep(d.delay) - return d.repo.LoadTree(ctx, id) -} - -var repoFixture = filepath.Join("testdata", "walktree-test-repo.tar.gz") - -var walktreeTestItems = []string{ - "testdata/0/0/0/0", - "testdata/0/0/0/1", - "testdata/0/0/0/10", - "testdata/0/0/0/100", - "testdata/0/0/0/101", - "testdata/0/0/0/102", - "testdata/0/0/0/103", - "testdata/0/0/0/104", - "testdata/0/0/0/105", - "testdata/0/0/0/106", - "testdata/0/0/0/107", - "testdata/0/0/0/108", - "testdata/0/0/0/109", - "testdata/0/0/0/11", - "testdata/0/0/0/110", - "testdata/0/0/0/111", - "testdata/0/0/0/112", - "testdata/0/0/0/113", - "testdata/0/0/0/114", - "testdata/0/0/0/115", - "testdata/0/0/0/116", - "testdata/0/0/0/117", - "testdata/0/0/0/118", - "testdata/0/0/0/119", - "testdata/0/0/0/12", - "testdata/0/0/0/120", - "testdata/0/0/0/121", - "testdata/0/0/0/122", - "testdata/0/0/0/123", - "testdata/0/0/0/124", - "testdata/0/0/0/125", - "testdata/0/0/0/126", - "testdata/0/0/0/127", - "testdata/0/0/0/13", - "testdata/0/0/0/14", - "testdata/0/0/0/15", - "testdata/0/0/0/16", - "testdata/0/0/0/17", - "testdata/0/0/0/18", - "testdata/0/0/0/19", - "testdata/0/0/0/2", - "testdata/0/0/0/20", - "testdata/0/0/0/21", - "testdata/0/0/0/22", - "testdata/0/0/0/23", - "testdata/0/0/0/24", - "testdata/0/0/0/25", - "testdata/0/0/0/26", - "testdata/0/0/0/27", - "testdata/0/0/0/28", - "testdata/0/0/0/29", - "testdata/0/0/0/3", - "testdata/0/0/0/30", - "testdata/0/0/0/31", - "testdata/0/0/0/32", - "testdata/0/0/0/33", - "testdata/0/0/0/34", - "testdata/0/0/0/35", - "testdata/0/0/0/36", - "testdata/0/0/0/37", - "testdata/0/0/0/38", - "testdata/0/0/0/39", - "testdata/0/0/0/4", - "testdata/0/0/0/40", - "testdata/0/0/0/41", - "testdata/0/0/0/42", - "testdata/0/0/0/43", - "testdata/0/0/0/44", - "testdata/0/0/0/45", - "testdata/0/0/0/46", - "testdata/0/0/0/47", - "testdata/0/0/0/48", - "testdata/0/0/0/49", - "testdata/0/0/0/5", - "testdata/0/0/0/50", - "testdata/0/0/0/51", - "testdata/0/0/0/52", - "testdata/0/0/0/53", - "testdata/0/0/0/54", - "testdata/0/0/0/55", - "testdata/0/0/0/56", - "testdata/0/0/0/57", - "testdata/0/0/0/58", - "testdata/0/0/0/59", - "testdata/0/0/0/6", - "testdata/0/0/0/60", - "testdata/0/0/0/61", - "testdata/0/0/0/62", - "testdata/0/0/0/63", - "testdata/0/0/0/64", - "testdata/0/0/0/65", - "testdata/0/0/0/66", - "testdata/0/0/0/67", - "testdata/0/0/0/68", - "testdata/0/0/0/69", - "testdata/0/0/0/7", - "testdata/0/0/0/70", - "testdata/0/0/0/71", - "testdata/0/0/0/72", - "testdata/0/0/0/73", - "testdata/0/0/0/74", - "testdata/0/0/0/75", - "testdata/0/0/0/76", - "testdata/0/0/0/77", - "testdata/0/0/0/78", - "testdata/0/0/0/79", - "testdata/0/0/0/8", - "testdata/0/0/0/80", - "testdata/0/0/0/81", - "testdata/0/0/0/82", - "testdata/0/0/0/83", - "testdata/0/0/0/84", - "testdata/0/0/0/85", - "testdata/0/0/0/86", - "testdata/0/0/0/87", - "testdata/0/0/0/88", - "testdata/0/0/0/89", - "testdata/0/0/0/9", - "testdata/0/0/0/90", - "testdata/0/0/0/91", - "testdata/0/0/0/92", - "testdata/0/0/0/93", - "testdata/0/0/0/94", - "testdata/0/0/0/95", - "testdata/0/0/0/96", - "testdata/0/0/0/97", - "testdata/0/0/0/98", - "testdata/0/0/0/99", - "testdata/0/0/0", - "testdata/0/0/1/0", - 
"testdata/0/0/1/1", - "testdata/0/0/1/10", - "testdata/0/0/1/100", - "testdata/0/0/1/101", - "testdata/0/0/1/102", - "testdata/0/0/1/103", - "testdata/0/0/1/104", - "testdata/0/0/1/105", - "testdata/0/0/1/106", - "testdata/0/0/1/107", - "testdata/0/0/1/108", - "testdata/0/0/1/109", - "testdata/0/0/1/11", - "testdata/0/0/1/110", - "testdata/0/0/1/111", - "testdata/0/0/1/112", - "testdata/0/0/1/113", - "testdata/0/0/1/114", - "testdata/0/0/1/115", - "testdata/0/0/1/116", - "testdata/0/0/1/117", - "testdata/0/0/1/118", - "testdata/0/0/1/119", - "testdata/0/0/1/12", - "testdata/0/0/1/120", - "testdata/0/0/1/121", - "testdata/0/0/1/122", - "testdata/0/0/1/123", - "testdata/0/0/1/124", - "testdata/0/0/1/125", - "testdata/0/0/1/126", - "testdata/0/0/1/127", - "testdata/0/0/1/13", - "testdata/0/0/1/14", - "testdata/0/0/1/15", - "testdata/0/0/1/16", - "testdata/0/0/1/17", - "testdata/0/0/1/18", - "testdata/0/0/1/19", - "testdata/0/0/1/2", - "testdata/0/0/1/20", - "testdata/0/0/1/21", - "testdata/0/0/1/22", - "testdata/0/0/1/23", - "testdata/0/0/1/24", - "testdata/0/0/1/25", - "testdata/0/0/1/26", - "testdata/0/0/1/27", - "testdata/0/0/1/28", - "testdata/0/0/1/29", - "testdata/0/0/1/3", - "testdata/0/0/1/30", - "testdata/0/0/1/31", - "testdata/0/0/1/32", - "testdata/0/0/1/33", - "testdata/0/0/1/34", - "testdata/0/0/1/35", - "testdata/0/0/1/36", - "testdata/0/0/1/37", - "testdata/0/0/1/38", - "testdata/0/0/1/39", - "testdata/0/0/1/4", - "testdata/0/0/1/40", - "testdata/0/0/1/41", - "testdata/0/0/1/42", - "testdata/0/0/1/43", - "testdata/0/0/1/44", - "testdata/0/0/1/45", - "testdata/0/0/1/46", - "testdata/0/0/1/47", - "testdata/0/0/1/48", - "testdata/0/0/1/49", - "testdata/0/0/1/5", - "testdata/0/0/1/50", - "testdata/0/0/1/51", - "testdata/0/0/1/52", - "testdata/0/0/1/53", - "testdata/0/0/1/54", - "testdata/0/0/1/55", - "testdata/0/0/1/56", - "testdata/0/0/1/57", - "testdata/0/0/1/58", - "testdata/0/0/1/59", - "testdata/0/0/1/6", - "testdata/0/0/1/60", - "testdata/0/0/1/61", - "testdata/0/0/1/62", - "testdata/0/0/1/63", - "testdata/0/0/1/64", - "testdata/0/0/1/65", - "testdata/0/0/1/66", - "testdata/0/0/1/67", - "testdata/0/0/1/68", - "testdata/0/0/1/69", - "testdata/0/0/1/7", - "testdata/0/0/1/70", - "testdata/0/0/1/71", - "testdata/0/0/1/72", - "testdata/0/0/1/73", - "testdata/0/0/1/74", - "testdata/0/0/1/75", - "testdata/0/0/1/76", - "testdata/0/0/1/77", - "testdata/0/0/1/78", - "testdata/0/0/1/79", - "testdata/0/0/1/8", - "testdata/0/0/1/80", - "testdata/0/0/1/81", - "testdata/0/0/1/82", - "testdata/0/0/1/83", - "testdata/0/0/1/84", - "testdata/0/0/1/85", - "testdata/0/0/1/86", - "testdata/0/0/1/87", - "testdata/0/0/1/88", - "testdata/0/0/1/89", - "testdata/0/0/1/9", - "testdata/0/0/1/90", - "testdata/0/0/1/91", - "testdata/0/0/1/92", - "testdata/0/0/1/93", - "testdata/0/0/1/94", - "testdata/0/0/1/95", - "testdata/0/0/1/96", - "testdata/0/0/1/97", - "testdata/0/0/1/98", - "testdata/0/0/1/99", - "testdata/0/0/1", - "testdata/0/0/2/0", - "testdata/0/0/2/1", - "testdata/0/0/2/10", - "testdata/0/0/2/100", - "testdata/0/0/2/101", - "testdata/0/0/2/102", - "testdata/0/0/2/103", - "testdata/0/0/2/104", - "testdata/0/0/2/105", - "testdata/0/0/2/106", - "testdata/0/0/2/107", - "testdata/0/0/2/108", - "testdata/0/0/2/109", - "testdata/0/0/2/11", - "testdata/0/0/2/110", - "testdata/0/0/2/111", - "testdata/0/0/2/112", - "testdata/0/0/2/113", - "testdata/0/0/2/114", - "testdata/0/0/2/115", - "testdata/0/0/2/116", - "testdata/0/0/2/117", - "testdata/0/0/2/118", - "testdata/0/0/2/119", - "testdata/0/0/2/12", - 
"testdata/0/0/2/120", - "testdata/0/0/2/121", - "testdata/0/0/2/122", - "testdata/0/0/2/123", - "testdata/0/0/2/124", - "testdata/0/0/2/125", - "testdata/0/0/2/126", - "testdata/0/0/2/127", - "testdata/0/0/2/13", - "testdata/0/0/2/14", - "testdata/0/0/2/15", - "testdata/0/0/2/16", - "testdata/0/0/2/17", - "testdata/0/0/2/18", - "testdata/0/0/2/19", - "testdata/0/0/2/2", - "testdata/0/0/2/20", - "testdata/0/0/2/21", - "testdata/0/0/2/22", - "testdata/0/0/2/23", - "testdata/0/0/2/24", - "testdata/0/0/2/25", - "testdata/0/0/2/26", - "testdata/0/0/2/27", - "testdata/0/0/2/28", - "testdata/0/0/2/29", - "testdata/0/0/2/3", - "testdata/0/0/2/30", - "testdata/0/0/2/31", - "testdata/0/0/2/32", - "testdata/0/0/2/33", - "testdata/0/0/2/34", - "testdata/0/0/2/35", - "testdata/0/0/2/36", - "testdata/0/0/2/37", - "testdata/0/0/2/38", - "testdata/0/0/2/39", - "testdata/0/0/2/4", - "testdata/0/0/2/40", - "testdata/0/0/2/41", - "testdata/0/0/2/42", - "testdata/0/0/2/43", - "testdata/0/0/2/44", - "testdata/0/0/2/45", - "testdata/0/0/2/46", - "testdata/0/0/2/47", - "testdata/0/0/2/48", - "testdata/0/0/2/49", - "testdata/0/0/2/5", - "testdata/0/0/2/50", - "testdata/0/0/2/51", - "testdata/0/0/2/52", - "testdata/0/0/2/53", - "testdata/0/0/2/54", - "testdata/0/0/2/55", - "testdata/0/0/2/56", - "testdata/0/0/2/57", - "testdata/0/0/2/58", - "testdata/0/0/2/59", - "testdata/0/0/2/6", - "testdata/0/0/2/60", - "testdata/0/0/2/61", - "testdata/0/0/2/62", - "testdata/0/0/2/63", - "testdata/0/0/2/64", - "testdata/0/0/2/65", - "testdata/0/0/2/66", - "testdata/0/0/2/67", - "testdata/0/0/2/68", - "testdata/0/0/2/69", - "testdata/0/0/2/7", - "testdata/0/0/2/70", - "testdata/0/0/2/71", - "testdata/0/0/2/72", - "testdata/0/0/2/73", - "testdata/0/0/2/74", - "testdata/0/0/2/75", - "testdata/0/0/2/76", - "testdata/0/0/2/77", - "testdata/0/0/2/78", - "testdata/0/0/2/79", - "testdata/0/0/2/8", - "testdata/0/0/2/80", - "testdata/0/0/2/81", - "testdata/0/0/2/82", - "testdata/0/0/2/83", - "testdata/0/0/2/84", - "testdata/0/0/2/85", - "testdata/0/0/2/86", - "testdata/0/0/2/87", - "testdata/0/0/2/88", - "testdata/0/0/2/89", - "testdata/0/0/2/9", - "testdata/0/0/2/90", - "testdata/0/0/2/91", - "testdata/0/0/2/92", - "testdata/0/0/2/93", - "testdata/0/0/2/94", - "testdata/0/0/2/95", - "testdata/0/0/2/96", - "testdata/0/0/2/97", - "testdata/0/0/2/98", - "testdata/0/0/2/99", - "testdata/0/0/2", - "testdata/0/0/3/0", - "testdata/0/0/3/1", - "testdata/0/0/3/10", - "testdata/0/0/3/100", - "testdata/0/0/3/101", - "testdata/0/0/3/102", - "testdata/0/0/3/103", - "testdata/0/0/3/104", - "testdata/0/0/3/105", - "testdata/0/0/3/106", - "testdata/0/0/3/107", - "testdata/0/0/3/108", - "testdata/0/0/3/109", - "testdata/0/0/3/11", - "testdata/0/0/3/110", - "testdata/0/0/3/111", - "testdata/0/0/3/112", - "testdata/0/0/3/113", - "testdata/0/0/3/114", - "testdata/0/0/3/115", - "testdata/0/0/3/116", - "testdata/0/0/3/117", - "testdata/0/0/3/118", - "testdata/0/0/3/119", - "testdata/0/0/3/12", - "testdata/0/0/3/120", - "testdata/0/0/3/121", - "testdata/0/0/3/122", - "testdata/0/0/3/123", - "testdata/0/0/3/124", - "testdata/0/0/3/125", - "testdata/0/0/3/126", - "testdata/0/0/3/127", - "testdata/0/0/3/13", - "testdata/0/0/3/14", - "testdata/0/0/3/15", - "testdata/0/0/3/16", - "testdata/0/0/3/17", - "testdata/0/0/3/18", - "testdata/0/0/3/19", - "testdata/0/0/3/2", - "testdata/0/0/3/20", - "testdata/0/0/3/21", - "testdata/0/0/3/22", - "testdata/0/0/3/23", - "testdata/0/0/3/24", - "testdata/0/0/3/25", - "testdata/0/0/3/26", - "testdata/0/0/3/27", - 
"testdata/0/0/3/28", - "testdata/0/0/3/29", - "testdata/0/0/3/3", - "testdata/0/0/3/30", - "testdata/0/0/3/31", - "testdata/0/0/3/32", - "testdata/0/0/3/33", - "testdata/0/0/3/34", - "testdata/0/0/3/35", - "testdata/0/0/3/36", - "testdata/0/0/3/37", - "testdata/0/0/3/38", - "testdata/0/0/3/39", - "testdata/0/0/3/4", - "testdata/0/0/3/40", - "testdata/0/0/3/41", - "testdata/0/0/3/42", - "testdata/0/0/3/43", - "testdata/0/0/3/44", - "testdata/0/0/3/45", - "testdata/0/0/3/46", - "testdata/0/0/3/47", - "testdata/0/0/3/48", - "testdata/0/0/3/49", - "testdata/0/0/3/5", - "testdata/0/0/3/50", - "testdata/0/0/3/51", - "testdata/0/0/3/52", - "testdata/0/0/3/53", - "testdata/0/0/3/54", - "testdata/0/0/3/55", - "testdata/0/0/3/56", - "testdata/0/0/3/57", - "testdata/0/0/3/58", - "testdata/0/0/3/59", - "testdata/0/0/3/6", - "testdata/0/0/3/60", - "testdata/0/0/3/61", - "testdata/0/0/3/62", - "testdata/0/0/3/63", - "testdata/0/0/3/64", - "testdata/0/0/3/65", - "testdata/0/0/3/66", - "testdata/0/0/3/67", - "testdata/0/0/3/68", - "testdata/0/0/3/69", - "testdata/0/0/3/7", - "testdata/0/0/3/70", - "testdata/0/0/3/71", - "testdata/0/0/3/72", - "testdata/0/0/3/73", - "testdata/0/0/3/74", - "testdata/0/0/3/75", - "testdata/0/0/3/76", - "testdata/0/0/3/77", - "testdata/0/0/3/78", - "testdata/0/0/3/79", - "testdata/0/0/3/8", - "testdata/0/0/3/80", - "testdata/0/0/3/81", - "testdata/0/0/3/82", - "testdata/0/0/3/83", - "testdata/0/0/3/84", - "testdata/0/0/3/85", - "testdata/0/0/3/86", - "testdata/0/0/3/87", - "testdata/0/0/3/88", - "testdata/0/0/3/89", - "testdata/0/0/3/9", - "testdata/0/0/3/90", - "testdata/0/0/3/91", - "testdata/0/0/3/92", - "testdata/0/0/3/93", - "testdata/0/0/3/94", - "testdata/0/0/3/95", - "testdata/0/0/3/96", - "testdata/0/0/3/97", - "testdata/0/0/3/98", - "testdata/0/0/3/99", - "testdata/0/0/3", - "testdata/0/0/4/0", - "testdata/0/0/4/1", - "testdata/0/0/4/10", - "testdata/0/0/4/100", - "testdata/0/0/4/101", - "testdata/0/0/4/102", - "testdata/0/0/4/103", - "testdata/0/0/4/104", - "testdata/0/0/4/105", - "testdata/0/0/4/106", - "testdata/0/0/4/107", - "testdata/0/0/4/108", - "testdata/0/0/4/109", - "testdata/0/0/4/11", - "testdata/0/0/4/110", - "testdata/0/0/4/111", - "testdata/0/0/4/112", - "testdata/0/0/4/113", - "testdata/0/0/4/114", - "testdata/0/0/4/115", - "testdata/0/0/4/116", - "testdata/0/0/4/117", - "testdata/0/0/4/118", - "testdata/0/0/4/119", - "testdata/0/0/4/12", - "testdata/0/0/4/120", - "testdata/0/0/4/121", - "testdata/0/0/4/122", - "testdata/0/0/4/123", - "testdata/0/0/4/124", - "testdata/0/0/4/125", - "testdata/0/0/4/126", - "testdata/0/0/4/127", - "testdata/0/0/4/13", - "testdata/0/0/4/14", - "testdata/0/0/4/15", - "testdata/0/0/4/16", - "testdata/0/0/4/17", - "testdata/0/0/4/18", - "testdata/0/0/4/19", - "testdata/0/0/4/2", - "testdata/0/0/4/20", - "testdata/0/0/4/21", - "testdata/0/0/4/22", - "testdata/0/0/4/23", - "testdata/0/0/4/24", - "testdata/0/0/4/25", - "testdata/0/0/4/26", - "testdata/0/0/4/27", - "testdata/0/0/4/28", - "testdata/0/0/4/29", - "testdata/0/0/4/3", - "testdata/0/0/4/30", - "testdata/0/0/4/31", - "testdata/0/0/4/32", - "testdata/0/0/4/33", - "testdata/0/0/4/34", - "testdata/0/0/4/35", - "testdata/0/0/4/36", - "testdata/0/0/4/37", - "testdata/0/0/4/38", - "testdata/0/0/4/39", - "testdata/0/0/4/4", - "testdata/0/0/4/40", - "testdata/0/0/4/41", - "testdata/0/0/4/42", - "testdata/0/0/4/43", - "testdata/0/0/4/44", - "testdata/0/0/4/45", - "testdata/0/0/4/46", - "testdata/0/0/4/47", - "testdata/0/0/4/48", - "testdata/0/0/4/49", - "testdata/0/0/4/5", - 
"testdata/0/0/4/50", - "testdata/0/0/4/51", - "testdata/0/0/4/52", - "testdata/0/0/4/53", - "testdata/0/0/4/54", - "testdata/0/0/4/55", - "testdata/0/0/4/56", - "testdata/0/0/4/57", - "testdata/0/0/4/58", - "testdata/0/0/4/59", - "testdata/0/0/4/6", - "testdata/0/0/4/60", - "testdata/0/0/4/61", - "testdata/0/0/4/62", - "testdata/0/0/4/63", - "testdata/0/0/4/64", - "testdata/0/0/4/65", - "testdata/0/0/4/66", - "testdata/0/0/4/67", - "testdata/0/0/4/68", - "testdata/0/0/4/69", - "testdata/0/0/4/7", - "testdata/0/0/4/70", - "testdata/0/0/4/71", - "testdata/0/0/4/72", - "testdata/0/0/4/73", - "testdata/0/0/4/74", - "testdata/0/0/4/75", - "testdata/0/0/4/76", - "testdata/0/0/4/77", - "testdata/0/0/4/78", - "testdata/0/0/4/79", - "testdata/0/0/4/8", - "testdata/0/0/4/80", - "testdata/0/0/4/81", - "testdata/0/0/4/82", - "testdata/0/0/4/83", - "testdata/0/0/4/84", - "testdata/0/0/4/85", - "testdata/0/0/4/86", - "testdata/0/0/4/87", - "testdata/0/0/4/88", - "testdata/0/0/4/89", - "testdata/0/0/4/9", - "testdata/0/0/4/90", - "testdata/0/0/4/91", - "testdata/0/0/4/92", - "testdata/0/0/4/93", - "testdata/0/0/4/94", - "testdata/0/0/4/95", - "testdata/0/0/4/96", - "testdata/0/0/4/97", - "testdata/0/0/4/98", - "testdata/0/0/4/99", - "testdata/0/0/4", - "testdata/0/0/5/0", - "testdata/0/0/5/1", - "testdata/0/0/5/10", - "testdata/0/0/5/100", - "testdata/0/0/5/101", - "testdata/0/0/5/102", - "testdata/0/0/5/103", - "testdata/0/0/5/104", - "testdata/0/0/5/105", - "testdata/0/0/5/106", - "testdata/0/0/5/107", - "testdata/0/0/5/108", - "testdata/0/0/5/109", - "testdata/0/0/5/11", - "testdata/0/0/5/110", - "testdata/0/0/5/111", - "testdata/0/0/5/112", - "testdata/0/0/5/113", - "testdata/0/0/5/114", - "testdata/0/0/5/115", - "testdata/0/0/5/116", - "testdata/0/0/5/117", - "testdata/0/0/5/118", - "testdata/0/0/5/119", - "testdata/0/0/5/12", - "testdata/0/0/5/120", - "testdata/0/0/5/121", - "testdata/0/0/5/122", - "testdata/0/0/5/123", - "testdata/0/0/5/124", - "testdata/0/0/5/125", - "testdata/0/0/5/126", - "testdata/0/0/5/127", - "testdata/0/0/5/13", - "testdata/0/0/5/14", - "testdata/0/0/5/15", - "testdata/0/0/5/16", - "testdata/0/0/5/17", - "testdata/0/0/5/18", - "testdata/0/0/5/19", - "testdata/0/0/5/2", - "testdata/0/0/5/20", - "testdata/0/0/5/21", - "testdata/0/0/5/22", - "testdata/0/0/5/23", - "testdata/0/0/5/24", - "testdata/0/0/5/25", - "testdata/0/0/5/26", - "testdata/0/0/5/27", - "testdata/0/0/5/28", - "testdata/0/0/5/29", - "testdata/0/0/5/3", - "testdata/0/0/5/30", - "testdata/0/0/5/31", - "testdata/0/0/5/32", - "testdata/0/0/5/33", - "testdata/0/0/5/34", - "testdata/0/0/5/35", - "testdata/0/0/5/36", - "testdata/0/0/5/37", - "testdata/0/0/5/38", - "testdata/0/0/5/39", - "testdata/0/0/5/4", - "testdata/0/0/5/40", - "testdata/0/0/5/41", - "testdata/0/0/5/42", - "testdata/0/0/5/43", - "testdata/0/0/5/44", - "testdata/0/0/5/45", - "testdata/0/0/5/46", - "testdata/0/0/5/47", - "testdata/0/0/5/48", - "testdata/0/0/5/49", - "testdata/0/0/5/5", - "testdata/0/0/5/50", - "testdata/0/0/5/51", - "testdata/0/0/5/52", - "testdata/0/0/5/53", - "testdata/0/0/5/54", - "testdata/0/0/5/55", - "testdata/0/0/5/56", - "testdata/0/0/5/57", - "testdata/0/0/5/58", - "testdata/0/0/5/59", - "testdata/0/0/5/6", - "testdata/0/0/5/60", - "testdata/0/0/5/61", - "testdata/0/0/5/62", - "testdata/0/0/5/63", - "testdata/0/0/5/64", - "testdata/0/0/5/65", - "testdata/0/0/5/66", - "testdata/0/0/5/67", - "testdata/0/0/5/68", - "testdata/0/0/5/69", - "testdata/0/0/5/7", - "testdata/0/0/5/70", - "testdata/0/0/5/71", - "testdata/0/0/5/72", - 
"testdata/0/0/5/73", - "testdata/0/0/5/74", - "testdata/0/0/5/75", - "testdata/0/0/5/76", - "testdata/0/0/5/77", - "testdata/0/0/5/78", - "testdata/0/0/5/79", - "testdata/0/0/5/8", - "testdata/0/0/5/80", - "testdata/0/0/5/81", - "testdata/0/0/5/82", - "testdata/0/0/5/83", - "testdata/0/0/5/84", - "testdata/0/0/5/85", - "testdata/0/0/5/86", - "testdata/0/0/5/87", - "testdata/0/0/5/88", - "testdata/0/0/5/89", - "testdata/0/0/5/9", - "testdata/0/0/5/90", - "testdata/0/0/5/91", - "testdata/0/0/5/92", - "testdata/0/0/5/93", - "testdata/0/0/5/94", - "testdata/0/0/5/95", - "testdata/0/0/5/96", - "testdata/0/0/5/97", - "testdata/0/0/5/98", - "testdata/0/0/5/99", - "testdata/0/0/5", - "testdata/0/0/6/0", - "testdata/0/0/6/1", - "testdata/0/0/6/10", - "testdata/0/0/6/100", - "testdata/0/0/6/101", - "testdata/0/0/6/102", - "testdata/0/0/6/103", - "testdata/0/0/6/104", - "testdata/0/0/6/105", - "testdata/0/0/6/106", - "testdata/0/0/6/107", - "testdata/0/0/6/108", - "testdata/0/0/6/109", - "testdata/0/0/6/11", - "testdata/0/0/6/110", - "testdata/0/0/6/111", - "testdata/0/0/6/112", - "testdata/0/0/6/113", - "testdata/0/0/6/114", - "testdata/0/0/6/115", - "testdata/0/0/6/116", - "testdata/0/0/6/117", - "testdata/0/0/6/118", - "testdata/0/0/6/119", - "testdata/0/0/6/12", - "testdata/0/0/6/120", - "testdata/0/0/6/121", - "testdata/0/0/6/122", - "testdata/0/0/6/123", - "testdata/0/0/6/124", - "testdata/0/0/6/125", - "testdata/0/0/6/126", - "testdata/0/0/6/127", - "testdata/0/0/6/13", - "testdata/0/0/6/14", - "testdata/0/0/6/15", - "testdata/0/0/6/16", - "testdata/0/0/6/17", - "testdata/0/0/6/18", - "testdata/0/0/6/19", - "testdata/0/0/6/2", - "testdata/0/0/6/20", - "testdata/0/0/6/21", - "testdata/0/0/6/22", - "testdata/0/0/6/23", - "testdata/0/0/6/24", - "testdata/0/0/6/25", - "testdata/0/0/6/26", - "testdata/0/0/6/27", - "testdata/0/0/6/28", - "testdata/0/0/6/29", - "testdata/0/0/6/3", - "testdata/0/0/6/30", - "testdata/0/0/6/31", - "testdata/0/0/6/32", - "testdata/0/0/6/33", - "testdata/0/0/6/34", - "testdata/0/0/6/35", - "testdata/0/0/6/36", - "testdata/0/0/6/37", - "testdata/0/0/6/38", - "testdata/0/0/6/39", - "testdata/0/0/6/4", - "testdata/0/0/6/40", - "testdata/0/0/6/41", - "testdata/0/0/6/42", - "testdata/0/0/6/43", - "testdata/0/0/6/44", - "testdata/0/0/6/45", - "testdata/0/0/6/46", - "testdata/0/0/6/47", - "testdata/0/0/6/48", - "testdata/0/0/6/49", - "testdata/0/0/6/5", - "testdata/0/0/6/50", - "testdata/0/0/6/51", - "testdata/0/0/6/52", - "testdata/0/0/6/53", - "testdata/0/0/6/54", - "testdata/0/0/6/55", - "testdata/0/0/6/56", - "testdata/0/0/6/57", - "testdata/0/0/6/58", - "testdata/0/0/6/59", - "testdata/0/0/6/6", - "testdata/0/0/6/60", - "testdata/0/0/6/61", - "testdata/0/0/6/62", - "testdata/0/0/6/63", - "testdata/0/0/6/64", - "testdata/0/0/6/65", - "testdata/0/0/6/66", - "testdata/0/0/6/67", - "testdata/0/0/6/68", - "testdata/0/0/6/69", - "testdata/0/0/6/7", - "testdata/0/0/6/70", - "testdata/0/0/6/71", - "testdata/0/0/6/72", - "testdata/0/0/6/73", - "testdata/0/0/6/74", - "testdata/0/0/6/75", - "testdata/0/0/6/76", - "testdata/0/0/6/77", - "testdata/0/0/6/78", - "testdata/0/0/6/79", - "testdata/0/0/6/8", - "testdata/0/0/6/80", - "testdata/0/0/6/81", - "testdata/0/0/6/82", - "testdata/0/0/6/83", - "testdata/0/0/6/84", - "testdata/0/0/6/85", - "testdata/0/0/6/86", - "testdata/0/0/6/87", - "testdata/0/0/6/88", - "testdata/0/0/6/89", - "testdata/0/0/6/9", - "testdata/0/0/6/90", - "testdata/0/0/6/91", - "testdata/0/0/6/92", - "testdata/0/0/6/93", - "testdata/0/0/6/94", - "testdata/0/0/6/95", - 
"testdata/0/0/6/96", - "testdata/0/0/6/97", - "testdata/0/0/6/98", - "testdata/0/0/6/99", - "testdata/0/0/6", - "testdata/0/0/7/0", - "testdata/0/0/7/1", - "testdata/0/0/7/10", - "testdata/0/0/7/100", - "testdata/0/0/7/101", - "testdata/0/0/7/102", - "testdata/0/0/7/103", - "testdata/0/0/7/104", - "testdata/0/0/7/105", - "testdata/0/0/7/106", - "testdata/0/0/7/107", - "testdata/0/0/7/108", - "testdata/0/0/7/109", - "testdata/0/0/7/11", - "testdata/0/0/7/110", - "testdata/0/0/7/111", - "testdata/0/0/7/112", - "testdata/0/0/7/113", - "testdata/0/0/7/114", - "testdata/0/0/7/115", - "testdata/0/0/7/116", - "testdata/0/0/7/117", - "testdata/0/0/7/118", - "testdata/0/0/7/119", - "testdata/0/0/7/12", - "testdata/0/0/7/120", - "testdata/0/0/7/121", - "testdata/0/0/7/122", - "testdata/0/0/7/123", - "testdata/0/0/7/124", - "testdata/0/0/7/125", - "testdata/0/0/7/126", - "testdata/0/0/7/127", - "testdata/0/0/7/13", - "testdata/0/0/7/14", - "testdata/0/0/7/15", - "testdata/0/0/7/16", - "testdata/0/0/7/17", - "testdata/0/0/7/18", - "testdata/0/0/7/19", - "testdata/0/0/7/2", - "testdata/0/0/7/20", - "testdata/0/0/7/21", - "testdata/0/0/7/22", - "testdata/0/0/7/23", - "testdata/0/0/7/24", - "testdata/0/0/7/25", - "testdata/0/0/7/26", - "testdata/0/0/7/27", - "testdata/0/0/7/28", - "testdata/0/0/7/29", - "testdata/0/0/7/3", - "testdata/0/0/7/30", - "testdata/0/0/7/31", - "testdata/0/0/7/32", - "testdata/0/0/7/33", - "testdata/0/0/7/34", - "testdata/0/0/7/35", - "testdata/0/0/7/36", - "testdata/0/0/7/37", - "testdata/0/0/7/38", - "testdata/0/0/7/39", - "testdata/0/0/7/4", - "testdata/0/0/7/40", - "testdata/0/0/7/41", - "testdata/0/0/7/42", - "testdata/0/0/7/43", - "testdata/0/0/7/44", - "testdata/0/0/7/45", - "testdata/0/0/7/46", - "testdata/0/0/7/47", - "testdata/0/0/7/48", - "testdata/0/0/7/49", - "testdata/0/0/7/5", - "testdata/0/0/7/50", - "testdata/0/0/7/51", - "testdata/0/0/7/52", - "testdata/0/0/7/53", - "testdata/0/0/7/54", - "testdata/0/0/7/55", - "testdata/0/0/7/56", - "testdata/0/0/7/57", - "testdata/0/0/7/58", - "testdata/0/0/7/59", - "testdata/0/0/7/6", - "testdata/0/0/7/60", - "testdata/0/0/7/61", - "testdata/0/0/7/62", - "testdata/0/0/7/63", - "testdata/0/0/7/64", - "testdata/0/0/7/65", - "testdata/0/0/7/66", - "testdata/0/0/7/67", - "testdata/0/0/7/68", - "testdata/0/0/7/69", - "testdata/0/0/7/7", - "testdata/0/0/7/70", - "testdata/0/0/7/71", - "testdata/0/0/7/72", - "testdata/0/0/7/73", - "testdata/0/0/7/74", - "testdata/0/0/7/75", - "testdata/0/0/7/76", - "testdata/0/0/7/77", - "testdata/0/0/7/78", - "testdata/0/0/7/79", - "testdata/0/0/7/8", - "testdata/0/0/7/80", - "testdata/0/0/7/81", - "testdata/0/0/7/82", - "testdata/0/0/7/83", - "testdata/0/0/7/84", - "testdata/0/0/7/85", - "testdata/0/0/7/86", - "testdata/0/0/7/87", - "testdata/0/0/7/88", - "testdata/0/0/7/89", - "testdata/0/0/7/9", - "testdata/0/0/7/90", - "testdata/0/0/7/91", - "testdata/0/0/7/92", - "testdata/0/0/7/93", - "testdata/0/0/7/94", - "testdata/0/0/7/95", - "testdata/0/0/7/96", - "testdata/0/0/7/97", - "testdata/0/0/7/98", - "testdata/0/0/7/99", - "testdata/0/0/7", - "testdata/0/0/8/0", - "testdata/0/0/8/1", - "testdata/0/0/8/10", - "testdata/0/0/8/100", - "testdata/0/0/8/101", - "testdata/0/0/8/102", - "testdata/0/0/8/103", - "testdata/0/0/8/104", - "testdata/0/0/8/105", - "testdata/0/0/8/106", - "testdata/0/0/8/107", - "testdata/0/0/8/108", - "testdata/0/0/8/109", - "testdata/0/0/8/11", - "testdata/0/0/8/110", - "testdata/0/0/8/111", - "testdata/0/0/8/112", - "testdata/0/0/8/113", - "testdata/0/0/8/114", - 
"testdata/0/0/8/115", - "testdata/0/0/8/116", - "testdata/0/0/8/117", - "testdata/0/0/8/118", - "testdata/0/0/8/119", - "testdata/0/0/8/12", - "testdata/0/0/8/120", - "testdata/0/0/8/121", - "testdata/0/0/8/122", - "testdata/0/0/8/123", - "testdata/0/0/8/124", - "testdata/0/0/8/125", - "testdata/0/0/8/126", - "testdata/0/0/8/127", - "testdata/0/0/8/13", - "testdata/0/0/8/14", - "testdata/0/0/8/15", - "testdata/0/0/8/16", - "testdata/0/0/8/17", - "testdata/0/0/8/18", - "testdata/0/0/8/19", - "testdata/0/0/8/2", - "testdata/0/0/8/20", - "testdata/0/0/8/21", - "testdata/0/0/8/22", - "testdata/0/0/8/23", - "testdata/0/0/8/24", - "testdata/0/0/8/25", - "testdata/0/0/8/26", - "testdata/0/0/8/27", - "testdata/0/0/8/28", - "testdata/0/0/8/29", - "testdata/0/0/8/3", - "testdata/0/0/8/30", - "testdata/0/0/8/31", - "testdata/0/0/8/32", - "testdata/0/0/8/33", - "testdata/0/0/8/34", - "testdata/0/0/8/35", - "testdata/0/0/8/36", - "testdata/0/0/8/37", - "testdata/0/0/8/38", - "testdata/0/0/8/39", - "testdata/0/0/8/4", - "testdata/0/0/8/40", - "testdata/0/0/8/41", - "testdata/0/0/8/42", - "testdata/0/0/8/43", - "testdata/0/0/8/44", - "testdata/0/0/8/45", - "testdata/0/0/8/46", - "testdata/0/0/8/47", - "testdata/0/0/8/48", - "testdata/0/0/8/49", - "testdata/0/0/8/5", - "testdata/0/0/8/50", - "testdata/0/0/8/51", - "testdata/0/0/8/52", - "testdata/0/0/8/53", - "testdata/0/0/8/54", - "testdata/0/0/8/55", - "testdata/0/0/8/56", - "testdata/0/0/8/57", - "testdata/0/0/8/58", - "testdata/0/0/8/59", - "testdata/0/0/8/6", - "testdata/0/0/8/60", - "testdata/0/0/8/61", - "testdata/0/0/8/62", - "testdata/0/0/8/63", - "testdata/0/0/8/64", - "testdata/0/0/8/65", - "testdata/0/0/8/66", - "testdata/0/0/8/67", - "testdata/0/0/8/68", - "testdata/0/0/8/69", - "testdata/0/0/8/7", - "testdata/0/0/8/70", - "testdata/0/0/8/71", - "testdata/0/0/8/72", - "testdata/0/0/8/73", - "testdata/0/0/8/74", - "testdata/0/0/8/75", - "testdata/0/0/8/76", - "testdata/0/0/8/77", - "testdata/0/0/8/78", - "testdata/0/0/8/79", - "testdata/0/0/8/8", - "testdata/0/0/8/80", - "testdata/0/0/8/81", - "testdata/0/0/8/82", - "testdata/0/0/8/83", - "testdata/0/0/8/84", - "testdata/0/0/8/85", - "testdata/0/0/8/86", - "testdata/0/0/8/87", - "testdata/0/0/8/88", - "testdata/0/0/8/89", - "testdata/0/0/8/9", - "testdata/0/0/8/90", - "testdata/0/0/8/91", - "testdata/0/0/8/92", - "testdata/0/0/8/93", - "testdata/0/0/8/94", - "testdata/0/0/8/95", - "testdata/0/0/8/96", - "testdata/0/0/8/97", - "testdata/0/0/8/98", - "testdata/0/0/8/99", - "testdata/0/0/8", - "testdata/0/0/9/0", - "testdata/0/0/9/1", - "testdata/0/0/9/10", - "testdata/0/0/9/11", - "testdata/0/0/9/12", - "testdata/0/0/9/13", - "testdata/0/0/9/14", - "testdata/0/0/9/15", - "testdata/0/0/9/16", - "testdata/0/0/9/17", - "testdata/0/0/9/18", - "testdata/0/0/9/19", - "testdata/0/0/9/2", - "testdata/0/0/9/20", - "testdata/0/0/9/21", - "testdata/0/0/9/22", - "testdata/0/0/9/23", - "testdata/0/0/9/24", - "testdata/0/0/9/25", - "testdata/0/0/9/26", - "testdata/0/0/9/27", - "testdata/0/0/9/28", - "testdata/0/0/9/29", - "testdata/0/0/9/3", - "testdata/0/0/9/30", - "testdata/0/0/9/31", - "testdata/0/0/9/32", - "testdata/0/0/9/33", - "testdata/0/0/9/34", - "testdata/0/0/9/35", - "testdata/0/0/9/36", - "testdata/0/0/9/37", - "testdata/0/0/9/38", - "testdata/0/0/9/39", - "testdata/0/0/9/4", - "testdata/0/0/9/40", - "testdata/0/0/9/41", - "testdata/0/0/9/42", - "testdata/0/0/9/43", - "testdata/0/0/9/44", - "testdata/0/0/9/45", - "testdata/0/0/9/46", - "testdata/0/0/9/47", - "testdata/0/0/9/48", - 
"testdata/0/0/9/49", - "testdata/0/0/9/5", - "testdata/0/0/9/50", - "testdata/0/0/9/51", - "testdata/0/0/9/52", - "testdata/0/0/9/53", - "testdata/0/0/9/54", - "testdata/0/0/9/55", - "testdata/0/0/9/56", - "testdata/0/0/9/57", - "testdata/0/0/9/58", - "testdata/0/0/9/59", - "testdata/0/0/9/6", - "testdata/0/0/9/60", - "testdata/0/0/9/61", - "testdata/0/0/9/62", - "testdata/0/0/9/63", - "testdata/0/0/9/64", - "testdata/0/0/9/65", - "testdata/0/0/9/66", - "testdata/0/0/9/67", - "testdata/0/0/9/68", - "testdata/0/0/9/7", - "testdata/0/0/9/8", - "testdata/0/0/9/9", - "testdata/0/0/9", - "testdata/0/0", - "testdata/0", - "testdata", - "", -} - -func TestDelayedWalkTree(t *testing.T) { - repodir, cleanup := rtest.Env(t, repoFixture) - defer cleanup() - - repo := repository.TestOpenLocal(t, repodir) - rtest.OK(t, repo.LoadIndex(context.TODO())) - - root, err := restic.ParseID("937a2f64f736c64ee700c6ab06f840c68c94799c288146a0e81e07f4c94254da") - rtest.OK(t, err) - - dr := delayRepo{repo, 100 * time.Millisecond} - - // start tree walker - treeJobs := make(chan walk.TreeJob) - go walk.Tree(context.TODO(), dr, root, treeJobs) - - i := 0 - for job := range treeJobs { - expectedPath := filepath.Join(strings.Split(walktreeTestItems[i], "/")...) - if job.Path != expectedPath { - t.Fatalf("expected path %q (%v), got %q", walktreeTestItems[i], i, job.Path) - } - i++ - } - - if i != len(walktreeTestItems) { - t.Fatalf("got %d items, expected %v", i, len(walktreeTestItems)) - } -} - -func BenchmarkDelayedWalkTree(t *testing.B) { - repodir, cleanup := rtest.Env(t, repoFixture) - defer cleanup() - - repo := repository.TestOpenLocal(t, repodir) - rtest.OK(t, repo.LoadIndex(context.TODO())) - - root, err := restic.ParseID("937a2f64f736c64ee700c6ab06f840c68c94799c288146a0e81e07f4c94254da") - rtest.OK(t, err) - - dr := delayRepo{repo, 10 * time.Millisecond} - - t.ResetTimer() - - for i := 0; i < t.N; i++ { - // start tree walker - treeJobs := make(chan walk.TreeJob) - go walk.Tree(context.TODO(), dr, root, treeJobs) - - for range treeJobs { - } - } -} diff --git a/vendor/github.com/google/go-cmp/.travis.yml b/vendor/github.com/google/go-cmp/.travis.yml new file mode 100644 index 000000000..9d9b7f916 --- /dev/null +++ b/vendor/github.com/google/go-cmp/.travis.yml @@ -0,0 +1,18 @@ +sudo: false +language: go +go: + - 1.x + - master +matrix: + include: + - go: 1.6.x + script: go test -v -race ./... + allow_failures: + - go: master + fast_finish: true +install: + - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (it is intended for this package to have no dependencies other than the standard library). +script: + - diff -u <(echo -n) <(gofmt -d -s .) + - go tool vet . + - go test -v -race ./... diff --git a/vendor/github.com/google/go-cmp/CONTRIBUTING.md b/vendor/github.com/google/go-cmp/CONTRIBUTING.md new file mode 100644 index 000000000..ae319c70a --- /dev/null +++ b/vendor/github.com/google/go-cmp/CONTRIBUTING.md @@ -0,0 +1,23 @@ +# How to Contribute + +We'd love to accept your patches and contributions to this project. There are +just a few small guidelines you need to follow. + +## Contributor License Agreement + +Contributions to this project must be accompanied by a Contributor License +Agreement. You (or your employer) retain the copyright to your contribution, +this simply gives us permission to use and redistribute your contributions as +part of the project. Head over to to see +your current agreements on file or to sign a new one. 
+ +You generally only need to submit a CLA once, so if you've already submitted one +(even if it was for a different project), you probably don't need to do it +again. + +## Code reviews + +All submissions, including submissions by project members, require review. We +use GitHub pull requests for this purpose. Consult +[GitHub Help](https://help.github.com/articles/about-pull-requests/) for more +information on using pull requests. diff --git a/vendor/github.com/google/go-cmp/LICENSE b/vendor/github.com/google/go-cmp/LICENSE new file mode 100644 index 000000000..32017f8fa --- /dev/null +++ b/vendor/github.com/google/go-cmp/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2017 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/google/go-cmp/README.md b/vendor/github.com/google/go-cmp/README.md new file mode 100644 index 000000000..d82f10bfc --- /dev/null +++ b/vendor/github.com/google/go-cmp/README.md @@ -0,0 +1,44 @@ +# Package for equality of Go values + +[![GoDoc](https://godoc.org/github.com/google/go-cmp/cmp?status.svg)][godoc] +[![Build Status](https://travis-ci.org/google/go-cmp.svg?branch=master)][travis] + +This package is intended to be a more powerful and safer alternative to +`reflect.DeepEqual` for comparing whether two values are semantically equal. + +The primary features of `cmp` are: + +* When the default behavior of equality does not suit the needs of the test, + custom equality functions can override the equality operation. + For example, an equality function may report floats as equal so long as they + are within some tolerance of each other. + +* Types that have an `Equal` method may use that method to determine equality. + This allows package authors to determine the equality operation for the types + that they define. + +* If no custom equality functions are used and no `Equal` method is defined, + equality is determined by recursively comparing the primitive kinds on both + values, much like `reflect.DeepEqual`. 
Unlike `reflect.DeepEqual`, unexported
+ fields are not compared by default; they result in panics unless suppressed
+ by using an `Ignore` option (see `cmpopts.IgnoreUnexported`) or explicitly
+ compared using the `AllowUnexported` option.
+
+See the [GoDoc documentation][godoc] for more information.
+
+This is not an official Google product.
+
+[godoc]: https://godoc.org/github.com/google/go-cmp/cmp
+[travis]: https://travis-ci.org/google/go-cmp
+
+## Install
+
+```
+go get -u github.com/google/go-cmp/cmp
+```
+
+## License
+
+BSD - See [LICENSE][license] file
+
+[license]: https://github.com/google/go-cmp/blob/master/LICENSE
diff --git a/vendor/github.com/google/go-cmp/cmp/cmpopts/equate.go b/vendor/github.com/google/go-cmp/cmp/cmpopts/equate.go
new file mode 100644
index 000000000..cc39492cf
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/cmpopts/equate.go
@@ -0,0 +1,89 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+// Package cmpopts provides common options for the cmp package.
+package cmpopts
+
+import (
+ "math"
+ "reflect"
+
+ "github.com/google/go-cmp/cmp"
+)
+
+func equateAlways(_, _ interface{}) bool { return true }
+
+// EquateEmpty returns a Comparer option that determines all maps and slices
+// with a length of zero to be equal, regardless of whether they are nil.
+//
+// EquateEmpty can be used in conjunction with SortSlices and SortMaps.
+func EquateEmpty() cmp.Option {
+ return cmp.FilterValues(isEmpty, cmp.Comparer(equateAlways))
+}
+
+func isEmpty(x, y interface{}) bool {
+ vx, vy := reflect.ValueOf(x), reflect.ValueOf(y)
+ return (x != nil && y != nil && vx.Type() == vy.Type()) &&
+ (vx.Kind() == reflect.Slice || vx.Kind() == reflect.Map) &&
+ (vx.Len() == 0 && vy.Len() == 0)
+}
+
+// EquateApprox returns a Comparer option that determines float32 or float64
+// values to be equal if they are within a relative fraction or absolute margin.
+// This option is not used when either x or y is NaN or infinite.
+//
+// The fraction determines that the difference of two values must be within the
+// smaller fraction of the two values, while the margin determines that the two
+// values must be within some absolute margin.
+// To express only a fraction or only a margin, use 0 for the other parameter.
+// The fraction and margin must be non-negative.
+//
+// The mathematical expression used is equivalent to:
+// |x-y| ≤ max(fraction*min(|x|, |y|), margin)
+//
+// EquateApprox can be used in conjunction with EquateNaNs.
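+//
+// As an illustrative sketch (an editor's example, not upstream documentation;
+// the values are taken from this package's own tests in util_test.go):
+//
+//	cmp.Equal(3.09, 3.10, cmpopts.EquateApprox(0, 0.011))     // true:  |3.09-3.10| = 0.01 ≤ margin 0.011
+//	cmp.Equal(3.09, 3.10, cmpopts.EquateApprox(0.004, 0))     // true:  0.01 ≤ 0.004*min(3.09, 3.10) ≈ 0.0124
+//	cmp.Equal(3.09, 3.10, cmpopts.EquateApprox(0.003, 0.009)) // false: both fraction and margin are too strict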
+func EquateApprox(fraction, margin float64) cmp.Option {
+ if margin < 0 || fraction < 0 || math.IsNaN(margin) || math.IsNaN(fraction) {
+ panic("margin or fraction must be a non-negative number")
+ }
+ a := approximator{fraction, margin}
+ return cmp.Options{
+ cmp.FilterValues(areRealF64s, cmp.Comparer(a.compareF64)),
+ cmp.FilterValues(areRealF32s, cmp.Comparer(a.compareF32)),
+ }
+}
+
+type approximator struct{ frac, marg float64 }
+
+func areRealF64s(x, y float64) bool {
+ return !math.IsNaN(x) && !math.IsNaN(y) && !math.IsInf(x, 0) && !math.IsInf(y, 0)
+}
+func areRealF32s(x, y float32) bool {
+ return areRealF64s(float64(x), float64(y))
+}
+func (a approximator) compareF64(x, y float64) bool {
+ relMarg := a.frac * math.Min(math.Abs(x), math.Abs(y))
+ return math.Abs(x-y) <= math.Max(a.marg, relMarg)
+}
+func (a approximator) compareF32(x, y float32) bool {
+ return a.compareF64(float64(x), float64(y))
+}
+
+// EquateNaNs returns a Comparer option that determines float32 and float64
+// NaN values to be equal.
+//
+// EquateNaNs can be used in conjunction with EquateApprox.
+func EquateNaNs() cmp.Option {
+ return cmp.Options{
+ cmp.FilterValues(areNaNsF64s, cmp.Comparer(equateAlways)),
+ cmp.FilterValues(areNaNsF32s, cmp.Comparer(equateAlways)),
+ }
+}
+
+func areNaNsF64s(x, y float64) bool {
+ return math.IsNaN(x) && math.IsNaN(y)
+}
+func areNaNsF32s(x, y float32) bool {
+ return areNaNsF64s(float64(x), float64(y))
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/cmpopts/ignore.go b/vendor/github.com/google/go-cmp/cmp/cmpopts/ignore.go
new file mode 100644
index 000000000..016891da3
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/cmpopts/ignore.go
@@ -0,0 +1,148 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+package cmpopts
+
+import (
+ "fmt"
+ "reflect"
+ "unicode"
+ "unicode/utf8"
+
+ "github.com/google/go-cmp/cmp"
+)
+
+// IgnoreFields returns an Option that ignores exported fields of the
+// given names on a single struct type.
+// The struct type is specified by passing in a value of that type.
+//
+// The name may be a dot-delimited string (e.g., "Foo.Bar") to ignore a
+// specific sub-field that is embedded or nested within the parent struct.
+//
+// This does not handle unexported fields; use IgnoreUnexported instead.
+func IgnoreFields(typ interface{}, names ...string) cmp.Option {
+ sf := newStructFilter(typ, names...)
+ return cmp.FilterPath(sf.filter, cmp.Ignore())
+}
+
+// IgnoreTypes returns an Option that ignores all values assignable to
+// certain types, which are specified by passing in a value of each type.
+func IgnoreTypes(typs ...interface{}) cmp.Option {
+ tf := newTypeFilter(typs...)
+ return cmp.FilterPath(tf.filter, cmp.Ignore())
+}
+
+type typeFilter []reflect.Type
+
+func newTypeFilter(typs ...interface{}) (tf typeFilter) {
+ for _, typ := range typs {
+ t := reflect.TypeOf(typ)
+ if t == nil {
+ // This occurs if someone tries to pass in sync.Locker(nil)
+ panic("cannot determine type; consider using IgnoreInterfaces")
+ }
+ tf = append(tf, t)
+ }
+ return tf
+}
+func (tf typeFilter) filter(p cmp.Path) bool {
+ if len(p) < 1 {
+ return false
+ }
+ t := p[len(p)-1].Type()
+ for _, ti := range tf {
+ if t.AssignableTo(ti) {
+ return true
+ }
+ }
+ return false
+}
+
+// IgnoreInterfaces returns an Option that ignores all values or references of
+// values assignable to certain interface types.
These interfaces are specified +// by passing in an anonymous struct with the interface types embedded in it. +// For example, to ignore sync.Locker, pass in struct{sync.Locker}{}. +func IgnoreInterfaces(ifaces interface{}) cmp.Option { + tf := newIfaceFilter(ifaces) + return cmp.FilterPath(tf.filter, cmp.Ignore()) +} + +type ifaceFilter []reflect.Type + +func newIfaceFilter(ifaces interface{}) (tf ifaceFilter) { + t := reflect.TypeOf(ifaces) + if ifaces == nil || t.Name() != "" || t.Kind() != reflect.Struct { + panic("input must be an anonymous struct") + } + for i := 0; i < t.NumField(); i++ { + fi := t.Field(i) + switch { + case !fi.Anonymous: + panic("struct cannot have named fields") + case fi.Type.Kind() != reflect.Interface: + panic("embedded field must be an interface type") + case fi.Type.NumMethod() == 0: + // This matches everything; why would you ever want this? + panic("cannot ignore empty interface") + default: + tf = append(tf, fi.Type) + } + } + return tf +} +func (tf ifaceFilter) filter(p cmp.Path) bool { + if len(p) < 1 { + return false + } + t := p[len(p)-1].Type() + for _, ti := range tf { + if t.AssignableTo(ti) { + return true + } + if t.Kind() != reflect.Ptr && reflect.PtrTo(t).AssignableTo(ti) { + return true + } + } + return false +} + +// IgnoreUnexported returns an Option that only ignores the immediate unexported +// fields of a struct, including anonymous fields of unexported types. +// In particular, unexported fields within the struct's exported fields +// of struct types, including anonymous fields, will not be ignored unless the +// type of the field itself is also passed to IgnoreUnexported. +func IgnoreUnexported(typs ...interface{}) cmp.Option { + ux := newUnexportedFilter(typs...) + return cmp.FilterPath(ux.filter, cmp.Ignore()) +} + +type unexportedFilter struct{ m map[reflect.Type]bool } + +func newUnexportedFilter(typs ...interface{}) unexportedFilter { + ux := unexportedFilter{m: make(map[reflect.Type]bool)} + for _, typ := range typs { + t := reflect.TypeOf(typ) + if t == nil || t.Kind() != reflect.Struct { + panic(fmt.Sprintf("invalid struct type: %T", typ)) + } + ux.m[t] = true + } + return ux +} +func (xf unexportedFilter) filter(p cmp.Path) bool { + if len(p) < 2 { + return false + } + sf, ok := p[len(p)-1].(cmp.StructField) + if !ok { + return false + } + return xf.m[p[len(p)-2].Type()] && !isExported(sf.Name()) +} + +// isExported reports whether the identifier is exported. +func isExported(id string) bool { + r, _ := utf8.DecodeRuneInString(id) + return unicode.IsUpper(r) +} diff --git a/vendor/github.com/google/go-cmp/cmp/cmpopts/sort.go b/vendor/github.com/google/go-cmp/cmp/cmpopts/sort.go new file mode 100644 index 000000000..a566d240b --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/cmpopts/sort.go @@ -0,0 +1,146 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmpopts + +import ( + "fmt" + "reflect" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/internal/function" +) + +// SortSlices returns a Transformer option that sorts all []V. +// The less function must be of the form "func(T, T) bool" which is used to +// sort any slice with element type V that is assignable to T. 
+//
+// The less function must be:
+// • Deterministic: less(x, y) == less(x, y)
+// • Irreflexive: !less(x, x)
+// • Transitive: if !less(x, y) and !less(y, z), then !less(x, z)
+//
+// The less function does not have to be "total". That is, if !less(x, y) and
+// !less(y, x) for two elements x and y, their relative order is maintained.
+//
+// SortSlices can be used in conjunction with EquateEmpty.
+func SortSlices(less interface{}) cmp.Option {
+ vf := reflect.ValueOf(less)
+ if !function.IsType(vf.Type(), function.Less) || vf.IsNil() {
+ panic(fmt.Sprintf("invalid less function: %T", less))
+ }
+ ss := sliceSorter{vf.Type().In(0), vf}
+ return cmp.FilterValues(ss.filter, cmp.Transformer("Sort", ss.sort))
+}
+
+type sliceSorter struct {
+ in reflect.Type // T
+ fnc reflect.Value // func(T, T) bool
+}
+
+func (ss sliceSorter) filter(x, y interface{}) bool {
+ vx, vy := reflect.ValueOf(x), reflect.ValueOf(y)
+ if !(x != nil && y != nil && vx.Type() == vy.Type()) ||
+ !(vx.Kind() == reflect.Slice && vx.Type().Elem().AssignableTo(ss.in)) ||
+ (vx.Len() <= 1 && vy.Len() <= 1) {
+ return false
+ }
+ // Check whether the slices are already sorted to avoid an infinite
+ // recursion cycle applying the same transform to itself.
+ ok1 := sliceIsSorted(x, func(i, j int) bool { return ss.less(vx, i, j) })
+ ok2 := sliceIsSorted(y, func(i, j int) bool { return ss.less(vy, i, j) })
+ return !ok1 || !ok2
+}
+func (ss sliceSorter) sort(x interface{}) interface{} {
+ src := reflect.ValueOf(x)
+ dst := reflect.MakeSlice(src.Type(), src.Len(), src.Len())
+ for i := 0; i < src.Len(); i++ {
+ dst.Index(i).Set(src.Index(i))
+ }
+ sortSliceStable(dst.Interface(), func(i, j int) bool { return ss.less(dst, i, j) })
+ ss.checkSort(dst)
+ return dst.Interface()
+}
+func (ss sliceSorter) checkSort(v reflect.Value) {
+ start := -1 // Start of a sequence of equal elements.
+ for i := 1; i < v.Len(); i++ {
+ if ss.less(v, i-1, i) {
+ // Check that first and last elements in v[start:i] are equal.
+ if start >= 0 && (ss.less(v, start, i-1) || ss.less(v, i-1, start)) {
+ panic(fmt.Sprintf("incomparable values detected: want equal elements: %v", v.Slice(start, i)))
+ }
+ start = -1
+ } else if start == -1 {
+ start = i
+ }
+ }
+}
+func (ss sliceSorter) less(v reflect.Value, i, j int) bool {
+ vx, vy := v.Index(i), v.Index(j)
+ return ss.fnc.Call([]reflect.Value{vx, vy})[0].Bool()
+}
+
+// SortMaps returns a Transformer option that flattens map[K]V types to be a
+// sorted []struct{K, V}. The less function must be of the form
+// "func(T, T) bool" which is used to sort any map with key K that is
+// assignable to T.
+//
+// Flattening the map into a slice has the property that cmp.Equal is able to
+// use Comparers on K or the K.Equal method if it exists.
+//
+// The less function must be:
+// • Deterministic: less(x, y) == less(x, y)
+// • Irreflexive: !less(x, x)
+// • Transitive: if !less(x, y) and !less(y, z), then !less(x, z)
+// • Total: if x != y, then either less(x, y) or less(y, x)
+//
+// SortMaps can be used in conjunction with EquateEmpty.
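+//
+// As an illustrative sketch (an editor's example, not upstream documentation;
+// it mirrors the time.Time test cases in util_test.go, where t0 is any
+// time.Time value in the UTC location):
+//
+//	x := map[time.Time]string{t0: "event"}
+//	y := map[time.Time]string{t0.In(time.Local): "event"}
+//	// Without SortMaps the maps are unequal, since map keys are matched
+//	// with ==; after flattening, the keys are compared with time.Time.Equal:
+//	cmp.Equal(x, y, cmpopts.SortMaps(func(a, b time.Time) bool { return a.Before(b) })) // true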
+func SortMaps(less interface{}) cmp.Option { + vf := reflect.ValueOf(less) + if !function.IsType(vf.Type(), function.Less) || vf.IsNil() { + panic(fmt.Sprintf("invalid less function: %T", less)) + } + ms := mapSorter{vf.Type().In(0), vf} + return cmp.FilterValues(ms.filter, cmp.Transformer("Sort", ms.sort)) +} + +type mapSorter struct { + in reflect.Type // T + fnc reflect.Value // func(T, T) bool +} + +func (ms mapSorter) filter(x, y interface{}) bool { + vx, vy := reflect.ValueOf(x), reflect.ValueOf(y) + return (x != nil && y != nil && vx.Type() == vy.Type()) && + (vx.Kind() == reflect.Map && vx.Type().Key().AssignableTo(ms.in)) && + (vx.Len() != 0 || vy.Len() != 0) +} +func (ms mapSorter) sort(x interface{}) interface{} { + src := reflect.ValueOf(x) + outType := mapEntryType(src.Type()) + dst := reflect.MakeSlice(reflect.SliceOf(outType), src.Len(), src.Len()) + for i, k := range src.MapKeys() { + v := reflect.New(outType).Elem() + v.Field(0).Set(k) + v.Field(1).Set(src.MapIndex(k)) + dst.Index(i).Set(v) + } + sortSlice(dst.Interface(), func(i, j int) bool { return ms.less(dst, i, j) }) + ms.checkSort(dst) + return dst.Interface() +} +func (ms mapSorter) checkSort(v reflect.Value) { + for i := 1; i < v.Len(); i++ { + if !ms.less(v, i-1, i) { + panic(fmt.Sprintf("partial order detected: want %v < %v", v.Index(i-1), v.Index(i))) + } + } +} +func (ms mapSorter) less(v reflect.Value, i, j int) bool { + vx, vy := v.Index(i).Field(0), v.Index(j).Field(0) + if !hasReflectStructOf { + vx, vy = vx.Elem(), vy.Elem() + } + return ms.fnc.Call([]reflect.Value{vx, vy})[0].Bool() +} diff --git a/vendor/github.com/google/go-cmp/cmp/cmpopts/sort_go17.go b/vendor/github.com/google/go-cmp/cmp/cmpopts/sort_go17.go new file mode 100644 index 000000000..839b88ca4 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/cmpopts/sort_go17.go @@ -0,0 +1,46 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build !go1.8 + +package cmpopts + +import ( + "reflect" + "sort" +) + +const hasReflectStructOf = false + +func mapEntryType(reflect.Type) reflect.Type { + return reflect.TypeOf(struct{ K, V interface{} }{}) +} + +func sliceIsSorted(slice interface{}, less func(i, j int) bool) bool { + return sort.IsSorted(reflectSliceSorter{reflect.ValueOf(slice), less}) +} +func sortSlice(slice interface{}, less func(i, j int) bool) { + sort.Sort(reflectSliceSorter{reflect.ValueOf(slice), less}) +} +func sortSliceStable(slice interface{}, less func(i, j int) bool) { + sort.Stable(reflectSliceSorter{reflect.ValueOf(slice), less}) +} + +type reflectSliceSorter struct { + slice reflect.Value + less func(i, j int) bool +} + +func (ss reflectSliceSorter) Len() int { + return ss.slice.Len() +} +func (ss reflectSliceSorter) Less(i, j int) bool { + return ss.less(i, j) +} +func (ss reflectSliceSorter) Swap(i, j int) { + vi := ss.slice.Index(i).Interface() + vj := ss.slice.Index(j).Interface() + ss.slice.Index(i).Set(reflect.ValueOf(vj)) + ss.slice.Index(j).Set(reflect.ValueOf(vi)) +} diff --git a/vendor/github.com/google/go-cmp/cmp/cmpopts/sort_go18.go b/vendor/github.com/google/go-cmp/cmp/cmpopts/sort_go18.go new file mode 100644 index 000000000..8a59c0d38 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/cmpopts/sort_go18.go @@ -0,0 +1,31 @@ +// Copyright 2017, The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build go1.8 + +package cmpopts + +import ( + "reflect" + "sort" +) + +const hasReflectStructOf = true + +func mapEntryType(t reflect.Type) reflect.Type { + return reflect.StructOf([]reflect.StructField{ + {Name: "K", Type: t.Key()}, + {Name: "V", Type: t.Elem()}, + }) +} + +func sliceIsSorted(slice interface{}, less func(i, j int) bool) bool { + return sort.SliceIsSorted(slice, less) +} +func sortSlice(slice interface{}, less func(i, j int) bool) { + sort.Slice(slice, less) +} +func sortSliceStable(slice interface{}, less func(i, j int) bool) { + sort.SliceStable(slice, less) +} diff --git a/vendor/github.com/google/go-cmp/cmp/cmpopts/struct_filter.go b/vendor/github.com/google/go-cmp/cmp/cmpopts/struct_filter.go new file mode 100644 index 000000000..97f707983 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/cmpopts/struct_filter.go @@ -0,0 +1,182 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmpopts + +import ( + "fmt" + "reflect" + "strings" + + "github.com/google/go-cmp/cmp" +) + +// filterField returns a new Option where opt is only evaluated on paths that +// include a specific exported field on a single struct type. +// The struct type is specified by passing in a value of that type. +// +// The name may be a dot-delimited string (e.g., "Foo.Bar") to select a +// specific sub-field that is embedded or nested within the parent struct. +func filterField(typ interface{}, name string, opt cmp.Option) cmp.Option { + // TODO: This is currently unexported over concerns of how helper filters + // can be composed together easily. + // TODO: Add tests for FilterField. + + sf := newStructFilter(typ, name) + return cmp.FilterPath(sf.filter, opt) +} + +type structFilter struct { + t reflect.Type // The root struct type to match on + ft fieldTree // Tree of fields to match on +} + +func newStructFilter(typ interface{}, names ...string) structFilter { + // TODO: Perhaps allow * as a special identifier to allow ignoring any + // number of path steps until the next field match? + // This could be useful when a concrete struct gets transformed into + // an anonymous struct where it is not possible to specify that by type, + // but the transformer happens to provide guarantees about the names of + // the transformed fields. + + t := reflect.TypeOf(typ) + if t == nil || t.Kind() != reflect.Struct { + panic(fmt.Sprintf("%T must be a struct", typ)) + } + var ft fieldTree + for _, name := range names { + cname, err := canonicalName(t, name) + if err != nil { + panic(fmt.Sprintf("%s: %v", strings.Join(cname, "."), err)) + } + ft.insert(cname) + } + return structFilter{t, ft} +} + +func (sf structFilter) filter(p cmp.Path) bool { + for i, ps := range p { + if ps.Type().AssignableTo(sf.t) && sf.ft.matchPrefix(p[i+1:]) { + return true + } + } + return false +} + +// fieldTree represents a set of dot-separated identifiers. 
+// +// For example, inserting the following selectors: +// Foo +// Foo.Bar.Baz +// Foo.Buzz +// Nuka.Cola.Quantum +// +// Results in a tree of the form: +// {sub: { +// "Foo": {ok: true, sub: { +// "Bar": {sub: { +// "Baz": {ok: true}, +// }}, +// "Buzz": {ok: true}, +// }}, +// "Nuka": {sub: { +// "Cola": {sub: { +// "Quantum": {ok: true}, +// }}, +// }}, +// }} +type fieldTree struct { + ok bool // Whether this is a specified node + sub map[string]fieldTree // The sub-tree of fields under this node +} + +// insert inserts a sequence of field accesses into the tree. +func (ft *fieldTree) insert(cname []string) { + if ft.sub == nil { + ft.sub = make(map[string]fieldTree) + } + if len(cname) == 0 { + ft.ok = true + return + } + sub := ft.sub[cname[0]] + sub.insert(cname[1:]) + ft.sub[cname[0]] = sub +} + +// matchPrefix reports whether any selector in the fieldTree matches +// the start of path p. +func (ft fieldTree) matchPrefix(p cmp.Path) bool { + for _, ps := range p { + switch ps := ps.(type) { + case cmp.StructField: + ft = ft.sub[ps.Name()] + if ft.ok { + return true + } + if len(ft.sub) == 0 { + return false + } + case cmp.Indirect: + default: + return false + } + } + return false +} + +// canonicalName returns a list of identifiers where any struct field access +// through an embedded field is expanded to include the names of the embedded +// types themselves. +// +// For example, suppose field "Foo" is not directly in the parent struct, +// but actually from an embedded struct of type "Bar". Then, the canonical name +// of "Foo" is actually "Bar.Foo". +// +// Suppose field "Foo" is not directly in the parent struct, but actually +// a field in two different embedded structs of types "Bar" and "Baz". +// Then the selector "Foo" causes a panic since it is ambiguous which one it +// refers to. The user must specify either "Bar.Foo" or "Baz.Foo". +func canonicalName(t reflect.Type, sel string) ([]string, error) { + var name string + sel = strings.TrimPrefix(sel, ".") + if sel == "" { + return nil, fmt.Errorf("name must not be empty") + } + if i := strings.IndexByte(sel, '.'); i < 0 { + name, sel = sel, "" + } else { + name, sel = sel[:i], sel[i:] + } + + // Type must be a struct or pointer to struct. + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + if t.Kind() != reflect.Struct { + return nil, fmt.Errorf("%v must be a struct", t) + } + + // Find the canonical name for this current field name. + // If the field exists in an embedded struct, then it will be expanded. + if !isExported(name) { + // Disallow unexported fields: + // * To discourage people from actually touching unexported fields + // * FieldByName is buggy (https://golang.org/issue/4876) + return []string{name}, fmt.Errorf("name must be exported") + } + sf, ok := t.FieldByName(name) + if !ok { + return []string{name}, fmt.Errorf("does not exist") + } + var ss []string + for i := range sf.Index { + ss = append(ss, t.FieldByIndex(sf.Index[:i+1]).Name) + } + if sel == "" { + return ss, nil + } + ssPost, err := canonicalName(sf.Type, sel) + return append(ss, ssPost...), err +} diff --git a/vendor/github.com/google/go-cmp/cmp/cmpopts/util_test.go b/vendor/github.com/google/go-cmp/cmp/cmpopts/util_test.go new file mode 100644 index 000000000..f53278990 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/cmpopts/util_test.go @@ -0,0 +1,996 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. 
+ +package cmpopts + +import ( + "bytes" + "fmt" + "io" + "math" + "reflect" + "strings" + "sync" + "testing" + "time" + + "github.com/google/go-cmp/cmp" +) + +type ( + MyInt int + MyFloat float32 + MyTime struct{ time.Time } + MyStruct struct { + A, B []int + C, D map[time.Time]string + } + + Foo1 struct{ Alpha, Bravo, Charlie int } + Foo2 struct{ *Foo1 } + Foo3 struct{ *Foo2 } + Bar1 struct{ Foo3 } + Bar2 struct { + Bar1 + *Foo3 + Bravo float32 + } + Bar3 struct { + Bar1 + Bravo *Bar2 + Delta struct{ Echo Foo1 } + *Foo3 + Alpha string + } + + privateStruct struct{ Public, private int } + PublicStruct struct{ Public, private int } + ParentStruct struct { + *privateStruct + *PublicStruct + Public int + private int + } + + Everything struct { + MyInt + MyFloat + MyTime + MyStruct + Bar3 + ParentStruct + } + + EmptyInterface interface{} +) + +func TestOptions(t *testing.T) { + createBar3X := func() *Bar3 { + return &Bar3{ + Bar1: Bar1{Foo3{&Foo2{&Foo1{Bravo: 2}}}}, + Bravo: &Bar2{ + Bar1: Bar1{Foo3{&Foo2{&Foo1{Charlie: 7}}}}, + Foo3: &Foo3{&Foo2{&Foo1{Bravo: 5}}}, + Bravo: 4, + }, + Delta: struct{ Echo Foo1 }{Foo1{Charlie: 3}}, + Foo3: &Foo3{&Foo2{&Foo1{Alpha: 1}}}, + Alpha: "alpha", + } + } + createBar3Y := func() *Bar3 { + return &Bar3{ + Bar1: Bar1{Foo3{&Foo2{&Foo1{Bravo: 3}}}}, + Bravo: &Bar2{ + Bar1: Bar1{Foo3{&Foo2{&Foo1{Charlie: 8}}}}, + Foo3: &Foo3{&Foo2{&Foo1{Bravo: 6}}}, + Bravo: 5, + }, + Delta: struct{ Echo Foo1 }{Foo1{Charlie: 4}}, + Foo3: &Foo3{&Foo2{&Foo1{Alpha: 2}}}, + Alpha: "ALPHA", + } + } + + tests := []struct { + label string // Test name + x, y interface{} // Input values to compare + opts []cmp.Option // Input options + wantEqual bool // Whether the inputs are equal + wantPanic bool // Whether Equal should panic + reason string // The reason for the expected outcome + }{{ + label: "EquateEmpty", + x: []int{}, + y: []int(nil), + wantEqual: false, + reason: "not equal because empty non-nil and nil slice differ", + }, { + label: "EquateEmpty", + x: []int{}, + y: []int(nil), + opts: []cmp.Option{EquateEmpty()}, + wantEqual: true, + reason: "equal because EquateEmpty equates empty slices", + }, { + label: "SortSlices", + x: []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, + y: []int{1, 0, 5, 2, 8, 9, 4, 3, 6, 7}, + wantEqual: false, + reason: "not equal because element order differs", + }, { + label: "SortSlices", + x: []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, + y: []int{1, 0, 5, 2, 8, 9, 4, 3, 6, 7}, + opts: []cmp.Option{SortSlices(func(x, y int) bool { return x < y })}, + wantEqual: true, + reason: "equal because SortSlices sorts the slices", + }, { + label: "SortSlices", + x: []MyInt{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, + y: []MyInt{1, 0, 5, 2, 8, 9, 4, 3, 6, 7}, + opts: []cmp.Option{SortSlices(func(x, y int) bool { return x < y })}, + wantEqual: false, + reason: "not equal because MyInt is not the same type as int", + }, { + label: "SortSlices", + x: []float64{0, 1, 1, 2, 2, 2}, + y: []float64{2, 0, 2, 1, 2, 1}, + opts: []cmp.Option{SortSlices(func(x, y float64) bool { return x < y })}, + wantEqual: true, + reason: "equal even when sorted with duplicate elements", + }, { + label: "SortSlices", + x: []float64{0, 1, 1, 2, 2, 2, math.NaN(), 3, 3, 3, 3, 4, 4, 4, 4}, + y: []float64{2, 0, 4, 4, 3, math.NaN(), 4, 1, 3, 2, 3, 3, 4, 1, 2}, + opts: []cmp.Option{SortSlices(func(x, y float64) bool { return x < y })}, + wantPanic: true, + reason: "panics because SortSlices used with non-transitive less function", + }, { + label: "SortSlices", + x: []float64{0, 1, 1, 2, 2, 2, math.NaN(), 3, 3, 3, 3, 4, 4, 
4, 4}, + y: []float64{2, 0, 4, 4, 3, math.NaN(), 4, 1, 3, 2, 3, 3, 4, 1, 2}, + opts: []cmp.Option{SortSlices(func(x, y float64) bool { + return (!math.IsNaN(x) && math.IsNaN(y)) || x < y + })}, + wantEqual: false, + reason: "no panics because SortSlices used with valid less function; not equal because NaN != NaN", + }, { + label: "SortSlices+EquateNaNs", + x: []float64{0, 1, 1, 2, 2, 2, math.NaN(), 3, 3, 3, math.NaN(), 3, 4, 4, 4, 4}, + y: []float64{2, 0, 4, 4, 3, math.NaN(), 4, 1, 3, 2, 3, 3, 4, 1, math.NaN(), 2}, + opts: []cmp.Option{ + EquateNaNs(), + SortSlices(func(x, y float64) bool { + return (!math.IsNaN(x) && math.IsNaN(y)) || x < y + }), + }, + wantEqual: true, + reason: "no panics because SortSlices used with valid less function; equal because EquateNaNs is used", + }, { + label: "SortMaps", + x: map[time.Time]string{ + time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC): "0th birthday", + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC): "1st birthday", + time.Date(2011, time.November, 10, 23, 0, 0, 0, time.UTC): "2nd birthday", + }, + y: map[time.Time]string{ + time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).In(time.Local): "0th birthday", + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC).In(time.Local): "1st birthday", + time.Date(2011, time.November, 10, 23, 0, 0, 0, time.UTC).In(time.Local): "2nd birthday", + }, + wantEqual: false, + reason: "not equal because timezones differ", + }, { + label: "SortMaps", + x: map[time.Time]string{ + time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC): "0th birthday", + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC): "1st birthday", + time.Date(2011, time.November, 10, 23, 0, 0, 0, time.UTC): "2nd birthday", + }, + y: map[time.Time]string{ + time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).In(time.Local): "0th birthday", + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC).In(time.Local): "1st birthday", + time.Date(2011, time.November, 10, 23, 0, 0, 0, time.UTC).In(time.Local): "2nd birthday", + }, + opts: []cmp.Option{SortMaps(func(x, y time.Time) bool { return x.Before(y) })}, + wantEqual: true, + reason: "equal because SortMaps flattens to a slice where Time.Equal can be used", + }, { + label: "SortMaps", + x: map[MyTime]string{ + {time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC)}: "0th birthday", + {time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC)}: "1st birthday", + {time.Date(2011, time.November, 10, 23, 0, 0, 0, time.UTC)}: "2nd birthday", + }, + y: map[MyTime]string{ + {time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).In(time.Local)}: "0th birthday", + {time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC).In(time.Local)}: "1st birthday", + {time.Date(2011, time.November, 10, 23, 0, 0, 0, time.UTC).In(time.Local)}: "2nd birthday", + }, + opts: []cmp.Option{SortMaps(func(x, y time.Time) bool { return x.Before(y) })}, + wantEqual: false, + reason: "not equal because MyTime is not assignable to time.Time", + }, { + label: "SortMaps", + x: map[int]string{-3: "", -2: "", -1: "", 0: "", 1: "", 2: "", 3: ""}, + // => {0, 1, 2, 3, -1, -2, -3}, + y: map[int]string{300: "", 200: "", 100: "", 0: "", 1: "", 2: "", 3: ""}, + // => {0, 1, 2, 3, 100, 200, 300}, + opts: []cmp.Option{SortMaps(func(a, b int) bool { + if -10 < a && a <= 0 { + a *= -100 + } + if -10 < b && b <= 0 { + b *= -100 + } + return a < b + })}, + wantEqual: false, + reason: "not equal because values differ even though SortMap provides valid ordering", + }, { + label: "SortMaps", 
+ x: map[int]string{-3: "", -2: "", -1: "", 0: "", 1: "", 2: "", 3: ""},
+ // => {0, 1, 2, 3, -1, -2, -3},
+ y: map[int]string{300: "", 200: "", 100: "", 0: "", 1: "", 2: "", 3: ""},
+ // => {0, 1, 2, 3, 100, 200, 300},
+ opts: []cmp.Option{
+ SortMaps(func(x, y int) bool {
+ if -10 < x && x <= 0 {
+ x *= -100
+ }
+ if -10 < y && y <= 0 {
+ y *= -100
+ }
+ return x < y
+ }),
+ cmp.Comparer(func(x, y int) bool {
+ if -10 < x && x <= 0 {
+ x *= -100
+ }
+ if -10 < y && y <= 0 {
+ y *= -100
+ }
+ return x == y
+ }),
+ },
+ wantEqual: true,
+ reason: "equal because Comparer used to equate differences",
+ }, {
+ label: "SortMaps",
+ x: map[int]string{-3: "", -2: "", -1: "", 0: "", 1: "", 2: "", 3: ""},
+ y: map[int]string{},
+ opts: []cmp.Option{SortMaps(func(x, y int) bool {
+ return x < y && x >= 0 && y >= 0
+ })},
+ wantPanic: true,
+ reason: "panics because SortMaps used with non-transitive less function",
+ }, {
+ label: "SortMaps",
+ x: map[int]string{-3: "", -2: "", -1: "", 0: "", 1: "", 2: "", 3: ""},
+ y: map[int]string{},
+ opts: []cmp.Option{SortMaps(func(x, y int) bool {
+ return math.Abs(float64(x)) < math.Abs(float64(y))
+ })},
+ wantPanic: true,
+ reason: "panics because SortMaps used with partial less function",
+ }, {
+ label: "EquateEmpty+SortSlices+SortMaps",
+ x: MyStruct{
+ A: []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
+ C: map[time.Time]string{
+ time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC): "0th birthday",
+ time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC): "1st birthday",
+ },
+ D: map[time.Time]string{},
+ },
+ y: MyStruct{
+ A: []int{1, 0, 5, 2, 8, 9, 4, 3, 6, 7},
+ B: []int{},
+ C: map[time.Time]string{
+ time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).In(time.Local): "0th birthday",
+ time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC).In(time.Local): "1st birthday",
+ },
+ },
+ opts: []cmp.Option{
+ EquateEmpty(),
+ SortSlices(func(x, y int) bool { return x < y }),
+ SortMaps(func(x, y time.Time) bool { return x.Before(y) }),
+ },
+ wantEqual: true,
+ reason: "no panics because EquateEmpty should compose with the sort options",
+ }, {
+ label: "EquateApprox",
+ x: 3.09,
+ y: 3.10,
+ wantEqual: false,
+ reason: "not equal because floats do not exactly match",
+ }, {
+ label: "EquateApprox",
+ x: 3.09,
+ y: 3.10,
+ opts: []cmp.Option{EquateApprox(0, 0)},
+ wantEqual: false,
+ reason: "not equal because EquateApprox(0, 0) is equivalent to using ==",
+ }, {
+ label: "EquateApprox",
+ x: 3.09,
+ y: 3.10,
+ opts: []cmp.Option{EquateApprox(0.003, 0.009)},
+ wantEqual: false,
+ reason: "not equal because EquateApprox is too strict",
+ }, {
+ label: "EquateApprox",
+ x: 3.09,
+ y: 3.10,
+ opts: []cmp.Option{EquateApprox(0, 0.011)},
+ wantEqual: true,
+ reason: "equal because margin is loose enough to match",
+ }, {
+ label: "EquateApprox",
+ x: 3.09,
+ y: 3.10,
+ opts: []cmp.Option{EquateApprox(0.004, 0)},
+ wantEqual: true,
+ reason: "equal because fraction is loose enough to match",
+ }, {
+ label: "EquateApprox",
+ x: 3.09,
+ y: 3.10,
+ opts: []cmp.Option{EquateApprox(0.004, 0.011)},
+ wantEqual: true,
+ reason: "equal because both the margin and fraction are loose enough to match",
+ }, {
+ label: "EquateApprox",
+ x: float32(3.09),
+ y: float64(3.10),
+ opts: []cmp.Option{EquateApprox(0.004, 0)},
+ wantEqual: false,
+ reason: "not equal because the types differ",
+ }, {
+ label: "EquateApprox",
+ x: float32(3.09),
+ y: float32(3.10),
+ opts: []cmp.Option{EquateApprox(0.004, 0)},
+ wantEqual: true,
+ reason: "equal because EquateApprox
also applies on float32s", + }, { + label: "EquateApprox", + x: []float64{math.Inf(+1), math.Inf(-1)}, + y: []float64{math.Inf(+1), math.Inf(-1)}, + opts: []cmp.Option{EquateApprox(0, 1)}, + wantEqual: true, + reason: "equal because we fall back on == which matches Inf (EquateApprox does not apply on Inf) ", + }, { + label: "EquateApprox", + x: []float64{math.Inf(+1), -1e100}, + y: []float64{+1e100, math.Inf(-1)}, + opts: []cmp.Option{EquateApprox(0, 1)}, + wantEqual: false, + reason: "not equal because we fall back on == where Inf != 1e100 (EquateApprox does not apply on Inf)", + }, { + label: "EquateApprox", + x: float64(+1e100), + y: float64(-1e100), + opts: []cmp.Option{EquateApprox(math.Inf(+1), 0)}, + wantEqual: true, + reason: "equal because infinite fraction matches everything", + }, { + label: "EquateApprox", + x: float64(+1e100), + y: float64(-1e100), + opts: []cmp.Option{EquateApprox(0, math.Inf(+1))}, + wantEqual: true, + reason: "equal because infinite margin matches everything", + }, { + label: "EquateApprox", + x: math.Pi, + y: math.Pi, + opts: []cmp.Option{EquateApprox(0, 0)}, + wantEqual: true, + reason: "equal because EquateApprox(0, 0) is equivalent to ==", + }, { + label: "EquateApprox", + x: math.Pi, + y: math.Nextafter(math.Pi, math.Inf(+1)), + opts: []cmp.Option{EquateApprox(0, 0)}, + wantEqual: false, + reason: "not equal because EquateApprox(0, 0) is equivalent to ==", + }, { + label: "EquateNaNs", + x: []float64{1.0, math.NaN(), math.E, -0.0, +0.0, math.Inf(+1), math.Inf(-1)}, + y: []float64{1.0, math.NaN(), math.E, -0.0, +0.0, math.Inf(+1), math.Inf(-1)}, + wantEqual: false, + reason: "not equal because NaN != NaN", + }, { + label: "EquateNaNs", + x: []float64{1.0, math.NaN(), math.E, -0.0, +0.0, math.Inf(+1), math.Inf(-1)}, + y: []float64{1.0, math.NaN(), math.E, -0.0, +0.0, math.Inf(+1), math.Inf(-1)}, + opts: []cmp.Option{EquateNaNs()}, + wantEqual: true, + reason: "equal because EquateNaNs allows NaN == NaN", + }, { + label: "EquateNaNs", + x: []float32{1.0, float32(math.NaN()), math.E, -0.0, +0.0}, + y: []float32{1.0, float32(math.NaN()), math.E, -0.0, +0.0}, + opts: []cmp.Option{EquateNaNs()}, + wantEqual: true, + reason: "equal because EquateNaNs operates on float32", + }, { + label: "EquateApprox+EquateNaNs", + x: []float64{1.0, math.NaN(), math.E, -0.0, +0.0, math.Inf(+1), math.Inf(-1), 1.01, 5001}, + y: []float64{1.0, math.NaN(), math.E, -0.0, +0.0, math.Inf(+1), math.Inf(-1), 1.02, 5002}, + opts: []cmp.Option{ + EquateNaNs(), + EquateApprox(0.01, 0), + }, + wantEqual: true, + reason: "equal because EquateNaNs and EquateApprox compose together", + }, { + label: "EquateApprox+EquateNaNs", + x: []MyFloat{1.0, MyFloat(math.NaN()), MyFloat(math.E), -0.0, +0.0, MyFloat(math.Inf(+1)), MyFloat(math.Inf(-1)), 1.01, 5001}, + y: []MyFloat{1.0, MyFloat(math.NaN()), MyFloat(math.E), -0.0, +0.0, MyFloat(math.Inf(+1)), MyFloat(math.Inf(-1)), 1.02, 5002}, + opts: []cmp.Option{ + EquateNaNs(), + EquateApprox(0.01, 0), + }, + wantEqual: false, + reason: "not equal because EquateApprox and EquateNaNs do not apply on a named type", + }, { + label: "EquateApprox+EquateNaNs+Transform", + x: []MyFloat{1.0, MyFloat(math.NaN()), MyFloat(math.E), -0.0, +0.0, MyFloat(math.Inf(+1)), MyFloat(math.Inf(-1)), 1.01, 5001}, + y: []MyFloat{1.0, MyFloat(math.NaN()), MyFloat(math.E), -0.0, +0.0, MyFloat(math.Inf(+1)), MyFloat(math.Inf(-1)), 1.02, 5002}, + opts: []cmp.Option{ + cmp.Transformer("", func(x MyFloat) float64 { return float64(x) }), + EquateNaNs(), + EquateApprox(0.01, 0), + }, 
+ wantEqual: true, + reason: "equal because named type is transformed to float64", + }, { + label: "IgnoreFields", + x: Bar1{Foo3{&Foo2{&Foo1{Alpha: 5}}}}, + y: Bar1{Foo3{&Foo2{&Foo1{Alpha: 6}}}}, + wantEqual: false, + reason: "not equal because values do not match in deeply embedded field", + }, { + label: "IgnoreFields", + x: Bar1{Foo3{&Foo2{&Foo1{Alpha: 5}}}}, + y: Bar1{Foo3{&Foo2{&Foo1{Alpha: 6}}}}, + opts: []cmp.Option{IgnoreFields(Bar1{}, "Alpha")}, + wantEqual: true, + reason: "equal because IgnoreField ignores deeply embedded field: Alpha", + }, { + label: "IgnoreFields", + x: Bar1{Foo3{&Foo2{&Foo1{Alpha: 5}}}}, + y: Bar1{Foo3{&Foo2{&Foo1{Alpha: 6}}}}, + opts: []cmp.Option{IgnoreFields(Bar1{}, "Foo1.Alpha")}, + wantEqual: true, + reason: "equal because IgnoreField ignores deeply embedded field: Foo1.Alpha", + }, { + label: "IgnoreFields", + x: Bar1{Foo3{&Foo2{&Foo1{Alpha: 5}}}}, + y: Bar1{Foo3{&Foo2{&Foo1{Alpha: 6}}}}, + opts: []cmp.Option{IgnoreFields(Bar1{}, "Foo2.Alpha")}, + wantEqual: true, + reason: "equal because IgnoreField ignores deeply embedded field: Foo2.Alpha", + }, { + label: "IgnoreFields", + x: Bar1{Foo3{&Foo2{&Foo1{Alpha: 5}}}}, + y: Bar1{Foo3{&Foo2{&Foo1{Alpha: 6}}}}, + opts: []cmp.Option{IgnoreFields(Bar1{}, "Foo3.Alpha")}, + wantEqual: true, + reason: "equal because IgnoreField ignores deeply embedded field: Foo3.Alpha", + }, { + label: "IgnoreFields", + x: Bar1{Foo3{&Foo2{&Foo1{Alpha: 5}}}}, + y: Bar1{Foo3{&Foo2{&Foo1{Alpha: 6}}}}, + opts: []cmp.Option{IgnoreFields(Bar1{}, "Foo3.Foo2.Alpha")}, + wantEqual: true, + reason: "equal because IgnoreField ignores deeply embedded field: Foo3.Foo2.Alpha", + }, { + label: "IgnoreFields", + x: createBar3X(), + y: createBar3Y(), + wantEqual: false, + reason: "not equal because many deeply nested or embedded fields differ", + }, { + label: "IgnoreFields", + x: createBar3X(), + y: createBar3Y(), + opts: []cmp.Option{IgnoreFields(Bar3{}, "Bar1", "Bravo", "Delta", "Foo3", "Alpha")}, + wantEqual: true, + reason: "equal because IgnoreFields ignores fields at the highest levels", + }, { + label: "IgnoreFields", + x: createBar3X(), + y: createBar3Y(), + opts: []cmp.Option{ + IgnoreFields(Bar3{}, + "Bar1.Foo3.Bravo", + "Bravo.Bar1.Foo3.Foo2.Foo1.Charlie", + "Bravo.Foo3.Foo2.Foo1.Bravo", + "Bravo.Bravo", + "Delta.Echo.Charlie", + "Foo3.Foo2.Foo1.Alpha", + "Alpha", + ), + }, + wantEqual: true, + reason: "equal because IgnoreFields ignores fields using fully-qualified field", + }, { + label: "IgnoreFields", + x: createBar3X(), + y: createBar3Y(), + opts: []cmp.Option{ + IgnoreFields(Bar3{}, + "Bar1.Foo3.Bravo", + "Bravo.Foo3.Foo2.Foo1.Bravo", + "Bravo.Bravo", + "Delta.Echo.Charlie", + "Foo3.Foo2.Foo1.Alpha", + "Alpha", + ), + }, + wantEqual: false, + reason: "not equal because one fully-qualified field is not ignored: Bravo.Bar1.Foo3.Foo2.Foo1.Charlie", + }, { + label: "IgnoreFields", + x: createBar3X(), + y: createBar3Y(), + opts: []cmp.Option{IgnoreFields(Bar3{}, "Bar1", "Bravo", "Delta", "Alpha")}, + wantEqual: false, + reason: "not equal because highest-level field is not ignored: Foo3", + }, { + label: "IgnoreTypes", + x: []interface{}{5, "same"}, + y: []interface{}{6, "same"}, + wantEqual: false, + reason: "not equal because 5 != 6", + }, { + label: "IgnoreTypes", + x: []interface{}{5, "same"}, + y: []interface{}{6, "same"}, + opts: []cmp.Option{IgnoreTypes(0)}, + wantEqual: true, + reason: "equal because ints are ignored", + }, { + label: "IgnoreTypes+IgnoreInterfaces", + x: []interface{}{5, "same", new(bytes.Buffer)}, + y: 
[]interface{}{6, "same", new(bytes.Buffer)}, + opts: []cmp.Option{IgnoreTypes(0)}, + wantPanic: true, + reason: "panics because bytes.Buffer has unexported fields", + }, { + label: "IgnoreTypes+IgnoreInterfaces", + x: []interface{}{5, "same", new(bytes.Buffer)}, + y: []interface{}{6, "diff", new(bytes.Buffer)}, + opts: []cmp.Option{ + IgnoreTypes(0, ""), + IgnoreInterfaces(struct{ io.Reader }{}), + }, + wantEqual: true, + reason: "equal because bytes.Buffer is ignored by match on interface type", + }, { + label: "IgnoreTypes+IgnoreInterfaces", + x: []interface{}{5, "same", new(bytes.Buffer)}, + y: []interface{}{6, "same", new(bytes.Buffer)}, + opts: []cmp.Option{ + IgnoreTypes(0, ""), + IgnoreInterfaces(struct { + io.Reader + io.Writer + fmt.Stringer + }{}), + }, + wantEqual: true, + reason: "equal because bytes.Buffer is ignored by match on multiple interface types", + }, { + label: "IgnoreInterfaces", + x: struct{ mu sync.Mutex }{}, + y: struct{ mu sync.Mutex }{}, + wantPanic: true, + reason: "panics because sync.Mutex has unexported fields", + }, { + label: "IgnoreInterfaces", + x: struct{ mu sync.Mutex }{}, + y: struct{ mu sync.Mutex }{}, + opts: []cmp.Option{IgnoreInterfaces(struct{ sync.Locker }{})}, + wantEqual: true, + reason: "equal because IgnoreInterfaces applies on values (with pointer receiver)", + }, { + label: "IgnoreInterfaces", + x: struct{ mu *sync.Mutex }{}, + y: struct{ mu *sync.Mutex }{}, + opts: []cmp.Option{IgnoreInterfaces(struct{ sync.Locker }{})}, + wantEqual: true, + reason: "equal because IgnoreInterfaces applies on pointers", + }, { + label: "IgnoreUnexported", + x: ParentStruct{Public: 1, private: 2}, + y: ParentStruct{Public: 1, private: -2}, + opts: []cmp.Option{cmp.AllowUnexported(ParentStruct{})}, + wantEqual: false, + reason: "not equal because ParentStruct.private differs with AllowUnexported", + }, { + label: "IgnoreUnexported", + x: ParentStruct{Public: 1, private: 2}, + y: ParentStruct{Public: 1, private: -2}, + opts: []cmp.Option{IgnoreUnexported(ParentStruct{})}, + wantEqual: true, + reason: "equal because IgnoreUnexported ignored ParentStruct.private", + }, { + label: "IgnoreUnexported", + x: ParentStruct{Public: 1, private: 2, PublicStruct: &PublicStruct{Public: 3, private: 4}}, + y: ParentStruct{Public: 1, private: -2, PublicStruct: &PublicStruct{Public: 3, private: 4}}, + opts: []cmp.Option{ + cmp.AllowUnexported(PublicStruct{}), + IgnoreUnexported(ParentStruct{}), + }, + wantEqual: true, + reason: "equal because ParentStruct.private is ignored", + }, { + label: "IgnoreUnexported", + x: ParentStruct{Public: 1, private: 2, PublicStruct: &PublicStruct{Public: 3, private: 4}}, + y: ParentStruct{Public: 1, private: -2, PublicStruct: &PublicStruct{Public: 3, private: -4}}, + opts: []cmp.Option{ + cmp.AllowUnexported(PublicStruct{}), + IgnoreUnexported(ParentStruct{}), + }, + wantEqual: false, + reason: "not equal because ParentStruct.PublicStruct.private differs and not ignored by IgnoreUnexported(ParentStruct{})", + }, { + label: "IgnoreUnexported", + x: ParentStruct{Public: 1, private: 2, PublicStruct: &PublicStruct{Public: 3, private: 4}}, + y: ParentStruct{Public: 1, private: -2, PublicStruct: &PublicStruct{Public: 3, private: -4}}, + opts: []cmp.Option{ + IgnoreUnexported(ParentStruct{}, PublicStruct{}), + }, + wantEqual: true, + reason: "equal because both ParentStruct.PublicStruct and ParentStruct.PublicStruct.private are ignored", + }, { + label: "IgnoreUnexported", + x: ParentStruct{Public: 1, private: 2, privateStruct: 
&privateStruct{Public: 3, private: 4}}, + y: ParentStruct{Public: 1, private: 2, privateStruct: &privateStruct{Public: -3, private: -4}}, + opts: []cmp.Option{ + cmp.AllowUnexported(privateStruct{}, PublicStruct{}, ParentStruct{}), + }, + wantEqual: false, + reason: "not equal since ParentStruct.privateStruct differs", + }, { + label: "IgnoreUnexported", + x: ParentStruct{Public: 1, private: 2, privateStruct: &privateStruct{Public: 3, private: 4}}, + y: ParentStruct{Public: 1, private: 2, privateStruct: &privateStruct{Public: -3, private: -4}}, + opts: []cmp.Option{ + cmp.AllowUnexported(privateStruct{}, PublicStruct{}), + IgnoreUnexported(ParentStruct{}), + }, + wantEqual: true, + reason: "equal because ParentStruct.privateStruct ignored by IgnoreUnexported(ParentStruct{})", + }, { + label: "IgnoreUnexported", + x: ParentStruct{Public: 1, private: 2, privateStruct: &privateStruct{Public: 3, private: 4}}, + y: ParentStruct{Public: 1, private: 2, privateStruct: &privateStruct{Public: 3, private: -4}}, + opts: []cmp.Option{ + cmp.AllowUnexported(PublicStruct{}, ParentStruct{}), + IgnoreUnexported(privateStruct{}), + }, + wantEqual: true, + reason: "equal because privateStruct.private ignored by IgnoreUnexported(privateStruct{})", + }, { + label: "IgnoreUnexported", + x: ParentStruct{Public: 1, private: 2, privateStruct: &privateStruct{Public: 3, private: 4}}, + y: ParentStruct{Public: 1, private: 2, privateStruct: &privateStruct{Public: -3, private: -4}}, + opts: []cmp.Option{ + cmp.AllowUnexported(PublicStruct{}, ParentStruct{}), + IgnoreUnexported(privateStruct{}), + }, + wantEqual: false, + reason: "not equal because privateStruct.Public differs and not ignored by IgnoreUnexported(privateStruct{})", + }, { + label: "IgnoreFields+IgnoreTypes+IgnoreUnexported", + x: &Everything{ + MyInt: 5, + MyFloat: 3.3, + MyTime: MyTime{time.Now()}, + Bar3: *createBar3X(), + ParentStruct: ParentStruct{ + Public: 1, private: 2, PublicStruct: &PublicStruct{Public: 3, private: 4}, + }, + }, + y: &Everything{ + MyInt: -5, + MyFloat: 3.3, + MyTime: MyTime{time.Now()}, + Bar3: *createBar3Y(), + ParentStruct: ParentStruct{ + Public: 1, private: -2, PublicStruct: &PublicStruct{Public: -3, private: -4}, + }, + }, + opts: []cmp.Option{ + IgnoreFields(Everything{}, "MyTime", "Bar3.Foo3"), + IgnoreFields(Bar3{}, "Bar1", "Bravo", "Delta", "Alpha"), + IgnoreTypes(MyInt(0), PublicStruct{}), + IgnoreUnexported(ParentStruct{}), + }, + wantEqual: true, + reason: "equal because all Ignore options can be composed together", + }} + + for _, tt := range tests { + tRun(t, tt.label, func(t *testing.T) { + var gotEqual bool + var gotPanic string + func() { + defer func() { + if ex := recover(); ex != nil { + gotPanic = fmt.Sprint(ex) + } + }() + gotEqual = cmp.Equal(tt.x, tt.y, tt.opts...) 
+ }() + switch { + case gotPanic == "" && tt.wantPanic: + t.Errorf("expected Equal panic\nreason: %s", tt.reason) + case gotPanic != "" && !tt.wantPanic: + t.Errorf("unexpected Equal panic: got %v\nreason: %v", gotPanic, tt.reason) + case gotEqual != tt.wantEqual: + t.Errorf("Equal = %v, want %v\nreason: %v", gotEqual, tt.wantEqual, tt.reason) + } + }) + } +} + +func TestPanic(t *testing.T) { + args := func(x ...interface{}) []interface{} { return x } + tests := []struct { + label string // Test name + fnc interface{} // Option function to call + args []interface{} // Arguments to pass in + wantPanic string // Expected panic message + reason string // The reason for the expected outcome + }{{ + label: "EquateApprox", + fnc: EquateApprox, + args: args(0.0, 0.0), + reason: "zero margin and fraction is equivalent to exact equality", + }, { + label: "EquateApprox", + fnc: EquateApprox, + args: args(-0.1, 0.0), + wantPanic: "margin or fraction must be a non-negative number", + reason: "negative inputs are invalid", + }, { + label: "EquateApprox", + fnc: EquateApprox, + args: args(0.0, -0.1), + wantPanic: "margin or fraction must be a non-negative number", + reason: "negative inputs are invalid", + }, { + label: "EquateApprox", + fnc: EquateApprox, + args: args(math.NaN(), 0.0), + wantPanic: "margin or fraction must be a non-negative number", + reason: "NaN inputs are invalid", + }, { + label: "EquateApprox", + fnc: EquateApprox, + args: args(1.0, 0.0), + reason: "fraction of 1.0 or greater is valid", + }, { + label: "EquateApprox", + fnc: EquateApprox, + args: args(0.0, math.Inf(+1)), + reason: "margin of infinity is valid", + }, { + label: "SortSlices", + fnc: SortSlices, + args: args(strings.Compare), + wantPanic: "invalid less function", + reason: "func(x, y string) int is wrong signature for less", + }, { + label: "SortSlices", + fnc: SortSlices, + args: args((func(_, _ int) bool)(nil)), + wantPanic: "invalid less function", + reason: "nil value is not valid", + }, { + label: "SortMaps", + fnc: SortMaps, + args: args(strings.Compare), + wantPanic: "invalid less function", + reason: "func(x, y string) int is wrong signature for less", + }, { + label: "SortMaps", + fnc: SortMaps, + args: args((func(_, _ int) bool)(nil)), + wantPanic: "invalid less function", + reason: "nil value is not valid", + }, { + label: "IgnoreFields", + fnc: IgnoreFields, + args: args(Foo1{}, ""), + wantPanic: "name must not be empty", + reason: "empty selector is invalid", + }, { + label: "IgnoreFields", + fnc: IgnoreFields, + args: args(Foo1{}, "."), + wantPanic: "name must not be empty", + reason: "single dot selector is invalid", + }, { + label: "IgnoreFields", + fnc: IgnoreFields, + args: args(Foo1{}, ".Alpha"), + reason: "dot-prefix is okay since Foo1.Alpha reads naturally", + }, { + label: "IgnoreFields", + fnc: IgnoreFields, + args: args(Foo1{}, "Alpha."), + wantPanic: "name must not be empty", + reason: "dot-suffix is invalid", + }, { + label: "IgnoreFields", + fnc: IgnoreFields, + args: args(Foo1{}, "Alpha "), + wantPanic: "does not exist", + reason: "identifiers must not have spaces", + }, { + label: "IgnoreFields", + fnc: IgnoreFields, + args: args(Foo1{}, "Zulu"), + wantPanic: "does not exist", + reason: "name of non-existent field is invalid", + }, { + label: "IgnoreFields", + fnc: IgnoreFields, + args: args(Foo1{}, "Alpha.NoExist"), + wantPanic: "must be a struct", + reason: "cannot select into a non-struct", + }, { + label: "IgnoreFields", + fnc: IgnoreFields, + args: args(&Foo1{}, "Alpha"), + 
wantPanic: "must be a struct", + reason: "the type must be a struct (not pointer to a struct)", + }, { + label: "IgnoreFields", + fnc: IgnoreFields, + args: args(Foo1{}, "unexported"), + wantPanic: "name must be exported", + reason: "unexported fields must not be specified", + }, { + label: "IgnoreTypes", + fnc: IgnoreTypes, + reason: "empty input is valid", + }, { + label: "IgnoreTypes", + fnc: IgnoreTypes, + args: args(nil), + wantPanic: "cannot determine type", + reason: "input must not be nil value", + }, { + label: "IgnoreTypes", + fnc: IgnoreTypes, + args: args(0, 0, 0), + reason: "duplicate inputs of the same type is valid", + }, { + label: "IgnoreInterfaces", + fnc: IgnoreInterfaces, + args: args(nil), + wantPanic: "input must be an anonymous struct", + reason: "input must not be nil value", + }, { + label: "IgnoreInterfaces", + fnc: IgnoreInterfaces, + args: args(Foo1{}), + wantPanic: "input must be an anonymous struct", + reason: "input must not be a named struct type", + }, { + label: "IgnoreInterfaces", + fnc: IgnoreInterfaces, + args: args(struct{ _ io.Reader }{}), + wantPanic: "struct cannot have named fields", + reason: "input must not have named fields", + }, { + label: "IgnoreInterfaces", + fnc: IgnoreInterfaces, + args: args(struct{ Foo1 }{}), + wantPanic: "embedded field must be an interface type", + reason: "field types must be interfaces", + }, { + label: "IgnoreInterfaces", + fnc: IgnoreInterfaces, + args: args(struct{ EmptyInterface }{}), + wantPanic: "cannot ignore empty interface", + reason: "field types must not be the empty interface", + }, { + label: "IgnoreInterfaces", + fnc: IgnoreInterfaces, + args: args(struct { + io.Reader + io.Writer + io.Closer + io.ReadWriteCloser + }{}), + reason: "multiple interfaces may be specified, even if they overlap", + }, { + label: "IgnoreUnexported", + fnc: IgnoreUnexported, + reason: "empty input is valid", + }, { + label: "IgnoreUnexported", + fnc: IgnoreUnexported, + args: args(nil), + wantPanic: "invalid struct type", + reason: "input must not be nil value", + }, { + label: "IgnoreUnexported", + fnc: IgnoreUnexported, + args: args(&Foo1{}), + wantPanic: "invalid struct type", + reason: "input must be a struct type (not a pointer to a struct)", + }, { + label: "IgnoreUnexported", + fnc: IgnoreUnexported, + args: args(Foo1{}, struct{ x, X int }{}), + reason: "input may be named or unnamed structs", + }} + + for _, tt := range tests { + tRun(t, tt.label, func(t *testing.T) { + // Prepare function arguments. + vf := reflect.ValueOf(tt.fnc) + var vargs []reflect.Value + for i, arg := range tt.args { + if arg == nil { + tf := vf.Type() + if i == tf.NumIn()-1 && tf.IsVariadic() { + vargs = append(vargs, reflect.Zero(tf.In(i).Elem())) + } else { + vargs = append(vargs, reflect.Zero(tf.In(i))) + } + } else { + vargs = append(vargs, reflect.ValueOf(arg)) + } + } + + // Call the function and capture any panics. + var gotPanic string + func() { + defer func() { + if ex := recover(); ex != nil { + if s, ok := ex.(string); ok { + gotPanic = s + } else { + panic(ex) + } + } + }() + vf.Call(vargs) + }() + + switch { + case tt.wantPanic == "" && gotPanic != "": + t.Errorf("unexpected panic message: %s\nreason: %s", gotPanic, tt.reason) + case tt.wantPanic != "" && !strings.Contains(gotPanic, tt.wantPanic): + t.Errorf("panic message:\ngot: %s\nwant: %s\nreason: %s", gotPanic, tt.wantPanic, tt.reason) + } + }) + } +} + +// TODO: Delete this hack when we drop Go1.6 support. 
+func tRun(t *testing.T, name string, f func(t *testing.T)) { + type runner interface { + Run(string, func(t *testing.T)) bool + } + var ti interface{} = t + if r, ok := ti.(runner); ok { + r.Run(name, f) + } else { + t.Logf("Test: %s", name) + f(t) + } +} diff --git a/vendor/github.com/google/go-cmp/cmp/compare.go b/vendor/github.com/google/go-cmp/cmp/compare.go new file mode 100644 index 000000000..5527f0149 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/compare.go @@ -0,0 +1,529 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// Package cmp determines equality of values. +// +// This package is intended to be a more powerful and safer alternative to +// reflect.DeepEqual for comparing whether two values are semantically equal. +// +// The primary features of cmp are: +// +// • When the default behavior of equality does not suit the needs of the test, +// custom equality functions can override the equality operation. +// For example, an equality function may report floats as equal so long as they +// are within some tolerance of each other. +// +// • Types that have an Equal method may use that method to determine equality. +// This allows package authors to determine the equality operation for the types +// that they define. +// +// • If no custom equality functions are used and no Equal method is defined, +// equality is determined by recursively comparing the primitive kinds on both +// values, much like reflect.DeepEqual. Unlike reflect.DeepEqual, unexported +// fields are not compared by default; they result in panics unless suppressed +// by using an Ignore option (see cmpopts.IgnoreUnexported) or explicitly compared +// using the AllowUnexported option. +package cmp + +import ( + "fmt" + "reflect" + + "github.com/google/go-cmp/cmp/internal/diff" + "github.com/google/go-cmp/cmp/internal/function" + "github.com/google/go-cmp/cmp/internal/value" +) + +// BUG: Maps with keys containing NaN values cannot be properly compared due to +// the reflection package's inability to retrieve such entries. Equal will panic +// anytime it comes across a NaN key, but this behavior may change. +// +// See https://golang.org/issue/11104 for more details. + +var nothing = reflect.Value{} + +// Equal reports whether x and y are equal by recursively applying the +// following rules in the given order to x and y and all of their sub-values: +// +// • If two values are not of the same type, then they are never equal +// and the overall result is false. +// +// • Let S be the set of all Ignore, Transformer, and Comparer options that +// remain after applying all path filters, value filters, and type filters. +// If at least one Ignore exists in S, then the comparison is ignored. +// If the number of Transformer and Comparer options in S is greater than one, +// then Equal panics because it is ambiguous which option to use. +// If S contains a single Transformer, then use that to transform the current +// values and recursively call Equal on the output values. +// If S contains a single Comparer, then use that to compare the current values. +// Otherwise, evaluation proceeds to the next rule. +// +// • If the values have an Equal method of the form "(T) Equal(T) bool" or +// "(T) Equal(I) bool" where T is assignable to I, then use the result of +// x.Equal(y). Otherwise, no such method exists and evaluation proceeds to +// the next rule.
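// A hedged illustration of the Equal-method rule just stated: when a type
// defines "(T) Equal(T) bool", cmp.Equal consults that method instead of
// descending into fields. The Version type here is hypothetical.
package main

import (
	"fmt"
	"strings"

	"github.com/google/go-cmp/cmp"
)

type Version struct{ Raw string }

// Equal treats versions as equal modulo an optional leading "v".
func (v Version) Equal(u Version) bool {
	return strings.TrimPrefix(v.Raw, "v") == strings.TrimPrefix(u.Raw, "v")
}

func main() {
	fmt.Println(cmp.Equal(Version{"v1.0"}, Version{"1.0"})) // true, via the Equal method
}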
+// +// • Lastly, try to compare x and y based on their basic kinds. +// Simple kinds like booleans, integers, floats, complex numbers, strings, and +// channels are compared using the equivalent of the == operator in Go. +// Functions are only equal if they are both nil, otherwise they are unequal. +// Pointers are equal if the underlying values they point to are also equal. +// Interfaces are equal if their underlying concrete values are also equal. +// +// Structs are equal if all of their fields are equal. If a struct contains +// unexported fields, Equal panics unless the AllowUnexported option is used or +// an Ignore option (e.g., cmpopts.IgnoreUnexported) ignores that field. +// +// Arrays, slices, and maps are equal if they are both nil or both non-nil +// with the same length and the elements at each index or key are equal. +// Note that a non-nil empty slice and a nil slice are not equal. +// To equate empty slices and maps, consider using cmpopts.EquateEmpty. +// Map keys are equal according to the == operator. +// To use custom comparisons for map keys, consider using cmpopts.SortMaps. +func Equal(x, y interface{}, opts ...Option) bool { + s := newState(opts) + s.compareAny(reflect.ValueOf(x), reflect.ValueOf(y)) + return s.result.Equal() +} + +// Diff returns a human-readable report of the differences between two values. +// It returns an empty string if and only if Equal returns true for the same +// input values and options. The output string will use the "-" symbol to +// indicate elements removed from x, and the "+" symbol to indicate elements +// added to y. +// +// Do not depend on this output being stable. +func Diff(x, y interface{}, opts ...Option) string { + r := new(defaultReporter) + opts = Options{Options(opts), r} + eq := Equal(x, y, opts...) + d := r.String() + if (d == "") != eq { + panic("inconsistent difference and equality results") + } + return d +} + +type state struct { + // These fields represent the "comparison state". + // Calling statelessCompare must not result in observable changes to these. + result diff.Result // The current result of comparison + curPath Path // The current path in the value tree + reporter reporter // Optional reporter used for difference formatting + + // dynChecker triggers pseudo-random checks for option correctness. + // It is safe for statelessCompare to mutate this value. + dynChecker dynChecker + + // These fields, once set by processOption, will not change. + exporters map[reflect.Type]bool // Set of structs with unexported field visibility + opts Options // List of all fundamental and filter options +} + +func newState(opts []Option) *state { + s := new(state) + for _, opt := range opts { + s.processOption(opt) + } + return s +} + +func (s *state) processOption(opt Option) { + switch opt := opt.(type) { + case nil: + case Options: + for _, o := range opt { + s.processOption(o) + } + case coreOption: + type filtered interface { + isFiltered() bool + } + if fopt, ok := opt.(filtered); ok && !fopt.isFiltered() { + panic(fmt.Sprintf("cannot use an unfiltered option: %v", opt)) + } + s.opts = append(s.opts, opt) + case visibleStructs: + if s.exporters == nil { + s.exporters = make(map[reflect.Type]bool) + } + for t := range opt { + s.exporters[t] = true + } + case reporter: + if s.reporter != nil { + panic("difference reporter already registered") + } + s.reporter = opt + default: + panic(fmt.Sprintf("unknown option %T", opt)) + } +} + +// statelessCompare compares two values and returns the result. 
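// A small usage sketch for the Equal/Diff contract documented above: Diff is
// empty exactly when Equal reports true, "-" marks values from x and "+"
// values from y. The point type is illustrative only, and the exact output
// formatting is not stable.
package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

type point struct{ X, Y int }

func main() {
	a := point{X: 1, Y: 2}
	b := point{X: 1, Y: 3}
	fmt.Println(cmp.Equal(a, b)) // false
	fmt.Print(cmp.Diff(a, b))    // reports a difference at .Y: -: 2, +: 3
}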
+// This function is stateless in that it does not alter the current result, +// or output to any registered reporters. +func (s *state) statelessCompare(vx, vy reflect.Value) diff.Result { + // We do not save and restore the curPath because all of the compareX + // methods should properly push and pop from the path. + // It is an implementation bug if the contents of curPath differs from + // when calling this function to when returning from it. + + oldResult, oldReporter := s.result, s.reporter + s.result = diff.Result{} // Reset result + s.reporter = nil // Remove reporter to avoid spurious printouts + s.compareAny(vx, vy) + res := s.result + s.result, s.reporter = oldResult, oldReporter + return res +} + +func (s *state) compareAny(vx, vy reflect.Value) { + // TODO: Support cyclic data structures. + + // Rule 0: Differing types are never equal. + if !vx.IsValid() || !vy.IsValid() { + s.report(vx.IsValid() == vy.IsValid(), vx, vy) + return + } + if vx.Type() != vy.Type() { + s.report(false, vx, vy) // Possible for path to be empty + return + } + t := vx.Type() + if len(s.curPath) == 0 { + s.curPath.push(&pathStep{typ: t}) + defer s.curPath.pop() + } + vx, vy = s.tryExporting(vx, vy) + + // Rule 1: Check whether an option applies on this node in the value tree. + if s.tryOptions(vx, vy, t) { + return + } + + // Rule 2: Check whether the type has a valid Equal method. + if s.tryMethod(vx, vy, t) { + return + } + + // Rule 3: Recursively descend into each value's underlying kind. + switch t.Kind() { + case reflect.Bool: + s.report(vx.Bool() == vy.Bool(), vx, vy) + return + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + s.report(vx.Int() == vy.Int(), vx, vy) + return + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + s.report(vx.Uint() == vy.Uint(), vx, vy) + return + case reflect.Float32, reflect.Float64: + s.report(vx.Float() == vy.Float(), vx, vy) + return + case reflect.Complex64, reflect.Complex128: + s.report(vx.Complex() == vy.Complex(), vx, vy) + return + case reflect.String: + s.report(vx.String() == vy.String(), vx, vy) + return + case reflect.Chan, reflect.UnsafePointer: + s.report(vx.Pointer() == vy.Pointer(), vx, vy) + return + case reflect.Func: + s.report(vx.IsNil() && vy.IsNil(), vx, vy) + return + case reflect.Ptr: + if vx.IsNil() || vy.IsNil() { + s.report(vx.IsNil() && vy.IsNil(), vx, vy) + return + } + s.curPath.push(&indirect{pathStep{t.Elem()}}) + defer s.curPath.pop() + s.compareAny(vx.Elem(), vy.Elem()) + return + case reflect.Interface: + if vx.IsNil() || vy.IsNil() { + s.report(vx.IsNil() && vy.IsNil(), vx, vy) + return + } + if vx.Elem().Type() != vy.Elem().Type() { + s.report(false, vx.Elem(), vy.Elem()) + return + } + s.curPath.push(&typeAssertion{pathStep{vx.Elem().Type()}}) + defer s.curPath.pop() + s.compareAny(vx.Elem(), vy.Elem()) + return + case reflect.Slice: + if vx.IsNil() || vy.IsNil() { + s.report(vx.IsNil() && vy.IsNil(), vx, vy) + return + } + fallthrough + case reflect.Array: + s.compareArray(vx, vy, t) + return + case reflect.Map: + s.compareMap(vx, vy, t) + return + case reflect.Struct: + s.compareStruct(vx, vy, t) + return + default: + panic(fmt.Sprintf("%v kind not handled", t.Kind())) + } +} + +func (s *state) tryExporting(vx, vy reflect.Value) (reflect.Value, reflect.Value) { + if sf, ok := s.curPath[len(s.curPath)-1].(*structField); ok && sf.unexported { + if sf.force { + // Use unsafe pointer arithmetic to get read-write access to an + // unexported field in 
the struct. + vx = unsafeRetrieveField(sf.pvx, sf.field) + vy = unsafeRetrieveField(sf.pvy, sf.field) + } else { + // We are not allowed to export the value, so invalidate them + // so that tryOptions can panic later if not explicitly ignored. + vx = nothing + vy = nothing + } + } + return vx, vy +} + +func (s *state) tryOptions(vx, vy reflect.Value, t reflect.Type) bool { + // If there were no FilterValues, we will not detect invalid inputs, + // so manually check for them and append invalid if necessary. + // We still evaluate the options since an ignore can override invalid. + opts := s.opts + if !vx.IsValid() || !vy.IsValid() { + opts = Options{opts, invalid{}} + } + + // Evaluate all filters and apply the remaining options. + if opt := opts.filter(s, vx, vy, t); opt != nil { + return opt.apply(s, vx, vy) + } + return false +} + +func (s *state) tryMethod(vx, vy reflect.Value, t reflect.Type) bool { + // Check if this type even has an Equal method. + m, ok := t.MethodByName("Equal") + if !ok || !function.IsType(m.Type, function.EqualAssignable) { + return false + } + + eq := s.callTTBFunc(m.Func, vx, vy) + s.report(eq, vx, vy) + return true +} + +func (s *state) callTRFunc(f, v reflect.Value) reflect.Value { + if !s.dynChecker.Next() { + return f.Call([]reflect.Value{v})[0] + } + + // Run the function twice and ensure that we get the same results back. + // We run in goroutines so that the race detector (if enabled) can detect + // unsafe mutations to the input. + c := make(chan reflect.Value) + go detectRaces(c, f, v) + want := f.Call([]reflect.Value{v})[0] + if got := <-c; !s.statelessCompare(got, want).Equal() { + // To avoid false-positives with non-reflexive equality operations, + // we sanity check whether a value is equal to itself. + if !s.statelessCompare(want, want).Equal() { + return want + } + fn := getFuncName(f.Pointer()) + panic(fmt.Sprintf("non-deterministic function detected: %s", fn)) + } + return want +} + +func (s *state) callTTBFunc(f, x, y reflect.Value) bool { + if !s.dynChecker.Next() { + return f.Call([]reflect.Value{x, y})[0].Bool() + } + + // Swapping the input arguments is sufficient to check that + // f is symmetric and deterministic. + // We run in goroutines so that the race detector (if enabled) can detect + // unsafe mutations to the input. + c := make(chan reflect.Value) + go detectRaces(c, f, y, x) + want := f.Call([]reflect.Value{x, y})[0].Bool() + if got := <-c; !got.IsValid() || got.Bool() != want { + fn := getFuncName(f.Pointer()) + panic(fmt.Sprintf("non-deterministic or non-symmetric function detected: %s", fn)) + } + return want +} + +func detectRaces(c chan<- reflect.Value, f reflect.Value, vs ...reflect.Value) { + var ret reflect.Value + defer func() { + recover() // Ignore panics, let the other call to f panic instead + c <- ret + }() + ret = f.Call(vs)[0] +} + +func (s *state) compareArray(vx, vy reflect.Value, t reflect.Type) { + step := &sliceIndex{pathStep{t.Elem()}, 0, 0} + s.curPath.push(step) + + // Compute an edit-script for slices vx and vy. + eq, es := diff.Difference(vx.Len(), vy.Len(), func(ix, iy int) diff.Result { + step.xkey, step.ykey = ix, iy + return s.statelessCompare(vx.Index(ix), vy.Index(iy)) + }) + + // Equal or no edit-script, so report entire slices as is. + if eq || es == nil { + s.curPath.pop() // Pop first since we are reporting the whole slice + s.report(eq, vx, vy) + return + } + + // Replay the edit-script. 
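// The swapped-argument check in callTTBFunc above is what rejects
// order-dependent comparers. A hedged sketch, mirroring the test case for
// this panic in compare_test.go below: "less than" is not symmetric, so the
// dynamic checker detects the mismatch and panics.
package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

func main() {
	defer func() { fmt.Println("recovered:", recover()) }()
	lt := cmp.Comparer(func(x, y int) bool { return x < y })
	cmp.Equal(
		[]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
		[]int{10, 9, 8, 7, 6, 5, 4, 3, 2, 1},
		lt,
	)
	// recovered: non-deterministic or non-symmetric function detected: ...
}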
+ var ix, iy int + for _, e := range es { + switch e { + case diff.UniqueX: + step.xkey, step.ykey = ix, -1 + s.report(false, vx.Index(ix), nothing) + ix++ + case diff.UniqueY: + step.xkey, step.ykey = -1, iy + s.report(false, nothing, vy.Index(iy)) + iy++ + default: + step.xkey, step.ykey = ix, iy + if e == diff.Identity { + s.report(true, vx.Index(ix), vy.Index(iy)) + } else { + s.compareAny(vx.Index(ix), vy.Index(iy)) + } + ix++ + iy++ + } + } + s.curPath.pop() + return +} + +func (s *state) compareMap(vx, vy reflect.Value, t reflect.Type) { + if vx.IsNil() || vy.IsNil() { + s.report(vx.IsNil() && vy.IsNil(), vx, vy) + return + } + + // We combine and sort the two map keys so that we can perform the + // comparisons in a deterministic order. + step := &mapIndex{pathStep: pathStep{t.Elem()}} + s.curPath.push(step) + defer s.curPath.pop() + for _, k := range value.SortKeys(append(vx.MapKeys(), vy.MapKeys()...)) { + step.key = k + vvx := vx.MapIndex(k) + vvy := vy.MapIndex(k) + switch { + case vvx.IsValid() && vvy.IsValid(): + s.compareAny(vvx, vvy) + case vvx.IsValid() && !vvy.IsValid(): + s.report(false, vvx, nothing) + case !vvx.IsValid() && vvy.IsValid(): + s.report(false, nothing, vvy) + default: + // It is possible for both vvx and vvy to be invalid if the + // key contained a NaN value in it. There is no way in + // reflection to be able to retrieve these values. + // See https://golang.org/issue/11104 + panic(fmt.Sprintf("%#v has map key with NaNs", s.curPath)) + } + } +} + +func (s *state) compareStruct(vx, vy reflect.Value, t reflect.Type) { + var vax, vay reflect.Value // Addressable versions of vx and vy + + step := &structField{} + s.curPath.push(step) + defer s.curPath.pop() + for i := 0; i < t.NumField(); i++ { + vvx := vx.Field(i) + vvy := vy.Field(i) + step.typ = t.Field(i).Type + step.name = t.Field(i).Name + step.idx = i + step.unexported = !isExported(step.name) + if step.unexported { + // Defer checking of unexported fields until later to give an + // Ignore a chance to ignore the field. + if !vax.IsValid() || !vay.IsValid() { + // For unsafeRetrieveField to work, the parent struct must + // be addressable. Create a new copy of the values if + // necessary to make them addressable. + vax = makeAddressable(vx) + vay = makeAddressable(vy) + } + step.force = s.exporters[t] + step.pvx = vax + step.pvy = vay + step.field = t.Field(i) + } + s.compareAny(vvx, vvy) + } +} + +// report records the result of a single comparison. +// It also calls Report if any reporter is registered. +func (s *state) report(eq bool, vx, vy reflect.Value) { + if eq { + s.result.NSame++ + } else { + s.result.NDiff++ + } + if s.reporter != nil { + s.reporter.Report(vx, vy, eq, s.curPath) + } +} + +// dynChecker tracks the state needed to periodically perform checks that +// user provided functions are symmetric and deterministic. +// The zero value is safe for immediate use. +type dynChecker struct{ curr, next int } + +// Next increments the state and reports whether a check should be performed. +// +// Checks occur every Nth function call, where N is a triangular number: +// 0 1 3 6 10 15 21 28 36 45 55 66 78 91 105 120 136 153 171 190 ... +// See https://en.wikipedia.org/wiki/Triangular_number +// +// This sequence ensures that the cost of checks drops significantly as +// the number of function calls grows larger.
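// A quick sketch of the cadence documented above: replaying dynChecker's
// counter logic shows checks firing on calls 1, 2, 4, 7, 11, 16, ... with
// gaps growing by one (triangular-number spacing), so the relative cost of
// checking fades as the call count grows.
package main

import "fmt"

func main() {
	var curr, next int
	for call := 1; call <= 20; call++ {
		ok := curr == next // same logic as dynChecker.Next below
		if ok {
			curr = 0
			next++
		}
		curr++
		if ok {
			fmt.Print(call, " ") // 1 2 4 7 11 16 ...
		}
	}
	fmt.Println()
}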
+func (dc *dynChecker) Next() bool { + ok := dc.curr == dc.next + if ok { + dc.curr = 0 + dc.next++ + } + dc.curr++ + return ok +} + +// makeAddressable returns a value that is always addressable. +// It returns the input verbatim if it is already addressable, +// otherwise it creates a new value and returns an addressable copy. +func makeAddressable(v reflect.Value) reflect.Value { + if v.CanAddr() { + return v + } + vc := reflect.New(v.Type()).Elem() + vc.Set(v) + return vc +} diff --git a/vendor/github.com/google/go-cmp/cmp/compare_test.go b/vendor/github.com/google/go-cmp/cmp/compare_test.go new file mode 100644 index 000000000..36a4ecf7d --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/compare_test.go @@ -0,0 +1,1795 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp_test + +import ( + "bytes" + "crypto/md5" + "fmt" + "io" + "math" + "math/rand" + "reflect" + "regexp" + "sort" + "strings" + "sync" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + pb "github.com/google/go-cmp/cmp/internal/testprotos" + ts "github.com/google/go-cmp/cmp/internal/teststructs" +) + +var now = time.Now() + +func intPtr(n int) *int { return &n } + +type test struct { + label string // Test description + x, y interface{} // Input values to compare + opts []cmp.Option // Input options + wantDiff string // The exact difference string + wantPanic string // Sub-string of an expected panic message +} + +func TestDiff(t *testing.T) { + var tests []test + tests = append(tests, comparerTests()...) + tests = append(tests, transformerTests()...) + tests = append(tests, embeddedTests()...) + tests = append(tests, methodTests()...) + tests = append(tests, project1Tests()...) + tests = append(tests, project2Tests()...) + tests = append(tests, project3Tests()...) + tests = append(tests, project4Tests()...) + + for _, tt := range tests { + tt := tt + tRunParallel(t, tt.label, func(t *testing.T) { + var gotDiff, gotPanic string + func() { + defer func() { + if ex := recover(); ex != nil { + if s, ok := ex.(string); ok { + gotPanic = s + } else { + panic(ex) + } + } + }() + gotDiff = cmp.Diff(tt.x, tt.y, tt.opts...) 
+ }() + if tt.wantPanic == "" { + if gotPanic != "" { + t.Fatalf("unexpected panic message: %s", gotPanic) + } + if got, want := strings.TrimSpace(gotDiff), strings.TrimSpace(tt.wantDiff); got != want { + t.Fatalf("difference message:\ngot:\n%s\n\nwant:\n%s", got, want) + } + } else { + if !strings.Contains(gotPanic, tt.wantPanic) { + t.Fatalf("panic message:\ngot: %s\nwant: %s", gotPanic, tt.wantPanic) + } + } + }) + } +} + +func comparerTests() []test { + const label = "Comparer" + + return []test{{ + label: label, + x: 1, + y: 1, + }, { + label: label, + x: 1, + y: 1, + opts: []cmp.Option{cmp.Ignore()}, + wantPanic: "cannot use an unfiltered option", + }, { + label: label, + x: 1, + y: 1, + opts: []cmp.Option{cmp.Comparer(func(_, _ interface{}) bool { return true })}, + wantPanic: "cannot use an unfiltered option", + }, { + label: label, + x: 1, + y: 1, + opts: []cmp.Option{cmp.Transformer("", func(x interface{}) interface{} { return x })}, + wantPanic: "cannot use an unfiltered option", + }, { + label: label, + x: 1, + y: 1, + opts: []cmp.Option{ + cmp.Comparer(func(x, y int) bool { return true }), + cmp.Transformer("", func(x int) float64 { return float64(x) }), + }, + wantPanic: "ambiguous set of applicable options", + }, { + label: label, + x: 1, + y: 1, + opts: []cmp.Option{ + cmp.FilterPath(func(p cmp.Path) bool { + return len(p) > 0 && p[len(p)-1].Type().Kind() == reflect.Int + }, cmp.Options{cmp.Ignore(), cmp.Ignore(), cmp.Ignore()}), + cmp.Comparer(func(x, y int) bool { return true }), + cmp.Transformer("", func(x int) float64 { return float64(x) }), + }, + }, { + label: label, + opts: []cmp.Option{struct{ cmp.Option }{}}, + wantPanic: "unknown option", + }, { + label: label, + x: struct{ A, B, C int }{1, 2, 3}, + y: struct{ A, B, C int }{1, 2, 3}, + }, { + label: label, + x: struct{ A, B, C int }{1, 2, 3}, + y: struct{ A, B, C int }{1, 2, 4}, + wantDiff: "root.C:\n\t-: 3\n\t+: 4\n", + }, { + label: label, + x: struct{ a, b, c int }{1, 2, 3}, + y: struct{ a, b, c int }{1, 2, 4}, + wantPanic: "cannot handle unexported field", + }, { + label: label, + x: &struct{ A *int }{intPtr(4)}, + y: &struct{ A *int }{intPtr(4)}, + }, { + label: label, + x: &struct{ A *int }{intPtr(4)}, + y: &struct{ A *int }{intPtr(5)}, + wantDiff: "*root.A:\n\t-: 4\n\t+: 5\n", + }, { + label: label, + x: &struct{ A *int }{intPtr(4)}, + y: &struct{ A *int }{intPtr(5)}, + opts: []cmp.Option{ + cmp.Comparer(func(x, y int) bool { return true }), + }, + }, { + label: label, + x: &struct{ A *int }{intPtr(4)}, + y: &struct{ A *int }{intPtr(5)}, + opts: []cmp.Option{ + cmp.Comparer(func(x, y *int) bool { return x != nil && y != nil }), + }, + }, { + label: label, + x: &struct{ R *bytes.Buffer }{}, + y: &struct{ R *bytes.Buffer }{}, + }, { + label: label, + x: &struct{ R *bytes.Buffer }{new(bytes.Buffer)}, + y: &struct{ R *bytes.Buffer }{}, + wantDiff: "root.R:\n\t-: \"\"\n\t+: \n", + }, { + label: label, + x: &struct{ R *bytes.Buffer }{new(bytes.Buffer)}, + y: &struct{ R *bytes.Buffer }{}, + opts: []cmp.Option{ + cmp.Comparer(func(x, y io.Reader) bool { return true }), + }, + }, { + label: label, + x: &struct{ R bytes.Buffer }{}, + y: &struct{ R bytes.Buffer }{}, + wantPanic: "cannot handle unexported field", + }, { + label: label, + x: &struct{ R bytes.Buffer }{}, + y: &struct{ R bytes.Buffer }{}, + opts: []cmp.Option{ + cmp.Comparer(func(x, y io.Reader) bool { return true }), + }, + wantPanic: "cannot handle unexported field", + }, { + label: label, + x: &struct{ R bytes.Buffer }{}, + y: &struct{ R bytes.Buffer 
}{}, + opts: []cmp.Option{ + cmp.Transformer("Ref", func(x bytes.Buffer) *bytes.Buffer { return &x }), + cmp.Comparer(func(x, y io.Reader) bool { return true }), + }, + }, { + label: label, + x: []*regexp.Regexp{nil, regexp.MustCompile("a*b*c*")}, + y: []*regexp.Regexp{nil, regexp.MustCompile("a*b*c*")}, + wantPanic: "cannot handle unexported field", + }, { + label: label, + x: []*regexp.Regexp{nil, regexp.MustCompile("a*b*c*")}, + y: []*regexp.Regexp{nil, regexp.MustCompile("a*b*c*")}, + opts: []cmp.Option{cmp.Comparer(func(x, y *regexp.Regexp) bool { + if x == nil || y == nil { + return x == nil && y == nil + } + return x.String() == y.String() + })}, + }, { + label: label, + x: []*regexp.Regexp{nil, regexp.MustCompile("a*b*c*")}, + y: []*regexp.Regexp{nil, regexp.MustCompile("a*b*d*")}, + opts: []cmp.Option{cmp.Comparer(func(x, y *regexp.Regexp) bool { + if x == nil || y == nil { + return x == nil && y == nil + } + return x.String() == y.String() + })}, + wantDiff: ` +{[]*regexp.Regexp}[1]: + -: "a*b*c*" + +: "a*b*d*"`, + }, { + label: label, + x: func() ***int { + a := 0 + b := &a + c := &b + return &c + }(), + y: func() ***int { + a := 0 + b := &a + c := &b + return &c + }(), + }, { + label: label, + x: func() ***int { + a := 0 + b := &a + c := &b + return &c + }(), + y: func() ***int { + a := 1 + b := &a + c := &b + return &c + }(), + wantDiff: ` +***{***int}: + -: 0 + +: 1`, + }, { + label: label, + x: []int{1, 2, 3, 4, 5}[:3], + y: []int{1, 2, 3}, + }, { + label: label, + x: struct{ fmt.Stringer }{bytes.NewBufferString("hello")}, + y: struct{ fmt.Stringer }{regexp.MustCompile("hello")}, + opts: []cmp.Option{cmp.Comparer(func(x, y fmt.Stringer) bool { return x.String() == y.String() })}, + }, { + label: label, + x: struct{ fmt.Stringer }{bytes.NewBufferString("hello")}, + y: struct{ fmt.Stringer }{regexp.MustCompile("hello2")}, + opts: []cmp.Option{cmp.Comparer(func(x, y fmt.Stringer) bool { return x.String() == y.String() })}, + wantDiff: ` +root: + -: "hello" + +: "hello2"`, + }, { + label: label, + x: md5.Sum([]byte{'a'}), + y: md5.Sum([]byte{'b'}), + wantDiff: ` +{[16]uint8}: + -: [16]uint8{0x0c, 0xc1, 0x75, 0xb9, 0xc0, 0xf1, 0xb6, 0xa8, 0x31, 0xc3, 0x99, 0xe2, 0x69, 0x77, 0x26, 0x61} + +: [16]uint8{0x92, 0xeb, 0x5f, 0xfe, 0xe6, 0xae, 0x2f, 0xec, 0x3a, 0xd7, 0x1c, 0x77, 0x75, 0x31, 0x57, 0x8f}`, + }, { + label: label, + x: new(fmt.Stringer), + y: nil, + wantDiff: ` +: + -: & + +: `, + }, { + label: label, + x: make([]int, 1000), + y: make([]int, 1000), + opts: []cmp.Option{ + cmp.Comparer(func(_, _ int) bool { + return rand.Intn(2) == 0 + }), + }, + wantPanic: "non-deterministic or non-symmetric function detected", + }, { + label: label, + x: make([]int, 1000), + y: make([]int, 1000), + opts: []cmp.Option{ + cmp.FilterValues(func(_, _ int) bool { + return rand.Intn(2) == 0 + }, cmp.Ignore()), + }, + wantPanic: "non-deterministic or non-symmetric function detected", + }, { + label: label, + x: []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, + y: []int{10, 9, 8, 7, 6, 5, 4, 3, 2, 1}, + opts: []cmp.Option{ + cmp.Comparer(func(x, y int) bool { + return x < y + }), + }, + wantPanic: "non-deterministic or non-symmetric function detected", + }, { + label: label, + x: make([]string, 1000), + y: make([]string, 1000), + opts: []cmp.Option{ + cmp.Transformer("", func(x string) int { + return rand.Int() + }), + }, + wantPanic: "non-deterministic function detected", + }, { + // Make sure the dynamic checks don't raise a false positive for + // non-reflexive comparisons. 
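// NaN is the canonical non-reflexive value the comment above refers to: it
// does not compare equal to itself, so a transformer that maps everything to
// NaN makes a value differ from itself without being non-deterministic.
// A tiny standalone illustration:
package main

import (
	"fmt"
	"math"

	"github.com/google/go-cmp/cmp"
)

func main() {
	n := math.NaN()
	fmt.Println(n == n)          // false: NaN is not equal to itself
	fmt.Println(cmp.Equal(n, n)) // false for the same reason
}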
+ label: label, + x: make([]int, 10), + y: make([]int, 10), + opts: []cmp.Option{ + cmp.Transformer("", func(x int) float64 { + return math.NaN() + }), + }, + wantDiff: ` +{[]int}: + -: []int{0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + +: []int{0, 0, 0, 0, 0, 0, 0, 0, 0, 0}`, + }} +} + +func transformerTests() []test { + const label = "Transformer/" + + return []test{{ + label: label, + x: uint8(0), + y: uint8(1), + opts: []cmp.Option{ + cmp.Transformer("", func(in uint8) uint16 { return uint16(in) }), + cmp.Transformer("", func(in uint16) uint32 { return uint32(in) }), + cmp.Transformer("", func(in uint32) uint64 { return uint64(in) }), + }, + wantDiff: ` +λ(λ(λ({uint8}))): + -: 0x00 + +: 0x01`, + }, { + label: label, + x: 0, + y: 1, + opts: []cmp.Option{ + cmp.Transformer("", func(in int) int { return in / 2 }), + cmp.Transformer("", func(in int) int { return in }), + }, + wantPanic: "ambiguous set of applicable options", + }, { + label: label, + x: []int{0, -5, 0, -1}, + y: []int{1, 3, 0, -5}, + opts: []cmp.Option{ + cmp.FilterValues( + func(x, y int) bool { return x+y >= 0 }, + cmp.Transformer("", func(in int) int64 { return int64(in / 2) }), + ), + cmp.FilterValues( + func(x, y int) bool { return x+y < 0 }, + cmp.Transformer("", func(in int) int64 { return int64(in) }), + ), + }, + wantDiff: ` +λ({[]int}[1]): + -: -5 + +: 3 +λ({[]int}[3]): + -: -1 + +: -5`, + }, { + label: label, + x: 0, + y: 1, + opts: []cmp.Option{ + cmp.Transformer("", func(in int) interface{} { + if in == 0 { + return "string" + } + return float64(in) + }), + }, + wantDiff: ` +λ({int}): + -: "string" + +: 1`, + }} +} + +func embeddedTests() []test { + const label = "EmbeddedStruct/" + + privateStruct := *new(ts.ParentStructA).PrivateStruct() + + createStructA := func(i int) ts.ParentStructA { + s := ts.ParentStructA{} + s.PrivateStruct().Public = 1 + i + s.PrivateStruct().SetPrivate(2 + i) + return s + } + + createStructB := func(i int) ts.ParentStructB { + s := ts.ParentStructB{} + s.PublicStruct.Public = 1 + i + s.PublicStruct.SetPrivate(2 + i) + return s + } + + createStructC := func(i int) ts.ParentStructC { + s := ts.ParentStructC{} + s.PrivateStruct().Public = 1 + i + s.PrivateStruct().SetPrivate(2 + i) + s.Public = 3 + i + s.SetPrivate(4 + i) + return s + } + + createStructD := func(i int) ts.ParentStructD { + s := ts.ParentStructD{} + s.PublicStruct.Public = 1 + i + s.PublicStruct.SetPrivate(2 + i) + s.Public = 3 + i + s.SetPrivate(4 + i) + return s + } + + createStructE := func(i int) ts.ParentStructE { + s := ts.ParentStructE{} + s.PrivateStruct().Public = 1 + i + s.PrivateStruct().SetPrivate(2 + i) + s.PublicStruct.Public = 3 + i + s.PublicStruct.SetPrivate(4 + i) + return s + } + + createStructF := func(i int) ts.ParentStructF { + s := ts.ParentStructF{} + s.PrivateStruct().Public = 1 + i + s.PrivateStruct().SetPrivate(2 + i) + s.PublicStruct.Public = 3 + i + s.PublicStruct.SetPrivate(4 + i) + s.Public = 5 + i + s.SetPrivate(6 + i) + return s + } + + createStructG := func(i int) *ts.ParentStructG { + s := ts.NewParentStructG() + s.PrivateStruct().Public = 1 + i + s.PrivateStruct().SetPrivate(2 + i) + return s + } + + createStructH := func(i int) *ts.ParentStructH { + s := ts.NewParentStructH() + s.PublicStruct.Public = 1 + i + s.PublicStruct.SetPrivate(2 + i) + return s + } + + createStructI := func(i int) *ts.ParentStructI { + s := ts.NewParentStructI() + s.PrivateStruct().Public = 1 + i + s.PrivateStruct().SetPrivate(2 + i) + s.PublicStruct.Public = 3 + i + s.PublicStruct.SetPrivate(4 + i) + return s + } + + 
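// A hedged sketch of the transformer behavior exercised in transformerTests
// above: a Transformer rewrites both values before comparison, and Diff
// prefixes the transformed path with λ(...). Halving maps 6 and 7 to the
// same value; transforming int to int64 keeps the chain from re-applying.
package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

func main() {
	halve := cmp.Transformer("Halve", func(n int) int64 { return int64(n / 2) })
	fmt.Println(cmp.Equal(6, 7, halve)) // true: both transform to 3
	fmt.Println(cmp.Equal(6, 9, halve)) // false: 3 vs 4
}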
createStructJ := func(i int) *ts.ParentStructJ { + s := ts.NewParentStructJ() + s.PrivateStruct().Public = 1 + i + s.PrivateStruct().SetPrivate(2 + i) + s.PublicStruct.Public = 3 + i + s.PublicStruct.SetPrivate(4 + i) + s.Private().Public = 5 + i + s.Private().SetPrivate(6 + i) + s.Public.Public = 7 + i + s.Public.SetPrivate(8 + i) + return s + } + + return []test{{ + label: label + "ParentStructA", + x: ts.ParentStructA{}, + y: ts.ParentStructA{}, + wantPanic: "cannot handle unexported field", + }, { + label: label + "ParentStructA", + x: ts.ParentStructA{}, + y: ts.ParentStructA{}, + opts: []cmp.Option{ + cmpopts.IgnoreUnexported(ts.ParentStructA{}), + }, + }, { + label: label + "ParentStructA", + x: createStructA(0), + y: createStructA(0), + opts: []cmp.Option{ + cmp.AllowUnexported(ts.ParentStructA{}), + }, + wantPanic: "cannot handle unexported field", + }, { + label: label + "ParentStructA", + x: createStructA(0), + y: createStructA(0), + opts: []cmp.Option{ + cmp.AllowUnexported(ts.ParentStructA{}, privateStruct), + }, + }, { + label: label + "ParentStructA", + x: createStructA(0), + y: createStructA(1), + opts: []cmp.Option{ + cmp.AllowUnexported(ts.ParentStructA{}, privateStruct), + }, + wantDiff: ` +{teststructs.ParentStructA}.privateStruct.Public: + -: 1 + +: 2 +{teststructs.ParentStructA}.privateStruct.private: + -: 2 + +: 3`, + }, { + label: label + "ParentStructB", + x: ts.ParentStructB{}, + y: ts.ParentStructB{}, + opts: []cmp.Option{ + cmpopts.IgnoreUnexported(ts.ParentStructB{}), + }, + wantPanic: "cannot handle unexported field", + }, { + label: label + "ParentStructB", + x: ts.ParentStructB{}, + y: ts.ParentStructB{}, + opts: []cmp.Option{ + cmpopts.IgnoreUnexported(ts.ParentStructB{}), + cmpopts.IgnoreUnexported(ts.PublicStruct{}), + }, + }, { + label: label + "ParentStructB", + x: createStructB(0), + y: createStructB(0), + opts: []cmp.Option{ + cmp.AllowUnexported(ts.ParentStructB{}), + }, + wantPanic: "cannot handle unexported field", + }, { + label: label + "ParentStructB", + x: createStructB(0), + y: createStructB(0), + opts: []cmp.Option{ + cmp.AllowUnexported(ts.ParentStructB{}, ts.PublicStruct{}), + }, + }, { + label: label + "ParentStructB", + x: createStructB(0), + y: createStructB(1), + opts: []cmp.Option{ + cmp.AllowUnexported(ts.ParentStructB{}, ts.PublicStruct{}), + }, + wantDiff: ` +{teststructs.ParentStructB}.PublicStruct.Public: + -: 1 + +: 2 +{teststructs.ParentStructB}.PublicStruct.private: + -: 2 + +: 3`, + }, { + label: label + "ParentStructC", + x: ts.ParentStructC{}, + y: ts.ParentStructC{}, + wantPanic: "cannot handle unexported field", + }, { + label: label + "ParentStructC", + x: ts.ParentStructC{}, + y: ts.ParentStructC{}, + opts: []cmp.Option{ + cmpopts.IgnoreUnexported(ts.ParentStructC{}), + }, + }, { + label: label + "ParentStructC", + x: createStructC(0), + y: createStructC(0), + opts: []cmp.Option{ + cmp.AllowUnexported(ts.ParentStructC{}), + }, + wantPanic: "cannot handle unexported field", + }, { + label: label + "ParentStructC", + x: createStructC(0), + y: createStructC(0), + opts: []cmp.Option{ + cmp.AllowUnexported(ts.ParentStructC{}, privateStruct), + }, + }, { + label: label + "ParentStructC", + x: createStructC(0), + y: createStructC(1), + opts: []cmp.Option{ + cmp.AllowUnexported(ts.ParentStructC{}, privateStruct), + }, + wantDiff: ` +{teststructs.ParentStructC}.privateStruct.Public: + -: 1 + +: 2 +{teststructs.ParentStructC}.privateStruct.private: + -: 2 + +: 3 +{teststructs.ParentStructC}.Public: + -: 3 + +: 4 
+{teststructs.ParentStructC}.private: + -: 4 + +: 5`, + }, { + label: label + "ParentStructD", + x: ts.ParentStructD{}, + y: ts.ParentStructD{}, + opts: []cmp.Option{ + cmpopts.IgnoreUnexported(ts.ParentStructD{}), + }, + wantPanic: "cannot handle unexported field", + }, { + label: label + "ParentStructD", + x: ts.ParentStructD{}, + y: ts.ParentStructD{}, + opts: []cmp.Option{ + cmpopts.IgnoreUnexported(ts.ParentStructD{}), + cmpopts.IgnoreUnexported(ts.PublicStruct{}), + }, + }, { + label: label + "ParentStructD", + x: createStructD(0), + y: createStructD(0), + opts: []cmp.Option{ + cmp.AllowUnexported(ts.ParentStructD{}), + }, + wantPanic: "cannot handle unexported field", + }, { + label: label + "ParentStructD", + x: createStructD(0), + y: createStructD(0), + opts: []cmp.Option{ + cmp.AllowUnexported(ts.ParentStructD{}, ts.PublicStruct{}), + }, + }, { + label: label + "ParentStructD", + x: createStructD(0), + y: createStructD(1), + opts: []cmp.Option{ + cmp.AllowUnexported(ts.ParentStructD{}, ts.PublicStruct{}), + }, + wantDiff: ` +{teststructs.ParentStructD}.PublicStruct.Public: + -: 1 + +: 2 +{teststructs.ParentStructD}.PublicStruct.private: + -: 2 + +: 3 +{teststructs.ParentStructD}.Public: + -: 3 + +: 4 +{teststructs.ParentStructD}.private: + -: 4 + +: 5`, + }, { + label: label + "ParentStructE", + x: ts.ParentStructE{}, + y: ts.ParentStructE{}, + opts: []cmp.Option{ + cmpopts.IgnoreUnexported(ts.ParentStructE{}), + }, + wantPanic: "cannot handle unexported field", + }, { + label: label + "ParentStructE", + x: ts.ParentStructE{}, + y: ts.ParentStructE{}, + opts: []cmp.Option{ + cmpopts.IgnoreUnexported(ts.ParentStructE{}), + cmpopts.IgnoreUnexported(ts.PublicStruct{}), + }, + }, { + label: label + "ParentStructE", + x: createStructE(0), + y: createStructE(0), + opts: []cmp.Option{ + cmp.AllowUnexported(ts.ParentStructE{}), + }, + wantPanic: "cannot handle unexported field", + }, { + label: label + "ParentStructE", + x: createStructE(0), + y: createStructE(0), + opts: []cmp.Option{ + cmp.AllowUnexported(ts.ParentStructE{}, ts.PublicStruct{}), + }, + wantPanic: "cannot handle unexported field", + }, { + label: label + "ParentStructE", + x: createStructE(0), + y: createStructE(0), + opts: []cmp.Option{ + cmp.AllowUnexported(ts.ParentStructE{}, ts.PublicStruct{}, privateStruct), + }, + }, { + label: label + "ParentStructE", + x: createStructE(0), + y: createStructE(1), + opts: []cmp.Option{ + cmp.AllowUnexported(ts.ParentStructE{}, ts.PublicStruct{}, privateStruct), + }, + wantDiff: ` +{teststructs.ParentStructE}.privateStruct.Public: + -: 1 + +: 2 +{teststructs.ParentStructE}.privateStruct.private: + -: 2 + +: 3 +{teststructs.ParentStructE}.PublicStruct.Public: + -: 3 + +: 4 +{teststructs.ParentStructE}.PublicStruct.private: + -: 4 + +: 5`, + }, { + label: label + "ParentStructF", + x: ts.ParentStructF{}, + y: ts.ParentStructF{}, + opts: []cmp.Option{ + cmpopts.IgnoreUnexported(ts.ParentStructF{}), + }, + wantPanic: "cannot handle unexported field", + }, { + label: label + "ParentStructF", + x: ts.ParentStructF{}, + y: ts.ParentStructF{}, + opts: []cmp.Option{ + cmpopts.IgnoreUnexported(ts.ParentStructF{}), + cmpopts.IgnoreUnexported(ts.PublicStruct{}), + }, + }, { + label: label + "ParentStructF", + x: createStructF(0), + y: createStructF(0), + opts: []cmp.Option{ + cmp.AllowUnexported(ts.ParentStructF{}), + }, + wantPanic: "cannot handle unexported field", + }, { + label: label + "ParentStructF", + x: createStructF(0), + y: createStructF(0), + opts: []cmp.Option{ + 
cmp.AllowUnexported(ts.ParentStructF{}, ts.PublicStruct{}), + }, + wantPanic: "cannot handle unexported field", + }, { + label: label + "ParentStructF", + x: createStructF(0), + y: createStructF(0), + opts: []cmp.Option{ + cmp.AllowUnexported(ts.ParentStructF{}, ts.PublicStruct{}, privateStruct), + }, + }, { + label: label + "ParentStructF", + x: createStructF(0), + y: createStructF(1), + opts: []cmp.Option{ + cmp.AllowUnexported(ts.ParentStructF{}, ts.PublicStruct{}, privateStruct), + }, + wantDiff: ` +{teststructs.ParentStructF}.privateStruct.Public: + -: 1 + +: 2 +{teststructs.ParentStructF}.privateStruct.private: + -: 2 + +: 3 +{teststructs.ParentStructF}.PublicStruct.Public: + -: 3 + +: 4 +{teststructs.ParentStructF}.PublicStruct.private: + -: 4 + +: 5 +{teststructs.ParentStructF}.Public: + -: 5 + +: 6 +{teststructs.ParentStructF}.private: + -: 6 + +: 7`, + }, { + label: label + "ParentStructG", + x: ts.ParentStructG{}, + y: ts.ParentStructG{}, + wantPanic: "cannot handle unexported field", + }, { + label: label + "ParentStructG", + x: ts.ParentStructG{}, + y: ts.ParentStructG{}, + opts: []cmp.Option{ + cmpopts.IgnoreUnexported(ts.ParentStructG{}), + }, + }, { + label: label + "ParentStructG", + x: createStructG(0), + y: createStructG(0), + opts: []cmp.Option{ + cmp.AllowUnexported(ts.ParentStructG{}), + }, + wantPanic: "cannot handle unexported field", + }, { + label: label + "ParentStructG", + x: createStructG(0), + y: createStructG(0), + opts: []cmp.Option{ + cmp.AllowUnexported(ts.ParentStructG{}, privateStruct), + }, + }, { + label: label + "ParentStructG", + x: createStructG(0), + y: createStructG(1), + opts: []cmp.Option{ + cmp.AllowUnexported(ts.ParentStructG{}, privateStruct), + }, + wantDiff: ` +{*teststructs.ParentStructG}.privateStruct.Public: + -: 1 + +: 2 +{*teststructs.ParentStructG}.privateStruct.private: + -: 2 + +: 3`, + }, { + label: label + "ParentStructH", + x: ts.ParentStructH{}, + y: ts.ParentStructH{}, + }, { + label: label + "ParentStructH", + x: createStructH(0), + y: createStructH(0), + wantPanic: "cannot handle unexported field", + }, { + label: label + "ParentStructH", + x: ts.ParentStructH{}, + y: ts.ParentStructH{}, + opts: []cmp.Option{ + cmpopts.IgnoreUnexported(ts.ParentStructH{}), + }, + }, { + label: label + "ParentStructH", + x: createStructH(0), + y: createStructH(0), + opts: []cmp.Option{ + cmp.AllowUnexported(ts.ParentStructH{}), + }, + wantPanic: "cannot handle unexported field", + }, { + label: label + "ParentStructH", + x: createStructH(0), + y: createStructH(0), + opts: []cmp.Option{ + cmp.AllowUnexported(ts.ParentStructH{}, ts.PublicStruct{}), + }, + }, { + label: label + "ParentStructH", + x: createStructH(0), + y: createStructH(1), + opts: []cmp.Option{ + cmp.AllowUnexported(ts.ParentStructH{}, ts.PublicStruct{}), + }, + wantDiff: ` +{*teststructs.ParentStructH}.PublicStruct.Public: + -: 1 + +: 2 +{*teststructs.ParentStructH}.PublicStruct.private: + -: 2 + +: 3`, + }, { + label: label + "ParentStructI", + x: ts.ParentStructI{}, + y: ts.ParentStructI{}, + wantPanic: "cannot handle unexported field", + }, { + label: label + "ParentStructI", + x: ts.ParentStructI{}, + y: ts.ParentStructI{}, + opts: []cmp.Option{ + cmpopts.IgnoreUnexported(ts.ParentStructI{}), + }, + }, { + label: label + "ParentStructI", + x: createStructI(0), + y: createStructI(0), + opts: []cmp.Option{ + cmpopts.IgnoreUnexported(ts.ParentStructI{}), + }, + wantPanic: "cannot handle unexported field", + }, { + label: label + "ParentStructI", + x: createStructI(0), + y: 
createStructI(0), + opts: []cmp.Option{ + cmpopts.IgnoreUnexported(ts.ParentStructI{}, ts.PublicStruct{}), + }, + }, { + label: label + "ParentStructI", + x: createStructI(0), + y: createStructI(0), + opts: []cmp.Option{ + cmp.AllowUnexported(ts.ParentStructI{}), + }, + wantPanic: "cannot handle unexported field", + }, { + label: label + "ParentStructI", + x: createStructI(0), + y: createStructI(0), + opts: []cmp.Option{ + cmp.AllowUnexported(ts.ParentStructI{}, ts.PublicStruct{}, privateStruct), + }, + }, { + label: label + "ParentStructI", + x: createStructI(0), + y: createStructI(1), + opts: []cmp.Option{ + cmp.AllowUnexported(ts.ParentStructI{}, ts.PublicStruct{}, privateStruct), + }, + wantDiff: ` +{*teststructs.ParentStructI}.privateStruct.Public: + -: 1 + +: 2 +{*teststructs.ParentStructI}.privateStruct.private: + -: 2 + +: 3 +{*teststructs.ParentStructI}.PublicStruct.Public: + -: 3 + +: 4 +{*teststructs.ParentStructI}.PublicStruct.private: + -: 4 + +: 5`, + }, { + label: label + "ParentStructJ", + x: ts.ParentStructJ{}, + y: ts.ParentStructJ{}, + wantPanic: "cannot handle unexported field", + }, { + label: label + "ParentStructJ", + x: ts.ParentStructJ{}, + y: ts.ParentStructJ{}, + opts: []cmp.Option{ + cmpopts.IgnoreUnexported(ts.ParentStructJ{}), + }, + wantPanic: "cannot handle unexported field", + }, { + label: label + "ParentStructJ", + x: ts.ParentStructJ{}, + y: ts.ParentStructJ{}, + opts: []cmp.Option{ + cmpopts.IgnoreUnexported(ts.ParentStructJ{}, ts.PublicStruct{}), + }, + }, { + label: label + "ParentStructJ", + x: createStructJ(0), + y: createStructJ(0), + opts: []cmp.Option{ + cmp.AllowUnexported(ts.ParentStructJ{}, ts.PublicStruct{}), + }, + wantPanic: "cannot handle unexported field", + }, { + label: label + "ParentStructJ", + x: createStructJ(0), + y: createStructJ(0), + opts: []cmp.Option{ + cmp.AllowUnexported(ts.ParentStructJ{}, ts.PublicStruct{}, privateStruct), + }, + }, { + label: label + "ParentStructJ", + x: createStructJ(0), + y: createStructJ(1), + opts: []cmp.Option{ + cmp.AllowUnexported(ts.ParentStructJ{}, ts.PublicStruct{}, privateStruct), + }, + wantDiff: ` +{*teststructs.ParentStructJ}.privateStruct.Public: + -: 1 + +: 2 +{*teststructs.ParentStructJ}.privateStruct.private: + -: 2 + +: 3 +{*teststructs.ParentStructJ}.PublicStruct.Public: + -: 3 + +: 4 +{*teststructs.ParentStructJ}.PublicStruct.private: + -: 4 + +: 5 +{*teststructs.ParentStructJ}.Public.Public: + -: 7 + +: 8 +{*teststructs.ParentStructJ}.Public.private: + -: 8 + +: 9 +{*teststructs.ParentStructJ}.private.Public: + -: 5 + +: 6 +{*teststructs.ParentStructJ}.private.private: + -: 6 + +: 7`, + }} +} + +func methodTests() []test { + const label = "EqualMethod/" + + // A common mistake is that the Equal method is on a pointer receiver, + // but only a non-pointer value is present in the struct. + // A transform can be used to forcibly reference the value.
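// A hedged sketch of the mistake described above, before the derefTransform
// workaround that follows: Equal is declared on *Inner only, so a plain
// Inner value embedded in a struct is compared field-by-field and the method
// is bypassed. Inner and Outer are hypothetical types for illustration.
package main

import (
	"fmt"
	"strings"

	"github.com/google/go-cmp/cmp"
)

type Inner struct{ S string }

// Equal is defined on the pointer receiver only.
func (i *Inner) Equal(j *Inner) bool { return strings.EqualFold(i.S, j.S) }

type Outer struct{ In Inner }

func main() {
	x := Outer{Inner{"Hi"}}
	y := Outer{Inner{"hi"}}
	fmt.Println(cmp.Equal(x, y))         // false: Inner's method set lacks Equal
	fmt.Println(cmp.Equal(&x.In, &y.In)) // true: *Inner has Equal
}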
+ derefTransform := cmp.FilterPath(func(p cmp.Path) bool { + if len(p) == 0 { + return false + } + t := p[len(p)-1].Type() + if _, ok := t.MethodByName("Equal"); ok || t.Kind() == reflect.Ptr { + return false + } + if m, ok := reflect.PtrTo(t).MethodByName("Equal"); ok { + tf := m.Func.Type() + return !tf.IsVariadic() && tf.NumIn() == 2 && tf.NumOut() == 1 && + tf.In(0).AssignableTo(tf.In(1)) && tf.Out(0) == reflect.TypeOf(true) + } + return false + }, cmp.Transformer("Ref", func(x interface{}) interface{} { + v := reflect.ValueOf(x) + vp := reflect.New(v.Type()) + vp.Elem().Set(v) + return vp.Interface() + })) + + // For each of these types, there is an Equal method defined, which always + // returns true, while the underlying data are fundamentally different. + // Since the method should be called, these are expected to be equal. + return []test{{ + label: label + "StructA", + x: ts.StructA{"NotEqual"}, + y: ts.StructA{"not_equal"}, + }, { + label: label + "StructA", + x: &ts.StructA{"NotEqual"}, + y: &ts.StructA{"not_equal"}, + }, { + label: label + "StructB", + x: ts.StructB{"NotEqual"}, + y: ts.StructB{"not_equal"}, + wantDiff: ` +{teststructs.StructB}.X: + -: "NotEqual" + +: "not_equal"`, + }, { + label: label + "StructB", + x: ts.StructB{"NotEqual"}, + y: ts.StructB{"not_equal"}, + opts: []cmp.Option{derefTransform}, + }, { + label: label + "StructB", + x: &ts.StructB{"NotEqual"}, + y: &ts.StructB{"not_equal"}, + }, { + label: label + "StructC", + x: ts.StructC{"NotEqual"}, + y: ts.StructC{"not_equal"}, + }, { + label: label + "StructC", + x: &ts.StructC{"NotEqual"}, + y: &ts.StructC{"not_equal"}, + }, { + label: label + "StructD", + x: ts.StructD{"NotEqual"}, + y: ts.StructD{"not_equal"}, + wantDiff: ` +{teststructs.StructD}.X: + -: "NotEqual" + +: "not_equal"`, + }, { + label: label + "StructD", + x: ts.StructD{"NotEqual"}, + y: ts.StructD{"not_equal"}, + opts: []cmp.Option{derefTransform}, + }, { + label: label + "StructD", + x: &ts.StructD{"NotEqual"}, + y: &ts.StructD{"not_equal"}, + }, { + label: label + "StructE", + x: ts.StructE{"NotEqual"}, + y: ts.StructE{"not_equal"}, + wantDiff: ` +{teststructs.StructE}.X: + -: "NotEqual" + +: "not_equal"`, + }, { + label: label + "StructE", + x: ts.StructE{"NotEqual"}, + y: ts.StructE{"not_equal"}, + opts: []cmp.Option{derefTransform}, + }, { + label: label + "StructE", + x: &ts.StructE{"NotEqual"}, + y: &ts.StructE{"not_equal"}, + }, { + label: label + "StructF", + x: ts.StructF{"NotEqual"}, + y: ts.StructF{"not_equal"}, + wantDiff: ` +{teststructs.StructF}.X: + -: "NotEqual" + +: "not_equal"`, + }, { + label: label + "StructF", + x: &ts.StructF{"NotEqual"}, + y: &ts.StructF{"not_equal"}, + }, { + label: label + "StructA1", + x: ts.StructA1{ts.StructA{"NotEqual"}, "equal"}, + y: ts.StructA1{ts.StructA{"not_equal"}, "equal"}, + }, { + label: label + "StructA1", + x: ts.StructA1{ts.StructA{"NotEqual"}, "NotEqual"}, + y: ts.StructA1{ts.StructA{"not_equal"}, "not_equal"}, + wantDiff: "{teststructs.StructA1}.X:\n\t-: \"NotEqual\"\n\t+: \"not_equal\"\n", + }, { + label: label + "StructA1", + x: &ts.StructA1{ts.StructA{"NotEqual"}, "equal"}, + y: &ts.StructA1{ts.StructA{"not_equal"}, "equal"}, + }, { + label: label + "StructA1", + x: &ts.StructA1{ts.StructA{"NotEqual"}, "NotEqual"}, + y: &ts.StructA1{ts.StructA{"not_equal"}, "not_equal"}, + wantDiff: "{*teststructs.StructA1}.X:\n\t-: \"NotEqual\"\n\t+: \"not_equal\"\n", + }, { + label: label + "StructB1", + x: ts.StructB1{ts.StructB{"NotEqual"}, "equal"}, + y: 
ts.StructB1{ts.StructB{"not_equal"}, "equal"}, + opts: []cmp.Option{derefTransform}, + }, { + label: label + "StructB1", + x: ts.StructB1{ts.StructB{"NotEqual"}, "NotEqual"}, + y: ts.StructB1{ts.StructB{"not_equal"}, "not_equal"}, + opts: []cmp.Option{derefTransform}, + wantDiff: "{teststructs.StructB1}.X:\n\t-: \"NotEqual\"\n\t+: \"not_equal\"\n", + }, { + label: label + "StructB1", + x: &ts.StructB1{ts.StructB{"NotEqual"}, "equal"}, + y: &ts.StructB1{ts.StructB{"not_equal"}, "equal"}, + opts: []cmp.Option{derefTransform}, + }, { + label: label + "StructB1", + x: &ts.StructB1{ts.StructB{"NotEqual"}, "NotEqual"}, + y: &ts.StructB1{ts.StructB{"not_equal"}, "not_equal"}, + opts: []cmp.Option{derefTransform}, + wantDiff: "{*teststructs.StructB1}.X:\n\t-: \"NotEqual\"\n\t+: \"not_equal\"\n", + }, { + label: label + "StructC1", + x: ts.StructC1{ts.StructC{"NotEqual"}, "NotEqual"}, + y: ts.StructC1{ts.StructC{"not_equal"}, "not_equal"}, + }, { + label: label + "StructC1", + x: &ts.StructC1{ts.StructC{"NotEqual"}, "NotEqual"}, + y: &ts.StructC1{ts.StructC{"not_equal"}, "not_equal"}, + }, { + label: label + "StructD1", + x: ts.StructD1{ts.StructD{"NotEqual"}, "NotEqual"}, + y: ts.StructD1{ts.StructD{"not_equal"}, "not_equal"}, + wantDiff: ` +{teststructs.StructD1}.StructD.X: + -: "NotEqual" + +: "not_equal" +{teststructs.StructD1}.X: + -: "NotEqual" + +: "not_equal"`, + }, { + label: label + "StructD1", + x: ts.StructD1{ts.StructD{"NotEqual"}, "NotEqual"}, + y: ts.StructD1{ts.StructD{"not_equal"}, "not_equal"}, + opts: []cmp.Option{derefTransform}, + }, { + label: label + "StructD1", + x: &ts.StructD1{ts.StructD{"NotEqual"}, "NotEqual"}, + y: &ts.StructD1{ts.StructD{"not_equal"}, "not_equal"}, + }, { + label: label + "StructE1", + x: ts.StructE1{ts.StructE{"NotEqual"}, "NotEqual"}, + y: ts.StructE1{ts.StructE{"not_equal"}, "not_equal"}, + wantDiff: ` +{teststructs.StructE1}.StructE.X: + -: "NotEqual" + +: "not_equal" +{teststructs.StructE1}.X: + -: "NotEqual" + +: "not_equal"`, + }, { + label: label + "StructE1", + x: ts.StructE1{ts.StructE{"NotEqual"}, "NotEqual"}, + y: ts.StructE1{ts.StructE{"not_equal"}, "not_equal"}, + opts: []cmp.Option{derefTransform}, + }, { + label: label + "StructE1", + x: &ts.StructE1{ts.StructE{"NotEqual"}, "NotEqual"}, + y: &ts.StructE1{ts.StructE{"not_equal"}, "not_equal"}, + }, { + label: label + "StructF1", + x: ts.StructF1{ts.StructF{"NotEqual"}, "NotEqual"}, + y: ts.StructF1{ts.StructF{"not_equal"}, "not_equal"}, + wantDiff: ` +{teststructs.StructF1}.StructF.X: + -: "NotEqual" + +: "not_equal" +{teststructs.StructF1}.X: + -: "NotEqual" + +: "not_equal"`, + }, { + label: label + "StructF1", + x: &ts.StructF1{ts.StructF{"NotEqual"}, "NotEqual"}, + y: &ts.StructF1{ts.StructF{"not_equal"}, "not_equal"}, + }, { + label: label + "StructA2", + x: ts.StructA2{&ts.StructA{"NotEqual"}, "equal"}, + y: ts.StructA2{&ts.StructA{"not_equal"}, "equal"}, + }, { + label: label + "StructA2", + x: ts.StructA2{&ts.StructA{"NotEqual"}, "NotEqual"}, + y: ts.StructA2{&ts.StructA{"not_equal"}, "not_equal"}, + wantDiff: "{teststructs.StructA2}.X:\n\t-: \"NotEqual\"\n\t+: \"not_equal\"\n", + }, { + label: label + "StructA2", + x: &ts.StructA2{&ts.StructA{"NotEqual"}, "equal"}, + y: &ts.StructA2{&ts.StructA{"not_equal"}, "equal"}, + }, { + label: label + "StructA2", + x: &ts.StructA2{&ts.StructA{"NotEqual"}, "NotEqual"}, + y: &ts.StructA2{&ts.StructA{"not_equal"}, "not_equal"}, + wantDiff: "{*teststructs.StructA2}.X:\n\t-: \"NotEqual\"\n\t+: \"not_equal\"\n", + }, { + label: label + 
"StructB2", + x: ts.StructB2{&ts.StructB{"NotEqual"}, "equal"}, + y: ts.StructB2{&ts.StructB{"not_equal"}, "equal"}, + }, { + label: label + "StructB2", + x: ts.StructB2{&ts.StructB{"NotEqual"}, "NotEqual"}, + y: ts.StructB2{&ts.StructB{"not_equal"}, "not_equal"}, + wantDiff: "{teststructs.StructB2}.X:\n\t-: \"NotEqual\"\n\t+: \"not_equal\"\n", + }, { + label: label + "StructB2", + x: &ts.StructB2{&ts.StructB{"NotEqual"}, "equal"}, + y: &ts.StructB2{&ts.StructB{"not_equal"}, "equal"}, + }, { + label: label + "StructB2", + x: &ts.StructB2{&ts.StructB{"NotEqual"}, "NotEqual"}, + y: &ts.StructB2{&ts.StructB{"not_equal"}, "not_equal"}, + wantDiff: "{*teststructs.StructB2}.X:\n\t-: \"NotEqual\"\n\t+: \"not_equal\"\n", + }, { + label: label + "StructC2", + x: ts.StructC2{&ts.StructC{"NotEqual"}, "NotEqual"}, + y: ts.StructC2{&ts.StructC{"not_equal"}, "not_equal"}, + }, { + label: label + "StructC2", + x: &ts.StructC2{&ts.StructC{"NotEqual"}, "NotEqual"}, + y: &ts.StructC2{&ts.StructC{"not_equal"}, "not_equal"}, + }, { + label: label + "StructD2", + x: ts.StructD2{&ts.StructD{"NotEqual"}, "NotEqual"}, + y: ts.StructD2{&ts.StructD{"not_equal"}, "not_equal"}, + }, { + label: label + "StructD2", + x: &ts.StructD2{&ts.StructD{"NotEqual"}, "NotEqual"}, + y: &ts.StructD2{&ts.StructD{"not_equal"}, "not_equal"}, + }, { + label: label + "StructE2", + x: ts.StructE2{&ts.StructE{"NotEqual"}, "NotEqual"}, + y: ts.StructE2{&ts.StructE{"not_equal"}, "not_equal"}, + }, { + label: label + "StructE2", + x: &ts.StructE2{&ts.StructE{"NotEqual"}, "NotEqual"}, + y: &ts.StructE2{&ts.StructE{"not_equal"}, "not_equal"}, + }, { + label: label + "StructF2", + x: ts.StructF2{&ts.StructF{"NotEqual"}, "NotEqual"}, + y: ts.StructF2{&ts.StructF{"not_equal"}, "not_equal"}, + }, { + label: label + "StructF2", + x: &ts.StructF2{&ts.StructF{"NotEqual"}, "NotEqual"}, + y: &ts.StructF2{&ts.StructF{"not_equal"}, "not_equal"}, + }, { + label: label + "StructNo", + x: ts.StructNo{"NotEqual"}, + y: ts.StructNo{"not_equal"}, + wantDiff: "{teststructs.StructNo}.X:\n\t-: \"NotEqual\"\n\t+: \"not_equal\"\n", + }, { + label: label + "AssignA", + x: ts.AssignA(func() int { return 0 }), + y: ts.AssignA(func() int { return 1 }), + }, { + label: label + "AssignB", + x: ts.AssignB(struct{ A int }{0}), + y: ts.AssignB(struct{ A int }{1}), + }, { + label: label + "AssignC", + x: ts.AssignC(make(chan bool)), + y: ts.AssignC(make(chan bool)), + }, { + label: label + "AssignD", + x: ts.AssignD(make(chan bool)), + y: ts.AssignD(make(chan bool)), + }} +} + +func project1Tests() []test { + const label = "Project1" + + ignoreUnexported := cmpopts.IgnoreUnexported( + ts.EagleImmutable{}, + ts.DreamerImmutable{}, + ts.SlapImmutable{}, + ts.GoatImmutable{}, + ts.DonkeyImmutable{}, + ts.LoveRadius{}, + ts.SummerLove{}, + ts.SummerLoveSummary{}, + ) + + createEagle := func() ts.Eagle { + return ts.Eagle{ + Name: "eagle", + Hounds: []string{"buford", "tannen"}, + Desc: "some description", + Dreamers: []ts.Dreamer{{}, { + Name: "dreamer2", + Animal: []interface{}{ + ts.Goat{ + Target: "corporation", + Immutable: &ts.GoatImmutable{ + ID: "southbay", + State: (*pb.Goat_States)(intPtr(5)), + Started: now, + }, + }, + ts.Donkey{}, + }, + Amoeba: 53, + }}, + Slaps: []ts.Slap{{ + Name: "slapID", + Args: &pb.MetaData{Stringer: pb.Stringer{"metadata"}}, + Immutable: &ts.SlapImmutable{ + ID: "immutableSlap", + MildSlap: true, + Started: now, + LoveRadius: &ts.LoveRadius{ + Summer: &ts.SummerLove{ + Summary: &ts.SummerLoveSummary{ + Devices: []string{"foo", "bar", "baz"}, 
+ ChangeType: []pb.SummerType{1, 2, 3}, + }, + }, + }, + }, + }}, + Immutable: &ts.EagleImmutable{ + ID: "eagleID", + Birthday: now, + MissingCall: (*pb.Eagle_MissingCalls)(intPtr(55)), + }, + } + } + + return []test{{ + label: label, + x: ts.Eagle{Slaps: []ts.Slap{{ + Args: &pb.MetaData{Stringer: pb.Stringer{"metadata"}}, + }}}, + y: ts.Eagle{Slaps: []ts.Slap{{ + Args: &pb.MetaData{Stringer: pb.Stringer{"metadata"}}, + }}}, + wantPanic: "cannot handle unexported field", + }, { + label: label, + x: ts.Eagle{Slaps: []ts.Slap{{ + Args: &pb.MetaData{Stringer: pb.Stringer{"metadata"}}, + }}}, + y: ts.Eagle{Slaps: []ts.Slap{{ + Args: &pb.MetaData{Stringer: pb.Stringer{"metadata"}}, + }}}, + opts: []cmp.Option{cmp.Comparer(pb.Equal)}, + }, { + label: label, + x: ts.Eagle{Slaps: []ts.Slap{{}, {}, {}, {}, { + Args: &pb.MetaData{Stringer: pb.Stringer{"metadata"}}, + }}}, + y: ts.Eagle{Slaps: []ts.Slap{{}, {}, {}, {}, { + Args: &pb.MetaData{Stringer: pb.Stringer{"metadata2"}}, + }}}, + opts: []cmp.Option{cmp.Comparer(pb.Equal)}, + wantDiff: "{teststructs.Eagle}.Slaps[4].Args:\n\t-: \"metadata\"\n\t+: \"metadata2\"\n", + }, { + label: label, + x: createEagle(), + y: createEagle(), + opts: []cmp.Option{ignoreUnexported, cmp.Comparer(pb.Equal)}, + }, { + label: label, + x: func() ts.Eagle { + eg := createEagle() + eg.Dreamers[1].Animal[0].(ts.Goat).Immutable.ID = "southbay2" + eg.Dreamers[1].Animal[0].(ts.Goat).Immutable.State = (*pb.Goat_States)(intPtr(6)) + eg.Slaps[0].Immutable.MildSlap = false + return eg + }(), + y: func() ts.Eagle { + eg := createEagle() + devs := eg.Slaps[0].Immutable.LoveRadius.Summer.Summary.Devices + eg.Slaps[0].Immutable.LoveRadius.Summer.Summary.Devices = devs[:1] + return eg + }(), + opts: []cmp.Option{ignoreUnexported, cmp.Comparer(pb.Equal)}, + wantDiff: ` +{teststructs.Eagle}.Dreamers[1].Animal[0].(teststructs.Goat).Immutable.ID: + -: "southbay2" + +: "southbay" +*{teststructs.Eagle}.Dreamers[1].Animal[0].(teststructs.Goat).Immutable.State: + -: testprotos.Goat_States(6) + +: testprotos.Goat_States(5) +{teststructs.Eagle}.Slaps[0].Immutable.MildSlap: + -: false + +: true +{teststructs.Eagle}.Slaps[0].Immutable.LoveRadius.Summer.Summary.Devices[1->?]: + -: "bar" + +: +{teststructs.Eagle}.Slaps[0].Immutable.LoveRadius.Summer.Summary.Devices[2->?]: + -: "baz" + +: `, + }} +} + +type germSorter []*pb.Germ + +func (gs germSorter) Len() int { return len(gs) } +func (gs germSorter) Less(i, j int) bool { return gs[i].String() < gs[j].String() } +func (gs germSorter) Swap(i, j int) { gs[i], gs[j] = gs[j], gs[i] } + +func project2Tests() []test { + const label = "Project2" + + sortGerms := cmp.FilterValues(func(x, y []*pb.Germ) bool { + ok1 := sort.IsSorted(germSorter(x)) + ok2 := sort.IsSorted(germSorter(y)) + return !ok1 || !ok2 + }, cmp.Transformer("Sort", func(in []*pb.Germ) []*pb.Germ { + out := append([]*pb.Germ(nil), in...) 
// Make copy + sort.Sort(germSorter(out)) + return out + })) + + equalDish := cmp.Comparer(func(x, y *ts.Dish) bool { + if x == nil || y == nil { + return x == nil && y == nil + } + px, err1 := x.Proto() + py, err2 := y.Proto() + if err1 != nil || err2 != nil { + return err1 == err2 + } + return pb.Equal(px, py) + }) + + createBatch := func() ts.GermBatch { + return ts.GermBatch{ + DirtyGerms: map[int32][]*pb.Germ{ + 17: { + {Stringer: pb.Stringer{"germ1"}}, + }, + 18: { + {Stringer: pb.Stringer{"germ2"}}, + {Stringer: pb.Stringer{"germ3"}}, + {Stringer: pb.Stringer{"germ4"}}, + }, + }, + GermMap: map[int32]*pb.Germ{ + 13: {Stringer: pb.Stringer{"germ13"}}, + 21: {Stringer: pb.Stringer{"germ21"}}, + }, + DishMap: map[int32]*ts.Dish{ + 0: ts.CreateDish(nil, io.EOF), + 1: ts.CreateDish(nil, io.ErrUnexpectedEOF), + 2: ts.CreateDish(&pb.Dish{Stringer: pb.Stringer{"dish"}}, nil), + }, + HasPreviousResult: true, + DirtyID: 10, + GermStrain: 421, + InfectedAt: now, + } + } + + return []test{{ + label: label, + x: createBatch(), + y: createBatch(), + wantPanic: "cannot handle unexported field", + }, { + label: label, + x: createBatch(), + y: createBatch(), + opts: []cmp.Option{cmp.Comparer(pb.Equal), sortGerms, equalDish}, + }, { + label: label, + x: createBatch(), + y: func() ts.GermBatch { + gb := createBatch() + s := gb.DirtyGerms[18] + s[0], s[1], s[2] = s[1], s[2], s[0] + return gb + }(), + opts: []cmp.Option{cmp.Comparer(pb.Equal), equalDish}, + wantDiff: ` +{teststructs.GermBatch}.DirtyGerms[18][0->?]: + -: "germ2" + +: +{teststructs.GermBatch}.DirtyGerms[18][?->2]: + -: + +: "germ2"`, + }, { + label: label, + x: createBatch(), + y: func() ts.GermBatch { + gb := createBatch() + s := gb.DirtyGerms[18] + s[0], s[1], s[2] = s[1], s[2], s[0] + return gb + }(), + opts: []cmp.Option{cmp.Comparer(pb.Equal), sortGerms, equalDish}, + }, { + label: label, + x: func() ts.GermBatch { + gb := createBatch() + delete(gb.DirtyGerms, 17) + gb.DishMap[1] = nil + return gb + }(), + y: func() ts.GermBatch { + gb := createBatch() + gb.DirtyGerms[18] = gb.DirtyGerms[18][:2] + gb.GermStrain = 22 + return gb + }(), + opts: []cmp.Option{cmp.Comparer(pb.Equal), sortGerms, equalDish}, + wantDiff: ` +{teststructs.GermBatch}.DirtyGerms[17]: + -: + +: []*testprotos.Germ{"germ1"} +{teststructs.GermBatch}.DirtyGerms[18][2->?]: + -: "germ4" + +: +{teststructs.GermBatch}.DishMap[1]: + -: (*teststructs.Dish)(nil) + +: &teststructs.Dish{err: &errors.errorString{s: "unexpected EOF"}} +{teststructs.GermBatch}.GermStrain: + -: 421 + +: 22`, + }} +} + +func project3Tests() []test { + const label = "Project3" + + allowVisibility := cmp.AllowUnexported(ts.Dirt{}) + + ignoreLocker := cmpopts.IgnoreInterfaces(struct{ sync.Locker }{}) + + transformProtos := cmp.Transformer("", func(x pb.Dirt) *pb.Dirt { + return &x + }) + + equalTable := cmp.Comparer(func(x, y ts.Table) bool { + tx, ok1 := x.(*ts.MockTable) + ty, ok2 := y.(*ts.MockTable) + if !ok1 || !ok2 { + panic("table type must be MockTable") + } + return cmp.Equal(tx.State(), ty.State()) + }) + + createDirt := func() (d ts.Dirt) { + d.SetTable(ts.CreateMockTable([]string{"a", "b", "c"})) + d.SetTimestamp(12345) + d.Discord = 554 + d.Proto = pb.Dirt{Stringer: pb.Stringer{"proto"}} + d.SetWizard(map[string]*pb.Wizard{ + "harry": {Stringer: pb.Stringer{"potter"}}, + "albus": {Stringer: pb.Stringer{"dumbledore"}}, + }) + d.SetLastTime(54321) + return d + } + + return []test{{ + label: label, + x: createDirt(), + y: createDirt(), + wantPanic: "cannot handle unexported field", + }, { + 
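+		// Without transformProtos, the pb.Dirt value in Dirt.Proto is never
+		// converted to *pb.Dirt, so cmp.Comparer(pb.Equal) does not apply to
+		// it and cmp panics upon reaching its unexported fields.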
label: label, + x: createDirt(), + y: createDirt(), + opts: []cmp.Option{allowVisibility, ignoreLocker, cmp.Comparer(pb.Equal), equalTable}, + wantPanic: "cannot handle unexported field", + }, { + label: label, + x: createDirt(), + y: createDirt(), + opts: []cmp.Option{allowVisibility, transformProtos, ignoreLocker, cmp.Comparer(pb.Equal), equalTable}, + }, { + label: label, + x: func() ts.Dirt { + d := createDirt() + d.SetTable(ts.CreateMockTable([]string{"a", "c"})) + d.Proto = pb.Dirt{Stringer: pb.Stringer{"blah"}} + return d + }(), + y: func() ts.Dirt { + d := createDirt() + d.Discord = 500 + d.SetWizard(map[string]*pb.Wizard{ + "harry": {Stringer: pb.Stringer{"otter"}}, + }) + return d + }(), + opts: []cmp.Option{allowVisibility, transformProtos, ignoreLocker, cmp.Comparer(pb.Equal), equalTable}, + wantDiff: ` +{teststructs.Dirt}.table: + -: &teststructs.MockTable{state: []string{"a", "c"}} + +: &teststructs.MockTable{state: []string{"a", "b", "c"}} +{teststructs.Dirt}.Discord: + -: teststructs.DiscordState(554) + +: teststructs.DiscordState(500) +λ({teststructs.Dirt}.Proto): + -: "blah" + +: "proto" +{teststructs.Dirt}.wizard["albus"]: + -: "dumbledore" + +: +{teststructs.Dirt}.wizard["harry"]: + -: "potter" + +: "otter"`, + }} +} + +func project4Tests() []test { + const label = "Project4" + + allowVisibility := cmp.AllowUnexported( + ts.Cartel{}, + ts.Headquarter{}, + ts.Poison{}, + ) + + transformProtos := cmp.Transformer("", func(x pb.Restrictions) *pb.Restrictions { + return &x + }) + + createCartel := func() ts.Cartel { + var p ts.Poison + p.SetPoisonType(5) + p.SetExpiration(now) + p.SetManufactuer("acme") + + var hq ts.Headquarter + hq.SetID(5) + hq.SetLocation("moon") + hq.SetSubDivisions([]string{"alpha", "bravo", "charlie"}) + hq.SetMetaData(&pb.MetaData{Stringer: pb.Stringer{"metadata"}}) + hq.SetPublicMessage([]byte{1, 2, 3, 4, 5}) + hq.SetHorseBack("abcdef") + hq.SetStatus(44) + + var c ts.Cartel + c.Headquarter = hq + c.SetSource("mars") + c.SetCreationTime(now) + c.SetBoss("al capone") + c.SetPoisons([]*ts.Poison{&p}) + + return c + } + + return []test{{ + label: label, + x: createCartel(), + y: createCartel(), + wantPanic: "cannot handle unexported field", + }, { + label: label, + x: createCartel(), + y: createCartel(), + opts: []cmp.Option{allowVisibility, cmp.Comparer(pb.Equal)}, + wantPanic: "cannot handle unexported field", + }, { + label: label, + x: createCartel(), + y: createCartel(), + opts: []cmp.Option{allowVisibility, transformProtos, cmp.Comparer(pb.Equal)}, + }, { + label: label, + x: func() ts.Cartel { + d := createCartel() + var p1, p2 ts.Poison + p1.SetPoisonType(1) + p1.SetExpiration(now) + p1.SetManufactuer("acme") + p2.SetPoisonType(2) + p2.SetManufactuer("acme2") + d.SetPoisons([]*ts.Poison{&p1, &p2}) + return d + }(), + y: func() ts.Cartel { + d := createCartel() + d.SetSubDivisions([]string{"bravo", "charlie"}) + d.SetPublicMessage([]byte{1, 2, 4, 3, 5}) + return d + }(), + opts: []cmp.Option{allowVisibility, transformProtos, cmp.Comparer(pb.Equal)}, + wantDiff: ` +{teststructs.Cartel}.Headquarter.subDivisions[0->?]: + -: "alpha" + +: +{teststructs.Cartel}.Headquarter.publicMessage[2]: + -: 0x03 + +: 0x04 +{teststructs.Cartel}.Headquarter.publicMessage[3]: + -: 0x04 + +: 0x03 +{teststructs.Cartel}.poisons[0].poisonType: + -: testprotos.PoisonType(1) + +: testprotos.PoisonType(5) +{teststructs.Cartel}.poisons[1->?]: + -: &teststructs.Poison{poisonType: testprotos.PoisonType(2), manufactuer: "acme2"} + +: `, + }} +} + +// TODO: Delete this hack 
when we drop Go1.6 support. +func tRunParallel(t *testing.T, name string, f func(t *testing.T)) { + type runner interface { + Run(string, func(t *testing.T)) bool + } + var ti interface{} = t + if r, ok := ti.(runner); ok { + r.Run(name, func(t *testing.T) { + t.Parallel() + f(t) + }) + } else { + // Cannot run sub-tests in parallel in Go1.6. + t.Logf("Test: %s", name) + f(t) + } +} diff --git a/vendor/github.com/google/go-cmp/cmp/example_test.go b/vendor/github.com/google/go-cmp/cmp/example_test.go new file mode 100644 index 000000000..8729db398 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/example_test.go @@ -0,0 +1,374 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp_test + +import ( + "fmt" + "math" + "reflect" + "sort" + "strings" + + "github.com/google/go-cmp/cmp" +) + +// TODO: Re-write these examples in terms of how you actually use the +// fundamental options and filters and not in terms of what cool things you can +// do with them since that overlaps with cmp/cmpopts. + +// Use Diff for printing out human-readable errors for test cases comparing +// nested or structured data. +func ExampleDiff_testing() { + // Code under test: + type ShipManifest struct { + Name string + Crew map[string]string + Androids int + Stolen bool + } + + // AddCrew tries to add the given crewmember to the manifest. + AddCrew := func(m *ShipManifest, name, title string) { + if m.Crew == nil { + m.Crew = make(map[string]string) + } + m.Crew[title] = name + } + + // Test function: + tests := []struct { + desc string + before *ShipManifest + name, title string + after *ShipManifest + }{ + { + desc: "add to empty", + before: &ShipManifest{}, + name: "Zaphod Beeblebrox", + title: "Galactic President", + after: &ShipManifest{ + Crew: map[string]string{ + "Zaphod Beeblebrox": "Galactic President", + }, + }, + }, + { + desc: "add another", + before: &ShipManifest{ + Crew: map[string]string{ + "Zaphod Beeblebrox": "Galactic President", + }, + }, + name: "Trillian", + title: "Human", + after: &ShipManifest{ + Crew: map[string]string{ + "Zaphod Beeblebrox": "Galactic President", + "Trillian": "Human", + }, + }, + }, + { + desc: "overwrite", + before: &ShipManifest{ + Crew: map[string]string{ + "Zaphod Beeblebrox": "Galactic President", + }, + }, + name: "Zaphod Beeblebrox", + title: "Just this guy, you know?", + after: &ShipManifest{ + Crew: map[string]string{ + "Zaphod Beeblebrox": "Just this guy, you know?", + }, + }, + }, + } + + var t fakeT + for _, test := range tests { + AddCrew(test.before, test.name, test.title) + if diff := cmp.Diff(test.before, test.after); diff != "" { + t.Errorf("%s: after AddCrew, manifest differs: (-got +want)\n%s", test.desc, diff) + } + } + + // Output: + // add to empty: after AddCrew, manifest differs: (-got +want) + // {*cmp_test.ShipManifest}.Crew["Galactic President"]: + // -: "Zaphod Beeblebrox" + // +: + // {*cmp_test.ShipManifest}.Crew["Zaphod Beeblebrox"]: + // -: + // +: "Galactic President" + // + // add another: after AddCrew, manifest differs: (-got +want) + // {*cmp_test.ShipManifest}.Crew["Human"]: + // -: "Trillian" + // +: + // {*cmp_test.ShipManifest}.Crew["Trillian"]: + // -: + // +: "Human" + // + // overwrite: after AddCrew, manifest differs: (-got +want) + // {*cmp_test.ShipManifest}.Crew["Just this guy, you know?"]: + // -: "Zaphod Beeblebrox" + // +: + // {*cmp_test.ShipManifest}.Crew["Zaphod Beeblebrox"]: + // -: 
"Galactic President" + // +: "Just this guy, you know?" +} + +// Approximate equality for floats can be handled by defining a custom +// comparer on floats that determines two values to be equal if they are within +// some range of each other. +// +// This example is for demonstrative purposes; use cmpopts.EquateApprox instead. +func ExampleOption_approximateFloats() { + // This Comparer only operates on float64. + // To handle float32s, either define a similar function for that type + // or use a Transformer to convert float32s into float64s. + opt := cmp.Comparer(func(x, y float64) bool { + delta := math.Abs(x - y) + mean := math.Abs(x+y) / 2.0 + return delta/mean < 0.00001 + }) + + x := []float64{1.0, 1.1, 1.2, math.Pi} + y := []float64{1.0, 1.1, 1.2, 3.14159265359} // Accurate enough to Pi + z := []float64{1.0, 1.1, 1.2, 3.1415} // Diverges too far from Pi + + fmt.Println(cmp.Equal(x, y, opt)) + fmt.Println(cmp.Equal(y, z, opt)) + fmt.Println(cmp.Equal(z, x, opt)) + + // Output: + // true + // false + // false +} + +// Normal floating-point arithmetic defines == to be false when comparing +// NaN with itself. In certain cases, this is not the desired property. +// +// This example is for demonstrative purposes; use cmpopts.EquateNaNs instead. +func ExampleOption_equalNaNs() { + // This Comparer only operates on float64. + // To handle float32s, either define a similar function for that type + // or use a Transformer to convert float32s into float64s. + opt := cmp.Comparer(func(x, y float64) bool { + return (math.IsNaN(x) && math.IsNaN(y)) || x == y + }) + + x := []float64{1.0, math.NaN(), math.E, -0.0, +0.0} + y := []float64{1.0, math.NaN(), math.E, -0.0, +0.0} + z := []float64{1.0, math.NaN(), math.Pi, -0.0, +0.0} // Pi constant instead of E + + fmt.Println(cmp.Equal(x, y, opt)) + fmt.Println(cmp.Equal(y, z, opt)) + fmt.Println(cmp.Equal(z, x, opt)) + + // Output: + // true + // false + // false +} + +// To have floating-point comparisons combine both properties of NaN being +// equal to itself and also approximate equality of values, filters are needed +// to restrict the scope of the comparison so that they are composable. +// +// This example is for demonstrative purposes; +// use cmpopts.EquateNaNs and cmpopts.EquateApprox instead. +func ExampleOption_equalNaNsAndApproximateFloats() { + alwaysEqual := cmp.Comparer(func(_, _ interface{}) bool { return true }) + + opts := cmp.Options{ + // This option declares that a float64 comparison is equal only if + // both inputs are NaN. + cmp.FilterValues(func(x, y float64) bool { + return math.IsNaN(x) && math.IsNaN(y) + }, alwaysEqual), + + // This option declares approximate equality on float64s only if + // both inputs are not NaN. + cmp.FilterValues(func(x, y float64) bool { + return !math.IsNaN(x) && !math.IsNaN(y) + }, cmp.Comparer(func(x, y float64) bool { + delta := math.Abs(x - y) + mean := math.Abs(x+y) / 2.0 + return delta/mean < 0.00001 + })), + } + + x := []float64{math.NaN(), 1.0, 1.1, 1.2, math.Pi} + y := []float64{math.NaN(), 1.0, 1.1, 1.2, 3.14159265359} // Accurate enough to Pi + z := []float64{math.NaN(), 1.0, 1.1, 1.2, 3.1415} // Diverges too far from Pi + + fmt.Println(cmp.Equal(x, y, opts)) + fmt.Println(cmp.Equal(y, z, opts)) + fmt.Println(cmp.Equal(z, x, opts)) + + // Output: + // true + // false + // false +} + +// Sometimes, an empty map or slice is considered equal to an allocated one +// of zero length. +// +// This example is for demonstrative purposes; use cmpopts.EquateEmpty instead. 
+func ExampleOption_equalEmpty() {
+	alwaysEqual := cmp.Comparer(func(_, _ interface{}) bool { return true })
+
+	// This option handles slices and maps of any type.
+	opt := cmp.FilterValues(func(x, y interface{}) bool {
+		vx, vy := reflect.ValueOf(x), reflect.ValueOf(y)
+		return (vx.IsValid() && vy.IsValid() && vx.Type() == vy.Type()) &&
+			(vx.Kind() == reflect.Slice || vx.Kind() == reflect.Map) &&
+			(vx.Len() == 0 && vy.Len() == 0)
+	}, alwaysEqual)
+
+	type S struct {
+		A []int
+		B map[string]bool
+	}
+	x := S{nil, make(map[string]bool, 100)}
+	y := S{make([]int, 0, 200), nil}
+	z := S{[]int{0}, nil} // []int has a single element (i.e., not empty)
+
+	fmt.Println(cmp.Equal(x, y, opt))
+	fmt.Println(cmp.Equal(y, z, opt))
+	fmt.Println(cmp.Equal(z, x, opt))
+
+	// Output:
+	// true
+	// false
+	// false
+}
+
+// Two slices may be considered equal if they have the same elements,
+// regardless of the order that they appear in. Transformations can be used
+// to sort the slice.
+//
+// This example is for demonstrative purposes; use cmpopts.SortSlices instead.
+func ExampleOption_sortedSlice() {
+	// This Transformer sorts a []int.
+	// Since the transformer transforms []int into []int, there is a problem where
+	// this is recursively applied forever. To prevent this, use a FilterValues
+	// to first check for the condition upon which the transformer ought to apply.
+	trans := cmp.FilterValues(func(x, y []int) bool {
+		return !sort.IntsAreSorted(x) || !sort.IntsAreSorted(y)
+	}, cmp.Transformer("Sort", func(in []int) []int {
+		out := append([]int(nil), in...) // Copy input to avoid mutating it
+		sort.Ints(out)
+		return out
+	}))
+
+	x := struct{ Ints []int }{[]int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}}
+	y := struct{ Ints []int }{[]int{2, 8, 0, 9, 6, 1, 4, 7, 3, 5}}
+	z := struct{ Ints []int }{[]int{0, 0, 1, 2, 3, 4, 5, 6, 7, 8}}
+
+	fmt.Println(cmp.Equal(x, y, trans))
+	fmt.Println(cmp.Equal(y, z, trans))
+	fmt.Println(cmp.Equal(z, x, trans))
+
+	// Output:
+	// true
+	// false
+	// false
+}
+
+type otherString string
+
+func (x otherString) Equal(y otherString) bool {
+	return strings.ToLower(string(x)) == strings.ToLower(string(y))
+}
+
+// If the Equal method defined on a type is not suitable, the type can be
+// dynamically transformed to be stripped of the Equal method (or any method
+// for that matter).
+func ExampleOption_avoidEqualMethod() {
+	// Suppose otherString.Equal performs a case-insensitive equality,
+	// which is too loose for our needs.
+	// We can avoid the methods of otherString by declaring a new type.
+	type myString otherString
+
+	// This transformer converts otherString to myString, allowing Equal to use
+	// other Options to determine equality.
+	trans := cmp.Transformer("", func(in otherString) myString {
+		return myString(in)
+	})
+
+	x := []otherString{"foo", "bar", "baz"}
+	y := []otherString{"fOO", "bAr", "Baz"} // Same as before, but with different case
+
+	fmt.Println(cmp.Equal(x, y))        // Equal because of case-insensitivity
+	fmt.Println(cmp.Equal(x, y, trans)) // Not equal because of more exact equality
+
+	// Output:
+	// true
+	// false
+}
+
+func roundF64(z float64) float64 {
+	if z < 0 {
+		return math.Ceil(z - 0.5)
+	}
+	return math.Floor(z + 0.5)
+}
+
+// The complex numbers complex64 and complex128 can really just be decomposed
+// into a pair of float32 or float64 values. It would be convenient to be able
+// to define only a single comparator on float64 and have float32, complex64, and
+// complex128 all be able to use that comparator.
Transformations can be used +// to handle this. +func ExampleOption_transformComplex() { + opts := []cmp.Option{ + // This transformer decomposes complex128 into a pair of float64s. + cmp.Transformer("T1", func(in complex128) (out struct{ Real, Imag float64 }) { + out.Real, out.Imag = real(in), imag(in) + return out + }), + // This transformer converts complex64 to complex128 to allow the + // above transform to take effect. + cmp.Transformer("T2", func(in complex64) complex128 { + return complex128(in) + }), + // This transformer converts float32 to float64. + cmp.Transformer("T3", func(in float32) float64 { + return float64(in) + }), + // This equality function compares float64s as rounded integers. + cmp.Comparer(func(x, y float64) bool { + return roundF64(x) == roundF64(y) + }), + } + + x := []interface{}{ + complex128(3.0), complex64(5.1 + 2.9i), float32(-1.2), float64(12.3), + } + y := []interface{}{ + complex128(3.1), complex64(4.9 + 3.1i), float32(-1.3), float64(11.7), + } + z := []interface{}{ + complex128(3.8), complex64(4.9 + 3.1i), float32(-1.3), float64(11.7), + } + + fmt.Println(cmp.Equal(x, y, opts...)) + fmt.Println(cmp.Equal(y, z, opts...)) + fmt.Println(cmp.Equal(z, x, opts...)) + + // Output: + // true + // false + // false +} + +type fakeT struct{} + +func (t fakeT) Errorf(format string, args ...interface{}) { fmt.Printf(format+"\n", args...) } diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go new file mode 100644 index 000000000..42afa4960 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go @@ -0,0 +1,17 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build !debug + +package diff + +var debug debugger + +type debugger struct{} + +func (debugger) Begin(_, _ int, f EqualFunc, _, _ *EditScript) EqualFunc { + return f +} +func (debugger) Update() {} +func (debugger) Finish() {} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go new file mode 100644 index 000000000..ba46c62e1 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go @@ -0,0 +1,122 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. 
+
+// +build debug
+
+package diff
+
+import (
+	"fmt"
+	"strings"
+	"sync"
+	"time"
+)
+
+// The algorithm can be seen running in real-time by enabling debugging:
+//	go test -tags=debug -v
+//
+// Example output:
+//	=== RUN TestDifference/#34
+//	┌───────────────────────────────┐
+//	│ \ · · · · · · · · · · · · · · │
+//	│ · # · · · · · · · · · · · · · │
+//	│ · \ · · · · · · · · · · · · · │
+//	│ · · \ · · · · · · · · · · · · │
+//	│ · · · X # · · · · · · · · · · │
+//	│ · · · # \ · · · · · · · · · · │
+//	│ · · · · · # # · · · · · · · · │
+//	│ · · · · · # \ · · · · · · · · │
+//	│ · · · · · · · \ · · · · · · · │
+//	│ · · · · · · · · \ · · · · · · │
+//	│ · · · · · · · · · \ · · · · · │
+//	│ · · · · · · · · · · \ · · # · │
+//	│ · · · · · · · · · · · \ # # · │
+//	│ · · · · · · · · · · · # # # · │
+//	│ · · · · · · · · · · # # # # · │
+//	│ · · · · · · · · · # # # # # · │
+//	│ · · · · · · · · · · · · · · \ │
+//	└───────────────────────────────┘
+//	[.Y..M.XY......YXYXY.|]
+//
+// The grid represents the edit-graph where the horizontal axis represents
+// list X and the vertical axis represents list Y. The start of the two lists
+// is the top-left, while the ends are the bottom-right. The '·' represents
+// an unexplored node in the graph. The '\' indicates that the two symbols
+// from list X and Y are equal. The 'X' indicates that two symbols are similar
+// (but not exactly equal) to each other. The '#' indicates that the two symbols
+// are different (and not similar). The algorithm traverses this graph trying to
+// make the paths starting in the top-left and the bottom-right connect.
+//
+// The series of '.', 'X', 'Y', and 'M' characters at the bottom represents
+// the currently established path from the forward and reverse searches,
+// separated by a '|' character.
+
+const (
+	updateDelay  = 100 * time.Millisecond
+	finishDelay  = 500 * time.Millisecond
+	ansiTerminal = true // ANSI escape codes used to move terminal cursor
+)
+
+var debug debugger
+
+type debugger struct {
+	sync.Mutex
+	p1, p2           EditScript
+	fwdPath, revPath *EditScript
+	grid             []byte
+	lines            int
+}
+
+func (dbg *debugger) Begin(nx, ny int, f EqualFunc, p1, p2 *EditScript) EqualFunc {
+	dbg.Lock()
+	dbg.fwdPath, dbg.revPath = p1, p2
+	top := "┌─" + strings.Repeat("──", nx) + "┐\n"
+	row := "│ " + strings.Repeat("· ", nx) + "│\n"
+	btm := "└─" + strings.Repeat("──", nx) + "┘\n"
+	dbg.grid = []byte(top + strings.Repeat(row, ny) + btm)
+	dbg.lines = strings.Count(dbg.String(), "\n")
+	fmt.Print(dbg)
+
+	// Wrap the EqualFunc so that we can intercept each result.
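+	// The wrapper locates the grid cell for the coordinate (ix, iy), zeroes
+	// out the UTF-8 bytes of its '·' placeholder, and then records '\', 'X',
+	// or '#' according to whether the result is Equal, Similar, or neither.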
+ return func(ix, iy int) (r Result) { + cell := dbg.grid[len(top)+iy*len(row):][len("│ ")+len("· ")*ix:][:len("·")] + for i := range cell { + cell[i] = 0 // Zero out the multiple bytes of UTF-8 middle-dot + } + switch r = f(ix, iy); { + case r.Equal(): + cell[0] = '\\' + case r.Similar(): + cell[0] = 'X' + default: + cell[0] = '#' + } + return + } +} + +func (dbg *debugger) Update() { + dbg.print(updateDelay) +} + +func (dbg *debugger) Finish() { + dbg.print(finishDelay) + dbg.Unlock() +} + +func (dbg *debugger) String() string { + dbg.p1, dbg.p2 = *dbg.fwdPath, dbg.p2[:0] + for i := len(*dbg.revPath) - 1; i >= 0; i-- { + dbg.p2 = append(dbg.p2, (*dbg.revPath)[i]) + } + return fmt.Sprintf("%s[%v|%v]\n\n", dbg.grid, dbg.p1, dbg.p2) +} + +func (dbg *debugger) print(d time.Duration) { + if ansiTerminal { + fmt.Printf("\x1b[%dA", dbg.lines) // Reset terminal cursor + } + fmt.Print(dbg) + time.Sleep(d) +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go new file mode 100644 index 000000000..baa41fd23 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go @@ -0,0 +1,373 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// Package diff implements an algorithm for producing edit-scripts. +// The edit-script is a sequence of operations needed to transform one list +// of symbols into another (or vice-versa). The edits allowed are insertions, +// deletions, and modifications. The summation of all edits is called the +// Levenshtein distance as this problem is well-known in computer science. +// +// This package prioritizes performance over accuracy. That is, the run time +// is more important than obtaining a minimal Levenshtein distance. +package diff + +// EditType represents a single operation within an edit-script. +type EditType uint8 + +const ( + // Identity indicates that a symbol pair is identical in both list X and Y. + Identity EditType = iota + // UniqueX indicates that a symbol only exists in X and not Y. + UniqueX + // UniqueY indicates that a symbol only exists in Y and not X. + UniqueY + // Modified indicates that a symbol pair is a modification of each other. + Modified +) + +// EditScript represents the series of differences between two lists. +type EditScript []EditType + +// String returns a human-readable string representing the edit-script where +// Identity, UniqueX, UniqueY, and Modified are represented by the +// '.', 'X', 'Y', and 'M' characters, respectively. +func (es EditScript) String() string { + b := make([]byte, len(es)) + for i, e := range es { + switch e { + case Identity: + b[i] = '.' + case UniqueX: + b[i] = 'X' + case UniqueY: + b[i] = 'Y' + case Modified: + b[i] = 'M' + default: + panic("invalid edit-type") + } + } + return string(b) +} + +// stats returns a histogram of the number of each type of edit operation. +func (es EditScript) stats() (s struct{ NI, NX, NY, NM int }) { + for _, e := range es { + switch e { + case Identity: + s.NI++ + case UniqueX: + s.NX++ + case UniqueY: + s.NY++ + case Modified: + s.NM++ + default: + panic("invalid edit-type") + } + } + return +} + +// Dist is the Levenshtein distance and is guaranteed to be 0 if and only if +// lists X and Y are equal. +func (es EditScript) Dist() int { return len(es) - es.stats().NI } + +// LenX is the length of the X list. 
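+// It equals the total number of edits minus the UniqueY count, since only
+// Identity, Modified, and UniqueX edits consume a symbol from list X.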
+func (es EditScript) LenX() int { return len(es) - es.stats().NY }
+
+// LenY is the length of the Y list.
+func (es EditScript) LenY() int { return len(es) - es.stats().NX }
+
+// EqualFunc reports whether the symbols at indexes ix and iy are equal.
+// When called by Difference, the index is guaranteed to be within nx and ny.
+type EqualFunc func(ix int, iy int) Result
+
+// Result is the result of comparison.
+// NSame is the number of sub-elements that are equal.
+// NDiff is the number of sub-elements that are not equal.
+type Result struct{ NSame, NDiff int }
+
+// Equal indicates whether the symbols are equal. Two symbols are equal
+// if and only if NDiff == 0. If Equal, then they are also Similar.
+func (r Result) Equal() bool { return r.NDiff == 0 }
+
+// Similar indicates whether two symbols are similar and may be represented
+// by using the Modified type. As a special case, we consider binary comparisons
+// (i.e., those that return Result{1, 0} or Result{0, 1}) to be similar.
+//
+// The exact ratio of NSame to NDiff to determine similarity may change.
+func (r Result) Similar() bool {
+	// Use NSame+1 to offset NSame so that binary comparisons are similar.
+	return r.NSame+1 >= r.NDiff
+}
+
+// Difference reports whether two lists of lengths nx and ny are equal
+// given the definition of equality provided as f.
+//
+// This function may return an edit-script, which is a sequence of operations
+// needed to convert one list into the other. If non-nil, the following
+// invariants for the edit-script are maintained:
+//	• eq == (es.Dist()==0)
+//	• nx == es.LenX()
+//	• ny == es.LenY()
+//
+// This algorithm is not guaranteed to be an optimal solution (i.e., one that
+// produces an edit-script with a minimal Levenshtein distance). This algorithm
+// favors performance over optimality. The exact output is not guaranteed to
+// be stable and may change over time.
+func Difference(nx, ny int, f EqualFunc) (eq bool, es EditScript) {
+	es = searchGraph(nx, ny, f)
+	st := es.stats()
+	eq = len(es) == st.NI
+	if !eq && st.NI < (nx+ny)/4 {
+		return eq, nil // Edit-script more distracting than helpful
+	}
+	return eq, es
+}
+
+func searchGraph(nx, ny int, f EqualFunc) EditScript {
+	// This algorithm is based on traversing what is known as an "edit-graph".
+	// See Figure 1 from "An O(ND) Difference Algorithm and Its Variations"
+	// by Eugene W. Myers. Since D can be as large as N itself, this is
+	// effectively O(N^2). Unlike the algorithm from that paper, we are not
+	// interested in the optimal path, but at least some "decent" path.
+	//
+	// For example, let X and Y be lists of symbols:
+	//	X = [A B C A B B A]
+	//	Y = [C B A B A C]
+	//
+	// The edit-graph can be drawn as the following:
+	//	   A B C A B B A
+	//	  ┌─────────────┐
+	//	C │_|_|\|_|_|_|_│ 0
+	//	B │_|\|_|_|\|\|_│ 1
+	//	A │\|_|_|\|_|_|\│ 2
+	//	B │_|\|_|_|\|\|_│ 3
+	//	A │\|_|_|\|_|_|\│ 4
+	//	C │ | |\| | | | │ 5
+	//	  └─────────────┘ 6
+	//	   0 1 2 3 4 5 6 7
+	//
+	// List X is written along the horizontal axis, while list Y is written
+	// along the vertical axis. At any point on this grid, if the symbol in
+	// list X matches the corresponding symbol in list Y, then a '\' is drawn.
+	// The goal of any minimal edit-script algorithm is to find a path from the
+	// top-left corner to the bottom-right corner, while traveling through the
+	// fewest horizontal or vertical edges.
+	// A horizontal edge is equivalent to inserting a symbol from list X.
+	// A vertical edge is equivalent to inserting a symbol from list Y.
+	// A diagonal edge is equivalent to a matching symbol between both X and Y.
+
+	// Invariants:
+	//	• 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx
+	//	• 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny
+	//
+	// In general:
+	//	• fwdFrontier.X < revFrontier.X
+	//	• fwdFrontier.Y < revFrontier.Y
+	// unless it is time for the algorithm to terminate.
+	fwdPath := path{+1, point{0, 0}, make(EditScript, 0, (nx+ny)/2)}
+	revPath := path{-1, point{nx, ny}, make(EditScript, 0)}
+	fwdFrontier := fwdPath.point // Forward search frontier
+	revFrontier := revPath.point // Reverse search frontier
+
+	// Search budget bounds the cost of searching for better paths.
+	// The longest sequence of non-matching symbols that can be tolerated is
+	// approximately the square-root of the search budget.
+	searchBudget := 4 * (nx + ny) // O(n)
+
+	// The algorithm below is a greedy, meet-in-the-middle algorithm for
+	// computing sub-optimal edit-scripts between two lists.
+	//
+	// The algorithm is approximately as follows:
+	//	• Searching for differences switches back-and-forth between
+	//	a search that starts at the beginning (the top-left corner), and
+	//	a search that starts at the end (the bottom-right corner). The goal of
+	//	the search is to connect with the search from the opposite corner.
+	//	• As we search, we build a path in a greedy manner, where the first
+	//	match seen is added to the path (this is sub-optimal, but provides a
+	//	decent result in practice). When matches are found, we try the next pair
+	//	of symbols in the lists and follow all matches as far as possible.
+	//	• When searching for matches, we search along a diagonal going through
+	//	the "frontier" point. If no matches are found, we advance the
+	//	frontier towards the opposite corner.
+	//	• This algorithm terminates when either the X coordinates or the
+	//	Y coordinates of the forward and reverse frontier points ever intersect.
+	//
+	// This algorithm is correct even if searching only in the forward direction
+	// or in the reverse direction. We do both because it is commonly observed
+	// that two lists differ because elements were added to the front
+	// or end of the other list.
+	//
+	// Running the tests with the "debug" build tag prints a visualization of
+	// the algorithm running in real-time. This is educational for understanding
+	// how the algorithm works. See debug_enable.go.
+	f = debug.Begin(nx, ny, f, &fwdPath.es, &revPath.es)
+	for {
+		// Forward search from the beginning.
+		if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 {
+			break
+		}
+		for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ {
+			// Search in a diagonal pattern for a match.
+			z := zigzag(i)
+			p := point{fwdFrontier.X + z, fwdFrontier.Y - z}
+			switch {
+			case p.X >= revPath.X || p.Y < fwdPath.Y:
+				stop1 = true // Hit top-right corner
+			case p.Y >= revPath.Y || p.X < fwdPath.X:
+				stop2 = true // Hit bottom-left corner
+			case f(p.X, p.Y).Equal():
+				// Match found, so connect the path to this point.
+				fwdPath.connect(p, f)
+				fwdPath.append(Identity)
+				// Follow sequence of matches as far as possible.
+				for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y {
+					if !f(fwdPath.X, fwdPath.Y).Equal() {
+						break
+					}
+					fwdPath.append(Identity)
+				}
+				fwdFrontier = fwdPath.point
+				stop1, stop2 = true, true
+			default:
+				searchBudget-- // Match not found
+			}
+			debug.Update()
+		}
+		// Advance the frontier towards reverse point.
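+		// Move along whichever axis has the greater remaining distance to
+		// the reverse path, so the frontier drifts toward the far corner
+		// along the longer dimension.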
+ if revPath.X-fwdFrontier.X >= revPath.Y-fwdFrontier.Y { + fwdFrontier.X++ + } else { + fwdFrontier.Y++ + } + + // Reverse search from the end. + if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 { + break + } + for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ { + // Search in a diagonal pattern for a match. + z := zigzag(i) + p := point{revFrontier.X - z, revFrontier.Y + z} + switch { + case fwdPath.X >= p.X || revPath.Y < p.Y: + stop1 = true // Hit bottom-left corner + case fwdPath.Y >= p.Y || revPath.X < p.X: + stop2 = true // Hit top-right corner + case f(p.X-1, p.Y-1).Equal(): + // Match found, so connect the path to this point. + revPath.connect(p, f) + revPath.append(Identity) + // Follow sequence of matches as far as possible. + for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y { + if !f(revPath.X-1, revPath.Y-1).Equal() { + break + } + revPath.append(Identity) + } + revFrontier = revPath.point + stop1, stop2 = true, true + default: + searchBudget-- // Match not found + } + debug.Update() + } + // Advance the frontier towards forward point. + if revFrontier.X-fwdPath.X >= revFrontier.Y-fwdPath.Y { + revFrontier.X-- + } else { + revFrontier.Y-- + } + } + + // Join the forward and reverse paths and then append the reverse path. + fwdPath.connect(revPath.point, f) + for i := len(revPath.es) - 1; i >= 0; i-- { + t := revPath.es[i] + revPath.es = revPath.es[:i] + fwdPath.append(t) + } + debug.Finish() + return fwdPath.es +} + +type path struct { + dir int // +1 if forward, -1 if reverse + point // Leading point of the EditScript path + es EditScript +} + +// connect appends any necessary Identity, Modified, UniqueX, or UniqueY types +// to the edit-script to connect p.point to dst. +func (p *path) connect(dst point, f EqualFunc) { + if p.dir > 0 { + // Connect in forward direction. + for dst.X > p.X && dst.Y > p.Y { + switch r := f(p.X, p.Y); { + case r.Equal(): + p.append(Identity) + case r.Similar(): + p.append(Modified) + case dst.X-p.X >= dst.Y-p.Y: + p.append(UniqueX) + default: + p.append(UniqueY) + } + } + for dst.X > p.X { + p.append(UniqueX) + } + for dst.Y > p.Y { + p.append(UniqueY) + } + } else { + // Connect in reverse direction. + for p.X > dst.X && p.Y > dst.Y { + switch r := f(p.X-1, p.Y-1); { + case r.Equal(): + p.append(Identity) + case r.Similar(): + p.append(Modified) + case p.Y-dst.Y >= p.X-dst.X: + p.append(UniqueY) + default: + p.append(UniqueX) + } + } + for p.X > dst.X { + p.append(UniqueX) + } + for p.Y > dst.Y { + p.append(UniqueY) + } + } +} + +func (p *path) append(t EditType) { + p.es = append(p.es, t) + switch t { + case Identity, Modified: + p.add(p.dir, p.dir) + case UniqueX: + p.add(p.dir, 0) + case UniqueY: + p.add(0, p.dir) + } + debug.Update() +} + +type point struct{ X, Y int } + +func (p *point) add(dx, dy int) { p.X += dx; p.Y += dy } + +// zigzag maps a consecutive sequence of integers to a zig-zag sequence. +// [0 1 2 3 4 5 ...] => [0 -1 +1 -2 +2 ...] +func zigzag(x int) int { + if x&1 != 0 { + x = ^x + } + return x >> 1 +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/diff_test.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff_test.go new file mode 100644 index 000000000..5996ea2a9 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff_test.go @@ -0,0 +1,467 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. 
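+
+// The tests below drive Difference over byte strings: two bytes compare as
+// Equal when identical, Similar when they differ only in letter case, and
+// different otherwise (see compareByte later in this file).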
+ +package diff + +import ( + "fmt" + "math/rand" + "strings" + "testing" + "unicode" +) + +func TestDifference(t *testing.T) { + tests := []struct { + // Before passing x and y to Difference, we strip all spaces so that + // they can be used by the test author to indicate a missing symbol + // in one of the lists. + x, y string + want string + }{{ + x: "", + y: "", + want: "", + }, { + x: "#", + y: "#", + want: ".", + }, { + x: "##", + y: "# ", + want: ".X", + }, { + x: "a#", + y: "A ", + want: "MX", + }, { + x: "#a", + y: " A", + want: "XM", + }, { + x: "# ", + y: "##", + want: ".Y", + }, { + x: " #", + y: "@#", + want: "Y.", + }, { + x: "@#", + y: " #", + want: "X.", + }, { + x: "##########0123456789", + y: " 0123456789", + want: "XXXXXXXXXX..........", + }, { + x: " 0123456789", + y: "##########0123456789", + want: "YYYYYYYYYY..........", + }, { + x: "#####0123456789#####", + y: " 0123456789 ", + want: "XXXXX..........XXXXX", + }, { + x: " 0123456789 ", + y: "#####0123456789#####", + want: "YYYYY..........YYYYY", + }, { + x: "01234##########56789", + y: "01234 56789", + want: ".....XXXXXXXXXX.....", + }, { + x: "01234 56789", + y: "01234##########56789", + want: ".....YYYYYYYYYY.....", + }, { + x: "0123456789##########", + y: "0123456789 ", + want: "..........XXXXXXXXXX", + }, { + x: "0123456789 ", + y: "0123456789##########", + want: "..........YYYYYYYYYY", + }, { + x: "abcdefghij0123456789", + y: "ABCDEFGHIJ0123456789", + want: "MMMMMMMMMM..........", + }, { + x: "ABCDEFGHIJ0123456789", + y: "abcdefghij0123456789", + want: "MMMMMMMMMM..........", + }, { + x: "01234abcdefghij56789", + y: "01234ABCDEFGHIJ56789", + want: ".....MMMMMMMMMM.....", + }, { + x: "01234ABCDEFGHIJ56789", + y: "01234abcdefghij56789", + want: ".....MMMMMMMMMM.....", + }, { + x: "0123456789abcdefghij", + y: "0123456789ABCDEFGHIJ", + want: "..........MMMMMMMMMM", + }, { + x: "0123456789ABCDEFGHIJ", + y: "0123456789abcdefghij", + want: "..........MMMMMMMMMM", + }, { + x: "ABCDEFGHIJ0123456789 ", + y: " 0123456789abcdefghij", + want: "XXXXXXXXXX..........YYYYYYYYYY", + }, { + x: " 0123456789abcdefghij", + y: "ABCDEFGHIJ0123456789 ", + want: "YYYYYYYYYY..........XXXXXXXXXX", + }, { + x: "ABCDE0123456789 FGHIJ", + y: " 0123456789abcdefghij", + want: "XXXXX..........YYYYYMMMMM", + }, { + x: " 0123456789abcdefghij", + y: "ABCDE0123456789 FGHIJ", + want: "YYYYY..........XXXXXMMMMM", + }, { + x: "ABCDE01234F G H I J 56789 ", + y: " 01234 a b c d e56789fghij", + want: "XXXXX.....XYXYXYXYXY.....YYYYY", + }, { + x: " 01234a b c d e 56789fghij", + y: "ABCDE01234 F G H I J56789 ", + want: "YYYYY.....XYXYXYXYXY.....XXXXX", + }, { + x: "FGHIJ01234ABCDE56789 ", + y: " 01234abcde56789fghij", + want: "XXXXX.....MMMMM.....YYYYY", + }, { + x: " 01234abcde56789fghij", + y: "FGHIJ01234ABCDE56789 ", + want: "YYYYY.....MMMMM.....XXXXX", + }, { + x: "ABCAB BA ", + y: " C BABAC", + want: "XX.X.Y..Y", + }, { + x: "# #### ###", + y: "#y####yy###", + want: ".Y....YY...", + }, { + x: "# #### # ##x#x", + y: "#y####y y## # ", + want: ".Y....YXY..X.X", + }, { + x: "###z#z###### x #", + y: "#y##Z#Z###### yy#", + want: ".Y..M.M......XYY.", + }, { + x: "0 12z3x 456789 x x 0", + y: "0y12Z3 y456789y y y0", + want: ".Y..M.XY......YXYXY.", + }, { + x: "0 2 4 6 8 ..................abXXcdEXF.ghXi", + y: " 1 3 5 7 9..................AB CDE F.GH I", + want: "XYXYXYXYXY..................MMXXMM.X..MMXM", + }, { + x: "I HG.F EDC BA..................9 7 5 3 1 ", + y: "iXhg.FXEdcXXba.................. 
8 6 4 2 0", + want: "MYMM..Y.MMYYMM..................XYXYXYXYXY", + }, { + x: "x1234", + y: " 1234", + want: "X....", + }, { + x: "x123x4", + y: " 123 4", + want: "X...X.", + }, { + x: "x1234x56", + y: " 1234 ", + want: "X....XXX", + }, { + x: "x1234xxx56", + y: " 1234 56", + want: "X....XXX..", + }, { + x: ".1234...ab", + y: " 1234 AB", + want: "X....XXXMM", + }, { + x: "x1234xxab.", + y: " 1234 AB ", + want: "X....XXMMX", + }, { + x: " 0123456789", + y: "9012345678 ", + want: "Y.........X", + }, { + x: " 0123456789", + y: "8901234567 ", + want: "YY........XX", + }, { + x: " 0123456789", + y: "7890123456 ", + want: "YYY.......XXX", + }, { + x: " 0123456789", + y: "6789012345 ", + want: "YYYY......XXXX", + }, { + x: "0123456789 ", + y: " 5678901234", + want: "XXXXX.....YYYYY", + }, { + x: "0123456789 ", + y: " 4567890123", + want: "XXXX......YYYY", + }, { + x: "0123456789 ", + y: " 3456789012", + want: "XXX.......YYY", + }, { + x: "0123456789 ", + y: " 2345678901", + want: "XX........YY", + }, { + x: "0123456789 ", + y: " 1234567890", + want: "X.........Y", + }, { + x: "0123456789", + y: "9876543210", + }, { + x: "0123456789", + y: "6725819034", + }, { + x: "FBQMOIGTLN72X90E4SP651HKRJUDA83CVZW", + y: "5WHXO10R9IVKZLCTAJ8P3NSEQM472G6UBDF", + }} + + for _, tt := range tests { + tRun(t, "", func(t *testing.T) { + x := strings.Replace(tt.x, " ", "", -1) + y := strings.Replace(tt.y, " ", "", -1) + es := testStrings(t, x, y) + if got := es.String(); got != tt.want { + t.Errorf("Difference(%s, %s):\ngot %s\nwant %s", x, y, got, tt.want) + } + }) + } +} + +func TestDifferenceFuzz(t *testing.T) { + tests := []struct{ px, py, pm float32 }{ + {px: 0.0, py: 0.0, pm: 0.1}, + {px: 0.0, py: 0.1, pm: 0.0}, + {px: 0.1, py: 0.0, pm: 0.0}, + {px: 0.0, py: 0.1, pm: 0.1}, + {px: 0.1, py: 0.0, pm: 0.1}, + {px: 0.2, py: 0.2, pm: 0.2}, + {px: 0.3, py: 0.1, pm: 0.2}, + {px: 0.1, py: 0.3, pm: 0.2}, + {px: 0.2, py: 0.2, pm: 0.2}, + {px: 0.3, py: 0.3, pm: 0.3}, + {px: 0.1, py: 0.1, pm: 0.5}, + {px: 0.4, py: 0.1, pm: 0.5}, + {px: 0.3, py: 0.2, pm: 0.5}, + {px: 0.2, py: 0.3, pm: 0.5}, + {px: 0.1, py: 0.4, pm: 0.5}, + } + + for i, tt := range tests { + tRun(t, fmt.Sprintf("P%d", i), func(t *testing.T) { + // Sweep from 1B to 1KiB. + for n := 1; n <= 1024; n <<= 1 { + tRun(t, fmt.Sprintf("N%d", n), func(t *testing.T) { + for j := 0; j < 10; j++ { + x, y := generateStrings(n, tt.px, tt.py, tt.pm, int64(j)) + testStrings(t, x, y) + } + }) + } + }) + } +} + +func benchmarkDifference(b *testing.B, n int) { + // TODO: Use testing.B.Run when we drop Go1.6 support. 
+	x, y := generateStrings(n, 0.05, 0.05, 0.10, 0)
+	b.ReportAllocs()
+	b.SetBytes(int64(len(x) + len(y)))
+	for i := 0; i < b.N; i++ {
+		Difference(len(x), len(y), func(ix, iy int) Result {
+			return compareByte(x[ix], y[iy])
+		})
+	}
+}
+func BenchmarkDifference1K(b *testing.B)   { benchmarkDifference(b, 1<<10) }
+func BenchmarkDifference4K(b *testing.B)   { benchmarkDifference(b, 1<<12) }
+func BenchmarkDifference16K(b *testing.B)  { benchmarkDifference(b, 1<<14) }
+func BenchmarkDifference64K(b *testing.B)  { benchmarkDifference(b, 1<<16) }
+func BenchmarkDifference256K(b *testing.B) { benchmarkDifference(b, 1<<18) }
+func BenchmarkDifference1M(b *testing.B)   { benchmarkDifference(b, 1<<20) }
+
+func generateStrings(n int, px, py, pm float32, seed int64) (string, string) {
+	if px+py+pm > 1.0 {
+		panic("invalid probabilities")
+	}
+	py += px
+	pm += py
+
+	b := make([]byte, n)
+	r := rand.New(rand.NewSource(seed))
+	r.Read(b)
+
+	var x, y []byte
+	for len(b) > 0 {
+		switch p := r.Float32(); {
+		case p < px: // UniqueX
+			x = append(x, b[0])
+		case p < py: // UniqueY
+			y = append(y, b[0])
+		case p < pm: // Modified
+			x = append(x, 'A'+(b[0]%26))
+			y = append(y, 'a'+(b[0]%26))
+		default: // Identity
+			x = append(x, b[0])
+			y = append(y, b[0])
+		}
+		b = b[1:]
+	}
+	return string(x), string(y)
+}
+
+func testStrings(t *testing.T, x, y string) EditScript {
+	wantEq := x == y
+	eq, es := Difference(len(x), len(y), func(ix, iy int) Result {
+		return compareByte(x[ix], y[iy])
+	})
+	if eq != wantEq {
+		t.Errorf("equality mismatch: got %v, want %v", eq, wantEq)
+	}
+	if es != nil {
+		if es.LenX() != len(x) {
+			t.Errorf("es.LenX = %d, want %d", es.LenX(), len(x))
+		}
+		if es.LenY() != len(y) {
+			t.Errorf("es.LenY = %d, want %d", es.LenY(), len(y))
+		}
+		if got := (es.Dist() == 0); got != wantEq {
+			t.Errorf("violation of equality invariant: got %v, want %v", got, wantEq)
+		}
+		if !validateScript(x, y, es) {
+			t.Errorf("invalid edit script: %v", es)
+		}
+	}
+	return es
+}
+
+func validateScript(x, y string, es EditScript) bool {
+	var bx, by []byte
+	for _, e := range es {
+		switch e {
+		case Identity:
+			if !compareByte(x[len(bx)], y[len(by)]).Equal() {
+				return false
+			}
+			bx = append(bx, x[len(bx)])
+			by = append(by, y[len(by)])
+		case UniqueX:
+			bx = append(bx, x[len(bx)])
+		case UniqueY:
+			by = append(by, y[len(by)])
+		case Modified:
+			if !compareByte(x[len(bx)], y[len(by)]).Similar() {
+				return false
+			}
+			bx = append(bx, x[len(bx)])
+			by = append(by, y[len(by)])
+		}
+	}
+	return string(bx) == x && string(by) == y
+}
+
+// compareByte returns a Result where the result is Equal if x == y,
+// Similar if x and y differ only in casing, and different otherwise.
+func compareByte(x, y byte) (r Result) {
+	switch {
+	case x == y:
+		return equalResult // Identity
+	case unicode.ToUpper(rune(x)) == unicode.ToUpper(rune(y)):
+		return similarResult // Modified
+	default:
+		return differentResult // UniqueX or UniqueY
+	}
+}
+
+var (
+	equalResult     = Result{NDiff: 0}
+	similarResult   = Result{NDiff: 1}
+	differentResult = Result{NDiff: 2}
+)
+
+func TestResult(t *testing.T) {
+	tests := []struct {
+		result      Result
+		wantEqual   bool
+		wantSimilar bool
+	}{
+		// equalResult is equal since NDiff == 0, by definition of Equal method.
+		{equalResult, true, true},
+		// similarResult is similar since it is a binary result where only one
+		// element was compared (i.e., either NSame==1 or NDiff==1).
+ {similarResult, false, true}, + // differentResult is different since there are enough differences that + // it isn't even considered similar. + {differentResult, false, false}, + + // Zero value is always equal. + {Result{NSame: 0, NDiff: 0}, true, true}, + + // Binary comparisons (where NSame+NDiff == 1) are always similar. + {Result{NSame: 1, NDiff: 0}, true, true}, + {Result{NSame: 0, NDiff: 1}, false, true}, + + // More complex ratios. The exact ratio for similarity may change, + // and may require updates to these test cases. + {Result{NSame: 1, NDiff: 1}, false, true}, + {Result{NSame: 1, NDiff: 2}, false, true}, + {Result{NSame: 1, NDiff: 3}, false, false}, + {Result{NSame: 2, NDiff: 1}, false, true}, + {Result{NSame: 2, NDiff: 2}, false, true}, + {Result{NSame: 2, NDiff: 3}, false, true}, + {Result{NSame: 3, NDiff: 1}, false, true}, + {Result{NSame: 3, NDiff: 2}, false, true}, + {Result{NSame: 3, NDiff: 3}, false, true}, + {Result{NSame: 1000, NDiff: 0}, true, true}, + {Result{NSame: 1000, NDiff: 1}, false, true}, + {Result{NSame: 1000, NDiff: 2}, false, true}, + {Result{NSame: 0, NDiff: 1000}, false, false}, + {Result{NSame: 1, NDiff: 1000}, false, false}, + {Result{NSame: 2, NDiff: 1000}, false, false}, + } + + for _, tt := range tests { + if got := tt.result.Equal(); got != tt.wantEqual { + t.Errorf("%#v.Equal() = %v, want %v", tt.result, got, tt.wantEqual) + } + if got := tt.result.Similar(); got != tt.wantSimilar { + t.Errorf("%#v.Similar() = %v, want %v", tt.result, got, tt.wantSimilar) + } + } +} + +// TODO: Delete this hack when we drop Go1.6 support. +func tRun(t *testing.T, name string, f func(t *testing.T)) { + type runner interface { + Run(string, func(t *testing.T)) bool + } + var ti interface{} = t + if r, ok := ti.(runner); ok { + r.Run(name, f) + } else { + t.Logf("Test: %s", name) + f(t) + } +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/function/func.go b/vendor/github.com/google/go-cmp/cmp/internal/function/func.go new file mode 100644 index 000000000..4c35ff11e --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/function/func.go @@ -0,0 +1,49 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// Package function identifies function types. +package function + +import "reflect" + +type funcType int + +const ( + _ funcType = iota + + ttbFunc // func(T, T) bool + tibFunc // func(T, I) bool + trFunc // func(T) R + + Equal = ttbFunc // func(T, T) bool + EqualAssignable = tibFunc // func(T, I) bool; encapsulates func(T, T) bool + Transformer = trFunc // func(T) R + ValueFilter = ttbFunc // func(T, T) bool + Less = ttbFunc // func(T, T) bool +) + +var boolType = reflect.TypeOf(true) + +// IsType reports whether the reflect.Type is of the specified function type. 
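+// It reports false for nil types, non-function types, and variadic functions.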
+func IsType(t reflect.Type, ft funcType) bool {
+	if t == nil || t.Kind() != reflect.Func || t.IsVariadic() {
+		return false
+	}
+	ni, no := t.NumIn(), t.NumOut()
+	switch ft {
+	case ttbFunc: // func(T, T) bool
+		if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == boolType {
+			return true
+		}
+	case tibFunc: // func(T, I) bool
+		if ni == 2 && no == 1 && t.In(0).AssignableTo(t.In(1)) && t.Out(0) == boolType {
+			return true
+		}
+	case trFunc: // func(T) R
+		if ni == 1 && no == 1 {
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/testprotos/protos.go b/vendor/github.com/google/go-cmp/cmp/internal/testprotos/protos.go
new file mode 100644
index 000000000..120c8b0e8
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/internal/testprotos/protos.go
@@ -0,0 +1,116 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+package testprotos
+
+func Equal(x, y Message) bool {
+	if x == nil || y == nil {
+		return x == nil && y == nil
+	}
+	return x.String() == y.String()
+}
+
+type Message interface {
+	Proto()
+	String() string
+}
+
+type proto interface {
+	Proto()
+}
+
+type notComparable struct {
+	unexportedField func()
+}
+
+type Stringer struct{ X string }
+
+func (s *Stringer) String() string { return s.X }
+
+// Project1 protocol buffers
+type (
+	Eagle_States         int
+	Eagle_MissingCalls   int
+	Dreamer_States       int
+	Dreamer_MissingCalls int
+	Slap_States          int
+	Goat_States          int
+	Donkey_States        int
+	SummerType           int
+
+	Eagle struct {
+		proto
+		notComparable
+		Stringer
+	}
+	Dreamer struct {
+		proto
+		notComparable
+		Stringer
+	}
+	Slap struct {
+		proto
+		notComparable
+		Stringer
+	}
+	Goat struct {
+		proto
+		notComparable
+		Stringer
+	}
+	Donkey struct {
+		proto
+		notComparable
+		Stringer
+	}
+)
+
+// Project2 protocol buffers
+type (
+	Germ struct {
+		proto
+		notComparable
+		Stringer
+	}
+	Dish struct {
+		proto
+		notComparable
+		Stringer
+	}
+)
+
+// Project3 protocol buffers
+type (
+	Dirt struct {
+		proto
+		notComparable
+		Stringer
+	}
+	Wizard struct {
+		proto
+		notComparable
+		Stringer
+	}
+	Sadistic struct {
+		proto
+		notComparable
+		Stringer
+	}
+)
+
+// Project4 protocol buffers
+type (
+	HoneyStatus int
+	PoisonType  int
+	MetaData    struct {
+		proto
+		notComparable
+		Stringer
+	}
+	Restrictions struct {
+		proto
+		notComparable
+		Stringer
+	}
+)
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/teststructs/project1.go b/vendor/github.com/google/go-cmp/cmp/internal/teststructs/project1.go
new file mode 100644
index 000000000..1999e38fd
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/internal/teststructs/project1.go
@@ -0,0 +1,267 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+package teststructs
+
+import (
+	"time"
+
+	pb "github.com/google/go-cmp/cmp/internal/testprotos"
+)
+
+// This is a sanitized example of equality from a real use-case.
+// The original equality function was as follows: +/* +func equalEagle(x, y Eagle) bool { + if x.Name != y.Name && + !reflect.DeepEqual(x.Hounds, y.Hounds) && + x.Desc != y.Desc && + x.DescLong != y.DescLong && + x.Prong != y.Prong && + x.StateGoverner != y.StateGoverner && + x.PrankRating != y.PrankRating && + x.FunnyPrank != y.FunnyPrank && + !pb.Equal(x.Immutable.Proto(), y.Immutable.Proto()) { + return false + } + + if len(x.Dreamers) != len(y.Dreamers) { + return false + } + for i := range x.Dreamers { + if !equalDreamer(x.Dreamers[i], y.Dreamers[i]) { + return false + } + } + if len(x.Slaps) != len(y.Slaps) { + return false + } + for i := range x.Slaps { + if !equalSlap(x.Slaps[i], y.Slaps[i]) { + return false + } + } + return true +} +func equalDreamer(x, y Dreamer) bool { + if x.Name != y.Name || + x.Desc != y.Desc || + x.DescLong != y.DescLong || + x.ContSlapsInterval != y.ContSlapsInterval || + x.Ornamental != y.Ornamental || + x.Amoeba != y.Amoeba || + x.Heroes != y.Heroes || + x.FloppyDisk != y.FloppyDisk || + x.MightiestDuck != y.MightiestDuck || + x.FunnyPrank != y.FunnyPrank || + !pb.Equal(x.Immutable.Proto(), y.Immutable.Proto()) { + + return false + } + if len(x.Animal) != len(y.Animal) { + return false + } + for i := range x.Animal { + vx := x.Animal[i] + vy := y.Animal[i] + if reflect.TypeOf(x.Animal) != reflect.TypeOf(y.Animal) { + return false + } + switch vx.(type) { + case Goat: + if !equalGoat(vx.(Goat), vy.(Goat)) { + return false + } + case Donkey: + if !equalDonkey(vx.(Donkey), vy.(Donkey)) { + return false + } + default: + panic(fmt.Sprintf("unknown type: %T", vx)) + } + } + if len(x.PreSlaps) != len(y.PreSlaps) { + return false + } + for i := range x.PreSlaps { + if !equalSlap(x.PreSlaps[i], y.PreSlaps[i]) { + return false + } + } + if len(x.ContSlaps) != len(y.ContSlaps) { + return false + } + for i := range x.ContSlaps { + if !equalSlap(x.ContSlaps[i], y.ContSlaps[i]) { + return false + } + } + return true +} +func equalSlap(x, y Slap) bool { + return x.Name == y.Name && + x.Desc == y.Desc && + x.DescLong == y.DescLong && + pb.Equal(x.Args, y.Args) && + x.Tense == y.Tense && + x.Interval == y.Interval && + x.Homeland == y.Homeland && + x.FunnyPrank == y.FunnyPrank && + pb.Equal(x.Immutable.Proto(), y.Immutable.Proto()) +} +func equalGoat(x, y Goat) bool { + if x.Target != y.Target || + x.FunnyPrank != y.FunnyPrank || + !pb.Equal(x.Immutable.Proto(), y.Immutable.Proto()) { + return false + } + if len(x.Slaps) != len(y.Slaps) { + return false + } + for i := range x.Slaps { + if !equalSlap(x.Slaps[i], y.Slaps[i]) { + return false + } + } + return true +} +func equalDonkey(x, y Donkey) bool { + return x.Pause == y.Pause && + x.Sleep == y.Sleep && + x.FunnyPrank == y.FunnyPrank && + pb.Equal(x.Immutable.Proto(), y.Immutable.Proto()) +} +*/ + +type Eagle struct { + Name string + Hounds []string + Desc string + DescLong string + Dreamers []Dreamer + Prong int64 + Slaps []Slap + StateGoverner string + PrankRating string + FunnyPrank string + Immutable *EagleImmutable +} + +type EagleImmutable struct { + ID string + State *pb.Eagle_States + MissingCall *pb.Eagle_MissingCalls + Birthday time.Time + Death time.Time + Started time.Time + LastUpdate time.Time + Creator string + empty bool +} + +type Dreamer struct { + Name string + Desc string + DescLong string + PreSlaps []Slap + ContSlaps []Slap + ContSlapsInterval int32 + Animal []interface{} // Could be either Goat or Donkey + Ornamental bool + Amoeba int64 + Heroes int32 + FloppyDisk int32 + MightiestDuck bool + 
FunnyPrank string
+	Immutable         *DreamerImmutable
+}
+
+type DreamerImmutable struct {
+	ID          string
+	State       *pb.Dreamer_States
+	MissingCall *pb.Dreamer_MissingCalls
+	Calls       int32
+	Started     time.Time
+	Stopped     time.Time
+	LastUpdate  time.Time
+	empty       bool
+}
+
+type Slap struct {
+	Name       string
+	Desc       string
+	DescLong   string
+	Args       pb.Message
+	Tense      int32
+	Interval   int32
+	Homeland   uint32
+	FunnyPrank string
+	Immutable  *SlapImmutable
+}
+
+type SlapImmutable struct {
+	ID          string
+	Out         pb.Message
+	MildSlap    bool
+	PrettyPrint string
+	State       *pb.Slap_States
+	Started     time.Time
+	Stopped     time.Time
+	LastUpdate  time.Time
+	LoveRadius  *LoveRadius
+	empty       bool
+}
+
+type Goat struct {
+	Target     string
+	Slaps      []Slap
+	FunnyPrank string
+	Immutable  *GoatImmutable
+}
+
+type GoatImmutable struct {
+	ID         string
+	State      *pb.Goat_States
+	Started    time.Time
+	Stopped    time.Time
+	LastUpdate time.Time
+	empty      bool
+}
+type Donkey struct {
+	Pause      bool
+	Sleep      int32
+	FunnyPrank string
+	Immutable  *DonkeyImmutable
+}
+
+type DonkeyImmutable struct {
+	ID         string
+	State      *pb.Donkey_States
+	Started    time.Time
+	Stopped    time.Time
+	LastUpdate time.Time
+	empty      bool
+}
+
+type LoveRadius struct {
+	Summer *SummerLove
+	empty  bool
+}
+
+type SummerLove struct {
+	Summary *SummerLoveSummary
+	empty   bool
+}
+
+type SummerLoveSummary struct {
+	Devices    []string
+	ChangeType []pb.SummerType
+	empty      bool
+}
+
+func (EagleImmutable) Proto() *pb.Eagle     { return nil }
+func (DreamerImmutable) Proto() *pb.Dreamer { return nil }
+func (SlapImmutable) Proto() *pb.Slap       { return nil }
+func (GoatImmutable) Proto() *pb.Goat       { return nil }
+func (DonkeyImmutable) Proto() *pb.Donkey   { return nil }
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/teststructs/project2.go b/vendor/github.com/google/go-cmp/cmp/internal/teststructs/project2.go
new file mode 100644
index 000000000..536592bbe
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/internal/teststructs/project2.go
@@ -0,0 +1,74 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+package teststructs
+
+import (
+	"time"
+
+	pb "github.com/google/go-cmp/cmp/internal/testprotos"
+)
+
+// This is a sanitized example of equality from a real use-case.
+// The original equality function was as follows:
+/*
+func equalBatch(b1, b2 *GermBatch) bool {
+ for _, b := range []*GermBatch{b1, b2} {
+ for _, l := range b.DirtyGerms {
+ sort.Slice(l, func(i, j int) bool { return l[i].String() < l[j].String() })
+ }
+ for _, l := range b.CleanGerms {
+ sort.Slice(l, func(i, j int) bool { return l[i].String() < l[j].String() })
+ }
+ }
+ if !pb.DeepEqual(b1.DirtyGerms, b2.DirtyGerms) ||
+ !pb.DeepEqual(b1.CleanGerms, b2.CleanGerms) ||
+ !pb.DeepEqual(b1.GermMap, b2.GermMap) {
+ return false
+ }
+ if len(b1.DishMap) != len(b2.DishMap) {
+ return false
+ }
+ for id := range b1.DishMap {
+ kpb1, err1 := b1.DishMap[id].Proto()
+ kpb2, err2 := b2.DishMap[id].Proto()
+ if !pb.Equal(kpb1, kpb2) || !reflect.DeepEqual(err1, err2) {
+ return false
+ }
+ }
+ return b1.HasPreviousResult == b2.HasPreviousResult &&
+ b1.DirtyID == b2.DirtyID &&
+ b1.CleanID == b2.CleanID &&
+ b1.GermStrain == b2.GermStrain &&
+ b1.TotalDirtyGerms == b2.TotalDirtyGerms &&
+ b1.InfectedAt.Equal(b2.InfectedAt)
+}
+*/
+
+type GermBatch struct {
+ DirtyGerms, CleanGerms map[int32][]*pb.Germ
+ GermMap map[int32]*pb.Germ
+ DishMap map[int32]*Dish
+ HasPreviousResult bool
+ DirtyID, CleanID int32
+ GermStrain int32
+ TotalDirtyGerms int
+ InfectedAt time.Time
+}
+
+type Dish struct {
+ pb *pb.Dish
+ err error
+}
+
+func CreateDish(m *pb.Dish, err error) *Dish {
+ return &Dish{pb: m, err: err}
+}
+
+func (d *Dish) Proto() (*pb.Dish, error) {
+ if d.err != nil {
+ return nil, d.err
+ }
+ return d.pb, nil
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/teststructs/project3.go b/vendor/github.com/google/go-cmp/cmp/internal/teststructs/project3.go
new file mode 100644
index 000000000..00c252e5e
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/internal/teststructs/project3.go
@@ -0,0 +1,77 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+package teststructs
+
+import (
+ "sync"
+
+ pb "github.com/google/go-cmp/cmp/internal/testprotos"
+)
+
+// This is a sanitized example of equality from a real use-case.
+// The original equality function was as follows:
+/*
+func equalDirt(x, y *Dirt) bool {
+ if !reflect.DeepEqual(x.table, y.table) ||
+ !reflect.DeepEqual(x.ts, y.ts) ||
+ x.Discord != y.Discord ||
+ !pb.Equal(&x.Proto, &y.Proto) ||
+ len(x.wizard) != len(y.wizard) ||
+ len(x.sadistic) != len(y.sadistic) ||
+ x.lastTime != y.lastTime {
+ return false
+ }
+ for k, vx := range x.wizard {
+ vy, ok := y.wizard[k]
+ if !ok || !pb.Equal(vx, vy) {
+ return false
+ }
+ }
+ for k, vx := range x.sadistic {
+ vy, ok := y.sadistic[k]
+ if !ok || !pb.Equal(vx, vy) {
+ return false
+ }
+ }
+ return true
+}
+*/
+
+type Dirt struct {
+ table Table // Always concrete type of MockTable
+ ts Timestamp
+ Discord DiscordState
+ Proto pb.Dirt
+ wizard map[string]*pb.Wizard
+ sadistic map[string]*pb.Sadistic
+ lastTime int64
+ mu sync.Mutex
+}
+
+type DiscordState int
+
+type Timestamp int64
+
+func (d *Dirt) SetTable(t Table) { d.table = t }
+func (d *Dirt) SetTimestamp(t Timestamp) { d.ts = t }
+func (d *Dirt) SetWizard(m map[string]*pb.Wizard) { d.wizard = m }
+func (d *Dirt) SetSadistic(m map[string]*pb.Sadistic) { d.sadistic = m }
+func (d *Dirt) SetLastTime(t int64) { d.lastTime = t }
+
+type Table interface {
+ Operation1() error
+ Operation2() error
+ Operation3() error
+}
+
+type MockTable struct {
+ state []string
+}
+
+func CreateMockTable(s []string) *MockTable { return &MockTable{s} }
+func (mt *MockTable) Operation1() error { return nil }
+func (mt *MockTable) Operation2() error { return nil }
+func (mt *MockTable) Operation3() error { return nil }
+func (mt *MockTable) State() []string { return mt.state }
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/teststructs/project4.go b/vendor/github.com/google/go-cmp/cmp/internal/teststructs/project4.go
new file mode 100644
index 000000000..9b50d738e
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/internal/teststructs/project4.go
@@ -0,0 +1,142 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+package teststructs
+
+import (
+ "time"
+
+ pb "github.com/google/go-cmp/cmp/internal/testprotos"
+)
+
+// This is a sanitized example of equality from a real use-case.
+// The original equality function was as follows: +/* +func equalCartel(x, y Cartel) bool { + if !(equalHeadquarter(x.Headquarter, y.Headquarter) && + x.Source() == y.Source() && + x.CreationDate().Equal(y.CreationDate()) && + x.Boss() == y.Boss() && + x.LastCrimeDate().Equal(y.LastCrimeDate())) { + return false + } + if len(x.Poisons()) != len(y.Poisons()) { + return false + } + for i := range x.Poisons() { + if !equalPoison(*x.Poisons()[i], *y.Poisons()[i]) { + return false + } + } + return true +} +func equalHeadquarter(x, y Headquarter) bool { + xr, yr := x.Restrictions(), y.Restrictions() + return x.ID() == y.ID() && + x.Location() == y.Location() && + reflect.DeepEqual(x.SubDivisions(), y.SubDivisions()) && + x.IncorporatedDate().Equal(y.IncorporatedDate()) && + pb.Equal(x.MetaData(), y.MetaData()) && + bytes.Equal(x.PrivateMessage(), y.PrivateMessage()) && + bytes.Equal(x.PublicMessage(), y.PublicMessage()) && + x.HorseBack() == y.HorseBack() && + x.Rattle() == y.Rattle() && + x.Convulsion() == y.Convulsion() && + x.Expansion() == y.Expansion() && + x.Status() == y.Status() && + pb.Equal(&xr, &yr) && + x.CreationTime().Equal(y.CreationTime()) +} +func equalPoison(x, y Poison) bool { + return x.PoisonType() == y.PoisonType() && + x.Expiration().Equal(y.Expiration()) && + x.Manufactuer() == y.Manufactuer() && + x.Potency() == y.Potency() +} +*/ + +type Cartel struct { + Headquarter + source string + creationDate time.Time + boss string + lastCrimeDate time.Time + poisons []*Poison +} + +func (p Cartel) Source() string { return p.source } +func (p Cartel) CreationDate() time.Time { return p.creationDate } +func (p Cartel) Boss() string { return p.boss } +func (p Cartel) LastCrimeDate() time.Time { return p.lastCrimeDate } +func (p Cartel) Poisons() []*Poison { return p.poisons } + +func (p *Cartel) SetSource(x string) { p.source = x } +func (p *Cartel) SetCreationDate(x time.Time) { p.creationDate = x } +func (p *Cartel) SetBoss(x string) { p.boss = x } +func (p *Cartel) SetLastCrimeDate(x time.Time) { p.lastCrimeDate = x } +func (p *Cartel) SetPoisons(x []*Poison) { p.poisons = x } + +type Headquarter struct { + id uint64 + location string + subDivisions []string + incorporatedDate time.Time + metaData *pb.MetaData + privateMessage []byte + publicMessage []byte + horseBack string + rattle string + convulsion bool + expansion uint64 + status pb.HoneyStatus + restrictions pb.Restrictions + creationTime time.Time +} + +func (hq Headquarter) ID() uint64 { return hq.id } +func (hq Headquarter) Location() string { return hq.location } +func (hq Headquarter) SubDivisions() []string { return hq.subDivisions } +func (hq Headquarter) IncorporatedDate() time.Time { return hq.incorporatedDate } +func (hq Headquarter) MetaData() *pb.MetaData { return hq.metaData } +func (hq Headquarter) PrivateMessage() []byte { return hq.privateMessage } +func (hq Headquarter) PublicMessage() []byte { return hq.publicMessage } +func (hq Headquarter) HorseBack() string { return hq.horseBack } +func (hq Headquarter) Rattle() string { return hq.rattle } +func (hq Headquarter) Convulsion() bool { return hq.convulsion } +func (hq Headquarter) Expansion() uint64 { return hq.expansion } +func (hq Headquarter) Status() pb.HoneyStatus { return hq.status } +func (hq Headquarter) Restrictions() pb.Restrictions { return hq.restrictions } +func (hq Headquarter) CreationTime() time.Time { return hq.creationTime } + +func (hq *Headquarter) SetID(x uint64) { hq.id = x } +func (hq *Headquarter) SetLocation(x string) { 
hq.location = x } +func (hq *Headquarter) SetSubDivisions(x []string) { hq.subDivisions = x } +func (hq *Headquarter) SetIncorporatedDate(x time.Time) { hq.incorporatedDate = x } +func (hq *Headquarter) SetMetaData(x *pb.MetaData) { hq.metaData = x } +func (hq *Headquarter) SetPrivateMessage(x []byte) { hq.privateMessage = x } +func (hq *Headquarter) SetPublicMessage(x []byte) { hq.publicMessage = x } +func (hq *Headquarter) SetHorseBack(x string) { hq.horseBack = x } +func (hq *Headquarter) SetRattle(x string) { hq.rattle = x } +func (hq *Headquarter) SetConvulsion(x bool) { hq.convulsion = x } +func (hq *Headquarter) SetExpansion(x uint64) { hq.expansion = x } +func (hq *Headquarter) SetStatus(x pb.HoneyStatus) { hq.status = x } +func (hq *Headquarter) SetRestrictions(x pb.Restrictions) { hq.restrictions = x } +func (hq *Headquarter) SetCreationTime(x time.Time) { hq.creationTime = x } + +type Poison struct { + poisonType pb.PoisonType + expiration time.Time + manufactuer string + potency int +} + +func (p Poison) PoisonType() pb.PoisonType { return p.poisonType } +func (p Poison) Expiration() time.Time { return p.expiration } +func (p Poison) Manufactuer() string { return p.manufactuer } +func (p Poison) Potency() int { return p.potency } + +func (p *Poison) SetPoisonType(x pb.PoisonType) { p.poisonType = x } +func (p *Poison) SetExpiration(x time.Time) { p.expiration = x } +func (p *Poison) SetManufactuer(x string) { p.manufactuer = x } +func (p *Poison) SetPotency(x int) { p.potency = x } diff --git a/vendor/github.com/google/go-cmp/cmp/internal/teststructs/structs.go b/vendor/github.com/google/go-cmp/cmp/internal/teststructs/structs.go new file mode 100644 index 000000000..6b4d2a725 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/teststructs/structs.go @@ -0,0 +1,197 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package teststructs + +type InterfaceA interface { + InterfaceA() +} + +type ( + StructA struct{ X string } // Equal method on value receiver + StructB struct{ X string } // Equal method on pointer receiver + StructC struct{ X string } // Equal method (with interface argument) on value receiver + StructD struct{ X string } // Equal method (with interface argument) on pointer receiver + StructE struct{ X string } // Equal method (with interface argument on value receiver) on pointer receiver + StructF struct{ X string } // Equal method (with interface argument on pointer receiver) on value receiver + + // These embed the above types as a value. + StructA1 struct { + StructA + X string + } + StructB1 struct { + StructB + X string + } + StructC1 struct { + StructC + X string + } + StructD1 struct { + StructD + X string + } + StructE1 struct { + StructE + X string + } + StructF1 struct { + StructF + X string + } + + // These embed the above types as a pointer. 
+ StructA2 struct { + *StructA + X string + } + StructB2 struct { + *StructB + X string + } + StructC2 struct { + *StructC + X string + } + StructD2 struct { + *StructD + X string + } + StructE2 struct { + *StructE + X string + } + StructF2 struct { + *StructF + X string + } + + StructNo struct{ X string } // Equal method (with interface argument) on non-satisfying receiver + + AssignA func() int + AssignB struct{ A int } + AssignC chan bool + AssignD <-chan bool +) + +func (x StructA) Equal(y StructA) bool { return true } +func (x *StructB) Equal(y *StructB) bool { return true } +func (x StructC) Equal(y InterfaceA) bool { return true } +func (x StructC) InterfaceA() {} +func (x *StructD) Equal(y InterfaceA) bool { return true } +func (x *StructD) InterfaceA() {} +func (x *StructE) Equal(y InterfaceA) bool { return true } +func (x StructE) InterfaceA() {} +func (x StructF) Equal(y InterfaceA) bool { return true } +func (x *StructF) InterfaceA() {} +func (x StructNo) Equal(y InterfaceA) bool { return true } + +func (x AssignA) Equal(y func() int) bool { return true } +func (x AssignB) Equal(y struct{ A int }) bool { return true } +func (x AssignC) Equal(y chan bool) bool { return true } +func (x AssignD) Equal(y <-chan bool) bool { return true } + +var _ = func( + a StructA, b StructB, c StructC, d StructD, e StructE, f StructF, + ap *StructA, bp *StructB, cp *StructC, dp *StructD, ep *StructE, fp *StructF, + a1 StructA1, b1 StructB1, c1 StructC1, d1 StructD1, e1 StructE1, f1 StructF1, + a2 StructA2, b2 StructB2, c2 StructC2, d2 StructD2, e2 StructE2, f2 StructF1, +) { + a.Equal(a) + b.Equal(&b) + c.Equal(c) + d.Equal(&d) + e.Equal(e) + f.Equal(&f) + + ap.Equal(*ap) + bp.Equal(bp) + cp.Equal(*cp) + dp.Equal(dp) + ep.Equal(*ep) + fp.Equal(fp) + + a1.Equal(a1.StructA) + b1.Equal(&b1.StructB) + c1.Equal(c1) + d1.Equal(&d1) + e1.Equal(e1) + f1.Equal(&f1) + + a2.Equal(*a2.StructA) + b2.Equal(b2.StructB) + c2.Equal(c2) + d2.Equal(&d2) + e2.Equal(e2) + f2.Equal(&f2) +} + +type ( + privateStruct struct{ Public, private int } + PublicStruct struct{ Public, private int } + ParentStructA struct{ privateStruct } + ParentStructB struct{ PublicStruct } + ParentStructC struct { + privateStruct + Public, private int + } + ParentStructD struct { + PublicStruct + Public, private int + } + ParentStructE struct { + privateStruct + PublicStruct + } + ParentStructF struct { + privateStruct + PublicStruct + Public, private int + } + ParentStructG struct { + *privateStruct + } + ParentStructH struct { + *PublicStruct + } + ParentStructI struct { + *privateStruct + *PublicStruct + } + ParentStructJ struct { + *privateStruct + *PublicStruct + Public PublicStruct + private privateStruct + } +) + +func NewParentStructG() *ParentStructG { + return &ParentStructG{new(privateStruct)} +} +func NewParentStructH() *ParentStructH { + return &ParentStructH{new(PublicStruct)} +} +func NewParentStructI() *ParentStructI { + return &ParentStructI{new(privateStruct), new(PublicStruct)} +} +func NewParentStructJ() *ParentStructJ { + return &ParentStructJ{ + privateStruct: new(privateStruct), PublicStruct: new(PublicStruct), + } +} +func (s *privateStruct) SetPrivate(i int) { s.private = i } +func (s *PublicStruct) SetPrivate(i int) { s.private = i } +func (s *ParentStructC) SetPrivate(i int) { s.private = i } +func (s *ParentStructD) SetPrivate(i int) { s.private = i } +func (s *ParentStructF) SetPrivate(i int) { s.private = i } +func (s *ParentStructA) PrivateStruct() *privateStruct { return &s.privateStruct } +func (s 
*ParentStructC) PrivateStruct() *privateStruct { return &s.privateStruct } +func (s *ParentStructE) PrivateStruct() *privateStruct { return &s.privateStruct } +func (s *ParentStructF) PrivateStruct() *privateStruct { return &s.privateStruct } +func (s *ParentStructG) PrivateStruct() *privateStruct { return s.privateStruct } +func (s *ParentStructI) PrivateStruct() *privateStruct { return s.privateStruct } +func (s *ParentStructJ) PrivateStruct() *privateStruct { return s.privateStruct } +func (s *ParentStructJ) Private() *privateStruct { return &s.private } diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/format.go b/vendor/github.com/google/go-cmp/cmp/internal/value/format.go new file mode 100644 index 000000000..abaeca89e --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/format.go @@ -0,0 +1,259 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// Package value provides functionality for reflect.Value types. +package value + +import ( + "fmt" + "reflect" + "strings" + "unicode" + "unicode/utf8" +) + +// formatFakePointers controls whether to substitute pointer addresses with nil. +// This is used for deterministic testing. +var formatFakePointers = false + +var stringerIface = reflect.TypeOf((*fmt.Stringer)(nil)).Elem() + +// Format formats the value v as a string. +// +// This is similar to fmt.Sprintf("%+v", v) except this: +// * Prints the type unless it can be elided +// * Avoids printing struct fields that are zero +// * Prints a nil-slice as being nil, not empty +// * Prints map entries in deterministic order +func Format(v reflect.Value, useStringer bool) string { + return formatAny(v, formatConfig{useStringer, true, true, !formatFakePointers}, nil) +} + +type formatConfig struct { + useStringer bool // Should the String method be used if available? + printType bool // Should we print the type before the value? + followPointers bool // Should we recursively follow pointers? + realPointers bool // Should we print the real address of pointers? +} + +func formatAny(v reflect.Value, conf formatConfig, visited map[uintptr]bool) string { + // TODO: Should this be a multi-line printout in certain situations? + + if !v.IsValid() { + return "" + } + if conf.useStringer && v.Type().Implements(stringerIface) && v.CanInterface() { + if (v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface) && v.IsNil() { + return "" + } + return fmt.Sprintf("%q", v.Interface().(fmt.Stringer).String()) + } + + switch v.Kind() { + case reflect.Bool: + return formatPrimitive(v.Type(), v.Bool(), conf) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return formatPrimitive(v.Type(), v.Int(), conf) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + if v.Type().PkgPath() == "" || v.Kind() == reflect.Uintptr { + // Unnamed uints are usually bytes or words, so use hexadecimal. 
+ return formatPrimitive(v.Type(), formatHex(v.Uint()), conf) + } + return formatPrimitive(v.Type(), v.Uint(), conf) + case reflect.Float32, reflect.Float64: + return formatPrimitive(v.Type(), v.Float(), conf) + case reflect.Complex64, reflect.Complex128: + return formatPrimitive(v.Type(), v.Complex(), conf) + case reflect.String: + return formatPrimitive(v.Type(), fmt.Sprintf("%q", v), conf) + case reflect.UnsafePointer, reflect.Chan, reflect.Func: + return formatPointer(v, conf) + case reflect.Ptr: + if v.IsNil() { + if conf.printType { + return fmt.Sprintf("(%v)(nil)", v.Type()) + } + return "" + } + if visited[v.Pointer()] || !conf.followPointers { + return formatPointer(v, conf) + } + visited = insertPointer(visited, v.Pointer()) + return "&" + formatAny(v.Elem(), conf, visited) + case reflect.Interface: + if v.IsNil() { + if conf.printType { + return fmt.Sprintf("%v(nil)", v.Type()) + } + return "" + } + return formatAny(v.Elem(), conf, visited) + case reflect.Slice: + if v.IsNil() { + if conf.printType { + return fmt.Sprintf("%v(nil)", v.Type()) + } + return "" + } + if visited[v.Pointer()] { + return formatPointer(v, conf) + } + visited = insertPointer(visited, v.Pointer()) + fallthrough + case reflect.Array: + var ss []string + subConf := conf + subConf.printType = v.Type().Elem().Kind() == reflect.Interface + for i := 0; i < v.Len(); i++ { + s := formatAny(v.Index(i), subConf, visited) + ss = append(ss, s) + } + s := fmt.Sprintf("{%s}", strings.Join(ss, ", ")) + if conf.printType { + return v.Type().String() + s + } + return s + case reflect.Map: + if v.IsNil() { + if conf.printType { + return fmt.Sprintf("%v(nil)", v.Type()) + } + return "" + } + if visited[v.Pointer()] { + return formatPointer(v, conf) + } + visited = insertPointer(visited, v.Pointer()) + + var ss []string + subConf := conf + subConf.printType = v.Type().Elem().Kind() == reflect.Interface + for _, k := range SortKeys(v.MapKeys()) { + sk := formatAny(k, formatConfig{realPointers: conf.realPointers}, visited) + sv := formatAny(v.MapIndex(k), subConf, visited) + ss = append(ss, fmt.Sprintf("%s: %s", sk, sv)) + } + s := fmt.Sprintf("{%s}", strings.Join(ss, ", ")) + if conf.printType { + return v.Type().String() + s + } + return s + case reflect.Struct: + var ss []string + subConf := conf + subConf.printType = true + for i := 0; i < v.NumField(); i++ { + vv := v.Field(i) + if isZero(vv) { + continue // Elide zero value fields + } + name := v.Type().Field(i).Name + subConf.useStringer = conf.useStringer && isExported(name) + s := formatAny(vv, subConf, visited) + ss = append(ss, fmt.Sprintf("%s: %s", name, s)) + } + s := fmt.Sprintf("{%s}", strings.Join(ss, ", ")) + if conf.printType { + return v.Type().String() + s + } + return s + default: + panic(fmt.Sprintf("%v kind not handled", v.Kind())) + } +} + +func formatPrimitive(t reflect.Type, v interface{}, conf formatConfig) string { + if conf.printType && t.PkgPath() != "" { + return fmt.Sprintf("%v(%v)", t, v) + } + return fmt.Sprintf("%v", v) +} + +func formatPointer(v reflect.Value, conf formatConfig) string { + p := v.Pointer() + if !conf.realPointers { + p = 0 // For deterministic printing purposes + } + s := formatHex(uint64(p)) + if conf.printType { + return fmt.Sprintf("(%v)(%s)", v.Type(), s) + } + return s +} + +func formatHex(u uint64) string { + var f string + switch { + case u <= 0xff: + f = "0x%02x" + case u <= 0xffff: + f = "0x%04x" + case u <= 0xffffff: + f = "0x%06x" + case u <= 0xffffffff: + f = "0x%08x" + case u <= 0xffffffffff: + f = "0x%010x" + 
case u <= 0xffffffffffff:
+ f = "0x%012x"
+ case u <= 0xffffffffffffff:
+ f = "0x%014x"
+ case u <= 0xffffffffffffffff:
+ f = "0x%016x"
+ }
+ return fmt.Sprintf(f, u)
+}
+
+// insertPointer inserts p into m, allocating m if necessary.
+func insertPointer(m map[uintptr]bool, p uintptr) map[uintptr]bool {
+ if m == nil {
+ m = make(map[uintptr]bool)
+ }
+ m[p] = true
+ return m
+}
+
+// isZero reports whether v is the zero value.
+// This does not rely on Interface and so can be used on unexported fields.
+func isZero(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Bool:
+ return v.Bool() == false
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Complex64, reflect.Complex128:
+ return v.Complex() == 0
+ case reflect.String:
+ return v.String() == ""
+ case reflect.UnsafePointer:
+ return v.Pointer() == 0
+ case reflect.Chan, reflect.Func, reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice:
+ return v.IsNil()
+ case reflect.Array:
+ for i := 0; i < v.Len(); i++ {
+ if !isZero(v.Index(i)) {
+ return false
+ }
+ }
+ return true
+ case reflect.Struct:
+ for i := 0; i < v.NumField(); i++ {
+ if !isZero(v.Field(i)) {
+ return false
+ }
+ }
+ return true
+ }
+ return false
+}
+
+// isExported reports whether the identifier is exported.
+func isExported(id string) bool {
+ r, _ := utf8.DecodeRuneInString(id)
+ return unicode.IsUpper(r)
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/format_test.go b/vendor/github.com/google/go-cmp/cmp/internal/value/format_test.go
new file mode 100644
index 000000000..6498854f2
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/internal/value/format_test.go
@@ -0,0 +1,91 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
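The behavior documented on Format above (type elision, zero-field elision, nil slices kept distinct from empty ones) is easiest to see with a couple of calls. A minimal sketch; the expected outputs are taken from the test table below, and since cmp/internal/value is an internal package this only compiles from inside the go-cmp module:

package main

import (
	"fmt"
	"reflect"

	// Internal package: the import only resolves inside the go-cmp module.
	"github.com/google/go-cmp/cmp/internal/value"
)

type point struct{ X, Y int }

func main() {
	// Zero-valued fields are elided; the type is printed since it cannot be inferred.
	fmt.Println(value.Format(reflect.ValueOf(point{X: 1}), true)) // main.point{X: 1}

	// A nil slice is distinguished from an empty one.
	fmt.Println(value.Format(reflect.ValueOf([]int(nil)), true)) // []int(nil)
	fmt.Println(value.Format(reflect.ValueOf([]int{}), true))    // []int{}
}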
+ +package value + +import ( + "bytes" + "io" + "reflect" + "testing" +) + +func TestFormat(t *testing.T) { + type key struct { + a int + b string + c chan bool + } + + tests := []struct { + in interface{} + want string + }{{ + in: []int{}, + want: "[]int{}", + }, { + in: []int(nil), + want: "[]int(nil)", + }, { + in: []int{1, 2, 3, 4, 5}, + want: "[]int{1, 2, 3, 4, 5}", + }, { + in: []interface{}{1, true, "hello", struct{ A, B int }{1, 2}}, + want: "[]interface {}{1, true, \"hello\", struct { A int; B int }{A: 1, B: 2}}", + }, { + in: []struct{ A, B int }{{1, 2}, {0, 4}, {}}, + want: "[]struct { A int; B int }{{A: 1, B: 2}, {B: 4}, {}}", + }, { + in: map[*int]string{new(int): "hello"}, + want: "map[*int]string{0x00: \"hello\"}", + }, { + in: map[key]string{{}: "hello"}, + want: "map[value.key]string{{}: \"hello\"}", + }, { + in: map[key]string{{a: 5, b: "key", c: make(chan bool)}: "hello"}, + want: "map[value.key]string{{a: 5, b: \"key\", c: (chan bool)(0x00)}: \"hello\"}", + }, { + in: map[io.Reader]string{new(bytes.Reader): "hello"}, + want: "map[io.Reader]string{0x00: \"hello\"}", + }, { + in: func() interface{} { + var a = []interface{}{nil} + a[0] = a + return a + }(), + want: "[]interface {}{([]interface {})(0x00)}", + }, { + in: func() interface{} { + type A *A + var a A + a = &a + return a + }(), + want: "&(value.A)(0x00)", + }, { + in: func() interface{} { + type A map[*A]A + a := make(A) + a[&a] = a + return a + }(), + want: "value.A{0x00: 0x00}", + }, { + in: func() interface{} { + var a [2]interface{} + a[0] = &a + return a + }(), + want: "[2]interface {}{&[2]interface {}{(*[2]interface {})(0x00), interface {}(nil)}, interface {}(nil)}", + }} + + formatFakePointers = true + defer func() { formatFakePointers = false }() + for i, tt := range tests { + got := Format(reflect.ValueOf(tt.in), true) + if got != tt.want { + t.Errorf("test %d, Format():\ngot %q\nwant %q", i, got, tt.want) + } + } +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go b/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go new file mode 100644 index 000000000..ea73cf143 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go @@ -0,0 +1,111 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package value + +import ( + "fmt" + "math" + "reflect" + "sort" +) + +// SortKeys sorts a list of map keys, deduplicating keys if necessary. +// The type of each value must be comparable. +func SortKeys(vs []reflect.Value) []reflect.Value { + if len(vs) == 0 { + return vs + } + + // Sort the map keys. + sort.Sort(valueSorter(vs)) + + // Deduplicate keys (fails for NaNs). + vs2 := vs[:1] + for _, v := range vs[1:] { + if v.Interface() != vs2[len(vs2)-1].Interface() { + vs2 = append(vs2, v) + } + } + return vs2 +} + +// TODO: Use sort.Slice once Google AppEngine is on Go1.8 or above. +type valueSorter []reflect.Value + +func (vs valueSorter) Len() int { return len(vs) } +func (vs valueSorter) Less(i, j int) bool { return isLess(vs[i], vs[j]) } +func (vs valueSorter) Swap(i, j int) { vs[i], vs[j] = vs[j], vs[i] } + +// isLess is a generic function for sorting arbitrary map keys. +// The inputs must be of the same type and must be comparable. 
+func isLess(x, y reflect.Value) bool {
+ switch x.Type().Kind() {
+ case reflect.Bool:
+ return !x.Bool() && y.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return x.Int() < y.Int()
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return x.Uint() < y.Uint()
+ case reflect.Float32, reflect.Float64:
+ fx, fy := x.Float(), y.Float()
+ return fx < fy || math.IsNaN(fx) && !math.IsNaN(fy)
+ case reflect.Complex64, reflect.Complex128:
+ cx, cy := x.Complex(), y.Complex()
+ rx, ix, ry, iy := real(cx), imag(cx), real(cy), imag(cy)
+ if rx == ry || (math.IsNaN(rx) && math.IsNaN(ry)) {
+ return ix < iy || math.IsNaN(ix) && !math.IsNaN(iy)
+ }
+ return rx < ry || math.IsNaN(rx) && !math.IsNaN(ry)
+ case reflect.Ptr, reflect.UnsafePointer, reflect.Chan:
+ return x.Pointer() < y.Pointer()
+ case reflect.String:
+ return x.String() < y.String()
+ case reflect.Array:
+ for i := 0; i < x.Len(); i++ {
+ if isLess(x.Index(i), y.Index(i)) {
+ return true
+ }
+ if isLess(y.Index(i), x.Index(i)) {
+ return false
+ }
+ }
+ return false
+ case reflect.Struct:
+ for i := 0; i < x.NumField(); i++ {
+ if isLess(x.Field(i), y.Field(i)) {
+ return true
+ }
+ if isLess(y.Field(i), x.Field(i)) {
+ return false
+ }
+ }
+ return false
+ case reflect.Interface:
+ vx, vy := x.Elem(), y.Elem()
+ if !vx.IsValid() || !vy.IsValid() {
+ return !vx.IsValid() && vy.IsValid()
+ }
+ tx, ty := vx.Type(), vy.Type()
+ if tx == ty {
+ return isLess(x.Elem(), y.Elem())
+ }
+ if tx.Kind() != ty.Kind() {
+ return vx.Kind() < vy.Kind()
+ }
+ if tx.String() != ty.String() {
+ return tx.String() < ty.String()
+ }
+ if tx.PkgPath() != ty.PkgPath() {
+ return tx.PkgPath() < ty.PkgPath()
+ }
+ // This can happen in rare situations, so we fall back to just comparing
+ // the unique pointer for a reflect.Type. This guarantees deterministic
+ // ordering within a program, but it is obviously not stable.
+ return reflect.ValueOf(vx.Type()).Pointer() < reflect.ValueOf(vy.Type()).Pointer()
+ default:
+ // Must be Func, Map, or Slice, which are not comparable.
+ panic(fmt.Sprintf("%T is not comparable", x.Type()))
+ }
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/sort_test.go b/vendor/github.com/google/go-cmp/cmp/internal/value/sort_test.go
new file mode 100644
index 000000000..c5a6bbb12
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/internal/value/sort_test.go
@@ -0,0 +1,152 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
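A small sketch of what SortKeys and isLess buy in practice: deterministic traversal of a map with mixed interface keys. This is illustrative only, since the package is internal to go-cmp. Keys of different dynamic kinds order by reflect.Kind first, which is why the ints sort ahead of the strings:

package main

import (
	"fmt"
	"reflect"

	// Internal package: the import only resolves inside the go-cmp module.
	"github.com/google/go-cmp/cmp/internal/value"
)

func main() {
	m := map[interface{}]bool{"b": true, 1: true, "a": true, 2: true}
	// MapKeys returns the keys in unspecified order; SortKeys fixes that.
	for _, k := range value.SortKeys(reflect.ValueOf(m).MapKeys()) {
		fmt.Print(k.Interface(), " ")
	}
	fmt.Println() // 1 2 a b (Int kind sorts below String kind in isLess)
}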
+ +package value_test + +import ( + "math" + "reflect" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/internal/value" +) + +func TestSortKeys(t *testing.T) { + type ( + MyString string + MyArray [2]int + MyStruct struct { + A MyString + B MyArray + C chan float64 + } + EmptyStruct struct{} + ) + + opts := []cmp.Option{ + cmp.Comparer(func(x, y float64) bool { + if math.IsNaN(x) && math.IsNaN(y) { + return true + } + return x == y + }), + cmp.Comparer(func(x, y complex128) bool { + rx, ix, ry, iy := real(x), imag(x), real(y), imag(y) + if math.IsNaN(rx) && math.IsNaN(ry) { + rx, ry = 0, 0 + } + if math.IsNaN(ix) && math.IsNaN(iy) { + ix, iy = 0, 0 + } + return rx == ry && ix == iy + }), + cmp.Comparer(func(x, y chan bool) bool { return true }), + cmp.Comparer(func(x, y chan int) bool { return true }), + cmp.Comparer(func(x, y chan float64) bool { return true }), + cmp.Comparer(func(x, y chan interface{}) bool { return true }), + cmp.Comparer(func(x, y *int) bool { return true }), + } + + tests := []struct { + in map[interface{}]bool // Set of keys to sort + want []interface{} + }{{ + in: map[interface{}]bool{1: true, 2: true, 3: true}, + want: []interface{}{1, 2, 3}, + }, { + in: map[interface{}]bool{ + nil: true, + true: true, + false: true, + -5: true, + -55: true, + -555: true, + uint(1): true, + uint(11): true, + uint(111): true, + "abc": true, + "abcd": true, + "abcde": true, + "foo": true, + "bar": true, + MyString("abc"): true, + MyString("abcd"): true, + MyString("abcde"): true, + new(int): true, + new(int): true, + make(chan bool): true, + make(chan bool): true, + make(chan int): true, + make(chan interface{}): true, + math.Inf(+1): true, + math.Inf(-1): true, + 1.2345: true, + 12.345: true, + 123.45: true, + 1234.5: true, + 0 + 0i: true, + 1 + 0i: true, + 2 + 0i: true, + 0 + 1i: true, + 0 + 2i: true, + 0 + 3i: true, + [2]int{2, 3}: true, + [2]int{4, 0}: true, + [2]int{2, 4}: true, + MyArray([2]int{2, 4}): true, + EmptyStruct{}: true, + MyStruct{ + "bravo", [2]int{2, 3}, make(chan float64), + }: true, + MyStruct{ + "alpha", [2]int{3, 3}, make(chan float64), + }: true, + }, + want: []interface{}{ + nil, false, true, + -555, -55, -5, uint(1), uint(11), uint(111), + math.Inf(-1), 1.2345, 12.345, 123.45, 1234.5, math.Inf(+1), + (0 + 0i), (0 + 1i), (0 + 2i), (0 + 3i), (1 + 0i), (2 + 0i), + [2]int{2, 3}, [2]int{2, 4}, [2]int{4, 0}, MyArray([2]int{2, 4}), + make(chan bool), make(chan bool), make(chan int), make(chan interface{}), + new(int), new(int), + "abc", "abcd", "abcde", "bar", "foo", + MyString("abc"), MyString("abcd"), MyString("abcde"), + EmptyStruct{}, + MyStruct{"alpha", [2]int{3, 3}, make(chan float64)}, + MyStruct{"bravo", [2]int{2, 3}, make(chan float64)}, + }, + }, { + // NaN values cannot be properly deduplicated. + // This is okay since map entries with NaN in the keys cannot be + // retrieved anyways. 
+ in: map[interface{}]bool{
+ math.NaN(): true,
+ math.NaN(): true,
+ complex(0, math.NaN()): true,
+ complex(0, math.NaN()): true,
+ complex(math.NaN(), 0): true,
+ complex(math.NaN(), 0): true,
+ complex(math.NaN(), math.NaN()): true,
+ },
+ want: []interface{}{
+ math.NaN(), math.NaN(), math.NaN(), math.NaN(),
+ complex(math.NaN(), math.NaN()), complex(math.NaN(), math.NaN()),
+ complex(math.NaN(), 0), complex(math.NaN(), 0), complex(math.NaN(), 0), complex(math.NaN(), 0),
+ complex(0, math.NaN()), complex(0, math.NaN()), complex(0, math.NaN()), complex(0, math.NaN()),
+ },
+ }}
+
+ for i, tt := range tests {
+ keys := append(reflect.ValueOf(tt.in).MapKeys(), reflect.ValueOf(tt.in).MapKeys()...)
+ var got []interface{}
+ for _, k := range value.SortKeys(keys) {
+ got = append(got, k.Interface())
+ }
+ if d := cmp.Diff(got, tt.want, opts...); d != "" {
+ t.Errorf("test %d, Sort() mismatch (-got +want):\n%s", i, d)
+ }
+ }
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/options.go b/vendor/github.com/google/go-cmp/cmp/options.go
new file mode 100644
index 000000000..a4e159ac4
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/options.go
@@ -0,0 +1,446 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+package cmp
+
+import (
+ "fmt"
+ "reflect"
+ "runtime"
+ "strings"
+
+ "github.com/google/go-cmp/cmp/internal/function"
+)
+
+// Option configures specific behavior of Equal and Diff. In particular,
+// the fundamental Option functions (Ignore, Transformer, and Comparer)
+// configure how equality is determined.
+//
+// The fundamental options may be composed with filters (FilterPath and
+// FilterValues) to control the scope over which they are applied.
+//
+// The cmp/cmpopts package provides helper functions for creating options that
+// may be used with Equal and Diff.
+type Option interface {
+ // filter applies all filters and returns the option that remains.
+ // Each option may only read s.curPath and call s.callTTBFunc.
+ //
+ // An Options is returned only if multiple comparers or transformers
+ // can apply simultaneously and will only contain values of those types
+ // or sub-Options containing values of those types.
+ filter(s *state, vx, vy reflect.Value, t reflect.Type) applicableOption
+}
+
+// applicableOption represents the following types:
+// Fundamental: ignore | invalid | *comparer | *transformer
+// Grouping: Options
+type applicableOption interface {
+ Option
+
+ // apply executes the option and reports whether the option was applied.
+ // Each option may mutate s.
+ apply(s *state, vx, vy reflect.Value) bool
+}
+
+// coreOption represents the following types:
+// Fundamental: ignore | invalid | *comparer | *transformer
+// Filters: *pathFilter | *valuesFilter
+type coreOption interface {
+ Option
+ isCore()
+}
+
+type core struct{}
+
+func (core) isCore() {}
+
+// Options is a list of Option values that also satisfies the Option interface.
+// Helper comparison packages may return an Options value when packing multiple
+// Option values into a single Option. When this package processes an Options,
+// it will be implicitly expanded into a flat list.
+//
+// Applying a filter on an Options is equivalent to applying that same filter
+// on all individual options held within.
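A usage sketch, assumed rather than taken from the sources: because Options itself satisfies Option, a helper can hand several related settings back as one value, and cmp flattens the group when it is applied. The regexp comparer here is the one suggested in the AllowUnexported documentation further down:

package main

import (
	"fmt"
	"regexp"
	"time"

	"github.com/google/go-cmp/cmp"
)

func main() {
	// Bundle two comparers as a single reusable Option.
	opts := cmp.Options{
		cmp.Comparer(func(x, y *regexp.Regexp) bool { return x.String() == y.String() }),
		cmp.Comparer(func(x, y time.Time) bool { return x.Equal(y) }),
	}

	type rule struct {
		Pattern *regexp.Regexp
		Since   time.Time
	}
	now := time.Now()
	x := rule{Pattern: regexp.MustCompile(`a+`), Since: now}
	y := rule{Pattern: regexp.MustCompile(`a+`), Since: now.UTC()} // same instant, different location
	fmt.Println(cmp.Equal(x, y, opts))                             // true
}

Without the comparers, cmp would have to descend into the unexported internals of regexp.Regexp and time.Time; with them, it never does.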
+type Options []Option + +func (opts Options) filter(s *state, vx, vy reflect.Value, t reflect.Type) (out applicableOption) { + for _, opt := range opts { + switch opt := opt.filter(s, vx, vy, t); opt.(type) { + case ignore: + return ignore{} // Only ignore can short-circuit evaluation + case invalid: + out = invalid{} // Takes precedence over comparer or transformer + case *comparer, *transformer, Options: + switch out.(type) { + case nil: + out = opt + case invalid: + // Keep invalid + case *comparer, *transformer, Options: + out = Options{out, opt} // Conflicting comparers or transformers + } + } + } + return out +} + +func (opts Options) apply(s *state, _, _ reflect.Value) bool { + const warning = "ambiguous set of applicable options" + const help = "consider using filters to ensure at most one Comparer or Transformer may apply" + var ss []string + for _, opt := range flattenOptions(nil, opts) { + ss = append(ss, fmt.Sprint(opt)) + } + set := strings.Join(ss, "\n\t") + panic(fmt.Sprintf("%s at %#v:\n\t%s\n%s", warning, s.curPath, set, help)) +} + +func (opts Options) String() string { + var ss []string + for _, opt := range opts { + ss = append(ss, fmt.Sprint(opt)) + } + return fmt.Sprintf("Options{%s}", strings.Join(ss, ", ")) +} + +// FilterPath returns a new Option where opt is only evaluated if filter f +// returns true for the current Path in the value tree. +// +// The option passed in may be an Ignore, Transformer, Comparer, Options, or +// a previously filtered Option. +func FilterPath(f func(Path) bool, opt Option) Option { + if f == nil { + panic("invalid path filter function") + } + if opt := normalizeOption(opt); opt != nil { + return &pathFilter{fnc: f, opt: opt} + } + return nil +} + +type pathFilter struct { + core + fnc func(Path) bool + opt Option +} + +func (f pathFilter) filter(s *state, vx, vy reflect.Value, t reflect.Type) applicableOption { + if f.fnc(s.curPath) { + return f.opt.filter(s, vx, vy, t) + } + return nil +} + +func (f pathFilter) String() string { + fn := getFuncName(reflect.ValueOf(f.fnc).Pointer()) + return fmt.Sprintf("FilterPath(%s, %v)", fn, f.opt) +} + +// FilterValues returns a new Option where opt is only evaluated if filter f, +// which is a function of the form "func(T, T) bool", returns true for the +// current pair of values being compared. If the type of the values is not +// assignable to T, then this filter implicitly returns false. +// +// The filter function must be +// symmetric (i.e., agnostic to the order of the inputs) and +// deterministic (i.e., produces the same result when given the same inputs). +// If T is an interface, it is possible that f is called with two values with +// different concrete types that both implement T. +// +// The option passed in may be an Ignore, Transformer, Comparer, Options, or +// a previously filtered Option. 
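As a concrete (assumed) usage of the pattern just described, FilterValues composed with Ignore is the standard way to express approximate equality, for example tolerating small float differences:

package main

import (
	"fmt"
	"math"

	"github.com/google/go-cmp/cmp"
)

func main() {
	// Ignore any float64 pair whose values differ by less than 0.01.
	approx := cmp.FilterValues(func(x, y float64) bool {
		return math.Abs(x-y) < 0.01
	}, cmp.Ignore())

	fmt.Println(cmp.Equal(1.000, 1.005, approx)) // true: the pair matched the filter and was ignored
	fmt.Println(cmp.Equal(1.000, 1.200, approx)) // false: filter did not match, so values compared normally
}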
+func FilterValues(f interface{}, opt Option) Option { + v := reflect.ValueOf(f) + if !function.IsType(v.Type(), function.ValueFilter) || v.IsNil() { + panic(fmt.Sprintf("invalid values filter function: %T", f)) + } + if opt := normalizeOption(opt); opt != nil { + vf := &valuesFilter{fnc: v, opt: opt} + if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 { + vf.typ = ti + } + return vf + } + return nil +} + +type valuesFilter struct { + core + typ reflect.Type // T + fnc reflect.Value // func(T, T) bool + opt Option +} + +func (f valuesFilter) filter(s *state, vx, vy reflect.Value, t reflect.Type) applicableOption { + if !vx.IsValid() || !vy.IsValid() { + return invalid{} + } + if (f.typ == nil || t.AssignableTo(f.typ)) && s.callTTBFunc(f.fnc, vx, vy) { + return f.opt.filter(s, vx, vy, t) + } + return nil +} + +func (f valuesFilter) String() string { + fn := getFuncName(f.fnc.Pointer()) + return fmt.Sprintf("FilterValues(%s, %v)", fn, f.opt) +} + +// Ignore is an Option that causes all comparisons to be ignored. +// This value is intended to be combined with FilterPath or FilterValues. +// It is an error to pass an unfiltered Ignore option to Equal. +func Ignore() Option { return ignore{} } + +type ignore struct{ core } + +func (ignore) isFiltered() bool { return false } +func (ignore) filter(_ *state, _, _ reflect.Value, _ reflect.Type) applicableOption { return ignore{} } +func (ignore) apply(_ *state, _, _ reflect.Value) bool { return true } +func (ignore) String() string { return "Ignore()" } + +// invalid is a sentinel Option type to indicate that some options could not +// be evaluated due to unexported fields. +type invalid struct{ core } + +func (invalid) filter(_ *state, _, _ reflect.Value, _ reflect.Type) applicableOption { return invalid{} } +func (invalid) apply(s *state, _, _ reflect.Value) bool { + const help = "consider using AllowUnexported or cmpopts.IgnoreUnexported" + panic(fmt.Sprintf("cannot handle unexported field: %#v\n%s", s.curPath, help)) +} + +// Transformer returns an Option that applies a transformation function that +// converts values of a certain type into that of another. +// +// The transformer f must be a function "func(T) R" that converts values of +// type T to those of type R and is implicitly filtered to input values +// assignable to T. The transformer must not mutate T in any way. +// If T and R are the same type, an additional filter must be applied to +// act as the base case to prevent an infinite recursion applying the same +// transform to itself (see the SortedSlice example). +// +// The name is a user provided label that is used as the Transform.Name in the +// transformation PathStep. If empty, an arbitrary name is used. 
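A sketch of the T == R pitfall mentioned above, in the spirit of the SortedSlice example it cites (assumed usage, not from the sources): a string-to-string transform needs a filter acting as a base case, otherwise it would be re-applied to its own output indefinitely.

package main

import (
	"fmt"
	"strings"

	"github.com/google/go-cmp/cmp"
)

func main() {
	// Case-insensitive string comparison via a lower-casing transform.
	// The FilterValues base case stops the transform once both sides are
	// already lower-case, preventing infinite recursion.
	insensitive := cmp.FilterValues(func(x, y string) bool {
		return x != strings.ToLower(x) || y != strings.ToLower(y)
	}, cmp.Transformer("ToLower", strings.ToLower))

	fmt.Println(cmp.Equal("Hello", "hELLO", insensitive)) // true
	fmt.Println(cmp.Equal("Hello", "World", insensitive)) // false
}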
+func Transformer(name string, f interface{}) Option { + v := reflect.ValueOf(f) + if !function.IsType(v.Type(), function.Transformer) || v.IsNil() { + panic(fmt.Sprintf("invalid transformer function: %T", f)) + } + if name == "" { + name = "λ" // Lambda-symbol as place-holder for anonymous transformer + } + if !isValid(name) { + panic(fmt.Sprintf("invalid name: %q", name)) + } + tr := &transformer{name: name, fnc: reflect.ValueOf(f)} + if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 { + tr.typ = ti + } + return tr +} + +type transformer struct { + core + name string + typ reflect.Type // T + fnc reflect.Value // func(T) R +} + +func (tr *transformer) isFiltered() bool { return tr.typ != nil } + +func (tr *transformer) filter(_ *state, _, _ reflect.Value, t reflect.Type) applicableOption { + if tr.typ == nil || t.AssignableTo(tr.typ) { + return tr + } + return nil +} + +func (tr *transformer) apply(s *state, vx, vy reflect.Value) bool { + // Update path before calling the Transformer so that dynamic checks + // will use the updated path. + s.curPath.push(&transform{pathStep{tr.fnc.Type().Out(0)}, tr}) + defer s.curPath.pop() + + vx = s.callTRFunc(tr.fnc, vx) + vy = s.callTRFunc(tr.fnc, vy) + s.compareAny(vx, vy) + return true +} + +func (tr transformer) String() string { + return fmt.Sprintf("Transformer(%s, %s)", tr.name, getFuncName(tr.fnc.Pointer())) +} + +// Comparer returns an Option that determines whether two values are equal +// to each other. +// +// The comparer f must be a function "func(T, T) bool" and is implicitly +// filtered to input values assignable to T. If T is an interface, it is +// possible that f is called with two values of different concrete types that +// both implement T. +// +// The equality function must be: +// • Symmetric: equal(x, y) == equal(y, x) +// • Deterministic: equal(x, y) == equal(x, y) +// • Pure: equal(x, y) does not modify x or y +func Comparer(f interface{}) Option { + v := reflect.ValueOf(f) + if !function.IsType(v.Type(), function.Equal) || v.IsNil() { + panic(fmt.Sprintf("invalid comparer function: %T", f)) + } + cm := &comparer{fnc: v} + if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 { + cm.typ = ti + } + return cm +} + +type comparer struct { + core + typ reflect.Type // T + fnc reflect.Value // func(T, T) bool +} + +func (cm *comparer) isFiltered() bool { return cm.typ != nil } + +func (cm *comparer) filter(_ *state, _, _ reflect.Value, t reflect.Type) applicableOption { + if cm.typ == nil || t.AssignableTo(cm.typ) { + return cm + } + return nil +} + +func (cm *comparer) apply(s *state, vx, vy reflect.Value) bool { + eq := s.callTTBFunc(cm.fnc, vx, vy) + s.report(eq, vx, vy) + return true +} + +func (cm comparer) String() string { + return fmt.Sprintf("Comparer(%s)", getFuncName(cm.fnc.Pointer())) +} + +// AllowUnexported returns an Option that forcibly allows operations on +// unexported fields in certain structs, which are specified by passing in a +// value of each struct type. +// +// Users of this option must understand that comparing on unexported fields +// from external packages is not safe since changes in the internal +// implementation of some external package may cause the result of Equal +// to unexpectedly change. However, it may be valid to use this option on types +// defined in an internal package where the semantic meaning of an unexported +// field is in the control of the user. 
+// +// For some cases, a custom Comparer should be used instead that defines +// equality as a function of the public API of a type rather than the underlying +// unexported implementation. +// +// For example, the reflect.Type documentation defines equality to be determined +// by the == operator on the interface (essentially performing a shallow pointer +// comparison) and most attempts to compare *regexp.Regexp types are interested +// in only checking that the regular expression strings are equal. +// Both of these are accomplished using Comparers: +// +// Comparer(func(x, y reflect.Type) bool { return x == y }) +// Comparer(func(x, y *regexp.Regexp) bool { return x.String() == y.String() }) +// +// In other cases, the cmpopts.IgnoreUnexported option can be used to ignore +// all unexported fields on specified struct types. +func AllowUnexported(types ...interface{}) Option { + if !supportAllowUnexported { + panic("AllowUnexported is not supported on App Engine Classic or GopherJS") + } + m := make(map[reflect.Type]bool) + for _, typ := range types { + t := reflect.TypeOf(typ) + if t.Kind() != reflect.Struct { + panic(fmt.Sprintf("invalid struct type: %T", typ)) + } + m[t] = true + } + return visibleStructs(m) +} + +type visibleStructs map[reflect.Type]bool + +func (visibleStructs) filter(_ *state, _, _ reflect.Value, _ reflect.Type) applicableOption { + panic("not implemented") +} + +// reporter is an Option that configures how differences are reported. +type reporter interface { + // TODO: Not exported yet. + // + // Perhaps add PushStep and PopStep and change Report to only accept + // a PathStep instead of the full-path? Adding a PushStep and PopStep makes + // it clear that we are traversing the value tree in a depth-first-search + // manner, which has an effect on how values are printed. + + Option + + // Report is called for every comparison made and will be provided with + // the two values being compared, the equality result, and the + // current path in the value tree. It is possible for x or y to be an + // invalid reflect.Value if one of the values is non-existent; + // which is possible with maps and slices. + Report(x, y reflect.Value, eq bool, p Path) +} + +// normalizeOption normalizes the input options such that all Options groups +// are flattened and groups with a single element are reduced to that element. +// Only coreOptions and Options containing coreOptions are allowed. +func normalizeOption(src Option) Option { + switch opts := flattenOptions(nil, Options{src}); len(opts) { + case 0: + return nil + case 1: + return opts[0] + default: + return opts + } +} + +// flattenOptions copies all options in src to dst as a flat list. +// Only coreOptions and Options containing coreOptions are allowed. +func flattenOptions(dst, src Options) Options { + for _, opt := range src { + switch opt := opt.(type) { + case nil: + continue + case Options: + dst = flattenOptions(dst, opt) + case coreOption: + dst = append(dst, opt) + default: + panic(fmt.Sprintf("invalid option type: %T", opt)) + } + } + return dst +} + +// getFuncName returns a short function name from the pointer. +// The string parsing logic works up until Go1.9. +func getFuncName(p uintptr) string { + fnc := runtime.FuncForPC(p) + if fnc == nil { + return "" + } + name := fnc.Name() // E.g., "long/path/name/mypkg.(mytype).(long/path/name/mypkg.myfunc)-fm" + if strings.HasSuffix(name, ")-fm") || strings.HasSuffix(name, ")·fm") { + // Strip the package name from method name. 
+ name = strings.TrimSuffix(name, ")-fm") + name = strings.TrimSuffix(name, ")·fm") + if i := strings.LastIndexByte(name, '('); i >= 0 { + methodName := name[i+1:] // E.g., "long/path/name/mypkg.myfunc" + if j := strings.LastIndexByte(methodName, '.'); j >= 0 { + methodName = methodName[j+1:] // E.g., "myfunc" + } + name = name[:i] + methodName // E.g., "long/path/name/mypkg.(mytype)." + "myfunc" + } + } + if i := strings.LastIndexByte(name, '/'); i >= 0 { + // Strip the package name. + name = name[i+1:] // E.g., "mypkg.(mytype).myfunc" + } + return name +} diff --git a/vendor/github.com/google/go-cmp/cmp/options_test.go b/vendor/github.com/google/go-cmp/cmp/options_test.go new file mode 100644 index 000000000..009b524af --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/options_test.go @@ -0,0 +1,231 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +import ( + "io" + "reflect" + "strings" + "testing" + + ts "github.com/google/go-cmp/cmp/internal/teststructs" +) + +// Test that the creation of Option values with non-sensible inputs produces +// a run-time panic with a decent error message +func TestOptionPanic(t *testing.T) { + type myBool bool + tests := []struct { + label string // Test description + fnc interface{} // Option function to call + args []interface{} // Arguments to pass in + wantPanic string // Expected panic message + }{{ + label: "AllowUnexported", + fnc: AllowUnexported, + args: []interface{}{}, + }, { + label: "AllowUnexported", + fnc: AllowUnexported, + args: []interface{}{1}, + wantPanic: "invalid struct type", + }, { + label: "AllowUnexported", + fnc: AllowUnexported, + args: []interface{}{ts.StructA{}}, + }, { + label: "AllowUnexported", + fnc: AllowUnexported, + args: []interface{}{ts.StructA{}, ts.StructB{}, ts.StructA{}}, + }, { + label: "AllowUnexported", + fnc: AllowUnexported, + args: []interface{}{ts.StructA{}, &ts.StructB{}, ts.StructA{}}, + wantPanic: "invalid struct type", + }, { + label: "Comparer", + fnc: Comparer, + args: []interface{}{5}, + wantPanic: "invalid comparer function", + }, { + label: "Comparer", + fnc: Comparer, + args: []interface{}{func(x, y interface{}) bool { return true }}, + }, { + label: "Comparer", + fnc: Comparer, + args: []interface{}{func(x, y io.Reader) bool { return true }}, + }, { + label: "Comparer", + fnc: Comparer, + args: []interface{}{func(x, y io.Reader) myBool { return true }}, + wantPanic: "invalid comparer function", + }, { + label: "Comparer", + fnc: Comparer, + args: []interface{}{func(x string, y interface{}) bool { return true }}, + wantPanic: "invalid comparer function", + }, { + label: "Comparer", + fnc: Comparer, + args: []interface{}{(func(int, int) bool)(nil)}, + wantPanic: "invalid comparer function", + }, { + label: "Transformer", + fnc: Transformer, + args: []interface{}{"", 0}, + wantPanic: "invalid transformer function", + }, { + label: "Transformer", + fnc: Transformer, + args: []interface{}{"", func(int) int { return 0 }}, + }, { + label: "Transformer", + fnc: Transformer, + args: []interface{}{"", func(bool) bool { return true }}, + }, { + label: "Transformer", + fnc: Transformer, + args: []interface{}{"", func(int) bool { return true }}, + }, { + label: "Transformer", + fnc: Transformer, + args: []interface{}{"", func(int, int) bool { return true }}, + wantPanic: "invalid transformer function", + }, { + label: "Transformer", + fnc: Transformer, + args: 
[]interface{}{"", (func(int) uint)(nil)}, + wantPanic: "invalid transformer function", + }, { + label: "Transformer", + fnc: Transformer, + args: []interface{}{"Func", func(Path) Path { return nil }}, + }, { + label: "Transformer", + fnc: Transformer, + args: []interface{}{"世界", func(int) bool { return true }}, + }, { + label: "Transformer", + fnc: Transformer, + args: []interface{}{"/*", func(int) bool { return true }}, + wantPanic: "invalid name", + }, { + label: "Transformer", + fnc: Transformer, + args: []interface{}{"_", func(int) bool { return true }}, + wantPanic: "invalid name", + }, { + label: "FilterPath", + fnc: FilterPath, + args: []interface{}{(func(Path) bool)(nil), Ignore()}, + wantPanic: "invalid path filter function", + }, { + label: "FilterPath", + fnc: FilterPath, + args: []interface{}{func(Path) bool { return true }, Ignore()}, + }, { + label: "FilterPath", + fnc: FilterPath, + args: []interface{}{func(Path) bool { return true }, &defaultReporter{}}, + wantPanic: "invalid option type", + }, { + label: "FilterPath", + fnc: FilterPath, + args: []interface{}{func(Path) bool { return true }, Options{Ignore(), Ignore()}}, + }, { + label: "FilterPath", + fnc: FilterPath, + args: []interface{}{func(Path) bool { return true }, Options{Ignore(), &defaultReporter{}}}, + wantPanic: "invalid option type", + }, { + label: "FilterValues", + fnc: FilterValues, + args: []interface{}{0, Ignore()}, + wantPanic: "invalid values filter function", + }, { + label: "FilterValues", + fnc: FilterValues, + args: []interface{}{func(x, y int) bool { return true }, Ignore()}, + }, { + label: "FilterValues", + fnc: FilterValues, + args: []interface{}{func(x, y interface{}) bool { return true }, Ignore()}, + }, { + label: "FilterValues", + fnc: FilterValues, + args: []interface{}{func(x, y interface{}) myBool { return true }, Ignore()}, + wantPanic: "invalid values filter function", + }, { + label: "FilterValues", + fnc: FilterValues, + args: []interface{}{func(x io.Reader, y interface{}) bool { return true }, Ignore()}, + wantPanic: "invalid values filter function", + }, { + label: "FilterValues", + fnc: FilterValues, + args: []interface{}{(func(int, int) bool)(nil), Ignore()}, + wantPanic: "invalid values filter function", + }, { + label: "FilterValues", + fnc: FilterValues, + args: []interface{}{func(int, int) bool { return true }, &defaultReporter{}}, + wantPanic: "invalid option type", + }, { + label: "FilterValues", + fnc: FilterValues, + args: []interface{}{func(int, int) bool { return true }, Options{Ignore(), Ignore()}}, + }, { + label: "FilterValues", + fnc: FilterValues, + args: []interface{}{func(int, int) bool { return true }, Options{Ignore(), &defaultReporter{}}}, + wantPanic: "invalid option type", + }} + + for _, tt := range tests { + tRun(t, tt.label, func(t *testing.T) { + var gotPanic string + func() { + defer func() { + if ex := recover(); ex != nil { + if s, ok := ex.(string); ok { + gotPanic = s + } else { + panic(ex) + } + } + }() + var vargs []reflect.Value + for _, arg := range tt.args { + vargs = append(vargs, reflect.ValueOf(arg)) + } + reflect.ValueOf(tt.fnc).Call(vargs) + }() + if tt.wantPanic == "" { + if gotPanic != "" { + t.Fatalf("unexpected panic message: %s", gotPanic) + } + } else { + if !strings.Contains(gotPanic, tt.wantPanic) { + t.Fatalf("panic message:\ngot: %s\nwant: %s", gotPanic, tt.wantPanic) + } + } + }) + } +} + +// TODO: Delete this hack when we drop Go1.6 support. 
+func tRun(t *testing.T, name string, f func(t *testing.T)) { + type runner interface { + Run(string, func(t *testing.T)) bool + } + var ti interface{} = t + if r, ok := ti.(runner); ok { + r.Run(name, f) + } else { + t.Logf("Test: %s", name) + f(t) + } +} diff --git a/vendor/github.com/google/go-cmp/cmp/path.go b/vendor/github.com/google/go-cmp/cmp/path.go new file mode 100644 index 000000000..0c2eb333f --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/path.go @@ -0,0 +1,293 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +import ( + "fmt" + "reflect" + "strings" + "unicode" + "unicode/utf8" +) + +type ( + // Path is a list of PathSteps describing the sequence of operations to get + // from some root type to the current position in the value tree. + // The first Path element is always an operation-less PathStep that exists + // simply to identify the initial type. + // + // When traversing structs with embedded structs, the embedded struct will + // always be accessed as a field before traversing the fields of the + // embedded struct themselves. That is, an exported field from the + // embedded struct will never be accessed directly from the parent struct. + Path []PathStep + + // PathStep is a union-type for specific operations to traverse + // a value's tree structure. Users of this package never need to implement + // these types as values of this type will be returned by this package. + PathStep interface { + String() string + Type() reflect.Type // Resulting type after performing the path step + isPathStep() + } + + // SliceIndex is an index operation on a slice or array at some index Key. + SliceIndex interface { + PathStep + Key() int // May return -1 if in a split state + + // SplitKeys returns the indexes for indexing into slices in the + // x and y values, respectively. These indexes may differ due to the + // insertion or removal of an element in one of the slices, causing + // all of the indexes to be shifted. If an index is -1, then that + // indicates that the element does not exist in the associated slice. + // + // Key is guaranteed to return -1 if and only if the indexes returned + // by SplitKeys are not the same. SplitKeys will never return -1 for + // both indexes. + SplitKeys() (x int, y int) + + isSliceIndex() + } + // MapIndex is an index operation on a map at some index Key. + MapIndex interface { + PathStep + Key() reflect.Value + isMapIndex() + } + // TypeAssertion represents a type assertion on an interface. + TypeAssertion interface { + PathStep + isTypeAssertion() + } + // StructField represents a struct field access on a field called Name. + StructField interface { + PathStep + Name() string + Index() int + isStructField() + } + // Indirect represents pointer indirection on the parent type. + Indirect interface { + PathStep + isIndirect() + } + // Transform is a transformation from the parent type to the current type. + Transform interface { + PathStep + Name() string + Func() reflect.Value + isTransform() + } +) + +func (pa *Path) push(s PathStep) { + *pa = append(*pa, s) +} + +func (pa *Path) pop() { + *pa = (*pa)[:len(*pa)-1] +} + +// Last returns the last PathStep in the Path. +// If the path is empty, this returns a non-nil PathStep that reports a nil Type. +func (pa Path) Last() PathStep { + if len(pa) > 0 { + return pa[len(pa)-1] + } + return pathStep{} +} + +// String returns the simplified path to a node. 
+// The simplified path only contains struct field accesses. +// +// For example: +// MyMap.MySlices.MyField +func (pa Path) String() string { + var ss []string + for _, s := range pa { + if _, ok := s.(*structField); ok { + ss = append(ss, s.String()) + } + } + return strings.TrimPrefix(strings.Join(ss, ""), ".") +} + +// GoString returns the path to a specific node using Go syntax. +// +// For example: +// (*root.MyMap["key"].(*mypkg.MyStruct).MySlices)[2][3].MyField +func (pa Path) GoString() string { + var ssPre, ssPost []string + var numIndirect int + for i, s := range pa { + var nextStep PathStep + if i+1 < len(pa) { + nextStep = pa[i+1] + } + switch s := s.(type) { + case *indirect: + numIndirect++ + pPre, pPost := "(", ")" + switch nextStep.(type) { + case *indirect: + continue // Next step is indirection, so let them batch up + case *structField: + numIndirect-- // Automatic indirection on struct fields + case nil: + pPre, pPost = "", "" // Last step; no need for parenthesis + } + if numIndirect > 0 { + ssPre = append(ssPre, pPre+strings.Repeat("*", numIndirect)) + ssPost = append(ssPost, pPost) + } + numIndirect = 0 + continue + case *transform: + ssPre = append(ssPre, s.trans.name+"(") + ssPost = append(ssPost, ")") + continue + case *typeAssertion: + // Elide type assertions immediately following a transform to + // prevent overly verbose path printouts. + // Some transforms return interface{} because of Go's lack of + // generics, but typically take in and return the exact same + // concrete type. Other times, the transform creates an anonymous + // struct, which will be very verbose to print. + if _, ok := nextStep.(*transform); ok { + continue + } + } + ssPost = append(ssPost, s.String()) + } + for i, j := 0, len(ssPre)-1; i < j; i, j = i+1, j-1 { + ssPre[i], ssPre[j] = ssPre[j], ssPre[i] + } + return strings.Join(ssPre, "") + strings.Join(ssPost, "") +} + +type ( + pathStep struct { + typ reflect.Type + } + + sliceIndex struct { + pathStep + xkey, ykey int + } + mapIndex struct { + pathStep + key reflect.Value + } + typeAssertion struct { + pathStep + } + structField struct { + pathStep + name string + idx int + + // These fields are used for forcibly accessing an unexported field. + // pvx, pvy, and field are only valid if unexported is true. + unexported bool + force bool // Forcibly allow visibility + pvx, pvy reflect.Value // Parent values + field reflect.StructField // Field information + } + indirect struct { + pathStep + } + transform struct { + pathStep + trans *transformer + } +) + +func (ps pathStep) Type() reflect.Type { return ps.typ } +func (ps pathStep) String() string { + if ps.typ == nil { + return "" + } + s := ps.typ.String() + if s == "" || strings.ContainsAny(s, "{}\n") { + return "root" // Type too simple or complex to print + } + return fmt.Sprintf("{%s}", s) +} + +func (si sliceIndex) String() string { + switch { + case si.xkey == si.ykey: + return fmt.Sprintf("[%d]", si.xkey) + case si.ykey == -1: + // [5->?] 
means "I don't know where X[5] went" + return fmt.Sprintf("[%d->?]", si.xkey) + case si.xkey == -1: + // [?->3] means "I don't know where Y[3] came from" + return fmt.Sprintf("[?->%d]", si.ykey) + default: + // [5->3] means "X[5] moved to Y[3]" + return fmt.Sprintf("[%d->%d]", si.xkey, si.ykey) + } +} +func (mi mapIndex) String() string { return fmt.Sprintf("[%#v]", mi.key) } +func (ta typeAssertion) String() string { return fmt.Sprintf(".(%v)", ta.typ) } +func (sf structField) String() string { return fmt.Sprintf(".%s", sf.name) } +func (in indirect) String() string { return "*" } +func (tf transform) String() string { return fmt.Sprintf("%s()", tf.trans.name) } + +func (si sliceIndex) Key() int { + if si.xkey != si.ykey { + return -1 + } + return si.xkey +} +func (si sliceIndex) SplitKeys() (x, y int) { return si.xkey, si.ykey } +func (mi mapIndex) Key() reflect.Value { return mi.key } +func (sf structField) Name() string { return sf.name } +func (sf structField) Index() int { return sf.idx } +func (tf transform) Name() string { return tf.trans.name } +func (tf transform) Func() reflect.Value { return tf.trans.fnc } + +func (pathStep) isPathStep() {} +func (sliceIndex) isSliceIndex() {} +func (mapIndex) isMapIndex() {} +func (typeAssertion) isTypeAssertion() {} +func (structField) isStructField() {} +func (indirect) isIndirect() {} +func (transform) isTransform() {} + +var ( + _ SliceIndex = sliceIndex{} + _ MapIndex = mapIndex{} + _ TypeAssertion = typeAssertion{} + _ StructField = structField{} + _ Indirect = indirect{} + _ Transform = transform{} + + _ PathStep = sliceIndex{} + _ PathStep = mapIndex{} + _ PathStep = typeAssertion{} + _ PathStep = structField{} + _ PathStep = indirect{} + _ PathStep = transform{} +) + +// isExported reports whether the identifier is exported. +func isExported(id string) bool { + r, _ := utf8.DecodeRuneInString(id) + return unicode.IsUpper(r) +} + +// isValid reports whether the identifier is valid. +// Empty and underscore-only strings are not valid. +func isValid(id string) bool { + ok := id != "" && id != "_" + for j, c := range id { + ok = ok && (j > 0 || !unicode.IsDigit(c)) + ok = ok && (c == '_' || unicode.IsLetter(c) || unicode.IsDigit(c)) + } + return ok +} diff --git a/vendor/github.com/google/go-cmp/cmp/reporter.go b/vendor/github.com/google/go-cmp/cmp/reporter.go new file mode 100644 index 000000000..a21d0cded --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/reporter.go @@ -0,0 +1,53 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +import ( + "fmt" + "reflect" + "strings" + + "github.com/google/go-cmp/cmp/internal/value" +) + +type defaultReporter struct { + Option + diffs []string // List of differences, possibly truncated + ndiffs int // Total number of differences + nbytes int // Number of bytes in diffs + nlines int // Number of lines in diffs +} + +var _ reporter = (*defaultReporter)(nil) + +func (r *defaultReporter) Report(x, y reflect.Value, eq bool, p Path) { + if eq { + return // Ignore equal results + } + const maxBytes = 4096 + const maxLines = 256 + r.ndiffs++ + if r.nbytes < maxBytes && r.nlines < maxLines { + sx := value.Format(x, true) + sy := value.Format(y, true) + if sx == sy { + // Stringer is not helpful, so rely on more exact formatting. 
+			sx = value.Format(x, false)
+			sy = value.Format(y, false)
+		}
+		s := fmt.Sprintf("%#v:\n\t-: %s\n\t+: %s\n", p, sx, sy)
+		r.diffs = append(r.diffs, s)
+		r.nbytes += len(s)
+		r.nlines += strings.Count(s, "\n")
+	}
+}
+
+func (r *defaultReporter) String() string {
+	s := strings.Join(r.diffs, "")
+	if r.ndiffs == len(r.diffs) {
+		return s
+	}
+	return fmt.Sprintf("%s... %d more differences ...", s, r.ndiffs-len(r.diffs))
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/unsafe_panic.go b/vendor/github.com/google/go-cmp/cmp/unsafe_panic.go
new file mode 100644
index 000000000..0d44987f5
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/unsafe_panic.go
@@ -0,0 +1,15 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+// +build appengine js
+
+package cmp
+
+import "reflect"
+
+const supportAllowUnexported = false
+
+func unsafeRetrieveField(reflect.Value, reflect.StructField) reflect.Value {
+	panic("unsafeRetrieveField is not implemented")
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/unsafe_reflect.go b/vendor/github.com/google/go-cmp/cmp/unsafe_reflect.go
new file mode 100644
index 000000000..81fb82632
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/unsafe_reflect.go
@@ -0,0 +1,23 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+// +build !appengine,!js
+
+package cmp
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+const supportAllowUnexported = true
+
+// unsafeRetrieveField uses unsafe to forcibly retrieve any field from a struct
+// such that the value has read-write permissions.
+//
+// The parent struct, v, must be addressable, while f must be a StructField
+// describing the field to retrieve.
+func unsafeRetrieveField(v reflect.Value, f reflect.StructField) reflect.Value {
+	return reflect.NewAt(f.Type, unsafe.Pointer(v.UnsafeAddr()+f.Offset)).Elem()
+}
diff --git a/vendor/github.com/mattn/go-isatty/.travis.yml b/vendor/github.com/mattn/go-isatty/.travis.yml
new file mode 100644
index 000000000..b9f8b239c
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/.travis.yml
@@ -0,0 +1,9 @@
+language: go
+go:
+  - tip
+
+before_install:
+  - go get github.com/mattn/goveralls
+  - go get golang.org/x/tools/cmd/cover
+script:
+  - $HOME/gopath/bin/goveralls -repotoken 3gHdORO5k5ziZcWMBxnd9LrMZaJs8m9x5
diff --git a/vendor/github.com/mattn/go-isatty/LICENSE b/vendor/github.com/mattn/go-isatty/LICENSE
new file mode 100644
index 000000000..65dc692b6
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/LICENSE
@@ -0,0 +1,9 @@
+Copyright (c) Yasuhiro MATSUMOTO
+
+MIT License (Expat)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/mattn/go-isatty/README.md b/vendor/github.com/mattn/go-isatty/README.md
new file mode 100644
index 000000000..1e69004bb
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/README.md
@@ -0,0 +1,50 @@
+# go-isatty
+
+[![Godoc Reference](https://godoc.org/github.com/mattn/go-isatty?status.svg)](http://godoc.org/github.com/mattn/go-isatty)
+[![Build Status](https://travis-ci.org/mattn/go-isatty.svg?branch=master)](https://travis-ci.org/mattn/go-isatty)
+[![Coverage Status](https://coveralls.io/repos/github/mattn/go-isatty/badge.svg?branch=master)](https://coveralls.io/github/mattn/go-isatty?branch=master)
+[![Go Report Card](https://goreportcard.com/badge/mattn/go-isatty)](https://goreportcard.com/report/mattn/go-isatty)
+
+isatty for golang
+
+## Usage
+
+```go
+package main
+
+import (
+	"fmt"
+	"github.com/mattn/go-isatty"
+	"os"
+)
+
+func main() {
+	if isatty.IsTerminal(os.Stdout.Fd()) {
+		fmt.Println("Is Terminal")
+	} else if isatty.IsCygwinTerminal(os.Stdout.Fd()) {
+		fmt.Println("Is Cygwin/MSYS2 Terminal")
+	} else {
+		fmt.Println("Is Not Terminal")
+	}
+}
+```
+
+## Installation
+
+```
+$ go get github.com/mattn/go-isatty
+```
+
+## License
+
+MIT
+
+## Author
+
+Yasuhiro Matsumoto (a.k.a mattn)
+
+## Thanks
+
+* k-takata: base idea for IsCygwinTerminal
+
+  https://github.com/k-takata/go-iscygpty
diff --git a/vendor/github.com/mattn/go-isatty/doc.go b/vendor/github.com/mattn/go-isatty/doc.go
new file mode 100644
index 000000000..17d4f90eb
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/doc.go
@@ -0,0 +1,2 @@
+// Package isatty implements interface to isatty
+package isatty
diff --git a/vendor/github.com/mattn/go-isatty/example_test.go b/vendor/github.com/mattn/go-isatty/example_test.go
new file mode 100644
index 000000000..fa8f7e745
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/example_test.go
@@ -0,0 +1,18 @@
+package isatty_test
+
+import (
+	"fmt"
+	"os"
+
+	"github.com/mattn/go-isatty"
+)
+
+func Example() {
+	if isatty.IsTerminal(os.Stdout.Fd()) {
+		fmt.Println("Is Terminal")
+	} else if isatty.IsCygwinTerminal(os.Stdout.Fd()) {
+		fmt.Println("Is Cygwin/MSYS2 Terminal")
+	} else {
+		fmt.Println("Is Not Terminal")
+	}
+}
diff --git a/vendor/github.com/mattn/go-isatty/isatty_appengine.go b/vendor/github.com/mattn/go-isatty/isatty_appengine.go
new file mode 100644
index 000000000..9584a9884
--- /dev/null
+++ b/vendor/github.com/mattn/go-isatty/isatty_appengine.go
@@ -0,0 +1,15 @@
+// +build appengine
+
+package isatty
+
+// IsTerminal returns true if the file descriptor is terminal which
+// is always false on appengine classic which is a sandboxed PaaS.
+func IsTerminal(fd uintptr) bool {
+	return false
+}
+
+// IsCygwinTerminal() return true if the file descriptor is a cygwin or msys2
+// terminal. This is also always false on this environment.
+func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_bsd.go b/vendor/github.com/mattn/go-isatty/isatty_bsd.go new file mode 100644 index 000000000..42f2514d1 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_bsd.go @@ -0,0 +1,18 @@ +// +build darwin freebsd openbsd netbsd dragonfly +// +build !appengine + +package isatty + +import ( + "syscall" + "unsafe" +) + +const ioctlReadTermios = syscall.TIOCGETA + +// IsTerminal return true if the file descriptor is terminal. +func IsTerminal(fd uintptr) bool { + var termios syscall.Termios + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) + return err == 0 +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_linux.go b/vendor/github.com/mattn/go-isatty/isatty_linux.go new file mode 100644 index 000000000..7384cf991 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_linux.go @@ -0,0 +1,18 @@ +// +build linux +// +build !appengine,!ppc64,!ppc64le + +package isatty + +import ( + "syscall" + "unsafe" +) + +const ioctlReadTermios = syscall.TCGETS + +// IsTerminal return true if the file descriptor is terminal. +func IsTerminal(fd uintptr) bool { + var termios syscall.Termios + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) + return err == 0 +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_linux_ppc64x.go b/vendor/github.com/mattn/go-isatty/isatty_linux_ppc64x.go new file mode 100644 index 000000000..44e5d2130 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_linux_ppc64x.go @@ -0,0 +1,19 @@ +// +build linux +// +build ppc64 ppc64le + +package isatty + +import ( + "unsafe" + + syscall "golang.org/x/sys/unix" +) + +const ioctlReadTermios = syscall.TCGETS + +// IsTerminal return true if the file descriptor is terminal. +func IsTerminal(fd uintptr) bool { + var termios syscall.Termios + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) + return err == 0 +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_others.go b/vendor/github.com/mattn/go-isatty/isatty_others.go new file mode 100644 index 000000000..ff4de3d9a --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_others.go @@ -0,0 +1,10 @@ +// +build !windows +// +build !appengine + +package isatty + +// IsCygwinTerminal() return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. 
+func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_others_test.go b/vendor/github.com/mattn/go-isatty/isatty_others_test.go new file mode 100644 index 000000000..a2091cf47 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_others_test.go @@ -0,0 +1,19 @@ +// +build !windows + +package isatty + +import ( + "os" + "testing" +) + +func TestTerminal(t *testing.T) { + // test for non-panic + IsTerminal(os.Stdout.Fd()) +} + +func TestCygwinPipeName(t *testing.T) { + if IsCygwinTerminal(os.Stdout.Fd()) { + t.Fatal("should be false always") + } +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_solaris.go b/vendor/github.com/mattn/go-isatty/isatty_solaris.go new file mode 100644 index 000000000..1f0c6bf53 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_solaris.go @@ -0,0 +1,16 @@ +// +build solaris +// +build !appengine + +package isatty + +import ( + "golang.org/x/sys/unix" +) + +// IsTerminal returns true if the given file descriptor is a terminal. +// see: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libbc/libc/gen/common/isatty.c +func IsTerminal(fd uintptr) bool { + var termio unix.Termio + err := unix.IoctlSetTermio(int(fd), unix.TCGETA, &termio) + return err == nil +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_windows.go b/vendor/github.com/mattn/go-isatty/isatty_windows.go new file mode 100644 index 000000000..af51cbcaa --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_windows.go @@ -0,0 +1,94 @@ +// +build windows +// +build !appengine + +package isatty + +import ( + "strings" + "syscall" + "unicode/utf16" + "unsafe" +) + +const ( + fileNameInfo uintptr = 2 + fileTypePipe = 3 +) + +var ( + kernel32 = syscall.NewLazyDLL("kernel32.dll") + procGetConsoleMode = kernel32.NewProc("GetConsoleMode") + procGetFileInformationByHandleEx = kernel32.NewProc("GetFileInformationByHandleEx") + procGetFileType = kernel32.NewProc("GetFileType") +) + +func init() { + // Check if GetFileInformationByHandleEx is available. + if procGetFileInformationByHandleEx.Find() != nil { + procGetFileInformationByHandleEx = nil + } +} + +// IsTerminal return true if the file descriptor is terminal. +func IsTerminal(fd uintptr) bool { + var st uint32 + r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0) + return r != 0 && e == 0 +} + +// Check pipe name is used for cygwin/msys2 pty. +// Cygwin/MSYS2 PTY has a name like: +// \{cygwin,msys}-XXXXXXXXXXXXXXXX-ptyN-{from,to}-master +func isCygwinPipeName(name string) bool { + token := strings.Split(name, "-") + if len(token) < 5 { + return false + } + + if token[0] != `\msys` && token[0] != `\cygwin` { + return false + } + + if token[1] == "" { + return false + } + + if !strings.HasPrefix(token[2], "pty") { + return false + } + + if token[3] != `from` && token[3] != `to` { + return false + } + + if token[4] != "master" { + return false + } + + return true +} + +// IsCygwinTerminal() return true if the file descriptor is a cygwin or msys2 +// terminal. +func IsCygwinTerminal(fd uintptr) bool { + if procGetFileInformationByHandleEx == nil { + return false + } + + // Cygwin/msys's pty is a pipe. 
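+	// The checks below first verify that the handle is a pipe (GetFileType),
+	// then obtain the pipe's name via GetFileInformationByHandleEx and match
+	// it against the Cygwin/MSYS2 pty naming convention checked above.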
+ ft, _, e := syscall.Syscall(procGetFileType.Addr(), 1, fd, 0, 0) + if ft != fileTypePipe || e != 0 { + return false + } + + var buf [2 + syscall.MAX_PATH]uint16 + r, _, e := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), + 4, fd, fileNameInfo, uintptr(unsafe.Pointer(&buf)), + uintptr(len(buf)*2), 0, 0) + if r == 0 || e != 0 { + return false + } + + l := *(*uint32)(unsafe.Pointer(&buf)) + return isCygwinPipeName(string(utf16.Decode(buf[2 : 2+l/2]))) +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_windows_test.go b/vendor/github.com/mattn/go-isatty/isatty_windows_test.go new file mode 100644 index 000000000..777e8a603 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_windows_test.go @@ -0,0 +1,35 @@ +// +build windows + +package isatty + +import ( + "testing" +) + +func TestCygwinPipeName(t *testing.T) { + tests := []struct { + name string + result bool + }{ + {``, false}, + {`\msys-`, false}, + {`\cygwin-----`, false}, + {`\msys-x-PTY5-pty1-from-master`, false}, + {`\cygwin-x-PTY5-from-master`, false}, + {`\cygwin-x-pty2-from-toaster`, false}, + {`\cygwin--pty2-from-master`, false}, + {`\\cygwin-x-pty2-from-master`, false}, + {`\cygwin-x-pty2-from-master-`, true}, // for the feature + {`\cygwin-e022582115c10879-pty4-from-master`, true}, + {`\msys-e022582115c10879-pty4-to-master`, true}, + {`\cygwin-e022582115c10879-pty4-to-master`, true}, + } + + for _, test := range tests { + want := test.result + got := isCygwinPipeName(test.name) + if want != got { + t.Fatalf("isatty(%q): got %v, want %v:", test.name, got, want) + } + } +} diff --git a/vendor/gopkg.in/tomb.v2/LICENSE b/vendor/gopkg.in/tomb.v2/LICENSE new file mode 100644 index 000000000..a4249bb31 --- /dev/null +++ b/vendor/gopkg.in/tomb.v2/LICENSE @@ -0,0 +1,29 @@ +tomb - support for clean goroutine termination in Go. + +Copyright (c) 2010-2011 - Gustavo Niemeyer + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR +CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/gopkg.in/tomb.v2/README.md b/vendor/gopkg.in/tomb.v2/README.md new file mode 100644 index 000000000..e7f282b5a --- /dev/null +++ b/vendor/gopkg.in/tomb.v2/README.md @@ -0,0 +1,4 @@ +Installation and usage +---------------------- + +See [gopkg.in/tomb.v2](https://gopkg.in/tomb.v2) for documentation and usage details. diff --git a/vendor/gopkg.in/tomb.v2/context.go b/vendor/gopkg.in/tomb.v2/context.go new file mode 100644 index 000000000..f0fe56f5c --- /dev/null +++ b/vendor/gopkg.in/tomb.v2/context.go @@ -0,0 +1,74 @@ +// +build go1.7 + +package tomb + +import ( + "context" +) + +// WithContext returns a new tomb that is killed when the provided parent +// context is canceled, and a copy of parent with a replaced Done channel +// that is closed when either the tomb is dying or the parent is canceled. +// The returned context may also be obtained via the tomb's Context method. +func WithContext(parent context.Context) (*Tomb, context.Context) { + var t Tomb + t.init() + if parent.Done() != nil { + go func() { + select { + case <-t.Dying(): + case <-parent.Done(): + t.Kill(parent.Err()) + } + }() + } + t.parent = parent + child, cancel := context.WithCancel(parent) + t.addChild(parent, child, cancel) + return &t, child +} + +// Context returns a context that is a copy of the provided parent context with +// a replaced Done channel that is closed when either the tomb is dying or the +// parent is cancelled. +// +// If parent is nil, it defaults to the parent provided via WithContext, or an +// empty background parent if the tomb wasn't created via WithContext. +func (t *Tomb) Context(parent context.Context) context.Context { + t.init() + t.m.Lock() + defer t.m.Unlock() + + if parent == nil { + if t.parent == nil { + t.parent = context.Background() + } + parent = t.parent.(context.Context) + } + + if child, ok := t.child[parent]; ok { + return child.context.(context.Context) + } + + child, cancel := context.WithCancel(parent) + t.addChild(parent, child, cancel) + return child +} + +func (t *Tomb) addChild(parent context.Context, child context.Context, cancel func()) { + if t.reason != ErrStillAlive { + cancel() + return + } + if t.child == nil { + t.child = make(map[interface{}]childContext) + } + t.child[parent] = childContext{child, cancel, child.Done()} + for parent, child := range t.child { + select { + case <-child.done: + delete(t.child, parent) + default: + } + } +} diff --git a/vendor/gopkg.in/tomb.v2/context16.go b/vendor/gopkg.in/tomb.v2/context16.go new file mode 100644 index 000000000..d47d83a5a --- /dev/null +++ b/vendor/gopkg.in/tomb.v2/context16.go @@ -0,0 +1,74 @@ +// +build !go1.7 + +package tomb + +import ( + "golang.org/x/net/context" +) + +// WithContext returns a new tomb that is killed when the provided parent +// context is canceled, and a copy of parent with a replaced Done channel +// that is closed when either the tomb is dying or the parent is canceled. +// The returned context may also be obtained via the tomb's Context method. 
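+//
+// This file is the pre-Go1.7 variant of context.go: the implementation is
+// identical, but it imports golang.org/x/net/context in place of the
+// standard library's context package (see the build tags).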
+func WithContext(parent context.Context) (*Tomb, context.Context) { + var t Tomb + t.init() + if parent.Done() != nil { + go func() { + select { + case <-t.Dying(): + case <-parent.Done(): + t.Kill(parent.Err()) + } + }() + } + t.parent = parent + child, cancel := context.WithCancel(parent) + t.addChild(parent, child, cancel) + return &t, child +} + +// Context returns a context that is a copy of the provided parent context with +// a replaced Done channel that is closed when either the tomb is dying or the +// parent is cancelled. +// +// If parent is nil, it defaults to the parent provided via WithContext, or an +// empty background parent if the tomb wasn't created via WithContext. +func (t *Tomb) Context(parent context.Context) context.Context { + t.init() + t.m.Lock() + defer t.m.Unlock() + + if parent == nil { + if t.parent == nil { + t.parent = context.Background() + } + parent = t.parent.(context.Context) + } + + if child, ok := t.child[parent]; ok { + return child.context.(context.Context) + } + + child, cancel := context.WithCancel(parent) + t.addChild(parent, child, cancel) + return child +} + +func (t *Tomb) addChild(parent context.Context, child context.Context, cancel func()) { + if t.reason != ErrStillAlive { + cancel() + return + } + if t.child == nil { + t.child = make(map[interface{}]childContext) + } + t.child[parent] = childContext{child, cancel, child.Done()} + for parent, child := range t.child { + select { + case <-child.done: + delete(t.child, parent) + default: + } + } +} diff --git a/vendor/gopkg.in/tomb.v2/context16_test.go b/vendor/gopkg.in/tomb.v2/context16_test.go new file mode 100644 index 000000000..ad155f37d --- /dev/null +++ b/vendor/gopkg.in/tomb.v2/context16_test.go @@ -0,0 +1,177 @@ +// +build !go1.7 + +package tomb_test + +import ( + "testing" + "time" + + "golang.org/x/net/context" + + "gopkg.in/tomb.v2" +) + +func TestWithContext(t *testing.T) { + parent1, cancel1 := context.WithCancel(context.Background()) + + tb, child1 := tomb.WithContext(parent1) + + if !tb.Alive() { + t.Fatalf("WithContext returned dead tomb") + } + if tb.Context(parent1) != child1 { + t.Fatalf("Context returned different context for same parent") + } + if tb.Context(nil) != child1 { + t.Fatalf("Context returned different context for nil parent") + } + select { + case <-child1.Done(): + t.Fatalf("Tomb's child context was born dead") + default: + } + + parent2, cancel2 := context.WithCancel(context.WithValue(context.Background(), "parent", "parent2")) + child2 := tb.Context(parent2) + + if tb.Context(parent2) != child2 { + t.Fatalf("Context returned different context for same parent") + } + if child2.Value("parent") != "parent2" { + t.Fatalf("Child context didn't inherit its parent's properties") + } + select { + case <-child2.Done(): + t.Fatalf("Tomb's child context was born dead") + default: + } + + cancel2() + + select { + case <-child2.Done(): + case <-time.After(5 * time.Second): + t.Fatalf("Tomb's child context didn't die after parent was canceled") + } + if !tb.Alive() { + t.Fatalf("Canceling unrelated parent context killed tomb") + } + + parent3 := context.WithValue(context.Background(), "parent", "parent3") + child3 := tb.Context(parent3) + + if child3.Value("parent") != "parent3" { + t.Fatalf("Child context didn't inherit its parent's properties") + } + select { + case <-child3.Done(): + t.Fatalf("Tomb's child context was born dead") + default: + } + + cancel1() + + select { + case <-tb.Dying(): + case <-time.After(5 * time.Second): + t.Fatalf("Canceling parent context 
did not kill tomb") + } + + if tb.Err() != context.Canceled { + t.Fatalf("tomb should be %v, got %v", context.Canceled, tb.Err()) + } + + if tb.Context(parent1) == child1 || tb.Context(parent3) == child3 { + t.Fatalf("Tomb is dead and shouldn't be tracking children anymore") + } + select { + case <-child3.Done(): + case <-time.After(5 * time.Second): + t.Fatalf("Child context didn't die after tomb's death") + } + + parent4 := context.WithValue(context.Background(), "parent", "parent4") + child4 := tb.Context(parent4) + + select { + case <-child4.Done(): + case <-time.After(5 * time.Second): + t.Fatalf("Child context should be born canceled") + } + + childnil := tb.Context(nil) + select { + case <-childnil.Done(): + default: + t.Fatalf("Child context should be born canceled") + } +} + +func TestContextNoParent(t *testing.T) { + var tb tomb.Tomb + + parent2, cancel2 := context.WithCancel(context.WithValue(context.Background(), "parent", "parent2")) + child2 := tb.Context(parent2) + + if tb.Context(parent2) != child2 { + t.Fatalf("Context returned different context for same parent") + } + if child2.Value("parent") != "parent2" { + t.Fatalf("Child context didn't inherit its parent's properties") + } + select { + case <-child2.Done(): + t.Fatalf("Tomb's child context was born dead") + default: + } + + cancel2() + + select { + case <-child2.Done(): + default: + t.Fatalf("Tomb's child context didn't die after parent was canceled") + } + if !tb.Alive() { + t.Fatalf("Canceling unrelated parent context killed tomb") + } + + parent3 := context.WithValue(context.Background(), "parent", "parent3") + child3 := tb.Context(parent3) + + if child3.Value("parent") != "parent3" { + t.Fatalf("Child context didn't inherit its parent's properties") + } + select { + case <-child3.Done(): + t.Fatalf("Tomb's child context was born dead") + default: + } + + tb.Kill(nil) + + if tb.Context(parent3) == child3 { + t.Fatalf("Tomb is dead and shouldn't be tracking children anymore") + } + select { + case <-child3.Done(): + default: + t.Fatalf("Child context didn't die after tomb's death") + } + + parent4 := context.WithValue(context.Background(), "parent", "parent4") + child4 := tb.Context(parent4) + + select { + case <-child4.Done(): + default: + t.Fatalf("Child context should be born canceled") + } + + childnil := tb.Context(nil) + select { + case <-childnil.Done(): + default: + t.Fatalf("Child context should be born canceled") + } +} diff --git a/vendor/gopkg.in/tomb.v2/context_test.go b/vendor/gopkg.in/tomb.v2/context_test.go new file mode 100644 index 000000000..537548386 --- /dev/null +++ b/vendor/gopkg.in/tomb.v2/context_test.go @@ -0,0 +1,176 @@ +// +build go1.7 + +package tomb_test + +import ( + "context" + "testing" + "time" + + "gopkg.in/tomb.v2" +) + +func TestWithContext(t *testing.T) { + parent1, cancel1 := context.WithCancel(context.Background()) + + tb, child1 := tomb.WithContext(parent1) + + if !tb.Alive() { + t.Fatalf("WithContext returned dead tomb") + } + if tb.Context(parent1) != child1 { + t.Fatalf("Context returned different context for same parent") + } + if tb.Context(nil) != child1 { + t.Fatalf("Context returned different context for nil parent") + } + select { + case <-child1.Done(): + t.Fatalf("Tomb's child context was born dead") + default: + } + + parent2, cancel2 := context.WithCancel(context.WithValue(context.Background(), "parent", "parent2")) + child2 := tb.Context(parent2) + + if tb.Context(parent2) != child2 { + t.Fatalf("Context returned different context for same parent") + } + if 
child2.Value("parent") != "parent2" { + t.Fatalf("Child context didn't inherit its parent's properties") + } + select { + case <-child2.Done(): + t.Fatalf("Tomb's child context was born dead") + default: + } + + cancel2() + + select { + case <-child2.Done(): + default: + t.Fatalf("Tomb's child context didn't die after parent was canceled") + } + if !tb.Alive() { + t.Fatalf("Canceling unrelated parent context killed tomb") + } + + parent3 := context.WithValue(context.Background(), "parent", "parent3") + child3 := tb.Context(parent3) + + if child3.Value("parent") != "parent3" { + t.Fatalf("Child context didn't inherit its parent's properties") + } + select { + case <-child3.Done(): + t.Fatalf("Tomb's child context was born dead") + default: + } + + cancel1() + + select { + case <-tb.Dying(): + case <-time.After(5 * time.Second): + t.Fatalf("Canceling parent context did not kill tomb") + } + + if tb.Err() != context.Canceled { + t.Fatalf("tomb should be %v, got %v", context.Canceled, tb.Err()) + } + + if tb.Context(parent1) == child1 || tb.Context(parent3) == child3 { + t.Fatalf("Tomb is dead and shouldn't be tracking children anymore") + } + select { + case <-child3.Done(): + default: + t.Fatalf("Child context didn't die after tomb's death") + } + + parent4 := context.WithValue(context.Background(), "parent", "parent4") + child4 := tb.Context(parent4) + + select { + case <-child4.Done(): + default: + t.Fatalf("Child context should be born canceled") + } + + childnil := tb.Context(nil) + select { + case <-childnil.Done(): + default: + t.Fatalf("Child context should be born canceled") + } +} + +func TestContextNoParent(t *testing.T) { + var tb tomb.Tomb + + parent2, cancel2 := context.WithCancel(context.WithValue(context.Background(), "parent", "parent2")) + child2 := tb.Context(parent2) + + if tb.Context(parent2) != child2 { + t.Fatalf("Context returned different context for same parent") + } + if child2.Value("parent") != "parent2" { + t.Fatalf("Child context didn't inherit its parent's properties") + } + select { + case <-child2.Done(): + t.Fatalf("Tomb's child context was born dead") + default: + } + + cancel2() + + select { + case <-child2.Done(): + default: + t.Fatalf("Tomb's child context didn't die after parent was canceled") + } + if !tb.Alive() { + t.Fatalf("Canceling unrelated parent context killed tomb") + } + + parent3 := context.WithValue(context.Background(), "parent", "parent3") + child3 := tb.Context(parent3) + + if child3.Value("parent") != "parent3" { + t.Fatalf("Child context didn't inherit its parent's properties") + } + select { + case <-child3.Done(): + t.Fatalf("Tomb's child context was born dead") + default: + } + + tb.Kill(nil) + + if tb.Context(parent3) == child3 { + t.Fatalf("Tomb is dead and shouldn't be tracking children anymore") + } + select { + case <-child3.Done(): + default: + t.Fatalf("Child context didn't die after tomb's death") + } + + parent4 := context.WithValue(context.Background(), "parent", "parent4") + child4 := tb.Context(parent4) + + select { + case <-child4.Done(): + default: + t.Fatalf("Child context should be born canceled") + } + + childnil := tb.Context(nil) + select { + case <-childnil.Done(): + default: + t.Fatalf("Child context should be born canceled") + } +} diff --git a/vendor/gopkg.in/tomb.v2/tomb.go b/vendor/gopkg.in/tomb.v2/tomb.go new file mode 100644 index 000000000..069b3058b --- /dev/null +++ b/vendor/gopkg.in/tomb.v2/tomb.go @@ -0,0 +1,237 @@ +// Copyright (c) 2011 - Gustavo Niemeyer +// +// All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of the copyright holder nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR +// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// The tomb package handles clean goroutine tracking and termination. +// +// The zero value of a Tomb is ready to handle the creation of a tracked +// goroutine via its Go method, and then any tracked goroutine may call +// the Go method again to create additional tracked goroutines at +// any point. +// +// If any of the tracked goroutines returns a non-nil error, or the +// Kill or Killf method is called by any goroutine in the system (tracked +// or not), the tomb Err is set, Alive is set to false, and the Dying +// channel is closed to flag that all tracked goroutines are supposed +// to willingly terminate as soon as possible. +// +// Once all tracked goroutines terminate, the Dead channel is closed, +// and Wait unblocks and returns the first non-nil error presented +// to the tomb via a result or an explicit Kill or Killf method call, +// or nil if there were no errors. +// +// It is okay to create further goroutines via the Go method while +// the tomb is in a dying state. The final dead state is only reached +// once all tracked goroutines terminate, at which point calling +// the Go method again will cause a runtime panic. +// +// Tracked functions and methods that are still running while the tomb +// is in dying state may choose to return ErrDying as their error value. +// This preserves the well established non-nil error convention, but is +// understood by the tomb as a clean termination. The Err and Wait +// methods will still return nil if all observed errors were either +// nil or ErrDying. +// +// For background and a detailed example, see the following blog post: +// +// http://blog.labix.org/2011/10/09/death-of-goroutines-under-control +// +package tomb + +import ( + "errors" + "fmt" + "sync" +) + +// A Tomb tracks the lifecycle of one or more goroutines as alive, +// dying or dead, and the reason for their death. +// +// See the package documentation for details. 
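+//
+// A minimal usage sketch (illustrative only; jobs and handle are
+// hypothetical):
+//
+//	var t tomb.Tomb
+//	t.Go(func() error {
+//		for {
+//			select {
+//			case <-t.Dying():
+//				return nil // clean termination
+//			case job := <-jobs:
+//				handle(job)
+//			}
+//		}
+//	})
+//	t.Kill(nil)     // put the tomb in a dying state
+//	err := t.Wait() // returns nil once the tracked goroutine exits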
+type Tomb struct {
+	m      sync.Mutex
+	alive  int
+	dying  chan struct{}
+	dead   chan struct{}
+	reason error
+
+	// context.Context is available in Go 1.7+.
+	parent interface{}
+	child  map[interface{}]childContext
+}
+
+type childContext struct {
+	context interface{}
+	cancel  func()
+	done    <-chan struct{}
+}
+
+var (
+	ErrStillAlive = errors.New("tomb: still alive")
+	ErrDying      = errors.New("tomb: dying")
+)
+
+func (t *Tomb) init() {
+	t.m.Lock()
+	if t.dead == nil {
+		t.dead = make(chan struct{})
+		t.dying = make(chan struct{})
+		t.reason = ErrStillAlive
+	}
+	t.m.Unlock()
+}
+
+// Dead returns the channel that can be used to wait until
+// all goroutines have finished running.
+func (t *Tomb) Dead() <-chan struct{} {
+	t.init()
+	return t.dead
+}
+
+// Dying returns the channel that can be used to wait until
+// t.Kill is called.
+func (t *Tomb) Dying() <-chan struct{} {
+	t.init()
+	return t.dying
+}
+
+// Wait blocks until all goroutines have finished running, and
+// then returns the reason for their death.
+func (t *Tomb) Wait() error {
+	t.init()
+	<-t.dead
+	t.m.Lock()
+	reason := t.reason
+	t.m.Unlock()
+	return reason
+}
+
+// Go runs f in a new goroutine and tracks its termination.
+//
+// If f returns a non-nil error, t.Kill is called with that
+// error as the death reason parameter.
+//
+// It is f's responsibility to monitor the tomb and return
+// appropriately once it is in a dying state.
+//
+// It is safe for the f function to call the Go method again
+// to create additional tracked goroutines. Once all tracked
+// goroutines return, the Dead channel is closed and the
+// Wait method unblocks and returns the death reason.
+//
+// Calling the Go method after all tracked goroutines return
+// causes a runtime panic. For that reason, calling the Go
+// method a second time out of a tracked goroutine is unsafe.
+func (t *Tomb) Go(f func() error) {
+	t.init()
+	t.m.Lock()
+	defer t.m.Unlock()
+	select {
+	case <-t.dead:
+		panic("tomb.Go called after all goroutines terminated")
+	default:
+	}
+	t.alive++
+	go t.run(f)
+}
+
+func (t *Tomb) run(f func() error) {
+	err := f()
+	t.m.Lock()
+	defer t.m.Unlock()
+	t.alive--
+	if t.alive == 0 || err != nil {
+		t.kill(err)
+		if t.alive == 0 {
+			close(t.dead)
+		}
+	}
+}
+
+// Kill puts the tomb in a dying state for the given reason,
+// closes the Dying channel, and sets Alive to false.
+//
+// Although Kill may be called multiple times, only the first
+// non-nil error is recorded as the death reason.
+//
+// If reason is ErrDying, the previous reason isn't replaced
+// even if nil. It's a runtime error to call Kill with ErrDying
+// if t is not in a dying state.
+func (t *Tomb) Kill(reason error) {
+	t.init()
+	t.m.Lock()
+	defer t.m.Unlock()
+	t.kill(reason)
+}
+
+func (t *Tomb) kill(reason error) {
+	if reason == ErrStillAlive {
+		panic("tomb: Kill with ErrStillAlive")
+	}
+	if reason == ErrDying {
+		if t.reason == ErrStillAlive {
+			panic("tomb: Kill with ErrDying while still alive")
+		}
+		return
+	}
+	if t.reason == ErrStillAlive {
+		t.reason = reason
+		close(t.dying)
+		for _, child := range t.child {
+			child.cancel()
+		}
+		t.child = nil
+		return
+	}
+	if t.reason == nil {
+		t.reason = reason
+		return
+	}
+}
+
+// Killf calls the Kill method with an error built providing the received
+// parameters to fmt.Errorf. The generated error is also returned.
+func (t *Tomb) Killf(f string, a ...interface{}) error {
+	err := fmt.Errorf(f, a...)
+	t.Kill(err)
+	return err
+}
+
+// Err returns the death reason, or ErrStillAlive if the tomb
+// is not in a dying or dead state.
+func (t *Tomb) Err() (reason error) {
+	t.init()
+	t.m.Lock()
+	reason = t.reason
+	t.m.Unlock()
+	return
+}
+
+// Alive returns true if the tomb is not in a dying or dead state.
+func (t *Tomb) Alive() bool {
+	return t.Err() == ErrStillAlive
+}
diff --git a/vendor/gopkg.in/tomb.v2/tomb_test.go b/vendor/gopkg.in/tomb.v2/tomb_test.go
new file mode 100644
index 000000000..a1064dffe
--- /dev/null
+++ b/vendor/gopkg.in/tomb.v2/tomb_test.go
@@ -0,0 +1,183 @@
+package tomb_test
+
+import (
+	"errors"
+	"gopkg.in/tomb.v2"
+	"reflect"
+	"testing"
+)
+
+func nothing() error { return nil }
+
+func TestNewTomb(t *testing.T) {
+	tb := &tomb.Tomb{}
+	checkState(t, tb, false, false, tomb.ErrStillAlive)
+}
+
+func TestGo(t *testing.T) {
+	tb := &tomb.Tomb{}
+	alive := make(chan bool)
+	tb.Go(func() error {
+		alive <- true
+		tb.Go(func() error {
+			alive <- true
+			<-tb.Dying()
+			return nil
+		})
+		<-tb.Dying()
+		return nil
+	})
+	<-alive
+	<-alive
+	checkState(t, tb, false, false, tomb.ErrStillAlive)
+	tb.Kill(nil)
+	tb.Wait()
+	checkState(t, tb, true, true, nil)
+}
+
+func TestGoErr(t *testing.T) {
+	first := errors.New("first error")
+	second := errors.New("second error")
+	tb := &tomb.Tomb{}
+	alive := make(chan bool)
+	tb.Go(func() error {
+		alive <- true
+		tb.Go(func() error {
+			alive <- true
+			return first
+		})
+		<-tb.Dying()
+		return second
+	})
+	<-alive
+	<-alive
+	tb.Wait()
+	checkState(t, tb, true, true, first)
+}
+
+func TestGoPanic(t *testing.T) {
+	// Calling Go after all tracked goroutines have returned must panic.
+	tb := &tomb.Tomb{}
+	tb.Go(nothing)
+	tb.Wait()
+	defer func() {
+		err := recover()
+		if err != "tomb.Go called after all goroutines terminated" {
+			t.Fatalf("Wrong panic on post-death tomb.Go call: %v", err)
+		}
+		checkState(t, tb, true, true, nil)
+	}()
+	tb.Go(nothing)
+}
+
+func TestKill(t *testing.T) {
+	// a nil reason flags the goroutine as dying
+	tb := &tomb.Tomb{}
+	tb.Kill(nil)
+	checkState(t, tb, true, false, nil)
+
+	// a non-nil reason now overrides the nil reason
+	err := errors.New("some error")
+	tb.Kill(err)
+	checkState(t, tb, true, false, err)
+
+	// another non-nil reason won't replace the first one
+	tb.Kill(errors.New("ignore me"))
+	checkState(t, tb, true, false, err)
+
+	tb.Go(nothing)
+	tb.Wait()
+	checkState(t, tb, true, true, err)
+}
+
+func TestKillf(t *testing.T) {
+	tb := &tomb.Tomb{}
+
+	err := tb.Killf("BO%s", "OM")
+	if s := err.Error(); s != "BOOM" {
+		t.Fatalf(`Killf("BO%%s", "OM"): want "BOOM", got %q`, s)
+	}
+	checkState(t, tb, true, false, err)
+
+	// another non-nil reason won't replace the first one
+	tb.Killf("ignore me")
+	checkState(t, tb, true, false, err)
+
+	tb.Go(nothing)
+	tb.Wait()
+	checkState(t, tb, true, true, err)
+}
+
+func TestErrDying(t *testing.T) {
+	// ErrDying being used properly, after a clean death.
+	tb := &tomb.Tomb{}
+	tb.Kill(nil)
+	tb.Kill(tomb.ErrDying)
+	checkState(t, tb, true, false, nil)
+
+	// ErrDying being used properly, after an errorful death.
+	err := errors.New("some error")
+	tb.Kill(err)
+	tb.Kill(tomb.ErrDying)
+	checkState(t, tb, true, false, err)
+
+	// ErrDying being used badly, with an alive tomb.
+ tb = &tomb.Tomb{} + defer func() { + err := recover() + if err != "tomb: Kill with ErrDying while still alive" { + t.Fatalf("Wrong panic on Kill(ErrDying): %v", err) + } + checkState(t, tb, false, false, tomb.ErrStillAlive) + }() + tb.Kill(tomb.ErrDying) +} + +func TestKillErrStillAlivePanic(t *testing.T) { + tb := &tomb.Tomb{} + defer func() { + err := recover() + if err != "tomb: Kill with ErrStillAlive" { + t.Fatalf("Wrong panic on Kill(ErrStillAlive): %v", err) + } + checkState(t, tb, false, false, tomb.ErrStillAlive) + }() + tb.Kill(tomb.ErrStillAlive) +} + +func checkState(t *testing.T, tb *tomb.Tomb, wantDying, wantDead bool, wantErr error) { + select { + case <-tb.Dying(): + if !wantDying { + t.Error("<-Dying: should block") + } + default: + if wantDying { + t.Error("<-Dying: should not block") + } + } + seemsDead := false + select { + case <-tb.Dead(): + if !wantDead { + t.Error("<-Dead: should block") + } + seemsDead = true + default: + if wantDead { + t.Error("<-Dead: should not block") + } + } + if err := tb.Err(); err != wantErr { + t.Errorf("Err: want %#v, got %#v", wantErr, err) + } + if wantDead && seemsDead { + waitErr := tb.Wait() + switch { + case waitErr == tomb.ErrStillAlive: + t.Errorf("Wait should not return ErrStillAlive") + case !reflect.DeepEqual(waitErr, wantErr): + t.Errorf("Wait: want %#v, got %#v", wantErr, waitErr) + } + } +}
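
As a usage reference for the new `gopkg.in/tomb.v2` dependency, here is a minimal, self-contained sketch of the WithContext/Go/Kill/Wait flow documented above. It is illustrative only and not part of this change; the worker body and the timeout are assumptions.

```go
package main

import (
	"context"
	"fmt"
	"time"

	tomb "gopkg.in/tomb.v2"
)

func main() {
	parent, cancel := context.WithCancel(context.Background())

	// The tomb dies when parent is canceled; ctx is closed when either
	// the tomb is dying or parent is canceled.
	t, ctx := tomb.WithContext(parent)

	t.Go(func() error {
		select {
		case <-ctx.Done():
			return nil // clean termination while dying
		case <-time.After(time.Minute):
			return fmt.Errorf("worker timed out")
		}
	})

	cancel()              // kills the tomb with context.Canceled
	fmt.Println(t.Wait()) // prints "context canceled"
}
```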