diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index a19767849..71cfe1691 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -25,7 +25,7 @@ jobs: uses: actions/checkout@v4 - name: Log in to the Container registry - uses: docker/login-action@3d58c274f17dffee475a5520cbe67f0a882c4dbb + uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 with: registry: ${{ env.REGISTRY }} username: ${{ github.actor }} @@ -33,7 +33,7 @@ jobs: - name: Extract metadata (tags, labels) for Docker id: meta - uses: docker/metadata-action@9ec57ed1fcdbf14dcef7dfbe97b2010124a938b7 + uses: docker/metadata-action@8e5442c4ef9f78752691e2d8f8d19755c6f78e81 with: images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} tags: | @@ -45,7 +45,7 @@ jobs: uses: docker/setup-qemu-action@68827325e0b33c7199eb31dd4e31fbe9023e06e3 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@f95db51fddba0c2d1ec667646a06c2ce06100226 + uses: docker/setup-buildx-action@2b51285047da1547ffb1b2203d8be4c0af6b1f20 - name: Ensure consistent binaries run: | @@ -55,7 +55,7 @@ jobs: if: github.ref != 'refs/heads/master' - name: Build and push Docker image - uses: docker/build-push-action@f2a1d5e99d037542a71f64918e516c093c6f3fc4 + uses: docker/build-push-action@2cdde995de11925a030ce8070c3d77a52ffcf1c0 with: push: true context: . diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 091d42e8a..b4b34e62f 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -74,7 +74,7 @@ jobs: - name: Get programs (Linux/macOS) run: | echo "build Go tools" - go install github.com/restic/rest-server/cmd/rest-server@latest + go install github.com/restic/rest-server/cmd/rest-server@master echo "install minio server" mkdir $HOME/bin @@ -106,7 +106,7 @@ jobs: $ProgressPreference = 'SilentlyContinue' echo "build Go tools" - go install github.com/restic/rest-server/... + go install github.com/restic/rest-server/cmd/rest-server@master echo "install minio server" mkdir $Env:USERPROFILE/bin @@ -247,6 +247,10 @@ jobs: lint: name: lint runs-on: ubuntu-latest + permissions: + contents: read + # allow annotating code in the PR + checks: write steps: - name: Set up Go ${{ env.latest_go }} uses: actions/setup-go@v5 @@ -257,10 +261,10 @@ jobs: uses: actions/checkout@v4 - name: golangci-lint - uses: golangci/golangci-lint-action@v3 + uses: golangci/golangci-lint-action@v4 with: # Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version. - version: v1.56.1 + version: v1.57.1 args: --verbose --timeout 5m # only run golangci-lint for pull requests, otherwise ALL hints get @@ -298,7 +302,7 @@ jobs: - name: Docker meta id: meta - uses: docker/metadata-action@v4 + uses: docker/metadata-action@v5 with: # list of Docker images to use as base name for tags images: | @@ -321,7 +325,7 @@ jobs: - name: Build and push id: docker_build - uses: docker/build-push-action@v4 + uses: docker/build-push-action@v5 with: push: false context: . 
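For context on the tests.yml change above: with a version suffix such as `@master`, `go install` runs in module-aware mode and builds the named version independently of any local `go.mod`, whereas the old path-pattern form used on Windows is resolved in the context of the current module. A minimal sketch of the two invocation styles (assuming Go 1.16 or later and network access):

```
# module-aware install: fetches and builds the named version,
# ignoring any go.mod in the current directory
go install github.com/restic/rest-server/cmd/rest-server@master

# path pattern without a version: resolved against the current module
go install github.com/restic/rest-server/...
```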
diff --git a/.golangci.yml b/.golangci.yml index 7dc6a8e7f..e632965bb 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -38,6 +38,8 @@ linters: # ensure that http response bodies are closed - bodyclose + - importas + issues: # don't use the default exclude rules, this hides (among others) ignored # errors from Close() calls @@ -58,4 +60,10 @@ issues: exclude-rules: # revive: ignore unused parameters in tests - path: (_test\.go|testing\.go|backend/.*/tests\.go) - text: "unused-parameter:" \ No newline at end of file + text: "unused-parameter:" + +linters-settings: + importas: + alias: + - pkg: github.com/restic/restic/internal/test + alias: rtest diff --git a/CHANGELOG.md b/CHANGELOG.md index b8969a443..5fea763e3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3488,7 +3488,7 @@ restic users. The changes are ordered by importance. NOTE: This new implementation does not guarantee order in which blobs are written to the target files and, for example, the last blob of a file can be - written to the file before any of the preceeding file blobs. It is therefore + written to the file before any of the preceding file blobs. It is therefore possible to have gaps in the data written to the target files if restore fails or interrupted by the user. diff --git a/README.md b/README.md index ad6b13cef..ef12f3e1b 100644 --- a/README.md +++ b/README.md @@ -10,8 +10,7 @@ For detailed usage and installation instructions check out the [documentation](h You can ask questions in our [Discourse forum](https://forum.restic.net). -Quick start ------------ +## Quick start Once you've [installed](https://restic.readthedocs.io/en/latest/020_installation.html) restic, start off with creating a repository for your backups: @@ -59,7 +58,7 @@ Therefore, restic supports the following backends for storing backups natively: Restic is a program that does backups right and was designed with the following principles in mind: -- **Easy:** Doing backups should be a frictionless process, otherwise +- **Easy**: Doing backups should be a frictionless process, otherwise you might be tempted to skip it. Restic should be easy to configure and use, so that, in the event of a data loss, you can just restore it. Likewise, restoring data should not be complicated. @@ -92,20 +91,17 @@ reproduce a byte identical version from the source code for that release. Instructions on how to do that are contained in the [builder repository](https://github.com/restic/builder). -News ----- +## News -You can follow the restic project on Mastodon [@resticbackup](https://fosstodon.org/@restic) or by subscribing to +You can follow the restic project on Mastodon [@resticbackup](https://fosstodon.org/@restic) or subscribe to the [project blog](https://restic.net/blog/). -License -------- +## License Restic is licensed under [BSD 2-Clause License](https://opensource.org/licenses/BSD-2-Clause). You can find the -complete text in [``LICENSE``](LICENSE). +complete text in [`LICENSE`](LICENSE). -Sponsorship ------------ +## Sponsorship Backend integration tests for Google Cloud Storage and Microsoft Azure Blob Storage are sponsored by [AppsCode](https://appscode.com)! 
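The `importas` linter enabled above enforces the configured alias whenever the listed package is imported under an alias; the test files later in this patch already follow that convention. A minimal sketch of a compliant import, assuming the `OK` helper from `internal/test` as used elsewhere in this diff:

```
package example_test

import (
	"testing"

	// importas: an alias for this package must be exactly "rtest"
	rtest "github.com/restic/restic/internal/test"
)

func TestExample(t *testing.T) {
	rtest.OK(t, nil) // fails the test if the error is non-nil
}
```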
diff --git a/changelog/0.10.0_2020-09-19/pull-2195 b/changelog/0.10.0_2020-09-19/pull-2195 index a139aa4e1..7898568fa 100644 --- a/changelog/0.10.0_2020-09-19/pull-2195 +++ b/changelog/0.10.0_2020-09-19/pull-2195 @@ -10,7 +10,7 @@ https://github.com/restic/restic/issues/2244 NOTE: This new implementation does not guarantee order in which blobs are written to the target files and, for example, the last blob of a -file can be written to the file before any of the preceeding file blobs. +file can be written to the file before any of the preceding file blobs. It is therefore possible to have gaps in the data written to the target files if restore fails or interrupted by the user. diff --git a/changelog/unreleased/issue-3600 b/changelog/unreleased/issue-3600 new file mode 100644 index 000000000..0da66d382 --- /dev/null +++ b/changelog/unreleased/issue-3600 @@ -0,0 +1,11 @@ +Bugfix: `backup` works if xattrs above the backup target cannot be read + +When backup targets are specified using absolute paths, then `backup` also +includes information about the parent folders of the backup targets in the +snapshot. If the extended attributes for some of these folders could not be +read due to missing permissions, this caused the backup to fail. This has been +fixed. + +https://github.com/restic/restic/issues/3600 +https://github.com/restic/restic/pull/4668 +https://forum.restic.net/t/parent-directories-above-the-snapshot-source-path-fatal-error-permission-denied/7216 diff --git a/changelog/unreleased/issue-4287 b/changelog/unreleased/issue-4287 new file mode 100644 index 000000000..df4fc5590 --- /dev/null +++ b/changelog/unreleased/issue-4287 @@ -0,0 +1,14 @@ +Enhancement: support connection to rest-server using unix socket + +Restic now supports connecting to rest-server using a unix socket for +rest-server version 0.13.0 or later. + +This allows running restic as follows: + +``` +rest-server --listen unix:/tmp/rest.socket --data /path/to/data & +restic -r rest:http+unix:///tmp/rest.socket:/my_backup_repo/ [...] +``` + +https://github.com/restic/restic/issues/4287 +https://github.com/restic/restic/pull/4655 diff --git a/changelog/unreleased/issue-4601 b/changelog/unreleased/issue-4601 new file mode 100644 index 000000000..f99dbe187 --- /dev/null +++ b/changelog/unreleased/issue-4601 @@ -0,0 +1,9 @@ +Enhancement: Add support for feature flags + +Restic now supports feature flags that can be used to enable and disable +experimental features. The flags can be set using the environment variable +`RESTIC_FEATURES`. To get a list of currently supported feature flags, +run the `features` command. + +https://github.com/restic/restic/issues/4601 +https://github.com/restic/restic/pull/4666 diff --git a/changelog/unreleased/issue-4602 b/changelog/unreleased/issue-4602 new file mode 100644 index 000000000..7532bcb1e --- /dev/null +++ b/changelog/unreleased/issue-4602 @@ -0,0 +1,23 @@ +Change: Deprecate legacy index format and s3legacy layout + +Support for the legacy index format used by restic before version 0.2.0 has +been deprecated and will be removed in the next minor restic version. You can +use `restic repair index` to update the index to the current format. + +It is possible to temporarily reenable support for the legacy index format by +setting the environment variable +`RESTIC_FEATURES=deprecate-legacy-index=false`. Note that this feature flag +will be removed in the next minor restic version. 
+ +Support for the s3legacy layout used by the S3 backend before restic 0.7.0 +has been deprecated and will be removed in the next minor restic version. You +can migrate your S3 repository using `RESTIC_FEATURES=deprecate-s3-legacy-layout=false restic migrate s3_layout`. + +It is possible to temporarily reenable support for the s3legacy layout by +setting the environment variable +`RESTIC_FEATURES=deprecate-s3-legacy-layout=false`. Note that this feature flag +will be removed in the next minor restic version. + +https://github.com/restic/restic/issues/4602 +https://github.com/restic/restic/pull/4724 +https://github.com/restic/restic/pull/4743 diff --git a/changelog/unreleased/issue-4733 b/changelog/unreleased/issue-4733 new file mode 100644 index 000000000..1fc271587 --- /dev/null +++ b/changelog/unreleased/issue-4733 @@ -0,0 +1,10 @@ +Enhancement: Allow specifying `--host` via environment variable + +Restic commands that operate on snapshots, such as `restic backup` and +`restic snapshots`, support the `--host` flag to specify the hostname for +grouping snapshots. They now permit selecting the hostname via the +environment variable `RESTIC_HOST`. `--host` still takes precedence over the +environment variable. + +https://github.com/restic/restic/issues/4733 +https://github.com/restic/restic/pull/4734 diff --git a/changelog/unreleased/issue-4744 b/changelog/unreleased/issue-4744 new file mode 100644 index 000000000..b0ede1c5c --- /dev/null +++ b/changelog/unreleased/issue-4744 @@ -0,0 +1,9 @@ +Change: Include full key ID in JSON output of `key list` + +We have changed the JSON output of the `key list` command to include the full +key ID instead of just a shortened version, as the latter can be ambiguous +in some rare cases. To derive the short ID, please truncate the full ID down to +eight characters. + +https://github.com/restic/restic/issues/4744 +https://github.com/restic/restic/pull/4745 diff --git a/changelog/unreleased/issue-4760 b/changelog/unreleased/issue-4760 new file mode 100644 index 000000000..bb2d9c5b4 --- /dev/null +++ b/changelog/unreleased/issue-4760 @@ -0,0 +1,8 @@ +Bugfix: Fix possible error on concurrent cache cleanup + +If multiple restic processes concurrently cleaned up no longer existing files +from the cache, this could cause some of the processes to fail with a `no such +file or directory` error. This has been fixed. + +https://github.com/restic/restic/issues/4760 +https://github.com/restic/restic/pull/4761 diff --git a/changelog/unreleased/issue-693 b/changelog/unreleased/issue-693 new file mode 100644 index 000000000..054ae42ed --- /dev/null +++ b/changelog/unreleased/issue-693 @@ -0,0 +1,12 @@ +Enhancement: Support printing snapshot size in `snapshots` command + +The `snapshots` command now supports printing the snapshot size for snapshots +created using this or a future restic version. For this, the `backup` command +now stores the backup summary statistics in the snapshot. + +The text output of the `snapshots` command only shows the snapshot size. The +other statistics are only included in the JSON output. To inspect these +statistics, use `restic snapshots --json` or `restic cat snapshot <snapshot>`.
+ +https://github.com/restic/restic/issues/693 +https://github.com/restic/restic/pull/4705 diff --git a/changelog/unreleased/pull-3067 b/changelog/unreleased/pull-3067 new file mode 100644 index 000000000..fbdcfd7e5 --- /dev/null +++ b/changelog/unreleased/pull-3067 @@ -0,0 +1,22 @@ +Enhancement: Add options to configure Windows Shadow Copy Service + +Restic always used a 120-second timeout and unconditionally created VSS snapshots +for all volume mount points on disk. This behavior can now be fine-tuned with +new options, such as excluding specific volumes and mount points or completely +disabling automatic snapshotting of volume mount points. + +For example: + + restic backup --use-fs-snapshot -o vss.timeout=5m -o vss.exclude-all-mount-points=true + +changes the timeout to five minutes and disables snapshotting of mount points on all volumes, and + + restic backup --use-fs-snapshot -o vss.exclude-volumes="d:\;c:\mnt\;\\?\Volume{e2e0315d-9066-4f97-8343-eb5659b35762}" + +excludes drive `d:`, mount point `c:\mnt`, and a specific volume from VSS snapshotting. + + restic backup --use-fs-snapshot -o vss.provider={b5946137-7b9f-4925-af80-51abd60b20d5} + +uses 'Microsoft Software Shadow Copy provider 1.0' instead of the default provider. + +https://github.com/restic/restic/pull/3067 diff --git a/changelog/unreleased/pull-4006 b/changelog/unreleased/pull-4006 new file mode 100644 index 000000000..01f4ddb6e --- /dev/null +++ b/changelog/unreleased/pull-4006 @@ -0,0 +1,16 @@ +Enhancement: (alpha) Store deviceID only for hardlinks + +Set `RESTIC_FEATURES=device-id-for-hardlinks` to enable this alpha feature. +The feature flag will be removed after repository format version 3 becomes +available or be replaced with a different solution. + +When creating backups from a filesystem snapshot, for example created using +btrfs subvolumes, the deviceID of the filesystem changes compared to previous +snapshots. This prevented restic from deduplicating the directory metadata of +a snapshot. + +When this alpha feature is enabled, the deviceID is only stored for +hardlinks. This significantly reduces the metadata duplication for most +backups. + +https://github.com/restic/restic/pull/4006 diff --git a/changelog/unreleased/pull-4503 b/changelog/unreleased/pull-4503 index 3ce5c48e8..b52552d69 100644 --- a/changelog/unreleased/pull-4503 +++ b/changelog/unreleased/pull-4503 @@ -4,4 +4,5 @@ If files on different devices had the same inode id, then the `stats` command did not correctly calculate the snapshot size. This has been fixed. https://github.com/restic/restic/pull/4503 +https://github.com/restic/restic/pull/4006 https://forum.restic.net/t/possible-bug-in-stats/6461/8 diff --git a/changelog/unreleased/pull-4709 b/changelog/unreleased/pull-4709 new file mode 100644 index 000000000..5ffb2a6a6 --- /dev/null +++ b/changelog/unreleased/pull-4709 @@ -0,0 +1,10 @@ +Bugfix: Correct `--no-lock` handling of `ls` and `tag` commands + +The `ls` command never locked the repository. This has been fixed. The old +behavior is still supported using `ls --no-lock`. The latter invocation also +works with older restic versions. + +The `tag` command erroneously accepted the `--no-lock` flag. The command +now always requires an exclusive lock.
+ +https://github.com/restic/restic/pull/4709 diff --git a/changelog/unreleased/pull-4737 b/changelog/unreleased/pull-4737 new file mode 100644 index 000000000..2637c8f83 --- /dev/null +++ b/changelog/unreleased/pull-4737 @@ -0,0 +1,5 @@ +Enhancement: include snapshot id in reason field of forget JSON output + +The JSON output of the `forget` command now includes the `id` and `short_id` of a snapshot in the `reason` field. + +https://github.com/restic/restic/pull/4737 diff --git a/cmd/restic/cleanup.go b/cmd/restic/cleanup.go index 5a6cf79e1..90ea93b92 100644 --- a/cmd/restic/cleanup.go +++ b/cmd/restic/cleanup.go @@ -1,89 +1,41 @@ package main import ( + "context" "os" "os/signal" - "sync" "syscall" "github.com/restic/restic/internal/debug" ) -var cleanupHandlers struct { - sync.Mutex - list []func(code int) (int, error) - done bool - ch chan os.Signal +func createGlobalContext() context.Context { + ctx, cancel := context.WithCancel(context.Background()) + + ch := make(chan os.Signal, 1) + go cleanupHandler(ch, cancel) + signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM) + + return ctx } -func init() { - cleanupHandlers.ch = make(chan os.Signal, 1) - go CleanupHandler(cleanupHandlers.ch) - signal.Notify(cleanupHandlers.ch, syscall.SIGINT, syscall.SIGTERM) -} +// cleanupHandler handles the SIGINT and SIGTERM signals. +func cleanupHandler(c <-chan os.Signal, cancel context.CancelFunc) { + s := <-c + debug.Log("signal %v received, cleaning up", s) + Warnf("%ssignal %v received, cleaning up\n", clearLine(0), s) -// AddCleanupHandler adds the function f to the list of cleanup handlers so -// that it is executed when all the cleanup handlers are run, e.g. when SIGINT -// is received. -func AddCleanupHandler(f func(code int) (int, error)) { - cleanupHandlers.Lock() - defer cleanupHandlers.Unlock() - - // reset the done flag for integration tests - cleanupHandlers.done = false - - cleanupHandlers.list = append(cleanupHandlers.list, f) -} - -// RunCleanupHandlers runs all registered cleanup handlers -func RunCleanupHandlers(code int) int { - cleanupHandlers.Lock() - defer cleanupHandlers.Unlock() - - if cleanupHandlers.done { - return code + if val, _ := os.LookupEnv("RESTIC_DEBUG_STACKTRACE_SIGINT"); val != "" { + _, _ = os.Stderr.WriteString("\n--- STACKTRACE START ---\n\n") + _, _ = os.Stderr.WriteString(debug.DumpStacktrace()) + _, _ = os.Stderr.WriteString("\n--- STACKTRACE END ---\n") } - cleanupHandlers.done = true - for _, f := range cleanupHandlers.list { - var err error - code, err = f(code) - if err != nil { - Warnf("error in cleanup handler: %v\n", err) - } - } - cleanupHandlers.list = nil - return code + cancel() } -// CleanupHandler handles the SIGINT and SIGTERM signals. -func CleanupHandler(c <-chan os.Signal) { - for s := range c { - debug.Log("signal %v received, cleaning up", s) - Warnf("%ssignal %v received, cleaning up\n", clearLine(0), s) - - if val, _ := os.LookupEnv("RESTIC_DEBUG_STACKTRACE_SIGINT"); val != "" { - _, _ = os.Stderr.WriteString("\n--- STACKTRACE START ---\n\n") - _, _ = os.Stderr.WriteString(debug.DumpStacktrace()) - _, _ = os.Stderr.WriteString("\n--- STACKTRACE END ---\n") - } - - code := 0 - - if s == syscall.SIGINT || s == syscall.SIGTERM { - code = 130 - } else { - code = 1 - } - - Exit(code) - } -} - -// Exit runs the cleanup handlers and then terminates the process with the -// given exit code. +// Exit terminates the process with the given exit code. 
func Exit(code int) { - code = RunCleanupHandlers(code) debug.Log("exiting with status code %d", code) os.Exit(code) } diff --git a/cmd/restic/cmd_backup.go b/cmd/restic/cmd_backup.go index 318d17796..19b96e9b0 100644 --- a/cmd/restic/cmd_backup.go +++ b/cmd/restic/cmd_backup.go @@ -114,7 +114,7 @@ func init() { f.BoolVar(&backupOptions.StdinCommand, "stdin-from-command", false, "interpret arguments as command to execute and store its stdout") f.Var(&backupOptions.Tags, "tag", "add `tags` for the new snapshot in the format `tag[,tag,...]` (can be specified multiple times)") f.UintVar(&backupOptions.ReadConcurrency, "read-concurrency", 0, "read `n` files concurrently (default: $RESTIC_READ_CONCURRENCY or 2)") - f.StringVarP(&backupOptions.Host, "host", "H", "", "set the `hostname` for the snapshot manually. To prevent an expensive rescan use the \"parent\" flag") + f.StringVarP(&backupOptions.Host, "host", "H", "", "set the `hostname` for the snapshot manually (default: $RESTIC_HOST). To prevent an expensive rescan use the \"parent\" flag") f.StringVar(&backupOptions.Host, "hostname", "", "set the `hostname` for the snapshot manually") err := f.MarkDeprecated("hostname", "use --host") if err != nil { @@ -137,6 +137,11 @@ func init() { // parse read concurrency from env, on error the default value will be used readConcurrency, _ := strconv.ParseUint(os.Getenv("RESTIC_READ_CONCURRENCY"), 10, 32) backupOptions.ReadConcurrency = uint(readConcurrency) + + // parse host from env; if unset or empty, the default value will be used + if host := os.Getenv("RESTIC_HOST"); host != "" { + backupOptions.Host = host + } } // filterExisting returns a slice of all existing items, or an error if no @@ -440,7 +445,16 @@ func findParentSnapshot(ctx context.Context, repo restic.ListerLoaderUnpacked, o } func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, term *termstatus.Terminal, args []string) error { - err := opts.Check(gopts, args) + var vsscfg fs.VSSConfig + var err error + + if runtime.GOOS == "windows" { + if vsscfg, err = fs.ParseVSSConfig(gopts.extended); err != nil { + return err + } + } + + err = opts.Check(gopts, args) if err != nil { return err } @@ -451,6 +465,7 @@ func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, ter } timeStamp := time.Now() + backupStart := timeStamp if opts.TimeStamp != "" { timeStamp, err = time.ParseInLocation(TimeFormat, opts.TimeStamp, time.Local) if err != nil { @@ -462,10 +477,11 @@ func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, ter Verbosef("open repository\n") } - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithAppendLock(ctx, gopts, opts.DryRun) if err != nil { return err } + defer unlock() var progressPrinter backup.ProgressPrinter if gopts.JSON { @@ -477,22 +493,6 @@ func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, ter calculateProgressInterval(!gopts.Quiet, gopts.JSON)) defer progressReporter.Done() - if opts.DryRun { - repo.SetDryRun() - } - - if !gopts.JSON { - progressPrinter.V("lock repository") - } - if !opts.DryRun { - var lock *restic.Lock - lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - } - // rejectByNameFuncs collect functions that can reject items from the backup based on path only rejectByNameFuncs, err := collectRejectByNameFuncs(opts, repo) if err != nil { @@ -556,8 +556,8 @@ func runBackup(ctx context.Context, opts BackupOptions,
gopts GlobalOptions, ter return err } - errorHandler := func(item string, err error) error { - return progressReporter.Error(item, err) + errorHandler := func(item string, err error) { + _ = progressReporter.Error(item, err) } messageHandler := func(msg string, args ...interface{}) { @@ -566,7 +566,7 @@ func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, ter } } - localVss := fs.NewLocalVss(errorHandler, messageHandler) + localVss := fs.NewLocalVss(errorHandler, messageHandler, vsscfg) defer localVss.DeleteSnapshots() targetFS = localVss } @@ -640,6 +640,7 @@ func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, ter snapshotOpts := archiver.SnapshotOptions{ Excludes: opts.Excludes, Tags: opts.Tags.Flatten(), + BackupStart: backupStart, Time: timeStamp, Hostname: opts.Host, ParentSnapshot: parentSnapshot, @@ -649,7 +650,7 @@ func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, ter if !gopts.JSON { progressPrinter.V("start backup on %v", targets) } - _, id, err := arch.Snapshot(ctx, targets, snapshotOpts) + _, id, summary, err := arch.Snapshot(ctx, targets, snapshotOpts) // cleanly shutdown all running goroutines cancel() @@ -663,7 +664,7 @@ func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, ter } // Report finished execution - progressReporter.Finish(id, opts.DryRun) + progressReporter.Finish(id, summary, opts.DryRun) if !gopts.JSON && !opts.DryRun { progressPrinter.P("snapshot %s saved\n", id.Str()) } diff --git a/cmd/restic/cmd_backup_integration_test.go b/cmd/restic/cmd_backup_integration_test.go index c60e9c543..75de1341c 100644 --- a/cmd/restic/cmd_backup_integration_test.go +++ b/cmd/restic/cmd_backup_integration_test.go @@ -9,7 +9,6 @@ import ( "runtime" "testing" - "github.com/restic/restic/internal/backend" "github.com/restic/restic/internal/fs" "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" @@ -250,29 +249,18 @@ func TestBackupTreeLoadError(t *testing.T) { opts := BackupOptions{} // Backup a subdirectory first, such that we can remove the tree pack for the subdirectory testRunBackup(t, env.testdata, []string{"test"}, opts, env.gopts) - - r, err := OpenRepository(context.TODO(), env.gopts) - rtest.OK(t, err) - rtest.OK(t, r.LoadIndex(context.TODO(), nil)) - treePacks := restic.NewIDSet() - r.Index().Each(context.TODO(), func(pb restic.PackedBlob) { - if pb.Type == restic.TreeBlob { - treePacks.Insert(pb.PackID) - } - }) + treePacks := listTreePacks(env.gopts, t) testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts) testRunCheck(t, env.gopts) // delete the subdirectory pack first - for id := range treePacks { - rtest.OK(t, r.Backend().Remove(context.TODO(), backend.Handle{Type: restic.PackFile, Name: id.String()})) - } + removePacks(env.gopts, t, treePacks) testRunRebuildIndex(t, env.gopts) // now the repo is missing the tree blob in the index; check should report this testRunCheckMustFail(t, env.gopts) // second backup should report an error but "heal" this situation - err = testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts) + err := testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts) rtest.Assert(t, err != nil, "backup should have reported an error for the subdirectory") testRunCheck(t, env.gopts) @@ -406,6 +394,7 @@ func TestIncrementalBackup(t *testing.T) { 
t.Logf("repository grown by %d bytes", stat3.size-stat2.size) } +// nolint: staticcheck // false positive nil pointer dereference check func TestBackupTags(t *testing.T) { env, cleanup := withTestEnvironment(t) defer cleanup() @@ -441,6 +430,7 @@ func TestBackupTags(t *testing.T) { "expected parent to be %v, got %v", parent.ID, newest.Parent) } +// nolint: staticcheck // false positive nil pointer dereference check func TestBackupProgramVersion(t *testing.T) { env, cleanup := withTestEnvironment(t) defer cleanup() diff --git a/cmd/restic/cmd_cat.go b/cmd/restic/cmd_cat.go index 92f58b2e7..ccec9b5d9 100644 --- a/cmd/restic/cmd_cat.go +++ b/cmd/restic/cmd_cat.go @@ -64,19 +64,11 @@ func runCat(ctx context.Context, gopts GlobalOptions, args []string) error { return err } - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock) if err != nil { return err } - - if !gopts.NoLock { - var lock *restic.Lock - lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - } + defer unlock() tpe := args[0] diff --git a/cmd/restic/cmd_check.go b/cmd/restic/cmd_check.go index 990702b61..c44edae7e 100644 --- a/cmd/restic/cmd_check.go +++ b/cmd/restic/cmd_check.go @@ -199,25 +199,16 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args } cleanup := prepareCheckCache(opts, &gopts) - AddCleanupHandler(func(code int) (int, error) { - cleanup() - return code, nil - }) - - repo, err := OpenRepository(ctx, gopts) - if err != nil { - return err - } + defer cleanup() if !gopts.NoLock { Verbosef("create exclusive lock for repository\n") - var lock *restic.Lock - lock, ctx, err = lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } } + ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, gopts.NoLock) + if err != nil { + return err + } + defer unlock() chkr := checker.New(repo, opts.CheckUnused) err = chkr.LoadSnapshots(ctx) @@ -228,15 +219,23 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args Verbosef("load indexes\n") bar := newIndexProgress(gopts.Quiet, gopts.JSON) hints, errs := chkr.LoadIndex(ctx, bar) + if ctx.Err() != nil { + return ctx.Err() + } errorsFound := false suggestIndexRebuild := false + suggestLegacyIndexRebuild := false mixedFound := false for _, hint := range hints { switch hint.(type) { - case *checker.ErrDuplicatePacks, *checker.ErrOldIndexFormat: + case *checker.ErrDuplicatePacks: Printf("%v\n", hint) suggestIndexRebuild = true + case *checker.ErrOldIndexFormat: + Warnf("error: %v\n", hint) + suggestLegacyIndexRebuild = true + errorsFound = true case *checker.ErrMixedPack: Printf("%v\n", hint) mixedFound = true @@ -247,7 +246,10 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args } if suggestIndexRebuild { - Printf("Duplicate packs/old indexes are non-critical, you can run `restic repair index' to correct this.\n") + Printf("Duplicate packs are non-critical, you can run `restic repair index' to correct this.\n") + } + if suggestLegacyIndexRebuild { + Warnf("Found indexes using the legacy format, you must run `restic repair index' to correct this.\n") } if mixedFound { Printf("Mixed packs with tree and data blobs are non-critical, you can run `restic prune` to correct this.\n") @@ -281,6 +283,9 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args if orphanedPacks > 0 { Verbosef("%d 
additional files were found in the repo, which likely contain duplicate data.\nThis is non-critical, you can run `restic prune` to correct this.\n", orphanedPacks) } + if ctx.Err() != nil { + return ctx.Err() + } Verbosef("check snapshots, trees and blobs\n") errChan = make(chan error) @@ -314,9 +319,16 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args // Must happen after `errChan` is read from in the above loop to avoid // deadlocking in the case of errors. wg.Wait() + if ctx.Err() != nil { + return ctx.Err() + } if opts.CheckUnused { - for _, id := range chkr.UnusedBlobs(ctx) { + unused, err := chkr.UnusedBlobs(ctx) + if err != nil { + return err + } + for _, id := range unused { Verbosef("unused blob %v\n", id) errorsFound = true } @@ -393,10 +405,13 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args doReadData(packs) } + if ctx.Err() != nil { + return ctx.Err() + } + if errorsFound { return errors.Fatal("repository contains errors") } - Verbosef("no errors were found\n") return nil diff --git a/cmd/restic/cmd_copy.go b/cmd/restic/cmd_copy.go index 92922b42b..ad6c58a25 100644 --- a/cmd/restic/cmd_copy.go +++ b/cmd/restic/cmd_copy.go @@ -53,7 +53,7 @@ func init() { } func runCopy(ctx context.Context, opts CopyOptions, gopts GlobalOptions, args []string) error { - secondaryGopts, isFromRepo, err := fillSecondaryGlobalOpts(opts.secondaryRepoOptions, gopts, "destination") + secondaryGopts, isFromRepo, err := fillSecondaryGlobalOpts(ctx, opts.secondaryRepoOptions, gopts, "destination") if err != nil { return err } @@ -62,30 +62,17 @@ func runCopy(ctx context.Context, opts CopyOptions, gopts GlobalOptions, args [] gopts, secondaryGopts = secondaryGopts, gopts } - srcRepo, err := OpenRepository(ctx, gopts) + ctx, srcRepo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock) if err != nil { return err } + defer unlock() - dstRepo, err := OpenRepository(ctx, secondaryGopts) - if err != nil { - return err - } - - if !gopts.NoLock { - var srcLock *restic.Lock - srcLock, ctx, err = lockRepo(ctx, srcRepo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(srcLock) - if err != nil { - return err - } - } - - dstLock, ctx, err := lockRepo(ctx, dstRepo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(dstLock) + ctx, dstRepo, unlock, err := openWithAppendLock(ctx, secondaryGopts, false) if err != nil { return err } + defer unlock() srcSnapshotLister, err := restic.MemorizeList(ctx, srcRepo, restic.SnapshotFile) if err != nil { @@ -116,6 +103,9 @@ func runCopy(ctx context.Context, opts CopyOptions, gopts GlobalOptions, args [] // also consider identical snapshot copies dstSnapshotByOriginal[*sn.ID()] = append(dstSnapshotByOriginal[*sn.ID()], sn) } + if ctx.Err() != nil { + return ctx.Err() + } // remember already processed trees across all snapshots visitedTrees := restic.NewIDSet() @@ -160,7 +150,7 @@ func runCopy(ctx context.Context, opts CopyOptions, gopts GlobalOptions, args [] } Verbosef("snapshot %s saved\n", newID.Str()) } - return nil + return ctx.Err() } func similarSnapshots(sna *restic.Snapshot, snb *restic.Snapshot) bool { diff --git a/cmd/restic/cmd_debug.go b/cmd/restic/cmd_debug.go index a87e7a0c5..3abb9d7eb 100644 --- a/cmd/restic/cmd_debug.go +++ b/cmd/restic/cmd_debug.go @@ -153,19 +153,11 @@ func runDebugDump(ctx context.Context, gopts GlobalOptions, args []string) error return errors.Fatal("type not specified") } - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithReadLock(ctx, gopts, 
gopts.NoLock) if err != nil { return err } - - if !gopts.NoLock { - var lock *restic.Lock - lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - } + defer unlock() tpe := args[0] @@ -442,10 +434,15 @@ func storePlainBlob(id restic.ID, prefix string, plain []byte) error { } func runDebugExamine(ctx context.Context, gopts GlobalOptions, opts DebugExamineOptions, args []string) error { - repo, err := OpenRepository(ctx, gopts) + if opts.ExtractPack && gopts.NoLock { + return fmt.Errorf("--extract-pack and --no-lock are mutually exclusive") + } + + ctx, repo, unlock, err := openWithAppendLock(ctx, gopts, gopts.NoLock) if err != nil { return err } + defer unlock() ids := make([]restic.ID, 0) for _, name := range args { @@ -464,15 +461,6 @@ func runDebugExamine(ctx context.Context, gopts GlobalOptions, opts DebugExamine return errors.Fatal("no pack files to examine") } - if !gopts.NoLock { - var lock *restic.Lock - lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - } - bar := newIndexProgress(gopts.Quiet, gopts.JSON) err = repo.LoadIndex(ctx, bar) if err != nil { diff --git a/cmd/restic/cmd_diff.go b/cmd/restic/cmd_diff.go index 3bd29fa67..b156191dc 100644 --- a/cmd/restic/cmd_diff.go +++ b/cmd/restic/cmd_diff.go @@ -344,19 +344,11 @@ func runDiff(ctx context.Context, opts DiffOptions, gopts GlobalOptions, args [] return errors.Fatalf("specify two snapshot IDs") } - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock) if err != nil { return err } - - if !gopts.NoLock { - var lock *restic.Lock - lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - } + defer unlock() // cache snapshots listing be, err := restic.MemorizeList(ctx, repo, restic.SnapshotFile) diff --git a/cmd/restic/cmd_dump.go b/cmd/restic/cmd_dump.go index 9178f2abe..39e915b40 100644 --- a/cmd/restic/cmd_dump.go +++ b/cmd/restic/cmd_dump.go @@ -131,19 +131,11 @@ func runDump(ctx context.Context, opts DumpOptions, gopts GlobalOptions, args [] splittedPath := splitPath(path.Clean(pathToPrint)) - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock) if err != nil { return err } - - if !gopts.NoLock { - var lock *restic.Lock - lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - } + defer unlock() sn, subfolder, err := (&restic.SnapshotFilter{ Hosts: opts.Hosts, diff --git a/cmd/restic/cmd_features.go b/cmd/restic/cmd_features.go new file mode 100644 index 000000000..8125d3e26 --- /dev/null +++ b/cmd/restic/cmd_features.go @@ -0,0 +1,58 @@ +package main + +import ( + "fmt" + + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/feature" + "github.com/restic/restic/internal/ui/table" + + "github.com/spf13/cobra" +) + +var featuresCmd = &cobra.Command{ + Use: "features", + Short: "Print list of feature flags", + Long: ` +The "features" command prints a list of supported feature flags. + +To pass feature flags to restic, set the RESTIC_FEATURES environment variable +to "featureA=true,featureB=false". Specifying an unknown feature flag is an error. + +A feature can either be in alpha, beta, stable or deprecated state. 
+An _alpha_ feature is disabled by default and may change in arbitrary ways between restic versions or be removed. +A _beta_ feature is enabled by default, but still can change in minor ways or be removed. +A _stable_ feature is always enabled and cannot be disabled. The flag will be removed in a future restic version. +A _deprecated_ feature is always disabled and cannot be enabled. The flag will be removed in a future restic version. + +EXIT STATUS +=========== + +Exit status is 0 if the command was successful, and non-zero if there was any error. +`, + Hidden: true, + DisableAutoGenTag: true, + RunE: func(_ *cobra.Command, args []string) error { + if len(args) != 0 { + return errors.Fatal("the feature command expects no arguments") + } + + fmt.Printf("All Feature Flags:\n") + flags := feature.Flag.List() + + tab := table.New() + tab.AddColumn("Name", "{{ .Name }}") + tab.AddColumn("Type", "{{ .Type }}") + tab.AddColumn("Default", "{{ .Default }}") + tab.AddColumn("Description", "{{ .Description }}") + + for _, flag := range flags { + tab.AddRow(flag) + } + return tab.Write(globalOptions.stdout) + }, +} + +func init() { + cmdRoot.AddCommand(featuresCmd) +} diff --git a/cmd/restic/cmd_find.go b/cmd/restic/cmd_find.go index 7ea7c425a..81df0ab98 100644 --- a/cmd/restic/cmd_find.go +++ b/cmd/restic/cmd_find.go @@ -439,7 +439,10 @@ func (f *Finder) packsToBlobs(ctx context.Context, packs []string) error { if err != errAllPacksFound { // try to resolve unknown pack ids from the index - packIDs = f.indexPacksToBlobs(ctx, packIDs) + packIDs, err = f.indexPacksToBlobs(ctx, packIDs) + if err != nil { + return err + } } if len(packIDs) > 0 { @@ -456,13 +459,13 @@ func (f *Finder) packsToBlobs(ctx context.Context, packs []string) error { return nil } -func (f *Finder) indexPacksToBlobs(ctx context.Context, packIDs map[string]struct{}) map[string]struct{} { +func (f *Finder) indexPacksToBlobs(ctx context.Context, packIDs map[string]struct{}) (map[string]struct{}, error) { wctx, cancel := context.WithCancel(ctx) defer cancel() // remember which packs were found in the index indexPackIDs := make(map[string]struct{}) - f.repo.Index().Each(wctx, func(pb restic.PackedBlob) { + err := f.repo.Index().Each(wctx, func(pb restic.PackedBlob) { idStr := pb.PackID.String() // keep entry in packIDs as Each() returns individual index entries matchingID := false @@ -481,6 +484,9 @@ func (f *Finder) indexPacksToBlobs(ctx context.Context, packIDs map[string]struc indexPackIDs[idStr] = struct{}{} } }) + if err != nil { + return nil, err + } for id := range indexPackIDs { delete(packIDs, id) @@ -493,7 +499,7 @@ func (f *Finder) indexPacksToBlobs(ctx context.Context, packIDs map[string]struc } Warnf("some pack files are missing from the repository, getting their blobs from the repository index: %v\n\n", list) } - return packIDs + return packIDs, nil } func (f *Finder) findObjectPack(id string, t restic.BlobType) { @@ -563,19 +569,11 @@ func runFind(ctx context.Context, opts FindOptions, gopts GlobalOptions, args [] return errors.Fatal("cannot have several ID types") } - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock) if err != nil { return err } - - if !gopts.NoLock { - var lock *restic.Lock - lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - } + defer unlock() snapshotLister, err := restic.MemorizeList(ctx, repo, restic.SnapshotFile) if err != nil { @@ -616,6 +614,9 @@ func 
runFind(ctx context.Context, opts FindOptions, gopts GlobalOptions, args [] for sn := range FindFilteredSnapshots(ctx, snapshotLister, repo, &opts.SnapshotFilter, opts.Snapshots) { filteredSnapshots = append(filteredSnapshots, sn) } + if ctx.Err() != nil { + return ctx.Err() + } sort.Slice(filteredSnapshots, func(i, j int) bool { return filteredSnapshots[i].Time.Before(filteredSnapshots[j].Time) diff --git a/cmd/restic/cmd_forget.go b/cmd/restic/cmd_forget.go index 65ff449a3..92eeed4a1 100644 --- a/cmd/restic/cmd_forget.go +++ b/cmd/restic/cmd_forget.go @@ -8,6 +8,7 @@ import ( "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/restic" + "github.com/restic/restic/internal/ui/termstatus" "github.com/spf13/cobra" ) @@ -33,7 +34,9 @@ Exit status is 0 if the command was successful, and non-zero if there was any er `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { - return runForget(cmd.Context(), forgetOptions, forgetPruneOptions, globalOptions, args) + term, cancel := setupTermstatus() + defer cancel() + return runForget(cmd.Context(), forgetOptions, forgetPruneOptions, globalOptions, term, args) }, } @@ -152,7 +155,7 @@ func verifyForgetOptions(opts *ForgetOptions) error { return nil } -func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOptions, gopts GlobalOptions, args []string) error { +func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOptions, gopts GlobalOptions, term *termstatus.Terminal, args []string) error { err := verifyForgetOptions(&opts) if err != nil { return err @@ -163,23 +166,21 @@ func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOption return err } - repo, err := OpenRepository(ctx, gopts) - if err != nil { - return err - } - if gopts.NoLock && !opts.DryRun { return errors.Fatal("--no-lock is only applicable in combination with --dry-run for forget command") } - if !opts.DryRun || !gopts.NoLock { - var lock *restic.Lock - lock, ctx, err = lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } + ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, opts.DryRun && gopts.NoLock) + if err != nil { + return err } + defer unlock() + + verbosity := gopts.verbosity + if gopts.JSON { + verbosity = 0 + } + printer := newTerminalProgressPrinter(verbosity, term) var snapshots restic.Snapshots removeSnIDs := restic.NewIDSet() @@ -187,6 +188,9 @@ func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOption for sn := range FindFilteredSnapshots(ctx, repo, repo, &opts.SnapshotFilter, args) { snapshots = append(snapshots, sn) } + if ctx.Err() != nil { + return ctx.Err() + } var jsonGroups []*ForgetGroup @@ -218,15 +222,11 @@ func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOption } if policy.Empty() && len(args) == 0 { - if !gopts.JSON { - Verbosef("no policy was specified, no snapshots will be removed\n") - } + printer.P("no policy was specified, no snapshots will be removed\n") } if !policy.Empty() { - if !gopts.JSON { - Verbosef("Applying Policy: %v\n", policy) - } + printer.P("Applying Policy: %v\n", policy) for k, snapshotGroup := range snapshotGroups { if gopts.Verbose >= 1 && !gopts.JSON { @@ -249,20 +249,20 @@ func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOption keep, remove, reasons := restic.ApplyPolicy(snapshotGroup, policy) if len(keep) != 0 && !gopts.Quiet && !gopts.JSON { - Printf("keep %d snapshots:\n", 
len(keep)) + printer.P("keep %d snapshots:\n", len(keep)) PrintSnapshots(globalOptions.stdout, keep, reasons, opts.Compact) - Printf("\n") + printer.P("\n") } - addJSONSnapshots(&fg.Keep, keep) + fg.Keep = asJSONSnapshots(keep) if len(remove) != 0 && !gopts.Quiet && !gopts.JSON { - Printf("remove %d snapshots:\n", len(remove)) + printer.P("remove %d snapshots:\n", len(remove)) PrintSnapshots(globalOptions.stdout, remove, nil, opts.Compact) - Printf("\n") + printer.P("\n") } - addJSONSnapshots(&fg.Remove, remove) + fg.Remove = asJSONSnapshots(remove) - fg.Reasons = reasons + fg.Reasons = asJSONKeeps(reasons) jsonGroups = append(jsonGroups, &fg) @@ -273,16 +273,27 @@ func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOption } } + if ctx.Err() != nil { + return ctx.Err() + } + if len(removeSnIDs) > 0 { if !opts.DryRun { - err := DeleteFilesChecked(ctx, gopts, repo, removeSnIDs, restic.SnapshotFile) + bar := printer.NewCounter("files deleted") + err := restic.ParallelRemove(ctx, repo, removeSnIDs, restic.SnapshotFile, func(id restic.ID, err error) error { + if err != nil { + printer.E("unable to remove %v/%v from the repository\n", restic.SnapshotFile, id) + } else { + printer.VV("removed %v/%v\n", restic.SnapshotFile, id) + } + return nil + }, bar) + bar.Done() if err != nil { return err } } else { - if !gopts.JSON { - Printf("Would have removed the following snapshots:\n%v\n\n", removeSnIDs) - } + printer.P("Would have removed the following snapshots:\n%v\n\n", removeSnIDs) } } @@ -294,15 +305,13 @@ func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOption } if len(removeSnIDs) > 0 && opts.Prune { - if !gopts.JSON { - if opts.DryRun { - Verbosef("%d snapshots would be removed, running prune dry run\n", len(removeSnIDs)) - } else { - Verbosef("%d snapshots have been removed, running prune\n", len(removeSnIDs)) - } + if opts.DryRun { + printer.P("%d snapshots would be removed, running prune dry run\n", len(removeSnIDs)) + } else { + printer.P("%d snapshots have been removed, running prune\n", len(removeSnIDs)) } pruneOptions.DryRun = opts.DryRun - return runPruneWithRepo(ctx, pruneOptions, gopts, repo, removeSnIDs) + return runPruneWithRepo(ctx, pruneOptions, gopts, repo, removeSnIDs, term) } return nil @@ -310,23 +319,47 @@ func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOption // ForgetGroup helps to print what is forgotten in JSON. type ForgetGroup struct { - Tags []string `json:"tags"` - Host string `json:"host"` - Paths []string `json:"paths"` - Keep []Snapshot `json:"keep"` - Remove []Snapshot `json:"remove"` - Reasons []restic.KeepReason `json:"reasons"` + Tags []string `json:"tags"` + Host string `json:"host"` + Paths []string `json:"paths"` + Keep []Snapshot `json:"keep"` + Remove []Snapshot `json:"remove"` + Reasons []KeepReason `json:"reasons"` } -func addJSONSnapshots(js *[]Snapshot, list restic.Snapshots) { +func asJSONSnapshots(list restic.Snapshots) []Snapshot { + var resultList []Snapshot for _, sn := range list { k := Snapshot{ Snapshot: sn, ID: sn.ID(), ShortID: sn.ID().Str(), } - *js = append(*js, k) + resultList = append(resultList, k) } + return resultList +} + +// KeepReason helps to print KeepReasons as JSON with Snapshots with their ID included. 
+type KeepReason struct { + Snapshot Snapshot `json:"snapshot"` + Matches []string `json:"matches"` +} + +func asJSONKeeps(list []restic.KeepReason) []KeepReason { + var resultList []KeepReason + for _, keep := range list { + k := KeepReason{ + Snapshot: Snapshot{ + Snapshot: keep.Snapshot, + ID: keep.Snapshot.ID(), + ShortID: keep.Snapshot.ID().Str(), + }, + Matches: keep.Matches, + } + resultList = append(resultList, k) + } + return resultList } func printJSONForget(stdout io.Writer, forgets []*ForgetGroup) error { diff --git a/cmd/restic/cmd_forget_integration_test.go b/cmd/restic/cmd_forget_integration_test.go index 1c027a240..e4cdb744e 100644 --- a/cmd/restic/cmd_forget_integration_test.go +++ b/cmd/restic/cmd_forget_integration_test.go @@ -5,6 +5,7 @@ import ( "testing" rtest "github.com/restic/restic/internal/test" + "github.com/restic/restic/internal/ui/termstatus" ) func testRunForget(t testing.TB, gopts GlobalOptions, args ...string) { @@ -12,5 +13,7 @@ func testRunForget(t testing.TB, gopts GlobalOptions, args ...string) { pruneOpts := PruneOptions{ MaxUnused: "5%", } - rtest.OK(t, runForget(context.TODO(), opts, pruneOpts, gopts, args)) + rtest.OK(t, withTermStatus(gopts, func(ctx context.Context, term *termstatus.Terminal) error { + return runForget(context.TODO(), opts, pruneOpts, gopts, term, args) + })) } diff --git a/cmd/restic/cmd_init.go b/cmd/restic/cmd_init.go index 7154279e8..e6ea69441 100644 --- a/cmd/restic/cmd_init.go +++ b/cmd/restic/cmd_init.go @@ -80,7 +80,7 @@ func runInit(ctx context.Context, opts InitOptions, gopts GlobalOptions, args [] return err } - gopts.password, err = ReadPasswordTwice(gopts, + gopts.password, err = ReadPasswordTwice(ctx, gopts, "enter password for new repository: ", "enter password again: ") if err != nil { @@ -131,7 +131,7 @@ func runInit(ctx context.Context, opts InitOptions, gopts GlobalOptions, args [] func maybeReadChunkerPolynomial(ctx context.Context, opts InitOptions, gopts GlobalOptions) (*chunker.Pol, error) { if opts.CopyChunkerParameters { - otherGopts, _, err := fillSecondaryGlobalOpts(opts.secondaryRepoOptions, gopts, "secondary") + otherGopts, _, err := fillSecondaryGlobalOpts(ctx, opts.secondaryRepoOptions, gopts, "secondary") if err != nil { return nil, err } diff --git a/cmd/restic/cmd_key_add.go b/cmd/restic/cmd_key_add.go index 43a38f4eb..306754627 100644 --- a/cmd/restic/cmd_key_add.go +++ b/cmd/restic/cmd_key_add.go @@ -50,22 +50,17 @@ func runKeyAdd(ctx context.Context, gopts GlobalOptions, opts KeyAddOptions, arg return fmt.Errorf("the key add command expects no arguments, only options - please see `restic help key add` for usage and flags") } - repo, err := OpenRepository(ctx, gopts) - if err != nil { - return err - } - - lock, ctx, err := lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) + ctx, repo, unlock, err := openWithAppendLock(ctx, gopts, false) if err != nil { return err } + defer unlock() return addKey(ctx, repo, gopts, opts) } func addKey(ctx context.Context, repo *repository.Repository, gopts GlobalOptions, opts KeyAddOptions) error { - pw, err := getNewPassword(gopts, opts.NewPasswordFile) + pw, err := getNewPassword(ctx, gopts, opts.NewPasswordFile) if err != nil { return err } @@ -88,7 +83,7 @@ func addKey(ctx context.Context, repo *repository.Repository, gopts GlobalOption // testKeyNewPassword is used to set a new password during integration testing. 
var testKeyNewPassword string -func getNewPassword(gopts GlobalOptions, newPasswordFile string) (string, error) { +func getNewPassword(ctx context.Context, gopts GlobalOptions, newPasswordFile string) (string, error) { if testKeyNewPassword != "" { return testKeyNewPassword, nil } @@ -102,7 +97,7 @@ func getNewPassword(gopts GlobalOptions, newPasswordFile string) (string, error) newopts := gopts newopts.password = "" - return ReadPasswordTwice(newopts, + return ReadPasswordTwice(ctx, newopts, "enter new password: ", "enter password again: ") } diff --git a/cmd/restic/cmd_key_list.go b/cmd/restic/cmd_key_list.go index 2b3574281..fcca6055a 100644 --- a/cmd/restic/cmd_key_list.go +++ b/cmd/restic/cmd_key_list.go @@ -40,19 +40,11 @@ func runKeyList(ctx context.Context, gopts GlobalOptions, args []string) error { return fmt.Errorf("the key list command expects no arguments, only options - please see `restic help key list` for usage and flags") } - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock) if err != nil { return err } - - if !gopts.NoLock { - var lock *restic.Lock - lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - } + defer unlock() return listKeys(ctx, repo, gopts) } @@ -61,6 +53,7 @@ func listKeys(ctx context.Context, s *repository.Repository, gopts GlobalOptions type keyInfo struct { Current bool `json:"current"` ID string `json:"id"` + ShortID string `json:"-"` UserName string `json:"userName"` HostName string `json:"hostName"` Created string `json:"created"` @@ -78,7 +71,8 @@ func listKeys(ctx context.Context, s *repository.Repository, gopts GlobalOptions key := keyInfo{ Current: id == s.KeyID(), - ID: id.Str(), + ID: id.String(), + ShortID: id.Str(), UserName: k.Username, HostName: k.Hostname, Created: k.Created.Local().Format(TimeFormat), @@ -99,7 +93,7 @@ func listKeys(ctx context.Context, s *repository.Repository, gopts GlobalOptions } tab := table.New() - tab.AddColumn(" ID", "{{if .Current}}*{{else}} {{end}}{{ .ID }}") + tab.AddColumn(" ID", "{{if .Current}}*{{else}} {{end}}{{ .ShortID }}") tab.AddColumn("User", "{{ .UserName }}") tab.AddColumn("Host", "{{ .HostName }}") tab.AddColumn("Created", "{{ .Created }}") diff --git a/cmd/restic/cmd_key_passwd.go b/cmd/restic/cmd_key_passwd.go index cb916274c..0836c4cfe 100644 --- a/cmd/restic/cmd_key_passwd.go +++ b/cmd/restic/cmd_key_passwd.go @@ -47,22 +47,17 @@ func runKeyPasswd(ctx context.Context, gopts GlobalOptions, opts KeyPasswdOption return fmt.Errorf("the key passwd command expects no arguments, only options - please see `restic help key passwd` for usage and flags") } - repo, err := OpenRepository(ctx, gopts) - if err != nil { - return err - } - - lock, ctx, err := lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) + ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, false) if err != nil { return err } + defer unlock() return changePassword(ctx, repo, gopts, opts) } func changePassword(ctx context.Context, repo *repository.Repository, gopts GlobalOptions, opts KeyPasswdOptions) error { - pw, err := getNewPassword(gopts, opts.NewPasswordFile) + pw, err := getNewPassword(ctx, gopts, opts.NewPasswordFile) if err != nil { return err } diff --git a/cmd/restic/cmd_key_remove.go b/cmd/restic/cmd_key_remove.go index c8e303ffc..93babb4f3 100644 --- a/cmd/restic/cmd_key_remove.go +++ b/cmd/restic/cmd_key_remove.go @@ -37,20 +37,13 @@ func runKeyRemove(ctx 
context.Context, gopts GlobalOptions, args []string) error return fmt.Errorf("key remove expects one argument as the key id") } - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, false) if err != nil { return err } + defer unlock() - lock, ctx, err := lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - - idPrefix := args[0] - - return deleteKey(ctx, repo, idPrefix) + return deleteKey(ctx, repo, args[0]) } func deleteKey(ctx context.Context, repo *repository.Repository, idPrefix string) error { diff --git a/cmd/restic/cmd_list.go b/cmd/restic/cmd_list.go index becad7f0d..27f59b4ab 100644 --- a/cmd/restic/cmd_list.go +++ b/cmd/restic/cmd_list.go @@ -36,19 +36,11 @@ func runList(ctx context.Context, gopts GlobalOptions, args []string) error { return errors.Fatal("type not specified") } - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock || args[0] == "locks") if err != nil { return err } - - if !gopts.NoLock && args[0] != "locks" { - var lock *restic.Lock - lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - } + defer unlock() var t restic.FileType switch args[0] { @@ -67,10 +59,9 @@ func runList(ctx context.Context, gopts GlobalOptions, args []string) error { if err != nil { return err } - idx.Each(ctx, func(blobs restic.PackedBlob) { + return idx.Each(ctx, func(blobs restic.PackedBlob) { Printf("%v %v\n", blobs.Type, blobs.ID) }) - return nil }) default: return errors.Fatal("invalid type") diff --git a/cmd/restic/cmd_ls.go b/cmd/restic/cmd_ls.go index b0246625e..c4fb32de3 100644 --- a/cmd/restic/cmd_ls.go +++ b/cmd/restic/cmd_ls.go @@ -309,10 +309,11 @@ func runLs(ctx context.Context, opts LsOptions, gopts GlobalOptions, args []stri return false } - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock) if err != nil { return err } + defer unlock() snapshotLister, err := restic.MemorizeList(ctx, repo, restic.SnapshotFile) if err != nil { diff --git a/cmd/restic/cmd_migrate.go b/cmd/restic/cmd_migrate.go index fd2e762c0..c3f82b8dd 100644 --- a/cmd/restic/cmd_migrate.go +++ b/cmd/restic/cmd_migrate.go @@ -117,16 +117,11 @@ func applyMigrations(ctx context.Context, opts MigrateOptions, gopts GlobalOptio } func runMigrate(ctx context.Context, opts MigrateOptions, gopts GlobalOptions, args []string) error { - repo, err := OpenRepository(ctx, gopts) - if err != nil { - return err - } - - lock, ctx, err := lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) + ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, false) if err != nil { return err } + defer unlock() if len(args) == 0 { return checkMigrations(ctx, repo) diff --git a/cmd/restic/cmd_mount.go b/cmd/restic/cmd_mount.go index 5fd81b344..5a10447f3 100644 --- a/cmd/restic/cmd_mount.go +++ b/cmd/restic/cmd_mount.go @@ -125,19 +125,11 @@ func runMount(ctx context.Context, opts MountOptions, gopts GlobalOptions, args debug.Log("start mount") defer debug.Log("finish mount") - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock) if err != nil { return err } - - if !gopts.NoLock { - var lock *restic.Lock - lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - } + defer 
unlock() bar := newIndexProgress(gopts.Quiet, gopts.JSON) err = repo.LoadIndex(ctx, bar) @@ -160,28 +152,15 @@ func runMount(ctx context.Context, opts MountOptions, gopts GlobalOptions, args } } - AddCleanupHandler(func(code int) (int, error) { - debug.Log("running umount cleanup handler for mount at %v", mountpoint) - err := umount(mountpoint) - if err != nil { - Warnf("unable to umount (maybe already umounted or still in use?): %v\n", err) - } - // replace error code of sigint - if code == 130 { - code = 0 - } - return code, nil - }) + systemFuse.Debug = func(msg interface{}) { + debug.Log("fuse: %v", msg) + } c, err := systemFuse.Mount(mountpoint, mountOptions...) if err != nil { return err } - systemFuse.Debug = func(msg interface{}) { - debug.Log("fuse: %v", msg) - } - cfg := fuse.Config{ OwnerIsRoot: opts.OwnerRoot, Filter: opts.SnapshotFilter, @@ -195,15 +174,26 @@ func runMount(ctx context.Context, opts MountOptions, gopts GlobalOptions, args Printf("When finished, quit with Ctrl-c here or umount the mountpoint.\n") debug.Log("serving mount at %v", mountpoint) - err = fs.Serve(c, root) - if err != nil { - return err + + done := make(chan struct{}) + + go func() { + defer close(done) + err = fs.Serve(c, root) + }() + + select { + case <-ctx.Done(): + debug.Log("running umount cleanup handler for mount at %v", mountpoint) + err := systemFuse.Unmount(mountpoint) + if err != nil { + Warnf("unable to umount (maybe already umounted or still in use?): %v\n", err) + } + + return ErrOK + case <-done: + // clean shutdown, nothing to do } - <-c.Ready - return c.MountError -} - -func umount(mountpoint string) error { - return systemFuse.Unmount(mountpoint) + return err } diff --git a/cmd/restic/cmd_mount_integration_test.go b/cmd/restic/cmd_mount_integration_test.go index d2025a395..d764b4e4f 100644 --- a/cmd/restic/cmd_mount_integration_test.go +++ b/cmd/restic/cmd_mount_integration_test.go @@ -12,7 +12,7 @@ import ( "testing" "time" - "github.com/restic/restic/internal/repository" + systemFuse "github.com/anacrolix/fuse" "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" ) @@ -66,7 +66,7 @@ func testRunMount(t testing.TB, gopts GlobalOptions, dir string, wg *sync.WaitGr func testRunUmount(t testing.TB, dir string) { var err error for i := 0; i < mountWait; i++ { - if err = umount(dir); err == nil { + if err = systemFuse.Unmount(dir); err == nil { t.Logf("directory %v umounted", dir) return } @@ -86,12 +86,12 @@ func listSnapshots(t testing.TB, dir string) []string { return names } -func checkSnapshots(t testing.TB, global GlobalOptions, repo *repository.Repository, mountpoint, repodir string, snapshotIDs restic.IDs, expectedSnapshotsInFuseDir int) { +func checkSnapshots(t testing.TB, gopts GlobalOptions, mountpoint string, snapshotIDs restic.IDs, expectedSnapshotsInFuseDir int) { t.Logf("checking for %d snapshots: %v", len(snapshotIDs), snapshotIDs) var wg sync.WaitGroup wg.Add(1) - go testRunMount(t, global, mountpoint, &wg) + go testRunMount(t, gopts, mountpoint, &wg) waitForMount(t, mountpoint) defer wg.Wait() defer testRunUmount(t, mountpoint) @@ -100,7 +100,7 @@ func checkSnapshots(t testing.TB, global GlobalOptions, repo *repository.Reposit t.Fatal(`virtual directory "snapshots" doesn't exist`) } - ids := listSnapshots(t, repodir) + ids := listSnapshots(t, gopts.Repo) t.Logf("found %v snapshots in repo: %v", len(ids), ids) namesInSnapshots := listSnapshots(t, mountpoint) @@ -124,6 +124,10 @@ func checkSnapshots(t testing.TB, global GlobalOptions, 
repo *repository.Reposit } } + _, repo, unlock, err := openWithReadLock(context.TODO(), gopts, false) + rtest.OK(t, err) + defer unlock() + for _, id := range snapshotIDs { snapshot, err := restic.LoadSnapshot(context.TODO(), repo, id) rtest.OK(t, err) @@ -166,10 +170,7 @@ func TestMount(t *testing.T) { testRunInit(t, env.gopts) - repo, err := OpenRepository(context.TODO(), env.gopts) - rtest.OK(t, err) - - checkSnapshots(t, env.gopts, repo, env.mountpoint, env.repo, []restic.ID{}, 0) + checkSnapshots(t, env.gopts, env.mountpoint, []restic.ID{}, 0) rtest.SetupTarTestFixture(t, env.testdata, filepath.Join("testdata", "backup-data.tar.gz")) @@ -179,7 +180,7 @@ func TestMount(t *testing.T) { rtest.Assert(t, len(snapshotIDs) == 1, "expected one snapshot, got %v", snapshotIDs) - checkSnapshots(t, env.gopts, repo, env.mountpoint, env.repo, snapshotIDs, 2) + checkSnapshots(t, env.gopts, env.mountpoint, snapshotIDs, 2) // second backup, implicit incremental testRunBackup(t, "", []string{env.testdata}, BackupOptions{}, env.gopts) @@ -187,7 +188,7 @@ func TestMount(t *testing.T) { rtest.Assert(t, len(snapshotIDs) == 2, "expected two snapshots, got %v", snapshotIDs) - checkSnapshots(t, env.gopts, repo, env.mountpoint, env.repo, snapshotIDs, 3) + checkSnapshots(t, env.gopts, env.mountpoint, snapshotIDs, 3) // third backup, explicit incremental bopts := BackupOptions{Parent: snapshotIDs[0].String()} @@ -196,7 +197,7 @@ func TestMount(t *testing.T) { rtest.Assert(t, len(snapshotIDs) == 3, "expected three snapshots, got %v", snapshotIDs) - checkSnapshots(t, env.gopts, repo, env.mountpoint, env.repo, snapshotIDs, 4) + checkSnapshots(t, env.gopts, env.mountpoint, snapshotIDs, 4) } func TestMountSameTimestamps(t *testing.T) { @@ -211,14 +212,11 @@ func TestMountSameTimestamps(t *testing.T) { rtest.SetupTarTestFixture(t, env.base, filepath.Join("testdata", "repo-same-timestamps.tar.gz")) - repo, err := OpenRepository(context.TODO(), env.gopts) - rtest.OK(t, err) - ids := []restic.ID{ restic.TestParseID("280303689e5027328889a06d718b729e96a1ce6ae9ef8290bff550459ae611ee"), restic.TestParseID("75ad6cdc0868e082f2596d5ab8705e9f7d87316f5bf5690385eeff8dbe49d9f5"), restic.TestParseID("5fd0d8b2ef0fa5d23e58f1e460188abb0f525c0f0c4af8365a1280c807a80a1b"), } - checkSnapshots(t, env.gopts, repo, env.mountpoint, env.repo, ids, 4) + checkSnapshots(t, env.gopts, env.mountpoint, ids, 4) } diff --git a/cmd/restic/cmd_prune.go b/cmd/restic/cmd_prune.go index 1b9352ea7..cbec100df 100644 --- a/cmd/restic/cmd_prune.go +++ b/cmd/restic/cmd_prune.go @@ -4,26 +4,20 @@ import ( "context" "math" "runtime" - "sort" "strconv" "strings" "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" - "github.com/restic/restic/internal/index" - "github.com/restic/restic/internal/pack" "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" "github.com/restic/restic/internal/ui" "github.com/restic/restic/internal/ui/progress" + "github.com/restic/restic/internal/ui/termstatus" "github.com/spf13/cobra" ) -var errorIndexIncomplete = errors.Fatal("index is not complete") -var errorPacksMissing = errors.Fatal("packs from index missing in repo") -var errorSizeNotMatching = errors.Fatal("pack size does not match calculated size from index") - var cmdPrune = &cobra.Command{ Use: "prune [flags]", Short: "Remove unneeded data from the repository", @@ -38,7 +32,9 @@ Exit status is 0 if the command was successful, and non-zero if there was any er `, DisableAutoGenTag: true, RunE: func(cmd 
*cobra.Command, _ []string) error { - return runPrune(cmd.Context(), pruneOptions, globalOptions) + term, cancel := setupTermstatus() + defer cancel() + return runPrune(cmd.Context(), pruneOptions, globalOptions, term) }, } @@ -138,7 +134,7 @@ func verifyPruneOptions(opts *PruneOptions) error { return nil } -func runPrune(ctx context.Context, opts PruneOptions, gopts GlobalOptions) error { +func runPrune(ctx context.Context, opts PruneOptions, gopts GlobalOptions, term *termstatus.Terminal) error { err := verifyPruneOptions(&opts) if err != nil { return err @@ -148,18 +144,11 @@ func runPrune(ctx context.Context, opts PruneOptions, gopts GlobalOptions) error return errors.Fatal("disabled compression and `--repack-uncompressed` are mutually exclusive") } - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, false) if err != nil { return err } - - if repo.Connections() < 2 { - return errors.Fatal("prune requires a backend connection limit of at least two") - } - - if repo.Config().Version < 2 && opts.RepackUncompressed { - return errors.Fatal("compression requires at least repository format version 2") - } + defer unlock() if opts.UnsafeNoSpaceRecovery != "" { repoID := repo.Config().ID @@ -169,16 +158,10 @@ func runPrune(ctx context.Context, opts PruneOptions, gopts GlobalOptions) error opts.unsafeRecovery = true } - lock, ctx, err := lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - - return runPruneWithRepo(ctx, opts, gopts, repo, restic.NewIDSet()) + return runPruneWithRepo(ctx, opts, gopts, repo, restic.NewIDSet(), term) } -func runPruneWithRepo(ctx context.Context, opts PruneOptions, gopts GlobalOptions, repo *repository.Repository, ignoreSnapshots restic.IDSet) error { +func runPruneWithRepo(ctx context.Context, opts PruneOptions, gopts GlobalOptions, repo *repository.Repository, ignoreSnapshots restic.IDSet, term *termstatus.Terminal) error { // we do not need index updates while pruning! 
repo.DisableAutoIndexUpdate() @@ -186,24 +169,43 @@ func runPruneWithRepo(ctx context.Context, opts PruneOptions, gopts GlobalOption Print("warning: running prune without a cache, this may be very slow!\n") } - Verbosef("loading indexes...\n") + printer := newTerminalProgressPrinter(gopts.verbosity, term) + + printer.P("loading indexes...\n") // loading the index before the snapshots is ok, as we use an exclusive lock here - bar := newIndexProgress(gopts.Quiet, gopts.JSON) + bar := newIndexTerminalProgress(gopts.Quiet, gopts.JSON, term) err := repo.LoadIndex(ctx, bar) if err != nil { return err } - plan, stats, err := planPrune(ctx, opts, repo, ignoreSnapshots, gopts.Quiet) + popts := repository.PruneOptions{ + DryRun: opts.DryRun, + UnsafeRecovery: opts.unsafeRecovery, + + MaxUnusedBytes: opts.maxUnusedBytes, + MaxRepackBytes: opts.MaxRepackBytes, + + RepackCachableOnly: opts.RepackCachableOnly, + RepackSmall: opts.RepackSmall, + RepackUncompressed: opts.RepackUncompressed, + } + + plan, err := repository.PlanPrune(ctx, popts, repo, func(ctx context.Context, repo restic.Repository) (usedBlobs restic.CountedBlobSet, err error) { + return getUsedBlobs(ctx, repo, ignoreSnapshots, printer) + }, printer) if err != nil { return err } - - if opts.DryRun { - Verbosef("\nWould have made the following changes:") + if ctx.Err() != nil { + return ctx.Err() } - err = printPruneStats(stats) + if popts.DryRun { + printer.P("\nWould have made the following changes:") + } + + err = printPruneStats(printer, plan.Stats()) if err != nil { return err } @@ -211,605 +213,54 @@ func runPruneWithRepo(ctx context.Context, opts PruneOptions, gopts GlobalOption // Trigger GC to reset garbage collection threshold runtime.GC() - return doPrune(ctx, opts, gopts, repo, plan) -} - -type pruneStats struct { - blobs struct { - used uint - duplicate uint - unused uint - remove uint - repack uint - repackrm uint - } - size struct { - used uint64 - duplicate uint64 - unused uint64 - remove uint64 - repack uint64 - repackrm uint64 - unref uint64 - uncompressed uint64 - } - packs struct { - used uint - unused uint - partlyUsed uint - unref uint - keep uint - repack uint - remove uint - } -} - -type prunePlan struct { - removePacksFirst restic.IDSet // packs to remove first (unreferenced packs) - repackPacks restic.IDSet // packs to repack - keepBlobs restic.CountedBlobSet // blobs to keep during repacking - removePacks restic.IDSet // packs to remove - ignorePacks restic.IDSet // packs to ignore when rebuilding the index -} - -type packInfo struct { - usedBlobs uint - unusedBlobs uint - usedSize uint64 - unusedSize uint64 - tpe restic.BlobType - uncompressed bool -} - -type packInfoWithID struct { - ID restic.ID - packInfo - mustCompress bool -} - -// planPrune selects which files to rewrite and which to delete and which blobs to keep. -// Also some summary statistics are returned. 
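// The planPrune/doPrune implementation removed below has moved into the
// repository package. A minimal sketch of the new call shape, pieced together
// from the call sites above; the exact signatures of repository.PlanPrune,
// Plan.Stats and Plan.Execute are inferred, not confirmed, since the
// repository package itself is not part of this diff.
package main

import (
	"context"
	"math"

	"github.com/restic/restic/internal/repository"
	"github.com/restic/restic/internal/restic"
	"github.com/restic/restic/internal/ui/progress"
)

func pruneFlowSketch(ctx context.Context, repo *repository.Repository, printer progress.Printer) error {
	popts := repository.PruneOptions{
		// assumption: MaxUnusedBytes computes the tolerated unused size from
		// the used size, as the removed opts.maxUnusedBytes(...) call suggests
		MaxUnusedBytes: func(used uint64) uint64 { return used / 20 }, // 5%
		MaxRepackBytes: math.MaxUint64,
	}
	plan, err := repository.PlanPrune(ctx, popts, repo,
		func(ctx context.Context, repo restic.Repository) (restic.CountedBlobSet, error) {
			// the command supplies the blobs still referenced by any snapshot
			return getUsedBlobs(ctx, repo, restic.NewIDSet(), printer)
		}, printer)
	if err != nil {
		return err
	}
	if err := printPruneStats(printer, plan.Stats()); err != nil {
		return err
	}
	return plan.Execute(ctx, printer)
}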
-func planPrune(ctx context.Context, opts PruneOptions, repo restic.Repository, ignoreSnapshots restic.IDSet, quiet bool) (prunePlan, pruneStats, error) { - var stats pruneStats - - usedBlobs, err := getUsedBlobs(ctx, repo, ignoreSnapshots, quiet) - if err != nil { - return prunePlan{}, stats, err - } - - Verbosef("searching used packs...\n") - keepBlobs, indexPack, err := packInfoFromIndex(ctx, repo.Index(), usedBlobs, &stats) - if err != nil { - return prunePlan{}, stats, err - } - - Verbosef("collecting packs for deletion and repacking\n") - plan, err := decidePackAction(ctx, opts, repo, indexPack, &stats, quiet) - if err != nil { - return prunePlan{}, stats, err - } - - if len(plan.repackPacks) != 0 { - blobCount := keepBlobs.Len() - // when repacking, we do not want to keep blobs which are - // already contained in kept packs, so delete them from keepBlobs - repo.Index().Each(ctx, func(blob restic.PackedBlob) { - if plan.removePacks.Has(blob.PackID) || plan.repackPacks.Has(blob.PackID) { - return - } - keepBlobs.Delete(blob.BlobHandle) - }) - - if keepBlobs.Len() < blobCount/2 { - // replace with copy to shrink map to necessary size if there's a chance to benefit - keepBlobs = keepBlobs.Copy() - } - } else { - // keepBlobs is only needed if packs are repacked - keepBlobs = nil - } - plan.keepBlobs = keepBlobs - - return plan, stats, nil -} - -func packInfoFromIndex(ctx context.Context, idx restic.MasterIndex, usedBlobs restic.CountedBlobSet, stats *pruneStats) (restic.CountedBlobSet, map[restic.ID]packInfo, error) { - // iterate over all blobs in index to find out which blobs are duplicates - // The counter in usedBlobs describes how many instances of the blob exist in the repository index - // Thus 0 == blob is missing, 1 == blob exists once, >= 2 == duplicates exist - idx.Each(ctx, func(blob restic.PackedBlob) { - bh := blob.BlobHandle - count, ok := usedBlobs[bh] - if ok { - if count < math.MaxUint8 { - // don't overflow, but saturate count at 255 - // this can lead to a non-optimal pack selection, but won't cause - // problems otherwise - count++ - } - - usedBlobs[bh] = count - } - }) - - // Check if all used blobs have been found in index - missingBlobs := restic.NewBlobSet() - for bh, count := range usedBlobs { - if count == 0 { - // blob does not exist in any pack files - missingBlobs.Insert(bh) - } - } - - if len(missingBlobs) != 0 { - Warnf("%v not found in the index\n\n"+ - "Integrity check failed: Data seems to be missing.\n"+ - "Will not start prune to prevent (additional) data loss!\n"+ - "Please report this error (along with the output of the 'prune' run) at\n"+ - "https://github.com/restic/restic/issues/new/choose\n", missingBlobs) - return nil, nil, errorIndexIncomplete - } - - indexPack := make(map[restic.ID]packInfo) - - // save computed pack header size - for pid, hdrSize := range pack.Size(ctx, idx, true) { - // initialize tpe with NumBlobTypes to indicate it's not set - indexPack[pid] = packInfo{tpe: restic.NumBlobTypes, usedSize: uint64(hdrSize)} - } - - hasDuplicates := false - // iterate over all blobs in index to generate packInfo - idx.Each(ctx, func(blob restic.PackedBlob) { - ip := indexPack[blob.PackID] - - // Set blob type if not yet set - if ip.tpe == restic.NumBlobTypes { - ip.tpe = blob.Type - } - - // mark mixed packs with "Invalid blob type" - if ip.tpe != blob.Type { - ip.tpe = restic.InvalidBlob - } - - bh := blob.BlobHandle - size := uint64(blob.Length) - dupCount := usedBlobs[bh] - switch { - case dupCount >= 2: - hasDuplicates = true - // mark 
as unused for now, we will later on select one copy - ip.unusedSize += size - ip.unusedBlobs++ - - // count as duplicate, will later on change one copy to be counted as used - stats.size.duplicate += size - stats.blobs.duplicate++ - case dupCount == 1: // used blob, not duplicate - ip.usedSize += size - ip.usedBlobs++ - - stats.size.used += size - stats.blobs.used++ - default: // unused blob - ip.unusedSize += size - ip.unusedBlobs++ - - stats.size.unused += size - stats.blobs.unused++ - } - if !blob.IsCompressed() { - ip.uncompressed = true - } - // update indexPack - indexPack[blob.PackID] = ip - }) - - // if duplicate blobs exist, those will be set to either "used" or "unused": - // - mark only one occurrence of duplicate blobs as used - // - if there are already some used blobs in a pack, possibly mark duplicates in this pack as "used" - // - if there are no used blobs in a pack, possibly mark duplicates as "unused" - if hasDuplicates { - // iterate again over all blobs in index (this is pretty cheap, all in-mem) - idx.Each(ctx, func(blob restic.PackedBlob) { - bh := blob.BlobHandle - count, ok := usedBlobs[bh] - // skip non-duplicate, aka. normal blobs - // count == 0 is used to mark that this was a duplicate blob with only a single occurrence remaining - if !ok || count == 1 { - return - } - - ip := indexPack[blob.PackID] - size := uint64(blob.Length) - switch { - case ip.usedBlobs > 0, count == 0: - // other used blobs in pack or "last" occurrence -> transition to used - ip.usedSize += size - ip.usedBlobs++ - ip.unusedSize -= size - ip.unusedBlobs-- - // same for the global statistics - stats.size.used += size - stats.blobs.used++ - stats.size.duplicate -= size - stats.blobs.duplicate-- - // let other occurrences remain marked as unused - usedBlobs[bh] = 1 - default: - // remain unused and decrease counter - count-- - if count == 1 { - // setting count to 1 would lead to forgetting that this blob had duplicates - // thus use the special value zero. This will select the last instance of the blob for keeping. - count = 0 - } - usedBlobs[bh] = count - } - // update indexPack - indexPack[blob.PackID] = ip - }) - } - - // Sanity check. If no duplicates exist, all blobs have value 1. After handling - // duplicates, this also applies to duplicates. - for _, count := range usedBlobs { - if count != 1 { - panic("internal error during blob selection") - } - } - - return usedBlobs, indexPack, nil -} - -func decidePackAction(ctx context.Context, opts PruneOptions, repo restic.Repository, indexPack map[restic.ID]packInfo, stats *pruneStats, quiet bool) (prunePlan, error) { - removePacksFirst := restic.NewIDSet() - removePacks := restic.NewIDSet() - repackPacks := restic.NewIDSet() - - var repackCandidates []packInfoWithID - var repackSmallCandidates []packInfoWithID - repoVersion := repo.Config().Version - // only repack very small files by default - targetPackSize := repo.PackSize() / 25 - if opts.RepackSmall { - // consider files with at least 80% of the target size as large enough - targetPackSize = repo.PackSize() / 5 * 4 - } - - // loop over all packs and decide what to do - bar := newProgressMax(!quiet, uint64(len(indexPack)), "packs processed") - err := repo.List(ctx, restic.PackFile, func(id restic.ID, packSize int64) error { - p, ok := indexPack[id] - if !ok { - // Pack was not referenced in index and is not used => immediately remove! 
- Verboseff("will remove pack %v as it is unused and not indexed\n", id.Str()) - removePacksFirst.Insert(id) - stats.size.unref += uint64(packSize) - return nil - } - - if p.unusedSize+p.usedSize != uint64(packSize) && p.usedBlobs != 0 { - // Pack size does not fit and pack is needed => error - // If the pack is not needed, this is no error, the pack can - // and will be simply removed, see below. - Warnf("pack %s: calculated size %d does not match real size %d\nRun 'restic repair index'.\n", - id.Str(), p.unusedSize+p.usedSize, packSize) - return errorSizeNotMatching - } - - // statistics - switch { - case p.usedBlobs == 0: - stats.packs.unused++ - case p.unusedBlobs == 0: - stats.packs.used++ - default: - stats.packs.partlyUsed++ - } - - if p.uncompressed { - stats.size.uncompressed += p.unusedSize + p.usedSize - } - mustCompress := false - if repoVersion >= 2 { - // repo v2: always repack tree blobs if uncompressed - // compress data blobs if requested - mustCompress = (p.tpe == restic.TreeBlob || opts.RepackUncompressed) && p.uncompressed - } - - // decide what to do - switch { - case p.usedBlobs == 0: - // All blobs in pack are no longer used => remove pack! - removePacks.Insert(id) - stats.blobs.remove += p.unusedBlobs - stats.size.remove += p.unusedSize - - case opts.RepackCachableOnly && p.tpe == restic.DataBlob: - // if this is a data pack and --repack-cacheable-only is set => keep pack! - stats.packs.keep++ - - case p.unusedBlobs == 0 && p.tpe != restic.InvalidBlob && !mustCompress: - if packSize >= int64(targetPackSize) { - // All blobs in pack are used and not mixed => keep pack! - stats.packs.keep++ - } else { - repackSmallCandidates = append(repackSmallCandidates, packInfoWithID{ID: id, packInfo: p, mustCompress: mustCompress}) - } - - default: - // all other packs are candidates for repacking - repackCandidates = append(repackCandidates, packInfoWithID{ID: id, packInfo: p, mustCompress: mustCompress}) - } - - delete(indexPack, id) - bar.Add(1) - return nil - }) - bar.Done() - if err != nil { - return prunePlan{}, err - } - - // At this point indexPacks contains only missing packs! - - // missing packs that are not needed can be ignored - ignorePacks := restic.NewIDSet() - for id, p := range indexPack { - if p.usedBlobs == 0 { - ignorePacks.Insert(id) - stats.blobs.remove += p.unusedBlobs - stats.size.remove += p.unusedSize - delete(indexPack, id) - } - } - - if len(indexPack) != 0 { - Warnf("The index references %d needed pack files which are missing from the repository:\n", len(indexPack)) - for id := range indexPack { - Warnf(" %v\n", id) - } - return prunePlan{}, errorPacksMissing - } - if len(ignorePacks) != 0 { - Warnf("Missing but unneeded pack files are referenced in the index, will be repaired\n") - for id := range ignorePacks { - Warnf("will forget missing pack file %v\n", id) - } - } - - if len(repackSmallCandidates) < 10 { - // too few small files to be worth the trouble, this also prevents endlessly repacking - // if there is just a single pack file below the target size - stats.packs.keep += uint(len(repackSmallCandidates)) - } else { - repackCandidates = append(repackCandidates, repackSmallCandidates...) - } - - // Sort repackCandidates such that packs with highest ratio unused/used space are picked first. - // This is equivalent to sorting by unused / total space. 
- // Instead of unused[i] / used[i] > unused[j] / used[j] we use - // unused[i] * used[j] > unused[j] * used[i] as uint32*uint32 < uint64 - // Moreover packs containing trees and too small packs are sorted to the beginning - sort.Slice(repackCandidates, func(i, j int) bool { - pi := repackCandidates[i].packInfo - pj := repackCandidates[j].packInfo - switch { - case pi.tpe != restic.DataBlob && pj.tpe == restic.DataBlob: - return true - case pj.tpe != restic.DataBlob && pi.tpe == restic.DataBlob: - return false - case pi.unusedSize+pi.usedSize < uint64(targetPackSize) && pj.unusedSize+pj.usedSize >= uint64(targetPackSize): - return true - case pj.unusedSize+pj.usedSize < uint64(targetPackSize) && pi.unusedSize+pi.usedSize >= uint64(targetPackSize): - return false - } - return pi.unusedSize*pj.usedSize > pj.unusedSize*pi.usedSize - }) - - repack := func(id restic.ID, p packInfo) { - repackPacks.Insert(id) - stats.blobs.repack += p.unusedBlobs + p.usedBlobs - stats.size.repack += p.unusedSize + p.usedSize - stats.blobs.repackrm += p.unusedBlobs - stats.size.repackrm += p.unusedSize - if p.uncompressed { - stats.size.uncompressed -= p.unusedSize + p.usedSize - } - } - - // calculate limit for number of unused bytes in the repo after repacking - maxUnusedSizeAfter := opts.maxUnusedBytes(stats.size.used) - - for _, p := range repackCandidates { - reachedUnusedSizeAfter := (stats.size.unused-stats.size.remove-stats.size.repackrm < maxUnusedSizeAfter) - reachedRepackSize := stats.size.repack+p.unusedSize+p.usedSize >= opts.MaxRepackBytes - packIsLargeEnough := p.unusedSize+p.usedSize >= uint64(targetPackSize) - - switch { - case reachedRepackSize: - stats.packs.keep++ - - case p.tpe != restic.DataBlob, p.mustCompress: - // repacking non-data packs / uncompressed-trees is only limited by repackSize - repack(p.ID, p.packInfo) - - case reachedUnusedSizeAfter && packIsLargeEnough: - // for all other packs stop repacking if tolerated unused size is reached. 
- stats.packs.keep++ - - default: - repack(p.ID, p.packInfo) - } - } - - stats.packs.unref = uint(len(removePacksFirst)) - stats.packs.repack = uint(len(repackPacks)) - stats.packs.remove = uint(len(removePacks)) - - if repo.Config().Version < 2 { - // compression not supported for repository format version 1 - stats.size.uncompressed = 0 - } - - return prunePlan{removePacksFirst: removePacksFirst, - removePacks: removePacks, - repackPacks: repackPacks, - ignorePacks: ignorePacks, - }, nil + return plan.Execute(ctx, printer) } // printPruneStats prints out the statistics -func printPruneStats(stats pruneStats) error { - Verboseff("\nused: %10d blobs / %s\n", stats.blobs.used, ui.FormatBytes(stats.size.used)) - if stats.blobs.duplicate > 0 { - Verboseff("duplicates: %10d blobs / %s\n", stats.blobs.duplicate, ui.FormatBytes(stats.size.duplicate)) +func printPruneStats(printer progress.Printer, stats repository.PruneStats) error { + printer.V("\nused: %10d blobs / %s\n", stats.Blobs.Used, ui.FormatBytes(stats.Size.Used)) + if stats.Blobs.Duplicate > 0 { + printer.V("duplicates: %10d blobs / %s\n", stats.Blobs.Duplicate, ui.FormatBytes(stats.Size.Duplicate)) } - Verboseff("unused: %10d blobs / %s\n", stats.blobs.unused, ui.FormatBytes(stats.size.unused)) - if stats.size.unref > 0 { - Verboseff("unreferenced: %s\n", ui.FormatBytes(stats.size.unref)) + printer.V("unused: %10d blobs / %s\n", stats.Blobs.Unused, ui.FormatBytes(stats.Size.Unused)) + if stats.Size.Unref > 0 { + printer.V("unreferenced: %s\n", ui.FormatBytes(stats.Size.Unref)) } - totalBlobs := stats.blobs.used + stats.blobs.unused + stats.blobs.duplicate - totalSize := stats.size.used + stats.size.duplicate + stats.size.unused + stats.size.unref - unusedSize := stats.size.duplicate + stats.size.unused - Verboseff("total: %10d blobs / %s\n", totalBlobs, ui.FormatBytes(totalSize)) - Verboseff("unused size: %s of total size\n", ui.FormatPercent(unusedSize, totalSize)) + totalBlobs := stats.Blobs.Used + stats.Blobs.Unused + stats.Blobs.Duplicate + totalSize := stats.Size.Used + stats.Size.Duplicate + stats.Size.Unused + stats.Size.Unref + unusedSize := stats.Size.Duplicate + stats.Size.Unused + printer.V("total: %10d blobs / %s\n", totalBlobs, ui.FormatBytes(totalSize)) + printer.V("unused size: %s of total size\n", ui.FormatPercent(unusedSize, totalSize)) - Verbosef("\nto repack: %10d blobs / %s\n", stats.blobs.repack, ui.FormatBytes(stats.size.repack)) - Verbosef("this removes: %10d blobs / %s\n", stats.blobs.repackrm, ui.FormatBytes(stats.size.repackrm)) - Verbosef("to delete: %10d blobs / %s\n", stats.blobs.remove, ui.FormatBytes(stats.size.remove+stats.size.unref)) - totalPruneSize := stats.size.remove + stats.size.repackrm + stats.size.unref - Verbosef("total prune: %10d blobs / %s\n", stats.blobs.remove+stats.blobs.repackrm, ui.FormatBytes(totalPruneSize)) - if stats.size.uncompressed > 0 { - Verbosef("not yet compressed: %s\n", ui.FormatBytes(stats.size.uncompressed)) + printer.P("\nto repack: %10d blobs / %s\n", stats.Blobs.Repack, ui.FormatBytes(stats.Size.Repack)) + printer.P("this removes: %10d blobs / %s\n", stats.Blobs.Repackrm, ui.FormatBytes(stats.Size.Repackrm)) + printer.P("to delete: %10d blobs / %s\n", stats.Blobs.Remove, ui.FormatBytes(stats.Size.Remove+stats.Size.Unref)) + totalPruneSize := stats.Size.Remove + stats.Size.Repackrm + stats.Size.Unref + printer.P("total prune: %10d blobs / %s\n", stats.Blobs.Remove+stats.Blobs.Repackrm, ui.FormatBytes(totalPruneSize)) + if stats.Size.Uncompressed > 0 { + 
printer.P("not yet compressed: %s\n", ui.FormatBytes(stats.Size.Uncompressed)) } - Verbosef("remaining: %10d blobs / %s\n", totalBlobs-(stats.blobs.remove+stats.blobs.repackrm), ui.FormatBytes(totalSize-totalPruneSize)) - unusedAfter := unusedSize - stats.size.remove - stats.size.repackrm - Verbosef("unused size after prune: %s (%s of remaining size)\n", + printer.P("remaining: %10d blobs / %s\n", totalBlobs-(stats.Blobs.Remove+stats.Blobs.Repackrm), ui.FormatBytes(totalSize-totalPruneSize)) + unusedAfter := unusedSize - stats.Size.Remove - stats.Size.Repackrm + printer.P("unused size after prune: %s (%s of remaining size)\n", ui.FormatBytes(unusedAfter), ui.FormatPercent(unusedAfter, totalSize-totalPruneSize)) - Verbosef("\n") - Verboseff("totally used packs: %10d\n", stats.packs.used) - Verboseff("partly used packs: %10d\n", stats.packs.partlyUsed) - Verboseff("unused packs: %10d\n\n", stats.packs.unused) + printer.P("\n") + printer.V("totally used packs: %10d\n", stats.Packs.Used) + printer.V("partly used packs: %10d\n", stats.Packs.PartlyUsed) + printer.V("unused packs: %10d\n\n", stats.Packs.Unused) - Verboseff("to keep: %10d packs\n", stats.packs.keep) - Verboseff("to repack: %10d packs\n", stats.packs.repack) - Verboseff("to delete: %10d packs\n", stats.packs.remove) - if stats.packs.unref > 0 { - Verboseff("to delete: %10d unreferenced packs\n\n", stats.packs.unref) + printer.V("to keep: %10d packs\n", stats.Packs.Keep) + printer.V("to repack: %10d packs\n", stats.Packs.Repack) + printer.V("to delete: %10d packs\n", stats.Packs.Remove) + if stats.Packs.Unref > 0 { + printer.V("to delete: %10d unreferenced packs\n\n", stats.Packs.Unref) } return nil } -// doPrune does the actual pruning: -// - remove unreferenced packs first -// - repack given pack files while keeping the given blobs -// - rebuild the index while ignoring all files that will be deleted -// - delete the files -// plan.removePacks and plan.ignorePacks are modified in this function. -func doPrune(ctx context.Context, opts PruneOptions, gopts GlobalOptions, repo restic.Repository, plan prunePlan) (err error) { - if opts.DryRun { - if !gopts.JSON && gopts.verbosity >= 2 { - Printf("Repeated prune dry-runs can report slightly different amounts of data to keep or repack. This is expected behavior.\n\n") - if len(plan.removePacksFirst) > 0 { - Printf("Would have removed the following unreferenced packs:\n%v\n\n", plan.removePacksFirst) - } - Printf("Would have repacked and removed the following packs:\n%v\n\n", plan.repackPacks) - Printf("Would have removed the following no longer used packs:\n%v\n\n", plan.removePacks) - } - // Always quit here if DryRun was set! 
- return nil - } - - // unreferenced packs can be safely deleted first - if len(plan.removePacksFirst) != 0 { - Verbosef("deleting unreferenced packs\n") - DeleteFiles(ctx, gopts, repo, plan.removePacksFirst, restic.PackFile) - } - - if len(plan.repackPacks) != 0 { - Verbosef("repacking packs\n") - bar := newProgressMax(!gopts.Quiet, uint64(len(plan.repackPacks)), "packs repacked") - _, err := repository.Repack(ctx, repo, repo, plan.repackPacks, plan.keepBlobs, bar) - bar.Done() - if err != nil { - return errors.Fatal(err.Error()) - } - - // Also remove repacked packs - plan.removePacks.Merge(plan.repackPacks) - - if len(plan.keepBlobs) != 0 { - Warnf("%v was not repacked\n\n"+ - "Integrity check failed.\n"+ - "Please report this error (along with the output of the 'prune' run) at\n"+ - "https://github.com/restic/restic/issues/new/choose\n", plan.keepBlobs) - return errors.Fatal("internal error: blobs were not repacked") - } - - // allow GC of the blob set - plan.keepBlobs = nil - } - - if len(plan.ignorePacks) == 0 { - plan.ignorePacks = plan.removePacks - } else { - plan.ignorePacks.Merge(plan.removePacks) - } - - if opts.unsafeRecovery { - Verbosef("deleting index files\n") - indexFiles := repo.Index().(*index.MasterIndex).IDs() - err = DeleteFilesChecked(ctx, gopts, repo, indexFiles, restic.IndexFile) - if err != nil { - return errors.Fatalf("%s", err) - } - } else if len(plan.ignorePacks) != 0 { - err = rebuildIndexFiles(ctx, gopts, repo, plan.ignorePacks, nil, false) - if err != nil { - return errors.Fatalf("%s", err) - } - } - - if len(plan.removePacks) != 0 { - Verbosef("removing %d old packs\n", len(plan.removePacks)) - DeleteFiles(ctx, gopts, repo, plan.removePacks, restic.PackFile) - } - - if opts.unsafeRecovery { - err = rebuildIndexFiles(ctx, gopts, repo, plan.ignorePacks, nil, true) - if err != nil { - return errors.Fatalf("%s", err) - } - } - - Verbosef("done\n") - return nil -} - -func rebuildIndexFiles(ctx context.Context, gopts GlobalOptions, repo restic.Repository, removePacks restic.IDSet, extraObsolete restic.IDs, skipDeletion bool) error { - Verbosef("rebuilding index\n") - - bar := newProgressMax(!gopts.Quiet, 0, "packs processed") - return repo.Index().Save(ctx, repo, removePacks, extraObsolete, restic.MasterIndexSaveOpts{ - SaveProgress: bar, - DeleteProgress: func() *progress.Counter { - return newProgressMax(!gopts.Quiet, 0, "old indexes deleted") - }, - DeleteReport: func(id restic.ID, _ error) { - if gopts.verbosity > 2 { - Verbosef("removed index %v\n", id.String()) - } - }, - SkipDeletion: skipDeletion, - }) -} - -func getUsedBlobs(ctx context.Context, repo restic.Repository, ignoreSnapshots restic.IDSet, quiet bool) (usedBlobs restic.CountedBlobSet, err error) { +func getUsedBlobs(ctx context.Context, repo restic.Repository, ignoreSnapshots restic.IDSet, printer progress.Printer) (usedBlobs restic.CountedBlobSet, err error) { var snapshotTrees restic.IDs - Verbosef("loading all snapshots...\n") + printer.P("loading all snapshots...\n") err = restic.ForAllSnapshots(ctx, repo, repo, ignoreSnapshots, func(id restic.ID, sn *restic.Snapshot, err error) error { if err != nil { @@ -824,11 +275,12 @@ func getUsedBlobs(ctx context.Context, repo restic.Repository, ignoreSnapshots r return nil, errors.Fatalf("failed loading snapshot: %v", err) } - Verbosef("finding data that is still in use for %d snapshots\n", len(snapshotTrees)) + printer.P("finding data that is still in use for %d snapshots\n", len(snapshotTrees)) usedBlobs = restic.NewCountedBlobSet() - bar := 
newProgressMax(!quiet, uint64(len(snapshotTrees)), "snapshots") + bar := printer.NewCounter("snapshots") + bar.SetMax(uint64(len(snapshotTrees))) defer bar.Done() err = restic.FindUsedBlobs(ctx, repo, snapshotTrees, usedBlobs, bar) diff --git a/cmd/restic/cmd_prune_integration_test.go b/cmd/restic/cmd_prune_integration_test.go index ebfa7ae4e..715adea9a 100644 --- a/cmd/restic/cmd_prune_integration_test.go +++ b/cmd/restic/cmd_prune_integration_test.go @@ -7,7 +7,9 @@ import ( "testing" "github.com/restic/restic/internal/backend" + "github.com/restic/restic/internal/repository" rtest "github.com/restic/restic/internal/test" + "github.com/restic/restic/internal/ui/termstatus" ) func testRunPrune(t testing.TB, gopts GlobalOptions, opts PruneOptions) { @@ -16,7 +18,9 @@ func testRunPrune(t testing.TB, gopts GlobalOptions, opts PruneOptions) { defer func() { gopts.backendTestHook = oldHook }() - rtest.OK(t, runPrune(context.TODO(), opts, gopts)) + rtest.OK(t, withTermStatus(gopts, func(ctx context.Context, term *termstatus.Terminal) error { + return runPrune(context.TODO(), opts, gopts, term) + })) } func TestPrune(t *testing.T) { @@ -31,7 +35,7 @@ func testPruneVariants(t *testing.T, unsafeNoSpaceRecovery bool) { } t.Run("0"+suffix, func(t *testing.T) { opts := PruneOptions{MaxUnused: "0%", unsafeRecovery: unsafeNoSpaceRecovery} - checkOpts := CheckOptions{ReadData: true, CheckUnused: true} + checkOpts := CheckOptions{ReadData: true, CheckUnused: !unsafeNoSpaceRecovery} testPrune(t, opts, checkOpts) }) @@ -84,7 +88,9 @@ func testRunForgetJSON(t testing.TB, gopts GlobalOptions, args ...string) { pruneOpts := PruneOptions{ MaxUnused: "5%", } - return runForget(context.TODO(), opts, pruneOpts, gopts, args) + return withTermStatus(gopts, func(ctx context.Context, term *termstatus.Terminal) error { + return runForget(context.TODO(), opts, pruneOpts, gopts, term, args) + }) }) rtest.OK(t, err) @@ -138,7 +144,9 @@ func TestPruneWithDamagedRepository(t *testing.T) { env.gopts.backendTestHook = oldHook }() // prune should fail - rtest.Assert(t, runPrune(context.TODO(), pruneDefaultOptions, env.gopts) == errorPacksMissing, + rtest.Assert(t, withTermStatus(env.gopts, func(ctx context.Context, term *termstatus.Terminal) error { + return runPrune(context.TODO(), pruneDefaultOptions, env.gopts, term) + }) == repository.ErrPacksMissing, "prune should have reported index not complete error") } @@ -218,7 +226,9 @@ func testEdgeCaseRepo(t *testing.T, tarfile string, optionsCheck CheckOptions, o testRunPrune(t, env.gopts, optionsPrune) testRunCheck(t, env.gopts) } else { - rtest.Assert(t, runPrune(context.TODO(), optionsPrune, env.gopts) != nil, + rtest.Assert(t, withTermStatus(env.gopts, func(ctx context.Context, term *termstatus.Terminal) error { + return runPrune(context.TODO(), optionsPrune, env.gopts, term) + }) != nil, "prune should have reported an error") } } diff --git a/cmd/restic/cmd_recover.go b/cmd/restic/cmd_recover.go index b97a7582b..debaa4e5b 100644 --- a/cmd/restic/cmd_recover.go +++ b/cmd/restic/cmd_recover.go @@ -40,16 +40,11 @@ func runRecover(ctx context.Context, gopts GlobalOptions) error { return err } - repo, err := OpenRepository(ctx, gopts) - if err != nil { - return err - } - - lock, ctx, err := lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) + ctx, repo, unlock, err := openWithAppendLock(ctx, gopts, false) if err != nil { return err } + defer unlock() snapshotLister, err := restic.MemorizeList(ctx, repo, restic.SnapshotFile) if err != nil { @@ -66,16 +61,22 
@@ func runRecover(ctx context.Context, gopts GlobalOptions) error { // tree. If it is not referenced, we have a root tree. trees := make(map[restic.ID]bool) - repo.Index().Each(ctx, func(blob restic.PackedBlob) { + err = repo.Index().Each(ctx, func(blob restic.PackedBlob) { if blob.Type == restic.TreeBlob { trees[blob.Blob.ID] = false } }) + if err != nil { + return err + } Verbosef("load %d trees\n", len(trees)) bar = newProgressMax(!gopts.Quiet, uint64(len(trees)), "trees loaded") for id := range trees { tree, err := restic.LoadTree(ctx, repo, id) + if ctx.Err() != nil { + return ctx.Err() + } if err != nil { Warnf("unable to load tree %v: %v\n", id.Str(), err) continue diff --git a/cmd/restic/cmd_repair_index.go b/cmd/restic/cmd_repair_index.go index ea36f02f6..50ba16e33 100644 --- a/cmd/restic/cmd_repair_index.go +++ b/cmd/restic/cmd_repair_index.go @@ -3,10 +3,8 @@ package main import ( "context" - "github.com/restic/restic/internal/index" - "github.com/restic/restic/internal/pack" "github.com/restic/restic/internal/repository" - "github.com/restic/restic/internal/restic" + "github.com/restic/restic/internal/ui/termstatus" "github.com/spf13/cobra" "github.com/spf13/pflag" ) @@ -25,7 +23,9 @@ Exit status is 0 if the command was successful, and non-zero if there was any er `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { - return runRebuildIndex(cmd.Context(), repairIndexOptions, globalOptions) + term, cancel := setupTermstatus() + defer cancel() + return runRebuildIndex(cmd.Context(), repairIndexOptions, globalOptions, term) }, } @@ -55,110 +55,22 @@ func init() { } } -func runRebuildIndex(ctx context.Context, opts RepairIndexOptions, gopts GlobalOptions) error { - repo, err := OpenRepository(ctx, gopts) +func runRebuildIndex(ctx context.Context, opts RepairIndexOptions, gopts GlobalOptions, term *termstatus.Terminal) error { + ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, false) + if err != nil { + return err + } + defer unlock() + + printer := newTerminalProgressPrinter(gopts.verbosity, term) + + err = repository.RepairIndex(ctx, repo, repository.RepairIndexOptions{ + ReadAllPacks: opts.ReadAllPacks, + }, printer) if err != nil { return err } - lock, ctx, err := lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - - return rebuildIndex(ctx, opts, gopts, repo) -} - -func rebuildIndex(ctx context.Context, opts RepairIndexOptions, gopts GlobalOptions, repo *repository.Repository) error { - var obsoleteIndexes restic.IDs - packSizeFromList := make(map[restic.ID]int64) - packSizeFromIndex := make(map[restic.ID]int64) - removePacks := restic.NewIDSet() - - if opts.ReadAllPacks { - // get list of old index files but start with empty index - err := repo.List(ctx, restic.IndexFile, func(id restic.ID, _ int64) error { - obsoleteIndexes = append(obsoleteIndexes, id) - return nil - }) - if err != nil { - return err - } - } else { - Verbosef("loading indexes...\n") - mi := index.NewMasterIndex() - err := index.ForAllIndexes(ctx, repo, repo, func(id restic.ID, idx *index.Index, _ bool, err error) error { - if err != nil { - Warnf("removing invalid index %v: %v\n", id, err) - obsoleteIndexes = append(obsoleteIndexes, id) - return nil - } - - mi.Insert(idx) - return nil - }) - if err != nil { - return err - } - - err = mi.MergeFinalIndexes() - if err != nil { - return err - } - - err = repo.SetIndex(mi) - if err != nil { - return err - } - packSizeFromIndex = pack.Size(ctx, repo.Index(), 
false) - } - - Verbosef("getting pack files to read...\n") - err := repo.List(ctx, restic.PackFile, func(id restic.ID, packSize int64) error { - size, ok := packSizeFromIndex[id] - if !ok || size != packSize { - // Pack was not referenced in index or size does not match - packSizeFromList[id] = packSize - removePacks.Insert(id) - } - if !ok { - Warnf("adding pack file to index %v\n", id) - } else if size != packSize { - Warnf("reindexing pack file %v with unexpected size %v instead of %v\n", id, packSize, size) - } - delete(packSizeFromIndex, id) - return nil - }) - if err != nil { - return err - } - for id := range packSizeFromIndex { - // forget pack files that are referenced in the index but do not exist - // when rebuilding the index - removePacks.Insert(id) - Warnf("removing not found pack file %v\n", id) - } - - if len(packSizeFromList) > 0 { - Verbosef("reading pack files\n") - bar := newProgressMax(!gopts.Quiet, uint64(len(packSizeFromList)), "packs") - invalidFiles, err := repo.CreateIndexFromPacks(ctx, packSizeFromList, bar) - bar.Done() - if err != nil { - return err - } - - for _, id := range invalidFiles { - Verboseff("skipped incomplete pack file: %v\n", id) - } - } - - err = rebuildIndexFiles(ctx, gopts, repo, removePacks, obsoleteIndexes, false) - if err != nil { - return err - } - Verbosef("done\n") - + printer.P("done\n") return nil } diff --git a/cmd/restic/cmd_repair_index_integration_test.go b/cmd/restic/cmd_repair_index_integration_test.go index e3271361a..e1a3dfe03 100644 --- a/cmd/restic/cmd_repair_index_integration_test.go +++ b/cmd/restic/cmd_repair_index_integration_test.go @@ -13,12 +13,15 @@ import ( "github.com/restic/restic/internal/index" "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" + "github.com/restic/restic/internal/ui/termstatus" ) func testRunRebuildIndex(t testing.TB, gopts GlobalOptions) { rtest.OK(t, withRestoreGlobalOptions(func() error { - globalOptions.stdout = io.Discard - return runRebuildIndex(context.TODO(), RepairIndexOptions{}, gopts) + return withTermStatus(gopts, func(ctx context.Context, term *termstatus.Terminal) error { + globalOptions.stdout = io.Discard + return runRebuildIndex(context.TODO(), RepairIndexOptions{}, gopts, term) + }) })) } @@ -126,12 +129,13 @@ func TestRebuildIndexFailsOnAppendOnly(t *testing.T) { rtest.SetupTarTestFixture(t, env.base, datafile) err := withRestoreGlobalOptions(func() error { - globalOptions.stdout = io.Discard - env.gopts.backendTestHook = func(r backend.Backend) (backend.Backend, error) { return &appendOnlyBackend{r}, nil } - return runRebuildIndex(context.TODO(), RepairIndexOptions{}, env.gopts) + return withTermStatus(env.gopts, func(ctx context.Context, term *termstatus.Terminal) error { + globalOptions.stdout = io.Discard + return runRebuildIndex(context.TODO(), RepairIndexOptions{}, env.gopts, term) + }) }) if err == nil { diff --git a/cmd/restic/cmd_repair_packs.go b/cmd/restic/cmd_repair_packs.go index 521b5859f..636213965 100644 --- a/cmd/restic/cmd_repair_packs.go +++ b/cmd/restic/cmd_repair_packs.go @@ -52,25 +52,20 @@ func runRepairPacks(ctx context.Context, gopts GlobalOptions, term *termstatus.T return errors.Fatal("no ids specified") } - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, false) if err != nil { return err } + defer unlock() - lock, ctx, err := lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } + printer := 
newTerminalProgressPrinter(gopts.verbosity, term) - bar := newIndexProgress(gopts.Quiet, gopts.JSON) + bar := newIndexTerminalProgress(gopts.Quiet, gopts.JSON, term) err = repo.LoadIndex(ctx, bar) if err != nil { return errors.Fatalf("%s", err) } - printer := newTerminalProgressPrinter(gopts.verbosity, term) - printer.P("saving backup copies of pack files to current folder") for id := range ids { f, err := os.OpenFile("pack-"+id.String(), os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0o666) @@ -87,6 +82,10 @@ func runRepairPacks(ctx context.Context, gopts GlobalOptions, term *termstatus.T return err }) if err != nil { + _ = f.Close() + return err + } + if err := f.Close(); err != nil { return err } } diff --git a/cmd/restic/cmd_repair_snapshots.go b/cmd/restic/cmd_repair_snapshots.go index cc3d0eb85..b200d100a 100644 --- a/cmd/restic/cmd_repair_snapshots.go +++ b/cmd/restic/cmd_repair_snapshots.go @@ -66,22 +66,11 @@ func init() { } func runRepairSnapshots(ctx context.Context, gopts GlobalOptions, opts RepairOptions, args []string) error { - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, opts.DryRun) if err != nil { return err } - - if !opts.DryRun { - var lock *restic.Lock - var err error - lock, ctx, err = lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - } else { - repo.SetDryRun() - } + defer unlock() snapshotLister, err := restic.MemorizeList(ctx, repo, restic.SnapshotFile) if err != nil { @@ -156,6 +145,9 @@ func runRepairSnapshots(ctx context.Context, gopts GlobalOptions, opts RepairOpt changedCount++ } } + if ctx.Err() != nil { + return ctx.Err() + } Verbosef("\n") if changedCount == 0 { diff --git a/cmd/restic/cmd_restore.go b/cmd/restic/cmd_restore.go index 58f257541..5161be50d 100644 --- a/cmd/restic/cmd_restore.go +++ b/cmd/restic/cmd_restore.go @@ -127,19 +127,11 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, debug.Log("restore %v to %v", snapshotIDString, opts.Target) - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock) if err != nil { return err } - - if !gopts.NoLock { - var lock *restic.Lock - lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - } + defer unlock() sn, subfolder, err := (&restic.SnapshotFilter{ Hosts: opts.Hosts, diff --git a/cmd/restic/cmd_restore_integration_test.go b/cmd/restic/cmd_restore_integration_test.go index 2c7cbe1fb..8da6f522a 100644 --- a/cmd/restic/cmd_restore_integration_test.go +++ b/cmd/restic/cmd_restore_integration_test.go @@ -4,13 +4,14 @@ import ( "context" "fmt" "io" - mrand "math/rand" + "math/rand" "os" "path/filepath" "syscall" "testing" "time" + "github.com/restic/restic/internal/feature" "github.com/restic/restic/internal/filter" "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" @@ -116,7 +117,7 @@ func TestRestore(t *testing.T) { for i := 0; i < 10; i++ { p := filepath.Join(env.testdata, fmt.Sprintf("foo/bar/testfile%v", i)) rtest.OK(t, os.MkdirAll(filepath.Dir(p), 0755)) - rtest.OK(t, appendRandomData(p, uint(mrand.Intn(2<<21)))) + rtest.OK(t, appendRandomData(p, uint(rand.Intn(2<<21)))) } opts := BackupOptions{} @@ -274,6 +275,7 @@ func TestRestoreNoMetadataOnIgnoredIntermediateDirs(t *testing.T) { } func TestRestoreLocalLayout(t *testing.T) { + defer feature.TestSetFlag(t, feature.Flag, 
feature.DeprecateS3LegacyLayout, false)() env, cleanup := withTestEnvironment(t) defer cleanup() diff --git a/cmd/restic/cmd_rewrite.go b/cmd/restic/cmd_rewrite.go index 62624e75c..38a868c5c 100644 --- a/cmd/restic/cmd_rewrite.go +++ b/cmd/restic/cmd_rewrite.go @@ -256,27 +256,22 @@ func runRewrite(ctx context.Context, opts RewriteOptions, gopts GlobalOptions, a return errors.Fatal("Nothing to do: no excludes provided and no new metadata provided") } - repo, err := OpenRepository(ctx, gopts) + var ( + repo *repository.Repository + unlock func() + err error + ) + + if opts.Forget { + Verbosef("create exclusive lock for repository\n") + ctx, repo, unlock, err = openWithExclusiveLock(ctx, gopts, opts.DryRun) + } else { + ctx, repo, unlock, err = openWithAppendLock(ctx, gopts, opts.DryRun) + } if err != nil { return err } - - if !opts.DryRun { - var lock *restic.Lock - var err error - if opts.Forget { - Verbosef("create exclusive lock for repository\n") - lock, ctx, err = lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON) - } else { - lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - } - defer unlockRepo(lock) - if err != nil { - return err - } - } else { - repo.SetDryRun() - } + defer unlock() snapshotLister, err := restic.MemorizeList(ctx, repo, restic.SnapshotFile) if err != nil { @@ -299,6 +294,9 @@ func runRewrite(ctx context.Context, opts RewriteOptions, gopts GlobalOptions, a changedCount++ } } + if ctx.Err() != nil { + return ctx.Err() + } Verbosef("\n") if changedCount == 0 { diff --git a/cmd/restic/cmd_rewrite_integration_test.go b/cmd/restic/cmd_rewrite_integration_test.go index 532855f57..71d6a60a5 100644 --- a/cmd/restic/cmd_rewrite_integration_test.go +++ b/cmd/restic/cmd_rewrite_integration_test.go @@ -78,8 +78,11 @@ func testRewriteMetadata(t *testing.T, metadata snapshotMetadataArgs) { createBasicRewriteRepo(t, env) testRunRewriteExclude(t, env.gopts, []string{}, true, metadata) - repo, _ := OpenRepository(context.TODO(), env.gopts) - snapshots, err := restic.TestLoadAllSnapshots(context.TODO(), repo, nil) + ctx, repo, unlock, err := openWithReadLock(context.TODO(), env.gopts, false) + rtest.OK(t, err) + defer unlock() + + snapshots, err := restic.TestLoadAllSnapshots(ctx, repo, nil) rtest.OK(t, err) rtest.Assert(t, len(snapshots) == 1, "expected one snapshot, got %v", len(snapshots)) newSnapshot := snapshots[0] diff --git a/cmd/restic/cmd_snapshots.go b/cmd/restic/cmd_snapshots.go index e94f2ed9b..faa86d3a6 100644 --- a/cmd/restic/cmd_snapshots.go +++ b/cmd/restic/cmd_snapshots.go @@ -9,6 +9,7 @@ import ( "strings" "github.com/restic/restic/internal/restic" + "github.com/restic/restic/internal/ui" "github.com/restic/restic/internal/ui/table" "github.com/spf13/cobra" ) @@ -58,24 +59,19 @@ func init() { } func runSnapshots(ctx context.Context, opts SnapshotOptions, gopts GlobalOptions, args []string) error { - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock) if err != nil { return err } - - if !gopts.NoLock { - var lock *restic.Lock - lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - } + defer unlock() var snapshots restic.Snapshots for sn := range FindFilteredSnapshots(ctx, repo, repo, &opts.SnapshotFilter, args) { snapshots = append(snapshots, sn) } + if ctx.Err() != nil { + return ctx.Err() + } snapshotGroups, grouped, err := restic.GroupSnapshots(snapshots, opts.GroupBy) if err != nil { return err @@ -163,6 
+159,11 @@ func PrintSnapshots(stdout io.Writer, list restic.Snapshots, reasons []restic.Ke keepReasons[*id] = reasons[i] } } + // check if any snapshot contains a summary + hasSize := false + for _, sn := range list { + hasSize = hasSize || (sn.Summary != nil) + } // always sort the snapshots so that the newer ones are listed last sort.SliceStable(list, func(i, j int) bool { @@ -198,6 +199,9 @@ func PrintSnapshots(stdout io.Writer, list restic.Snapshots, reasons []restic.Ke tab.AddColumn("Reasons", `{{ join .Reasons "\n" }}`) } tab.AddColumn("Paths", `{{ join .Paths "\n" }}`) + if hasSize { + tab.AddColumn("Size", `{{ .Size }}`) + } } type snapshot struct { @@ -207,6 +211,7 @@ func PrintSnapshots(stdout io.Writer, list restic.Snapshots, reasons []restic.Ke Tags []string Reasons []string Paths []string + Size string } var multiline bool @@ -228,6 +233,10 @@ func PrintSnapshots(stdout io.Writer, list restic.Snapshots, reasons []restic.Ke multiline = true } + if sn.Summary != nil { + data.Size = ui.FormatBytes(sn.Summary.TotalBytesProcessed) + } + tab.AddRow(data) } diff --git a/cmd/restic/cmd_stats.go b/cmd/restic/cmd_stats.go index d3078a419..a7891e5b0 100644 --- a/cmd/restic/cmd_stats.go +++ b/cmd/restic/cmd_stats.go @@ -38,7 +38,7 @@ depending on what you are trying to calculate. The modes are: * restore-size: (default) Counts the size of the restored files. -* files-by-contents: Counts total size of files, where a file is +* files-by-contents: Counts total size of unique files, where a file is considered unique if it has unique contents. * raw-data: Counts the size of blobs in the repository, regardless of how many files reference them. @@ -80,19 +80,11 @@ func runStats(ctx context.Context, opts StatsOptions, gopts GlobalOptions, args return err } - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock) if err != nil { return err } - - if !gopts.NoLock { - var lock *restic.Lock - lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - } + defer unlock() snapshotLister, err := restic.MemorizeList(ctx, repo, restic.SnapshotFile) if err != nil { @@ -125,9 +117,8 @@ func runStats(ctx context.Context, opts StatsOptions, gopts GlobalOptions, args return fmt.Errorf("error walking snapshot: %v", err) } } - - if err != nil { - return err + if ctx.Err() != nil { + return ctx.Err() } if opts.countMode == countModeRawData { @@ -270,11 +261,14 @@ func statsWalkTree(repo restic.Loader, opts StatsOptions, stats *statsContainer, // will still be restored stats.TotalFileCount++ - // if inodes are present, only count each inode once - // (hard links do not increase restore size) - if !hardLinkIndex.Has(node.Inode, node.DeviceID) || node.Inode == 0 { - hardLinkIndex.Add(node.Inode, node.DeviceID, struct{}{}) + if node.Links == 1 || node.Type == "dir" { stats.TotalSize += node.Size + } else { + // if hardlinks are present only count each deviceID+inode once + if !hardLinkIndex.Has(node.Inode, node.DeviceID) || node.Inode == 0 { + hardLinkIndex.Add(node.Inode, node.DeviceID, struct{}{}) + stats.TotalSize += node.Size + } } } @@ -357,7 +351,10 @@ func statsDebug(ctx context.Context, repo restic.Repository) error { Warnf("File Type: %v\n%v\n", t, hist) } - hist := statsDebugBlobs(ctx, repo) + hist, err := statsDebugBlobs(ctx, repo) + if err != nil { + return err + } for _, t := range []restic.BlobType{restic.DataBlob, restic.TreeBlob} { Warnf("Blob Type: %v\n%v\n\n", t, hist[t]) } @@ 
-375,17 +372,17 @@ func statsDebugFileType(ctx context.Context, repo restic.Lister, tpe restic.File return hist, err } -func statsDebugBlobs(ctx context.Context, repo restic.Repository) [restic.NumBlobTypes]*sizeHistogram { +func statsDebugBlobs(ctx context.Context, repo restic.Repository) ([restic.NumBlobTypes]*sizeHistogram, error) { var hist [restic.NumBlobTypes]*sizeHistogram for i := 0; i < len(hist); i++ { hist[i] = newSizeHistogram(2 * chunker.MaxSize) } - repo.Index().Each(ctx, func(pb restic.PackedBlob) { + err := repo.Index().Each(ctx, func(pb restic.PackedBlob) { hist[pb.Type].Add(uint64(pb.Length)) }) - return hist + return hist, err } type sizeClass struct { diff --git a/cmd/restic/cmd_tag.go b/cmd/restic/cmd_tag.go index 01f3ad8af..3bf386f2c 100644 --- a/cmd/restic/cmd_tag.go +++ b/cmd/restic/cmd_tag.go @@ -104,20 +104,12 @@ func runTag(ctx context.Context, opts TagOptions, gopts GlobalOptions, args []st return errors.Fatal("--set and --add/--remove cannot be given at the same time") } - repo, err := OpenRepository(ctx, gopts) + Verbosef("create exclusive lock for repository\n") + ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, false) if err != nil { - return err - } - - if !gopts.NoLock { - Verbosef("create exclusive lock for repository\n") - var lock *restic.Lock - lock, ctx, err = lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } + return err } + defer unlock() changeCnt := 0 for sn := range FindFilteredSnapshots(ctx, repo, repo, &opts.SnapshotFilter, args) { @@ -130,6 +122,9 @@ func runTag(ctx context.Context, opts TagOptions, gopts GlobalOptions, args []st changeCnt++ } } + if ctx.Err() != nil { + return ctx.Err() + } if changeCnt == 0 { Verbosef("no snapshots were modified\n") } else { diff --git a/cmd/restic/cmd_tag_integration_test.go b/cmd/restic/cmd_tag_integration_test.go index 3b902c51e..6979f9c11 100644 --- a/cmd/restic/cmd_tag_integration_test.go +++ b/cmd/restic/cmd_tag_integration_test.go @@ -12,6 +12,7 @@ func testRunTag(t testing.TB, opts TagOptions, gopts GlobalOptions) { rtest.OK(t, runTag(context.TODO(), opts, gopts, []string{})) } +// nolint: staticcheck // false positive nil pointer dereference check func TestTag(t *testing.T) { env, cleanup := withTestEnvironment(t) defer cleanup() diff --git a/cmd/restic/delete.go b/cmd/restic/delete.go deleted file mode 100644 index c3a7e039d..000000000 --- a/cmd/restic/delete.go +++ /dev/null @@ -1,41 +0,0 @@ -package main - -import ( - "context" - - "github.com/restic/restic/internal/restic" -) - -// DeleteFiles deletes the given fileList of fileType in parallel -// it will print a warning if there is an error, but continue deleting the remaining files -func DeleteFiles(ctx context.Context, gopts GlobalOptions, repo restic.Repository, fileList restic.IDSet, fileType restic.FileType) { - _ = deleteFiles(ctx, gopts, true, repo, fileList, fileType) -} - -// DeleteFilesChecked deletes the given fileList of fileType in parallel -// if an error occurs, it will cancel and return this error -func DeleteFilesChecked(ctx context.Context, gopts GlobalOptions, repo restic.Repository, fileList restic.IDSet, fileType restic.FileType) error { - return deleteFiles(ctx, gopts, false, repo, fileList, fileType) -} - -// deleteFiles deletes the given fileList of fileType in parallel -// if ignoreError=true, it will print a warning if there was an error, else it will abort.
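// The DeleteFiles/DeleteFilesChecked wrappers in the deleted
// cmd/restic/delete.go lose their last callers in this PR: doPrune and
// rebuildIndexFiles, which invoked them, are themselves removed, and prune
// execution now happens inside the repository package. A caller needing the
// same behaviour could use restic.ParallelRemove directly; a minimal sketch
// based on the deleted body (newProgressMax and Warnf as in the surrounding
// cmd/restic code):
func deleteFilesSketch(ctx context.Context, gopts GlobalOptions, repo restic.Repository, fileList restic.IDSet) error {
	bar := newProgressMax(!gopts.JSON && !gopts.Quiet, 0, "files deleted")
	defer bar.Done()
	return restic.ParallelRemove(ctx, repo, fileList, restic.PackFile,
		func(id restic.ID, err error) error {
			if err != nil && !gopts.JSON {
				Warnf("unable to remove %v/%v from the repository\n", restic.PackFile, id)
			}
			return err // propagate the error, like DeleteFilesChecked did
		}, bar)
}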
-func deleteFiles(ctx context.Context, gopts GlobalOptions, ignoreError bool, repo restic.Repository, fileList restic.IDSet, fileType restic.FileType) error { - bar := newProgressMax(!gopts.JSON && !gopts.Quiet, 0, "files deleted") - defer bar.Done() - - return restic.ParallelRemove(ctx, repo, fileList, fileType, func(id restic.ID, err error) error { - if err != nil { - if !gopts.JSON { - Warnf("unable to remove %v/%v from the repository\n", fileType, id) - } - if !ignoreError { - return err - } - } - if !gopts.JSON && gopts.verbosity > 2 { - Verbosef("removed %v/%v\n", fileType, id) - } - return nil - }, bar) -} diff --git a/cmd/restic/find.go b/cmd/restic/find.go index a990b458d..c7754d5d9 100644 --- a/cmd/restic/find.go +++ b/cmd/restic/find.go @@ -2,6 +2,7 @@ package main import ( "context" + "os" "github.com/restic/restic/internal/restic" "github.com/spf13/pflag" @@ -14,17 +15,27 @@ func initMultiSnapshotFilter(flags *pflag.FlagSet, filt *restic.SnapshotFilter, if !addHostShorthand { hostShorthand = "" } - flags.StringArrayVarP(&filt.Hosts, "host", hostShorthand, nil, "only consider snapshots for this `host` (can be specified multiple times)") + flags.StringArrayVarP(&filt.Hosts, "host", hostShorthand, nil, "only consider snapshots for this `host` (can be specified multiple times) (default: $RESTIC_HOST)") flags.Var(&filt.Tags, "tag", "only consider snapshots including `tag[,tag,...]` (can be specified multiple times)") flags.StringArrayVar(&filt.Paths, "path", nil, "only consider snapshots including this (absolute) `path` (can be specified multiple times)") + + // set default based on env if set + if host := os.Getenv("RESTIC_HOST"); host != "" { + filt.Hosts = []string{host} + } } // initSingleSnapshotFilter is used for commands that work on a single snapshot // MUST be combined with restic.FindFilteredSnapshot func initSingleSnapshotFilter(flags *pflag.FlagSet, filt *restic.SnapshotFilter) { - flags.StringArrayVarP(&filt.Hosts, "host", "H", nil, "only consider snapshots for this `host`, when snapshot ID \"latest\" is given (can be specified multiple times)") + flags.StringArrayVarP(&filt.Hosts, "host", "H", nil, "only consider snapshots for this `host`, when snapshot ID \"latest\" is given (can be specified multiple times) (default: $RESTIC_HOST)") flags.Var(&filt.Tags, "tag", "only consider snapshots including `tag[,tag,...]`, when snapshot ID \"latest\" is given (can be specified multiple times)") flags.StringArrayVar(&filt.Paths, "path", nil, "only consider snapshots including this (absolute) `path`, when snapshot ID \"latest\" is given (can be specified multiple times)") + + // set default based on env if set + if host := os.Getenv("RESTIC_HOST"); host != "" { + filt.Hosts = []string{host} + } } // FindFilteredSnapshots yields Snapshots, either given explicitly by `snapshotIDs` or filtered from the list of all snapshots. 
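The two filter initializers above follow the same idiom: register the ``--host`` flag with a nil default, then seed the bound slice from ``RESTIC_HOST`` before parsing, so an explicit flag on the command line still takes precedence. A minimal, self-contained sketch of that idiom, assuming only github.com/spf13/pflag; the program and variable names are illustrative, not restic's API:

.. code-block:: go

    // Sketch of the env-as-default idiom used by the filter initializers
    // above. Assumes only github.com/spf13/pflag; names are illustrative,
    // not restic's API.
    package main

    import (
    	"fmt"
    	"os"

    	"github.com/spf13/pflag"
    )

    func main() {
    	var hosts []string
    	flags := pflag.NewFlagSet("example", pflag.ExitOnError)
    	flags.StringArrayVarP(&hosts, "host", "H", nil,
    		"only consider snapshots for this `host` (default: $RESTIC_HOST)")

    	// Seed the default from the environment before parsing. pflag's
    	// string-array value replaces this seed on the first explicit --host
    	// and appends on subsequent ones, so command-line flags still win.
    	if host := os.Getenv("RESTIC_HOST"); host != "" {
    		hosts = []string{host}
    	}

    	_ = flags.Parse(os.Args[1:]) // ExitOnError: Parse exits on failure
    	fmt.Println("hosts:", hosts)
    }

With ``RESTIC_HOST=def`` and no arguments this prints ``[def]``; with ``--host abc`` it prints ``[abc]`` — the same behaviour the TestSnapshotFilter cases below assert.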
diff --git a/cmd/restic/find_test.go b/cmd/restic/find_test.go new file mode 100644 index 000000000..a98a14f04 --- /dev/null +++ b/cmd/restic/find_test.go @@ -0,0 +1,61 @@ +package main + +import ( + "testing" + + "github.com/restic/restic/internal/restic" + rtest "github.com/restic/restic/internal/test" + "github.com/spf13/pflag" +) + +func TestSnapshotFilter(t *testing.T) { + for _, test := range []struct { + name string + args []string + expected []string + env string + }{ + { + "no value", + []string{}, + nil, + "", + }, + { + "args only", + []string{"--host", "abc"}, + []string{"abc"}, + "", + }, + { + "env default", + []string{}, + []string{"def"}, + "def", + }, + { + "both", + []string{"--host", "abc"}, + []string{"abc"}, + "def", + }, + } { + t.Run(test.name, func(t *testing.T) { + t.Setenv("RESTIC_HOST", test.env) + + for _, mode := range []bool{false, true} { + set := pflag.NewFlagSet("test", pflag.PanicOnError) + flt := &restic.SnapshotFilter{} + if mode { + initMultiSnapshotFilter(set, flt, false) + } else { + initSingleSnapshotFilter(set, flt) + } + err := set.Parse(test.args) + rtest.OK(t, err) + + rtest.Equals(t, test.expected, flt.Hosts, "unexpected hosts") + } + }) + } +} diff --git a/cmd/restic/global.go b/cmd/restic/global.go index cc47496f3..6920caa8d 100644 --- a/cmd/restic/global.go +++ b/cmd/restic/global.go @@ -43,7 +43,7 @@ import ( "golang.org/x/term" ) -var version = "0.16.4-dev (compiled manually)" +const version = "0.16.4-dev (compiled manually)" // TimeFormat is the format used for all timestamps printed by restic. const TimeFormat = "2006-01-02 15:04:05" @@ -96,9 +96,6 @@ var globalOptions = GlobalOptions{ stderr: os.Stderr, } -var isReadingPassword bool -var internalGlobalCtx context.Context - func init() { backends := location.NewRegistry() backends.Register(azure.NewFactory()) @@ -112,15 +109,6 @@ func init() { backends.Register(swift.NewFactory()) globalOptions.backends = backends - var cancel context.CancelFunc - internalGlobalCtx, cancel = context.WithCancel(context.Background()) - AddCleanupHandler(func(code int) (int, error) { - // Must be called before the unlock cleanup handler to ensure that the latter is - // not blocked due to limited number of backend connections, see #1434 - cancel() - return code, nil - }) - f := cmdRoot.PersistentFlags() f.StringVarP(&globalOptions.Repo, "repo", "r", "", "`repository` to backup to or restore from (default: $RESTIC_REPOSITORY)") f.StringVarP(&globalOptions.RepositoryFile, "repository-file", "", "", "`file` to read the repository location from (default: $RESTIC_REPOSITORY_FILE)") @@ -165,8 +153,6 @@ func init() { // parse target pack size from env, on error the default value will be used targetPackSize, _ := strconv.ParseUint(os.Getenv("RESTIC_PACK_SIZE"), 10, 32) globalOptions.PackSize = uint(targetPackSize) - - restoreTerminal() } func stdinIsTerminal() bool { @@ -191,40 +177,6 @@ func stdoutTerminalWidth() int { return w } -// restoreTerminal installs a cleanup handler that restores the previous -// terminal state on exit. This handler is only intended to restore the -// terminal configuration if restic exits after receiving a signal. A regular -// program execution must revert changes to the terminal configuration itself. -// The terminal configuration is only restored while reading a password. 
-func restoreTerminal() { - if !term.IsTerminal(int(os.Stdout.Fd())) { - return - } - - fd := int(os.Stdout.Fd()) - state, err := term.GetState(fd) - if err != nil { - fmt.Fprintf(os.Stderr, "unable to get terminal state: %v\n", err) - return - } - - AddCleanupHandler(func(code int) (int, error) { - // Restoring the terminal configuration while restic runs in the - // background, causes restic to get stopped on unix systems with - // a SIGTTOU signal. Thus only restore the terminal settings if - // they might have been modified, which is the case while reading - // a password. - if !isReadingPassword { - return code, nil - } - err := term.Restore(fd, state) - if err != nil { - fmt.Fprintf(os.Stderr, "unable to restore terminal state: %v\n", err) - } - return code, err - }) -} - // ClearLine creates a platform dependent string to clear the current // line, so it can be overwritten. // @@ -333,24 +285,48 @@ func readPassword(in io.Reader) (password string, err error) { // readPasswordTerminal reads the password from the given reader which must be a // tty. Prompt is printed on the writer out before attempting to read the -// password. -func readPasswordTerminal(in *os.File, out io.Writer, prompt string) (password string, err error) { - fmt.Fprint(out, prompt) - isReadingPassword = true - buf, err := term.ReadPassword(int(in.Fd())) - isReadingPassword = false - fmt.Fprintln(out) +// password. If the context is canceled, the function leaks the password reading +// goroutine. +func readPasswordTerminal(ctx context.Context, in *os.File, out *os.File, prompt string) (password string, err error) { + fd := int(out.Fd()) + state, err := term.GetState(fd) + if err != nil { + fmt.Fprintf(os.Stderr, "unable to get terminal state: %v\n", err) + return "", err + } + + done := make(chan struct{}) + var buf []byte + + go func() { + defer close(done) + fmt.Fprint(out, prompt) + buf, err = term.ReadPassword(int(in.Fd())) + fmt.Fprintln(out) + }() + + select { + case <-ctx.Done(): + err := term.Restore(fd, state) + if err != nil { + fmt.Fprintf(os.Stderr, "unable to restore terminal state: %v\n", err) + } + return "", ctx.Err() + case <-done: + // clean shutdown, nothing to do + } + if err != nil { return "", errors.Wrap(err, "ReadPassword") } - password = string(buf) - return password, nil + return string(buf), nil } // ReadPassword reads the password from a password file, the environment -// variable RESTIC_PASSWORD or prompts the user. -func ReadPassword(opts GlobalOptions, prompt string) (string, error) { +// variable RESTIC_PASSWORD or prompts the user. If the context is canceled, +// the function leaks the password reading goroutine. +func ReadPassword(ctx context.Context, opts GlobalOptions, prompt string) (string, error) { if opts.password != "" { return opts.password, nil } @@ -361,7 +337,7 @@ func ReadPassword(opts GlobalOptions, prompt string) (string, error) { ) if stdinIsTerminal() { - password, err = readPasswordTerminal(os.Stdin, os.Stderr, prompt) + password, err = readPasswordTerminal(ctx, os.Stdin, os.Stderr, prompt) } else { password, err = readPassword(os.Stdin) Verbosef("reading repository password from stdin\n") @@ -379,14 +355,15 @@ func ReadPassword(opts GlobalOptions, prompt string) (string, error) { } // ReadPasswordTwice calls ReadPassword two times and returns an error when the -// passwords don't match. -func ReadPasswordTwice(gopts GlobalOptions, prompt1, prompt2 string) (string, error) { - pw1, err := ReadPassword(gopts, prompt1) +// passwords don't match. 
If the context is canceled, the function leaks the +// password reading goroutine. +func ReadPasswordTwice(ctx context.Context, gopts GlobalOptions, prompt1, prompt2 string) (string, error) { + pw1, err := ReadPassword(ctx, gopts, prompt1) if err != nil { return "", err } if stdinIsTerminal() { - pw2, err := ReadPassword(gopts, prompt2) + pw2, err := ReadPassword(ctx, gopts, prompt2) if err != nil { return "", err } @@ -469,7 +446,10 @@ func OpenRepository(ctx context.Context, opts GlobalOptions) (*repository.Reposi } for ; passwordTriesLeft > 0; passwordTriesLeft-- { - opts.password, err = ReadPassword(opts, "enter password for repository: ") + opts.password, err = ReadPassword(ctx, opts, "enter password for repository: ") + if ctx.Err() != nil { + return nil, ctx.Err() + } if err != nil && passwordTriesLeft > 1 { opts.password = "" fmt.Printf("%s. Try again\n", err) @@ -570,16 +550,13 @@ func parseConfig(loc location.Location, opts options.Options) (interface{}, erro return cfg, nil } -// Open the backend specified by a location config. -func open(ctx context.Context, s string, gopts GlobalOptions, opts options.Options) (backend.Backend, error) { +func innerOpen(ctx context.Context, s string, gopts GlobalOptions, opts options.Options, create bool) (backend.Backend, error) { debug.Log("parsing location %v", location.StripPassword(gopts.backends, s)) loc, err := location.Parse(gopts.backends, s) if err != nil { return nil, errors.Fatalf("parsing repository location failed: %v", err) } - var be backend.Backend - cfg, err := parseConfig(loc, opts) if err != nil { return nil, err @@ -599,7 +576,13 @@ func open(ctx context.Context, s string, gopts GlobalOptions, opts options.Optio return nil, errors.Fatalf("invalid backend: %q", loc.Scheme) } - be, err = factory.Open(ctx, cfg, rt, lim) + var be backend.Backend + if create { + be, err = factory.Create(ctx, cfg, rt, lim) + } else { + be, err = factory.Open(ctx, cfg, rt, lim) + } + if err != nil { return nil, errors.Fatalf("unable to open repository at %v: %v", location.StripPassword(gopts.backends, s), err) } @@ -615,6 +598,17 @@ func open(ctx context.Context, s string, gopts GlobalOptions, opts options.Optio } } + return be, nil +} + +// Open the backend specified by a location config. +func open(ctx context.Context, s string, gopts GlobalOptions, opts options.Options) (backend.Backend, error) { + + be, err := innerOpen(ctx, s, gopts, opts, false) + if err != nil { + return nil, err + } + // check if config is there fi, err := be.Stat(ctx, backend.Handle{Type: restic.ConfigFile}) if err != nil { @@ -630,31 +624,5 @@ func open(ctx context.Context, s string, gopts GlobalOptions, opts options.Optio // Create the backend specified by URI. 
func create(ctx context.Context, s string, gopts GlobalOptions, opts options.Options) (backend.Backend, error) { - debug.Log("parsing location %v", location.StripPassword(gopts.backends, s)) - loc, err := location.Parse(gopts.backends, s) - if err != nil { - return nil, err - } - - cfg, err := parseConfig(loc, opts) - if err != nil { - return nil, err - } - - rt, err := backend.Transport(globalOptions.TransportOptions) - if err != nil { - return nil, errors.Fatal(err.Error()) - } - - factory := gopts.backends.Lookup(loc.Scheme) - if factory == nil { - return nil, errors.Fatalf("invalid backend: %q", loc.Scheme) - } - - be, err := factory.Create(ctx, cfg, rt, nil) - if err != nil { - return nil, err - } - - return logger.New(sema.NewBackend(be)), nil + return innerOpen(ctx, s, gopts, opts, true) } diff --git a/cmd/restic/global_debug.go b/cmd/restic/global_debug.go index b798074d1..502b2cf6e 100644 --- a/cmd/restic/global_debug.go +++ b/cmd/restic/global_debug.go @@ -15,23 +15,28 @@ import ( "github.com/pkg/profile" ) -var ( - listenProfile string - memProfilePath string - cpuProfilePath string - traceProfilePath string - blockProfilePath string - insecure bool -) +type ProfileOptions struct { + listen string + memPath string + cpuPath string + tracePath string + blockPath string + insecure bool +} + +var profileOpts ProfileOptions +var prof interface { + Stop() +} func init() { f := cmdRoot.PersistentFlags() - f.StringVar(&listenProfile, "listen-profile", "", "listen on this `address:port` for memory profiling") - f.StringVar(&memProfilePath, "mem-profile", "", "write memory profile to `dir`") - f.StringVar(&cpuProfilePath, "cpu-profile", "", "write cpu profile to `dir`") - f.StringVar(&traceProfilePath, "trace-profile", "", "write trace to `dir`") - f.StringVar(&blockProfilePath, "block-profile", "", "write block profile to `dir`") - f.BoolVar(&insecure, "insecure-kdf", false, "use insecure KDF settings") + f.StringVar(&profileOpts.listen, "listen-profile", "", "listen on this `address:port` for memory profiling") + f.StringVar(&profileOpts.memPath, "mem-profile", "", "write memory profile to `dir`") + f.StringVar(&profileOpts.cpuPath, "cpu-profile", "", "write cpu profile to `dir`") + f.StringVar(&profileOpts.tracePath, "trace-profile", "", "write trace to `dir`") + f.StringVar(&profileOpts.blockPath, "block-profile", "", "write block profile to `dir`") + f.BoolVar(&profileOpts.insecure, "insecure-kdf", false, "use insecure KDF settings") } type fakeTestingTB struct{} @@ -41,10 +46,10 @@ func (fakeTestingTB) Logf(msg string, args ...interface{}) { } func runDebug() error { - if listenProfile != "" { - fmt.Fprintf(os.Stderr, "running profile HTTP server on %v\n", listenProfile) + if profileOpts.listen != "" { + fmt.Fprintf(os.Stderr, "running profile HTTP server on %v\n", profileOpts.listen) go func() { - err := http.ListenAndServe(listenProfile, nil) + err := http.ListenAndServe(profileOpts.listen, nil) if err != nil { fmt.Fprintf(os.Stderr, "profile HTTP server listen failed: %v\n", err) } @@ -52,16 +57,16 @@ func runDebug() error { } profilesEnabled := 0 - if memProfilePath != "" { + if profileOpts.memPath != "" { profilesEnabled++ } - if cpuProfilePath != "" { + if profileOpts.cpuPath != "" { profilesEnabled++ } - if traceProfilePath != "" { + if profileOpts.tracePath != "" { profilesEnabled++ } - if blockProfilePath != "" { + if profileOpts.blockPath != "" { profilesEnabled++ } @@ -69,30 +74,25 @@ func runDebug() error { return errors.Fatal("only one profile (memory, CPU, trace, or 
block) may be activated at the same time") } - var prof interface { - Stop() + if profileOpts.memPath != "" { + prof = profile.Start(profile.Quiet, profile.NoShutdownHook, profile.MemProfile, profile.ProfilePath(profileOpts.memPath)) + } else if profileOpts.cpuPath != "" { + prof = profile.Start(profile.Quiet, profile.NoShutdownHook, profile.CPUProfile, profile.ProfilePath(profileOpts.cpuPath)) + } else if profileOpts.tracePath != "" { + prof = profile.Start(profile.Quiet, profile.NoShutdownHook, profile.TraceProfile, profile.ProfilePath(profileOpts.tracePath)) + } else if profileOpts.blockPath != "" { + prof = profile.Start(profile.Quiet, profile.NoShutdownHook, profile.BlockProfile, profile.ProfilePath(profileOpts.blockPath)) } - if memProfilePath != "" { - prof = profile.Start(profile.Quiet, profile.NoShutdownHook, profile.MemProfile, profile.ProfilePath(memProfilePath)) - } else if cpuProfilePath != "" { - prof = profile.Start(profile.Quiet, profile.NoShutdownHook, profile.CPUProfile, profile.ProfilePath(cpuProfilePath)) - } else if traceProfilePath != "" { - prof = profile.Start(profile.Quiet, profile.NoShutdownHook, profile.TraceProfile, profile.ProfilePath(traceProfilePath)) - } else if blockProfilePath != "" { - prof = profile.Start(profile.Quiet, profile.NoShutdownHook, profile.BlockProfile, profile.ProfilePath(blockProfilePath)) - } - - if prof != nil { - AddCleanupHandler(func(code int) (int, error) { - prof.Stop() - return code, nil - }) - } - - if insecure { + if profileOpts.insecure { repository.TestUseLowSecurityKDFParameters(fakeTestingTB{}) } return nil } + +func stopDebug() { + if prof != nil { + prof.Stop() + } +} diff --git a/cmd/restic/global_release.go b/cmd/restic/global_release.go index 7cb2e6caf..1dab5a293 100644 --- a/cmd/restic/global_release.go +++ b/cmd/restic/global_release.go @@ -5,3 +5,6 @@ package main // runDebug is a noop without the debug tag. func runDebug() error { return nil } + +// stopDebug is a noop without the debug tag. 
+func stopDebug() {} diff --git a/cmd/restic/integration_helpers_test.go b/cmd/restic/integration_helpers_test.go index 184609d40..e7a90dd56 100644 --- a/cmd/restic/integration_helpers_test.go +++ b/cmd/restic/integration_helpers_test.go @@ -232,47 +232,66 @@ func testSetupBackupData(t testing.TB, env *testEnvironment) string { } func listPacks(gopts GlobalOptions, t *testing.T) restic.IDSet { - r, err := OpenRepository(context.TODO(), gopts) + ctx, r, unlock, err := openWithReadLock(context.TODO(), gopts, false) rtest.OK(t, err) + defer unlock() packs := restic.NewIDSet() - rtest.OK(t, r.List(context.TODO(), restic.PackFile, func(id restic.ID, size int64) error { + rtest.OK(t, r.List(ctx, restic.PackFile, func(id restic.ID, size int64) error { packs.Insert(id) return nil })) return packs } -func removePacks(gopts GlobalOptions, t testing.TB, remove restic.IDSet) { - r, err := OpenRepository(context.TODO(), gopts) +func listTreePacks(gopts GlobalOptions, t *testing.T) restic.IDSet { + ctx, r, unlock, err := openWithReadLock(context.TODO(), gopts, false) rtest.OK(t, err) + defer unlock() + + rtest.OK(t, r.LoadIndex(ctx, nil)) + treePacks := restic.NewIDSet() + rtest.OK(t, r.Index().Each(ctx, func(pb restic.PackedBlob) { + if pb.Type == restic.TreeBlob { + treePacks.Insert(pb.PackID) + } + })) + + return treePacks +} + +func removePacks(gopts GlobalOptions, t testing.TB, remove restic.IDSet) { + ctx, r, unlock, err := openWithExclusiveLock(context.TODO(), gopts, false) + rtest.OK(t, err) + defer unlock() for id := range remove { - rtest.OK(t, r.Backend().Remove(context.TODO(), backend.Handle{Type: restic.PackFile, Name: id.String()})) + rtest.OK(t, r.Backend().Remove(ctx, backend.Handle{Type: restic.PackFile, Name: id.String()})) } } func removePacksExcept(gopts GlobalOptions, t testing.TB, keep restic.IDSet, removeTreePacks bool) { - r, err := OpenRepository(context.TODO(), gopts) + ctx, r, unlock, err := openWithExclusiveLock(context.TODO(), gopts, false) rtest.OK(t, err) + defer unlock() // Get all tree packs - rtest.OK(t, r.LoadIndex(context.TODO(), nil)) + rtest.OK(t, r.LoadIndex(ctx, nil)) treePacks := restic.NewIDSet() - r.Index().Each(context.TODO(), func(pb restic.PackedBlob) { + rtest.OK(t, r.Index().Each(ctx, func(pb restic.PackedBlob) { if pb.Type == restic.TreeBlob { treePacks.Insert(pb.PackID) } - }) + })) // remove all packs containing data blobs - rtest.OK(t, r.List(context.TODO(), restic.PackFile, func(id restic.ID, size int64) error { + rtest.OK(t, r.List(ctx, restic.PackFile, func(id restic.ID, size int64) error { if treePacks.Has(id) != removeTreePacks || keep.Has(id) { return nil } - return r.Backend().Remove(context.TODO(), backend.Handle{Type: restic.PackFile, Name: id.String()}) + return r.Backend().Remove(ctx, backend.Handle{Type: restic.PackFile, Name: id.String()}) })) } diff --git a/cmd/restic/integration_test.go b/cmd/restic/integration_test.go index 7cf8396a3..a7b66add8 100644 --- a/cmd/restic/integration_test.go +++ b/cmd/restic/integration_test.go @@ -12,6 +12,7 @@ import ( "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" + "github.com/restic/restic/internal/ui/termstatus" ) func TestCheckRestoreNoLock(t *testing.T) { @@ -88,8 +89,12 @@ func TestListOnce(t *testing.T) { testRunPrune(t, env.gopts, pruneOpts) rtest.OK(t, runCheck(context.TODO(), checkOpts, env.gopts, nil)) - rtest.OK(t, runRebuildIndex(context.TODO(), RepairIndexOptions{}, env.gopts)) - rtest.OK(t, 
runRebuildIndex(context.TODO(), RepairIndexOptions{ReadAllPacks: true}, env.gopts)) + rtest.OK(t, withTermStatus(env.gopts, func(ctx context.Context, term *termstatus.Terminal) error { + return runRebuildIndex(context.TODO(), RepairIndexOptions{}, env.gopts, term) + })) + rtest.OK(t, withTermStatus(env.gopts, func(ctx context.Context, term *termstatus.Terminal) error { + return runRebuildIndex(context.TODO(), RepairIndexOptions{ReadAllPacks: true}, env.gopts, term) + })) } type writeToOnly struct { @@ -154,12 +159,13 @@ func TestFindListOnce(t *testing.T) { testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "3")}, opts, env.gopts) thirdSnapshot := restic.NewIDSet(testListSnapshots(t, env.gopts, 3)...) - repo, err := OpenRepository(context.TODO(), env.gopts) + ctx, repo, unlock, err := openWithReadLock(context.TODO(), env.gopts, false) rtest.OK(t, err) + defer unlock() snapshotIDs := restic.NewIDSet() // specify the two oldest snapshots explicitly and use "latest" to reference the newest one - for sn := range FindFilteredSnapshots(context.TODO(), repo, repo, &restic.SnapshotFilter{}, []string{ + for sn := range FindFilteredSnapshots(ctx, repo, repo, &restic.SnapshotFilter{}, []string{ secondSnapshot[0].String(), secondSnapshot[1].String()[:8], "latest", diff --git a/cmd/restic/lock.go b/cmd/restic/lock.go index 600b7476f..99e199a67 100644 --- a/cmd/restic/lock.go +++ b/cmd/restic/lock.go @@ -2,316 +2,47 @@ package main import ( "context" - "fmt" - "sync" - "time" - "github.com/restic/restic/internal/backend" - "github.com/restic/restic/internal/debug" - "github.com/restic/restic/internal/errors" - "github.com/restic/restic/internal/restic" + "github.com/restic/restic/internal/repository" ) -type lockContext struct { - lock *restic.Lock - cancel context.CancelFunc - refreshWG sync.WaitGroup -} - -var globalLocks struct { - locks map[*restic.Lock]*lockContext - sync.Mutex - sync.Once -} - -func lockRepo(ctx context.Context, repo restic.Repository, retryLock time.Duration, json bool) (*restic.Lock, context.Context, error) { - return lockRepository(ctx, repo, false, retryLock, json) -} - -func lockRepoExclusive(ctx context.Context, repo restic.Repository, retryLock time.Duration, json bool) (*restic.Lock, context.Context, error) { - return lockRepository(ctx, repo, true, retryLock, json) -} - -var ( - retrySleepStart = 5 * time.Second - retrySleepMax = 60 * time.Second -) - -func minDuration(a, b time.Duration) time.Duration { - if a <= b { - return a - } - return b -} - -// lockRepository wraps the ctx such that it is cancelled when the repository is unlocked -// cancelling the original context also stops the lock refresh -func lockRepository(ctx context.Context, repo restic.Repository, exclusive bool, retryLock time.Duration, json bool) (*restic.Lock, context.Context, error) { - // make sure that a repository is unlocked properly and after cancel() was - // called by the cleanup handler in global.go - globalLocks.Do(func() { - AddCleanupHandler(unlockAll) - }) - - lockFn := restic.NewLock - if exclusive { - lockFn = restic.NewExclusiveLock - } - - var lock *restic.Lock - var err error - - retrySleep := minDuration(retrySleepStart, retryLock) - retryMessagePrinted := false - retryTimeout := time.After(retryLock) - -retryLoop: - for { - lock, err = lockFn(ctx, repo) - if err != nil && restic.IsAlreadyLocked(err) { - - if !retryMessagePrinted { - if !json { - Verbosef("repo already locked, waiting up to %s for the lock\n", retryLock) - } - retryMessagePrinted = true - } - - 
debug.Log("repo already locked, retrying in %v", retrySleep) - retrySleepCh := time.After(retrySleep) - - select { - case <-ctx.Done(): - return nil, ctx, ctx.Err() - case <-retryTimeout: - debug.Log("repo already locked, timeout expired") - // Last lock attempt - lock, err = lockFn(ctx, repo) - break retryLoop - case <-retrySleepCh: - retrySleep = minDuration(retrySleep*2, retrySleepMax) - } - } else { - // anything else, either a successful lock or another error - break retryLoop - } - } - if restic.IsInvalidLock(err) { - return nil, ctx, errors.Fatalf("%v\n\nthe `unlock --remove-all` command can be used to remove invalid locks. Make sure that no other restic process is accessing the repository when running the command", err) - } +func internalOpenWithLocked(ctx context.Context, gopts GlobalOptions, dryRun bool, exclusive bool) (context.Context, *repository.Repository, func(), error) { + repo, err := OpenRepository(ctx, gopts) if err != nil { - return nil, ctx, fmt.Errorf("unable to create lock in backend: %w", err) + return nil, nil, nil, err } - debug.Log("create lock %p (exclusive %v)", lock, exclusive) - ctx, cancel := context.WithCancel(ctx) - lockInfo := &lockContext{ - lock: lock, - cancel: cancel, - } - lockInfo.refreshWG.Add(2) - refreshChan := make(chan struct{}) - forceRefreshChan := make(chan refreshLockRequest) + unlock := func() {} + if !dryRun { + var lock *repository.Unlocker - globalLocks.Lock() - globalLocks.locks[lock] = lockInfo - go refreshLocks(ctx, repo.Backend(), lockInfo, refreshChan, forceRefreshChan) - go monitorLockRefresh(ctx, lockInfo, refreshChan, forceRefreshChan) - globalLocks.Unlock() - - return lock, ctx, err -} - -var refreshInterval = 5 * time.Minute - -// consider a lock refresh failed a bit before the lock actually becomes stale -// the difference allows to compensate for a small time drift between clients. 
-var refreshabilityTimeout = restic.StaleLockTimeout - refreshInterval*3/2 - -type refreshLockRequest struct { - result chan bool -} - -func refreshLocks(ctx context.Context, backend backend.Backend, lockInfo *lockContext, refreshed chan<- struct{}, forceRefresh <-chan refreshLockRequest) { - debug.Log("start") - lock := lockInfo.lock - ticker := time.NewTicker(refreshInterval) - lastRefresh := lock.Time - - defer func() { - ticker.Stop() - // ensure that the context was cancelled before removing the lock - lockInfo.cancel() - - // remove the lock from the repo - debug.Log("unlocking repository with lock %v", lock) - if err := lock.Unlock(); err != nil { - debug.Log("error while unlocking: %v", err) - Warnf("error while unlocking: %v", err) + lock, ctx, err = repository.Lock(ctx, repo, exclusive, gopts.RetryLock, func(msg string) { + if !gopts.JSON { + Verbosef("%s", msg) + } + }, Warnf) + if err != nil { + return nil, nil, nil, err } - lockInfo.refreshWG.Done() - }() - - for { - select { - case <-ctx.Done(): - debug.Log("terminate") - return - - case req := <-forceRefresh: - debug.Log("trying to refresh stale lock") - // keep on going if our current lock still exists - success := tryRefreshStaleLock(ctx, backend, lock, lockInfo.cancel) - // inform refresh goroutine about forced refresh - select { - case <-ctx.Done(): - case req.result <- success: - } - - if success { - // update lock refresh time - lastRefresh = lock.Time - } - - case <-ticker.C: - if time.Since(lastRefresh) > refreshabilityTimeout { - // the lock is too old, wait until the expiry monitor cancels the context - continue - } - - debug.Log("refreshing locks") - err := lock.Refresh(context.TODO()) - if err != nil { - Warnf("unable to refresh lock: %v\n", err) - } else { - lastRefresh = lock.Time - // inform monitor goroutine about successful refresh - select { - case <-ctx.Done(): - case refreshed <- struct{}{}: - } - } - } + unlock = lock.Unlock + } else { + repo.SetDryRun() } + + return ctx, repo, unlock, nil } -func monitorLockRefresh(ctx context.Context, lockInfo *lockContext, refreshed <-chan struct{}, forceRefresh chan<- refreshLockRequest) { - // time.Now() might use a monotonic timer which is paused during standby - // convert to unix time to ensure we compare real time values - lastRefresh := time.Now().UnixNano() - pollDuration := 1 * time.Second - if refreshInterval < pollDuration { - // require for TestLockFailedRefresh - pollDuration = refreshInterval / 5 - } - // timers are paused during standby, which is a problem as the refresh timeout - // _must_ expire if the host was too long in standby. 
Thus fall back to periodic checks
-// https://github.com/golang/go/issues/35012
-	ticker := time.NewTicker(pollDuration)
-	defer func() {
-		ticker.Stop()
-		lockInfo.cancel()
-		lockInfo.refreshWG.Done()
-	}()
-
-	var refreshStaleLockResult chan bool
-
-	for {
-		select {
-		case <-ctx.Done():
-			debug.Log("terminate expiry monitoring")
-			return
-		case <-refreshed:
-			if refreshStaleLockResult != nil {
-				// ignore delayed refresh notifications while the stale lock is refreshed
-				continue
-			}
-			lastRefresh = time.Now().UnixNano()
-		case <-ticker.C:
-			if time.Now().UnixNano()-lastRefresh < refreshabilityTimeout.Nanoseconds() || refreshStaleLockResult != nil {
-				continue
-			}
-
-			debug.Log("trying to refreshStaleLock")
-			// keep on going if our current lock still exists
-			refreshReq := refreshLockRequest{
-				result: make(chan bool),
-			}
-			refreshStaleLockResult = refreshReq.result
-
-			// inform refresh goroutine about forced refresh
-			select {
-			case <-ctx.Done():
-			case forceRefresh <- refreshReq:
-			}
-		case success := <-refreshStaleLockResult:
-			if success {
-				lastRefresh = time.Now().UnixNano()
-				refreshStaleLockResult = nil
-				continue
-			}
-
-			Warnf("Fatal: failed to refresh lock in time\n")
-			return
-		}
-	}
+func openWithReadLock(ctx context.Context, gopts GlobalOptions, noLock bool) (context.Context, *repository.Repository, func(), error) {
+	// TODO enforce read-only operations once the locking code has moved to the repository
+	return internalOpenWithLocked(ctx, gopts, noLock, false)
 }
 
-func tryRefreshStaleLock(ctx context.Context, be backend.Backend, lock *restic.Lock, cancel context.CancelFunc) bool {
-	freeze := backend.AsBackend[backend.FreezeBackend](be)
-	if freeze != nil {
-		debug.Log("freezing backend")
-		freeze.Freeze()
-		defer freeze.Unfreeze()
-	}
-
-	err := lock.RefreshStaleLock(ctx)
-	if err != nil {
-		Warnf("failed to refresh stale lock: %v\n", err)
-		// cancel context while the backend is still frozen to prevent accidental modifications
-		cancel()
-		return false
-	}
-
-	return true
+func openWithAppendLock(ctx context.Context, gopts GlobalOptions, dryRun bool) (context.Context, *repository.Repository, func(), error) {
+	// TODO enforce non-exclusive operations once the locking code has moved to the repository
+	return internalOpenWithLocked(ctx, gopts, dryRun, false)
 }
 
-func unlockRepo(lock *restic.Lock) {
-	if lock == nil {
-		return
-	}
-
-	globalLocks.Lock()
-	lockInfo, exists := globalLocks.locks[lock]
-	delete(globalLocks.locks, lock)
-	globalLocks.Unlock()
-
-	if !exists {
-		debug.Log("unable to find lock %v in the global list of locks, ignoring", lock)
-		return
-	}
-	lockInfo.cancel()
-	lockInfo.refreshWG.Wait()
-}
-
-func unlockAll(code int) (int, error) {
-	globalLocks.Lock()
-	locks := globalLocks.locks
-	debug.Log("unlocking %d locks", len(globalLocks.locks))
-	for _, lockInfo := range globalLocks.locks {
-		lockInfo.cancel()
-	}
-	globalLocks.locks = make(map[*restic.Lock]*lockContext)
-	globalLocks.Unlock()
-
-	for _, lockInfo := range locks {
-		lockInfo.refreshWG.Wait()
-	}
-
-	return code, nil
-}
-
-func init() {
-	globalLocks.locks = make(map[*restic.Lock]*lockContext)
+func openWithExclusiveLock(ctx context.Context, gopts GlobalOptions, dryRun bool) (context.Context, *repository.Repository, func(), error) {
+	return internalOpenWithLocked(ctx, gopts, dryRun, true)
 }
diff --git a/cmd/restic/main.go b/cmd/restic/main.go
index b31ce1bb4..e847b8156 100644
--- a/cmd/restic/main.go
+++ b/cmd/restic/main.go
@@ -3,6 +3,7 @@ package main
 import (
 	"bufio"
 	"bytes"
+	"context"
"fmt" "log" "os" @@ -14,6 +15,7 @@ import ( "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/feature" "github.com/restic/restic/internal/options" "github.com/restic/restic/internal/restic" ) @@ -23,6 +25,8 @@ func init() { _, _ = maxprocs.Set() } +var ErrOK = errors.New("ok") + // cmdRoot is the base command when no other command has been specified. var cmdRoot = &cobra.Command{ Use: "restic", @@ -73,6 +77,9 @@ The full documentation can be found at https://restic.readthedocs.io/ . // enabled) return runDebug() }, + PersistentPostRun: func(_ *cobra.Command, _ []string) { + stopDebug() + }, } // Distinguish commands that need the password from those that work without, @@ -87,8 +94,6 @@ func needsPassword(cmd string) bool { } } -var logBuffer = bytes.NewBuffer(nil) - func tweakGoGC() { // lower GOGC from 100 to 50, unless it was manually overwritten by the user oldValue := godebug.SetGCPercent(50) @@ -101,12 +106,30 @@ func main() { tweakGoGC() // install custom global logger into a buffer, if an error occurs // we can show the logs + logBuffer := bytes.NewBuffer(nil) log.SetOutput(logBuffer) + err := feature.Flag.Apply(os.Getenv("RESTIC_FEATURES"), func(s string) { + fmt.Fprintln(os.Stderr, s) + }) + if err != nil { + fmt.Fprintln(os.Stderr, err) + Exit(1) + } + debug.Log("main %#v", os.Args) debug.Log("restic %s compiled with %v on %v/%v", version, runtime.Version(), runtime.GOOS, runtime.GOARCH) - err := cmdRoot.ExecuteContext(internalGlobalCtx) + + ctx := createGlobalContext() + err = cmdRoot.ExecuteContext(ctx) + + if err == nil { + err = ctx.Err() + } else if err == ErrOK { + // ErrOK overwrites context cancelation errors + err = nil + } switch { case restic.IsAlreadyLocked(err): @@ -128,11 +151,13 @@ func main() { } var exitCode int - switch err { - case nil: + switch { + case err == nil: exitCode = 0 - case ErrInvalidSourceData: + case err == ErrInvalidSourceData: exitCode = 3 + case errors.Is(err, context.Canceled): + exitCode = 130 default: exitCode = 1 } diff --git a/cmd/restic/secondary_repo.go b/cmd/restic/secondary_repo.go index 4c46b60df..2afd36a81 100644 --- a/cmd/restic/secondary_repo.go +++ b/cmd/restic/secondary_repo.go @@ -1,6 +1,7 @@ package main import ( + "context" "os" "github.com/restic/restic/internal/errors" @@ -56,7 +57,7 @@ func initSecondaryRepoOptions(f *pflag.FlagSet, opts *secondaryRepoOptions, repo opts.PasswordCommand = os.Getenv("RESTIC_FROM_PASSWORD_COMMAND") } -func fillSecondaryGlobalOpts(opts secondaryRepoOptions, gopts GlobalOptions, repoPrefix string) (GlobalOptions, bool, error) { +func fillSecondaryGlobalOpts(ctx context.Context, opts secondaryRepoOptions, gopts GlobalOptions, repoPrefix string) (GlobalOptions, bool, error) { if opts.Repo == "" && opts.RepositoryFile == "" && opts.LegacyRepo == "" && opts.LegacyRepositoryFile == "" { return GlobalOptions{}, false, errors.Fatal("Please specify a source repository location (--from-repo or --from-repository-file)") } @@ -109,7 +110,7 @@ func fillSecondaryGlobalOpts(opts secondaryRepoOptions, gopts GlobalOptions, rep return GlobalOptions{}, false, err } } - dstGopts.password, err = ReadPassword(dstGopts, "enter password for "+repoPrefix+" repository: ") + dstGopts.password, err = ReadPassword(ctx, dstGopts, "enter password for "+repoPrefix+" repository: ") if err != nil { return GlobalOptions{}, false, err } diff --git a/cmd/restic/secondary_repo_test.go b/cmd/restic/secondary_repo_test.go index ff1a10b03..aa511ca99 100644 --- 
a/cmd/restic/secondary_repo_test.go
+++ b/cmd/restic/secondary_repo_test.go
@@ -1,6 +1,7 @@
 package main
 
 import (
+	"context"
 	"os"
 	"path/filepath"
 	"testing"
@@ -170,7 +171,7 @@ func TestFillSecondaryGlobalOpts(t *testing.T) {
 
 	// Test all valid cases
 	for _, testCase := range validSecondaryRepoTestCases {
-		DstGOpts, isFromRepo, err := fillSecondaryGlobalOpts(testCase.Opts, gOpts, "destination")
+		DstGOpts, isFromRepo, err := fillSecondaryGlobalOpts(context.TODO(), testCase.Opts, gOpts, "destination")
 		rtest.OK(t, err)
 		rtest.Equals(t, DstGOpts, testCase.DstGOpts)
 		rtest.Equals(t, isFromRepo, testCase.FromRepo)
@@ -178,7 +179,7 @@ func TestFillSecondaryGlobalOpts(t *testing.T) {
 
 	// Test all invalid cases
 	for _, testCase := range invalidSecondaryRepoTestCases {
-		_, _, err := fillSecondaryGlobalOpts(testCase.Opts, gOpts, "destination")
+		_, _, err := fillSecondaryGlobalOpts(context.TODO(), testCase.Opts, gOpts, "destination")
 		rtest.Assert(t, err != nil, "Expected error, but function did not return an error")
 	}
 }
diff --git a/cmd/restic/testdata/repo-restore-permissions-test.tar.gz b/cmd/restic/testdata/repo-restore-permissions-test.tar.gz
index 36aa62dbf..dc8e9bc80 100644
Binary files a/cmd/restic/testdata/repo-restore-permissions-test.tar.gz and b/cmd/restic/testdata/repo-restore-permissions-test.tar.gz differ
diff --git a/doc/020_installation.rst b/doc/020_installation.rst
index 0f1cd6c04..17b581a87 100644
--- a/doc/020_installation.rst
+++ b/doc/020_installation.rst
@@ -77,8 +77,7 @@ avoid any conflicts:
 macOS
 =====
 
-If you are using macOS, you can install restic using the
-`homebrew <https://brew.sh/>`__ package manager:
+If you are using macOS, you can install restic using `Homebrew <https://brew.sh/>`__:
 
 .. code-block:: console
@@ -363,3 +362,18 @@ Example for using sudo to write a zsh completion script directly to the system-w
 the operating system used, e.g. ``/usr/share/bash-completion/completions/restic``
 in Debian and derivatives. Please look up the correct path in the appropriate
 documentation.
+
+Example for setting up a PowerShell completion script for the local user's profile:
+
+.. code-block:: pwsh-session
+
+    # Create profile if one does not exist
+    PS> If (!(Test-Path $PROFILE.CurrentUserAllHosts)) {New-Item -Path $PROFILE.CurrentUserAllHosts -Force}
+
+    PS> $ProfileDir = (Get-Item $PROFILE.CurrentUserAllHosts).Directory
+
+    # Generate Restic completions in the same directory as the profile
+    PS> restic generate --powershell-completion "$ProfileDir\restic-completion.ps1"
+
+    # Append to the profile file the command to load Restic completions
+    PS> Add-Content -Path $PROFILE.CurrentUserAllHosts -Value "`r`nImport-Module $ProfileDir\restic-completion.ps1"
diff --git a/doc/030_preparing_a_new_repo.rst b/doc/030_preparing_a_new_repo.rst
index 8661f5904..0c50b65be 100644
--- a/doc/030_preparing_a_new_repo.rst
+++ b/doc/030_preparing_a_new_repo.rst
@@ -201,15 +201,16 @@ scheme like this:
 
     $ restic -r rest:http://host:8000/ init
 
 Depending on your REST server setup, you can use HTTPS protocol,
-password protection, multiple repositories or any combination of
-those features. The TCP/IP port is also configurable. Here
-are some more examples:
+a unix socket, password protection, multiple repositories or any
+combination of those features. The TCP/IP port is also configurable.
+Here are some more examples:
 
 .. code-block:: console
 
     $ restic -r rest:https://host:8000/ init
     $ restic -r rest:https://user:pass@host:8000/ init
     $ restic -r rest:https://user:pass@host:8000/my_backup_repo/ init
+    $ restic -r rest:http+unix:///tmp/rest.socket:/my_backup_repo/ init
 
 The server username and password can be specified using environment
 variables as well:
diff --git a/doc/040_backup.rst b/doc/040_backup.rst
index b697e38bd..3a332ca75 100644
--- a/doc/040_backup.rst
+++ b/doc/040_backup.rst
@@ -56,6 +56,39 @@ snapshot for each volume that contains files to backup. Files are read from the
 VSS snapshot instead of the regular filesystem. This allows to backup files that are
 exclusively locked by another process during the backup.
 
+You can use additional options to change VSS behaviour:
+
+ * ``-o vss.timeout`` specifies the timeout for VSS snapshot creation; the default value is 120 seconds
+ * ``-o vss.exclude-all-mount-points`` disables auto snapshotting of all volume mount points
+ * ``-o vss.exclude-volumes`` allows excluding specific volumes or volume mount points from snapshotting
+ * ``-o vss.provider`` specifies the VSS provider used for snapshotting
+
+For example, a 2.5 minute timeout with snapshotting of mount points disabled can be specified as
+
+.. code-block:: console
+
+    -o vss.timeout=2m30s -o vss.exclude-all-mount-points=true
+
+and excluding drive ``d:\``, mount point ``c:\mnt`` and volume ``\\?\Volume{04ce0545-3391-11e0-ba2f-806e6f6e6963}\`` as
+
+.. code-block:: console
+
+    -o vss.exclude-volumes="d:;c:\mnt\;\\?\volume{04ce0545-3391-11e0-ba2f-806e6f6e6963}"
+
+The VSS provider can be specified by GUID
+
+.. code-block:: console
+
+    -o vss.provider={3f900f90-00e9-440e-873a-96ca5eb079e5}
+
+or by name
+
+.. code-block:: console
+
+    -o vss.provider="Hyper-V IC Software Shadow Copy Provider"
+
+``MS`` can also be used as an alias for ``Microsoft Software Shadow Copy provider 1.0``.
+
 By default VSS ignores Outlook OST files. This is not a restriction of restic
 but the default Windows VSS configuration. The files not to snapshot are
 configured in the Windows registry under the following key:
diff --git a/doc/045_working_with_repos.rst b/doc/045_working_with_repos.rst
index 48e5985dc..85c022580 100644
--- a/doc/045_working_with_repos.rst
+++ b/doc/045_working_with_repos.rst
@@ -18,19 +18,21 @@ Working with repositories
 Listing all snapshots
 =====================
 
-Now, you can list all the snapshots stored in the repository:
+Now, you can list all the snapshots stored in the repository. The size column
+only exists for snapshots created using restic 0.17.0 or later. It reflects the
+size of the contained files at the time when the snapshot was created.
 
 .. code-block:: console
 
     $ restic -r /srv/restic-repo snapshots
    enter password for repository:
-    ID        Date                 Host    Tags   Directory
-    ----------------------------------------------------------------------
-    40dc1520  2015-05-08 21:38:30  kasimir        /home/user/work
-    79766175  2015-05-08 21:40:19  kasimir        /home/user/work
-    bdbd3439  2015-05-08 21:45:17  luigi          /home/art
-    590c8fc8  2015-05-08 21:47:38  kazik          /srv
-    9f0bc19e  2015-05-08 21:46:11  luigi          /srv
+    ID        Date                 Host    Tags   Directory        Size
+    -------------------------------------------------------------------------
+    40dc1520  2015-05-08 21:38:30  kasimir        /home/user/work  20.643GiB
+    79766175  2015-05-08 21:40:19  kasimir        /home/user/work  20.645GiB
+    bdbd3439  2015-05-08 21:45:17  luigi          /home/art        3.141GiB
+    590c8fc8  2015-05-08 21:47:38  kazik          /srv             580.200MiB
+    9f0bc19e  2015-05-08 21:46:11  luigi          /srv             572.180MiB
 
 You can filter the listing by directory path:
 
@@ -38,10 +40,10 @@ You can filter the listing by directory path:
 
     $ restic -r /srv/restic-repo snapshots --path="/srv"
     enter password for repository:
-    ID        Date                 Host    Tags   Directory
-    ----------------------------------------------------------------------
-    590c8fc8  2015-05-08 21:47:38  kazik          /srv
-    9f0bc19e  2015-05-08 21:46:11  luigi          /srv
+    ID        Date                 Host    Tags   Directory  Size
+    -------------------------------------------------------------------
+    590c8fc8  2015-05-08 21:47:38  kazik          /srv       580.200MiB
+    9f0bc19e  2015-05-08 21:46:11  luigi          /srv       572.180MiB
 
 Or filter by host:
 
@@ -49,10 +51,10 @@ Or filter by host:
 
     $ restic -r /srv/restic-repo snapshots --host luigi
     enter password for repository:
-    ID        Date                 Host    Tags   Directory
-    ----------------------------------------------------------------------
-    bdbd3439  2015-05-08 21:45:17  luigi          /home/art
-    9f0bc19e  2015-05-08 21:46:11  luigi          /srv
+    ID        Date                 Host    Tags   Directory  Size
+    -------------------------------------------------------------------
+    bdbd3439  2015-05-08 21:45:17  luigi          /home/art  3.141GiB
+    9f0bc19e  2015-05-08 21:46:11  luigi          /srv       572.180MiB
 
 Combining filters is also possible.
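The Size column in the listings above is rendered from each snapshot's summary, specifically its ``total_bytes_processed`` value, using binary prefixes. As a rough sketch of how such a value maps to strings like ``20.643GiB`` — an illustration only, not restic's exact ``ui.FormatBytes`` implementation, which may differ in rounding and spacing:

.. code-block:: go

    // Illustrative binary-prefix formatter for the Size column above;
    // restic's actual ui.FormatBytes may differ in rounding and spacing.
    package main

    import "fmt"

    func formatBytes(c uint64) string {
    	b := float64(c)
    	switch {
    	case c >= 1<<40:
    		return fmt.Sprintf("%.3fTiB", b/(1<<40))
    	case c >= 1<<30:
    		return fmt.Sprintf("%.3fGiB", b/(1<<30))
    	case c >= 1<<20:
    		return fmt.Sprintf("%.3fMiB", b/(1<<20))
    	case c >= 1<<10:
    		return fmt.Sprintf("%.3fKiB", b/(1<<10))
    	}
    	return fmt.Sprintf("%dB", c)
    }

    func main() {
    	// A summary with total_bytes_processed = 22164801651 renders as 20.643GiB.
    	fmt.Println(formatBytes(22164801651))
    }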
@@ -64,21 +66,21 @@ Furthermore you can group the output by the same filters (host, paths, tags):
 
     enter password for repository:
     snapshots for (host [kasimir])
-    ID        Date                 Host    Tags   Directory
-    ----------------------------------------------------------------------
-    40dc1520  2015-05-08 21:38:30  kasimir        /home/user/work
-    79766175  2015-05-08 21:40:19  kasimir        /home/user/work
+    ID        Date                 Host    Tags   Directory        Size
+    ------------------------------------------------------------------------
+    40dc1520  2015-05-08 21:38:30  kasimir        /home/user/work  20.643GiB
+    79766175  2015-05-08 21:40:19  kasimir        /home/user/work  20.645GiB
     2 snapshots
 
     snapshots for (host [luigi])
-    ID        Date                 Host    Tags   Directory
-    ----------------------------------------------------------------------
-    bdbd3439  2015-05-08 21:45:17  luigi          /home/art
-    9f0bc19e  2015-05-08 21:46:11  luigi          /srv
+    ID        Date                 Host    Tags   Directory  Size
+    -------------------------------------------------------------------
+    bdbd3439  2015-05-08 21:45:17  luigi          /home/art  3.141GiB
+    9f0bc19e  2015-05-08 21:46:11  luigi          /srv       572.180MiB
     2 snapshots
 
     snapshots for (host [kazik])
-    ID        Date                 Host    Tags   Directory
-    ----------------------------------------------------------------------
-    590c8fc8  2015-05-08 21:47:38  kazik          /srv
+    ID        Date                 Host    Tags   Directory  Size
+    -------------------------------------------------------------------
+    590c8fc8  2015-05-08 21:47:38  kazik          /srv       580.200MiB
     1 snapshots
diff --git a/doc/047_tuning_backup_parameters.rst b/doc/047_tuning_backup_parameters.rst
index d8fb2c9b6..a6b998cfe 100644
--- a/doc/047_tuning_backup_parameters.rst
+++ b/doc/047_tuning_backup_parameters.rst
@@ -26,7 +26,8 @@ When you start a backup, restic will concurrently count the number of files and
 their total size, which is used to estimate how long it will take. This will
 cause some extra I/O, which can slow down backups of network file systems or
 FUSE mounts. To avoid this overhead at the cost of not seeing a progress
-estimate, use the ``--no-scan`` option which disables this file scanning.
+estimate, use the ``--no-scan`` option of the ``backup`` command which disables
+this file scanning.
 
 Backend Connections
 ===================
@@ -111,3 +112,28 @@ to disk. An operating system usually caches file write operations in memory and
 them to disk after a short delay. As larger pack files take longer to upload,
 this increases the chance of these files being written to disk. This can
 increase disk wear for SSDs.
+
+
+Feature Flags
+=============
+
+Feature flags allow disabling or enabling certain experimental restic features. The flags
+can be specified via the ``RESTIC_FEATURES`` environment variable. The variable expects a
+comma-separated list of ``key[=value],key2[=value2]`` pairs. The key is the name of a feature
+flag. The value is optional and can contain either the value ``true`` (default if omitted)
+or ``false``. The list of currently available feature flags is shown by the ``features``
+command.
+
+Restic will return an error if an invalid feature flag is specified. Feature flags that are
+no longer relevant may be removed in a future restic release, so make sure to stop
+specifying them.
+
+A feature can either be in alpha, beta, stable or deprecated state.
+
+- An *alpha* feature is disabled by default and may change in arbitrary ways between restic
+  versions or be removed.
+- A *beta* feature is enabled by default, but still can change in minor ways or be removed.
+- A *stable* feature is always enabled and cannot be disabled. This allows for a transition
+  period after which the flag will be removed in a future restic version.
+- A *deprecated* feature is always disabled and cannot be enabled. The flag will be removed
+  in a future restic version.
diff --git a/doc/075_scripting.rst b/doc/075_scripting.rst
index fda4b2d53..28419c292 100644
--- a/doc/075_scripting.rst
+++ b/doc/075_scripting.rst
@@ -163,7 +163,9 @@ Summary is the last output line in a successful backup.
 +---------------------------+---------------------------------------------------------+
 | ``tree_blobs``            | Number of tree blobs                                    |
 +---------------------------+---------------------------------------------------------+
-| ``data_added``            | Amount of data added, in bytes                          |
+| ``data_added``            | Amount of (uncompressed) data added, in bytes           |
++---------------------------+---------------------------------------------------------+
+| ``data_added_packed``     | Amount of data added (after compression), in bytes      |
 +---------------------------+---------------------------------------------------------+
 | ``total_files_processed`` | Total number of files processed                         |
 +---------------------------+---------------------------------------------------------+
@@ -365,13 +367,13 @@ Snapshot object
 
 Reason object
 
-+----------------+---------------------------------------------------------+
-| ``snapshot``   | Snapshot object, without ``id`` and ``short_id`` fields |
-+----------------+---------------------------------------------------------+
-| ``matches``    | Array containing descriptions of the matching criteria  |
-+----------------+---------------------------------------------------------+
-| ``counters``   | Object containing counters used by the policies         |
-+----------------+---------------------------------------------------------+
++----------------+-----------------------------------------------------------+
+| ``snapshot``   | Snapshot object, including ``id`` and ``short_id`` fields |
++----------------+-----------------------------------------------------------+
+| ``matches``    | Array containing descriptions of the matching criteria    |
++----------------+-----------------------------------------------------------+
+| ``counters``   | Object containing counters used by the policies           |
++----------------+-----------------------------------------------------------+
 
 
 init
@@ -551,11 +553,48 @@ The snapshots command returns a single JSON object, an array with objects of the
 +---------------------+--------------------------------------------------+
 | ``program_version`` | restic version used to create snapshot           |
 +---------------------+--------------------------------------------------+
+| ``summary``         | Snapshot statistics, see "Summary object"        |
++---------------------+--------------------------------------------------+
 | ``id``              | Snapshot ID                                      |
 +---------------------+--------------------------------------------------+
 | ``short_id``        | Snapshot ID, short form                          |
 +---------------------+--------------------------------------------------+
 
+Summary object
+
+The contained statistics reflect the information at the point in time when the snapshot
+was created.
+ ++---------------------------+---------------------------------------------------------+ +| ``backup_start`` | Time at which the backup was started | ++---------------------------+---------------------------------------------------------+ +| ``backup_end`` | Time at which the backup was completed | ++---------------------------+---------------------------------------------------------+ +| ``files_new`` | Number of new files | ++---------------------------+---------------------------------------------------------+ +| ``files_changed`` | Number of files that changed | ++---------------------------+---------------------------------------------------------+ +| ``files_unmodified`` | Number of files that did not change | ++---------------------------+---------------------------------------------------------+ +| ``dirs_new`` | Number of new directories | ++---------------------------+---------------------------------------------------------+ +| ``dirs_changed`` | Number of directories that changed | ++---------------------------+---------------------------------------------------------+ +| ``dirs_unmodified`` | Number of directories that did not change | ++---------------------------+---------------------------------------------------------+ +| ``data_blobs`` | Number of data blobs | ++---------------------------+---------------------------------------------------------+ +| ``tree_blobs`` | Number of tree blobs | ++---------------------------+---------------------------------------------------------+ +| ``data_added`` | Amount of (uncompressed) data added, in bytes | ++---------------------------+---------------------------------------------------------+ +| ``data_added_packed`` | Amount of data added (after compression), in bytes | ++---------------------------+---------------------------------------------------------+ +| ``total_files_processed`` | Total number of files processed | ++---------------------------+---------------------------------------------------------+ +| ``total_bytes_processed`` | Total number of bytes processed | ++---------------------------+---------------------------------------------------------+ + stats ----- diff --git a/doc/faq.rst b/doc/faq.rst index e8ef2de5e..8e56b5d9e 100644 --- a/doc/faq.rst +++ b/doc/faq.rst @@ -74,7 +74,7 @@ $ restic backup --exclude "~/documents" ~ This command will result in a complete backup of the current logged in user's home directory and it won't exclude the folder ``~/documents/`` - which is not what the user wanted to achieve. The problem is how the path to ``~/documents`` is passed to restic. -In order to spot an issue like this, you can make use of the following ruby command preceeding your restic command. +In order to spot an issue like this, you can make use of the following ruby command preceding your restic command. 
:: diff --git a/docker/Dockerfile b/docker/Dockerfile index 978da7960..02b53261f 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.20-alpine AS builder +FROM golang:1.22-alpine AS builder WORKDIR /go/src/github.com/restic/restic diff --git a/go.mod b/go.mod index afcbc427b..c928b4a97 100644 --- a/go.mod +++ b/go.mod @@ -1,10 +1,10 @@ module github.com/restic/restic require ( - cloud.google.com/go/storage v1.37.0 - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1 - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 - github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.1 + cloud.google.com/go/storage v1.40.0 + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0 + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 + github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.1 github.com/Backblaze/blazer v0.6.1 github.com/anacrolix/fuse v0.2.0 github.com/cenkalti/backoff/v4 v4.2.1 @@ -13,50 +13,51 @@ require ( github.com/go-ole/go-ole v1.3.0 github.com/google/go-cmp v0.6.0 github.com/hashicorp/golang-lru/v2 v2.0.7 - github.com/klauspost/compress v1.17.6 + github.com/klauspost/compress v1.17.7 github.com/minio/minio-go/v7 v7.0.66 github.com/minio/sha256-simd v1.0.1 github.com/ncw/swift/v2 v2.0.2 + github.com/peterbourgon/unixtransport v0.0.4 github.com/pkg/errors v0.9.1 github.com/pkg/profile v1.7.0 github.com/pkg/sftp v1.13.6 github.com/pkg/xattr v0.4.10-0.20221120235825-35026bbbd013 github.com/restic/chunker v0.4.0 - github.com/spf13/cobra v1.7.0 + github.com/spf13/cobra v1.8.0 github.com/spf13/pflag v1.0.5 go.uber.org/automaxprocs v1.5.3 - golang.org/x/crypto v0.18.0 - golang.org/x/net v0.20.0 - golang.org/x/oauth2 v0.16.0 + golang.org/x/crypto v0.21.0 + golang.org/x/net v0.23.0 + golang.org/x/oauth2 v0.18.0 golang.org/x/sync v0.6.0 - golang.org/x/sys v0.16.0 - golang.org/x/term v0.16.0 + golang.org/x/sys v0.18.0 + golang.org/x/term v0.18.0 golang.org/x/text v0.14.0 golang.org/x/time v0.5.0 - google.golang.org/api v0.157.0 + google.golang.org/api v0.170.0 ) require ( - cloud.google.com/go v0.112.0 // indirect - cloud.google.com/go/compute v1.23.3 // indirect + cloud.google.com/go v0.112.1 // indirect + cloud.google.com/go/compute v1.24.0 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect - cloud.google.com/go/iam v1.1.5 // indirect - github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1 // indirect - github.com/AzureAD/microsoft-authentication-library-for-go v1.2.0 // indirect + cloud.google.com/go/iam v1.1.7 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/felixge/fgprof v0.9.3 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect - github.com/go-logr/logr v1.3.0 // indirect + github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/golang-jwt/jwt/v5 v5.0.0 // indirect + github.com/golang-jwt/jwt/v5 v5.2.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.3 // indirect + github.com/golang/protobuf v1.5.4 // indirect github.com/google/pprof v0.0.0-20230926050212-f7f687d19a98 // indirect github.com/google/s2a-go v0.1.7 // indirect - github.com/google/uuid v1.5.0 // indirect + github.com/google/uuid v1.6.0 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect 
- github.com/googleapis/gax-go/v2 v2.12.0 // indirect + github.com/googleapis/gax-go/v2 v2.12.3 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/klauspost/cpuid/v2 v2.2.6 // indirect @@ -66,22 +67,22 @@ require ( github.com/minio/md5-simd v1.1.2 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect + github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/rs/xid v1.5.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/sirupsen/logrus v1.9.3 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 // indirect - go.opentelemetry.io/otel v1.21.0 // indirect - go.opentelemetry.io/otel/metric v1.21.0 // indirect - go.opentelemetry.io/otel/trace v1.21.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect + go.opentelemetry.io/otel v1.24.0 // indirect + go.opentelemetry.io/otel/metric v1.24.0 // indirect + go.opentelemetry.io/otel/trace v1.24.0 // indirect google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto v0.0.0-20240116215550-a9fa1716bcac // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240122161410-6c6643bf1457 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240116215550-a9fa1716bcac // indirect - google.golang.org/grpc v1.60.1 // indirect - google.golang.org/protobuf v1.32.0 // indirect + google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240314234333-6e1732d8331c // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240311132316-a219d84964c2 // indirect + google.golang.org/grpc v1.62.1 // indirect + google.golang.org/protobuf v1.33.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index fb70ac9d5..1a7dc1a79 100644 --- a/go.sum +++ b/go.sum @@ -1,25 +1,25 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.112.0 h1:tpFCD7hpHFlQ8yPwT3x+QeXqc2T6+n6T+hmABHfDUSM= -cloud.google.com/go v0.112.0/go.mod h1:3jEEVwZ/MHU4djK5t5RHuKOA/GbLddgTdVubX1qnPD4= -cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk= -cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI= +cloud.google.com/go v0.112.1 h1:uJSeirPke5UNZHIb4SxfZklVSiWWVqW4oXlETwZziwM= +cloud.google.com/go v0.112.1/go.mod h1:+Vbu+Y1UU+I1rjmzeMOb/8RfkKJK2Gyxi1X6jJCZLo4= +cloud.google.com/go/compute v1.24.0 h1:phWcR2eWzRJaL/kOiJwfFsPs4BaKq1j6vnpZrc1YlVg= +cloud.google.com/go/compute v1.24.0/go.mod h1:kw1/T+h/+tK2LJK0wiPPx1intgdAM3j/g3hFDlscY40= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= -cloud.google.com/go/iam v1.1.5 h1:1jTsCu4bcsNsE4iiqNT5SHwrDRCfRmIaaaVFhRveTJI= -cloud.google.com/go/iam v1.1.5/go.mod h1:rB6P/Ic3mykPbFio+vo7403drjlgvoWfYpJhMXEbzv8= -cloud.google.com/go/storage v1.37.0 
h1:WI8CsaFO8Q9KjPVtsZ5Cmi0dXV25zMoX0FklT7c3Jm4= -cloud.google.com/go/storage v1.37.0/go.mod h1:i34TiT2IhiNDmcj65PqwCjcoUX7Z5pLzS8DEmoiFq1k= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1 h1:lGlwhPtrX6EVml1hO0ivjkUxsSyl4dsiw9qcA1k/3IQ= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1/go.mod h1:RKUqNu35KJYcVG/fqTRqmuXJZYNhYkBrnC/hX7yGbTA= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 h1:BMAjVKJM0U/CYF27gA0ZMmXGkOcvfFtD0oHVZ1TIPRI= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0/go.mod h1:1fXstnBMas5kzG+S3q8UoJcmyU6nUeunJcMDHcRYHhs= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1 h1:6oNBlSdi1QqM1PNW7FPA6xOGA5UNsXnkaYZz9vdPGhA= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1/go.mod h1:s4kgfzA0covAXNicZHDMN58jExvcng2mC/DepXiF1EI= +cloud.google.com/go/iam v1.1.7 h1:z4VHOhwKLF/+UYXAJDFwGtNF0b6gjsW1Pk9Ml0U/IoM= +cloud.google.com/go/iam v1.1.7/go.mod h1:J4PMPg8TtyurAUvSmPj8FF3EDgY1SPRZxcUGrn7WXGA= +cloud.google.com/go/storage v1.40.0 h1:VEpDQV5CJxFmJ6ueWNsKxcr1QAYOXEgxDa+sBbJahPw= +cloud.google.com/go/storage v1.40.0/go.mod h1:Rrj7/hKlG87BLqDJYtwR0fbPld8uJPbQ2ucUMY7Ir0g= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0 h1:n1DH8TPV4qqPTje2RcUBYwtrTWlabVp4n46+74X2pn4= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0/go.mod h1:HDcZnuGbiyppErN6lB+idp4CKhjbc8gwjto6OPpyggM= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 h1:sO0/P7g68FrryJzljemN+6GTssUXdANk6aJ7T1ZxnsQ= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1/go.mod h1:h8hyGFDsU5HMivxiS2iYFZsgDbU9OnnJ163x5UGVKYo= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 h1:LqbJ/WzJUwBf8UiaSzgX7aMclParm9/5Vgp+TY51uBQ= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2/go.mod h1:yInRyqWXAuaPrgI7p70+lDDgh3mlBohis29jGMISnmc= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0 h1:AifHbc4mg0x9zW52WOpKbsHaDKuRhlI7TVl47thgQ70= -github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.1 h1:AMf7YbZOZIW5b66cXNHMWWT/zkjhz5+a+k/3x40EO7E= -github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.1/go.mod h1:uwfk06ZBcvL/g4VHNjurPfVln9NMbsk2XIZxJ+hu81k= -github.com/AzureAD/microsoft-authentication-library-for-go v1.2.0 h1:hVeq+yCyUi+MsoO/CU95yqCIcdzra5ovzk8Q2BBpV2M= -github.com/AzureAD/microsoft-authentication-library-for-go v1.2.0/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.1 h1:fXPMAmuh0gDuRDey0atC8cXBuKIlqCzCkL8sm1n9Ov0= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.1/go.mod h1:SUZc9YRRHfx2+FAQKNDGrssXehqLpxmwRv2mC/5ntj4= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 h1:DzHpqpoJVaCgOUdVHxE8QB52S6NiVdDQvGlny1qvPqA= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/Backblaze/blazer v0.6.1 h1:xC9HyC7OcxRzzmtfRiikIEvq4HZYWjU6caFwX2EXw1s= github.com/Backblaze/blazer v0.6.1/go.mod h1:7/jrGx4O6OKOto6av+hLwelPR8rwZ+PLxQ5ZOiYAjwY= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= @@ -36,8 +36,6 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 
h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= -github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM= github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= @@ -55,20 +53,19 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= github.com/felixge/fgprof v0.9.3 h1:VvyZxILNuCiUCSXtPtYmmtGvb65nqXh2QFWc0Wpf2/g= github.com/felixge/fgprof v0.9.3/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNun8eiPw= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= -github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= -github.com/golang-jwt/jwt/v5 v5.0.0 h1:1n1XNM9hk7O9mnQoNBGolZvzebBQ7p93ULHRc28XJUE= -github.com/golang-jwt/jwt/v5 v5.0.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= +github.com/golang-jwt/jwt/v5 v5.2.0 h1:d/ix8ftRUorsN+5eMIlF4T6J8CAt9rch3My2winC1Jw= +github.com/golang-jwt/jwt/v5 v5.2.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= @@ -85,8 +82,8 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= 
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -104,12 +101,12 @@ github.com/google/pprof v0.0.0-20230926050212-f7f687d19a98/go.mod h1:czg5+yv1E0Z github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU= -github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= -github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= -github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU= +github.com/googleapis/gax-go/v2 v2.12.3 h1:5/zPPDvw8Q1SuXjrqrZslrqT7dL/uJT2CQii/cLCKqA= +github.com/googleapis/gax-go/v2 v2.12.3/go.mod h1:AKloxT6GtNbaLm8QTNSidHUVsHYcBHwWRvkNFJUQcS4= github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= @@ -117,8 +114,8 @@ github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/klauspost/compress v1.17.6 h1:60eq2E/jlfwQXtvZEeBUYADs+BwKBWURIY+Gj2eRGjI= -github.com/klauspost/compress v1.17.6/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= +github.com/klauspost/compress v1.17.7 h1:ehO88t2UGzQK66LMdE8tibEd1ErmzZjNEqWkjLAKQQg= +github.com/klauspost/compress v1.17.7/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc= github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= @@ -129,6 +126,7 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/miekg/dns v1.1.54/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= github.com/minio/minio-go/v7 v7.0.66 h1:bnTOXOHjOqv/gcMuiVbN9o2ngRItvqE774dG9nq0Dzw= @@ -142,8 +140,13 @@ github.com/modern-go/reflect2 v1.0.2 
h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/ncw/swift/v2 v2.0.2 h1:jx282pcAKFhmoZBSdMcCRFn9VWkoBIRsCpe+yZq7vEk= github.com/ncw/swift/v2 v2.0.2/go.mod h1:z0A9RVdYPjNjXVo2pDOPxZ4eu3oarO1P91fTItcb+Kg= -github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= -github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= +github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= +github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/peterbourgon/ff/v3 v3.3.1/go.mod h1:zjJVUhx+twciwfDl0zBcFzl4dW8axCRyXE/eKY9RztQ= +github.com/peterbourgon/unixtransport v0.0.4 h1:UTF0FxXCAglvoZz9jaGPYjEg52DjBLDYGMJvJni6Tfw= +github.com/peterbourgon/unixtransport v0.0.4/go.mod h1:o8aUkOCa8W/BIXpi15uKvbSabjtBh0JhSOJGSfoOhAU= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/profile v1.7.0 h1:hnbDkaNWPCLMO9wGLdBFTIZvzDrDfBM2072E1S9gJkA= @@ -165,8 +168,8 @@ github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= -github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= +github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= +github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stephens2424/writerset v1.0.2/go.mod h1:aS2JhsMn6eA7e82oNmW4rfsgAOp9COBTTl8mzkwADnc= @@ -185,17 +188,17 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 h1:SpGay3w+nEwMpfVnbqOLH5gY52/foP8RE8UzTZ1pdSE= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1/go.mod h1:4UoMYEZOC0yN/sPGH76KPkkU7zgiEWYWL9vwmbnTJPE= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 h1:aFJWCqJMNjENlcleuuOkGAPH82y0yULBScfXcIEdS24= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1/go.mod h1:sEGXWArGqc3tVa+ekntsN65DmVbVeW+7lTKTjZF3/Fo= -go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc= -go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo= -go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4= 
-go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM= -go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8= -go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc= -go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= +go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo= +go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo= +go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI= +go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco= +go.opentelemetry.io/otel/sdk v1.22.0 h1:6coWHw9xw7EfClIC/+O31R8IY3/+EiRFHevmHafB2Gw= +go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI= +go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8= go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -203,14 +206,15 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= -golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc= -golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= +golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= +golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -222,16 +226,18 
@@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= -golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= -golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= +golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= -golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= +golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI= +golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -241,21 +247,22 @@ golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= -golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= 
+golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= +golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.16.0 h1:m+B6fahuftsE9qjo0VWp2FW0mB3MTJvR0BaMQrq0pmE= -golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= +golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= @@ -273,12 +280,13 @@ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200423201157-2723c5de0d66/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= -google.golang.org/api v0.157.0 h1:ORAeqmbrrozeyw5NjnMxh7peHO0UzV4wWYSwZeCUb20= -google.golang.org/api v0.157.0/go.mod h1:+z4v4ufbZ1WEpld6yMGHyggs+PmAHiaLNj5ytP3N01g= +google.golang.org/api v0.170.0 h1:zMaruDePM88zxZBG+NG8+reALO2rfLhe/JShitLyT48= +google.golang.org/api v0.170.0/go.mod h1:/xql9M2btF85xac/VAm4PsLMTLVGUOpq4BE9R8jyNy8= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= @@ -286,19 +294,19 @@ google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJ google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20240116215550-a9fa1716bcac h1:ZL/Teoy/ZGnzyrqK/Optxxp2pmVh+fmJ97slxSRyzUg= -google.golang.org/genproto v0.0.0-20240116215550-a9fa1716bcac/go.mod h1:+Rvu7ElI+aLzyDQhpHMFMMltsD6m7nqpuWDd2CwJw3k= -google.golang.org/genproto/googleapis/api v0.0.0-20240122161410-6c6643bf1457 h1:KHBtwE+eQc3+NxpjmRFlQ3pJQ2FNnhhgB9xOV8kyBuU= -google.golang.org/genproto/googleapis/api v0.0.0-20240122161410-6c6643bf1457/go.mod h1:4jWUdICTdgc3Ibxmr8nAJiiLHwQBY0UI0XZcEMaFKaA= 
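For readers skimming the go.sum churn: each module gets a pair of entries, an h1: hash over the module's extracted file tree and a second over its go.mod alone, and the go command checks downloads against these values. A hedged sketch of how such an h1: digest can be reproduced with golang.org/x/mod (the directory and module version are placeholders, not modules from this change):

package main

import (
	"fmt"
	"log"

	"golang.org/x/mod/sumdb/dirhash"
)

func main() {
	// HashDir walks an extracted module directory and produces the same
	// "h1:" digest format that is recorded in go.sum.
	h, err := dirhash.HashDir("/path/to/modcache/example.com/m@v1.2.3",
		"example.com/m@v1.2.3", dirhash.Hash1)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(h)
}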
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240116215550-a9fa1716bcac h1:nUQEQmH/csSvFECKYRv6HWEyypysidKl2I6Qpsglq/0= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240116215550-a9fa1716bcac/go.mod h1:daQN87bsDqDoe316QbbvX60nMoJQa4r6Ds0ZuoAe5yA= +google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9 h1:9+tzLLstTlPTRyJTh+ah5wIMsBW5c4tQwGTN3thOW9Y= +google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9/go.mod h1:mqHbVIp48Muh7Ywss/AD6I5kNVKZMmAa/QEW58Gxp2s= +google.golang.org/genproto/googleapis/api v0.0.0-20240314234333-6e1732d8331c h1:kaI7oewGK5YnVwj+Y+EJBO/YN1ht8iTL9XkFHtVZLsc= +google.golang.org/genproto/googleapis/api v0.0.0-20240314234333-6e1732d8331c/go.mod h1:VQW3tUculP/D4B+xVCo+VgSq8As6wA9ZjHl//pmk+6s= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240311132316-a219d84964c2 h1:9IZDv+/GcI6u+a4jRFRLxQs0RUCfavGfoOgEW6jpkI0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240311132316-a219d84964c2/go.mod h1:UCOku4NytXMJuLQE5VuqA5lX3PcHCBo8pxNyvkf4xBs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.60.1 h1:26+wFr+cNqSGFcOXcabYC0lUVJVRa2Sb2ortSK7VrEU= -google.golang.org/grpc v1.60.1/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM= +google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk= +google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -310,13 +318,14 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= -google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/helpers/prepare-release/main.go b/helpers/prepare-release/main.go index baf8aa2ba..703d85e70 100644 --- a/helpers/prepare-release/main.go +++ b/helpers/prepare-release/main.go @@ -303,7 +303,7 @@ func generateFiles() { } } -var versionPattern = `var version = ".*"` +var versionPattern = `const version = ".*"` const versionCodeFile = "cmd/restic/global.go" @@ -313,7 +313,7 @@ func updateVersion() { die("unable to write version to file: %v", err) } - newVersion := fmt.Sprintf("var version = %q", opts.Version) + newVersion := fmt.Sprintf("const version = %q", opts.Version) replace(versionCodeFile, versionPattern, newVersion) if len(uncommittedChanges("VERSION")) > 0 || len(uncommittedChanges(versionCodeFile)) > 0 { @@ -323,7 +323,7 @@ func updateVersion() { } func updateVersionDev() { - newVersion := fmt.Sprintf(`var version = "%s-dev (compiled manually)"`, opts.Version) + newVersion := fmt.Sprintf(`const version = "%s-dev (compiled manually)"`, opts.Version) replace(versionCodeFile, versionPattern, newVersion) msg("committing cmd/restic/global.go with dev version") diff --git a/internal/archiver/archiver.go b/internal/archiver/archiver.go index 77ddba7c4..c1f73eea6 100644 --- a/internal/archiver/archiver.go +++ b/internal/archiver/archiver.go @@ -8,10 +8,12 @@ import ( "runtime" "sort" "strings" + "sync" "time" "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/feature" "github.com/restic/restic/internal/fs" "github.com/restic/restic/internal/restic" "golang.org/x/sync/errgroup" @@ -40,6 +42,18 @@ type ItemStats struct { TreeSizeInRepo uint64 // sum of the bytes added to the repo (including compression and crypto overhead) } +type ChangeStats struct { + New uint + Changed uint + Unchanged uint +} + +type Summary struct { + Files, Dirs ChangeStats + ProcessedBytes uint64 + ItemStats +} + // Add adds other to the current ItemStats. func (s *ItemStats) Add(other ItemStats) { s.DataBlobs += other.DataBlobs @@ -61,6 +75,8 @@ type Archiver struct { blobSaver *BlobSaver fileSaver *FileSaver treeSaver *TreeSaver + mu sync.Mutex + summary *Summary // Error is called for all errors that occur during backup. Error ErrorFunc @@ -182,12 +198,58 @@ func (arch *Archiver) error(item string, err error) error { return errf } +func (arch *Archiver) trackItem(item string, previous, current *restic.Node, s ItemStats, d time.Duration) { + arch.CompleteItem(item, previous, current, s, d) + + arch.mu.Lock() + defer arch.mu.Unlock() + + arch.summary.ItemStats.Add(s) + + if current != nil { + arch.summary.ProcessedBytes += current.Size + } else { + // last item or an error occurred + return + } + + switch current.Type { + case "dir": + switch { + case previous == nil: + arch.summary.Dirs.New++ + case previous.Equals(*current): + arch.summary.Dirs.Unchanged++ + default: + arch.summary.Dirs.Changed++ + } + + case "file": + switch { + case previous == nil: + arch.summary.Files.New++ + case previous.Equals(*current): + arch.summary.Files.Unchanged++ + default: + arch.summary.Files.Changed++ + } + } +} + // nodeFromFileInfo returns the restic node from an os.FileInfo. 
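The trackItem helper added above is the heart of the new backup summary: it folds per-item ItemStats into Archiver.summary under the new mutex and tallies each file or directory as new, changed, or unchanged by comparing it with its node from the parent snapshot. A self-contained sketch of just that classification rule (the Node type and the equality check are simplified stand-ins for restic's own):

package main

import "fmt"

type ChangeStats struct{ New, Changed, Unchanged uint }

type Node struct{ Size uint64 }

// equals stands in for restic's Node.Equals, which compares all metadata.
func equals(a, b Node) bool { return a == b }

// classify applies the same switch trackItem uses for files and dirs.
func classify(cs *ChangeStats, previous *Node, current Node) {
	switch {
	case previous == nil:
		cs.New++
	case equals(*previous, current):
		cs.Unchanged++
	default:
		cs.Changed++
	}
}

func main() {
	var files ChangeStats
	old := Node{Size: 10}
	classify(&files, nil, Node{Size: 10})  // no parent node: new
	classify(&files, &old, Node{Size: 10}) // identical: unchanged
	classify(&files, &old, Node{Size: 12}) // differs: changed
	fmt.Printf("%+v\n", files) // {New:1 Changed:1 Unchanged:1}
}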
-func (arch *Archiver) nodeFromFileInfo(snPath, filename string, fi os.FileInfo) (*restic.Node, error) { - node, err := restic.NodeFromFileInfo(filename, fi) +func (arch *Archiver) nodeFromFileInfo(snPath, filename string, fi os.FileInfo, ignoreXattrListError bool) (*restic.Node, error) { + node, err := restic.NodeFromFileInfo(filename, fi, ignoreXattrListError) if !arch.WithAtime { node.AccessTime = node.ModTime } + if feature.Flag.Enabled(feature.DeviceIDForHardlinks) { + if node.Links == 1 || node.Type == "dir" { + // the DeviceID is only necessary for hardlinked files + // when using subvolumes or snapshots, their deviceIDs tend to change, which causes + // restic to upload new tree blobs + node.DeviceID = 0 + } + } // overwrite name to match that within the snapshot node.Name = path.Base(snPath) if err != nil { @@ -222,12 +284,12 @@ func (arch *Archiver) wrapLoadTreeError(id restic.ID, err error) error { return err } -// SaveDir stores a directory in the repo and returns the node. snPath is the +// saveDir stores a directory in the repo and returns the node. snPath is the // path within the current snapshot. -func (arch *Archiver) SaveDir(ctx context.Context, snPath string, dir string, fi os.FileInfo, previous *restic.Tree, complete CompleteFunc) (d FutureNode, err error) { +func (arch *Archiver) saveDir(ctx context.Context, snPath string, dir string, fi os.FileInfo, previous *restic.Tree, complete CompleteFunc) (d FutureNode, err error) { debug.Log("%v %v", snPath, dir) - treeNode, err := arch.nodeFromFileInfo(snPath, dir, fi) + treeNode, err := arch.nodeFromFileInfo(snPath, dir, fi, false) if err != nil { return FutureNode{}, err } @@ -250,7 +312,7 @@ func (arch *Archiver) SaveDir(ctx context.Context, snPath string, dir string, fi pathname := arch.FS.Join(dir, name) oldNode := previous.Find(name) snItem := join(snPath, name) - fn, excluded, err := arch.Save(ctx, snItem, pathname, oldNode) + fn, excluded, err := arch.save(ctx, snItem, pathname, oldNode) // return error early if possible if err != nil { @@ -318,6 +380,7 @@ func (fn *FutureNode) take(ctx context.Context) futureNodeResult { return res } case <-ctx.Done(): + return futureNodeResult{err: ctx.Err()} } return futureNodeResult{err: errors.Errorf("no result")} } @@ -334,14 +397,14 @@ func (arch *Archiver) allBlobsPresent(previous *restic.Node) bool { return true } -// Save saves a target (file or directory) to the repo. If the item is +// save saves a target (file or directory) to the repo. If the item is // excluded, this function returns a nil node and error, with excluded set to // true. // // Errors and completion need to be handled by the caller. // // snPath is the path within the current snapshot.
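The feature-gated DeviceID handling above is easiest to read as a predicate: the ID survives only where hard links make it necessary. A minimal illustration of that rule (the helper name is mine for this sketch, not restic's):

package main

import "fmt"

// keepDeviceID mirrors the condition in nodeFromFileInfo: node.DeviceID is
// zeroed unless the node is a hardlinked file, so that subvolumes or
// snapshots with shifting device IDs do not force new tree blobs.
func keepDeviceID(links uint64, nodeType string) bool {
	return nodeType != "dir" && links > 1
}

func main() {
	fmt.Println(keepDeviceID(1, "file")) // false: DeviceID cleared
	fmt.Println(keepDeviceID(2, "file")) // true: hardlink, keep it
	fmt.Println(keepDeviceID(1, "dir"))  // false: DeviceID cleared
}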
-func (arch *Archiver) Save(ctx context.Context, snPath, target string, previous *restic.Node) (fn FutureNode, excluded bool, err error) { +func (arch *Archiver) save(ctx context.Context, snPath, target string, previous *restic.Node) (fn FutureNode, excluded bool, err error) { start := time.Now() debug.Log("%v target %q, previous %v", snPath, target, previous) @@ -380,9 +443,9 @@ func (arch *Archiver) Save(ctx context.Context, snPath, target string, previous if previous != nil && !fileChanged(fi, previous, arch.ChangeIgnoreFlags) { if arch.allBlobsPresent(previous) { debug.Log("%v hasn't changed, using old list of blobs", target) - arch.CompleteItem(snPath, previous, previous, ItemStats{}, time.Since(start)) + arch.trackItem(snPath, previous, previous, ItemStats{}, time.Since(start)) arch.CompleteBlob(previous.Size) - node, err := arch.nodeFromFileInfo(snPath, target, fi) + node, err := arch.nodeFromFileInfo(snPath, target, fi, false) if err != nil { return FutureNode{}, false, err } @@ -445,9 +508,9 @@ func (arch *Archiver) Save(ctx context.Context, snPath, target string, previous fn = arch.fileSaver.Save(ctx, snPath, target, file, fi, func() { arch.StartFile(snPath) }, func() { - arch.CompleteItem(snPath, nil, nil, ItemStats{}, 0) + arch.trackItem(snPath, nil, nil, ItemStats{}, 0) }, func(node *restic.Node, stats ItemStats) { - arch.CompleteItem(snPath, previous, node, stats, time.Since(start)) + arch.trackItem(snPath, previous, node, stats, time.Since(start)) }) case fi.IsDir(): @@ -462,9 +525,9 @@ func (arch *Archiver) Save(ctx context.Context, snPath, target string, previous return FutureNode{}, false, err } - fn, err = arch.SaveDir(ctx, snPath, target, fi, oldSubtree, + fn, err = arch.saveDir(ctx, snPath, target, fi, oldSubtree, func(node *restic.Node, stats ItemStats) { - arch.CompleteItem(snItem, previous, node, stats, time.Since(start)) + arch.trackItem(snItem, previous, node, stats, time.Since(start)) }) if err != nil { debug.Log("SaveDir for %v returned error: %v", snPath, err) @@ -478,7 +541,7 @@ func (arch *Archiver) Save(ctx context.Context, snPath, target string, previous default: debug.Log(" %v other", target) - node, err := arch.nodeFromFileInfo(snPath, target, fi) + node, err := arch.nodeFromFileInfo(snPath, target, fi, false) if err != nil { return FutureNode{}, false, err } @@ -545,9 +608,9 @@ func (arch *Archiver) statDir(dir string) (os.FileInfo, error) { return fi, nil } -// SaveTree stores a Tree in the repo, returned is the tree. snPath is the path +// saveTree stores a Tree in the repo and returns the tree. snPath is the path // within the current snapshot. -func (arch *Archiver) SaveTree(ctx context.Context, snPath string, atree *Tree, previous *restic.Tree, complete CompleteFunc) (FutureNode, int, error) { +func (arch *Archiver) saveTree(ctx context.Context, snPath string, atree *Tree, previous *restic.Tree, complete CompleteFunc) (FutureNode, int, error) { var node *restic.Node if snPath != "/" { @@ -561,7 +624,9 @@ func (arch *Archiver) SaveTree(ctx context.Context, snPath string, atree *Tree, } debug.Log("%v, dir node data loaded from %v", snPath, atree.FileInfoPath) - node, err = arch.nodeFromFileInfo(snPath, atree.FileInfoPath, fi) + // in some cases reading xattrs for directories above the backup target is not allowed, + // so ignore errors for such folders.
+ node, err = arch.nodeFromFileInfo(snPath, atree.FileInfoPath, fi, true) if err != nil { return FutureNode{}, 0, err } @@ -585,7 +650,7 @@ func (arch *Archiver) SaveTree(ctx context.Context, snPath string, atree *Tree, // this is a leaf node if subatree.Leaf() { - fn, excluded, err := arch.Save(ctx, join(snPath, name), subatree.Path, previous.Find(name)) + fn, excluded, err := arch.save(ctx, join(snPath, name), subatree.Path, previous.Find(name)) if err != nil { err = arch.error(subatree.Path, err) @@ -619,8 +684,8 @@ func (arch *Archiver) SaveTree(ctx context.Context, snPath string, atree *Tree, } // not a leaf node, archive subtree - fn, _, err := arch.SaveTree(ctx, join(snPath, name), &subatree, oldSubtree, func(n *restic.Node, is ItemStats) { - arch.CompleteItem(snItem, oldNode, n, is, time.Since(start)) + fn, _, err := arch.saveTree(ctx, join(snPath, name), &subatree, oldSubtree, func(n *restic.Node, is ItemStats) { + arch.trackItem(snItem, oldNode, n, is, time.Since(start)) }) if err != nil { return FutureNode{}, 0, err @@ -688,6 +753,7 @@ type SnapshotOptions struct { Tags restic.TagList Hostname string Excludes []string + BackupStart time.Time Time time.Time ParentSnapshot *restic.Snapshot ProgramVersion string @@ -738,15 +804,17 @@ func (arch *Archiver) stopWorkers() { } // Snapshot saves several targets and returns a snapshot. -func (arch *Archiver) Snapshot(ctx context.Context, targets []string, opts SnapshotOptions) (*restic.Snapshot, restic.ID, error) { +func (arch *Archiver) Snapshot(ctx context.Context, targets []string, opts SnapshotOptions) (*restic.Snapshot, restic.ID, *Summary, error) { + arch.summary = &Summary{} + cleanTargets, err := resolveRelativeTargets(arch.FS, targets) if err != nil { - return nil, restic.ID{}, err + return nil, restic.ID{}, nil, err } atree, err := NewTree(arch.FS, cleanTargets) if err != nil { - return nil, restic.ID{}, err + return nil, restic.ID{}, nil, err } var rootTreeID restic.ID @@ -762,8 +830,8 @@ func (arch *Archiver) Snapshot(ctx context.Context, targets []string, opts Snaps arch.runWorkers(wgCtx, wg) debug.Log("starting snapshot") - fn, nodeCount, err := arch.SaveTree(wgCtx, "/", atree, arch.loadParentTree(wgCtx, opts.ParentSnapshot), func(_ *restic.Node, is ItemStats) { - arch.CompleteItem("/", nil, nil, is, time.Since(start)) + fn, nodeCount, err := arch.saveTree(wgCtx, "/", atree, arch.loadParentTree(wgCtx, opts.ParentSnapshot), func(_ *restic.Node, is ItemStats) { + arch.trackItem("/", nil, nil, is, time.Since(start)) }) if err != nil { return err @@ -799,12 +867,12 @@ func (arch *Archiver) Snapshot(ctx context.Context, targets []string, opts Snaps }) err = wgUp.Wait() if err != nil { - return nil, restic.ID{}, err + return nil, restic.ID{}, nil, err } sn, err := restic.NewSnapshot(targets, opts.Tags, opts.Hostname, opts.Time) if err != nil { - return nil, restic.ID{}, err + return nil, restic.ID{}, nil, err } sn.ProgramVersion = opts.ProgramVersion @@ -813,11 +881,28 @@ func (arch *Archiver) Snapshot(ctx context.Context, targets []string, opts Snaps sn.Parent = opts.ParentSnapshot.ID() } sn.Tree = &rootTreeID + sn.Summary = &restic.SnapshotSummary{ + BackupStart: opts.BackupStart, + BackupEnd: time.Now(), + + FilesNew: arch.summary.Files.New, + FilesChanged: arch.summary.Files.Changed, + FilesUnmodified: arch.summary.Files.Unchanged, + DirsNew: arch.summary.Dirs.New, + DirsChanged: arch.summary.Dirs.Changed, + DirsUnmodified: arch.summary.Dirs.Unchanged, + DataBlobs: arch.summary.ItemStats.DataBlobs, + TreeBlobs: 
arch.summary.ItemStats.TreeBlobs, + DataAdded: arch.summary.ItemStats.DataSize + arch.summary.ItemStats.TreeSize, + DataAddedPacked: arch.summary.ItemStats.DataSizeInRepo + arch.summary.ItemStats.TreeSizeInRepo, + TotalFilesProcessed: arch.summary.Files.New + arch.summary.Files.Changed + arch.summary.Files.Unchanged, + TotalBytesProcessed: arch.summary.ProcessedBytes, + } id, err := restic.SaveSnapshot(ctx, arch.Repo, sn) if err != nil { - return nil, restic.ID{}, err + return nil, restic.ID{}, nil, err } - return sn, id, nil + return sn, id, arch.summary, nil } diff --git a/internal/archiver/archiver_test.go b/internal/archiver/archiver_test.go index 46ef44251..51a425f4e 100644 --- a/internal/archiver/archiver_test.go +++ b/internal/archiver/archiver_test.go @@ -19,15 +19,16 @@ import ( "github.com/restic/restic/internal/backend/mem" "github.com/restic/restic/internal/checker" "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/feature" "github.com/restic/restic/internal/fs" "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" - restictest "github.com/restic/restic/internal/test" + rtest "github.com/restic/restic/internal/test" "golang.org/x/sync/errgroup" ) func prepareTempdirRepoSrc(t testing.TB, src TestDir) (string, restic.Repository) { - tempdir := restictest.TempDir(t) + tempdir := rtest.TempDir(t) repo := repository.TestRepository(t) TestCreateFiles(t, tempdir, src) @@ -132,7 +133,7 @@ func TestArchiverSaveFile(t *testing.T) { var tests = []TestFile{ {Content: ""}, {Content: "foo"}, - {Content: string(restictest.Random(23, 12*1024*1024+1287898))}, + {Content: string(rtest.Random(23, 12*1024*1024+1287898))}, } for _, testfile := range tests { @@ -165,7 +166,7 @@ func TestArchiverSaveFileReaderFS(t *testing.T) { Data string }{ {Data: "foo"}, - {Data: string(restictest.Random(23, 12*1024*1024+1287898))}, + {Data: string(rtest.Random(23, 12*1024*1024+1287898))}, } for _, test := range tests { @@ -207,7 +208,7 @@ func TestArchiverSave(t *testing.T) { var tests = []TestFile{ {Content: ""}, {Content: "foo"}, - {Content: string(restictest.Random(23, 12*1024*1024+1287898))}, + {Content: string(rtest.Random(23, 12*1024*1024+1287898))}, } for _, testfile := range tests { @@ -226,8 +227,9 @@ func TestArchiverSave(t *testing.T) { return err } arch.runWorkers(ctx, wg) + arch.summary = &Summary{} - node, excluded, err := arch.Save(ctx, "/", filepath.Join(tempdir, "file"), nil) + node, excluded, err := arch.save(ctx, "/", filepath.Join(tempdir, "file"), nil) if err != nil { t.Fatal(err) } @@ -275,7 +277,7 @@ func TestArchiverSaveReaderFS(t *testing.T) { Data string }{ {Data: "foo"}, - {Data: string(restictest.Random(23, 12*1024*1024+1287898))}, + {Data: string(rtest.Random(23, 12*1024*1024+1287898))}, } for _, test := range tests { @@ -303,8 +305,9 @@ func TestArchiverSaveReaderFS(t *testing.T) { return err } arch.runWorkers(ctx, wg) + arch.summary = &Summary{} - node, excluded, err := arch.Save(ctx, "/", filename, nil) + node, excluded, err := arch.save(ctx, "/", filename, nil) t.Logf("Save returned %v %v", node, err) if err != nil { t.Fatal(err) @@ -351,7 +354,7 @@ func TestArchiverSaveReaderFS(t *testing.T) { func BenchmarkArchiverSaveFileSmall(b *testing.B) { const fileSize = 4 * 1024 d := TestDir{"file": TestFile{ - Content: string(restictest.Random(23, fileSize)), + Content: string(rtest.Random(23, fileSize)), }} b.SetBytes(fileSize) @@ -383,7 +386,7 @@ func BenchmarkArchiverSaveFileSmall(b *testing.B) { func 
BenchmarkArchiverSaveFileLarge(b *testing.B) { const fileSize = 40*1024*1024 + 1287898 d := TestDir{"file": TestFile{ - Content: string(restictest.Random(23, fileSize)), + Content: string(rtest.Random(23, fileSize)), }} b.SetBytes(fileSize) @@ -459,14 +462,14 @@ func appendToFile(t testing.TB, filename string, data []byte) { } func TestArchiverSaveFileIncremental(t *testing.T) { - tempdir := restictest.TempDir(t) + tempdir := rtest.TempDir(t) repo := &blobCountingRepo{ Repository: repository.TestRepository(t), saved: make(map[restic.BlobHandle]uint), } - data := restictest.Random(23, 512*1024+887898) + data := rtest.Random(23, 512*1024+887898) testfile := filepath.Join(tempdir, "testfile") for i := 0; i < 3; i++ { @@ -509,12 +512,12 @@ func chmodTwice(t testing.TB, name string) { // POSIX says that ctime is updated "even if the file status does not // change", but let's make sure it does change, just in case. err := os.Chmod(name, 0700) - restictest.OK(t, err) + rtest.OK(t, err) sleep() err = os.Chmod(name, 0600) - restictest.OK(t, err) + rtest.OK(t, err) } func lstat(t testing.TB, name string) os.FileInfo { @@ -553,7 +556,7 @@ func rename(t testing.TB, oldname, newname string) { } func nodeFromFI(t testing.TB, filename string, fi os.FileInfo) *restic.Node { - node, err := restic.NodeFromFileInfo(filename, fi) + node, err := restic.NodeFromFileInfo(filename, fi, false) if err != nil { t.Fatal(err) } @@ -673,7 +676,7 @@ func TestFileChanged(t *testing.T) { t.Skip("don't run test on Windows") } - tempdir := restictest.TempDir(t) + tempdir := rtest.TempDir(t) filename := filepath.Join(tempdir, "file") content := defaultContent @@ -709,7 +712,7 @@ func TestFileChanged(t *testing.T) { } func TestFilChangedSpecialCases(t *testing.T) { - tempdir := restictest.TempDir(t) + tempdir := rtest.TempDir(t) filename := filepath.Join(tempdir, "file") content := []byte("foobar") @@ -743,12 +746,12 @@ func TestArchiverSaveDir(t *testing.T) { }{ { src: TestDir{ - "targetfile": TestFile{Content: string(restictest.Random(888, 2*1024*1024+5000))}, + "targetfile": TestFile{Content: string(rtest.Random(888, 2*1024*1024+5000))}, }, target: ".", want: TestDir{ "targetdir": TestDir{ - "targetfile": TestFile{Content: string(restictest.Random(888, 2*1024*1024+5000))}, + "targetfile": TestFile{Content: string(rtest.Random(888, 2*1024*1024+5000))}, }, }, }, @@ -758,8 +761,8 @@ func TestArchiverSaveDir(t *testing.T) { "foo": TestFile{Content: "foo"}, "emptyfile": TestFile{Content: ""}, "bar": TestFile{Content: "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"}, - "largefile": TestFile{Content: string(restictest.Random(888, 2*1024*1024+5000))}, - "largerfile": TestFile{Content: string(restictest.Random(234, 5*1024*1024+5000))}, + "largefile": TestFile{Content: string(rtest.Random(888, 2*1024*1024+5000))}, + "largerfile": TestFile{Content: string(rtest.Random(234, 5*1024*1024+5000))}, }, }, target: "targetdir", @@ -831,13 +834,14 @@ func TestArchiverSaveDir(t *testing.T) { arch := New(repo, fs.Track{FS: fs.Local{}}, Options{}) arch.runWorkers(ctx, wg) + arch.summary = &Summary{} chdir := tempdir if test.chdir != "" { chdir = filepath.Join(chdir, test.chdir) } - back := restictest.Chdir(t, chdir) + back := rtest.Chdir(t, chdir) defer back() fi, err := fs.Lstat(test.target) @@ -845,7 +849,7 @@ func TestArchiverSaveDir(t *testing.T) { t.Fatal(err) } - ft, err := arch.SaveDir(ctx, "/", test.target, fi, nil, nil) + ft, err := arch.saveDir(ctx, "/", test.target, fi, nil, nil) if err != nil { t.Fatal(err) } @@ -895,7 +899,7 @@ func 
TestArchiverSaveDir(t *testing.T) { } func TestArchiverSaveDirIncremental(t *testing.T) { - tempdir := restictest.TempDir(t) + tempdir := rtest.TempDir(t) repo := &blobCountingRepo{ Repository: repository.TestRepository(t), @@ -912,13 +916,14 @@ func TestArchiverSaveDirIncremental(t *testing.T) { arch := New(repo, fs.Track{FS: fs.Local{}}, Options{}) arch.runWorkers(ctx, wg) + arch.summary = &Summary{} fi, err := fs.Lstat(tempdir) if err != nil { t.Fatal(err) } - ft, err := arch.SaveDir(ctx, "/", tempdir, fi, nil, nil) + ft, err := arch.saveDir(ctx, "/", tempdir, fi, nil, nil) if err != nil { t.Fatal(err) } @@ -982,9 +987,9 @@ func TestArchiverSaveDirIncremental(t *testing.T) { // bothZeroOrNeither fails the test if only one of exp, act is zero. func bothZeroOrNeither(tb testing.TB, exp, act uint64) { + tb.Helper() if (exp == 0 && act != 0) || (exp != 0 && act == 0) { - _, file, line, _ := runtime.Caller(1) - tb.Fatalf("\033[31m%s:%d:\n\n\texp: %#v\n\n\tgot: %#v\033[39m\n\n", filepath.Base(file), line, exp, act) + rtest.Equals(tb, exp, act) } } @@ -1004,7 +1009,7 @@ func TestArchiverSaveTree(t *testing.T) { prepare func(t testing.TB) targets []string want TestDir - stat ItemStats + stat Summary }{ { src: TestDir{ @@ -1014,7 +1019,12 @@ func TestArchiverSaveTree(t *testing.T) { want: TestDir{ "targetfile": TestFile{Content: string("foobar")}, }, - stat: ItemStats{1, 6, 32 + 6, 0, 0, 0}, + stat: Summary{ + ItemStats: ItemStats{1, 6, 32 + 6, 0, 0, 0}, + ProcessedBytes: 6, + Files: ChangeStats{1, 0, 0}, + Dirs: ChangeStats{0, 0, 0}, + }, }, { src: TestDir{ @@ -1026,7 +1036,12 @@ func TestArchiverSaveTree(t *testing.T) { "targetfile": TestFile{Content: string("foobar")}, "filesymlink": TestSymlink{Target: "targetfile"}, }, - stat: ItemStats{1, 6, 32 + 6, 0, 0, 0}, + stat: Summary{ + ItemStats: ItemStats{1, 6, 32 + 6, 0, 0, 0}, + ProcessedBytes: 6, + Files: ChangeStats{1, 0, 0}, + Dirs: ChangeStats{0, 0, 0}, + }, }, { src: TestDir{ @@ -1046,7 +1061,12 @@ func TestArchiverSaveTree(t *testing.T) { "symlink": TestSymlink{Target: "subdir"}, }, }, - stat: ItemStats{0, 0, 0, 1, 0x154, 0x16a}, + stat: Summary{ + ItemStats: ItemStats{0, 0, 0, 1, 0x154, 0x16a}, + ProcessedBytes: 0, + Files: ChangeStats{0, 0, 0}, + Dirs: ChangeStats{1, 0, 0}, + }, }, { src: TestDir{ @@ -1070,7 +1090,12 @@ func TestArchiverSaveTree(t *testing.T) { }, }, }, - stat: ItemStats{1, 6, 32 + 6, 3, 0x47f, 0x4c1}, + stat: Summary{ + ItemStats: ItemStats{1, 6, 32 + 6, 3, 0x47f, 0x4c1}, + ProcessedBytes: 6, + Files: ChangeStats{1, 0, 0}, + Dirs: ChangeStats{3, 0, 0}, + }, }, } @@ -1082,20 +1107,13 @@ func TestArchiverSaveTree(t *testing.T) { arch := New(repo, testFS, Options{}) - var stat ItemStats - lock := &sync.Mutex{} - arch.CompleteItem = func(item string, previous, current *restic.Node, s ItemStats, d time.Duration) { - lock.Lock() - defer lock.Unlock() - stat.Add(s) - } - wg, ctx := errgroup.WithContext(context.TODO()) repo.StartPackUploader(ctx, wg) arch.runWorkers(ctx, wg) + arch.summary = &Summary{} - back := restictest.Chdir(t, tempdir) + back := rtest.Chdir(t, tempdir) defer back() if test.prepare != nil { @@ -1107,7 +1125,7 @@ func TestArchiverSaveTree(t *testing.T) { t.Fatal(err) } - fn, _, err := arch.SaveTree(ctx, "/", atree, nil, nil) + fn, _, err := arch.saveTree(ctx, "/", atree, nil, nil) if err != nil { t.Fatal(err) } @@ -1134,11 +1152,15 @@ func TestArchiverSaveTree(t *testing.T) { want = test.src } TestEnsureTree(context.TODO(), t, "/", repo, treeID, want) + stat := arch.summary bothZeroOrNeither(t, 
uint64(test.stat.DataBlobs), uint64(stat.DataBlobs)) bothZeroOrNeither(t, uint64(test.stat.TreeBlobs), uint64(stat.TreeBlobs)) bothZeroOrNeither(t, test.stat.DataSize, stat.DataSize) bothZeroOrNeither(t, test.stat.DataSizeInRepo, stat.DataSizeInRepo) bothZeroOrNeither(t, test.stat.TreeSizeInRepo, stat.TreeSizeInRepo) + rtest.Equals(t, test.stat.ProcessedBytes, stat.ProcessedBytes) + rtest.Equals(t, test.stat.Files, stat.Files) + rtest.Equals(t, test.stat.Dirs, stat.Dirs) }) } } @@ -1386,7 +1408,7 @@ func TestArchiverSnapshot(t *testing.T) { chdir = filepath.Join(chdir, filepath.FromSlash(test.chdir)) } - back := restictest.Chdir(t, chdir) + back := rtest.Chdir(t, chdir) defer back() var targets []string @@ -1395,7 +1417,7 @@ func TestArchiverSnapshot(t *testing.T) { } t.Logf("targets: %v", targets) - sn, snapshotID, err := arch.Snapshot(ctx, targets, SnapshotOptions{Time: time.Now()}) + sn, snapshotID, _, err := arch.Snapshot(ctx, targets, SnapshotOptions{Time: time.Now()}) if err != nil { t.Fatal(err) } @@ -1408,7 +1430,7 @@ func TestArchiverSnapshot(t *testing.T) { } TestEnsureSnapshot(t, repo, snapshotID, want) - checker.TestCheckRepo(t, repo) + checker.TestCheckRepo(t, repo, false) // check that the snapshot contains the targets with absolute paths for i, target := range sn.Paths { @@ -1539,11 +1561,11 @@ func TestArchiverSnapshotSelect(t *testing.T) { arch := New(repo, fs.Track{FS: fs.Local{}}, Options{}) arch.Select = test.selFn - back := restictest.Chdir(t, tempdir) + back := rtest.Chdir(t, tempdir) defer back() targets := []string{"."} - _, snapshotID, err := arch.Snapshot(ctx, targets, SnapshotOptions{Time: time.Now()}) + _, snapshotID, _, err := arch.Snapshot(ctx, targets, SnapshotOptions{Time: time.Now()}) if test.err != "" { if err == nil { t.Fatalf("expected error not found, got %v, wanted %q", err, test.err) @@ -1568,7 +1590,7 @@ func TestArchiverSnapshotSelect(t *testing.T) { } TestEnsureSnapshot(t, repo, snapshotID, want) - checker.TestCheckRepo(t, repo) + checker.TestCheckRepo(t, repo, false) }) } } @@ -1616,17 +1638,85 @@ func (f MockFile) Read(p []byte) (int, error) { return n, err } +func checkSnapshotStats(t *testing.T, sn *restic.Snapshot, stat Summary) { + rtest.Equals(t, stat.Files.New, sn.Summary.FilesNew) + rtest.Equals(t, stat.Files.Changed, sn.Summary.FilesChanged) + rtest.Equals(t, stat.Files.Unchanged, sn.Summary.FilesUnmodified) + rtest.Equals(t, stat.Dirs.New, sn.Summary.DirsNew) + rtest.Equals(t, stat.Dirs.Changed, sn.Summary.DirsChanged) + rtest.Equals(t, stat.Dirs.Unchanged, sn.Summary.DirsUnmodified) + rtest.Equals(t, stat.ProcessedBytes, sn.Summary.TotalBytesProcessed) + rtest.Equals(t, stat.Files.New+stat.Files.Changed+stat.Files.Unchanged, sn.Summary.TotalFilesProcessed) + bothZeroOrNeither(t, uint64(stat.DataBlobs), uint64(sn.Summary.DataBlobs)) + bothZeroOrNeither(t, uint64(stat.TreeBlobs), uint64(sn.Summary.TreeBlobs)) + bothZeroOrNeither(t, uint64(stat.DataSize+stat.TreeSize), uint64(sn.Summary.DataAdded)) + bothZeroOrNeither(t, uint64(stat.DataSizeInRepo+stat.TreeSizeInRepo), uint64(sn.Summary.DataAddedPacked)) +} + func TestArchiverParent(t *testing.T) { var tests = []struct { - src TestDir - read map[string]int // tracks number of times a file must have been read + src TestDir + modify func(path string) + statInitial Summary + statSecond Summary }{ { src: TestDir{ - "targetfile": TestFile{Content: string(restictest.Random(888, 2*1024*1024+5000))}, + "targetfile": TestFile{Content: string(rtest.Random(888, 2*1024*1024+5000))}, }, - read: 
map[string]int{ - "targetfile": 1, + statInitial: Summary{ + Files: ChangeStats{1, 0, 0}, + Dirs: ChangeStats{0, 0, 0}, + ProcessedBytes: 2102152, + ItemStats: ItemStats{3, 0x201593, 0x201632, 1, 0, 0}, + }, + statSecond: Summary{ + Files: ChangeStats{0, 0, 1}, + Dirs: ChangeStats{0, 0, 0}, + ProcessedBytes: 2102152, + }, + }, + { + src: TestDir{ + "targetDir": TestDir{ + "targetfile": TestFile{Content: string(rtest.Random(888, 1234))}, + "targetfile2": TestFile{Content: string(rtest.Random(888, 1235))}, + }, + }, + statInitial: Summary{ + Files: ChangeStats{2, 0, 0}, + Dirs: ChangeStats{1, 0, 0}, + ProcessedBytes: 2469, + ItemStats: ItemStats{2, 0xe1c, 0xcd9, 2, 0, 0}, + }, + statSecond: Summary{ + Files: ChangeStats{0, 0, 2}, + Dirs: ChangeStats{0, 0, 1}, + ProcessedBytes: 2469, + }, + }, + { + src: TestDir{ + "targetDir": TestDir{ + "targetfile": TestFile{Content: string(rtest.Random(888, 1234))}, + }, + "targetfile2": TestFile{Content: string(rtest.Random(888, 1235))}, + }, + modify: func(path string) { + remove(t, filepath.Join(path, "targetDir", "targetfile")) + save(t, filepath.Join(path, "targetfile2"), []byte("foobar")) + }, + statInitial: Summary{ + Files: ChangeStats{2, 0, 0}, + Dirs: ChangeStats{1, 0, 0}, + ProcessedBytes: 2469, + ItemStats: ItemStats{2, 0xe13, 0xcf8, 2, 0, 0}, + }, + statSecond: Summary{ + Files: ChangeStats{0, 1, 0}, + Dirs: ChangeStats{0, 1, 0}, + ProcessedBytes: 6, + ItemStats: ItemStats{1, 0x305, 0x233, 2, 0, 0}, }, }, } @@ -1645,10 +1735,10 @@ func TestArchiverParent(t *testing.T) { arch := New(repo, testFS, Options{}) - back := restictest.Chdir(t, tempdir) + back := rtest.Chdir(t, tempdir) defer back() - firstSnapshot, firstSnapshotID, err := arch.Snapshot(ctx, []string{"."}, SnapshotOptions{Time: time.Now()}) + firstSnapshot, firstSnapshotID, summary, err := arch.Snapshot(ctx, []string{"."}, SnapshotOptions{Time: time.Now()}) if err != nil { t.Fatal(err) } @@ -1673,38 +1763,38 @@ func TestArchiverParent(t *testing.T) { } return nil }) + rtest.Equals(t, test.statInitial.Files, summary.Files) + rtest.Equals(t, test.statInitial.Dirs, summary.Dirs) + rtest.Equals(t, test.statInitial.ProcessedBytes, summary.ProcessedBytes) + checkSnapshotStats(t, firstSnapshot, test.statInitial) + + if test.modify != nil { + test.modify(tempdir) + } opts := SnapshotOptions{ Time: time.Now(), ParentSnapshot: firstSnapshot, } - _, secondSnapshotID, err := arch.Snapshot(ctx, []string{"."}, opts) + testFS.bytesRead = map[string]int{} + secondSnapshot, secondSnapshotID, summary, err := arch.Snapshot(ctx, []string{"."}, opts) if err != nil { t.Fatal(err) } - // check that all files still been read exactly once - TestWalkFiles(t, ".", test.src, func(filename string, item interface{}) error { - file, ok := item.(TestFile) - if !ok { - return nil - } - - n, ok := testFS.bytesRead[filename] - if !ok { - t.Fatalf("file %v was not read at all", filename) - } - - if n != len(file.Content) { - t.Fatalf("file %v: read %v bytes, wanted %v bytes", filename, n, len(file.Content)) - } - return nil - }) + if test.modify == nil { + // check that no files were read this time + rtest.Equals(t, map[string]int{}, testFS.bytesRead) + } + rtest.Equals(t, test.statSecond.Files, summary.Files) + rtest.Equals(t, test.statSecond.Dirs, summary.Dirs) + rtest.Equals(t, test.statSecond.ProcessedBytes, summary.ProcessedBytes) + checkSnapshotStats(t, secondSnapshot, test.statSecond) t.Logf("second backup saved as %v", secondSnapshotID.Str()) t.Logf("testfs: %v", testFS) - checker.TestCheckRepo(t, repo) + 
checker.TestCheckRepo(t, repo, false) }) } } @@ -1804,7 +1894,7 @@ func TestArchiverErrorReporting(t *testing.T) { tempdir, repo := prepareTempdirRepoSrc(t, test.src) - back := restictest.Chdir(t, tempdir) + back := rtest.Chdir(t, tempdir) defer back() if test.prepare != nil { @@ -1814,7 +1904,7 @@ func TestArchiverErrorReporting(t *testing.T) { arch := New(repo, fs.Track{FS: fs.Local{}}, Options{}) arch.Error = test.errFn - _, snapshotID, err := arch.Snapshot(ctx, []string{"."}, SnapshotOptions{Time: time.Now()}) + _, snapshotID, _, err := arch.Snapshot(ctx, []string{"."}, SnapshotOptions{Time: time.Now()}) if test.mustError { if err != nil { t.Logf("found expected error (%v), skipping further checks", err) @@ -1837,7 +1927,7 @@ func TestArchiverErrorReporting(t *testing.T) { } TestEnsureSnapshot(t, repo, snapshotID, want) - checker.TestCheckRepo(t, repo) + checker.TestCheckRepo(t, repo, false) }) } } @@ -1874,7 +1964,7 @@ func TestArchiverContextCanceled(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) cancel() - tempdir := restictest.TempDir(t) + tempdir := rtest.TempDir(t) TestCreateFiles(t, tempdir, TestDir{ "targetfile": TestFile{Content: "foobar"}, }) @@ -1882,12 +1972,12 @@ func TestArchiverContextCanceled(t *testing.T) { // Ensure that the archiver itself reports the canceled context and not just the backend repo := repository.TestRepositoryWithBackend(t, &noCancelBackend{mem.New()}, 0, repository.Options{}) - back := restictest.Chdir(t, tempdir) + back := rtest.Chdir(t, tempdir) defer back() arch := New(repo, fs.Track{FS: fs.Local{}}, Options{}) - _, snapshotID, err := arch.Snapshot(ctx, []string{"."}, SnapshotOptions{Time: time.Now()}) + _, snapshotID, _, err := arch.Snapshot(ctx, []string{"."}, SnapshotOptions{Time: time.Now()}) if err != nil { t.Logf("found expected error (%v)", err) @@ -1968,16 +2058,16 @@ func TestArchiverAbortEarlyOnError(t *testing.T) { { src: TestDir{ "dir": TestDir{ - "file0": TestFile{Content: string(restictest.Random(0, 1024))}, - "file1": TestFile{Content: string(restictest.Random(1, 1024))}, - "file2": TestFile{Content: string(restictest.Random(2, 1024))}, - "file3": TestFile{Content: string(restictest.Random(3, 1024))}, - "file4": TestFile{Content: string(restictest.Random(4, 1024))}, - "file5": TestFile{Content: string(restictest.Random(5, 1024))}, - "file6": TestFile{Content: string(restictest.Random(6, 1024))}, - "file7": TestFile{Content: string(restictest.Random(7, 1024))}, - "file8": TestFile{Content: string(restictest.Random(8, 1024))}, - "file9": TestFile{Content: string(restictest.Random(9, 1024))}, + "file0": TestFile{Content: string(rtest.Random(0, 1024))}, + "file1": TestFile{Content: string(rtest.Random(1, 1024))}, + "file2": TestFile{Content: string(rtest.Random(2, 1024))}, + "file3": TestFile{Content: string(rtest.Random(3, 1024))}, + "file4": TestFile{Content: string(rtest.Random(4, 1024))}, + "file5": TestFile{Content: string(rtest.Random(5, 1024))}, + "file6": TestFile{Content: string(rtest.Random(6, 1024))}, + "file7": TestFile{Content: string(rtest.Random(7, 1024))}, + "file8": TestFile{Content: string(rtest.Random(8, 1024))}, + "file9": TestFile{Content: string(rtest.Random(9, 1024))}, }, }, wantOpen: map[string]uint{ @@ -2002,7 +2092,7 @@ func TestArchiverAbortEarlyOnError(t *testing.T) { tempdir, repo := prepareTempdirRepoSrc(t, test.src) - back := restictest.Chdir(t, tempdir) + back := rtest.Chdir(t, tempdir) defer back() testFS := &TrackFS{ @@ -2026,7 +2116,7 @@ func TestArchiverAbortEarlyOnError(t 
*testing.T) { SaveBlobConcurrency: 1, }) - _, _, err := arch.Snapshot(ctx, []string{"."}, SnapshotOptions{Time: time.Now()}) + _, _, _, err := arch.Snapshot(ctx, []string{"."}, SnapshotOptions{Time: time.Now()}) if !errors.Is(err, test.err) { t.Errorf("expected error (%v) not found, got %v", test.err, err) } @@ -2054,7 +2144,7 @@ func snapshot(t testing.TB, repo restic.Repository, fs fs.FS, parent *restic.Sna Time: time.Now(), ParentSnapshot: parent, } - snapshot, _, err := arch.Snapshot(ctx, []string{filename}, sopts) + snapshot, _, _, err := arch.Snapshot(ctx, []string{filename}, sopts) if err != nil { t.Fatal(err) } @@ -2125,6 +2215,8 @@ const ( ) func TestMetadataChanged(t *testing.T) { + defer feature.TestSetFlag(t, feature.Flag, feature.DeviceIDForHardlinks, true)() + files := TestDir{ "testfile": TestFile{ Content: "foo bar test file", @@ -2133,12 +2225,12 @@ func TestMetadataChanged(t *testing.T) { tempdir, repo := prepareTempdirRepoSrc(t, files) - back := restictest.Chdir(t, tempdir) + back := rtest.Chdir(t, tempdir) defer back() // get metadata fi := lstat(t, "testfile") - want, err := restic.NodeFromFileInfo("testfile", fi) + want, err := restic.NodeFromFileInfo("testfile", fi, false) if err != nil { t.Fatal(err) } @@ -2153,6 +2245,7 @@ func TestMetadataChanged(t *testing.T) { sn, node2 := snapshot(t, repo, fs, nil, "testfile") // set some values so we can then compare the nodes + want.DeviceID = 0 want.Content = node2.Content want.Path = "" if len(want.ExtendedAttributes) == 0 { @@ -2195,7 +2288,7 @@ func TestMetadataChanged(t *testing.T) { // make sure the content matches TestEnsureFileContent(context.Background(), t, repo, "testfile", node3, files["testfile"].(TestFile)) - checker.TestCheckRepo(t, repo) + checker.TestCheckRepo(t, repo, false) } func TestRacyFileSwap(t *testing.T) { @@ -2207,7 +2300,7 @@ func TestRacyFileSwap(t *testing.T) { tempdir, repo := prepareTempdirRepoSrc(t, files) - back := restictest.Chdir(t, tempdir) + back := rtest.Chdir(t, tempdir) defer back() // get metadata of current folder @@ -2236,7 +2329,7 @@ func TestRacyFileSwap(t *testing.T) { arch.runWorkers(ctx, wg) // fs.Track will panic if the file was not closed - _, excluded, err := arch.Save(ctx, "/", tempfile, nil) + _, excluded, err := arch.save(ctx, "/", tempfile, nil) if err == nil { t.Errorf("Save() should have failed") } diff --git a/internal/archiver/archiver_unix_test.go b/internal/archiver/archiver_unix_test.go index 7523f0749..a6b1aad2e 100644 --- a/internal/archiver/archiver_unix_test.go +++ b/internal/archiver/archiver_unix_test.go @@ -6,6 +6,12 @@ package archiver import ( "os" "syscall" + "testing" + + "github.com/restic/restic/internal/feature" + "github.com/restic/restic/internal/fs" + "github.com/restic/restic/internal/restic" + rtest "github.com/restic/restic/internal/test" ) type wrappedFileInfo struct { @@ -39,3 +45,45 @@ func wrapFileInfo(fi os.FileInfo) os.FileInfo { return res } + +func statAndSnapshot(t *testing.T, repo restic.Repository, name string) (*restic.Node, *restic.Node) { + fi := lstat(t, name) + want, err := restic.NodeFromFileInfo(name, fi, false) + rtest.OK(t, err) + + _, node := snapshot(t, repo, fs.Local{}, nil, name) + return want, node +} + +func TestHardlinkMetadata(t *testing.T) { + defer feature.TestSetFlag(t, feature.Flag, feature.DeviceIDForHardlinks, true)() + + files := TestDir{ + "testfile": TestFile{ + Content: "foo bar test file", + }, + "linktarget": TestFile{ + Content: "test file", + }, + "testlink": TestHardlink{ + Target: "./linktarget", + }, 
+ "testdir": TestDir{}, + } + + tempdir, repo := prepareTempdirRepoSrc(t, files) + + back := rtest.Chdir(t, tempdir) + defer back() + + want, node := statAndSnapshot(t, repo, "testlink") + rtest.Assert(t, node.DeviceID == want.DeviceID, "device id mismatch expected %v got %v", want.DeviceID, node.DeviceID) + rtest.Assert(t, node.Links == want.Links, "link count mismatch expected %v got %v", want.Links, node.Links) + rtest.Assert(t, node.Inode == want.Inode, "inode mismatch expected %v got %v", want.Inode, node.Inode) + + _, node = statAndSnapshot(t, repo, "testfile") + rtest.Assert(t, node.DeviceID == 0, "device id mismatch for testfile expected %v got %v", 0, node.DeviceID) + + _, node = statAndSnapshot(t, repo, "testdir") + rtest.Assert(t, node.DeviceID == 0, "device id mismatch for testdir expected %v got %v", 0, node.DeviceID) +} diff --git a/internal/archiver/file_saver.go b/internal/archiver/file_saver.go index 7f11bff8a..d10334301 100644 --- a/internal/archiver/file_saver.go +++ b/internal/archiver/file_saver.go @@ -29,7 +29,7 @@ type FileSaver struct { CompleteBlob func(bytes uint64) - NodeFromFileInfo func(snPath, filename string, fi os.FileInfo) (*restic.Node, error) + NodeFromFileInfo func(snPath, filename string, fi os.FileInfo, ignoreXattrListError bool) (*restic.Node, error) } // NewFileSaver returns a new file saver. A worker pool with fileWorkers is @@ -156,7 +156,7 @@ func (s *FileSaver) saveFile(ctx context.Context, chnker *chunker.Chunker, snPat debug.Log("%v", snPath) - node, err := s.NodeFromFileInfo(snPath, f.Name(), fi) + node, err := s.NodeFromFileInfo(snPath, f.Name(), fi, false) if err != nil { _ = f.Close() completeError(err) diff --git a/internal/archiver/file_saver_test.go b/internal/archiver/file_saver_test.go index ced9d796e..409bdedd0 100644 --- a/internal/archiver/file_saver_test.go +++ b/internal/archiver/file_saver_test.go @@ -49,8 +49,8 @@ func startFileSaver(ctx context.Context, t testing.TB) (*FileSaver, context.Cont } s := NewFileSaver(ctx, wg, saveBlob, pol, workers, workers) - s.NodeFromFileInfo = func(snPath, filename string, fi os.FileInfo) (*restic.Node, error) { - return restic.NodeFromFileInfo(filename, fi) + s.NodeFromFileInfo = func(snPath, filename string, fi os.FileInfo, ignoreXattrListError bool) (*restic.Node, error) { + return restic.NodeFromFileInfo(filename, fi, ignoreXattrListError) } return s, ctx, wg diff --git a/internal/archiver/scanner_test.go b/internal/archiver/scanner_test.go index 1b4cd1f7f..b5b7057b8 100644 --- a/internal/archiver/scanner_test.go +++ b/internal/archiver/scanner_test.go @@ -9,7 +9,7 @@ import ( "github.com/google/go-cmp/cmp" "github.com/restic/restic/internal/fs" - restictest "github.com/restic/restic/internal/test" + rtest "github.com/restic/restic/internal/test" ) func TestScanner(t *testing.T) { @@ -81,10 +81,10 @@ func TestScanner(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - tempdir := restictest.TempDir(t) + tempdir := rtest.TempDir(t) TestCreateFiles(t, tempdir, test.src) - back := restictest.Chdir(t, tempdir) + back := rtest.Chdir(t, tempdir) defer back() cur, err := os.Getwd() @@ -216,10 +216,10 @@ func TestScannerError(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - tempdir := restictest.TempDir(t) + tempdir := rtest.TempDir(t) TestCreateFiles(t, tempdir, test.src) - back := restictest.Chdir(t, tempdir) + back := rtest.Chdir(t, tempdir) defer back() cur, err := os.Getwd() @@ -288,10 +288,10 @@ func 
TestScannerCancel(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - tempdir := restictest.TempDir(t) + tempdir := rtest.TempDir(t) TestCreateFiles(t, tempdir, src) - back := restictest.Chdir(t, tempdir) + back := rtest.Chdir(t, tempdir) defer back() cur, err := os.Getwd() diff --git a/internal/archiver/testing.go b/internal/archiver/testing.go index 111c1e68c..a186a4ee5 100644 --- a/internal/archiver/testing.go +++ b/internal/archiver/testing.go @@ -6,6 +6,7 @@ import ( "path" "path/filepath" "runtime" + "sort" "strings" "testing" "time" @@ -31,7 +32,7 @@ func TestSnapshot(t testing.TB, repo restic.Repository, path string, parent *res } opts.ParentSnapshot = sn } - sn, _, err := arch.Snapshot(context.TODO(), []string{path}, opts) + sn, _, _, err := arch.Snapshot(context.TODO(), []string{path}, opts) if err != nil { t.Fatal(err) } @@ -63,11 +64,29 @@ func (s TestSymlink) String() string { return "" } +// TestHardlink describes a hardlink created for a test. +type TestHardlink struct { + Target string +} + +func (s TestHardlink) String() string { + return "" +} + // TestCreateFiles creates a directory structure described by dir at target, // which must already exist. On Windows, symlinks aren't created. func TestCreateFiles(t testing.TB, target string, dir TestDir) { t.Helper() - for name, item := range dir { + + // ensure a stable order such that it can be guaranteed that a hardlink target already exists + var names []string + for name := range dir { + names = append(names, name) + } + sort.Strings(names) + + for _, name := range names { + item := dir[name] targetPath := filepath.Join(target, name) switch it := item.(type) { @@ -81,6 +100,11 @@ func TestCreateFiles(t testing.TB, target string, dir TestDir) { if err != nil { t.Fatal(err) } + case TestHardlink: + err := fs.Link(filepath.Join(target, filepath.FromSlash(it.Target)), targetPath) + if err != nil { + t.Fatal(err) + } case TestDir: err := fs.Mkdir(targetPath, 0755) if err != nil { diff --git a/internal/archiver/testing_test.go b/internal/archiver/testing_test.go index ada7261f1..ff3bd3668 100644 --- a/internal/archiver/testing_test.go +++ b/internal/archiver/testing_test.go @@ -11,7 +11,7 @@ import ( "github.com/google/go-cmp/cmp" "github.com/restic/restic/internal/fs" "github.com/restic/restic/internal/repository" - restictest "github.com/restic/restic/internal/test" + rtest "github.com/restic/restic/internal/test" ) // MockT passes through all logging functions from T, but catches Fail(), @@ -101,7 +101,7 @@ func TestTestCreateFiles(t *testing.T) { } for i, test := range tests { - tempdir := restictest.TempDir(t) + tempdir := rtest.TempDir(t) t.Run("", func(t *testing.T) { tempdir := filepath.Join(tempdir, fmt.Sprintf("test-%d", i)) @@ -191,7 +191,7 @@ func TestTestWalkFiles(t *testing.T) { for _, test := range tests { t.Run("", func(t *testing.T) { - tempdir := restictest.TempDir(t) + tempdir := rtest.TempDir(t) got := make(map[string]string) @@ -321,7 +321,7 @@ func TestTestEnsureFiles(t *testing.T) { for _, test := range tests { t.Run("", func(t *testing.T) { - tempdir := restictest.TempDir(t) + tempdir := rtest.TempDir(t) createFilesAt(t, tempdir, test.files) subtestT := testing.TB(t) @@ -452,7 +452,7 @@ func TestTestEnsureSnapshot(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - tempdir := restictest.TempDir(t) + tempdir := rtest.TempDir(t) targetDir := filepath.Join(tempdir, "target") err := fs.Mkdir(targetDir, 0700) @@ -462,7 +462,7 @@ func 
TestTestEnsureSnapshot(t *testing.T) { createFilesAt(t, targetDir, test.files) - back := restictest.Chdir(t, tempdir) + back := rtest.Chdir(t, tempdir) defer back() repo := repository.TestRepository(t) @@ -473,7 +473,7 @@ func TestTestEnsureSnapshot(t *testing.T) { Hostname: "localhost", Tags: []string{"test"}, } - _, id, err := arch.Snapshot(ctx, []string{"."}, opts) + _, id, _, err := arch.Snapshot(ctx, []string{"."}, opts) if err != nil { t.Fatal(err) } diff --git a/internal/archiver/tree_saver.go b/internal/archiver/tree_saver.go index eae524a78..9c11b48f0 100644 --- a/internal/archiver/tree_saver.go +++ b/internal/archiver/tree_saver.go @@ -90,6 +90,10 @@ func (s *TreeSaver) save(ctx context.Context, job *saveTreeJob) (*restic.Node, I // return the error if it wasn't ignored if fnr.err != nil { debug.Log("err for %v: %v", fnr.snPath, fnr.err) + if fnr.err == context.Canceled { + return nil, stats, fnr.err + } + fnr.err = s.errFn(fnr.target, fnr.err) if fnr.err == nil { // ignore error diff --git a/internal/archiver/tree_test.go b/internal/archiver/tree_test.go index 7852a4c2e..a9d2d97ff 100644 --- a/internal/archiver/tree_test.go +++ b/internal/archiver/tree_test.go @@ -8,7 +8,7 @@ import ( "github.com/google/go-cmp/cmp" "github.com/restic/restic/internal/fs" - restictest "github.com/restic/restic/internal/test" + rtest "github.com/restic/restic/internal/test" ) // debug.Log requires Tree.String. @@ -439,10 +439,10 @@ func TestTree(t *testing.T) { t.Skip("skip test on unix") } - tempdir := restictest.TempDir(t) + tempdir := rtest.TempDir(t) TestCreateFiles(t, tempdir, test.src) - back := restictest.Chdir(t, tempdir) + back := rtest.Chdir(t, tempdir) defer back() tree, err := NewTree(fs.Local{}, test.targets) diff --git a/internal/backend/http_transport.go b/internal/backend/http_transport.go index 9ee1c91f1..19b20dc6a 100644 --- a/internal/backend/http_transport.go +++ b/internal/backend/http_transport.go @@ -10,6 +10,7 @@ import ( "strings" "time" + "github.com/peterbourgon/unixtransport" "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" ) @@ -82,6 +83,8 @@ func Transport(opts TransportOptions) (http.RoundTripper, error) { TLSClientConfig: &tls.Config{}, } + unixtransport.Register(tr) + if opts.InsecureTLS { tr.TLSClientConfig.InsecureSkipVerify = true } diff --git a/internal/backend/layout/layout.go b/internal/backend/layout/layout.go index b600566a4..052fd66ca 100644 --- a/internal/backend/layout/layout.go +++ b/internal/backend/layout/layout.go @@ -10,6 +10,7 @@ import ( "github.com/restic/restic/internal/backend" "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/feature" "github.com/restic/restic/internal/fs" "github.com/restic/restic/internal/restic" ) @@ -93,6 +94,8 @@ func hasBackendFile(ctx context.Context, fs Filesystem, dir string) (bool, error // cannot be detected automatically. var ErrLayoutDetectionFailed = errors.New("auto-detecting the filesystem layout failed") +var ErrLegacyLayoutFound = errors.New("detected legacy S3 layout. Use `RESTIC_FEATURES=deprecate-s3-legacy-layout=false restic migrate s3_layout` to migrate your repository") + // DetectLayout tries to find out which layout is used in a local (or sftp) // filesystem at the given path. If repo is nil, an instance of LocalFilesystem // is used. 
@@ -123,6 +126,10 @@ func DetectLayout(ctx context.Context, repo Filesystem, dir string) (Layout, err } if foundKeyFile && !foundKeysFile { + if feature.Flag.Enabled(feature.DeprecateS3LegacyLayout) { + return nil, ErrLegacyLayoutFound + } + debug.Log("found s3 layout at %v", dir) return &S3LegacyLayout{ Path: dir, @@ -145,6 +152,10 @@ func ParseLayout(ctx context.Context, repo Filesystem, layout, defaultLayout, pa Join: repo.Join, } case "s3legacy": + if feature.Flag.Enabled(feature.DeprecateS3LegacyLayout) { + return nil, ErrLegacyLayoutFound + } + l = &S3LegacyLayout{ Path: path, Join: repo.Join, diff --git a/internal/backend/layout/layout_test.go b/internal/backend/layout/layout_test.go index 998f5aeb6..55a0749c9 100644 --- a/internal/backend/layout/layout_test.go +++ b/internal/backend/layout/layout_test.go @@ -10,6 +10,7 @@ import ( "testing" "github.com/restic/restic/internal/backend" + "github.com/restic/restic/internal/feature" rtest "github.com/restic/restic/internal/test" ) @@ -352,6 +353,7 @@ func TestS3LegacyLayout(t *testing.T) { } func TestDetectLayout(t *testing.T) { + defer feature.TestSetFlag(t, feature.Flag, feature.DeprecateS3LegacyLayout, false)() path := rtest.TempDir(t) var tests = []struct { @@ -389,6 +391,7 @@ func TestDetectLayout(t *testing.T) { } func TestParseLayout(t *testing.T) { + defer feature.TestSetFlag(t, feature.Flag, feature.DeprecateS3LegacyLayout, false)() path := rtest.TempDir(t) var tests = []struct { diff --git a/internal/backend/local/config.go b/internal/backend/local/config.go index dc5e7948c..e08f05550 100644 --- a/internal/backend/local/config.go +++ b/internal/backend/local/config.go @@ -10,7 +10,7 @@ import ( // Config holds all information needed to open a local repository. type Config struct { Path string - Layout string `option:"layout" help:"use this backend directory layout (default: auto-detect)"` + Layout string `option:"layout" help:"use this backend directory layout (default: auto-detect) (deprecated)"` Connections uint `option:"connections" help:"set a limit for the number of concurrent operations (default: 2)"` } diff --git a/internal/backend/local/layout_test.go b/internal/backend/local/layout_test.go index 46f3996bb..00c91376a 100644 --- a/internal/backend/local/layout_test.go +++ b/internal/backend/local/layout_test.go @@ -6,10 +6,12 @@ import ( "testing" "github.com/restic/restic/internal/backend" + "github.com/restic/restic/internal/feature" rtest "github.com/restic/restic/internal/test" ) func TestLayout(t *testing.T) { + defer feature.TestSetFlag(t, feature.Flag, feature.DeprecateS3LegacyLayout, false)() path := rtest.TempDir(t) var tests = []struct { diff --git a/internal/backend/mem/mem_backend.go b/internal/backend/mem/mem_backend.go index eea5b060e..8b115b187 100644 --- a/internal/backend/mem/mem_backend.go +++ b/internal/backend/mem/mem_backend.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "encoding/base64" + "fmt" "hash" "io" "net/http" @@ -41,7 +42,7 @@ func NewFactory() location.Factory { ) } -var errNotFound = errors.New("not found") +var errNotFound = fmt.Errorf("not found") const connectionCount = 2 diff --git a/internal/backend/rest/config_test.go b/internal/backend/rest/config_test.go index 23ea9095b..13a1ebb13 100644 --- a/internal/backend/rest/config_test.go +++ b/internal/backend/rest/config_test.go @@ -31,6 +31,13 @@ var configTests = []test.ConfigTestData[Config]{ Connections: 5, }, }, + { + S: "rest:http+unix:///tmp/rest.socket:/my_backup_repo/", + Cfg: Config{ + URL: 
parseURL("http+unix:///tmp/rest.socket:/my_backup_repo/"), + Connections: 5, + }, + }, } func TestParseConfig(t *testing.T) { diff --git a/internal/backend/rest/rest_test.go b/internal/backend/rest/rest_test.go index 6a5b4f8a5..93b9a103e 100644 --- a/internal/backend/rest/rest_test.go +++ b/internal/backend/rest/rest_test.go @@ -1,11 +1,18 @@ +//go:build go1.20 +// +build go1.20 + package rest_test import ( + "bufio" "context" - "net" + "fmt" "net/url" "os" "os/exec" + "regexp" + "strings" + "syscall" "testing" "time" @@ -14,54 +21,133 @@ import ( rtest "github.com/restic/restic/internal/test" ) -func runRESTServer(ctx context.Context, t testing.TB, dir string) (*url.URL, func()) { +var ( + serverStartedRE = regexp.MustCompile("^start server on (.*)$") + +func runRESTServer(ctx context.Context, t testing.TB, dir, reqListenAddr string) (*url.URL, func()) { srv, err := exec.LookPath("rest-server") if err != nil { t.Skip(err) } - cmd := exec.CommandContext(ctx, srv, "--no-auth", "--path", dir) + // create our own context, so that our cleanup can cancel and wait for completion + // this will ensure any open ports, open unix sockets etc are properly closed + processCtx, cancel := context.WithCancel(ctx) + cmd := exec.CommandContext(processCtx, srv, "--no-auth", "--path", dir, "--listen", reqListenAddr) + + // this cancel func is called when the process context is done + cmd.Cancel = func() error { + // we execute in a goroutine as we know the caller will + // be waiting on a .Wait() regardless + go func() { + // try to send a graceful termination signal + if cmd.Process.Signal(syscall.SIGTERM) == nil { + // if we succeed, then wait a few seconds + time.Sleep(2 * time.Second) + } + // and then make sure it's killed either way, ignoring any error code + _ = cmd.Process.Kill() + }() + return nil + } + + // this is the cleanup function that we return to the caller, + // which will cancel our process context, and then wait for it to finish + cleanup := func() { + cancel() + _ = cmd.Wait() + } + + // but in case we don't finish this method, e.g. by calling t.Fatal() + // we also defer a call to clean it up ourselves, guarded by a flag to + // indicate that we returned the function to the caller to deal with. + callerWillCleanUp := false + defer func() { + if !callerWillCleanUp { + cleanup() + } + }() + + // send stdout to our stdout cmd.Stdout = os.Stdout - cmd.Stderr = os.Stdout - if err := cmd.Start(); err != nil { - t.Fatal(err) - } - // wait until the TCP port is reachable - var success bool - for i := 0; i < 10; i++ { - time.Sleep(200 * time.Millisecond) - - c, err := net.Dial("tcp", "localhost:8000") - if err != nil { - continue - } - - success = true - if err := c.Close(); err != nil { - t.Fatal(err) - } - } - - if !success { - t.Fatal("unable to connect to rest server") - return nil, nil - } - - url, err := url.Parse("http://localhost:8000/restic-test/") + // capture stderr with a pipe, as we want to examine this output + // to determine when the server is started and listening.
+ cmdErr, err := cmd.StderrPipe() if err != nil { t.Fatal(err) } - cleanup := func() { - if err := cmd.Process.Kill(); err != nil { - t.Fatal(err) - } - - // ignore errors, we've killed the process - _ = cmd.Wait() + // start the rest-server + if err := cmd.Start(); err != nil { + t.Fatal(err) } + // create a channel to receive the actual listen address on + listenAddrCh := make(chan string) + go func() { + defer close(listenAddrCh) + matched := false + br := bufio.NewReader(cmdErr) + for { + line, err := br.ReadString('\n') + if err != nil { + // we ignore errors, as code that relies on this + // will happily fail via timeout and an empty, closed + // channel. + return + } + + line = strings.Trim(line, "\r\n") + if !matched { + // look for the server started message, and return the address + // that it's listening on + matchedServerListen := serverStartedRE.FindSubmatch([]byte(line)) + if len(matchedServerListen) == 2 { + listenAddrCh <- string(matchedServerListen[1]) + matched = true + } + } + fmt.Fprintln(os.Stdout, line) // print all output to console + } + }() + + // wait for us to get an address, + // or the parent context to cancel, + // or for us to time out + var actualListenAddr string + select { + case <-processCtx.Done(): + t.Fatal(context.Canceled) + case <-time.NewTimer(2 * time.Second).C: + t.Fatal(context.DeadlineExceeded) + case a, ok := <-listenAddrCh: + if !ok { + t.Fatal(context.Canceled) + } + actualListenAddr = a + } + + // this translates the address that the server is listening on + // to a URL suitable for us to connect to + var addrToConnectTo string + if strings.HasPrefix(reqListenAddr, "unix:") { + addrToConnectTo = fmt.Sprintf("http+unix://%s:/restic-test/", actualListenAddr) + } else { + // while we may listen on 0.0.0.0, we connect to localhost + addrToConnectTo = fmt.Sprintf("http://%s/restic-test/", strings.Replace(actualListenAddr, "0.0.0.0", "localhost", 1)) + } + + // parse to a URL + url, err := url.Parse(addrToConnectTo) + if err != nil { + t.Fatal(err) + } + + // indicate that we've completed successfully, and that the caller + // is responsible for calling cleanup + callerWillCleanUp = true return url, cleanup } @@ -91,7 +177,7 @@ func TestBackendREST(t *testing.T) { defer cancel() dir := rtest.TempDir(t) - serverURL, cleanup := runRESTServer(ctx, t, dir) + serverURL, cleanup := runRESTServer(ctx, t, dir, ":0") defer cleanup() newTestSuite(serverURL, false).RunTests(t) @@ -116,7 +202,7 @@ func BenchmarkBackendREST(t *testing.B) { defer cancel() dir := rtest.TempDir(t) - serverURL, cleanup := runRESTServer(ctx, t, dir) + serverURL, cleanup := runRESTServer(ctx, t, dir, ":0") defer cleanup() newTestSuite(serverURL, false).RunBenchmarks(t) diff --git a/internal/backend/rest/rest_unix_test.go b/internal/backend/rest/rest_unix_test.go new file mode 100644 index 000000000..85ef7a73d --- /dev/null +++ b/internal/backend/rest/rest_unix_test.go @@ -0,0 +1,30 @@ +//go:build !windows && go1.20 +// +build !windows,go1.20 + +package rest_test + +import ( + "context" + "fmt" + "path" + "testing" + + rtest "github.com/restic/restic/internal/test" +) + +func TestBackendRESTWithUnixSocket(t *testing.T) { + defer func() { + if t.Skipped() { + rtest.SkipDisallowed(t, "restic/backend/rest.TestBackendREST") + } + }() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + dir := rtest.TempDir(t) + serverURL, cleanup := runRESTServer(ctx, t, path.Join(dir, "data"), fmt.Sprintf("unix:%s", path.Join(dir, "sock"))) + defer cleanup() +
newTestSuite(serverURL, false).RunTests(t) +} diff --git a/internal/backend/s3/config.go b/internal/backend/s3/config.go index b4d44399f..4aea4c3d1 100644 --- a/internal/backend/s3/config.go +++ b/internal/backend/s3/config.go @@ -20,7 +20,7 @@ type Config struct { Secret options.SecretString Bucket string Prefix string - Layout string `option:"layout" help:"use this backend layout (default: auto-detect)"` + Layout string `option:"layout" help:"use this backend layout (default: auto-detect) (deprecated)"` StorageClass string `option:"storage-class" help:"set S3 storage class (STANDARD, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING or REDUCED_REDUNDANCY)"` Connections uint `option:"connections" help:"set a limit for the number of concurrent connections (default: 5)"` diff --git a/internal/backend/sftp/config.go b/internal/backend/sftp/config.go index 65af50d19..aa8ac7bff 100644 --- a/internal/backend/sftp/config.go +++ b/internal/backend/sftp/config.go @@ -13,7 +13,7 @@ import ( type Config struct { User, Host, Port, Path string - Layout string `option:"layout" help:"use this backend directory layout (default: auto-detect)"` + Layout string `option:"layout" help:"use this backend directory layout (default: auto-detect) (deprecated)"` Command string `option:"command" help:"specify command to create sftp connection"` Args string `option:"args" help:"specify arguments for ssh"` diff --git a/internal/backend/sftp/layout_test.go b/internal/backend/sftp/layout_test.go index 9cf24a753..8bb7eac01 100644 --- a/internal/backend/sftp/layout_test.go +++ b/internal/backend/sftp/layout_test.go @@ -8,6 +8,7 @@ import ( "github.com/restic/restic/internal/backend" "github.com/restic/restic/internal/backend/sftp" + "github.com/restic/restic/internal/feature" rtest "github.com/restic/restic/internal/test" ) @@ -16,6 +17,7 @@ func TestLayout(t *testing.T) { t.Skip("sftp server binary not available") } + defer feature.TestSetFlag(t, feature.Flag, feature.DeprecateS3LegacyLayout, false)() path := rtest.TempDir(t) var tests = []struct { diff --git a/internal/cache/file.go b/internal/cache/file.go index 48a38c1d3..1bfe922d2 100644 --- a/internal/cache/file.go +++ b/internal/cache/file.go @@ -165,7 +165,8 @@ func (c *Cache) Clear(t restic.FileType, valid restic.IDSet) error { continue } - if err = fs.Remove(c.filename(backend.Handle{Type: t, Name: id.String()})); err != nil { + // ignore ErrNotExist to gracefully handle multiple processes running Clear() concurrently + if err = fs.Remove(c.filename(backend.Handle{Type: t, Name: id.String()})); err != nil && !errors.Is(err, os.ErrNotExist) { return err } } diff --git a/internal/checker/checker.go b/internal/checker/checker.go index 28f55ce3a..1057341bc 100644 --- a/internal/checker/checker.go +++ b/internal/checker/checker.go @@ -106,9 +106,9 @@ func (c *Checker) LoadSnapshots(ctx context.Context) error { return err } -func computePackTypes(ctx context.Context, idx restic.MasterIndex) map[restic.ID]restic.BlobType { +func computePackTypes(ctx context.Context, idx restic.MasterIndex) (map[restic.ID]restic.BlobType, error) { packs := make(map[restic.ID]restic.BlobType) - idx.Each(ctx, func(pb restic.PackedBlob) { + err := idx.Each(ctx, func(pb restic.PackedBlob) { tpe, exists := packs[pb.PackID] if exists { if pb.Type != tpe { @@ -119,7 +119,7 @@ func computePackTypes(ctx context.Context, idx restic.MasterIndex) map[restic.ID } packs[pb.PackID] = tpe }) - return packs + return packs, err } // LoadIndex loads all index files. 
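A short aside on the `internal/cache/file.go` hunk above: ignoring `os.ErrNotExist` makes `Clear` idempotent, so two restic processes pruning the same cache directory cannot fail each other. A minimal sketch of that removal pattern, with an illustrative path and plain `os.Remove` standing in for restic's `fs.Remove` wrapper:

```go
package main

import (
	"errors"
	"fmt"
	"os"
)

// removeIfPresent deletes name but treats "already gone" as success, so
// concurrent cleanups racing on the same file do not report spurious errors.
func removeIfPresent(name string) error {
	if err := os.Remove(name); err != nil && !errors.Is(err, os.ErrNotExist) {
		return err // a real failure: permissions, I/O, ...
	}
	return nil
}

func main() {
	fmt.Println(removeIfPresent("/tmp/restic-cache-example"))
}
```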
@@ -169,7 +169,7 @@ func (c *Checker) LoadIndex(ctx context.Context, p *progress.Counter) (hints []e debug.Log("process blobs") cnt := 0 - index.Each(ctx, func(blob restic.PackedBlob) { + err = index.Each(ctx, func(blob restic.PackedBlob) { cnt++ if _, ok := packToIndex[blob.PackID]; !ok { @@ -179,7 +179,7 @@ func (c *Checker) LoadIndex(ctx context.Context, p *progress.Counter) (hints []e }) debug.Log("%d blobs processed", cnt) - return nil + return err }) if err != nil { errs = append(errs, err) @@ -193,8 +193,14 @@ func (c *Checker) LoadIndex(ctx context.Context, p *progress.Counter) (hints []e } // compute pack size using index entries - c.packs = pack.Size(ctx, c.masterIndex, false) - packTypes := computePackTypes(ctx, c.masterIndex) + c.packs, err = pack.Size(ctx, c.masterIndex, false) + if err != nil { + return hints, append(errs, err) + } + packTypes, err := computePackTypes(ctx, c.masterIndex) + if err != nil { + return hints, append(errs, err) + } debug.Log("checking for duplicate packs") for packID := range c.packs { @@ -484,7 +490,7 @@ func (c *Checker) checkTree(id restic.ID, tree *restic.Tree) (errs []error) { } // UnusedBlobs returns all blobs that have never been referenced. -func (c *Checker) UnusedBlobs(ctx context.Context) (blobs restic.BlobHandles) { +func (c *Checker) UnusedBlobs(ctx context.Context) (blobs restic.BlobHandles, err error) { if !c.trackUnused { panic("only works when tracking blob references") } @@ -495,7 +501,7 @@ func (c *Checker) UnusedBlobs(ctx context.Context) (blobs restic.BlobHandles) { ctx, cancel := context.WithCancel(ctx) defer cancel() - c.repo.Index().Each(ctx, func(blob restic.PackedBlob) { + err = c.repo.Index().Each(ctx, func(blob restic.PackedBlob) { h := restic.BlobHandle{ID: blob.ID, Type: blob.Type} if !c.blobRefs.M.Has(h) { debug.Log("blob %v not referenced", h) @@ -503,7 +509,7 @@ func (c *Checker) UnusedBlobs(ctx context.Context) (blobs restic.BlobHandles) { } }) - return blobs + return blobs, err } // CountPacks returns the number of packs in the repository. 
diff --git a/internal/checker/checker_test.go b/internal/checker/checker_test.go index cca5a582c..9746e9f5d 100644 --- a/internal/checker/checker_test.go +++ b/internal/checker/checker_test.go @@ -72,11 +72,9 @@ func assertOnlyMixedPackHints(t *testing.T, hints []error) { } func TestCheckRepo(t *testing.T) { - repodir, cleanup := test.Env(t, checkerTestData) + repo, cleanup := repository.TestFromFixture(t, checkerTestData) defer cleanup() - repo := repository.TestOpenLocal(t, repodir) - chkr := checker.New(repo, false) hints, errs := chkr.LoadIndex(context.TODO(), nil) if len(errs) > 0 { @@ -92,11 +90,9 @@ func TestCheckRepo(t *testing.T) { } func TestMissingPack(t *testing.T) { - repodir, cleanup := test.Env(t, checkerTestData) + repo, cleanup := repository.TestFromFixture(t, checkerTestData) defer cleanup() - repo := repository.TestOpenLocal(t, repodir) - packHandle := backend.Handle{ Type: restic.PackFile, Name: "657f7fb64f6a854fff6fe9279998ee09034901eded4e6db9bcee0e59745bbce6", @@ -123,11 +119,9 @@ func TestMissingPack(t *testing.T) { } func TestUnreferencedPack(t *testing.T) { - repodir, cleanup := test.Env(t, checkerTestData) + repo, cleanup := repository.TestFromFixture(t, checkerTestData) defer cleanup() - repo := repository.TestOpenLocal(t, repodir) - // index 3f1a only references pack 60e0 packID := "60e0438dcb978ec6860cc1f8c43da648170ee9129af8f650f876bad19f8f788e" indexHandle := backend.Handle{ @@ -156,11 +150,9 @@ func TestUnreferencedPack(t *testing.T) { } func TestUnreferencedBlobs(t *testing.T) { - repodir, cleanup := test.Env(t, checkerTestData) + repo, cleanup := repository.TestFromFixture(t, checkerTestData) defer cleanup() - repo := repository.TestOpenLocal(t, repodir) - snapshotHandle := backend.Handle{ Type: restic.SnapshotFile, Name: "51d249d28815200d59e4be7b3f21a157b864dc343353df9d8e498220c2499b02", @@ -188,18 +180,17 @@ func TestUnreferencedBlobs(t *testing.T) { test.OKs(t, checkPacks(chkr)) test.OKs(t, checkStruct(chkr)) - blobs := chkr.UnusedBlobs(context.TODO()) + blobs, err := chkr.UnusedBlobs(context.TODO()) + test.OK(t, err) sort.Sort(blobs) test.Equals(t, unusedBlobsBySnapshot, blobs) } func TestModifiedIndex(t *testing.T) { - repodir, cleanup := test.Env(t, checkerTestData) + repo, cleanup := repository.TestFromFixture(t, checkerTestData) defer cleanup() - repo := repository.TestOpenLocal(t, repodir) - done := make(chan struct{}) defer close(done) @@ -274,11 +265,9 @@ func TestModifiedIndex(t *testing.T) { var checkerDuplicateIndexTestData = filepath.Join("testdata", "duplicate-packs-in-index-test-repo.tar.gz") func TestDuplicatePacksInIndex(t *testing.T) { - repodir, cleanup := test.Env(t, checkerDuplicateIndexTestData) + repo, cleanup := repository.TestFromFixture(t, checkerDuplicateIndexTestData) defer cleanup() - repo := repository.TestOpenLocal(t, repodir) - chkr := checker.New(repo, false) hints, errs := chkr.LoadIndex(context.TODO(), nil) if len(hints) == 0 { @@ -342,9 +331,7 @@ func TestCheckerModifiedData(t *testing.T) { t.Logf("archived as %v", sn.ID().Str()) beError := &errorBackend{Backend: repo.Backend()} - checkRepo, err := repository.New(beError, repository.Options{}) - test.OK(t, err) - test.OK(t, checkRepo.SearchKey(context.TODO(), test.TestPassword, 5, "")) + checkRepo := repository.TestOpenBackend(t, beError) chkr := checker.New(checkRepo, false) @@ -399,10 +386,8 @@ func (r *loadTreesOnceRepository) LoadTree(ctx context.Context, id restic.ID) (* } func TestCheckerNoDuplicateTreeDecodes(t *testing.T) { - repodir, cleanup := test.Env(t, 
checkerTestData) + repo, cleanup := repository.TestFromFixture(t, checkerTestData) defer cleanup() - - repo := repository.TestOpenLocal(t, repodir) checkRepo := &loadTreesOnceRepository{ Repository: repo, loadedTrees: restic.NewIDSet(), @@ -549,9 +534,7 @@ func TestCheckerBlobTypeConfusion(t *testing.T) { } func loadBenchRepository(t *testing.B) (*checker.Checker, restic.Repository, func()) { - repodir, cleanup := test.Env(t, checkerTestData) - - repo := repository.TestOpenLocal(t, repodir) + repo, cleanup := repository.TestFromFixture(t, checkerTestData) chkr := checker.New(repo, false) hints, errs := chkr.LoadIndex(context.TODO(), nil) diff --git a/internal/checker/testing.go b/internal/checker/testing.go index fe1679393..d0014398f 100644 --- a/internal/checker/testing.go +++ b/internal/checker/testing.go @@ -8,7 +8,7 @@ import ( ) // TestCheckRepo runs the checker on repo. -func TestCheckRepo(t testing.TB, repo restic.Repository) { +func TestCheckRepo(t testing.TB, repo restic.Repository, skipStructure bool) { chkr := New(repo, true) hints, errs := chkr.LoadIndex(context.TODO(), nil) @@ -33,18 +33,23 @@ func TestCheckRepo(t testing.TB, repo restic.Repository) { t.Error(err) } - // structure - errChan = make(chan error) - go chkr.Structure(context.TODO(), nil, errChan) + if !skipStructure { + // structure + errChan = make(chan error) + go chkr.Structure(context.TODO(), nil, errChan) - for err := range errChan { - t.Error(err) - } + for err := range errChan { + t.Error(err) + } - // unused blobs - blobs := chkr.UnusedBlobs(context.TODO()) - if len(blobs) > 0 { - t.Errorf("unused blobs found: %v", blobs) + // unused blobs + blobs, err := chkr.UnusedBlobs(context.TODO()) + if err != nil { + t.Error(err) + } + if len(blobs) > 0 { + t.Errorf("unused blobs found: %v", blobs) + } } // read data diff --git a/internal/dump/common_test.go b/internal/dump/common_test.go index 3ee9112af..afd19df63 100644 --- a/internal/dump/common_test.go +++ b/internal/dump/common_test.go @@ -78,7 +78,7 @@ func WriteTest(t *testing.T, format string, cd CheckDump) { back := rtest.Chdir(t, tmpdir) defer back() - sn, _, err := arch.Snapshot(ctx, []string{"."}, archiver.SnapshotOptions{}) + sn, _, _, err := arch.Snapshot(ctx, []string{"."}, archiver.SnapshotOptions{}) rtest.OK(t, err) tree, err := restic.LoadTree(ctx, repo, *sn.Tree) diff --git a/internal/feature/features.go b/internal/feature/features.go new file mode 100644 index 000000000..e3b625e92 --- /dev/null +++ b/internal/feature/features.go @@ -0,0 +1,140 @@ +package feature + +import ( + "fmt" + "sort" + "strconv" + "strings" +) + +type state string +type FlagName string + +const ( + // Alpha features are disabled by default. They do not guarantee any backwards compatibility and may change in arbitrary ways between restic versions. + Alpha state = "alpha" + // Beta features are enabled by default. They may still change, but incompatible changes should be avoided. 
+ Beta state = "beta" + // Stable features are always enabled + Stable state = "stable" + // Deprecated features are always disabled + Deprecated state = "deprecated" +) + +type FlagDesc struct { + Type state + Description string +} + +type FlagSet struct { + flags map[FlagName]*FlagDesc + enabled map[FlagName]bool +} + +func New() *FlagSet { + return &FlagSet{} +} + +func getDefault(phase state) bool { + switch phase { + case Alpha, Deprecated: + return false + case Beta, Stable: + return true + default: + panic("unknown feature phase") + } +} + +func (f *FlagSet) SetFlags(flags map[FlagName]FlagDesc) { + f.flags = map[FlagName]*FlagDesc{} + f.enabled = map[FlagName]bool{} + + for name, flag := range flags { + fcopy := flag + f.flags[name] = &fcopy + f.enabled[name] = getDefault(fcopy.Type) + } +} + +func (f *FlagSet) Apply(flags string, logWarning func(string)) error { + if flags == "" { + return nil + } + + selection := make(map[string]bool) + + for _, flag := range strings.Split(flags, ",") { + parts := strings.SplitN(flag, "=", 2) + + name := parts[0] + value := "true" + if len(parts) == 2 { + value = parts[1] + } + + isEnabled, err := strconv.ParseBool(value) + if err != nil { + return fmt.Errorf("failed to parse value %q for feature flag %v: %w", value, name, err) + } + + selection[name] = isEnabled + } + + for name, value := range selection { + fname := FlagName(name) + flag := f.flags[fname] + if flag == nil { + return fmt.Errorf("unknown feature flag %q", name) + } + + switch flag.Type { + case Alpha, Beta: + f.enabled[fname] = value + case Stable: + logWarning(fmt.Sprintf("feature flag %q is always enabled and will be removed in a future release", fname)) + case Deprecated: + logWarning(fmt.Sprintf("feature flag %q is always disabled and will be removed in a future release", fname)) + default: + panic("unknown feature phase") + } + } + + return nil +} + +func (f *FlagSet) Enabled(name FlagName) bool { + isEnabled, ok := f.enabled[name] + if !ok { + panic(fmt.Sprintf("unknown feature flag %v", name)) + } + + return isEnabled +} + +// Help contains information about a feature. 
+type Help struct { + Name string + Type string + Default bool + Description string +} + +func (f *FlagSet) List() []Help { + var help []Help + + for name, flag := range f.flags { + help = append(help, Help{ + Name: string(name), + Type: string(flag.Type), + Default: getDefault(flag.Type), + Description: flag.Description, + }) + } + + sort.Slice(help, func(i, j int) bool { + return strings.Compare(help[i].Name, help[j].Name) < 0 + }) + + return help +} diff --git a/internal/feature/features_test.go b/internal/feature/features_test.go new file mode 100644 index 000000000..f5d405fa7 --- /dev/null +++ b/internal/feature/features_test.go @@ -0,0 +1,151 @@ +package feature_test + +import ( + "fmt" + "strings" + "testing" + + "github.com/restic/restic/internal/feature" + rtest "github.com/restic/restic/internal/test" +) + +var ( + alpha = feature.FlagName("alpha-feature") + beta = feature.FlagName("beta-feature") + stable = feature.FlagName("stable-feature") + deprecated = feature.FlagName("deprecated-feature") +) + +var testFlags = map[feature.FlagName]feature.FlagDesc{ + alpha: { + Type: feature.Alpha, + Description: "alpha", + }, + beta: { + Type: feature.Beta, + Description: "beta", + }, + stable: { + Type: feature.Stable, + Description: "stable", + }, + deprecated: { + Type: feature.Deprecated, + Description: "deprecated", + }, +} + +func buildTestFlagSet() *feature.FlagSet { + flags := feature.New() + flags.SetFlags(testFlags) + return flags +} + +func TestFeatureDefaults(t *testing.T) { + flags := buildTestFlagSet() + for _, exp := range []struct { + flag feature.FlagName + value bool + }{ + {alpha, false}, + {beta, true}, + {stable, true}, + {deprecated, false}, + } { + rtest.Assert(t, flags.Enabled(exp.flag) == exp.value, "expected flag %v to have value %v got %v", exp.flag, exp.value, flags.Enabled(exp.flag)) + } +} + +func panicIfCalled(msg string) { + panic(msg) +} + +func TestEmptyApply(t *testing.T) { + flags := buildTestFlagSet() + rtest.OK(t, flags.Apply("", panicIfCalled)) + + rtest.Assert(t, !flags.Enabled(alpha), "expected alpha feature to be disabled") + rtest.Assert(t, flags.Enabled(beta), "expected beta feature to be enabled") +} + +func TestFeatureApply(t *testing.T) { + flags := buildTestFlagSet() + rtest.OK(t, flags.Apply(string(alpha), panicIfCalled)) + rtest.Assert(t, flags.Enabled(alpha), "expected alpha feature to be enabled") + + rtest.OK(t, flags.Apply(fmt.Sprintf("%s=false", alpha), panicIfCalled)) + rtest.Assert(t, !flags.Enabled(alpha), "expected alpha feature to be disabled") + + rtest.OK(t, flags.Apply(fmt.Sprintf("%s=true", alpha), panicIfCalled)) + rtest.Assert(t, flags.Enabled(alpha), "expected alpha feature to be enabled again") + + rtest.OK(t, flags.Apply(fmt.Sprintf("%s=false", beta), panicIfCalled)) + rtest.Assert(t, !flags.Enabled(beta), "expected beta feature to be disabled") + + logMsg := "" + log := func(msg string) { + logMsg = msg + } + + rtest.OK(t, flags.Apply(fmt.Sprintf("%s=false", stable), log)) + rtest.Assert(t, flags.Enabled(stable), "expected stable feature to remain enabled") + rtest.Assert(t, strings.Contains(logMsg, string(stable)), "unexpected log message for stable flag: %v", logMsg) + + logMsg = "" + rtest.OK(t, flags.Apply(fmt.Sprintf("%s=true", deprecated), log)) + rtest.Assert(t, !flags.Enabled(deprecated), "expected deprecated feature to remain disabled") + rtest.Assert(t, strings.Contains(logMsg, string(deprecated)), "unexpected log message for deprecated flag: %v", logMsg) +} + +func TestFeatureMultipleApply(t *testing.T) { + 
flags := buildTestFlagSet() + + rtest.OK(t, flags.Apply(fmt.Sprintf("%s=true,%s=false", alpha, beta), panicIfCalled)) + rtest.Assert(t, flags.Enabled(alpha), "expected alpha feature to be enabled") + rtest.Assert(t, !flags.Enabled(beta), "expected beta feature to be disabled") +} + +func TestFeatureApplyInvalid(t *testing.T) { + flags := buildTestFlagSet() + + err := flags.Apply("invalid-flag", panicIfCalled) + rtest.Assert(t, err != nil && strings.Contains(err.Error(), "unknown feature flag"), "expected unknown feature flag error, got: %v", err) + + err = flags.Apply(fmt.Sprintf("%v=invalid", alpha), panicIfCalled) + rtest.Assert(t, err != nil && strings.Contains(err.Error(), "failed to parse value"), "expected parsing error, got: %v", err) +} + +func assertPanic(t *testing.T) { + if r := recover(); r == nil { + t.Fatal("should have panicked") + } +} + +func TestFeatureQueryInvalid(t *testing.T) { + defer assertPanic(t) + + flags := buildTestFlagSet() + flags.Enabled("invalid-flag") +} + +func TestFeatureSetInvalidPhase(t *testing.T) { + defer assertPanic(t) + + flags := feature.New() + flags.SetFlags(map[feature.FlagName]feature.FlagDesc{ + "invalid": { + Type: "invalid", + }, + }) +} + +func TestFeatureList(t *testing.T) { + flags := buildTestFlagSet() + + rtest.Equals(t, []feature.Help{ + {string(alpha), string(feature.Alpha), false, "alpha"}, + {string(beta), string(feature.Beta), true, "beta"}, + {string(deprecated), string(feature.Deprecated), false, "deprecated"}, + {string(stable), string(feature.Stable), true, "stable"}, + }, flags.List()) +} diff --git a/internal/feature/registry.go b/internal/feature/registry.go new file mode 100644 index 000000000..2d2e45edf --- /dev/null +++ b/internal/feature/registry.go @@ -0,0 +1,19 @@ +package feature + +// Flag is named such that checking for a feature uses `feature.Flag.Enabled(feature.ExampleFeature)`. +var Flag = New() + +// flag names are written in kebab-case +const ( + DeprecateLegacyIndex FlagName = "deprecate-legacy-index" + DeprecateS3LegacyLayout FlagName = "deprecate-s3-legacy-layout" + DeviceIDForHardlinks FlagName = "device-id-for-hardlinks" +) + +func init() { + Flag.SetFlags(map[FlagName]FlagDesc{ + DeprecateLegacyIndex: {Type: Beta, Description: "disable support for index format used by restic 0.1.0. Use `restic repair index` to update the index if necessary."}, + DeprecateS3LegacyLayout: {Type: Beta, Description: "disable support for S3 legacy layout used up to restic 0.7.0. Use `RESTIC_FEATURES=deprecate-s3-legacy-layout=false restic migrate s3_layout` to migrate your S3 repository if necessary."}, + DeviceIDForHardlinks: {Type: Alpha, Description: "store deviceID only for hardlinks to reduce metadata changes for example when using btrfs subvolumes. Will be removed in a future restic version after repository format 3 is available"}, + }) +} diff --git a/internal/feature/testing.go b/internal/feature/testing.go new file mode 100644 index 000000000..b796e89b5 --- /dev/null +++ b/internal/feature/testing.go @@ -0,0 +1,33 @@ +package feature + +import ( + "fmt" + "testing" +) + +// TestSetFlag temporarily sets a feature flag to the given value until the +// returned function is called. 
+// +// Usage +// ``` +// defer feature.TestSetFlag(t, feature.Flag, feature.ExampleFlag, true)() +// ``` +func TestSetFlag(t *testing.T, f *FlagSet, flag FlagName, value bool) func() { + current := f.Enabled(flag) + + panicIfCalled := func(msg string) { + panic(msg) + } + + if err := f.Apply(fmt.Sprintf("%s=%v", flag, value), panicIfCalled); err != nil { + // not reachable + panic(err) + } + + return func() { + if err := f.Apply(fmt.Sprintf("%s=%v", flag, current), panicIfCalled); err != nil { + // not reachable + panic(err) + } + } +} diff --git a/internal/feature/testing_test.go b/internal/feature/testing_test.go new file mode 100644 index 000000000..f11b4bae4 --- /dev/null +++ b/internal/feature/testing_test.go @@ -0,0 +1,19 @@ +package feature_test + +import ( + "testing" + + "github.com/restic/restic/internal/feature" + rtest "github.com/restic/restic/internal/test" +) + +func TestSetFeatureFlag(t *testing.T) { + flags := buildTestFlagSet() + rtest.Assert(t, !flags.Enabled(alpha), "expected alpha feature to be disabled") + + restore := feature.TestSetFlag(t, flags, alpha, true) + rtest.Assert(t, flags.Enabled(alpha), "expected alpha feature to be enabled") + + restore() + rtest.Assert(t, !flags.Enabled(alpha), "expected alpha feature to be disabled again") +} diff --git a/internal/fs/fs_local_vss.go b/internal/fs/fs_local_vss.go index aa3522aea..48ab165f1 100644 --- a/internal/fs/fs_local_vss.go +++ b/internal/fs/fs_local_vss.go @@ -3,41 +3,108 @@ package fs import ( "os" "path/filepath" + "runtime" "strings" "sync" + "time" "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/options" ) -// ErrorHandler is used to report errors via callback -type ErrorHandler func(item string, err error) error +// VSSConfig holds the extended options of the Windows volume shadow copy service. +type VSSConfig struct { + ExcludeAllMountPoints bool `option:"exclude-all-mount-points" help:"exclude mountpoints from snapshotting on all volumes"` + ExcludeVolumes string `option:"exclude-volumes" help:"semicolon separated list of volumes to exclude from snapshotting (ex. 'c:\\;e:\\mnt;\\\\?\\Volume{...}')"` + Timeout time.Duration `option:"timeout" help:"time that the VSS can spend creating snapshot before timing out"` + Provider string `option:"provider" help:"VSS provider identifier which will be used for snapshotting"` +} + +func init() { + if runtime.GOOS == "windows" { + options.Register("vss", VSSConfig{}) + } +} + +// NewVSSConfig returns a new VSSConfig with the default values filled in. +func NewVSSConfig() VSSConfig { + return VSSConfig{ + Timeout: time.Second * 120, + } +} + +// ParseVSSConfig parses VSS extended options into a VSSConfig struct. +func ParseVSSConfig(o options.Options) (VSSConfig, error) { + cfg := NewVSSConfig() + o = o.Extract("vss") + if err := o.Apply("vss", &cfg); err != nil { + return VSSConfig{}, err + } + + return cfg, nil +} + +// ErrorHandler is used to report errors via callback. +type ErrorHandler func(item string, err error) // MessageHandler is used to report errors/messages via callbacks. type MessageHandler func(msg string, args ...interface{}) +// VolumeFilter is used to filter volumes by their mount point or GUID path. +type VolumeFilter func(volume string) bool + // LocalVss is a wrapper around the local file system which uses windows volume // shadow copy service (VSS) in a transparent way.
type LocalVss struct { FS - snapshots map[string]VssSnapshot - failedSnapshots map[string]struct{} - mutex sync.RWMutex - msgError ErrorHandler - msgMessage MessageHandler + snapshots map[string]VssSnapshot + failedSnapshots map[string]struct{} + mutex sync.RWMutex + msgError ErrorHandler + msgMessage MessageHandler + excludeAllMountPoints bool + excludeVolumes map[string]struct{} + timeout time.Duration + provider string } // statically ensure that LocalVss implements FS. var _ FS = &LocalVss{} +// parseMountPoints tries to convert a semicolon-separated list of mount points +// to a map of lowercased volume GUID paths. Mount points already in volume +// GUID path format will be validated and normalized. +func parseMountPoints(list string, msgError ErrorHandler) (volumes map[string]struct{}) { + if list == "" { + return + } + for _, s := range strings.Split(list, ";") { + if v, err := GetVolumeNameForVolumeMountPoint(s); err != nil { + msgError(s, errors.Errorf("failed to parse vss.exclude-volumes [%s]: %s", s, err)) + } else { + if volumes == nil { + volumes = make(map[string]struct{}) + } + volumes[strings.ToLower(v)] = struct{}{} + } + } + + return +} + // NewLocalVss creates a new wrapper around the windows filesystem using volume // shadow copy service to access locked files. -func NewLocalVss(msgError ErrorHandler, msgMessage MessageHandler) *LocalVss { +func NewLocalVss(msgError ErrorHandler, msgMessage MessageHandler, cfg VSSConfig) *LocalVss { return &LocalVss{ - FS: Local{}, - snapshots: make(map[string]VssSnapshot), - failedSnapshots: make(map[string]struct{}), - msgError: msgError, - msgMessage: msgMessage, + FS: Local{}, + snapshots: make(map[string]VssSnapshot), + failedSnapshots: make(map[string]struct{}), + msgError: msgError, + msgMessage: msgMessage, + excludeAllMountPoints: cfg.ExcludeAllMountPoints, + excludeVolumes: parseMountPoints(cfg.ExcludeVolumes, msgError), + timeout: cfg.Timeout, + provider: cfg.Provider, } } @@ -50,7 +117,7 @@ func (fs *LocalVss) DeleteSnapshots() { for volumeName, snapshot := range fs.snapshots { if err := snapshot.Delete(); err != nil { - _ = fs.msgError(volumeName, errors.Errorf("failed to delete VSS snapshot: %s", err)) + fs.msgError(volumeName, errors.Errorf("failed to delete VSS snapshot: %s", err)) activeSnapshots[volumeName] = snapshot } } @@ -78,12 +145,27 @@ func (fs *LocalVss) Lstat(name string) (os.FileInfo, error) { return os.Lstat(fs.snapshotPath(name)) } +// isMountPointIncluded returns true if the given mount point is included by the user. +func (fs *LocalVss) isMountPointIncluded(mountPoint string) bool { + if fs.excludeVolumes == nil { + return true + } + + volume, err := GetVolumeNameForVolumeMountPoint(mountPoint) + if err != nil { + fs.msgError(mountPoint, errors.Errorf("failed to get volume from mount point [%s]: %s", mountPoint, err)) + return true + } + + _, ok := fs.excludeVolumes[strings.ToLower(volume)] + return !ok +} + // snapshotPath returns the path inside a VSS snapshot if it already exists. // If the path is not yet available as a snapshot, a snapshot is created. // If creation of a snapshot fails the file's original path is returned as // a fallback.
func (fs *LocalVss) snapshotPath(path string) string { - fixPath := fixpath(path) if strings.HasPrefix(fixPath, `\\?\UNC\`) { @@ -114,23 +196,36 @@ func (fs *LocalVss) snapshotPath(path string) string { if !snapshotExists && !snapshotFailed { vssVolume := volumeNameLower + string(filepath.Separator) - fs.msgMessage("creating VSS snapshot for [%s]\n", vssVolume) - if snapshot, err := NewVssSnapshot(vssVolume, 120, fs.msgError); err != nil { - _ = fs.msgError(vssVolume, errors.Errorf("failed to create snapshot for [%s]: %s", - vssVolume, err)) + if !fs.isMountPointIncluded(vssVolume) { + fs.msgMessage("snapshots for [%s] excluded by user\n", vssVolume) fs.failedSnapshots[volumeNameLower] = struct{}{} } else { - fs.snapshots[volumeNameLower] = snapshot - fs.msgMessage("successfully created snapshot for [%s]\n", vssVolume) - if len(snapshot.mountPointInfo) > 0 { - fs.msgMessage("mountpoints in snapshot volume [%s]:\n", vssVolume) - for mp, mpInfo := range snapshot.mountPointInfo { - info := "" - if !mpInfo.IsSnapshotted() { - info = " (not snapshotted)" + fs.msgMessage("creating VSS snapshot for [%s]\n", vssVolume) + + var includeVolume VolumeFilter + if !fs.excludeAllMountPoints { + includeVolume = func(volume string) bool { + return fs.isMountPointIncluded(volume) + } + } + + if snapshot, err := NewVssSnapshot(fs.provider, vssVolume, fs.timeout, includeVolume, fs.msgError); err != nil { + fs.msgError(vssVolume, errors.Errorf("failed to create snapshot for [%s]: %s", + vssVolume, err)) + fs.failedSnapshots[volumeNameLower] = struct{}{} + } else { + fs.snapshots[volumeNameLower] = snapshot + fs.msgMessage("successfully created snapshot for [%s]\n", vssVolume) + if len(snapshot.mountPointInfo) > 0 { + fs.msgMessage("mountpoints in snapshot volume [%s]:\n", vssVolume) + for mp, mpInfo := range snapshot.mountPointInfo { + info := "" + if !mpInfo.IsSnapshotted() { + info = " (not snapshotted)" + } + fs.msgMessage(" - %s%s\n", mp, info) } - fs.msgMessage(" - %s%s\n", mp, info) } } } @@ -173,9 +268,8 @@ func (fs *LocalVss) snapshotPath(path string) string { snapshotPath = fs.Join(snapshot.GetSnapshotDeviceObject(), strings.TrimPrefix(fixPath, volumeName)) if snapshotPath == snapshot.GetSnapshotDeviceObject() { - snapshotPath = snapshotPath + string(filepath.Separator) + snapshotPath += string(filepath.Separator) } - } else { // no snapshot is available for the requested path: // -> try to backup without a snapshot diff --git a/internal/fs/fs_local_vss_test.go b/internal/fs/fs_local_vss_test.go new file mode 100644 index 000000000..60262c873 --- /dev/null +++ b/internal/fs/fs_local_vss_test.go @@ -0,0 +1,285 @@ +// +build windows + +package fs + +import ( + "fmt" + "regexp" + "strings" + "testing" + "time" + + ole "github.com/go-ole/go-ole" + "github.com/restic/restic/internal/options" +) + +func matchStrings(ptrs []string, strs []string) bool { + if len(ptrs) != len(strs) { + return false + } + + for i, p := range ptrs { + if p == "" { + return false + } + matched, err := regexp.MatchString(p, strs[i]) + if err != nil { + panic(err) + } + if !matched { + return false + } + } + + return true +} + +func matchMap(strs []string, m map[string]struct{}) bool { + if len(strs) != len(m) { + return false + } + + for _, s := range strs { + if _, ok := m[s]; !ok { + return false + } + } + + return true +} + +func TestVSSConfig(t *testing.T) { + type config struct { + excludeAllMountPoints bool + timeout time.Duration + provider string + } + setTests := []struct { + input options.Options + output config + }{ + 
{
+			options.Options{
+				"vss.timeout":  "6h38m42s",
+				"vss.provider": "Ms",
+			},
+			config{
+				timeout:  23922000000000,
+				provider: "Ms",
+			},
+		},
+		{
+			options.Options{
+				"vss.exclude-all-mount-points": "t",
+				"vss.provider":                 "{b5946137-7b9f-4925-af80-51abd60b20d5}",
+			},
+			config{
+				excludeAllMountPoints: true,
+				timeout:               120000000000,
+				provider:              "{b5946137-7b9f-4925-af80-51abd60b20d5}",
+			},
+		},
+		{
+			options.Options{
+				"vss.exclude-all-mount-points": "0",
+				"vss.exclude-volumes":          "",
+				"vss.timeout":                  "120s",
+				"vss.provider":                 "Microsoft Software Shadow Copy provider 1.0",
+			},
+			config{
+				timeout:  120000000000,
+				provider: "Microsoft Software Shadow Copy provider 1.0",
+			},
+		},
+	}
+	for i, test := range setTests {
+		t.Run(fmt.Sprintf("test-%d", i), func(t *testing.T) {
+			cfg, err := ParseVSSConfig(test.input)
+			if err != nil {
+				t.Fatal(err)
+			}
+
+			errorHandler := func(item string, err error) {
+				t.Fatalf("unexpected error (%v)", err)
+			}
+			messageHandler := func(msg string, args ...interface{}) {
+				t.Fatalf("unexpected message (%s)", fmt.Sprintf(msg, args))
+			}
+
+			dst := NewLocalVss(errorHandler, messageHandler, cfg)
+
+			if dst.excludeAllMountPoints != test.output.excludeAllMountPoints ||
+				dst.excludeVolumes != nil || dst.timeout != test.output.timeout ||
+				dst.provider != test.output.provider {
+				t.Fatalf("wrong result, want:\n  %#v\ngot:\n  %#v", test.output, dst)
+			}
+		})
+	}
+}
+
+func TestParseMountPoints(t *testing.T) {
+	volumeMatch := regexp.MustCompile(`^\\\\\?\\Volume\{[0-9a-f]{8}(?:-[0-9a-f]{4}){3}-[0-9a-f]{12}\}\\$`)
+
+	// It's not a good idea to test functions based on GetVolumeNameForVolumeMountPoint by calling
+	// GetVolumeNameForVolumeMountPoint itself, but we have a restricted test environment:
+	// we cannot manage volumes and can only be sure that the mount point C:\ exists
+	sysVolume, err := GetVolumeNameForVolumeMountPoint("C:")
+	if err != nil {
+		t.Fatal(err)
+	}
+	// We don't know a valid volume GUID path for c:\, but we'll at least check its format
+	if !volumeMatch.MatchString(sysVolume) {
+		t.Fatalf("invalid volume GUID path: %s", sysVolume)
+	}
+	// Changing the case and removing the trailing backslash allows testing
+	// the equality of different ways of writing a volume name
+	sysVolumeMutated := strings.ToUpper(sysVolume[:len(sysVolume)-1])
+	sysVolumeMatch := strings.ToLower(sysVolume)
+
+	type check struct {
+		volume string
+		result bool
+	}
+	setTests := []struct {
+		input  options.Options
+		output []string
+		checks []check
+		errors []string
+	}{
+		{
+			options.Options{
+				"vss.exclude-volumes": `c:;c:\;` + sysVolume + `;` + sysVolumeMutated,
+			},
+			[]string{
+				sysVolumeMatch,
+			},
+			[]check{
+				{`c:\`, false},
+				{`c:`, false},
+				{sysVolume, false},
+				{sysVolumeMutated, false},
+			},
+			[]string{},
+		},
+		{
+			options.Options{
+				"vss.exclude-volumes": `z:\nonexistent;c:;c:\windows\;\\?\Volume{39b9cac2-bcdb-4d51-97c8-0d0677d607fb}\`,
+			},
+			[]string{
+				sysVolumeMatch,
+			},
+			[]check{
+				{`c:\windows\`, true},
+				{`\\?\Volume{39b9cac2-bcdb-4d51-97c8-0d0677d607fb}\`, true},
+				{`c:`, false},
+				{``, true},
+			},
+			[]string{
+				`failed to parse vss\.exclude-volumes \[z:\\nonexistent\]:.*`,
+				`failed to parse vss\.exclude-volumes \[c:\\windows\\\]:.*`,
+				`failed to parse vss\.exclude-volumes \[\\\\\?\\Volume\{39b9cac2-bcdb-4d51-97c8-0d0677d607fb\}\\\]:.*`,
+				`failed to get volume from mount point \[c:\\windows\\\]:.*`,
+				`failed to get volume from mount point \[\\\\\?\\Volume\{39b9cac2-bcdb-4d51-97c8-0d0677d607fb\}\\\]:.*`,
+				`failed to get volume from mount point \[\]:.*`,
+			},
+		},
+	}
+
+	for i, test := range setTests {
+		t.Run(fmt.Sprintf("test-%d", i), func(t *testing.T) {
+			cfg, err := ParseVSSConfig(test.input)
+			if err != nil {
+				t.Fatal(err)
+			}
+
+			var log []string
+			errorHandler := func(item string, err error) {
+				log = append(log, strings.TrimSpace(err.Error()))
+			}
+			messageHandler := func(msg string, args ...interface{}) {
+				t.Fatalf("unexpected message (%s)", fmt.Sprintf(msg, args))
+			}
+
+			dst := NewLocalVss(errorHandler, messageHandler, cfg)
+
+			if !matchMap(test.output, dst.excludeVolumes) {
+				t.Fatalf("wrong result, want:\n  %#v\ngot:\n  %#v",
+					test.output, dst.excludeVolumes)
+			}
+
+			for _, c := range test.checks {
+				if dst.isMountPointIncluded(c.volume) != c.result {
+					t.Fatalf(`wrong check: isMountPointIncluded("%s") != %v`, c.volume, c.result)
+				}
+			}
+
+			if !matchStrings(test.errors, log) {
+				t.Fatalf("wrong log, want:\n  %#v\ngot:\n  %#v", test.errors, log)
+			}
+		})
+	}
+}
+
+func TestParseProvider(t *testing.T) {
+	msProvider := ole.NewGUID("{b5946137-7b9f-4925-af80-51abd60b20d5}")
+	setTests := []struct {
+		provider string
+		id       *ole.GUID
+		result   string
+	}{
+		{
+			"",
+			ole.IID_NULL,
+			"",
+		},
+		{
+			"mS",
+			msProvider,
+			"",
+		},
+		{
+			"{B5946137-7b9f-4925-Af80-51abD60b20d5}",
+			msProvider,
+			"",
+		},
+		{
+			"Microsoft Software Shadow Copy provider 1.0",
+			msProvider,
+			"",
+		},
+		{
+			"{04560982-3d7d-4bbc-84f7-0712f833a28f}",
+			nil,
+			`invalid VSS provider "{04560982-3d7d-4bbc-84f7-0712f833a28f}"`,
+		},
+		{
+			"non-existent provider",
+			nil,
+			`invalid VSS provider "non-existent provider"`,
+		},
+	}
+
+	_ = ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED)
+
+	for i, test := range setTests {
+		t.Run(fmt.Sprintf("test-%d", i), func(t *testing.T) {
+			id, err := getProviderID(test.provider)
+
+			if err != nil && id != nil {
+				t.Fatalf("err!=nil but id=%v", id)
+			}
+
+			if test.result != "" || err != nil {
+				var result string
+				if err != nil {
+					result = err.Error()
+				}
+				if test.result != result || test.result == "" {
+					t.Fatalf("wrong result, want:\n  %#v\ngot:\n  %#v", test.result, result)
+				}
+			} else if !ole.IsEqualGUID(id, test.id) {
+				t.Fatalf("wrong id, want:\n  %s\ngot:\n  %s", test.id.String(), id.String())
+			}
+		})
+	}
+}
diff --git a/internal/fs/stat_test.go b/internal/fs/stat_test.go
index a5ec77c7a..d52415c1d 100644
--- a/internal/fs/stat_test.go
+++ b/internal/fs/stat_test.go
@@ -5,11 +5,11 @@ import (
 	"path/filepath"
 	"testing"
 
-	restictest "github.com/restic/restic/internal/test"
+	rtest "github.com/restic/restic/internal/test"
 )
 
 func TestExtendedStat(t *testing.T) {
-	tempdir := restictest.TempDir(t)
+	tempdir := rtest.TempDir(t)
 	filename := filepath.Join(tempdir, "file")
 	err := os.WriteFile(filename, []byte("foobar"), 0640)
 	if err != nil {
diff --git a/internal/fs/vss.go b/internal/fs/vss.go
index 5f0ea36d9..8bfffab71 100644
--- a/internal/fs/vss.go
+++ b/internal/fs/vss.go
@@ -4,6 +4,8 @@
 package fs
 
 import (
+	"time"
+
 	"github.com/restic/restic/internal/errors"
 )
 
@@ -31,10 +33,16 @@ func HasSufficientPrivilegesForVSS() error {
 	return errors.New("VSS snapshots are only supported on windows")
 }
 
+// GetVolumeNameForVolumeMountPoint adds a trailing backslash to the input
+// parameter and calls the equivalent windows api.
+func GetVolumeNameForVolumeMountPoint(mountPoint string) (string, error) {
+	return mountPoint, nil
+}
+
 // NewVssSnapshot creates a new vss snapshot. If creating the snapshot doesn't
 // finish within the timeout an error is returned.
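// On non-Windows builds this is a stub that always returns an error; the
// signature mirrors the Windows implementation so cross-platform callers
// compile unchanged.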
-func NewVssSnapshot( - _ string, _ uint, _ ErrorHandler) (VssSnapshot, error) { +func NewVssSnapshot(_ string, + _ string, _ time.Duration, _ VolumeFilter, _ ErrorHandler) (VssSnapshot, error) { return VssSnapshot{}, errors.New("VSS snapshots are only supported on windows") } diff --git a/internal/fs/vss_windows.go b/internal/fs/vss_windows.go index 8c9b8942b..0b51b00f3 100644 --- a/internal/fs/vss_windows.go +++ b/internal/fs/vss_windows.go @@ -5,10 +5,12 @@ package fs import ( "fmt" + "math" "path/filepath" "runtime" "strings" "syscall" + "time" "unsafe" ole "github.com/go-ole/go-ole" @@ -20,8 +22,10 @@ import ( type HRESULT uint // HRESULT constant values necessary for using VSS api. +//nolint:golint const ( S_OK HRESULT = 0x00000000 + S_FALSE HRESULT = 0x00000001 E_ACCESSDENIED HRESULT = 0x80070005 E_OUTOFMEMORY HRESULT = 0x8007000E E_INVALIDARG HRESULT = 0x80070057 @@ -190,7 +194,7 @@ func (e *vssError) Error() string { return fmt.Sprintf("VSS error: %s: %s (%#x)", e.text, e.hresult.Str(), e.hresult) } -// VssError encapsulates errors returned from calling VSS api. +// vssTextError encapsulates errors returned from calling VSS api. type vssTextError struct { text string } @@ -255,6 +259,7 @@ type IVssBackupComponents struct { } // IVssBackupComponentsVTable is the vtable for IVssBackupComponents. +// nolint:structcheck type IVssBackupComponentsVTable struct { ole.IUnknownVtbl getWriterComponentsCount uintptr @@ -364,7 +369,7 @@ func (vss *IVssBackupComponents) convertToVSSAsync( } // IsVolumeSupported calls the equivalent VSS api. -func (vss *IVssBackupComponents) IsVolumeSupported(volumeName string) (bool, error) { +func (vss *IVssBackupComponents) IsVolumeSupported(providerID *ole.GUID, volumeName string) (bool, error) { volumeNamePointer, err := syscall.UTF16PtrFromString(volumeName) if err != nil { panic(err) @@ -374,7 +379,7 @@ func (vss *IVssBackupComponents) IsVolumeSupported(volumeName string) (bool, err var result uintptr if runtime.GOARCH == "386" { - id := (*[4]uintptr)(unsafe.Pointer(ole.IID_NULL)) + id := (*[4]uintptr)(unsafe.Pointer(providerID)) result, _, _ = syscall.Syscall9(vss.getVTable().isVolumeSupported, 7, uintptr(unsafe.Pointer(vss)), id[0], id[1], id[2], id[3], @@ -382,7 +387,7 @@ func (vss *IVssBackupComponents) IsVolumeSupported(volumeName string) (bool, err 0) } else { result, _, _ = syscall.Syscall6(vss.getVTable().isVolumeSupported, 4, - uintptr(unsafe.Pointer(vss)), uintptr(unsafe.Pointer(ole.IID_NULL)), + uintptr(unsafe.Pointer(vss)), uintptr(unsafe.Pointer(providerID)), uintptr(unsafe.Pointer(volumeNamePointer)), uintptr(unsafe.Pointer(&isSupportedRaw)), 0, 0) } @@ -408,24 +413,24 @@ func (vss *IVssBackupComponents) StartSnapshotSet() (ole.GUID, error) { } // AddToSnapshotSet calls the equivalent VSS api. 
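// Passing a concrete provider ID here (instead of the previous ole.IID_NULL)
// asks VSS to create the snapshot with the selected provider rather than the
// system default.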
-func (vss *IVssBackupComponents) AddToSnapshotSet(volumeName string, idSnapshot *ole.GUID) error { +func (vss *IVssBackupComponents) AddToSnapshotSet(volumeName string, providerID *ole.GUID, idSnapshot *ole.GUID) error { volumeNamePointer, err := syscall.UTF16PtrFromString(volumeName) if err != nil { panic(err) } - var result uintptr = 0 + var result uintptr if runtime.GOARCH == "386" { - id := (*[4]uintptr)(unsafe.Pointer(ole.IID_NULL)) + id := (*[4]uintptr)(unsafe.Pointer(providerID)) result, _, _ = syscall.Syscall9(vss.getVTable().addToSnapshotSet, 7, - uintptr(unsafe.Pointer(vss)), uintptr(unsafe.Pointer(volumeNamePointer)), id[0], id[1], - id[2], id[3], uintptr(unsafe.Pointer(idSnapshot)), 0, 0) + uintptr(unsafe.Pointer(vss)), uintptr(unsafe.Pointer(volumeNamePointer)), + id[0], id[1], id[2], id[3], uintptr(unsafe.Pointer(idSnapshot)), 0, 0) } else { result, _, _ = syscall.Syscall6(vss.getVTable().addToSnapshotSet, 4, uintptr(unsafe.Pointer(vss)), uintptr(unsafe.Pointer(volumeNamePointer)), - uintptr(unsafe.Pointer(ole.IID_NULL)), uintptr(unsafe.Pointer(idSnapshot)), 0, 0) + uintptr(unsafe.Pointer(providerID)), uintptr(unsafe.Pointer(idSnapshot)), 0, 0) } return newVssErrorIfResultNotOK("AddToSnapshotSet() failed", HRESULT(result)) @@ -478,9 +483,9 @@ func (vss *IVssBackupComponents) DoSnapshotSet() (*IVSSAsync, error) { // DeleteSnapshots calls the equivalent VSS api. func (vss *IVssBackupComponents) DeleteSnapshots(snapshotID ole.GUID) (int32, ole.GUID, error) { - var deletedSnapshots int32 = 0 + var deletedSnapshots int32 var nondeletedSnapshotID ole.GUID - var result uintptr = 0 + var result uintptr if runtime.GOARCH == "386" { id := (*[4]uintptr)(unsafe.Pointer(&snapshotID)) @@ -504,7 +509,7 @@ func (vss *IVssBackupComponents) DeleteSnapshots(snapshotID ole.GUID) (int32, ol // GetSnapshotProperties calls the equivalent VSS api. func (vss *IVssBackupComponents) GetSnapshotProperties(snapshotID ole.GUID, properties *VssSnapshotProperties) error { - var result uintptr = 0 + var result uintptr if runtime.GOARCH == "386" { id := (*[4]uintptr)(unsafe.Pointer(&snapshotID)) @@ -527,8 +532,8 @@ func vssFreeSnapshotProperties(properties *VssSnapshotProperties) error { if err != nil { return err } - - proc.Call(uintptr(unsafe.Pointer(properties))) + // this function always succeeds and returns no value + _, _, _ = proc.Call(uintptr(unsafe.Pointer(properties))) return nil } @@ -543,6 +548,7 @@ func (vss *IVssBackupComponents) BackupComplete() (*IVSSAsync, error) { } // VssSnapshotProperties defines the properties of a VSS snapshot as part of the VSS api. +// nolint:structcheck type VssSnapshotProperties struct { snapshotID ole.GUID snapshotSetID ole.GUID @@ -559,6 +565,24 @@ type VssSnapshotProperties struct { status uint } +// VssProviderProperties defines the properties of a VSS provider as part of the VSS api. +// nolint:structcheck +type VssProviderProperties struct { + providerID ole.GUID + providerName *uint16 + providerType uint32 + providerVersion *uint16 + providerVersionID ole.GUID + classID ole.GUID +} + +func vssFreeProviderProperties(p *VssProviderProperties) { + ole.CoTaskMemFree(uintptr(unsafe.Pointer(p.providerName))) + p.providerName = nil + ole.CoTaskMemFree(uintptr(unsafe.Pointer(p.providerVersion))) + p.providerVersion = nil +} + // GetSnapshotDeviceObject returns root path to access the snapshot files // and folders. 
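// The returned path typically looks like
// \\?\GLOBALROOT\Device\HarddiskVolumeShadowCopyN (N is assigned by VSS).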
func (p *VssSnapshotProperties) GetSnapshotDeviceObject() string { @@ -617,8 +641,13 @@ func (vssAsync *IVSSAsync) QueryStatus() (HRESULT, uint32) { // WaitUntilAsyncFinished waits until either the async call is finished or // the given timeout is reached. -func (vssAsync *IVSSAsync) WaitUntilAsyncFinished(millis uint32) error { - hresult := vssAsync.Wait(millis) +func (vssAsync *IVSSAsync) WaitUntilAsyncFinished(timeout time.Duration) error { + const maxTimeout = math.MaxInt32 * time.Millisecond + if timeout > maxTimeout { + timeout = maxTimeout + } + + hresult := vssAsync.Wait(uint32(timeout.Milliseconds())) err := newVssErrorIfResultNotOK("Wait() failed", hresult) if err != nil { vssAsync.Cancel() @@ -651,6 +680,75 @@ func (vssAsync *IVSSAsync) WaitUntilAsyncFinished(millis uint32) error { return nil } +// UIID_IVSS_ADMIN defines the GUID of IVSSAdmin. +var ( + UIID_IVSS_ADMIN = ole.NewGUID("{77ED5996-2F63-11d3-8A39-00C04F72D8E3}") + CLSID_VSS_COORDINATOR = ole.NewGUID("{E579AB5F-1CC4-44b4-BED9-DE0991FF0623}") +) + +// IVSSAdmin VSS api interface. +type IVSSAdmin struct { + ole.IUnknown +} + +// IVSSAdminVTable is the vtable for IVSSAdmin. +// nolint:structcheck +type IVSSAdminVTable struct { + ole.IUnknownVtbl + registerProvider uintptr + unregisterProvider uintptr + queryProviders uintptr + abortAllSnapshotsInProgress uintptr +} + +// getVTable returns the vtable for IVSSAdmin. +func (vssAdmin *IVSSAdmin) getVTable() *IVSSAdminVTable { + return (*IVSSAdminVTable)(unsafe.Pointer(vssAdmin.RawVTable)) +} + +// QueryProviders calls the equivalent VSS api. +func (vssAdmin *IVSSAdmin) QueryProviders() (*IVssEnumObject, error) { + var enum *IVssEnumObject + + result, _, _ := syscall.Syscall(vssAdmin.getVTable().queryProviders, 2, + uintptr(unsafe.Pointer(vssAdmin)), uintptr(unsafe.Pointer(&enum)), 0) + + return enum, newVssErrorIfResultNotOK("QueryProviders() failed", HRESULT(result)) +} + +// IVssEnumObject VSS api interface. +type IVssEnumObject struct { + ole.IUnknown +} + +// IVssEnumObjectVTable is the vtable for IVssEnumObject. +// nolint:structcheck +type IVssEnumObjectVTable struct { + ole.IUnknownVtbl + next uintptr + skip uintptr + reset uintptr + clone uintptr +} + +// getVTable returns the vtable for IVssEnumObject. +func (vssEnum *IVssEnumObject) getVTable() *IVssEnumObjectVTable { + return (*IVssEnumObjectVTable)(unsafe.Pointer(vssEnum.RawVTable)) +} + +// Next calls the equivalent VSS api. +func (vssEnum *IVssEnumObject) Next(count uint, props unsafe.Pointer) (uint, error) { + var fetched uint32 + result, _, _ := syscall.Syscall6(vssEnum.getVTable().next, 4, + uintptr(unsafe.Pointer(vssEnum)), uintptr(count), uintptr(props), + uintptr(unsafe.Pointer(&fetched)), 0, 0) + if HRESULT(result) == S_FALSE { + return uint(fetched), nil + } + + return uint(fetched), newVssErrorIfResultNotOK("Next() failed", HRESULT(result)) +} + // MountPoint wraps all information of a snapshot of a mountpoint on a volume. 
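// A mount point that was not (or could not be) snapshotted is still recorded,
// with isSnapshotted == false, so that the backup can fall back to the live
// file system below it.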
 type MountPoint struct {
 	isSnapshotted bool
@@ -677,7 +775,7 @@ type VssSnapshot struct {
 	snapshotProperties   VssSnapshotProperties
 	snapshotDeviceObject string
 	mountPointInfo       map[string]MountPoint
-	timeoutInMillis      uint32
+	timeout              time.Duration
 }
 
 // GetSnapshotDeviceObject returns root path to access the snapshot files
@@ -694,7 +792,12 @@ func initializeVssCOMInterface() (*ole.IUnknown, error) {
 	}
 
 	// ensure COM is initialized before use
-	ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED)
+	if err = ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED); err != nil {
+		// CoInitializeEx returns S_FALSE if COM is already initialized
+		if oleErr, ok := err.(*ole.OleError); !ok || HRESULT(oleErr.Code()) != S_FALSE {
+			return nil, err
+		}
+	}
 
 	var oleIUnknown *ole.IUnknown
 	result, _, _ := vssInstance.Call(uintptr(unsafe.Pointer(&oleIUnknown)))
@@ -727,12 +830,34 @@ func HasSufficientPrivilegesForVSS() error {
 	return err
 }
 
+// GetVolumeNameForVolumeMountPoint adds a trailing backslash to the input
+// parameter and calls the equivalent windows api.
+func GetVolumeNameForVolumeMountPoint(mountPoint string) (string, error) {
+	if mountPoint != "" && mountPoint[len(mountPoint)-1] != filepath.Separator {
+		mountPoint += string(filepath.Separator)
+	}
+
+	mountPointPointer, err := syscall.UTF16PtrFromString(mountPoint)
+	if err != nil {
+		return mountPoint, err
+	}
+
+	// A reasonable size for the buffer to accommodate the largest possible
+	// volume GUID path is 50 characters.
+	volumeNameBuffer := make([]uint16, 50)
+	if err := windows.GetVolumeNameForVolumeMountPoint(
+		mountPointPointer, &volumeNameBuffer[0], 50); err != nil {
+		return mountPoint, err
+	}
+
+	return syscall.UTF16ToString(volumeNameBuffer), nil
+}
+
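As a quick sketch of the normalization this helper performs (the concrete volume GUID is machine-specific and shown only as a shape; this is a fragment, not a full program):

```go
// Different spellings of the same mount point resolve to one volume GUID path,
// which parseMountPoints then lowercases for exclusion lookups.
v1, err := fs.GetVolumeNameForVolumeMountPoint(`C:`) // backslash appended internally
if err != nil {
	log.Fatal(err)
}
v2, _ := fs.GetVolumeNameForVolumeMountPoint(`C:\`)
fmt.Println(v1 == v2) // true; both yield e.g. \\?\Volume{...}\
```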
 // NewVssSnapshot creates a new vss snapshot. If creating the snapshot doesn't
 // finish within the timeout an error is returned.
-func NewVssSnapshot(
-	volume string, timeoutInSeconds uint, msgError ErrorHandler) (VssSnapshot, error) {
+func NewVssSnapshot(provider string,
+	volume string, timeout time.Duration, filter VolumeFilter, msgError ErrorHandler) (VssSnapshot, error) {
 	is64Bit, err := isRunningOn64BitWindows()
-
 	if err != nil {
 		return VssSnapshot{}, newVssTextError(fmt.Sprintf(
 			"Failed to detect windows architecture: %s", err.Error()))
@@ -744,7 +869,7 @@ func NewVssSnapshot(
 			runtime.GOARCH))
 	}
 
-	timeoutInMillis := uint32(timeoutInSeconds * 1000)
+	deadline := time.Now().Add(timeout)
 
 	oleIUnknown, err := initializeVssCOMInterface()
 	if oleIUnknown != nil {
@@ -778,6 +903,12 @@ func NewVssSnapshot(
 
 	iVssBackupComponents := (*IVssBackupComponents)(unsafe.Pointer(comInterface))
 
+	providerID, err := getProviderID(provider)
+	if err != nil {
+		iVssBackupComponents.Release()
+		return VssSnapshot{}, err
+	}
+
 	if err := iVssBackupComponents.InitializeForBackup(); err != nil {
 		iVssBackupComponents.Release()
 		return VssSnapshot{}, err
@@ -796,13 +927,13 @@ func NewVssSnapshot(
 	}
 
 	err = callAsyncFunctionAndWait(iVssBackupComponents.GatherWriterMetadata,
-		"GatherWriterMetadata", timeoutInMillis)
+		"GatherWriterMetadata", deadline)
 	if err != nil {
 		iVssBackupComponents.Release()
 		return VssSnapshot{}, err
 	}
 
-	if isSupported, err := iVssBackupComponents.IsVolumeSupported(volume); err != nil {
+	if isSupported, err := iVssBackupComponents.IsVolumeSupported(providerID, volume); err != nil {
 		iVssBackupComponents.Release()
 		return VssSnapshot{}, err
 	} else if !isSupported {
@@ -817,44 +948,53 @@ func NewVssSnapshot(
 		return VssSnapshot{}, err
 	}
 
-	if err := iVssBackupComponents.AddToSnapshotSet(volume, &snapshotSetID); err != nil {
+	if err := iVssBackupComponents.AddToSnapshotSet(volume, providerID, &snapshotSetID); err != nil {
 		iVssBackupComponents.Release()
 		return VssSnapshot{}, err
 	}
 
-	mountPoints, err := enumerateMountedFolders(volume)
-	if err != nil {
-		iVssBackupComponents.Release()
-		return VssSnapshot{}, newVssTextError(fmt.Sprintf(
-			"failed to enumerate mount points for volume %s: %s", volume, err))
-	}
-
 	mountPointInfo := make(map[string]MountPoint)
 
-	for _, mountPoint := range mountPoints {
-		// ensure every mountpoint is available even without a valid
-		// snapshot because we need to consider this when backing up files
-		mountPointInfo[mountPoint] = MountPoint{isSnapshotted: false}
-
-		if isSupported, err := iVssBackupComponents.IsVolumeSupported(mountPoint); err != nil {
-			continue
-		} else if !isSupported {
-			continue
-		}
-
-		var mountPointSnapshotSetID ole.GUID
-		err := iVssBackupComponents.AddToSnapshotSet(mountPoint, &mountPointSnapshotSetID)
+	// if filter == nil, don't process mount points for this volume at all
+	if filter != nil {
+		mountPoints, err := enumerateMountedFolders(volume)
 		if err != nil {
 			iVssBackupComponents.Release()
-			return VssSnapshot{}, err
+
+			return VssSnapshot{}, newVssTextError(fmt.Sprintf(
+				"failed to enumerate mount points for volume %s: %s", volume, err))
 		}
 
-		mountPointInfo[mountPoint] = MountPoint{isSnapshotted: true,
-			snapshotSetID: mountPointSnapshotSetID}
+		for _, mountPoint := range mountPoints {
+			// ensure every mountpoint is available even without a valid
+			// snapshot because we need to consider this when backing up files
+			mountPointInfo[mountPoint] = MountPoint{isSnapshotted: false}
+
+			if !filter(mountPoint) {
+				continue
+			} else if isSupported, err := iVssBackupComponents.IsVolumeSupported(providerID, mountPoint); err != nil {
+				continue
+			} else if !isSupported {
+
continue + } + + var mountPointSnapshotSetID ole.GUID + err := iVssBackupComponents.AddToSnapshotSet(mountPoint, providerID, &mountPointSnapshotSetID) + if err != nil { + iVssBackupComponents.Release() + + return VssSnapshot{}, err + } + + mountPointInfo[mountPoint] = MountPoint{ + isSnapshotted: true, + snapshotSetID: mountPointSnapshotSetID, + } + } } err = callAsyncFunctionAndWait(iVssBackupComponents.PrepareForBackup, "PrepareForBackup", - timeoutInMillis) + deadline) if err != nil { // After calling PrepareForBackup one needs to call AbortBackup() before releasing the VSS // instance for proper cleanup. @@ -865,9 +1005,9 @@ func NewVssSnapshot( } err = callAsyncFunctionAndWait(iVssBackupComponents.DoSnapshotSet, "DoSnapshotSet", - timeoutInMillis) + deadline) if err != nil { - iVssBackupComponents.AbortBackup() + _ = iVssBackupComponents.AbortBackup() iVssBackupComponents.Release() return VssSnapshot{}, err } @@ -875,13 +1015,12 @@ func NewVssSnapshot( var snapshotProperties VssSnapshotProperties err = iVssBackupComponents.GetSnapshotProperties(snapshotSetID, &snapshotProperties) if err != nil { - iVssBackupComponents.AbortBackup() + _ = iVssBackupComponents.AbortBackup() iVssBackupComponents.Release() return VssSnapshot{}, err } for mountPoint, info := range mountPointInfo { - if !info.isSnapshotted { continue } @@ -900,8 +1039,10 @@ func NewVssSnapshot( mountPointInfo[mountPoint] = info } - return VssSnapshot{iVssBackupComponents, snapshotSetID, snapshotProperties, - snapshotProperties.GetSnapshotDeviceObject(), mountPointInfo, timeoutInMillis}, nil + return VssSnapshot{ + iVssBackupComponents, snapshotSetID, snapshotProperties, + snapshotProperties.GetSnapshotDeviceObject(), mountPointInfo, time.Until(deadline), + }, nil } // Delete deletes the created snapshot. 
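// Delete derives a fresh deadline from the configured timeout and signals
// BackupComplete to the VSS writers before removing the snapshot set.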
@@ -922,15 +1063,17 @@ func (p *VssSnapshot) Delete() error {
 	if p.iVssBackupComponents != nil {
 		defer p.iVssBackupComponents.Release()
 
+		deadline := time.Now().Add(p.timeout)
+
 		err = callAsyncFunctionAndWait(p.iVssBackupComponents.BackupComplete, "BackupComplete",
-			p.timeoutInMillis)
+			deadline)
 		if err != nil {
 			return err
 		}
 
 		if _, _, e := p.iVssBackupComponents.DeleteSnapshots(p.snapshotID); e != nil {
 			err = newVssTextError(fmt.Sprintf("Failed to delete snapshot: %s", e.Error()))
-			p.iVssBackupComponents.AbortBackup()
+			_ = p.iVssBackupComponents.AbortBackup()
 			if err != nil {
 				return err
 			}
@@ -940,12 +1083,61 @@ func (p *VssSnapshot) Delete() error {
 	return nil
 }
 
+func getProviderID(provider string) (*ole.GUID, error) {
+	providerLower := strings.ToLower(provider)
+	switch providerLower {
+	case "":
+		return ole.IID_NULL, nil
+	case "ms":
+		return ole.NewGUID("{b5946137-7b9f-4925-af80-51abd60b20d5}"), nil
+	}
+
+	comInterface, err := ole.CreateInstance(CLSID_VSS_COORDINATOR, UIID_IVSS_ADMIN)
+	if err != nil {
+		return nil, err
+	}
+	defer comInterface.Release()
+
+	vssAdmin := (*IVSSAdmin)(unsafe.Pointer(comInterface))
+
+	enum, err := vssAdmin.QueryProviders()
+	if err != nil {
+		return nil, err
+	}
+	defer enum.Release()
+
+	id := ole.NewGUID(provider)
+
+	var props struct {
+		objectType uint32
+		provider   VssProviderProperties
+	}
+	for {
+		count, err := enum.Next(1, unsafe.Pointer(&props))
+		if err != nil {
+			return nil, err
+		}
+
+		if count < 1 {
+			return nil, errors.Errorf(`invalid VSS provider "%s"`, provider)
+		}
+
+		name := ole.UTF16PtrToString(props.provider.providerName)
+		vssFreeProviderProperties(&props.provider)
+
+		if id != nil && *id == props.provider.providerID ||
+			id == nil && providerLower == strings.ToLower(name) {
+			return &props.provider.providerID, nil
+		}
+	}
+}
+
 // asyncCallFunc is the callback type for callAsyncFunctionAndWait.
 type asyncCallFunc func() (*IVSSAsync, error)
 
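Note the calling convention established below: callers compute a single deadline up front and pass it to every asynchronous VSS step, so each later step only receives the remaining time budget. A minimal sketch (names mirror NewVssSnapshot above; error handling elided):

```go
// Sketch only: one deadline shared across consecutive VSS calls.
deadline := time.Now().Add(timeout)

err := callAsyncFunctionAndWait(iVssBackupComponents.GatherWriterMetadata, "GatherWriterMetadata", deadline)
if err == nil {
	// By now time.Until(deadline) has shrunk; DoSnapshotSet gets what is left.
	err = callAsyncFunctionAndWait(iVssBackupComponents.DoSnapshotSet, "DoSnapshotSet", deadline)
}
```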
 // callAsyncFunctionAndWait calls an async function and waits for it to either
 // finish or time out.
-func callAsyncFunctionAndWait(function asyncCallFunc, name string, timeoutInMillis uint32) error {
+func callAsyncFunctionAndWait(function asyncCallFunc, name string, deadline time.Time) error {
 	iVssAsync, err := function()
 	if err != nil {
 		return err
@@ -955,7 +1147,12 @@ func callAsyncFunctionAndWait(function asyncCallFunc, name string, timeoutInMill
 		return newVssTextError(fmt.Sprintf("%s() returned nil", name))
 	}
 
-	err = iVssAsync.WaitUntilAsyncFinished(timeoutInMillis)
+	timeout := time.Until(deadline)
+	if timeout <= 0 {
+		return newVssTextError(fmt.Sprintf("%s() deadline exceeded", name))
+	}
+
+	err = iVssAsync.WaitUntilAsyncFinished(timeout)
 	iVssAsync.Release()
 	return err
 }
@@ -1036,6 +1233,7 @@ func enumerateMountedFolders(volume string) ([]string, error) {
 		return mountedFolders, nil
 	}
 
+	// nolint:errcheck
 	defer windows.FindVolumeMountPointClose(handle)
 
 	volumeMountPoint := syscall.UTF16ToString(volumeMountPointBuffer)
diff --git a/internal/index/index.go b/internal/index/index.go
index ecd481594..1c20fe38d 100644
--- a/internal/index/index.go
+++ b/internal/index/index.go
@@ -3,12 +3,15 @@ package index
 import (
 	"context"
 	"encoding/json"
+	"fmt"
 	"io"
+	"math"
 	"sync"
 	"time"
 
 	"github.com/restic/restic/internal/crypto"
 	"github.com/restic/restic/internal/errors"
+	"github.com/restic/restic/internal/feature"
 	"github.com/restic/restic/internal/restic"
 
 	"github.com/restic/restic/internal/debug"
@@ -67,11 +70,9 @@ func (idx *Index) addToPacks(id restic.ID) int {
 	return len(idx.packs) - 1
 }
 
-const maxuint32 = 1<<32 - 1
-
 func (idx *Index) store(packIndex int, blob restic.Blob) {
 	// assert that offset and length fit into uint32!
-	if blob.Offset > maxuint32 || blob.Length > maxuint32 || blob.UncompressedLength > maxuint32 {
+	if blob.Offset > math.MaxUint32 || blob.Length > math.MaxUint32 || blob.UncompressedLength > math.MaxUint32 {
 		panic("offset or length does not fit in uint32. You have packs > 4GB!")
 	}
 
@@ -217,7 +218,7 @@ func (idx *Index) AddToSupersedes(ids ...restic.ID) error {
 
 // Each passes all blobs known to the index to the callback fn. This blocks any
 // modification of the index.
-func (idx *Index) Each(ctx context.Context, fn func(restic.PackedBlob)) {
+func (idx *Index) Each(ctx context.Context, fn func(restic.PackedBlob)) error {
 	idx.m.Lock()
 	defer idx.m.Unlock()
 
@@ -231,6 +232,7 @@ func (idx *Index) Each(ctx context.Context, fn func(restic.PackedBlob)) {
 			return true
 		})
 	}
+	return ctx.Err()
 }
 
 type EachByPackResult struct {
@@ -515,8 +517,13 @@ func DecodeIndex(buf []byte, id restic.ID) (idx *Index, oldFormat bool, err erro
 	debug.Log("Error %v", err)
 
 	if isErrOldIndex(err) {
+		if feature.Flag.Enabled(feature.DeprecateLegacyIndex) {
+			return nil, false, fmt.Errorf("index seems to use the legacy format. update it using `restic repair index`")
+		}
+
 		debug.Log("index is probably old format, trying that")
 		idx, err = decodeOldIndex(buf)
+		idx.ids = append(idx.ids, id)
 		return idx, err == nil, err
 	}
diff --git a/internal/index/index_parallel_test.go b/internal/index/index_parallel_test.go
index db4853e19..5cb8d299d 100644
--- a/internal/index/index_parallel_test.go
+++ b/internal/index/index_parallel_test.go
@@ -15,11 +15,9 @@ import (
 var repoFixture = filepath.Join("..", "repository", "testdata", "test-repo.tar.gz")
 
 func TestRepositoryForAllIndexes(t *testing.T) {
-	repodir, cleanup := rtest.Env(t, repoFixture)
+	repo, cleanup := repository.TestFromFixture(t, repoFixture)
 	defer cleanup()
 
-	repo := repository.TestOpenLocal(t, repodir)
-
 	expectedIndexIDs := restic.NewIDSet()
 	rtest.OK(t, repo.List(context.TODO(), restic.IndexFile, func(id restic.ID, size int64) error {
 		expectedIndexIDs.Insert(id)
diff --git a/internal/index/index_test.go b/internal/index/index_test.go
index 4f0dbd2a0..bafd95c48 100644
--- a/internal/index/index_test.go
+++ b/internal/index/index_test.go
@@ -8,6 +8,7 @@ import (
 	"sync"
 	"testing"
 
+	"github.com/restic/restic/internal/feature"
 	"github.com/restic/restic/internal/index"
 	"github.com/restic/restic/internal/restic"
 	rtest "github.com/restic/restic/internal/test"
@@ -338,7 +339,7 @@ func TestIndexUnserialize(t *testing.T) {
 
 	rtest.Equals(t, oldIdx, idx.Supersedes())
 
-	blobs := listPack(idx, exampleLookupTest.packID)
+	blobs := listPack(t, idx, exampleLookupTest.packID)
 	if len(blobs) != len(exampleLookupTest.blobs) {
 		t.Fatalf("expected %d blobs in pack, got %d", len(exampleLookupTest.blobs), len(blobs))
 	}
@@ -355,12 +356,12 @@ func TestIndexUnserialize(t *testing.T) {
 	}
 }
 
-func listPack(idx *index.Index, id restic.ID) (pbs []restic.PackedBlob) {
-	idx.Each(context.TODO(), func(pb restic.PackedBlob) {
+func listPack(t testing.TB, idx *index.Index, id restic.ID) (pbs []restic.PackedBlob) {
+	rtest.OK(t, idx.Each(context.TODO(), func(pb restic.PackedBlob) {
 		if pb.PackID.Equal(id) {
 			pbs = append(pbs, pb)
 		}
-	})
+	}))
 
 	return pbs
 }
@@ -427,6 +428,8 @@ func BenchmarkEncodeIndex(b *testing.B) {
 }
 
 func TestIndexUnserializeOld(t *testing.T) {
+	defer feature.TestSetFlag(t, feature.Flag, feature.DeprecateLegacyIndex, false)()
+
 	idx, oldFormat, err := index.DecodeIndex(docOldExample, restic.NewRandomID())
 	rtest.OK(t, err)
 	rtest.Assert(t, oldFormat, "old index format recognized as new format")
diff --git a/internal/index/master_index.go b/internal/index/master_index.go
index 4c114b955..d99a3434d 100644
--- a/internal/index/master_index.go
+++ b/internal/index/master_index.go
@@ -223,13 +223,16 @@ func (mi *MasterIndex) finalizeFullIndexes() []*Index {
 
 // Each runs fn on all blobs known to the index. When the context is cancelled,
 // the index iteration returns immediately. This blocks any modification of the index.
-func (mi *MasterIndex) Each(ctx context.Context, fn func(restic.PackedBlob)) {
+func (mi *MasterIndex) Each(ctx context.Context, fn func(restic.PackedBlob)) error {
 	mi.idxMutex.RLock()
 	defer mi.idxMutex.RUnlock()
 
 	for _, idx := range mi.idx {
-		idx.Each(ctx, fn)
+		if err := idx.Each(ctx, fn); err != nil {
+			return err
+		}
 	}
+	return nil
 }
 
 // MergeFinalIndexes merges all final indexes together.
@@ -320,6 +323,9 @@ func (mi *MasterIndex) Save(ctx context.Context, repo restic.Repository, exclude
 				newIndex = NewIndex()
 			}
 		}
+		if wgCtx.Err() != nil {
+			return wgCtx.Err()
+		}
 	}
 
 	err := newIndex.AddToSupersedes(extraObsolete...)
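Since Each can now stop early, every caller has to check its result; the new calling convention, in brief (repo is assumed to be an open repository, and the snippet is a fragment):

```go
count := 0
if err := repo.Index().Each(ctx, func(pb restic.PackedBlob) {
	count++ // inspect each indexed blob here
}); err != nil {
	return err // the context was cancelled mid-iteration
}
```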
@@ -426,10 +432,6 @@ func (mi *MasterIndex) ListPacks(ctx context.Context, packs restic.IDSet) <-chan defer close(out) // only resort a part of the index to keep the memory overhead bounded for i := byte(0); i < 16; i++ { - if ctx.Err() != nil { - return - } - packBlob := make(map[restic.ID][]restic.Blob) for pack := range packs { if pack[0]&0xf == i { @@ -439,11 +441,14 @@ func (mi *MasterIndex) ListPacks(ctx context.Context, packs restic.IDSet) <-chan if len(packBlob) == 0 { continue } - mi.Each(ctx, func(pb restic.PackedBlob) { + err := mi.Each(ctx, func(pb restic.PackedBlob) { if packs.Has(pb.PackID) && pb.PackID[0]&0xf == i { packBlob[pb.PackID] = append(packBlob[pb.PackID], pb.Blob) } }) + if err != nil { + return + } // pass on packs for packID, pbs := range packBlob { diff --git a/internal/index/master_index_test.go b/internal/index/master_index_test.go index dcf6a94f6..fe0364c61 100644 --- a/internal/index/master_index_test.go +++ b/internal/index/master_index_test.go @@ -166,9 +166,9 @@ func TestMasterMergeFinalIndexes(t *testing.T) { rtest.Equals(t, 1, idxCount) blobCount := 0 - mIdx.Each(context.TODO(), func(pb restic.PackedBlob) { + rtest.OK(t, mIdx.Each(context.TODO(), func(pb restic.PackedBlob) { blobCount++ - }) + })) rtest.Equals(t, 2, blobCount) blobs := mIdx.Lookup(bhInIdx1) @@ -198,9 +198,9 @@ func TestMasterMergeFinalIndexes(t *testing.T) { rtest.Equals(t, []restic.PackedBlob{blob2}, blobs) blobCount = 0 - mIdx.Each(context.TODO(), func(pb restic.PackedBlob) { + rtest.OK(t, mIdx.Each(context.TODO(), func(pb restic.PackedBlob) { blobCount++ - }) + })) rtest.Equals(t, 2, blobCount) } @@ -319,9 +319,9 @@ func BenchmarkMasterIndexEach(b *testing.B) { for i := 0; i < b.N; i++ { entries := 0 - mIdx.Each(context.TODO(), func(pb restic.PackedBlob) { + rtest.OK(b, mIdx.Each(context.TODO(), func(pb restic.PackedBlob) { entries++ - }) + })) } } diff --git a/internal/pack/pack.go b/internal/pack/pack.go index cd118ab03..53631a6fb 100644 --- a/internal/pack/pack.go +++ b/internal/pack/pack.go @@ -389,10 +389,10 @@ func CalculateHeaderSize(blobs []restic.Blob) int { // If onlyHdr is set to true, only the size of the header is returned // Note that this function only gives correct sizes, if there are no // duplicates in the index. -func Size(ctx context.Context, mi restic.MasterIndex, onlyHdr bool) map[restic.ID]int64 { +func Size(ctx context.Context, mi restic.MasterIndex, onlyHdr bool) (map[restic.ID]int64, error) { packSize := make(map[restic.ID]int64) - mi.Each(ctx, func(blob restic.PackedBlob) { + err := mi.Each(ctx, func(blob restic.PackedBlob) { size, ok := packSize[blob.PackID] if !ok { size = headerSize @@ -403,5 +403,5 @@ func Size(ctx context.Context, mi restic.MasterIndex, onlyHdr bool) map[restic.I packSize[blob.PackID] = size + int64(CalculateEntrySize(blob.Blob)) }) - return packSize + return packSize, err } diff --git a/internal/repository/key.go b/internal/repository/key.go index d9f8d8e17..0604b44df 100644 --- a/internal/repository/key.go +++ b/internal/repository/key.go @@ -43,11 +43,11 @@ type Key struct { id restic.ID } -// Params tracks the parameters used for the KDF. If not set, it will be +// params tracks the parameters used for the KDF. If not set, it will be // calibrated on the first run of AddKey(). -var Params *crypto.Params +var params *crypto.Params -var ( +const ( // KDFTimeout specifies the maximum runtime for the KDF. 
 	KDFTimeout = 500 * time.Millisecond
@@ -196,13 +196,13 @@ func LoadKey(ctx context.Context, s *Repository, id restic.ID) (k *Key, err erro
 // AddKey adds a new key to an already existing repository.
 func AddKey(ctx context.Context, s *Repository, password, username, hostname string, template *crypto.Key) (*Key, error) {
 	// make sure we have valid KDF parameters
-	if Params == nil {
+	if params == nil {
 		p, err := crypto.Calibrate(KDFTimeout, KDFMemory)
 		if err != nil {
 			return nil, errors.Wrap(err, "Calibrate")
 		}
 
-		Params = &p
+		params = &p
 		debug.Log("calibrated KDF parameters are %v", p)
 	}
 
@@ -213,9 +213,9 @@ func AddKey(ctx context.Context, s *Repository, password, username, hostname str
 		Hostname: hostname,
 
 		KDF: "scrypt",
-		N:   Params.N,
-		R:   Params.R,
-		P:   Params.P,
+		N:   params.N,
+		R:   params.R,
+		P:   params.P,
 	}
 
 	if newkey.Hostname == "" {
@@ -237,7 +237,7 @@ func AddKey(ctx context.Context, s *Repository, password, username, hostname str
 	}
 
 	// call KDF to derive user key
-	newkey.user, err = crypto.KDF(*Params, newkey.Salt, password)
+	newkey.user, err = crypto.KDF(*params, newkey.Salt, password)
 	if err != nil {
 		return nil, err
 	}
diff --git a/internal/repository/lock.go b/internal/repository/lock.go
new file mode 100644
index 000000000..fd8214cd1
--- /dev/null
+++ b/internal/repository/lock.go
@@ -0,0 +1,274 @@
+package repository
+
+import (
+	"context"
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/restic/restic/internal/backend"
+	"github.com/restic/restic/internal/debug"
+	"github.com/restic/restic/internal/errors"
+	"github.com/restic/restic/internal/restic"
+)
+
+type lockContext struct {
+	lock      *restic.Lock
+	cancel    context.CancelFunc
+	refreshWG sync.WaitGroup
+}
+
+type locker struct {
+	retrySleepStart       time.Duration
+	retrySleepMax         time.Duration
+	refreshInterval       time.Duration
+	refreshabilityTimeout time.Duration
+}
+
+const defaultRefreshInterval = 5 * time.Minute
+
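With locking moved into the repository package, callers acquire and release locks through the Unlocker handle defined below; a minimal sketch of the intended call pattern (the printer and logger callbacks here are simple placeholders):

```go
// Acquire a non-exclusive lock, retrying for up to one minute.
unlock, wrappedCtx, err := repository.Lock(ctx, repo, false, time.Minute,
	func(msg string) { fmt.Print(msg) }, // "repo already locked" retry notice
	func(format string, args ...interface{}) { fmt.Printf(format, args...) }, // refresh errors
)
if err != nil {
	return err
}
defer unlock.Unlock() // stops the refresh goroutines and removes the lock file

// Use wrappedCtx from here on: it is cancelled if the lock cannot be refreshed.
```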
+var lockerInst = &locker{
+	retrySleepStart: 5 * time.Second,
+	retrySleepMax:   60 * time.Second,
+	refreshInterval: defaultRefreshInterval,
+	// consider a lock refresh failed a bit before the lock actually becomes stale;
+	// the difference allows compensating for a small time drift between clients.
+	refreshabilityTimeout: restic.StaleLockTimeout - defaultRefreshInterval*3/2,
+}
+
+func Lock(ctx context.Context, repo restic.Repository, exclusive bool, retryLock time.Duration, printRetry func(msg string), logger func(format string, args ...interface{})) (*Unlocker, context.Context, error) {
+	return lockerInst.Lock(ctx, repo, exclusive, retryLock, printRetry, logger)
+}
+
+// Lock wraps the ctx such that it is cancelled when the repository is unlocked;
+// cancelling the original context also stops the lock refresh.
+func (l *locker) Lock(ctx context.Context, repo restic.Repository, exclusive bool, retryLock time.Duration, printRetry func(msg string), logger func(format string, args ...interface{})) (*Unlocker, context.Context, error) {
+
+	lockFn := restic.NewLock
+	if exclusive {
+		lockFn = restic.NewExclusiveLock
+	}
+
+	var lock *restic.Lock
+	var err error
+
+	retrySleep := minDuration(l.retrySleepStart, retryLock)
+	retryMessagePrinted := false
+	retryTimeout := time.After(retryLock)
+
+retryLoop:
+	for {
+		lock, err = lockFn(ctx, repo)
+		if err != nil && restic.IsAlreadyLocked(err) {
+
+			if !retryMessagePrinted {
+				printRetry(fmt.Sprintf("repo already locked, waiting up to %s for the lock\n", retryLock))
+				retryMessagePrinted = true
+			}
+
+			debug.Log("repo already locked, retrying in %v", retrySleep)
+			retrySleepCh := time.After(retrySleep)
+
+			select {
+			case <-ctx.Done():
+				return nil, ctx, ctx.Err()
+			case <-retryTimeout:
+				debug.Log("repo already locked, timeout expired")
+				// Last lock attempt
+				lock, err = lockFn(ctx, repo)
+				break retryLoop
+			case <-retrySleepCh:
+				retrySleep = minDuration(retrySleep*2, l.retrySleepMax)
+			}
+		} else {
+			// anything else, either a successful lock or another error
+			break retryLoop
+		}
+	}
+	if restic.IsInvalidLock(err) {
+		return nil, ctx, errors.Fatalf("%v\n\nthe `unlock --remove-all` command can be used to remove invalid locks. 
Make sure that no other restic process is accessing the repository when running the command", err) + } + if err != nil { + return nil, ctx, fmt.Errorf("unable to create lock in backend: %w", err) + } + debug.Log("create lock %p (exclusive %v)", lock, exclusive) + + ctx, cancel := context.WithCancel(ctx) + lockInfo := &lockContext{ + lock: lock, + cancel: cancel, + } + lockInfo.refreshWG.Add(2) + refreshChan := make(chan struct{}) + forceRefreshChan := make(chan refreshLockRequest) + + go l.refreshLocks(ctx, repo.Backend(), lockInfo, refreshChan, forceRefreshChan, logger) + go l.monitorLockRefresh(ctx, lockInfo, refreshChan, forceRefreshChan, logger) + + return &Unlocker{lockInfo}, ctx, nil +} + +func minDuration(a, b time.Duration) time.Duration { + if a <= b { + return a + } + return b +} + +type refreshLockRequest struct { + result chan bool +} + +func (l *locker) refreshLocks(ctx context.Context, backend backend.Backend, lockInfo *lockContext, refreshed chan<- struct{}, forceRefresh <-chan refreshLockRequest, logger func(format string, args ...interface{})) { + debug.Log("start") + lock := lockInfo.lock + ticker := time.NewTicker(l.refreshInterval) + lastRefresh := lock.Time + + defer func() { + ticker.Stop() + // ensure that the context was cancelled before removing the lock + lockInfo.cancel() + + // remove the lock from the repo + debug.Log("unlocking repository with lock %v", lock) + if err := lock.Unlock(); err != nil { + debug.Log("error while unlocking: %v", err) + logger("error while unlocking: %v", err) + } + + lockInfo.refreshWG.Done() + }() + + for { + select { + case <-ctx.Done(): + debug.Log("terminate") + return + + case req := <-forceRefresh: + debug.Log("trying to refresh stale lock") + // keep on going if our current lock still exists + success := tryRefreshStaleLock(ctx, backend, lock, lockInfo.cancel, logger) + // inform refresh goroutine about forced refresh + select { + case <-ctx.Done(): + case req.result <- success: + } + + if success { + // update lock refresh time + lastRefresh = lock.Time + } + + case <-ticker.C: + if time.Since(lastRefresh) > l.refreshabilityTimeout { + // the lock is too old, wait until the expiry monitor cancels the context + continue + } + + debug.Log("refreshing locks") + err := lock.Refresh(context.TODO()) + if err != nil { + logger("unable to refresh lock: %v\n", err) + } else { + lastRefresh = lock.Time + // inform monitor goroutine about successful refresh + select { + case <-ctx.Done(): + case refreshed <- struct{}{}: + } + } + } + } +} + +func (l *locker) monitorLockRefresh(ctx context.Context, lockInfo *lockContext, refreshed <-chan struct{}, forceRefresh chan<- refreshLockRequest, logger func(format string, args ...interface{})) { + // time.Now() might use a monotonic timer which is paused during standby + // convert to unix time to ensure we compare real time values + lastRefresh := time.Now().UnixNano() + pollDuration := 1 * time.Second + if l.refreshInterval < pollDuration { + // required for TestLockFailedRefresh + pollDuration = l.refreshInterval / 5 + } + // timers are paused during standby, which is a problem as the refresh timeout + // _must_ expire if the host was too long in standby. 
Thus fall back to periodic checks + // https://github.com/golang/go/issues/35012 + ticker := time.NewTicker(pollDuration) + defer func() { + ticker.Stop() + lockInfo.cancel() + lockInfo.refreshWG.Done() + }() + + var refreshStaleLockResult chan bool + + for { + select { + case <-ctx.Done(): + debug.Log("terminate expiry monitoring") + return + case <-refreshed: + if refreshStaleLockResult != nil { + // ignore delayed refresh notifications while the stale lock is refreshed + continue + } + lastRefresh = time.Now().UnixNano() + case <-ticker.C: + if time.Now().UnixNano()-lastRefresh < l.refreshabilityTimeout.Nanoseconds() || refreshStaleLockResult != nil { + continue + } + + debug.Log("trying to refreshStaleLock") + // keep on going if our current lock still exists + refreshReq := refreshLockRequest{ + result: make(chan bool), + } + refreshStaleLockResult = refreshReq.result + + // inform refresh goroutine about forced refresh + select { + case <-ctx.Done(): + case forceRefresh <- refreshReq: + } + case success := <-refreshStaleLockResult: + if success { + lastRefresh = time.Now().UnixNano() + refreshStaleLockResult = nil + continue + } + + logger("Fatal: failed to refresh lock in time\n") + return + } + } +} + +func tryRefreshStaleLock(ctx context.Context, be backend.Backend, lock *restic.Lock, cancel context.CancelFunc, logger func(format string, args ...interface{})) bool { + freeze := backend.AsBackend[backend.FreezeBackend](be) + if freeze != nil { + debug.Log("freezing backend") + freeze.Freeze() + defer freeze.Unfreeze() + } + + err := lock.RefreshStaleLock(ctx) + if err != nil { + logger("failed to refresh stale lock: %v\n", err) + // cancel context while the backend is still frozen to prevent accidental modifications + cancel() + return false + } + + return true +} + +type Unlocker struct { + info *lockContext +} + +func (l *Unlocker) Unlock() { + l.info.cancel() + l.info.refreshWG.Wait() +} diff --git a/cmd/restic/lock_test.go b/internal/repository/lock_test.go similarity index 57% rename from cmd/restic/lock_test.go rename to internal/repository/lock_test.go index bf22db699..644fc6b37 100644 --- a/cmd/restic/lock_test.go +++ b/internal/repository/lock_test.go @@ -1,4 +1,4 @@ -package main +package repository import ( "context" @@ -10,94 +10,76 @@ import ( "time" "github.com/restic/restic/internal/backend" - "github.com/restic/restic/internal/backend/location" "github.com/restic/restic/internal/backend/mem" "github.com/restic/restic/internal/debug" - "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" "github.com/restic/restic/internal/test" + rtest "github.com/restic/restic/internal/test" ) -func openLockTestRepo(t *testing.T, wrapper backendWrapper) (*repository.Repository, func(), *testEnvironment) { - env, cleanup := withTestEnvironment(t) +type backendWrapper func(r backend.Backend) (backend.Backend, error) - reg := location.NewRegistry() - reg.Register(mem.NewFactory()) - env.gopts.backends = reg - env.gopts.Repo = "mem:" +func openLockTestRepo(t *testing.T, wrapper backendWrapper) restic.Repository { + be := backend.Backend(mem.New()) + // initialize repo + TestRepositoryWithBackend(t, be, 0, Options{}) + // reopen repository to allow injecting a backend wrapper if wrapper != nil { - env.gopts.backendTestHook = wrapper + var err error + be, err = wrapper(be) + rtest.OK(t, err) } - testRunInit(t, env.gopts) - repo, err := OpenRepository(context.TODO(), env.gopts) - test.OK(t, err) - return repo, cleanup, env + return TestOpenBackend(t, 
be) } -func checkedLockRepo(ctx context.Context, t *testing.T, repo restic.Repository, env *testEnvironment) (*restic.Lock, context.Context) { - lock, wrappedCtx, err := lockRepo(ctx, repo, env.gopts.RetryLock, env.gopts.JSON) +func checkedLockRepo(ctx context.Context, t *testing.T, repo restic.Repository, lockerInst *locker, retryLock time.Duration) (*Unlocker, context.Context) { + lock, wrappedCtx, err := lockerInst.Lock(ctx, repo, false, retryLock, func(msg string) {}, func(format string, args ...interface{}) {}) test.OK(t, err) test.OK(t, wrappedCtx.Err()) - if lock.Stale() { + if lock.info.lock.Stale() { t.Fatal("lock returned stale lock") } return lock, wrappedCtx } func TestLock(t *testing.T) { - repo, cleanup, env := openLockTestRepo(t, nil) - defer cleanup() + t.Parallel() + repo := openLockTestRepo(t, nil) - lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, env) - unlockRepo(lock) + lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, lockerInst, 0) + lock.Unlock() if wrappedCtx.Err() == nil { t.Fatal("unlock did not cancel context") } } func TestLockCancel(t *testing.T) { - repo, cleanup, env := openLockTestRepo(t, nil) - defer cleanup() + t.Parallel() + repo := openLockTestRepo(t, nil) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - lock, wrappedCtx := checkedLockRepo(ctx, t, repo, env) + lock, wrappedCtx := checkedLockRepo(ctx, t, repo, lockerInst, 0) cancel() if wrappedCtx.Err() == nil { t.Fatal("canceled parent context did not cancel context") } - // unlockRepo should not crash - unlockRepo(lock) -} - -func TestLockUnlockAll(t *testing.T) { - repo, cleanup, env := openLockTestRepo(t, nil) - defer cleanup() - - lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, env) - _, err := unlockAll(0) - test.OK(t, err) - if wrappedCtx.Err() == nil { - t.Fatal("canceled parent context did not cancel context") - } - - // unlockRepo should not crash - unlockRepo(lock) + // Unlock should not crash + lock.Unlock() } func TestLockConflict(t *testing.T) { - repo, cleanup, env := openLockTestRepo(t, nil) - defer cleanup() - repo2, err := OpenRepository(context.TODO(), env.gopts) - test.OK(t, err) + t.Parallel() + repo := openLockTestRepo(t, nil) + repo2 := TestOpenBackend(t, repo.Backend()) - lock, _, err := lockRepoExclusive(context.Background(), repo, env.gopts.RetryLock, env.gopts.JSON) + lock, _, err := Lock(context.Background(), repo, true, 0, func(msg string) {}, func(format string, args ...interface{}) {}) test.OK(t, err) - defer unlockRepo(lock) - _, _, err = lockRepo(context.Background(), repo2, env.gopts.RetryLock, env.gopts.JSON) + defer lock.Unlock() + _, _, err = Lock(context.Background(), repo2, false, 0, func(msg string) {}, func(format string, args ...interface{}) {}) if err == nil { t.Fatal("second lock should have failed") } @@ -118,20 +100,19 @@ func (b *writeOnceBackend) Save(ctx context.Context, h backend.Handle, rd backen } func TestLockFailedRefresh(t *testing.T) { - repo, cleanup, env := openLockTestRepo(t, func(r backend.Backend) (backend.Backend, error) { + t.Parallel() + repo := openLockTestRepo(t, func(r backend.Backend) (backend.Backend, error) { return &writeOnceBackend{Backend: r}, nil }) - defer cleanup() // reduce locking intervals to be suitable for testing - ri, rt := refreshInterval, refreshabilityTimeout - refreshInterval = 20 * time.Millisecond - refreshabilityTimeout = 100 * time.Millisecond - defer func() { - refreshInterval, refreshabilityTimeout = ri, rt - }() - - lock, wrappedCtx 
:= checkedLockRepo(context.Background(), t, repo, env) + li := &locker{ + retrySleepStart: lockerInst.retrySleepStart, + retrySleepMax: lockerInst.retrySleepMax, + refreshInterval: 20 * time.Millisecond, + refreshabilityTimeout: 100 * time.Millisecond, + } + lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, li, 0) select { case <-wrappedCtx.Done(): @@ -139,8 +120,8 @@ func TestLockFailedRefresh(t *testing.T) { case <-time.After(time.Second): t.Fatal("failed lock refresh did not cause context cancellation") } - // unlockRepo should not crash - unlockRepo(lock) + // Unlock should not crash + lock.Unlock() } type loggingBackend struct { @@ -156,24 +137,23 @@ func (b *loggingBackend) Save(ctx context.Context, h backend.Handle, rd backend. } func TestLockSuccessfulRefresh(t *testing.T) { - repo, cleanup, env := openLockTestRepo(t, func(r backend.Backend) (backend.Backend, error) { + t.Parallel() + repo := openLockTestRepo(t, func(r backend.Backend) (backend.Backend, error) { return &loggingBackend{ Backend: r, t: t, }, nil }) - defer cleanup() t.Logf("test for successful lock refresh %v", time.Now()) // reduce locking intervals to be suitable for testing - ri, rt := refreshInterval, refreshabilityTimeout - refreshInterval = 60 * time.Millisecond - refreshabilityTimeout = 500 * time.Millisecond - defer func() { - refreshInterval, refreshabilityTimeout = ri, rt - }() - - lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, env) + li := &locker{ + retrySleepStart: lockerInst.retrySleepStart, + retrySleepMax: lockerInst.retrySleepMax, + refreshInterval: 60 * time.Millisecond, + refreshabilityTimeout: 500 * time.Millisecond, + } + lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, li, 0) select { case <-wrappedCtx.Done(): @@ -186,11 +166,11 @@ func TestLockSuccessfulRefresh(t *testing.T) { buf = buf[:n] t.Log(string(buf)) - case <-time.After(2 * refreshabilityTimeout): + case <-time.After(2 * li.refreshabilityTimeout): // expected lock refresh to work } - // unlockRepo should not crash - unlockRepo(lock) + // Unlock should not crash + lock.Unlock() } type slowBackend struct { @@ -208,26 +188,26 @@ func (b *slowBackend) Save(ctx context.Context, h backend.Handle, rd backend.Rew } func TestLockSuccessfulStaleRefresh(t *testing.T) { + t.Parallel() var sb *slowBackend - repo, cleanup, env := openLockTestRepo(t, func(r backend.Backend) (backend.Backend, error) { + repo := openLockTestRepo(t, func(r backend.Backend) (backend.Backend, error) { sb = &slowBackend{Backend: r} return sb, nil }) - defer cleanup() t.Logf("test for successful lock refresh %v", time.Now()) // reduce locking intervals to be suitable for testing - ri, rt := refreshInterval, refreshabilityTimeout - refreshInterval = 10 * time.Millisecond - refreshabilityTimeout = 50 * time.Millisecond - defer func() { - refreshInterval, refreshabilityTimeout = ri, rt - }() + li := &locker{ + retrySleepStart: lockerInst.retrySleepStart, + retrySleepMax: lockerInst.retrySleepMax, + refreshInterval: 10 * time.Millisecond, + refreshabilityTimeout: 50 * time.Millisecond, + } - lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, env) + lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, li, 0) // delay lock refreshing long enough that the lock would expire sb.m.Lock() - sb.sleep = refreshabilityTimeout + refreshInterval + sb.sleep = li.refreshabilityTimeout + li.refreshInterval sb.m.Unlock() select { @@ -235,7 +215,7 @@ func TestLockSuccessfulStaleRefresh(t *testing.T) { // don't 
call t.Fatal to allow the lock to be properly cleaned up t.Error("lock refresh failed", time.Now()) - case <-time.After(refreshabilityTimeout): + case <-time.After(li.refreshabilityTimeout): } // reset slow backend sb.m.Lock() @@ -248,25 +228,26 @@ func TestLockSuccessfulStaleRefresh(t *testing.T) { // don't call t.Fatal to allow the lock to be properly cleaned up t.Error("lock refresh failed", time.Now()) - case <-time.After(3 * refreshabilityTimeout): + case <-time.After(3 * li.refreshabilityTimeout): // expected lock refresh to work } - // unlockRepo should not crash - unlockRepo(lock) + // Unlock should not crash + lock.Unlock() } func TestLockWaitTimeout(t *testing.T) { - repo, cleanup, env := openLockTestRepo(t, nil) - defer cleanup() + t.Parallel() + repo := openLockTestRepo(t, nil) - elock, _, err := lockRepoExclusive(context.TODO(), repo, env.gopts.RetryLock, env.gopts.JSON) + elock, _, err := Lock(context.TODO(), repo, true, 0, func(msg string) {}, func(format string, args ...interface{}) {}) test.OK(t, err) + defer elock.Unlock() retryLock := 200 * time.Millisecond start := time.Now() - lock, _, err := lockRepo(context.TODO(), repo, retryLock, env.gopts.JSON) + _, _, err = Lock(context.TODO(), repo, false, retryLock, func(msg string) {}, func(format string, args ...interface{}) {}) duration := time.Since(start) test.Assert(t, err != nil, @@ -275,17 +256,15 @@ func TestLockWaitTimeout(t *testing.T) { "create normal lock with exclusively locked repo didn't return the correct error") test.Assert(t, retryLock <= duration && duration < retryLock*3/2, "create normal lock with exclusively locked repo didn't wait for the specified timeout") - - test.OK(t, lock.Unlock()) - test.OK(t, elock.Unlock()) } func TestLockWaitCancel(t *testing.T) { - repo, cleanup, env := openLockTestRepo(t, nil) - defer cleanup() + t.Parallel() + repo := openLockTestRepo(t, nil) - elock, _, err := lockRepoExclusive(context.TODO(), repo, env.gopts.RetryLock, env.gopts.JSON) + elock, _, err := Lock(context.TODO(), repo, true, 0, func(msg string) {}, func(format string, args ...interface{}) {}) test.OK(t, err) + defer elock.Unlock() retryLock := 200 * time.Millisecond cancelAfter := 40 * time.Millisecond @@ -294,7 +273,7 @@ func TestLockWaitCancel(t *testing.T) { ctx, cancel := context.WithCancel(context.TODO()) time.AfterFunc(cancelAfter, cancel) - lock, _, err := lockRepo(ctx, repo, retryLock, env.gopts.JSON) + _, _, err = Lock(ctx, repo, false, retryLock, func(msg string) {}, func(format string, args ...interface{}) {}) duration := time.Since(start) test.Assert(t, err != nil, @@ -303,27 +282,23 @@ func TestLockWaitCancel(t *testing.T) { "create normal lock with exclusively locked repo didn't return the correct error") test.Assert(t, cancelAfter <= duration && duration < retryLock-10*time.Millisecond, "create normal lock with exclusively locked repo didn't return in time, duration %v", duration) - - test.OK(t, lock.Unlock()) - test.OK(t, elock.Unlock()) } func TestLockWaitSuccess(t *testing.T) { - repo, cleanup, env := openLockTestRepo(t, nil) - defer cleanup() + t.Parallel() + repo := openLockTestRepo(t, nil) - elock, _, err := lockRepoExclusive(context.TODO(), repo, env.gopts.RetryLock, env.gopts.JSON) + elock, _, err := Lock(context.TODO(), repo, true, 0, func(msg string) {}, func(format string, args ...interface{}) {}) test.OK(t, err) retryLock := 200 * time.Millisecond unlockAfter := 40 * time.Millisecond time.AfterFunc(unlockAfter, func() { - test.OK(t, elock.Unlock()) + elock.Unlock() }) - lock, _, err := 
lockRepo(context.TODO(), repo, retryLock, env.gopts.JSON) + lock, _, err := Lock(context.TODO(), repo, false, retryLock, func(msg string) {}, func(format string, args ...interface{}) {}) test.OK(t, err) - - test.OK(t, lock.Unlock()) + lock.Unlock() } diff --git a/internal/repository/prune.go b/internal/repository/prune.go new file mode 100644 index 000000000..77811e321 --- /dev/null +++ b/internal/repository/prune.go @@ -0,0 +1,638 @@ +package repository + +import ( + "context" + "fmt" + "math" + "sort" + + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/index" + "github.com/restic/restic/internal/pack" + "github.com/restic/restic/internal/restic" + "github.com/restic/restic/internal/ui/progress" +) + +var ErrIndexIncomplete = errors.Fatal("index is not complete") +var ErrPacksMissing = errors.Fatal("packs from index missing in repo") +var ErrSizeNotMatching = errors.Fatal("pack size does not match calculated size from index") + +// PruneOptions collects all options for the cleanup command. +type PruneOptions struct { + DryRun bool + UnsafeRecovery bool + + MaxUnusedBytes func(used uint64) (unused uint64) // calculates the number of unused bytes after repacking, according to MaxUnused + MaxRepackBytes uint64 + + RepackCachableOnly bool + RepackSmall bool + RepackUncompressed bool +} + +type PruneStats struct { + Blobs struct { + Used uint + Duplicate uint + Unused uint + Remove uint + Repack uint + Repackrm uint + } + Size struct { + Used uint64 + Duplicate uint64 + Unused uint64 + Remove uint64 + Repack uint64 + Repackrm uint64 + Unref uint64 + Uncompressed uint64 + } + Packs struct { + Used uint + Unused uint + PartlyUsed uint + Unref uint + Keep uint + Repack uint + Remove uint + } +} + +type PrunePlan struct { + removePacksFirst restic.IDSet // packs to remove first (unreferenced packs) + repackPacks restic.IDSet // packs to repack + keepBlobs restic.CountedBlobSet // blobs to keep during repacking + removePacks restic.IDSet // packs to remove + ignorePacks restic.IDSet // packs to ignore when rebuilding the index + + repo restic.Repository + stats PruneStats + opts PruneOptions +} + +type packInfo struct { + usedBlobs uint + unusedBlobs uint + usedSize uint64 + unusedSize uint64 + tpe restic.BlobType + uncompressed bool +} + +type packInfoWithID struct { + ID restic.ID + packInfo + mustCompress bool +} + +// PlanPrune selects which files to rewrite and which to delete and which blobs to keep. +// Also some summary statistics are returned. +func PlanPrune(ctx context.Context, opts PruneOptions, repo restic.Repository, getUsedBlobs func(ctx context.Context, repo restic.Repository) (usedBlobs restic.CountedBlobSet, err error), printer progress.Printer) (*PrunePlan, error) { + var stats PruneStats + + if opts.UnsafeRecovery { + // prevent repacking data to make sure users cannot get stuck. 
+ opts.MaxRepackBytes = 0 + } + if repo.Connections() < 2 { + return nil, fmt.Errorf("prune requires a backend connection limit of at least two") + } + if repo.Config().Version < 2 && opts.RepackUncompressed { + return nil, fmt.Errorf("compression requires at least repository format version 2") + } + + usedBlobs, err := getUsedBlobs(ctx, repo) + if err != nil { + return nil, err + } + + printer.P("searching used packs...\n") + keepBlobs, indexPack, err := packInfoFromIndex(ctx, repo.Index(), usedBlobs, &stats, printer) + if err != nil { + return nil, err + } + + printer.P("collecting packs for deletion and repacking\n") + plan, err := decidePackAction(ctx, opts, repo, indexPack, &stats, printer) + if err != nil { + return nil, err + } + + if len(plan.repackPacks) != 0 { + blobCount := keepBlobs.Len() + // when repacking, we do not want to keep blobs which are + // already contained in kept packs, so delete them from keepBlobs + err := repo.Index().Each(ctx, func(blob restic.PackedBlob) { + if plan.removePacks.Has(blob.PackID) || plan.repackPacks.Has(blob.PackID) { + return + } + keepBlobs.Delete(blob.BlobHandle) + }) + if err != nil { + return nil, err + } + + if keepBlobs.Len() < blobCount/2 { + // replace with copy to shrink map to necessary size if there's a chance to benefit + keepBlobs = keepBlobs.Copy() + } + } else { + // keepBlobs is only needed if packs are repacked + keepBlobs = nil + } + plan.keepBlobs = keepBlobs + + plan.repo = repo + plan.stats = stats + plan.opts = opts + + return &plan, nil +} + +func packInfoFromIndex(ctx context.Context, idx restic.MasterIndex, usedBlobs restic.CountedBlobSet, stats *PruneStats, printer progress.Printer) (restic.CountedBlobSet, map[restic.ID]packInfo, error) { + // iterate over all blobs in index to find out which blobs are duplicates + // The counter in usedBlobs describes how many instances of the blob exist in the repository index + // Thus 0 == blob is missing, 1 == blob exists once, >= 2 == duplicates exist + err := idx.Each(ctx, func(blob restic.PackedBlob) { + bh := blob.BlobHandle + count, ok := usedBlobs[bh] + if ok { + if count < math.MaxUint8 { + // don't overflow, but saturate count at 255 + // this can lead to a non-optimal pack selection, but won't cause + // problems otherwise + count++ + } + + usedBlobs[bh] = count + } + }) + if err != nil { + return nil, nil, err + } + + // Check if all used blobs have been found in index + missingBlobs := restic.NewBlobSet() + for bh, count := range usedBlobs { + if count == 0 { + // blob does not exist in any pack files + missingBlobs.Insert(bh) + } + } + + if len(missingBlobs) != 0 { + printer.E("%v not found in the index\n\n"+ + "Integrity check failed: Data seems to be missing.\n"+ + "Will not start prune to prevent (additional) data loss!\n"+ + "Please report this error (along with the output of the 'prune' run) at\n"+ + "https://github.com/restic/restic/issues/new/choose\n", missingBlobs) + return nil, nil, ErrIndexIncomplete + } + + indexPack := make(map[restic.ID]packInfo) + + // save computed pack header size + sz, err := pack.Size(ctx, idx, true) + if err != nil { + return nil, nil, err + } + for pid, hdrSize := range sz { + // initialize tpe with NumBlobTypes to indicate it's not set + indexPack[pid] = packInfo{tpe: restic.NumBlobTypes, usedSize: uint64(hdrSize)} + } + + hasDuplicates := false + // iterate over all blobs in index to generate packInfo + err = idx.Each(ctx, func(blob restic.PackedBlob) { + ip := indexPack[blob.PackID] + + // Set blob type if not yet set + if 
ip.tpe == restic.NumBlobTypes { + ip.tpe = blob.Type + } + + // mark mixed packs with "Invalid blob type" + if ip.tpe != blob.Type { + ip.tpe = restic.InvalidBlob + } + + bh := blob.BlobHandle + size := uint64(blob.Length) + dupCount := usedBlobs[bh] + switch { + case dupCount >= 2: + hasDuplicates = true + // mark as unused for now, we will later on select one copy + ip.unusedSize += size + ip.unusedBlobs++ + + // count as duplicate, will later on change one copy to be counted as used + stats.Size.Duplicate += size + stats.Blobs.Duplicate++ + case dupCount == 1: // used blob, not duplicate + ip.usedSize += size + ip.usedBlobs++ + + stats.Size.Used += size + stats.Blobs.Used++ + default: // unused blob + ip.unusedSize += size + ip.unusedBlobs++ + + stats.Size.Unused += size + stats.Blobs.Unused++ + } + if !blob.IsCompressed() { + ip.uncompressed = true + } + // update indexPack + indexPack[blob.PackID] = ip + }) + if err != nil { + return nil, nil, err + } + + // if duplicate blobs exist, those will be set to either "used" or "unused": + // - mark only one occurrence of duplicate blobs as used + // - if there are already some used blobs in a pack, possibly mark duplicates in this pack as "used" + // - if there are no used blobs in a pack, possibly mark duplicates as "unused" + if hasDuplicates { + // iterate again over all blobs in index (this is pretty cheap, all in-mem) + err = idx.Each(ctx, func(blob restic.PackedBlob) { + bh := blob.BlobHandle + count, ok := usedBlobs[bh] + // skip non-duplicate, aka. normal blobs + // count == 0 is used to mark that this was a duplicate blob with only a single occurrence remaining + if !ok || count == 1 { + return + } + + ip := indexPack[blob.PackID] + size := uint64(blob.Length) + switch { + case ip.usedBlobs > 0, count == 0: + // other used blobs in pack or "last" occurrence -> transition to used + ip.usedSize += size + ip.usedBlobs++ + ip.unusedSize -= size + ip.unusedBlobs-- + // same for the global statistics + stats.Size.Used += size + stats.Blobs.Used++ + stats.Size.Duplicate -= size + stats.Blobs.Duplicate-- + // let other occurrences remain marked as unused + usedBlobs[bh] = 1 + default: + // remain unused and decrease counter + count-- + if count == 1 { + // setting count to 1 would lead to forgetting that this blob had duplicates + // thus use the special value zero. This will select the last instance of the blob for keeping. + count = 0 + } + usedBlobs[bh] = count + } + // update indexPack + indexPack[blob.PackID] = ip + }) + if err != nil { + return nil, nil, err + } + } + + // Sanity check. If no duplicates exist, all blobs have value 1. After handling + // duplicates, this also applies to duplicates. 
+ for _, count := range usedBlobs { + if count != 1 { + panic("internal error during blob selection") + } + } + + return usedBlobs, indexPack, nil +} + +func decidePackAction(ctx context.Context, opts PruneOptions, repo restic.Repository, indexPack map[restic.ID]packInfo, stats *PruneStats, printer progress.Printer) (PrunePlan, error) { + removePacksFirst := restic.NewIDSet() + removePacks := restic.NewIDSet() + repackPacks := restic.NewIDSet() + + var repackCandidates []packInfoWithID + var repackSmallCandidates []packInfoWithID + repoVersion := repo.Config().Version + // only repack very small files by default + targetPackSize := repo.PackSize() / 25 + if opts.RepackSmall { + // consider files with at least 80% of the target size as large enough + targetPackSize = repo.PackSize() / 5 * 4 + } + + // loop over all packs and decide what to do + bar := printer.NewCounter("packs processed") + bar.SetMax(uint64(len(indexPack))) + err := repo.List(ctx, restic.PackFile, func(id restic.ID, packSize int64) error { + p, ok := indexPack[id] + if !ok { + // Pack was not referenced in index and is not used => immediately remove! + printer.V("will remove pack %v as it is unused and not indexed\n", id.Str()) + removePacksFirst.Insert(id) + stats.Size.Unref += uint64(packSize) + return nil + } + + if p.unusedSize+p.usedSize != uint64(packSize) && p.usedBlobs != 0 { + // Pack size does not fit and pack is needed => error + // If the pack is not needed, this is no error, the pack can + // and will be simply removed, see below. + printer.E("pack %s: calculated size %d does not match real size %d\nRun 'restic repair index'.\n", + id.Str(), p.unusedSize+p.usedSize, packSize) + return ErrSizeNotMatching + } + + // statistics + switch { + case p.usedBlobs == 0: + stats.Packs.Unused++ + case p.unusedBlobs == 0: + stats.Packs.Used++ + default: + stats.Packs.PartlyUsed++ + } + + if p.uncompressed { + stats.Size.Uncompressed += p.unusedSize + p.usedSize + } + mustCompress := false + if repoVersion >= 2 { + // repo v2: always repack tree blobs if uncompressed + // compress data blobs if requested + mustCompress = (p.tpe == restic.TreeBlob || opts.RepackUncompressed) && p.uncompressed + } + + // decide what to do + switch { + case p.usedBlobs == 0: + // All blobs in pack are no longer used => remove pack! + removePacks.Insert(id) + stats.Blobs.Remove += p.unusedBlobs + stats.Size.Remove += p.unusedSize + + case opts.RepackCachableOnly && p.tpe == restic.DataBlob: + // if this is a data pack and --repack-cacheable-only is set => keep pack! + stats.Packs.Keep++ + + case p.unusedBlobs == 0 && p.tpe != restic.InvalidBlob && !mustCompress: + if packSize >= int64(targetPackSize) { + // All blobs in pack are used and not mixed => keep pack! + stats.Packs.Keep++ + } else { + repackSmallCandidates = append(repackSmallCandidates, packInfoWithID{ID: id, packInfo: p, mustCompress: mustCompress}) + } + + default: + // all other packs are candidates for repacking + repackCandidates = append(repackCandidates, packInfoWithID{ID: id, packInfo: p, mustCompress: mustCompress}) + } + + delete(indexPack, id) + bar.Add(1) + return nil + }) + bar.Done() + if err != nil { + return PrunePlan{}, err + } + + // At this point indexPacks contains only missing packs! 
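// A minimal standalone sketch (hypothetical helper, not part of this patch;
// it assumes the restic import already used in this file) of the
// duplicate-resolution protocol implemented above: in usedBlobs a count of 1
// marks a blob used exactly once, >= 2 marks a duplicate still to be resolved,
// and 0 is the sentinel for "last remaining occurrence", which forces that
// final copy to be kept.
func resolveDuplicate(usedBlobs map[restic.BlobHandle]uint8, bh restic.BlobHandle, packHasUsedBlobs bool) (keepThisCopy bool) {
	count, ok := usedBlobs[bh]
	if !ok || count == 1 {
		return false // unused or non-duplicate blob, nothing to resolve
	}
	if packHasUsedBlobs || count == 0 {
		// keep this copy; all other occurrences remain marked as unused
		usedBlobs[bh] = 1
		return true
	}
	// skip this copy; the 0 sentinel remembers when only one occurrence is left
	count--
	if count == 1 {
		count = 0
	}
	usedBlobs[bh] = count
	return false
}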
+ + // missing packs that are not needed can be ignored + ignorePacks := restic.NewIDSet() + for id, p := range indexPack { + if p.usedBlobs == 0 { + ignorePacks.Insert(id) + stats.Blobs.Remove += p.unusedBlobs + stats.Size.Remove += p.unusedSize + delete(indexPack, id) + } + } + + if len(indexPack) != 0 { + printer.E("The index references %d needed pack files which are missing from the repository:\n", len(indexPack)) + for id := range indexPack { + printer.E(" %v\n", id) + } + return PrunePlan{}, ErrPacksMissing + } + if len(ignorePacks) != 0 { + printer.E("Missing but unneeded pack files are referenced in the index, will be repaired\n") + for id := range ignorePacks { + printer.E("will forget missing pack file %v\n", id) + } + } + + if len(repackSmallCandidates) < 10 { + // too few small files to be worth the trouble, this also prevents endlessly repacking + // if there is just a single pack file below the target size + stats.Packs.Keep += uint(len(repackSmallCandidates)) + } else { + repackCandidates = append(repackCandidates, repackSmallCandidates...) + } + + // Sort repackCandidates such that packs with highest ratio unused/used space are picked first. + // This is equivalent to sorting by unused / total space. + // Instead of unused[i] / used[i] > unused[j] / used[j] we use + // unused[i] * used[j] > unused[j] * used[i] as uint32*uint32 < uint64 + // Moreover packs containing trees and too small packs are sorted to the beginning + sort.Slice(repackCandidates, func(i, j int) bool { + pi := repackCandidates[i].packInfo + pj := repackCandidates[j].packInfo + switch { + case pi.tpe != restic.DataBlob && pj.tpe == restic.DataBlob: + return true + case pj.tpe != restic.DataBlob && pi.tpe == restic.DataBlob: + return false + case pi.unusedSize+pi.usedSize < uint64(targetPackSize) && pj.unusedSize+pj.usedSize >= uint64(targetPackSize): + return true + case pj.unusedSize+pj.usedSize < uint64(targetPackSize) && pi.unusedSize+pi.usedSize >= uint64(targetPackSize): + return false + } + return pi.unusedSize*pj.usedSize > pj.unusedSize*pi.usedSize + }) + + repack := func(id restic.ID, p packInfo) { + repackPacks.Insert(id) + stats.Blobs.Repack += p.unusedBlobs + p.usedBlobs + stats.Size.Repack += p.unusedSize + p.usedSize + stats.Blobs.Repackrm += p.unusedBlobs + stats.Size.Repackrm += p.unusedSize + if p.uncompressed { + stats.Size.Uncompressed -= p.unusedSize + p.usedSize + } + } + + // calculate limit for number of unused bytes in the repo after repacking + maxUnusedSizeAfter := opts.MaxUnusedBytes(stats.Size.Used) + + for _, p := range repackCandidates { + reachedUnusedSizeAfter := (stats.Size.Unused-stats.Size.Remove-stats.Size.Repackrm < maxUnusedSizeAfter) + reachedRepackSize := stats.Size.Repack+p.unusedSize+p.usedSize >= opts.MaxRepackBytes + packIsLargeEnough := p.unusedSize+p.usedSize >= uint64(targetPackSize) + + switch { + case reachedRepackSize: + stats.Packs.Keep++ + + case p.tpe != restic.DataBlob, p.mustCompress: + // repacking non-data packs / uncompressed-trees is only limited by repackSize + repack(p.ID, p.packInfo) + + case reachedUnusedSizeAfter && packIsLargeEnough: + // for all other packs stop repacking if tolerated unused size is reached. 
+ stats.Packs.Keep++ + + default: + repack(p.ID, p.packInfo) + } + } + + stats.Packs.Unref = uint(len(removePacksFirst)) + stats.Packs.Repack = uint(len(repackPacks)) + stats.Packs.Remove = uint(len(removePacks)) + + if repo.Config().Version < 2 { + // compression not supported for repository format version 1 + stats.Size.Uncompressed = 0 + } + + return PrunePlan{removePacksFirst: removePacksFirst, + removePacks: removePacks, + repackPacks: repackPacks, + ignorePacks: ignorePacks, + }, nil +} + +func (plan *PrunePlan) Stats() PruneStats { + return plan.stats +} + +// Execute does the actual pruning: +// - remove unreferenced packs first +// - repack given pack files while keeping the given blobs +// - rebuild the index while ignoring all files that will be deleted +// - delete the files +// plan.removePacks and plan.ignorePacks are modified in this function. +func (plan *PrunePlan) Execute(ctx context.Context, printer progress.Printer) (err error) { + if plan.opts.DryRun { + printer.V("Repeated prune dry-runs can report slightly different amounts of data to keep or repack. This is expected behavior.\n\n") + if len(plan.removePacksFirst) > 0 { + printer.V("Would have removed the following unreferenced packs:\n%v\n\n", plan.removePacksFirst) + } + printer.V("Would have repacked and removed the following packs:\n%v\n\n", plan.repackPacks) + printer.V("Would have removed the following no longer used packs:\n%v\n\n", plan.removePacks) + // Always quit here if DryRun was set! + return nil + } + + repo := plan.repo + // make sure the plan can only be used once + plan.repo = nil + + // unreferenced packs can be safely deleted first + if len(plan.removePacksFirst) != 0 { + printer.P("deleting unreferenced packs\n") + _ = deleteFiles(ctx, true, repo, plan.removePacksFirst, restic.PackFile, printer) + } + if ctx.Err() != nil { + return ctx.Err() + } + + if len(plan.repackPacks) != 0 { + printer.P("repacking packs\n") + bar := printer.NewCounter("packs repacked") + bar.SetMax(uint64(len(plan.repackPacks))) + _, err := Repack(ctx, repo, repo, plan.repackPacks, plan.keepBlobs, bar) + bar.Done() + if err != nil { + return errors.Fatal(err.Error()) + } + + // Also remove repacked packs + plan.removePacks.Merge(plan.repackPacks) + + if len(plan.keepBlobs) != 0 { + printer.E("%v was not repacked\n\n"+ + "Integrity check failed.\n"+ + "Please report this error (along with the output of the 'prune' run) at\n"+ + "https://github.com/restic/restic/issues/new/choose\n", plan.keepBlobs) + return errors.Fatal("internal error: blobs were not repacked") + } + + // allow GC of the blob set + plan.keepBlobs = nil + } + + if len(plan.ignorePacks) == 0 { + plan.ignorePacks = plan.removePacks + } else { + plan.ignorePacks.Merge(plan.removePacks) + } + + if plan.opts.UnsafeRecovery { + printer.P("deleting index files\n") + indexFiles := repo.Index().(*index.MasterIndex).IDs() + err = deleteFiles(ctx, false, repo, indexFiles, restic.IndexFile, printer) + if err != nil { + return errors.Fatalf("%s", err) + } + } else if len(plan.ignorePacks) != 0 { + err = rebuildIndexFiles(ctx, repo, plan.ignorePacks, nil, false, printer) + if err != nil { + return errors.Fatalf("%s", err) + } + } + + if len(plan.removePacks) != 0 { + printer.P("removing %d old packs\n", len(plan.removePacks)) + _ = deleteFiles(ctx, true, repo, plan.removePacks, restic.PackFile, printer) + } + if ctx.Err() != nil { + return ctx.Err() + } + + if plan.opts.UnsafeRecovery { + err = rebuildIndexFiles(ctx, repo, plan.ignorePacks, nil, true, printer) + if err != 
nil { + return errors.Fatalf("%s", err) + } + } + + if err != nil { + return err + } + + // drop outdated in-memory index + repo.ClearIndex() + + printer.P("done\n") + return nil +} + +// deleteFiles deletes the given fileList of fileType in parallel +// if ignoreError=true, it will print a warning if there was an error, else it will abort. +func deleteFiles(ctx context.Context, ignoreError bool, repo restic.Repository, fileList restic.IDSet, fileType restic.FileType, printer progress.Printer) error { + bar := printer.NewCounter("files deleted") + defer bar.Done() + + return restic.ParallelRemove(ctx, repo, fileList, fileType, func(id restic.ID, err error) error { + if err != nil { + printer.E("unable to remove %v/%v from the repository\n", fileType, id) + if !ignoreError { + return err + } + } + printer.VV("removed %v/%v\n", fileType, id) + return nil + }, bar) +} diff --git a/internal/repository/prune_test.go b/internal/repository/prune_test.go new file mode 100644 index 000000000..bff221f49 --- /dev/null +++ b/internal/repository/prune_test.go @@ -0,0 +1,105 @@ +package repository_test + +import ( + "context" + "math" + "testing" + + "github.com/restic/restic/internal/checker" + "github.com/restic/restic/internal/repository" + "github.com/restic/restic/internal/restic" + rtest "github.com/restic/restic/internal/test" + "github.com/restic/restic/internal/ui/progress" + "golang.org/x/sync/errgroup" +) + +func testPrune(t *testing.T, opts repository.PruneOptions, errOnUnused bool) { + repo := repository.TestRepository(t).(*repository.Repository) + createRandomBlobs(t, repo, 4, 0.5, true) + createRandomBlobs(t, repo, 5, 0.5, true) + keep, _ := selectBlobs(t, repo, 0.5) + + var wg errgroup.Group + repo.StartPackUploader(context.TODO(), &wg) + // duplicate a few blobs to exercise those code paths + for blob := range keep { + buf, err := repo.LoadBlob(context.TODO(), blob.Type, blob.ID, nil) + rtest.OK(t, err) + _, _, _, err = repo.SaveBlob(context.TODO(), blob.Type, buf, blob.ID, true) + rtest.OK(t, err) + } + rtest.OK(t, repo.Flush(context.TODO())) + + plan, err := repository.PlanPrune(context.TODO(), opts, repo, func(ctx context.Context, repo restic.Repository) (usedBlobs restic.CountedBlobSet, err error) { + return restic.NewCountedBlobSet(keep.List()...), nil + }, &progress.NoopPrinter{}) + rtest.OK(t, err) + + rtest.OK(t, plan.Execute(context.TODO(), &progress.NoopPrinter{})) + + repo = repository.TestOpenBackend(t, repo.Backend()).(*repository.Repository) + checker.TestCheckRepo(t, repo, true) + + if errOnUnused { + existing := listBlobs(repo) + rtest.Assert(t, existing.Equals(keep), "unexpected blobs, wanted %v got %v", keep, existing) + } +} + +func TestPrune(t *testing.T) { + for _, test := range []struct { + name string + opts repository.PruneOptions + errOnUnused bool + }{ + { + name: "0", + opts: repository.PruneOptions{ + MaxRepackBytes: math.MaxUint64, + MaxUnusedBytes: func(used uint64) (unused uint64) { return 0 }, + }, + errOnUnused: true, + }, + { + name: "50", + opts: repository.PruneOptions{ + MaxRepackBytes: math.MaxUint64, + MaxUnusedBytes: func(used uint64) (unused uint64) { return used / 2 }, + }, + }, + { + name: "unlimited", + opts: repository.PruneOptions{ + MaxRepackBytes: math.MaxUint64, + MaxUnusedBytes: func(used uint64) (unused uint64) { return math.MaxUint64 }, + }, + }, + { + name: "cachableonly", + opts: repository.PruneOptions{ + MaxRepackBytes: math.MaxUint64, + MaxUnusedBytes: func(used uint64) (unused uint64) { return used / 20 }, + RepackCachableOnly: 
true, + }, + }, + { + name: "small", + opts: repository.PruneOptions{ + MaxRepackBytes: math.MaxUint64, + MaxUnusedBytes: func(used uint64) (unused uint64) { return math.MaxUint64 }, + RepackSmall: true, + }, + errOnUnused: true, + }, + } { + t.Run(test.name, func(t *testing.T) { + testPrune(t, test.opts, test.errOnUnused) + }) + t.Run(test.name+"-recovery", func(t *testing.T) { + opts := test.opts + opts.UnsafeRecovery = true + // unsafeNoSpaceRecovery does not repack partially used pack files + testPrune(t, opts, false) + }) + } +} diff --git a/internal/repository/repack.go b/internal/repository/repack.go index 5588984f6..53656252a 100644 --- a/internal/repository/repack.go +++ b/internal/repository/repack.go @@ -72,7 +72,7 @@ func repack(ctx context.Context, repo restic.Repository, dstRepo restic.Reposito return wgCtx.Err() } } - return nil + return wgCtx.Err() }) worker := func() error { diff --git a/internal/repository/repack_test.go b/internal/repository/repack_test.go index e5e46ac2a..2f7867101 100644 --- a/internal/repository/repack_test.go +++ b/internal/repository/repack_test.go @@ -18,7 +18,7 @@ func randomSize(min, max int) int { return rand.Intn(max-min) + min } -func createRandomBlobs(t testing.TB, repo restic.Repository, blobs int, pData float32) { +func createRandomBlobs(t testing.TB, repo restic.Repository, blobs int, pData float32, smallBlobs bool) { var wg errgroup.Group repo.StartPackUploader(context.TODO(), &wg) @@ -30,7 +30,11 @@ func createRandomBlobs(t testing.TB, repo restic.Repository, blobs int, pData fl if rand.Float32() < pData { tpe = restic.DataBlob - length = randomSize(10*1024, 1024*1024) // 10KiB to 1MiB of data + if smallBlobs { + length = randomSize(1*1024, 20*1024) // 1KiB to 20KiB of data + } else { + length = randomSize(10*1024, 1024*1024) // 10KiB to 1MiB of data + } } else { tpe = restic.TreeBlob length = randomSize(1*1024, 20*1024) // 1KiB to 20KiB @@ -121,8 +125,12 @@ func selectBlobs(t *testing.T, repo restic.Repository, p float32) (list1, list2 } func listPacks(t *testing.T, repo restic.Lister) restic.IDSet { + return listFiles(t, repo, restic.PackFile) +} + +func listFiles(t *testing.T, repo restic.Lister, tpe backend.FileType) restic.IDSet { list := restic.NewIDSet() - err := repo.List(context.TODO(), restic.PackFile, func(id restic.ID, size int64) error { + err := repo.List(context.TODO(), tpe, func(id restic.ID, size int64) error { list.Insert(id) return nil }) @@ -166,12 +174,6 @@ func repack(t *testing.T, repo restic.Repository, packs restic.IDSet, blobs rest } } -func flush(t *testing.T, repo restic.Repository) { - if err := repo.Flush(context.TODO()); err != nil { - t.Fatalf("repo.SaveIndex() %v", err) - } -} - func rebuildIndex(t *testing.T, repo restic.Repository) { err := repo.SetIndex(index.NewMasterIndex()) rtest.OK(t, err) @@ -219,7 +221,9 @@ func testRepack(t *testing.T, version uint) { rand.Seed(seed) t.Logf("rand seed is %v", seed) - createRandomBlobs(t, repo, 100, 0.7) + // add a small amount of blobs twice to create multiple pack files + createRandomBlobs(t, repo, 10, 0.7, false) + createRandomBlobs(t, repo, 10, 0.7, false) packsBefore := listPacks(t, repo) @@ -233,8 +237,6 @@ func testRepack(t *testing.T, version uint) { packsBefore, packsAfter) } - flush(t, repo) - removeBlobs, keepBlobs := selectBlobs(t, repo, 0.2) removePacks := findPacksForBlobs(t, repo, removeBlobs) @@ -302,8 +304,9 @@ func testRepackCopy(t *testing.T, version uint) { rand.Seed(seed) t.Logf("rand seed is %v", seed) - createRandomBlobs(t, repo, 100, 0.7) 
- flush(t, repo) + // add a small amount of blobs twice to create multiple pack files + createRandomBlobs(t, repo, 10, 0.7, false) + createRandomBlobs(t, repo, 10, 0.7, false) _, keepBlobs := selectBlobs(t, repo, 0.2) copyPacks := findPacksForBlobs(t, repo, keepBlobs) @@ -343,7 +346,7 @@ func testRepackWrongBlob(t *testing.T, version uint) { rand.Seed(seed) t.Logf("rand seed is %v", seed) - createRandomBlobs(t, repo, 5, 0.7) + createRandomBlobs(t, repo, 5, 0.7, false) createRandomWrongBlob(t, repo) // just keep all blobs, but also rewrite every pack diff --git a/internal/repository/repair_index.go b/internal/repository/repair_index.go new file mode 100644 index 000000000..a6e732b44 --- /dev/null +++ b/internal/repository/repair_index.go @@ -0,0 +1,132 @@ +package repository + +import ( + "context" + + "github.com/restic/restic/internal/index" + "github.com/restic/restic/internal/pack" + "github.com/restic/restic/internal/restic" + "github.com/restic/restic/internal/ui/progress" +) + +type RepairIndexOptions struct { + ReadAllPacks bool +} + +func RepairIndex(ctx context.Context, repo *Repository, opts RepairIndexOptions, printer progress.Printer) error { + var obsoleteIndexes restic.IDs + packSizeFromList := make(map[restic.ID]int64) + packSizeFromIndex := make(map[restic.ID]int64) + removePacks := restic.NewIDSet() + + if opts.ReadAllPacks { + // get list of old index files but start with empty index + err := repo.List(ctx, restic.IndexFile, func(id restic.ID, _ int64) error { + obsoleteIndexes = append(obsoleteIndexes, id) + return nil + }) + if err != nil { + return err + } + } else { + printer.P("loading indexes...\n") + mi := index.NewMasterIndex() + err := index.ForAllIndexes(ctx, repo, repo, func(id restic.ID, idx *index.Index, _ bool, err error) error { + if err != nil { + printer.E("removing invalid index %v: %v\n", id, err) + obsoleteIndexes = append(obsoleteIndexes, id) + return nil + } + + mi.Insert(idx) + return nil + }) + if err != nil { + return err + } + + err = mi.MergeFinalIndexes() + if err != nil { + return err + } + + err = repo.SetIndex(mi) + if err != nil { + return err + } + packSizeFromIndex, err = pack.Size(ctx, repo.Index(), false) + if err != nil { + return err + } + } + + printer.P("getting pack files to read...\n") + err := repo.List(ctx, restic.PackFile, func(id restic.ID, packSize int64) error { + size, ok := packSizeFromIndex[id] + if !ok || size != packSize { + // Pack was not referenced in index or size does not match + packSizeFromList[id] = packSize + removePacks.Insert(id) + } + if !ok { + printer.E("adding pack file to index %v\n", id) + } else if size != packSize { + printer.E("reindexing pack file %v with unexpected size %v instead of %v\n", id, packSize, size) + } + delete(packSizeFromIndex, id) + return nil + }) + if err != nil { + return err + } + for id := range packSizeFromIndex { + // forget pack files that are referenced in the index but do not exist + // when rebuilding the index + removePacks.Insert(id) + printer.E("removing not found pack file %v\n", id) + } + + if len(packSizeFromList) > 0 { + printer.P("reading pack files\n") + bar := printer.NewCounter("packs") + bar.SetMax(uint64(len(packSizeFromList))) + invalidFiles, err := repo.CreateIndexFromPacks(ctx, packSizeFromList, bar) + bar.Done() + if err != nil { + return err + } + + for _, id := range invalidFiles { + printer.V("skipped incomplete pack file: %v\n", id) + } + } + + err = rebuildIndexFiles(ctx, repo, removePacks, obsoleteIndexes, false, printer) + if err != nil { + return 
err + } + + // drop outdated in-memory index + repo.ClearIndex() + return nil +} + +func rebuildIndexFiles(ctx context.Context, repo restic.Repository, removePacks restic.IDSet, extraObsolete restic.IDs, skipDeletion bool, printer progress.Printer) error { + printer.P("rebuilding index\n") + + bar := printer.NewCounter("packs processed") + return repo.Index().Save(ctx, repo, removePacks, extraObsolete, restic.MasterIndexSaveOpts{ + SaveProgress: bar, + DeleteProgress: func() *progress.Counter { + return printer.NewCounter("old indexes deleted") + }, + DeleteReport: func(id restic.ID, err error) { + if err != nil { + printer.VV("failed to remove index %v: %v\n", id.String(), err) + } else { + printer.VV("removed index %v\n", id.String()) + } + }, + SkipDeletion: skipDeletion, + }) +} diff --git a/internal/repository/repair_index_test.go b/internal/repository/repair_index_test.go new file mode 100644 index 000000000..adaee3832 --- /dev/null +++ b/internal/repository/repair_index_test.go @@ -0,0 +1,79 @@ +package repository_test + +import ( + "context" + "testing" + + "github.com/restic/restic/internal/backend" + "github.com/restic/restic/internal/checker" + "github.com/restic/restic/internal/repository" + "github.com/restic/restic/internal/restic" + rtest "github.com/restic/restic/internal/test" + "github.com/restic/restic/internal/ui/progress" +) + +func listIndex(t *testing.T, repo restic.Lister) restic.IDSet { + return listFiles(t, repo, restic.IndexFile) +} + +func testRebuildIndex(t *testing.T, readAllPacks bool, damage func(t *testing.T, repo *repository.Repository)) { + repo := repository.TestRepository(t).(*repository.Repository) + createRandomBlobs(t, repo, 4, 0.5, true) + createRandomBlobs(t, repo, 5, 0.5, true) + indexes := listIndex(t, repo) + t.Logf("old indexes %v", indexes) + + damage(t, repo) + + repo = repository.TestOpenBackend(t, repo.Backend()).(*repository.Repository) + rtest.OK(t, repository.RepairIndex(context.TODO(), repo, repository.RepairIndexOptions{ + ReadAllPacks: readAllPacks, + }, &progress.NoopPrinter{})) + + newIndexes := listIndex(t, repo) + old := indexes.Intersect(newIndexes) + rtest.Assert(t, len(old) == 0, "expected old indexes to be removed, found %v", old) + + checker.TestCheckRepo(t, repo, true) +} + +func TestRebuildIndex(t *testing.T) { + for _, test := range []struct { + name string + damage func(t *testing.T, repo *repository.Repository) + }{ + { + "valid index", + func(t *testing.T, repo *repository.Repository) {}, + }, + { + "damaged index", + func(t *testing.T, repo *repository.Repository) { + index := listIndex(t, repo).List()[0] + replaceFile(t, repo, backend.Handle{Type: restic.IndexFile, Name: index.String()}, func(b []byte) []byte { + b[0] ^= 0xff + return b + }) + }, + }, + { + "missing index", + func(t *testing.T, repo *repository.Repository) { + index := listIndex(t, repo).List()[0] + rtest.OK(t, repo.Backend().Remove(context.TODO(), backend.Handle{Type: restic.IndexFile, Name: index.String()})) + }, + }, + { + "missing pack", + func(t *testing.T, repo *repository.Repository) { + pack := listPacks(t, repo).List()[0] + rtest.OK(t, repo.Backend().Remove(context.TODO(), backend.Handle{Type: restic.PackFile, Name: pack.String()})) + }, + }, + } { + t.Run(test.name, func(t *testing.T) { + testRebuildIndex(t, false, test.damage) + testRebuildIndex(t, true, test.damage) + }) + } +} diff --git a/internal/repository/repair_pack.go b/internal/repository/repair_pack.go index 2e0368899..a4261517a 100644 --- a/internal/repository/repair_pack.go +++ 
b/internal/repository/repair_pack.go @@ -60,19 +60,7 @@ func RepairPacks(ctx context.Context, repo restic.Repository, ids restic.IDSet, } // remove salvaged packs from index - printer.P("rebuilding index") - - bar = printer.NewCounter("packs processed") - err = repo.Index().Save(ctx, repo, ids, nil, restic.MasterIndexSaveOpts{ - SaveProgress: bar, - DeleteProgress: func() *progress.Counter { - return printer.NewCounter("old indexes deleted") - }, - DeleteReport: func(id restic.ID, _ error) { - printer.VV("removed index %v", id.String()) - }, - }) - + err = rebuildIndexFiles(ctx, repo, ids, nil, false, printer) if err != nil { return err } diff --git a/internal/repository/repair_pack_test.go b/internal/repository/repair_pack_test.go index b950245aa..078017d21 100644 --- a/internal/repository/repair_pack_test.go +++ b/internal/repository/repair_pack_test.go @@ -17,7 +17,7 @@ import ( func listBlobs(repo restic.Repository) restic.BlobSet { blobs := restic.NewBlobSet() - repo.Index().Each(context.TODO(), func(pb restic.PackedBlob) { + _ = repo.Index().Each(context.TODO(), func(pb restic.PackedBlob) { blobs.Insert(pb.BlobHandle) }) return blobs @@ -109,7 +109,7 @@ func testRepairBrokenPack(t *testing.T, version uint) { rand.Seed(seed) t.Logf("rand seed is %v", seed) - createRandomBlobs(t, repo, 5, 0.7) + createRandomBlobs(t, repo, 5, 0.7, true) packsBefore := listPacks(t, repo) blobsBefore := listBlobs(repo) diff --git a/internal/repository/repository.go b/internal/repository/repository.go index 8e34c7125..cac1551c4 100644 --- a/internal/repository/repository.go +++ b/internal/repository/repository.go @@ -6,6 +6,7 @@ import ( "context" "fmt" "io" + "math" "os" "runtime" "sort" @@ -142,9 +143,6 @@ func (r *Repository) DisableAutoIndexUpdate() { // setConfig assigns the given config and updates the repository parameters accordingly func (r *Repository) setConfig(cfg restic.Config) { r.cfg = cfg - if r.cfg.Version >= 2 { - r.idx.MarkCompressed() - } } // Config returns the repository configuration. @@ -637,9 +635,21 @@ func (r *Repository) Index() restic.MasterIndex { // SetIndex instructs the repository to use the given index. 
func (r *Repository) SetIndex(i restic.MasterIndex) error { r.idx = i.(*index.MasterIndex) + r.configureIndex() return r.prepareCache() } +func (r *Repository) ClearIndex() { + r.idx = index.NewMasterIndex() + r.configureIndex() +} + +func (r *Repository) configureIndex() { + if r.cfg.Version >= 2 { + r.idx.MarkCompressed() + } +} + // LoadIndex loads all index files from the backend in parallel and stores them func (r *Repository) LoadIndex(ctx context.Context, p *progress.Counter) error { debug.Log("Loading index") @@ -662,6 +672,9 @@ func (r *Repository) LoadIndex(ctx context.Context, p *progress.Counter) error { defer p.Done() } + // reset in-memory index before loading it from the repository + r.ClearIndex() + err = index.ForAllIndexes(ctx, indexList, r, func(_ restic.ID, idx *index.Index, _ bool, err error) error { if err != nil { return err @@ -691,15 +704,21 @@ func (r *Repository) LoadIndex(ctx context.Context, p *progress.Counter) error { defer cancel() invalidIndex := false - r.idx.Each(ctx, func(blob restic.PackedBlob) { + err := r.idx.Each(ctx, func(blob restic.PackedBlob) { if blob.IsCompressed() { invalidIndex = true } }) + if err != nil { + return err + } if invalidIndex { return errors.New("index uses feature not supported by repository version 1") } } + if ctx.Err() != nil { + return ctx.Err() + } // remove index files from the cache which have been removed in the repo return r.prepareCache() @@ -917,6 +936,10 @@ func (r *Repository) Close() error { // occupies in the repo (compressed or not, including encryption overhead). func (r *Repository) SaveBlob(ctx context.Context, t restic.BlobType, buf []byte, id restic.ID, storeDuplicate bool) (newID restic.ID, known bool, size int, err error) { + if int64(len(buf)) > math.MaxUint32 { + return restic.ID{}, false, 0, fmt.Errorf("blob is larger than 4GB") + } + // compute plaintext hash if not already set if id.IsNull() { // Special case the hash calculation for all zero chunks. 
This is especially diff --git a/internal/repository/repository_test.go b/internal/repository/repository_test.go index 0fa8e4d4a..48a56a1fd 100644 --- a/internal/repository/repository_test.go +++ b/internal/repository/repository_test.go @@ -221,10 +221,9 @@ func benchmarkLoadUnpacked(b *testing.B, version uint) { var repoFixture = filepath.Join("testdata", "test-repo.tar.gz") func TestRepositoryLoadIndex(t *testing.T) { - repodir, cleanup := rtest.Env(t, repoFixture) + repo, cleanup := repository.TestFromFixture(t, repoFixture) defer cleanup() - repo := repository.TestOpenLocal(t, repodir) rtest.OK(t, repo.LoadIndex(context.TODO(), nil)) } @@ -243,8 +242,7 @@ func loadIndex(ctx context.Context, repo restic.LoaderUnpacked, id restic.ID) (* } func TestRepositoryLoadUnpackedBroken(t *testing.T) { - repodir, cleanup := rtest.Env(t, repoFixture) - defer cleanup() + repo := repository.TestRepository(t) data := rtest.Random(23, 12345) id := restic.Hash(data) @@ -252,9 +250,8 @@ func TestRepositoryLoadUnpackedBroken(t *testing.T) { // damage buffer data[0] ^= 0xff - repo := repository.TestOpenLocal(t, repodir) // store broken file - err := repo.Backend().Save(context.TODO(), h, backend.NewByteReader(data, nil)) + err := repo.Backend().Save(context.TODO(), h, backend.NewByteReader(data, repo.Backend().Hasher())) rtest.OK(t, err) // without a retry backend this will just return an error that the file is broken @@ -289,10 +286,7 @@ func TestRepositoryLoadUnpackedRetryBroken(t *testing.T) { be, err := local.Open(context.TODO(), local.Config{Path: repodir, Connections: 2}) rtest.OK(t, err) - repo, err := repository.New(&damageOnceBackend{Backend: be}, repository.Options{}) - rtest.OK(t, err) - err = repo.SearchKey(context.TODO(), rtest.TestPassword, 10, "") - rtest.OK(t, err) + repo := repository.TestOpenBackend(t, &damageOnceBackend{Backend: be}) rtest.OK(t, repo.LoadIndex(context.TODO(), nil)) } @@ -376,13 +370,13 @@ func testRepositoryIncrementalIndex(t *testing.T, version uint) { idx, err := loadIndex(context.TODO(), repo, id) rtest.OK(t, err) - idx.Each(context.TODO(), func(pb restic.PackedBlob) { + rtest.OK(t, idx.Each(context.TODO(), func(pb restic.PackedBlob) { if _, ok := packEntries[pb.PackID]; !ok { packEntries[pb.PackID] = make(map[restic.ID]struct{}) } packEntries[pb.PackID][id] = struct{}{} - }) + })) return nil }) if err != nil { diff --git a/internal/repository/testing.go b/internal/repository/testing.go index dbbdbeb07..9fb643a46 100644 --- a/internal/repository/testing.go +++ b/internal/repository/testing.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "os" + "sync" "testing" "github.com/restic/restic/internal/backend" @@ -17,21 +18,22 @@ import ( "github.com/restic/chunker" ) -// testKDFParams are the parameters for the KDF to be used during testing. -var testKDFParams = crypto.Params{ - N: 128, - R: 1, - P: 1, -} - type logger interface { Logf(format string, args ...interface{}) } +var paramsOnce sync.Once + // TestUseLowSecurityKDFParameters configures low-security KDF parameters for testing. func TestUseLowSecurityKDFParameters(t logger) { t.Logf("using low-security KDF parameters for test") - Params = &testKDFParams + paramsOnce.Do(func() { + params = &crypto.Params{ + N: 128, + R: 1, + P: 1, + } + }) } // TestBackend returns a fully configured in-memory backend. 
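The sync.Once guards introduced for the test-only overrides (paramsOnce above, and checkPolynomialOnce in internal/restic/config.go below) follow one pattern: now that tests call t.Parallel(), a process-global override may be requested concurrently and must be applied at most once. A standalone sketch of the pattern, with made-up names:

package main

import (
	"fmt"
	"sync"
)

// kdfParams stands in for the real package-global KDF parameters.
var kdfParams = "strong"

var paramsOnce sync.Once

// useLowSecurityParams may be called from many parallel tests; sync.Once
// guarantees the global is written at most once, so there is no data race
// and no test can observe a half-applied override.
func useLowSecurityParams() {
	paramsOnce.Do(func() {
		kdfParams = "low"
	})
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			useLowSecurityParams()
		}()
	}
	wg.Wait()
	fmt.Println(kdfParams) // always "low", applied exactly once
}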
@@ -39,7 +41,7 @@ func TestBackend(_ testing.TB) backend.Backend { return mem.New() } -const TestChunkerPol = chunker.Pol(0x3DA3358B4DC173) +const testChunkerPol = chunker.Pol(0x3DA3358B4DC173) // TestRepositoryWithBackend returns a repository initialized with a test // password. If be is nil, an in-memory backend is used. A constant polynomial @@ -58,8 +60,11 @@ func TestRepositoryWithBackend(t testing.TB, be backend.Backend, version uint, o t.Fatalf("TestRepository(): new repo failed: %v", err) } - cfg := restic.TestCreateConfig(t, TestChunkerPol, version) - err = repo.init(context.TODO(), test.TestPassword, cfg) + if version == 0 { + version = restic.StableRepoVersion + } + pol := testChunkerPol + err = repo.Init(context.TODO(), version, test.TestPassword, &pol) if err != nil { t.Fatalf("TestRepository(): initialize repo failed: %v", err) } @@ -98,8 +103,15 @@ func TestRepositoryWithVersion(t testing.TB, version uint) restic.Repository { return TestRepositoryWithBackend(t, nil, version, opts) } +func TestFromFixture(t testing.TB, repoFixture string) (restic.Repository, func()) { + repodir, cleanup := test.Env(t, repoFixture) + repo := TestOpenLocal(t, repodir) + + return repo, cleanup +} + // TestOpenLocal opens a local repository. -func TestOpenLocal(t testing.TB, dir string) (r restic.Repository) { +func TestOpenLocal(t testing.TB, dir string) restic.Repository { var be backend.Backend be, err := local.Open(context.TODO(), local.Config{Path: dir, Connections: 2}) if err != nil { @@ -108,6 +120,10 @@ func TestOpenLocal(t testing.TB, dir string) (r restic.Repository) { be = retry.New(be, 3, nil, nil) + return TestOpenBackend(t, be) +} + +func TestOpenBackend(t testing.TB, be backend.Backend) restic.Repository { repo, err := New(be, Options{}) if err != nil { t.Fatal(err) diff --git a/internal/restic/config.go b/internal/restic/config.go index 67ee190bc..3fb61cc13 100644 --- a/internal/restic/config.go +++ b/internal/restic/config.go @@ -2,6 +2,7 @@ package restic import ( "context" + "sync" "testing" "github.com/restic/restic/internal/errors" @@ -50,29 +51,16 @@ func CreateConfig(version uint) (Config, error) { return cfg, nil } -// TestCreateConfig creates a config for use within tests. -func TestCreateConfig(t testing.TB, pol chunker.Pol, version uint) (cfg Config) { - cfg.ChunkerPolynomial = pol - - cfg.ID = NewRandomID().String() - if version == 0 { - version = StableRepoVersion - } - if version < MinRepoVersion || version > MaxRepoVersion { - t.Fatalf("version %d is out of range", version) - } - cfg.Version = version - - return cfg -} - var checkPolynomial = true +var checkPolynomialOnce sync.Once // TestDisableCheckPolynomial disables the check that the polynomial used for // the chunker. func TestDisableCheckPolynomial(t testing.TB) { t.Logf("disabling check of the chunker polynomial") - checkPolynomial = false + checkPolynomialOnce.Do(func() { + checkPolynomial = false + }) } // LoadConfig returns loads, checks and returns the config for a repository. 
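Before the diff moves on to internal/restic/node.go, a note on the PruneOptions type added in internal/repository/prune.go: MaxUnusedBytes is deliberately a callback from used bytes to tolerated unused bytes (the tests above pass e.g. func(used uint64) (unused uint64) { return used / 2 }), which keeps percentage handling out of the planner. A sketch of how a caller might derive that callback from a relative limit; maxUnusedBytes and maxUnusedPercent are hypothetical names, not part of this change:

package main

import "fmt"

// maxUnusedBytes converts a limit like "at most p% of the repository may be
// unused" into the func(used uint64) uint64 shape that PruneOptions expects.
// From unused <= p% of (used+unused) it follows that unused <= used*p/(100-p).
func maxUnusedBytes(maxUnusedPercent float64) func(used uint64) uint64 {
	return func(used uint64) uint64 {
		return uint64(maxUnusedPercent / (100 - maxUnusedPercent) * float64(used))
	}
}

func main() {
	limit := maxUnusedBytes(5)
	fmt.Println(limit(1e9)) // ~52.6 MB of unused data tolerated for 1 GB used
}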
diff --git a/internal/restic/node.go b/internal/restic/node.go index a0e658b9b..1e7e5d68e 100644 --- a/internal/restic/node.go +++ b/internal/restic/node.go @@ -84,7 +84,7 @@ type Node struct { User string `json:"user,omitempty"` Group string `json:"group,omitempty"` Inode uint64 `json:"inode,omitempty"` - DeviceID uint64 `json:"device_id,omitempty"` // device id of the file, stat.st_dev + DeviceID uint64 `json:"device_id,omitempty"` // device id of the file, stat.st_dev, only stored for hardlinks Size uint64 `json:"size,omitempty"` Links uint64 `json:"links,omitempty"` LinkTarget string `json:"linktarget,omitempty"` @@ -136,7 +136,7 @@ func (node Node) String() string { // NodeFromFileInfo returns a new node from the given path and FileInfo. It // returns the first error that is encountered, together with a node. -func NodeFromFileInfo(path string, fi os.FileInfo) (*Node, error) { +func NodeFromFileInfo(path string, fi os.FileInfo, ignoreXattrListError bool) (*Node, error) { mask := os.ModePerm | os.ModeType | os.ModeSetuid | os.ModeSetgid | os.ModeSticky node := &Node{ Path: path, @@ -150,7 +150,7 @@ func NodeFromFileInfo(path string, fi os.FileInfo) (*Node, error) { node.Size = uint64(fi.Size()) } - err := node.fillExtra(path, fi) + err := node.fillExtra(path, fi, ignoreXattrListError) return node, err } @@ -677,7 +677,7 @@ func lookupGroup(gid uint32) string { return group } -func (node *Node) fillExtra(path string, fi os.FileInfo) error { +func (node *Node) fillExtra(path string, fi os.FileInfo, ignoreXattrListError bool) error { stat, ok := toStatT(fi.Sys()) if !ok { // fill minimal info with current values for uid, gid @@ -726,10 +726,13 @@ func (node *Node) fillExtra(path string, fi os.FileInfo) error { return err } -func (node *Node) fillExtendedAttributes(path string) error { +func (node *Node) fillExtendedAttributes(path string, ignoreListError bool) error { xattrs, err := Listxattr(path) debug.Log("fillExtendedAttributes(%v) %v %v", path, xattrs, err) if err != nil { + if ignoreListError && IsListxattrPermissionError(err) { + return nil + } return err } diff --git a/internal/restic/node_aix.go b/internal/restic/node_aix.go index def46bd60..8ee9022c9 100644 --- a/internal/restic/node_aix.go +++ b/internal/restic/node_aix.go @@ -33,6 +33,10 @@ func Listxattr(path string) ([]string, error) { return nil, nil } +func IsListxattrPermissionError(_ error) bool { + return false +} + // Setxattr is a no-op on AIX. func Setxattr(path, name string, data []byte) error { return nil diff --git a/internal/restic/node_netbsd.go b/internal/restic/node_netbsd.go index 1a47299be..cf1fa36bd 100644 --- a/internal/restic/node_netbsd.go +++ b/internal/restic/node_netbsd.go @@ -23,6 +23,10 @@ func Listxattr(path string) ([]string, error) { return nil, nil } +func IsListxattrPermissionError(_ error) bool { + return false +} + // Setxattr is a no-op on netbsd. func Setxattr(path, name string, data []byte) error { return nil diff --git a/internal/restic/node_openbsd.go b/internal/restic/node_openbsd.go index e60eb9dc8..4f1c0dacb 100644 --- a/internal/restic/node_openbsd.go +++ b/internal/restic/node_openbsd.go @@ -23,6 +23,10 @@ func Listxattr(path string) ([]string, error) { return nil, nil } +func IsListxattrPermissionError(_ error) bool { + return false +} + // Setxattr is a no-op on openbsd. 
func Setxattr(path, name string, data []byte) error { return nil diff --git a/internal/restic/node_test.go b/internal/restic/node_test.go index d9fa02ac8..ea271faab 100644 --- a/internal/restic/node_test.go +++ b/internal/restic/node_test.go @@ -11,6 +11,7 @@ import ( "testing" "time" + "github.com/google/go-cmp/cmp" "github.com/restic/restic/internal/test" rtest "github.com/restic/restic/internal/test" ) @@ -31,7 +32,7 @@ func BenchmarkNodeFillUser(t *testing.B) { t.ResetTimer() for i := 0; i < t.N; i++ { - _, err := NodeFromFileInfo(path, fi) + _, err := NodeFromFileInfo(path, fi, false) rtest.OK(t, err) } @@ -55,7 +56,7 @@ func BenchmarkNodeFromFileInfo(t *testing.B) { t.ResetTimer() for i := 0; i < t.N; i++ { - _, err := NodeFromFileInfo(path, fi) + _, err := NodeFromFileInfo(path, fi, false) if err != nil { t.Fatal(err) } @@ -227,8 +228,11 @@ func TestNodeRestoreAt(t *testing.T) { fi, err := os.Lstat(nodePath) rtest.OK(t, err) - n2, err := NodeFromFileInfo(nodePath, fi) + n2, err := NodeFromFileInfo(nodePath, fi, false) rtest.OK(t, err) + n3, err := NodeFromFileInfo(nodePath, fi, true) + rtest.OK(t, err) + rtest.Assert(t, n2.Equals(*n3), "unexpected node info mismatch %v", cmp.Diff(n2, n3)) rtest.Assert(t, test.Name == n2.Name, "%v: name doesn't match (%v != %v)", test.Type, test.Name, n2.Name) diff --git a/internal/restic/node_unix_test.go b/internal/restic/node_unix_test.go index 374326bf7..9ea7b1725 100644 --- a/internal/restic/node_unix_test.go +++ b/internal/restic/node_unix_test.go @@ -128,7 +128,7 @@ func TestNodeFromFileInfo(t *testing.T) { return } - node, err := NodeFromFileInfo(test.filename, fi) + node, err := NodeFromFileInfo(test.filename, fi, false) if err != nil { t.Fatal(err) } diff --git a/internal/restic/node_windows.go b/internal/restic/node_windows.go index f4797c0d7..043a05091 100644 --- a/internal/restic/node_windows.go +++ b/internal/restic/node_windows.go @@ -81,6 +81,10 @@ func Listxattr(path string) ([]string, error) { return nil, nil } +func IsListxattrPermissionError(_ error) bool { + return false +} + // Setxattr associates name and data together as an attribute of path. func Setxattr(path, name string, data []byte) error { return nil diff --git a/internal/restic/node_windows_test.go b/internal/restic/node_windows_test.go index 5fd1fe376..57fc51e07 100644 --- a/internal/restic/node_windows_test.go +++ b/internal/restic/node_windows_test.go @@ -222,7 +222,7 @@ func restoreAndGetNode(t *testing.T, tempDir string, testNode Node, warningExpec fi, err := os.Lstat(testPath) test.OK(t, errors.Wrapf(err, "Could not Lstat for path: %s", testPath)) - nodeFromFileInfo, err := NodeFromFileInfo(testPath, fi) + nodeFromFileInfo, err := NodeFromFileInfo(testPath, fi, false) test.OK(t, errors.Wrapf(err, "Could not get NodeFromFileInfo for path: %s", testPath)) return testPath, nodeFromFileInfo diff --git a/internal/restic/node_xattr.go b/internal/restic/node_xattr.go index 0b2d5d552..8b080e74f 100644 --- a/internal/restic/node_xattr.go +++ b/internal/restic/node_xattr.go @@ -25,6 +25,14 @@ func Listxattr(path string) ([]string, error) { return l, handleXattrErr(err) } +func IsListxattrPermissionError(err error) bool { + var xerr *xattr.Error + if errors.As(err, &xerr) { + return xerr.Op == "xattr.list" && errors.Is(xerr.Err, os.ErrPermission) + } + return false +} + // Setxattr associates name and data together as an attribute of path. 
func Setxattr(path, name string, data []byte) error { return handleXattrErr(xattr.LSet(path, name, data)) diff --git a/internal/restic/node_xattr_test.go b/internal/restic/node_xattr_test.go new file mode 100644 index 000000000..5ce77bd28 --- /dev/null +++ b/internal/restic/node_xattr_test.go @@ -0,0 +1,28 @@ +//go:build darwin || freebsd || linux || solaris +// +build darwin freebsd linux solaris + +package restic + +import ( + "os" + "testing" + + "github.com/pkg/xattr" + rtest "github.com/restic/restic/internal/test" +) + +func TestIsListxattrPermissionError(t *testing.T) { + xerr := &xattr.Error{ + Op: "xattr.list", + Name: "test", + Err: os.ErrPermission, + } + err := handleXattrErr(xerr) + rtest.Assert(t, err != nil, "missing error") + rtest.Assert(t, IsListxattrPermissionError(err), "expected IsListxattrPermissionError to return true for %v", err) + + xerr.Err = os.ErrNotExist + err = handleXattrErr(xerr) + rtest.Assert(t, err != nil, "missing error") + rtest.Assert(t, !IsListxattrPermissionError(err), "expected IsListxattrPermissionError to return false for %v", err) +} diff --git a/internal/restic/repository.go b/internal/restic/repository.go index 66cc22ea9..7a3389e00 100644 --- a/internal/restic/repository.go +++ b/internal/restic/repository.go @@ -26,6 +26,7 @@ type Repository interface { Index() MasterIndex LoadIndex(context.Context, *progress.Counter) error + ClearIndex() SetIndex(MasterIndex) error LookupBlobSize(ID, BlobType) (uint, bool) @@ -102,8 +103,8 @@ type MasterIndex interface { Lookup(BlobHandle) []PackedBlob // Each runs fn on all blobs known to the index. When the context is cancelled, - // the index iteration return immediately. This blocks any modification of the index. - Each(ctx context.Context, fn func(PackedBlob)) + // the index iteration returns immediately with ctx.Err(). This blocks any modification of the index. + Each(ctx context.Context, fn func(PackedBlob)) error ListPacks(ctx context.Context, packs IDSet) <-chan PackBlobs Save(ctx context.Context, repo Repository, excludePacks IDSet, extraObsolete IDs, opts MasterIndexSaveOpts) error diff --git a/internal/restic/snapshot.go b/internal/restic/snapshot.go index 8cf651d96..39ed80627 100644 --- a/internal/restic/snapshot.go +++ b/internal/restic/snapshot.go @@ -25,11 +25,31 @@ type Snapshot struct { Tags []string `json:"tags,omitempty"` Original *ID `json:"original,omitempty"` - ProgramVersion string `json:"program_version,omitempty"` + ProgramVersion string `json:"program_version,omitempty"` + Summary *SnapshotSummary `json:"summary,omitempty"` id *ID // plaintext ID, used during restore } +type SnapshotSummary struct { + BackupStart time.Time `json:"backup_start"` + BackupEnd time.Time `json:"backup_end"` + + // statistics from the backup json output + FilesNew uint `json:"files_new"` + FilesChanged uint `json:"files_changed"` + FilesUnmodified uint `json:"files_unmodified"` + DirsNew uint `json:"dirs_new"` + DirsChanged uint `json:"dirs_changed"` + DirsUnmodified uint `json:"dirs_unmodified"` + DataBlobs int `json:"data_blobs"` + TreeBlobs int `json:"tree_blobs"` + DataAdded uint64 `json:"data_added"` + DataAddedPacked uint64 `json:"data_added_packed"` + TotalFilesProcessed uint `json:"total_files_processed"` + TotalBytesProcessed uint64 `json:"total_bytes_processed"` +} + // NewSnapshot returns an initialized snapshot struct for the current user and // time. 
func NewSnapshot(paths []string, tags []string, hostname string, time time.Time) (*Snapshot, error) { diff --git a/internal/restic/testing_test.go b/internal/restic/testing_test.go index ae8f8dd34..0a0c43892 100644 --- a/internal/restic/testing_test.go +++ b/internal/restic/testing_test.go @@ -45,7 +45,7 @@ func TestCreateSnapshot(t *testing.T) { t.Fatalf("snapshot has zero tree ID") } - checker.TestCheckRepo(t, repo) + checker.TestCheckRepo(t, repo, false) } func BenchmarkTestCreateSnapshot(t *testing.B) { diff --git a/internal/restic/tree_test.go b/internal/restic/tree_test.go index da674eb1c..67ecec897 100644 --- a/internal/restic/tree_test.go +++ b/internal/restic/tree_test.go @@ -86,7 +86,7 @@ func TestNodeComparison(t *testing.T) { fi, err := os.Lstat("tree_test.go") rtest.OK(t, err) - node, err := restic.NodeFromFileInfo("tree_test.go", fi) + node, err := restic.NodeFromFileInfo("tree_test.go", fi, false) rtest.OK(t, err) n2 := *node @@ -127,7 +127,7 @@ func TestTreeEqualSerialization(t *testing.T) { for _, fn := range files[:i] { fi, err := os.Lstat(fn) rtest.OK(t, err) - node, err := restic.NodeFromFileInfo(fn, fi) + node, err := restic.NodeFromFileInfo(fn, fi, false) rtest.OK(t, err) rtest.OK(t, tree.Insert(node)) diff --git a/internal/restorer/doc.go b/internal/restorer/doc.go index 8d68d7161..e230f23f0 100644 --- a/internal/restorer/doc.go +++ b/internal/restorer/doc.go @@ -18,7 +18,7 @@ // // Implementation does not guarantee order in which blobs are written to the // target files and, for example, the last blob of a file can be written to the -// file before any of the preceeding file blobs. It is therefore possible to +// file before any of the preceding file blobs. It is therefore possible to // have gaps in the data written to the target files if restore fails or // interrupted by the user. package restorer diff --git a/internal/restorer/restorer_test.go b/internal/restorer/restorer_test.go index 5742d7663..757a317b2 100644 --- a/internal/restorer/restorer_test.go +++ b/internal/restorer/restorer_test.go @@ -858,7 +858,7 @@ func TestRestorerSparseFiles(t *testing.T) { rtest.OK(t, err) arch := archiver.New(repo, target, archiver.Options{}) - sn, _, err := arch.Snapshot(context.Background(), []string{"/zeros"}, + sn, _, _, err := arch.Snapshot(context.Background(), []string{"/zeros"}, archiver.SnapshotOptions{}) rtest.OK(t, err) diff --git a/internal/test/helpers.go b/internal/test/helpers.go index 242da6079..3387d36df 100644 --- a/internal/test/helpers.go +++ b/internal/test/helpers.go @@ -5,6 +5,7 @@ import ( "compress/gzip" "fmt" "io" + "math/rand" "os" "os/exec" "path/filepath" @@ -12,8 +13,6 @@ import ( "testing" "github.com/restic/restic/internal/errors" - - mrand "math/rand" ) // Assert fails the test if the condition is false. @@ -71,7 +70,7 @@ func Equals(tb testing.TB, exp, act interface{}, msgs ...string) { func Random(seed, count int) []byte { p := make([]byte, count) - rnd := mrand.New(mrand.NewSource(int64(seed))) + rnd := rand.New(rand.NewSource(int64(seed))) for i := 0; i < len(p); i += 8 { val := rnd.Int63() diff --git a/internal/ui/backup/json.go b/internal/ui/backup/json.go index 10f0e91fa..a14c7ccec 100644 --- a/internal/ui/backup/json.go +++ b/internal/ui/backup/json.go @@ -163,7 +163,7 @@ func (b *JSONProgress) ReportTotal(start time.Time, s archiver.ScanStats) { } // Finish prints the finishing messages. 
diff --git a/internal/ui/backup/json.go b/internal/ui/backup/json.go
index 10f0e91fa..a14c7ccec 100644
--- a/internal/ui/backup/json.go
+++ b/internal/ui/backup/json.go
@@ -163,7 +163,7 @@ func (b *JSONProgress) ReportTotal(start time.Time, s archiver.ScanStats) {
 }
 
 // Finish prints the finishing messages.
-func (b *JSONProgress) Finish(snapshotID restic.ID, start time.Time, summary *Summary, dryRun bool) {
+func (b *JSONProgress) Finish(snapshotID restic.ID, start time.Time, summary *archiver.Summary, dryRun bool) {
 	b.print(summaryOutput{
 		MessageType: "summary",
 		FilesNew:    summary.Files.New,
@@ -175,6 +175,7 @@ func (b *JSONProgress) Finish(snapshotID restic.ID, start time.Time, summary *Su
 		DataBlobs:           summary.ItemStats.DataBlobs,
 		TreeBlobs:           summary.ItemStats.TreeBlobs,
 		DataAdded:           summary.ItemStats.DataSize + summary.ItemStats.TreeSize,
+		DataAddedPacked:     summary.ItemStats.DataSizeInRepo + summary.ItemStats.TreeSizeInRepo,
 		TotalFilesProcessed: summary.Files.New + summary.Files.Changed + summary.Files.Unchanged,
 		TotalBytesProcessed: summary.ProcessedBytes,
 		TotalDuration:       time.Since(start).Seconds(),
@@ -230,6 +231,7 @@ type summaryOutput struct {
 	DataBlobs           int     `json:"data_blobs"`
 	TreeBlobs           int     `json:"tree_blobs"`
 	DataAdded           uint64  `json:"data_added"`
+	DataAddedPacked     uint64  `json:"data_added_packed"`
 	TotalFilesProcessed uint    `json:"total_files_processed"`
 	TotalBytesProcessed uint64  `json:"total_bytes_processed"`
 	TotalDuration       float64 `json:"total_duration"` // in seconds
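With `data_added_packed`, the JSON summary now distinguishes the logical amount of new data (`data_added`, built from `DataSize` and `TreeSize`) from what that data occupies in the repository (built from the `*InRepo` fields, which suggests the size after packing, i.e. compression and encryption). A sketch of how an external consumer of `restic backup --json` might read the new field; the struct is hypothetical and mirrors only a subset of the json tags in `summaryOutput` above:

    // backupSummary is a consumer-side stand-in, not a restic type.
    type backupSummary struct {
    	MessageType     string `json:"message_type"`      // "summary"
    	DataAdded       uint64 `json:"data_added"`        // logical bytes added
    	DataAddedPacked uint64 `json:"data_added_packed"` // bytes as stored (assumed: packed size in the repo)
    }

    // parseSummary decodes one line of --json output; requires "encoding/json".
    func parseSummary(line []byte) (backupSummary, error) {
    	var s backupSummary
    	err := json.Unmarshal(line, &s)
    	return s, err
    }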
diff --git a/internal/ui/backup/progress.go b/internal/ui/backup/progress.go
index da0d401a3..1d494bf14 100644
--- a/internal/ui/backup/progress.go
+++ b/internal/ui/backup/progress.go
@@ -17,7 +17,7 @@ type ProgressPrinter interface {
 	ScannerError(item string, err error) error
 	CompleteItem(messageType string, item string, s archiver.ItemStats, d time.Duration)
 	ReportTotal(start time.Time, s archiver.ScanStats)
-	Finish(snapshotID restic.ID, start time.Time, summary *Summary, dryRun bool)
+	Finish(snapshotID restic.ID, start time.Time, summary *archiver.Summary, dryRun bool)
 	Reset()
 
 	P(msg string, args ...interface{})
@@ -28,16 +28,6 @@ type Counter struct {
 	Files, Dirs, Bytes uint64
 }
 
-type Summary struct {
-	Files, Dirs struct {
-		New       uint
-		Changed   uint
-		Unchanged uint
-	}
-	ProcessedBytes uint64
-	archiver.ItemStats
-}
-
 // Progress reports progress for the `backup` command.
 type Progress struct {
 	progress.Updater
@@ -52,7 +42,6 @@ type Progress struct {
 	processed, total Counter
 	errors           uint
 
-	summary Summary
 	printer ProgressPrinter
 }
@@ -126,16 +115,6 @@ func (p *Progress) CompleteBlob(bytes uint64) {
 
 // CompleteItem is the status callback function for the archiver when a
 // file/dir has been saved successfully.
 func (p *Progress) CompleteItem(item string, previous, current *restic.Node, s archiver.ItemStats, d time.Duration) {
-	p.mu.Lock()
-	p.summary.ItemStats.Add(s)
-
-	// for the last item "/", current is nil
-	if current != nil {
-		p.summary.ProcessedBytes += current.Size
-	}
-
-	p.mu.Unlock()
-
 	if current == nil {
 		// error occurred, tell the status display to remove the line
 		p.mu.Lock()
@@ -153,21 +132,10 @@ func (p *Progress) CompleteItem(item string, previous, current *restic.Node, s a
 		switch {
 		case previous == nil:
 			p.printer.CompleteItem("dir new", item, s, d)
-			p.mu.Lock()
-			p.summary.Dirs.New++
-			p.mu.Unlock()
-
 		case previous.Equals(*current):
 			p.printer.CompleteItem("dir unchanged", item, s, d)
-			p.mu.Lock()
-			p.summary.Dirs.Unchanged++
-			p.mu.Unlock()
-
 		default:
 			p.printer.CompleteItem("dir modified", item, s, d)
-			p.mu.Lock()
-			p.summary.Dirs.Changed++
-			p.mu.Unlock()
 		}
 
 	case "file":
@@ -179,21 +147,10 @@ func (p *Progress) CompleteItem(item string, previous, current *restic.Node, s a
 		switch {
 		case previous == nil:
 			p.printer.CompleteItem("file new", item, s, d)
-			p.mu.Lock()
-			p.summary.Files.New++
-			p.mu.Unlock()
-
 		case previous.Equals(*current):
 			p.printer.CompleteItem("file unchanged", item, s, d)
-			p.mu.Lock()
-			p.summary.Files.Unchanged++
-			p.mu.Unlock()
-
 		default:
 			p.printer.CompleteItem("file modified", item, s, d)
-			p.mu.Lock()
-			p.summary.Files.Changed++
-			p.mu.Unlock()
 		}
 	}
 }
@@ -213,8 +170,8 @@ func (p *Progress) ReportTotal(item string, s archiver.ScanStats) {
 }
 
 // Finish prints the finishing messages.
-func (p *Progress) Finish(snapshotID restic.ID, dryrun bool) {
+func (p *Progress) Finish(snapshotID restic.ID, summary *archiver.Summary, dryrun bool) {
 	// wait for the status update goroutine to shut down
 	p.Updater.Done()
-	p.printer.Finish(snapshotID, p.start, &p.summary, dryrun)
+	p.printer.Finish(snapshotID, p.start, summary, dryrun)
 }
diff --git a/internal/ui/backup/progress_test.go b/internal/ui/backup/progress_test.go
index 79a56c91e..6b242a0f3 100644
--- a/internal/ui/backup/progress_test.go
+++ b/internal/ui/backup/progress_test.go
@@ -33,11 +33,10 @@ func (p *mockPrinter) CompleteItem(messageType string, _ string, _ archiver.Item
 }
 
 func (p *mockPrinter) ReportTotal(_ time.Time, _ archiver.ScanStats) {}
-func (p *mockPrinter) Finish(id restic.ID, _ time.Time, summary *Summary, _ bool) {
+func (p *mockPrinter) Finish(id restic.ID, _ time.Time, _ *archiver.Summary, _ bool) {
 	p.Lock()
 	defer p.Unlock()
 
-	_ = *summary // Should not be nil.
 	p.id = id
 }
 
@@ -64,7 +63,7 @@ func TestProgress(t *testing.T) {
 	time.Sleep(10 * time.Millisecond)
 
 	id := restic.NewRandomID()
-	prog.Finish(id, false)
+	prog.Finish(id, nil, false)
 
 	if !prnt.dirUnchanged {
 		t.Error(`"dir unchanged" event not seen`)
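The net effect of the progress.go changes: `Progress` no longer aggregates its own `Summary` under a mutex; the archiver owns the statistics and the caller threads them into `Finish`. A rough sketch of how the backup command might wire this up — function and variable names are assumptions, since the command-side changes are not part of this excerpt:

    // runBackup sketches the new flow: the *archiver.Summary returned by
    // Snapshot (rather than one counted inside Progress) is handed to Finish.
    func runBackup(ctx context.Context, arch *archiver.Archiver, progress *backup.Progress, targets []string, dryRun bool) error {
    	sn, _, summary, err := arch.Snapshot(ctx, targets, archiver.SnapshotOptions{})
    	if err != nil {
    		return err
    	}
    	progress.Finish(*sn.ID(), summary, dryRun)
    	return nil
    }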
diff --git a/internal/ui/backup/text.go b/internal/ui/backup/text.go
index 215982cd4..00d025e51 100644
--- a/internal/ui/backup/text.go
+++ b/internal/ui/backup/text.go
@@ -126,7 +126,7 @@ func (b *TextProgress) Reset() {
 }
 
 // Finish prints the finishing messages.
-func (b *TextProgress) Finish(_ restic.ID, start time.Time, summary *Summary, dryRun bool) {
+func (b *TextProgress) Finish(_ restic.ID, start time.Time, summary *archiver.Summary, dryRun bool) {
 	b.P("\n")
 	b.P("Files:       %5d new, %5d changed, %5d unmodified\n", summary.Files.New, summary.Files.Changed, summary.Files.Unchanged)
 	b.P("Dirs:        %5d new, %5d changed, %5d unmodified\n", summary.Dirs.New, summary.Dirs.Changed, summary.Dirs.Unchanged)
diff --git a/internal/ui/progress/printer.go b/internal/ui/progress/printer.go
index a671621e9..a2bc4c4b5 100644
--- a/internal/ui/progress/printer.go
+++ b/internal/ui/progress/printer.go
@@ -1,5 +1,7 @@
 package progress
 
+import "testing"
+
 // A Printer can return a new counter or print messages
 // at different log levels.
 // It must be safe to call its methods from concurrent goroutines.
@@ -28,3 +30,36 @@ func (*NoopPrinter) P(_ string, _ ...interface{}) {}
 func (*NoopPrinter) V(_ string, _ ...interface{}) {}
 
 func (*NoopPrinter) VV(_ string, _ ...interface{}) {}
+
+// TestPrinter prints messages during testing
+type TestPrinter struct {
+	t testing.TB
+}
+
+func NewTestPrinter(t testing.TB) *TestPrinter {
+	return &TestPrinter{
+		t: t,
+	}
+}
+
+var _ Printer = (*TestPrinter)(nil)
+
+func (p *TestPrinter) NewCounter(_ string) *Counter {
+	return nil
+}
+
+func (p *TestPrinter) E(msg string, args ...interface{}) {
+	p.t.Logf("error: "+msg, args...)
+}
+
+func (p *TestPrinter) P(msg string, args ...interface{}) {
+	p.t.Logf("print: "+msg, args...)
+}
+
+func (p *TestPrinter) V(msg string, args ...interface{}) {
+	p.t.Logf("verbose: "+msg, args...)
+}
+
+func (p *TestPrinter) VV(msg string, args ...interface{}) {
+	p.t.Logf("verbose2: "+msg, args...)
+}
diff --git a/internal/walker/walker_test.go b/internal/walker/walker_test.go
index 0f0009107..75f80e57f 100644
--- a/internal/walker/walker_test.go
+++ b/internal/walker/walker_test.go
@@ -13,7 +13,7 @@ import (
 // TestTree is used to construct a list of trees for testing the walker.
 type TestTree map[string]interface{}
 
-// TestNode is used to test the walker.
+// TestFile is used to test the walker.
 type TestFile struct {
 	Size uint64
 }
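The new `TestPrinter` routes all printer output through `t.Logf`, so messages only surface for failing tests or under `go test -v`. A small usage sketch; the test name and messages are illustrative:

    // TestWithPrinter shows TestPrinter satisfying the Printer interface and
    // prefixing each message with its log level.
    func TestWithPrinter(t *testing.T) {
    	var p progress.Printer = progress.NewTestPrinter(t)
    	p.P("processed %d files", 42) // logged as "print: processed 42 files"
    	p.V("saving pack %v", "abc")  // logged as "verbose: saving pack abc"
    }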