forked from TrueCloudLab/restic
Merge branch 'master' into windows-securitydesc
This commit is contained in commit 3f76b902e5
162 changed files with 4553 additions and 2518 deletions
.github/workflows/docker.yml (vendored; 8 changed lines)

@@ -25,7 +25,7 @@ jobs:
         uses: actions/checkout@v4

       - name: Log in to the Container registry
-        uses: docker/login-action@3d58c274f17dffee475a5520cbe67f0a882c4dbb
+        uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20
         with:
           registry: ${{ env.REGISTRY }}
           username: ${{ github.actor }}
@@ -33,7 +33,7 @@ jobs:

       - name: Extract metadata (tags, labels) for Docker
         id: meta
-        uses: docker/metadata-action@9ec57ed1fcdbf14dcef7dfbe97b2010124a938b7
+        uses: docker/metadata-action@8e5442c4ef9f78752691e2d8f8d19755c6f78e81
         with:
           images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
           tags: |
@@ -45,7 +45,7 @@ jobs:
         uses: docker/setup-qemu-action@68827325e0b33c7199eb31dd4e31fbe9023e06e3

       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@f95db51fddba0c2d1ec667646a06c2ce06100226
+        uses: docker/setup-buildx-action@2b51285047da1547ffb1b2203d8be4c0af6b1f20

       - name: Ensure consistent binaries
         run: |
@@ -55,7 +55,7 @@ jobs:
         if: github.ref != 'refs/heads/master'

       - name: Build and push Docker image
-        uses: docker/build-push-action@f2a1d5e99d037542a71f64918e516c093c6f3fc4
+        uses: docker/build-push-action@2cdde995de11925a030ce8070c3d77a52ffcf1c0
         with:
           push: true
           context: .
.github/workflows/tests.yml (vendored; 16 changed lines)

@@ -74,7 +74,7 @@ jobs:
       - name: Get programs (Linux/macOS)
         run: |
           echo "build Go tools"
-          go install github.com/restic/rest-server/cmd/rest-server@latest
+          go install github.com/restic/rest-server/cmd/rest-server@master

           echo "install minio server"
           mkdir $HOME/bin
@@ -106,7 +106,7 @@ jobs:
           $ProgressPreference = 'SilentlyContinue'

           echo "build Go tools"
-          go install github.com/restic/rest-server/...
+          go install github.com/restic/rest-server/cmd/rest-server@master

           echo "install minio server"
           mkdir $Env:USERPROFILE/bin
@@ -247,6 +247,10 @@ jobs:
   lint:
     name: lint
     runs-on: ubuntu-latest
+    permissions:
+      contents: read
+      # allow annotating code in the PR
+      checks: write
     steps:
       - name: Set up Go ${{ env.latest_go }}
         uses: actions/setup-go@v5
@@ -257,10 +261,10 @@ jobs:
         uses: actions/checkout@v4

       - name: golangci-lint
-        uses: golangci/golangci-lint-action@v3
+        uses: golangci/golangci-lint-action@v4
         with:
           # Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version.
-          version: v1.56.1
+          version: v1.57.1
           args: --verbose --timeout 5m

       # only run golangci-lint for pull requests, otherwise ALL hints get
@@ -298,7 +302,7 @@ jobs:

       - name: Docker meta
         id: meta
-        uses: docker/metadata-action@v4
+        uses: docker/metadata-action@v5
         with:
           # list of Docker images to use as base name for tags
           images: |
@@ -321,7 +325,7 @@ jobs:

       - name: Build and push
         id: docker_build
-        uses: docker/build-push-action@v4
+        uses: docker/build-push-action@v5
         with:
           push: false
           context: .
@@ -38,6 +38,8 @@ linters:
   # ensure that http response bodies are closed
   - bodyclose

+  - importas
+
 issues:
   # don't use the default exclude rules, this hides (among others) ignored
   # errors from Close() calls
@@ -58,4 +60,10 @@ issues:
   exclude-rules:
     # revive: ignore unused parameters in tests
    - path: (_test\.go|testing\.go|backend/.*/tests\.go)
      text: "unused-parameter:"
+
+linters-settings:
+  importas:
+    alias:
+      - pkg: github.com/restic/restic/internal/test
+        alias: rtest
@@ -3488,7 +3488,7 @@ restic users. The changes are ordered by importance.

 NOTE: This new implementation does not guarantee order in which blobs are
 written to the target files and, for example, the last blob of a file can be
-written to the file before any of the preceeding file blobs. It is therefore
+written to the file before any of the preceding file blobs. It is therefore
 possible to have gaps in the data written to the target files if restore fails
 or interrupted by the user.
README.md (18 changed lines)

@@ -10,8 +10,7 @@ For detailed usage and installation instructions check out the [documentation](h

 You can ask questions in our [Discourse forum](https://forum.restic.net).

-Quick start
------------
+## Quick start

 Once you've [installed](https://restic.readthedocs.io/en/latest/020_installation.html) restic, start
 off with creating a repository for your backups:
@@ -59,7 +58,7 @@ Therefore, restic supports the following backends for storing backups natively:
 Restic is a program that does backups right and was designed with the
 following principles in mind:

-- **Easy:** Doing backups should be a frictionless process, otherwise
+- **Easy**: Doing backups should be a frictionless process, otherwise
   you might be tempted to skip it. Restic should be easy to configure
   and use, so that, in the event of a data loss, you can just restore
   it. Likewise, restoring data should not be complicated.
@@ -92,20 +91,17 @@ reproduce a byte identical version from the source code for that
 release. Instructions on how to do that are contained in the
 [builder repository](https://github.com/restic/builder).

-News
-----
+## News

-You can follow the restic project on Mastodon [@resticbackup](https://fosstodon.org/@restic) or by subscribing to
+You can follow the restic project on Mastodon [@resticbackup](https://fosstodon.org/@restic) or subscribe to
 the [project blog](https://restic.net/blog/).

-License
--------
+## License

 Restic is licensed under [BSD 2-Clause License](https://opensource.org/licenses/BSD-2-Clause). You can find the
-complete text in [``LICENSE``](LICENSE).
+complete text in [`LICENSE`](LICENSE).

-Sponsorship
------------
+## Sponsorship

 Backend integration tests for Google Cloud Storage and Microsoft Azure Blob
 Storage are sponsored by [AppsCode](https://appscode.com)!
@@ -10,7 +10,7 @@ https://github.com/restic/restic/issues/2244

 NOTE: This new implementation does not guarantee order in which blobs
 are written to the target files and, for example, the last blob of a
-file can be written to the file before any of the preceeding file blobs.
+file can be written to the file before any of the preceding file blobs.
 It is therefore possible to have gaps in the data written to the target
 files if restore fails or interrupted by the user.
changelog/unreleased/issue-3600 (new file; 11 lines)

Bugfix: `backup` works if xattrs above the backup target cannot be read

When backup targets are specified using absolute paths, then `backup` also
includes information about the parent folders of the backup targets in the
snapshot. If the extended attributes for some of these folders could not be
read due to missing permissions, this caused the backup to fail. This has been
fixed.

https://github.com/restic/restic/issues/3600
https://github.com/restic/restic/pull/4668
https://forum.restic.net/t/parent-directories-above-the-snapshot-source-path-fatal-error-permission-denied/7216
changelog/unreleased/issue-4287 (new file; 14 lines)

Enhancement: Support connection to rest-server using unix socket

Restic now supports connecting to rest-server using a unix socket for
rest-server version 0.13.0 or later.

This allows running restic as follows:

```
rest-server --listen unix:/tmp/rest.socket --data /path/to/data &
restic -r rest:http+unix:///tmp/rest.socket:/my_backup_repo/ [...]
```

https://github.com/restic/restic/issues/4287
https://github.com/restic/restic/pull/4655
changelog/unreleased/issue-4601 (new file; 9 lines)

Enhancement: Add support for feature flags

Restic now supports feature flags that can be used to enable and disable
experimental features. The flags can be set using the environment variable
`RESTIC_FEATURES`. To get a list of currently supported feature flags,
run the `features` command.

https://github.com/restic/restic/issues/4601
https://github.com/restic/restic/pull/4666
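A quick illustration of the two pieces above; `featureA` and `featureB` are the placeholder names used in the `features` command help, not real flags:

```
# list the feature flags supported by this build
restic features

# enable/disable specific flags for a single run
RESTIC_FEATURES=featureA=true,featureB=false restic backup /data
```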
changelog/unreleased/issue-4602 (new file; 23 lines)

Change: Deprecate legacy index format and s3legacy layout

Support for the legacy index format used by restic before version 0.2.0 has
been deprecated and will be removed in the next minor restic version. You can
use `restic repair index` to update the index to the current format.

It is possible to temporarily reenable support for the legacy index format by
setting the environment variable
`RESTIC_FEATURES=deprecate-legacy-index=false`. Note that this feature flag
will be removed in the next minor restic version.

Support for the s3legacy layout used for the S3 backend before restic 0.7.0
has been deprecated and will be removed in the next minor restic version. You
can migrate your S3 repository using `RESTIC_FEATURES=deprecate-s3-legacy-layout=false restic migrate s3_layout`.

It is possible to temporarily reenable support for the legacy s3layout by
setting the environment variable
`RESTIC_FEATURES=deprecate-s3-legacy-layout=false`. Note that this feature flag
will be removed in the next minor restic version.

https://github.com/restic/restic/issues/4602
https://github.com/restic/restic/pull/4724
https://github.com/restic/restic/pull/4743
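The two remedies named in this entry, as concrete commands (repository options omitted):

```
# upgrade a repository still using the legacy index format
restic repair index

# migrate an S3 repository away from the s3legacy layout
RESTIC_FEATURES=deprecate-s3-legacy-layout=false restic migrate s3_layout
```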
changelog/unreleased/issue-4733 (new file; 10 lines)

Enhancement: Allow specifying `--host` via environment variable

Restic commands that operate on snapshots, such as `restic backup` and
`restic snapshots`, support the `--host` flag to specify the hostname for
grouping snapshots. They now permit selecting the hostname via the
environment variable `RESTIC_HOST`. `--host` still takes precedence over the
environment variable.

https://github.com/restic/restic/issues/4733
https://github.com/restic/restic/pull/4734
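A minimal sketch of the precedence rule described above (hostnames and paths are placeholders):

```
# hostname taken from the environment
RESTIC_HOST=build-server restic backup /data

# an explicit --host still wins over RESTIC_HOST
RESTIC_HOST=build-server restic backup --host laptop /data
```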
changelog/unreleased/issue-4744 (new file; 9 lines)

Change: Include full key ID in JSON output of `key list`

We have changed the JSON output of the `key list` command to include the full
key ID instead of just a shortened version, as the latter can be ambiguous
in some rare cases. To derive the short ID, please truncate the full ID down to
eight characters.

https://github.com/restic/restic/issues/4744
https://github.com/restic/restic/pull/4745
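For scripts that relied on the short form, truncation is trivial; a sketch assuming the `id` field name shown in the `cmd_key_list.go` hunk further down:

```
restic key list --json | jq -r '.[].id[:8]'
```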
changelog/unreleased/issue-4760 (new file; 8 lines)

Bugfix: Fix possible error on concurrent cache cleanup

If multiple restic processes concurrently cleaned up no longer existing files
from the cache, this could cause some of the processes to fail with a `no such
file or directory` error. This has been fixed.

https://github.com/restic/restic/issues/4760
https://github.com/restic/restic/pull/4761
changelog/unreleased/issue-693 (new file; 12 lines)

Enhancement: Support printing snapshot size in `snapshots` command

The `snapshots` command now supports printing the snapshot size for snapshots
created using this or a future restic version. For this, the `backup` command
now stores the backup summary statistics in the snapshot.

The text output of the `snapshots` command only shows the snapshot size. The
other statistics are only included in the JSON output. To inspect these
statistics use `restic snapshots --json` or `restic cat snapshot <snapshotID>`.

https://github.com/restic/restic/issues/693
https://github.com/restic/restic/pull/4705
changelog/unreleased/pull-3067 (new file; 22 lines)

Enhancement: Add options to configure Windows Shadow Copy Service

Restic always used a 120-second timeout and unconditionally created VSS
snapshots for all volume mount points on disk. This behavior can now be
fine-tuned with new options, such as excluding specific volumes and mount
points or completely disabling automatic snapshotting of volume mount points.

For example:

    restic backup --use-fs-snapshot -o vss.timeout=5m -o vss.exclude-all-mount-points=true

changes the timeout to five minutes and disables snapshotting of mount points on all volumes, and

    restic backup --use-fs-snapshot -o vss.exclude-volumes="d:\;c:\mnt\;\\?\Volume{e2e0315d-9066-4f97-8343-eb5659b35762}"

excludes drive `d:`, mount point `c:\mnt`, and a specific volume from VSS snapshotting.

    restic backup --use-fs-snapshot -o vss.provider={b5946137-7b9f-4925-af80-51abd60b20d5}

uses 'Microsoft Software Shadow Copy provider 1.0' instead of the default provider.

https://github.com/restic/restic/pull/3067
changelog/unreleased/pull-4006 (new file; 16 lines)

Enhancement: (alpha) Store deviceID only for hardlinks

Set `RESTIC_FEATURES=device-id-for-hardlinks` to enable this alpha feature.
The feature flag will be removed after repository format version 3 becomes
available or be replaced with a different solution.

When creating backups from a filesystem snapshot, for example created using
btrfs subvolumes, the deviceID of the filesystem changes compared to previous
snapshots. This prevented restic from deduplicating the directory metadata of
a snapshot.

When this alpha feature is enabled, the deviceID is only stored for
hardlinks. This significantly reduces the metadata duplication for most
backups.

https://github.com/restic/restic/pull/4006
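Enabling the alpha flag for a single run might look like this; the entry gives the flag without a value, so treat the exact syntax as an assumption (the path is a placeholder):

```
RESTIC_FEATURES=device-id-for-hardlinks restic backup /snapshots/current
```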
@@ -4,4 +4,5 @@ If files on different devices had the same inode id, then the `stats` command
 did not correctly calculate the snapshot size. This has been fixed.

 https://github.com/restic/restic/pull/4503
+https://github.com/restic/restic/pull/4006
 https://forum.restic.net/t/possible-bug-in-stats/6461/8
changelog/unreleased/pull-4709 (new file; 10 lines)

Bugfix: Correct `--no-lock` handling of `ls` and `tag` commands

The `ls` command never locked the repository. This has been fixed. The old
behavior is still supported using `ls --no-lock`. The latter invocation also
works with older restic versions.

The `tag` command erroneously accepted the `--no-lock` option. The command
now always requires an exclusive lock.

https://github.com/restic/restic/pull/4709
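Keeping the old lock-free `ls` behavior, per the entry above (`latest` is a placeholder snapshot reference):

```
restic ls --no-lock latest
```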
changelog/unreleased/pull-4737 (new file; 5 lines)

Enhancement: Include snapshot ID in `reason` field of `forget` JSON output

The JSON output of the `forget` command now includes the `id` and `short_id` of a snapshot in the `reason` field.

https://github.com/restic/restic/pull/4737
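A hedged sketch of consuming the new field; the JSON paths follow the `ForgetGroup`/`KeepReason` structs in the `cmd_forget.go` hunks further down and may differ in detail:

```
restic forget --keep-last 1 --dry-run --json | jq '.[].reasons[].snapshot.short_id'
```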
@@ -1,89 +1,41 @@
 package main

 import (
+    "context"
     "os"
     "os/signal"
-    "sync"
     "syscall"

     "github.com/restic/restic/internal/debug"
 )

-var cleanupHandlers struct {
-    sync.Mutex
-    list []func(code int) (int, error)
-    done bool
-    ch   chan os.Signal
-}
+func createGlobalContext() context.Context {
+    ctx, cancel := context.WithCancel(context.Background())
+
+    ch := make(chan os.Signal, 1)
+    go cleanupHandler(ch, cancel)
+    signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM)
+
+    return ctx
+}

-func init() {
-    cleanupHandlers.ch = make(chan os.Signal, 1)
-    go CleanupHandler(cleanupHandlers.ch)
-    signal.Notify(cleanupHandlers.ch, syscall.SIGINT, syscall.SIGTERM)
-}
-
-// AddCleanupHandler adds the function f to the list of cleanup handlers so
-// that it is executed when all the cleanup handlers are run, e.g. when SIGINT
-// is received.
-func AddCleanupHandler(f func(code int) (int, error)) {
-    cleanupHandlers.Lock()
-    defer cleanupHandlers.Unlock()
-
-    // reset the done flag for integration tests
-    cleanupHandlers.done = false
-
-    cleanupHandlers.list = append(cleanupHandlers.list, f)
-}
-
-// RunCleanupHandlers runs all registered cleanup handlers
-func RunCleanupHandlers(code int) int {
-    cleanupHandlers.Lock()
-    defer cleanupHandlers.Unlock()
-
-    if cleanupHandlers.done {
-        return code
-    }
-    cleanupHandlers.done = true
-
-    for _, f := range cleanupHandlers.list {
-        var err error
-        code, err = f(code)
-        if err != nil {
-            Warnf("error in cleanup handler: %v\n", err)
-        }
-    }
-    cleanupHandlers.list = nil
-    return code
-}
-
-// CleanupHandler handles the SIGINT and SIGTERM signals.
-func CleanupHandler(c <-chan os.Signal) {
-    for s := range c {
-        debug.Log("signal %v received, cleaning up", s)
-        Warnf("%ssignal %v received, cleaning up\n", clearLine(0), s)
-
-        if val, _ := os.LookupEnv("RESTIC_DEBUG_STACKTRACE_SIGINT"); val != "" {
-            _, _ = os.Stderr.WriteString("\n--- STACKTRACE START ---\n\n")
-            _, _ = os.Stderr.WriteString(debug.DumpStacktrace())
-            _, _ = os.Stderr.WriteString("\n--- STACKTRACE END ---\n")
-        }
-
-        code := 0
-
-        if s == syscall.SIGINT || s == syscall.SIGTERM {
-            code = 130
-        } else {
-            code = 1
-        }
-
-        Exit(code)
-    }
+// cleanupHandler handles the SIGINT and SIGTERM signals.
+func cleanupHandler(c <-chan os.Signal, cancel context.CancelFunc) {
+    s := <-c
+    debug.Log("signal %v received, cleaning up", s)
+    Warnf("%ssignal %v received, cleaning up\n", clearLine(0), s)
+
+    if val, _ := os.LookupEnv("RESTIC_DEBUG_STACKTRACE_SIGINT"); val != "" {
+        _, _ = os.Stderr.WriteString("\n--- STACKTRACE START ---\n\n")
+        _, _ = os.Stderr.WriteString(debug.DumpStacktrace())
+        _, _ = os.Stderr.WriteString("\n--- STACKTRACE END ---\n")
+    }
+
+    cancel()
 }

-// Exit runs the cleanup handlers and then terminates the process with the
-// given exit code.
+// Exit terminates the process with the given exit code.
 func Exit(code int) {
-    code = RunCleanupHandlers(code)
     debug.Log("exiting with status code %d", code)
     os.Exit(code)
 }
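The rewritten handler keeps the `RESTIC_DEBUG_STACKTRACE_SIGINT` escape hatch. One way to see it in action (repository and backup paths are placeholders):

```
RESTIC_DEBUG_STACKTRACE_SIGINT=1 restic -r /srv/repo backup /data
# press Ctrl-C: the goroutine dump is printed between
# "--- STACKTRACE START ---" and "--- STACKTRACE END ---" before restic exits
```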
@@ -114,7 +114,7 @@ func init() {
     f.BoolVar(&backupOptions.StdinCommand, "stdin-from-command", false, "interpret arguments as command to execute and store its stdout")
     f.Var(&backupOptions.Tags, "tag", "add `tags` for the new snapshot in the format `tag[,tag,...]` (can be specified multiple times)")
     f.UintVar(&backupOptions.ReadConcurrency, "read-concurrency", 0, "read `n` files concurrently (default: $RESTIC_READ_CONCURRENCY or 2)")
-    f.StringVarP(&backupOptions.Host, "host", "H", "", "set the `hostname` for the snapshot manually. To prevent an expensive rescan use the \"parent\" flag")
+    f.StringVarP(&backupOptions.Host, "host", "H", "", "set the `hostname` for the snapshot manually (default: $RESTIC_HOST). To prevent an expensive rescan use the \"parent\" flag")
     f.StringVar(&backupOptions.Host, "hostname", "", "set the `hostname` for the snapshot manually")
     err := f.MarkDeprecated("hostname", "use --host")
     if err != nil {
@@ -137,6 +137,11 @@ func init() {
     // parse read concurrency from env, on error the default value will be used
     readConcurrency, _ := strconv.ParseUint(os.Getenv("RESTIC_READ_CONCURRENCY"), 10, 32)
     backupOptions.ReadConcurrency = uint(readConcurrency)
+
+    // parse host from env, if not exists or empty the default value will be used
+    if host := os.Getenv("RESTIC_HOST"); host != "" {
+        backupOptions.Host = host
+    }
 }

 // filterExisting returns a slice of all existing items, or an error if no
@@ -440,7 +445,16 @@ func findParentSnapshot(ctx context.Context, repo restic.ListerLoaderUnpacked, o
 }

 func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, term *termstatus.Terminal, args []string) error {
-    err := opts.Check(gopts, args)
+    var vsscfg fs.VSSConfig
+    var err error
+
+    if runtime.GOOS == "windows" {
+        if vsscfg, err = fs.ParseVSSConfig(gopts.extended); err != nil {
+            return err
+        }
+    }
+
+    err = opts.Check(gopts, args)
     if err != nil {
         return err
     }
@@ -451,6 +465,7 @@ func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, ter
     }

     timeStamp := time.Now()
+    backupStart := timeStamp
     if opts.TimeStamp != "" {
         timeStamp, err = time.ParseInLocation(TimeFormat, opts.TimeStamp, time.Local)
         if err != nil {
@@ -462,10 +477,11 @@ func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, ter
         Verbosef("open repository\n")
     }

-    repo, err := OpenRepository(ctx, gopts)
+    ctx, repo, unlock, err := openWithAppendLock(ctx, gopts, opts.DryRun)
     if err != nil {
         return err
     }
+    defer unlock()

     var progressPrinter backup.ProgressPrinter
     if gopts.JSON {
@@ -477,22 +493,6 @@ func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, ter
         calculateProgressInterval(!gopts.Quiet, gopts.JSON))
     defer progressReporter.Done()

-    if opts.DryRun {
-        repo.SetDryRun()
-    }
-
-    if !gopts.JSON {
-        progressPrinter.V("lock repository")
-    }
-    if !opts.DryRun {
-        var lock *restic.Lock
-        lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
-        defer unlockRepo(lock)
-        if err != nil {
-            return err
-        }
-    }
-
     // rejectByNameFuncs collect functions that can reject items from the backup based on path only
     rejectByNameFuncs, err := collectRejectByNameFuncs(opts, repo)
     if err != nil {
@@ -556,8 +556,8 @@ func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, ter
         return err
     }

-    errorHandler := func(item string, err error) error {
-        return progressReporter.Error(item, err)
+    errorHandler := func(item string, err error) {
+        _ = progressReporter.Error(item, err)
     }

     messageHandler := func(msg string, args ...interface{}) {
@@ -566,7 +566,7 @@ func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, ter
         }
     }

-    localVss := fs.NewLocalVss(errorHandler, messageHandler)
+    localVss := fs.NewLocalVss(errorHandler, messageHandler, vsscfg)
     defer localVss.DeleteSnapshots()
     targetFS = localVss
 }
@@ -640,6 +640,7 @@ func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, ter
     snapshotOpts := archiver.SnapshotOptions{
         Excludes:       opts.Excludes,
         Tags:           opts.Tags.Flatten(),
+        BackupStart:    backupStart,
         Time:           timeStamp,
         Hostname:       opts.Host,
         ParentSnapshot: parentSnapshot,
@@ -649,7 +650,7 @@ func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, ter
     if !gopts.JSON {
         progressPrinter.V("start backup on %v", targets)
     }
-    _, id, err := arch.Snapshot(ctx, targets, snapshotOpts)
+    _, id, summary, err := arch.Snapshot(ctx, targets, snapshotOpts)

     // cleanly shutdown all running goroutines
     cancel()
@@ -663,7 +664,7 @@ func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, ter
     }

     // Report finished execution
-    progressReporter.Finish(id, opts.DryRun)
+    progressReporter.Finish(id, summary, opts.DryRun)
     if !gopts.JSON && !opts.DryRun {
         progressPrinter.P("snapshot %s saved\n", id.Str())
     }
@@ -9,7 +9,6 @@ import (
     "runtime"
     "testing"

-    "github.com/restic/restic/internal/backend"
     "github.com/restic/restic/internal/fs"
     "github.com/restic/restic/internal/restic"
     rtest "github.com/restic/restic/internal/test"
@@ -250,29 +249,18 @@ func TestBackupTreeLoadError(t *testing.T) {
     opts := BackupOptions{}
     // Backup a subdirectory first, such that we can remove the tree pack for the subdirectory
     testRunBackup(t, env.testdata, []string{"test"}, opts, env.gopts)
-
-    r, err := OpenRepository(context.TODO(), env.gopts)
-    rtest.OK(t, err)
-    rtest.OK(t, r.LoadIndex(context.TODO(), nil))
-    treePacks := restic.NewIDSet()
-    r.Index().Each(context.TODO(), func(pb restic.PackedBlob) {
-        if pb.Type == restic.TreeBlob {
-            treePacks.Insert(pb.PackID)
-        }
-    })
+    treePacks := listTreePacks(env.gopts, t)

     testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts)
     testRunCheck(t, env.gopts)

     // delete the subdirectory pack first
-    for id := range treePacks {
-        rtest.OK(t, r.Backend().Remove(context.TODO(), backend.Handle{Type: restic.PackFile, Name: id.String()}))
-    }
+    removePacks(env.gopts, t, treePacks)
     testRunRebuildIndex(t, env.gopts)
     // now the repo is missing the tree blob in the index; check should report this
     testRunCheckMustFail(t, env.gopts)
     // second backup should report an error but "heal" this situation
-    err = testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts)
+    err := testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts)
     rtest.Assert(t, err != nil, "backup should have reported an error for the subdirectory")
     testRunCheck(t, env.gopts)
@@ -406,6 +394,7 @@ func TestIncrementalBackup(t *testing.T) {
     t.Logf("repository grown by %d bytes", stat3.size-stat2.size)
 }

+// nolint: staticcheck // false positive nil pointer dereference check
 func TestBackupTags(t *testing.T) {
     env, cleanup := withTestEnvironment(t)
     defer cleanup()
@@ -441,6 +430,7 @@ func TestBackupTags(t *testing.T) {
         "expected parent to be %v, got %v", parent.ID, newest.Parent)
 }

+// nolint: staticcheck // false positive nil pointer dereference check
 func TestBackupProgramVersion(t *testing.T) {
     env, cleanup := withTestEnvironment(t)
     defer cleanup()
@@ -64,19 +64,11 @@ func runCat(ctx context.Context, gopts GlobalOptions, args []string) error {
         return err
     }

-    repo, err := OpenRepository(ctx, gopts)
+    ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock)
     if err != nil {
         return err
     }
-
-    if !gopts.NoLock {
-        var lock *restic.Lock
-        lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
-        defer unlockRepo(lock)
-        if err != nil {
-            return err
-        }
-    }
+    defer unlock()

     tpe := args[0]

@@ -199,25 +199,16 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args
     }

     cleanup := prepareCheckCache(opts, &gopts)
-    AddCleanupHandler(func(code int) (int, error) {
-        cleanup()
-        return code, nil
-    })
-
-    repo, err := OpenRepository(ctx, gopts)
-    if err != nil {
-        return err
-    }
+    defer cleanup()

-    if !gopts.NoLock {
-        Verbosef("create exclusive lock for repository\n")
-        var lock *restic.Lock
-        lock, ctx, err = lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON)
-        defer unlockRepo(lock)
-        if err != nil {
-            return err
-        }
-    }
+    ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, gopts.NoLock)
+    if err != nil {
+        return err
+    }
+    defer unlock()

     chkr := checker.New(repo, opts.CheckUnused)
     err = chkr.LoadSnapshots(ctx)
@@ -228,15 +219,23 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args
     Verbosef("load indexes\n")
     bar := newIndexProgress(gopts.Quiet, gopts.JSON)
     hints, errs := chkr.LoadIndex(ctx, bar)
+    if ctx.Err() != nil {
+        return ctx.Err()
+    }

     errorsFound := false
     suggestIndexRebuild := false
+    suggestLegacyIndexRebuild := false
     mixedFound := false
     for _, hint := range hints {
         switch hint.(type) {
-        case *checker.ErrDuplicatePacks, *checker.ErrOldIndexFormat:
+        case *checker.ErrDuplicatePacks:
             Printf("%v\n", hint)
             suggestIndexRebuild = true
+        case *checker.ErrOldIndexFormat:
+            Warnf("error: %v\n", hint)
+            suggestLegacyIndexRebuild = true
+            errorsFound = true
         case *checker.ErrMixedPack:
             Printf("%v\n", hint)
             mixedFound = true
@@ -247,7 +246,10 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args
     }

     if suggestIndexRebuild {
-        Printf("Duplicate packs/old indexes are non-critical, you can run `restic repair index' to correct this.\n")
+        Printf("Duplicate packs are non-critical, you can run `restic repair index' to correct this.\n")
     }
+    if suggestLegacyIndexRebuild {
+        Warnf("Found indexes using the legacy format, you must run `restic repair index' to correct this.\n")
+    }
     if mixedFound {
         Printf("Mixed packs with tree and data blobs are non-critical, you can run `restic prune` to correct this.\n")
@@ -281,6 +283,9 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args
     if orphanedPacks > 0 {
         Verbosef("%d additional files were found in the repo, which likely contain duplicate data.\nThis is non-critical, you can run `restic prune` to correct this.\n", orphanedPacks)
     }
+    if ctx.Err() != nil {
+        return ctx.Err()
+    }

     Verbosef("check snapshots, trees and blobs\n")
     errChan = make(chan error)
@@ -314,9 +319,16 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args
     // Must happen after `errChan` is read from in the above loop to avoid
     // deadlocking in the case of errors.
     wg.Wait()
+    if ctx.Err() != nil {
+        return ctx.Err()
+    }

     if opts.CheckUnused {
-        for _, id := range chkr.UnusedBlobs(ctx) {
+        unused, err := chkr.UnusedBlobs(ctx)
+        if err != nil {
+            return err
+        }
+        for _, id := range unused {
             Verbosef("unused blob %v\n", id)
             errorsFound = true
         }
@@ -393,10 +405,13 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args
         doReadData(packs)
     }

+    if ctx.Err() != nil {
+        return ctx.Err()
+    }
+
     if errorsFound {
         return errors.Fatal("repository contains errors")
     }

     Verbosef("no errors were found\n")

     return nil
@@ -53,7 +53,7 @@ func init() {
 }

 func runCopy(ctx context.Context, opts CopyOptions, gopts GlobalOptions, args []string) error {
-    secondaryGopts, isFromRepo, err := fillSecondaryGlobalOpts(opts.secondaryRepoOptions, gopts, "destination")
+    secondaryGopts, isFromRepo, err := fillSecondaryGlobalOpts(ctx, opts.secondaryRepoOptions, gopts, "destination")
     if err != nil {
         return err
     }
@@ -62,30 +62,17 @@ func runCopy(ctx context.Context, opts CopyOptions, gopts GlobalOptions, args []
         gopts, secondaryGopts = secondaryGopts, gopts
     }

-    srcRepo, err := OpenRepository(ctx, gopts)
+    ctx, srcRepo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock)
     if err != nil {
         return err
     }
+    defer unlock()

-    dstRepo, err := OpenRepository(ctx, secondaryGopts)
-    if err != nil {
-        return err
-    }
-
-    if !gopts.NoLock {
-        var srcLock *restic.Lock
-        srcLock, ctx, err = lockRepo(ctx, srcRepo, gopts.RetryLock, gopts.JSON)
-        defer unlockRepo(srcLock)
-        if err != nil {
-            return err
-        }
-    }
-
-    dstLock, ctx, err := lockRepo(ctx, dstRepo, gopts.RetryLock, gopts.JSON)
-    defer unlockRepo(dstLock)
+    ctx, dstRepo, unlock, err := openWithAppendLock(ctx, secondaryGopts, false)
     if err != nil {
         return err
     }
+    defer unlock()

     srcSnapshotLister, err := restic.MemorizeList(ctx, srcRepo, restic.SnapshotFile)
     if err != nil {
@@ -116,6 +103,9 @@ func runCopy(ctx context.Context, opts CopyOptions, gopts GlobalOptions, args []
         // also consider identical snapshot copies
         dstSnapshotByOriginal[*sn.ID()] = append(dstSnapshotByOriginal[*sn.ID()], sn)
     }
+    if ctx.Err() != nil {
+        return ctx.Err()
+    }

     // remember already processed trees across all snapshots
     visitedTrees := restic.NewIDSet()
@@ -160,7 +150,7 @@ func runCopy(ctx context.Context, opts CopyOptions, gopts GlobalOptions, args []
         }
         Verbosef("snapshot %s saved\n", newID.Str())
     }
-    return nil
+    return ctx.Err()
 }

 func similarSnapshots(sna *restic.Snapshot, snb *restic.Snapshot) bool {
@@ -153,19 +153,11 @@ func runDebugDump(ctx context.Context, gopts GlobalOptions, args []string) error
         return errors.Fatal("type not specified")
     }

-    repo, err := OpenRepository(ctx, gopts)
+    ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock)
     if err != nil {
         return err
     }
-
-    if !gopts.NoLock {
-        var lock *restic.Lock
-        lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
-        defer unlockRepo(lock)
-        if err != nil {
-            return err
-        }
-    }
+    defer unlock()

     tpe := args[0]

@@ -442,10 +434,15 @@ func storePlainBlob(id restic.ID, prefix string, plain []byte) error {
 }

 func runDebugExamine(ctx context.Context, gopts GlobalOptions, opts DebugExamineOptions, args []string) error {
-    repo, err := OpenRepository(ctx, gopts)
+    if opts.ExtractPack && gopts.NoLock {
+        return fmt.Errorf("--extract-pack and --no-lock are mutually exclusive")
+    }
+
+    ctx, repo, unlock, err := openWithAppendLock(ctx, gopts, gopts.NoLock)
     if err != nil {
         return err
     }
+    defer unlock()

     ids := make([]restic.ID, 0)
     for _, name := range args {
@@ -464,15 +461,6 @@ func runDebugExamine(ctx context.Context, gopts GlobalOptions, opts DebugExamine
         return errors.Fatal("no pack files to examine")
     }

-    if !gopts.NoLock {
-        var lock *restic.Lock
-        lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
-        defer unlockRepo(lock)
-        if err != nil {
-            return err
-        }
-    }
-
     bar := newIndexProgress(gopts.Quiet, gopts.JSON)
     err = repo.LoadIndex(ctx, bar)
     if err != nil {
@@ -344,19 +344,11 @@ func runDiff(ctx context.Context, opts DiffOptions, gopts GlobalOptions, args []
         return errors.Fatalf("specify two snapshot IDs")
     }

-    repo, err := OpenRepository(ctx, gopts)
+    ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock)
     if err != nil {
         return err
     }
-
-    if !gopts.NoLock {
-        var lock *restic.Lock
-        lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
-        defer unlockRepo(lock)
-        if err != nil {
-            return err
-        }
-    }
+    defer unlock()

     // cache snapshots listing
     be, err := restic.MemorizeList(ctx, repo, restic.SnapshotFile)
@@ -131,19 +131,11 @@ func runDump(ctx context.Context, opts DumpOptions, gopts GlobalOptions, args []

     splittedPath := splitPath(path.Clean(pathToPrint))

-    repo, err := OpenRepository(ctx, gopts)
+    ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock)
     if err != nil {
         return err
     }
-
-    if !gopts.NoLock {
-        var lock *restic.Lock
-        lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
-        defer unlockRepo(lock)
-        if err != nil {
-            return err
-        }
-    }
+    defer unlock()

     sn, subfolder, err := (&restic.SnapshotFilter{
         Hosts: opts.Hosts,
cmd/restic/cmd_features.go (new file; 58 lines)

package main

import (
    "fmt"

    "github.com/restic/restic/internal/errors"
    "github.com/restic/restic/internal/feature"
    "github.com/restic/restic/internal/ui/table"

    "github.com/spf13/cobra"
)

var featuresCmd = &cobra.Command{
    Use:   "features",
    Short: "Print list of feature flags",
    Long: `
The "features" command prints a list of supported feature flags.

To pass feature flags to restic, set the RESTIC_FEATURES environment variable
to "featureA=true,featureB=false". Specifying an unknown feature flag is an error.

A feature can either be in alpha, beta, stable or deprecated state.
An _alpha_ feature is disabled by default and may change in arbitrary ways between restic versions or be removed.
A _beta_ feature is enabled by default, but still can change in minor ways or be removed.
A _stable_ feature is always enabled and cannot be disabled. The flag will be removed in a future restic version.
A _deprecated_ feature is always disabled and cannot be enabled. The flag will be removed in a future restic version.

EXIT STATUS
===========

Exit status is 0 if the command was successful, and non-zero if there was any error.
`,
    Hidden:            true,
    DisableAutoGenTag: true,
    RunE: func(_ *cobra.Command, args []string) error {
        if len(args) != 0 {
            return errors.Fatal("the feature command expects no arguments")
        }

        fmt.Printf("All Feature Flags:\n")
        flags := feature.Flag.List()

        tab := table.New()
        tab.AddColumn("Name", "{{ .Name }}")
        tab.AddColumn("Type", "{{ .Type }}")
        tab.AddColumn("Default", "{{ .Default }}")
        tab.AddColumn("Description", "{{ .Description }}")

        for _, flag := range flags {
            tab.AddRow(flag)
        }
        return tab.Write(globalOptions.stdout)
    },
}

func init() {
    cmdRoot.AddCommand(featuresCmd)
}
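The new hidden command can be exercised directly (it takes no arguments, per the `RunE` check above):

```
# print name, type, default and description of every feature flag
restic features
```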
@@ -439,7 +439,10 @@ func (f *Finder) packsToBlobs(ctx context.Context, packs []string) error {

     if err != errAllPacksFound {
         // try to resolve unknown pack ids from the index
-        packIDs = f.indexPacksToBlobs(ctx, packIDs)
+        packIDs, err = f.indexPacksToBlobs(ctx, packIDs)
+        if err != nil {
+            return err
+        }
     }

     if len(packIDs) > 0 {
@@ -456,13 +459,13 @@ func (f *Finder) packsToBlobs(ctx context.Context, packs []string) error {
     return nil
 }

-func (f *Finder) indexPacksToBlobs(ctx context.Context, packIDs map[string]struct{}) map[string]struct{} {
+func (f *Finder) indexPacksToBlobs(ctx context.Context, packIDs map[string]struct{}) (map[string]struct{}, error) {
     wctx, cancel := context.WithCancel(ctx)
     defer cancel()

     // remember which packs were found in the index
     indexPackIDs := make(map[string]struct{})
-    f.repo.Index().Each(wctx, func(pb restic.PackedBlob) {
+    err := f.repo.Index().Each(wctx, func(pb restic.PackedBlob) {
         idStr := pb.PackID.String()
         // keep entry in packIDs as Each() returns individual index entries
         matchingID := false
@@ -481,6 +484,9 @@ func (f *Finder) indexPacksToBlobs(ctx context.Context, packIDs map[string]struc
             indexPackIDs[idStr] = struct{}{}
         }
     })
+    if err != nil {
+        return nil, err
+    }

     for id := range indexPackIDs {
         delete(packIDs, id)
@@ -493,7 +499,7 @@ func (f *Finder) indexPacksToBlobs(ctx context.Context, packIDs map[string]struc
         }
         Warnf("some pack files are missing from the repository, getting their blobs from the repository index: %v\n\n", list)
     }
-    return packIDs
+    return packIDs, nil
 }

 func (f *Finder) findObjectPack(id string, t restic.BlobType) {
@@ -563,19 +569,11 @@ func runFind(ctx context.Context, opts FindOptions, gopts GlobalOptions, args []
         return errors.Fatal("cannot have several ID types")
     }

-    repo, err := OpenRepository(ctx, gopts)
+    ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock)
     if err != nil {
         return err
     }
-
-    if !gopts.NoLock {
-        var lock *restic.Lock
-        lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
-        defer unlockRepo(lock)
-        if err != nil {
-            return err
-        }
-    }
+    defer unlock()

     snapshotLister, err := restic.MemorizeList(ctx, repo, restic.SnapshotFile)
     if err != nil {
@@ -616,6 +614,9 @@ func runFind(ctx context.Context, opts FindOptions, gopts GlobalOptions, args []
     for sn := range FindFilteredSnapshots(ctx, snapshotLister, repo, &opts.SnapshotFilter, opts.Snapshots) {
         filteredSnapshots = append(filteredSnapshots, sn)
     }
+    if ctx.Err() != nil {
+        return ctx.Err()
+    }

     sort.Slice(filteredSnapshots, func(i, j int) bool {
         return filteredSnapshots[i].Time.Before(filteredSnapshots[j].Time)
@@ -8,6 +8,7 @@ import (

     "github.com/restic/restic/internal/errors"
     "github.com/restic/restic/internal/restic"
+    "github.com/restic/restic/internal/ui/termstatus"
     "github.com/spf13/cobra"
 )

@@ -33,7 +34,9 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
 `,
     DisableAutoGenTag: true,
     RunE: func(cmd *cobra.Command, args []string) error {
-        return runForget(cmd.Context(), forgetOptions, forgetPruneOptions, globalOptions, args)
+        term, cancel := setupTermstatus()
+        defer cancel()
+        return runForget(cmd.Context(), forgetOptions, forgetPruneOptions, globalOptions, term, args)
     },
 }

@@ -152,7 +155,7 @@ func verifyForgetOptions(opts *ForgetOptions) error {
     return nil
 }

-func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOptions, gopts GlobalOptions, args []string) error {
+func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOptions, gopts GlobalOptions, term *termstatus.Terminal, args []string) error {
     err := verifyForgetOptions(&opts)
     if err != nil {
         return err
@@ -163,23 +166,21 @@ func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOption
         return err
     }

-    repo, err := OpenRepository(ctx, gopts)
-    if err != nil {
-        return err
-    }
-
     if gopts.NoLock && !opts.DryRun {
         return errors.Fatal("--no-lock is only applicable in combination with --dry-run for forget command")
     }

-    if !opts.DryRun || !gopts.NoLock {
-        var lock *restic.Lock
-        lock, ctx, err = lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON)
-        defer unlockRepo(lock)
-        if err != nil {
-            return err
-        }
-    }
+    ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, opts.DryRun && gopts.NoLock)
+    if err != nil {
+        return err
+    }
+    defer unlock()
+
+    verbosity := gopts.verbosity
+    if gopts.JSON {
+        verbosity = 0
+    }
+    printer := newTerminalProgressPrinter(verbosity, term)

     var snapshots restic.Snapshots
     removeSnIDs := restic.NewIDSet()
@@ -187,6 +188,9 @@ func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOption
     for sn := range FindFilteredSnapshots(ctx, repo, repo, &opts.SnapshotFilter, args) {
         snapshots = append(snapshots, sn)
     }
+    if ctx.Err() != nil {
+        return ctx.Err()
+    }

     var jsonGroups []*ForgetGroup

@@ -218,15 +222,11 @@ func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOption
     }

     if policy.Empty() && len(args) == 0 {
-        if !gopts.JSON {
-            Verbosef("no policy was specified, no snapshots will be removed\n")
-        }
+        printer.P("no policy was specified, no snapshots will be removed\n")
     }

     if !policy.Empty() {
-        if !gopts.JSON {
-            Verbosef("Applying Policy: %v\n", policy)
-        }
+        printer.P("Applying Policy: %v\n", policy)

         for k, snapshotGroup := range snapshotGroups {
             if gopts.Verbose >= 1 && !gopts.JSON {
@@ -249,20 +249,20 @@ func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOption
             keep, remove, reasons := restic.ApplyPolicy(snapshotGroup, policy)

             if len(keep) != 0 && !gopts.Quiet && !gopts.JSON {
-                Printf("keep %d snapshots:\n", len(keep))
+                printer.P("keep %d snapshots:\n", len(keep))
                 PrintSnapshots(globalOptions.stdout, keep, reasons, opts.Compact)
-                Printf("\n")
+                printer.P("\n")
             }
-            addJSONSnapshots(&fg.Keep, keep)
+            fg.Keep = asJSONSnapshots(keep)

             if len(remove) != 0 && !gopts.Quiet && !gopts.JSON {
-                Printf("remove %d snapshots:\n", len(remove))
+                printer.P("remove %d snapshots:\n", len(remove))
                 PrintSnapshots(globalOptions.stdout, remove, nil, opts.Compact)
-                Printf("\n")
+                printer.P("\n")
             }
-            addJSONSnapshots(&fg.Remove, remove)
+            fg.Remove = asJSONSnapshots(remove)

-            fg.Reasons = reasons
+            fg.Reasons = asJSONKeeps(reasons)

             jsonGroups = append(jsonGroups, &fg)

@@ -273,16 +273,27 @@ func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOption
         }
     }

+    if ctx.Err() != nil {
+        return ctx.Err()
+    }
+
     if len(removeSnIDs) > 0 {
         if !opts.DryRun {
-            err := DeleteFilesChecked(ctx, gopts, repo, removeSnIDs, restic.SnapshotFile)
+            bar := printer.NewCounter("files deleted")
+            err := restic.ParallelRemove(ctx, repo, removeSnIDs, restic.SnapshotFile, func(id restic.ID, err error) error {
+                if err != nil {
+                    printer.E("unable to remove %v/%v from the repository\n", restic.SnapshotFile, id)
+                } else {
+                    printer.VV("removed %v/%v\n", restic.SnapshotFile, id)
+                }
+                return nil
+            }, bar)
+            bar.Done()
             if err != nil {
                 return err
             }
         } else {
-            if !gopts.JSON {
-                Printf("Would have removed the following snapshots:\n%v\n\n", removeSnIDs)
-            }
+            printer.P("Would have removed the following snapshots:\n%v\n\n", removeSnIDs)
         }
     }

@@ -294,15 +305,13 @@ func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOption
     }

     if len(removeSnIDs) > 0 && opts.Prune {
-        if !gopts.JSON {
-            if opts.DryRun {
-                Verbosef("%d snapshots would be removed, running prune dry run\n", len(removeSnIDs))
-            } else {
-                Verbosef("%d snapshots have been removed, running prune\n", len(removeSnIDs))
-            }
-        }
+        if opts.DryRun {
+            printer.P("%d snapshots would be removed, running prune dry run\n", len(removeSnIDs))
+        } else {
+            printer.P("%d snapshots have been removed, running prune\n", len(removeSnIDs))
+        }
         pruneOptions.DryRun = opts.DryRun
-        return runPruneWithRepo(ctx, pruneOptions, gopts, repo, removeSnIDs)
+        return runPruneWithRepo(ctx, pruneOptions, gopts, repo, removeSnIDs, term)
     }

     return nil
@@ -310,23 +319,47 @@ func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOption

 // ForgetGroup helps to print what is forgotten in JSON.
 type ForgetGroup struct {
-    Tags    []string            `json:"tags"`
-    Host    string              `json:"host"`
-    Paths   []string            `json:"paths"`
-    Keep    []Snapshot          `json:"keep"`
-    Remove  []Snapshot          `json:"remove"`
-    Reasons []restic.KeepReason `json:"reasons"`
+    Tags    []string     `json:"tags"`
+    Host    string       `json:"host"`
+    Paths   []string     `json:"paths"`
+    Keep    []Snapshot   `json:"keep"`
+    Remove  []Snapshot   `json:"remove"`
+    Reasons []KeepReason `json:"reasons"`
 }

-func addJSONSnapshots(js *[]Snapshot, list restic.Snapshots) {
+func asJSONSnapshots(list restic.Snapshots) []Snapshot {
+    var resultList []Snapshot
     for _, sn := range list {
         k := Snapshot{
             Snapshot: sn,
             ID:       sn.ID(),
             ShortID:  sn.ID().Str(),
         }
-        *js = append(*js, k)
+        resultList = append(resultList, k)
     }
+    return resultList
+}
+
+// KeepReason helps to print KeepReasons as JSON with Snapshots with their ID included.
+type KeepReason struct {
+    Snapshot Snapshot `json:"snapshot"`
+    Matches  []string `json:"matches"`
+}
+
+func asJSONKeeps(list []restic.KeepReason) []KeepReason {
+    var resultList []KeepReason
+    for _, keep := range list {
+        k := KeepReason{
+            Snapshot: Snapshot{
+                Snapshot: keep.Snapshot,
+                ID:       keep.Snapshot.ID(),
+                ShortID:  keep.Snapshot.ID().Str(),
+            },
+            Matches: keep.Matches,
+        }
+        resultList = append(resultList, k)
+    }
+    return resultList
 }

 func printJSONForget(stdout io.Writer, forgets []*ForgetGroup) error {
@@ -5,6 +5,7 @@ import (
     "testing"

     rtest "github.com/restic/restic/internal/test"
+    "github.com/restic/restic/internal/ui/termstatus"
 )

 func testRunForget(t testing.TB, gopts GlobalOptions, args ...string) {
@@ -12,5 +13,7 @@ func testRunForget(t testing.TB, gopts GlobalOptions, args ...string) {
     pruneOpts := PruneOptions{
         MaxUnused: "5%",
     }
-    rtest.OK(t, runForget(context.TODO(), opts, pruneOpts, gopts, args))
+    rtest.OK(t, withTermStatus(gopts, func(ctx context.Context, term *termstatus.Terminal) error {
+        return runForget(context.TODO(), opts, pruneOpts, gopts, term, args)
+    }))
 }
@@ -80,7 +80,7 @@ func runInit(ctx context.Context, opts InitOptions, gopts GlobalOptions, args []
        return err
    }

-    gopts.password, err = ReadPasswordTwice(gopts,
+    gopts.password, err = ReadPasswordTwice(ctx, gopts,
        "enter password for new repository: ",
        "enter password again: ")
    if err != nil {
@@ -131,7 +131,7 @@ func runInit(ctx context.Context, opts InitOptions, gopts GlobalOptions, args []

 func maybeReadChunkerPolynomial(ctx context.Context, opts InitOptions, gopts GlobalOptions) (*chunker.Pol, error) {
    if opts.CopyChunkerParameters {
-        otherGopts, _, err := fillSecondaryGlobalOpts(opts.secondaryRepoOptions, gopts, "secondary")
+        otherGopts, _, err := fillSecondaryGlobalOpts(ctx, opts.secondaryRepoOptions, gopts, "secondary")
        if err != nil {
            return nil, err
        }
@@ -50,22 +50,17 @@ func runKeyAdd(ctx context.Context, gopts GlobalOptions, opts KeyAddOptions, arg
        return fmt.Errorf("the key add command expects no arguments, only options - please see `restic help key add` for usage and flags")
    }

-    repo, err := OpenRepository(ctx, gopts)
-    if err != nil {
-        return err
-    }
-
-    lock, ctx, err := lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
-    defer unlockRepo(lock)
+    ctx, repo, unlock, err := openWithAppendLock(ctx, gopts, false)
    if err != nil {
        return err
    }
+    defer unlock()

    return addKey(ctx, repo, gopts, opts)
 }

 func addKey(ctx context.Context, repo *repository.Repository, gopts GlobalOptions, opts KeyAddOptions) error {
-    pw, err := getNewPassword(gopts, opts.NewPasswordFile)
+    pw, err := getNewPassword(ctx, gopts, opts.NewPasswordFile)
    if err != nil {
        return err
    }
@@ -88,7 +83,7 @@ func addKey(ctx context.Context, repo *repository.Repository, gopts GlobalOption
 // testKeyNewPassword is used to set a new password during integration testing.
 var testKeyNewPassword string

-func getNewPassword(gopts GlobalOptions, newPasswordFile string) (string, error) {
+func getNewPassword(ctx context.Context, gopts GlobalOptions, newPasswordFile string) (string, error) {
    if testKeyNewPassword != "" {
        return testKeyNewPassword, nil
    }
@@ -102,7 +97,7 @@ func getNewPassword(gopts GlobalOptions, newPasswordFile string) (string, error)
    newopts := gopts
    newopts.password = ""

-    return ReadPasswordTwice(newopts,
+    return ReadPasswordTwice(ctx, newopts,
        "enter new password: ",
        "enter password again: ")
 }
@@ -40,19 +40,11 @@ func runKeyList(ctx context.Context, gopts GlobalOptions, args []string) error {
 		return fmt.Errorf("the key list command expects no arguments, only options - please see `restic help key list` for usage and flags")
 	}

-	repo, err := OpenRepository(ctx, gopts)
+	ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock)
 	if err != nil {
 		return err
 	}
-
-	if !gopts.NoLock {
-		var lock *restic.Lock
-		lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
-		defer unlockRepo(lock)
-		if err != nil {
-			return err
-		}
-	}
+	defer unlock()

 	return listKeys(ctx, repo, gopts)
 }

@@ -61,6 +53,7 @@ func listKeys(ctx context.Context, s *repository.Repository, gopts GlobalOptions) error {
 	type keyInfo struct {
 		Current  bool   `json:"current"`
 		ID       string `json:"id"`
+		ShortID  string `json:"-"`
 		UserName string `json:"userName"`
 		HostName string `json:"hostName"`
 		Created  string `json:"created"`

@@ -78,7 +71,8 @@ func listKeys(ctx context.Context, s *repository.Repository, gopts GlobalOptions) error {

 		key := keyInfo{
 			Current:  id == s.KeyID(),
-			ID:       id.Str(),
+			ID:       id.String(),
+			ShortID:  id.Str(),
 			UserName: k.Username,
 			HostName: k.Hostname,
 			Created:  k.Created.Local().Format(TimeFormat),

@@ -99,7 +93,7 @@ func listKeys(ctx context.Context, s *repository.Repository, gopts GlobalOptions) error {
 	}

 	tab := table.New()
-	tab.AddColumn(" ID", "{{if .Current}}*{{else}} {{end}}{{ .ID }}")
+	tab.AddColumn(" ID", "{{if .Current}}*{{else}} {{end}}{{ .ShortID }}")
 	tab.AddColumn("User", "{{ .UserName }}")
 	tab.AddColumn("Host", "{{ .HostName }}")
 	tab.AddColumn("Created", "{{ .Created }}")

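Note: the split above gives JSON consumers the full key ID while the text table keeps the short form. The `json:"-"` tag excludes ShortID from serialization entirely. A self-contained example of that mechanism (hypothetical values):

package main

import (
	"encoding/json"
	"fmt"
)

type keyInfo struct {
	Current bool   `json:"current"`
	ID      string `json:"id"`
	ShortID string `json:"-"` // table output only, never serialized
}

func main() {
	k := keyInfo{Current: true, ID: "3f76b902e5aa...", ShortID: "3f76b902"}
	out, _ := json.Marshal(k)
	fmt.Println(string(out)) // {"current":true,"id":"3f76b902e5aa..."}
}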
@@ -47,22 +47,17 @@ func runKeyPasswd(ctx context.Context, gopts GlobalOptions, opts KeyPasswdOptions, args []string) error {
 		return fmt.Errorf("the key passwd command expects no arguments, only options - please see `restic help key passwd` for usage and flags")
 	}

-	repo, err := OpenRepository(ctx, gopts)
-	if err != nil {
-		return err
-	}
-
-	lock, ctx, err := lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON)
-	defer unlockRepo(lock)
+	ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, false)
 	if err != nil {
 		return err
 	}
+	defer unlock()

 	return changePassword(ctx, repo, gopts, opts)
 }

 func changePassword(ctx context.Context, repo *repository.Repository, gopts GlobalOptions, opts KeyPasswdOptions) error {
-	pw, err := getNewPassword(gopts, opts.NewPasswordFile)
+	pw, err := getNewPassword(ctx, gopts, opts.NewPasswordFile)
 	if err != nil {
 		return err
 	}

@@ -37,20 +37,13 @@ func runKeyRemove(ctx context.Context, gopts GlobalOptions, args []string) error {
 		return fmt.Errorf("key remove expects one argument as the key id")
 	}

-	repo, err := OpenRepository(ctx, gopts)
+	ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, false)
 	if err != nil {
 		return err
 	}
+	defer unlock()

-	lock, ctx, err := lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON)
-	defer unlockRepo(lock)
-	if err != nil {
-		return err
-	}
-
-	idPrefix := args[0]
-
-	return deleteKey(ctx, repo, idPrefix)
+	return deleteKey(ctx, repo, args[0])
 }

 func deleteKey(ctx context.Context, repo *repository.Repository, idPrefix string) error {

@@ -36,19 +36,11 @@ func runList(ctx context.Context, gopts GlobalOptions, args []string) error {
 		return errors.Fatal("type not specified")
 	}

-	repo, err := OpenRepository(ctx, gopts)
+	ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock || args[0] == "locks")
 	if err != nil {
 		return err
 	}
-
-	if !gopts.NoLock && args[0] != "locks" {
-		var lock *restic.Lock
-		lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
-		defer unlockRepo(lock)
-		if err != nil {
-			return err
-		}
-	}
+	defer unlock()

 	var t restic.FileType
 	switch args[0] {

@@ -67,10 +59,9 @@ func runList(ctx context.Context, gopts GlobalOptions, args []string) error {
 		if err != nil {
 			return err
 		}
-		idx.Each(ctx, func(blobs restic.PackedBlob) {
+		return idx.Each(ctx, func(blobs restic.PackedBlob) {
 			Printf("%v %v\n", blobs.Type, blobs.ID)
 		})
-		return nil
 	})
 	default:
 		return errors.Fatal("invalid type")

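Note: the hunk above reflects an interface change elsewhere in the diff — index iteration now reports an error (for example on context cancellation) instead of being fire-and-forget, so callers return its result rather than a hard-coded nil. A minimal, self-contained sketch of that contract, with stand-in types instead of the restic index:

package main

import (
	"context"
	"errors"
	"fmt"
)

// blob is a stand-in for restic.PackedBlob.
type blob struct{ id string }

// each mimics the new contract: the callback stays error-free, but the
// iteration itself can fail, e.g. when the context is cancelled.
func each(ctx context.Context, blobs []blob, fn func(blob)) error {
	for _, b := range blobs {
		if ctx.Err() != nil {
			return ctx.Err()
		}
		fn(b)
	}
	return nil
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // simulate an interrupted command
	err := each(ctx, []blob{{"a"}, {"b"}}, func(b blob) { fmt.Println(b.id) })
	fmt.Println(errors.Is(err, context.Canceled)) // true
}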
@@ -309,10 +309,11 @@ func runLs(ctx context.Context, opts LsOptions, gopts GlobalOptions, args []string) error {
 		return false
 	}

-	repo, err := OpenRepository(ctx, gopts)
+	ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock)
 	if err != nil {
 		return err
 	}
+	defer unlock()

 	snapshotLister, err := restic.MemorizeList(ctx, repo, restic.SnapshotFile)
 	if err != nil {

@@ -117,16 +117,11 @@ func applyMigrations(ctx context.Context, opts MigrateOptions, gopts GlobalOptions) error {
 }

 func runMigrate(ctx context.Context, opts MigrateOptions, gopts GlobalOptions, args []string) error {
-	repo, err := OpenRepository(ctx, gopts)
-	if err != nil {
-		return err
-	}
-
-	lock, ctx, err := lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON)
-	defer unlockRepo(lock)
+	ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, false)
 	if err != nil {
 		return err
 	}
+	defer unlock()

 	if len(args) == 0 {
 		return checkMigrations(ctx, repo)

@@ -125,19 +125,11 @@ func runMount(ctx context.Context, opts MountOptions, gopts GlobalOptions, args []string) error {
 	debug.Log("start mount")
 	defer debug.Log("finish mount")

-	repo, err := OpenRepository(ctx, gopts)
+	ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock)
 	if err != nil {
 		return err
 	}
-
-	if !gopts.NoLock {
-		var lock *restic.Lock
-		lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
-		defer unlockRepo(lock)
-		if err != nil {
-			return err
-		}
-	}
+	defer unlock()

 	bar := newIndexProgress(gopts.Quiet, gopts.JSON)
 	err = repo.LoadIndex(ctx, bar)

@@ -160,28 +152,15 @@ func runMount(ctx context.Context, opts MountOptions, gopts GlobalOptions, args []string) error {
 		}
 	}

-	AddCleanupHandler(func(code int) (int, error) {
-		debug.Log("running umount cleanup handler for mount at %v", mountpoint)
-		err := umount(mountpoint)
-		if err != nil {
-			Warnf("unable to umount (maybe already umounted or still in use?): %v\n", err)
-		}
-		// replace error code of sigint
-		if code == 130 {
-			code = 0
-		}
-		return code, nil
-	})
+	systemFuse.Debug = func(msg interface{}) {
+		debug.Log("fuse: %v", msg)
+	}

 	c, err := systemFuse.Mount(mountpoint, mountOptions...)
 	if err != nil {
 		return err
 	}

-	systemFuse.Debug = func(msg interface{}) {
-		debug.Log("fuse: %v", msg)
-	}
-
 	cfg := fuse.Config{
 		OwnerIsRoot: opts.OwnerRoot,
 		Filter:      opts.SnapshotFilter,

@@ -195,15 +174,26 @@ func runMount(ctx context.Context, opts MountOptions, gopts GlobalOptions, args []string) error {
 	Printf("When finished, quit with Ctrl-c here or umount the mountpoint.\n")

 	debug.Log("serving mount at %v", mountpoint)
-	err = fs.Serve(c, root)
-	if err != nil {
-		return err
-	}

-	<-c.Ready
-	return c.MountError
-}
+	done := make(chan struct{})

-func umount(mountpoint string) error {
-	return systemFuse.Unmount(mountpoint)
+	go func() {
+		defer close(done)
+		err = fs.Serve(c, root)
+	}()
+
+	select {
+	case <-ctx.Done():
+		debug.Log("running umount cleanup handler for mount at %v", mountpoint)
+		err := systemFuse.Unmount(mountpoint)
+		if err != nil {
+			Warnf("unable to umount (maybe already umounted or still in use?): %v\n", err)
+		}
+
+		return ErrOK
+	case <-done:
+		// clean shutdown, nothing to do
+	}
+
+	return err
 }

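Note: the mount change replaces a process-global cleanup handler with context-driven shutdown. The generic version of the pattern, under the assumption that serve blocks until the filesystem is unmounted (as fs.Serve does), looks like this; serve and unmount are placeholders for fs.Serve and systemFuse.Unmount:

// serveUntilCancelled runs a blocking serve call in a goroutine and races
// it against context cancellation; on cancellation it forces the serve
// call to return by unmounting.
func serveUntilCancelled(ctx context.Context, serve func() error, unmount func() error) error {
	var err error
	done := make(chan struct{})
	go func() {
		defer close(done)
		err = serve() // safe: close(done) happens after this write
	}()
	select {
	case <-ctx.Done():
		if uerr := unmount(); uerr != nil {
			return uerr
		}
		<-done // wait for serve to observe the unmount and return
		return nil
	case <-done:
		// serve returned on its own (clean unmount by the user)
	}
	return err
}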
@@ -12,7 +12,7 @@ import (
 	"testing"
 	"time"

-	"github.com/restic/restic/internal/repository"
+	systemFuse "github.com/anacrolix/fuse"
 	"github.com/restic/restic/internal/restic"
 	rtest "github.com/restic/restic/internal/test"
 )

@@ -66,7 +66,7 @@ func testRunMount(t testing.TB, gopts GlobalOptions, dir string, wg *sync.WaitGroup) {
 func testRunUmount(t testing.TB, dir string) {
 	var err error
 	for i := 0; i < mountWait; i++ {
-		if err = umount(dir); err == nil {
+		if err = systemFuse.Unmount(dir); err == nil {
 			t.Logf("directory %v umounted", dir)
 			return
 		}

@@ -86,12 +86,12 @@ func listSnapshots(t testing.TB, dir string) []string {
 	return names
 }

-func checkSnapshots(t testing.TB, global GlobalOptions, repo *repository.Repository, mountpoint, repodir string, snapshotIDs restic.IDs, expectedSnapshotsInFuseDir int) {
+func checkSnapshots(t testing.TB, gopts GlobalOptions, mountpoint string, snapshotIDs restic.IDs, expectedSnapshotsInFuseDir int) {
 	t.Logf("checking for %d snapshots: %v", len(snapshotIDs), snapshotIDs)

 	var wg sync.WaitGroup
 	wg.Add(1)
-	go testRunMount(t, global, mountpoint, &wg)
+	go testRunMount(t, gopts, mountpoint, &wg)
 	waitForMount(t, mountpoint)
 	defer wg.Wait()
 	defer testRunUmount(t, mountpoint)

@@ -100,7 +100,7 @@ func checkSnapshots(t testing.TB, gopts GlobalOptions, mountpoint string, snapshotIDs restic.IDs, expectedSnapshotsInFuseDir int) {
 		t.Fatal(`virtual directory "snapshots" doesn't exist`)
 	}

-	ids := listSnapshots(t, repodir)
+	ids := listSnapshots(t, gopts.Repo)
 	t.Logf("found %v snapshots in repo: %v", len(ids), ids)

 	namesInSnapshots := listSnapshots(t, mountpoint)

@@ -124,6 +124,10 @@ func checkSnapshots(t testing.TB, gopts GlobalOptions, mountpoint string, snapshotIDs restic.IDs, expectedSnapshotsInFuseDir int) {
 		}
 	}

+	_, repo, unlock, err := openWithReadLock(context.TODO(), gopts, false)
+	rtest.OK(t, err)
+	defer unlock()
+
 	for _, id := range snapshotIDs {
 		snapshot, err := restic.LoadSnapshot(context.TODO(), repo, id)
 		rtest.OK(t, err)

@@ -166,10 +170,7 @@ func TestMount(t *testing.T) {

 	testRunInit(t, env.gopts)

-	repo, err := OpenRepository(context.TODO(), env.gopts)
-	rtest.OK(t, err)
-
-	checkSnapshots(t, env.gopts, repo, env.mountpoint, env.repo, []restic.ID{}, 0)
+	checkSnapshots(t, env.gopts, env.mountpoint, []restic.ID{}, 0)

 	rtest.SetupTarTestFixture(t, env.testdata, filepath.Join("testdata", "backup-data.tar.gz"))

@@ -179,7 +180,7 @@ func TestMount(t *testing.T) {
 	rtest.Assert(t, len(snapshotIDs) == 1,
 		"expected one snapshot, got %v", snapshotIDs)

-	checkSnapshots(t, env.gopts, repo, env.mountpoint, env.repo, snapshotIDs, 2)
+	checkSnapshots(t, env.gopts, env.mountpoint, snapshotIDs, 2)

 	// second backup, implicit incremental
 	testRunBackup(t, "", []string{env.testdata}, BackupOptions{}, env.gopts)

@@ -187,7 +188,7 @@ func TestMount(t *testing.T) {
 	rtest.Assert(t, len(snapshotIDs) == 2,
 		"expected two snapshots, got %v", snapshotIDs)

-	checkSnapshots(t, env.gopts, repo, env.mountpoint, env.repo, snapshotIDs, 3)
+	checkSnapshots(t, env.gopts, env.mountpoint, snapshotIDs, 3)

 	// third backup, explicit incremental
 	bopts := BackupOptions{Parent: snapshotIDs[0].String()}

@@ -196,7 +197,7 @@ func TestMount(t *testing.T) {
 	rtest.Assert(t, len(snapshotIDs) == 3,
 		"expected three snapshots, got %v", snapshotIDs)

-	checkSnapshots(t, env.gopts, repo, env.mountpoint, env.repo, snapshotIDs, 4)
+	checkSnapshots(t, env.gopts, env.mountpoint, snapshotIDs, 4)
 }

 func TestMountSameTimestamps(t *testing.T) {

@@ -211,14 +212,11 @@ func TestMountSameTimestamps(t *testing.T) {

 	rtest.SetupTarTestFixture(t, env.base, filepath.Join("testdata", "repo-same-timestamps.tar.gz"))

-	repo, err := OpenRepository(context.TODO(), env.gopts)
-	rtest.OK(t, err)
-
 	ids := []restic.ID{
 		restic.TestParseID("280303689e5027328889a06d718b729e96a1ce6ae9ef8290bff550459ae611ee"),
 		restic.TestParseID("75ad6cdc0868e082f2596d5ab8705e9f7d87316f5bf5690385eeff8dbe49d9f5"),
 		restic.TestParseID("5fd0d8b2ef0fa5d23e58f1e460188abb0f525c0f0c4af8365a1280c807a80a1b"),
 	}

-	checkSnapshots(t, env.gopts, repo, env.mountpoint, env.repo, ids, 4)
+	checkSnapshots(t, env.gopts, env.mountpoint, ids, 4)
 }

@@ -4,26 +4,20 @@ import (
 	"context"
 	"math"
 	"runtime"
-	"sort"
 	"strconv"
 	"strings"

-	"github.com/restic/restic/internal/debug"
 	"github.com/restic/restic/internal/errors"
-	"github.com/restic/restic/internal/index"
-	"github.com/restic/restic/internal/pack"
 	"github.com/restic/restic/internal/repository"
 	"github.com/restic/restic/internal/restic"
 	"github.com/restic/restic/internal/ui"
 	"github.com/restic/restic/internal/ui/progress"
+	"github.com/restic/restic/internal/ui/termstatus"

 	"github.com/spf13/cobra"
 )

-var errorIndexIncomplete = errors.Fatal("index is not complete")
-var errorPacksMissing = errors.Fatal("packs from index missing in repo")
-var errorSizeNotMatching = errors.Fatal("pack size does not match calculated size from index")
-
 var cmdPrune = &cobra.Command{
 	Use:   "prune [flags]",
 	Short: "Remove unneeded data from the repository",

@@ -38,7 +32,9 @@ Exit status is 0 if the command was successful, and non-zero if there was any error.
 `,
 	DisableAutoGenTag: true,
 	RunE: func(cmd *cobra.Command, _ []string) error {
-		return runPrune(cmd.Context(), pruneOptions, globalOptions)
+		term, cancel := setupTermstatus()
+		defer cancel()
+		return runPrune(cmd.Context(), pruneOptions, globalOptions, term)
 	},
 }

@@ -138,7 +134,7 @@ func verifyPruneOptions(opts *PruneOptions) error {
 	return nil
 }

-func runPrune(ctx context.Context, opts PruneOptions, gopts GlobalOptions) error {
+func runPrune(ctx context.Context, opts PruneOptions, gopts GlobalOptions, term *termstatus.Terminal) error {
 	err := verifyPruneOptions(&opts)
 	if err != nil {
 		return err

@@ -148,18 +144,11 @@ func runPrune(ctx context.Context, opts PruneOptions, gopts GlobalOptions, term *termstatus.Terminal) error {
 		return errors.Fatal("disabled compression and `--repack-uncompressed` are mutually exclusive")
 	}

-	repo, err := OpenRepository(ctx, gopts)
+	ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, false)
 	if err != nil {
 		return err
 	}
-
-	if repo.Connections() < 2 {
-		return errors.Fatal("prune requires a backend connection limit of at least two")
-	}
-
-	if repo.Config().Version < 2 && opts.RepackUncompressed {
-		return errors.Fatal("compression requires at least repository format version 2")
-	}
+	defer unlock()

 	if opts.UnsafeNoSpaceRecovery != "" {
 		repoID := repo.Config().ID

@@ -169,16 +158,10 @@ func runPrune(ctx context.Context, opts PruneOptions, gopts GlobalOptions, term *termstatus.Terminal) error {
 		opts.unsafeRecovery = true
 	}

-	lock, ctx, err := lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON)
-	defer unlockRepo(lock)
-	if err != nil {
-		return err
-	}
-
-	return runPruneWithRepo(ctx, opts, gopts, repo, restic.NewIDSet())
+	return runPruneWithRepo(ctx, opts, gopts, repo, restic.NewIDSet(), term)
 }

-func runPruneWithRepo(ctx context.Context, opts PruneOptions, gopts GlobalOptions, repo *repository.Repository, ignoreSnapshots restic.IDSet) error {
+func runPruneWithRepo(ctx context.Context, opts PruneOptions, gopts GlobalOptions, repo *repository.Repository, ignoreSnapshots restic.IDSet, term *termstatus.Terminal) error {
 	// we do not need index updates while pruning!
 	repo.DisableAutoIndexUpdate()

@@ -186,24 +169,43 @@ func runPruneWithRepo(ctx context.Context, opts PruneOptions, gopts GlobalOptions, repo *repository.Repository, ignoreSnapshots restic.IDSet, term *termstatus.Terminal) error {
 		Print("warning: running prune without a cache, this may be very slow!\n")
 	}

-	Verbosef("loading indexes...\n")
+	printer := newTerminalProgressPrinter(gopts.verbosity, term)
+
+	printer.P("loading indexes...\n")
 	// loading the index before the snapshots is ok, as we use an exclusive lock here
-	bar := newIndexProgress(gopts.Quiet, gopts.JSON)
+	bar := newIndexTerminalProgress(gopts.Quiet, gopts.JSON, term)
 	err := repo.LoadIndex(ctx, bar)
 	if err != nil {
 		return err
 	}

-	plan, stats, err := planPrune(ctx, opts, repo, ignoreSnapshots, gopts.Quiet)
+	popts := repository.PruneOptions{
+		DryRun:         opts.DryRun,
+		UnsafeRecovery: opts.unsafeRecovery,
+
+		MaxUnusedBytes: opts.maxUnusedBytes,
+		MaxRepackBytes: opts.MaxRepackBytes,
+
+		RepackCachableOnly: opts.RepackCachableOnly,
+		RepackSmall:        opts.RepackSmall,
+		RepackUncompressed: opts.RepackUncompressed,
+	}
+
+	plan, err := repository.PlanPrune(ctx, popts, repo, func(ctx context.Context, repo restic.Repository) (usedBlobs restic.CountedBlobSet, err error) {
+		return getUsedBlobs(ctx, repo, ignoreSnapshots, printer)
+	}, printer)
 	if err != nil {
 		return err
 	}

-	if opts.DryRun {
-		Verbosef("\nWould have made the following changes:")
+	if ctx.Err() != nil {
+		return ctx.Err()
 	}

-	err = printPruneStats(stats)
+	if popts.DryRun {
+		printer.P("\nWould have made the following changes:")
+	}
+
+	err = printPruneStats(printer, plan.Stats())
 	if err != nil {
 		return err
 	}

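Note: prune's decision logic now lives behind a two-phase API in the repository package. A condensed sketch of the caller-side flow introduced above — all names come from this diff, error handling is trimmed, and the options struct is shortened:

func pruneSketch(ctx context.Context, repo *repository.Repository, opts PruneOptions, printer progress.Printer) error {
	popts := repository.PruneOptions{
		DryRun:         opts.DryRun,
		MaxUnusedBytes: opts.maxUnusedBytes,
		MaxRepackBytes: opts.MaxRepackBytes,
	}
	// phase 1: decide which packs to remove or repack, without touching data
	plan, err := repository.PlanPrune(ctx, popts, repo,
		func(ctx context.Context, repo restic.Repository) (restic.CountedBlobSet, error) {
			return getUsedBlobs(ctx, repo, restic.NewIDSet(), printer)
		}, printer)
	if err != nil {
		return err
	}
	// the plan carries its own statistics for reporting
	if err := printPruneStats(printer, plan.Stats()); err != nil {
		return err
	}
	// phase 2: apply the plan (delete, repack, rebuild the index)
	return plan.Execute(ctx, printer)
}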
@@ -211,605 +213,54 @@ func runPruneWithRepo(ctx context.Context, opts PruneOptions, gopts GlobalOptions, repo *repository.Repository, ignoreSnapshots restic.IDSet, term *termstatus.Terminal) error {
 	// Trigger GC to reset garbage collection threshold
 	runtime.GC()

-	return doPrune(ctx, opts, gopts, repo, plan)
-}
-
-type pruneStats struct {
-	blobs struct {
-		used      uint
-		duplicate uint
-		unused    uint
-		remove    uint
-		repack    uint
-		repackrm  uint
-	}
-	size struct {
-		used         uint64
-		duplicate    uint64
-		unused       uint64
-		remove       uint64
-		repack       uint64
-		repackrm     uint64
-		unref        uint64
-		uncompressed uint64
-	}
-	packs struct {
-		used       uint
-		unused     uint
-		partlyUsed uint
-		unref      uint
-		keep       uint
-		repack     uint
-		remove     uint
-	}
-}
-
-type prunePlan struct {
-	removePacksFirst restic.IDSet          // packs to remove first (unreferenced packs)
-	repackPacks      restic.IDSet          // packs to repack
-	keepBlobs        restic.CountedBlobSet // blobs to keep during repacking
-	removePacks      restic.IDSet          // packs to remove
-	ignorePacks      restic.IDSet          // packs to ignore when rebuilding the index
-}
-
-type packInfo struct {
-	usedBlobs    uint
-	unusedBlobs  uint
-	usedSize     uint64
-	unusedSize   uint64
-	tpe          restic.BlobType
-	uncompressed bool
-}
-
-type packInfoWithID struct {
-	ID restic.ID
-	packInfo
-	mustCompress bool
-}
-
-// planPrune selects which files to rewrite and which to delete and which blobs to keep.
-// Also some summary statistics are returned.
-func planPrune(ctx context.Context, opts PruneOptions, repo restic.Repository, ignoreSnapshots restic.IDSet, quiet bool) (prunePlan, pruneStats, error) {
-	var stats pruneStats
-
-	usedBlobs, err := getUsedBlobs(ctx, repo, ignoreSnapshots, quiet)
-	if err != nil {
-		return prunePlan{}, stats, err
-	}
-
-	Verbosef("searching used packs...\n")
-	keepBlobs, indexPack, err := packInfoFromIndex(ctx, repo.Index(), usedBlobs, &stats)
-	if err != nil {
-		return prunePlan{}, stats, err
-	}
-
-	Verbosef("collecting packs for deletion and repacking\n")
-	plan, err := decidePackAction(ctx, opts, repo, indexPack, &stats, quiet)
-	if err != nil {
-		return prunePlan{}, stats, err
-	}
-
-	if len(plan.repackPacks) != 0 {
-		blobCount := keepBlobs.Len()
-		// when repacking, we do not want to keep blobs which are
-		// already contained in kept packs, so delete them from keepBlobs
-		repo.Index().Each(ctx, func(blob restic.PackedBlob) {
-			if plan.removePacks.Has(blob.PackID) || plan.repackPacks.Has(blob.PackID) {
-				return
-			}
-			keepBlobs.Delete(blob.BlobHandle)
-		})
-
-		if keepBlobs.Len() < blobCount/2 {
-			// replace with copy to shrink map to necessary size if there's a chance to benefit
-			keepBlobs = keepBlobs.Copy()
-		}
-	} else {
-		// keepBlobs is only needed if packs are repacked
-		keepBlobs = nil
-	}
-	plan.keepBlobs = keepBlobs
-
-	return plan, stats, nil
-}
-
-func packInfoFromIndex(ctx context.Context, idx restic.MasterIndex, usedBlobs restic.CountedBlobSet, stats *pruneStats) (restic.CountedBlobSet, map[restic.ID]packInfo, error) {
-	// iterate over all blobs in index to find out which blobs are duplicates
-	// The counter in usedBlobs describes how many instances of the blob exist in the repository index
-	// Thus 0 == blob is missing, 1 == blob exists once, >= 2 == duplicates exist
-	idx.Each(ctx, func(blob restic.PackedBlob) {
-		bh := blob.BlobHandle
-		count, ok := usedBlobs[bh]
-		if ok {
-			if count < math.MaxUint8 {
-				// don't overflow, but saturate count at 255
-				// this can lead to a non-optimal pack selection, but won't cause
-				// problems otherwise
-				count++
-			}
-
-			usedBlobs[bh] = count
-		}
-	})
-
-	// Check if all used blobs have been found in index
-	missingBlobs := restic.NewBlobSet()
-	for bh, count := range usedBlobs {
-		if count == 0 {
-			// blob does not exist in any pack files
-			missingBlobs.Insert(bh)
-		}
-	}
-
-	if len(missingBlobs) != 0 {
-		Warnf("%v not found in the index\n\n"+
-			"Integrity check failed: Data seems to be missing.\n"+
-			"Will not start prune to prevent (additional) data loss!\n"+
-			"Please report this error (along with the output of the 'prune' run) at\n"+
-			"https://github.com/restic/restic/issues/new/choose\n", missingBlobs)
-		return nil, nil, errorIndexIncomplete
-	}
-
-	indexPack := make(map[restic.ID]packInfo)
-
-	// save computed pack header size
-	for pid, hdrSize := range pack.Size(ctx, idx, true) {
-		// initialize tpe with NumBlobTypes to indicate it's not set
-		indexPack[pid] = packInfo{tpe: restic.NumBlobTypes, usedSize: uint64(hdrSize)}
-	}
-
-	hasDuplicates := false
-	// iterate over all blobs in index to generate packInfo
-	idx.Each(ctx, func(blob restic.PackedBlob) {
-		ip := indexPack[blob.PackID]
-
-		// Set blob type if not yet set
-		if ip.tpe == restic.NumBlobTypes {
-			ip.tpe = blob.Type
-		}
-
-		// mark mixed packs with "Invalid blob type"
-		if ip.tpe != blob.Type {
-			ip.tpe = restic.InvalidBlob
-		}
-
-		bh := blob.BlobHandle
-		size := uint64(blob.Length)
-		dupCount := usedBlobs[bh]
-		switch {
-		case dupCount >= 2:
-			hasDuplicates = true
-			// mark as unused for now, we will later on select one copy
-			ip.unusedSize += size
-			ip.unusedBlobs++
-
-			// count as duplicate, will later on change one copy to be counted as used
-			stats.size.duplicate += size
-			stats.blobs.duplicate++
-		case dupCount == 1: // used blob, not duplicate
-			ip.usedSize += size
-			ip.usedBlobs++
-
-			stats.size.used += size
-			stats.blobs.used++
-		default: // unused blob
-			ip.unusedSize += size
-			ip.unusedBlobs++
-
-			stats.size.unused += size
-			stats.blobs.unused++
-		}
-		if !blob.IsCompressed() {
-			ip.uncompressed = true
-		}
-		// update indexPack
-		indexPack[blob.PackID] = ip
-	})
-
-	// if duplicate blobs exist, those will be set to either "used" or "unused":
-	// - mark only one occurrence of duplicate blobs as used
-	// - if there are already some used blobs in a pack, possibly mark duplicates in this pack as "used"
-	// - if there are no used blobs in a pack, possibly mark duplicates as "unused"
-	if hasDuplicates {
-		// iterate again over all blobs in index (this is pretty cheap, all in-mem)
-		idx.Each(ctx, func(blob restic.PackedBlob) {
-			bh := blob.BlobHandle
-			count, ok := usedBlobs[bh]
-			// skip non-duplicate, aka. normal blobs
-			// count == 0 is used to mark that this was a duplicate blob with only a single occurrence remaining
-			if !ok || count == 1 {
-				return
-			}
-
-			ip := indexPack[blob.PackID]
-			size := uint64(blob.Length)
-			switch {
-			case ip.usedBlobs > 0, count == 0:
-				// other used blobs in pack or "last" occurrence -> transition to used
-				ip.usedSize += size
-				ip.usedBlobs++
-				ip.unusedSize -= size
-				ip.unusedBlobs--
-				// same for the global statistics
-				stats.size.used += size
-				stats.blobs.used++
-				stats.size.duplicate -= size
-				stats.blobs.duplicate--
-				// let other occurrences remain marked as unused
-				usedBlobs[bh] = 1
-			default:
-				// remain unused and decrease counter
-				count--
-				if count == 1 {
-					// setting count to 1 would lead to forgetting that this blob had duplicates
-					// thus use the special value zero. This will select the last instance of the blob for keeping.
-					count = 0
-				}
-				usedBlobs[bh] = count
-			}
-			// update indexPack
-			indexPack[blob.PackID] = ip
-		})
-	}
-
-	// Sanity check. If no duplicates exist, all blobs have value 1. After handling
-	// duplicates, this also applies to duplicates.
-	for _, count := range usedBlobs {
-		if count != 1 {
-			panic("internal error during blob selection")
-		}
-	}
-
-	return usedBlobs, indexPack, nil
-}
-
-func decidePackAction(ctx context.Context, opts PruneOptions, repo restic.Repository, indexPack map[restic.ID]packInfo, stats *pruneStats, quiet bool) (prunePlan, error) {
-	removePacksFirst := restic.NewIDSet()
-	removePacks := restic.NewIDSet()
-	repackPacks := restic.NewIDSet()
-
-	var repackCandidates []packInfoWithID
-	var repackSmallCandidates []packInfoWithID
-	repoVersion := repo.Config().Version
-	// only repack very small files by default
-	targetPackSize := repo.PackSize() / 25
-	if opts.RepackSmall {
-		// consider files with at least 80% of the target size as large enough
-		targetPackSize = repo.PackSize() / 5 * 4
-	}
-
-	// loop over all packs and decide what to do
-	bar := newProgressMax(!quiet, uint64(len(indexPack)), "packs processed")
-	err := repo.List(ctx, restic.PackFile, func(id restic.ID, packSize int64) error {
-		p, ok := indexPack[id]
-		if !ok {
-			// Pack was not referenced in index and is not used => immediately remove!
-			Verboseff("will remove pack %v as it is unused and not indexed\n", id.Str())
-			removePacksFirst.Insert(id)
-			stats.size.unref += uint64(packSize)
-			return nil
-		}
-
-		if p.unusedSize+p.usedSize != uint64(packSize) && p.usedBlobs != 0 {
-			// Pack size does not fit and pack is needed => error
-			// If the pack is not needed, this is no error, the pack can
-			// and will be simply removed, see below.
-			Warnf("pack %s: calculated size %d does not match real size %d\nRun 'restic repair index'.\n",
-				id.Str(), p.unusedSize+p.usedSize, packSize)
-			return errorSizeNotMatching
-		}
-
-		// statistics
-		switch {
-		case p.usedBlobs == 0:
-			stats.packs.unused++
-		case p.unusedBlobs == 0:
-			stats.packs.used++
-		default:
-			stats.packs.partlyUsed++
-		}
-
-		if p.uncompressed {
-			stats.size.uncompressed += p.unusedSize + p.usedSize
-		}
-		mustCompress := false
-		if repoVersion >= 2 {
-			// repo v2: always repack tree blobs if uncompressed
-			// compress data blobs if requested
-			mustCompress = (p.tpe == restic.TreeBlob || opts.RepackUncompressed) && p.uncompressed
-		}
-
-		// decide what to do
-		switch {
-		case p.usedBlobs == 0:
-			// All blobs in pack are no longer used => remove pack!
-			removePacks.Insert(id)
-			stats.blobs.remove += p.unusedBlobs
-			stats.size.remove += p.unusedSize
-
-		case opts.RepackCachableOnly && p.tpe == restic.DataBlob:
-			// if this is a data pack and --repack-cacheable-only is set => keep pack!
-			stats.packs.keep++
-
-		case p.unusedBlobs == 0 && p.tpe != restic.InvalidBlob && !mustCompress:
-			if packSize >= int64(targetPackSize) {
-				// All blobs in pack are used and not mixed => keep pack!
-				stats.packs.keep++
-			} else {
-				repackSmallCandidates = append(repackSmallCandidates, packInfoWithID{ID: id, packInfo: p, mustCompress: mustCompress})
-			}
-
-		default:
-			// all other packs are candidates for repacking
-			repackCandidates = append(repackCandidates, packInfoWithID{ID: id, packInfo: p, mustCompress: mustCompress})
-		}
-
-		delete(indexPack, id)
-		bar.Add(1)
-		return nil
-	})
-	bar.Done()
-	if err != nil {
-		return prunePlan{}, err
-	}
-
-	// At this point indexPacks contains only missing packs!
-
-	// missing packs that are not needed can be ignored
-	ignorePacks := restic.NewIDSet()
-	for id, p := range indexPack {
-		if p.usedBlobs == 0 {
-			ignorePacks.Insert(id)
-			stats.blobs.remove += p.unusedBlobs
-			stats.size.remove += p.unusedSize
-			delete(indexPack, id)
-		}
-	}
-
-	if len(indexPack) != 0 {
-		Warnf("The index references %d needed pack files which are missing from the repository:\n", len(indexPack))
-		for id := range indexPack {
-			Warnf(" %v\n", id)
-		}
-		return prunePlan{}, errorPacksMissing
-	}
-	if len(ignorePacks) != 0 {
-		Warnf("Missing but unneeded pack files are referenced in the index, will be repaired\n")
-		for id := range ignorePacks {
-			Warnf("will forget missing pack file %v\n", id)
-		}
-	}
-
-	if len(repackSmallCandidates) < 10 {
-		// too few small files to be worth the trouble, this also prevents endlessly repacking
-		// if there is just a single pack file below the target size
-		stats.packs.keep += uint(len(repackSmallCandidates))
-	} else {
-		repackCandidates = append(repackCandidates, repackSmallCandidates...)
-	}
-
-	// Sort repackCandidates such that packs with highest ratio unused/used space are picked first.
-	// This is equivalent to sorting by unused / total space.
-	// Instead of unused[i] / used[i] > unused[j] / used[j] we use
-	// unused[i] * used[j] > unused[j] * used[i] as uint32*uint32 < uint64
-	// Moreover packs containing trees and too small packs are sorted to the beginning
-	sort.Slice(repackCandidates, func(i, j int) bool {
-		pi := repackCandidates[i].packInfo
-		pj := repackCandidates[j].packInfo
-		switch {
-		case pi.tpe != restic.DataBlob && pj.tpe == restic.DataBlob:
-			return true
-		case pj.tpe != restic.DataBlob && pi.tpe == restic.DataBlob:
-			return false
-		case pi.unusedSize+pi.usedSize < uint64(targetPackSize) && pj.unusedSize+pj.usedSize >= uint64(targetPackSize):
-			return true
-		case pj.unusedSize+pj.usedSize < uint64(targetPackSize) && pi.unusedSize+pi.usedSize >= uint64(targetPackSize):
-			return false
-		}
-		return pi.unusedSize*pj.usedSize > pj.unusedSize*pi.usedSize
-	})
-
-	repack := func(id restic.ID, p packInfo) {
-		repackPacks.Insert(id)
-		stats.blobs.repack += p.unusedBlobs + p.usedBlobs
-		stats.size.repack += p.unusedSize + p.usedSize
-		stats.blobs.repackrm += p.unusedBlobs
-		stats.size.repackrm += p.unusedSize
-		if p.uncompressed {
-			stats.size.uncompressed -= p.unusedSize + p.usedSize
-		}
-	}
-
-	// calculate limit for number of unused bytes in the repo after repacking
-	maxUnusedSizeAfter := opts.maxUnusedBytes(stats.size.used)
-
-	for _, p := range repackCandidates {
-		reachedUnusedSizeAfter := (stats.size.unused-stats.size.remove-stats.size.repackrm < maxUnusedSizeAfter)
-		reachedRepackSize := stats.size.repack+p.unusedSize+p.usedSize >= opts.MaxRepackBytes
-		packIsLargeEnough := p.unusedSize+p.usedSize >= uint64(targetPackSize)
-
-		switch {
-		case reachedRepackSize:
-			stats.packs.keep++
-
-		case p.tpe != restic.DataBlob, p.mustCompress:
-			// repacking non-data packs / uncompressed-trees is only limited by repackSize
-			repack(p.ID, p.packInfo)
-
-		case reachedUnusedSizeAfter && packIsLargeEnough:
-			// for all other packs stop repacking if tolerated unused size is reached.
-			stats.packs.keep++
-
-		default:
-			repack(p.ID, p.packInfo)
-		}
-	}
-
-	stats.packs.unref = uint(len(removePacksFirst))
-	stats.packs.repack = uint(len(repackPacks))
-	stats.packs.remove = uint(len(removePacks))
-
-	if repo.Config().Version < 2 {
-		// compression not supported for repository format version 1
-		stats.size.uncompressed = 0
-	}
-
-	return prunePlan{removePacksFirst: removePacksFirst,
-		removePacks: removePacks,
-		repackPacks: repackPacks,
-		ignorePacks: ignorePacks,
-	}, nil
+	return plan.Execute(ctx, printer)
 }

 // printPruneStats prints out the statistics
-func printPruneStats(stats pruneStats) error {
-	Verboseff("\nused: %10d blobs / %s\n", stats.blobs.used, ui.FormatBytes(stats.size.used))
-	if stats.blobs.duplicate > 0 {
-		Verboseff("duplicates: %10d blobs / %s\n", stats.blobs.duplicate, ui.FormatBytes(stats.size.duplicate))
+func printPruneStats(printer progress.Printer, stats repository.PruneStats) error {
+	printer.V("\nused: %10d blobs / %s\n", stats.Blobs.Used, ui.FormatBytes(stats.Size.Used))
+	if stats.Blobs.Duplicate > 0 {
+		printer.V("duplicates: %10d blobs / %s\n", stats.Blobs.Duplicate, ui.FormatBytes(stats.Size.Duplicate))
 	}
-	Verboseff("unused: %10d blobs / %s\n", stats.blobs.unused, ui.FormatBytes(stats.size.unused))
-	if stats.size.unref > 0 {
-		Verboseff("unreferenced: %s\n", ui.FormatBytes(stats.size.unref))
+	printer.V("unused: %10d blobs / %s\n", stats.Blobs.Unused, ui.FormatBytes(stats.Size.Unused))
+	if stats.Size.Unref > 0 {
+		printer.V("unreferenced: %s\n", ui.FormatBytes(stats.Size.Unref))
 	}
-	totalBlobs := stats.blobs.used + stats.blobs.unused + stats.blobs.duplicate
-	totalSize := stats.size.used + stats.size.duplicate + stats.size.unused + stats.size.unref
-	unusedSize := stats.size.duplicate + stats.size.unused
-	Verboseff("total: %10d blobs / %s\n", totalBlobs, ui.FormatBytes(totalSize))
-	Verboseff("unused size: %s of total size\n", ui.FormatPercent(unusedSize, totalSize))
+	totalBlobs := stats.Blobs.Used + stats.Blobs.Unused + stats.Blobs.Duplicate
+	totalSize := stats.Size.Used + stats.Size.Duplicate + stats.Size.Unused + stats.Size.Unref
+	unusedSize := stats.Size.Duplicate + stats.Size.Unused
+	printer.V("total: %10d blobs / %s\n", totalBlobs, ui.FormatBytes(totalSize))
+	printer.V("unused size: %s of total size\n", ui.FormatPercent(unusedSize, totalSize))

-	Verbosef("\nto repack: %10d blobs / %s\n", stats.blobs.repack, ui.FormatBytes(stats.size.repack))
-	Verbosef("this removes: %10d blobs / %s\n", stats.blobs.repackrm, ui.FormatBytes(stats.size.repackrm))
-	Verbosef("to delete: %10d blobs / %s\n", stats.blobs.remove, ui.FormatBytes(stats.size.remove+stats.size.unref))
-	totalPruneSize := stats.size.remove + stats.size.repackrm + stats.size.unref
-	Verbosef("total prune: %10d blobs / %s\n", stats.blobs.remove+stats.blobs.repackrm, ui.FormatBytes(totalPruneSize))
-	if stats.size.uncompressed > 0 {
-		Verbosef("not yet compressed: %s\n", ui.FormatBytes(stats.size.uncompressed))
+	printer.P("\nto repack: %10d blobs / %s\n", stats.Blobs.Repack, ui.FormatBytes(stats.Size.Repack))
+	printer.P("this removes: %10d blobs / %s\n", stats.Blobs.Repackrm, ui.FormatBytes(stats.Size.Repackrm))
+	printer.P("to delete: %10d blobs / %s\n", stats.Blobs.Remove, ui.FormatBytes(stats.Size.Remove+stats.Size.Unref))
+	totalPruneSize := stats.Size.Remove + stats.Size.Repackrm + stats.Size.Unref
+	printer.P("total prune: %10d blobs / %s\n", stats.Blobs.Remove+stats.Blobs.Repackrm, ui.FormatBytes(totalPruneSize))
+	if stats.Size.Uncompressed > 0 {
+		printer.P("not yet compressed: %s\n", ui.FormatBytes(stats.Size.Uncompressed))
 	}
-	Verbosef("remaining: %10d blobs / %s\n", totalBlobs-(stats.blobs.remove+stats.blobs.repackrm), ui.FormatBytes(totalSize-totalPruneSize))
-	unusedAfter := unusedSize - stats.size.remove - stats.size.repackrm
-	Verbosef("unused size after prune: %s (%s of remaining size)\n",
+	printer.P("remaining: %10d blobs / %s\n", totalBlobs-(stats.Blobs.Remove+stats.Blobs.Repackrm), ui.FormatBytes(totalSize-totalPruneSize))
+	unusedAfter := unusedSize - stats.Size.Remove - stats.Size.Repackrm
+	printer.P("unused size after prune: %s (%s of remaining size)\n",
 		ui.FormatBytes(unusedAfter), ui.FormatPercent(unusedAfter, totalSize-totalPruneSize))
-	Verbosef("\n")
-	Verboseff("totally used packs: %10d\n", stats.packs.used)
-	Verboseff("partly used packs: %10d\n", stats.packs.partlyUsed)
-	Verboseff("unused packs: %10d\n\n", stats.packs.unused)
+	printer.P("\n")
+	printer.V("totally used packs: %10d\n", stats.Packs.Used)
+	printer.V("partly used packs: %10d\n", stats.Packs.PartlyUsed)
+	printer.V("unused packs: %10d\n\n", stats.Packs.Unused)

-	Verboseff("to keep: %10d packs\n", stats.packs.keep)
-	Verboseff("to repack: %10d packs\n", stats.packs.repack)
-	Verboseff("to delete: %10d packs\n", stats.packs.remove)
-	if stats.packs.unref > 0 {
-		Verboseff("to delete: %10d unreferenced packs\n\n", stats.packs.unref)
+	printer.V("to keep: %10d packs\n", stats.Packs.Keep)
+	printer.V("to repack: %10d packs\n", stats.Packs.Repack)
+	printer.V("to delete: %10d packs\n", stats.Packs.Remove)
+	if stats.Packs.Unref > 0 {
+		printer.V("to delete: %10d unreferenced packs\n\n", stats.Packs.Unref)
 	}
 	return nil
 }

-// doPrune does the actual pruning:
-// - remove unreferenced packs first
-// - repack given pack files while keeping the given blobs
-// - rebuild the index while ignoring all files that will be deleted
-// - delete the files
-// plan.removePacks and plan.ignorePacks are modified in this function.
-func doPrune(ctx context.Context, opts PruneOptions, gopts GlobalOptions, repo restic.Repository, plan prunePlan) (err error) {
-	if opts.DryRun {
-		if !gopts.JSON && gopts.verbosity >= 2 {
-			Printf("Repeated prune dry-runs can report slightly different amounts of data to keep or repack. This is expected behavior.\n\n")
-			if len(plan.removePacksFirst) > 0 {
-				Printf("Would have removed the following unreferenced packs:\n%v\n\n", plan.removePacksFirst)
-			}
-			Printf("Would have repacked and removed the following packs:\n%v\n\n", plan.repackPacks)
-			Printf("Would have removed the following no longer used packs:\n%v\n\n", plan.removePacks)
-		}
-		// Always quit here if DryRun was set!
-		return nil
-	}
-
-	// unreferenced packs can be safely deleted first
-	if len(plan.removePacksFirst) != 0 {
-		Verbosef("deleting unreferenced packs\n")
-		DeleteFiles(ctx, gopts, repo, plan.removePacksFirst, restic.PackFile)
-	}
-
-	if len(plan.repackPacks) != 0 {
-		Verbosef("repacking packs\n")
-		bar := newProgressMax(!gopts.Quiet, uint64(len(plan.repackPacks)), "packs repacked")
-		_, err := repository.Repack(ctx, repo, repo, plan.repackPacks, plan.keepBlobs, bar)
-		bar.Done()
-		if err != nil {
-			return errors.Fatal(err.Error())
-		}
-
-		// Also remove repacked packs
-		plan.removePacks.Merge(plan.repackPacks)
-
-		if len(plan.keepBlobs) != 0 {
-			Warnf("%v was not repacked\n\n"+
-				"Integrity check failed.\n"+
-				"Please report this error (along with the output of the 'prune' run) at\n"+
-				"https://github.com/restic/restic/issues/new/choose\n", plan.keepBlobs)
-			return errors.Fatal("internal error: blobs were not repacked")
-		}
-
-		// allow GC of the blob set
-		plan.keepBlobs = nil
-	}
-
-	if len(plan.ignorePacks) == 0 {
-		plan.ignorePacks = plan.removePacks
-	} else {
-		plan.ignorePacks.Merge(plan.removePacks)
-	}
-
-	if opts.unsafeRecovery {
-		Verbosef("deleting index files\n")
-		indexFiles := repo.Index().(*index.MasterIndex).IDs()
-		err = DeleteFilesChecked(ctx, gopts, repo, indexFiles, restic.IndexFile)
-		if err != nil {
-			return errors.Fatalf("%s", err)
-		}
-	} else if len(plan.ignorePacks) != 0 {
-		err = rebuildIndexFiles(ctx, gopts, repo, plan.ignorePacks, nil, false)
-		if err != nil {
-			return errors.Fatalf("%s", err)
-		}
-	}
-
-	if len(plan.removePacks) != 0 {
-		Verbosef("removing %d old packs\n", len(plan.removePacks))
-		DeleteFiles(ctx, gopts, repo, plan.removePacks, restic.PackFile)
-	}
-
-	if opts.unsafeRecovery {
-		err = rebuildIndexFiles(ctx, gopts, repo, plan.ignorePacks, nil, true)
-		if err != nil {
-			return errors.Fatalf("%s", err)
-		}
-	}
-
-	Verbosef("done\n")
-	return nil
-}
-
-func rebuildIndexFiles(ctx context.Context, gopts GlobalOptions, repo restic.Repository, removePacks restic.IDSet, extraObsolete restic.IDs, skipDeletion bool) error {
-	Verbosef("rebuilding index\n")
-
-	bar := newProgressMax(!gopts.Quiet, 0, "packs processed")
-	return repo.Index().Save(ctx, repo, removePacks, extraObsolete, restic.MasterIndexSaveOpts{
-		SaveProgress: bar,
-		DeleteProgress: func() *progress.Counter {
-			return newProgressMax(!gopts.Quiet, 0, "old indexes deleted")
-		},
-		DeleteReport: func(id restic.ID, _ error) {
-			if gopts.verbosity > 2 {
-				Verbosef("removed index %v\n", id.String())
-			}
-		},
-		SkipDeletion: skipDeletion,
-	})
-}
-
-func getUsedBlobs(ctx context.Context, repo restic.Repository, ignoreSnapshots restic.IDSet, quiet bool) (usedBlobs restic.CountedBlobSet, err error) {
+func getUsedBlobs(ctx context.Context, repo restic.Repository, ignoreSnapshots restic.IDSet, printer progress.Printer) (usedBlobs restic.CountedBlobSet, err error) {
 	var snapshotTrees restic.IDs
-	Verbosef("loading all snapshots...\n")
+	printer.P("loading all snapshots...\n")
 	err = restic.ForAllSnapshots(ctx, repo, repo, ignoreSnapshots,
 		func(id restic.ID, sn *restic.Snapshot, err error) error {
 			if err != nil {

@@ -824,11 +275,12 @@ func getUsedBlobs(ctx context.Context, repo restic.Repository, ignoreSnapshots restic.IDSet, printer progress.Printer) (usedBlobs restic.CountedBlobSet, err error) {
 		return nil, errors.Fatalf("failed loading snapshot: %v", err)
 	}

-	Verbosef("finding data that is still in use for %d snapshots\n", len(snapshotTrees))
+	printer.P("finding data that is still in use for %d snapshots\n", len(snapshotTrees))

 	usedBlobs = restic.NewCountedBlobSet()

-	bar := newProgressMax(!quiet, uint64(len(snapshotTrees)), "snapshots")
+	bar := printer.NewCounter("snapshots")
+	bar.SetMax(uint64(len(snapshotTrees)))
 	defer bar.Done()

 	err = restic.FindUsedBlobs(ctx, repo, snapshotTrees, usedBlobs, bar)

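Note: throughout the prune rewrite, the global print helpers map onto progress.Printer methods. Assuming the usual restic semantics (Verbosef prints at default verbosity, Verboseff only at higher --verbose levels), the correspondence applied above is:

// Illustrative only — shows which printer method replaced which helper.
func reportSketch(printer progress.Printer, stats repository.PruneStats) {
	printer.P("to repack: %10d blobs\n", stats.Blobs.Repack) // was Verbosef (default verbosity)
	printer.V("to keep:   %10d packs\n", stats.Packs.Keep)   // was Verboseff (verbose only)
}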
@@ -7,7 +7,9 @@ import (
 	"testing"

 	"github.com/restic/restic/internal/backend"
+	"github.com/restic/restic/internal/repository"
 	rtest "github.com/restic/restic/internal/test"
+	"github.com/restic/restic/internal/ui/termstatus"
 )

 func testRunPrune(t testing.TB, gopts GlobalOptions, opts PruneOptions) {

@@ -16,7 +18,9 @@ func testRunPrune(t testing.TB, gopts GlobalOptions, opts PruneOptions) {
 	defer func() {
 		gopts.backendTestHook = oldHook
 	}()
-	rtest.OK(t, runPrune(context.TODO(), opts, gopts))
+	rtest.OK(t, withTermStatus(gopts, func(ctx context.Context, term *termstatus.Terminal) error {
+		return runPrune(context.TODO(), opts, gopts, term)
+	}))
 }

 func TestPrune(t *testing.T) {

@@ -31,7 +35,7 @@ func testPruneVariants(t *testing.T, unsafeNoSpaceRecovery bool) {
 	}
 	t.Run("0"+suffix, func(t *testing.T) {
 		opts := PruneOptions{MaxUnused: "0%", unsafeRecovery: unsafeNoSpaceRecovery}
-		checkOpts := CheckOptions{ReadData: true, CheckUnused: true}
+		checkOpts := CheckOptions{ReadData: true, CheckUnused: !unsafeNoSpaceRecovery}
 		testPrune(t, opts, checkOpts)
 	})

@@ -84,7 +88,9 @@ func testRunForgetJSON(t testing.TB, gopts GlobalOptions, args ...string) {
 		pruneOpts := PruneOptions{
 			MaxUnused: "5%",
 		}
-		return runForget(context.TODO(), opts, pruneOpts, gopts, args)
+		return withTermStatus(gopts, func(ctx context.Context, term *termstatus.Terminal) error {
+			return runForget(context.TODO(), opts, pruneOpts, gopts, term, args)
+		})
 	})
 	rtest.OK(t, err)

@@ -138,7 +144,9 @@ func TestPruneWithDamagedRepository(t *testing.T) {
 		env.gopts.backendTestHook = oldHook
 	}()
 	// prune should fail
-	rtest.Assert(t, runPrune(context.TODO(), pruneDefaultOptions, env.gopts) == errorPacksMissing,
+	rtest.Assert(t, withTermStatus(env.gopts, func(ctx context.Context, term *termstatus.Terminal) error {
+		return runPrune(context.TODO(), pruneDefaultOptions, env.gopts, term)
+	}) == repository.ErrPacksMissing,
 		"prune should have reported index not complete error")
 }

@@ -218,7 +226,9 @@ func testEdgeCaseRepo(t *testing.T, tarfile string, optionsCheck CheckOptions, optionsPrune PruneOptions, checkOK, pruneOK bool) {
 		testRunPrune(t, env.gopts, optionsPrune)
 		testRunCheck(t, env.gopts)
 	} else {
-		rtest.Assert(t, runPrune(context.TODO(), optionsPrune, env.gopts) != nil,
+		rtest.Assert(t, withTermStatus(env.gopts, func(ctx context.Context, term *termstatus.Terminal) error {
+			return runPrune(context.TODO(), optionsPrune, env.gopts, term)
+		}) != nil,
 			"prune should have reported an error")
 	}
 }

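Note: every test now routes the command under test through a termstatus terminal. The helper's body is outside this diff; a sketch of what such a wrapper plausibly looks like, using only termstatus calls that already exist in restic (New, Run):

// Sketch of the withTermStatus test helper used above, not the actual body.
func withTermStatus(gopts GlobalOptions, callback func(ctx context.Context, term *termstatus.Terminal) error) error {
	ctx, cancel := context.WithCancel(context.TODO())
	defer cancel()

	term := termstatus.New(gopts.stdout, gopts.stderr, gopts.Quiet)
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		term.Run(ctx) // pumps status output until the context ends
	}()

	defer wg.Wait()
	return callback(ctx, term)
}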
@@ -40,16 +40,11 @@ func runRecover(ctx context.Context, gopts GlobalOptions) error {
 		return err
 	}

-	repo, err := OpenRepository(ctx, gopts)
-	if err != nil {
-		return err
-	}
-
-	lock, ctx, err := lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
-	defer unlockRepo(lock)
+	ctx, repo, unlock, err := openWithAppendLock(ctx, gopts, false)
 	if err != nil {
 		return err
 	}
+	defer unlock()

 	snapshotLister, err := restic.MemorizeList(ctx, repo, restic.SnapshotFile)
 	if err != nil {

@@ -66,16 +61,22 @@ func runRecover(ctx context.Context, gopts GlobalOptions) error {
 	// tree. If it is not referenced, we have a root tree.
 	trees := make(map[restic.ID]bool)

-	repo.Index().Each(ctx, func(blob restic.PackedBlob) {
+	err = repo.Index().Each(ctx, func(blob restic.PackedBlob) {
 		if blob.Type == restic.TreeBlob {
 			trees[blob.Blob.ID] = false
 		}
 	})
+	if err != nil {
+		return err
+	}

 	Verbosef("load %d trees\n", len(trees))
 	bar = newProgressMax(!gopts.Quiet, uint64(len(trees)), "trees loaded")
 	for id := range trees {
 		tree, err := restic.LoadTree(ctx, repo, id)
+		if ctx.Err() != nil {
+			return ctx.Err()
+		}
 		if err != nil {
 			Warnf("unable to load tree %v: %v\n", id.Str(), err)
 			continue

@@ -3,10 +3,8 @@ package main
 import (
 	"context"

-	"github.com/restic/restic/internal/index"
-	"github.com/restic/restic/internal/pack"
 	"github.com/restic/restic/internal/repository"
-	"github.com/restic/restic/internal/restic"
+	"github.com/restic/restic/internal/ui/termstatus"
 	"github.com/spf13/cobra"
 	"github.com/spf13/pflag"
 )

@@ -25,7 +23,9 @@ Exit status is 0 if the command was successful, and non-zero if there was any error.
 `,
 	DisableAutoGenTag: true,
 	RunE: func(cmd *cobra.Command, _ []string) error {
-		return runRebuildIndex(cmd.Context(), repairIndexOptions, globalOptions)
+		term, cancel := setupTermstatus()
+		defer cancel()
+		return runRebuildIndex(cmd.Context(), repairIndexOptions, globalOptions, term)
 	},
 }

@@ -55,110 +55,22 @@ func init() {
 	}
 }

-func runRebuildIndex(ctx context.Context, opts RepairIndexOptions, gopts GlobalOptions) error {
-	repo, err := OpenRepository(ctx, gopts)
+func runRebuildIndex(ctx context.Context, opts RepairIndexOptions, gopts GlobalOptions, term *termstatus.Terminal) error {
+	ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, false)
 	if err != nil {
 		return err
 	}
+	defer unlock()
+
+	printer := newTerminalProgressPrinter(gopts.verbosity, term)
+
+	err = repository.RepairIndex(ctx, repo, repository.RepairIndexOptions{
+		ReadAllPacks: opts.ReadAllPacks,
+	}, printer)
+	if err != nil {
+		return err
+	}

-	lock, ctx, err := lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON)
-	defer unlockRepo(lock)
-	if err != nil {
-		return err
-	}
-
-	return rebuildIndex(ctx, opts, gopts, repo)
-}
-
-func rebuildIndex(ctx context.Context, opts RepairIndexOptions, gopts GlobalOptions, repo *repository.Repository) error {
-	var obsoleteIndexes restic.IDs
-	packSizeFromList := make(map[restic.ID]int64)
-	packSizeFromIndex := make(map[restic.ID]int64)
-	removePacks := restic.NewIDSet()
-
-	if opts.ReadAllPacks {
-		// get list of old index files but start with empty index
-		err := repo.List(ctx, restic.IndexFile, func(id restic.ID, _ int64) error {
-			obsoleteIndexes = append(obsoleteIndexes, id)
-			return nil
-		})
-		if err != nil {
-			return err
-		}
-	} else {
-		Verbosef("loading indexes...\n")
-		mi := index.NewMasterIndex()
-		err := index.ForAllIndexes(ctx, repo, repo, func(id restic.ID, idx *index.Index, _ bool, err error) error {
-			if err != nil {
-				Warnf("removing invalid index %v: %v\n", id, err)
-				obsoleteIndexes = append(obsoleteIndexes, id)
-				return nil
-			}
-
-			mi.Insert(idx)
-			return nil
-		})
-		if err != nil {
-			return err
-		}
-
-		err = mi.MergeFinalIndexes()
-		if err != nil {
-			return err
-		}
-
-		err = repo.SetIndex(mi)
-		if err != nil {
-			return err
-		}
-		packSizeFromIndex = pack.Size(ctx, repo.Index(), false)
-	}
-
-	Verbosef("getting pack files to read...\n")
-	err := repo.List(ctx, restic.PackFile, func(id restic.ID, packSize int64) error {
-		size, ok := packSizeFromIndex[id]
-		if !ok || size != packSize {
-			// Pack was not referenced in index or size does not match
-			packSizeFromList[id] = packSize
-			removePacks.Insert(id)
-		}
-		if !ok {
-			Warnf("adding pack file to index %v\n", id)
-		} else if size != packSize {
-			Warnf("reindexing pack file %v with unexpected size %v instead of %v\n", id, packSize, size)
-		}
-		delete(packSizeFromIndex, id)
-		return nil
-	})
-	if err != nil {
-		return err
-	}
-	for id := range packSizeFromIndex {
-		// forget pack files that are referenced in the index but do not exist
-		// when rebuilding the index
-		removePacks.Insert(id)
-		Warnf("removing not found pack file %v\n", id)
-	}
-
-	if len(packSizeFromList) > 0 {
-		Verbosef("reading pack files\n")
-		bar := newProgressMax(!gopts.Quiet, uint64(len(packSizeFromList)), "packs")
-		invalidFiles, err := repo.CreateIndexFromPacks(ctx, packSizeFromList, bar)
-		bar.Done()
-		if err != nil {
-			return err
-		}
-
-		for _, id := range invalidFiles {
-			Verboseff("skipped incomplete pack file: %v\n", id)
-		}
-	}
-
-	err = rebuildIndexFiles(ctx, gopts, repo, removePacks, obsoleteIndexes, false)
-	if err != nil {
-		return err
-	}
-	Verbosef("done\n")
-
+	printer.P("done\n")
 	return nil
 }

@@ -13,12 +13,15 @@ import (
 	"github.com/restic/restic/internal/index"
 	"github.com/restic/restic/internal/restic"
 	rtest "github.com/restic/restic/internal/test"
+	"github.com/restic/restic/internal/ui/termstatus"
 )

 func testRunRebuildIndex(t testing.TB, gopts GlobalOptions) {
 	rtest.OK(t, withRestoreGlobalOptions(func() error {
-		globalOptions.stdout = io.Discard
-		return runRebuildIndex(context.TODO(), RepairIndexOptions{}, gopts)
+		return withTermStatus(gopts, func(ctx context.Context, term *termstatus.Terminal) error {
+			globalOptions.stdout = io.Discard
+			return runRebuildIndex(context.TODO(), RepairIndexOptions{}, gopts, term)
+		})
 	}))
 }

@@ -126,12 +129,13 @@ func TestRebuildIndexFailsOnAppendOnly(t *testing.T) {
 	rtest.SetupTarTestFixture(t, env.base, datafile)

 	err := withRestoreGlobalOptions(func() error {
-		globalOptions.stdout = io.Discard
-
 		env.gopts.backendTestHook = func(r backend.Backend) (backend.Backend, error) {
 			return &appendOnlyBackend{r}, nil
 		}
-		return runRebuildIndex(context.TODO(), RepairIndexOptions{}, env.gopts)
+		return withTermStatus(env.gopts, func(ctx context.Context, term *termstatus.Terminal) error {
+			globalOptions.stdout = io.Discard
+			return runRebuildIndex(context.TODO(), RepairIndexOptions{}, env.gopts, term)
+		})
 	})

 	if err == nil {

@@ -52,25 +52,20 @@ func runRepairPacks(ctx context.Context, gopts GlobalOptions, term *termstatus.Terminal, args []string) error {
 		return errors.Fatal("no ids specified")
 	}

-	repo, err := OpenRepository(ctx, gopts)
+	ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, false)
 	if err != nil {
 		return err
 	}
+	defer unlock()

-	lock, ctx, err := lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON)
-	defer unlockRepo(lock)
-	if err != nil {
-		return err
-	}
+	printer := newTerminalProgressPrinter(gopts.verbosity, term)

-	bar := newIndexProgress(gopts.Quiet, gopts.JSON)
+	bar := newIndexTerminalProgress(gopts.Quiet, gopts.JSON, term)
 	err = repo.LoadIndex(ctx, bar)
 	if err != nil {
 		return errors.Fatalf("%s", err)
 	}

-	printer := newTerminalProgressPrinter(gopts.verbosity, term)
-
 	printer.P("saving backup copies of pack files to current folder")
 	for id := range ids {
 		f, err := os.OpenFile("pack-"+id.String(), os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0o666)

@@ -87,6 +82,10 @@ func runRepairPacks(ctx context.Context, gopts GlobalOptions, term *termstatus.Terminal, args []string) error {
 			return err
 		})
 		if err != nil {
+			_ = f.Close()
+			return err
+		}
+		if err := f.Close(); err != nil {
 			return err
 		}
 	}

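Note: the second hunk fixes a classic Go bug — the error from Close on a written file was silently dropped, even though a failed Close can mean lost data. The general pattern, self-contained:

package main

import (
	"fmt"
	"os"
)

func writeFile(name string, data []byte) error {
	f, err := os.OpenFile(name, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0o666)
	if err != nil {
		return err
	}
	if _, err := f.Write(data); err != nil {
		_ = f.Close() // already failing; a Close error would mask the real cause
		return err
	}
	return f.Close() // flush/commit errors surface here and must not be dropped
}

func main() {
	fmt.Println(writeFile("pack-backup", []byte("data")))
}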
@@ -66,22 +66,11 @@ func init() {
 }

 func runRepairSnapshots(ctx context.Context, gopts GlobalOptions, opts RepairOptions, args []string) error {
-	repo, err := OpenRepository(ctx, gopts)
+	ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, opts.DryRun)
 	if err != nil {
 		return err
 	}
-
-	if !opts.DryRun {
-		var lock *restic.Lock
-		var err error
-		lock, ctx, err = lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON)
-		defer unlockRepo(lock)
-		if err != nil {
-			return err
-		}
-	} else {
-		repo.SetDryRun()
-	}
+	defer unlock()

 	snapshotLister, err := restic.MemorizeList(ctx, repo, restic.SnapshotFile)
 	if err != nil {

@@ -156,6 +145,9 @@ func runRepairSnapshots(ctx context.Context, gopts GlobalOptions, opts RepairOptions, args []string) error {
 			changedCount++
 		}
 	}
+	if ctx.Err() != nil {
+		return ctx.Err()
+	}

 	Verbosef("\n")
 	if changedCount == 0 {

@ -127,19 +127,11 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions,
|
|||
|
||||
debug.Log("restore %v to %v", snapshotIDString, opts.Target)
|
||||
|
||||
repo, err := OpenRepository(ctx, gopts)
|
||||
ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !gopts.NoLock {
|
||||
var lock *restic.Lock
|
||||
lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
|
||||
defer unlockRepo(lock)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
defer unlock()
|
||||
|
||||
sn, subfolder, err := (&restic.SnapshotFilter{
|
||||
Hosts: opts.Hosts,
|
||||
|
|
|
@ -4,13 +4,14 @@ import (
|
|||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
mrand "math/rand"
|
||||
"math/rand"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"syscall"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/restic/restic/internal/feature"
|
||||
"github.com/restic/restic/internal/filter"
|
||||
"github.com/restic/restic/internal/restic"
|
||||
rtest "github.com/restic/restic/internal/test"
|
||||
|
@ -116,7 +117,7 @@ func TestRestore(t *testing.T) {
|
|||
for i := 0; i < 10; i++ {
|
||||
p := filepath.Join(env.testdata, fmt.Sprintf("foo/bar/testfile%v", i))
|
||||
rtest.OK(t, os.MkdirAll(filepath.Dir(p), 0755))
|
||||
rtest.OK(t, appendRandomData(p, uint(mrand.Intn(2<<21))))
|
||||
rtest.OK(t, appendRandomData(p, uint(rand.Intn(2<<21))))
|
||||
}
|
||||
|
||||
opts := BackupOptions{}
|
||||
|
@ -274,6 +275,7 @@ func TestRestoreNoMetadataOnIgnoredIntermediateDirs(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestRestoreLocalLayout(t *testing.T) {
|
||||
defer feature.TestSetFlag(t, feature.Flag, feature.DeprecateS3LegacyLayout, false)()
|
||||
env, cleanup := withTestEnvironment(t)
|
||||
defer cleanup()
|
||||
|
||||
|
|
|
@ -256,27 +256,22 @@ func runRewrite(ctx context.Context, opts RewriteOptions, gopts GlobalOptions, a
|
|||
return errors.Fatal("Nothing to do: no excludes provided and no new metadata provided")
|
||||
}
|
||||
|
||||
repo, err := OpenRepository(ctx, gopts)
|
||||
var (
|
||||
repo *repository.Repository
|
||||
unlock func()
|
||||
err error
|
||||
)
|
||||
|
||||
if opts.Forget {
|
||||
Verbosef("create exclusive lock for repository\n")
|
||||
ctx, repo, unlock, err = openWithExclusiveLock(ctx, gopts, opts.DryRun)
|
||||
} else {
|
||||
ctx, repo, unlock, err = openWithAppendLock(ctx, gopts, opts.DryRun)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !opts.DryRun {
|
||||
var lock *restic.Lock
|
||||
var err error
|
||||
if opts.Forget {
|
||||
Verbosef("create exclusive lock for repository\n")
|
||||
lock, ctx, err = lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON)
|
||||
} else {
|
||||
lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
|
||||
}
|
||||
defer unlockRepo(lock)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
repo.SetDryRun()
|
||||
}
|
||||
defer unlock()
|
||||
|
||||
snapshotLister, err := restic.MemorizeList(ctx, repo, restic.SnapshotFile)
|
||||
if err != nil {
|
||||
|
@ -299,6 +294,9 @@ func runRewrite(ctx context.Context, opts RewriteOptions, gopts GlobalOptions, a
|
|||
changedCount++
|
||||
}
|
||||
}
|
||||
if ctx.Err() != nil {
|
||||
return ctx.Err()
|
||||
}
|
||||
|
||||
Verbosef("\n")
|
||||
if changedCount == 0 {
|
||||
|
|
|
@ -78,8 +78,11 @@ func testRewriteMetadata(t *testing.T, metadata snapshotMetadataArgs) {
|
|||
createBasicRewriteRepo(t, env)
|
||||
testRunRewriteExclude(t, env.gopts, []string{}, true, metadata)
|
||||
|
||||
repo, _ := OpenRepository(context.TODO(), env.gopts)
|
||||
snapshots, err := restic.TestLoadAllSnapshots(context.TODO(), repo, nil)
|
||||
ctx, repo, unlock, err := openWithReadLock(context.TODO(), env.gopts, false)
|
||||
rtest.OK(t, err)
|
||||
defer unlock()
|
||||
|
||||
snapshots, err := restic.TestLoadAllSnapshots(ctx, repo, nil)
|
||||
rtest.OK(t, err)
|
||||
rtest.Assert(t, len(snapshots) == 1, "expected one snapshot, got %v", len(snapshots))
|
||||
newSnapshot := snapshots[0]
|
||||
|
|
|
@ -9,6 +9,7 @@ import (
|
|||
"strings"
|
||||
|
||||
"github.com/restic/restic/internal/restic"
|
||||
"github.com/restic/restic/internal/ui"
|
||||
"github.com/restic/restic/internal/ui/table"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
@ -58,24 +59,19 @@ func init() {
|
|||
}
|
||||
|
||||
func runSnapshots(ctx context.Context, opts SnapshotOptions, gopts GlobalOptions, args []string) error {
|
||||
repo, err := OpenRepository(ctx, gopts)
|
||||
ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !gopts.NoLock {
|
||||
var lock *restic.Lock
|
||||
lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
|
||||
defer unlockRepo(lock)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
defer unlock()
|
||||
|
||||
var snapshots restic.Snapshots
|
||||
for sn := range FindFilteredSnapshots(ctx, repo, repo, &opts.SnapshotFilter, args) {
|
||||
snapshots = append(snapshots, sn)
|
||||
}
|
||||
if ctx.Err() != nil {
|
||||
return ctx.Err()
|
||||
}
|
||||
snapshotGroups, grouped, err := restic.GroupSnapshots(snapshots, opts.GroupBy)
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -163,6 +159,11 @@ func PrintSnapshots(stdout io.Writer, list restic.Snapshots, reasons []restic.Ke
|
|||
keepReasons[*id] = reasons[i]
|
||||
}
|
||||
}
|
||||
// check if any snapshot contains a summary
|
||||
hasSize := false
|
||||
for _, sn := range list {
|
||||
hasSize = hasSize || (sn.Summary != nil)
|
||||
}
|
||||
|
||||
// always sort the snapshots so that the newer ones are listed last
|
||||
sort.SliceStable(list, func(i, j int) bool {
|
||||
|
@ -198,6 +199,9 @@ func PrintSnapshots(stdout io.Writer, list restic.Snapshots, reasons []restic.Ke
|
|||
tab.AddColumn("Reasons", `{{ join .Reasons "\n" }}`)
|
||||
}
|
||||
tab.AddColumn("Paths", `{{ join .Paths "\n" }}`)
|
||||
if hasSize {
|
||||
tab.AddColumn("Size", `{{ .Size }}`)
|
||||
}
|
||||
}
|
||||
|
||||
type snapshot struct {
|
||||
|
@ -207,6 +211,7 @@ func PrintSnapshots(stdout io.Writer, list restic.Snapshots, reasons []restic.Ke
|
|||
Tags []string
|
||||
Reasons []string
|
||||
Paths []string
|
||||
Size string
|
||||
}
|
||||
|
||||
var multiline bool
|
||||
|
@ -228,6 +233,10 @@ func PrintSnapshots(stdout io.Writer, list restic.Snapshots, reasons []restic.Ke
|
|||
multiline = true
|
||||
}
|
||||
|
||||
if sn.Summary != nil {
|
||||
data.Size = ui.FormatBytes(sn.Summary.TotalBytesProcessed)
|
||||
}
|
||||
|
||||
tab.AddRow(data)
|
||||
}
|
||||
|
||||
|
|
|
@ -38,7 +38,7 @@ depending on what you are trying to calculate.
|
|||
The modes are:
|
||||
|
||||
* restore-size: (default) Counts the size of the restored files.
|
||||
* files-by-contents: Counts total size of files, where a file is
|
||||
* files-by-contents: Counts total size of unique files, where a file is
|
||||
considered unique if it has unique contents.
|
||||
* raw-data: Counts the size of blobs in the repository, regardless of
|
||||
how many files reference them.
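
To make the files-by-contents mode concrete: in a deduplicated repository, two files with identical contents contribute their size only once. A minimal, self-contained sketch of that counting rule (the file record and ContentHash field are illustrative stand-ins, not restic's internal types):

.. code-block:: go

    package main

    import "fmt"

    // file is a simplified stand-in for a restored file entry; ContentHash
    // identifies the file contents (illustrative only).
    type file struct {
        ContentHash string
        Size        uint64
    }

    // uniqueFilesSize counts each distinct content hash once, mirroring the
    // files-by-contents mode described above.
    func uniqueFilesSize(files []file) uint64 {
        seen := make(map[string]struct{})
        var total uint64
        for _, f := range files {
            if _, ok := seen[f.ContentHash]; ok {
                continue // identical contents were already counted
            }
            seen[f.ContentHash] = struct{}{}
            total += f.Size
        }
        return total
    }

    func main() {
        files := []file{{"abc", 10}, {"abc", 10}, {"def", 5}}
        fmt.Println(uniqueFilesSize(files)) // prints 15, not 25
    }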
@ -80,19 +80,11 @@ func runStats(ctx context.Context, opts StatsOptions, gopts GlobalOptions, args
return err
}

repo, err := OpenRepository(ctx, gopts)
ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock)
if err != nil {
return err
}

if !gopts.NoLock {
var lock *restic.Lock
lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
}
}
defer unlock()

snapshotLister, err := restic.MemorizeList(ctx, repo, restic.SnapshotFile)
if err != nil {

@ -125,9 +117,8 @@ func runStats(ctx context.Context, opts StatsOptions, gopts GlobalOptions, args
return fmt.Errorf("error walking snapshot: %v", err)
}
}

if err != nil {
return err
if ctx.Err() != nil {
return ctx.Err()
}

if opts.countMode == countModeRawData {

@ -270,11 +261,14 @@ func statsWalkTree(repo restic.Loader, opts StatsOptions, stats *statsContainer,
// will still be restored
stats.TotalFileCount++

// if inodes are present, only count each inode once
// (hard links do not increase restore size)
if !hardLinkIndex.Has(node.Inode, node.DeviceID) || node.Inode == 0 {
hardLinkIndex.Add(node.Inode, node.DeviceID, struct{}{})
if node.Links == 1 || node.Type == "dir" {
stats.TotalSize += node.Size
} else {
// if hardlinks are present only count each deviceID+inode once
if !hardLinkIndex.Has(node.Inode, node.DeviceID) || node.Inode == 0 {
hardLinkIndex.Add(node.Inode, node.DeviceID, struct{}{})
stats.TotalSize += node.Size
}
}
}
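
The rewritten branch above first handles the common case (a single-link file or a directory) and consults the device+inode index only for hardlinked files, so each hardlink group is counted once. A minimal sketch of the same rule, assuming a simplified node type rather than restic's internal one:

.. code-block:: go

    package main

    import "fmt"

    // node is a simplified stand-in for a tree node (illustrative only).
    type node struct {
        Type     string
        Size     uint64
        Links    uint64
        Inode    uint64
        DeviceID uint64
    }

    type inodeKey struct{ dev, ino uint64 }

    // addRestoreSize mirrors the counting rule from the hunk above: single-link
    // files and directories are counted directly; hardlinked files are counted
    // once per device+inode pair (inode 0 means "no inode info", always count).
    func addRestoreSize(seen map[inodeKey]struct{}, n node, total *uint64) {
        if n.Links == 1 || n.Type == "dir" {
            *total += n.Size
            return
        }
        key := inodeKey{n.DeviceID, n.Inode}
        if _, counted := seen[key]; !counted || n.Inode == 0 {
            seen[key] = struct{}{}
            *total += n.Size
        }
    }

    func main() {
        seen := make(map[inodeKey]struct{})
        var total uint64
        // two hardlinks to the same inode are counted once
        addRestoreSize(seen, node{Type: "file", Size: 10, Links: 2, Inode: 42, DeviceID: 1}, &total)
        addRestoreSize(seen, node{Type: "file", Size: 10, Links: 2, Inode: 42, DeviceID: 1}, &total)
        fmt.Println(total) // prints 10
    }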
@ -357,7 +351,10 @@ func statsDebug(ctx context.Context, repo restic.Repository) error {
Warnf("File Type: %v\n%v\n", t, hist)
}

hist := statsDebugBlobs(ctx, repo)
hist, err := statsDebugBlobs(ctx, repo)
if err != nil {
return err
}
for _, t := range []restic.BlobType{restic.DataBlob, restic.TreeBlob} {
Warnf("Blob Type: %v\n%v\n\n", t, hist[t])
}

@ -375,17 +372,17 @@ func statsDebugFileType(ctx context.Context, repo restic.Lister, tpe restic.File
return hist, err
}

func statsDebugBlobs(ctx context.Context, repo restic.Repository) [restic.NumBlobTypes]*sizeHistogram {
func statsDebugBlobs(ctx context.Context, repo restic.Repository) ([restic.NumBlobTypes]*sizeHistogram, error) {
var hist [restic.NumBlobTypes]*sizeHistogram
for i := 0; i < len(hist); i++ {
hist[i] = newSizeHistogram(2 * chunker.MaxSize)
}

repo.Index().Each(ctx, func(pb restic.PackedBlob) {
err := repo.Index().Each(ctx, func(pb restic.PackedBlob) {
hist[pb.Type].Add(uint64(pb.Length))
})

return hist
return hist, err
}

type sizeClass struct {

@ -104,20 +104,12 @@ func runTag(ctx context.Context, opts TagOptions, gopts GlobalOptions, args []st
return errors.Fatal("--set and --add/--remove cannot be given at the same time")
}

repo, err := OpenRepository(ctx, gopts)
Verbosef("create exclusive lock for repository\n")
ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, false)
if err != nil {
return err
}

if !gopts.NoLock {
Verbosef("create exclusive lock for repository\n")
var lock *restic.Lock
lock, ctx, err = lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON)
defer unlockRepo(lock)
if err != nil {
return err
}
return nil
}
defer unlock()

changeCnt := 0
for sn := range FindFilteredSnapshots(ctx, repo, repo, &opts.SnapshotFilter, args) {

@ -130,6 +122,9 @@ func runTag(ctx context.Context, opts TagOptions, gopts GlobalOptions, args []st
changeCnt++
}
}
if ctx.Err() != nil {
return ctx.Err()
}
if changeCnt == 0 {
Verbosef("no snapshots were modified\n")
} else {

@ -12,6 +12,7 @@ func testRunTag(t testing.TB, opts TagOptions, gopts GlobalOptions) {
rtest.OK(t, runTag(context.TODO(), opts, gopts, []string{}))
}

// nolint: staticcheck // false positive nil pointer dereference check
func TestTag(t *testing.T) {
env, cleanup := withTestEnvironment(t)
defer cleanup()

@ -1,41 +0,0 @@
package main

import (
"context"

"github.com/restic/restic/internal/restic"
)

// DeleteFiles deletes the given fileList of fileType in parallel
// it will print a warning if there is an error, but continue deleting the remaining files
func DeleteFiles(ctx context.Context, gopts GlobalOptions, repo restic.Repository, fileList restic.IDSet, fileType restic.FileType) {
_ = deleteFiles(ctx, gopts, true, repo, fileList, fileType)
}

// DeleteFilesChecked deletes the given fileList of fileType in parallel
// if an error occurs, it will cancel and return this error
func DeleteFilesChecked(ctx context.Context, gopts GlobalOptions, repo restic.Repository, fileList restic.IDSet, fileType restic.FileType) error {
return deleteFiles(ctx, gopts, false, repo, fileList, fileType)
}

// deleteFiles deletes the given fileList of fileType in parallel
// if ignoreError=true, it will print a warning if there was an error, else it will abort.
func deleteFiles(ctx context.Context, gopts GlobalOptions, ignoreError bool, repo restic.Repository, fileList restic.IDSet, fileType restic.FileType) error {
bar := newProgressMax(!gopts.JSON && !gopts.Quiet, 0, "files deleted")
defer bar.Done()

return restic.ParallelRemove(ctx, repo, fileList, fileType, func(id restic.ID, err error) error {
if err != nil {
if !gopts.JSON {
Warnf("unable to remove %v/%v from the repository\n", fileType, id)
}
if !ignoreError {
return err
}
}
if !gopts.JSON && gopts.verbosity > 2 {
Verbosef("removed %v/%v\n", fileType, id)
}
return nil
}, bar)
}

@ -2,6 +2,7 @@ package main

import (
"context"
"os"

"github.com/restic/restic/internal/restic"
"github.com/spf13/pflag"

@ -14,17 +15,27 @@ func initMultiSnapshotFilter(flags *pflag.FlagSet, filt *restic.SnapshotFilter,
if !addHostShorthand {
hostShorthand = ""
}
flags.StringArrayVarP(&filt.Hosts, "host", hostShorthand, nil, "only consider snapshots for this `host` (can be specified multiple times)")
flags.StringArrayVarP(&filt.Hosts, "host", hostShorthand, nil, "only consider snapshots for this `host` (can be specified multiple times) (default: $RESTIC_HOST)")
flags.Var(&filt.Tags, "tag", "only consider snapshots including `tag[,tag,...]` (can be specified multiple times)")
flags.StringArrayVar(&filt.Paths, "path", nil, "only consider snapshots including this (absolute) `path` (can be specified multiple times)")

// set default based on env if set
if host := os.Getenv("RESTIC_HOST"); host != "" {
filt.Hosts = []string{host}
}
}

// initSingleSnapshotFilter is used for commands that work on a single snapshot
// MUST be combined with restic.FindFilteredSnapshot
func initSingleSnapshotFilter(flags *pflag.FlagSet, filt *restic.SnapshotFilter) {
flags.StringArrayVarP(&filt.Hosts, "host", "H", nil, "only consider snapshots for this `host`, when snapshot ID \"latest\" is given (can be specified multiple times)")
flags.StringArrayVarP(&filt.Hosts, "host", "H", nil, "only consider snapshots for this `host`, when snapshot ID \"latest\" is given (can be specified multiple times) (default: $RESTIC_HOST)")
flags.Var(&filt.Tags, "tag", "only consider snapshots including `tag[,tag,...]`, when snapshot ID \"latest\" is given (can be specified multiple times)")
flags.StringArrayVar(&filt.Paths, "path", nil, "only consider snapshots including this (absolute) `path`, when snapshot ID \"latest\" is given (can be specified multiple times)")

// set default based on env if set
if host := os.Getenv("RESTIC_HOST"); host != "" {
filt.Hosts = []string{host}
}
}

// FindFilteredSnapshots yields Snapshots, either given explicitly by `snapshotIDs` or filtered from the list of all snapshots.

61 cmd/restic/find_test.go Normal file

@ -0,0 +1,61 @@
package main

import (
"testing"

"github.com/restic/restic/internal/restic"
rtest "github.com/restic/restic/internal/test"
"github.com/spf13/pflag"
)

func TestSnapshotFilter(t *testing.T) {
for _, test := range []struct {
name string
args []string
expected []string
env string
}{
{
"no value",
[]string{},
nil,
"",
},
{
"args only",
[]string{"--host", "abc"},
[]string{"abc"},
"",
},
{
"env default",
[]string{},
[]string{"def"},
"def",
},
{
"both",
[]string{"--host", "abc"},
[]string{"abc"},
"def",
},
} {
t.Run(test.name, func(t *testing.T) {
t.Setenv("RESTIC_HOST", test.env)

for _, mode := range []bool{false, true} {
set := pflag.NewFlagSet("test", pflag.PanicOnError)
flt := &restic.SnapshotFilter{}
if mode {
initMultiSnapshotFilter(set, flt, false)
} else {
initSingleSnapshotFilter(set, flt)
}
err := set.Parse(test.args)
rtest.OK(t, err)

rtest.Equals(t, test.expected, flt.Hosts, "unexpected hosts")
}
})
}
}

@ -43,7 +43,7 @@ import (
"golang.org/x/term"
)

var version = "0.16.4-dev (compiled manually)"
const version = "0.16.4-dev (compiled manually)"

// TimeFormat is the format used for all timestamps printed by restic.
const TimeFormat = "2006-01-02 15:04:05"

@ -96,9 +96,6 @@ var globalOptions = GlobalOptions{
stderr: os.Stderr,
}

var isReadingPassword bool
var internalGlobalCtx context.Context

func init() {
backends := location.NewRegistry()
backends.Register(azure.NewFactory())

@ -112,15 +109,6 @@ func init() {
backends.Register(swift.NewFactory())
globalOptions.backends = backends

var cancel context.CancelFunc
internalGlobalCtx, cancel = context.WithCancel(context.Background())
AddCleanupHandler(func(code int) (int, error) {
// Must be called before the unlock cleanup handler to ensure that the latter is
// not blocked due to limited number of backend connections, see #1434
cancel()
return code, nil
})

f := cmdRoot.PersistentFlags()
f.StringVarP(&globalOptions.Repo, "repo", "r", "", "`repository` to backup to or restore from (default: $RESTIC_REPOSITORY)")
f.StringVarP(&globalOptions.RepositoryFile, "repository-file", "", "", "`file` to read the repository location from (default: $RESTIC_REPOSITORY_FILE)")

@ -165,8 +153,6 @@ func init() {
// parse target pack size from env, on error the default value will be used
targetPackSize, _ := strconv.ParseUint(os.Getenv("RESTIC_PACK_SIZE"), 10, 32)
globalOptions.PackSize = uint(targetPackSize)

restoreTerminal()
}

func stdinIsTerminal() bool {

@ -191,40 +177,6 @@ func stdoutTerminalWidth() int {
return w
}

// restoreTerminal installs a cleanup handler that restores the previous
// terminal state on exit. This handler is only intended to restore the
// terminal configuration if restic exits after receiving a signal. A regular
// program execution must revert changes to the terminal configuration itself.
// The terminal configuration is only restored while reading a password.
func restoreTerminal() {
if !term.IsTerminal(int(os.Stdout.Fd())) {
return
}

fd := int(os.Stdout.Fd())
state, err := term.GetState(fd)
if err != nil {
fmt.Fprintf(os.Stderr, "unable to get terminal state: %v\n", err)
return
}

AddCleanupHandler(func(code int) (int, error) {
// Restoring the terminal configuration while restic runs in the
// background, causes restic to get stopped on unix systems with
// a SIGTTOU signal. Thus only restore the terminal settings if
// they might have been modified, which is the case while reading
// a password.
if !isReadingPassword {
return code, nil
}
err := term.Restore(fd, state)
if err != nil {
fmt.Fprintf(os.Stderr, "unable to restore terminal state: %v\n", err)
}
return code, err
})
}

// ClearLine creates a platform dependent string to clear the current
// line, so it can be overwritten.
//

@ -333,24 +285,48 @@ func readPassword(in io.Reader) (password string, err error) {

// readPasswordTerminal reads the password from the given reader which must be a
// tty. Prompt is printed on the writer out before attempting to read the
// password.
func readPasswordTerminal(in *os.File, out io.Writer, prompt string) (password string, err error) {
fmt.Fprint(out, prompt)
isReadingPassword = true
buf, err := term.ReadPassword(int(in.Fd()))
isReadingPassword = false
fmt.Fprintln(out)
// password. If the context is canceled, the function leaks the password reading
// goroutine.
func readPasswordTerminal(ctx context.Context, in *os.File, out *os.File, prompt string) (password string, err error) {
fd := int(out.Fd())
state, err := term.GetState(fd)
if err != nil {
fmt.Fprintf(os.Stderr, "unable to get terminal state: %v\n", err)
return "", err
}

done := make(chan struct{})
var buf []byte

go func() {
defer close(done)
fmt.Fprint(out, prompt)
buf, err = term.ReadPassword(int(in.Fd()))
fmt.Fprintln(out)
}()

select {
case <-ctx.Done():
err := term.Restore(fd, state)
if err != nil {
fmt.Fprintf(os.Stderr, "unable to restore terminal state: %v\n", err)
}
return "", ctx.Err()
case <-done:
// clean shutdown, nothing to do
}

if err != nil {
return "", errors.Wrap(err, "ReadPassword")
}

password = string(buf)
return password, nil
return string(buf), nil
}
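
The new readPasswordTerminal above wraps the blocking term.ReadPassword call in a goroutine and races it against ctx.Done(); as the added comment states, cancellation intentionally leaks the reader goroutine, which is acceptable because the process is about to exit anyway. A standalone sketch of the same pattern for an arbitrary blocking call (callWithContext is a hypothetical helper, not part of restic):

.. code-block:: go

    package main

    import (
        "context"
        "errors"
        "fmt"
        "time"
    )

    // callWithContext runs a blocking function in a goroutine and returns early
    // if ctx is canceled. On cancellation the goroutine keeps running until the
    // blocking call returns; it is intentionally leaked, as in the hunk above.
    func callWithContext(ctx context.Context, blocking func() (string, error)) (string, error) {
        done := make(chan struct{})
        var result string
        var err error

        go func() {
            defer close(done)
            result, err = blocking()
        }()

        select {
        case <-ctx.Done():
            return "", ctx.Err()
        case <-done:
            return result, err
        }
    }

    func main() {
        ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
        defer cancel()

        _, err := callWithContext(ctx, func() (string, error) {
            time.Sleep(time.Second) // stands in for the blocking term.ReadPassword
            return "secret", nil
        })
        fmt.Println(errors.Is(err, context.DeadlineExceeded)) // prints true
    }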

// ReadPassword reads the password from a password file, the environment
// variable RESTIC_PASSWORD or prompts the user.
func ReadPassword(opts GlobalOptions, prompt string) (string, error) {
// variable RESTIC_PASSWORD or prompts the user. If the context is canceled,
// the function leaks the password reading goroutine.
func ReadPassword(ctx context.Context, opts GlobalOptions, prompt string) (string, error) {
if opts.password != "" {
return opts.password, nil
}

@ -361,7 +337,7 @@ func ReadPassword(opts GlobalOptions, prompt string) (string, error) {
)

if stdinIsTerminal() {
password, err = readPasswordTerminal(os.Stdin, os.Stderr, prompt)
password, err = readPasswordTerminal(ctx, os.Stdin, os.Stderr, prompt)
} else {
password, err = readPassword(os.Stdin)
Verbosef("reading repository password from stdin\n")

@ -379,14 +355,15 @@ func ReadPassword(opts GlobalOptions, prompt string) (string, error) {
}

// ReadPasswordTwice calls ReadPassword two times and returns an error when the
// passwords don't match.
func ReadPasswordTwice(gopts GlobalOptions, prompt1, prompt2 string) (string, error) {
pw1, err := ReadPassword(gopts, prompt1)
// passwords don't match. If the context is canceled, the function leaks the
// password reading goroutine.
func ReadPasswordTwice(ctx context.Context, gopts GlobalOptions, prompt1, prompt2 string) (string, error) {
pw1, err := ReadPassword(ctx, gopts, prompt1)
if err != nil {
return "", err
}
if stdinIsTerminal() {
pw2, err := ReadPassword(gopts, prompt2)
pw2, err := ReadPassword(ctx, gopts, prompt2)
if err != nil {
return "", err
}

@ -469,7 +446,10 @@ func OpenRepository(ctx context.Context, opts GlobalOptions) (*repository.Reposi
}

for ; passwordTriesLeft > 0; passwordTriesLeft-- {
opts.password, err = ReadPassword(opts, "enter password for repository: ")
opts.password, err = ReadPassword(ctx, opts, "enter password for repository: ")
if ctx.Err() != nil {
return nil, ctx.Err()
}
if err != nil && passwordTriesLeft > 1 {
opts.password = ""
fmt.Printf("%s. Try again\n", err)

@ -570,16 +550,13 @@ func parseConfig(loc location.Location, opts options.Options) (interface{}, erro
return cfg, nil
}

// Open the backend specified by a location config.
func open(ctx context.Context, s string, gopts GlobalOptions, opts options.Options) (backend.Backend, error) {
func innerOpen(ctx context.Context, s string, gopts GlobalOptions, opts options.Options, create bool) (backend.Backend, error) {
debug.Log("parsing location %v", location.StripPassword(gopts.backends, s))
loc, err := location.Parse(gopts.backends, s)
if err != nil {
return nil, errors.Fatalf("parsing repository location failed: %v", err)
}

var be backend.Backend

cfg, err := parseConfig(loc, opts)
if err != nil {
return nil, err

@ -599,7 +576,13 @@ func open(ctx context.Context, s string, gopts GlobalOptions, opts options.Optio
return nil, errors.Fatalf("invalid backend: %q", loc.Scheme)
}

be, err = factory.Open(ctx, cfg, rt, lim)
var be backend.Backend
if create {
be, err = factory.Create(ctx, cfg, rt, lim)
} else {
be, err = factory.Open(ctx, cfg, rt, lim)
}

if err != nil {
return nil, errors.Fatalf("unable to open repository at %v: %v", location.StripPassword(gopts.backends, s), err)
}

@ -615,6 +598,17 @@ func open(ctx context.Context, s string, gopts GlobalOptions, opts options.Optio
}
}

return be, nil
}

// Open the backend specified by a location config.
func open(ctx context.Context, s string, gopts GlobalOptions, opts options.Options) (backend.Backend, error) {

be, err := innerOpen(ctx, s, gopts, opts, false)
if err != nil {
return nil, err
}

// check if config is there
fi, err := be.Stat(ctx, backend.Handle{Type: restic.ConfigFile})
if err != nil {

@ -630,31 +624,5 @@ func open(ctx context.Context, s string, gopts GlobalOptions, opts options.Optio

// Create the backend specified by URI.
func create(ctx context.Context, s string, gopts GlobalOptions, opts options.Options) (backend.Backend, error) {
debug.Log("parsing location %v", location.StripPassword(gopts.backends, s))
loc, err := location.Parse(gopts.backends, s)
if err != nil {
return nil, err
}

cfg, err := parseConfig(loc, opts)
if err != nil {
return nil, err
}

rt, err := backend.Transport(globalOptions.TransportOptions)
if err != nil {
return nil, errors.Fatal(err.Error())
}

factory := gopts.backends.Lookup(loc.Scheme)
if factory == nil {
return nil, errors.Fatalf("invalid backend: %q", loc.Scheme)
}

be, err := factory.Create(ctx, cfg, rt, nil)
if err != nil {
return nil, err
}

return logger.New(sema.NewBackend(be)), nil
return innerOpen(ctx, s, gopts, opts, true)
}

@ -15,23 +15,28 @@ import (
"github.com/pkg/profile"
)

var (
listenProfile string
memProfilePath string
cpuProfilePath string
traceProfilePath string
blockProfilePath string
insecure bool
)
type ProfileOptions struct {
listen string
memPath string
cpuPath string
tracePath string
blockPath string
insecure bool
}

var profileOpts ProfileOptions
var prof interface {
Stop()
}

func init() {
f := cmdRoot.PersistentFlags()
f.StringVar(&listenProfile, "listen-profile", "", "listen on this `address:port` for memory profiling")
f.StringVar(&memProfilePath, "mem-profile", "", "write memory profile to `dir`")
f.StringVar(&cpuProfilePath, "cpu-profile", "", "write cpu profile to `dir`")
f.StringVar(&traceProfilePath, "trace-profile", "", "write trace to `dir`")
f.StringVar(&blockProfilePath, "block-profile", "", "write block profile to `dir`")
f.BoolVar(&insecure, "insecure-kdf", false, "use insecure KDF settings")
f.StringVar(&profileOpts.listen, "listen-profile", "", "listen on this `address:port` for memory profiling")
f.StringVar(&profileOpts.memPath, "mem-profile", "", "write memory profile to `dir`")
f.StringVar(&profileOpts.cpuPath, "cpu-profile", "", "write cpu profile to `dir`")
f.StringVar(&profileOpts.tracePath, "trace-profile", "", "write trace to `dir`")
f.StringVar(&profileOpts.blockPath, "block-profile", "", "write block profile to `dir`")
f.BoolVar(&profileOpts.insecure, "insecure-kdf", false, "use insecure KDF settings")
}

type fakeTestingTB struct{}

@ -41,10 +46,10 @@ func (fakeTestingTB) Logf(msg string, args ...interface{}) {
}

func runDebug() error {
if listenProfile != "" {
fmt.Fprintf(os.Stderr, "running profile HTTP server on %v\n", listenProfile)
if profileOpts.listen != "" {
fmt.Fprintf(os.Stderr, "running profile HTTP server on %v\n", profileOpts.listen)
go func() {
err := http.ListenAndServe(listenProfile, nil)
err := http.ListenAndServe(profileOpts.listen, nil)
if err != nil {
fmt.Fprintf(os.Stderr, "profile HTTP server listen failed: %v\n", err)
}

@ -52,16 +57,16 @@ func runDebug() error {
}

profilesEnabled := 0
if memProfilePath != "" {
if profileOpts.memPath != "" {
profilesEnabled++
}
if cpuProfilePath != "" {
if profileOpts.cpuPath != "" {
profilesEnabled++
}
if traceProfilePath != "" {
if profileOpts.tracePath != "" {
profilesEnabled++
}
if blockProfilePath != "" {
if profileOpts.blockPath != "" {
profilesEnabled++
}

@ -69,30 +74,25 @@ func runDebug() error {
return errors.Fatal("only one profile (memory, CPU, trace, or block) may be activated at the same time")
}

var prof interface {
Stop()
if profileOpts.memPath != "" {
prof = profile.Start(profile.Quiet, profile.NoShutdownHook, profile.MemProfile, profile.ProfilePath(profileOpts.memPath))
} else if profileOpts.cpuPath != "" {
prof = profile.Start(profile.Quiet, profile.NoShutdownHook, profile.CPUProfile, profile.ProfilePath(profileOpts.cpuPath))
} else if profileOpts.tracePath != "" {
prof = profile.Start(profile.Quiet, profile.NoShutdownHook, profile.TraceProfile, profile.ProfilePath(profileOpts.tracePath))
} else if profileOpts.blockPath != "" {
prof = profile.Start(profile.Quiet, profile.NoShutdownHook, profile.BlockProfile, profile.ProfilePath(profileOpts.blockPath))
}

if memProfilePath != "" {
prof = profile.Start(profile.Quiet, profile.NoShutdownHook, profile.MemProfile, profile.ProfilePath(memProfilePath))
} else if cpuProfilePath != "" {
prof = profile.Start(profile.Quiet, profile.NoShutdownHook, profile.CPUProfile, profile.ProfilePath(cpuProfilePath))
} else if traceProfilePath != "" {
prof = profile.Start(profile.Quiet, profile.NoShutdownHook, profile.TraceProfile, profile.ProfilePath(traceProfilePath))
} else if blockProfilePath != "" {
prof = profile.Start(profile.Quiet, profile.NoShutdownHook, profile.BlockProfile, profile.ProfilePath(blockProfilePath))
}

if prof != nil {
AddCleanupHandler(func(code int) (int, error) {
prof.Stop()
return code, nil
})
}

if insecure {
if profileOpts.insecure {
repository.TestUseLowSecurityKDFParameters(fakeTestingTB{})
}

return nil
}

func stopDebug() {
if prof != nil {
prof.Stop()
}
}

@ -5,3 +5,6 @@ package main

// runDebug is a noop without the debug tag.
func runDebug() error { return nil }

// stopDebug is a noop without the debug tag.
func stopDebug() {}

@ -232,47 +232,66 @@ func testSetupBackupData(t testing.TB, env *testEnvironment) string {
}

func listPacks(gopts GlobalOptions, t *testing.T) restic.IDSet {
r, err := OpenRepository(context.TODO(), gopts)
ctx, r, unlock, err := openWithReadLock(context.TODO(), gopts, false)
rtest.OK(t, err)
defer unlock()

packs := restic.NewIDSet()

rtest.OK(t, r.List(context.TODO(), restic.PackFile, func(id restic.ID, size int64) error {
rtest.OK(t, r.List(ctx, restic.PackFile, func(id restic.ID, size int64) error {
packs.Insert(id)
return nil
}))
return packs
}

func removePacks(gopts GlobalOptions, t testing.TB, remove restic.IDSet) {
r, err := OpenRepository(context.TODO(), gopts)
func listTreePacks(gopts GlobalOptions, t *testing.T) restic.IDSet {
ctx, r, unlock, err := openWithReadLock(context.TODO(), gopts, false)
rtest.OK(t, err)
defer unlock()

rtest.OK(t, r.LoadIndex(ctx, nil))
treePacks := restic.NewIDSet()
rtest.OK(t, r.Index().Each(ctx, func(pb restic.PackedBlob) {
if pb.Type == restic.TreeBlob {
treePacks.Insert(pb.PackID)
}
}))

return treePacks
}

func removePacks(gopts GlobalOptions, t testing.TB, remove restic.IDSet) {
ctx, r, unlock, err := openWithExclusiveLock(context.TODO(), gopts, false)
rtest.OK(t, err)
defer unlock()

for id := range remove {
rtest.OK(t, r.Backend().Remove(context.TODO(), backend.Handle{Type: restic.PackFile, Name: id.String()}))
rtest.OK(t, r.Backend().Remove(ctx, backend.Handle{Type: restic.PackFile, Name: id.String()}))
}
}

func removePacksExcept(gopts GlobalOptions, t testing.TB, keep restic.IDSet, removeTreePacks bool) {
r, err := OpenRepository(context.TODO(), gopts)
ctx, r, unlock, err := openWithExclusiveLock(context.TODO(), gopts, false)
rtest.OK(t, err)
defer unlock()

// Get all tree packs
rtest.OK(t, r.LoadIndex(context.TODO(), nil))
rtest.OK(t, r.LoadIndex(ctx, nil))

treePacks := restic.NewIDSet()
r.Index().Each(context.TODO(), func(pb restic.PackedBlob) {
rtest.OK(t, r.Index().Each(ctx, func(pb restic.PackedBlob) {
if pb.Type == restic.TreeBlob {
treePacks.Insert(pb.PackID)
}
})
}))

// remove all packs containing data blobs
rtest.OK(t, r.List(context.TODO(), restic.PackFile, func(id restic.ID, size int64) error {
rtest.OK(t, r.List(ctx, restic.PackFile, func(id restic.ID, size int64) error {
if treePacks.Has(id) != removeTreePacks || keep.Has(id) {
return nil
}
return r.Backend().Remove(context.TODO(), backend.Handle{Type: restic.PackFile, Name: id.String()})
return r.Backend().Remove(ctx, backend.Handle{Type: restic.PackFile, Name: id.String()})
}))
}

@ -12,6 +12,7 @@ import (
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/restic"
rtest "github.com/restic/restic/internal/test"
"github.com/restic/restic/internal/ui/termstatus"
)

func TestCheckRestoreNoLock(t *testing.T) {

@ -88,8 +89,12 @@ func TestListOnce(t *testing.T) {
testRunPrune(t, env.gopts, pruneOpts)
rtest.OK(t, runCheck(context.TODO(), checkOpts, env.gopts, nil))

rtest.OK(t, runRebuildIndex(context.TODO(), RepairIndexOptions{}, env.gopts))
rtest.OK(t, runRebuildIndex(context.TODO(), RepairIndexOptions{ReadAllPacks: true}, env.gopts))
rtest.OK(t, withTermStatus(env.gopts, func(ctx context.Context, term *termstatus.Terminal) error {
return runRebuildIndex(context.TODO(), RepairIndexOptions{}, env.gopts, term)
}))
rtest.OK(t, withTermStatus(env.gopts, func(ctx context.Context, term *termstatus.Terminal) error {
return runRebuildIndex(context.TODO(), RepairIndexOptions{ReadAllPacks: true}, env.gopts, term)
}))
}

type writeToOnly struct {

@ -154,12 +159,13 @@ func TestFindListOnce(t *testing.T) {
testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "3")}, opts, env.gopts)
thirdSnapshot := restic.NewIDSet(testListSnapshots(t, env.gopts, 3)...)

repo, err := OpenRepository(context.TODO(), env.gopts)
ctx, repo, unlock, err := openWithReadLock(context.TODO(), env.gopts, false)
rtest.OK(t, err)
defer unlock()

snapshotIDs := restic.NewIDSet()
// specify the two oldest snapshots explicitly and use "latest" to reference the newest one
for sn := range FindFilteredSnapshots(context.TODO(), repo, repo, &restic.SnapshotFilter{}, []string{
for sn := range FindFilteredSnapshots(ctx, repo, repo, &restic.SnapshotFilter{}, []string{
secondSnapshot[0].String(),
secondSnapshot[1].String()[:8],
"latest",

@ -2,316 +2,47 @@ package main

import (
"context"
"fmt"
"sync"
"time"

"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/repository"
)

type lockContext struct {
lock *restic.Lock
cancel context.CancelFunc
refreshWG sync.WaitGroup
}

var globalLocks struct {
locks map[*restic.Lock]*lockContext
sync.Mutex
sync.Once
}

func lockRepo(ctx context.Context, repo restic.Repository, retryLock time.Duration, json bool) (*restic.Lock, context.Context, error) {
return lockRepository(ctx, repo, false, retryLock, json)
}

func lockRepoExclusive(ctx context.Context, repo restic.Repository, retryLock time.Duration, json bool) (*restic.Lock, context.Context, error) {
return lockRepository(ctx, repo, true, retryLock, json)
}

var (
retrySleepStart = 5 * time.Second
retrySleepMax = 60 * time.Second
)

func minDuration(a, b time.Duration) time.Duration {
if a <= b {
return a
}
return b
}

// lockRepository wraps the ctx such that it is cancelled when the repository is unlocked
// cancelling the original context also stops the lock refresh
func lockRepository(ctx context.Context, repo restic.Repository, exclusive bool, retryLock time.Duration, json bool) (*restic.Lock, context.Context, error) {
// make sure that a repository is unlocked properly and after cancel() was
// called by the cleanup handler in global.go
globalLocks.Do(func() {
AddCleanupHandler(unlockAll)
})

lockFn := restic.NewLock
if exclusive {
lockFn = restic.NewExclusiveLock
}

var lock *restic.Lock
var err error

retrySleep := minDuration(retrySleepStart, retryLock)
retryMessagePrinted := false
retryTimeout := time.After(retryLock)

retryLoop:
for {
lock, err = lockFn(ctx, repo)
if err != nil && restic.IsAlreadyLocked(err) {

if !retryMessagePrinted {
if !json {
Verbosef("repo already locked, waiting up to %s for the lock\n", retryLock)
}
retryMessagePrinted = true
}

debug.Log("repo already locked, retrying in %v", retrySleep)
retrySleepCh := time.After(retrySleep)

select {
case <-ctx.Done():
return nil, ctx, ctx.Err()
case <-retryTimeout:
debug.Log("repo already locked, timeout expired")
// Last lock attempt
lock, err = lockFn(ctx, repo)
break retryLoop
case <-retrySleepCh:
retrySleep = minDuration(retrySleep*2, retrySleepMax)
}
} else {
// anything else, either a successful lock or another error
break retryLoop
}
}
if restic.IsInvalidLock(err) {
return nil, ctx, errors.Fatalf("%v\n\nthe `unlock --remove-all` command can be used to remove invalid locks. Make sure that no other restic process is accessing the repository when running the command", err)
}
func internalOpenWithLocked(ctx context.Context, gopts GlobalOptions, dryRun bool, exclusive bool) (context.Context, *repository.Repository, func(), error) {
repo, err := OpenRepository(ctx, gopts)
if err != nil {
return nil, ctx, fmt.Errorf("unable to create lock in backend: %w", err)
return nil, nil, nil, err
}
debug.Log("create lock %p (exclusive %v)", lock, exclusive)

ctx, cancel := context.WithCancel(ctx)
lockInfo := &lockContext{
lock: lock,
cancel: cancel,
}
lockInfo.refreshWG.Add(2)
refreshChan := make(chan struct{})
forceRefreshChan := make(chan refreshLockRequest)
unlock := func() {}
if !dryRun {
var lock *repository.Unlocker

globalLocks.Lock()
globalLocks.locks[lock] = lockInfo
go refreshLocks(ctx, repo.Backend(), lockInfo, refreshChan, forceRefreshChan)
go monitorLockRefresh(ctx, lockInfo, refreshChan, forceRefreshChan)
globalLocks.Unlock()

return lock, ctx, err
}

var refreshInterval = 5 * time.Minute

// consider a lock refresh failed a bit before the lock actually becomes stale
// the difference allows to compensate for a small time drift between clients.
var refreshabilityTimeout = restic.StaleLockTimeout - refreshInterval*3/2

type refreshLockRequest struct {
result chan bool
}

func refreshLocks(ctx context.Context, backend backend.Backend, lockInfo *lockContext, refreshed chan<- struct{}, forceRefresh <-chan refreshLockRequest) {
debug.Log("start")
lock := lockInfo.lock
ticker := time.NewTicker(refreshInterval)
lastRefresh := lock.Time

defer func() {
ticker.Stop()
// ensure that the context was cancelled before removing the lock
lockInfo.cancel()

// remove the lock from the repo
debug.Log("unlocking repository with lock %v", lock)
if err := lock.Unlock(); err != nil {
debug.Log("error while unlocking: %v", err)
Warnf("error while unlocking: %v", err)
lock, ctx, err = repository.Lock(ctx, repo, exclusive, gopts.RetryLock, func(msg string) {
if !gopts.JSON {
Verbosef("%s", msg)
}
}, Warnf)
if err != nil {
return nil, nil, nil, err
}

lockInfo.refreshWG.Done()
}()

for {
select {
case <-ctx.Done():
debug.Log("terminate")
return

case req := <-forceRefresh:
debug.Log("trying to refresh stale lock")
// keep on going if our current lock still exists
success := tryRefreshStaleLock(ctx, backend, lock, lockInfo.cancel)
// inform refresh goroutine about forced refresh
select {
case <-ctx.Done():
case req.result <- success:
}

if success {
// update lock refresh time
lastRefresh = lock.Time
}

case <-ticker.C:
if time.Since(lastRefresh) > refreshabilityTimeout {
// the lock is too old, wait until the expiry monitor cancels the context
continue
}

debug.Log("refreshing locks")
err := lock.Refresh(context.TODO())
if err != nil {
Warnf("unable to refresh lock: %v\n", err)
} else {
lastRefresh = lock.Time
// inform monitor goroutine about successful refresh
select {
case <-ctx.Done():
case refreshed <- struct{}{}:
}
}
}
unlock = lock.Unlock
} else {
repo.SetDryRun()
}

return ctx, repo, unlock, nil
}

func monitorLockRefresh(ctx context.Context, lockInfo *lockContext, refreshed <-chan struct{}, forceRefresh chan<- refreshLockRequest) {
// time.Now() might use a monotonic timer which is paused during standby
// convert to unix time to ensure we compare real time values
lastRefresh := time.Now().UnixNano()
pollDuration := 1 * time.Second
if refreshInterval < pollDuration {
// require for TestLockFailedRefresh
pollDuration = refreshInterval / 5
}
// timers are paused during standby, which is a problem as the refresh timeout
// _must_ expire if the host was too long in standby. Thus fall back to periodic checks
// https://github.com/golang/go/issues/35012
ticker := time.NewTicker(pollDuration)
defer func() {
ticker.Stop()
lockInfo.cancel()
lockInfo.refreshWG.Done()
}()

var refreshStaleLockResult chan bool

for {
select {
case <-ctx.Done():
debug.Log("terminate expiry monitoring")
return
case <-refreshed:
if refreshStaleLockResult != nil {
// ignore delayed refresh notifications while the stale lock is refreshed
continue
}
lastRefresh = time.Now().UnixNano()
case <-ticker.C:
if time.Now().UnixNano()-lastRefresh < refreshabilityTimeout.Nanoseconds() || refreshStaleLockResult != nil {
continue
}

debug.Log("trying to refreshStaleLock")
// keep on going if our current lock still exists
refreshReq := refreshLockRequest{
result: make(chan bool),
}
refreshStaleLockResult = refreshReq.result

// inform refresh goroutine about forced refresh
select {
case <-ctx.Done():
case forceRefresh <- refreshReq:
}
case success := <-refreshStaleLockResult:
if success {
lastRefresh = time.Now().UnixNano()
refreshStaleLockResult = nil
continue
}

Warnf("Fatal: failed to refresh lock in time\n")
return
}
}
func openWithReadLock(ctx context.Context, gopts GlobalOptions, noLock bool) (context.Context, *repository.Repository, func(), error) {
// TODO enforce read-only operations once the locking code has moved to the repository
return internalOpenWithLocked(ctx, gopts, noLock, false)
}

func tryRefreshStaleLock(ctx context.Context, be backend.Backend, lock *restic.Lock, cancel context.CancelFunc) bool {
freeze := backend.AsBackend[backend.FreezeBackend](be)
if freeze != nil {
debug.Log("freezing backend")
freeze.Freeze()
defer freeze.Unfreeze()
}

err := lock.RefreshStaleLock(ctx)
if err != nil {
Warnf("failed to refresh stale lock: %v\n", err)
// cancel context while the backend is still frozen to prevent accidental modifications
cancel()
return false
}

return true
func openWithAppendLock(ctx context.Context, gopts GlobalOptions, dryRun bool) (context.Context, *repository.Repository, func(), error) {
// TODO enforce non-exclusive operations once the locking code has moved to the repository
return internalOpenWithLocked(ctx, gopts, dryRun, false)
}

func unlockRepo(lock *restic.Lock) {
if lock == nil {
return
}

globalLocks.Lock()
lockInfo, exists := globalLocks.locks[lock]
delete(globalLocks.locks, lock)
globalLocks.Unlock()

if !exists {
debug.Log("unable to find lock %v in the global list of locks, ignoring", lock)
return
}
lockInfo.cancel()
lockInfo.refreshWG.Wait()
}

func unlockAll(code int) (int, error) {
globalLocks.Lock()
locks := globalLocks.locks
debug.Log("unlocking %d locks", len(globalLocks.locks))
for _, lockInfo := range globalLocks.locks {
lockInfo.cancel()
}
globalLocks.locks = make(map[*restic.Lock]*lockContext)
globalLocks.Unlock()

for _, lockInfo := range locks {
lockInfo.refreshWG.Wait()
}

return code, nil
}

func init() {
globalLocks.locks = make(map[*restic.Lock]*lockContext)
func openWithExclusiveLock(ctx context.Context, gopts GlobalOptions, dryRun bool) (context.Context, *repository.Repository, func(), error) {
return internalOpenWithLocked(ctx, gopts, dryRun, true)
}
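
With this refactor the per-command boilerplate collapses to a single call: each open helper returns a possibly wrapped context, the repository, and an unlock function, as the runSnapshots and runRestore hunks above show. A hedged sketch of the resulting calling convention (the command body is illustrative and assumes the surrounding package's GlobalOptions and open helpers):

.. code-block:: go

    func runExample(ctx context.Context, gopts GlobalOptions) error {
        // One call opens the repository, takes the lock, and starts the
        // refresh goroutines; the returned ctx is canceled if the lock
        // cannot be refreshed in time.
        ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock)
        if err != nil {
            return err
        }
        // unlock is always non-nil; with --no-lock it is a no-op.
        defer unlock()

        // ... work with repo, checking ctx.Err() after iteration loops ...
        _ = repo
        return ctx.Err()
    }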
|
||||
|
|
|
@ -3,6 +3,7 @@ package main
|
|||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
|
@ -14,6 +15,7 @@ import (
|
|||
|
||||
"github.com/restic/restic/internal/debug"
|
||||
"github.com/restic/restic/internal/errors"
|
||||
"github.com/restic/restic/internal/feature"
|
||||
"github.com/restic/restic/internal/options"
|
||||
"github.com/restic/restic/internal/restic"
|
||||
)
|
||||
|
@ -23,6 +25,8 @@ func init() {
|
|||
_, _ = maxprocs.Set()
|
||||
}
|
||||
|
||||
var ErrOK = errors.New("ok")
|
||||
|
||||
// cmdRoot is the base command when no other command has been specified.
|
||||
var cmdRoot = &cobra.Command{
|
||||
Use: "restic",
|
||||
|
@ -73,6 +77,9 @@ The full documentation can be found at https://restic.readthedocs.io/ .
|
|||
// enabled)
|
||||
return runDebug()
|
||||
},
|
||||
PersistentPostRun: func(_ *cobra.Command, _ []string) {
|
||||
stopDebug()
|
||||
},
|
||||
}
|
||||
|
||||
// Distinguish commands that need the password from those that work without,
|
||||
|
@ -87,8 +94,6 @@ func needsPassword(cmd string) bool {
|
|||
}
|
||||
}
|
||||
|
||||
var logBuffer = bytes.NewBuffer(nil)
|
||||
|
||||
func tweakGoGC() {
|
||||
// lower GOGC from 100 to 50, unless it was manually overwritten by the user
|
||||
oldValue := godebug.SetGCPercent(50)
|
||||
|
@ -101,12 +106,30 @@ func main() {
|
|||
tweakGoGC()
|
||||
// install custom global logger into a buffer, if an error occurs
|
||||
// we can show the logs
|
||||
logBuffer := bytes.NewBuffer(nil)
|
||||
log.SetOutput(logBuffer)
|
||||
|
||||
err := feature.Flag.Apply(os.Getenv("RESTIC_FEATURES"), func(s string) {
|
||||
fmt.Fprintln(os.Stderr, s)
|
||||
})
|
||||
if err != nil {
|
||||
fmt.Fprintln(os.Stderr, err)
|
||||
Exit(1)
|
||||
}
|
||||
|
||||
debug.Log("main %#v", os.Args)
|
||||
debug.Log("restic %s compiled with %v on %v/%v",
|
||||
version, runtime.Version(), runtime.GOOS, runtime.GOARCH)
|
||||
err := cmdRoot.ExecuteContext(internalGlobalCtx)
|
||||
|
||||
ctx := createGlobalContext()
|
||||
err = cmdRoot.ExecuteContext(ctx)
|
||||
|
||||
if err == nil {
|
||||
err = ctx.Err()
|
||||
} else if err == ErrOK {
|
||||
// ErrOK overwrites context cancelation errors
|
||||
err = nil
|
||||
}
|
||||
|
||||
switch {
|
||||
case restic.IsAlreadyLocked(err):
|
||||
|
@ -128,11 +151,13 @@ func main() {
|
|||
}
|
||||
|
||||
var exitCode int
|
||||
switch err {
|
||||
case nil:
|
||||
switch {
|
||||
case err == nil:
|
||||
exitCode = 0
|
||||
case ErrInvalidSourceData:
|
||||
case err == ErrInvalidSourceData:
|
||||
exitCode = 3
|
||||
case errors.Is(err, context.Canceled):
|
||||
exitCode = 130
|
||||
default:
|
||||
exitCode = 1
|
||||
}
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
|
||||
"github.com/restic/restic/internal/errors"
|
||||
|
@ -56,7 +57,7 @@ func initSecondaryRepoOptions(f *pflag.FlagSet, opts *secondaryRepoOptions, repo
|
|||
opts.PasswordCommand = os.Getenv("RESTIC_FROM_PASSWORD_COMMAND")
|
||||
}
|
||||
|
||||
func fillSecondaryGlobalOpts(opts secondaryRepoOptions, gopts GlobalOptions, repoPrefix string) (GlobalOptions, bool, error) {
|
||||
func fillSecondaryGlobalOpts(ctx context.Context, opts secondaryRepoOptions, gopts GlobalOptions, repoPrefix string) (GlobalOptions, bool, error) {
|
||||
if opts.Repo == "" && opts.RepositoryFile == "" && opts.LegacyRepo == "" && opts.LegacyRepositoryFile == "" {
|
||||
return GlobalOptions{}, false, errors.Fatal("Please specify a source repository location (--from-repo or --from-repository-file)")
|
||||
}
|
||||
|
@ -109,7 +110,7 @@ func fillSecondaryGlobalOpts(opts secondaryRepoOptions, gopts GlobalOptions, rep
|
|||
return GlobalOptions{}, false, err
|
||||
}
|
||||
}
|
||||
dstGopts.password, err = ReadPassword(dstGopts, "enter password for "+repoPrefix+" repository: ")
|
||||
dstGopts.password, err = ReadPassword(ctx, dstGopts, "enter password for "+repoPrefix+" repository: ")
|
||||
if err != nil {
|
||||
return GlobalOptions{}, false, err
|
||||
}
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
@ -170,7 +171,7 @@ func TestFillSecondaryGlobalOpts(t *testing.T) {
|
|||
|
||||
// Test all valid cases
|
||||
for _, testCase := range validSecondaryRepoTestCases {
|
||||
DstGOpts, isFromRepo, err := fillSecondaryGlobalOpts(testCase.Opts, gOpts, "destination")
|
||||
DstGOpts, isFromRepo, err := fillSecondaryGlobalOpts(context.TODO(), testCase.Opts, gOpts, "destination")
|
||||
rtest.OK(t, err)
|
||||
rtest.Equals(t, DstGOpts, testCase.DstGOpts)
|
||||
rtest.Equals(t, isFromRepo, testCase.FromRepo)
|
||||
|
@ -178,7 +179,7 @@ func TestFillSecondaryGlobalOpts(t *testing.T) {
|
|||
|
||||
// Test all invalid cases
|
||||
for _, testCase := range invalidSecondaryRepoTestCases {
|
||||
_, _, err := fillSecondaryGlobalOpts(testCase.Opts, gOpts, "destination")
|
||||
_, _, err := fillSecondaryGlobalOpts(context.TODO(), testCase.Opts, gOpts, "destination")
|
||||
rtest.Assert(t, err != nil, "Expected error, but function did not return an error")
|
||||
}
|
||||
}
|
||||
|
|
Binary file not shown.
@@ -77,8 +77,7 @@ avoid any conflicts:
macOS
=====

If you are using macOS, you can install restic using the
`homebrew <https://brew.sh/>`__ package manager:
If you are using macOS, you can install restic using `Homebrew <https://brew.sh/>`__:

.. code-block:: console

@@ -363,3 +362,18 @@ Example for using sudo to write a zsh completion script directly to the system-w
the operating system used, e.g. ``/usr/share/bash-completion/completions/restic``
in Debian and derivatives. Please look up the correct path in the appropriate
documentation.

Example for setting up a PowerShell completion script for the local user's profile:

.. code-block:: pwsh-session

    # Create profile if one does not exist
    PS> If (!(Test-Path $PROFILE.CurrentUserAllHosts)) {New-Item -Path $PROFILE.CurrentUserAllHosts -Force}

    PS> $ProfileDir = (Get-Item $PROFILE.CurrentUserAllHosts).Directory

    # Generate Restic completions in the same directory as the profile
    PS> restic generate --powershell-completion "$ProfileDir\restic-completion.ps1"

    # Append to the profile file the command to load Restic completions
    PS> Add-Content -Path $PROFILE.CurrentUserAllHosts -Value "`r`nImport-Module $ProfileDir\restic-completion.ps1"
@@ -201,15 +201,16 @@ scheme like this:

    $ restic -r rest:http://host:8000/ init

Depending on your REST server setup, you can use HTTPS protocol,
password protection, multiple repositories or any combination of
those features. The TCP/IP port is also configurable. Here
are some more examples:
unix socket, password protection, multiple repositories or any
combination of those features. The TCP/IP port is also configurable.
Here are some more examples:

.. code-block:: console

    $ restic -r rest:https://host:8000/ init
    $ restic -r rest:https://user:pass@host:8000/ init
    $ restic -r rest:https://user:pass@host:8000/my_backup_repo/ init
    $ restic -r rest:http+unix:///tmp/rest.socket:/my_backup_repo/ init

The server username and password can be specified using environment
variables as well:
@@ -56,6 +56,39 @@ snapshot for each volume that contains files to backup. Files are read from the
VSS snapshot instead of the regular filesystem. This allows backing up files that are
exclusively locked by another process during the backup.

You can use additional options to change VSS behaviour:

* ``-o vss.timeout`` specifies the timeout for VSS snapshot creation; the default value is 120 seconds
* ``-o vss.exclude-all-mount-points`` disables auto snapshotting of all volume mount points
* ``-o vss.exclude-volumes`` allows excluding specific volumes or volume mount points from snapshotting
* ``-o vss.provider`` specifies the VSS provider used for snapshotting

For example, a 2.5 minute timeout with snapshotting of mount points disabled can be specified as

.. code-block:: console

    -o vss.timeout=2m30s -o vss.exclude-all-mount-points=true

and excluding drive ``d:\``, mount point ``c:\mnt`` and volume ``\\?\Volume{04ce0545-3391-11e0-ba2f-806e6f6e6963}\`` as

.. code-block:: console

    -o vss.exclude-volumes="d:;c:\mnt\;\\?\volume{04ce0545-3391-11e0-ba2f-806e6f6e6963}"

The VSS provider can be specified by GUID

.. code-block:: console

    -o vss.provider={3f900f90-00e9-440e-873a-96ca5eb079e5}

or by name

.. code-block:: console

    -o vss.provider="Hyper-V IC Software Shadow Copy Provider"

Also, ``MS`` can be used as an alias for ``Microsoft Software Shadow Copy provider 1.0``.
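For instance, the alias can be combined with the other options shown above (a sketch):

.. code-block:: console

    -o vss.provider=MS -o vss.timeout=2m30s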
By default VSS ignores Outlook OST files. This is not a restriction of restic
but the default Windows VSS configuration. The files not to snapshot are
configured in the Windows registry under the following key:
@@ -18,19 +18,21 @@ Working with repositories
Listing all snapshots
=====================

Now, you can list all the snapshots stored in the repository:
Now, you can list all the snapshots stored in the repository. The size column
only exists for snapshots created using restic 0.17.0 or later. It reflects the
size of the contained files at the time when the snapshot was created.

.. code-block:: console

    $ restic -r /srv/restic-repo snapshots
    enter password for repository:
    ID        Date                 Host     Tags   Directory
    ----------------------------------------------------------------------
    40dc1520  2015-05-08 21:38:30  kasimir         /home/user/work
    79766175  2015-05-08 21:40:19  kasimir         /home/user/work
    bdbd3439  2015-05-08 21:45:17  luigi           /home/art
    590c8fc8  2015-05-08 21:47:38  kazik           /srv
    9f0bc19e  2015-05-08 21:46:11  luigi           /srv
    ID        Date                 Host     Tags   Directory        Size
    -------------------------------------------------------------------------
    40dc1520  2015-05-08 21:38:30  kasimir         /home/user/work  20.643GiB
    79766175  2015-05-08 21:40:19  kasimir         /home/user/work  20.645GiB
    bdbd3439  2015-05-08 21:45:17  luigi           /home/art        3.141GiB
    590c8fc8  2015-05-08 21:47:38  kazik           /srv             580.200MiB
    9f0bc19e  2015-05-08 21:46:11  luigi           /srv             572.180MiB

You can filter the listing by directory path:

@@ -38,10 +40,10 @@ You can filter the listing by directory path:

    $ restic -r /srv/restic-repo snapshots --path="/srv"
    enter password for repository:
    ID        Date                 Host   Tags   Directory
    ----------------------------------------------------------------------
    590c8fc8  2015-05-08 21:47:38  kazik         /srv
    9f0bc19e  2015-05-08 21:46:11  luigi         /srv
    ID        Date                 Host   Tags   Directory   Size
    -------------------------------------------------------------------
    590c8fc8  2015-05-08 21:47:38  kazik         /srv        580.200MiB
    9f0bc19e  2015-05-08 21:46:11  luigi         /srv        572.180MiB

Or filter by host:

@@ -49,10 +51,10 @@ Or filter by host:

    $ restic -r /srv/restic-repo snapshots --host luigi
    enter password for repository:
    ID        Date                 Host   Tags   Directory
    ----------------------------------------------------------------------
    bdbd3439  2015-05-08 21:45:17  luigi         /home/art
    9f0bc19e  2015-05-08 21:46:11  luigi         /srv
    ID        Date                 Host   Tags   Directory   Size
    -------------------------------------------------------------------
    bdbd3439  2015-05-08 21:45:17  luigi         /home/art   3.141GiB
    9f0bc19e  2015-05-08 21:46:11  luigi         /srv        572.180MiB

Combining filters is also possible.
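For example, both filters can be combined in one invocation (a sketch against the repository above):

.. code-block:: console

    $ restic -r /srv/restic-repo snapshots --host luigi --path "/srv"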
@@ -64,21 +66,21 @@ Furthermore you can group the output by the same filters (host, paths, tags):

    enter password for repository:
    snapshots for (host [kasimir])
    ID        Date                 Host     Tags   Directory
    ----------------------------------------------------------------------
    40dc1520  2015-05-08 21:38:30  kasimir         /home/user/work
    79766175  2015-05-08 21:40:19  kasimir         /home/user/work
    ID        Date                 Host     Tags   Directory        Size
    ------------------------------------------------------------------------
    40dc1520  2015-05-08 21:38:30  kasimir         /home/user/work  20.643GiB
    79766175  2015-05-08 21:40:19  kasimir         /home/user/work  20.645GiB
    2 snapshots
    snapshots for (host [luigi])
    ID        Date                 Host   Tags   Directory
    ----------------------------------------------------------------------
    bdbd3439  2015-05-08 21:45:17  luigi         /home/art
    9f0bc19e  2015-05-08 21:46:11  luigi         /srv
    ID        Date                 Host   Tags   Directory   Size
    -------------------------------------------------------------------
    bdbd3439  2015-05-08 21:45:17  luigi         /home/art   3.141GiB
    9f0bc19e  2015-05-08 21:46:11  luigi         /srv        572.180MiB
    2 snapshots
    snapshots for (host [kazik])
    ID        Date                 Host   Tags   Directory
    ----------------------------------------------------------------------
    590c8fc8  2015-05-08 21:47:38  kazik         /srv
    ID        Date                 Host   Tags   Directory   Size
    -------------------------------------------------------------------
    590c8fc8  2015-05-08 21:47:38  kazik         /srv        580.200MiB
    1 snapshots
@@ -26,7 +26,8 @@ When you start a backup, restic will concurrently count the number of files and
their total size, which is used to estimate how long it will take. This will
cause some extra I/O, which can slow down backups of network file systems or
FUSE mounts. To avoid this overhead at the cost of not seeing a progress
estimate, use the ``--no-scan`` option which disables this file scanning.
estimate, use the ``--no-scan`` option of the ``backup`` command, which disables
this file scanning.
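For instance, to back up a FUSE-mounted directory without the initial scan (repository and path are placeholders):

.. code-block:: console

    $ restic -r /srv/restic-repo backup --no-scan /mnt/nfs/data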
Backend Connections
===================

@@ -111,3 +112,28 @@ to disk. An operating system usually caches file write operations in memory and
them to disk after a short delay. As larger pack files take longer to upload, this
increases the chance of these files being written to disk. This can increase disk wear
for SSDs.


Feature Flags
=============

Feature flags allow disabling or enabling certain experimental restic features. The flags
can be specified via the ``RESTIC_FEATURES`` environment variable. The variable expects a
comma-separated list of ``key[=value],key2[=value2]`` pairs. The key is the name of a feature
flag. The value is optional and can contain either the value ``true`` (default if omitted)
or ``false``. The list of currently available feature flags is shown by the ``features``
command; a usage sketch follows below.

Restic will return an error if an invalid feature flag is specified. Feature flags that are
no longer relevant may be removed in a future restic release, so make sure to stop
specifying them.

A feature can either be in alpha, beta, stable or deprecated state.

- An _alpha_ feature is disabled by default and may change in arbitrary ways between restic
  versions or be removed.
- A _beta_ feature is enabled by default, but still can change in minor ways or be removed.
- A _stable_ feature is always enabled and cannot be disabled. This allows for a transition
  period after which the flag will be removed in a future restic version.
- A _deprecated_ feature is always disabled and cannot be enabled. The flag will be removed
  in a future restic version.
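A usage sketch (the flag names here are placeholders; run ``restic features`` for the actual list):

.. code-block:: console

    $ restic features
    $ RESTIC_FEATURES=example-flag,other-flag=false restic -r /srv/restic-repo backup ~/work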
@@ -163,7 +163,9 @@ Summary is the last output line in a successful backup.
+---------------------------+---------------------------------------------------------+
| ``tree_blobs``            | Number of tree blobs                                    |
+---------------------------+---------------------------------------------------------+
| ``data_added``            | Amount of data added, in bytes                          |
| ``data_added``            | Amount of (uncompressed) data added, in bytes           |
+---------------------------+---------------------------------------------------------+
| ``data_added_packed``     | Amount of data added (after compression), in bytes     |
+---------------------------+---------------------------------------------------------+
| ``total_files_processed`` | Total number of files processed                         |
+---------------------------+---------------------------------------------------------+

@@ -365,13 +367,13 @@ Snapshot object

Reason object

+----------------+---------------------------------------------------------+
| ``snapshot``   | Snapshot object, without ``id`` and ``short_id`` fields |
+----------------+---------------------------------------------------------+
| ``matches``    | Array containing descriptions of the matching criteria  |
+----------------+---------------------------------------------------------+
| ``counters``   | Object containing counters used by the policies         |
+----------------+---------------------------------------------------------+
+----------------+-----------------------------------------------------------+
| ``snapshot``   | Snapshot object, including ``id`` and ``short_id`` fields |
+----------------+-----------------------------------------------------------+
| ``matches``    | Array containing descriptions of the matching criteria    |
+----------------+-----------------------------------------------------------+
| ``counters``   | Object containing counters used by the policies           |
+----------------+-----------------------------------------------------------+
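The reason objects appear in the JSON output of the ``forget`` command. A sketch for inspecting them (assumes ``jq`` is installed; the policy flag is a placeholder):

.. code-block:: console

    $ restic -r /srv/restic-repo forget --json --dry-run --keep-last 2 | jq '.[0].reasons'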

init
@@ -551,11 +553,48 @@ The snapshots command returns a single JSON object, an array with objects of the
+---------------------+--------------------------------------------------+
| ``program_version`` | restic version used to create snapshot           |
+---------------------+--------------------------------------------------+
| ``summary``         | Snapshot statistics, see "Summary object"        |
+---------------------+--------------------------------------------------+
| ``id``              | Snapshot ID                                      |
+---------------------+--------------------------------------------------+
| ``short_id``        | Snapshot ID, short form                          |
+---------------------+--------------------------------------------------+

Summary object

The contained statistics reflect the information at the point in time when the snapshot
was created.

+---------------------------+---------------------------------------------------------+
| ``backup_start``          | Time at which the backup was started                    |
+---------------------------+---------------------------------------------------------+
| ``backup_end``            | Time at which the backup was completed                  |
+---------------------------+---------------------------------------------------------+
| ``files_new``             | Number of new files                                     |
+---------------------------+---------------------------------------------------------+
| ``files_changed``         | Number of files that changed                            |
+---------------------------+---------------------------------------------------------+
| ``files_unmodified``      | Number of files that did not change                     |
+---------------------------+---------------------------------------------------------+
| ``dirs_new``              | Number of new directories                               |
+---------------------------+---------------------------------------------------------+
| ``dirs_changed``          | Number of directories that changed                      |
+---------------------------+---------------------------------------------------------+
| ``dirs_unmodified``       | Number of directories that did not change               |
+---------------------------+---------------------------------------------------------+
| ``data_blobs``            | Number of data blobs                                    |
+---------------------------+---------------------------------------------------------+
| ``tree_blobs``            | Number of tree blobs                                    |
+---------------------------+---------------------------------------------------------+
| ``data_added``            | Amount of (uncompressed) data added, in bytes           |
+---------------------------+---------------------------------------------------------+
| ``data_added_packed``     | Amount of data added (after compression), in bytes     |
+---------------------------+---------------------------------------------------------+
| ``total_files_processed`` | Total number of files processed                         |
+---------------------------+---------------------------------------------------------+
| ``total_bytes_processed`` | Total number of bytes processed                         |
+---------------------------+---------------------------------------------------------+
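For example, the summary of the most recent snapshot can be pulled out of the JSON output (a sketch; assumes ``jq`` is installed):

.. code-block:: console

    $ restic -r /srv/restic-repo snapshots --json | jq '.[-1].summary'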

stats
-----
@@ -74,7 +74,7 @@ $ restic backup --exclude "~/documents" ~
This command will result in a complete backup of the currently logged in user's home directory and it won't exclude the folder ``~/documents/`` - which is not what the user wanted to achieve.
The problem is how the path to ``~/documents`` is passed to restic.

In order to spot an issue like this, you can make use of the following ruby command preceeding your restic command.
In order to spot an issue like this, you can make use of the following ruby command preceding your restic command.

::
@@ -1,4 +1,4 @@
FROM golang:1.20-alpine AS builder
FROM golang:1.22-alpine AS builder

WORKDIR /go/src/github.com/restic/restic
go.mod

@@ -1,10 +1,10 @@
module github.com/restic/restic

require (
	cloud.google.com/go/storage v1.37.0
	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1
	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0
	github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.1
	cloud.google.com/go/storage v1.40.0
	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0
	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1
	github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.1
	github.com/Backblaze/blazer v0.6.1
	github.com/anacrolix/fuse v0.2.0
	github.com/cenkalti/backoff/v4 v4.2.1

@@ -13,50 +13,51 @@ require (
	github.com/go-ole/go-ole v1.3.0
	github.com/google/go-cmp v0.6.0
	github.com/hashicorp/golang-lru/v2 v2.0.7
	github.com/klauspost/compress v1.17.6
	github.com/klauspost/compress v1.17.7
	github.com/minio/minio-go/v7 v7.0.66
	github.com/minio/sha256-simd v1.0.1
	github.com/ncw/swift/v2 v2.0.2
	github.com/peterbourgon/unixtransport v0.0.4
	github.com/pkg/errors v0.9.1
	github.com/pkg/profile v1.7.0
	github.com/pkg/sftp v1.13.6
	github.com/pkg/xattr v0.4.10-0.20221120235825-35026bbbd013
	github.com/restic/chunker v0.4.0
	github.com/spf13/cobra v1.7.0
	github.com/spf13/cobra v1.8.0
	github.com/spf13/pflag v1.0.5
	go.uber.org/automaxprocs v1.5.3
	golang.org/x/crypto v0.18.0
	golang.org/x/net v0.20.0
	golang.org/x/oauth2 v0.16.0
	golang.org/x/crypto v0.21.0
	golang.org/x/net v0.23.0
	golang.org/x/oauth2 v0.18.0
	golang.org/x/sync v0.6.0
	golang.org/x/sys v0.16.0
	golang.org/x/term v0.16.0
	golang.org/x/sys v0.18.0
	golang.org/x/term v0.18.0
	golang.org/x/text v0.14.0
	golang.org/x/time v0.5.0
	google.golang.org/api v0.157.0
	google.golang.org/api v0.170.0
)

require (
	cloud.google.com/go v0.112.0 // indirect
	cloud.google.com/go/compute v1.23.3 // indirect
	cloud.google.com/go v0.112.1 // indirect
	cloud.google.com/go/compute v1.24.0 // indirect
	cloud.google.com/go/compute/metadata v0.2.3 // indirect
	cloud.google.com/go/iam v1.1.5 // indirect
	github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1 // indirect
	github.com/AzureAD/microsoft-authentication-library-for-go v1.2.0 // indirect
	cloud.google.com/go/iam v1.1.7 // indirect
	github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 // indirect
	github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 // indirect
	github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
	github.com/dustin/go-humanize v1.0.1 // indirect
	github.com/felixge/fgprof v0.9.3 // indirect
	github.com/felixge/httpsnoop v1.0.4 // indirect
	github.com/go-logr/logr v1.3.0 // indirect
	github.com/go-logr/logr v1.4.1 // indirect
	github.com/go-logr/stdr v1.2.2 // indirect
	github.com/golang-jwt/jwt/v5 v5.0.0 // indirect
	github.com/golang-jwt/jwt/v5 v5.2.0 // indirect
	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
	github.com/golang/protobuf v1.5.3 // indirect
	github.com/golang/protobuf v1.5.4 // indirect
	github.com/google/pprof v0.0.0-20230926050212-f7f687d19a98 // indirect
	github.com/google/s2a-go v0.1.7 // indirect
	github.com/google/uuid v1.5.0 // indirect
	github.com/google/uuid v1.6.0 // indirect
	github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
	github.com/googleapis/gax-go/v2 v2.12.0 // indirect
	github.com/googleapis/gax-go/v2 v2.12.3 // indirect
	github.com/inconshreveable/mousetrap v1.1.0 // indirect
	github.com/json-iterator/go v1.1.12 // indirect
	github.com/klauspost/cpuid/v2 v2.2.6 // indirect

@@ -66,22 +67,22 @@ require (
	github.com/minio/md5-simd v1.1.2 // indirect
	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
	github.com/modern-go/reflect2 v1.0.2 // indirect
	github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect
	github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
	github.com/rs/xid v1.5.0 // indirect
	github.com/russross/blackfriday/v2 v2.1.0 // indirect
	github.com/sirupsen/logrus v1.9.3 // indirect
	go.opencensus.io v0.24.0 // indirect
	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 // indirect
	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 // indirect
	go.opentelemetry.io/otel v1.21.0 // indirect
	go.opentelemetry.io/otel/metric v1.21.0 // indirect
	go.opentelemetry.io/otel/trace v1.21.0 // indirect
	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 // indirect
	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect
	go.opentelemetry.io/otel v1.24.0 // indirect
	go.opentelemetry.io/otel/metric v1.24.0 // indirect
	go.opentelemetry.io/otel/trace v1.24.0 // indirect
	google.golang.org/appengine v1.6.8 // indirect
	google.golang.org/genproto v0.0.0-20240116215550-a9fa1716bcac // indirect
	google.golang.org/genproto/googleapis/api v0.0.0-20240122161410-6c6643bf1457 // indirect
	google.golang.org/genproto/googleapis/rpc v0.0.0-20240116215550-a9fa1716bcac // indirect
	google.golang.org/grpc v1.60.1 // indirect
	google.golang.org/protobuf v1.32.0 // indirect
	google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9 // indirect
	google.golang.org/genproto/googleapis/api v0.0.0-20240314234333-6e1732d8331c // indirect
	google.golang.org/genproto/googleapis/rpc v0.0.0-20240311132316-a219d84964c2 // indirect
	google.golang.org/grpc v1.62.1 // indirect
	google.golang.org/protobuf v1.33.0 // indirect
	gopkg.in/ini.v1 v1.67.0 // indirect
	gopkg.in/yaml.v3 v3.0.1 // indirect
)
go.sum

@@ -1,25 +1,25 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.112.0 h1:tpFCD7hpHFlQ8yPwT3x+QeXqc2T6+n6T+hmABHfDUSM=
cloud.google.com/go v0.112.0/go.mod h1:3jEEVwZ/MHU4djK5t5RHuKOA/GbLddgTdVubX1qnPD4=
cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk=
cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI=
cloud.google.com/go v0.112.1 h1:uJSeirPke5UNZHIb4SxfZklVSiWWVqW4oXlETwZziwM=
cloud.google.com/go v0.112.1/go.mod h1:+Vbu+Y1UU+I1rjmzeMOb/8RfkKJK2Gyxi1X6jJCZLo4=
cloud.google.com/go/compute v1.24.0 h1:phWcR2eWzRJaL/kOiJwfFsPs4BaKq1j6vnpZrc1YlVg=
cloud.google.com/go/compute v1.24.0/go.mod h1:kw1/T+h/+tK2LJK0wiPPx1intgdAM3j/g3hFDlscY40=
cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
cloud.google.com/go/iam v1.1.5 h1:1jTsCu4bcsNsE4iiqNT5SHwrDRCfRmIaaaVFhRveTJI=
cloud.google.com/go/iam v1.1.5/go.mod h1:rB6P/Ic3mykPbFio+vo7403drjlgvoWfYpJhMXEbzv8=
cloud.google.com/go/storage v1.37.0 h1:WI8CsaFO8Q9KjPVtsZ5Cmi0dXV25zMoX0FklT7c3Jm4=
cloud.google.com/go/storage v1.37.0/go.mod h1:i34TiT2IhiNDmcj65PqwCjcoUX7Z5pLzS8DEmoiFq1k=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1 h1:lGlwhPtrX6EVml1hO0ivjkUxsSyl4dsiw9qcA1k/3IQ=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1/go.mod h1:RKUqNu35KJYcVG/fqTRqmuXJZYNhYkBrnC/hX7yGbTA=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 h1:BMAjVKJM0U/CYF27gA0ZMmXGkOcvfFtD0oHVZ1TIPRI=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0/go.mod h1:1fXstnBMas5kzG+S3q8UoJcmyU6nUeunJcMDHcRYHhs=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1 h1:6oNBlSdi1QqM1PNW7FPA6xOGA5UNsXnkaYZz9vdPGhA=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1/go.mod h1:s4kgfzA0covAXNicZHDMN58jExvcng2mC/DepXiF1EI=
cloud.google.com/go/iam v1.1.7 h1:z4VHOhwKLF/+UYXAJDFwGtNF0b6gjsW1Pk9Ml0U/IoM=
cloud.google.com/go/iam v1.1.7/go.mod h1:J4PMPg8TtyurAUvSmPj8FF3EDgY1SPRZxcUGrn7WXGA=
cloud.google.com/go/storage v1.40.0 h1:VEpDQV5CJxFmJ6ueWNsKxcr1QAYOXEgxDa+sBbJahPw=
cloud.google.com/go/storage v1.40.0/go.mod h1:Rrj7/hKlG87BLqDJYtwR0fbPld8uJPbQ2ucUMY7Ir0g=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0 h1:n1DH8TPV4qqPTje2RcUBYwtrTWlabVp4n46+74X2pn4=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0/go.mod h1:HDcZnuGbiyppErN6lB+idp4CKhjbc8gwjto6OPpyggM=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 h1:sO0/P7g68FrryJzljemN+6GTssUXdANk6aJ7T1ZxnsQ=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1/go.mod h1:h8hyGFDsU5HMivxiS2iYFZsgDbU9OnnJ163x5UGVKYo=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 h1:LqbJ/WzJUwBf8UiaSzgX7aMclParm9/5Vgp+TY51uBQ=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2/go.mod h1:yInRyqWXAuaPrgI7p70+lDDgh3mlBohis29jGMISnmc=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0 h1:AifHbc4mg0x9zW52WOpKbsHaDKuRhlI7TVl47thgQ70=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.1 h1:AMf7YbZOZIW5b66cXNHMWWT/zkjhz5+a+k/3x40EO7E=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.1/go.mod h1:uwfk06ZBcvL/g4VHNjurPfVln9NMbsk2XIZxJ+hu81k=
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.0 h1:hVeq+yCyUi+MsoO/CU95yqCIcdzra5ovzk8Q2BBpV2M=
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.0/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.1 h1:fXPMAmuh0gDuRDey0atC8cXBuKIlqCzCkL8sm1n9Ov0=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.1/go.mod h1:SUZc9YRRHfx2+FAQKNDGrssXehqLpxmwRv2mC/5ntj4=
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 h1:DzHpqpoJVaCgOUdVHxE8QB52S6NiVdDQvGlny1qvPqA=
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
github.com/Backblaze/blazer v0.6.1 h1:xC9HyC7OcxRzzmtfRiikIEvq4HZYWjU6caFwX2EXw1s=
github.com/Backblaze/blazer v0.6.1/go.mod h1:7/jrGx4O6OKOto6av+hLwelPR8rwZ+PLxQ5ZOiYAjwY=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=

@@ -36,8 +36,6 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM=
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=

@@ -55,20 +53,19 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA=
github.com/felixge/fgprof v0.9.3 h1:VvyZxILNuCiUCSXtPtYmmtGvb65nqXh2QFWc0Wpf2/g=
github.com/felixge/fgprof v0.9.3/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNun8eiPw=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=
github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78=
github.com/golang-jwt/jwt/v5 v5.0.0 h1:1n1XNM9hk7O9mnQoNBGolZvzebBQ7p93ULHRc28XJUE=
github.com/golang-jwt/jwt/v5 v5.0.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang-jwt/jwt/v5 v5.2.0 h1:d/ix8ftRUorsN+5eMIlF4T6J8CAt9rch3My2winC1Jw=
github.com/golang-jwt/jwt/v5 v5.2.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=

@@ -85,8 +82,8 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=

@@ -104,12 +101,12 @@ github.com/google/pprof v0.0.0-20230926050212-f7f687d19a98/go.mod h1:czg5+yv1E0Z
github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o=
github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU=
github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs=
github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0=
github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas=
github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU=
github.com/googleapis/gax-go/v2 v2.12.3 h1:5/zPPDvw8Q1SuXjrqrZslrqT7dL/uJT2CQii/cLCKqA=
github.com/googleapis/gax-go/v2 v2.12.3/go.mod h1:AKloxT6GtNbaLm8QTNSidHUVsHYcBHwWRvkNFJUQcS4=
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w=

@@ -117,8 +114,8 @@ github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/klauspost/compress v1.17.6 h1:60eq2E/jlfwQXtvZEeBUYADs+BwKBWURIY+Gj2eRGjI=
github.com/klauspost/compress v1.17.6/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
github.com/klauspost/compress v1.17.7 h1:ehO88t2UGzQK66LMdE8tibEd1ErmzZjNEqWkjLAKQQg=
github.com/klauspost/compress v1.17.7/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc=
github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=

@@ -129,6 +126,7 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/miekg/dns v1.1.54/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY=
github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
github.com/minio/minio-go/v7 v7.0.66 h1:bnTOXOHjOqv/gcMuiVbN9o2ngRItvqE774dG9nq0Dzw=

@@ -142,8 +140,13 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/ncw/swift/v2 v2.0.2 h1:jx282pcAKFhmoZBSdMcCRFn9VWkoBIRsCpe+yZq7vEk=
github.com/ncw/swift/v2 v2.0.2/go.mod h1:z0A9RVdYPjNjXVo2pDOPxZ4eu3oarO1P91fTItcb+Kg=
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU=
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI=
github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU=
github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/peterbourgon/ff/v3 v3.3.1/go.mod h1:zjJVUhx+twciwfDl0zBcFzl4dW8axCRyXE/eKY9RztQ=
github.com/peterbourgon/unixtransport v0.0.4 h1:UTF0FxXCAglvoZz9jaGPYjEg52DjBLDYGMJvJni6Tfw=
github.com/peterbourgon/unixtransport v0.0.4/go.mod h1:o8aUkOCa8W/BIXpi15uKvbSabjtBh0JhSOJGSfoOhAU=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/profile v1.7.0 h1:hnbDkaNWPCLMO9wGLdBFTIZvzDrDfBM2072E1S9gJkA=

@@ -165,8 +168,8 @@ github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I=
github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0=
github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stephens2424/writerset v1.0.2/go.mod h1:aS2JhsMn6eA7e82oNmW4rfsgAOp9COBTTl8mzkwADnc=

@@ -185,17 +188,17 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 h1:SpGay3w+nEwMpfVnbqOLH5gY52/foP8RE8UzTZ1pdSE=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1/go.mod h1:4UoMYEZOC0yN/sPGH76KPkkU7zgiEWYWL9vwmbnTJPE=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 h1:aFJWCqJMNjENlcleuuOkGAPH82y0yULBScfXcIEdS24=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1/go.mod h1:sEGXWArGqc3tVa+ekntsN65DmVbVeW+7lTKTjZF3/Fo=
go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc=
go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo=
go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4=
go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM=
go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8=
go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc=
go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw=
go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo=
go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo=
go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI=
go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco=
go.opentelemetry.io/otel/sdk v1.22.0 h1:6coWHw9xw7EfClIC/+O31R8IY3/+EiRFHevmHafB2Gw=
go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI=
go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU=
go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8=
go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=

@@ -203,14 +206,15 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc=
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=

@@ -222,16 +226,18 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo=
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs=
golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ=
golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o=
golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI=
golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=

@@ -241,21 +247,22 @@ golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU=
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4=
golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.16.0 h1:m+B6fahuftsE9qjo0VWp2FW0mB3MTJvR0BaMQrq0pmE=
golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8=
golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=

@@ -273,12 +280,13 @@ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200423201157-2723c5de0d66/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU=
google.golang.org/api v0.157.0 h1:ORAeqmbrrozeyw5NjnMxh7peHO0UzV4wWYSwZeCUb20=
google.golang.org/api v0.157.0/go.mod h1:+z4v4ufbZ1WEpld6yMGHyggs+PmAHiaLNj5ytP3N01g=
google.golang.org/api v0.170.0 h1:zMaruDePM88zxZBG+NG8+reALO2rfLhe/JShitLyT48=
google.golang.org/api v0.170.0/go.mod h1:/xql9M2btF85xac/VAm4PsLMTLVGUOpq4BE9R8jyNy8=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=

@@ -286,19 +294,19 @@ google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJ
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20240116215550-a9fa1716bcac h1:ZL/Teoy/ZGnzyrqK/Optxxp2pmVh+fmJ97slxSRyzUg=
google.golang.org/genproto v0.0.0-20240116215550-a9fa1716bcac/go.mod h1:+Rvu7ElI+aLzyDQhpHMFMMltsD6m7nqpuWDd2CwJw3k=
google.golang.org/genproto/googleapis/api v0.0.0-20240122161410-6c6643bf1457 h1:KHBtwE+eQc3+NxpjmRFlQ3pJQ2FNnhhgB9xOV8kyBuU=
google.golang.org/genproto/googleapis/api v0.0.0-20240122161410-6c6643bf1457/go.mod h1:4jWUdICTdgc3Ibxmr8nAJiiLHwQBY0UI0XZcEMaFKaA=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240116215550-a9fa1716bcac h1:nUQEQmH/csSvFECKYRv6HWEyypysidKl2I6Qpsglq/0=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240116215550-a9fa1716bcac/go.mod h1:daQN87bsDqDoe316QbbvX60nMoJQa4r6Ds0ZuoAe5yA=
google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9 h1:9+tzLLstTlPTRyJTh+ah5wIMsBW5c4tQwGTN3thOW9Y=
google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9/go.mod h1:mqHbVIp48Muh7Ywss/AD6I5kNVKZMmAa/QEW58Gxp2s=
google.golang.org/genproto/googleapis/api v0.0.0-20240314234333-6e1732d8331c h1:kaI7oewGK5YnVwj+Y+EJBO/YN1ht8iTL9XkFHtVZLsc=
google.golang.org/genproto/googleapis/api v0.0.0-20240314234333-6e1732d8331c/go.mod h1:VQW3tUculP/D4B+xVCo+VgSq8As6wA9ZjHl//pmk+6s=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240311132316-a219d84964c2 h1:9IZDv+/GcI6u+a4jRFRLxQs0RUCfavGfoOgEW6jpkI0=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240311132316-a219d84964c2/go.mod h1:UCOku4NytXMJuLQE5VuqA5lX3PcHCBo8pxNyvkf4xBs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.60.1 h1:26+wFr+cNqSGFcOXcabYC0lUVJVRa2Sb2ortSK7VrEU=
google.golang.org/grpc v1.60.1/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM=
google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk=
google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=

@@ -310,13 +318,14 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I=
google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
@@ -303,7 +303,7 @@ func generateFiles() {
	}
}

var versionPattern = `var version = ".*"`
var versionPattern = `const version = ".*"`

const versionCodeFile = "cmd/restic/global.go"

@@ -313,7 +313,7 @@ func updateVersion() {
		die("unable to write version to file: %v", err)
	}

	newVersion := fmt.Sprintf("var version = %q", opts.Version)
	newVersion := fmt.Sprintf("const version = %q", opts.Version)
	replace(versionCodeFile, versionPattern, newVersion)

	if len(uncommittedChanges("VERSION")) > 0 || len(uncommittedChanges(versionCodeFile)) > 0 {

@@ -323,7 +323,7 @@ func updateVersion() {
}

func updateVersionDev() {
	newVersion := fmt.Sprintf(`var version = "%s-dev (compiled manually)"`, opts.Version)
	newVersion := fmt.Sprintf(`const version = "%s-dev (compiled manually)"`, opts.Version)
	replace(versionCodeFile, versionPattern, newVersion)

	msg("committing cmd/restic/global.go with dev version")
@ -8,10 +8,12 @@ import (
|
|||
"runtime"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/restic/restic/internal/debug"
|
||||
"github.com/restic/restic/internal/errors"
|
||||
"github.com/restic/restic/internal/feature"
|
||||
"github.com/restic/restic/internal/fs"
|
||||
"github.com/restic/restic/internal/restic"
|
||||
"golang.org/x/sync/errgroup"
|
||||
|
@ -40,6 +42,18 @@ type ItemStats struct {
|
|||
TreeSizeInRepo uint64 // sum of the bytes added to the repo (including compression and crypto overhead)
|
||||
}
|
||||
|
||||
type ChangeStats struct {
|
||||
New uint
|
||||
Changed uint
|
||||
Unchanged uint
|
||||
}
|
||||
|
||||
type Summary struct {
|
||||
Files, Dirs ChangeStats
|
||||
ProcessedBytes uint64
|
||||
ItemStats
|
||||
}
|
||||
|
||||
// Add adds other to the current ItemStats.
|
||||
func (s *ItemStats) Add(other ItemStats) {
|
||||
s.DataBlobs += other.DataBlobs
|
||||
|
@ -61,6 +75,8 @@ type Archiver struct {
|
|||
blobSaver *BlobSaver
|
||||
fileSaver *FileSaver
|
||||
treeSaver *TreeSaver
|
||||
mu sync.Mutex
|
||||
summary *Summary
|
||||
|
||||
// Error is called for all errors that occur during backup.
|
||||
Error ErrorFunc
|
||||
|
@ -182,12 +198,58 @@ func (arch *Archiver) error(item string, err error) error {
return errf
}

func (arch *Archiver) trackItem(item string, previous, current *restic.Node, s ItemStats, d time.Duration) {
arch.CompleteItem(item, previous, current, s, d)

arch.mu.Lock()
defer arch.mu.Unlock()

arch.summary.ItemStats.Add(s)

if current != nil {
arch.summary.ProcessedBytes += current.Size
} else {
// last item or an error occurred
return
}

switch current.Type {
case "dir":
switch {
case previous == nil:
arch.summary.Dirs.New++
case previous.Equals(*current):
arch.summary.Dirs.Unchanged++
default:
arch.summary.Dirs.Changed++
}

case "file":
switch {
case previous == nil:
arch.summary.Files.New++
case previous.Equals(*current):
arch.summary.Files.Unchanged++
default:
arch.summary.Files.Changed++
}
}
}

// nodeFromFileInfo returns the restic node from an os.FileInfo.
func (arch *Archiver) nodeFromFileInfo(snPath, filename string, fi os.FileInfo) (*restic.Node, error) {
node, err := restic.NodeFromFileInfo(filename, fi)
func (arch *Archiver) nodeFromFileInfo(snPath, filename string, fi os.FileInfo, ignoreXattrListError bool) (*restic.Node, error) {
node, err := restic.NodeFromFileInfo(filename, fi, ignoreXattrListError)
if !arch.WithAtime {
node.AccessTime = node.ModTime
}
if feature.Flag.Enabled(feature.DeviceIDForHardlinks) {
if node.Links == 1 || node.Type == "dir" {
// the DeviceID is only necessary for hardlinked files
// when using subvolumes or snapshots their deviceIDs tend to change which causes
// restic to upload new tree blobs
node.DeviceID = 0
}
}
// overwrite name to match that within the snapshot
node.Name = path.Base(snPath)
if err != nil {
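nodeFromFileInfo now threads an ignoreXattrListError flag down to restic.NodeFromFileInfo; a later hunk passes true for directories above the backup target, where listing extended attributes can be denied. The real NodeFromFileInfo is not part of this diff; a self-contained sketch of the intended behaviour, with listXattr as a stub standing in for the platform xattr call:

package main

import (
	"errors"
	"fmt"
)

// listXattr is a stand-in for the platform xattr listing call; it is not
// the real restic helper, only a stub for this sketch.
func listXattr(path string) ([]string, error) {
	return nil, errors.New("permission denied")
}

// collectXattrNames shows the shape of the new behaviour: a listing error is
// swallowed when ignoreXattrListError is set, and propagated otherwise.
func collectXattrNames(path string, ignoreXattrListError bool) ([]string, error) {
	names, err := listXattr(path)
	if err != nil {
		if ignoreXattrListError {
			return nil, nil // e.g. a directory above the backup target denies listing
		}
		return nil, err
	}
	return names, nil
}

func main() {
	names, err := collectXattrNames("/backup/parent", true)
	fmt.Println(names, err) // [] <nil>: the listing error was ignored
}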
@ -222,12 +284,12 @@ func (arch *Archiver) wrapLoadTreeError(id restic.ID, err error) error {
return err
}

// SaveDir stores a directory in the repo and returns the node. snPath is the
// saveDir stores a directory in the repo and returns the node. snPath is the
// path within the current snapshot.
func (arch *Archiver) SaveDir(ctx context.Context, snPath string, dir string, fi os.FileInfo, previous *restic.Tree, complete CompleteFunc) (d FutureNode, err error) {
func (arch *Archiver) saveDir(ctx context.Context, snPath string, dir string, fi os.FileInfo, previous *restic.Tree, complete CompleteFunc) (d FutureNode, err error) {
debug.Log("%v %v", snPath, dir)

treeNode, err := arch.nodeFromFileInfo(snPath, dir, fi)
treeNode, err := arch.nodeFromFileInfo(snPath, dir, fi, false)
if err != nil {
return FutureNode{}, err
}

@ -250,7 +312,7 @@ func (arch *Archiver) SaveDir(ctx context.Context, snPath string, dir string, fi
pathname := arch.FS.Join(dir, name)
oldNode := previous.Find(name)
snItem := join(snPath, name)
fn, excluded, err := arch.Save(ctx, snItem, pathname, oldNode)
fn, excluded, err := arch.save(ctx, snItem, pathname, oldNode)

// return error early if possible
if err != nil {

@ -318,6 +380,7 @@ func (fn *FutureNode) take(ctx context.Context) futureNodeResult {
return res
}
case <-ctx.Done():
return futureNodeResult{err: ctx.Err()}
}
return futureNodeResult{err: errors.Errorf("no result")}
}

@ -334,14 +397,14 @@ func (arch *Archiver) allBlobsPresent(previous *restic.Node) bool {
return true
}

// Save saves a target (file or directory) to the repo. If the item is
// save saves a target (file or directory) to the repo. If the item is
// excluded, this function returns a nil node and error, with excluded set to
// true.
//
// Errors and completion needs to be handled by the caller.
//
// snPath is the path within the current snapshot.
func (arch *Archiver) Save(ctx context.Context, snPath, target string, previous *restic.Node) (fn FutureNode, excluded bool, err error) {
func (arch *Archiver) save(ctx context.Context, snPath, target string, previous *restic.Node) (fn FutureNode, excluded bool, err error) {
start := time.Now()

debug.Log("%v target %q, previous %v", snPath, target, previous)

@ -380,9 +443,9 @@ func (arch *Archiver) Save(ctx context.Context, snPath, target string, previous
if previous != nil && !fileChanged(fi, previous, arch.ChangeIgnoreFlags) {
if arch.allBlobsPresent(previous) {
debug.Log("%v hasn't changed, using old list of blobs", target)
arch.CompleteItem(snPath, previous, previous, ItemStats{}, time.Since(start))
arch.trackItem(snPath, previous, previous, ItemStats{}, time.Since(start))
arch.CompleteBlob(previous.Size)
node, err := arch.nodeFromFileInfo(snPath, target, fi)
node, err := arch.nodeFromFileInfo(snPath, target, fi, false)
if err != nil {
return FutureNode{}, false, err
}

@ -445,9 +508,9 @@ func (arch *Archiver) Save(ctx context.Context, snPath, target string, previous
fn = arch.fileSaver.Save(ctx, snPath, target, file, fi, func() {
arch.StartFile(snPath)
}, func() {
arch.CompleteItem(snPath, nil, nil, ItemStats{}, 0)
arch.trackItem(snPath, nil, nil, ItemStats{}, 0)
}, func(node *restic.Node, stats ItemStats) {
arch.CompleteItem(snPath, previous, node, stats, time.Since(start))
arch.trackItem(snPath, previous, node, stats, time.Since(start))
})

case fi.IsDir():

@ -462,9 +525,9 @@ func (arch *Archiver) Save(ctx context.Context, snPath, target string, previous
return FutureNode{}, false, err
}

fn, err = arch.SaveDir(ctx, snPath, target, fi, oldSubtree,
fn, err = arch.saveDir(ctx, snPath, target, fi, oldSubtree,
func(node *restic.Node, stats ItemStats) {
arch.CompleteItem(snItem, previous, node, stats, time.Since(start))
arch.trackItem(snItem, previous, node, stats, time.Since(start))
})
if err != nil {
debug.Log("SaveDir for %v returned error: %v", snPath, err)

@ -478,7 +541,7 @@ func (arch *Archiver) Save(ctx context.Context, snPath, target string, previous
default:
debug.Log(" %v other", target)

node, err := arch.nodeFromFileInfo(snPath, target, fi)
node, err := arch.nodeFromFileInfo(snPath, target, fi, false)
if err != nil {
return FutureNode{}, false, err
}

@ -545,9 +608,9 @@ func (arch *Archiver) statDir(dir string) (os.FileInfo, error) {
return fi, nil
}

// SaveTree stores a Tree in the repo, returned is the tree. snPath is the path
// saveTree stores a Tree in the repo, returned is the tree. snPath is the path
// within the current snapshot.
func (arch *Archiver) SaveTree(ctx context.Context, snPath string, atree *Tree, previous *restic.Tree, complete CompleteFunc) (FutureNode, int, error) {
func (arch *Archiver) saveTree(ctx context.Context, snPath string, atree *Tree, previous *restic.Tree, complete CompleteFunc) (FutureNode, int, error) {

var node *restic.Node
if snPath != "/" {

@ -561,7 +624,9 @@ func (arch *Archiver) SaveTree(ctx context.Context, snPath string, atree *Tree,
}

debug.Log("%v, dir node data loaded from %v", snPath, atree.FileInfoPath)
node, err = arch.nodeFromFileInfo(snPath, atree.FileInfoPath, fi)
// in some cases reading xattrs for directories above the backup target is not allowed
// thus ignore errors for such folders.
node, err = arch.nodeFromFileInfo(snPath, atree.FileInfoPath, fi, true)
if err != nil {
return FutureNode{}, 0, err
}

@ -585,7 +650,7 @@ func (arch *Archiver) SaveTree(ctx context.Context, snPath string, atree *Tree,

// this is a leaf node
if subatree.Leaf() {
fn, excluded, err := arch.Save(ctx, join(snPath, name), subatree.Path, previous.Find(name))
fn, excluded, err := arch.save(ctx, join(snPath, name), subatree.Path, previous.Find(name))

if err != nil {
err = arch.error(subatree.Path, err)

@ -619,8 +684,8 @@ func (arch *Archiver) SaveTree(ctx context.Context, snPath string, atree *Tree,
}

// not a leaf node, archive subtree
fn, _, err := arch.SaveTree(ctx, join(snPath, name), &subatree, oldSubtree, func(n *restic.Node, is ItemStats) {
arch.CompleteItem(snItem, oldNode, n, is, time.Since(start))
fn, _, err := arch.saveTree(ctx, join(snPath, name), &subatree, oldSubtree, func(n *restic.Node, is ItemStats) {
arch.trackItem(snItem, oldNode, n, is, time.Since(start))
})
if err != nil {
return FutureNode{}, 0, err

@ -688,6 +753,7 @@ type SnapshotOptions struct {
Tags restic.TagList
Hostname string
Excludes []string
BackupStart time.Time
Time time.Time
ParentSnapshot *restic.Snapshot
ProgramVersion string
@ -738,15 +804,17 @@ func (arch *Archiver) stopWorkers() {
}

// Snapshot saves several targets and returns a snapshot.
func (arch *Archiver) Snapshot(ctx context.Context, targets []string, opts SnapshotOptions) (*restic.Snapshot, restic.ID, error) {
func (arch *Archiver) Snapshot(ctx context.Context, targets []string, opts SnapshotOptions) (*restic.Snapshot, restic.ID, *Summary, error) {
arch.summary = &Summary{}

cleanTargets, err := resolveRelativeTargets(arch.FS, targets)
if err != nil {
return nil, restic.ID{}, err
return nil, restic.ID{}, nil, err
}

atree, err := NewTree(arch.FS, cleanTargets)
if err != nil {
return nil, restic.ID{}, err
return nil, restic.ID{}, nil, err
}

var rootTreeID restic.ID

@ -762,8 +830,8 @@ func (arch *Archiver) Snapshot(ctx context.Context, targets []string, opts Snaps
arch.runWorkers(wgCtx, wg)

debug.Log("starting snapshot")
fn, nodeCount, err := arch.SaveTree(wgCtx, "/", atree, arch.loadParentTree(wgCtx, opts.ParentSnapshot), func(_ *restic.Node, is ItemStats) {
arch.CompleteItem("/", nil, nil, is, time.Since(start))
fn, nodeCount, err := arch.saveTree(wgCtx, "/", atree, arch.loadParentTree(wgCtx, opts.ParentSnapshot), func(_ *restic.Node, is ItemStats) {
arch.trackItem("/", nil, nil, is, time.Since(start))
})
if err != nil {
return err

@ -799,12 +867,12 @@ func (arch *Archiver) Snapshot(ctx context.Context, targets []string, opts Snaps
})
err = wgUp.Wait()
if err != nil {
return nil, restic.ID{}, err
return nil, restic.ID{}, nil, err
}

sn, err := restic.NewSnapshot(targets, opts.Tags, opts.Hostname, opts.Time)
if err != nil {
return nil, restic.ID{}, err
return nil, restic.ID{}, nil, err
}

sn.ProgramVersion = opts.ProgramVersion

@ -813,11 +881,28 @@ func (arch *Archiver) Snapshot(ctx context.Context, targets []string, opts Snaps
sn.Parent = opts.ParentSnapshot.ID()
}
sn.Tree = &rootTreeID
sn.Summary = &restic.SnapshotSummary{
BackupStart: opts.BackupStart,
BackupEnd: time.Now(),

FilesNew: arch.summary.Files.New,
FilesChanged: arch.summary.Files.Changed,
FilesUnmodified: arch.summary.Files.Unchanged,
DirsNew: arch.summary.Dirs.New,
DirsChanged: arch.summary.Dirs.Changed,
DirsUnmodified: arch.summary.Dirs.Unchanged,
DataBlobs: arch.summary.ItemStats.DataBlobs,
TreeBlobs: arch.summary.ItemStats.TreeBlobs,
DataAdded: arch.summary.ItemStats.DataSize + arch.summary.ItemStats.TreeSize,
DataAddedPacked: arch.summary.ItemStats.DataSizeInRepo + arch.summary.ItemStats.TreeSizeInRepo,
TotalFilesProcessed: arch.summary.Files.New + arch.summary.Files.Changed + arch.summary.Files.Unchanged,
TotalBytesProcessed: arch.summary.ProcessedBytes,
}

id, err := restic.SaveSnapshot(ctx, arch.Repo, sn)
if err != nil {
return nil, restic.ID{}, err
return nil, restic.ID{}, nil, err
}

return sn, id, nil
return sn, id, arch.summary, nil
}
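With the extra *Summary return value, every Snapshot caller gains a fourth assignment slot, as the test updates below show. A minimal caller after this change might look like the following fragment (arch and ctx as in the surrounding code):

sn, id, summary, err := arch.Snapshot(ctx, []string{"."}, SnapshotOptions{Time: time.Now()})
if err != nil {
	return err
}
// the statistics are now returned directly instead of being recomputed
// from the CompleteItem progress callbacks
fmt.Printf("snapshot %v: %d new files, %d bytes processed\n",
	id.Str(), summary.Files.New, summary.ProcessedBytes)
_ = sn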
@ -19,15 +19,16 @@ import (
"github.com/restic/restic/internal/backend/mem"
"github.com/restic/restic/internal/checker"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/feature"
"github.com/restic/restic/internal/fs"
"github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/restic"
restictest "github.com/restic/restic/internal/test"
rtest "github.com/restic/restic/internal/test"
"golang.org/x/sync/errgroup"
)

func prepareTempdirRepoSrc(t testing.TB, src TestDir) (string, restic.Repository) {
tempdir := restictest.TempDir(t)
tempdir := rtest.TempDir(t)
repo := repository.TestRepository(t)

TestCreateFiles(t, tempdir, src)
@ -132,7 +133,7 @@ func TestArchiverSaveFile(t *testing.T) {
var tests = []TestFile{
{Content: ""},
{Content: "foo"},
{Content: string(restictest.Random(23, 12*1024*1024+1287898))},
{Content: string(rtest.Random(23, 12*1024*1024+1287898))},
}

for _, testfile := range tests {

@ -165,7 +166,7 @@ func TestArchiverSaveFileReaderFS(t *testing.T) {
Data string
}{
{Data: "foo"},
{Data: string(restictest.Random(23, 12*1024*1024+1287898))},
{Data: string(rtest.Random(23, 12*1024*1024+1287898))},
}

for _, test := range tests {

@ -207,7 +208,7 @@ func TestArchiverSave(t *testing.T) {
var tests = []TestFile{
{Content: ""},
{Content: "foo"},
{Content: string(restictest.Random(23, 12*1024*1024+1287898))},
{Content: string(rtest.Random(23, 12*1024*1024+1287898))},
}

for _, testfile := range tests {

@ -226,8 +227,9 @@ func TestArchiverSave(t *testing.T) {
return err
}
arch.runWorkers(ctx, wg)
arch.summary = &Summary{}

node, excluded, err := arch.Save(ctx, "/", filepath.Join(tempdir, "file"), nil)
node, excluded, err := arch.save(ctx, "/", filepath.Join(tempdir, "file"), nil)
if err != nil {
t.Fatal(err)
}

@ -275,7 +277,7 @@ func TestArchiverSaveReaderFS(t *testing.T) {
Data string
}{
{Data: "foo"},
{Data: string(restictest.Random(23, 12*1024*1024+1287898))},
{Data: string(rtest.Random(23, 12*1024*1024+1287898))},
}

for _, test := range tests {

@ -303,8 +305,9 @@ func TestArchiverSaveReaderFS(t *testing.T) {
return err
}
arch.runWorkers(ctx, wg)
arch.summary = &Summary{}

node, excluded, err := arch.Save(ctx, "/", filename, nil)
node, excluded, err := arch.save(ctx, "/", filename, nil)
t.Logf("Save returned %v %v", node, err)
if err != nil {
t.Fatal(err)

@ -351,7 +354,7 @@ func TestArchiverSaveReaderFS(t *testing.T) {
func BenchmarkArchiverSaveFileSmall(b *testing.B) {
const fileSize = 4 * 1024
d := TestDir{"file": TestFile{
Content: string(restictest.Random(23, fileSize)),
Content: string(rtest.Random(23, fileSize)),
}}

b.SetBytes(fileSize)

@ -383,7 +386,7 @@ func BenchmarkArchiverSaveFileSmall(b *testing.B) {
func BenchmarkArchiverSaveFileLarge(b *testing.B) {
const fileSize = 40*1024*1024 + 1287898
d := TestDir{"file": TestFile{
Content: string(restictest.Random(23, fileSize)),
Content: string(rtest.Random(23, fileSize)),
}}

b.SetBytes(fileSize)

@ -459,14 +462,14 @@ func appendToFile(t testing.TB, filename string, data []byte) {
}

func TestArchiverSaveFileIncremental(t *testing.T) {
tempdir := restictest.TempDir(t)
tempdir := rtest.TempDir(t)

repo := &blobCountingRepo{
Repository: repository.TestRepository(t),
saved: make(map[restic.BlobHandle]uint),
}

data := restictest.Random(23, 512*1024+887898)
data := rtest.Random(23, 512*1024+887898)
testfile := filepath.Join(tempdir, "testfile")

for i := 0; i < 3; i++ {

@ -509,12 +512,12 @@ func chmodTwice(t testing.TB, name string) {
// POSIX says that ctime is updated "even if the file status does not
// change", but let's make sure it does change, just in case.
err := os.Chmod(name, 0700)
restictest.OK(t, err)
rtest.OK(t, err)

sleep()

err = os.Chmod(name, 0600)
restictest.OK(t, err)
rtest.OK(t, err)
}

func lstat(t testing.TB, name string) os.FileInfo {

@ -553,7 +556,7 @@ func rename(t testing.TB, oldname, newname string) {
}

func nodeFromFI(t testing.TB, filename string, fi os.FileInfo) *restic.Node {
node, err := restic.NodeFromFileInfo(filename, fi)
node, err := restic.NodeFromFileInfo(filename, fi, false)
if err != nil {
t.Fatal(err)
}

@ -673,7 +676,7 @@ func TestFileChanged(t *testing.T) {
t.Skip("don't run test on Windows")
}

tempdir := restictest.TempDir(t)
tempdir := rtest.TempDir(t)

filename := filepath.Join(tempdir, "file")
content := defaultContent

@ -709,7 +712,7 @@ func TestFileChanged(t *testing.T) {
}

func TestFilChangedSpecialCases(t *testing.T) {
tempdir := restictest.TempDir(t)
tempdir := rtest.TempDir(t)

filename := filepath.Join(tempdir, "file")
content := []byte("foobar")

@ -743,12 +746,12 @@ func TestArchiverSaveDir(t *testing.T) {
}{
{
src: TestDir{
"targetfile": TestFile{Content: string(restictest.Random(888, 2*1024*1024+5000))},
"targetfile": TestFile{Content: string(rtest.Random(888, 2*1024*1024+5000))},
},
target: ".",
want: TestDir{
"targetdir": TestDir{
"targetfile": TestFile{Content: string(restictest.Random(888, 2*1024*1024+5000))},
"targetfile": TestFile{Content: string(rtest.Random(888, 2*1024*1024+5000))},
},
},
},

@ -758,8 +761,8 @@ func TestArchiverSaveDir(t *testing.T) {
"foo": TestFile{Content: "foo"},
"emptyfile": TestFile{Content: ""},
"bar": TestFile{Content: "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"},
"largefile": TestFile{Content: string(restictest.Random(888, 2*1024*1024+5000))},
"largerfile": TestFile{Content: string(restictest.Random(234, 5*1024*1024+5000))},
"largefile": TestFile{Content: string(rtest.Random(888, 2*1024*1024+5000))},
"largerfile": TestFile{Content: string(rtest.Random(234, 5*1024*1024+5000))},
},
},
target: "targetdir",

@ -831,13 +834,14 @@ func TestArchiverSaveDir(t *testing.T) {

arch := New(repo, fs.Track{FS: fs.Local{}}, Options{})
arch.runWorkers(ctx, wg)
arch.summary = &Summary{}

chdir := tempdir
if test.chdir != "" {
chdir = filepath.Join(chdir, test.chdir)
}

back := restictest.Chdir(t, chdir)
back := rtest.Chdir(t, chdir)
defer back()

fi, err := fs.Lstat(test.target)

@ -845,7 +849,7 @@ func TestArchiverSaveDir(t *testing.T) {
t.Fatal(err)
}

ft, err := arch.SaveDir(ctx, "/", test.target, fi, nil, nil)
ft, err := arch.saveDir(ctx, "/", test.target, fi, nil, nil)
if err != nil {
t.Fatal(err)
}

@ -895,7 +899,7 @@ func TestArchiverSaveDir(t *testing.T) {
}

func TestArchiverSaveDirIncremental(t *testing.T) {
tempdir := restictest.TempDir(t)
tempdir := rtest.TempDir(t)

repo := &blobCountingRepo{
Repository: repository.TestRepository(t),

@ -912,13 +916,14 @@ func TestArchiverSaveDirIncremental(t *testing.T) {

arch := New(repo, fs.Track{FS: fs.Local{}}, Options{})
arch.runWorkers(ctx, wg)
arch.summary = &Summary{}

fi, err := fs.Lstat(tempdir)
if err != nil {
t.Fatal(err)
}

ft, err := arch.SaveDir(ctx, "/", tempdir, fi, nil, nil)
ft, err := arch.saveDir(ctx, "/", tempdir, fi, nil, nil)
if err != nil {
t.Fatal(err)
}

@ -982,9 +987,9 @@ func TestArchiverSaveDirIncremental(t *testing.T) {

// bothZeroOrNeither fails the test if only one of exp, act is zero.
func bothZeroOrNeither(tb testing.TB, exp, act uint64) {
tb.Helper()
if (exp == 0 && act != 0) || (exp != 0 && act == 0) {
_, file, line, _ := runtime.Caller(1)
tb.Fatalf("\033[31m%s:%d:\n\n\texp: %#v\n\n\tgot: %#v\033[39m\n\n", filepath.Base(file), line, exp, act)
rtest.Equals(tb, exp, act)
}
}
@ -1004,7 +1009,7 @@ func TestArchiverSaveTree(t *testing.T) {
prepare func(t testing.TB)
targets []string
want TestDir
stat ItemStats
stat Summary
}{
{
src: TestDir{

@ -1014,7 +1019,12 @@ func TestArchiverSaveTree(t *testing.T) {
want: TestDir{
"targetfile": TestFile{Content: string("foobar")},
},
stat: ItemStats{1, 6, 32 + 6, 0, 0, 0},
stat: Summary{
ItemStats: ItemStats{1, 6, 32 + 6, 0, 0, 0},
ProcessedBytes: 6,
Files: ChangeStats{1, 0, 0},
Dirs: ChangeStats{0, 0, 0},
},
},
{
src: TestDir{

@ -1026,7 +1036,12 @@ func TestArchiverSaveTree(t *testing.T) {
"targetfile": TestFile{Content: string("foobar")},
"filesymlink": TestSymlink{Target: "targetfile"},
},
stat: ItemStats{1, 6, 32 + 6, 0, 0, 0},
stat: Summary{
ItemStats: ItemStats{1, 6, 32 + 6, 0, 0, 0},
ProcessedBytes: 6,
Files: ChangeStats{1, 0, 0},
Dirs: ChangeStats{0, 0, 0},
},
},
{
src: TestDir{

@ -1046,7 +1061,12 @@ func TestArchiverSaveTree(t *testing.T) {
"symlink": TestSymlink{Target: "subdir"},
},
},
stat: ItemStats{0, 0, 0, 1, 0x154, 0x16a},
stat: Summary{
ItemStats: ItemStats{0, 0, 0, 1, 0x154, 0x16a},
ProcessedBytes: 0,
Files: ChangeStats{0, 0, 0},
Dirs: ChangeStats{1, 0, 0},
},
},
{
src: TestDir{

@ -1070,7 +1090,12 @@ func TestArchiverSaveTree(t *testing.T) {
},
},
},
stat: ItemStats{1, 6, 32 + 6, 3, 0x47f, 0x4c1},
stat: Summary{
ItemStats: ItemStats{1, 6, 32 + 6, 3, 0x47f, 0x4c1},
ProcessedBytes: 6,
Files: ChangeStats{1, 0, 0},
Dirs: ChangeStats{3, 0, 0},
},
},
}

@ -1082,20 +1107,13 @@ func TestArchiverSaveTree(t *testing.T) {

arch := New(repo, testFS, Options{})

var stat ItemStats
lock := &sync.Mutex{}
arch.CompleteItem = func(item string, previous, current *restic.Node, s ItemStats, d time.Duration) {
lock.Lock()
defer lock.Unlock()
stat.Add(s)
}

wg, ctx := errgroup.WithContext(context.TODO())
repo.StartPackUploader(ctx, wg)

arch.runWorkers(ctx, wg)
arch.summary = &Summary{}

back := restictest.Chdir(t, tempdir)
back := rtest.Chdir(t, tempdir)
defer back()

if test.prepare != nil {

@ -1107,7 +1125,7 @@ func TestArchiverSaveTree(t *testing.T) {
t.Fatal(err)
}

fn, _, err := arch.SaveTree(ctx, "/", atree, nil, nil)
fn, _, err := arch.saveTree(ctx, "/", atree, nil, nil)
if err != nil {
t.Fatal(err)
}

@ -1134,11 +1152,15 @@ func TestArchiverSaveTree(t *testing.T) {
want = test.src
}
TestEnsureTree(context.TODO(), t, "/", repo, treeID, want)
stat := arch.summary
bothZeroOrNeither(t, uint64(test.stat.DataBlobs), uint64(stat.DataBlobs))
bothZeroOrNeither(t, uint64(test.stat.TreeBlobs), uint64(stat.TreeBlobs))
bothZeroOrNeither(t, test.stat.DataSize, stat.DataSize)
bothZeroOrNeither(t, test.stat.DataSizeInRepo, stat.DataSizeInRepo)
bothZeroOrNeither(t, test.stat.TreeSizeInRepo, stat.TreeSizeInRepo)
rtest.Equals(t, test.stat.ProcessedBytes, stat.ProcessedBytes)
rtest.Equals(t, test.stat.Files, stat.Files)
rtest.Equals(t, test.stat.Dirs, stat.Dirs)
})
}
}
@ -1386,7 +1408,7 @@ func TestArchiverSnapshot(t *testing.T) {
chdir = filepath.Join(chdir, filepath.FromSlash(test.chdir))
}

back := restictest.Chdir(t, chdir)
back := rtest.Chdir(t, chdir)
defer back()

var targets []string

@ -1395,7 +1417,7 @@ func TestArchiverSnapshot(t *testing.T) {
}

t.Logf("targets: %v", targets)
sn, snapshotID, err := arch.Snapshot(ctx, targets, SnapshotOptions{Time: time.Now()})
sn, snapshotID, _, err := arch.Snapshot(ctx, targets, SnapshotOptions{Time: time.Now()})
if err != nil {
t.Fatal(err)
}

@ -1408,7 +1430,7 @@ func TestArchiverSnapshot(t *testing.T) {
}
TestEnsureSnapshot(t, repo, snapshotID, want)

checker.TestCheckRepo(t, repo)
checker.TestCheckRepo(t, repo, false)

// check that the snapshot contains the targets with absolute paths
for i, target := range sn.Paths {

@ -1539,11 +1561,11 @@ func TestArchiverSnapshotSelect(t *testing.T) {
arch := New(repo, fs.Track{FS: fs.Local{}}, Options{})
arch.Select = test.selFn

back := restictest.Chdir(t, tempdir)
back := rtest.Chdir(t, tempdir)
defer back()

targets := []string{"."}
_, snapshotID, err := arch.Snapshot(ctx, targets, SnapshotOptions{Time: time.Now()})
_, snapshotID, _, err := arch.Snapshot(ctx, targets, SnapshotOptions{Time: time.Now()})
if test.err != "" {
if err == nil {
t.Fatalf("expected error not found, got %v, wanted %q", err, test.err)

@ -1568,7 +1590,7 @@ func TestArchiverSnapshotSelect(t *testing.T) {
}
TestEnsureSnapshot(t, repo, snapshotID, want)

checker.TestCheckRepo(t, repo)
checker.TestCheckRepo(t, repo, false)
})
}
}
@ -1616,17 +1638,85 @@ func (f MockFile) Read(p []byte) (int, error) {
return n, err
}

func checkSnapshotStats(t *testing.T, sn *restic.Snapshot, stat Summary) {
rtest.Equals(t, stat.Files.New, sn.Summary.FilesNew)
rtest.Equals(t, stat.Files.Changed, sn.Summary.FilesChanged)
rtest.Equals(t, stat.Files.Unchanged, sn.Summary.FilesUnmodified)
rtest.Equals(t, stat.Dirs.New, sn.Summary.DirsNew)
rtest.Equals(t, stat.Dirs.Changed, sn.Summary.DirsChanged)
rtest.Equals(t, stat.Dirs.Unchanged, sn.Summary.DirsUnmodified)
rtest.Equals(t, stat.ProcessedBytes, sn.Summary.TotalBytesProcessed)
rtest.Equals(t, stat.Files.New+stat.Files.Changed+stat.Files.Unchanged, sn.Summary.TotalFilesProcessed)
bothZeroOrNeither(t, uint64(stat.DataBlobs), uint64(sn.Summary.DataBlobs))
bothZeroOrNeither(t, uint64(stat.TreeBlobs), uint64(sn.Summary.TreeBlobs))
bothZeroOrNeither(t, uint64(stat.DataSize+stat.TreeSize), uint64(sn.Summary.DataAdded))
bothZeroOrNeither(t, uint64(stat.DataSizeInRepo+stat.TreeSizeInRepo), uint64(sn.Summary.DataAddedPacked))
}

func TestArchiverParent(t *testing.T) {
var tests = []struct {
src TestDir
read map[string]int // tracks number of times a file must have been read
src TestDir
modify func(path string)
statInitial Summary
statSecond Summary
}{
{
src: TestDir{
"targetfile": TestFile{Content: string(restictest.Random(888, 2*1024*1024+5000))},
"targetfile": TestFile{Content: string(rtest.Random(888, 2*1024*1024+5000))},
},
read: map[string]int{
"targetfile": 1,
statInitial: Summary{
Files: ChangeStats{1, 0, 0},
Dirs: ChangeStats{0, 0, 0},
ProcessedBytes: 2102152,
ItemStats: ItemStats{3, 0x201593, 0x201632, 1, 0, 0},
},
statSecond: Summary{
Files: ChangeStats{0, 0, 1},
Dirs: ChangeStats{0, 0, 0},
ProcessedBytes: 2102152,
},
},
{
src: TestDir{
"targetDir": TestDir{
"targetfile": TestFile{Content: string(rtest.Random(888, 1234))},
"targetfile2": TestFile{Content: string(rtest.Random(888, 1235))},
},
},
statInitial: Summary{
Files: ChangeStats{2, 0, 0},
Dirs: ChangeStats{1, 0, 0},
ProcessedBytes: 2469,
ItemStats: ItemStats{2, 0xe1c, 0xcd9, 2, 0, 0},
},
statSecond: Summary{
Files: ChangeStats{0, 0, 2},
Dirs: ChangeStats{0, 0, 1},
ProcessedBytes: 2469,
},
},
{
src: TestDir{
"targetDir": TestDir{
"targetfile": TestFile{Content: string(rtest.Random(888, 1234))},
},
"targetfile2": TestFile{Content: string(rtest.Random(888, 1235))},
},
modify: func(path string) {
remove(t, filepath.Join(path, "targetDir", "targetfile"))
save(t, filepath.Join(path, "targetfile2"), []byte("foobar"))
},
statInitial: Summary{
Files: ChangeStats{2, 0, 0},
Dirs: ChangeStats{1, 0, 0},
ProcessedBytes: 2469,
ItemStats: ItemStats{2, 0xe13, 0xcf8, 2, 0, 0},
},
statSecond: Summary{
Files: ChangeStats{0, 1, 0},
Dirs: ChangeStats{0, 1, 0},
ProcessedBytes: 6,
ItemStats: ItemStats{1, 0x305, 0x233, 2, 0, 0},
},
},
}

@ -1645,10 +1735,10 @@ func TestArchiverParent(t *testing.T) {

arch := New(repo, testFS, Options{})

back := restictest.Chdir(t, tempdir)
back := rtest.Chdir(t, tempdir)
defer back()

firstSnapshot, firstSnapshotID, err := arch.Snapshot(ctx, []string{"."}, SnapshotOptions{Time: time.Now()})
firstSnapshot, firstSnapshotID, summary, err := arch.Snapshot(ctx, []string{"."}, SnapshotOptions{Time: time.Now()})
if err != nil {
t.Fatal(err)
}

@ -1673,38 +1763,38 @@ func TestArchiverParent(t *testing.T) {
}
return nil
})
rtest.Equals(t, test.statInitial.Files, summary.Files)
rtest.Equals(t, test.statInitial.Dirs, summary.Dirs)
rtest.Equals(t, test.statInitial.ProcessedBytes, summary.ProcessedBytes)
checkSnapshotStats(t, firstSnapshot, test.statInitial)

if test.modify != nil {
test.modify(tempdir)
}

opts := SnapshotOptions{
Time: time.Now(),
ParentSnapshot: firstSnapshot,
}
_, secondSnapshotID, err := arch.Snapshot(ctx, []string{"."}, opts)
testFS.bytesRead = map[string]int{}
secondSnapshot, secondSnapshotID, summary, err := arch.Snapshot(ctx, []string{"."}, opts)
if err != nil {
t.Fatal(err)
}

// check that all files still been read exactly once
TestWalkFiles(t, ".", test.src, func(filename string, item interface{}) error {
file, ok := item.(TestFile)
if !ok {
return nil
}

n, ok := testFS.bytesRead[filename]
if !ok {
t.Fatalf("file %v was not read at all", filename)
}

if n != len(file.Content) {
t.Fatalf("file %v: read %v bytes, wanted %v bytes", filename, n, len(file.Content))
}
return nil
})
if test.modify == nil {
// check that no files were read this time
rtest.Equals(t, map[string]int{}, testFS.bytesRead)
}
rtest.Equals(t, test.statSecond.Files, summary.Files)
rtest.Equals(t, test.statSecond.Dirs, summary.Dirs)
rtest.Equals(t, test.statSecond.ProcessedBytes, summary.ProcessedBytes)
checkSnapshotStats(t, secondSnapshot, test.statSecond)

t.Logf("second backup saved as %v", secondSnapshotID.Str())
t.Logf("testfs: %v", testFS)

checker.TestCheckRepo(t, repo)
checker.TestCheckRepo(t, repo, false)
})
}
}
@ -1804,7 +1894,7 @@ func TestArchiverErrorReporting(t *testing.T) {

tempdir, repo := prepareTempdirRepoSrc(t, test.src)

back := restictest.Chdir(t, tempdir)
back := rtest.Chdir(t, tempdir)
defer back()

if test.prepare != nil {

@ -1814,7 +1904,7 @@ func TestArchiverErrorReporting(t *testing.T) {
arch := New(repo, fs.Track{FS: fs.Local{}}, Options{})
arch.Error = test.errFn

_, snapshotID, err := arch.Snapshot(ctx, []string{"."}, SnapshotOptions{Time: time.Now()})
_, snapshotID, _, err := arch.Snapshot(ctx, []string{"."}, SnapshotOptions{Time: time.Now()})
if test.mustError {
if err != nil {
t.Logf("found expected error (%v), skipping further checks", err)

@ -1837,7 +1927,7 @@ func TestArchiverErrorReporting(t *testing.T) {
}
TestEnsureSnapshot(t, repo, snapshotID, want)

checker.TestCheckRepo(t, repo)
checker.TestCheckRepo(t, repo, false)
})
}
}

@ -1874,7 +1964,7 @@ func TestArchiverContextCanceled(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
cancel()

tempdir := restictest.TempDir(t)
tempdir := rtest.TempDir(t)
TestCreateFiles(t, tempdir, TestDir{
"targetfile": TestFile{Content: "foobar"},
})

@ -1882,12 +1972,12 @@ func TestArchiverContextCanceled(t *testing.T) {
// Ensure that the archiver itself reports the canceled context and not just the backend
repo := repository.TestRepositoryWithBackend(t, &noCancelBackend{mem.New()}, 0, repository.Options{})

back := restictest.Chdir(t, tempdir)
back := rtest.Chdir(t, tempdir)
defer back()

arch := New(repo, fs.Track{FS: fs.Local{}}, Options{})

_, snapshotID, err := arch.Snapshot(ctx, []string{"."}, SnapshotOptions{Time: time.Now()})
_, snapshotID, _, err := arch.Snapshot(ctx, []string{"."}, SnapshotOptions{Time: time.Now()})

if err != nil {
t.Logf("found expected error (%v)", err)

@ -1968,16 +2058,16 @@ func TestArchiverAbortEarlyOnError(t *testing.T) {
{
src: TestDir{
"dir": TestDir{
"file0": TestFile{Content: string(restictest.Random(0, 1024))},
"file1": TestFile{Content: string(restictest.Random(1, 1024))},
"file2": TestFile{Content: string(restictest.Random(2, 1024))},
"file3": TestFile{Content: string(restictest.Random(3, 1024))},
"file4": TestFile{Content: string(restictest.Random(4, 1024))},
"file5": TestFile{Content: string(restictest.Random(5, 1024))},
"file6": TestFile{Content: string(restictest.Random(6, 1024))},
"file7": TestFile{Content: string(restictest.Random(7, 1024))},
"file8": TestFile{Content: string(restictest.Random(8, 1024))},
"file9": TestFile{Content: string(restictest.Random(9, 1024))},
"file0": TestFile{Content: string(rtest.Random(0, 1024))},
"file1": TestFile{Content: string(rtest.Random(1, 1024))},
"file2": TestFile{Content: string(rtest.Random(2, 1024))},
"file3": TestFile{Content: string(rtest.Random(3, 1024))},
"file4": TestFile{Content: string(rtest.Random(4, 1024))},
"file5": TestFile{Content: string(rtest.Random(5, 1024))},
"file6": TestFile{Content: string(rtest.Random(6, 1024))},
"file7": TestFile{Content: string(rtest.Random(7, 1024))},
"file8": TestFile{Content: string(rtest.Random(8, 1024))},
"file9": TestFile{Content: string(rtest.Random(9, 1024))},
},
},
wantOpen: map[string]uint{

@ -2002,7 +2092,7 @@ func TestArchiverAbortEarlyOnError(t *testing.T) {

tempdir, repo := prepareTempdirRepoSrc(t, test.src)

back := restictest.Chdir(t, tempdir)
back := rtest.Chdir(t, tempdir)
defer back()

testFS := &TrackFS{

@ -2026,7 +2116,7 @@ func TestArchiverAbortEarlyOnError(t *testing.T) {
SaveBlobConcurrency: 1,
})

_, _, err := arch.Snapshot(ctx, []string{"."}, SnapshotOptions{Time: time.Now()})
_, _, _, err := arch.Snapshot(ctx, []string{"."}, SnapshotOptions{Time: time.Now()})
if !errors.Is(err, test.err) {
t.Errorf("expected error (%v) not found, got %v", test.err, err)
}

@ -2054,7 +2144,7 @@ func snapshot(t testing.TB, repo restic.Repository, fs fs.FS, parent *restic.Sna
Time: time.Now(),
ParentSnapshot: parent,
}
snapshot, _, err := arch.Snapshot(ctx, []string{filename}, sopts)
snapshot, _, _, err := arch.Snapshot(ctx, []string{filename}, sopts)
if err != nil {
t.Fatal(err)
}

@ -2125,6 +2215,8 @@ const (
)

func TestMetadataChanged(t *testing.T) {
defer feature.TestSetFlag(t, feature.Flag, feature.DeviceIDForHardlinks, true)()

files := TestDir{
"testfile": TestFile{
Content: "foo bar test file",

@ -2133,12 +2225,12 @@ func TestMetadataChanged(t *testing.T) {

tempdir, repo := prepareTempdirRepoSrc(t, files)

back := restictest.Chdir(t, tempdir)
back := rtest.Chdir(t, tempdir)
defer back()

// get metadata
fi := lstat(t, "testfile")
want, err := restic.NodeFromFileInfo("testfile", fi)
want, err := restic.NodeFromFileInfo("testfile", fi, false)
if err != nil {
t.Fatal(err)
}

@ -2153,6 +2245,7 @@ func TestMetadataChanged(t *testing.T) {
sn, node2 := snapshot(t, repo, fs, nil, "testfile")

// set some values so we can then compare the nodes
want.DeviceID = 0
want.Content = node2.Content
want.Path = ""
if len(want.ExtendedAttributes) == 0 {

@ -2195,7 +2288,7 @@ func TestMetadataChanged(t *testing.T) {
// make sure the content matches
TestEnsureFileContent(context.Background(), t, repo, "testfile", node3, files["testfile"].(TestFile))

checker.TestCheckRepo(t, repo)
checker.TestCheckRepo(t, repo, false)
}

func TestRacyFileSwap(t *testing.T) {

@ -2207,7 +2300,7 @@ func TestRacyFileSwap(t *testing.T) {

tempdir, repo := prepareTempdirRepoSrc(t, files)

back := restictest.Chdir(t, tempdir)
back := rtest.Chdir(t, tempdir)
defer back()

// get metadata of current folder

@ -2236,7 +2329,7 @@ func TestRacyFileSwap(t *testing.T) {
arch.runWorkers(ctx, wg)

// fs.Track will panic if the file was not closed
_, excluded, err := arch.Save(ctx, "/", tempfile, nil)
_, excluded, err := arch.save(ctx, "/", tempfile, nil)
if err == nil {
t.Errorf("Save() should have failed")
}
@ -6,6 +6,12 @@ package archiver
import (
"os"
"syscall"
"testing"

"github.com/restic/restic/internal/feature"
"github.com/restic/restic/internal/fs"
"github.com/restic/restic/internal/restic"
rtest "github.com/restic/restic/internal/test"
)

type wrappedFileInfo struct {

@ -39,3 +45,45 @@ func wrapFileInfo(fi os.FileInfo) os.FileInfo {

return res
}

func statAndSnapshot(t *testing.T, repo restic.Repository, name string) (*restic.Node, *restic.Node) {
fi := lstat(t, name)
want, err := restic.NodeFromFileInfo(name, fi, false)
rtest.OK(t, err)

_, node := snapshot(t, repo, fs.Local{}, nil, name)
return want, node
}

func TestHardlinkMetadata(t *testing.T) {
defer feature.TestSetFlag(t, feature.Flag, feature.DeviceIDForHardlinks, true)()

files := TestDir{
"testfile": TestFile{
Content: "foo bar test file",
},
"linktarget": TestFile{
Content: "test file",
},
"testlink": TestHardlink{
Target: "./linktarget",
},
"testdir": TestDir{},
}

tempdir, repo := prepareTempdirRepoSrc(t, files)

back := rtest.Chdir(t, tempdir)
defer back()

want, node := statAndSnapshot(t, repo, "testlink")
rtest.Assert(t, node.DeviceID == want.DeviceID, "device id mismatch expected %v got %v", want.DeviceID, node.DeviceID)
rtest.Assert(t, node.Links == want.Links, "link count mismatch expected %v got %v", want.Links, node.Links)
rtest.Assert(t, node.Inode == want.Inode, "inode mismatch expected %v got %v", want.Inode, node.Inode)

_, node = statAndSnapshot(t, repo, "testfile")
rtest.Assert(t, node.DeviceID == 0, "device id mismatch for testfile expected %v got %v", 0, node.DeviceID)

_, node = statAndSnapshot(t, repo, "testdir")
rtest.Assert(t, node.DeviceID == 0, "device id mismatch for testdir expected %v got %v", 0, node.DeviceID)
}
@ -29,7 +29,7 @@ type FileSaver struct {

CompleteBlob func(bytes uint64)

NodeFromFileInfo func(snPath, filename string, fi os.FileInfo) (*restic.Node, error)
NodeFromFileInfo func(snPath, filename string, fi os.FileInfo, ignoreXattrListError bool) (*restic.Node, error)
}

// NewFileSaver returns a new file saver. A worker pool with fileWorkers is

@ -156,7 +156,7 @@ func (s *FileSaver) saveFile(ctx context.Context, chnker *chunker.Chunker, snPat

debug.Log("%v", snPath)

node, err := s.NodeFromFileInfo(snPath, f.Name(), fi)
node, err := s.NodeFromFileInfo(snPath, f.Name(), fi, false)
if err != nil {
_ = f.Close()
completeError(err)
@ -49,8 +49,8 @@ func startFileSaver(ctx context.Context, t testing.TB) (*FileSaver, context.Cont
}

s := NewFileSaver(ctx, wg, saveBlob, pol, workers, workers)
s.NodeFromFileInfo = func(snPath, filename string, fi os.FileInfo) (*restic.Node, error) {
return restic.NodeFromFileInfo(filename, fi)
s.NodeFromFileInfo = func(snPath, filename string, fi os.FileInfo, ignoreXattrListError bool) (*restic.Node, error) {
return restic.NodeFromFileInfo(filename, fi, ignoreXattrListError)
}

return s, ctx, wg
@ -9,7 +9,7 @@ import (

"github.com/google/go-cmp/cmp"
"github.com/restic/restic/internal/fs"
restictest "github.com/restic/restic/internal/test"
rtest "github.com/restic/restic/internal/test"
)

func TestScanner(t *testing.T) {

@ -81,10 +81,10 @@ func TestScanner(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

tempdir := restictest.TempDir(t)
tempdir := rtest.TempDir(t)
TestCreateFiles(t, tempdir, test.src)

back := restictest.Chdir(t, tempdir)
back := rtest.Chdir(t, tempdir)
defer back()

cur, err := os.Getwd()

@ -216,10 +216,10 @@ func TestScannerError(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

tempdir := restictest.TempDir(t)
tempdir := rtest.TempDir(t)
TestCreateFiles(t, tempdir, test.src)

back := restictest.Chdir(t, tempdir)
back := rtest.Chdir(t, tempdir)
defer back()

cur, err := os.Getwd()

@ -288,10 +288,10 @@ func TestScannerCancel(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

tempdir := restictest.TempDir(t)
tempdir := rtest.TempDir(t)
TestCreateFiles(t, tempdir, src)

back := restictest.Chdir(t, tempdir)
back := rtest.Chdir(t, tempdir)
defer back()

cur, err := os.Getwd()
@ -6,6 +6,7 @@ import (
"path"
"path/filepath"
"runtime"
"sort"
"strings"
"testing"
"time"

@ -31,7 +32,7 @@ func TestSnapshot(t testing.TB, repo restic.Repository, path string, parent *res
}
opts.ParentSnapshot = sn
}
sn, _, err := arch.Snapshot(context.TODO(), []string{path}, opts)
sn, _, _, err := arch.Snapshot(context.TODO(), []string{path}, opts)
if err != nil {
t.Fatal(err)
}

@ -63,11 +64,29 @@ func (s TestSymlink) String() string {
return "<Symlink>"
}

// TestHardlink describes a hardlink created for a test.
type TestHardlink struct {
Target string
}

func (s TestHardlink) String() string {
return "<Hardlink>"
}

// TestCreateFiles creates a directory structure described by dir at target,
// which must already exist. On Windows, symlinks aren't created.
func TestCreateFiles(t testing.TB, target string, dir TestDir) {
t.Helper()
for name, item := range dir {

// ensure a stable order such that it can be guaranteed that a hardlink target already exists
var names []string
for name := range dir {
names = append(names, name)
}
sort.Strings(names)

for _, name := range names {
item := dir[name]
targetPath := filepath.Join(target, name)

switch it := item.(type) {

@ -81,6 +100,11 @@ func TestCreateFiles(t testing.TB, target string, dir TestDir) {
if err != nil {
t.Fatal(err)
}
case TestHardlink:
err := fs.Link(filepath.Join(target, filepath.FromSlash(it.Target)), targetPath)
if err != nil {
t.Fatal(err)
}
case TestDir:
err := fs.Mkdir(targetPath, 0755)
if err != nil {
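TestCreateFiles now materializes entries in sorted-name order so a hardlink's target is guaranteed to exist before the link pointing at it; plain Go map iteration order is randomized, which would otherwise make the test flaky. The same sort-then-iterate pattern in isolation (data here is illustrative only):

package main

import (
	"fmt"
	"sort"
)

func main() {
	dir := map[string]string{"b-link": "hardlink", "a-target": "file"}

	// map iteration order is randomized in Go; collect and sort the keys
	// for a stable pass so "a-target" is always created before "b-link"
	names := make([]string, 0, len(dir))
	for name := range dir {
		names = append(names, name)
	}
	sort.Strings(names)

	for _, name := range names {
		fmt.Println("create", name, "as", dir[name])
	}
}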
@ -11,7 +11,7 @@ import (
"github.com/google/go-cmp/cmp"
"github.com/restic/restic/internal/fs"
"github.com/restic/restic/internal/repository"
restictest "github.com/restic/restic/internal/test"
rtest "github.com/restic/restic/internal/test"
)

// MockT passes through all logging functions from T, but catches Fail(),

@ -101,7 +101,7 @@ func TestTestCreateFiles(t *testing.T) {
}

for i, test := range tests {
tempdir := restictest.TempDir(t)
tempdir := rtest.TempDir(t)

t.Run("", func(t *testing.T) {
tempdir := filepath.Join(tempdir, fmt.Sprintf("test-%d", i))

@ -191,7 +191,7 @@ func TestTestWalkFiles(t *testing.T) {

for _, test := range tests {
t.Run("", func(t *testing.T) {
tempdir := restictest.TempDir(t)
tempdir := rtest.TempDir(t)

got := make(map[string]string)

@ -321,7 +321,7 @@ func TestTestEnsureFiles(t *testing.T) {

for _, test := range tests {
t.Run("", func(t *testing.T) {
tempdir := restictest.TempDir(t)
tempdir := rtest.TempDir(t)
createFilesAt(t, tempdir, test.files)

subtestT := testing.TB(t)

@ -452,7 +452,7 @@ func TestTestEnsureSnapshot(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

tempdir := restictest.TempDir(t)
tempdir := rtest.TempDir(t)

targetDir := filepath.Join(tempdir, "target")
err := fs.Mkdir(targetDir, 0700)

@ -462,7 +462,7 @@ func TestTestEnsureSnapshot(t *testing.T) {

createFilesAt(t, targetDir, test.files)

back := restictest.Chdir(t, tempdir)
back := rtest.Chdir(t, tempdir)
defer back()

repo := repository.TestRepository(t)

@ -473,7 +473,7 @@ func TestTestEnsureSnapshot(t *testing.T) {
Hostname: "localhost",
Tags: []string{"test"},
}
_, id, err := arch.Snapshot(ctx, []string{"."}, opts)
_, id, _, err := arch.Snapshot(ctx, []string{"."}, opts)
if err != nil {
t.Fatal(err)
}
@ -90,6 +90,10 @@ func (s *TreeSaver) save(ctx context.Context, job *saveTreeJob) (*restic.Node, I
// return the error if it wasn't ignored
if fnr.err != nil {
debug.Log("err for %v: %v", fnr.snPath, fnr.err)
if fnr.err == context.Canceled {
return nil, stats, fnr.err
}

fnr.err = s.errFn(fnr.target, fnr.err)
if fnr.err == nil {
// ignore error
|
|||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/restic/restic/internal/fs"
|
||||
restictest "github.com/restic/restic/internal/test"
|
||||
rtest "github.com/restic/restic/internal/test"
|
||||
)
|
||||
|
||||
// debug.Log requires Tree.String.
|
||||
|
@ -439,10 +439,10 @@ func TestTree(t *testing.T) {
|
|||
t.Skip("skip test on unix")
|
||||
}
|
||||
|
||||
tempdir := restictest.TempDir(t)
|
||||
tempdir := rtest.TempDir(t)
|
||||
TestCreateFiles(t, tempdir, test.src)
|
||||
|
||||
back := restictest.Chdir(t, tempdir)
|
||||
back := rtest.Chdir(t, tempdir)
|
||||
defer back()
|
||||
|
||||
tree, err := NewTree(fs.Local{}, test.targets)
|
||||
|
|
|
@ -10,6 +10,7 @@ import (
"strings"
"time"

"github.com/peterbourgon/unixtransport"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
)

@ -82,6 +83,8 @@ func Transport(opts TransportOptions) (http.RoundTripper, error) {
TLSClientConfig: &tls.Config{},
}

unixtransport.Register(tr)

if opts.InsecureTLS {
tr.TLSClientConfig.InsecureSkipVerify = true
}
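unixtransport.Register installs handlers for the http+unix and https+unix URL schemes on the transport, which is what later makes rest:http+unix:// repository URLs work. A small illustrative client (the socket path is hypothetical):

package main

import (
	"fmt"
	"net/http"

	"github.com/peterbourgon/unixtransport"
)

func main() {
	tr := &http.Transport{}
	unixtransport.Register(tr) // adds the "http+unix" and "https+unix" schemes
	client := &http.Client{Transport: tr}

	// the socket path comes before the ':', the request path after it
	resp, err := client.Get("http+unix:///tmp/rest.socket:/restic-test/config")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}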
@ -10,6 +10,7 @@ import (
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/feature"
"github.com/restic/restic/internal/fs"
"github.com/restic/restic/internal/restic"
)

@ -93,6 +94,8 @@ func hasBackendFile(ctx context.Context, fs Filesystem, dir string) (bool, error
// cannot be detected automatically.
var ErrLayoutDetectionFailed = errors.New("auto-detecting the filesystem layout failed")

var ErrLegacyLayoutFound = errors.New("detected legacy S3 layout. Use `RESTIC_FEATURES=deprecate-s3-legacy-layout=false restic migrate s3_layout` to migrate your repository")

// DetectLayout tries to find out which layout is used in a local (or sftp)
// filesystem at the given path. If repo is nil, an instance of LocalFilesystem
// is used.

@ -123,6 +126,10 @@ func DetectLayout(ctx context.Context, repo Filesystem, dir string) (Layout, err
}

if foundKeyFile && !foundKeysFile {
if feature.Flag.Enabled(feature.DeprecateS3LegacyLayout) {
return nil, ErrLegacyLayoutFound
}

debug.Log("found s3 layout at %v", dir)
return &S3LegacyLayout{
Path: dir,

@ -145,6 +152,10 @@ func ParseLayout(ctx context.Context, repo Filesystem, layout, defaultLayout, pa
Join: repo.Join,
}
case "s3legacy":
if feature.Flag.Enabled(feature.DeprecateS3LegacyLayout) {
return nil, ErrLegacyLayoutFound
}

l = &S3LegacyLayout{
Path: path,
Join: repo.Join,
@ -10,6 +10,7 @@ import (
"testing"

"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/feature"
rtest "github.com/restic/restic/internal/test"
)

@ -352,6 +353,7 @@ func TestS3LegacyLayout(t *testing.T) {
}

func TestDetectLayout(t *testing.T) {
defer feature.TestSetFlag(t, feature.Flag, feature.DeprecateS3LegacyLayout, false)()
path := rtest.TempDir(t)

var tests = []struct {

@ -389,6 +391,7 @@ func TestDetectLayout(t *testing.T) {
}

func TestParseLayout(t *testing.T) {
defer feature.TestSetFlag(t, feature.Flag, feature.DeprecateS3LegacyLayout, false)()
path := rtest.TempDir(t)

var tests = []struct {
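Both tests pin DeprecateS3LegacyLayout to false for their duration with feature.TestSetFlag, whose return value is a restore function invoked by the trailing () in the defer. The helper's implementation is not part of this diff; a self-contained sketch of the set-and-restore pattern, with stand-in FlagName/FlagSet types:

package feature_sketch

import "testing"

// FlagName and FlagSet are minimal stand-ins; the real feature package is richer.
type FlagName string

type FlagSet struct{ values map[FlagName]bool }

func (f *FlagSet) Enabled(name FlagName) bool { return f.values[name] }

func (f *FlagSet) set(name FlagName, v bool) {
	if f.values == nil {
		f.values = map[FlagName]bool{}
	}
	f.values[name] = v
}

// TestSetFlag sets a flag for the duration of a test and returns a restore
// function, matching the `defer feature.TestSetFlag(...)()` call shape above.
func TestSetFlag(t testing.TB, flags *FlagSet, name FlagName, value bool) func() {
	t.Helper()
	old := flags.Enabled(name)
	flags.set(name, value)
	return func() { flags.set(name, old) }
}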
@ -10,7 +10,7 @@ import (
// Config holds all information needed to open a local repository.
type Config struct {
Path string
Layout string `option:"layout" help:"use this backend directory layout (default: auto-detect)"`
Layout string `option:"layout" help:"use this backend directory layout (default: auto-detect) (deprecated)"`

Connections uint `option:"connections" help:"set a limit for the number of concurrent operations (default: 2)"`
}
@ -6,10 +6,12 @@ import (
"testing"

"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/feature"
rtest "github.com/restic/restic/internal/test"
)

func TestLayout(t *testing.T) {
defer feature.TestSetFlag(t, feature.Flag, feature.DeprecateS3LegacyLayout, false)()
path := rtest.TempDir(t)

var tests = []struct {
@ -4,6 +4,7 @@ import (
"bytes"
"context"
"encoding/base64"
"fmt"
"hash"
"io"
"net/http"

@ -41,7 +42,7 @@ func NewFactory() location.Factory {
)
}

var errNotFound = errors.New("not found")
var errNotFound = fmt.Errorf("not found")

const connectionCount = 2
@ -31,6 +31,13 @@ var configTests = []test.ConfigTestData[Config]{
Connections: 5,
},
},
{
S: "rest:http+unix:///tmp/rest.socket:/my_backup_repo/",
Cfg: Config{
URL: parseURL("http+unix:///tmp/rest.socket:/my_backup_repo/"),
Connections: 5,
},
},
}

func TestParseConfig(t *testing.T) {
@ -1,11 +1,18 @@
//go:build go1.20
// +build go1.20

package rest_test

import (
"bufio"
"context"
"net"
"fmt"
"net/url"
"os"
"os/exec"
"regexp"
"strings"
"syscall"
"testing"
"time"
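The new go1.20 build constraint exists because the rewritten harness below relies on exec.Cmd.Cancel, a field added in Go 1.20 that lets a command customize how it is stopped when its context fires (by default the process is killed). In isolation the mechanism looks like this, with sleep as a stand-in for rest-server (unix-only, since it sends SIGTERM):

package main

import (
	"context"
	"os/exec"
	"syscall"
	"time"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	cmd := exec.CommandContext(ctx, "sleep", "10")
	// Cancel (new in Go 1.20) replaces the default Kill with a graceful stop.
	cmd.Cancel = func() error {
		return cmd.Process.Signal(syscall.SIGTERM)
	}
	_ = cmd.Run() // returns once the context fires and the process exits
}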
@@ -14,54 +21,133 @@ import (
	rtest "github.com/restic/restic/internal/test"
)

func runRESTServer(ctx context.Context, t testing.TB, dir string) (*url.URL, func()) {
var (
	serverStartedRE = regexp.MustCompile("^start server on (.*)$")
)

func runRESTServer(ctx context.Context, t testing.TB, dir, reqListenAddr string) (*url.URL, func()) {
	srv, err := exec.LookPath("rest-server")
	if err != nil {
		t.Skip(err)
	}

	cmd := exec.CommandContext(ctx, srv, "--no-auth", "--path", dir)
	// create our own context, so that our cleanup can cancel and wait for completion;
	// this will ensure any open ports, open unix sockets etc. are properly closed
	processCtx, cancel := context.WithCancel(ctx)
	cmd := exec.CommandContext(processCtx, srv, "--no-auth", "--path", dir, "--listen", reqListenAddr)

	// this cancel func is called when the process context is done
	cmd.Cancel = func() error {
		// we execute in a Go-routine as we know the caller will
		// be waiting on a .Wait() regardless
		go func() {
			// try to send a graceful termination signal
			if cmd.Process.Signal(syscall.SIGTERM) == nil {
				// if we succeed, then wait a few seconds
				time.Sleep(2 * time.Second)
			}
			// and then make sure it's killed either way, ignoring any error code
			_ = cmd.Process.Kill()
		}()
		return nil
	}

	// this is the cleanup function that we return to the caller,
	// which will cancel our process context, and then wait for it to finish
	cleanup := func() {
		cancel()
		_ = cmd.Wait()
	}

	// but in case we don't finish this method, e.g. by calling t.Fatal(),
	// we also defer a call to clean it up ourselves, guarded by a flag to
	// indicate that we returned the function to the caller to deal with.
	callerWillCleanUp := false
	defer func() {
		if !callerWillCleanUp {
			cleanup()
		}
	}()

	// send stdout to our std out
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stdout
	if err := cmd.Start(); err != nil {
		t.Fatal(err)
	}

	// wait until the TCP port is reachable
	var success bool
	for i := 0; i < 10; i++ {
		time.Sleep(200 * time.Millisecond)

		c, err := net.Dial("tcp", "localhost:8000")
		if err != nil {
			continue
		}

		success = true
		if err := c.Close(); err != nil {
			t.Fatal(err)
		}
	}

	if !success {
		t.Fatal("unable to connect to rest server")
		return nil, nil
	}

	url, err := url.Parse("http://localhost:8000/restic-test/")
	// capture stderr with a pipe, as we want to examine this output
	// to determine when the server is started and listening.
	cmdErr, err := cmd.StderrPipe()
	if err != nil {
		t.Fatal(err)
	}

	cleanup := func() {
		if err := cmd.Process.Kill(); err != nil {
			t.Fatal(err)
		}

		// ignore errors, we've killed the process
		_ = cmd.Wait()
	// start the rest-server
	if err := cmd.Start(); err != nil {
		t.Fatal(err)
	}

	// create a channel to receive the actual listen address on
	listenAddrCh := make(chan string)
	go func() {
		defer close(listenAddrCh)
		matched := false
		br := bufio.NewReader(cmdErr)
		for {
			line, err := br.ReadString('\n')
			if err != nil {
				// we ignore errors, as code that relies on this
				// will happily fail via timeout and empty closed
				// channel.
				return
			}

			line = strings.Trim(line, "\r\n")
			if !matched {
				// look for the server started message, and return the address
				// that it's listening on
				matchedServerListen := serverStartedRE.FindSubmatch([]byte(line))
				if len(matchedServerListen) == 2 {
					listenAddrCh <- string(matchedServerListen[1])
					matched = true
				}
			}
			fmt.Fprintln(os.Stdout, line) // print all output to console
		}
	}()

	// wait for us to get an address,
	// or the parent context to cancel,
	// or for us to timeout
	var actualListenAddr string
	select {
	case <-processCtx.Done():
		t.Fatal(context.Canceled)
	case <-time.NewTimer(2 * time.Second).C:
		t.Fatal(context.DeadlineExceeded)
	case a, ok := <-listenAddrCh:
		if !ok {
			t.Fatal(context.Canceled)
		}
		actualListenAddr = a
	}

	// this translates the address that the server is listening on
	// to a URL suitable for us to connect to
	var addrToConnectTo string
	if strings.HasPrefix(reqListenAddr, "unix:") {
		addrToConnectTo = fmt.Sprintf("http+unix://%s:/restic-test/", actualListenAddr)
	} else {
		// while we may listen on 0.0.0.0, we connect to localhost
		addrToConnectTo = fmt.Sprintf("http://%s/restic-test/", strings.Replace(actualListenAddr, "0.0.0.0", "localhost", 1))
	}

	// parse to a URL
	url, err := url.Parse(addrToConnectTo)
	if err != nil {
		t.Fatal(err)
	}

	// indicate that we've completed successfully, and that the caller
	// is responsible for calling cleanup
	callerWillCleanUp = true
	return url, cleanup
}
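The rewritten helper leans on `cmd.Cancel`, added in Go 1.20 (hence the build tag): when the command's context is cancelled, the callback tries SIGTERM first and falls back to Kill, giving rest-server a chance to remove its unix socket file. Distilled into a standalone sketch (unix-oriented; the two-second grace period mirrors the code above):

package main

import (
	"context"
	"os/exec"
	"syscall"
	"time"
)

// startWithGracefulStop starts name with a context-driven shutdown: on cancel,
// send SIGTERM, wait briefly, then hard-kill whatever is left.
func startWithGracefulStop(ctx context.Context, name string, args ...string) (*exec.Cmd, error) {
	cmd := exec.CommandContext(ctx, name, args...)
	cmd.Cancel = func() error {
		go func() {
			if cmd.Process.Signal(syscall.SIGTERM) == nil {
				time.Sleep(2 * time.Second) // grace period before the hard kill
			}
			_ = cmd.Process.Kill()
		}()
		return nil
	}
	return cmd, cmd.Start()
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cmd, err := startWithGracefulStop(ctx, "sleep", "60")
	if err != nil {
		panic(err)
	}
	cancel()       // triggers cmd.Cancel
	_ = cmd.Wait() // reap the child; error from the kill is expected
}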
@@ -91,7 +177,7 @@ func TestBackendREST(t *testing.T) {
	defer cancel()

	dir := rtest.TempDir(t)
	serverURL, cleanup := runRESTServer(ctx, t, dir)
	serverURL, cleanup := runRESTServer(ctx, t, dir, ":0")
	defer cleanup()

	newTestSuite(serverURL, false).RunTests(t)

@@ -116,7 +202,7 @@ func BenchmarkBackendREST(t *testing.B) {
	defer cancel()

	dir := rtest.TempDir(t)
	serverURL, cleanup := runRESTServer(ctx, t, dir)
	serverURL, cleanup := runRESTServer(ctx, t, dir, ":0")
	defer cleanup()

	newTestSuite(serverURL, false).RunBenchmarks(t)
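Passing `:0` as the listen address asks the operating system for any free port, which is why the helper now has to scrape the real address from the server's log instead of assuming localhost:8000. The same mechanism in miniature:

package main

import (
	"fmt"
	"net"
)

func main() {
	// ":0" lets the kernel pick a free port; the caller must ask for the
	// actual address afterwards, just as the test scrapes it from rest-server.
	ln, err := net.Listen("tcp", ":0")
	if err != nil {
		panic(err)
	}
	defer ln.Close()
	fmt.Println("actually listening on", ln.Addr()) // e.g. [::]:49152
}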
30
internal/backend/rest/rest_unix_test.go
Normal file
@@ -0,0 +1,30 @@
//go:build !windows && go1.20
// +build !windows,go1.20

package rest_test

import (
	"context"
	"fmt"
	"path"
	"testing"

	rtest "github.com/restic/restic/internal/test"
)

func TestBackendRESTWithUnixSocket(t *testing.T) {
	defer func() {
		if t.Skipped() {
			rtest.SkipDisallowed(t, "restic/backend/rest.TestBackendREST")
		}
	}()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	dir := rtest.TempDir(t)
	serverURL, cleanup := runRESTServer(ctx, t, path.Join(dir, "data"), fmt.Sprintf("unix:%s", path.Join(dir, "sock")))
	defer cleanup()

	newTestSuite(serverURL, false).RunTests(t)
}
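For the unix-socket test to pass, the HTTP client must dial the socket file rather than a TCP host. A standard-library sketch of that pattern; whether the rest backend wires it up exactly this way is an assumption on our part:

package main

import (
	"context"
	"net"
	"net/http"
)

// clientForSocket returns an HTTP client whose transport dials the given
// unix socket, ignoring the host portion of request URLs.
func clientForSocket(socketPath string) *http.Client {
	return &http.Client{
		Transport: &http.Transport{
			DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
				var d net.Dialer
				return d.DialContext(ctx, "unix", socketPath)
			},
		},
	}
}

func main() {
	c := clientForSocket("/tmp/rest.sock")
	_, _ = c.Get("http://unix/restic-test/config") // host is ignored by the dialer
}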
Some files were not shown because too many files have changed in this diff.