Merge pull request #3704 from MichaelEischer/compression-migrations

Support migration to repository format with compression
Alexander Neumann 2022-05-29 15:52:21 +02:00 committed by GitHub
commit d2c5843c68
26 changed files with 439 additions and 38 deletions


@@ -13,7 +13,12 @@ The new format version has not received much testing yet. Do not rely on it as
 your only backup copy! Please run `check` in regular intervals to detect any
 problems.
 
-Upgrading in place is not yet supported. As a workaround, first create a new
+To upgrade in place run `migrate upgrade_repo_v2` followed by `prune`. See the
+documentation for more details. The migration checks the repository integrity
+and upgrades the repository format, but will not change any data. Afterwards,
+prune will rewrite the metadata to make use of compression.
+
+As an alternative you can use the `copy` command to migrate snapshots: first create a new
 repository using `init --repository-version 2 --copy-chunker-params --repo2 path/to/old/repo`.
 Then use the `copy` command to copy all snapshots to the new repository.
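
For illustration, the in-place path described above boils down to two commands (a sketch reusing the example repository path from the docs; adjust the repository flag to your setup):

    $ restic -r /srv/restic-repo migrate upgrade_repo_v2
    $ restic -r /srv/restic-repo prune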


@@ -219,15 +219,20 @@ func runCheck(opts CheckOptions, gopts GlobalOptions, args []string) error {
     Verbosef("load indexes\n")
     hints, errs := chkr.LoadIndex(gopts.ctx)
 
-    dupFound := false
+    errorsFound := false
+    suggestIndexRebuild := false
     for _, hint := range hints {
-        Printf("%v\n", hint)
-        if _, ok := hint.(checker.ErrDuplicatePacks); ok {
-            dupFound = true
+        switch hint.(type) {
+        case *checker.ErrDuplicatePacks, *checker.ErrOldIndexFormat:
+            Printf("%v\n", hint)
+            suggestIndexRebuild = true
+        default:
+            Warnf("error: %v\n", hint)
+            errorsFound = true
         }
     }
 
-    if dupFound {
+    if suggestIndexRebuild {
         Printf("This is non-critical, you can run `restic rebuild-index' to correct this\n")
     }
@@ -238,7 +243,6 @@ func runCheck(opts CheckOptions, gopts GlobalOptions, args []string) error {
         return errors.Fatal("LoadIndex returned errors")
     }
 
-    errorsFound := false
     orphanedPacks := 0
     errChan := make(chan error)
@@ -252,11 +256,11 @@ func runCheck(opts CheckOptions, gopts GlobalOptions, args []string) error {
             continue
         }
         errorsFound = true
-        Warnf("%v\n", err)
+        Warnf("error: %v\n", err)
     }
 
     if orphanedPacks > 0 {
-        Verbosef("%d additional files were found in the repo, which likely contain duplicate data.\nYou can run `restic prune` to correct this.\n", orphanedPacks)
+        Verbosef("%d additional files were found in the repo, which likely contain duplicate data.\nThis is non-critical, you can run `restic prune` to correct this.\n", orphanedPacks)
     }
 
     Verbosef("check snapshots, trees and blobs\n")
@@ -273,7 +277,7 @@ func runCheck(opts CheckOptions, gopts GlobalOptions, args []string) error {
     for err := range errChan {
         errorsFound = true
-        if e, ok := err.(checker.TreeError); ok {
+        if e, ok := err.(*checker.TreeError); ok {
             Warnf("error for tree %v:\n", e.ID.Str())
             for _, treeErr := range e.Errors {
                 Warnf("  %v\n", treeErr)


@@ -8,11 +8,12 @@ import (
 )
 
 var cmdMigrate = &cobra.Command{
-    Use:   "migrate [flags] [name]",
+    Use:   "migrate [flags] [migration name] [...]",
     Short: "Apply migrations",
     Long: `
-The "migrate" command applies migrations to a repository. When no migration
-name is explicitly given, a list of migrations that can be applied is printed.
+The "migrate" command checks which migrations can be applied for a repository
+and prints a list with available migration names. If one or more migration
+names are specified, these migrations are applied.
 
 EXIT STATUS
 ===========
@@ -41,6 +42,8 @@ func init() {
 
 func checkMigrations(opts MigrateOptions, gopts GlobalOptions, repo restic.Repository) error {
     ctx := gopts.ctx
     Printf("available migrations:\n")
+    found := false
+
     for _, m := range migrations.All {
         ok, err := m.Check(ctx, repo)
         if err != nil {
@@ -48,10 +51,15 @@ func checkMigrations(opts MigrateOptions, gopts GlobalOptions, repo restic.Repos
         }
 
         if ok {
-            Printf("  %v: %v\n", m.Name(), m.Desc())
+            Printf("  %v\t%v\n", m.Name(), m.Desc())
+            found = true
         }
     }
 
+    if !found {
+        Printf("no migrations found")
+    }
+
     return nil
 }
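
Pieced together from the Printf calls above and the migration added in this PR, listing migrations on a version 1 repository would print roughly the following (illustrative, not captured output):

    available migrations:
      upgrade_repo_v2    upgrade a repository to version 2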
@@ -76,6 +84,20 @@ func applyMigrations(opts MigrateOptions, gopts GlobalOptions, repo restic.Repos
             Warnf("check for migration %v failed, continuing anyway\n", m.Name())
         }
 
+        repoCheckOpts := m.RepoCheckOptions()
+        if repoCheckOpts != nil {
+            Printf("checking repository integrity...\n")
+
+            checkOptions := CheckOptions{}
+            checkGopts := gopts
+            // the repository is already locked
+            checkGopts.NoLock = true
+
+            err = runCheck(checkOptions, checkGopts, []string{})
+            if err != nil {
+                return err
+            }
+        }
+
         Printf("applying migration %v...\n", m.Name())
         if err = m.Apply(ctx, repo); err != nil {
             Warnf("migration %v failed: %v\n", m.Name(), err)


@@ -51,6 +51,7 @@ type PruneOptions struct {
     MaxRepackBytes     uint64
     RepackCachableOnly bool
+    RepackUncompressed bool
 }
 
 var pruneOptions PruneOptions
@@ -68,6 +69,7 @@ func addPruneOptions(c *cobra.Command) {
     f.StringVar(&pruneOptions.MaxUnused, "max-unused", "5%", "tolerate given `limit` of unused data (absolute value in bytes with suffixes k/K, m/M, g/G, t/T, a value in % or the word 'unlimited')")
     f.StringVar(&pruneOptions.MaxRepackSize, "max-repack-size", "", "maximum `size` to repack (allowed suffixes: k/K, m/M, g/G, t/T)")
     f.BoolVar(&pruneOptions.RepackCachableOnly, "repack-cacheable-only", false, "only repack packs which are cacheable")
+    f.BoolVar(&pruneOptions.RepackUncompressed, "repack-uncompressed", false, "repack all uncompressed data")
 }
 
 func verifyPruneOptions(opts *PruneOptions) error {
@@ -135,6 +137,10 @@ func runPrune(opts PruneOptions, gopts GlobalOptions) error {
         return err
     }
 
+    if opts.RepackUncompressed && gopts.Compression == repository.CompressionOff {
+        return errors.Fatal("disabled compression and `--repack-uncompressed` are mutually exclusive")
+    }
+
     repo, err := OpenRepository(gopts)
     if err != nil {
         return err
@@ -144,6 +150,10 @@ func runPrune(opts PruneOptions, gopts GlobalOptions) error {
         return errors.Fatal("prune requires a backend connection limit of at least two")
     }
 
+    if repo.Config().Version < 2 && opts.RepackUncompressed {
+        return errors.Fatal("compression requires at least repository format version 2")
+    }
+
     if opts.UnsafeNoSpaceRecovery != "" {
         repoID := repo.Config().ID
         if opts.UnsafeNoSpaceRecovery != repoID {
@@ -191,6 +201,7 @@ type packInfo struct {
     usedSize     uint64
     unusedSize   uint64
     tpe          restic.BlobType
+    uncompressed bool
 }
 
 type packInfoWithID struct {
@@ -299,6 +310,9 @@ func prune(opts PruneOptions, gopts GlobalOptions, repo restic.Repository, usedB
             ip.unusedSize += size
             ip.unusedBlobs++
         }
+        if !blob.IsCompressed() {
+            ip.uncompressed = true
+        }
         // update indexPack
         indexPack[blob.PackID] = ip
     }
@@ -318,6 +332,8 @@ func prune(opts PruneOptions, gopts GlobalOptions, repo restic.Repository, usedB
         }
     }
 
+    repoVersion := repo.Config().Version
+
     // loop over all packs and decide what to do
     bar := newProgressMax(!gopts.Quiet, uint64(len(indexPack)), "packs processed")
     err := repo.List(ctx, restic.PackFile, func(id restic.ID, packSize int64) error {
@@ -350,6 +366,15 @@ func prune(opts PruneOptions, gopts GlobalOptions, repo restic.Repository, usedB
             stats.packs.partlyUsed++
         }
 
+        mustCompress := false
+        if repoVersion >= 2 {
+            // repo v2: always repack tree blobs if uncompressed
+            // compress data blobs if requested
+            mustCompress = (p.tpe == restic.TreeBlob || opts.RepackUncompressed) && p.uncompressed
+        }
+        // reuse the uncompressed field as a flag that the pack must be compressed when repacked
+        p.uncompressed = mustCompress
+
         // decide what to do
         switch {
         case p.usedBlobs == 0 && p.duplicateBlobs == 0:
@@ -362,7 +387,7 @@ func prune(opts PruneOptions, gopts GlobalOptions, repo restic.Repository, usedB
             // if this is a data pack and --repack-cacheable-only is set => keep pack!
             keep(p)
 
-        case p.unusedBlobs == 0 && p.duplicateBlobs == 0 && p.tpe != restic.InvalidBlob:
+        case p.unusedBlobs == 0 && p.duplicateBlobs == 0 && p.tpe != restic.InvalidBlob && !mustCompress:
             // All blobs in pack are used and not duplicates/mixed => keep pack!
             keep(p)
@@ -447,8 +472,8 @@ func prune(opts PruneOptions, gopts GlobalOptions, repo restic.Repository, usedB
         case reachedRepackSize:
             keep(p.packInfo)
 
-        case p.duplicateBlobs > 0, p.tpe != restic.DataBlob:
-            // repacking duplicates/non-data is only limited by repackSize
+        case p.duplicateBlobs > 0, p.tpe != restic.DataBlob, p.uncompressed:
+            // repacking duplicates/non-data/uncompressed-trees is only limited by repackSize
             repack(p.ID, p.packInfo)
 
         case reachedUnusedSizeAfter:


@@ -40,7 +40,17 @@ options exist:
   be used to explicitly set the version for the new repository. By default,
   the current stable version is used. Have a look at the `design documentation
   <https://github.com/restic/restic/blob/master/doc/design.rst>`__ for
-  details.
+  details. The alias ``latest`` will always point to the latest repository version.
+
+The table below shows which restic version is required to use a certain
+repository version and the major new features each version introduces.
+
++--------------------+------------------------+---------------------+
+| Repository version | Minimum restic version | Major new features  |
++====================+========================+=====================+
+| ``1``              | any version            |                     |
++--------------------+------------------------+---------------------+
+| ``2``              | >= 0.14.0              | Compression support |
++--------------------+------------------------+---------------------+
 
 Local
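
For illustration, creating a repository with an explicit format version would look like this (a sketch reusing the example repository path from these docs):

    $ restic -r /srv/restic-repo init --repository-version 2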


@@ -298,3 +298,26 @@ a file size value the following command may be used:
 
     $ restic -r /srv/restic-repo check --read-data-subset=50M
     $ restic -r /srv/restic-repo check --read-data-subset=10G
+
+Upgrading the repository format version
+=======================================
+
+Repositories created using earlier restic versions use an older repository
+format version and have to be upgraded to allow using all new features.
+Upgrading must be done explicitly, as a newer repository version increases the
+minimum restic version required to access the repository. For example,
+repository format version 2 is only readable using restic 0.14.0 or newer.
+
+Upgrading to repo version 2 is a two-step process: first run
+``migrate upgrade_repo_v2``, which will check the repository integrity and
+then upgrade the repository version. Repository problems must be corrected
+before the migration will be possible. After the migration is complete, run
+``prune`` to compress the repository metadata. To limit the amount of data
+rewritten at once, you can use the ``prune --max-repack-size size``
+parameter; see :ref:`customize-pruning` for more details.
+
+File contents stored in the repository will not be rewritten; data from new
+backups will be compressed. Over time more and more of the repository will
+be compressed. To speed up this process and compress all data that is not
+yet compressed, you can run ``prune --repack-uncompressed``.
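
Concretely, the two-step upgrade described above might look as follows (illustrative commands; the ``--max-repack-size`` limit is optional and its value here is arbitrary):

    $ restic -r /srv/restic-repo migrate upgrade_repo_v2
    $ restic -r /srv/restic-repo prune --max-repack-size 50G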


@@ -388,6 +388,8 @@ the specified duration: if ``forget --keep-within 7d`` is run 8 days after the
 last good snapshot, then the attacker can still use that opportunity to remove
 all legitimate snapshots.
 
+.. _customize-pruning:
+
 Customize pruning
 *****************


@@ -125,6 +125,11 @@ func (be *Backend) Hasher() hash.Hash {
     return md5.New()
 }
 
+// HasAtomicReplace returns whether Save() can atomically replace files
+func (be *Backend) HasAtomicReplace() bool {
+    return true
+}
+
 // Path returns the path in the bucket that is used for this backend.
 func (be *Backend) Path() string {
     return be.prefix


@@ -147,6 +147,11 @@ func (be *b2Backend) Hasher() hash.Hash {
     return nil
 }
 
+// HasAtomicReplace returns whether Save() can atomically replace files
+func (be *b2Backend) HasAtomicReplace() bool {
+    return true
+}
+
 // IsNotExist returns true if the error is caused by a non-existing file.
 func (be *b2Backend) IsNotExist(err error) bool {
     return b2.IsNotExist(errors.Cause(err))


@@ -67,6 +67,10 @@ func (be *Backend) Hasher() hash.Hash {
     return be.b.Hasher()
 }
 
+func (be *Backend) HasAtomicReplace() bool {
+    return be.b.HasAtomicReplace()
+}
+
 func (be *Backend) IsNotExist(err error) bool {
     return be.b.IsNotExist(err)
 }


@@ -201,6 +201,11 @@ func (be *Backend) Hasher() hash.Hash {
     return md5.New()
 }
 
+// HasAtomicReplace returns whether Save() can atomically replace files
+func (be *Backend) HasAtomicReplace() bool {
+    return true
+}
+
 // Path returns the path in the bucket that is used for this backend.
 func (be *Backend) Path() string {
     return be.prefix


@@ -102,6 +102,11 @@ func (b *Local) Hasher() hash.Hash {
     return nil
 }
 
+// HasAtomicReplace returns whether Save() can atomically replace files
+func (b *Local) HasAtomicReplace() bool {
+    return true
+}
+
 // IsNotExist returns true if the error is caused by a non existing file.
 func (b *Local) IsNotExist(err error) bool {
     return errors.Is(err, os.ErrNotExist)


@@ -268,6 +268,11 @@ func (be *MemoryBackend) Hasher() hash.Hash {
     return md5.New()
 }
 
+// HasAtomicReplace returns whether Save() can atomically replace files
+func (be *MemoryBackend) HasAtomicReplace() bool {
+    return false
+}
+
 // Delete removes all data in the backend.
 func (be *MemoryBackend) Delete(ctx context.Context) error {
     be.m.Lock()


@@ -121,6 +121,12 @@ func (b *Backend) Hasher() hash.Hash {
     return nil
 }
 
+// HasAtomicReplace returns whether Save() can atomically replace files
+func (b *Backend) HasAtomicReplace() bool {
+    // rest-server prevents overwriting
+    return false
+}
+
 // Save stores data in the backend at the handle.
 func (b *Backend) Save(ctx context.Context, h restic.Handle, rd restic.RewindReader) error {
     if err := h.Valid(); err != nil {


@@ -269,6 +269,11 @@ func (be *Backend) Hasher() hash.Hash {
     return nil
 }
 
+// HasAtomicReplace returns whether Save() can atomically replace files
+func (be *Backend) HasAtomicReplace() bool {
+    return true
+}
+
 // Path returns the path in the bucket that is used for this backend.
 func (be *Backend) Path() string {
     return be.cfg.Prefix


@@ -267,6 +267,12 @@ func (r *SFTP) Hasher() hash.Hash {
     return nil
 }
 
+// HasAtomicReplace returns whether Save() can atomically replace files
+func (r *SFTP) HasAtomicReplace() bool {
+    // we use sftp's 'Rename()' in 'Save()' which does not allow overwriting
+    return false
+}
+
 // Join joins the given paths and cleans them afterwards. This always uses
 // forward slashes, which is required by sftp.
 func Join(parts ...string) string {


@@ -129,6 +129,11 @@ func (be *beSwift) Hasher() hash.Hash {
     return md5.New()
 }
 
+// HasAtomicReplace returns whether Save() can atomically replace files
+func (be *beSwift) HasAtomicReplace() bool {
+    return true
+}
+
 // Load runs fn with a reader that yields the contents of the file at h at the
 // given offset.
 func (be *beSwift) Load(ctx context.Context, h restic.Handle, length int, offset int64, fn func(rd io.Reader) error) error {


@@ -63,7 +63,7 @@ type ErrDuplicatePacks struct {
     Indexes restic.IDSet
 }
 
-func (e ErrDuplicatePacks) Error() string {
+func (e *ErrDuplicatePacks) Error() string {
     return fmt.Sprintf("pack %v contained in several indexes: %v", e.PackID.Str(), e.Indexes)
 }
@@ -73,7 +73,7 @@ type ErrOldIndexFormat struct {
     restic.ID
 }
 
-func (err ErrOldIndexFormat) Error() string {
+func (err *ErrOldIndexFormat) Error() string {
     return fmt.Sprintf("index %v has old format", err.ID.Str())
 }
@@ -93,7 +93,7 @@ func (c *Checker) LoadIndex(ctx context.Context) (hints []error, errs []error) {
         if oldFormat {
             debug.Log("index %v has old format", id.Str())
-            hints = append(hints, ErrOldIndexFormat{id})
+            hints = append(hints, &ErrOldIndexFormat{id})
         }
 
         err = errors.Wrapf(err, "error loading index %v", id.Str())
@@ -137,7 +137,7 @@ func (c *Checker) LoadIndex(ctx context.Context) (hints []error, errs []error) {
     for packID := range c.packs {
         debug.Log("  check pack %v: contained in %d indexes", packID, len(packToIndex[packID]))
         if len(packToIndex[packID]) > 1 {
-            hints = append(hints, ErrDuplicatePacks{
+            hints = append(hints, &ErrDuplicatePacks{
                 PackID:  packID,
                 Indexes: packToIndex[packID],
             })
@@ -257,7 +257,7 @@ type TreeError struct {
     Errors []error
 }
 
-func (e TreeError) Error() string {
+func (e *TreeError) Error() string {
     return fmt.Sprintf("tree %v: %v", e.ID.Str(), e.Errors)
 }
@@ -276,7 +276,7 @@ func (c *Checker) checkTreeWorker(ctx context.Context, trees <-chan restic.TreeI
         if len(errs) == 0 {
             continue
         }
-        treeError := TreeError{ID: job.ID, Errors: errs}
+        treeError := &TreeError{ID: job.ID, Errors: errs}
         select {
         case <-ctx.Done():
             return


@@ -289,7 +289,7 @@ func TestDuplicatePacksInIndex(t *testing.T) {
     found := false
     for _, hint := range hints {
-        if _, ok := hint.(checker.ErrDuplicatePacks); ok {
+        if _, ok := hint.(*checker.ErrDuplicatePacks); ok {
             found = true
         } else {
             t.Errorf("got unexpected hint: %v", hint)


@@ -6,11 +6,16 @@ import (
     "github.com/restic/restic/internal/restic"
 )
 
+type RepositoryCheckOptions struct {
+}
+
 // Migration implements a data migration.
 type Migration interface {
     // Check returns true if the migration can be applied to a repo.
     Check(context.Context, restic.Repository) (bool, error)
+    RepoCheckOptions() *RepositoryCheckOptions
+
     // Apply runs the migration.
     Apply(context.Context, restic.Repository) error


@@ -37,6 +37,10 @@ func (m *S3Layout) Check(ctx context.Context, repo restic.Repository) (bool, err
     return true, nil
 }
 
+func (m *S3Layout) RepoCheckOptions() *RepositoryCheckOptions {
+    return nil
+}
+
 func retry(max int, fail func(err error), f func() error) error {
     var err error
     for i := 0; i < max; i++ {


@@ -0,0 +1,126 @@
+package migrations
+
+import (
+    "context"
+    "fmt"
+    "io"
+    "io/ioutil"
+    "os"
+    "path/filepath"
+
+    "github.com/restic/restic/internal/restic"
+)
+
+func init() {
+    register(&UpgradeRepoV2{})
+}
+
+type UpgradeRepoV2Error struct {
+    UploadNewConfigError   error
+    ReuploadOldConfigError error
+
+    BackupFilePath string
+}
+
+func (err *UpgradeRepoV2Error) Error() string {
+    if err.ReuploadOldConfigError != nil {
+        return fmt.Sprintf("error uploading config (%v), re-uploading old config file failed as well (%v), but there is a backup of the config file in %v", err.UploadNewConfigError, err.ReuploadOldConfigError, err.BackupFilePath)
+    }
+
+    return fmt.Sprintf("error uploading config (%v), re-uploaded old config was successful, there is a backup of the config file in %v", err.UploadNewConfigError, err.BackupFilePath)
+}
+
+func (err *UpgradeRepoV2Error) Unwrap() error {
+    // consider the original upload error as the primary cause
+    return err.UploadNewConfigError
+}
+
+type UpgradeRepoV2 struct{}
+
+func (*UpgradeRepoV2) Name() string {
+    return "upgrade_repo_v2"
+}
+
+func (*UpgradeRepoV2) Desc() string {
+    return "upgrade a repository to version 2"
+}
+
+func (*UpgradeRepoV2) Check(ctx context.Context, repo restic.Repository) (bool, error) {
+    isV1 := repo.Config().Version == 1
+    return isV1, nil
+}
+
+func (*UpgradeRepoV2) RepoCheckOptions() *RepositoryCheckOptions {
+    return &RepositoryCheckOptions{}
+}
+
+func (*UpgradeRepoV2) upgrade(ctx context.Context, repo restic.Repository) error {
+    h := restic.Handle{Type: restic.ConfigFile}
+
+    if !repo.Backend().HasAtomicReplace() {
+        // remove the original file for backends which do not support atomic overwriting
+        err := repo.Backend().Remove(ctx, h)
+        if err != nil {
+            return fmt.Errorf("remove config failed: %w", err)
+        }
+    }
+
+    // upgrade config
+    cfg := repo.Config()
+    cfg.Version = 2
+
+    _, err := repo.SaveJSONUnpacked(ctx, restic.ConfigFile, cfg)
+    if err != nil {
+        return fmt.Errorf("save new config file failed: %w", err)
+    }
+
+    return nil
+}
+
+func (m *UpgradeRepoV2) Apply(ctx context.Context, repo restic.Repository) error {
+    tempdir, err := ioutil.TempDir("", "restic-migrate-upgrade-repo-v2-")
+    if err != nil {
+        return fmt.Errorf("create temp dir failed: %w", err)
+    }
+
+    h := restic.Handle{Type: restic.ConfigFile}
+
+    // read raw config file and save it to a temp dir, just in case
+    var rawConfigFile []byte
+    err = repo.Backend().Load(ctx, h, 0, 0, func(rd io.Reader) (err error) {
+        rawConfigFile, err = ioutil.ReadAll(rd)
+        return err
+    })
+    if err != nil {
+        return fmt.Errorf("load config file failed: %w", err)
+    }
+
+    backupFileName := filepath.Join(tempdir, "config")
+    err = ioutil.WriteFile(backupFileName, rawConfigFile, 0600)
+    if err != nil {
+        return fmt.Errorf("write config file backup to %v failed: %w", tempdir, err)
+    }
+
+    // run the upgrade
+    err = m.upgrade(ctx, repo)
+    if err != nil {
+        // build an error we can return to the caller
+        repoError := &UpgradeRepoV2Error{
+            UploadNewConfigError: err,
+            BackupFilePath:       backupFileName,
+        }
+
+        // try contingency methods, reupload the original file
+        _ = repo.Backend().Remove(ctx, h)
+        err = repo.Backend().Save(ctx, h, restic.NewByteReader(rawConfigFile, nil))
+        if err != nil {
+            repoError.ReuploadOldConfigError = err
+        }
+
+        return repoError
+    }
+
+    _ = os.Remove(backupFileName)
+    _ = os.Remove(tempdir)
+
+    return nil
+}


@@ -0,0 +1,112 @@
+package migrations
+
+import (
+    "context"
+    "os"
+    "path/filepath"
+    "sync"
+    "testing"
+
+    "github.com/restic/restic/internal/errors"
+    "github.com/restic/restic/internal/repository"
+    "github.com/restic/restic/internal/restic"
+    "github.com/restic/restic/internal/test"
+)
+
+func TestUpgradeRepoV2(t *testing.T) {
+    repo, cleanup := repository.TestRepositoryWithVersion(t, 1)
+    defer cleanup()
+    if repo.Config().Version != 1 {
+        t.Fatal("test repo has wrong version")
+    }
+
+    m := &UpgradeRepoV2{}
+
+    ok, err := m.Check(context.Background(), repo)
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    if !ok {
+        t.Fatal("migration check returned false")
+    }
+
+    err = m.Apply(context.Background(), repo)
+    if err != nil {
+        t.Fatal(err)
+    }
+}
+
+type failBackend struct {
+    restic.Backend
+
+    mu                        sync.Mutex
+    ConfigFileSavesUntilError uint
+}
+
+func (be *failBackend) Save(ctx context.Context, h restic.Handle, rd restic.RewindReader) error {
+    if h.Type != restic.ConfigFile {
+        return be.Backend.Save(ctx, h, rd)
+    }
+
+    be.mu.Lock()
+    if be.ConfigFileSavesUntilError == 0 {
+        be.mu.Unlock()
+        return errors.New("failure induced for testing")
+    }
+
+    be.ConfigFileSavesUntilError--
+    be.mu.Unlock()
+
+    return be.Backend.Save(ctx, h, rd)
+}
+
+func TestUpgradeRepoV2Failure(t *testing.T) {
+    be, cleanup := repository.TestBackend(t)
+    defer cleanup()
+
+    // wrap backend so that it fails upgrading the config after the initial write
+    be = &failBackend{
+        ConfigFileSavesUntilError: 1,
+        Backend:                   be,
+    }
+
+    repo, cleanup := repository.TestRepositoryWithBackend(t, be, 1)
+    defer cleanup()
+    if repo.Config().Version != 1 {
+        t.Fatal("test repo has wrong version")
+    }
+
+    m := &UpgradeRepoV2{}
+
+    ok, err := m.Check(context.Background(), repo)
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    if !ok {
+        t.Fatal("migration check returned false")
+    }
+
+    err = m.Apply(context.Background(), repo)
+    if err == nil {
+        t.Fatal("expected error returned from Apply(), got nil")
+    }
+
+    upgradeErr := err.(*UpgradeRepoV2Error)
+    if upgradeErr.UploadNewConfigError == nil {
+        t.Fatal("expected upload error, got nil")
+    }
+
+    if upgradeErr.ReuploadOldConfigError == nil {
+        t.Fatal("expected reupload error, got nil")
+    }
+
+    if upgradeErr.BackupFilePath == "" {
+        t.Fatal("no backup file path found")
+    }
+
+    test.OK(t, os.Remove(upgradeErr.BackupFilePath))
+    test.OK(t, os.Remove(filepath.Dir(upgradeErr.BackupFilePath)))
+}


@@ -11,18 +11,19 @@ import (
 // Backend implements a mock backend.
 type Backend struct {
     CloseFn            func() error
     IsNotExistFn       func(err error) bool
     SaveFn             func(ctx context.Context, h restic.Handle, rd restic.RewindReader) error
     OpenReaderFn       func(ctx context.Context, h restic.Handle, length int, offset int64) (io.ReadCloser, error)
     StatFn             func(ctx context.Context, h restic.Handle) (restic.FileInfo, error)
     ListFn             func(ctx context.Context, t restic.FileType, fn func(restic.FileInfo) error) error
     RemoveFn           func(ctx context.Context, h restic.Handle) error
     TestFn             func(ctx context.Context, h restic.Handle) (bool, error)
     DeleteFn           func(ctx context.Context) error
     ConnectionsFn      func() uint
     LocationFn         func() string
     HasherFn           func() hash.Hash
+    HasAtomicReplaceFn func() bool
 }
 
 // NewBackend returns new mock Backend instance
@@ -66,6 +67,14 @@ func (m *Backend) Hasher() hash.Hash {
     return m.HasherFn()
 }
 
+// HasAtomicReplace returns whether Save() can atomically replace files
+func (m *Backend) HasAtomicReplace() bool {
+    if m.HasAtomicReplaceFn == nil {
+        return false
+    }
+    return m.HasAtomicReplaceFn()
+}
+
 // IsNotExist returns true if the error is caused by a missing file.
 func (m *Backend) IsNotExist(err error) bool {
     if m.IsNotExistFn == nil {
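
A minimal sketch of how a test could exercise the new hook, assuming the mock package import and the surrounding test harness (both illustrative, not part of this diff):

    // illustrative only: make the mock report atomic-replace support
    be := mock.NewBackend()
    be.HasAtomicReplaceFn = func() bool { return true }
    // code under test, e.g. the UpgradeRepoV2 migration, will then skip
    // the Remove() fallback before saving the new config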


@@ -24,6 +24,9 @@ type Backend interface {
     // Hasher may return a hash function for calculating a content hash for the backend
     Hasher() hash.Hash
 
+    // HasAtomicReplace returns whether Save() can atomically replace files
+    HasAtomicReplace() bool
+
     // Test a boolean value whether a File with the name and type exists.
     Test(ctx context.Context, h Handle) (bool, error)


@@ -23,7 +23,7 @@ const MaxRepoVersion = 2
 
 // StableRepoVersion is the version that is written to the config when a repository
 // is newly created with Init().
-const StableRepoVersion = 1
+const StableRepoVersion = 2
 
 // JSONUnpackedLoader loads unpacked JSON.
 type JSONUnpackedLoader interface {