Merge pull request #5020 from MichaelEischer/remove-legacy-formats

Remove support for legacy index format and s3 layout

commit 3e0c081bed (34 changed files with 143 additions and 1095 deletions)
@@ -245,17 +245,12 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args
errorsFound := false
suggestIndexRebuild := false
suggestLegacyIndexRebuild := false
mixedFound := false
for _, hint := range hints {
switch hint.(type) {
case *checker.ErrDuplicatePacks:
term.Print(hint.Error())
suggestIndexRebuild = true
case *checker.ErrOldIndexFormat:
printer.E("error: %v\n", hint)
suggestLegacyIndexRebuild = true
errorsFound = true
case *checker.ErrMixedPack:
term.Print(hint.Error())
mixedFound = true

@@ -268,9 +263,6 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args
if suggestIndexRebuild {
term.Print("Duplicate packs are non-critical, you can run `restic repair index' to correct this.\n")
}
if suggestLegacyIndexRebuild {
printer.E("error: Found indexes using the legacy format, you must run `restic repair index' to correct this.\n")
}
if mixedFound {
term.Print("Mixed packs with tree and data blobs are non-critical, you can run `restic prune` to correct this.\n")
}

@@ -304,9 +296,6 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args
errorsFound = true
printer.E("%v\n", err)
}
} else if err == checker.ErrLegacyLayout {
errorsFound = true
printer.E("error: repository still uses the S3 legacy layout\nYou must run `restic migrate s3legacy` to correct this.\n")
} else {
errorsFound = true
printer.E("%v\n", err)
@@ -143,7 +143,7 @@ func printPacks(ctx context.Context, repo *repository.Repository, wr io.Writer)
}

func dumpIndexes(ctx context.Context, repo restic.ListerLoaderUnpacked, wr io.Writer) error {
return index.ForAllIndexes(ctx, repo, repo, func(id restic.ID, idx *index.Index, oldFormat bool, err error) error {
return index.ForAllIndexes(ctx, repo, repo, func(id restic.ID, idx *index.Index, err error) error {
Printf("index_id: %v\n", id)
if err != nil {
return err
@@ -60,7 +60,7 @@ func runList(ctx context.Context, gopts GlobalOptions, args []string) error {
case "locks":
t = restic.LockFile
case "blobs":
return index.ForAllIndexes(ctx, repo, repo, func(_ restic.ID, idx *index.Index, _ bool, err error) error {
return index.ForAllIndexes(ctx, repo, repo, func(_ restic.ID, idx *index.Index, err error) error {
if err != nil {
return err
}
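Both call sites above adapt to the new `ForAllIndexes` callback, which no longer receives an `oldFormat bool`. A minimal sketch of an adapted caller; the helper name and surrounding setup are hypothetical and not part of this PR:

    // listIndexIDs is a hypothetical helper showing the new callback shape.
    func listIndexIDs(ctx context.Context, repo restic.ListerLoaderUnpacked) error {
        return index.ForAllIndexes(ctx, repo, repo, func(id restic.ID, idx *index.Index, err error) error {
            if err != nil {
                return err // decode failures, including legacy-format indexes, now abort the walk
            }
            fmt.Printf("index_id: %v\n", id)
            return nil
        })
    }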
@@ -12,7 +12,6 @@ import (
"testing"
"time"

"github.com/restic/restic/internal/feature"
"github.com/restic/restic/internal/restic"
rtest "github.com/restic/restic/internal/test"
"github.com/restic/restic/internal/ui/termstatus"

@@ -403,36 +402,21 @@ func TestRestoreNoMetadataOnIgnoredIntermediateDirs(t *testing.T) {
"meta data of intermediate directory hasn't been restore")
}

func TestRestoreLocalLayout(t *testing.T) {
defer feature.TestSetFlag(t, feature.Flag, feature.DeprecateS3LegacyLayout, false)()
func TestRestoreDefaultLayout(t *testing.T) {
env, cleanup := withTestEnvironment(t)
defer cleanup()

var tests = []struct {
filename string
layout string
}{
{"repo-layout-default.tar.gz", ""},
{"repo-layout-s3legacy.tar.gz", ""},
{"repo-layout-default.tar.gz", "default"},
{"repo-layout-s3legacy.tar.gz", "s3legacy"},
}
datafile := filepath.Join("..", "..", "internal", "backend", "testdata", "repo-layout-default.tar.gz")

for _, test := range tests {
datafile := filepath.Join("..", "..", "internal", "backend", "testdata", test.filename)
rtest.SetupTarTestFixture(t, env.base, datafile)

rtest.SetupTarTestFixture(t, env.base, datafile)
// check the repo
testRunCheck(t, env.gopts)

env.gopts.extended["local.layout"] = test.layout
// restore latest snapshot
target := filepath.Join(env.base, "restore")
testRunRestoreLatest(t, env.gopts, target, nil, nil)

// check the repo
testRunCheck(t, env.gopts)

// restore latest snapshot
target := filepath.Join(env.base, "restore")
testRunRestoreLatest(t, env.gopts, target, nil, nil)

rtest.RemoveAll(t, filepath.Join(env.base, "repo"))
rtest.RemoveAll(t, target)
}
rtest.RemoveAll(t, filepath.Join(env.base, "repo"))
rtest.RemoveAll(t, target)
}
@@ -119,16 +119,11 @@ A local repository can be initialized with the ``restic init`` command, e.g.:

$ restic -r /tmp/restic-repo init

The local and sftp backends will auto-detect and accept all layouts described
in the following sections, so that remote repositories mounted locally e.g. via
fuse can be accessed. The layout auto-detection can be overridden by specifying
the option ``-o local.layout=default``, valid values are ``default`` and
``s3legacy``. The option for the sftp backend is named ``sftp.layout``, for the
s3 backend ``s3.layout``.

S3 Legacy Layout (deprecated)
-----------------------------

Restic 0.17 is the last version that supports the legacy layout.

Unfortunately during development the Amazon S3 backend uses slightly different
paths (directory names use singular instead of plural for ``key``,
``lock``, and ``snapshot`` files), and the pack files are stored directly below

@@ -152,8 +147,6 @@ the ``data`` directory. The S3 Legacy repository layout looks like this:
/snapshot
└── 22a5af1bdc6e616f8a29579458c49627e01b32210d09adb288d1ecda7c5711ec

Restic 0.17 is the last version that supports the legacy layout.

Pack Format
===========
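For orientation, here is a short sketch of where files land under the default layout, using the constructor this PR introduces; the repository path matches the example above and the file names are illustrative only:

    l := layout.NewDefaultLayout("/tmp/restic-repo", path.Join)

    fmt.Println(l.Filename(backend.Handle{Type: backend.ConfigFile}))
    // /tmp/restic-repo/config
    fmt.Println(l.Filename(backend.Handle{Type: backend.SnapshotFile, Name: "22a5af1b"}))
    // /tmp/restic-repo/snapshots/22a5af1b
    fmt.Println(l.Filename(backend.Handle{Type: backend.PackFile, Name: "fc919a3b"}))
    // /tmp/restic-repo/data/fc/fc919a3b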
@@ -125,13 +125,10 @@ func open(cfg Config, rt http.RoundTripper) (*Backend, error) {
}

be := &Backend{
container: client,
cfg: cfg,
connections: cfg.Connections,
Layout: &layout.DefaultLayout{
Path: cfg.Prefix,
Join: path.Join,
},
container: client,
cfg: cfg,
connections: cfg.Connections,
Layout: layout.NewDefaultLayout(cfg.Prefix, path.Join),
listMaxItems: defaultListMaxItems,
}

@@ -191,11 +188,6 @@ func (be *Backend) IsPermanentError(err error) bool {
return false
}

// Join combines path components with slashes.
func (be *Backend) Join(p ...string) string {
return path.Join(p...)
}

func (be *Backend) Connections() uint {
return be.connections
}
@@ -107,13 +107,10 @@ func Open(ctx context.Context, cfg Config, rt http.RoundTripper) (backend.Backen
}

be := &b2Backend{
client: client,
bucket: bucket,
cfg: cfg,
Layout: &layout.DefaultLayout{
Join: path.Join,
Path: cfg.Prefix,
},
client: client,
bucket: bucket,
cfg: cfg,
Layout: layout.NewDefaultLayout(cfg.Prefix, path.Join),
listMaxItems: defaultListMaxItems,
canDelete: true,
}

@@ -143,13 +140,10 @@ func Create(ctx context.Context, cfg Config, rt http.RoundTripper) (backend.Back
}

be := &b2Backend{
client: client,
bucket: bucket,
cfg: cfg,
Layout: &layout.DefaultLayout{
Join: path.Join,
Path: cfg.Prefix,
},
client: client,
bucket: bucket,
cfg: cfg,
Layout: layout.NewDefaultLayout(cfg.Prefix, path.Join),
listMaxItems: defaultListMaxItems,
}
return be, nil
@@ -105,17 +105,14 @@ func open(cfg Config, rt http.RoundTripper) (*Backend, error) {
}

be := &Backend{
gcsClient: gcsClient,
projectID: cfg.ProjectID,
connections: cfg.Connections,
bucketName: cfg.Bucket,
region: cfg.Region,
bucket: gcsClient.Bucket(cfg.Bucket),
prefix: cfg.Prefix,
Layout: &layout.DefaultLayout{
Path: cfg.Prefix,
Join: path.Join,
},
gcsClient: gcsClient,
projectID: cfg.ProjectID,
connections: cfg.Connections,
bucketName: cfg.Bucket,
region: cfg.Region,
bucket: gcsClient.Bucket(cfg.Bucket),
prefix: cfg.Prefix,
Layout: layout.NewDefaultLayout(cfg.Prefix, path.Join),
listMaxItems: defaultListMaxItems,
}

@@ -189,11 +186,6 @@ func (be *Backend) IsPermanentError(err error) bool {
return false
}

// Join combines path components with slashes.
func (be *Backend) Join(p ...string) string {
return path.Join(p...)
}

func (be *Backend) Connections() uint {
return be.connections
}
@@ -1,18 +1,7 @@
package layout

import (
"context"
"fmt"
"os"
"path/filepath"
"regexp"

"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/feature"
"github.com/restic/restic/internal/fs"
"github.com/restic/restic/internal/restic"
)

// Layout computes paths for file name storage.

@@ -23,159 +12,3 @@ type Layout interface {
Paths() []string
Name() string
}

// Filesystem is the abstraction of a file system used for a backend.
type Filesystem interface {
Join(...string) string
ReadDir(context.Context, string) ([]os.FileInfo, error)
IsNotExist(error) bool
}

// ensure statically that *LocalFilesystem implements Filesystem.
var _ Filesystem = &LocalFilesystem{}

// LocalFilesystem implements Filesystem in a local path.
type LocalFilesystem struct {
}

// ReadDir returns all entries of a directory.
func (l *LocalFilesystem) ReadDir(_ context.Context, dir string) ([]os.FileInfo, error) {
f, err := fs.Open(dir)
if err != nil {
return nil, err
}

entries, err := f.Readdir(-1)
if err != nil {
return nil, errors.Wrap(err, "Readdir")
}

err = f.Close()
if err != nil {
return nil, errors.Wrap(err, "Close")
}

return entries, nil
}

// Join combines several path components to one.
func (l *LocalFilesystem) Join(paths ...string) string {
return filepath.Join(paths...)
}

// IsNotExist returns true for errors that are caused by not existing files.
func (l *LocalFilesystem) IsNotExist(err error) bool {
return os.IsNotExist(err)
}

var backendFilenameLength = len(restic.ID{}) * 2
var backendFilename = regexp.MustCompile(fmt.Sprintf("^[a-fA-F0-9]{%d}$", backendFilenameLength))

func hasBackendFile(ctx context.Context, fs Filesystem, dir string) (bool, error) {
entries, err := fs.ReadDir(ctx, dir)
if err != nil && fs.IsNotExist(err) {
return false, nil
}

if err != nil {
return false, errors.Wrap(err, "ReadDir")
}

for _, e := range entries {
if backendFilename.MatchString(e.Name()) {
return true, nil
}
}

return false, nil
}

// ErrLayoutDetectionFailed is returned by DetectLayout() when the layout
// cannot be detected automatically.
var ErrLayoutDetectionFailed = errors.New("auto-detecting the filesystem layout failed")

var ErrLegacyLayoutFound = errors.New("detected legacy S3 layout. Use `RESTIC_FEATURES=deprecate-s3-legacy-layout=false restic migrate s3_layout` to migrate your repository")

// DetectLayout tries to find out which layout is used in a local (or sftp)
// filesystem at the given path. If repo is nil, an instance of LocalFilesystem
// is used.
func DetectLayout(ctx context.Context, repo Filesystem, dir string) (Layout, error) {
debug.Log("detect layout at %v", dir)
if repo == nil {
repo = &LocalFilesystem{}
}

// key file in the "keys" dir (DefaultLayout)
foundKeysFile, err := hasBackendFile(ctx, repo, repo.Join(dir, defaultLayoutPaths[backend.KeyFile]))
if err != nil {
return nil, err
}

// key file in the "key" dir (S3LegacyLayout)
foundKeyFile, err := hasBackendFile(ctx, repo, repo.Join(dir, s3LayoutPaths[backend.KeyFile]))
if err != nil {
return nil, err
}

if foundKeysFile && !foundKeyFile {
debug.Log("found default layout at %v", dir)
return &DefaultLayout{
Path: dir,
Join: repo.Join,
}, nil
}

if foundKeyFile && !foundKeysFile {
if feature.Flag.Enabled(feature.DeprecateS3LegacyLayout) {
return nil, ErrLegacyLayoutFound
}

debug.Log("found s3 layout at %v", dir)
return &S3LegacyLayout{
Path: dir,
Join: repo.Join,
}, nil
}

debug.Log("layout detection failed")
return nil, ErrLayoutDetectionFailed
}

// ParseLayout parses the config string and returns a Layout. When layout is
// the empty string, DetectLayout is used. If that fails, defaultLayout is used.
func ParseLayout(ctx context.Context, repo Filesystem, layout, defaultLayout, path string) (l Layout, err error) {
debug.Log("parse layout string %q for backend at %v", layout, path)
switch layout {
case "default":
l = &DefaultLayout{
Path: path,
Join: repo.Join,
}
case "s3legacy":
if feature.Flag.Enabled(feature.DeprecateS3LegacyLayout) {
return nil, ErrLegacyLayoutFound
}

l = &S3LegacyLayout{
Path: path,
Join: repo.Join,
}
case "":
l, err = DetectLayout(ctx, repo, path)

// use the default layout if auto detection failed
if errors.Is(err, ErrLayoutDetectionFailed) && defaultLayout != "" {
debug.Log("error: %v, use default layout %v", err, defaultLayout)
return ParseLayout(ctx, repo, defaultLayout, "", path)
}

if err != nil {
return nil, err
}
debug.Log("layout detected: %v", l)
default:
return nil, errors.Errorf("unknown backend layout string %q, may be one of: default, s3legacy", layout)
}

return l, nil
}
@@ -11,8 +11,8 @@ import (
// subdirs, two characters each (taken from the first two characters of the
// file name).
type DefaultLayout struct {
Path string
Join func(...string) string
path string
join func(...string) string
}

var defaultLayoutPaths = map[backend.FileType]string{

@@ -23,6 +23,13 @@ var defaultLayoutPaths = map[backend.FileType]string{
backend.KeyFile: "keys",
}

func NewDefaultLayout(path string, join func(...string) string) *DefaultLayout {
return &DefaultLayout{
path: path,
join: join,
}
}

func (l *DefaultLayout) String() string {
return "<DefaultLayout>"
}

@@ -37,32 +44,32 @@ func (l *DefaultLayout) Dirname(h backend.Handle) string {
p := defaultLayoutPaths[h.Type]

if h.Type == backend.PackFile && len(h.Name) > 2 {
p = l.Join(p, h.Name[:2]) + "/"
p = l.join(p, h.Name[:2]) + "/"
}

return l.Join(l.Path, p) + "/"
return l.join(l.path, p) + "/"
}

// Filename returns a path to a file, including its name.
func (l *DefaultLayout) Filename(h backend.Handle) string {
name := h.Name
if h.Type == backend.ConfigFile {
return l.Join(l.Path, "config")
return l.join(l.path, "config")
}

return l.Join(l.Dirname(h), name)
return l.join(l.Dirname(h), name)
}

// Paths returns all directory names needed for a repo.
func (l *DefaultLayout) Paths() (dirs []string) {
for _, p := range defaultLayoutPaths {
dirs = append(dirs, l.Join(l.Path, p))
dirs = append(dirs, l.join(l.path, p))
}

// also add subdirs
for i := 0; i < 256; i++ {
subdir := hex.EncodeToString([]byte{byte(i)})
dirs = append(dirs, l.Join(l.Path, defaultLayoutPaths[backend.PackFile], subdir))
dirs = append(dirs, l.join(l.path, defaultLayoutPaths[backend.PackFile], subdir))
}

return dirs

@@ -74,6 +81,6 @@ func (l *DefaultLayout) Basedir(t backend.FileType) (dirname string, subdirs boo
subdirs = true
}

dirname = l.Join(l.Path, defaultLayoutPaths[t])
dirname = l.join(l.path, defaultLayoutPaths[t])
return
}
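Paths still returns every directory a backend has to create: one per file type plus the 256 two-character pack subdirectories. A small sketch under the same assumptions as before (illustrative repository path):

    l := layout.NewDefaultLayout("/tmp/restic-repo", path.Join)
    fmt.Println(len(l.Paths()))
    // 261: data, snapshots, index, locks, keys, plus data/00 through data/ff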
@@ -1,18 +1,24 @@
package layout

import (
"path"

"github.com/restic/restic/internal/backend"
)

// RESTLayout implements the default layout for the REST protocol.
type RESTLayout struct {
URL string
Path string
Join func(...string) string
url string
}

var restLayoutPaths = defaultLayoutPaths

func NewRESTLayout(url string) *RESTLayout {
return &RESTLayout{
url: url,
}
}

func (l *RESTLayout) String() string {
return "<RESTLayout>"
}

@@ -25,10 +31,10 @@ func (l *RESTLayout) Name() string {
// Dirname returns the directory path for a given file type and name.
func (l *RESTLayout) Dirname(h backend.Handle) string {
if h.Type == backend.ConfigFile {
return l.URL + l.Join(l.Path, "/")
return l.url + "/"
}

return l.URL + l.Join(l.Path, "/", restLayoutPaths[h.Type]) + "/"
return l.url + path.Join("/", restLayoutPaths[h.Type]) + "/"
}

// Filename returns a path to a file, including its name.

@@ -39,18 +45,18 @@ func (l *RESTLayout) Filename(h backend.Handle) string {
name = "config"
}

return l.URL + l.Join(l.Path, "/", restLayoutPaths[h.Type], name)
return l.url + path.Join("/", restLayoutPaths[h.Type], name)
}

// Paths returns all directory names
func (l *RESTLayout) Paths() (dirs []string) {
for _, p := range restLayoutPaths {
dirs = append(dirs, l.URL+l.Join(l.Path, p))
dirs = append(dirs, l.url+path.Join("/", p))
}
return dirs
}

// Basedir returns the base dir name for files of type t.
func (l *RESTLayout) Basedir(t backend.FileType) (dirname string, subdirs bool) {
return l.URL + l.Join(l.Path, restLayoutPaths[t]), false
return l.url + path.Join("/", restLayoutPaths[t]), false
}
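The simplified REST layout bakes the repository prefix into the URL it is constructed with, so no separate Path or Join field is needed. A sketch using values taken from the tests further down (illustrative only):

    l := layout.NewRESTLayout("https://hostname.foo:1234/prefix/repo")

    fmt.Println(l.Filename(backend.Handle{Type: backend.PackFile, Name: "foobar"}))
    // https://hostname.foo:1234/prefix/repo/data/foobar
    fmt.Println(l.Filename(backend.Handle{Type: backend.ConfigFile}))
    // https://hostname.foo:1234/prefix/repo/config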
@@ -1,79 +0,0 @@
package layout

import (
"github.com/restic/restic/internal/backend"
)

// S3LegacyLayout implements the old layout used for s3 cloud storage backends, as
// described in the Design document.
type S3LegacyLayout struct {
URL string
Path string
Join func(...string) string
}

var s3LayoutPaths = map[backend.FileType]string{
backend.PackFile: "data",
backend.SnapshotFile: "snapshot",
backend.IndexFile: "index",
backend.LockFile: "lock",
backend.KeyFile: "key",
}

func (l *S3LegacyLayout) String() string {
return "<S3LegacyLayout>"
}

// Name returns the name for this layout.
func (l *S3LegacyLayout) Name() string {
return "s3legacy"
}

// join calls Join with the first empty elements removed.
func (l *S3LegacyLayout) join(url string, items ...string) string {
for len(items) > 0 && items[0] == "" {
items = items[1:]
}

path := l.Join(items...)
if path == "" || path[0] != '/' {
if url != "" && url[len(url)-1] != '/' {
url += "/"
}
}

return url + path
}

// Dirname returns the directory path for a given file type and name.
func (l *S3LegacyLayout) Dirname(h backend.Handle) string {
if h.Type == backend.ConfigFile {
return l.URL + l.Join(l.Path, "/")
}

return l.join(l.URL, l.Path, s3LayoutPaths[h.Type]) + "/"
}

// Filename returns a path to a file, including its name.
func (l *S3LegacyLayout) Filename(h backend.Handle) string {
name := h.Name

if h.Type == backend.ConfigFile {
name = "config"
}

return l.join(l.URL, l.Path, s3LayoutPaths[h.Type], name)
}

// Paths returns all directory names
func (l *S3LegacyLayout) Paths() (dirs []string) {
for _, p := range s3LayoutPaths {
dirs = append(dirs, l.Join(l.Path, p))
}
return dirs
}

// Basedir returns the base dir name for type t.
func (l *S3LegacyLayout) Basedir(t backend.FileType) (dirname string, subdirs bool) {
return l.Join(l.Path, s3LayoutPaths[t]), false
}
@@ -1,16 +1,15 @@
package layout

import (
"context"
"fmt"
"path"
"path/filepath"
"reflect"
"sort"
"strings"
"testing"

"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/feature"
rtest "github.com/restic/restic/internal/test"
)

@@ -99,8 +98,8 @@ func TestDefaultLayout(t *testing.T) {

t.Run("Paths", func(t *testing.T) {
l := &DefaultLayout{
Path: tempdir,
Join: filepath.Join,
path: tempdir,
join: filepath.Join,
}

dirs := l.Paths()

@@ -128,8 +127,8 @@ func TestDefaultLayout(t *testing.T) {
for _, test := range tests {
t.Run(fmt.Sprintf("%v/%v", test.Type, test.Handle.Name), func(t *testing.T) {
l := &DefaultLayout{
Path: test.path,
Join: test.join,
path: test.path,
join: test.join,
}

filename := l.Filename(test.Handle)

@@ -141,7 +140,7 @@ func TestDefaultLayout(t *testing.T) {
}

func TestRESTLayout(t *testing.T) {
path := rtest.TempDir(t)
url := `https://hostname.foo`

var tests = []struct {
backend.Handle

@@ -149,44 +148,43 @@ func TestRESTLayout(t *testing.T) {
}{
{
backend.Handle{Type: backend.PackFile, Name: "0123456"},
filepath.Join(path, "data", "0123456"),
strings.Join([]string{url, "data", "0123456"}, "/"),
},
{
backend.Handle{Type: backend.ConfigFile, Name: "CFG"},
filepath.Join(path, "config"),
strings.Join([]string{url, "config"}, "/"),
},
{
backend.Handle{Type: backend.SnapshotFile, Name: "123456"},
filepath.Join(path, "snapshots", "123456"),
strings.Join([]string{url, "snapshots", "123456"}, "/"),
},
{
backend.Handle{Type: backend.IndexFile, Name: "123456"},
filepath.Join(path, "index", "123456"),
strings.Join([]string{url, "index", "123456"}, "/"),
},
{
backend.Handle{Type: backend.LockFile, Name: "123456"},
filepath.Join(path, "locks", "123456"),
strings.Join([]string{url, "locks", "123456"}, "/"),
},
{
backend.Handle{Type: backend.KeyFile, Name: "123456"},
filepath.Join(path, "keys", "123456"),
strings.Join([]string{url, "keys", "123456"}, "/"),
},
}

l := &RESTLayout{
Path: path,
Join: filepath.Join,
url: url,
}

t.Run("Paths", func(t *testing.T) {
dirs := l.Paths()

want := []string{
filepath.Join(path, "data"),
filepath.Join(path, "snapshots"),
filepath.Join(path, "index"),
filepath.Join(path, "locks"),
filepath.Join(path, "keys"),
strings.Join([]string{url, "data"}, "/"),
strings.Join([]string{url, "snapshots"}, "/"),
strings.Join([]string{url, "index"}, "/"),
strings.Join([]string{url, "locks"}, "/"),
strings.Join([]string{url, "keys"}, "/"),
}

sort.Strings(want)

@@ -215,59 +213,23 @@ func TestRESTLayoutURLs(t *testing.T) {
dir string
}{
{
&RESTLayout{URL: "https://hostname.foo", Path: "", Join: path.Join},
&RESTLayout{url: "https://hostname.foo"},
backend.Handle{Type: backend.PackFile, Name: "foobar"},
"https://hostname.foo/data/foobar",
"https://hostname.foo/data/",
},
{
&RESTLayout{URL: "https://hostname.foo:1234/prefix/repo", Path: "/", Join: path.Join},
&RESTLayout{url: "https://hostname.foo:1234/prefix/repo"},
backend.Handle{Type: backend.LockFile, Name: "foobar"},
"https://hostname.foo:1234/prefix/repo/locks/foobar",
"https://hostname.foo:1234/prefix/repo/locks/",
},
{
&RESTLayout{URL: "https://hostname.foo:1234/prefix/repo", Path: "/", Join: path.Join},
&RESTLayout{url: "https://hostname.foo:1234/prefix/repo"},
backend.Handle{Type: backend.ConfigFile, Name: "foobar"},
"https://hostname.foo:1234/prefix/repo/config",
"https://hostname.foo:1234/prefix/repo/",
},
{
&S3LegacyLayout{URL: "https://hostname.foo", Path: "/", Join: path.Join},
backend.Handle{Type: backend.PackFile, Name: "foobar"},
"https://hostname.foo/data/foobar",
"https://hostname.foo/data/",
},
{
&S3LegacyLayout{URL: "https://hostname.foo:1234/prefix/repo", Path: "", Join: path.Join},
backend.Handle{Type: backend.LockFile, Name: "foobar"},
"https://hostname.foo:1234/prefix/repo/lock/foobar",
"https://hostname.foo:1234/prefix/repo/lock/",
},
{
&S3LegacyLayout{URL: "https://hostname.foo:1234/prefix/repo", Path: "/", Join: path.Join},
backend.Handle{Type: backend.ConfigFile, Name: "foobar"},
"https://hostname.foo:1234/prefix/repo/config",
"https://hostname.foo:1234/prefix/repo/",
},
{
&S3LegacyLayout{URL: "", Path: "", Join: path.Join},
backend.Handle{Type: backend.PackFile, Name: "foobar"},
"data/foobar",
"data/",
},
{
&S3LegacyLayout{URL: "", Path: "", Join: path.Join},
backend.Handle{Type: backend.LockFile, Name: "foobar"},
"lock/foobar",
"lock/",
},
{
&S3LegacyLayout{URL: "", Path: "/", Join: path.Join},
backend.Handle{Type: backend.ConfigFile, Name: "foobar"},
"/config",
"/",
},
}

for _, test := range tests {

@@ -284,165 +246,3 @@ func TestRESTLayoutURLs(t *testing.T) {
})
}
}

func TestS3LegacyLayout(t *testing.T) {
path := rtest.TempDir(t)

var tests = []struct {
backend.Handle
filename string
}{
{
backend.Handle{Type: backend.PackFile, Name: "0123456"},
filepath.Join(path, "data", "0123456"),
},
{
backend.Handle{Type: backend.ConfigFile, Name: "CFG"},
filepath.Join(path, "config"),
},
{
backend.Handle{Type: backend.SnapshotFile, Name: "123456"},
filepath.Join(path, "snapshot", "123456"),
},
{
backend.Handle{Type: backend.IndexFile, Name: "123456"},
filepath.Join(path, "index", "123456"),
},
{
backend.Handle{Type: backend.LockFile, Name: "123456"},
filepath.Join(path, "lock", "123456"),
},
{
backend.Handle{Type: backend.KeyFile, Name: "123456"},
filepath.Join(path, "key", "123456"),
},
}

l := &S3LegacyLayout{
Path: path,
Join: filepath.Join,
}

t.Run("Paths", func(t *testing.T) {
dirs := l.Paths()

want := []string{
filepath.Join(path, "data"),
filepath.Join(path, "snapshot"),
filepath.Join(path, "index"),
filepath.Join(path, "lock"),
filepath.Join(path, "key"),
}

sort.Strings(want)
sort.Strings(dirs)

if !reflect.DeepEqual(dirs, want) {
t.Fatalf("wrong paths returned, want:\n %v\ngot:\n %v", want, dirs)
}
})

for _, test := range tests {
t.Run(fmt.Sprintf("%v/%v", test.Type, test.Handle.Name), func(t *testing.T) {
filename := l.Filename(test.Handle)
if filename != test.filename {
t.Fatalf("wrong filename, want %v, got %v", test.filename, filename)
}
})
}
}

func TestDetectLayout(t *testing.T) {
defer feature.TestSetFlag(t, feature.Flag, feature.DeprecateS3LegacyLayout, false)()
path := rtest.TempDir(t)

var tests = []struct {
filename string
want string
}{
{"repo-layout-default.tar.gz", "*layout.DefaultLayout"},
{"repo-layout-s3legacy.tar.gz", "*layout.S3LegacyLayout"},
}

var fs = &LocalFilesystem{}
for _, test := range tests {
for _, fs := range []Filesystem{fs, nil} {
t.Run(fmt.Sprintf("%v/fs-%T", test.filename, fs), func(t *testing.T) {
rtest.SetupTarTestFixture(t, path, filepath.Join("../testdata", test.filename))

layout, err := DetectLayout(context.TODO(), fs, filepath.Join(path, "repo"))
if err != nil {
t.Fatal(err)
}

if layout == nil {
t.Fatal("wanted some layout, but detect returned nil")
}

layoutName := fmt.Sprintf("%T", layout)
if layoutName != test.want {
t.Fatalf("want layout %v, got %v", test.want, layoutName)
}

rtest.RemoveAll(t, filepath.Join(path, "repo"))
})
}
}
}

func TestParseLayout(t *testing.T) {
defer feature.TestSetFlag(t, feature.Flag, feature.DeprecateS3LegacyLayout, false)()
path := rtest.TempDir(t)

var tests = []struct {
layoutName string
defaultLayoutName string
want string
}{
{"default", "", "*layout.DefaultLayout"},
{"s3legacy", "", "*layout.S3LegacyLayout"},
{"", "", "*layout.DefaultLayout"},
}

rtest.SetupTarTestFixture(t, path, filepath.Join("..", "testdata", "repo-layout-default.tar.gz"))

for _, test := range tests {
t.Run(test.layoutName, func(t *testing.T) {
layout, err := ParseLayout(context.TODO(), &LocalFilesystem{}, test.layoutName, test.defaultLayoutName, filepath.Join(path, "repo"))
if err != nil {
t.Fatal(err)
}

if layout == nil {
t.Fatal("wanted some layout, but detect returned nil")
}

// test that the functions work (and don't panic)
_ = layout.Dirname(backend.Handle{Type: backend.PackFile})
_ = layout.Filename(backend.Handle{Type: backend.PackFile, Name: "1234"})
_ = layout.Paths()

layoutName := fmt.Sprintf("%T", layout)
if layoutName != test.want {
t.Fatalf("want layout %v, got %v", test.want, layoutName)
}
})
}
}

func TestParseLayoutInvalid(t *testing.T) {
path := rtest.TempDir(t)

var invalidNames = []string{
"foo", "bar", "local",
}

for _, name := range invalidNames {
t.Run(name, func(t *testing.T) {
layout, err := ParseLayout(context.TODO(), nil, name, "", path)
if err == nil {
t.Fatalf("expected error not found for layout name %v, layout is %v", name, layout)
}
})
}
}
@@ -9,8 +9,7 @@ import (

// Config holds all information needed to open a local repository.
type Config struct {
Path string
Layout string `option:"layout" help:"use this backend directory layout (default: auto-detect) (deprecated)"`
Path string

Connections uint `option:"connections" help:"set a limit for the number of concurrent operations (default: 2)"`
}
@@ -6,30 +6,22 @@ import (
"testing"

"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/feature"
rtest "github.com/restic/restic/internal/test"
)

func TestLayout(t *testing.T) {
defer feature.TestSetFlag(t, feature.Flag, feature.DeprecateS3LegacyLayout, false)()
path := rtest.TempDir(t)

var tests = []struct {
filename string
layout string
failureExpected bool
packfiles map[string]bool
}{
{"repo-layout-default.tar.gz", "", false, map[string]bool{
{"repo-layout-default.tar.gz", false, map[string]bool{
"aa464e9fd598fe4202492ee317ffa728e82fa83a1de1a61996e5bd2d6651646c": false,
"fc919a3b421850f6fa66ad22ebcf91e433e79ffef25becf8aef7c7b1eca91683": false,
"c089d62788da14f8b7cbf77188305c0874906f0b73d3fce5a8869050e8d0c0e1": false,
}},
{"repo-layout-s3legacy.tar.gz", "", false, map[string]bool{
"fc919a3b421850f6fa66ad22ebcf91e433e79ffef25becf8aef7c7b1eca91683": false,
"c089d62788da14f8b7cbf77188305c0874906f0b73d3fce5a8869050e8d0c0e1": false,
"aa464e9fd598fe4202492ee317ffa728e82fa83a1de1a61996e5bd2d6651646c": false,
}},
}

for _, test := range tests {

@@ -39,7 +31,6 @@ func TestLayout(t *testing.T) {
repo := filepath.Join(path, "repo")
be, err := Open(context.TODO(), Config{
Path: repo,
Layout: test.layout,
Connections: 2,
})
if err != nil {
@@ -37,13 +37,8 @@ func NewFactory() location.Factory {
return location.NewLimitedBackendFactory("local", ParseConfig, location.NoPassword, limiter.WrapBackendConstructor(Create), limiter.WrapBackendConstructor(Open))
}

const defaultLayout = "default"

func open(ctx context.Context, cfg Config) (*Local, error) {
l, err := layout.ParseLayout(ctx, &layout.LocalFilesystem{}, cfg.Layout, defaultLayout, cfg.Path)
if err != nil {
return nil, err
}
func open(cfg Config) (*Local, error) {
l := layout.NewDefaultLayout(cfg.Path, filepath.Join)

fi, err := fs.Stat(l.Filename(backend.Handle{Type: backend.ConfigFile}))
m := util.DeriveModesFromFileInfo(fi, err)

@@ -57,17 +52,17 @@ func open(ctx context.Context, cfg Config) (*Local, error) {
}

// Open opens the local backend as specified by config.
func Open(ctx context.Context, cfg Config) (*Local, error) {
debug.Log("open local backend at %v (layout %q)", cfg.Path, cfg.Layout)
return open(ctx, cfg)
func Open(_ context.Context, cfg Config) (*Local, error) {
debug.Log("open local backend at %v", cfg.Path)
return open(cfg)
}

// Create creates all the necessary files and directories for a new local
// backend at dir. Afterwards a new config blob should be created.
func Create(ctx context.Context, cfg Config) (*Local, error) {
debug.Log("create local backend at %v (layout %q)", cfg.Path, cfg.Layout)
func Create(_ context.Context, cfg Config) (*Local, error) {
debug.Log("create local backend at %v", cfg.Path)

be, err := open(ctx, cfg)
be, err := open(cfg)
if err != nil {
return nil, err
}
@@ -8,7 +8,6 @@ import (
"io"
"net/http"
"net/url"
"path"
"strings"

"github.com/restic/restic/internal/backend"

@@ -66,7 +65,7 @@ func Open(_ context.Context, cfg Config, rt http.RoundTripper) (*Backend, error)
be := &Backend{
url: cfg.URL,
client: http.Client{Transport: rt},
Layout: &layout.RESTLayout{URL: url, Join: path.Join},
Layout: layout.NewRESTLayout(url),
connections: cfg.Connections,
}
@@ -9,7 +9,6 @@ import (
"os"
"path"
"strings"
"time"

"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/backend/layout"

@@ -37,9 +36,7 @@ func NewFactory() location.Factory {
return location.NewHTTPBackendFactory("s3", ParseConfig, location.NoPassword, Create, Open)
}

const defaultLayout = "default"

func open(ctx context.Context, cfg Config, rt http.RoundTripper) (*Backend, error) {
func open(cfg Config, rt http.RoundTripper) (*Backend, error) {
debug.Log("open, config %#v", cfg)

if cfg.KeyID == "" && cfg.Secret.String() != "" {

@@ -83,15 +80,9 @@ func open(ctx context.Context, cfg Config, rt http.RoundTripper) (*Backend, erro
be := &Backend{
client: client,
cfg: cfg,
Layout: layout.NewDefaultLayout(cfg.Prefix, path.Join),
}

l, err := layout.ParseLayout(ctx, be, cfg.Layout, defaultLayout, cfg.Prefix)
if err != nil {
return nil, err
}

be.Layout = l

return be, nil
}

@@ -194,14 +185,14 @@ func getCredentials(cfg Config, tr http.RoundTripper) (*credentials.Credentials,

// Open opens the S3 backend at bucket and region. The bucket is created if it
// does not exist yet.
func Open(ctx context.Context, cfg Config, rt http.RoundTripper) (backend.Backend, error) {
return open(ctx, cfg, rt)
func Open(_ context.Context, cfg Config, rt http.RoundTripper) (backend.Backend, error) {
return open(cfg, rt)
}

// Create opens the S3 backend at bucket and region and creates the bucket if
// it does not exist yet.
func Create(ctx context.Context, cfg Config, rt http.RoundTripper) (backend.Backend, error) {
be, err := open(ctx, cfg, rt)
be, err := open(cfg, rt)
if err != nil {
return nil, errors.Wrap(err, "open")
}

@@ -257,78 +248,6 @@ func (be *Backend) IsPermanentError(err error) bool {
return false
}

// Join combines path components with slashes.
func (be *Backend) Join(p ...string) string {
return path.Join(p...)
}

type fileInfo struct {
name string
size int64
mode os.FileMode
modTime time.Time
isDir bool
}

func (fi *fileInfo) Name() string { return fi.name } // base name of the file
func (fi *fileInfo) Size() int64 { return fi.size } // length in bytes for regular files; system-dependent for others
func (fi *fileInfo) Mode() os.FileMode { return fi.mode } // file mode bits
func (fi *fileInfo) ModTime() time.Time { return fi.modTime } // modification time
func (fi *fileInfo) IsDir() bool { return fi.isDir } // abbreviation for Mode().IsDir()
func (fi *fileInfo) Sys() interface{} { return nil } // underlying data source (can return nil)

// ReadDir returns the entries for a directory.
func (be *Backend) ReadDir(ctx context.Context, dir string) (list []os.FileInfo, err error) {
debug.Log("ReadDir(%v)", dir)

// make sure dir ends with a slash
if dir[len(dir)-1] != '/' {
dir += "/"
}

ctx, cancel := context.WithCancel(ctx)
defer cancel()

debug.Log("using ListObjectsV1(%v)", be.cfg.ListObjectsV1)

for obj := range be.client.ListObjects(ctx, be.cfg.Bucket, minio.ListObjectsOptions{
Prefix: dir,
Recursive: false,
UseV1: be.cfg.ListObjectsV1,
}) {
if obj.Err != nil {
return nil, err
}

if obj.Key == "" {
continue
}

name := strings.TrimPrefix(obj.Key, dir)
// Sometimes s3 returns an entry for the dir itself. Ignore it.
if name == "" {
continue
}
entry := &fileInfo{
name: name,
size: obj.Size,
modTime: obj.LastModified,
}

if name[len(name)-1] == '/' {
entry.isDir = true
entry.mode = os.ModeDir | 0755
entry.name = name[:len(name)-1]
} else {
entry.mode = 0644
}

list = append(list, entry)
}

return list, nil
}

func (be *Backend) Connections() uint {
return be.cfg.Connections
}

@@ -526,40 +445,3 @@ func (be *Backend) Delete(ctx context.Context) error {

// Close does nothing
func (be *Backend) Close() error { return nil }

// Rename moves a file based on the new layout l.
func (be *Backend) Rename(ctx context.Context, h backend.Handle, l layout.Layout) error {
debug.Log("Rename %v to %v", h, l)
oldname := be.Filename(h)
newname := l.Filename(h)

if oldname == newname {
debug.Log(" %v is already renamed", newname)
return nil
}

debug.Log(" %v -> %v", oldname, newname)

src := minio.CopySrcOptions{
Bucket: be.cfg.Bucket,
Object: oldname,
}

dst := minio.CopyDestOptions{
Bucket: be.cfg.Bucket,
Object: newname,
}

_, err := be.client.CopyObject(ctx, dst, src)
if err != nil && be.IsNotExist(err) {
debug.Log("copy failed: %v, seems to already have been renamed", err)
return nil
}

if err != nil {
debug.Log("copy failed: %v", err)
return err
}

return be.client.RemoveObject(ctx, be.cfg.Bucket, oldname, minio.RemoveObjectOptions{})
}
@@ -13,7 +13,6 @@ import (
type Config struct {
User, Host, Port, Path string

Layout string `option:"layout" help:"use this backend directory layout (default: auto-detect) (deprecated)"`
Command string `option:"command" help:"specify command to create sftp connection"`
Args string `option:"args" help:"specify arguments for ssh"`
@@ -8,7 +8,6 @@ import (

"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/backend/sftp"
"github.com/restic/restic/internal/feature"
rtest "github.com/restic/restic/internal/test"
)

@@ -17,25 +16,18 @@ func TestLayout(t *testing.T) {
t.Skip("sftp server binary not available")
}

defer feature.TestSetFlag(t, feature.Flag, feature.DeprecateS3LegacyLayout, false)()
path := rtest.TempDir(t)

var tests = []struct {
filename string
layout string
failureExpected bool
packfiles map[string]bool
}{
{"repo-layout-default.tar.gz", "", false, map[string]bool{
{"repo-layout-default.tar.gz", false, map[string]bool{
"aa464e9fd598fe4202492ee317ffa728e82fa83a1de1a61996e5bd2d6651646c": false,
"fc919a3b421850f6fa66ad22ebcf91e433e79ffef25becf8aef7c7b1eca91683": false,
"c089d62788da14f8b7cbf77188305c0874906f0b73d3fce5a8869050e8d0c0e1": false,
}},
{"repo-layout-s3legacy.tar.gz", "", false, map[string]bool{
"fc919a3b421850f6fa66ad22ebcf91e433e79ffef25becf8aef7c7b1eca91683": false,
"c089d62788da14f8b7cbf77188305c0874906f0b73d3fce5a8869050e8d0c0e1": false,
"aa464e9fd598fe4202492ee317ffa728e82fa83a1de1a61996e5bd2d6651646c": false,
}},
}

for _, test := range tests {

@@ -46,7 +38,6 @@ func TestLayout(t *testing.T) {
be, err := sftp.Open(context.TODO(), sftp.Config{
Command: fmt.Sprintf("%q -e", sftpServer),
Path: repo,
Layout: test.layout,
Connections: 5,
})
if err != nil {
@@ -50,8 +50,6 @@ func NewFactory() location.Factory {
return location.NewLimitedBackendFactory("sftp", ParseConfig, location.NoPassword, limiter.WrapBackendConstructor(Create), limiter.WrapBackendConstructor(Open))
}

const defaultLayout = "default"

func startClient(cfg Config) (*SFTP, error) {
program, args, err := buildSSHCommand(cfg)
if err != nil {

@@ -121,7 +119,13 @@ func startClient(cfg Config) (*SFTP, error) {
}

_, posixRename := client.HasExtension("posix-rename@openssh.com")
return &SFTP{c: client, cmd: cmd, result: ch, posixRename: posixRename}, nil
return &SFTP{
c: client,
cmd: cmd,
result: ch,
posixRename: posixRename,
Layout: layout.NewDefaultLayout(cfg.Path, path.Join),
}, nil
}

// clientError returns an error if the client has exited. Otherwise, nil is

@@ -139,7 +143,7 @@ func (r *SFTP) clientError() error {

// Open opens an sftp backend as described by the config by running
// "ssh" with the appropriate arguments (or cfg.Command, if set).
func Open(ctx context.Context, cfg Config) (*SFTP, error) {
func Open(_ context.Context, cfg Config) (*SFTP, error) {
debug.Log("open backend with config %#v", cfg)

sftp, err := startClient(cfg)

@@ -148,18 +152,10 @@ func Open(ctx context.Context, cfg Config) (*SFTP, error) {
return nil, err
}

return open(ctx, sftp, cfg)
return open(sftp, cfg)
}

func open(ctx context.Context, sftp *SFTP, cfg Config) (*SFTP, error) {
var err error
sftp.Layout, err = layout.ParseLayout(ctx, sftp, cfg.Layout, defaultLayout, cfg.Path)
if err != nil {
return nil, err
}

debug.Log("layout: %v\n", sftp.Layout)

func open(sftp *SFTP, cfg Config) (*SFTP, error) {
fi, err := sftp.c.Stat(sftp.Layout.Filename(backend.Handle{Type: backend.ConfigFile}))
m := util.DeriveModesFromFileInfo(fi, err)
debug.Log("using (%03O file, %03O dir) permissions", m.File, m.Dir)

@@ -195,21 +191,6 @@ func (r *SFTP) mkdirAllDataSubdirs(ctx context.Context, nconn uint) error {
return g.Wait()
}

// Join combines path components with slashes (according to the sftp spec).
func (r *SFTP) Join(p ...string) string {
return path.Join(p...)
}

// ReadDir returns the entries for a directory.
func (r *SFTP) ReadDir(_ context.Context, dir string) ([]os.FileInfo, error) {
fi, err := r.c.ReadDir(dir)

// sftp client does not specify dir name on error, so add it here
err = errors.Wrapf(err, "(%v)", dir)

return fi, err
}

// IsNotExist returns true if the error is caused by a not existing file.
func (r *SFTP) IsNotExist(err error) bool {
return errors.Is(err, os.ErrNotExist)

@@ -266,11 +247,6 @@ func Create(ctx context.Context, cfg Config) (*SFTP, error) {
return nil, err
}

sftp.Layout, err = layout.ParseLayout(ctx, sftp, cfg.Layout, defaultLayout, cfg.Path)
if err != nil {
return nil, err
}

sftp.Modes = util.DefaultModes

// test if config file already exists

@@ -285,7 +261,7 @@ func Create(ctx context.Context, cfg Config) (*SFTP, error) {
}

// repurpose existing connection
return open(ctx, sftp, cfg)
return open(sftp, cfg)
}

func (r *SFTP) Connections() uint {

@@ -302,12 +278,6 @@ func (r *SFTP) HasAtomicReplace() bool {
return r.posixRename
}

// Join joins the given paths and cleans them afterwards. This always uses
// forward slashes, which is required by sftp.
func Join(parts ...string) string {
return path.Clean(path.Join(parts...))
}

// tempSuffix generates a random string suffix that should be sufficiently long
// to avoid accidental conflicts
func tempSuffix() string {

@@ -572,9 +542,9 @@ func (r *SFTP) Close() error {
}

func (r *SFTP) deleteRecursive(ctx context.Context, name string) error {
entries, err := r.ReadDir(ctx, name)
entries, err := r.c.ReadDir(name)
if err != nil {
return errors.Wrap(err, "ReadDir")
return errors.Wrapf(err, "ReadDir(%v)", name)
}

for _, fi := range entries {

@@ -582,7 +552,7 @@ func (r *SFTP) deleteRecursive(ctx context.Context, name string) error {
return ctx.Err()
}

itemName := r.Join(name, fi.Name())
itemName := path.Join(name, fi.Name())
if fi.IsDir() {
err := r.deleteRecursive(ctx, itemName)
if err != nil {
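The SFTP backend no longer carries its own Join and ReadDir helpers; remote paths are now built with the standard library's slash-based path.Join, which is what the SFTP protocol expects on every platform. A tiny self-contained sketch of the distinction (not restic code):

    package main

    import (
        "fmt"
        "path"          // always joins with forward slashes, correct for SFTP remote paths
        "path/filepath" // joins with the local OS separator, only correct for local files
    )

    func main() {
        fmt.Println(path.Join("repo", "data", "ab"))     // repo/data/ab on every platform
        fmt.Println(filepath.Join("repo", "data", "ab")) // repo\data\ab on Windows
    }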
@@ -72,10 +72,7 @@ func Open(ctx context.Context, cfg Config, rt http.RoundTripper) (backend.Backen
connections: cfg.Connections,
container: cfg.Container,
prefix: cfg.Prefix,
Layout: &layout.DefaultLayout{
Path: cfg.Prefix,
Join: path.Join,
},
Layout: layout.NewDefaultLayout(cfg.Prefix, path.Join),
}

// Authenticate if needed
Binary file not shown.
@@ -8,8 +8,6 @@ import (
"sync"

"github.com/klauspost/compress/zstd"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/backend/s3"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/repository"

@@ -53,9 +51,6 @@ func New(repo restic.Repository, trackUnused bool) *Checker {
return c
}

// ErrLegacyLayout is returned when the repository uses the S3 legacy layout.
var ErrLegacyLayout = errors.New("repository uses S3 legacy layout")

// ErrDuplicatePacks is returned when a pack is found in more than one index.
type ErrDuplicatePacks struct {
PackID restic.ID

@@ -75,16 +70,6 @@ func (e *ErrMixedPack) Error() string {
return fmt.Sprintf("pack %v contains a mix of tree and data blobs", e.PackID.Str())
}

// ErrOldIndexFormat is returned when an index with the old format is
// found.
type ErrOldIndexFormat struct {
restic.ID
}

func (err *ErrOldIndexFormat) Error() string {
return fmt.Sprintf("index %v has old format", err.ID)
}

func (c *Checker) LoadSnapshots(ctx context.Context) error {
var err error
c.snapshots, err = restic.MemorizeList(ctx, c.repo, restic.SnapshotFile)

@@ -112,14 +97,8 @@ func (c *Checker) LoadIndex(ctx context.Context, p *progress.Counter) (hints []e
debug.Log("Start")

packToIndex := make(map[restic.ID]restic.IDSet)
err := c.masterIndex.Load(ctx, c.repo, p, func(id restic.ID, idx *index.Index, oldFormat bool, err error) error {
err := c.masterIndex.Load(ctx, c.repo, p, func(id restic.ID, idx *index.Index, err error) error {
debug.Log("process index %v, err %v", id, err)

if oldFormat {
debug.Log("index %v has old format", id)
hints = append(hints, &ErrOldIndexFormat{id})
}

err = errors.Wrapf(err, "error loading index %v", id)

if err != nil {

@@ -193,23 +172,11 @@ func (e *PackError) Error() string {
return "pack " + e.ID.String() + ": " + e.Err.Error()
}

func isS3Legacy(b backend.Backend) bool {
be := backend.AsBackend[*s3.Backend](b)
return be != nil && be.Layout.Name() == "s3legacy"
}

// Packs checks that all packs referenced in the index are still available and
// there are no packs that aren't in an index. errChan is closed after all
// packs have been checked.
func (c *Checker) Packs(ctx context.Context, errChan chan<- error) {
defer close(errChan)

if r, ok := c.repo.(*repository.Repository); ok {
if isS3Legacy(repository.AsS3Backend(r)) {
errChan <- ErrLegacyLayout
}
}

debug.Log("checking for %d packs", len(c.packs))

debug.Log("listing repository packs")
@@ -6,8 +6,6 @@ var Flag = New()
// flag names are written in kebab-case
const (
BackendErrorRedesign FlagName = "backend-error-redesign"
DeprecateLegacyIndex FlagName = "deprecate-legacy-index"
DeprecateS3LegacyLayout FlagName = "deprecate-s3-legacy-layout"
DeviceIDForHardlinks FlagName = "device-id-for-hardlinks"
ExplicitS3AnonymousAuth FlagName = "explicit-s3-anonymous-auth"
SafeForgetKeepTags FlagName = "safe-forget-keep-tags"

@@ -16,8 +14,6 @@ const (
func init() {
Flag.SetFlags(map[FlagName]FlagDesc{
BackendErrorRedesign: {Type: Beta, Description: "enforce timeouts for stuck HTTP requests and use new backend error handling design."},
DeprecateLegacyIndex: {Type: Beta, Description: "disable support for index format used by restic 0.1.0. Use `restic repair index` to update the index if necessary."},
DeprecateS3LegacyLayout: {Type: Beta, Description: "disable support for S3 legacy layout used up to restic 0.7.0. Use `RESTIC_FEATURES=deprecate-s3-legacy-layout=false restic migrate s3_layout` to migrate your S3 repository if necessary."},
DeviceIDForHardlinks: {Type: Alpha, Description: "store deviceID only for hardlinks to reduce metadata changes for example when using btrfs subvolumes. Will be removed in a future restic version after repository format 3 is available"},
ExplicitS3AnonymousAuth: {Type: Beta, Description: "forbid anonymous S3 authentication unless `-o s3.unsafe-anonymous-auth=true` is set"},
SafeForgetKeepTags: {Type: Beta, Description: "prevent deleting all snapshots if the tag passed to `forget --keep-tags tagname` does not exist"},
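The two deprecation flags are removed because the behaviour they guarded is now unconditional; the remaining flags are still queried the same way. A minimal sketch of a flag check inside the restic code base (the surrounding context is hypothetical):

    if feature.Flag.Enabled(feature.BackendErrorRedesign) {
        // stricter backend error handling and HTTP request timeouts
    }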
@ -1,123 +0,0 @@
package migrations

import (
"context"
"fmt"
"os"
"path"

"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/backend/layout"
"github.com/restic/restic/internal/backend/s3"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/restic"
)

func init() {
register(&S3Layout{})
}

// S3Layout migrates a repository on an S3 backend from the "s3legacy" to the
// "default" layout.
type S3Layout struct{}

// Check tests whether the migration can be applied.
func (m *S3Layout) Check(_ context.Context, repo restic.Repository) (bool, string, error) {
be := repository.AsS3Backend(repo.(*repository.Repository))
if be == nil {
debug.Log("backend is not s3")
return false, "backend is not s3", nil
}

if be.Layout.Name() != "s3legacy" {
debug.Log("layout is not s3legacy")
return false, "not using the legacy s3 layout", nil
}

return true, "", nil
}

func (m *S3Layout) RepoCheck() bool {
return false
}

func retry(max int, fail func(err error), f func() error) error {
var err error
for i := 0; i < max; i++ {
err = f()
if err == nil {
return nil
}
if fail != nil {
fail(err)
}
}
return err
}

// maxErrors for retrying renames on s3.
const maxErrors = 20

func (m *S3Layout) moveFiles(ctx context.Context, be *s3.Backend, l layout.Layout, t restic.FileType) error {
printErr := func(err error) {
fmt.Fprintf(os.Stderr, "renaming file returned error: %v\n", err)
}

return be.List(ctx, t, func(fi backend.FileInfo) error {
h := backend.Handle{Type: t, Name: fi.Name}
debug.Log("move %v", h)

return retry(maxErrors, printErr, func() error {
return be.Rename(ctx, h, l)
})
})
}

// Apply runs the migration.
func (m *S3Layout) Apply(ctx context.Context, repo restic.Repository) error {
be := repository.AsS3Backend(repo.(*repository.Repository))
if be == nil {
debug.Log("backend is not s3")
return errors.New("backend is not s3")
}

oldLayout := &layout.S3LegacyLayout{
Path: be.Path(),
Join: path.Join,
}

newLayout := &layout.DefaultLayout{
Path: be.Path(),
Join: path.Join,
}

be.Layout = oldLayout

for _, t := range []restic.FileType{
restic.SnapshotFile,
restic.PackFile,
restic.KeyFile,
restic.LockFile,
} {
err := m.moveFiles(ctx, be, newLayout, t)
if err != nil {
return err
}
}

be.Layout = newLayout

return nil
}

// Name returns the name for this migration.
func (m *S3Layout) Name() string {
return "s3_layout"
}

// Desc returns a short description what the migration does.
func (m *S3Layout) Desc() string {
return "move files from 's3legacy' to the 'default' repository layout"
}
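Note (illustrative, not part of this PR): the deleted migration wrapped each S3 rename in a small bounded-retry helper. A self-contained sketch of that pattern, with standalone names chosen purely for illustration:

package main

import (
	"errors"
	"fmt"
)

// retry runs f up to max times and reports intermediate failures via fail,
// mirroring the helper that the removed migration used for renames.
func retry(max int, fail func(err error), f func() error) error {
	var err error
	for i := 0; i < max; i++ {
		err = f()
		if err == nil {
			return nil
		}
		if fail != nil {
			fail(err)
		}
	}
	return err
}

func main() {
	attempts := 0
	err := retry(3, func(err error) { fmt.Println("attempt failed:", err) }, func() error {
		attempts++
		if attempts < 3 {
			return errors.New("transient failure")
		}
		return nil
	})
	fmt.Printf("finished after %d attempts, err = %v\n", attempts, err)
}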
@ -4,7 +4,6 @@ import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"math"
"sync"

@ -12,7 +11,6 @@ import (

"github.com/restic/restic/internal/crypto"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/feature"
"github.com/restic/restic/internal/restic"

"github.com/restic/restic/internal/debug"

@ -489,34 +487,15 @@ func (idx *Index) merge(idx2 *Index) error {
return nil
}

// isErrOldIndex returns true if the error may be caused by an old index
// format.
func isErrOldIndex(err error) bool {
e, ok := err.(*json.UnmarshalTypeError)
return ok && e.Value == "array"
}

// DecodeIndex unserializes an index from buf.
func DecodeIndex(buf []byte, id restic.ID) (idx *Index, oldFormat bool, err error) {
func DecodeIndex(buf []byte, id restic.ID) (idx *Index, err error) {
debug.Log("Start decoding index")
idxJSON := &jsonIndex{}

err = json.Unmarshal(buf, idxJSON)
if err != nil {
debug.Log("Error %v", err)

if isErrOldIndex(err) {
if feature.Flag.Enabled(feature.DeprecateLegacyIndex) {
return nil, false, fmt.Errorf("index seems to use the legacy format. update it using `restic repair index`")
}

debug.Log("index is probably old format, trying that")
idx, err = decodeOldIndex(buf)
idx.ids = append(idx.ids, id)
return idx, err == nil, err
}

return nil, false, errors.Wrap(err, "DecodeIndex")
return nil, errors.Wrap(err, "DecodeIndex")
}

idx = NewIndex()

@ -537,38 +516,6 @@ func DecodeIndex(buf []byte, id restic.ID) (idx *Index, oldFormat bool, err erro
idx.ids = append(idx.ids, id)
idx.final = true

debug.Log("done")
return idx, false, nil
}

// DecodeOldIndex loads and unserializes an index in the old format from rd.
func decodeOldIndex(buf []byte) (idx *Index, err error) {
debug.Log("Start decoding old index")
list := []*packJSON{}

err = json.Unmarshal(buf, &list)
if err != nil {
debug.Log("Error %#v", err)
return nil, errors.Wrap(err, "Decode")
}

idx = NewIndex()
for _, pack := range list {
packID := idx.addToPacks(pack.ID)

for _, blob := range pack.Blobs {
idx.store(packID, restic.Blob{
BlobHandle: restic.BlobHandle{
Type: blob.Type,
ID: blob.ID},
Offset: blob.Offset,
Length: blob.Length,
// no compressed length in the old index format
})
}
}
idx.final = true

debug.Log("done")
return idx, nil
}
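Note (illustrative, not part of this PR): with the legacy fallback gone, DecodeIndex returns only the index and an error; an index in the legacy format now fails to decode until it is rebuilt with `restic repair index`. A minimal sketch of an updated call site, using a hypothetical helper and package name and assuming it lives inside the restic module:

package indexutil

import (
	"context"

	"github.com/restic/restic/internal/repository/index"
	"github.com/restic/restic/internal/restic"
)

// loadOneIndex loads and decodes a single index file; note the two-value
// return from DecodeIndex instead of the former (idx, oldFormat, err).
func loadOneIndex(ctx context.Context, repo restic.LoaderUnpacked, id restic.ID) (*index.Index, error) {
	buf, err := repo.LoadUnpacked(ctx, restic.IndexFile, id)
	if err != nil {
		return nil, err
	}
	return index.DecodeIndex(buf, id)
}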
@ -12,7 +12,7 @@ import (
// It is guaranteed that the function is not run concurrently. If the callback
// returns an error, this function is cancelled and also returns that error.
func ForAllIndexes(ctx context.Context, lister restic.Lister, repo restic.LoaderUnpacked,
fn func(id restic.ID, index *Index, oldFormat bool, err error) error) error {
fn func(id restic.ID, index *Index, err error) error) error {

// decoding an index can take quite some time such that this can be both CPU- or IO-bound
// as the whole index is kept in memory anyways, a few workers too much don't matter

@ -22,15 +22,14 @@ func ForAllIndexes(ctx context.Context, lister restic.Lister, repo restic.Loader
return restic.ParallelList(ctx, lister, restic.IndexFile, workerCount, func(ctx context.Context, id restic.ID, _ int64) error {
var err error
var idx *Index
oldFormat := false

buf, err := repo.LoadUnpacked(ctx, restic.IndexFile, id)
if err == nil {
idx, oldFormat, err = DecodeIndex(buf, id)
idx, err = DecodeIndex(buf, id)
}

m.Lock()
defer m.Unlock()
return fn(id, idx, oldFormat, err)
return fn(id, idx, err)
})
}
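Note (illustrative, not part of this PR): every ForAllIndexes caller drops the oldFormat parameter from its callback, as the call sites changed elsewhere in this PR show. A sketch of the updated usage, with a hypothetical helper and package name inside the restic module:

package indexutil

import (
	"context"

	"github.com/restic/restic/internal/repository/index"
	"github.com/restic/restic/internal/restic"
)

// countIndexes iterates over all index files using the new two-argument
// callback; decode errors are simply passed upwards.
func countIndexes(ctx context.Context, repo restic.ListerLoaderUnpacked) (int, error) {
	n := 0
	err := index.ForAllIndexes(ctx, repo, repo, func(id restic.ID, idx *index.Index, err error) error {
		if err != nil {
			return err
		}
		n++
		return nil
	})
	return n, err
}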
@ -27,7 +27,7 @@ func TestRepositoryForAllIndexes(t *testing.T) {
// check that all expected indexes are loaded without errors
indexIDs := restic.NewIDSet()
var indexErr error
rtest.OK(t, index.ForAllIndexes(context.TODO(), repo, repo, func(id restic.ID, index *index.Index, oldFormat bool, err error) error {
rtest.OK(t, index.ForAllIndexes(context.TODO(), repo, repo, func(id restic.ID, index *index.Index, err error) error {
if err != nil {
indexErr = err
}

@ -40,7 +40,7 @@ func TestRepositoryForAllIndexes(t *testing.T) {
// must failed with the returned error
iterErr := errors.New("error to pass upwards")

err := index.ForAllIndexes(context.TODO(), repo, repo, func(id restic.ID, index *index.Index, oldFormat bool, err error) error {
err := index.ForAllIndexes(context.TODO(), repo, repo, func(id restic.ID, index *index.Index, err error) error {
return iterErr
})
@ -8,7 +8,6 @@ import (
"sync"
"testing"

"github.com/restic/restic/internal/feature"
"github.com/restic/restic/internal/repository/index"
"github.com/restic/restic/internal/restic"
rtest "github.com/restic/restic/internal/test"

@ -53,11 +52,9 @@ func TestIndexSerialize(t *testing.T) {
rtest.OK(t, err)

idx2ID := restic.NewRandomID()
idx2, oldFormat, err := index.DecodeIndex(wr.Bytes(), idx2ID)
idx2, err := index.DecodeIndex(wr.Bytes(), idx2ID)
rtest.OK(t, err)
rtest.Assert(t, idx2 != nil,
"nil returned for decoded index")
rtest.Assert(t, !oldFormat, "new index format recognized as old format")
rtest.Assert(t, idx2 != nil, "nil returned for decoded index")
indexID, err := idx2.IDs()
rtest.OK(t, err)
rtest.Equals(t, indexID, restic.IDs{idx2ID})

@ -123,13 +120,10 @@ func TestIndexSerialize(t *testing.T) {
rtest.OK(t, err)
rtest.Equals(t, restic.IDs{id}, ids)

idx3, oldFormat, err := index.DecodeIndex(wr3.Bytes(), id)
idx3, err := index.DecodeIndex(wr3.Bytes(), id)
rtest.OK(t, err)
rtest.Assert(t, idx3 != nil,
"nil returned for decoded index")
rtest.Assert(t, idx3.Final(),
"decoded index is not final")
rtest.Assert(t, !oldFormat, "new index format recognized as old format")
rtest.Assert(t, idx3 != nil, "nil returned for decoded index")
rtest.Assert(t, idx3.Final(), "decoded index is not final")

// all new blobs must be in the index
for _, testBlob := range newtests {

@ -246,31 +240,6 @@ var docExampleV2 = []byte(`
}
`)

var docOldExample = []byte(`
[ {
"id": "73d04e6125cf3c28a299cc2f3cca3b78ceac396e4fcf9575e34536b26782413c",
"blobs": [
{
"id": "3ec79977ef0cf5de7b08cd12b874cd0f62bbaf7f07f3497a5b1bbcc8cb39b1ce",
"type": "data",
"offset": 0,
"length": 38
},{
"id": "9ccb846e60d90d4eb915848add7aa7ea1e4bbabfc60e573db9f7bfb2789afbae",
"type": "tree",
"offset": 38,
"length": 112
},
{
"id": "d3dc577b4ffd38cc4b32122cabf8655a0223ed22edfd93b353dc0c3f2b0fdf66",
"type": "data",
"offset": 150,
"length": 123
}
]
} ]
`)

var exampleTests = []struct {
id, packID restic.ID
tpe restic.BlobType

@ -312,9 +281,8 @@ func TestIndexUnserialize(t *testing.T) {
{docExampleV1, 1},
{docExampleV2, 2},
} {
idx, oldFormat, err := index.DecodeIndex(task.idxBytes, restic.NewRandomID())
idx, err := index.DecodeIndex(task.idxBytes, restic.NewRandomID())
rtest.OK(t, err)
rtest.Assert(t, !oldFormat, "new index format recognized as old format")

for _, test := range exampleTests {
list := idx.Lookup(restic.BlobHandle{ID: test.id, Type: test.tpe}, nil)

@ -387,7 +355,7 @@ func BenchmarkDecodeIndex(b *testing.B) {
b.ResetTimer()

for i := 0; i < b.N; i++ {
_, _, err := index.DecodeIndex(benchmarkIndexJSON, id)
_, err := index.DecodeIndex(benchmarkIndexJSON, id)
rtest.OK(b, err)
}
}

@ -400,7 +368,7 @@ func BenchmarkDecodeIndexParallel(b *testing.B) {

b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
_, _, err := index.DecodeIndex(benchmarkIndexJSON, id)
_, err := index.DecodeIndex(benchmarkIndexJSON, id)
rtest.OK(b, err)
}
})

@ -426,27 +394,6 @@ func BenchmarkEncodeIndex(b *testing.B) {
}
}

func TestIndexUnserializeOld(t *testing.T) {
defer feature.TestSetFlag(t, feature.Flag, feature.DeprecateLegacyIndex, false)()

idx, oldFormat, err := index.DecodeIndex(docOldExample, restic.NewRandomID())
rtest.OK(t, err)
rtest.Assert(t, oldFormat, "old index format recognized as new format")

for _, test := range exampleTests {
list := idx.Lookup(restic.BlobHandle{ID: test.id, Type: test.tpe}, nil)
if len(list) != 1 {
t.Errorf("expected one result for blob %v, got %v: %v", test.id.Str(), len(list), list)
}
blob := list[0]

rtest.Equals(t, test.packID, blob.PackID)
rtest.Equals(t, test.tpe, blob.Type)
rtest.Equals(t, test.offset, blob.Offset)
rtest.Equals(t, test.length, blob.Length)
}
}

func TestIndexPacks(t *testing.T) {
idx := index.NewIndex()
packs := restic.NewIDSet()
@ -265,7 +265,7 @@ func (mi *MasterIndex) MergeFinalIndexes() error {
return nil
}

func (mi *MasterIndex) Load(ctx context.Context, r restic.ListerLoaderUnpacked, p *progress.Counter, cb func(id restic.ID, idx *Index, oldFormat bool, err error) error) error {
func (mi *MasterIndex) Load(ctx context.Context, r restic.ListerLoaderUnpacked, p *progress.Counter, cb func(id restic.ID, idx *Index, err error) error) error {
indexList, err := restic.MemorizeList(ctx, r, restic.IndexFile)
if err != nil {
return err

@ -284,12 +284,12 @@ func (mi *MasterIndex) Load(ctx context.Context, r restic.ListerLoaderUnpacked,
defer p.Done()
}

err = ForAllIndexes(ctx, indexList, r, func(id restic.ID, idx *Index, oldFormat bool, err error) error {
err = ForAllIndexes(ctx, indexList, r, func(id restic.ID, idx *Index, err error) error {
if p != nil {
p.Add(1)
}
if cb != nil {
err = cb(id, idx, oldFormat, err)
err = cb(id, idx, err)
}
if err != nil {
return err

@ -365,8 +365,7 @@ func (mi *MasterIndex) Rewrite(ctx context.Context, repo restic.Unpacked, exclud

var rewriteWg sync.WaitGroup
type rewriteTask struct {
idx *Index
oldFormat bool
idx *Index
}
rewriteCh := make(chan rewriteTask)
loader := func() error {

@ -376,13 +375,13 @@ func (mi *MasterIndex) Rewrite(ctx context.Context, repo restic.Unpacked, exclud
if err != nil {
return fmt.Errorf("LoadUnpacked(%v): %w", id.Str(), err)
}
idx, oldFormat, err := DecodeIndex(buf, id)
idx, err := DecodeIndex(buf, id)
if err != nil {
return err
}

select {
case rewriteCh <- rewriteTask{idx, oldFormat}:
case rewriteCh <- rewriteTask{idx}:
case <-wgCtx.Done():
return wgCtx.Err()
}

@ -411,8 +410,8 @@ func (mi *MasterIndex) Rewrite(ctx context.Context, repo restic.Unpacked, exclud
defer close(saveCh)
newIndex := NewIndex()
for task := range rewriteCh {
// always rewrite indexes using the old format, that include a pack that must be removed or that are not full
if !task.oldFormat && len(task.idx.Packs().Intersect(excludePacks)) == 0 && IndexFull(task.idx) {
// always rewrite indexes that include a pack that must be removed or that are not full
if len(task.idx.Packs().Intersect(excludePacks)) == 0 && IndexFull(task.idx) {
// make sure that each pack is only stored exactly once in the index
excludePacks.Merge(task.idx.Packs())
// index is already up to date
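Note (illustrative, not part of this PR): MasterIndex.Load forwards the same two-argument callback to ForAllIndexes, so callers only see the index and any decode error. A sketch with a hypothetical wrapper and package name inside the restic module; the progress counter is omitted here:

package indexutil

import (
	"context"
	"fmt"

	"github.com/restic/restic/internal/repository/index"
	"github.com/restic/restic/internal/restic"
)

// loadMasterIndex loads all indexes into a MasterIndex and turns any decode
// error into a hard failure instead of treating it as a legacy-format index.
func loadMasterIndex(ctx context.Context, r restic.ListerLoaderUnpacked) (*index.MasterIndex, error) {
	mi := index.NewMasterIndex()
	err := mi.Load(ctx, r, nil, func(id restic.ID, idx *index.Index, err error) error {
		if err != nil {
			return fmt.Errorf("index %v is damaged: %w", id, err)
		}
		return nil
	})
	return mi, err
}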
@ -33,7 +33,7 @@ func RepairIndex(ctx context.Context, repo *Repository, opts RepairIndexOptions,
} else {
printer.P("loading indexes...\n")
mi := index.NewMasterIndex()
err := index.ForAllIndexes(ctx, repo, repo, func(id restic.ID, idx *index.Index, _ bool, err error) error {
err := index.ForAllIndexes(ctx, repo, repo, func(id restic.ID, idx *index.Index, err error) error {
if err != nil {
printer.E("removing invalid index %v: %v\n", id, err)
obsoleteIndexes = append(obsoleteIndexes, id)
@ -4,10 +4,8 @@ import (
"bytes"
"context"
"crypto/sha256"
"fmt"
"io"
"math/rand"
"os"
"path/filepath"
"strings"
"sync"

@ -261,11 +259,7 @@ func loadIndex(ctx context.Context, repo restic.LoaderUnpacked, id restic.ID) (*
return nil, err
}

idx, oldFormat, err := index.DecodeIndex(buf, id)
if oldFormat {
fmt.Fprintf(os.Stderr, "index %v has old format\n", id.Str())
}
return idx, err
return index.DecodeIndex(buf, id)
}

func TestRepositoryLoadUnpackedBroken(t *testing.T) {
@ -1,12 +0,0 @@
package repository

import (
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/backend/s3"
)

// AsS3Backend extracts the S3 backend from a repository
// TODO remove me once restic 0.17 was released
func AsS3Backend(repo *Repository) *s3.Backend {
return backend.AsBackend[*s3.Backend](repo.be)
}