Use "pack file" instead of "data file" (#2885)

- Renamed variables, especially DataFile to PackFile
- Updated the affected comments
- Always use "pack file" in the documentation
aawsome 2020-08-16 11:16:38 +02:00 committed by GitHub
parent 643bbbe156
commit 0fed6a8dfc
45 changed files with 126 additions and 126 deletions
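
The core of the change is the renamed file type constant in internal/restic; call sites only swap the identifier, since the constant keeps its on-disk value "data". A hedged sketch of a typical call site after the rename (identifiers such as gopts, repo, id and the backend package are taken from the hunks below, not a complete program):

// Load a pack file from the backend by its ID; restic.PackFile still
// resolves to the repository's data/ directory, so no layout changes.
h := restic.Handle{Type: restic.PackFile, Name: id.String()}
buf, err := backend.LoadAll(gopts.ctx, nil, repo.Backend(), h)
if err != nil {
	return err
}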

View file

@@ -149,7 +149,7 @@ func runCat(gopts GlobalOptions, args []string) error {
 switch tpe {
 case "pack":
-h := restic.Handle{Type: restic.DataFile, Name: id.String()}
+h := restic.Handle{Type: restic.PackFile, Name: id.String()}
 buf, err := backend.LoadAll(gopts.ctx, nil, repo.Backend(), h)
 if err != nil {
 return err

View file

@@ -84,8 +84,8 @@ type Blob struct {
 func printPacks(repo *repository.Repository, wr io.Writer) error {
-return repo.List(context.TODO(), restic.DataFile, func(id restic.ID, size int64) error {
-h := restic.Handle{Type: restic.DataFile, Name: id.String()}
+return repo.List(context.TODO(), restic.PackFile, func(id restic.ID, size int64) error {
+h := restic.Handle{Type: restic.PackFile, Name: id.String()}
 blobs, err := pack.List(repo.Key(), restic.ReaderAt(repo.Backend(), h), size)
 if err != nil {

View file

@@ -417,7 +417,7 @@ func (f *Finder) packsToBlobs(ctx context.Context, packs []string) error {
 packsFound := 0
 debug.Log("Looking for packs...")
-err := f.repo.List(ctx, restic.DataFile, func(id restic.ID, size int64) error {
+err := f.repo.List(ctx, restic.PackFile, func(id restic.ID, size int64) error {
 if allPacksFound {
 return nil
 }

View file

@@ -50,7 +50,7 @@ func runList(cmd *cobra.Command, opts GlobalOptions, args []string) error {
 var t restic.FileType
 switch args[0] {
 case "packs":
-t = restic.DataFile
+t = restic.PackFile
 case "index":
 t = restic.IndexFile
 case "snapshots":

View file

@@ -128,7 +128,7 @@ func pruneRepository(gopts GlobalOptions, repo restic.Repository) error {
 }
 Verbosef("counting files in repo\n")
-err = repo.List(ctx, restic.DataFile, func(restic.ID, int64) error {
+err = repo.List(ctx, restic.PackFile, func(restic.ID, int64) error {
 stats.packs++
 return nil
 })
@@ -277,7 +277,7 @@ func pruneRepository(gopts GlobalOptions, repo restic.Repository) error {
 if len(removePacks) != 0 {
 Verbosef("remove %d old packs\n", len(removePacks))
-DeleteFiles(gopts, repo, removePacks, restic.DataFile)
+DeleteFiles(gopts, repo, removePacks, restic.PackFile)
 }
 Verbosef("done\n")

View file

@@ -53,7 +53,7 @@ func rebuildIndex(ctx context.Context, repo restic.Repository, ignorePacks resti
 Verbosef("counting files in repo\n")
 var packs uint64
-err := repo.List(ctx, restic.DataFile, func(restic.ID, int64) error {
+err := repo.List(ctx, restic.PackFile, func(restic.ID, int64) error {
 packs++
 return nil
 })

View file

@@ -386,12 +386,12 @@ func TestBackupSelfHealing(t *testing.T) {
 }
 }
-// remove all data packs
-rtest.OK(t, r.List(env.gopts.ctx, restic.DataFile, func(id restic.ID, size int64) error {
+// remove all packs containing data blobs
+rtest.OK(t, r.List(env.gopts.ctx, restic.PackFile, func(id restic.ID, size int64) error {
 if treePacks.Has(id) {
 return nil
 }
-return r.Backend().Remove(env.gopts.ctx, restic.Handle{Type: restic.DataFile, Name: id.String()})
+return r.Backend().Remove(env.gopts.ctx, restic.Handle{Type: restic.PackFile, Name: id.String()})
 }))
 testRunRebuildIndex(t, env.gopts)

View file

@@ -134,10 +134,10 @@ If the repository structure is intact, restic will show that no errors were foun
 check snapshots, trees and blobs
 no errors were found
-By default, the ``check`` command does not verify that the actual data files
+By default, the ``check`` command does not verify that the actual pack files
 on disk in the repository are unmodified, because doing so requires reading
-a copy of every data file in the repository. To tell restic to also verify the
-integrity of the data files in the repository, use the ``--read-data`` flag:
+a copy of every pack file in the repository. To tell restic to also verify the
+integrity of the pack files in the repository, use the ``--read-data`` flag:
 .. code-block:: console
@@ -151,16 +151,16 @@ integrity of the data files in the repository, use the ``--read-data`` flag:
 duration: 0:00
 no errors were found
-.. note:: Since ``--read-data`` has to download all data files in the
+.. note:: Since ``--read-data`` has to download all pack files in the
 repository, beware that it might incur higher bandwidth costs than usual
 and also that it takes more time than the default ``check``.
 Alternatively, use the ``--read-data-subset=n/t`` parameter to check only a
-subset of the repository data files at a time. The parameter takes two values,
-``n`` and ``t``. When the check command runs, all data files in the repository
+subset of the repository pack files at a time. The parameter takes two values,
+``n`` and ``t``. When the check command runs, all pack files in the repository
 are logically divided in ``t`` (roughly equal) groups, and only files that
 belong to group number ``n`` are checked. For example, the following commands
-check all repository data files over 5 separate invocations:
+check all repository pack files over 5 separate invocations:
 .. code-block:: console

View file

@@ -127,7 +127,7 @@ S3 Legacy Layout
 Unfortunately during development the AWS S3 backend uses slightly different
 paths (directory names use singular instead of plural for ``key``,
-``lock``, and ``snapshot`` files), and the data files are stored directly below
+``lock``, and ``snapshot`` files), and the pack files are stored directly below
 the ``data`` directory. The S3 Legacy repository layout looks like this:
 ::

View file

@@ -383,7 +383,7 @@ func (be *Backend) removeKeys(ctx context.Context, t restic.FileType) error {
 // Delete removes all restic keys in the bucket. It will not remove the bucket itself.
 func (be *Backend) Delete(ctx context.Context) error {
 alltypes := []restic.FileType{
-restic.DataFile,
+restic.PackFile,
 restic.KeyFile,
 restic.LockFile,
 restic.SnapshotFile,

View file

@@ -172,7 +172,7 @@ func TestUploadLargeFile(t *testing.T) {
 data := rtest.Random(23, 300*1024*1024)
 id := restic.Hash(data)
-h := restic.Handle{Name: id.String(), Type: restic.DataFile}
+h := restic.Handle{Name: id.String(), Type: restic.PackFile}
 t.Logf("hash of %d bytes: %v", len(data), id)

View file

@@ -308,7 +308,7 @@ func (be *b2Backend) removeKeys(ctx context.Context, t restic.FileType) error {
 // Delete removes all restic keys in the bucket. It will not remove the bucket itself.
 func (be *b2Backend) Delete(ctx context.Context) error {
 alltypes := []restic.FileType{
-restic.DataFile,
+restic.PackFile,
 restic.KeyFile,
 restic.LockFile,
 restic.SnapshotFile,

View file

@@ -78,7 +78,7 @@ func TestBackendListRetry(t *testing.T) {
 }
 var listed []string
-err := retryBackend.List(context.TODO(), restic.DataFile, func(fi restic.FileInfo) error {
+err := retryBackend.List(context.TODO(), restic.PackFile, func(fi restic.FileInfo) error {
 listed = append(listed, fi.Name)
 return nil
 })
@@ -112,7 +112,7 @@ func TestBackendListRetryErrorFn(t *testing.T) {
 var listed []string
 run := 0
-err := retryBackend.List(context.TODO(), restic.DataFile, func(fi restic.FileInfo) error {
+err := retryBackend.List(context.TODO(), restic.PackFile, func(fi restic.FileInfo) error {
 t.Logf("fn called for %v", fi.Name)
 run++
 // return an error for the third item in the list
@@ -168,7 +168,7 @@ func TestBackendListRetryErrorBackend(t *testing.T) {
 }
 var listed []string
-err := retryBackend.List(context.TODO(), restic.DataFile, func(fi restic.FileInfo) error {
+err := retryBackend.List(context.TODO(), restic.PackFile, func(fi restic.FileInfo) error {
 t.Logf("fn called for %v", fi.Name)
 listed = append(listed, fi.Name)
 return nil

View file

@@ -439,7 +439,7 @@ func (be *Backend) removeKeys(ctx context.Context, t restic.FileType) error {
 // Delete removes all restic keys in the bucket. It will not remove the bucket itself.
 func (be *Backend) Delete(ctx context.Context) error {
 alltypes := []restic.FileType{
-restic.DataFile,
+restic.PackFile,
 restic.KeyFile,
 restic.LockFile,
 restic.SnapshotFile,

View file

@@ -16,7 +16,7 @@ type DefaultLayout struct {
 }
 var defaultLayoutPaths = map[restic.FileType]string{
-restic.DataFile: "data",
+restic.PackFile: "data",
 restic.SnapshotFile: "snapshots",
 restic.IndexFile: "index",
 restic.LockFile: "locks",
@@ -36,7 +36,7 @@ func (l *DefaultLayout) Name() string {
 func (l *DefaultLayout) Dirname(h restic.Handle) string {
 p := defaultLayoutPaths[h.Type]
-if h.Type == restic.DataFile && len(h.Name) > 2 {
+if h.Type == restic.PackFile && len(h.Name) > 2 {
 p = l.Join(p, h.Name[:2]) + "/"
 }
@@ -62,7 +62,7 @@ func (l *DefaultLayout) Paths() (dirs []string) {
 // also add subdirs
 for i := 0; i < 256; i++ {
 subdir := hex.EncodeToString([]byte{byte(i)})
-dirs = append(dirs, l.Join(l.Path, defaultLayoutPaths[restic.DataFile], subdir))
+dirs = append(dirs, l.Join(l.Path, defaultLayoutPaths[restic.PackFile], subdir))
 }
 return dirs
@@ -70,7 +70,7 @@ func (l *DefaultLayout) Paths() (dirs []string) {
 // Basedir returns the base dir name for type t.
 func (l *DefaultLayout) Basedir(t restic.FileType) (dirname string, subdirs bool) {
-if t == restic.DataFile {
+if t == restic.PackFile {
 subdirs = true
 }
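
Since Dirname and Basedir only change the type they compare against, the on-disk paths stay exactly as before; a hedged illustration of what the default layout produces for a pack handle (the handle name and resulting path mirror the layout tests further down):

// Hypothetical handle: pack files keep living under data/<first two characters of the name>/.
h := restic.Handle{Type: restic.PackFile, Name: "0123456"}
// DefaultLayout resolves this to <repo>/data/01/0123456,
// while the other file types stay directly inside their directory.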

View file

@@ -11,7 +11,7 @@ type S3LegacyLayout struct {
 }
 var s3LayoutPaths = map[restic.FileType]string{
-restic.DataFile: "data",
+restic.PackFile: "data",
 restic.SnapshotFile: "snapshot",
 restic.IndexFile: "index",
 restic.LockFile: "lock",

View file

@@ -25,7 +25,7 @@ func TestDefaultLayout(t *testing.T) {
 {
 tempdir,
 filepath.Join,
-restic.Handle{Type: restic.DataFile, Name: "0123456"},
+restic.Handle{Type: restic.PackFile, Name: "0123456"},
 filepath.Join(tempdir, "data", "01", "0123456"),
 },
 {
@@ -61,7 +61,7 @@ func TestDefaultLayout(t *testing.T) {
 {
 "",
 path.Join,
-restic.Handle{Type: restic.DataFile, Name: "0123456"},
+restic.Handle{Type: restic.PackFile, Name: "0123456"},
 "data/01/0123456",
 },
 {
@@ -148,7 +148,7 @@ func TestRESTLayout(t *testing.T) {
 filename string
 }{
 {
-restic.Handle{Type: restic.DataFile, Name: "0123456"},
+restic.Handle{Type: restic.PackFile, Name: "0123456"},
 filepath.Join(path, "data", "0123456"),
 },
 {
@@ -216,7 +216,7 @@ func TestRESTLayoutURLs(t *testing.T) {
 }{
 {
 &RESTLayout{URL: "https://hostname.foo", Path: "", Join: path.Join},
-restic.Handle{Type: restic.DataFile, Name: "foobar"},
+restic.Handle{Type: restic.PackFile, Name: "foobar"},
 "https://hostname.foo/data/foobar",
 "https://hostname.foo/data/",
 },
@@ -234,7 +234,7 @@ func TestRESTLayoutURLs(t *testing.T) {
 },
 {
 &S3LegacyLayout{URL: "https://hostname.foo", Path: "/", Join: path.Join},
-restic.Handle{Type: restic.DataFile, Name: "foobar"},
+restic.Handle{Type: restic.PackFile, Name: "foobar"},
 "https://hostname.foo/data/foobar",
 "https://hostname.foo/data/",
 },
@@ -252,7 +252,7 @@ func TestRESTLayoutURLs(t *testing.T) {
 },
 {
 &S3LegacyLayout{URL: "", Path: "", Join: path.Join},
-restic.Handle{Type: restic.DataFile, Name: "foobar"},
+restic.Handle{Type: restic.PackFile, Name: "foobar"},
 "data/foobar",
 "data/",
 },
@@ -294,7 +294,7 @@ func TestS3LegacyLayout(t *testing.T) {
 filename string
 }{
 {
-restic.Handle{Type: restic.DataFile, Name: "0123456"},
+restic.Handle{Type: restic.PackFile, Name: "0123456"},
 filepath.Join(path, "data", "0123456"),
 },
 {
@@ -419,8 +419,8 @@ func TestParseLayout(t *testing.T) {
 }
 // test that the functions work (and don't panic)
-_ = layout.Dirname(restic.Handle{Type: restic.DataFile})
-_ = layout.Filename(restic.Handle{Type: restic.DataFile, Name: "1234"})
+_ = layout.Dirname(restic.Handle{Type: restic.PackFile})
+_ = layout.Filename(restic.Handle{Type: restic.PackFile, Name: "1234"})
 _ = layout.Paths()
 layoutName := fmt.Sprintf("%T", layout)

View file

@@ -17,7 +17,7 @@ func TestLayout(t *testing.T) {
 filename string
 layout string
 failureExpected bool
-datafiles map[string]bool
+packfiles map[string]bool
 }{
 {"repo-layout-default.tar.gz", "", false, map[string]bool{
 "aa464e9fd598fe4202492ee317ffa728e82fa83a1de1a61996e5bd2d6651646c": false,
@@ -48,9 +48,9 @@ func TestLayout(t *testing.T) {
 t.Fatalf("Open() returned nil but no error")
 }
-datafiles := make(map[string]bool)
-err = be.List(context.TODO(), restic.DataFile, func(fi restic.FileInfo) error {
-datafiles[fi.Name] = false
+packs := make(map[string]bool)
+err = be.List(context.TODO(), restic.PackFile, func(fi restic.FileInfo) error {
+packs[fi.Name] = false
 return nil
 })
@@ -58,19 +58,19 @@ func TestLayout(t *testing.T) {
 t.Fatalf("List() returned error %v", err)
 }
-if len(datafiles) == 0 {
-t.Errorf("List() returned zero data files")
+if len(packs) == 0 {
+t.Errorf("List() returned zero pack files")
 }
-for id := range test.datafiles {
-if _, ok := datafiles[id]; !ok {
-t.Errorf("datafile with id %v not found", id)
+for id := range test.packfiles {
+if _, ok := packs[id]; !ok {
+t.Errorf("packfile with id %v not found", id)
 }
-datafiles[id] = true
+packs[id] = true
 }
-for id, v := range datafiles {
+for id, v := range packs {
 if !v {
 t.Errorf("unexpected id %v found", id)
 }

View file

@@ -32,7 +32,7 @@ func TestRcloneExit(t *testing.T) {
 for i := 0; i < 10; i++ {
 _, err = be.Stat(context.TODO(), restic.Handle{
 Name: "foo",
-Type: restic.DataFile,
+Type: restic.PackFile,
 })
 rtest.Assert(t, err != nil, "expected an error")
 }

View file

@@ -442,7 +442,7 @@ func (b *Backend) removeKeys(ctx context.Context, t restic.FileType) error {
 // Delete removes all data in the backend.
 func (b *Backend) Delete(ctx context.Context) error {
 alltypes := []restic.FileType{
-restic.DataFile,
+restic.PackFile,
 restic.KeyFile,
 restic.LockFile,
 restic.SnapshotFile,

View file

@@ -123,7 +123,7 @@ func TestListAPI(t *testing.T) {
 }
 var list []restic.FileInfo
-err = be.List(context.TODO(), restic.DataFile, func(fi restic.FileInfo) error {
+err = be.List(context.TODO(), restic.PackFile, func(fi restic.FileInfo) error {
 list = append(list, fi)
 return nil
 })

View file

@@ -445,7 +445,7 @@ func (be *Backend) List(ctx context.Context, t restic.FileType, fn func(restic.F
 // Remove keys for a specified backend type.
 func (be *Backend) removeKeys(ctx context.Context, t restic.FileType) error {
-return be.List(ctx, restic.DataFile, func(fi restic.FileInfo) error {
+return be.List(ctx, restic.PackFile, func(fi restic.FileInfo) error {
 return be.Remove(ctx, restic.Handle{Type: t, Name: fi.Name})
 })
 }
@@ -453,7 +453,7 @@ func (be *Backend) removeKeys(ctx context.Context, t restic.FileType) error {
 // Delete removes all restic keys in the bucket. It will not remove the bucket itself.
 func (be *Backend) Delete(ctx context.Context) error {
 alltypes := []restic.FileType{
-restic.DataFile,
+restic.PackFile,
 restic.KeyFile,
 restic.LockFile,
 restic.SnapshotFile,

View file

@@ -23,7 +23,7 @@ func TestLayout(t *testing.T) {
 filename string
 layout string
 failureExpected bool
-datafiles map[string]bool
+packfiles map[string]bool
 }{
 {"repo-layout-default.tar.gz", "", false, map[string]bool{
 "aa464e9fd598fe4202492ee317ffa728e82fa83a1de1a61996e5bd2d6651646c": false,
@@ -55,26 +55,26 @@ func TestLayout(t *testing.T) {
 t.Fatalf("Open() returned nil but no error")
 }
-datafiles := make(map[string]bool)
-err = be.List(context.TODO(), restic.DataFile, func(fi restic.FileInfo) error {
-datafiles[fi.Name] = false
+packs := make(map[string]bool)
+err = be.List(context.TODO(), restic.PackFile, func(fi restic.FileInfo) error {
+packs[fi.Name] = false
 return nil
 })
 rtest.OK(t, err)
-if len(datafiles) == 0 {
-t.Errorf("List() returned zero data files")
+if len(packs) == 0 {
+t.Errorf("List() returned zero pack files")
 }
-for id := range test.datafiles {
-if _, ok := datafiles[id]; !ok {
-t.Errorf("datafile with id %v not found", id)
+for id := range test.packfiles {
+if _, ok := packs[id]; !ok {
+t.Errorf("packfile with id %v not found", id)
 }
-datafiles[id] = true
+packs[id] = true
 }
-for id, v := range datafiles {
+for id, v := range packs {
 if !v {
 t.Errorf("unexpected id %v found", id)
 }

View file

@@ -298,7 +298,7 @@ func (be *beSwift) IsNotExist(err error) bool {
 // It will not remove the container itself.
 func (be *beSwift) Delete(ctx context.Context) error {
 alltypes := []restic.FileType{
-restic.DataFile,
+restic.PackFile,
 restic.KeyFile,
 restic.LockFile,
 restic.SnapshotFile,

View file

@@ -13,7 +13,7 @@ import (
 func saveRandomFile(t testing.TB, be restic.Backend, length int) ([]byte, restic.Handle) {
 data := test.Random(23, length)
 id := restic.Hash(data)
-handle := restic.Handle{Type: restic.DataFile, Name: id.String()}
+handle := restic.Handle{Type: restic.PackFile, Name: id.String()}
 err := be.Save(context.TODO(), handle, restic.NewByteReader(data))
 if err != nil {
 t.Fatalf("Save() error: %+v", err)
@@ -146,7 +146,7 @@ func (s *Suite) BenchmarkSave(t *testing.B) {
 length := 1<<24 + 2123
 data := test.Random(23, length)
 id := restic.Hash(data)
-handle := restic.Handle{Type: restic.DataFile, Name: id.String()}
+handle := restic.Handle{Type: restic.PackFile, Name: id.String()}
 rd := restic.NewByteReader(data)
 t.SetBytes(int64(length))

View file

@@ -123,7 +123,7 @@ func (s *Suite) TestLoad(t *testing.T) {
 t.Fatalf("Load() did not return an error for invalid handle")
 }
-err = testLoad(b, restic.Handle{Type: restic.DataFile, Name: "foobar"}, 0, 0)
+err = testLoad(b, restic.Handle{Type: restic.PackFile, Name: "foobar"}, 0, 0)
 if err == nil {
 t.Fatalf("Load() did not return an error for non-existing blob")
 }
@@ -133,7 +133,7 @@ func (s *Suite) TestLoad(t *testing.T) {
 data := test.Random(23, length)
 id := restic.Hash(data)
-handle := restic.Handle{Type: restic.DataFile, Name: id.String()}
+handle := restic.Handle{Type: restic.PackFile, Name: id.String()}
 err = b.Save(context.TODO(), handle, restic.NewByteReader(data))
 if err != nil {
 t.Fatalf("Save() error: %+v", err)
@@ -236,7 +236,7 @@ func (s *Suite) TestList(t *testing.T) {
 // Check that the backend is empty to start with
 var found []string
-err := b.List(context.TODO(), restic.DataFile, func(fi restic.FileInfo) error {
+err := b.List(context.TODO(), restic.PackFile, func(fi restic.FileInfo) error {
 found = append(found, fi.Name)
 return nil
 })
@@ -252,7 +252,7 @@ func (s *Suite) TestList(t *testing.T) {
 for i := 0; i < numTestFiles; i++ {
 data := test.Random(rand.Int(), rand.Intn(100)+55)
 id := restic.Hash(data)
-h := restic.Handle{Type: restic.DataFile, Name: id.String()}
+h := restic.Handle{Type: restic.PackFile, Name: id.String()}
 err := b.Save(context.TODO(), h, restic.NewByteReader(data))
 if err != nil {
 t.Fatal(err)
@@ -281,7 +281,7 @@ func (s *Suite) TestList(t *testing.T) {
 s.SetListMaxItems(test.maxItems)
 }
-err := b.List(context.TODO(), restic.DataFile, func(fi restic.FileInfo) error {
+err := b.List(context.TODO(), restic.PackFile, func(fi restic.FileInfo) error {
 id, err := restic.ParseID(fi.Name)
 if err != nil {
 t.Fatal(err)
@@ -319,7 +319,7 @@ func (s *Suite) TestList(t *testing.T) {
 t.Logf("remove %d files", numTestFiles)
 handles := make([]restic.Handle, 0, len(list1))
 for id := range list1 {
-handles = append(handles, restic.Handle{Type: restic.DataFile, Name: id.String()})
+handles = append(handles, restic.Handle{Type: restic.PackFile, Name: id.String()})
 }
 err = s.delayedRemove(t, b, handles...)
@@ -342,7 +342,7 @@ func (s *Suite) TestListCancel(t *testing.T) {
 for i := 0; i < numTestFiles; i++ {
 data := []byte(fmt.Sprintf("random test blob %v", i))
 id := restic.Hash(data)
-h := restic.Handle{Type: restic.DataFile, Name: id.String()}
+h := restic.Handle{Type: restic.PackFile, Name: id.String()}
 err := b.Save(context.TODO(), h, restic.NewByteReader(data))
 if err != nil {
 t.Fatal(err)
@@ -355,7 +355,7 @@ func (s *Suite) TestListCancel(t *testing.T) {
 cancel()
 // pass in a cancelled context
-err := b.List(ctx, restic.DataFile, func(fi restic.FileInfo) error {
+err := b.List(ctx, restic.PackFile, func(fi restic.FileInfo) error {
 t.Errorf("got FileInfo %v for cancelled context", fi)
 return nil
 })
@@ -370,7 +370,7 @@ func (s *Suite) TestListCancel(t *testing.T) {
 defer cancel()
 i := 0
-err := b.List(ctx, restic.DataFile, func(fi restic.FileInfo) error {
+err := b.List(ctx, restic.PackFile, func(fi restic.FileInfo) error {
 i++
 // cancel the context on the first file
 if i == 1 {
@@ -393,7 +393,7 @@ func (s *Suite) TestListCancel(t *testing.T) {
 defer cancel()
 i := 0
-err := b.List(ctx, restic.DataFile, func(fi restic.FileInfo) error {
+err := b.List(ctx, restic.PackFile, func(fi restic.FileInfo) error {
 // cancel the context at the last file
 i++
 if i == numTestFiles {
@@ -420,7 +420,7 @@ func (s *Suite) TestListCancel(t *testing.T) {
 i := 0
 // pass in a context with a timeout
-err := b.List(ctxTimeout, restic.DataFile, func(fi restic.FileInfo) error {
+err := b.List(ctxTimeout, restic.PackFile, func(fi restic.FileInfo) error {
 i++
 // wait until the context is cancelled
@@ -483,7 +483,7 @@ func (s *Suite) TestSave(t *testing.T) {
 copy(id[:], data)
 h := restic.Handle{
-Type: restic.DataFile,
+Type: restic.PackFile,
 Name: fmt.Sprintf("%s-%d", id, i),
 }
 err := b.Save(context.TODO(), h, restic.NewByteReader(data))
@@ -534,7 +534,7 @@ func (s *Suite) TestSave(t *testing.T) {
 t.Fatal(err)
 }
-h := restic.Handle{Type: restic.DataFile, Name: id.String()}
+h := restic.Handle{Type: restic.PackFile, Name: id.String()}
 // wrap the tempfile in an errorCloser, so we can detect if the backend
 // closes the reader
@@ -575,7 +575,7 @@ func (s *Suite) TestSaveFilenames(t *testing.T) {
 defer s.close(t, b)
 for i, test := range filenameTests {
-h := restic.Handle{Name: test.name, Type: restic.DataFile}
+h := restic.Handle{Name: test.name, Type: restic.PackFile}
 err := b.Save(context.TODO(), h, restic.NewByteReader([]byte(test.data)))
 if err != nil {
 t.Errorf("test %d failed: Save() returned %+v", i, err)
@@ -698,7 +698,7 @@ func (s *Suite) TestBackend(t *testing.T) {
 defer s.close(t, b)
 for _, tpe := range []restic.FileType{
-restic.DataFile, restic.KeyFile, restic.LockFile,
+restic.PackFile, restic.KeyFile, restic.LockFile,
 restic.SnapshotFile, restic.IndexFile,
 } {
 // detect non-existing files

View file

@@ -25,11 +25,11 @@ func TestLoadAll(t *testing.T) {
 data := rtest.Random(23+i, rand.Intn(MiB)+500*KiB)
 id := restic.Hash(data)
-h := restic.Handle{Name: id.String(), Type: restic.DataFile}
+h := restic.Handle{Name: id.String(), Type: restic.PackFile}
 err := b.Save(context.TODO(), h, restic.NewByteReader(data))
 rtest.OK(t, err)
-buf, err := backend.LoadAll(context.TODO(), buf, b, restic.Handle{Type: restic.DataFile, Name: id.String()})
+buf, err := backend.LoadAll(context.TODO(), buf, b, restic.Handle{Type: restic.PackFile, Name: id.String()})
 rtest.OK(t, err)
 if len(buf) != len(data) {
@@ -46,7 +46,7 @@ func TestLoadAll(t *testing.T) {
 func save(t testing.TB, be restic.Backend, buf []byte) restic.Handle {
 id := restic.Hash(buf)
-h := restic.Handle{Name: id.String(), Type: restic.DataFile}
+h := restic.Handle{Name: id.String(), Type: restic.PackFile}
 err := be.Save(context.TODO(), h, restic.NewByteReader(buf))
 if err != nil {
 t.Fatal(err)
@@ -121,7 +121,7 @@ func (rd *mockReader) Close() error {
 func TestDefaultLoad(t *testing.T) {
-h := restic.Handle{Name: "id", Type: restic.DataFile}
+h := restic.Handle{Name: "id", Type: restic.PackFile}
 rd := &mockReader{}
 // happy case, assert correct parameters are passed around and content stream is closed

View file

@@ -50,7 +50,7 @@ const cacheVersion = 1
 var _ restic.Cache = &Cache{}
 var cacheLayoutPaths = map[restic.FileType]string{
-restic.DataFile: "data",
+restic.PackFile: "data",
 restic.SnapshotFile: "snapshots",
 restic.IndexFile: "index",
 }

View file

@@ -88,7 +88,7 @@ func TestFiles(t *testing.T) {
 var tests = []restic.FileType{
 restic.SnapshotFile,
-restic.DataFile,
+restic.PackFile,
 restic.IndexFile,
 }
@@ -144,7 +144,7 @@ func TestFileSaveWriter(t *testing.T) {
 id := restic.ID{}
 copy(id[:], data)
 h := restic.Handle{
-Type: restic.DataFile,
+Type: restic.PackFile,
 Name: id.String(),
 }
@@ -202,7 +202,7 @@ func TestFileLoad(t *testing.T) {
 id := restic.ID{}
 copy(id[:], data)
 h := restic.Handle{
-Type: restic.DataFile,
+Type: restic.PackFile,
 Name: id.String(),
 }
 if err := c.Save(h, bytes.NewReader(data)); err != nil {

View file

@@ -236,7 +236,7 @@ func (c *Checker) Packs(ctx context.Context, errChan chan<- error) {
 debug.Log("listing repository packs")
 repoPacks := restic.NewIDSet()
-err := c.repo.List(ctx, restic.DataFile, func(id restic.ID, size int64) error {
+err := c.repo.List(ctx, restic.PackFile, func(id restic.ID, size int64) error {
 repoPacks.Insert(id)
 return nil
 })
@@ -701,7 +701,7 @@ func (c *Checker) GetPacks() restic.IDSet {
 // checkPack reads a pack and checks the integrity of all blobs.
 func checkPack(ctx context.Context, r restic.Repository, id restic.ID) error {
 debug.Log("checking pack %v", id)
-h := restic.Handle{Type: restic.DataFile, Name: id.String()}
+h := restic.Handle{Type: restic.PackFile, Name: id.String()}
 packfile, hash, size, err := repository.DownloadAndHash(ctx, r.Backend(), h)
 if err != nil {

View file

@@ -82,7 +82,7 @@ func TestMissingPack(t *testing.T) {
 repo := repository.TestOpenLocal(t, repodir)
 packHandle := restic.Handle{
-Type: restic.DataFile,
+Type: restic.PackFile,
 Name: "657f7fb64f6a854fff6fe9279998ee09034901eded4e6db9bcee0e59745bbce6",
 }
 test.OK(t, repo.Backend().Remove(context.TODO(), packHandle))

View file

@@ -71,7 +71,7 @@ func New(ctx context.Context, repo Lister, ignorePacks restic.IDSet, p *restic.P
 // list the files in the repo, send to inputCh
 wg.Go(func() error {
 defer close(inputCh)
-return repo.List(ctx, restic.DataFile, func(id restic.ID, size int64) error {
+return repo.List(ctx, restic.PackFile, func(id restic.ID, size int64) error {
 if ignorePacks.Has(id) {
 return nil
 }

View file

@@ -29,7 +29,7 @@ func createFilledRepo(t testing.TB, snapshots int, dup float32) (restic.Reposito
 }
 func validateIndex(t testing.TB, repo restic.Repository, idx *Index) {
-err := repo.List(context.TODO(), restic.DataFile, func(id restic.ID, size int64) error {
+err := repo.List(context.TODO(), restic.PackFile, func(id restic.ID, size int64) error {
 p, ok := idx.Packs[id]
 if !ok {
 t.Errorf("pack %v missing from index", id.Str())
@@ -395,7 +395,7 @@ func TestIndexAddRemovePack(t *testing.T) {
 }
 var packID restic.ID
-err = repo.List(context.TODO(), restic.DataFile, func(id restic.ID, size int64) error {
+err = repo.List(context.TODO(), restic.PackFile, func(id restic.ID, size int64) error {
 packID = id
 return nil
 })

View file

@@ -91,7 +91,7 @@ func (m *S3Layout) Apply(ctx context.Context, repo restic.Repository) error {
 for _, t := range []restic.FileType{
 restic.SnapshotFile,
-restic.DataFile,
+restic.PackFile,
 restic.KeyFile,
 restic.LockFile,
 } {

View file

@@ -126,7 +126,7 @@ func TestUnpackReadSeeker(t *testing.T) {
 b := mem.New()
 id := restic.Hash(packData)
-handle := restic.Handle{Type: restic.DataFile, Name: id.String()}
+handle := restic.Handle{Type: restic.PackFile, Name: id.String()}
 rtest.OK(t, b.Save(context.TODO(), handle, restic.NewByteReader(packData)))
 verifyBlobs(t, bufs, k, restic.ReaderAt(b, handle), packSize)
 }
@@ -139,7 +139,7 @@ func TestShortPack(t *testing.T) {
 b := mem.New()
 id := restic.Hash(packData)
-handle := restic.Handle{Type: restic.DataFile, Name: id.String()}
+handle := restic.Handle{Type: restic.PackFile, Name: id.String()}
 rtest.OK(t, b.Save(context.TODO(), handle, restic.NewByteReader(packData)))
 verifyBlobs(t, bufs, k, restic.ReaderAt(b, handle), packSize)
 }

View file

@@ -97,7 +97,7 @@ func (r *Repository) savePacker(ctx context.Context, t restic.BlobType, p *Packe
 }
 id := restic.IDFromHash(p.hw.Sum(nil))
-h := restic.Handle{Type: restic.DataFile, Name: id.String()}
+h := restic.Handle{Type: restic.PackFile, Name: id.String()}
 rd, err := restic.NewFileReader(p.tmpfile)
 if err != nil {

View file

@@ -34,7 +34,7 @@ func min(a, b int) int {
 }
 func saveFile(t testing.TB, be Saver, length int, f *os.File, id restic.ID) {
-h := restic.Handle{Type: restic.DataFile, Name: id.String()}
+h := restic.Handle{Type: restic.PackFile, Name: id.String()}
 t.Logf("save file %v", h)
 rd, err := restic.NewFileReader(f)

View file

@@ -26,7 +26,7 @@ func Repack(ctx context.Context, repo restic.Repository, packs restic.IDSet, kee
 for packID := range packs {
 // load the complete pack into a temp file
-h := restic.Handle{Type: restic.DataFile, Name: packID.String()}
+h := restic.Handle{Type: restic.PackFile, Name: packID.String()}
 tempfile, hash, packLength, err := DownloadAndHash(ctx, repo.Backend(), h)
 if err != nil {

View file

@@ -62,7 +62,7 @@ func selectBlobs(t *testing.T, repo restic.Repository, p float32) (list1, list2
 blobs := restic.NewBlobSet()
-err := repo.List(context.TODO(), restic.DataFile, func(id restic.ID, size int64) error {
+err := repo.List(context.TODO(), restic.PackFile, func(id restic.ID, size int64) error {
 entries, _, err := repo.ListPack(context.TODO(), id, size)
 if err != nil {
 t.Fatalf("error listing pack %v: %v", id, err)
@@ -93,7 +93,7 @@ func selectBlobs(t *testing.T, repo restic.Repository, p float32) (list1, list2
 func listPacks(t *testing.T, repo restic.Repository) restic.IDSet {
 list := restic.NewIDSet()
-err := repo.List(context.TODO(), restic.DataFile, func(id restic.ID, size int64) error {
+err := repo.List(context.TODO(), restic.PackFile, func(id restic.ID, size int64) error {
 list.Insert(id)
 return nil
 })
@@ -130,7 +130,7 @@ func repack(t *testing.T, repo restic.Repository, packs restic.IDSet, blobs rest
 }
 for id := range repackedBlobs {
-err = repo.Backend().Remove(context.TODO(), restic.Handle{Type: restic.DataFile, Name: id.String()})
+err = repo.Backend().Remove(context.TODO(), restic.Handle{Type: restic.PackFile, Name: id.String()})
 if err != nil {
 t.Fatal(err)
 }

View file

@@ -130,7 +130,7 @@ func sortCachedPacksFirst(cache haver, blobs []restic.PackedBlob) {
 noncached := make([]restic.PackedBlob, 0, len(blobs)/2)
 for _, blob := range blobs {
-if cache.Has(restic.Handle{Type: restic.DataFile, Name: blob.PackID.String()}) {
+if cache.Has(restic.Handle{Type: restic.PackFile, Name: blob.PackID.String()}) {
 cached = append(cached, blob)
 continue
 }
@@ -164,7 +164,7 @@ func (r *Repository) LoadBlob(ctx context.Context, t restic.BlobType, id restic.
 }
 // load blob from pack
-h := restic.Handle{Type: restic.DataFile, Name: blob.PackID.String()}
+h := restic.Handle{Type: restic.PackFile, Name: blob.PackID.String()}
 switch {
 case cap(buf) < int(blob.Length):
@@ -535,10 +535,10 @@ func (r *Repository) PrepareCache(indexIDs restic.IDSet) error {
 }
 }
-// clear old data files
-err = r.Cache.Clear(restic.DataFile, packs)
+// clear old packs
+err = r.Cache.Clear(restic.PackFile, packs)
 if err != nil {
-fmt.Fprintf(os.Stderr, "error clearing data files in cache: %v\n", err)
+fmt.Fprintf(os.Stderr, "error clearing pack files in cache: %v\n", err)
 }
 treePacks := restic.NewIDSet()
@@ -552,8 +552,8 @@ func (r *Repository) PrepareCache(indexIDs restic.IDSet) error {
 debug.Log("using readahead")
 cache := r.Cache.(*cache.Cache)
 cache.PerformReadahead = func(h restic.Handle) bool {
-if h.Type != restic.DataFile {
-debug.Log("no readahead for %v, is not data file", h)
+if h.Type != restic.PackFile {
+debug.Log("no readahead for %v, is not a pack file", h)
 return false
 }
@@ -670,7 +670,7 @@ func (r *Repository) List(ctx context.Context, t restic.FileType, fn func(restic
 // ListPack returns the list of blobs saved in the pack id and the length of
 // the file as stored in the backend.
 func (r *Repository) ListPack(ctx context.Context, id restic.ID, size int64) ([]restic.Blob, int64, error) {
-h := restic.Handle{Type: restic.DataFile, Name: id.String()}
+h := restic.Handle{Type: restic.PackFile, Name: id.String()}
 blobs, err := pack.List(r.Key(), restic.ReaderAt(r.Backend(), h), size)
 if err != nil {

View file

@@ -27,15 +27,15 @@ func TestSortCachedPacksFirst(t *testing.T) {
 blobs[i] = restic.PackedBlob{PackID: id}
 if i%3 == 0 {
-h := restic.Handle{Name: id.String(), Type: restic.DataFile}
+h := restic.Handle{Name: id.String(), Type: restic.PackFile}
 cache[h] = true
 }
 }
 copy(sorted[:], blobs[:])
 sort.SliceStable(sorted[:], func(i, j int) bool {
-hi := restic.Handle{Type: restic.DataFile, Name: sorted[i].PackID.String()}
-hj := restic.Handle{Type: restic.DataFile, Name: sorted[j].PackID.String()}
+hi := restic.Handle{Type: restic.PackFile, Name: sorted[i].PackID.String()}
+hj := restic.Handle{Type: restic.PackFile, Name: sorted[j].PackID.String()}
 return cache.Has(hi) && !cache.Has(hj)
 })
@@ -58,7 +58,7 @@ func BenchmarkSortCachedPacksFirst(b *testing.B) {
 blobs[i] = restic.PackedBlob{PackID: id}
 if i%3 == 0 {
-h := restic.Handle{Name: id.String(), Type: restic.DataFile}
+h := restic.Handle{Name: id.String(), Type: restic.PackFile}
 cache[h] = true
 }
 }

View file

@@ -224,7 +224,7 @@ func BenchmarkLoadAndDecrypt(b *testing.B) {
 dataID := restic.Hash(buf)
-storageID, err := repo.SaveUnpacked(context.TODO(), restic.DataFile, buf)
+storageID, err := repo.SaveUnpacked(context.TODO(), restic.PackFile, buf)
 rtest.OK(b, err)
 // rtest.OK(b, repo.Flush())
@@ -232,7 +232,7 @@ func BenchmarkLoadAndDecrypt(b *testing.B) {
 b.SetBytes(int64(length))
 for i := 0; i < b.N; i++ {
-data, err := repo.LoadAndDecrypt(context.TODO(), nil, restic.DataFile, storageID)
+data, err := repo.LoadAndDecrypt(context.TODO(), nil, restic.PackFile, storageID)
 rtest.OK(b, err)
 // See comment in BenchmarkLoadBlob.

View file

@@ -11,7 +11,7 @@ type FileType string
 // These are the different data types a backend can store.
 const (
-DataFile FileType = "data"
+PackFile FileType = "data" // use data, as packs are stored under /data in repo
 KeyFile = "key"
 LockFile = "lock"
 SnapshotFile = "snapshot"
@@ -40,7 +40,7 @@ func (h Handle) Valid() error {
 }
 switch h.Type {
-case DataFile:
+case PackFile:
 case KeyFile:
 case LockFile:
 case SnapshotFile:
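
Because only the Go identifier is renamed while the underlying string stays "data", existing repositories, backend paths and cache directories remain compatible. A minimal sketch, assuming the restic and fmt packages are imported:

// The identifier changes, the stored value does not:
fmt.Println(restic.PackFile) // prints "data", the same value DataFile had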

View file

@@ -9,7 +9,7 @@ var handleTests = []struct {
 {Handle{Name: "foo"}, false},
 {Handle{Type: "foobar"}, false},
 {Handle{Type: ConfigFile, Name: ""}, true},
-{Handle{Type: DataFile, Name: ""}, false},
+{Handle{Type: PackFile, Name: ""}, false},
 {Handle{Type: "", Name: "x"}, false},
 {Handle{Type: LockFile, Name: "010203040506"}, true},
 }

View file

@@ -221,7 +221,7 @@ func (r *fileRestorer) downloadPack(ctx context.Context, pack *packInfo) {
 packData := make([]byte, int(end-start))
-h := restic.Handle{Type: restic.DataFile, Name: pack.id.String()}
+h := restic.Handle{Type: restic.PackFile, Name: pack.id.String()}
 err := r.packLoader(ctx, h, int(end-start), start, func(rd io.Reader) error {
 l, err := io.ReadFull(rd, packData)
 if err != nil {