forked from TrueCloudLab/restic

Merge pull request #1048 from restic/cleanup-fuse-mount

Cleanup/fix fuse mount

This commit is contained in: commit 53f8026018

20 changed files with 497 additions and 379 deletions
@@ -96,14 +96,26 @@ func mount(opts MountOptions, gopts GlobalOptions, mountpoint string) error {
 		return err
 	}
 
+	systemFuse.Debug = func(msg interface{}) {
+		debug.Log("fuse: %v", msg)
+	}
+
+	cfg := fuse.Config{
+		OwnerIsRoot: opts.OwnerRoot,
+		Host:        opts.Host,
+		Tags:        opts.Tags,
+		Paths:       opts.Paths,
+	}
+	root, err := fuse.NewRoot(context.TODO(), repo, cfg)
+	if err != nil {
+		return err
+	}
+
 	Printf("Now serving the repository at %s\n", mountpoint)
 	Printf("Don't forget to umount after quitting!\n")
 
-	root := fs.Tree{}
-	root.Add("snapshots", fuse.NewSnapshotsDir(repo, opts.OwnerRoot, opts.Paths, opts.Tags, opts.Host))
-
 	debug.Log("serving mount at %v", mountpoint)
-	err = fs.Serve(c, &root)
+	err = fs.Serve(c, root)
 	if err != nil {
 		return err
 	}
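Note (not part of the diff): taken together, the new mount path reduces to building a fuse.Config from the command-line options, constructing the filesystem root with fuse.NewRoot, and handing that root straight to fs.Serve. A rough consolidated sketch of the resulting code, assuming repo and the fuse connection c were opened earlier in runMount:

	cfg := fuse.Config{
		OwnerIsRoot: opts.OwnerRoot,
		Host:        opts.Host,
		Tags:        opts.Tags,
		Paths:       opts.Paths,
	}

	root, err := fuse.NewRoot(context.TODO(), repo, cfg)
	if err != nil {
		return err
	}

	// fs.Serve takes the *fuse.Root directly; the old code assembled an
	// fs.Tree and registered a "snapshots" entry by hand.
	err = fs.Serve(c, root)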
@@ -58,15 +58,7 @@ func FindFilteredSnapshots(ctx context.Context, repo *repository.Repository, hos
 		return
 	}
 
-	for id := range repo.List(ctx, restic.SnapshotFile) {
-		sn, err := restic.LoadSnapshot(ctx, repo, id)
-		if err != nil {
-			Warnf("Ignoring %q, could not load snapshot: %v\n", id, err)
-			continue
-		}
-		if (host != "" && host != sn.Hostname) || !sn.HasTags(tags) || !sn.HasPaths(paths) {
-			continue
-		}
+	for _, sn := range restic.FindFilteredSnapshots(ctx, repo, host, tags, paths) {
 		select {
 		case <-ctx.Done():
 			return
@@ -1,10 +1,10 @@
-// +build ignore
 // +build !openbsd
 // +build !windows
 
 package main
 
 import (
+	"context"
 	"fmt"
 	"io/ioutil"
 	"os"
@@ -55,17 +55,15 @@ func waitForMount(t testing.TB, dir string) {
 	t.Errorf("subdir %q of dir %s never appeared", mountTestSubdir, dir)
 }
 
-func mount(t testing.TB, global GlobalOptions, dir string) {
-	cmd := &CmdMount{global: &global}
-	OK(t, cmd.Mount(dir))
+func testRunMount(t testing.TB, gopts GlobalOptions, dir string) {
+	opts := MountOptions{}
+	OK(t, runMount(opts, gopts, []string{dir}))
 }
 
-func umount(t testing.TB, global GlobalOptions, dir string) {
-	cmd := &CmdMount{global: &global}
-
+func testRunUmount(t testing.TB, gopts GlobalOptions, dir string) {
 	var err error
 	for i := 0; i < mountWait; i++ {
-		if err = cmd.Umount(dir); err == nil {
+		if err = umount(dir); err == nil {
 			t.Logf("directory %v umounted", dir)
 			return
 		}
@@ -87,9 +85,10 @@ func listSnapshots(t testing.TB, dir string) []string {
 
 func checkSnapshots(t testing.TB, global GlobalOptions, repo *repository.Repository, mountpoint, repodir string, snapshotIDs restic.IDs) {
 	t.Logf("checking for %d snapshots: %v", len(snapshotIDs), snapshotIDs)
-	go mount(t, global, mountpoint)
+
+	go testRunMount(t, global, mountpoint)
 	waitForMount(t, mountpoint)
-	defer umount(t, global, mountpoint)
+	defer testRunUmount(t, global, mountpoint)
 
 	if !snapshotsDirExists(t, mountpoint) {
 		t.Fatal(`virtual directory "snapshots" doesn't exist`)
@@ -110,7 +109,7 @@ func checkSnapshots(t testing.TB, global GlobalOptions, repo *repository.Reposit
 	}
 
 	for _, id := range snapshotIDs {
-		snapshot, err := restic.LoadSnapshot(repo, id)
+		snapshot, err := restic.LoadSnapshot(context.TODO(), repo, id)
 		OK(t, err)
 
 		ts := snapshot.Time.Format(time.RFC3339)
@@ -144,45 +143,46 @@ func TestMount(t *testing.T) {
 		t.Skip("Skipping fuse tests")
 	}
 
-	withTestEnvironment(t, func(env *testEnvironment, global GlobalOptions) {
-		cmdInit(t, global)
-		repo, err := global.OpenRepository()
+	withTestEnvironment(t, func(env *testEnvironment, gopts GlobalOptions) {
+		mountpoint, err := ioutil.TempDir(TestTempDir, "restic-test-mount-")
 		OK(t, err)
 
-		mountpoint, err := ioutil.TempDir(TestTempDir, "restic-test-mount-")
+		testRunInit(t, gopts)
+
+		repo, err := OpenRepository(gopts)
 		OK(t, err)
 
 		// We remove the mountpoint now to check that cmdMount creates it
 		RemoveAll(t, mountpoint)
 
-		checkSnapshots(t, global, repo, mountpoint, env.repo, []restic.ID{})
+		checkSnapshots(t, gopts, repo, mountpoint, env.repo, []restic.ID{})
 
 		SetupTarTestFixture(t, env.testdata, filepath.Join("testdata", "backup-data.tar.gz"))
 
 		// first backup
-		cmdBackup(t, global, []string{env.testdata}, nil)
-		snapshotIDs := cmdList(t, global, "snapshots")
+		testRunBackup(t, []string{env.testdata}, BackupOptions{}, gopts)
+		snapshotIDs := testRunList(t, "snapshots", gopts)
 		Assert(t, len(snapshotIDs) == 1,
 			"expected one snapshot, got %v", snapshotIDs)
 
-		checkSnapshots(t, global, repo, mountpoint, env.repo, snapshotIDs)
+		checkSnapshots(t, gopts, repo, mountpoint, env.repo, snapshotIDs)
 
 		// second backup, implicit incremental
-		cmdBackup(t, global, []string{env.testdata}, nil)
-		snapshotIDs = cmdList(t, global, "snapshots")
+		testRunBackup(t, []string{env.testdata}, BackupOptions{}, gopts)
+		snapshotIDs = testRunList(t, "snapshots", gopts)
 		Assert(t, len(snapshotIDs) == 2,
 			"expected two snapshots, got %v", snapshotIDs)
 
-		checkSnapshots(t, global, repo, mountpoint, env.repo, snapshotIDs)
+		checkSnapshots(t, gopts, repo, mountpoint, env.repo, snapshotIDs)
 
 		// third backup, explicit incremental
-		cmdBackup(t, global, []string{env.testdata}, &snapshotIDs[0])
-		snapshotIDs = cmdList(t, global, "snapshots")
+		bopts := BackupOptions{Parent: snapshotIDs[0].String()}
+		testRunBackup(t, []string{env.testdata}, bopts, gopts)
+		snapshotIDs = testRunList(t, "snapshots", gopts)
 		Assert(t, len(snapshotIDs) == 3,
 			"expected three snapshots, got %v", snapshotIDs)
 
-		checkSnapshots(t, global, repo, mountpoint, env.repo, snapshotIDs)
+		checkSnapshots(t, gopts, repo, mountpoint, env.repo, snapshotIDs)
 	})
 }
 
@@ -191,10 +191,10 @@ func TestMountSameTimestamps(t *testing.T) {
 		t.Skip("Skipping fuse tests")
 	}
 
-	withTestEnvironment(t, func(env *testEnvironment, global GlobalOptions) {
+	withTestEnvironment(t, func(env *testEnvironment, gopts GlobalOptions) {
 		SetupTarTestFixture(t, env.base, filepath.Join("testdata", "repo-same-timestamps.tar.gz"))
 
-		repo, err := global.OpenRepository()
+		repo, err := OpenRepository(gopts)
 		OK(t, err)
 
 		mountpoint, err := ioutil.TempDir(TestTempDir, "restic-test-mount-")
@@ -206,6 +206,6 @@ func TestMountSameTimestamps(t *testing.T) {
 			restic.TestParseID("5fd0d8b2ef0fa5d23e58f1e460188abb0f525c0f0c4af8365a1280c807a80a1b"),
 		}
 
-		checkSnapshots(t, global, repo, mountpoint, env.repo, ids)
+		checkSnapshots(t, gopts, repo, mountpoint, env.repo, ids)
 	})
 }
@@ -142,7 +142,7 @@ func (c *Checker) LoadIndex(ctx context.Context) (hints []error, errs []error) {
 
 		debug.Log("process blobs")
 		cnt := 0
-		for blob := range res.Index.Each(done) {
+		for blob := range res.Index.Each(ctx) {
 			c.packs.Insert(blob.PackID)
 			c.blobs.Insert(blob.ID)
 			c.blobRefs.M[blob.ID] = 0
36  src/restic/fuse/blob_size_cache.go  Normal file

@@ -0,0 +1,36 @@
+// +build !openbsd
+// +build !windows
+
+package fuse
+
+import (
+	"restic"
+
+	"golang.org/x/net/context"
+)
+
+// BlobSizeCache caches the size of blobs in the repo.
+type BlobSizeCache struct {
+	m map[restic.ID]uint
+}
+
+// NewBlobSizeCache returns a new blob size cache containing all entries from midx.
+func NewBlobSizeCache(ctx context.Context, idx restic.Index) *BlobSizeCache {
+	m := make(map[restic.ID]uint, 1000)
+	for pb := range idx.Each(ctx) {
+		m[pb.ID] = uint(restic.PlaintextLength(int(pb.Length)))
+	}
+	return &BlobSizeCache{
+		m: m,
+	}
+}
+
+// Lookup returns the size of the blob id.
+func (c *BlobSizeCache) Lookup(id restic.ID) (size uint, found bool) {
+	if c == nil {
+		return 0, false
+	}
+
+	size, found = c.m[id]
+	return size, found
+}
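Usage note (not part of the diff): the cache is meant to be filled once per mount and then queried per blob. A minimal, hypothetical caller, assuming ctx, repo and blobID are in scope and that repo.Index() satisfies restic.Index, as it does where the diff calls NewBlobSizeCache:

	// Build the cache from the repository index once, then answer size
	// queries from memory; Lookup on a nil cache safely reports a miss.
	cache := fuse.NewBlobSizeCache(ctx, repo.Index())

	if size, ok := cache.Lookup(blobID); ok {
		// size is already the plaintext length, see NewBlobSizeCache above
		_ = size
	}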
@@ -19,18 +19,17 @@ var _ = fs.HandleReadDirAller(&dir{})
 var _ = fs.NodeStringLookuper(&dir{})
 
 type dir struct {
-	repo        restic.Repository
+	root        *Root
 	items       map[string]*restic.Node
 	inode       uint64
 	node        *restic.Node
-	ownerIsRoot bool
 
 	blobsize *BlobSizeCache
 }
 
-func newDir(ctx context.Context, repo restic.Repository, node *restic.Node, ownerIsRoot bool, blobsize *BlobSizeCache) (*dir, error) {
+func newDir(ctx context.Context, root *Root, inode uint64, node *restic.Node) (*dir, error) {
 	debug.Log("new dir for %v (%v)", node.Name, node.Subtree.Str())
-	tree, err := repo.LoadTree(ctx, *node.Subtree)
+	tree, err := root.repo.LoadTree(ctx, *node.Subtree)
 	if err != nil {
 		debug.Log(" error loading tree %v: %v", node.Subtree.Str(), err)
 		return nil, err

@@ -41,12 +40,10 @@ func newDir(ctx context.Context, repo restic.Repository, node *restic.Node, owne
 	}
 
 	return &dir{
-		repo:  repo,
+		root:  root,
 		node:  node,
 		items: items,
-		inode:       node.Inode,
-		ownerIsRoot: ownerIsRoot,
-		blobsize:    blobsize,
+		inode: inode,
 	}, nil
 }
 

@@ -69,16 +66,16 @@ func replaceSpecialNodes(ctx context.Context, repo restic.Repository, node *rest
 	return tree.Nodes, nil
 }
 
-func newDirFromSnapshot(ctx context.Context, repo restic.Repository, snapshot SnapshotWithId, ownerIsRoot bool, blobsize *BlobSizeCache) (*dir, error) {
-	debug.Log("new dir for snapshot %v (%v)", snapshot.ID.Str(), snapshot.Tree.Str())
-	tree, err := repo.LoadTree(ctx, *snapshot.Tree)
+func newDirFromSnapshot(ctx context.Context, root *Root, inode uint64, snapshot *restic.Snapshot) (*dir, error) {
+	debug.Log("new dir for snapshot %v (%v)", snapshot.ID().Str(), snapshot.Tree.Str())
+	tree, err := root.repo.LoadTree(ctx, *snapshot.Tree)
 	if err != nil {
-		debug.Log(" loadTree(%v) failed: %v", snapshot.ID.Str(), err)
+		debug.Log(" loadTree(%v) failed: %v", snapshot.ID().Str(), err)
 		return nil, err
 	}
 	items := make(map[string]*restic.Node)
 	for _, n := range tree.Nodes {
-		nodes, err := replaceSpecialNodes(ctx, repo, n)
+		nodes, err := replaceSpecialNodes(ctx, root.repo, n)
 		if err != nil {
 			debug.Log(" replaceSpecialNodes(%v) failed: %v", n, err)
 			return nil, err

@@ -90,7 +87,7 @@ func newDirFromSnapshot(ctx context.Context, repo restic.Repository, snapshot Sn
 	}
 
 	return &dir{
-		repo: repo,
+		root: root,
 		node: &restic.Node{
 			UID: uint32(os.Getuid()),
 			GID: uint32(os.Getgid()),

@@ -99,10 +96,8 @@ func newDirFromSnapshot(ctx context.Context, repo restic.Repository, snapshot Sn
 			ChangeTime: snapshot.Time,
 			Mode:       os.ModeDir | 0555,
 		},
 		items: items,
-		inode:       inodeFromBackendID(snapshot.ID),
-		ownerIsRoot: ownerIsRoot,
-		blobsize:    blobsize,
+		inode: inode,
 	}, nil
 }
 

@@ -111,7 +106,7 @@ func (d *dir) Attr(ctx context.Context, a *fuse.Attr) error {
 	a.Inode = d.inode
 	a.Mode = os.ModeDir | d.node.Mode
 
-	if !d.ownerIsRoot {
+	if !d.root.cfg.OwnerIsRoot {
 		a.Uid = d.node.UID
 		a.Gid = d.node.GID
 	}

@@ -153,7 +148,7 @@ func (d *dir) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) {
 		}
 
 		ret = append(ret, fuse.Dirent{
-			Inode: node.Inode,
+			Inode: fs.GenerateDynamicInode(d.inode, node.Name),
 			Type:  typ,
 			Name:  node.Name,
 		})

@@ -171,11 +166,11 @@ func (d *dir) Lookup(ctx context.Context, name string) (fs.Node, error) {
 	}
 	switch node.Type {
 	case "dir":
-		return newDir(ctx, d.repo, node, d.ownerIsRoot, d.blobsize)
+		return newDir(ctx, d.root, fs.GenerateDynamicInode(d.inode, name), node)
 	case "file":
-		return newFile(d.repo, node, d.ownerIsRoot, d.blobsize)
+		return newFile(ctx, d.root, fs.GenerateDynamicInode(d.inode, name), node)
 	case "symlink":
-		return newLink(d.repo, node, d.ownerIsRoot)
+		return newLink(ctx, d.root, fs.GenerateDynamicInode(d.inode, name), node)
 	default:
 		debug.Log(" node %v has unknown type %v", name, node.Type)
 		return nil, fuse.ENOENT
108  src/restic/fuse/dir_snapshots.go  Normal file

@@ -0,0 +1,108 @@
+// +build !openbsd
+// +build !windows
+
+package fuse
+
+import (
+	"fmt"
+	"os"
+	"restic"
+	"restic/debug"
+	"time"
+
+	"golang.org/x/net/context"
+
+	"bazil.org/fuse"
+	"bazil.org/fuse/fs"
+)
+
+// DirSnapshots is a fuse directory which contains snapshots.
+type DirSnapshots struct {
+	inode     uint64
+	root      *Root
+	snapshots restic.Snapshots
+	names     map[string]*restic.Snapshot
+}
+
+// ensure that *DirSnapshots implements these interfaces
+var _ = fs.HandleReadDirAller(&DirSnapshots{})
+var _ = fs.NodeStringLookuper(&DirSnapshots{})
+
+// NewDirSnapshots returns a new directory containing snapshots.
+func NewDirSnapshots(root *Root, inode uint64, snapshots restic.Snapshots) *DirSnapshots {
+	debug.Log("create snapshots dir with %d snapshots, inode %d", len(snapshots), inode)
+	d := &DirSnapshots{
+		root:      root,
+		inode:     inode,
+		snapshots: snapshots,
+		names:     make(map[string]*restic.Snapshot, len(snapshots)),
+	}
+
+	for _, sn := range snapshots {
+		name := sn.Time.Format(time.RFC3339)
+		for i := 1; ; i++ {
+			if _, ok := d.names[name]; !ok {
+				break
+			}
+
+			name = fmt.Sprintf("%s-%d", sn.Time.Format(time.RFC3339), i)
+		}
+
+		d.names[name] = sn
+		debug.Log(" add snapshot %v as dir %v", sn.ID().Str(), name)
+	}
+
+	return d
+}
+
+// Attr returns the attributes for the root node.
+func (d *DirSnapshots) Attr(ctx context.Context, attr *fuse.Attr) error {
+	attr.Inode = d.inode
+	attr.Mode = os.ModeDir | 0555
+
+	if !d.root.cfg.OwnerIsRoot {
+		attr.Uid = uint32(os.Getuid())
+		attr.Gid = uint32(os.Getgid())
+	}
+	debug.Log("attr: %v", attr)
+	return nil
+}
+
+// ReadDirAll returns all entries of the root node.
+func (d *DirSnapshots) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) {
+	debug.Log("ReadDirAll()")
+	items := []fuse.Dirent{
+		{
+			Inode: d.inode,
+			Name:  ".",
+			Type:  fuse.DT_Dir,
+		},
+		{
+			Inode: d.root.inode,
+			Name:  "..",
+			Type:  fuse.DT_Dir,
+		},
+	}
+
+	for name := range d.names {
+		items = append(items, fuse.Dirent{
+			Inode: fs.GenerateDynamicInode(d.inode, name),
+			Name:  name,
+			Type:  fuse.DT_Dir,
+		})
+	}
+
+	return items, nil
+}
+
+// Lookup returns a specific entry from the root node.
+func (d *DirSnapshots) Lookup(ctx context.Context, name string) (fs.Node, error) {
+	debug.Log("Lookup(%s)", name)
+
+	sn, ok := d.names[name]
+	if !ok {
+		return nil, fuse.ENOENT
+	}
+
+	return newDirFromSnapshot(ctx, d.root, fs.GenerateDynamicInode(d.inode, name), sn)
+}
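Note (not part of the diff): the only subtle part of the new snapshots directory is its naming rule: entries are named by the snapshot's RFC3339 timestamp, and snapshots sharing a timestamp get a numeric suffix so none of them is shadowed. A standalone sketch of that rule as a hypothetical helper, using a plain set instead of the map[string]*restic.Snapshot above:

	// snapshotDirName picks a unique directory name for a snapshot taken at
	// the given time: "2017-01-02T15:04:05Z", then "…Z-1", "…Z-2", and so on.
	func snapshotDirName(taken time.Time, used map[string]bool) string {
		name := taken.Format(time.RFC3339)
		for i := 1; used[name]; i++ {
			name = fmt.Sprintf("%s-%d", taken.Format(time.RFC3339), i)
		}
		return name
	}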
@@ -9,8 +9,6 @@ import (
 	"restic"
 	"restic/debug"
 
-	scontext "context"
-
 	"bazil.org/fuse"
 	"bazil.org/fuse/fs"
 	"golang.org/x/net/context"

@@ -23,30 +21,23 @@ const blockSize = 512
 var _ = fs.HandleReader(&file{})
 var _ = fs.HandleReleaser(&file{})
 
-// BlobLoader is an abstracted repository with a reduced set of methods used
-// for fuse operations.
-type BlobLoader interface {
-	LookupBlobSize(restic.ID, restic.BlobType) (uint, error)
-	LoadBlob(scontext.Context, restic.BlobType, restic.ID, []byte) (int, error)
-}
-
 type file struct {
-	repo        BlobLoader
+	root  *Root
 	node  *restic.Node
-	ownerIsRoot bool
+	inode uint64
 
 	sizes []int
 	blobs [][]byte
 }
 
-func newFile(repo BlobLoader, node *restic.Node, ownerIsRoot bool, blobsize *BlobSizeCache) (fusefile *file, err error) {
+func newFile(ctx context.Context, root *Root, inode uint64, node *restic.Node) (fusefile *file, err error) {
 	debug.Log("create new file for %v with %d blobs", node.Name, len(node.Content))
 	var bytes uint64
 	sizes := make([]int, len(node.Content))
 	for i, id := range node.Content {
-		size, ok := blobsize.Lookup(id)
+		size, ok := root.blobSizeCache.Lookup(id)
 		if !ok {
-			size, err = repo.LookupBlobSize(id, restic.DataBlob)
+			size, err = root.repo.LookupBlobSize(id, restic.DataBlob)
 			if err != nil {
 				return nil, err
 			}

@@ -62,24 +53,24 @@ func newFile(repo BlobLoader, node *restic.Node, ownerIsRoot bool, blobsize *Blo
 	}
 
 	return &file{
-		repo:        repo,
-		node:        node,
-		sizes:       sizes,
-		blobs:       make([][]byte, len(node.Content)),
-		ownerIsRoot: ownerIsRoot,
+		inode: inode,
+		root:  root,
+		node:  node,
+		sizes: sizes,
+		blobs: make([][]byte, len(node.Content)),
 	}, nil
 }
 
 func (f *file) Attr(ctx context.Context, a *fuse.Attr) error {
 	debug.Log("Attr(%v)", f.node.Name)
-	a.Inode = f.node.Inode
+	a.Inode = f.inode
 	a.Mode = f.node.Mode
 	a.Size = f.node.Size
 	a.Blocks = (f.node.Size / blockSize) + 1
 	a.BlockSize = blockSize
 	a.Nlink = uint32(f.node.Links)
 
-	if !f.ownerIsRoot {
+	if !f.root.cfg.OwnerIsRoot {
 		a.Uid = f.node.UID
 		a.Gid = f.node.GID
 	}

@@ -103,7 +94,7 @@ func (f *file) getBlobAt(ctx context.Context, i int) (blob []byte, err error) {
 	}
 
 	buf := restic.NewBlobBuffer(f.sizes[i])
-	n, err := f.repo.LoadBlob(ctx, restic.DataBlob, f.node.Content[i], buf)
+	n, err := f.root.repo.LoadBlob(ctx, restic.DataBlob, f.node.Content[i], buf)
 	if err != nil {
 		debug.Log("LoadBlob(%v, %v) failed: %v", f.node.Name, f.node.Content[i], err)
 		return nil, err
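Note (not part of the diff): per-blob sizes are now resolved through the per-mount BlobSizeCache first and fall back to the repository index only on a miss, so most size lookups while building file nodes stay in memory. A hedged sketch of that order as a hypothetical helper inside the fuse package (blobSize is not a function from the change):

	// blobSize isolates the lookup order newFile uses: cache first,
	// repository index only on a cache miss.
	func blobSize(root *Root, id restic.ID) (uint, error) {
		if size, ok := root.blobSizeCache.Lookup(id); ok {
			return size, nil
		}
		return root.repo.LookupBlobSize(id, restic.DataBlob)
	}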
@@ -14,6 +14,7 @@ import (
 	"restic/repository"
 
 	"bazil.org/fuse"
+	"bazil.org/fuse/fs"
 
 	"restic"
 	. "restic/test"

@@ -108,13 +109,21 @@ func TestFuseFile(t *testing.T) {
 		Size:    filesize,
 		Content: content,
 	}
-	f, err := newFile(repo, node, false, nil)
+	root := &Root{
+		blobSizeCache: NewBlobSizeCache(context.TODO(), repo.Index()),
+		repo:          repo,
+	}
+
+	t.Logf("blob cache has %d entries", len(root.blobSizeCache.m))
+
+	inode := fs.GenerateDynamicInode(1, "foo")
+	f, err := newFile(context.TODO(), root, inode, node)
 	OK(t, err)
 
 	attr := fuse.Attr{}
 	OK(t, f.Attr(ctx, &attr))
 
-	Equals(t, node.Inode, attr.Inode)
+	Equals(t, inode, attr.Inode)
 	Equals(t, node.Mode, attr.Mode)
 	Equals(t, node.Size, attr.Size)
 	Equals(t, (node.Size/uint64(attr.BlockSize))+1, attr.Blocks)
@@ -1,16 +0,0 @@
-// +build !openbsd
-// +build !windows
-
-package fuse
-
-import (
-	"encoding/binary"
-	"restic"
-)
-
-// inodeFromBackendId returns a unique uint64 from a backend id.
-// Endianness has no specific meaning, it is just the simplest way to
-// transform a []byte to an uint64
-func inodeFromBackendID(id restic.ID) uint64 {
-	return binary.BigEndian.Uint64(id[:8])
-}
@@ -15,12 +15,13 @@ import (
 var _ = fs.NodeReadlinker(&link{})
 
 type link struct {
-	node        *restic.Node
-	ownerIsRoot bool
+	root  *Root
+	node  *restic.Node
+	inode uint64
 }
 
-func newLink(repo restic.Repository, node *restic.Node, ownerIsRoot bool) (*link, error) {
-	return &link{node: node, ownerIsRoot: ownerIsRoot}, nil
+func newLink(ctx context.Context, root *Root, inode uint64, node *restic.Node) (*link, error) {
+	return &link{root: root, inode: inode, node: node}, nil
 }
 
 func (l *link) Readlink(ctx context.Context, req *fuse.ReadlinkRequest) (string, error) {

@@ -28,10 +29,10 @@ func (l *link) Readlink(ctx context.Context, req *fuse.ReadlinkRequest) (string,
 }
 
 func (l *link) Attr(ctx context.Context, a *fuse.Attr) error {
-	a.Inode = l.node.Inode
+	a.Inode = l.inode
 	a.Mode = l.node.Mode
 
-	if !l.ownerIsRoot {
+	if !l.root.cfg.OwnerIsRoot {
 		a.Uid = l.node.UID
 		a.Gid = l.node.GID
 	}
121  src/restic/fuse/root.go  Normal file

@@ -0,0 +1,121 @@
+// +build !openbsd
+// +build !windows
+
+package fuse
+
+import (
+	"os"
+	"restic"
+	"restic/debug"
+
+	"golang.org/x/net/context"
+
+	"bazil.org/fuse"
+	"bazil.org/fuse/fs"
+)
+
+// Config holds settings for the fuse mount.
+type Config struct {
+	OwnerIsRoot bool
+	Host        string
+	Tags        []string
+	Paths       []string
+}
+
+// Root is the root node of the fuse mount of a repository.
+type Root struct {
+	repo          restic.Repository
+	cfg           Config
+	inode         uint64
+	snapshots     restic.Snapshots
+	dirSnapshots  *DirSnapshots
+	blobSizeCache *BlobSizeCache
+}
+
+// ensure that *Root implements these interfaces
+var _ = fs.HandleReadDirAller(&Root{})
+var _ = fs.NodeStringLookuper(&Root{})
+
+// NewRoot initializes a new root node from a repository.
+func NewRoot(ctx context.Context, repo restic.Repository, cfg Config) (*Root, error) {
+	debug.Log("NewRoot(), config %v", cfg)
+
+	snapshots := restic.FindFilteredSnapshots(ctx, repo, cfg.Host, cfg.Tags, cfg.Paths)
+	debug.Log("found %d matching snapshots", len(snapshots))
+
+	root := &Root{
+		repo:      repo,
+		cfg:       cfg,
+		inode:     1,
+		snapshots: snapshots,
+	}
+
+	root.dirSnapshots = NewDirSnapshots(root, fs.GenerateDynamicInode(root.inode, "snapshots"), snapshots)
+	root.blobSizeCache = NewBlobSizeCache(ctx, repo.Index())
+
+	return root, nil
+}
+
+// Root is just there to satisfy fs.Root, it returns itself.
+func (r *Root) Root() (fs.Node, error) {
+	debug.Log("Root()")
+	return r, nil
+}
+
+// Attr returns the attributes for the root node.
+func (r *Root) Attr(ctx context.Context, attr *fuse.Attr) error {
+	attr.Inode = r.inode
+	attr.Mode = os.ModeDir | 0555
+
+	if !r.cfg.OwnerIsRoot {
+		attr.Uid = uint32(os.Getuid())
+		attr.Gid = uint32(os.Getgid())
+	}
+	debug.Log("attr: %v", attr)
+	return nil
+}
+
+// ReadDirAll returns all entries of the root node.
+func (r *Root) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) {
+	debug.Log("ReadDirAll()")
+	items := []fuse.Dirent{
+		{
+			Inode: r.inode,
+			Name:  ".",
+			Type:  fuse.DT_Dir,
+		},
+		{
+			Inode: r.inode,
+			Name:  "..",
+			Type:  fuse.DT_Dir,
+		},
+		{
+			Inode: fs.GenerateDynamicInode(r.inode, "snapshots"),
+			Name:  "snapshots",
+			Type:  fuse.DT_Dir,
+		},
+		// {
+		// 	Inode: fs.GenerateDynamicInode(0, "tags"),
+		// 	Name:  "tags",
+		// 	Type:  fuse.DT_Dir,
+		// },
+		// {
+		// 	Inode: fs.GenerateDynamicInode(0, "hosts"),
+		// 	Name:  "hosts",
+		// 	Type:  fuse.DT_Dir,
+		// },
+	}
+
+	return items, nil
+}
+
+// Lookup returns a specific entry from the root node.
+func (r *Root) Lookup(ctx context.Context, name string) (fs.Node, error) {
+	debug.Log("Lookup(%s)", name)
+	switch name {
+	case "snapshots":
+		return r.dirSnapshots, nil
+	}
+
+	return nil, fuse.ENOENT
+}
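Note (not part of the diff): one design point that runs through root.go and the other fuse files is that inode numbers are no longer taken from node.Inode or hashed from backend IDs; they are derived from the parent inode and the entry name with fs.GenerateDynamicInode, so the same path yields the same inode on every lookup. A small illustrative sketch (the timestamp is a made-up example name):

	rootInode := uint64(1) // NewRoot assigns inode 1 to the mount root
	snapshotsInode := fs.GenerateDynamicInode(rootInode, "snapshots")
	dirInode := fs.GenerateDynamicInode(snapshotsInode, "2017-01-02T15:04:05Z")
	_ = dirInode // deterministic: recomputing it for the same names gives the same value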
@@ -1,194 +0,0 @@
-// +build !openbsd
-// +build !windows
-
-package fuse
-
-import (
-	"fmt"
-	"os"
-	"sync"
-	"time"
-
-	"bazil.org/fuse"
-	"bazil.org/fuse/fs"
-
-	"restic"
-	"restic/debug"
-	"restic/repository"
-
-	"golang.org/x/net/context"
-)
-
-// BlobSizeCache caches the size of blobs in the repo.
-type BlobSizeCache struct {
-	m map[restic.ID]uint
-}
-
-// NewBlobSizeCache returns a new blob size cache containing all entries from midx.
-func NewBlobSizeCache(midx *repository.MasterIndex) *BlobSizeCache {
-	m := make(map[restic.ID]uint, 1000)
-	for _, idx := range midx.All() {
-		for pb := range idx.Each(nil) {
-			m[pb.ID] = pb.Length
-		}
-	}
-	return &BlobSizeCache{
-		m: m,
-	}
-}
-
-// Lookup returns the size of the blob id.
-func (c *BlobSizeCache) Lookup(id restic.ID) (size uint, found bool) {
-	if c == nil {
-		return 0, false
-	}
-
-	size, found = c.m[id]
-	return size, found
-}
-
-type SnapshotWithId struct {
-	*restic.Snapshot
-	restic.ID
-}
-
-// These lines statically ensure that a *SnapshotsDir implement the given
-// interfaces; a misplaced refactoring of the implementation that breaks
-// the interface will be catched by the compiler
-var _ = fs.HandleReadDirAller(&SnapshotsDir{})
-var _ = fs.NodeStringLookuper(&SnapshotsDir{})
-
-type SnapshotsDir struct {
-	repo        restic.Repository
-	ownerIsRoot bool
-	paths       []string
-	tags        []string
-	host        string
-
-	blobsize *BlobSizeCache
-
-	// knownSnapshots maps snapshot timestamp to the snapshot
-	sync.RWMutex
-	knownSnapshots map[string]SnapshotWithId
-	processed      restic.IDSet
-}
-
-// NewSnapshotsDir returns a new dir object for the snapshots.
-func NewSnapshotsDir(repo restic.Repository, ownerIsRoot bool, paths []string, tags []string, host string) *SnapshotsDir {
-	debug.Log("fuse mount initiated")
-	return &SnapshotsDir{
-		repo:           repo,
-		ownerIsRoot:    ownerIsRoot,
-		paths:          paths,
-		tags:           tags,
-		host:           host,
-		knownSnapshots: make(map[string]SnapshotWithId),
-		processed:      restic.NewIDSet(),
-		blobsize:       NewBlobSizeCache(repo.Index().(*repository.MasterIndex)),
-	}
-}
-
-func (sn *SnapshotsDir) Attr(ctx context.Context, attr *fuse.Attr) error {
-	attr.Inode = 0
-	attr.Mode = os.ModeDir | 0555
-
-	if !sn.ownerIsRoot {
-		attr.Uid = uint32(os.Getuid())
-		attr.Gid = uint32(os.Getgid())
-	}
-	debug.Log("attr is %v", attr)
-	return nil
-}
-
-func (sn *SnapshotsDir) updateCache(ctx context.Context) error {
-	debug.Log("called")
-	sn.Lock()
-	defer sn.Unlock()
-
-	for id := range sn.repo.List(ctx, restic.SnapshotFile) {
-		if sn.processed.Has(id) {
-			debug.Log("skipping snapshot %v, already in list", id.Str())
-			continue
-		}
-
-		debug.Log("found snapshot id %v", id.Str())
-		snapshot, err := restic.LoadSnapshot(ctx, sn.repo, id)
-		if err != nil {
-			return err
-		}
-
-		// Filter snapshots we don't care for.
-		if (sn.host != "" && sn.host != snapshot.Hostname) ||
-			!snapshot.HasTags(sn.tags) ||
-			!snapshot.HasPaths(sn.paths) {
-			continue
-		}
-
-		timestamp := snapshot.Time.Format(time.RFC3339)
-		for i := 1; ; i++ {
-			if _, ok := sn.knownSnapshots[timestamp]; !ok {
-				break
-			}
-
-			timestamp = fmt.Sprintf("%s-%d", snapshot.Time.Format(time.RFC3339), i)
-		}
-
-		debug.Log(" add %v as dir %v", id.Str(), timestamp)
-		sn.knownSnapshots[timestamp] = SnapshotWithId{snapshot, id}
-		sn.processed.Insert(id)
-	}
-	return nil
-}
-
-func (sn *SnapshotsDir) get(name string) (snapshot SnapshotWithId, ok bool) {
-	sn.RLock()
-	snapshot, ok = sn.knownSnapshots[name]
-	sn.RUnlock()
-	debug.Log("get(%s) -> %v %v", name, snapshot, ok)
-	return snapshot, ok
-}
-
-func (sn *SnapshotsDir) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) {
-	debug.Log("called")
-	err := sn.updateCache(ctx)
-	if err != nil {
-		return nil, err
-	}
-
-	sn.RLock()
-	defer sn.RUnlock()
-
-	ret := make([]fuse.Dirent, 0)
-	for timestamp, snapshot := range sn.knownSnapshots {
-		ret = append(ret, fuse.Dirent{
-			Inode: inodeFromBackendID(snapshot.ID),
-			Type:  fuse.DT_Dir,
-			Name:  timestamp,
-		})
-	}
-
-	debug.Log(" -> %d entries", len(ret))
-	return ret, nil
-}
-
-func (sn *SnapshotsDir) Lookup(ctx context.Context, name string) (fs.Node, error) {
-	debug.Log("Lookup(%s)", name)
-	snapshot, ok := sn.get(name)
-
-	if !ok {
-		// We don't know about it, update the cache
-		err := sn.updateCache(ctx)
-		if err != nil {
-			debug.Log(" Lookup(%s) -> err %v", name, err)
-			return nil, err
-		}
-		snapshot, ok = sn.get(name)
-		if !ok {
-			// We still don't know about it, this time it really doesn't exist
-			debug.Log(" Lookup(%s) -> not found", name)
-			return nil, fuse.ENOENT
-		}
-	}
-
-	return newDirFromSnapshot(ctx, sn.repo, snapshot, sn.ownerIsRoot, sn.blobsize)
-}
@@ -58,4 +58,9 @@ type Index interface {
 	Has(ID, BlobType) bool
 	Lookup(ID, BlobType) ([]PackedBlob, error)
 	Count(BlobType) uint
+
+	// Each returns a channel that yields all blobs known to the index. When
+	// the context is cancelled, the background goroutine terminates. This
+	// blocks any modification of the index.
+	Each(ctx context.Context) <-chan PackedBlob
 }
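Note (not part of the diff): the done-channel parameter on Each is replaced with a context throughout the index types below; cancelling the context is what closing the channel used to do. A rough consumer sketch against the new interface, assuming idx is any restic.Index in scope:

	// Count data blobs; deferring cancel makes sure the goroutine feeding
	// the channel is released even if the loop stops ranging early.
	ctx, cancel := context.WithCancel(context.TODO())
	defer cancel()

	n := 0
	for blob := range idx.Each(ctx) {
		if blob.Type == restic.DataBlob {
			n++
		}
	}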
@@ -206,10 +206,10 @@ func (idx *Index) AddToSupersedes(ids ...restic.ID) error {
 	return nil
 }
 
-// Each returns a channel that yields all blobs known to the index. If done is
-// closed, the background goroutine terminates. This blocks any modification of
-// the index.
-func (idx *Index) Each(done chan struct{}) <-chan restic.PackedBlob {
+// Each returns a channel that yields all blobs known to the index. When the
+// context is cancelled, the background goroutine terminates. This blocks any
+// modification of the index.
+func (idx *Index) Each(ctx context.Context) <-chan restic.PackedBlob {
 	idx.m.Lock()
 
 	ch := make(chan restic.PackedBlob)

@@ -223,7 +223,7 @@ func (idx *Index) Each(done chan struct{}) <-chan restic.PackedBlob {
 		for h, packs := range idx.pack {
 			for _, blob := range packs {
 				select {
-				case <-done:
+				case <-ctx.Done():
 					return
 				case ch <- restic.PackedBlob{
 					Blob: restic.Blob{
@@ -1,6 +1,7 @@
 package repository
 
 import (
+	"context"
 	"restic"
 	"sync"
 

@@ -188,6 +189,35 @@ func (mi *MasterIndex) All() []*Index {
 	return mi.idx
 }
 
+// Each returns a channel that yields all blobs known to the index. When the
+// context is cancelled, the background goroutine terminates. This blocks any
+// modification of the index.
+func (mi *MasterIndex) Each(ctx context.Context) <-chan restic.PackedBlob {
+	mi.idxMutex.RLock()
+
+	ch := make(chan restic.PackedBlob)
+
+	go func() {
+		defer mi.idxMutex.RUnlock()
+		defer func() {
+			close(ch)
+		}()
+
+		for _, idx := range mi.idx {
+			idxCh := idx.Each(ctx)
+			for pb := range idxCh {
+				select {
+				case <-ctx.Done():
+					return
+				case ch <- pb:
+				}
+			}
+		}
+	}()
+
+	return ch
+}
+
 // RebuildIndex combines all known indexes to a new index, leaving out any
 // packs whose ID is contained in packBlacklist. The new index contains the IDs
 // of all known indexes in the "supersedes" field.

@@ -198,13 +228,14 @@ func (mi *MasterIndex) RebuildIndex(packBlacklist restic.IDSet) (*Index, error)
 	debug.Log("start rebuilding index of %d indexes, pack blacklist: %v", len(mi.idx), packBlacklist)
 
 	newIndex := NewIndex()
-	done := make(chan struct{})
-	defer close(done)
+	ctx, cancel := context.WithCancel(context.TODO())
+	defer cancel()
 
 	for i, idx := range mi.idx {
 		debug.Log("adding index %d", i)
 
-		for pb := range idx.Each(done) {
+		for pb := range idx.Each(ctx) {
 			if packBlacklist.Has(pb.PackID) {
 				continue
 			}
@@ -373,7 +373,7 @@ func TestRepositoryIncrementalIndex(t *testing.T) {
 		idx, err := repository.LoadIndex(context.TODO(), repo, id)
 		OK(t, err)
 
-		for pb := range idx.Each(nil) {
+		for pb := range idx.Each(context.TODO()) {
 			if _, ok := packEntries[pb.PackID]; !ok {
 				packEntries[pb.PackID] = make(map[restic.ID]struct{})
 			}
@@ -6,8 +6,6 @@ import (
 	"os/user"
 	"path/filepath"
 	"time"
-
-	"restic/errors"
 )
 
 // Snapshot is the state of a resource at one point in time.

@@ -172,45 +170,20 @@ func (sn *Snapshot) SamePaths(paths []string) bool {
 	return sn.HasPaths(paths)
 }
 
-// ErrNoSnapshotFound is returned when no snapshot for the given criteria could be found.
-var ErrNoSnapshotFound = errors.New("no snapshot found")
+// Snapshots is a list of snapshots.
+type Snapshots []*Snapshot
 
-// FindLatestSnapshot finds latest snapshot with optional target/directory, tags and hostname filters.
-func FindLatestSnapshot(ctx context.Context, repo Repository, targets []string, tags []string, hostname string) (ID, error) {
-	var (
-		latest   time.Time
-		latestID ID
-		found    bool
-	)
-
-	for snapshotID := range repo.List(ctx, SnapshotFile) {
-		snapshot, err := LoadSnapshot(ctx, repo, snapshotID)
-		if err != nil {
-			return ID{}, errors.Errorf("Error listing snapshot: %v", err)
-		}
-		if snapshot.Time.After(latest) && (hostname == "" || hostname == snapshot.Hostname) && snapshot.HasTags(tags) && snapshot.HasPaths(targets) {
-			latest = snapshot.Time
-			latestID = snapshotID
-			found = true
-		}
-	}
-
-	if !found {
-		return ID{}, ErrNoSnapshotFound
-	}
-
-	return latestID, nil
+// Len returns the number of snapshots in sn.
+func (sn Snapshots) Len() int {
+	return len(sn)
 }
 
-// FindSnapshot takes a string and tries to find a snapshot whose ID matches
-// the string as closely as possible.
-func FindSnapshot(repo Repository, s string) (ID, error) {
-
-	// find snapshot id with prefix
-	name, err := Find(repo.Backend(), SnapshotFile, s)
-	if err != nil {
-		return ID{}, err
-	}
-
-	return ParseID(name)
+// Less returns true iff the ith snapshot has been made after the jth.
+func (sn Snapshots) Less(i, j int) bool {
+	return sn[i].Time.After(sn[j].Time)
+}
+
+// Swap exchanges the two snapshots.
+func (sn Snapshots) Swap(i, j int) {
+	sn[i], sn[j] = sn[j], sn[i]
 }
72  src/restic/snapshot_find.go  Normal file

@@ -0,0 +1,72 @@
+package restic
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"restic/errors"
+	"time"
+)
+
+// ErrNoSnapshotFound is returned when no snapshot for the given criteria could be found.
+var ErrNoSnapshotFound = errors.New("no snapshot found")
+
+// FindLatestSnapshot finds latest snapshot with optional target/directory, tags and hostname filters.
+func FindLatestSnapshot(ctx context.Context, repo Repository, targets []string, tags []string, hostname string) (ID, error) {
+	var (
+		latest   time.Time
+		latestID ID
+		found    bool
+	)
+
+	for snapshotID := range repo.List(ctx, SnapshotFile) {
+		snapshot, err := LoadSnapshot(ctx, repo, snapshotID)
+		if err != nil {
+			return ID{}, errors.Errorf("Error listing snapshot: %v", err)
+		}
+		if snapshot.Time.After(latest) && (hostname == "" || hostname == snapshot.Hostname) && snapshot.HasTags(tags) && snapshot.HasPaths(targets) {
+			latest = snapshot.Time
+			latestID = snapshotID
+			found = true
+		}
+	}
+
+	if !found {
+		return ID{}, ErrNoSnapshotFound
+	}
+
+	return latestID, nil
+}
+
+// FindSnapshot takes a string and tries to find a snapshot whose ID matches
+// the string as closely as possible.
+func FindSnapshot(repo Repository, s string) (ID, error) {
+
+	// find snapshot id with prefix
+	name, err := Find(repo.Backend(), SnapshotFile, s)
+	if err != nil {
+		return ID{}, err
+	}
+
+	return ParseID(name)
+}
+
+// FindFilteredSnapshots yields Snapshots filtered from the list of all
+// snapshots.
+func FindFilteredSnapshots(ctx context.Context, repo Repository, host string, tags []string, paths []string) Snapshots {
+	results := make(Snapshots, 0, 20)
+
+	for id := range repo.List(ctx, SnapshotFile) {
+		sn, err := LoadSnapshot(ctx, repo, id)
+		if err != nil {
+			fmt.Fprintf(os.Stderr, "could not load snapshot %v: %v\n", id.Str(), err)
+			continue
+		}
+		if (host != "" && host != sn.Hostname) || !sn.HasTags(tags) || !sn.HasPaths(paths) {
+			continue
+		}
+
+		results = append(results, sn)
+	}
+	return results
+}
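Note (not part of the diff): a hypothetical caller tying the new helpers together, assuming ctx, repo, host, tags and paths are in scope and the "sort", "fmt" and "time" packages are imported:

	// Collect the matching snapshots, then order them newest-first using the
	// sort.Interface methods (Len/Less/Swap) that moved onto restic.Snapshots.
	snapshots := restic.FindFilteredSnapshots(ctx, repo, host, tags, paths)
	sort.Sort(snapshots)

	for _, sn := range snapshots {
		fmt.Printf("%s  %v\n", sn.Time.Format(time.RFC3339), sn.ID().Str())
	}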
@@ -6,24 +6,6 @@ import (
 	"time"
 )
 
-// Snapshots is a list of snapshots.
-type Snapshots []*Snapshot
-
-// Len returns the number of snapshots in sn.
-func (sn Snapshots) Len() int {
-	return len(sn)
-}
-
-// Less returns true iff the ith snapshot has been made after the jth.
-func (sn Snapshots) Less(i, j int) bool {
-	return sn[i].Time.After(sn[j].Time)
-}
-
-// Swap exchanges the two snapshots.
-func (sn Snapshots) Swap(i, j int) {
-	sn[i], sn[j] = sn[j], sn[i]
-}
-
 // ExpirePolicy configures which snapshots should be automatically removed.
 type ExpirePolicy struct {
 	Last int // keep the last n snapshots