forked from TrueCloudLab/restic

Merge pull request #1048 from restic/cleanup-fuse-mount

Cleanup/fix fuse mount

commit 53f8026018
20 changed files with 497 additions and 379 deletions
@@ -96,14 +96,26 @@ func mount(opts MountOptions, gopts GlobalOptions, mountpoint string) error {
        return err
    }

    systemFuse.Debug = func(msg interface{}) {
        debug.Log("fuse: %v", msg)
    }

    cfg := fuse.Config{
        OwnerIsRoot: opts.OwnerRoot,
        Host:        opts.Host,
        Tags:        opts.Tags,
        Paths:       opts.Paths,
    }
    root, err := fuse.NewRoot(context.TODO(), repo, cfg)
    if err != nil {
        return err
    }

    Printf("Now serving the repository at %s\n", mountpoint)
    Printf("Don't forget to umount after quitting!\n")

    root := fs.Tree{}
    root.Add("snapshots", fuse.NewSnapshotsDir(repo, opts.OwnerRoot, opts.Paths, opts.Tags, opts.Host))

    debug.Log("serving mount at %v", mountpoint)
    err = fs.Serve(c, &root)
    err = fs.Serve(c, root)
    if err != nil {
        return err
    }
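The command no longer assembles an fs.Tree by hand; the filter options travel in a fuse.Config and the whole virtual tree is built by fuse.NewRoot. A minimal sketch of the intended call pattern (mountRepo is a hypothetical helper; the systemFuse alias for bazil.org/fuse and the already-open conn and repo are assumptions for illustration):

    // mountRepo wires the pieces together: filters go into fuse.Config,
    // NewRoot builds the virtual tree, and fs.Serve blocks until unmount.
    func mountRepo(ctx context.Context, conn *systemFuse.Conn, repo restic.Repository, opts MountOptions) error {
        cfg := fuse.Config{
            OwnerIsRoot: opts.OwnerRoot,
            Host:        opts.Host,
            Tags:        opts.Tags,
            Paths:       opts.Paths,
        }
        root, err := fuse.NewRoot(ctx, repo, cfg)
        if err != nil {
            return err
        }
        return fs.Serve(conn, root)
    }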
@@ -58,15 +58,7 @@ func FindFilteredSnapshots(ctx context.Context, repo *repository.Repository, hos
        return
    }

    for id := range repo.List(ctx, restic.SnapshotFile) {
        sn, err := restic.LoadSnapshot(ctx, repo, id)
        if err != nil {
            Warnf("Ignoring %q, could not load snapshot: %v\n", id, err)
            continue
        }
        if (host != "" && host != sn.Hostname) || !sn.HasTags(tags) || !sn.HasPaths(paths) {
            continue
        }
    for _, sn := range restic.FindFilteredSnapshots(ctx, repo, host, tags, paths) {
        select {
        case <-ctx.Done():
            return
@@ -1,10 +1,10 @@
// +build ignore
// +build !openbsd
// +build !windows

package main

import (
    "context"
    "fmt"
    "io/ioutil"
    "os"
@@ -55,17 +55,15 @@ func waitForMount(t testing.TB, dir string) {
    t.Errorf("subdir %q of dir %s never appeared", mountTestSubdir, dir)
}

func mount(t testing.TB, global GlobalOptions, dir string) {
    cmd := &CmdMount{global: &global}
    OK(t, cmd.Mount(dir))
func testRunMount(t testing.TB, gopts GlobalOptions, dir string) {
    opts := MountOptions{}
    OK(t, runMount(opts, gopts, []string{dir}))
}

func umount(t testing.TB, global GlobalOptions, dir string) {
    cmd := &CmdMount{global: &global}

func testRunUmount(t testing.TB, gopts GlobalOptions, dir string) {
    var err error
    for i := 0; i < mountWait; i++ {
        if err = cmd.Umount(dir); err == nil {
        if err = umount(dir); err == nil {
            t.Logf("directory %v umounted", dir)
            return
        }
@@ -87,9 +85,10 @@ func listSnapshots(t testing.TB, dir string) []string {

func checkSnapshots(t testing.TB, global GlobalOptions, repo *repository.Repository, mountpoint, repodir string, snapshotIDs restic.IDs) {
    t.Logf("checking for %d snapshots: %v", len(snapshotIDs), snapshotIDs)
    go mount(t, global, mountpoint)

    go testRunMount(t, global, mountpoint)
    waitForMount(t, mountpoint)
    defer umount(t, global, mountpoint)
    defer testRunUmount(t, global, mountpoint)

    if !snapshotsDirExists(t, mountpoint) {
        t.Fatal(`virtual directory "snapshots" doesn't exist`)
@@ -110,7 +109,7 @@ func checkSnapshots(t testing.TB, global GlobalOptions, repo *repository.Reposit
    }

    for _, id := range snapshotIDs {
        snapshot, err := restic.LoadSnapshot(repo, id)
        snapshot, err := restic.LoadSnapshot(context.TODO(), repo, id)
        OK(t, err)

        ts := snapshot.Time.Format(time.RFC3339)
@@ -144,45 +143,46 @@ func TestMount(t *testing.T) {
        t.Skip("Skipping fuse tests")
    }

    withTestEnvironment(t, func(env *testEnvironment, global GlobalOptions) {

        cmdInit(t, global)
        repo, err := global.OpenRepository()
    withTestEnvironment(t, func(env *testEnvironment, gopts GlobalOptions) {
        mountpoint, err := ioutil.TempDir(TestTempDir, "restic-test-mount-")
        OK(t, err)

        mountpoint, err := ioutil.TempDir(TestTempDir, "restic-test-mount-")
        testRunInit(t, gopts)

        repo, err := OpenRepository(gopts)
        OK(t, err)

        // We remove the mountpoint now to check that cmdMount creates it
        RemoveAll(t, mountpoint)

        checkSnapshots(t, global, repo, mountpoint, env.repo, []restic.ID{})
        checkSnapshots(t, gopts, repo, mountpoint, env.repo, []restic.ID{})

        SetupTarTestFixture(t, env.testdata, filepath.Join("testdata", "backup-data.tar.gz"))

        // first backup
        cmdBackup(t, global, []string{env.testdata}, nil)
        snapshotIDs := cmdList(t, global, "snapshots")
        testRunBackup(t, []string{env.testdata}, BackupOptions{}, gopts)
        snapshotIDs := testRunList(t, "snapshots", gopts)
        Assert(t, len(snapshotIDs) == 1,
            "expected one snapshot, got %v", snapshotIDs)

        checkSnapshots(t, global, repo, mountpoint, env.repo, snapshotIDs)
        checkSnapshots(t, gopts, repo, mountpoint, env.repo, snapshotIDs)

        // second backup, implicit incremental
        cmdBackup(t, global, []string{env.testdata}, nil)
        snapshotIDs = cmdList(t, global, "snapshots")
        testRunBackup(t, []string{env.testdata}, BackupOptions{}, gopts)
        snapshotIDs = testRunList(t, "snapshots", gopts)
        Assert(t, len(snapshotIDs) == 2,
            "expected two snapshots, got %v", snapshotIDs)

        checkSnapshots(t, global, repo, mountpoint, env.repo, snapshotIDs)
        checkSnapshots(t, gopts, repo, mountpoint, env.repo, snapshotIDs)

        // third backup, explicit incremental
        cmdBackup(t, global, []string{env.testdata}, &snapshotIDs[0])
        snapshotIDs = cmdList(t, global, "snapshots")
        bopts := BackupOptions{Parent: snapshotIDs[0].String()}
        testRunBackup(t, []string{env.testdata}, bopts, gopts)
        snapshotIDs = testRunList(t, "snapshots", gopts)
        Assert(t, len(snapshotIDs) == 3,
            "expected three snapshots, got %v", snapshotIDs)

        checkSnapshots(t, global, repo, mountpoint, env.repo, snapshotIDs)
        checkSnapshots(t, gopts, repo, mountpoint, env.repo, snapshotIDs)
    })
}
@@ -191,10 +191,10 @@ func TestMountSameTimestamps(t *testing.T) {
        t.Skip("Skipping fuse tests")
    }

    withTestEnvironment(t, func(env *testEnvironment, global GlobalOptions) {
    withTestEnvironment(t, func(env *testEnvironment, gopts GlobalOptions) {
        SetupTarTestFixture(t, env.base, filepath.Join("testdata", "repo-same-timestamps.tar.gz"))

        repo, err := global.OpenRepository()
        repo, err := OpenRepository(gopts)
        OK(t, err)

        mountpoint, err := ioutil.TempDir(TestTempDir, "restic-test-mount-")
@@ -206,6 +206,6 @@ func TestMountSameTimestamps(t *testing.T) {
            restic.TestParseID("5fd0d8b2ef0fa5d23e58f1e460188abb0f525c0f0c4af8365a1280c807a80a1b"),
        }

        checkSnapshots(t, global, repo, mountpoint, env.repo, ids)
        checkSnapshots(t, gopts, repo, mountpoint, env.repo, ids)
    })
}
@@ -142,7 +142,7 @@ func (c *Checker) LoadIndex(ctx context.Context) (hints []error, errs []error) {

    debug.Log("process blobs")
    cnt := 0
    for blob := range res.Index.Each(done) {
    for blob := range res.Index.Each(ctx) {
        c.packs.Insert(blob.PackID)
        c.blobs.Insert(blob.ID)
        c.blobRefs.M[blob.ID] = 0
src/restic/fuse/blob_size_cache.go (new file, 36 lines)
@@ -0,0 +1,36 @@
// +build !openbsd
// +build !windows

package fuse

import (
    "restic"

    "golang.org/x/net/context"
)

// BlobSizeCache caches the size of blobs in the repo.
type BlobSizeCache struct {
    m map[restic.ID]uint
}

// NewBlobSizeCache returns a new blob size cache containing all entries from midx.
func NewBlobSizeCache(ctx context.Context, idx restic.Index) *BlobSizeCache {
    m := make(map[restic.ID]uint, 1000)
    for pb := range idx.Each(ctx) {
        m[pb.ID] = uint(restic.PlaintextLength(int(pb.Length)))
    }
    return &BlobSizeCache{
        m: m,
    }
}

// Lookup returns the size of the blob id.
func (c *BlobSizeCache) Lookup(id restic.ID) (size uint, found bool) {
    if c == nil {
        return 0, false
    }

    size, found = c.m[id]
    return size, found
}
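A short usage sketch for the new cache (not part of the diff; lookupPlaintextSize is a hypothetical helper): build it once from the repository index, then answer size queries without touching the index again.

    // lookupPlaintextSize builds the cache from an open repository's index
    // and reports the plaintext size of a single blob, if it is known.
    func lookupPlaintextSize(ctx context.Context, repo restic.Repository, id restic.ID) (uint, bool) {
        cache := NewBlobSizeCache(ctx, repo.Index())
        return cache.Lookup(id)
    }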
@@ -19,18 +19,17 @@ var _ = fs.HandleReadDirAller(&dir{})
var _ = fs.NodeStringLookuper(&dir{})

type dir struct {
    repo restic.Repository
    root *Root
    items map[string]*restic.Node
    inode uint64
    node *restic.Node
    ownerIsRoot bool

    blobsize *BlobSizeCache
}

func newDir(ctx context.Context, repo restic.Repository, node *restic.Node, ownerIsRoot bool, blobsize *BlobSizeCache) (*dir, error) {
func newDir(ctx context.Context, root *Root, inode uint64, node *restic.Node) (*dir, error) {
    debug.Log("new dir for %v (%v)", node.Name, node.Subtree.Str())
    tree, err := repo.LoadTree(ctx, *node.Subtree)
    tree, err := root.repo.LoadTree(ctx, *node.Subtree)
    if err != nil {
        debug.Log(" error loading tree %v: %v", node.Subtree.Str(), err)
        return nil, err
@@ -41,12 +40,10 @@ func newDir(ctx context.Context, repo restic.Repository, node *restic.Node, owne
    }

    return &dir{
        repo: repo,
        root: root,
        node: node,
        items: items,
        inode: node.Inode,
        ownerIsRoot: ownerIsRoot,
        blobsize: blobsize,
        inode: inode,
    }, nil
}
@@ -69,16 +66,16 @@ func replaceSpecialNodes(ctx context.Context, repo restic.Repository, node *rest
    return tree.Nodes, nil
}

func newDirFromSnapshot(ctx context.Context, repo restic.Repository, snapshot SnapshotWithId, ownerIsRoot bool, blobsize *BlobSizeCache) (*dir, error) {
    debug.Log("new dir for snapshot %v (%v)", snapshot.ID.Str(), snapshot.Tree.Str())
    tree, err := repo.LoadTree(ctx, *snapshot.Tree)
func newDirFromSnapshot(ctx context.Context, root *Root, inode uint64, snapshot *restic.Snapshot) (*dir, error) {
    debug.Log("new dir for snapshot %v (%v)", snapshot.ID().Str(), snapshot.Tree.Str())
    tree, err := root.repo.LoadTree(ctx, *snapshot.Tree)
    if err != nil {
        debug.Log(" loadTree(%v) failed: %v", snapshot.ID.Str(), err)
        debug.Log(" loadTree(%v) failed: %v", snapshot.ID().Str(), err)
        return nil, err
    }
    items := make(map[string]*restic.Node)
    for _, n := range tree.Nodes {
        nodes, err := replaceSpecialNodes(ctx, repo, n)
        nodes, err := replaceSpecialNodes(ctx, root.repo, n)
        if err != nil {
            debug.Log(" replaceSpecialNodes(%v) failed: %v", n, err)
            return nil, err
@@ -90,7 +87,7 @@ func newDirFromSnapshot(ctx context.Context, repo restic.Repository, snapshot Sn
    }

    return &dir{
        repo: repo,
        root: root,
        node: &restic.Node{
            UID: uint32(os.Getuid()),
            GID: uint32(os.Getgid()),
@@ -100,9 +97,7 @@ func newDirFromSnapshot(ctx context.Context, repo restic.Repository, snapshot Sn
            Mode: os.ModeDir | 0555,
        },
        items: items,
        inode: inodeFromBackendID(snapshot.ID),
        ownerIsRoot: ownerIsRoot,
        blobsize: blobsize,
        inode: inode,
    }, nil
}
@@ -111,7 +106,7 @@ func (d *dir) Attr(ctx context.Context, a *fuse.Attr) error {
    a.Inode = d.inode
    a.Mode = os.ModeDir | d.node.Mode

    if !d.ownerIsRoot {
    if !d.root.cfg.OwnerIsRoot {
        a.Uid = d.node.UID
        a.Gid = d.node.GID
    }
@@ -153,7 +148,7 @@ func (d *dir) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) {
        }

        ret = append(ret, fuse.Dirent{
            Inode: node.Inode,
            Inode: fs.GenerateDynamicInode(d.inode, node.Name),
            Type: typ,
            Name: node.Name,
        })
@@ -171,11 +166,11 @@ func (d *dir) Lookup(ctx context.Context, name string) (fs.Node, error) {
    }
    switch node.Type {
    case "dir":
        return newDir(ctx, d.repo, node, d.ownerIsRoot, d.blobsize)
        return newDir(ctx, d.root, fs.GenerateDynamicInode(d.inode, name), node)
    case "file":
        return newFile(d.repo, node, d.ownerIsRoot, d.blobsize)
        return newFile(ctx, d.root, fs.GenerateDynamicInode(d.inode, name), node)
    case "symlink":
        return newLink(d.repo, node, d.ownerIsRoot)
        return newLink(ctx, d.root, fs.GenerateDynamicInode(d.inode, name), node)
    default:
        debug.Log(" node %v has unknown type %v", name, node.Type)
        return nil, fuse.ENOENT
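Inodes for directory entries are no longer taken from node.Inode but derived from the parent inode and the entry name, so ReadDirAll and Lookup agree without any extra bookkeeping. A tiny illustrative helper (not part of the diff; the parameter values a caller would pass are placeholders):

    // inodeForEntry is illustrative only: fs.GenerateDynamicInode hashes
    // (parent inode, name), so it is deterministic, and a ReadDirAll entry
    // and a later Lookup of the same name report the same inode.
    func inodeForEntry(parentInode uint64, name string) uint64 {
        return fs.GenerateDynamicInode(parentInode, name)
    }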
src/restic/fuse/dir_snapshots.go (new file, 108 lines)
@@ -0,0 +1,108 @@
// +build !openbsd
// +build !windows

package fuse

import (
    "fmt"
    "os"
    "restic"
    "restic/debug"
    "time"

    "golang.org/x/net/context"

    "bazil.org/fuse"
    "bazil.org/fuse/fs"
)

// DirSnapshots is a fuse directory which contains snapshots.
type DirSnapshots struct {
    inode uint64
    root *Root
    snapshots restic.Snapshots
    names map[string]*restic.Snapshot
}

// ensure that *DirSnapshots implements these interfaces
var _ = fs.HandleReadDirAller(&DirSnapshots{})
var _ = fs.NodeStringLookuper(&DirSnapshots{})

// NewDirSnapshots returns a new directory containing snapshots.
func NewDirSnapshots(root *Root, inode uint64, snapshots restic.Snapshots) *DirSnapshots {
    debug.Log("create snapshots dir with %d snapshots, inode %d", len(snapshots), inode)
    d := &DirSnapshots{
        root: root,
        inode: inode,
        snapshots: snapshots,
        names: make(map[string]*restic.Snapshot, len(snapshots)),
    }

    for _, sn := range snapshots {
        name := sn.Time.Format(time.RFC3339)
        for i := 1; ; i++ {
            if _, ok := d.names[name]; !ok {
                break
            }

            name = fmt.Sprintf("%s-%d", sn.Time.Format(time.RFC3339), i)
        }

        d.names[name] = sn
        debug.Log(" add snapshot %v as dir %v", sn.ID().Str(), name)
    }

    return d
}

// Attr returns the attributes for the root node.
func (d *DirSnapshots) Attr(ctx context.Context, attr *fuse.Attr) error {
    attr.Inode = d.inode
    attr.Mode = os.ModeDir | 0555

    if !d.root.cfg.OwnerIsRoot {
        attr.Uid = uint32(os.Getuid())
        attr.Gid = uint32(os.Getgid())
    }
    debug.Log("attr: %v", attr)
    return nil
}

// ReadDirAll returns all entries of the root node.
func (d *DirSnapshots) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) {
    debug.Log("ReadDirAll()")
    items := []fuse.Dirent{
        {
            Inode: d.inode,
            Name: ".",
            Type: fuse.DT_Dir,
        },
        {
            Inode: d.root.inode,
            Name: "..",
            Type: fuse.DT_Dir,
        },
    }

    for name := range d.names {
        items = append(items, fuse.Dirent{
            Inode: fs.GenerateDynamicInode(d.inode, name),
            Name: name,
            Type: fuse.DT_Dir,
        })
    }

    return items, nil
}

// Lookup returns a specific entry from the root node.
func (d *DirSnapshots) Lookup(ctx context.Context, name string) (fs.Node, error) {
    debug.Log("Lookup(%s)", name)

    sn, ok := d.names[name]
    if !ok {
        return nil, fuse.ENOENT
    }

    return newDirFromSnapshot(ctx, d.root, fs.GenerateDynamicInode(d.inode, name), sn)
}
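Two snapshots taken within the same second would collide on their RFC3339 directory name, so the constructor appends a numeric suffix. A standalone sketch of that naming rule (uniqueName is a hypothetical helper, not part of the diff):

    // uniqueName mirrors the loop above: the first snapshot keeps its plain
    // RFC3339 timestamp, later ones with the same timestamp get "-1", "-2", ...
    func uniqueName(taken map[string]bool, ts time.Time) string {
        name := ts.Format(time.RFC3339)
        for i := 1; taken[name]; i++ {
            name = fmt.Sprintf("%s-%d", ts.Format(time.RFC3339), i)
        }
        taken[name] = true
        return name
    }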
@@ -9,8 +9,6 @@ import (
    "restic"
    "restic/debug"

    scontext "context"

    "bazil.org/fuse"
    "bazil.org/fuse/fs"
    "golang.org/x/net/context"
@@ -23,30 +21,23 @@ const blockSize = 512
var _ = fs.HandleReader(&file{})
var _ = fs.HandleReleaser(&file{})

// BlobLoader is an abstracted repository with a reduced set of methods used
// for fuse operations.
type BlobLoader interface {
    LookupBlobSize(restic.ID, restic.BlobType) (uint, error)
    LoadBlob(scontext.Context, restic.BlobType, restic.ID, []byte) (int, error)
}

type file struct {
    repo BlobLoader
    root *Root
    node *restic.Node
    ownerIsRoot bool
    inode uint64

    sizes []int
    blobs [][]byte
}

func newFile(repo BlobLoader, node *restic.Node, ownerIsRoot bool, blobsize *BlobSizeCache) (fusefile *file, err error) {
func newFile(ctx context.Context, root *Root, inode uint64, node *restic.Node) (fusefile *file, err error) {
    debug.Log("create new file for %v with %d blobs", node.Name, len(node.Content))
    var bytes uint64
    sizes := make([]int, len(node.Content))
    for i, id := range node.Content {
        size, ok := blobsize.Lookup(id)
        size, ok := root.blobSizeCache.Lookup(id)
        if !ok {
            size, err = repo.LookupBlobSize(id, restic.DataBlob)
            size, err = root.repo.LookupBlobSize(id, restic.DataBlob)
            if err != nil {
                return nil, err
            }
@@ -62,24 +53,24 @@ func newFile(repo BlobLoader, node *restic.Node, ownerIsRoot bool, blobsize *Blo
    }

    return &file{
        repo: repo,
        inode: inode,
        root: root,
        node: node,
        sizes: sizes,
        blobs: make([][]byte, len(node.Content)),
        ownerIsRoot: ownerIsRoot,
    }, nil
}

func (f *file) Attr(ctx context.Context, a *fuse.Attr) error {
    debug.Log("Attr(%v)", f.node.Name)
    a.Inode = f.node.Inode
    a.Inode = f.inode
    a.Mode = f.node.Mode
    a.Size = f.node.Size
    a.Blocks = (f.node.Size / blockSize) + 1
    a.BlockSize = blockSize
    a.Nlink = uint32(f.node.Links)

    if !f.ownerIsRoot {
    if !f.root.cfg.OwnerIsRoot {
        a.Uid = f.node.UID
        a.Gid = f.node.GID
    }
@@ -103,7 +94,7 @@ func (f *file) getBlobAt(ctx context.Context, i int) (blob []byte, err error) {
    }

    buf := restic.NewBlobBuffer(f.sizes[i])
    n, err := f.repo.LoadBlob(ctx, restic.DataBlob, f.node.Content[i], buf)
    n, err := f.root.repo.LoadBlob(ctx, restic.DataBlob, f.node.Content[i], buf)
    if err != nil {
        debug.Log("LoadBlob(%v, %v) failed: %v", f.node.Name, f.node.Content[i], err)
        return nil, err
@@ -14,6 +14,7 @@ import (
    "restic/repository"

    "bazil.org/fuse"
    "bazil.org/fuse/fs"

    "restic"
    . "restic/test"
@@ -108,13 +109,21 @@ func TestFuseFile(t *testing.T) {
        Size: filesize,
        Content: content,
    }
    f, err := newFile(repo, node, false, nil)
    root := &Root{
        blobSizeCache: NewBlobSizeCache(context.TODO(), repo.Index()),
        repo: repo,
    }

    t.Logf("blob cache has %d entries", len(root.blobSizeCache.m))

    inode := fs.GenerateDynamicInode(1, "foo")
    f, err := newFile(context.TODO(), root, inode, node)
    OK(t, err)

    attr := fuse.Attr{}
    OK(t, f.Attr(ctx, &attr))

    Equals(t, node.Inode, attr.Inode)
    Equals(t, inode, attr.Inode)
    Equals(t, node.Mode, attr.Mode)
    Equals(t, node.Size, attr.Size)
    Equals(t, (node.Size/uint64(attr.BlockSize))+1, attr.Blocks)
@@ -1,16 +0,0 @@
// +build !openbsd
// +build !windows

package fuse

import (
    "encoding/binary"
    "restic"
)

// inodeFromBackendId returns a unique uint64 from a backend id.
// Endianness has no specific meaning, it is just the simplest way to
// transform a []byte to an uint64
func inodeFromBackendID(id restic.ID) uint64 {
    return binary.BigEndian.Uint64(id[:8])
}
@@ -15,12 +15,13 @@ import (
var _ = fs.NodeReadlinker(&link{})

type link struct {
    root *Root
    node *restic.Node
    ownerIsRoot bool
    inode uint64
}

func newLink(repo restic.Repository, node *restic.Node, ownerIsRoot bool) (*link, error) {
    return &link{node: node, ownerIsRoot: ownerIsRoot}, nil
func newLink(ctx context.Context, root *Root, inode uint64, node *restic.Node) (*link, error) {
    return &link{root: root, inode: inode, node: node}, nil
}

func (l *link) Readlink(ctx context.Context, req *fuse.ReadlinkRequest) (string, error) {
@@ -28,10 +29,10 @@ func (l *link) Readlink(ctx context.Context, req *fuse.ReadlinkRequest) (string,
}

func (l *link) Attr(ctx context.Context, a *fuse.Attr) error {
    a.Inode = l.node.Inode
    a.Inode = l.inode
    a.Mode = l.node.Mode

    if !l.ownerIsRoot {
    if !l.root.cfg.OwnerIsRoot {
        a.Uid = l.node.UID
        a.Gid = l.node.GID
    }
src/restic/fuse/root.go (new file, 121 lines)
@@ -0,0 +1,121 @@
// +build !openbsd
// +build !windows

package fuse

import (
    "os"
    "restic"
    "restic/debug"

    "golang.org/x/net/context"

    "bazil.org/fuse"
    "bazil.org/fuse/fs"
)

// Config holds settings for the fuse mount.
type Config struct {
    OwnerIsRoot bool
    Host string
    Tags []string
    Paths []string
}

// Root is the root node of the fuse mount of a repository.
type Root struct {
    repo restic.Repository
    cfg Config
    inode uint64
    snapshots restic.Snapshots
    dirSnapshots *DirSnapshots
    blobSizeCache *BlobSizeCache
}

// ensure that *Root implements these interfaces
var _ = fs.HandleReadDirAller(&Root{})
var _ = fs.NodeStringLookuper(&Root{})

// NewRoot initializes a new root node from a repository.
func NewRoot(ctx context.Context, repo restic.Repository, cfg Config) (*Root, error) {
    debug.Log("NewRoot(), config %v", cfg)

    snapshots := restic.FindFilteredSnapshots(ctx, repo, cfg.Host, cfg.Tags, cfg.Paths)
    debug.Log("found %d matching snapshots", len(snapshots))

    root := &Root{
        repo: repo,
        cfg: cfg,
        inode: 1,
        snapshots: snapshots,
    }

    root.dirSnapshots = NewDirSnapshots(root, fs.GenerateDynamicInode(root.inode, "snapshots"), snapshots)
    root.blobSizeCache = NewBlobSizeCache(ctx, repo.Index())

    return root, nil
}

// Root is just there to satisfy fs.Root, it returns itself.
func (r *Root) Root() (fs.Node, error) {
    debug.Log("Root()")
    return r, nil
}

// Attr returns the attributes for the root node.
func (r *Root) Attr(ctx context.Context, attr *fuse.Attr) error {
    attr.Inode = r.inode
    attr.Mode = os.ModeDir | 0555

    if !r.cfg.OwnerIsRoot {
        attr.Uid = uint32(os.Getuid())
        attr.Gid = uint32(os.Getgid())
    }
    debug.Log("attr: %v", attr)
    return nil
}

// ReadDirAll returns all entries of the root node.
func (r *Root) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) {
    debug.Log("ReadDirAll()")
    items := []fuse.Dirent{
        {
            Inode: r.inode,
            Name: ".",
            Type: fuse.DT_Dir,
        },
        {
            Inode: r.inode,
            Name: "..",
            Type: fuse.DT_Dir,
        },
        {
            Inode: fs.GenerateDynamicInode(r.inode, "snapshots"),
            Name: "snapshots",
            Type: fuse.DT_Dir,
        },
        // {
        //     Inode: fs.GenerateDynamicInode(0, "tags"),
        //     Name: "tags",
        //     Type: fuse.DT_Dir,
        // },
        // {
        //     Inode: fs.GenerateDynamicInode(0, "hosts"),
        //     Name: "hosts",
        //     Type: fuse.DT_Dir,
        // },
    }

    return items, nil
}

// Lookup returns a specific entry from the root node.
func (r *Root) Lookup(ctx context.Context, name string) (fs.Node, error) {
    debug.Log("Lookup(%s)", name)
    switch name {
    case "snapshots":
        return r.dirSnapshots, nil
    }

    return nil, fuse.ENOENT
}
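The Config fields feed straight into restic.FindFilteredSnapshots, so the mount only exposes matching snapshots, and anything other than "snapshots" at the top level is answered with ENOENT. A small sketch of building a filtered root (newFilteredRoot and the filter values are illustrative, not part of the diff):

    // newFilteredRoot is a sketch; the host and tag values are placeholders.
    // Snapshots that do not match are simply absent from the mounted tree.
    func newFilteredRoot(ctx context.Context, repo restic.Repository) (*Root, error) {
        cfg := Config{
            OwnerIsRoot: true,                 // report files as owned by root
            Host:        "work-laptop",        // hypothetical hostname filter
            Tags:        []string{"projects"}, // hypothetical tag filter
        }
        return NewRoot(ctx, repo, cfg)
    }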
@@ -1,194 +0,0 @@
// +build !openbsd
// +build !windows

package fuse

import (
    "fmt"
    "os"
    "sync"
    "time"

    "bazil.org/fuse"
    "bazil.org/fuse/fs"

    "restic"
    "restic/debug"
    "restic/repository"

    "golang.org/x/net/context"
)

// BlobSizeCache caches the size of blobs in the repo.
type BlobSizeCache struct {
    m map[restic.ID]uint
}

// NewBlobSizeCache returns a new blob size cache containing all entries from midx.
func NewBlobSizeCache(midx *repository.MasterIndex) *BlobSizeCache {
    m := make(map[restic.ID]uint, 1000)
    for _, idx := range midx.All() {
        for pb := range idx.Each(nil) {
            m[pb.ID] = pb.Length
        }
    }
    return &BlobSizeCache{
        m: m,
    }
}

// Lookup returns the size of the blob id.
func (c *BlobSizeCache) Lookup(id restic.ID) (size uint, found bool) {
    if c == nil {
        return 0, false
    }

    size, found = c.m[id]
    return size, found
}

type SnapshotWithId struct {
    *restic.Snapshot
    restic.ID
}

// These lines statically ensure that a *SnapshotsDir implement the given
// interfaces; a misplaced refactoring of the implementation that breaks
// the interface will be catched by the compiler
var _ = fs.HandleReadDirAller(&SnapshotsDir{})
var _ = fs.NodeStringLookuper(&SnapshotsDir{})

type SnapshotsDir struct {
    repo restic.Repository
    ownerIsRoot bool
    paths []string
    tags []string
    host string

    blobsize *BlobSizeCache

    // knownSnapshots maps snapshot timestamp to the snapshot
    sync.RWMutex
    knownSnapshots map[string]SnapshotWithId
    processed restic.IDSet
}

// NewSnapshotsDir returns a new dir object for the snapshots.
func NewSnapshotsDir(repo restic.Repository, ownerIsRoot bool, paths []string, tags []string, host string) *SnapshotsDir {
    debug.Log("fuse mount initiated")
    return &SnapshotsDir{
        repo: repo,
        ownerIsRoot: ownerIsRoot,
        paths: paths,
        tags: tags,
        host: host,
        knownSnapshots: make(map[string]SnapshotWithId),
        processed: restic.NewIDSet(),
        blobsize: NewBlobSizeCache(repo.Index().(*repository.MasterIndex)),
    }
}

func (sn *SnapshotsDir) Attr(ctx context.Context, attr *fuse.Attr) error {
    attr.Inode = 0
    attr.Mode = os.ModeDir | 0555

    if !sn.ownerIsRoot {
        attr.Uid = uint32(os.Getuid())
        attr.Gid = uint32(os.Getgid())
    }
    debug.Log("attr is %v", attr)
    return nil
}

func (sn *SnapshotsDir) updateCache(ctx context.Context) error {
    debug.Log("called")
    sn.Lock()
    defer sn.Unlock()

    for id := range sn.repo.List(ctx, restic.SnapshotFile) {
        if sn.processed.Has(id) {
            debug.Log("skipping snapshot %v, already in list", id.Str())
            continue
        }

        debug.Log("found snapshot id %v", id.Str())
        snapshot, err := restic.LoadSnapshot(ctx, sn.repo, id)
        if err != nil {
            return err
        }

        // Filter snapshots we don't care for.
        if (sn.host != "" && sn.host != snapshot.Hostname) ||
            !snapshot.HasTags(sn.tags) ||
            !snapshot.HasPaths(sn.paths) {
            continue
        }

        timestamp := snapshot.Time.Format(time.RFC3339)
        for i := 1; ; i++ {
            if _, ok := sn.knownSnapshots[timestamp]; !ok {
                break
            }

            timestamp = fmt.Sprintf("%s-%d", snapshot.Time.Format(time.RFC3339), i)
        }

        debug.Log(" add %v as dir %v", id.Str(), timestamp)
        sn.knownSnapshots[timestamp] = SnapshotWithId{snapshot, id}
        sn.processed.Insert(id)
    }
    return nil
}

func (sn *SnapshotsDir) get(name string) (snapshot SnapshotWithId, ok bool) {
    sn.RLock()
    snapshot, ok = sn.knownSnapshots[name]
    sn.RUnlock()
    debug.Log("get(%s) -> %v %v", name, snapshot, ok)
    return snapshot, ok
}

func (sn *SnapshotsDir) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) {
    debug.Log("called")
    err := sn.updateCache(ctx)
    if err != nil {
        return nil, err
    }

    sn.RLock()
    defer sn.RUnlock()

    ret := make([]fuse.Dirent, 0)
    for timestamp, snapshot := range sn.knownSnapshots {
        ret = append(ret, fuse.Dirent{
            Inode: inodeFromBackendID(snapshot.ID),
            Type: fuse.DT_Dir,
            Name: timestamp,
        })
    }

    debug.Log(" -> %d entries", len(ret))
    return ret, nil
}

func (sn *SnapshotsDir) Lookup(ctx context.Context, name string) (fs.Node, error) {
    debug.Log("Lookup(%s)", name)
    snapshot, ok := sn.get(name)

    if !ok {
        // We don't know about it, update the cache
        err := sn.updateCache(ctx)
        if err != nil {
            debug.Log(" Lookup(%s) -> err %v", name, err)
            return nil, err
        }
        snapshot, ok = sn.get(name)
        if !ok {
            // We still don't know about it, this time it really doesn't exist
            debug.Log(" Lookup(%s) -> not found", name)
            return nil, fuse.ENOENT
        }
    }

    return newDirFromSnapshot(ctx, sn.repo, snapshot, sn.ownerIsRoot, sn.blobsize)
}
@@ -58,4 +58,9 @@ type Index interface {
    Has(ID, BlobType) bool
    Lookup(ID, BlobType) ([]PackedBlob, error)
    Count(BlobType) uint

    // Each returns a channel that yields all blobs known to the index. When
    // the context is cancelled, the background goroutine terminates. This
    // blocks any modification of the index.
    Each(ctx context.Context) <-chan PackedBlob
}
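Callers that used to pass a done channel now control the iteration through the context; cancelling it stops the producing goroutine. A minimal consumption sketch (countBlobs is a hypothetical helper; it assumes the standard library context package):

    // countBlobs iterates every blob the index knows about. Cancelling the
    // context stops the iteration and lets the producing goroutine exit.
    func countBlobs(ctx context.Context, idx restic.Index) int {
        ctx, cancel := context.WithCancel(ctx)
        defer cancel() // release the producer even on an early return

        n := 0
        for range idx.Each(ctx) {
            n++
        }
        return n
    }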
@@ -206,10 +206,10 @@ func (idx *Index) AddToSupersedes(ids ...restic.ID) error {
    return nil
}

// Each returns a channel that yields all blobs known to the index. If done is
// closed, the background goroutine terminates. This blocks any modification of
// the index.
func (idx *Index) Each(done chan struct{}) <-chan restic.PackedBlob {
// Each returns a channel that yields all blobs known to the index. When the
// context is cancelled, the background goroutine terminates. This blocks any
// modification of the index.
func (idx *Index) Each(ctx context.Context) <-chan restic.PackedBlob {
    idx.m.Lock()

    ch := make(chan restic.PackedBlob)
@@ -223,7 +223,7 @@ func (idx *Index) Each(done chan struct{}) <-chan restic.PackedBlob {
    for h, packs := range idx.pack {
        for _, blob := range packs {
            select {
            case <-done:
            case <-ctx.Done():
                return
            case ch <- restic.PackedBlob{
                Blob: restic.Blob{
@@ -1,6 +1,7 @@
package repository

import (
    "context"
    "restic"
    "sync"
@@ -188,6 +189,35 @@ func (mi *MasterIndex) All() []*Index {
    return mi.idx
}

// Each returns a channel that yields all blobs known to the index. When the
// context is cancelled, the background goroutine terminates. This blocks any
// modification of the index.
func (mi *MasterIndex) Each(ctx context.Context) <-chan restic.PackedBlob {
    mi.idxMutex.RLock()

    ch := make(chan restic.PackedBlob)

    go func() {
        defer mi.idxMutex.RUnlock()
        defer func() {
            close(ch)
        }()

        for _, idx := range mi.idx {
            idxCh := idx.Each(ctx)
            for pb := range idxCh {
                select {
                case <-ctx.Done():
                    return
                case ch <- pb:
                }
            }
        }
    }()

    return ch
}

// RebuildIndex combines all known indexes to a new index, leaving out any
// packs whose ID is contained in packBlacklist. The new index contains the IDs
// of all known indexes in the "supersedes" field.
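MasterIndex.Each fans the per-index channels into one stream under a read lock, and the inner select stops forwarding as soon as the caller cancels. A usage sketch (packsOf is a hypothetical helper):

    // packsOf collects the pack IDs referenced by any known index, stopping
    // early when ctx is cancelled.
    func packsOf(ctx context.Context, mi *MasterIndex) restic.IDSet {
        packs := restic.NewIDSet()
        for pb := range mi.Each(ctx) {
            packs.Insert(pb.PackID)
        }
        return packs
    }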
@@ -198,13 +228,14 @@ func (mi *MasterIndex) RebuildIndex(packBlacklist restic.IDSet) (*Index, error)
    debug.Log("start rebuilding index of %d indexes, pack blacklist: %v", len(mi.idx), packBlacklist)

    newIndex := NewIndex()
    done := make(chan struct{})
    defer close(done)

    ctx, cancel := context.WithCancel(context.TODO())
    defer cancel()

    for i, idx := range mi.idx {
        debug.Log("adding index %d", i)

        for pb := range idx.Each(done) {
        for pb := range idx.Each(ctx) {
            if packBlacklist.Has(pb.PackID) {
                continue
            }
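The done-channel pattern gives way to a cancellable context here; the same mechanical migration applies to every former Each(done) caller. A condensed sketch of the pattern (eachWithCancel is a hypothetical helper, not the full RebuildIndex body):

    // eachWithCancel shows the replacement: `done := make(chan struct{});
    // defer close(done)` becomes a derived context plus `defer cancel()`,
    // and the producer watches ctx.Done() instead of the done channel.
    func eachWithCancel(parent context.Context, idx *Index) {
        ctx, cancel := context.WithCancel(parent)
        defer cancel()

        for pb := range idx.Each(ctx) {
            _ = pb // process the blob
        }
    }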
@@ -373,7 +373,7 @@ func TestRepositoryIncrementalIndex(t *testing.T) {
        idx, err := repository.LoadIndex(context.TODO(), repo, id)
        OK(t, err)

        for pb := range idx.Each(nil) {
        for pb := range idx.Each(context.TODO()) {
            if _, ok := packEntries[pb.PackID]; !ok {
                packEntries[pb.PackID] = make(map[restic.ID]struct{})
            }
@@ -6,8 +6,6 @@ import (
    "os/user"
    "path/filepath"
    "time"

    "restic/errors"
)

// Snapshot is the state of a resource at one point in time.
@@ -172,45 +170,20 @@ func (sn *Snapshot) SamePaths(paths []string) bool {
    return sn.HasPaths(paths)
}

// ErrNoSnapshotFound is returned when no snapshot for the given criteria could be found.
var ErrNoSnapshotFound = errors.New("no snapshot found")
// Snapshots is a list of snapshots.
type Snapshots []*Snapshot

// FindLatestSnapshot finds latest snapshot with optional target/directory, tags and hostname filters.
func FindLatestSnapshot(ctx context.Context, repo Repository, targets []string, tags []string, hostname string) (ID, error) {
    var (
        latest time.Time
        latestID ID
        found bool
    )

    for snapshotID := range repo.List(ctx, SnapshotFile) {
        snapshot, err := LoadSnapshot(ctx, repo, snapshotID)
        if err != nil {
            return ID{}, errors.Errorf("Error listing snapshot: %v", err)
        }
        if snapshot.Time.After(latest) && (hostname == "" || hostname == snapshot.Hostname) && snapshot.HasTags(tags) && snapshot.HasPaths(targets) {
            latest = snapshot.Time
            latestID = snapshotID
            found = true
        }
// Len returns the number of snapshots in sn.
func (sn Snapshots) Len() int {
    return len(sn)
}

    if !found {
        return ID{}, ErrNoSnapshotFound
// Less returns true iff the ith snapshot has been made after the jth.
func (sn Snapshots) Less(i, j int) bool {
    return sn[i].Time.After(sn[j].Time)
}

    return latestID, nil
}

// FindSnapshot takes a string and tries to find a snapshot whose ID matches
// the string as closely as possible.
func FindSnapshot(repo Repository, s string) (ID, error) {

    // find snapshot id with prefix
    name, err := Find(repo.Backend(), SnapshotFile, s)
    if err != nil {
        return ID{}, err
    }

    return ParseID(name)
// Swap exchanges the two snapshots.
func (sn Snapshots) Swap(i, j int) {
    sn[i], sn[j] = sn[j], sn[i]
}
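The new Snapshots type implements sort.Interface with Less comparing timestamps in reverse, so a plain sort puts the most recent snapshot first. A short sketch (newestFirst is a hypothetical helper; it assumes the standard library sort package):

    // newestFirst sorts in place and returns the same slice; sns[0] is then
    // the most recent snapshot, because Less reports "i was taken after j".
    func newestFirst(sns Snapshots) Snapshots {
        sort.Sort(sns)
        return sns
    }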
src/restic/snapshot_find.go (new file, 72 lines)
@@ -0,0 +1,72 @@
package restic

import (
    "context"
    "fmt"
    "os"
    "restic/errors"
    "time"
)

// ErrNoSnapshotFound is returned when no snapshot for the given criteria could be found.
var ErrNoSnapshotFound = errors.New("no snapshot found")

// FindLatestSnapshot finds latest snapshot with optional target/directory, tags and hostname filters.
func FindLatestSnapshot(ctx context.Context, repo Repository, targets []string, tags []string, hostname string) (ID, error) {
    var (
        latest time.Time
        latestID ID
        found bool
    )

    for snapshotID := range repo.List(ctx, SnapshotFile) {
        snapshot, err := LoadSnapshot(ctx, repo, snapshotID)
        if err != nil {
            return ID{}, errors.Errorf("Error listing snapshot: %v", err)
        }
        if snapshot.Time.After(latest) && (hostname == "" || hostname == snapshot.Hostname) && snapshot.HasTags(tags) && snapshot.HasPaths(targets) {
            latest = snapshot.Time
            latestID = snapshotID
            found = true
        }
    }

    if !found {
        return ID{}, ErrNoSnapshotFound
    }

    return latestID, nil
}

// FindSnapshot takes a string and tries to find a snapshot whose ID matches
// the string as closely as possible.
func FindSnapshot(repo Repository, s string) (ID, error) {

    // find snapshot id with prefix
    name, err := Find(repo.Backend(), SnapshotFile, s)
    if err != nil {
        return ID{}, err
    }

    return ParseID(name)
}

// FindFilteredSnapshots yields Snapshots filtered from the list of all
// snapshots.
func FindFilteredSnapshots(ctx context.Context, repo Repository, host string, tags []string, paths []string) Snapshots {
    results := make(Snapshots, 0, 20)

    for id := range repo.List(ctx, SnapshotFile) {
        sn, err := LoadSnapshot(ctx, repo, id)
        if err != nil {
            fmt.Fprintf(os.Stderr, "could not load snapshot %v: %v\n", id.Str(), err)
            continue
        }
        if (host != "" && host != sn.Hostname) || !sn.HasTags(tags) || !sn.HasPaths(paths) {
            continue
        }

        results = append(results, sn)
    }
    return results
}
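Both the fuse mount and the command-line side now go through this single helper. A small caller-side sketch (printFilteredSnapshots and the filter values are illustrative; it assumes the sort, fmt and time packages):

    // printFilteredSnapshots lists matching snapshots newest first. Snapshots
    // that fail to load are already skipped inside the helper, with a warning
    // printed to stderr.
    func printFilteredSnapshots(ctx context.Context, repo restic.Repository) {
        sns := restic.FindFilteredSnapshots(ctx, repo, "work-laptop", nil, []string{"/home/user"})
        sort.Sort(sns)
        for _, sn := range sns {
            fmt.Printf("%v  %v\n", sn.Time.Format(time.RFC3339), sn.ID().Str())
        }
    }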
@@ -6,24 +6,6 @@ import (
    "time"
)

// Snapshots is a list of snapshots.
type Snapshots []*Snapshot

// Len returns the number of snapshots in sn.
func (sn Snapshots) Len() int {
    return len(sn)
}

// Less returns true iff the ith snapshot has been made after the jth.
func (sn Snapshots) Less(i, j int) bool {
    return sn[i].Time.After(sn[j].Time)
}

// Swap exchanges the two snapshots.
func (sn Snapshots) Swap(i, j int) {
    sn[i], sn[j] = sn[j], sn[i]
}

// ExpirePolicy configures which snapshots should be automatically removed.
type ExpirePolicy struct {
    Last int // keep the last n snapshots