Add more checks for canceled contexts

Michael Eischer 2024-07-31 19:30:47 +02:00
parent 8d5e188218
commit ae1cb889dd
17 changed files with 87 additions and 3 deletions
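
The change is mechanical: each affected loop gains an early ctx.Err() check so that long-running traversals (tree walks, snapshot listings, restores, FUSE directory reads) return promptly once the context is canceled instead of finishing the whole pass first. Below is a minimal, self-contained sketch of that pattern; it is not restic code, and the names Node and processNodes are made up for illustration.

    package main

    import (
        "context"
        "fmt"
        "time"
    )

    // Node stands in for the tree/snapshot entries iterated in the real code.
    type Node struct{ Name string }

    // processNodes checks ctx.Err() once per iteration so cancellation is
    // observed between items rather than only after the loop completes.
    func processNodes(ctx context.Context, nodes []Node) error {
        for _, node := range nodes {
            if ctx.Err() != nil {
                // Returns context.Canceled or context.DeadlineExceeded,
                // telling the caller why processing stopped early.
                return ctx.Err()
            }
            fmt.Println("processing", node.Name)
            time.Sleep(10 * time.Millisecond) // simulate per-item work
        }
        return nil
    }

    func main() {
        ctx, cancel := context.WithCancel(context.Background())
        go func() {
            time.Sleep(25 * time.Millisecond)
            cancel() // e.g. the user pressed Ctrl-C
        }()

        nodes := []Node{{"a"}, {"b"}, {"c"}, {"d"}, {"e"}}
        if err := processNodes(ctx, nodes); err != nil {
            fmt.Println("aborted:", err) // aborted: context canceled
        }
    }

Checking ctx.Err() per iteration is cheap and avoids a select on ctx.Done() in loops that never block on channels; the caller still sees the cancellation reason through the returned error.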

View file

@@ -177,6 +177,10 @@ func (c *Comparer) printDir(ctx context.Context, mode string, stats *DiffStat, b
     }
     for _, node := range tree.Nodes {
+        if ctx.Err() != nil {
+            return ctx.Err()
+        }
         name := path.Join(prefix, node.Name)
         if node.Type == "dir" {
             name += "/"
@@ -204,6 +208,10 @@ func (c *Comparer) collectDir(ctx context.Context, blobs restic.BlobSet, id rest
     }
     for _, node := range tree.Nodes {
+        if ctx.Err() != nil {
+            return ctx.Err()
+        }
         addBlobs(blobs, node)
         if node.Type == "dir" {
@@ -255,6 +263,10 @@ func (c *Comparer) diffTree(ctx context.Context, stats *DiffStatsContainer, pref
     tree1Nodes, tree2Nodes, names := uniqueNodeNames(tree1, tree2)
     for _, name := range names {
+        if ctx.Err() != nil {
+            return ctx.Err()
+        }
         node1, t1 := tree1Nodes[name]
         node2, t2 := tree2Nodes[name]

View file

@@ -85,6 +85,10 @@ func printFromTree(ctx context.Context, tree *restic.Tree, repo restic.BlobLoade
     item := filepath.Join(prefix, pathComponents[0])
     l := len(pathComponents)
     for _, node := range tree.Nodes {
+        if ctx.Err() != nil {
+            return ctx.Err()
+        }
         // If dumping something in the highest level it will just take the
         // first item it finds and dump that according to the switch case below.
         if node.Name == pathComponents[0] {

View file

@@ -377,6 +377,10 @@ func (f *Finder) findIDs(ctx context.Context, sn *restic.Snapshot) error {
         if node.Type == "file" && f.blobIDs != nil {
             for _, id := range node.Content {
+                if ctx.Err() != nil {
+                    return ctx.Err()
+                }
                 idStr := id.String()
                 if _, ok := f.blobIDs[idStr]; !ok {
                     // Look for short ID form

View file

@@ -246,6 +246,10 @@ func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOption
         printer.P("Applying Policy: %v\n", policy)
         for k, snapshotGroup := range snapshotGroups {
+            if ctx.Err() != nil {
+                return ctx.Err()
+            }
             if gopts.Verbose >= 1 && !gopts.JSON {
                 err = PrintSnapshotGroupHeader(globalOptions.stdout, k)
                 if err != nil {

View file

@@ -118,6 +118,10 @@ func runRecover(ctx context.Context, gopts GlobalOptions) error {
         return nil
     }
+    if ctx.Err() != nil {
+        return ctx.Err()
+    }
     tree := restic.NewTree(len(roots))
     for id := range roots {
         var subtreeID = id

View file

@@ -81,6 +81,10 @@ func runSnapshots(ctx context.Context, opts SnapshotOptions, gopts GlobalOptions
     }
     for k, list := range snapshotGroups {
+        if ctx.Err() != nil {
+            return ctx.Err()
+        }
         if opts.Last {
             // This branch should be removed in the same time
             // that --last.
@@ -101,6 +105,10 @@ func runSnapshots(ctx context.Context, opts SnapshotOptions, gopts GlobalOptions
     }
     for k, list := range snapshotGroups {
+        if ctx.Err() != nil {
+            return ctx.Err()
+        }
         if grouped {
             err := PrintSnapshotGroupHeader(globalOptions.stdout, k)
             if err != nil {

View file

@@ -578,6 +578,10 @@ func (r *SFTP) deleteRecursive(ctx context.Context, name string) error {
     }
     for _, fi := range entries {
+        if ctx.Err() != nil {
+            return ctx.Err()
+        }
         itemName := r.Join(name, fi.Name())
         if fi.IsDir() {
             err := r.deleteRecursive(ctx, itemName)

View file

@@ -107,6 +107,10 @@ func (d *dir) open(ctx context.Context) error {
     }
     items := make(map[string]*restic.Node)
     for _, n := range tree.Nodes {
+        if ctx.Err() != nil {
+            return ctx.Err()
+        }
         nodes, err := replaceSpecialNodes(ctx, d.root.repo, n)
         if err != nil {
             debug.Log(" replaceSpecialNodes(%v) failed: %v", n, err)
@@ -171,6 +175,10 @@ func (d *dir) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) {
     })
     for _, node := range d.items {
+        if ctx.Err() != nil {
+            return nil, ctx.Err()
+        }
         name := cleanupNodeName(node.Name)
         var typ fuse.DirentType
         switch node.Type {

View file

@@ -66,12 +66,16 @@ func (f *file) Attr(_ context.Context, a *fuse.Attr) error {
 }
-func (f *file) Open(_ context.Context, _ *fuse.OpenRequest, _ *fuse.OpenResponse) (fs.Handle, error) {
+func (f *file) Open(ctx context.Context, _ *fuse.OpenRequest, _ *fuse.OpenResponse) (fs.Handle, error) {
     debug.Log("open file %v with %d blobs", f.node.Name, len(f.node.Content))
     var bytes uint64
     cumsize := make([]uint64, 1+len(f.node.Content))
     for i, id := range f.node.Content {
+        if ctx.Err() != nil {
+            return nil, ctx.Err()
+        }
         size, found := f.root.repo.LookupBlobSize(restic.DataBlob, id)
         if !found {
             return nil, errors.Errorf("id %v not found in repository", id)

View file

@@ -78,6 +78,10 @@ func (d *SnapshotsDir) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) {
     }
     for name, entry := range meta.names {
+        if ctx.Err() != nil {
+            return nil, ctx.Err()
+        }
         d := fuse.Dirent{
             Inode: inodeFromName(d.inode, name),
             Name: name,

View file

@@ -95,6 +95,10 @@ func checkPackInner(ctx context.Context, r *Repository, id restic.ID, blobs []re
     it := newPackBlobIterator(id, newBufReader(bufRd), 0, blobs, r.Key(), dec)
     for {
+        if ctx.Err() != nil {
+            return ctx.Err()
+        }
         val, err := it.Next()
         if err == errPackEOF {
             break

View file

@@ -1000,6 +1000,10 @@ func streamPackPart(ctx context.Context, beLoad backendLoadFn, loadBlobFn loadBl
     it := newPackBlobIterator(packID, newByteReader(data), dataStart, blobs, key, dec)
     for {
+        if ctx.Err() != nil {
+            return ctx.Err()
+        }
         val, err := it.Next()
         if err == errPackEOF {
             break

View file

@@ -134,6 +134,10 @@ func (f *SnapshotFilter) FindAll(ctx context.Context, be Lister, loader LoaderUn
     ids := NewIDSet()
     // Process all snapshot IDs given as arguments.
     for _, s := range snapshotIDs {
+        if ctx.Err() != nil {
+            return ctx.Err()
+        }
         var sn *Snapshot
         if s == "latest" {
             if usedFilter {

View file

@@ -122,6 +122,10 @@ func (r *fileRestorer) restoreFiles(ctx context.Context) error {
     // create packInfo from fileInfo
     for _, file := range r.files {
+        if ctx.Err() != nil {
+            return ctx.Err()
+        }
         fileBlobs := file.blobs.(restic.IDs)
         largeFile := len(fileBlobs) > largeFileBlobCount
         var packsMap map[restic.ID][]fileBlobInfo

View file

@@ -450,7 +450,7 @@ func (res *Restorer) RestoreTo(ctx context.Context, dst string) error {
         },
         leaveDir: func(node *restic.Node, target, location string, expectedFilenames []string) error {
             if res.opts.Delete {
-                if err := res.removeUnexpectedFiles(target, location, expectedFilenames); err != nil {
+                if err := res.removeUnexpectedFiles(ctx, target, location, expectedFilenames); err != nil {
                     return err
                 }
             }
@@ -469,7 +469,7 @@ func (res *Restorer) RestoreTo(ctx context.Context, dst string) error {
     return err
 }
-func (res *Restorer) removeUnexpectedFiles(target, location string, expectedFilenames []string) error {
+func (res *Restorer) removeUnexpectedFiles(ctx context.Context, target, location string, expectedFilenames []string) error {
     if !res.opts.Delete {
         panic("internal error")
     }
@@ -487,6 +487,10 @@ func (res *Restorer) removeUnexpectedFiles(target, location string, expectedFile
     }
     for _, entry := range entries {
+        if ctx.Err() != nil {
+            return ctx.Err()
+        }
         if _, ok := keep[toComparableFilename(entry)]; ok {
             continue
         }

View file

@@ -116,6 +116,10 @@ func (t *TreeRewriter) RewriteTree(ctx context.Context, repo BlobLoadSaver, node
     tb := restic.NewTreeJSONBuilder()
     for _, node := range curTree.Nodes {
+        if ctx.Err() != nil {
+            return restic.ID{}, ctx.Err()
+        }
         path := path.Join(nodepath, node.Name)
         node = t.opts.RewriteNode(node, path)
         if node == nil {

View file

@@ -57,6 +57,10 @@ func walk(ctx context.Context, repo restic.BlobLoader, prefix string, parentTree
     })
     for _, node := range tree.Nodes {
+        if ctx.Err() != nil {
+            return ctx.Err()
+        }
         p := path.Join(prefix, node.Name)
         if node.Type == "" {