package archiver

import (
    "context"
    "fmt"
    "os"
    "path"
    "runtime"
    "sort"
    "strings"
    "sync"
    "time"

    "github.com/restic/restic/internal/debug"
    "github.com/restic/restic/internal/errors"
    "github.com/restic/restic/internal/feature"
    "github.com/restic/restic/internal/fs"
    "github.com/restic/restic/internal/restic"
    "golang.org/x/sync/errgroup"
)

// SelectByNameFunc returns true for all items that should be included (files and
// dirs). If false is returned, files are ignored and dirs are not even walked.
type SelectByNameFunc func(item string) bool

// SelectFunc returns true for all items that should be included (files and
// dirs). If false is returned, files are ignored and dirs are not even walked.
type SelectFunc func(item string, fi os.FileInfo, fs fs.FS) bool

// ErrorFunc is called when an error during archiving occurs. When nil is
// returned, the archiver continues, otherwise it aborts and passes the error
// up the call stack.
type ErrorFunc func(file string, err error) error

// ItemStats collects some statistics about a particular file or directory.
type ItemStats struct {
    DataBlobs      int    // number of new data blobs added for this item
    DataSize       uint64 // sum of the sizes of all new data blobs
    DataSizeInRepo uint64 // sum of the bytes added to the repo (including compression and crypto overhead)
    TreeBlobs      int    // number of new tree blobs added for this item
    TreeSize       uint64 // sum of the sizes of all new tree blobs
    TreeSizeInRepo uint64 // sum of the bytes added to the repo (including compression and crypto overhead)
}
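
// ChangeStats counts the new, changed, and unchanged items (files or
// directories) encountered while creating a snapshot.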
type ChangeStats struct {
    New       uint
    Changed   uint
    Unchanged uint
}
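
// Summary collects the statistics of a single snapshot run: start and end
// time, per-file and per-directory change counts, the number of processed
// bytes, and the accumulated ItemStats for the data written to the repository.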
type Summary struct {
    BackupStart    time.Time
    BackupEnd      time.Time
    Files, Dirs    ChangeStats
    ProcessedBytes uint64
    ItemStats
}

// Add adds other to the current ItemStats.
func (s *ItemStats) Add(other ItemStats) {
    s.DataBlobs += other.DataBlobs
    s.DataSize += other.DataSize
    s.DataSizeInRepo += other.DataSizeInRepo
    s.TreeBlobs += other.TreeBlobs
    s.TreeSize += other.TreeSize
    s.TreeSizeInRepo += other.TreeSizeInRepo
}
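
// archiverRepo is the subset of repository functionality the Archiver needs:
// loading blobs and trees, saving blobs and unpacked data, access to the
// repository config, starting the pack uploader, and flushing pending packs.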
type archiverRepo interface {
    restic.Loader
    restic.BlobSaver
    restic.SaverUnpacked

    Config() restic.Config
    StartPackUploader(ctx context.Context, wg *errgroup.Group)
    Flush(ctx context.Context) error
}

// Archiver saves a directory structure to the repo.
//
// An Archiver has a number of worker goroutines handling saving the different
// data structures to the repository, the details are implemented by the
// fileSaver, blobSaver, and treeSaver types.
//
// The main goroutine (the one calling Snapshot()) traverses the directory tree
// and delegates all work to these worker pools. They return a futureNode which
// can be resolved later, by calling take() on it.
type Archiver struct {
    Repo         archiverRepo
    SelectByName SelectByNameFunc
    Select       SelectFunc
    FS           fs.FS
    Options      Options

    blobSaver *blobSaver
    fileSaver *fileSaver
    treeSaver *treeSaver
    mu        sync.Mutex
    summary   *Summary

    // Error is called for all errors that occur during backup.
    Error ErrorFunc

    // CompleteItem is called for all files and dirs once they have been
    // processed successfully. The parameter item contains the path as it will
    // be in the snapshot after saving. s contains some statistics about this
    // particular file/dir.
    //
    // Once reading a file has completed successfully (but not saving it yet),
    // CompleteItem will be called with current == nil.
    //
    // CompleteItem may be called asynchronously from several different
    // goroutines!
    CompleteItem func(item string, previous, current *restic.Node, s ItemStats, d time.Duration)

    // StartFile is called when a file is being processed by a worker.
    StartFile func(filename string)

    // CompleteBlob is called for all saved blobs for files.
    CompleteBlob func(bytes uint64)

    // WithAtime configures if the access time for files and directories should
    // be saved. Enabling it may result in a lot of extra metadata, so it's off
    // by default.
    WithAtime bool

    // Flags controlling change detection. See doc/040_backup.rst for details.
    ChangeIgnoreFlags uint
}

// Flags for the ChangeIgnoreFlags bitfield.
const (
    ChangeIgnoreCtime = 1 << iota
    ChangeIgnoreInode
)
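
// Illustrative example (not used elsewhere in this file): a caller that wants
// change detection based only on file size and mtime can combine the flags:
//
//    arch.ChangeIgnoreFlags = ChangeIgnoreCtime | ChangeIgnoreInode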

// Options is used to configure the archiver.
type Options struct {
    // ReadConcurrency sets how many files are read in concurrently. If
    // it's set to zero, at most two files are read in concurrently (which
    // turned out to be a good default for most situations).
    ReadConcurrency uint

    // SaveBlobConcurrency sets how many blobs are hashed and saved
    // concurrently. If it's set to zero, the default is the number of CPUs
    // available in the system.
    SaveBlobConcurrency uint

    // SaveTreeConcurrency sets how many trees are marshalled and saved to the
    // repo concurrently.
    SaveTreeConcurrency uint
}

// ApplyDefaults returns a copy of o with the default options set for all unset
// fields.
func (o Options) ApplyDefaults() Options {
    if o.ReadConcurrency == 0 {
        // two is a sweet spot for almost all situations. We've done some
        // experiments documented here:
        // https://github.com/borgbackup/borg/issues/3500
        o.ReadConcurrency = 2
    }

    if o.SaveBlobConcurrency == 0 {
        // blob saving is CPU bound due to hash checking and encryption
        // the actual upload is handled by the repository itself
        o.SaveBlobConcurrency = uint(runtime.GOMAXPROCS(0))
    }

    if o.SaveTreeConcurrency == 0 {
        // can either wait for a file, wait for a tree, serialize a tree or wait for saveblob
        // the last two are cpu-bound and thus mutually exclusive.
        // Also allow waiting for ReadConcurrency files, which is the maximum number of files
        // that can currently be in progress. The main backup loop blocks when trying to queue
        // more files to read.
        o.SaveTreeConcurrency = uint(runtime.GOMAXPROCS(0)) + o.ReadConcurrency
    }

    return o
}

// New initializes a new archiver.
func New(repo archiverRepo, filesystem fs.FS, opts Options) *Archiver {
    arch := &Archiver{
        Repo:         repo,
        SelectByName: func(_ string) bool { return true },
        Select:       func(_ string, _ os.FileInfo, _ fs.FS) bool { return true },
        FS:           filesystem,
        Options:      opts.ApplyDefaults(),

        CompleteItem: func(string, *restic.Node, *restic.Node, ItemStats, time.Duration) {},
        StartFile:    func(string) {},
        CompleteBlob: func(uint64) {},
    }

    return arch
}
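
// A minimal usage sketch (illustrative only): the repository value and the
// context have to be provided by the caller, and fs.Local is assumed to be
// the local filesystem implementation from the internal fs package.
//
//    arch := New(repo, fs.Local{}, Options{})
//    sn, id, summary, err := arch.Snapshot(ctx, []string{"/home/user"},
//        SnapshotOptions{Time: time.Now(), BackupStart: time.Now()})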

// error calls arch.Error if it is set and the error is different from context.Canceled.
func (arch *Archiver) error(item string, err error) error {
    if arch.Error == nil || err == nil {
        return err
    }

    if err == context.Canceled {
        return err
    }

    // not all errors include the filepath, thus add it if it is missing
    if !strings.Contains(err.Error(), item) {
        err = fmt.Errorf("%v: %w", item, err)
    }

    errf := arch.Error(item, err)
    if err != errf {
        debug.Log("item %v: error was filtered by handler, before: %q, after: %v", item, err, errf)
    }
    return errf
}
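
// trackItem reports a finished item to the CompleteItem callback and records
// its statistics in the summary of the current snapshot run.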
func (arch *Archiver) trackItem(item string, previous, current *restic.Node, s ItemStats, d time.Duration) {
    arch.CompleteItem(item, previous, current, s, d)

    arch.mu.Lock()
    defer arch.mu.Unlock()

    arch.summary.ItemStats.Add(s)

    if current != nil {
        arch.summary.ProcessedBytes += current.Size
    } else {
        // last item or an error occurred
        return
    }

    switch current.Type {
    case restic.NodeTypeDir:
        switch {
        case previous == nil:
            arch.summary.Dirs.New++
        case previous.Equals(*current):
            arch.summary.Dirs.Unchanged++
        default:
            arch.summary.Dirs.Changed++
        }

    case restic.NodeTypeFile:
        switch {
        case previous == nil:
            arch.summary.Files.New++
        case previous.Equals(*current):
            arch.summary.Files.Unchanged++
        default:
            arch.summary.Files.Changed++
        }
    }
}

// nodeFromFileInfo returns the restic node from an os.FileInfo.
func (arch *Archiver) nodeFromFileInfo(snPath, filename string, fi os.FileInfo, ignoreXattrListError bool) (*restic.Node, error) {
    node, err := arch.FS.NodeFromFileInfo(filename, fi, ignoreXattrListError)
    if !arch.WithAtime {
        node.AccessTime = node.ModTime
    }
    if feature.Flag.Enabled(feature.DeviceIDForHardlinks) {
        if node.Links == 1 || node.Type == restic.NodeTypeDir {
            // the DeviceID is only necessary for hardlinked files
            // when using subvolumes or snapshots their deviceIDs tend to change which causes
            // restic to upload new tree blobs
            node.DeviceID = 0
        }
    }
    // overwrite name to match that within the snapshot
    node.Name = path.Base(snPath)
    // do not filter error for nodes of irregular or invalid type
    if node.Type != restic.NodeTypeIrregular && node.Type != restic.NodeTypeInvalid && err != nil {
        err = fmt.Errorf("incomplete metadata for %v: %w", filename, err)
        return node, arch.error(filename, err)
    }
    return node, err
}

// loadSubtree tries to load the subtree referenced by node. In case of an error, nil is returned.
// If there is no node to load, then nil is returned without an error.
func (arch *Archiver) loadSubtree(ctx context.Context, node *restic.Node) (*restic.Tree, error) {
    if node == nil || node.Type != restic.NodeTypeDir || node.Subtree == nil {
        return nil, nil
    }

    tree, err := restic.LoadTree(ctx, arch.Repo, *node.Subtree)
    if err != nil {
        debug.Log("unable to load tree %v: %v", node.Subtree.Str(), err)
        // a tree in the repository is not readable -> warn the user
        return nil, arch.wrapLoadTreeError(*node.Subtree, err)
    }

    return tree, nil
}

func (arch *Archiver) wrapLoadTreeError(id restic.ID, err error) error {
    if _, ok := arch.Repo.LookupBlobSize(restic.TreeBlob, id); ok {
        err = errors.Errorf("tree %v could not be loaded; the repository could be damaged: %v", id, err)
    } else {
        err = errors.Errorf("tree %v is not known; the repository could be damaged, run `repair index` to try to repair it", id)
    }
    return err
}

// saveDir stores a directory in the repo and returns the node. snPath is the
// path within the current snapshot.
func (arch *Archiver) saveDir(ctx context.Context, snPath string, dir string, fi os.FileInfo, previous *restic.Tree, complete fileCompleteFunc) (d futureNode, err error) {
    debug.Log("%v %v", snPath, dir)

    treeNode, err := arch.nodeFromFileInfo(snPath, dir, fi, false)
    if err != nil {
        return futureNode{}, err
    }

    names, err := fs.Readdirnames(arch.FS, dir, fs.O_NOFOLLOW)
    if err != nil {
        return futureNode{}, err
    }
    sort.Strings(names)

    nodes := make([]futureNode, 0, len(names))

    for _, name := range names {
        // test if context has been cancelled
        if ctx.Err() != nil {
            debug.Log("context has been cancelled, aborting")
            return futureNode{}, ctx.Err()
        }

        pathname := arch.FS.Join(dir, name)
        oldNode := previous.Find(name)
        snItem := join(snPath, name)
        fn, excluded, err := arch.save(ctx, snItem, pathname, oldNode)

        // return error early if possible
        if err != nil {
            err = arch.error(pathname, err)
            if err == nil {
                // ignore error
                continue
            }

            return futureNode{}, err
        }

        if excluded {
            continue
        }

        nodes = append(nodes, fn)
    }

    fn := arch.treeSaver.Save(ctx, snPath, dir, treeNode, nodes, complete)

    return fn, nil
}

// futureNode holds a reference to a channel that returns a futureNodeResult
// or a reference to an already existing result. If the result is available
// immediately, then storing a reference directly requires less memory than
// using the indirection via a channel.
type futureNode struct {
    ch  <-chan futureNodeResult
    res *futureNodeResult
}

type futureNodeResult struct {
    snPath, target string

    node  *restic.Node
    stats ItemStats
    err   error
}

func newFutureNode() (futureNode, chan<- futureNodeResult) {
    ch := make(chan futureNodeResult, 1)
    return futureNode{ch: ch}, ch
}

func newFutureNodeWithResult(res futureNodeResult) futureNode {
    return futureNode{
        res: &res,
    }
}
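
// take returns the result of the futureNode, either from the stored value or
// by waiting on the channel. If the context is cancelled first, a result
// carrying ctx.Err() is returned.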
func (fn *futureNode) take(ctx context.Context) futureNodeResult {
    if fn.res != nil {
        res := fn.res
        // free result
        fn.res = nil
        return *res
    }
    select {
    case res, ok := <-fn.ch:
        if ok {
            // free channel
            fn.ch = nil
            return res
        }
    case <-ctx.Done():
        return futureNodeResult{err: ctx.Err()}
    }
    return futureNodeResult{err: errors.Errorf("no result")}
}

// allBlobsPresent checks if all blobs (contents) of the given node are
// present in the index.
func (arch *Archiver) allBlobsPresent(previous *restic.Node) bool {
    // check if all blobs are contained in index
    for _, id := range previous.Content {
        if _, ok := arch.Repo.LookupBlobSize(restic.DataBlob, id); !ok {
            return false
        }
    }
    return true
}

// save saves a target (file or directory) to the repo. If the item is
// excluded, this function returns a nil node and error, with excluded set to
// true.
//
// Errors and completion need to be handled by the caller.
//
// snPath is the path within the current snapshot.
func (arch *Archiver) save(ctx context.Context, snPath, target string, previous *restic.Node) (fn futureNode, excluded bool, err error) {
    start := time.Now()

    debug.Log("%v target %q, previous %v", snPath, target, previous)
    abstarget, err := arch.FS.Abs(target)
    if err != nil {
        return futureNode{}, false, err
    }

    filterError := func(err error) (futureNode, bool, error) {
        err = arch.error(abstarget, err)
        if err != nil {
            return futureNode{}, false, errors.WithStack(err)
        }
        return futureNode{}, true, nil
    }
    // exclude files by path before running Lstat to reduce number of lstat calls
    if !arch.SelectByName(abstarget) {
        debug.Log("%v is excluded by path", target)
        return futureNode{}, true, nil
    }

    // get file info and run remaining select functions that require file information
    fi, err := arch.FS.Lstat(target)
    if err != nil {
        debug.Log("lstat() for %v returned error: %v", target, err)
        return filterError(err)
    }
    if !arch.Select(abstarget, fi, arch.FS) {
        debug.Log("%v is excluded", target)
        return futureNode{}, true, nil
    }

    switch {
    case fi.Mode().IsRegular():
        debug.Log("  %v regular file", target)

        // check if the file has not changed before performing a fopen operation (more expensive, especially
        // in network filesystems)
        if previous != nil && !fileChanged(arch.FS, fi, previous, arch.ChangeIgnoreFlags) {
            if arch.allBlobsPresent(previous) {
                debug.Log("%v hasn't changed, using old list of blobs", target)
                arch.trackItem(snPath, previous, previous, ItemStats{}, time.Since(start))
                arch.CompleteBlob(previous.Size)
                node, err := arch.nodeFromFileInfo(snPath, target, fi, false)
                if err != nil {
                    return futureNode{}, false, err
                }

                // copy list of blobs
                node.Content = previous.Content

                fn = newFutureNodeWithResult(futureNodeResult{
                    snPath: snPath,
                    target: target,
                    node:   node,
                })
                return fn, false, nil
            }

            debug.Log("%v hasn't changed, but contents are missing!", target)
            // There are contents missing - inform user!
            err := errors.Errorf("parts of %v not found in the repository index; storing the file again", target)
            err = arch.error(abstarget, err)
            if err != nil {
                return futureNode{}, false, err
            }
        }

        // reopen file and do an fstat() on the open file to check it is still
        // a file (and has not been exchanged for e.g. a symlink)
        file, err := arch.FS.OpenFile(target, fs.O_RDONLY|fs.O_NOFOLLOW, 0)
        if err != nil {
            debug.Log("Openfile() for %v returned error: %v", target, err)
            return filterError(err)
        }

        fi, err = file.Stat()
        if err != nil {
            debug.Log("stat() on opened file %v returned error: %v", target, err)
            _ = file.Close()
            return filterError(err)
        }

        // make sure it's still a file
        if !fi.Mode().IsRegular() {
            err = errors.Errorf("file %v changed type, refusing to archive", target)
            _ = file.Close()
            return filterError(err)
        }

        // Save will close the file, we don't need to do that
        fn = arch.fileSaver.Save(ctx, snPath, target, file, fi, func() {
            arch.StartFile(snPath)
        }, func() {
            arch.trackItem(snPath, nil, nil, ItemStats{}, 0)
        }, func(node *restic.Node, stats ItemStats) {
            arch.trackItem(snPath, previous, node, stats, time.Since(start))
        })

    case fi.IsDir():
        debug.Log("  %v dir", target)

        snItem := snPath + "/"
        oldSubtree, err := arch.loadSubtree(ctx, previous)
        if err != nil {
            err = arch.error(abstarget, err)
        }
        if err != nil {
            return futureNode{}, false, err
        }

        fn, err = arch.saveDir(ctx, snPath, target, fi, oldSubtree,
            func(node *restic.Node, stats ItemStats) {
                arch.trackItem(snItem, previous, node, stats, time.Since(start))
            })
        if err != nil {
            debug.Log("SaveDir for %v returned error: %v", snPath, err)
            return futureNode{}, false, err
        }

    case fi.Mode()&os.ModeSocket > 0:
        debug.Log("  %v is a socket, ignoring", target)
        return futureNode{}, true, nil

    default:
        debug.Log("  %v other", target)

        node, err := arch.nodeFromFileInfo(snPath, target, fi, false)
        if err != nil {
            return futureNode{}, false, err
        }
        fn = newFutureNodeWithResult(futureNodeResult{
            snPath: snPath,
            target: target,
            node:   node,
        })
    }

    debug.Log("return after %.3f", time.Since(start).Seconds())

    return fn, false, nil
}

// fileChanged tries to detect whether a file's content has changed compared
// to the contents of node, which describes the same path in the parent backup.
// It should only be run for regular files.
func fileChanged(fs fs.FS, fi os.FileInfo, node *restic.Node, ignoreFlags uint) bool {
    switch {
    case node == nil:
        return true
    case node.Type != restic.NodeTypeFile:
        // We're only called for regular files, so this is a type change.
        return true
    case uint64(fi.Size()) != node.Size:
        return true
    case !fi.ModTime().Equal(node.ModTime):
        return true
    }

    checkCtime := ignoreFlags&ChangeIgnoreCtime == 0
    checkInode := ignoreFlags&ChangeIgnoreInode == 0

    extFI := fs.ExtendedStat(fi)
    switch {
    case checkCtime && !extFI.ChangeTime.Equal(node.ChangeTime):
        return true
    case checkInode && node.Inode != extFI.Inode:
        return true
    }

    return false
}

// join returns all elements separated with a forward slash.
func join(elem ...string) string {
    return path.Join(elem...)
}

// statDir returns the file info for the directory. Symbolic links are
// resolved. If the target directory is not a directory, an error is returned.
func (arch *Archiver) statDir(dir string) (os.FileInfo, error) {
    fi, err := arch.FS.Stat(dir)
    if err != nil {
        return nil, errors.WithStack(err)
    }

    tpe := fi.Mode() & (os.ModeType | os.ModeCharDevice)
    if tpe != os.ModeDir {
        return fi, errors.Errorf("path is not a directory: %v", dir)
    }

    return fi, nil
}

// saveTree stores a Tree in the repo and returns a futureNode for it together
// with the number of child nodes. snPath is the path within the current
// snapshot.
func (arch *Archiver) saveTree(ctx context.Context, snPath string, atree *tree, previous *restic.Tree, complete fileCompleteFunc) (futureNode, int, error) {

    var node *restic.Node
    if snPath != "/" {
        if atree.FileInfoPath == "" {
            return futureNode{}, 0, errors.Errorf("FileInfoPath for %v is empty", snPath)
        }

        fi, err := arch.statDir(atree.FileInfoPath)
        if err != nil {
            return futureNode{}, 0, err
        }

        debug.Log("%v, dir node data loaded from %v", snPath, atree.FileInfoPath)
        // in some cases reading xattrs for directories above the backup source is not allowed
        // thus ignore errors for such folders.
        node, err = arch.nodeFromFileInfo(snPath, atree.FileInfoPath, fi, true)
        if err != nil {
            return futureNode{}, 0, err
        }
    } else {
        // fake root node
        node = &restic.Node{}
    }

    debug.Log("%v (%v nodes), parent %v", snPath, len(atree.Nodes), previous)
    nodeNames := atree.NodeNames()
    nodes := make([]futureNode, 0, len(nodeNames))

    // iterate over the nodes of atree in lexicographic (=deterministic) order
    for _, name := range nodeNames {
        subatree := atree.Nodes[name]

        // test if context has been cancelled
        if ctx.Err() != nil {
            return futureNode{}, 0, ctx.Err()
        }

        // this is a leaf node
        if subatree.Leaf() {
            fn, excluded, err := arch.save(ctx, join(snPath, name), subatree.Path, previous.Find(name))

            if err != nil {
                err = arch.error(subatree.Path, err)
                if err == nil {
                    // ignore error
                    continue
                }
                return futureNode{}, 0, err
            }

            if err != nil {
                return futureNode{}, 0, err
            }

            if !excluded {
                nodes = append(nodes, fn)
            }
            continue
        }

        snItem := join(snPath, name) + "/"
        start := time.Now()

        oldNode := previous.Find(name)
        oldSubtree, err := arch.loadSubtree(ctx, oldNode)
        if err != nil {
            err = arch.error(join(snPath, name), err)
        }
        if err != nil {
            return futureNode{}, 0, err
        }

        // not a leaf node, archive subtree
        fn, _, err := arch.saveTree(ctx, join(snPath, name), &subatree, oldSubtree, func(n *restic.Node, is ItemStats) {
            arch.trackItem(snItem, oldNode, n, is, time.Since(start))
        })
        if err != nil {
            return futureNode{}, 0, err
        }
        nodes = append(nodes, fn)
    }

    fn := arch.treeSaver.Save(ctx, snPath, atree.FileInfoPath, node, nodes, complete)
    return fn, len(nodes), nil
}

// resolveRelativeTargets replaces targets that only contain relative
// directories ("." or "../../") with the contents of the directory. Each
// element of targets is processed with fs.Clean().
func resolveRelativeTargets(filesys fs.FS, targets []string) ([]string, error) {
    debug.Log("targets before resolving: %v", targets)
    result := make([]string, 0, len(targets))
    for _, target := range targets {
        if target != "" && filesys.VolumeName(target) == target {
            // special case to allow users to also specify a volume name "C:" instead of a path "C:\"
            target = target + filesys.Separator()
        } else {
            target = filesys.Clean(target)
        }
        pc, _ := pathComponents(filesys, target, false)
        if len(pc) > 0 {
            result = append(result, target)
            continue
        }

        debug.Log("replacing %q with readdir(%q)", target, target)
        entries, err := fs.Readdirnames(filesys, target, fs.O_NOFOLLOW)
        if err != nil {
            return nil, err
        }
        sort.Strings(entries)

        for _, name := range entries {
            result = append(result, filesys.Join(target, name))
        }
    }

    debug.Log("targets after resolving: %v", result)
    return result, nil
}

// SnapshotOptions collects attributes for a new snapshot.
type SnapshotOptions struct {
    Tags           restic.TagList
    Hostname       string
    Excludes       []string
    BackupStart    time.Time
    Time           time.Time
    ParentSnapshot *restic.Snapshot
    ProgramVersion string
    // SkipIfUnchanged omits the snapshot creation if it is identical to the parent snapshot.
    SkipIfUnchanged bool
}

// loadParentTree loads the tree referenced by the given parent snapshot. If
// sn is nil or has no tree, nil is returned.
func (arch *Archiver) loadParentTree(ctx context.Context, sn *restic.Snapshot) *restic.Tree {
    if sn == nil {
        return nil
    }

    if sn.Tree == nil {
        debug.Log("snapshot %v has empty tree", *sn.ID())
        return nil
    }

    debug.Log("load parent tree %v", *sn.Tree)
    tree, err := restic.LoadTree(ctx, arch.Repo, *sn.Tree)
    if err != nil {
        debug.Log("unable to load tree %v: %v", *sn.Tree, err)
        _ = arch.error("/", arch.wrapLoadTreeError(*sn.Tree, err))
        return nil
    }
    return tree
}

// runWorkers starts the worker pools, which are stopped when the context is cancelled.
func (arch *Archiver) runWorkers(ctx context.Context, wg *errgroup.Group) {
    arch.blobSaver = newBlobSaver(ctx, wg, arch.Repo, arch.Options.SaveBlobConcurrency)

    arch.fileSaver = newFileSaver(ctx, wg,
        arch.blobSaver.Save,
        arch.Repo.Config().ChunkerPolynomial,
        arch.Options.ReadConcurrency, arch.Options.SaveBlobConcurrency)
    arch.fileSaver.CompleteBlob = arch.CompleteBlob
    arch.fileSaver.NodeFromFileInfo = arch.nodeFromFileInfo

    arch.treeSaver = newTreeSaver(ctx, wg, arch.Options.SaveTreeConcurrency, arch.blobSaver.Save, arch.Error)
}
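
// stopWorkers shuts down the blob, file, and tree savers and drops the
// references so the workers can be garbage collected.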
func (arch *Archiver) stopWorkers() {
    arch.blobSaver.TriggerShutdown()
    arch.fileSaver.TriggerShutdown()
    arch.treeSaver.TriggerShutdown()
    arch.blobSaver = nil
    arch.fileSaver = nil
    arch.treeSaver = nil
}

// Snapshot saves several targets and returns a snapshot.
func (arch *Archiver) Snapshot(ctx context.Context, targets []string, opts SnapshotOptions) (*restic.Snapshot, restic.ID, *Summary, error) {
    arch.summary = &Summary{
        BackupStart: opts.BackupStart,
    }

    cleanTargets, err := resolveRelativeTargets(arch.FS, targets)
    if err != nil {
        return nil, restic.ID{}, nil, err
    }

    atree, err := newTree(arch.FS, cleanTargets)
    if err != nil {
        return nil, restic.ID{}, nil, err
    }

    var rootTreeID restic.ID

    wgUp, wgUpCtx := errgroup.WithContext(ctx)
    arch.Repo.StartPackUploader(wgUpCtx, wgUp)

    wgUp.Go(func() error {
        wg, wgCtx := errgroup.WithContext(wgUpCtx)
        start := time.Now()

        wg.Go(func() error {
            arch.runWorkers(wgCtx, wg)

            debug.Log("starting snapshot")
            fn, nodeCount, err := arch.saveTree(wgCtx, "/", atree, arch.loadParentTree(wgCtx, opts.ParentSnapshot), func(_ *restic.Node, is ItemStats) {
                arch.trackItem("/", nil, nil, is, time.Since(start))
            })
            if err != nil {
                return err
            }

            fnr := fn.take(wgCtx)
            if fnr.err != nil {
                return fnr.err
            }

            if wgCtx.Err() != nil {
                return wgCtx.Err()
            }

            if nodeCount == 0 {
                return errors.New("snapshot is empty")
            }

            rootTreeID = *fnr.node.Subtree
            arch.stopWorkers()
            return nil
        })

        err = wg.Wait()
        debug.Log("err is %v", err)

        if err != nil {
            debug.Log("error while saving tree: %v", err)
            return err
        }

        return arch.Repo.Flush(ctx)
    })
    err = wgUp.Wait()
    if err != nil {
        return nil, restic.ID{}, nil, err
    }

    if opts.ParentSnapshot != nil && opts.SkipIfUnchanged {
        ps := opts.ParentSnapshot
        if ps.Tree != nil && rootTreeID.Equal(*ps.Tree) {
            return nil, restic.ID{}, arch.summary, nil
        }
    }

    sn, err := restic.NewSnapshot(targets, opts.Tags, opts.Hostname, opts.Time)
    if err != nil {
        return nil, restic.ID{}, nil, err
    }

    sn.ProgramVersion = opts.ProgramVersion
    sn.Excludes = opts.Excludes
    if opts.ParentSnapshot != nil {
        sn.Parent = opts.ParentSnapshot.ID()
    }
    sn.Tree = &rootTreeID
    arch.summary.BackupEnd = time.Now()
    sn.Summary = &restic.SnapshotSummary{
        BackupStart: arch.summary.BackupStart,
        BackupEnd:   arch.summary.BackupEnd,

        FilesNew:            arch.summary.Files.New,
        FilesChanged:        arch.summary.Files.Changed,
        FilesUnmodified:     arch.summary.Files.Unchanged,
        DirsNew:             arch.summary.Dirs.New,
        DirsChanged:         arch.summary.Dirs.Changed,
        DirsUnmodified:      arch.summary.Dirs.Unchanged,
        DataBlobs:           arch.summary.ItemStats.DataBlobs,
        TreeBlobs:           arch.summary.ItemStats.TreeBlobs,
        DataAdded:           arch.summary.ItemStats.DataSize + arch.summary.ItemStats.TreeSize,
        DataAddedPacked:     arch.summary.ItemStats.DataSizeInRepo + arch.summary.ItemStats.TreeSizeInRepo,
        TotalFilesProcessed: arch.summary.Files.New + arch.summary.Files.Changed + arch.summary.Files.Unchanged,
        TotalBytesProcessed: arch.summary.ProcessedBytes,
    }

    id, err := restic.SaveSnapshot(ctx, arch.Repo, sn)
    if err != nil {
        return nil, restic.ID{}, nil, err
    }

    return sn, id, arch.summary, nil
}