Merge pull request #5023 from MichaelEischer/cleanup-archiver

archiver: use FS interface nearly everywhere and cleanup exports
Michael Eischer 2024-08-31 18:14:47 +02:00, committed by GitHub
commit 3b438e5c7c
31 changed files with 933 additions and 883 deletions
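
In short: the rejection helpers that lived as unexported code in cmd/restic/exclude.go move into internal/archiver and internal/filter as exported types, and they now take the backup's fs.FS explicitly instead of assuming the local filesystem. A minimal illustrative sketch (not part of the commit) of how the exported pieces compose, using only names and signatures visible in the diff below:

    package main

    import (
        "log"

        "github.com/restic/restic/internal/archiver"
        "github.com/restic/restic/internal/fs"
    )

    func main() {
        targets := []string{"/home"}

        // --one-file-system: the device check now takes the filesystem explicitly
        byDevice, err := archiver.RejectByDevice(targets, fs.Local{})
        if err != nil {
            log.Fatal(err)
        }

        // --exclude-larger-than: the size limit is parsed by the caller first
        bySize, err := archiver.RejectBySize(10 * 1024 * 1024)
        if err != nil {
            log.Fatal(err)
        }

        // both funcs collapse into a single select callback for the Archiver
        selectFilter := archiver.CombineRejects([]archiver.RejectFunc{byDevice, bySize})
        _ = selectFilter
    }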


@@ -20,10 +20,12 @@ import (
 	"github.com/restic/restic/internal/archiver"
 	"github.com/restic/restic/internal/debug"
 	"github.com/restic/restic/internal/errors"
+	"github.com/restic/restic/internal/filter"
 	"github.com/restic/restic/internal/fs"
 	"github.com/restic/restic/internal/repository"
 	"github.com/restic/restic/internal/restic"
 	"github.com/restic/restic/internal/textfile"
+	"github.com/restic/restic/internal/ui"
 	"github.com/restic/restic/internal/ui/backup"
 	"github.com/restic/restic/internal/ui/termstatus"
 )
@@ -66,7 +68,7 @@ Exit status is 12 if the password is incorrect.
 // BackupOptions bundles all options for the backup command.
 type BackupOptions struct {
-	excludePatternOptions
+	filter.ExcludePatternOptions
 
 	Parent  string
 	GroupBy restic.SnapshotGroupByOptions
@@ -108,7 +110,7 @@ func init() {
 	f.VarP(&backupOptions.GroupBy, "group-by", "g", "`group` snapshots by host, paths and/or tags, separated by comma (disable grouping with '')")
 	f.BoolVarP(&backupOptions.Force, "force", "f", false, `force re-reading the source files/directories (overrides the "parent" flag)`)
-	initExcludePatternOptions(f, &backupOptions.excludePatternOptions)
+	backupOptions.ExcludePatternOptions.Add(f)
 	f.BoolVarP(&backupOptions.ExcludeOtherFS, "one-file-system", "x", false, "exclude other file systems, don't cross filesystem boundaries and subvolumes")
 	f.StringArrayVar(&backupOptions.ExcludeIfPresent, "exclude-if-present", nil, "takes `filename[:header]`, exclude contents of directories containing filename (except filename itself) if header of that file is as provided (can be specified multiple times)")
@@ -297,7 +299,7 @@ func (opts BackupOptions) Check(gopts GlobalOptions, args []string) error {
 // collectRejectByNameFuncs returns a list of all functions which may reject data
 // from being saved in a snapshot based on path only
-func collectRejectByNameFuncs(opts BackupOptions, repo *repository.Repository) (fs []RejectByNameFunc, err error) {
+func collectRejectByNameFuncs(opts BackupOptions, repo *repository.Repository) (fs []archiver.RejectByNameFunc, err error) {
 	// exclude restic cache
 	if repo.Cache != nil {
 		f, err := rejectResticCache(repo)
@@ -308,23 +310,12 @@ func collectRejectByNameFuncs(opts BackupOptions, repo *repository.Repository) (
 		fs = append(fs, f)
 	}
 
-	fsPatterns, err := opts.excludePatternOptions.CollectPatterns()
+	fsPatterns, err := opts.ExcludePatternOptions.CollectPatterns(Warnf)
 	if err != nil {
 		return nil, err
 	}
-	fs = append(fs, fsPatterns...)
-
-	if opts.ExcludeCaches {
-		opts.ExcludeIfPresent = append(opts.ExcludeIfPresent, "CACHEDIR.TAG:Signature: 8a477f597d28d172789f06886806bc55")
-	}
-
-	for _, spec := range opts.ExcludeIfPresent {
-		f, err := rejectIfPresent(spec)
-		if err != nil {
-			return nil, err
-		}
-		fs = append(fs, f)
+	for _, pat := range fsPatterns {
+		fs = append(fs, archiver.RejectByNameFunc(pat))
 	}
 
 	return fs, nil
@@ -332,25 +323,43 @@ func collectRejectByNameFuncs(opts BackupOptions, repo *repository.Repository) (
 // collectRejectFuncs returns a list of all functions which may reject data
 // from being saved in a snapshot based on path and file info
-func collectRejectFuncs(opts BackupOptions, targets []string) (fs []RejectFunc, err error) {
+func collectRejectFuncs(opts BackupOptions, targets []string, fs fs.FS) (funcs []archiver.RejectFunc, err error) {
 	// allowed devices
-	if opts.ExcludeOtherFS && !opts.Stdin {
-		f, err := rejectByDevice(targets)
+	if opts.ExcludeOtherFS && !opts.Stdin && !opts.StdinCommand {
+		f, err := archiver.RejectByDevice(targets, fs)
 		if err != nil {
 			return nil, err
 		}
-		fs = append(fs, f)
+		funcs = append(funcs, f)
 	}
 
-	if len(opts.ExcludeLargerThan) != 0 && !opts.Stdin {
-		f, err := rejectBySize(opts.ExcludeLargerThan)
+	if len(opts.ExcludeLargerThan) != 0 && !opts.Stdin && !opts.StdinCommand {
+		maxSize, err := ui.ParseBytes(opts.ExcludeLargerThan)
 		if err != nil {
 			return nil, err
 		}
-		fs = append(fs, f)
+
+		f, err := archiver.RejectBySize(maxSize)
+		if err != nil {
+			return nil, err
+		}
+		funcs = append(funcs, f)
 	}
 
-	return fs, nil
+	if opts.ExcludeCaches {
+		opts.ExcludeIfPresent = append(opts.ExcludeIfPresent, "CACHEDIR.TAG:Signature: 8a477f597d28d172789f06886806bc55")
+	}
+
+	for _, spec := range opts.ExcludeIfPresent {
+		f, err := archiver.RejectIfPresent(spec, Warnf)
+		if err != nil {
+			return nil, err
		}
+		funcs = append(funcs, f)
+	}
+
+	return funcs, nil
 }
 
 // collectTargets returns a list of target files/dirs from several sources.
@@ -505,12 +514,6 @@ func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, ter
 		return err
 	}
 
-	// rejectFuncs collect functions that can reject items from the backup based on path and file info
-	rejectFuncs, err := collectRejectFuncs(opts, targets)
-	if err != nil {
-		return err
-	}
-
 	var parentSnapshot *restic.Snapshot
 	if !opts.Stdin {
 		parentSnapshot, err = findParentSnapshot(ctx, repo, opts, targets, timeStamp)
@@ -532,30 +535,11 @@ func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, ter
 	}
 
 	bar := newIndexTerminalProgress(gopts.Quiet, gopts.JSON, term)
 	err = repo.LoadIndex(ctx, bar)
 	if err != nil {
 		return err
 	}
 
-	selectByNameFilter := func(item string) bool {
-		for _, reject := range rejectByNameFuncs {
-			if reject(item) {
-				return false
-			}
-		}
-		return true
-	}
-
-	selectFilter := func(item string, fi os.FileInfo) bool {
-		for _, reject := range rejectFuncs {
-			if reject(item, fi) {
-				return false
-			}
-		}
-		return true
-	}
-
 	var targetFS fs.FS = fs.Local{}
 	if runtime.GOOS == "windows" && opts.UseFsSnapshot {
 		if err = fs.HasSufficientPrivilegesForVSS(); err != nil {
@@ -598,6 +582,15 @@ func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, ter
 		targets = []string{filename}
 	}
 
+	// rejectFuncs collect functions that can reject items from the backup based on path and file info
+	rejectFuncs, err := collectRejectFuncs(opts, targets, targetFS)
+	if err != nil {
+		return err
+	}
+
+	selectByNameFilter := archiver.CombineRejectByNames(rejectByNameFuncs)
+	selectFilter := archiver.CombineRejects(rejectFuncs)
+
 	wg, wgCtx := errgroup.WithContext(ctx)
 	cancelCtx, cancel := context.WithCancel(wgCtx)
 	defer cancel()
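
Note on the pattern options seen above: commands now embed filter.ExcludePatternOptions and register flags through its Add method instead of a package-local helper. A hedged sketch with a placeholder options struct (fooOptions is illustrative; the flag names come from the removed initExcludePatternOptions):

    package main

    import (
        "github.com/restic/restic/internal/filter"
        "github.com/spf13/pflag"
    )

    // fooOptions is a placeholder mirroring how BackupOptions, RestoreOptions
    // and RewriteOptions embed the pattern options after this commit.
    type fooOptions struct {
        filter.ExcludePatternOptions
    }

    func main() {
        var opts fooOptions
        f := pflag.NewFlagSet("foo", pflag.ContinueOnError)
        // registers --exclude, --iexclude, --exclude-file and --iexclude-file
        opts.ExcludePatternOptions.Add(f)
        _ = f.Parse([]string{"--exclude", "*.log"})
    }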


@@ -7,6 +7,7 @@ import (
 	"github.com/restic/restic/internal/debug"
 	"github.com/restic/restic/internal/errors"
+	"github.com/restic/restic/internal/filter"
 	"github.com/restic/restic/internal/restic"
 	"github.com/restic/restic/internal/restorer"
 	"github.com/restic/restic/internal/ui"
@@ -49,8 +50,8 @@ Exit status is 12 if the password is incorrect.
 // RestoreOptions collects all options for the restore command.
 type RestoreOptions struct {
-	excludePatternOptions
-	includePatternOptions
+	filter.ExcludePatternOptions
+	filter.IncludePatternOptions
 	Target string
 	restic.SnapshotFilter
 	DryRun bool
@@ -68,8 +69,8 @@ func init() {
 	flags := cmdRestore.Flags()
 	flags.StringVarP(&restoreOptions.Target, "target", "t", "", "directory to extract data to")
 
-	initExcludePatternOptions(flags, &restoreOptions.excludePatternOptions)
-	initIncludePatternOptions(flags, &restoreOptions.includePatternOptions)
+	restoreOptions.ExcludePatternOptions.Add(flags)
+	restoreOptions.IncludePatternOptions.Add(flags)
 
 	initSingleSnapshotFilter(flags, &restoreOptions.SnapshotFilter)
 	flags.BoolVar(&restoreOptions.DryRun, "dry-run", false, "do not write any data, just show what would be done")
@@ -82,12 +83,12 @@ func init() {
 func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions,
 	term *termstatus.Terminal, args []string) error {
 
-	excludePatternFns, err := opts.excludePatternOptions.CollectPatterns()
+	excludePatternFns, err := opts.ExcludePatternOptions.CollectPatterns(Warnf)
 	if err != nil {
 		return err
 	}
 
-	includePatternFns, err := opts.includePatternOptions.CollectPatterns()
+	includePatternFns, err := opts.IncludePatternOptions.CollectPatterns(Warnf)
 	if err != nil {
 		return err
 	}
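
CollectPatterns now takes the warning printer as an argument instead of calling the CLI's global Warnf, which is what lets the type live in internal/filter. An illustrative sketch; the printf-style callback shape is an assumption based on how Warnf is passed above:

    package main

    import (
        "fmt"

        "github.com/restic/restic/internal/filter"
    )

    func main() {
        // capture warnings instead of printing them; the callback is assumed
        // to match Warnf's printf-style signature
        var warnings []string
        warn := func(format string, args ...interface{}) {
            warnings = append(warnings, fmt.Sprintf(format, args...))
        }

        opts := filter.ExcludePatternOptions{Excludes: []string{"*.tmp"}}
        rejectFns, err := opts.CollectPatterns(warn)
        if err != nil {
            panic(err)
        }
        fmt.Println(len(rejectFns), len(warnings))
    }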


@@ -10,6 +10,7 @@ import (
 	"github.com/restic/restic/internal/debug"
 	"github.com/restic/restic/internal/errors"
+	"github.com/restic/restic/internal/filter"
 	"github.com/restic/restic/internal/repository"
 	"github.com/restic/restic/internal/restic"
 	"github.com/restic/restic/internal/walker"
@@ -88,7 +89,7 @@ type RewriteOptions struct {
 	Metadata snapshotMetadataArgs
 	restic.SnapshotFilter
-	excludePatternOptions
+	filter.ExcludePatternOptions
 }
 
 var rewriteOptions RewriteOptions
@@ -103,7 +104,7 @@ func init() {
 	f.StringVar(&rewriteOptions.Metadata.Time, "new-time", "", "replace time of the backup")
 
 	initMultiSnapshotFilter(f, &rewriteOptions.SnapshotFilter, true)
-	initExcludePatternOptions(f, &rewriteOptions.excludePatternOptions)
+	rewriteOptions.ExcludePatternOptions.Add(f)
 }
 
 type rewriteFilterFunc func(ctx context.Context, sn *restic.Snapshot) (restic.ID, error)
@@ -113,7 +114,7 @@ func rewriteSnapshot(ctx context.Context, repo *repository.Repository, sn *resti
 		return false, errors.Errorf("snapshot %v has nil tree", sn.ID().Str())
 	}
 
-	rejectByNameFuncs, err := opts.excludePatternOptions.CollectPatterns()
+	rejectByNameFuncs, err := opts.ExcludePatternOptions.CollectPatterns(Warnf)
 	if err != nil {
 		return false, err
 	}
@@ -263,7 +264,7 @@ func filterAndReplaceSnapshot(ctx context.Context, repo restic.Repository, sn *r
 }
 
 func runRewrite(ctx context.Context, opts RewriteOptions, gopts GlobalOptions, args []string) error {
-	if opts.excludePatternOptions.Empty() && opts.Metadata.empty() {
+	if opts.ExcludePatternOptions.Empty() && opts.Metadata.empty() {
 		return errors.Fatal("Nothing to do: no excludes provided and no new metadata provided")
 	}


@@ -5,6 +5,7 @@ import (
 	"path/filepath"
 	"testing"
 
+	"github.com/restic/restic/internal/filter"
 	"github.com/restic/restic/internal/restic"
 	rtest "github.com/restic/restic/internal/test"
 	"github.com/restic/restic/internal/ui"
@@ -12,7 +13,7 @@ import (
 
 func testRunRewriteExclude(t testing.TB, gopts GlobalOptions, excludes []string, forget bool, metadata snapshotMetadataArgs) {
 	opts := RewriteOptions{
-		excludePatternOptions: excludePatternOptions{
+		ExcludePatternOptions: filter.ExcludePatternOptions{
 			Excludes: excludes,
 		},
 		Forget: forget,


@@ -1,347 +1,16 @@
 package main
 
 import (
-	"bufio"
-	"bytes"
-	"fmt"
-	"io"
-	"os"
-	"path/filepath"
-	"strings"
-	"sync"
-
+	"github.com/restic/restic/internal/archiver"
 	"github.com/restic/restic/internal/debug"
 	"github.com/restic/restic/internal/errors"
-	"github.com/restic/restic/internal/filter"
 	"github.com/restic/restic/internal/fs"
 	"github.com/restic/restic/internal/repository"
-	"github.com/restic/restic/internal/textfile"
-	"github.com/restic/restic/internal/ui"
-
-	"github.com/spf13/pflag"
 )
 
-type rejectionCache struct {
-	m   map[string]bool
-	mtx sync.Mutex
-}
-
-// Lock locks the mutex in rc.
-func (rc *rejectionCache) Lock() {
-	if rc != nil {
-		rc.mtx.Lock()
-	}
-}
-
-// Unlock unlocks the mutex in rc.
-func (rc *rejectionCache) Unlock() {
-	if rc != nil {
-		rc.mtx.Unlock()
-	}
-}
-
-// Get returns the last stored value for dir and a second boolean that
-// indicates whether that value was actually written to the cache. It is the
-// callers responsibility to call rc.Lock and rc.Unlock before using this
-// method, otherwise data races may occur.
-func (rc *rejectionCache) Get(dir string) (bool, bool) {
-	if rc == nil || rc.m == nil {
-		return false, false
-	}
-	v, ok := rc.m[dir]
-	return v, ok
-}
-
-// Store stores a new value for dir. It is the callers responsibility to call
-// rc.Lock and rc.Unlock before using this method, otherwise data races may
-// occur.
-func (rc *rejectionCache) Store(dir string, rejected bool) {
-	if rc == nil {
-		return
-	}
-	if rc.m == nil {
-		rc.m = make(map[string]bool)
-	}
-	rc.m[dir] = rejected
-}
-
-// RejectByNameFunc is a function that takes a filename of a
-// file that would be included in the backup. The function returns true if it
-// should be excluded (rejected) from the backup.
-type RejectByNameFunc func(path string) bool
-
-// RejectFunc is a function that takes a filename and os.FileInfo of a
-// file that would be included in the backup. The function returns true if it
-// should be excluded (rejected) from the backup.
-type RejectFunc func(path string, fi os.FileInfo) bool
-
-// rejectByPattern returns a RejectByNameFunc which rejects files that match
-// one of the patterns.
-func rejectByPattern(patterns []string) RejectByNameFunc {
-	parsedPatterns := filter.ParsePatterns(patterns)
-	return func(item string) bool {
-		matched, err := filter.List(parsedPatterns, item)
-		if err != nil {
-			Warnf("error for exclude pattern: %v", err)
-		}
-
-		if matched {
-			debug.Log("path %q excluded by an exclude pattern", item)
-			return true
-		}
-
-		return false
-	}
-}
-
-// Same as `rejectByPattern` but case insensitive.
-func rejectByInsensitivePattern(patterns []string) RejectByNameFunc {
-	for index, path := range patterns {
-		patterns[index] = strings.ToLower(path)
-	}
-
-	rejFunc := rejectByPattern(patterns)
-	return func(item string) bool {
-		return rejFunc(strings.ToLower(item))
-	}
-}
-
-// rejectIfPresent returns a RejectByNameFunc which itself returns whether a path
-// should be excluded. The RejectByNameFunc considers a file to be excluded when
-// it resides in a directory with an exclusion file, that is specified by
-// excludeFileSpec in the form "filename[:content]". The returned error is
-// non-nil if the filename component of excludeFileSpec is empty. If rc is
-// non-nil, it is going to be used in the RejectByNameFunc to expedite the evaluation
-// of a directory based on previous visits.
-func rejectIfPresent(excludeFileSpec string) (RejectByNameFunc, error) {
-	if excludeFileSpec == "" {
-		return nil, errors.New("name for exclusion tagfile is empty")
-	}
-	colon := strings.Index(excludeFileSpec, ":")
-	if colon == 0 {
-		return nil, fmt.Errorf("no name for exclusion tagfile provided")
-	}
-	tf, tc := "", ""
-	if colon > 0 {
-		tf = excludeFileSpec[:colon]
-		tc = excludeFileSpec[colon+1:]
-	} else {
-		tf = excludeFileSpec
-	}
-	debug.Log("using %q as exclusion tagfile", tf)
-	rc := &rejectionCache{}
-	fn := func(filename string) bool {
-		return isExcludedByFile(filename, tf, tc, rc)
-	}
-	return fn, nil
-}
-
-// isExcludedByFile interprets filename as a path and returns true if that file
-// is in an excluded directory. A directory is identified as excluded if it contains a
-// tagfile which bears the name specified in tagFilename and starts with
-// header. If rc is non-nil, it is used to expedite the evaluation of a
-// directory based on previous visits.
-func isExcludedByFile(filename, tagFilename, header string, rc *rejectionCache) bool {
-	if tagFilename == "" {
-		return false
-	}
-	dir, base := filepath.Split(filename)
-	if base == tagFilename {
-		return false // do not exclude the tagfile itself
-	}
-	rc.Lock()
-	defer rc.Unlock()
-
-	rejected, visited := rc.Get(dir)
-	if visited {
-		return rejected
-	}
-	rejected = isDirExcludedByFile(dir, tagFilename, header)
-	rc.Store(dir, rejected)
-	return rejected
-}
-
-func isDirExcludedByFile(dir, tagFilename, header string) bool {
-	tf := filepath.Join(dir, tagFilename)
-	_, err := fs.Lstat(tf)
-	if os.IsNotExist(err) {
-		return false
-	}
-	if err != nil {
-		Warnf("could not access exclusion tagfile: %v", err)
-		return false
-	}
-	// when no signature is given, the mere presence of tf is enough reason
-	// to exclude filename
-	if len(header) == 0 {
-		return true
-	}
-	// From this stage, errors mean tagFilename exists but it is malformed.
-	// Warnings will be generated so that the user is informed that the
-	// indented ignore-action is not performed.
-	f, err := os.Open(tf)
-	if err != nil {
-		Warnf("could not open exclusion tagfile: %v", err)
-		return false
-	}
-	defer func() {
-		_ = f.Close()
-	}()
-	buf := make([]byte, len(header))
-	_, err = io.ReadFull(f, buf)
-	// EOF is handled with a dedicated message, otherwise the warning were too cryptic
-	if err == io.EOF {
-		Warnf("invalid (too short) signature in exclusion tagfile %q\n", tf)
-		return false
-	}
-	if err != nil {
-		Warnf("could not read signature from exclusion tagfile %q: %v\n", tf, err)
-		return false
-	}
-	if !bytes.Equal(buf, []byte(header)) {
-		Warnf("invalid signature in exclusion tagfile %q\n", tf)
-		return false
-	}
-	return true
-}
-
-// DeviceMap is used to track allowed source devices for backup. This is used to
-// check for crossing mount points during backup (for --one-file-system). It
-// maps the name of a source path to its device ID.
-type DeviceMap map[string]uint64
-
-// NewDeviceMap creates a new device map from the list of source paths.
-func NewDeviceMap(allowedSourcePaths []string) (DeviceMap, error) {
-	deviceMap := make(map[string]uint64)
-
-	for _, item := range allowedSourcePaths {
-		item, err := filepath.Abs(filepath.Clean(item))
-		if err != nil {
-			return nil, err
-		}
-
-		fi, err := fs.Lstat(item)
-		if err != nil {
-			return nil, err
-		}
-
-		id, err := fs.DeviceID(fi)
-		if err != nil {
-			return nil, err
-		}
-
-		deviceMap[item] = id
-	}
-
-	if len(deviceMap) == 0 {
-		return nil, errors.New("zero allowed devices")
-	}
-
-	return deviceMap, nil
-}
-
-// IsAllowed returns true if the path is located on an allowed device.
-func (m DeviceMap) IsAllowed(item string, deviceID uint64) (bool, error) {
-	for dir := item; ; dir = filepath.Dir(dir) {
-		debug.Log("item %v, test dir %v", item, dir)
-
-		// find a parent directory that is on an allowed device (otherwise
-		// we would not traverse the directory at all)
-		allowedID, ok := m[dir]
-		if !ok {
-			if dir == filepath.Dir(dir) {
-				// arrived at root, no allowed device found. this should not happen.
-				break
-			}
-			continue
-		}
-
-		// if the item has a different device ID than the parent directory,
-		// we crossed a file system boundary
-		if allowedID != deviceID {
-			debug.Log("item %v (dir %v) on disallowed device %d", item, dir, deviceID)
-			return false, nil
-		}
-
-		// item is on allowed device, accept it
-		debug.Log("item %v allowed", item)
-		return true, nil
-	}
-
-	return false, fmt.Errorf("item %v (device ID %v) not found, deviceMap: %v", item, deviceID, m)
-}
-
-// rejectByDevice returns a RejectFunc that rejects files which are on a
-// different file systems than the files/dirs in samples.
-func rejectByDevice(samples []string) (RejectFunc, error) {
-	deviceMap, err := NewDeviceMap(samples)
-	if err != nil {
-		return nil, err
-	}
-	debug.Log("allowed devices: %v\n", deviceMap)
-
-	return func(item string, fi os.FileInfo) bool {
-		id, err := fs.DeviceID(fi)
-		if err != nil {
-			// This should never happen because gatherDevices() would have
-			// errored out earlier. If it still does that's a reason to panic.
-			panic(err)
-		}
-
-		allowed, err := deviceMap.IsAllowed(filepath.Clean(item), id)
-		if err != nil {
-			// this should not happen
-			panic(fmt.Sprintf("error checking device ID of %v: %v", item, err))
-		}
-
-		if allowed {
-			// accept item
-			return false
-		}
-
-		// reject everything except directories
-		if !fi.IsDir() {
-			return true
-		}
-
-		// special case: make sure we keep mountpoints (directories which
-		// contain a mounted file system). Test this by checking if the parent
-		// directory would be included.
-		parentDir := filepath.Dir(filepath.Clean(item))
-
-		parentFI, err := fs.Lstat(parentDir)
-		if err != nil {
-			debug.Log("item %v: error running lstat() on parent directory: %v", item, err)
-			// if in doubt, reject
-			return true
-		}
-
-		parentDeviceID, err := fs.DeviceID(parentFI)
-		if err != nil {
-			debug.Log("item %v: getting device ID of parent directory: %v", item, err)
-			// if in doubt, reject
-			return true
-		}
-
-		parentAllowed, err := deviceMap.IsAllowed(parentDir, parentDeviceID)
-		if err != nil {
-			debug.Log("item %v: error checking parent directory: %v", item, err)
-			// if in doubt, reject
-			return true
-		}
-
-		if parentAllowed {
-			// we found a mount point, so accept the directory
-			return false
-		}
-
-		// reject everything else
-		return true
-	}, nil
-}
-
 // rejectResticCache returns a RejectByNameFunc that rejects the restic cache
 // directory (if set).
-func rejectResticCache(repo *repository.Repository) (RejectByNameFunc, error) {
+func rejectResticCache(repo *repository.Repository) (archiver.RejectByNameFunc, error) {
 	if repo.Cache == nil {
 		return func(string) bool {
 			return false
@@ -362,137 +31,3 @@ func rejectResticCache(repo *repository.Repository) (RejectByNameFunc, error) {
 		return false
 	}, nil
 }
-
-func rejectBySize(maxSizeStr string) (RejectFunc, error) {
-	maxSize, err := ui.ParseBytes(maxSizeStr)
-	if err != nil {
-		return nil, err
-	}
-
-	return func(item string, fi os.FileInfo) bool {
-		// directory will be ignored
-		if fi.IsDir() {
-			return false
-		}
-
-		filesize := fi.Size()
-		if filesize > maxSize {
-			debug.Log("file %s is oversize: %d", item, filesize)
-			return true
-		}
-
-		return false
-	}, nil
-}
-
-// readPatternsFromFiles reads all files and returns the list of
-// patterns. For each line, leading and trailing white space is removed
-// and comment lines are ignored. For each remaining pattern, environment
-// variables are resolved. For adding a literal dollar sign ($), write $$ to
-// the file.
-func readPatternsFromFiles(files []string) ([]string, error) {
-	getenvOrDollar := func(s string) string {
-		if s == "$" {
-			return "$"
-		}
-		return os.Getenv(s)
-	}
-
-	var patterns []string
-	for _, filename := range files {
-		err := func() (err error) {
-			data, err := textfile.Read(filename)
-			if err != nil {
-				return err
-			}
-
-			scanner := bufio.NewScanner(bytes.NewReader(data))
-			for scanner.Scan() {
-				line := strings.TrimSpace(scanner.Text())
-
-				// ignore empty lines
-				if line == "" {
-					continue
-				}
-
-				// strip comments
-				if strings.HasPrefix(line, "#") {
-					continue
-				}
-
-				line = os.Expand(line, getenvOrDollar)
-				patterns = append(patterns, line)
-			}
-			return scanner.Err()
-		}()
-		if err != nil {
-			return nil, fmt.Errorf("failed to read patterns from file %q: %w", filename, err)
-		}
-	}
-	return patterns, nil
-}
-
-type excludePatternOptions struct {
-	Excludes                []string
-	InsensitiveExcludes     []string
-	ExcludeFiles            []string
-	InsensitiveExcludeFiles []string
-}
-
-func initExcludePatternOptions(f *pflag.FlagSet, opts *excludePatternOptions) {
-	f.StringArrayVarP(&opts.Excludes, "exclude", "e", nil, "exclude a `pattern` (can be specified multiple times)")
-	f.StringArrayVar(&opts.InsensitiveExcludes, "iexclude", nil, "same as --exclude `pattern` but ignores the casing of filenames")
-	f.StringArrayVar(&opts.ExcludeFiles, "exclude-file", nil, "read exclude patterns from a `file` (can be specified multiple times)")
-	f.StringArrayVar(&opts.InsensitiveExcludeFiles, "iexclude-file", nil, "same as --exclude-file but ignores casing of `file`names in patterns")
-}
-
-func (opts *excludePatternOptions) Empty() bool {
-	return len(opts.Excludes) == 0 && len(opts.InsensitiveExcludes) == 0 && len(opts.ExcludeFiles) == 0 && len(opts.InsensitiveExcludeFiles) == 0
-}
-
-func (opts excludePatternOptions) CollectPatterns() ([]RejectByNameFunc, error) {
-	var fs []RejectByNameFunc
-	// add patterns from file
-	if len(opts.ExcludeFiles) > 0 {
-		excludePatterns, err := readPatternsFromFiles(opts.ExcludeFiles)
-		if err != nil {
-			return nil, err
-		}
-
-		if err := filter.ValidatePatterns(excludePatterns); err != nil {
-			return nil, errors.Fatalf("--exclude-file: %s", err)
-		}
-
-		opts.Excludes = append(opts.Excludes, excludePatterns...)
-	}
-
-	if len(opts.InsensitiveExcludeFiles) > 0 {
-		excludes, err := readPatternsFromFiles(opts.InsensitiveExcludeFiles)
-		if err != nil {
-			return nil, err
-		}
-
-		if err := filter.ValidatePatterns(excludes); err != nil {
-			return nil, errors.Fatalf("--iexclude-file: %s", err)
-		}
-
-		opts.InsensitiveExcludes = append(opts.InsensitiveExcludes, excludes...)
-	}
-
-	if len(opts.InsensitiveExcludes) > 0 {
-		if err := filter.ValidatePatterns(opts.InsensitiveExcludes); err != nil {
-			return nil, errors.Fatalf("--iexclude: %s", err)
-		}
-
-		fs = append(fs, rejectByInsensitivePattern(opts.InsensitiveExcludes))
-	}
-
-	if len(opts.Excludes) > 0 {
-		if err := filter.ValidatePatterns(opts.Excludes); err != nil {
-			return nil, errors.Fatalf("--exclude: %s", err)
-		}
-
-		fs = append(fs, rejectByPattern(opts.Excludes))
-	}
-	return fs, nil
-}
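
Everything removed here resurfaces exported: the rejection funcs, device map, and tagfile check in internal/archiver, the pattern options and pattern-file parsing in internal/filter. A sketch of the exported tagfile API, assuming the warning callback is printf-shaped as the Warnf argument above suggests; the spec format "filename[:header]" and the CACHEDIR.TAG signature are unchanged:

    package main

    import (
        "log"

        "github.com/restic/restic/internal/archiver"
    )

    func main() {
        // reject any directory carrying a valid CACHEDIR.TAG; spec format
        // "filename[:header]" matches the removed rejectIfPresent
        spec := "CACHEDIR.TAG:Signature: 8a477f597d28d172789f06886806bc55"
        rejectCacheDirs, err := archiver.RejectIfPresent(spec, log.Printf)
        if err != nil {
            log.Fatal(err)
        }

        // by-name rejections collapse into one select-by-name callback
        selectByName := archiver.CombineRejectByNames([]archiver.RejectByNameFunc{rejectCacheDirs})
        _ = selectByName
    }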


@@ -5,6 +5,7 @@ import (
 	"path/filepath"
 	"testing"
 
+	"github.com/restic/restic/internal/filter"
 	rtest "github.com/restic/restic/internal/test"
 )
@@ -17,14 +18,14 @@ func TestBackupFailsWhenUsingInvalidPatterns(t *testing.T) {
 	var err error
 
 	// Test --exclude
-	err = testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{"testdata"}, BackupOptions{excludePatternOptions: excludePatternOptions{Excludes: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}}, env.gopts)
+	err = testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{"testdata"}, BackupOptions{ExcludePatternOptions: filter.ExcludePatternOptions{Excludes: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}}, env.gopts)
 
 	rtest.Equals(t, `Fatal: --exclude: invalid pattern(s) provided:
 *[._]log[.-][0-9]
 !*[._]log[.-][0-9]`, err.Error())
 
 	// Test --iexclude
-	err = testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{"testdata"}, BackupOptions{excludePatternOptions: excludePatternOptions{InsensitiveExcludes: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}}, env.gopts)
+	err = testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{"testdata"}, BackupOptions{ExcludePatternOptions: filter.ExcludePatternOptions{InsensitiveExcludes: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}}, env.gopts)
 
 	rtest.Equals(t, `Fatal: --iexclude: invalid pattern(s) provided:
 *[._]log[.-][0-9]
@@ -47,14 +48,14 @@ func TestBackupFailsWhenUsingInvalidPatternsFromFile(t *testing.T) {
 	var err error
 
 	// Test --exclude-file:
-	err = testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{"testdata"}, BackupOptions{excludePatternOptions: excludePatternOptions{ExcludeFiles: []string{excludeFile}}}, env.gopts)
+	err = testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{"testdata"}, BackupOptions{ExcludePatternOptions: filter.ExcludePatternOptions{ExcludeFiles: []string{excludeFile}}}, env.gopts)
 
 	rtest.Equals(t, `Fatal: --exclude-file: invalid pattern(s) provided:
 *[._]log[.-][0-9]
 !*[._]log[.-][0-9]`, err.Error())
 
 	// Test --iexclude-file
-	err = testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{"testdata"}, BackupOptions{excludePatternOptions: excludePatternOptions{InsensitiveExcludeFiles: []string{excludeFile}}}, env.gopts)
+	err = testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{"testdata"}, BackupOptions{ExcludePatternOptions: filter.ExcludePatternOptions{InsensitiveExcludeFiles: []string{excludeFile}}}, env.gopts)
 
 	rtest.Equals(t, `Fatal: --iexclude-file: invalid pattern(s) provided:
 *[._]log[.-][0-9]
@@ -70,28 +71,28 @@ func TestRestoreFailsWhenUsingInvalidPatterns(t *testing.T) {
 	var err error
 
 	// Test --exclude
-	err = testRunRestoreAssumeFailure("latest", RestoreOptions{excludePatternOptions: excludePatternOptions{Excludes: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}}, env.gopts)
+	err = testRunRestoreAssumeFailure("latest", RestoreOptions{ExcludePatternOptions: filter.ExcludePatternOptions{Excludes: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}}, env.gopts)
 
 	rtest.Equals(t, `Fatal: --exclude: invalid pattern(s) provided:
 *[._]log[.-][0-9]
 !*[._]log[.-][0-9]`, err.Error())
 
 	// Test --iexclude
-	err = testRunRestoreAssumeFailure("latest", RestoreOptions{excludePatternOptions: excludePatternOptions{InsensitiveExcludes: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}}, env.gopts)
+	err = testRunRestoreAssumeFailure("latest", RestoreOptions{ExcludePatternOptions: filter.ExcludePatternOptions{InsensitiveExcludes: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}}, env.gopts)
 
 	rtest.Equals(t, `Fatal: --iexclude: invalid pattern(s) provided:
 *[._]log[.-][0-9]
 !*[._]log[.-][0-9]`, err.Error())
 
 	// Test --include
-	err = testRunRestoreAssumeFailure("latest", RestoreOptions{includePatternOptions: includePatternOptions{Includes: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}}, env.gopts)
+	err = testRunRestoreAssumeFailure("latest", RestoreOptions{IncludePatternOptions: filter.IncludePatternOptions{Includes: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}}, env.gopts)
 
 	rtest.Equals(t, `Fatal: --include: invalid pattern(s) provided:
 *[._]log[.-][0-9]
 !*[._]log[.-][0-9]`, err.Error())
 
 	// Test --iinclude
-	err = testRunRestoreAssumeFailure("latest", RestoreOptions{includePatternOptions: includePatternOptions{InsensitiveIncludes: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}}, env.gopts)
+	err = testRunRestoreAssumeFailure("latest", RestoreOptions{IncludePatternOptions: filter.IncludePatternOptions{InsensitiveIncludes: []string{"*[._]log[.-][0-9]", "!*[._]log[.-][0-9]"}}}, env.gopts)
 
 	rtest.Equals(t, `Fatal: --iinclude: invalid pattern(s) provided:
 *[._]log[.-][0-9]
@@ -111,22 +112,22 @@ func TestRestoreFailsWhenUsingInvalidPatternsFromFile(t *testing.T) {
 		t.Fatalf("Could not write include file: %v", fileErr)
 	}
 
-	err := testRunRestoreAssumeFailure("latest", RestoreOptions{includePatternOptions: includePatternOptions{IncludeFiles: []string{patternsFile}}}, env.gopts)
+	err := testRunRestoreAssumeFailure("latest", RestoreOptions{IncludePatternOptions: filter.IncludePatternOptions{IncludeFiles: []string{patternsFile}}}, env.gopts)
 	rtest.Equals(t, `Fatal: --include-file: invalid pattern(s) provided:
 *[._]log[.-][0-9]
 !*[._]log[.-][0-9]`, err.Error())
 
-	err = testRunRestoreAssumeFailure("latest", RestoreOptions{excludePatternOptions: excludePatternOptions{ExcludeFiles: []string{patternsFile}}}, env.gopts)
+	err = testRunRestoreAssumeFailure("latest", RestoreOptions{ExcludePatternOptions: filter.ExcludePatternOptions{ExcludeFiles: []string{patternsFile}}}, env.gopts)
 	rtest.Equals(t, `Fatal: --exclude-file: invalid pattern(s) provided:
 *[._]log[.-][0-9]
 !*[._]log[.-][0-9]`, err.Error())
 
-	err = testRunRestoreAssumeFailure("latest", RestoreOptions{includePatternOptions: includePatternOptions{InsensitiveIncludeFiles: []string{patternsFile}}}, env.gopts)
+	err = testRunRestoreAssumeFailure("latest", RestoreOptions{IncludePatternOptions: filter.IncludePatternOptions{InsensitiveIncludeFiles: []string{patternsFile}}}, env.gopts)
 	rtest.Equals(t, `Fatal: --iinclude-file: invalid pattern(s) provided:
 *[._]log[.-][0-9]
 !*[._]log[.-][0-9]`, err.Error())
 
-	err = testRunRestoreAssumeFailure("latest", RestoreOptions{excludePatternOptions: excludePatternOptions{InsensitiveExcludeFiles: []string{patternsFile}}}, env.gopts)
+	err = testRunRestoreAssumeFailure("latest", RestoreOptions{ExcludePatternOptions: filter.ExcludePatternOptions{InsensitiveExcludeFiles: []string{patternsFile}}}, env.gopts)
 	rtest.Equals(t, `Fatal: --iexclude-file: invalid pattern(s) provided:
 *[._]log[.-][0-9]
 !*[._]log[.-][0-9]`, err.Error())


@@ -25,7 +25,7 @@ type SelectByNameFunc func(item string) bool
 
 // SelectFunc returns true for all items that should be included (files and
 // dirs). If false is returned, files are ignored and dirs are not even walked.
-type SelectFunc func(item string, fi os.FileInfo) bool
+type SelectFunc func(item string, fi os.FileInfo, fs fs.FS) bool
 
 // ErrorFunc is called when an error during archiving occurs. When nil is
 // returned, the archiver continues, otherwise it aborts and passes the error
@@ -75,6 +75,14 @@ type archiverRepo interface {
 }
 
 // Archiver saves a directory structure to the repo.
+//
+// An Archiver has a number of worker goroutines handling saving the different
+// data structures to the repository, the details are implemented by the
+// fileSaver, blobSaver, and treeSaver types.
+//
+// The main goroutine (the one calling Snapshot()) traverses the directory tree
+// and delegates all work to these worker pools. They return a futureNode which
+// can be resolved later, by calling Wait() on it.
 type Archiver struct {
 	Repo         archiverRepo
 	SelectByName SelectByNameFunc
@@ -82,9 +90,9 @@ type Archiver struct {
 	FS           fs.FS
 	Options      Options
 
-	blobSaver *BlobSaver
-	fileSaver *FileSaver
-	treeSaver *TreeSaver
+	blobSaver *blobSaver
+	fileSaver *fileSaver
+	treeSaver *treeSaver
 	mu        sync.Mutex
 	summary   *Summary
@@ -160,7 +168,7 @@ func (o Options) ApplyDefaults() Options {
 	if o.SaveTreeConcurrency == 0 {
 		// can either wait for a file, wait for a tree, serialize a tree or wait for saveblob
 		// the last two are cpu-bound and thus mutually exclusive.
-		// Also allow waiting for FileReadConcurrency files, this is the maximum of FutureFiles
+		// Also allow waiting for FileReadConcurrency files, this is the maximum of files
 		// which currently can be in progress. The main backup loop blocks when trying to queue
 		// more files to read.
 		o.SaveTreeConcurrency = uint(runtime.GOMAXPROCS(0)) + o.ReadConcurrency
@@ -170,12 +178,12 @@ func (o Options) ApplyDefaults() Options {
 }
 
 // New initializes a new archiver.
-func New(repo archiverRepo, fs fs.FS, opts Options) *Archiver {
+func New(repo archiverRepo, filesystem fs.FS, opts Options) *Archiver {
 	arch := &Archiver{
 		Repo:         repo,
 		SelectByName: func(_ string) bool { return true },
-		Select:       func(_ string, _ os.FileInfo) bool { return true },
-		FS:           fs,
+		Select:       func(_ string, _ os.FileInfo, _ fs.FS) bool { return true },
+		FS:           filesystem,
 		Options:      opts.ApplyDefaults(),
 
 		CompleteItem: func(string, *restic.Node, *restic.Node, ItemStats, time.Duration) {},
@@ -297,27 +305,27 @@ func (arch *Archiver) wrapLoadTreeError(id restic.ID, err error) error {
 
 // saveDir stores a directory in the repo and returns the node. snPath is the
 // path within the current snapshot.
-func (arch *Archiver) saveDir(ctx context.Context, snPath string, dir string, fi os.FileInfo, previous *restic.Tree, complete CompleteFunc) (d FutureNode, err error) {
+func (arch *Archiver) saveDir(ctx context.Context, snPath string, dir string, fi os.FileInfo, previous *restic.Tree, complete fileCompleteFunc) (d futureNode, err error) {
 	debug.Log("%v %v", snPath, dir)
 
 	treeNode, err := arch.nodeFromFileInfo(snPath, dir, fi, false)
 	if err != nil {
-		return FutureNode{}, err
+		return futureNode{}, err
 	}
 
 	names, err := fs.Readdirnames(arch.FS, dir, fs.O_NOFOLLOW)
 	if err != nil {
-		return FutureNode{}, err
+		return futureNode{}, err
 	}
 	sort.Strings(names)
 
-	nodes := make([]FutureNode, 0, len(names))
+	nodes := make([]futureNode, 0, len(names))
 
 	for _, name := range names {
 		// test if context has been cancelled
 		if ctx.Err() != nil {
 			debug.Log("context has been cancelled, aborting")
-			return FutureNode{}, ctx.Err()
+			return futureNode{}, ctx.Err()
 		}
 
 		pathname := arch.FS.Join(dir, name)
@@ -333,7 +341,7 @@ func (arch *Archiver) saveDir(ctx context.Context, snPath string, dir string, fi
 				continue
 			}
 
-			return FutureNode{}, err
+			return futureNode{}, err
 		}
 
 		if excluded {
@@ -348,11 +356,11 @@ func (arch *Archiver) saveDir(ctx context.Context, snPath string, dir string, fi
 	return fn, nil
 }
 
-// FutureNode holds a reference to a channel that returns a FutureNodeResult
+// futureNode holds a reference to a channel that returns a FutureNodeResult
 // or a reference to an already existing result. If the result is available
 // immediately, then storing a reference directly requires less memory than
 // using the indirection via a channel.
-type FutureNode struct {
+type futureNode struct {
 	ch  <-chan futureNodeResult
 	res *futureNodeResult
 }
@@ -365,18 +373,18 @@ type futureNodeResult struct {
 	err  error
 }
 
-func newFutureNode() (FutureNode, chan<- futureNodeResult) {
+func newFutureNode() (futureNode, chan<- futureNodeResult) {
 	ch := make(chan futureNodeResult, 1)
-	return FutureNode{ch: ch}, ch
+	return futureNode{ch: ch}, ch
 }
 
-func newFutureNodeWithResult(res futureNodeResult) FutureNode {
-	return FutureNode{
+func newFutureNodeWithResult(res futureNodeResult) futureNode {
+	return futureNode{
 		res: &res,
 	}
 }
 
-func (fn *FutureNode) take(ctx context.Context) futureNodeResult {
+func (fn *futureNode) take(ctx context.Context) futureNodeResult {
 	if fn.res != nil {
 		res := fn.res
 		// free result
@@ -415,19 +423,19 @@ func (arch *Archiver) allBlobsPresent(previous *restic.Node) bool {
 // Errors and completion needs to be handled by the caller.
 //
 // snPath is the path within the current snapshot.
-func (arch *Archiver) save(ctx context.Context, snPath, target string, previous *restic.Node) (fn FutureNode, excluded bool, err error) {
+func (arch *Archiver) save(ctx context.Context, snPath, target string, previous *restic.Node) (fn futureNode, excluded bool, err error) {
 	start := time.Now()
 
 	debug.Log("%v target %q, previous %v", snPath, target, previous)
 	abstarget, err := arch.FS.Abs(target)
 	if err != nil {
-		return FutureNode{}, false, err
+		return futureNode{}, false, err
 	}
 
 	// exclude files by path before running Lstat to reduce number of lstat calls
 	if !arch.SelectByName(abstarget) {
 		debug.Log("%v is excluded by path", target)
-		return FutureNode{}, true, nil
+		return futureNode{}, true, nil
 	}
 
 	// get file info and run remaining select functions that require file information
@@ -436,13 +444,13 @@ func (arch *Archiver) save(ctx context.Context, snPath, target string, previous
 		debug.Log("lstat() for %v returned error: %v", target, err)
 		err = arch.error(abstarget, err)
 		if err != nil {
-			return FutureNode{}, false, errors.WithStack(err)
+			return futureNode{}, false, errors.WithStack(err)
 		}
-		return FutureNode{}, true, nil
+		return futureNode{}, true, nil
 	}
 
-	if !arch.Select(abstarget, fi) {
+	if !arch.Select(abstarget, fi, arch.FS) {
 		debug.Log("%v is excluded", target)
-		return FutureNode{}, true, nil
+		return futureNode{}, true, nil
 	}
 
 	switch {
@@ -451,14 +459,14 @@ func (arch *Archiver) save(ctx context.Context, snPath, target string, previous
 
 		// check if the file has not changed before performing a fopen operation (more expensive, specially
 		// in network filesystems)
-		if previous != nil && !fileChanged(fi, previous, arch.ChangeIgnoreFlags) {
+		if previous != nil && !fileChanged(arch.FS, fi, previous, arch.ChangeIgnoreFlags) {
 			if arch.allBlobsPresent(previous) {
 				debug.Log("%v hasn't changed, using old list of blobs", target)
 				arch.trackItem(snPath, previous, previous, ItemStats{}, time.Since(start))
 				arch.CompleteBlob(previous.Size)
 				node, err := arch.nodeFromFileInfo(snPath, target, fi, false)
 				if err != nil {
-					return FutureNode{}, false, err
+					return futureNode{}, false, err
 				}
 
 				// copy list of blobs
@@ -477,7 +485,7 @@ func (arch *Archiver) save(ctx context.Context, snPath, target string, previous
 				err := errors.Errorf("parts of %v not found in the repository index; storing the file again", target)
 				err = arch.error(abstarget, err)
 				if err != nil {
-					return FutureNode{}, false, err
+					return futureNode{}, false, err
 				}
 			}
@@ -488,9 +496,9 @@ func (arch *Archiver) save(ctx context.Context, snPath, target string, previous
 			debug.Log("Openfile() for %v returned error: %v", target, err)
 			err = arch.error(abstarget, err)
 			if err != nil {
-				return FutureNode{}, false, errors.WithStack(err)
+				return futureNode{}, false, errors.WithStack(err)
 			}
-			return FutureNode{}, true, nil
+			return futureNode{}, true, nil
 		}
 
 		fi, err = file.Stat()
@@ -499,9 +507,9 @@ func (arch *Archiver) save(ctx context.Context, snPath, target string, previous
 			_ = file.Close()
 			err = arch.error(abstarget, err)
 			if err != nil {
-				return FutureNode{}, false, errors.WithStack(err)
+				return futureNode{}, false, errors.WithStack(err)
 			}
-			return FutureNode{}, true, nil
+			return futureNode{}, true, nil
 		}
 
 		// make sure it's still a file
@@ -510,9 +518,9 @@ func (arch *Archiver) save(ctx context.Context, snPath, target string, previous
 			_ = file.Close()
 			err = arch.error(abstarget, err)
 			if err != nil {
-				return FutureNode{}, false, err
+				return futureNode{}, false, err
 			}
-			return FutureNode{}, true, nil
+			return futureNode{}, true, nil
 		}
 
 		// Save will close the file, we don't need to do that
@@ -533,7 +541,7 @@ func (arch *Archiver) save(ctx context.Context, snPath, target string, previous
 			err = arch.error(abstarget, err)
 		}
 		if err != nil {
-			return FutureNode{}, false, err
+			return futureNode{}, false, err
 		}
 
 		fn, err = arch.saveDir(ctx, snPath, target, fi, oldSubtree,
@@ -542,19 +550,19 @@ func (arch *Archiver) save(ctx context.Context, snPath, target string, previous
 			})
 		if err != nil {
 			debug.Log("SaveDir for %v returned error: %v", snPath, err)
-			return FutureNode{}, false, err
+			return futureNode{}, false, err
 		}
 
 	case fi.Mode()&os.ModeSocket > 0:
 		debug.Log("  %v is a socket, ignoring", target)
-		return FutureNode{}, true, nil
+		return futureNode{}, true, nil
 
 	default:
 		debug.Log("  %v other", target)
 
 		node, err := arch.nodeFromFileInfo(snPath, target, fi, false)
 		if err != nil {
-			return FutureNode{}, false, err
+			return futureNode{}, false, err
 		}
 		fn = newFutureNodeWithResult(futureNodeResult{
 			snPath: snPath,
@@ -571,7 +579,7 @@ func (arch *Archiver) save(ctx context.Context, snPath, target string, previous
 // fileChanged tries to detect whether a file's content has changed compared
 // to the contents of node, which describes the same path in the parent backup.
 // It should only be run for regular files.
-func fileChanged(fi os.FileInfo, node *restic.Node, ignoreFlags uint) bool {
+func fileChanged(fs fs.FS, fi os.FileInfo, node *restic.Node, ignoreFlags uint) bool {
 	switch {
 	case node == nil:
 		return true
@@ -621,17 +629,17 @@ func (arch *Archiver) statDir(dir string) (os.FileInfo, error) {
 
 // saveTree stores a Tree in the repo, returned is the tree. snPath is the path
 // within the current snapshot.
-func (arch *Archiver) saveTree(ctx context.Context, snPath string, atree *Tree, previous *restic.Tree, complete CompleteFunc) (FutureNode, int, error) {
+func (arch *Archiver) saveTree(ctx context.Context, snPath string, atree *tree, previous *restic.Tree, complete fileCompleteFunc) (futureNode, int, error) {
 	var node *restic.Node
 	if snPath != "/" {
 		if atree.FileInfoPath == "" {
-			return FutureNode{}, 0, errors.Errorf("FileInfoPath for %v is empty", snPath)
+			return futureNode{}, 0, errors.Errorf("FileInfoPath for %v is empty", snPath)
 		}
 
 		fi, err := arch.statDir(atree.FileInfoPath)
 		if err != nil {
-			return FutureNode{}, 0, err
+			return futureNode{}, 0, err
 		}
 
 		debug.Log("%v, dir node data loaded from %v", snPath, atree.FileInfoPath)
@@ -639,7 +647,7 @@ func (arch *Archiver) saveTree(ctx context.Context, snPath string, atree *Tree,
 		// thus ignore errors for such folders.
 		node, err = arch.nodeFromFileInfo(snPath, atree.FileInfoPath, fi, true)
 		if err != nil {
-			return FutureNode{}, 0, err
+			return futureNode{}, 0, err
 		}
 	} else {
 		// fake root node
@@ -648,7 +656,7 @@ func (arch *Archiver) saveTree(ctx context.Context, snPath string, atree *Tree,
 
 	debug.Log("%v (%v nodes), parent %v", snPath, len(atree.Nodes), previous)
 	nodeNames := atree.NodeNames()
-	nodes := make([]FutureNode, 0, len(nodeNames))
+	nodes := make([]futureNode, 0, len(nodeNames))
 
 	// iterate over the nodes of atree in lexicographic (=deterministic) order
 	for _, name := range nodeNames {
@@ -656,7 +664,7 @@ func (arch *Archiver) saveTree(ctx context.Context, snPath string, atree *Tree,
 
 		// test if context has been cancelled
 		if ctx.Err() != nil {
-			return FutureNode{}, 0, ctx.Err()
+			return futureNode{}, 0, ctx.Err()
 		}
 
 		// this is a leaf node
@@ -669,11 +677,11 @@ func (arch *Archiver) saveTree(ctx context.Context, snPath string, atree *Tree,
 					// ignore error
 					continue
 				}
-				return FutureNode{}, 0, err
+				return futureNode{}, 0, err
 			}
 
 			if err != nil {
-				return FutureNode{}, 0, err
+				return futureNode{}, 0, err
 			}
 
 			if !excluded {
@@ -691,7 +699,7 @@ func (arch *Archiver) saveTree(ctx context.Context, snPath string, atree *Tree,
 			err = arch.error(join(snPath, name), err)
 		}
 		if err != nil {
-			return FutureNode{}, 0, err
+			return futureNode{}, 0, err
 		}
 
 		// not a leaf node, archive subtree
@@ -699,7 +707,7 @@ func (arch *Archiver) saveTree(ctx context.Context, snPath string, atree *Tree,
 			arch.trackItem(snItem, oldNode, n, is, time.Since(start))
 		})
 		if err != nil {
-			return FutureNode{}, 0, err
+			return futureNode{}, 0, err
 		}
 		nodes = append(nodes, fn)
 	}
@@ -779,16 +787,16 @@ func (arch *Archiver) loadParentTree(ctx context.Context, sn *restic.Snapshot) *
 
 // runWorkers starts the worker pools, which are stopped when the context is cancelled.
 func (arch *Archiver) runWorkers(ctx context.Context, wg *errgroup.Group) {
-	arch.blobSaver = NewBlobSaver(ctx, wg, arch.Repo, arch.Options.SaveBlobConcurrency)
+	arch.blobSaver = newBlobSaver(ctx, wg, arch.Repo, arch.Options.SaveBlobConcurrency)
 
-	arch.fileSaver = NewFileSaver(ctx, wg,
+	arch.fileSaver = newFileSaver(ctx, wg,
 		arch.blobSaver.Save,
 		arch.Repo.Config().ChunkerPolynomial,
 		arch.Options.ReadConcurrency, arch.Options.SaveBlobConcurrency)
 	arch.fileSaver.CompleteBlob = arch.CompleteBlob
 	arch.fileSaver.NodeFromFileInfo = arch.nodeFromFileInfo
 
-	arch.treeSaver = NewTreeSaver(ctx, wg, arch.Options.SaveTreeConcurrency, arch.blobSaver.Save, arch.Error)
+	arch.treeSaver = newTreeSaver(ctx, wg, arch.Options.SaveTreeConcurrency, arch.blobSaver.Save, arch.Error)
 }
 
 func (arch *Archiver) stopWorkers() {
@@ -809,7 +817,7 @@ func (arch *Archiver) Snapshot(ctx context.Context, targets []string, opts Snaps
 		return nil, restic.ID{}, nil, err
 	}
 
-	atree, err := NewTree(arch.FS, cleanTargets)
+	atree, err := newTree(arch.FS, cleanTargets)
 	if err != nil {
 		return nil, restic.ID{}, nil, err
 	}
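
Since SelectFunc now receives the fs.FS, select callbacks can use the backup filesystem's own path helpers (as the updated tests below do with fs.Base) instead of hard-coded path/filepath calls. An illustrative fragment, assuming an *archiver.Archiver built via archiver.New elsewhere:

    package main

    import (
        "os"

        "github.com/restic/restic/internal/archiver"
        "github.com/restic/restic/internal/fs"
    )

    // configureSelect shows the new three-argument Select callback; the
    // *archiver.Archiver is assumed to come from archiver.New.
    func configureSelect(arch *archiver.Archiver) {
        arch.Select = func(item string, fi os.FileInfo, meta fs.FS) bool {
            // decide with the backup filesystem's own path helpers
            return meta.Base(item) != "subdir"
        }
    }

    func main() {}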


@@ -686,10 +686,11 @@ func TestFileChanged(t *testing.T) {
 			}
 			save(t, filename, content)
 
+			fs := &fs.Local{}
 			fiBefore := lstat(t, filename)
 			node := nodeFromFI(t, filename, fiBefore)
-			if fileChanged(fiBefore, node, 0) {
+			if fileChanged(fs, fiBefore, node, 0) {
 				t.Fatalf("unchanged file detected as changed")
 			}
@@ -699,12 +700,12 @@ func TestFileChanged(t *testing.T) {
 			if test.SameFile {
 				// file should be detected as unchanged
-				if fileChanged(fiAfter, node, test.ChangeIgnore) {
+				if fileChanged(fs, fiAfter, node, test.ChangeIgnore) {
 					t.Fatalf("unmodified file detected as changed")
 				}
 			} else {
 				// file should be detected as changed
-				if !fileChanged(fiAfter, node, test.ChangeIgnore) && !test.SameFile {
+				if !fileChanged(fs, fiAfter, node, test.ChangeIgnore) && !test.SameFile {
 					t.Fatalf("modified file detected as unchanged")
 				}
 			}
@@ -721,7 +722,7 @@ func TestFilChangedSpecialCases(t *testing.T) {
 
 	t.Run("nil-node", func(t *testing.T) {
 		fi := lstat(t, filename)
-		if !fileChanged(fi, nil, 0) {
+		if !fileChanged(&fs.Local{}, fi, nil, 0) {
 			t.Fatal("nil node detected as unchanged")
 		}
 	})
@@ -730,7 +731,7 @@ func TestFilChangedSpecialCases(t *testing.T) {
 		fi := lstat(t, filename)
 		node := nodeFromFI(t, filename, fi)
 		node.Type = "symlink"
-		if !fileChanged(fi, node, 0) {
+		if !fileChanged(&fs.Local{}, fi, node, 0) {
 			t.Fatal("node with changed type detected as unchanged")
 		}
 	})
@@ -1121,7 +1122,7 @@ func TestArchiverSaveTree(t *testing.T) {
 				test.prepare(t)
 			}
 
-			atree, err := NewTree(testFS, test.targets)
+			atree, err := newTree(testFS, test.targets)
 			if err != nil {
 				t.Fatal(err)
 			}
@@ -1529,7 +1530,7 @@ func TestArchiverSnapshotSelect(t *testing.T) {
 				},
 				"other": TestFile{Content: "another file"},
 			},
-			selFn: func(item string, fi os.FileInfo) bool {
+			selFn: func(item string, fi os.FileInfo, _ fs.FS) bool {
 				return true
 			},
 		},
@@ -1546,7 +1547,7 @@ func TestArchiverSnapshotSelect(t *testing.T) {
 				},
 				"other": TestFile{Content: "another file"},
 			},
-			selFn: func(item string, fi os.FileInfo) bool {
+			selFn: func(item string, fi os.FileInfo, _ fs.FS) bool {
 				return false
 			},
 			err: "snapshot is empty",
@@ -1573,7 +1574,7 @@ func TestArchiverSnapshotSelect(t *testing.T) {
 				},
 				"other": TestFile{Content: "another file"},
 			},
-			selFn: func(item string, fi os.FileInfo) bool {
+			selFn: func(item string, fi os.FileInfo, _ fs.FS) bool {
 				return filepath.Ext(item) != ".txt"
 			},
 		},
@@ -1597,8 +1598,8 @@ func TestArchiverSnapshotSelect(t *testing.T) {
 				},
 				"other": TestFile{Content: "another file"},
 			},
-			selFn: func(item string, fi os.FileInfo) bool {
-				return filepath.Base(item) != "subdir"
+			selFn: func(item string, fi os.FileInfo, fs fs.FS) bool {
+				return fs.Base(item) != "subdir"
 			},
 		},
 		{
@@ -1606,8 +1607,8 @@ func TestArchiverSnapshotSelect(t *testing.T) {
 			src: TestDir{
 				"foo": TestFile{Content: "foo"},
 			},
-			selFn: func(item string, fi os.FileInfo) bool {
+			selFn: func(item string, fi os.FileInfo, fs fs.FS) bool {
return filepath.IsAbs(item) return fs.IsAbs(item)
}, },
}, },
} }

View file

@ -9,22 +9,22 @@ import (
"golang.org/x/sync/errgroup" "golang.org/x/sync/errgroup"
) )
// Saver allows saving a blob. // saver allows saving a blob.
type Saver interface { type saver interface {
SaveBlob(ctx context.Context, t restic.BlobType, data []byte, id restic.ID, storeDuplicate bool) (restic.ID, bool, int, error) SaveBlob(ctx context.Context, t restic.BlobType, data []byte, id restic.ID, storeDuplicate bool) (restic.ID, bool, int, error)
} }
// BlobSaver concurrently saves incoming blobs to the repo. // blobSaver concurrently saves incoming blobs to the repo.
type BlobSaver struct { type blobSaver struct {
repo Saver repo saver
ch chan<- saveBlobJob ch chan<- saveBlobJob
} }
// NewBlobSaver returns a new blob saver. A worker pool is started, it is stopped // newBlobSaver returns a new blob saver. A worker pool is started, it is stopped
// when ctx is cancelled. // when ctx is cancelled.
func NewBlobSaver(ctx context.Context, wg *errgroup.Group, repo Saver, workers uint) *BlobSaver { func newBlobSaver(ctx context.Context, wg *errgroup.Group, repo saver, workers uint) *blobSaver {
ch := make(chan saveBlobJob) ch := make(chan saveBlobJob)
s := &BlobSaver{ s := &blobSaver{
repo: repo, repo: repo,
ch: ch, ch: ch,
} }
@ -38,13 +38,13 @@ func NewBlobSaver(ctx context.Context, wg *errgroup.Group, repo Saver, workers u
return s return s
} }
func (s *BlobSaver) TriggerShutdown() { func (s *blobSaver) TriggerShutdown() {
close(s.ch) close(s.ch)
} }
// Save stores a blob in the repo. It checks the index and the known blobs // Save stores a blob in the repo. It checks the index and the known blobs
// before saving anything. It takes ownership of the buffer passed in. // before saving anything. It takes ownership of the buffer passed in.
func (s *BlobSaver) Save(ctx context.Context, t restic.BlobType, buf *Buffer, filename string, cb func(res SaveBlobResponse)) { func (s *blobSaver) Save(ctx context.Context, t restic.BlobType, buf *buffer, filename string, cb func(res saveBlobResponse)) {
select { select {
case s.ch <- saveBlobJob{BlobType: t, buf: buf, fn: filename, cb: cb}: case s.ch <- saveBlobJob{BlobType: t, buf: buf, fn: filename, cb: cb}:
case <-ctx.Done(): case <-ctx.Done():
@ -54,26 +54,26 @@ func (s *BlobSaver) Save(ctx context.Context, t restic.BlobType, buf *Buffer, fi
type saveBlobJob struct { type saveBlobJob struct {
restic.BlobType restic.BlobType
buf *Buffer buf *buffer
fn string fn string
cb func(res SaveBlobResponse) cb func(res saveBlobResponse)
} }
type SaveBlobResponse struct { type saveBlobResponse struct {
id restic.ID id restic.ID
length int length int
sizeInRepo int sizeInRepo int
known bool known bool
} }
func (s *BlobSaver) saveBlob(ctx context.Context, t restic.BlobType, buf []byte) (SaveBlobResponse, error) { func (s *blobSaver) saveBlob(ctx context.Context, t restic.BlobType, buf []byte) (saveBlobResponse, error) {
id, known, sizeInRepo, err := s.repo.SaveBlob(ctx, t, buf, restic.ID{}, false) id, known, sizeInRepo, err := s.repo.SaveBlob(ctx, t, buf, restic.ID{}, false)
if err != nil { if err != nil {
return SaveBlobResponse{}, err return saveBlobResponse{}, err
} }
return SaveBlobResponse{ return saveBlobResponse{
id: id, id: id,
length: len(buf), length: len(buf),
sizeInRepo: sizeInRepo, sizeInRepo: sizeInRepo,
@ -81,7 +81,7 @@ func (s *BlobSaver) saveBlob(ctx context.Context, t restic.BlobType, buf []byte)
}, nil }, nil
} }
func (s *BlobSaver) worker(ctx context.Context, jobs <-chan saveBlobJob) error { func (s *blobSaver) worker(ctx context.Context, jobs <-chan saveBlobJob) error {
for { for {
var job saveBlobJob var job saveBlobJob
var ok bool var ok bool

View file

@ -38,20 +38,20 @@ func TestBlobSaver(t *testing.T) {
wg, ctx := errgroup.WithContext(ctx) wg, ctx := errgroup.WithContext(ctx)
saver := &saveFail{} saver := &saveFail{}
b := NewBlobSaver(ctx, wg, saver, uint(runtime.NumCPU())) b := newBlobSaver(ctx, wg, saver, uint(runtime.NumCPU()))
var wait sync.WaitGroup var wait sync.WaitGroup
var results []SaveBlobResponse var results []saveBlobResponse
var lock sync.Mutex var lock sync.Mutex
wait.Add(20) wait.Add(20)
for i := 0; i < 20; i++ { for i := 0; i < 20; i++ {
buf := &Buffer{Data: []byte(fmt.Sprintf("foo%d", i))} buf := &buffer{Data: []byte(fmt.Sprintf("foo%d", i))}
idx := i idx := i
lock.Lock() lock.Lock()
results = append(results, SaveBlobResponse{}) results = append(results, saveBlobResponse{})
lock.Unlock() lock.Unlock()
b.Save(ctx, restic.DataBlob, buf, "file", func(res SaveBlobResponse) { b.Save(ctx, restic.DataBlob, buf, "file", func(res saveBlobResponse) {
lock.Lock() lock.Lock()
results[idx] = res results[idx] = res
lock.Unlock() lock.Unlock()
@ -95,11 +95,11 @@ func TestBlobSaverError(t *testing.T) {
failAt: int32(test.failAt), failAt: int32(test.failAt),
} }
b := NewBlobSaver(ctx, wg, saver, uint(runtime.NumCPU())) b := newBlobSaver(ctx, wg, saver, uint(runtime.NumCPU()))
for i := 0; i < test.blobs; i++ { for i := 0; i < test.blobs; i++ {
buf := &Buffer{Data: []byte(fmt.Sprintf("foo%d", i))} buf := &buffer{Data: []byte(fmt.Sprintf("foo%d", i))}
b.Save(ctx, restic.DataBlob, buf, "errfile", func(res SaveBlobResponse) {}) b.Save(ctx, restic.DataBlob, buf, "errfile", func(res saveBlobResponse) {})
} }
b.TriggerShutdown() b.TriggerShutdown()

View file

@ -1,14 +1,14 @@
package archiver package archiver
// Buffer is a reusable buffer. After the buffer has been used, Release should // buffer is a reusable buffer. After the buffer has been used, Release should
// be called so the underlying slice is put back into the pool. // be called so the underlying slice is put back into the pool.
type Buffer struct { type buffer struct {
Data []byte Data []byte
pool *BufferPool pool *bufferPool
} }
// Release puts the buffer back into the pool it came from. // Release puts the buffer back into the pool it came from.
func (b *Buffer) Release() { func (b *buffer) Release() {
pool := b.pool pool := b.pool
if pool == nil || cap(b.Data) > pool.defaultSize { if pool == nil || cap(b.Data) > pool.defaultSize {
return return
@ -20,32 +20,32 @@ func (b *Buffer) Release() {
} }
} }
// BufferPool implements a limited set of reusable buffers. // bufferPool implements a limited set of reusable buffers.
type BufferPool struct { type bufferPool struct {
ch chan *Buffer ch chan *buffer
defaultSize int defaultSize int
} }
// NewBufferPool initializes a new buffer pool. The pool stores at most max // newBufferPool initializes a new buffer pool. The pool stores at most max
// items. New buffers are created with defaultSize. Buffers that have grown // items. New buffers are created with defaultSize. Buffers that have grown
// larger are not put back. // larger are not put back.
func NewBufferPool(max int, defaultSize int) *BufferPool { func newBufferPool(max int, defaultSize int) *bufferPool {
b := &BufferPool{ b := &bufferPool{
ch: make(chan *Buffer, max), ch: make(chan *buffer, max),
defaultSize: defaultSize, defaultSize: defaultSize,
} }
return b return b
} }
// Get returns a new buffer, either from the pool or newly allocated. // Get returns a new buffer, either from the pool or newly allocated.
func (pool *BufferPool) Get() *Buffer { func (pool *bufferPool) Get() *buffer {
select { select {
case buf := <-pool.ch: case buf := <-pool.ch:
return buf return buf
default: default:
} }
b := &Buffer{ b := &buffer{
Data: make([]byte, pool.defaultSize), Data: make([]byte, pool.defaultSize),
pool: pool, pool: pool,
} }
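A quick illustration of the now package-internal buffer pool (a sketch; sizes arbitrary, payload is a placeholder []byte):

pool := newBufferPool(4, 1<<20) // keep at most 4 buffers of 1 MiB default size
buf := pool.Get()               // reuses a pooled buffer or allocates a new one
copy(buf.Data, payload)         // fill the buffer with data to be saved
buf.Release()                   // returned to the pool unless it outgrew defaultSize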

View file

@ -1,12 +1,3 @@
// Package archiver contains the code which reads files, splits them into // Package archiver contains the code which reads files, splits them into
// chunks and saves the data to the repository. // chunks and saves the data to the repository.
//
// An Archiver has a number of worker goroutines handling saving the different
// data structures to the repository, the details are implemented by the
// FileSaver, BlobSaver, and TreeSaver types.
//
// The main goroutine (the one calling Snapshot()) traverses the directory tree
// and delegates all work to these worker pools. They return a type
// (FutureFile, FutureBlob, and FutureTree) which can be resolved later, by
// calling Wait() on it.
package archiver package archiver

View file

@ -0,0 +1,332 @@
package archiver
import (
"bytes"
"fmt"
"io"
"os"
"strings"
"sync"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/fs"
)
// RejectByNameFunc is a function that takes a filename of a
// file that would be included in the backup. The function returns true if the
// file should be excluded (rejected) from the backup.
type RejectByNameFunc func(path string) bool
// RejectFunc is a function that takes a filename and os.FileInfo of a
// file that would be included in the backup. The function returns true if the
// file should be excluded (rejected) from the backup.
type RejectFunc func(path string, fi os.FileInfo, fs fs.FS) bool
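// CombineRejectByNames wraps a set of RejectByNameFuncs into a single
// SelectByNameFunc that accepts an item only if no function rejects it.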
func CombineRejectByNames(funcs []RejectByNameFunc) SelectByNameFunc {
return func(item string) bool {
for _, reject := range funcs {
if reject(item) {
return false
}
}
return true
}
}
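// CombineRejects is the analogue for RejectFuncs, which additionally receive
// the file metadata and the filesystem.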
func CombineRejects(funcs []RejectFunc) SelectFunc {
return func(item string, fi os.FileInfo, fs fs.FS) bool {
for _, reject := range funcs {
if reject(item, fi, fs) {
return false
}
}
return true
}
}
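A sketch of composing the exported constructors into a select function (flag handling elided; warnf stands in for the real warning callback):

warnf := func(msg string, args ...interface{}) { fmt.Printf(msg+"\n", args...) }
sizeReject, err := RejectBySize(1 << 20) // reject files larger than 1 MiB
if err != nil {
	return err
}
tagReject, err := RejectIfPresent(".nobackup", warnf)
if err != nil {
	return err
}
selectFn := CombineRejects([]RejectFunc{sizeReject, tagReject})
// selectFn(item, fi, filesystem) reports true when the item should be kept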
type rejectionCache struct {
m map[string]bool
mtx sync.Mutex
}
func newRejectionCache() *rejectionCache {
return &rejectionCache{m: make(map[string]bool)}
}
// Lock locks the mutex in rc.
func (rc *rejectionCache) Lock() {
rc.mtx.Lock()
}
// Unlock unlocks the mutex in rc.
func (rc *rejectionCache) Unlock() {
rc.mtx.Unlock()
}
// Get returns the last stored value for dir and a second boolean that
// indicates whether that value was actually written to the cache. It is the
// caller's responsibility to call rc.Lock and rc.Unlock before using this
// method, otherwise data races may occur.
func (rc *rejectionCache) Get(dir string) (bool, bool) {
v, ok := rc.m[dir]
return v, ok
}
// Store stores a new value for dir. It is the caller's responsibility to call
// rc.Lock and rc.Unlock before using this method, otherwise data races may
// occur.
func (rc *rejectionCache) Store(dir string, rejected bool) {
rc.m[dir] = rejected
}
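The intended locking pattern around Get and Store, as a sketch (checkDir is a hypothetical stand-in for the actual directory probe):

rc := newRejectionCache()
rc.Lock()
rejected, visited := rc.Get(dir)
if !visited {
	rejected = checkDir(dir) // expensive check, done at most once per dir
	rc.Store(dir, rejected)
}
rc.Unlock()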
// RejectIfPresent returns a RejectFunc which itself returns whether a path
// should be excluded. The RejectFunc considers a file to be excluded when
// it resides in a directory with an exclusion file, that is specified by
// excludeFileSpec in the form "filename[:content]". The returned error is
// non-nil if the filename component of excludeFileSpec is empty. An internal
// rejection cache is used to expedite the evaluation of a directory based on
// previous visits.
func RejectIfPresent(excludeFileSpec string, warnf func(msg string, args ...interface{})) (RejectFunc, error) {
if excludeFileSpec == "" {
return nil, errors.New("name for exclusion tagfile is empty")
}
colon := strings.Index(excludeFileSpec, ":")
if colon == 0 {
return nil, fmt.Errorf("no name for exclusion tagfile provided")
}
tf, tc := "", ""
if colon > 0 {
tf = excludeFileSpec[:colon]
tc = excludeFileSpec[colon+1:]
} else {
tf = excludeFileSpec
}
debug.Log("using %q as exclusion tagfile", tf)
rc := newRejectionCache()
return func(filename string, _ os.FileInfo, fs fs.FS) bool {
return isExcludedByFile(filename, tf, tc, rc, fs, warnf)
}, nil
}
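Both forms of excludeFileSpec, as a short sketch (warnf as above; the CACHEDIR.TAG signature shown is the standard one):

// presence of the tagfile alone excludes the directory's contents
noBackup, _ := RejectIfPresent(".nobackup", warnf)
// exclude only when the tagfile starts with the given header
cacheDir, _ := RejectIfPresent("CACHEDIR.TAG:Signature: 8a477f597d28d172789f06886806bc55", warnf)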
// isExcludedByFile interprets filename as a path and returns true if that file
// is in an excluded directory. A directory is identified as excluded if it contains a
// tagfile which bears the name specified in tagFilename and starts with
// header. If rc is non-nil, it is used to expedite the evaluation of a
// directory based on previous visits.
func isExcludedByFile(filename, tagFilename, header string, rc *rejectionCache, fs fs.FS, warnf func(msg string, args ...interface{})) bool {
if tagFilename == "" {
return false
}
if fs.Base(filename) == tagFilename {
return false // do not exclude the tagfile itself
}
rc.Lock()
defer rc.Unlock()
dir := fs.Dir(filename)
rejected, visited := rc.Get(dir)
if visited {
return rejected
}
rejected = isDirExcludedByFile(dir, tagFilename, header, fs, warnf)
rc.Store(dir, rejected)
return rejected
}
func isDirExcludedByFile(dir, tagFilename, header string, fs fs.FS, warnf func(msg string, args ...interface{})) bool {
tf := fs.Join(dir, tagFilename)
_, err := fs.Lstat(tf)
if errors.Is(err, os.ErrNotExist) {
return false
}
if err != nil {
warnf("could not access exclusion tagfile: %v", err)
return false
}
// when no signature is given, the mere presence of tf is enough reason
// to exclude filename
if len(header) == 0 {
return true
}
// From this stage, errors mean tagFilename exists but it is malformed.
// Warnings will be generated so that the user is informed that the
// intended ignore-action is not performed.
f, err := fs.OpenFile(tf, os.O_RDONLY, 0)
if err != nil {
warnf("could not open exclusion tagfile: %v", err)
return false
}
defer func() {
_ = f.Close()
}()
buf := make([]byte, len(header))
_, err = io.ReadFull(f, buf)
// EOF is handled with a dedicated message, otherwise the warning would be too cryptic
if err == io.EOF {
warnf("invalid (too short) signature in exclusion tagfile %q\n", tf)
return false
}
if err != nil {
warnf("could not read signature from exclusion tagfile %q: %v\n", tf, err)
return false
}
if !bytes.Equal(buf, []byte(header)) {
warnf("invalid signature in exclusion tagfile %q\n", tf)
return false
}
return true
}
// deviceMap is used to track allowed source devices for backup. This is used to
// check for crossing mount points during backup (for --one-file-system). It
// maps the name of a source path to its device ID.
type deviceMap map[string]uint64
// newDeviceMap creates a new device map from the list of source paths.
func newDeviceMap(allowedSourcePaths []string, fs fs.FS) (deviceMap, error) {
deviceMap := make(map[string]uint64)
for _, item := range allowedSourcePaths {
item, err := fs.Abs(fs.Clean(item))
if err != nil {
return nil, err
}
fi, err := fs.Lstat(item)
if err != nil {
return nil, err
}
id, err := fs.DeviceID(fi)
if err != nil {
return nil, err
}
deviceMap[item] = id
}
if len(deviceMap) == 0 {
return nil, errors.New("zero allowed devices")
}
return deviceMap, nil
}
// IsAllowed returns true if the path is located on an allowed device.
func (m deviceMap) IsAllowed(item string, deviceID uint64, fs fs.FS) (bool, error) {
for dir := item; ; dir = fs.Dir(dir) {
debug.Log("item %v, test dir %v", item, dir)
// find a parent directory that is on an allowed device (otherwise
// we would not traverse the directory at all)
allowedID, ok := m[dir]
if !ok {
if dir == fs.Dir(dir) {
// arrived at root, no allowed device found. this should not happen.
break
}
continue
}
// if the item has a different device ID than the parent directory,
// we crossed a file system boundary
if allowedID != deviceID {
debug.Log("item %v (dir %v) on disallowed device %d", item, dir, deviceID)
return false, nil
}
// item is on allowed device, accept it
debug.Log("item %v allowed", item)
return true, nil
}
return false, fmt.Errorf("item %v (device ID %v) not found, deviceMap: %v", item, deviceID, m)
}
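Putting newDeviceMap and IsAllowed together, a sketch (paths illustrative, errors elided):

filesystem := &fs.Local{}
m, _ := newDeviceMap([]string{"/home"}, filesystem)
fi, _ := filesystem.Lstat("/home/user/file")
id, _ := filesystem.DeviceID(fi)
allowed, _ := m.IsAllowed(filesystem.Clean("/home/user/file"), id, filesystem)
// allowed is false if the file sits on a different device than /home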
// RejectByDevice returns a RejectFunc that rejects files which are on a
// different file systems than the files/dirs in samples.
func RejectByDevice(samples []string, filesystem fs.FS) (RejectFunc, error) {
deviceMap, err := newDeviceMap(samples, filesystem)
if err != nil {
return nil, err
}
debug.Log("allowed devices: %v\n", deviceMap)
return func(item string, fi os.FileInfo, fs fs.FS) bool {
id, err := fs.DeviceID(fi)
if err != nil {
// This should never happen because newDeviceMap() would have
// errored out earlier. If it still does, that's a reason to panic.
panic(err)
}
allowed, err := deviceMap.IsAllowed(fs.Clean(item), id, fs)
if err != nil {
// this should not happen
panic(fmt.Sprintf("error checking device ID of %v: %v", item, err))
}
if allowed {
// accept item
return false
}
// reject everything except directories
if !fi.IsDir() {
return true
}
// special case: make sure we keep mountpoints (directories which
// contain a mounted file system). Test this by checking if the parent
// directory would be included.
parentDir := fs.Dir(fs.Clean(item))
parentFI, err := fs.Lstat(parentDir)
if err != nil {
debug.Log("item %v: error running lstat() on parent directory: %v", item, err)
// if in doubt, reject
return true
}
parentDeviceID, err := fs.DeviceID(parentFI)
if err != nil {
debug.Log("item %v: getting device ID of parent directory: %v", item, err)
// if in doubt, reject
return true
}
parentAllowed, err := deviceMap.IsAllowed(parentDir, parentDeviceID, fs)
if err != nil {
debug.Log("item %v: error checking parent directory: %v", item, err)
// if in doubt, reject
return true
}
if parentAllowed {
// we found a mount point, so accept the directory
return false
}
// reject everything else
return true
}, nil
}
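// RejectBySize returns a RejectFunc that rejects files whose size exceeds
// maxSize (in bytes). Directories are never rejected by size.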
func RejectBySize(maxSize int64) (RejectFunc, error) {
return func(item string, fi os.FileInfo, _ fs.FS) bool {
// directory will be ignored
if fi.IsDir() {
return false
}
filesize := fi.Size()
if filesize > maxSize {
debug.Log("file %s is oversize: %d", item, filesize)
return true
}
return false
}, nil
}
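RejectBySize now takes a byte count instead of the human-readable string the old cmd-level rejectBySize accepted, so parsing a flag value like "1k" moves to the caller. A hypothetical caller-side sketch (assuming a ParseBytes-style helper in internal/ui):

maxSize, err := ui.ParseBytes(opts.ExcludeLargerThan) // e.g. "1k" -> 1024
if err != nil {
	return err
}
sizeReject, err := RejectBySize(maxSize)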

View file

@ -1,67 +1,14 @@
package main package archiver
import ( import (
"os" "os"
"path/filepath" "path/filepath"
"testing" "testing"
"github.com/restic/restic/internal/fs"
"github.com/restic/restic/internal/test" "github.com/restic/restic/internal/test"
) )
func TestRejectByPattern(t *testing.T) {
var tests = []struct {
filename string
reject bool
}{
{filename: "/home/user/foo.go", reject: true},
{filename: "/home/user/foo.c", reject: false},
{filename: "/home/user/foobar", reject: false},
{filename: "/home/user/foobar/x", reject: true},
{filename: "/home/user/README", reject: false},
{filename: "/home/user/README.md", reject: true},
}
patterns := []string{"*.go", "README.md", "/home/user/foobar/*"}
for _, tc := range tests {
t.Run("", func(t *testing.T) {
reject := rejectByPattern(patterns)
res := reject(tc.filename)
if res != tc.reject {
t.Fatalf("wrong result for filename %v: want %v, got %v",
tc.filename, tc.reject, res)
}
})
}
}
func TestRejectByInsensitivePattern(t *testing.T) {
var tests = []struct {
filename string
reject bool
}{
{filename: "/home/user/foo.GO", reject: true},
{filename: "/home/user/foo.c", reject: false},
{filename: "/home/user/foobar", reject: false},
{filename: "/home/user/FOObar/x", reject: true},
{filename: "/home/user/README", reject: false},
{filename: "/home/user/readme.md", reject: true},
}
patterns := []string{"*.go", "README.md", "/home/user/foobar/*"}
for _, tc := range tests {
t.Run("", func(t *testing.T) {
reject := rejectByInsensitivePattern(patterns)
res := reject(tc.filename)
if res != tc.reject {
t.Fatalf("wrong result for filename %v: want %v, got %v",
tc.filename, tc.reject, res)
}
})
}
}
func TestIsExcludedByFile(t *testing.T) { func TestIsExcludedByFile(t *testing.T) {
const ( const (
tagFilename = "CACHEDIR.TAG" tagFilename = "CACHEDIR.TAG"
@ -102,7 +49,7 @@ func TestIsExcludedByFile(t *testing.T) {
if tc.content == "" { if tc.content == "" {
h = "" h = ""
} }
if got := isExcludedByFile(foo, tagFilename, h, nil); tc.want != got { if got := isExcludedByFile(foo, tagFilename, h, newRejectionCache(), &fs.Local{}, func(msg string, args ...interface{}) { t.Logf(msg, args...) }); tc.want != got {
t.Fatalf("expected %v, got %v", tc.want, got) t.Fatalf("expected %v, got %v", tc.want, got)
} }
}) })
@ -153,8 +100,8 @@ func TestMultipleIsExcludedByFile(t *testing.T) {
// create two rejection functions, one that tests for the NOFOO file // create two rejection functions, one that tests for the NOFOO file
// and one for the NOBAR file // and one for the NOBAR file
fooExclude, _ := rejectIfPresent("NOFOO") fooExclude, _ := RejectIfPresent("NOFOO", nil)
barExclude, _ := rejectIfPresent("NOBAR") barExclude, _ := RejectIfPresent("NOBAR", nil)
// To mock the archiver scanning walk, we create filepath.WalkFn // To mock the archiver scanning walk, we create filepath.WalkFn
// that tests against the two rejection functions and stores // that tests against the two rejection functions and stores
@ -164,8 +111,8 @@ func TestMultipleIsExcludedByFile(t *testing.T) {
if err != nil { if err != nil {
return err return err
} }
excludedByFoo := fooExclude(p) excludedByFoo := fooExclude(p, nil, &fs.Local{})
excludedByBar := barExclude(p) excludedByBar := barExclude(p, nil, &fs.Local{})
excluded := excludedByFoo || excludedByBar excluded := excludedByFoo || excludedByBar
// the log message helps debugging in case the test fails // the log message helps debugging in case the test fails
t.Logf("%q: %v || %v = %v", p, excludedByFoo, excludedByBar, excluded) t.Logf("%q: %v || %v = %v", p, excludedByFoo, excludedByBar, excluded)
@ -192,9 +139,6 @@ func TestMultipleIsExcludedByFile(t *testing.T) {
func TestIsExcludedByFileSize(t *testing.T) { func TestIsExcludedByFileSize(t *testing.T) {
tempDir := test.TempDir(t) tempDir := test.TempDir(t)
// Max size of file is set to be 1k
maxSizeStr := "1k"
// Create some files in a temporary directory. // Create some files in a temporary directory.
// Files in UPPERCASE will be used as exclusion triggers later on. // Files in UPPERCASE will be used as exclusion triggers later on.
// We will test the inclusion later, so we add the expected value as // We will test the inclusion later, so we add the expected value as
@ -238,7 +182,7 @@ func TestIsExcludedByFileSize(t *testing.T) {
test.OKs(t, errs) // see if anything went wrong during the creation test.OKs(t, errs) // see if anything went wrong during the creation
// create rejection function // create rejection function
sizeExclude, _ := rejectBySize(maxSizeStr) sizeExclude, _ := RejectBySize(1024)
// To mock the archiver scanning walk, we create filepath.WalkFn // To mock the archiver scanning walk, we create filepath.WalkFn
// that tests against the two rejection functions and stores // that tests against the two rejection functions and stores
@ -249,7 +193,7 @@ func TestIsExcludedByFileSize(t *testing.T) {
return err return err
} }
excluded := sizeExclude(p, fi) excluded := sizeExclude(p, fi, nil)
// the log message helps debugging in case the test fails // the log message helps debugging in case the test fails
t.Logf("%q: dir:%t; size:%d; excluded:%v", p, fi.IsDir(), fi.Size(), excluded) t.Logf("%q: dir:%t; size:%d; excluded:%v", p, fi.IsDir(), fi.Size(), excluded)
m[p] = !excluded m[p] = !excluded
@ -268,7 +212,7 @@ func TestIsExcludedByFileSize(t *testing.T) {
} }
func TestDeviceMap(t *testing.T) { func TestDeviceMap(t *testing.T) {
deviceMap := DeviceMap{ deviceMap := deviceMap{
filepath.FromSlash("/"): 1, filepath.FromSlash("/"): 1,
filepath.FromSlash("/usr/local"): 5, filepath.FromSlash("/usr/local"): 5,
} }
@ -299,7 +243,7 @@ func TestDeviceMap(t *testing.T) {
for _, test := range tests { for _, test := range tests {
t.Run("", func(t *testing.T) { t.Run("", func(t *testing.T) {
res, err := deviceMap.IsAllowed(filepath.FromSlash(test.item), test.deviceID) res, err := deviceMap.IsAllowed(filepath.FromSlash(test.item), test.deviceID, &fs.Local{})
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }

View file

@ -15,13 +15,13 @@ import (
"golang.org/x/sync/errgroup" "golang.org/x/sync/errgroup"
) )
// SaveBlobFn saves a blob to a repo. // saveBlobFn saves a blob to a repo.
type SaveBlobFn func(context.Context, restic.BlobType, *Buffer, string, func(res SaveBlobResponse)) type saveBlobFn func(context.Context, restic.BlobType, *buffer, string, func(res saveBlobResponse))
// FileSaver concurrently saves incoming files to the repo. // fileSaver concurrently saves incoming files to the repo.
type FileSaver struct { type fileSaver struct {
saveFilePool *BufferPool saveFilePool *bufferPool
saveBlob SaveBlobFn saveBlob saveBlobFn
pol chunker.Pol pol chunker.Pol
@ -32,18 +32,18 @@ type FileSaver struct {
NodeFromFileInfo func(snPath, filename string, fi os.FileInfo, ignoreXattrListError bool) (*restic.Node, error) NodeFromFileInfo func(snPath, filename string, fi os.FileInfo, ignoreXattrListError bool) (*restic.Node, error)
} }
// NewFileSaver returns a new file saver. A worker pool with fileWorkers is // newFileSaver returns a new file saver. A worker pool with fileWorkers is
// started, it is stopped when ctx is cancelled. // started, it is stopped when ctx is cancelled.
func NewFileSaver(ctx context.Context, wg *errgroup.Group, save SaveBlobFn, pol chunker.Pol, fileWorkers, blobWorkers uint) *FileSaver { func newFileSaver(ctx context.Context, wg *errgroup.Group, save saveBlobFn, pol chunker.Pol, fileWorkers, blobWorkers uint) *fileSaver {
ch := make(chan saveFileJob) ch := make(chan saveFileJob)
debug.Log("new file saver with %v file workers and %v blob workers", fileWorkers, blobWorkers) debug.Log("new file saver with %v file workers and %v blob workers", fileWorkers, blobWorkers)
poolSize := fileWorkers + blobWorkers poolSize := fileWorkers + blobWorkers
s := &FileSaver{ s := &fileSaver{
saveBlob: save, saveBlob: save,
saveFilePool: NewBufferPool(int(poolSize), chunker.MaxSize), saveFilePool: newBufferPool(int(poolSize), chunker.MaxSize),
pol: pol, pol: pol,
ch: ch, ch: ch,
@ -60,18 +60,18 @@ func NewFileSaver(ctx context.Context, wg *errgroup.Group, save SaveBlobFn, pol
return s return s
} }
func (s *FileSaver) TriggerShutdown() { func (s *fileSaver) TriggerShutdown() {
close(s.ch) close(s.ch)
} }
// CompleteFunc is called when the file has been saved. // fileCompleteFunc is called when the file has been saved.
type CompleteFunc func(*restic.Node, ItemStats) type fileCompleteFunc func(*restic.Node, ItemStats)
// Save stores the file f and returns the data once it has been completed. The // Save stores the file f and returns the data once it has been completed. The
// file is closed by Save. completeReading is only called if the file was read // file is closed by Save. completeReading is only called if the file was read
// successfully. complete is always called. If completeReading is called, then // successfully. complete is always called. If completeReading is called, then
// this will always happen before calling complete. // this will always happen before calling complete.
func (s *FileSaver) Save(ctx context.Context, snPath string, target string, file fs.File, fi os.FileInfo, start func(), completeReading func(), complete CompleteFunc) FutureNode { func (s *fileSaver) Save(ctx context.Context, snPath string, target string, file fs.File, fi os.FileInfo, start func(), completeReading func(), complete fileCompleteFunc) futureNode {
fn, ch := newFutureNode() fn, ch := newFutureNode()
job := saveFileJob{ job := saveFileJob{
snPath: snPath, snPath: snPath,
@ -105,11 +105,11 @@ type saveFileJob struct {
start func() start func()
completeReading func() completeReading func()
complete CompleteFunc complete fileCompleteFunc
} }
// saveFile stores the file f in the repo, then closes it. // saveFile stores the file f in the repo, then closes it.
func (s *FileSaver) saveFile(ctx context.Context, chnker *chunker.Chunker, snPath string, target string, f fs.File, fi os.FileInfo, start func(), finishReading func(), finish func(res futureNodeResult)) { func (s *fileSaver) saveFile(ctx context.Context, chnker *chunker.Chunker, snPath string, target string, f fs.File, fi os.FileInfo, start func(), finishReading func(), finish func(res futureNodeResult)) {
start() start()
fnr := futureNodeResult{ fnr := futureNodeResult{
@ -205,7 +205,7 @@ func (s *FileSaver) saveFile(ctx context.Context, chnker *chunker.Chunker, snPat
node.Content = append(node.Content, restic.ID{}) node.Content = append(node.Content, restic.ID{})
lock.Unlock() lock.Unlock()
s.saveBlob(ctx, restic.DataBlob, buf, target, func(sbr SaveBlobResponse) { s.saveBlob(ctx, restic.DataBlob, buf, target, func(sbr saveBlobResponse) {
lock.Lock() lock.Lock()
if !sbr.known { if !sbr.known {
fnr.stats.DataBlobs++ fnr.stats.DataBlobs++
@ -246,7 +246,7 @@ func (s *FileSaver) saveFile(ctx context.Context, chnker *chunker.Chunker, snPat
completeBlob() completeBlob()
} }
func (s *FileSaver) worker(ctx context.Context, jobs <-chan saveFileJob) { func (s *fileSaver) worker(ctx context.Context, jobs <-chan saveFileJob) {
// a worker has one chunker which is reused for each file (because it contains a rather large buffer) // a worker has one chunker which is reused for each file (because it contains a rather large buffer)
chnker := chunker.New(nil, s.pol) chnker := chunker.New(nil, s.pol)

View file

@ -30,11 +30,11 @@ func createTestFiles(t testing.TB, num int) (files []string) {
return files return files
} }
func startFileSaver(ctx context.Context, t testing.TB) (*FileSaver, context.Context, *errgroup.Group) { func startFileSaver(ctx context.Context, t testing.TB) (*fileSaver, context.Context, *errgroup.Group) {
wg, ctx := errgroup.WithContext(ctx) wg, ctx := errgroup.WithContext(ctx)
saveBlob := func(ctx context.Context, tpe restic.BlobType, buf *Buffer, _ string, cb func(SaveBlobResponse)) { saveBlob := func(ctx context.Context, tpe restic.BlobType, buf *buffer, _ string, cb func(saveBlobResponse)) {
cb(SaveBlobResponse{ cb(saveBlobResponse{
id: restic.Hash(buf.Data), id: restic.Hash(buf.Data),
length: len(buf.Data), length: len(buf.Data),
sizeInRepo: len(buf.Data), sizeInRepo: len(buf.Data),
@ -48,7 +48,7 @@ func startFileSaver(ctx context.Context, t testing.TB) (*FileSaver, context.Cont
t.Fatal(err) t.Fatal(err)
} }
s := NewFileSaver(ctx, wg, saveBlob, pol, workers, workers) s := newFileSaver(ctx, wg, saveBlob, pol, workers, workers)
s.NodeFromFileInfo = func(snPath, filename string, fi os.FileInfo, ignoreXattrListError bool) (*restic.Node, error) { s.NodeFromFileInfo = func(snPath, filename string, fi os.FileInfo, ignoreXattrListError bool) (*restic.Node, error) {
return fs.NodeFromFileInfo(filename, fi, ignoreXattrListError) return fs.NodeFromFileInfo(filename, fi, ignoreXattrListError)
} }
@ -69,7 +69,7 @@ func TestFileSaver(t *testing.T) {
testFs := fs.Local{} testFs := fs.Local{}
s, ctx, wg := startFileSaver(ctx, t) s, ctx, wg := startFileSaver(ctx, t)
var results []FutureNode var results []futureNode
for _, filename := range files { for _, filename := range files {
f, err := testFs.OpenFile(filename, os.O_RDONLY, 0) f, err := testFs.OpenFile(filename, os.O_RDONLY, 0)

View file

@ -3,7 +3,6 @@ package archiver
import ( import (
"context" "context"
"os" "os"
"path/filepath"
"sort" "sort"
"github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/debug"
@ -22,11 +21,11 @@ type Scanner struct {
} }
// NewScanner initializes a new Scanner. // NewScanner initializes a new Scanner.
func NewScanner(fs fs.FS) *Scanner { func NewScanner(filesystem fs.FS) *Scanner {
return &Scanner{ return &Scanner{
FS: fs, FS: filesystem,
SelectByName: func(_ string) bool { return true }, SelectByName: func(_ string) bool { return true },
Select: func(_ string, _ os.FileInfo) bool { return true }, Select: func(_ string, _ os.FileInfo, _ fs.FS) bool { return true },
Error: func(_ string, err error) error { return err }, Error: func(_ string, err error) error { return err },
Result: func(_ string, _ ScanStats) {}, Result: func(_ string, _ ScanStats) {},
} }
@ -38,7 +37,7 @@ type ScanStats struct {
Bytes uint64 Bytes uint64
} }
func (s *Scanner) scanTree(ctx context.Context, stats ScanStats, tree Tree) (ScanStats, error) { func (s *Scanner) scanTree(ctx context.Context, stats ScanStats, tree tree) (ScanStats, error) {
// traverse the path in the file system for all leaf nodes // traverse the path in the file system for all leaf nodes
if tree.Leaf() { if tree.Leaf() {
abstarget, err := s.FS.Abs(tree.Path) abstarget, err := s.FS.Abs(tree.Path)
@ -83,7 +82,7 @@ func (s *Scanner) Scan(ctx context.Context, targets []string) error {
debug.Log("clean targets %v", cleanTargets) debug.Log("clean targets %v", cleanTargets)
// we're using the same tree representation as the archiver does // we're using the same tree representation as the archiver does
tree, err := NewTree(s.FS, cleanTargets) tree, err := newTree(s.FS, cleanTargets)
if err != nil { if err != nil {
return err return err
} }
@ -115,7 +114,7 @@ func (s *Scanner) scan(ctx context.Context, stats ScanStats, target string) (Sca
} }
// run remaining select functions that require file information // run remaining select functions that require file information
if !s.Select(target, fi) { if !s.Select(target, fi, s.FS) {
return stats, nil return stats, nil
} }
@ -131,7 +130,7 @@ func (s *Scanner) scan(ctx context.Context, stats ScanStats, target string) (Sca
sort.Strings(names) sort.Strings(names)
for _, name := range names { for _, name := range names {
stats, err = s.scan(ctx, stats, filepath.Join(target, name)) stats, err = s.scan(ctx, stats, s.FS.Join(target, name))
if err != nil { if err != nil {
return stats, err return stats, err
} }
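A sketch of wiring the scanner with the new three-argument Select callback (values illustrative):

sc := archiver.NewScanner(&fs.Local{})
sc.Select = func(item string, fi os.FileInfo, fsys fs.FS) bool {
	return fsys.Base(item) != ".git" // skip .git trees, scan everything else
}
sc.Result = func(item string, s archiver.ScanStats) {
	fmt.Printf("%v: %d bytes seen so far\n", item, s.Bytes)
}
err := sc.Scan(context.TODO(), []string{"/home/user"})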

View file

@ -56,7 +56,7 @@ func TestScanner(t *testing.T) {
}, },
}, },
}, },
selFn: func(item string, fi os.FileInfo) bool { selFn: func(item string, fi os.FileInfo, fs fs.FS) bool {
if fi.IsDir() { if fi.IsDir() {
return true return true
} }

View file

@ -9,7 +9,7 @@ import (
"github.com/restic/restic/internal/fs" "github.com/restic/restic/internal/fs"
) )
// Tree recursively defines what a snapshot should look like when // tree recursively defines what a snapshot should look like when
// archived. // archived.
// //
// When `Path` is set, this is a leaf node and the contents of `Path` should be // When `Path` is set, this is a leaf node and the contents of `Path` should be
@ -20,8 +20,8 @@ import (
// //
// `FileInfoPath` is used to extract metadata for intermediate (=non-leaf) // `FileInfoPath` is used to extract metadata for intermediate (=non-leaf)
// trees. // trees.
type Tree struct { type tree struct {
Nodes map[string]Tree Nodes map[string]tree
Path string // where the files/dirs to be saved are found Path string // where the files/dirs to be saved are found
FileInfoPath string // where the dir can be found that is not included itself, but its subdirs FileInfoPath string // where the dir can be found that is not included itself, but its subdirs
Root string // parent directory of the tree Root string // parent directory of the tree
@ -95,13 +95,13 @@ func rootDirectory(fs fs.FS, target string) string {
} }
// Add adds a new file or directory to the tree. // Add adds a new file or directory to the tree.
func (t *Tree) Add(fs fs.FS, path string) error { func (t *tree) Add(fs fs.FS, path string) error {
if path == "" { if path == "" {
panic("invalid path (empty string)") panic("invalid path (empty string)")
} }
if t.Nodes == nil { if t.Nodes == nil {
t.Nodes = make(map[string]Tree) t.Nodes = make(map[string]tree)
} }
pc, virtualPrefix := pathComponents(fs, path, false) pc, virtualPrefix := pathComponents(fs, path, false)
@ -111,7 +111,7 @@ func (t *Tree) Add(fs fs.FS, path string) error {
name := pc[0] name := pc[0]
root := rootDirectory(fs, path) root := rootDirectory(fs, path)
tree := Tree{Root: root} tree := tree{Root: root}
origName := name origName := name
i := 0 i := 0
@ -152,63 +152,63 @@ func (t *Tree) Add(fs fs.FS, path string) error {
} }
// add adds a new target path into the tree. // add adds a new target path into the tree.
func (t *Tree) add(fs fs.FS, target, root string, pc []string) error { func (t *tree) add(fs fs.FS, target, root string, pc []string) error {
if len(pc) == 0 { if len(pc) == 0 {
return errors.Errorf("invalid path %q", target) return errors.Errorf("invalid path %q", target)
} }
if t.Nodes == nil { if t.Nodes == nil {
t.Nodes = make(map[string]Tree) t.Nodes = make(map[string]tree)
} }
name := pc[0] name := pc[0]
if len(pc) == 1 { if len(pc) == 1 {
tree, ok := t.Nodes[name] node, ok := t.Nodes[name]
if !ok { if !ok {
t.Nodes[name] = Tree{Path: target} t.Nodes[name] = tree{Path: target}
return nil return nil
} }
if tree.Path != "" { if node.Path != "" {
return errors.Errorf("path is already set for target %v", target) return errors.Errorf("path is already set for target %v", target)
} }
tree.Path = target node.Path = target
t.Nodes[name] = tree t.Nodes[name] = node
return nil return nil
} }
tree := Tree{} node := tree{}
if other, ok := t.Nodes[name]; ok { if other, ok := t.Nodes[name]; ok {
tree = other node = other
} }
subroot := fs.Join(root, name) subroot := fs.Join(root, name)
tree.FileInfoPath = subroot node.FileInfoPath = subroot
err := tree.add(fs, target, subroot, pc[1:]) err := node.add(fs, target, subroot, pc[1:])
if err != nil { if err != nil {
return err return err
} }
t.Nodes[name] = tree t.Nodes[name] = node
return nil return nil
} }
func (t Tree) String() string { func (t tree) String() string {
return formatTree(t, "") return formatTree(t, "")
} }
// Leaf returns true if this is a leaf node, which means Path is set to a // Leaf returns true if this is a leaf node, which means Path is set to a
// non-empty string and the contents of Path should be inserted at this point // non-empty string and the contents of Path should be inserted at this point
// in the tree. // in the tree.
func (t Tree) Leaf() bool { func (t tree) Leaf() bool {
return t.Path != "" return t.Path != ""
} }
// NodeNames returns the sorted list of subtree names. // NodeNames returns the sorted list of subtree names.
func (t Tree) NodeNames() []string { func (t tree) NodeNames() []string {
// iterate over the nodes of the tree in lexicographic (=deterministic) order // iterate over the nodes of the tree in lexicographic (=deterministic) order
names := make([]string, 0, len(t.Nodes)) names := make([]string, 0, len(t.Nodes))
for name := range t.Nodes { for name := range t.Nodes {
@ -219,7 +219,7 @@ func (t Tree) NodeNames() []string {
} }
// formatTree returns a text representation of the tree t. // formatTree returns a text representation of the tree t.
func formatTree(t Tree, indent string) (s string) { func formatTree(t tree, indent string) (s string) {
for name, node := range t.Nodes { for name, node := range t.Nodes {
s += fmt.Sprintf("%v/%v, root %q, path %q, meta %q\n", indent, name, node.Root, node.Path, node.FileInfoPath) s += fmt.Sprintf("%v/%v, root %q, path %q, meta %q\n", indent, name, node.Root, node.Path, node.FileInfoPath)
s += formatTree(node, indent+" ") s += formatTree(node, indent+" ")
@ -228,7 +228,7 @@ func formatTree(t Tree, indent string) (s string) {
} }
// unrollTree unrolls the tree so that only leaf nodes have Path set. // unrollTree unrolls the tree so that only leaf nodes have Path set.
func unrollTree(f fs.FS, t *Tree) error { func unrollTree(f fs.FS, t *tree) error {
// if the current tree is a leaf node (Path is set) and has additional // if the current tree is a leaf node (Path is set) and has additional
// nodes, add the contents of Path to the nodes. // nodes, add the contents of Path to the nodes.
if t.Path != "" && len(t.Nodes) > 0 { if t.Path != "" && len(t.Nodes) > 0 {
@ -252,7 +252,7 @@ func unrollTree(f fs.FS, t *Tree) error {
return errors.Errorf("tree unrollTree: collision on path, node %#v, path %q", node, f.Join(t.Path, entry)) return errors.Errorf("tree unrollTree: collision on path, node %#v, path %q", node, f.Join(t.Path, entry))
} }
t.Nodes[entry] = Tree{Path: f.Join(t.Path, entry)} t.Nodes[entry] = tree{Path: f.Join(t.Path, entry)}
} }
t.Path = "" t.Path = ""
} }
@ -269,10 +269,10 @@ func unrollTree(f fs.FS, t *Tree) error {
return nil return nil
} }
// NewTree creates a Tree from the target files/directories. // newTree creates a tree from the target files/directories.
func NewTree(fs fs.FS, targets []string) (*Tree, error) { func newTree(fs fs.FS, targets []string) (*tree, error) {
debug.Log("targets: %v", targets) debug.Log("targets: %v", targets)
tree := &Tree{} tree := &tree{}
seen := make(map[string]struct{}) seen := make(map[string]struct{})
for _, target := range targets { for _, target := range targets {
target = fs.Clean(target) target = fs.Clean(target)
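To make the tree semantics concrete: per the tests below, two targets sharing a parent directory produce an intermediate node that carries only FileInfoPath, while the leaves carry Path (a sketch; paths shown in slash form):

// newTree(fs, []string{"foo/user1", "foo/user2"}) builds, in essence:
atree := tree{Nodes: map[string]tree{
	"foo": {Root: ".", FileInfoPath: "foo", Nodes: map[string]tree{
		"user1": {Path: "foo/user1"},
		"user2": {Path: "foo/user2"},
	}},
}}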

View file

@ -9,20 +9,20 @@ import (
"golang.org/x/sync/errgroup" "golang.org/x/sync/errgroup"
) )
// TreeSaver concurrently saves incoming trees to the repo. // treeSaver concurrently saves incoming trees to the repo.
type TreeSaver struct { type treeSaver struct {
saveBlob SaveBlobFn saveBlob saveBlobFn
errFn ErrorFunc errFn ErrorFunc
ch chan<- saveTreeJob ch chan<- saveTreeJob
} }
// NewTreeSaver returns a new tree saver. A worker pool with treeWorkers is // newTreeSaver returns a new tree saver. A worker pool with treeWorkers is
// started, it is stopped when ctx is cancelled. // started, it is stopped when ctx is cancelled.
func NewTreeSaver(ctx context.Context, wg *errgroup.Group, treeWorkers uint, saveBlob SaveBlobFn, errFn ErrorFunc) *TreeSaver { func newTreeSaver(ctx context.Context, wg *errgroup.Group, treeWorkers uint, saveBlob saveBlobFn, errFn ErrorFunc) *treeSaver {
ch := make(chan saveTreeJob) ch := make(chan saveTreeJob)
s := &TreeSaver{ s := &treeSaver{
ch: ch, ch: ch,
saveBlob: saveBlob, saveBlob: saveBlob,
errFn: errFn, errFn: errFn,
@ -37,12 +37,12 @@ func NewTreeSaver(ctx context.Context, wg *errgroup.Group, treeWorkers uint, sav
return s return s
} }
func (s *TreeSaver) TriggerShutdown() { func (s *treeSaver) TriggerShutdown() {
close(s.ch) close(s.ch)
} }
// Save stores the dir d and returns the data once it has been completed. // Save stores the dir d and returns the data once it has been completed.
func (s *TreeSaver) Save(ctx context.Context, snPath string, target string, node *restic.Node, nodes []FutureNode, complete CompleteFunc) FutureNode { func (s *treeSaver) Save(ctx context.Context, snPath string, target string, node *restic.Node, nodes []futureNode, complete fileCompleteFunc) futureNode {
fn, ch := newFutureNode() fn, ch := newFutureNode()
job := saveTreeJob{ job := saveTreeJob{
snPath: snPath, snPath: snPath,
@ -66,13 +66,13 @@ type saveTreeJob struct {
snPath string snPath string
target string target string
node *restic.Node node *restic.Node
nodes []FutureNode nodes []futureNode
ch chan<- futureNodeResult ch chan<- futureNodeResult
complete CompleteFunc complete fileCompleteFunc
} }
// save stores the nodes as a tree in the repo. // save stores the nodes as a tree in the repo.
func (s *TreeSaver) save(ctx context.Context, job *saveTreeJob) (*restic.Node, ItemStats, error) { func (s *treeSaver) save(ctx context.Context, job *saveTreeJob) (*restic.Node, ItemStats, error) {
var stats ItemStats var stats ItemStats
node := job.node node := job.node
nodes := job.nodes nodes := job.nodes
@ -84,7 +84,7 @@ func (s *TreeSaver) save(ctx context.Context, job *saveTreeJob) (*restic.Node, I
for i, fn := range nodes { for i, fn := range nodes {
// fn is a copy, so clear the original value explicitly // fn is a copy, so clear the original value explicitly
nodes[i] = FutureNode{} nodes[i] = futureNode{}
fnr := fn.take(ctx) fnr := fn.take(ctx)
// return the error if it wasn't ignored // return the error if it wasn't ignored
@ -128,9 +128,9 @@ func (s *TreeSaver) save(ctx context.Context, job *saveTreeJob) (*restic.Node, I
return nil, stats, err return nil, stats, err
} }
b := &Buffer{Data: buf} b := &buffer{Data: buf}
ch := make(chan SaveBlobResponse, 1) ch := make(chan saveBlobResponse, 1)
s.saveBlob(ctx, restic.TreeBlob, b, job.target, func(res SaveBlobResponse) { s.saveBlob(ctx, restic.TreeBlob, b, job.target, func(res saveBlobResponse) {
ch <- res ch <- res
}) })
@ -149,7 +149,7 @@ func (s *TreeSaver) save(ctx context.Context, job *saveTreeJob) (*restic.Node, I
} }
} }
func (s *TreeSaver) worker(ctx context.Context, jobs <-chan saveTreeJob) error { func (s *treeSaver) worker(ctx context.Context, jobs <-chan saveTreeJob) error {
for { for {
var job saveTreeJob var job saveTreeJob
var ok bool var ok bool

View file

@ -12,8 +12,8 @@ import (
"golang.org/x/sync/errgroup" "golang.org/x/sync/errgroup"
) )
func treeSaveHelper(_ context.Context, _ restic.BlobType, buf *Buffer, _ string, cb func(res SaveBlobResponse)) { func treeSaveHelper(_ context.Context, _ restic.BlobType, buf *buffer, _ string, cb func(res saveBlobResponse)) {
cb(SaveBlobResponse{ cb(saveBlobResponse{
id: restic.NewRandomID(), id: restic.NewRandomID(),
known: false, known: false,
length: len(buf.Data), length: len(buf.Data),
@ -21,7 +21,7 @@ func treeSaveHelper(_ context.Context, _ restic.BlobType, buf *Buffer, _ string,
}) })
} }
func setupTreeSaver() (context.Context, context.CancelFunc, *TreeSaver, func() error) { func setupTreeSaver() (context.Context, context.CancelFunc, *treeSaver, func() error) {
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
wg, ctx := errgroup.WithContext(ctx) wg, ctx := errgroup.WithContext(ctx)
@ -29,7 +29,7 @@ func setupTreeSaver() (context.Context, context.CancelFunc, *TreeSaver, func() e
return err return err
} }
b := NewTreeSaver(ctx, wg, uint(runtime.NumCPU()), treeSaveHelper, errFn) b := newTreeSaver(ctx, wg, uint(runtime.NumCPU()), treeSaveHelper, errFn)
shutdown := func() error { shutdown := func() error {
b.TriggerShutdown() b.TriggerShutdown()
@ -43,7 +43,7 @@ func TestTreeSaver(t *testing.T) {
ctx, cancel, b, shutdown := setupTreeSaver() ctx, cancel, b, shutdown := setupTreeSaver()
defer cancel() defer cancel()
var results []FutureNode var results []futureNode
for i := 0; i < 20; i++ { for i := 0; i < 20; i++ {
node := &restic.Node{ node := &restic.Node{
@ -83,13 +83,13 @@ func TestTreeSaverError(t *testing.T) {
ctx, cancel, b, shutdown := setupTreeSaver() ctx, cancel, b, shutdown := setupTreeSaver()
defer cancel() defer cancel()
var results []FutureNode var results []futureNode
for i := 0; i < test.trees; i++ { for i := 0; i < test.trees; i++ {
node := &restic.Node{ node := &restic.Node{
Name: fmt.Sprintf("file-%d", i), Name: fmt.Sprintf("file-%d", i),
} }
nodes := []FutureNode{ nodes := []futureNode{
newFutureNodeWithResult(futureNodeResult{node: &restic.Node{ newFutureNodeWithResult(futureNodeResult{node: &restic.Node{
Name: fmt.Sprintf("child-%d", i), Name: fmt.Sprintf("child-%d", i),
}}), }}),
@ -128,7 +128,7 @@ func TestTreeSaverDuplicates(t *testing.T) {
node := &restic.Node{ node := &restic.Node{
Name: "file", Name: "file",
} }
nodes := []FutureNode{ nodes := []futureNode{
newFutureNodeWithResult(futureNodeResult{node: &restic.Node{ newFutureNodeWithResult(futureNodeResult{node: &restic.Node{
Name: "child", Name: "child",
}}), }}),

View file

@ -12,7 +12,7 @@ import (
) )
// debug.Log requires Tree.String. // debug.Log requires tree.String.
var _ fmt.Stringer = Tree{} var _ fmt.Stringer = tree{}
func TestPathComponents(t *testing.T) { func TestPathComponents(t *testing.T) {
var tests = []struct { var tests = []struct {
@ -142,20 +142,20 @@ func TestTree(t *testing.T) {
var tests = []struct { var tests = []struct {
targets []string targets []string
src TestDir src TestDir
want Tree want tree
unix bool unix bool
win bool win bool
mustError bool mustError bool
}{ }{
{ {
targets: []string{"foo"}, targets: []string{"foo"},
want: Tree{Nodes: map[string]Tree{ want: tree{Nodes: map[string]tree{
"foo": {Path: "foo", Root: "."}, "foo": {Path: "foo", Root: "."},
}}, }},
}, },
{ {
targets: []string{"foo", "bar", "baz"}, targets: []string{"foo", "bar", "baz"},
want: Tree{Nodes: map[string]Tree{ want: tree{Nodes: map[string]tree{
"foo": {Path: "foo", Root: "."}, "foo": {Path: "foo", Root: "."},
"bar": {Path: "bar", Root: "."}, "bar": {Path: "bar", Root: "."},
"baz": {Path: "baz", Root: "."}, "baz": {Path: "baz", Root: "."},
@ -163,8 +163,8 @@ func TestTree(t *testing.T) {
}, },
{ {
targets: []string{"foo/user1", "foo/user2", "foo/other"}, targets: []string{"foo/user1", "foo/user2", "foo/other"},
want: Tree{Nodes: map[string]Tree{ want: tree{Nodes: map[string]tree{
"foo": {Root: ".", FileInfoPath: "foo", Nodes: map[string]Tree{ "foo": {Root: ".", FileInfoPath: "foo", Nodes: map[string]tree{
"user1": {Path: filepath.FromSlash("foo/user1")}, "user1": {Path: filepath.FromSlash("foo/user1")},
"user2": {Path: filepath.FromSlash("foo/user2")}, "user2": {Path: filepath.FromSlash("foo/user2")},
"other": {Path: filepath.FromSlash("foo/other")}, "other": {Path: filepath.FromSlash("foo/other")},
@ -173,9 +173,9 @@ func TestTree(t *testing.T) {
}, },
{ {
targets: []string{"foo/work/user1", "foo/work/user2"}, targets: []string{"foo/work/user1", "foo/work/user2"},
want: Tree{Nodes: map[string]Tree{ want: tree{Nodes: map[string]tree{
"foo": {Root: ".", FileInfoPath: "foo", Nodes: map[string]Tree{ "foo": {Root: ".", FileInfoPath: "foo", Nodes: map[string]tree{
"work": {FileInfoPath: filepath.FromSlash("foo/work"), Nodes: map[string]Tree{ "work": {FileInfoPath: filepath.FromSlash("foo/work"), Nodes: map[string]tree{
"user1": {Path: filepath.FromSlash("foo/work/user1")}, "user1": {Path: filepath.FromSlash("foo/work/user1")},
"user2": {Path: filepath.FromSlash("foo/work/user2")}, "user2": {Path: filepath.FromSlash("foo/work/user2")},
}}, }},
@ -184,50 +184,50 @@ func TestTree(t *testing.T) {
}, },
{ {
targets: []string{"foo/user1", "bar/user1", "foo/other"}, targets: []string{"foo/user1", "bar/user1", "foo/other"},
want: Tree{Nodes: map[string]Tree{ want: tree{Nodes: map[string]tree{
"foo": {Root: ".", FileInfoPath: "foo", Nodes: map[string]Tree{ "foo": {Root: ".", FileInfoPath: "foo", Nodes: map[string]tree{
"user1": {Path: filepath.FromSlash("foo/user1")}, "user1": {Path: filepath.FromSlash("foo/user1")},
"other": {Path: filepath.FromSlash("foo/other")}, "other": {Path: filepath.FromSlash("foo/other")},
}}, }},
"bar": {Root: ".", FileInfoPath: "bar", Nodes: map[string]Tree{ "bar": {Root: ".", FileInfoPath: "bar", Nodes: map[string]tree{
"user1": {Path: filepath.FromSlash("bar/user1")}, "user1": {Path: filepath.FromSlash("bar/user1")},
}}, }},
}}, }},
}, },
{ {
targets: []string{"../work"}, targets: []string{"../work"},
want: Tree{Nodes: map[string]Tree{ want: tree{Nodes: map[string]tree{
"work": {Root: "..", Path: filepath.FromSlash("../work")}, "work": {Root: "..", Path: filepath.FromSlash("../work")},
}}, }},
}, },
{ {
targets: []string{"../work/other"}, targets: []string{"../work/other"},
want: Tree{Nodes: map[string]Tree{ want: tree{Nodes: map[string]tree{
"work": {Root: "..", FileInfoPath: filepath.FromSlash("../work"), Nodes: map[string]Tree{ "work": {Root: "..", FileInfoPath: filepath.FromSlash("../work"), Nodes: map[string]tree{
"other": {Path: filepath.FromSlash("../work/other")}, "other": {Path: filepath.FromSlash("../work/other")},
}}, }},
}}, }},
}, },
{ {
targets: []string{"foo/user1", "../work/other", "foo/user2"}, targets: []string{"foo/user1", "../work/other", "foo/user2"},
want: Tree{Nodes: map[string]Tree{ want: tree{Nodes: map[string]tree{
"foo": {Root: ".", FileInfoPath: "foo", Nodes: map[string]Tree{ "foo": {Root: ".", FileInfoPath: "foo", Nodes: map[string]tree{
"user1": {Path: filepath.FromSlash("foo/user1")}, "user1": {Path: filepath.FromSlash("foo/user1")},
"user2": {Path: filepath.FromSlash("foo/user2")}, "user2": {Path: filepath.FromSlash("foo/user2")},
}}, }},
"work": {Root: "..", FileInfoPath: filepath.FromSlash("../work"), Nodes: map[string]Tree{ "work": {Root: "..", FileInfoPath: filepath.FromSlash("../work"), Nodes: map[string]tree{
"other": {Path: filepath.FromSlash("../work/other")}, "other": {Path: filepath.FromSlash("../work/other")},
}}, }},
}}, }},
}, },
{ {
targets: []string{"foo/user1", "../foo/other", "foo/user2"}, targets: []string{"foo/user1", "../foo/other", "foo/user2"},
want: Tree{Nodes: map[string]Tree{ want: tree{Nodes: map[string]tree{
"foo": {Root: ".", FileInfoPath: "foo", Nodes: map[string]Tree{ "foo": {Root: ".", FileInfoPath: "foo", Nodes: map[string]tree{
"user1": {Path: filepath.FromSlash("foo/user1")}, "user1": {Path: filepath.FromSlash("foo/user1")},
"user2": {Path: filepath.FromSlash("foo/user2")}, "user2": {Path: filepath.FromSlash("foo/user2")},
}}, }},
"foo-1": {Root: "..", FileInfoPath: filepath.FromSlash("../foo"), Nodes: map[string]Tree{ "foo-1": {Root: "..", FileInfoPath: filepath.FromSlash("../foo"), Nodes: map[string]tree{
"other": {Path: filepath.FromSlash("../foo/other")}, "other": {Path: filepath.FromSlash("../foo/other")},
}}, }},
}}, }},
@ -240,11 +240,11 @@ func TestTree(t *testing.T) {
}, },
}, },
targets: []string{"foo", "foo/work"}, targets: []string{"foo", "foo/work"},
want: Tree{Nodes: map[string]Tree{ want: tree{Nodes: map[string]tree{
"foo": { "foo": {
Root: ".", Root: ".",
FileInfoPath: "foo", FileInfoPath: "foo",
Nodes: map[string]Tree{ Nodes: map[string]tree{
"file": {Path: filepath.FromSlash("foo/file")}, "file": {Path: filepath.FromSlash("foo/file")},
"work": {Path: filepath.FromSlash("foo/work")}, "work": {Path: filepath.FromSlash("foo/work")},
}, },
@ -261,11 +261,11 @@ func TestTree(t *testing.T) {
}, },
}, },
targets: []string{"foo/work", "foo"}, targets: []string{"foo/work", "foo"},
want: Tree{Nodes: map[string]Tree{ want: tree{Nodes: map[string]tree{
"foo": { "foo": {
Root: ".", Root: ".",
FileInfoPath: "foo", FileInfoPath: "foo",
Nodes: map[string]Tree{ Nodes: map[string]tree{
"file": {Path: filepath.FromSlash("foo/file")}, "file": {Path: filepath.FromSlash("foo/file")},
"work": {Path: filepath.FromSlash("foo/work")}, "work": {Path: filepath.FromSlash("foo/work")},
}, },
@ -282,11 +282,11 @@ func TestTree(t *testing.T) {
}, },
}, },
targets: []string{"foo/work", "foo/work/user2"}, targets: []string{"foo/work", "foo/work/user2"},
want: Tree{Nodes: map[string]Tree{ want: tree{Nodes: map[string]tree{
"foo": {Root: ".", FileInfoPath: "foo", Nodes: map[string]Tree{ "foo": {Root: ".", FileInfoPath: "foo", Nodes: map[string]tree{
"work": { "work": {
FileInfoPath: filepath.FromSlash("foo/work"), FileInfoPath: filepath.FromSlash("foo/work"),
Nodes: map[string]Tree{ Nodes: map[string]tree{
"user1": {Path: filepath.FromSlash("foo/work/user1")}, "user1": {Path: filepath.FromSlash("foo/work/user1")},
"user2": {Path: filepath.FromSlash("foo/work/user2")}, "user2": {Path: filepath.FromSlash("foo/work/user2")},
}, },
@ -304,10 +304,10 @@ func TestTree(t *testing.T) {
}, },
}, },
targets: []string{"foo/work/user2", "foo/work"}, targets: []string{"foo/work/user2", "foo/work"},
want: Tree{Nodes: map[string]Tree{ want: tree{Nodes: map[string]tree{
"foo": {Root: ".", FileInfoPath: "foo", Nodes: map[string]Tree{ "foo": {Root: ".", FileInfoPath: "foo", Nodes: map[string]tree{
"work": {FileInfoPath: filepath.FromSlash("foo/work"), "work": {FileInfoPath: filepath.FromSlash("foo/work"),
Nodes: map[string]Tree{ Nodes: map[string]tree{
"user1": {Path: filepath.FromSlash("foo/work/user1")}, "user1": {Path: filepath.FromSlash("foo/work/user1")},
"user2": {Path: filepath.FromSlash("foo/work/user2")}, "user2": {Path: filepath.FromSlash("foo/work/user2")},
}, },
@ -332,12 +332,12 @@ func TestTree(t *testing.T) {
}, },
}, },
targets: []string{"foo/work/user2/data/secret", "foo"}, targets: []string{"foo/work/user2/data/secret", "foo"},
want: Tree{Nodes: map[string]Tree{ want: tree{Nodes: map[string]tree{
"foo": {Root: ".", FileInfoPath: "foo", Nodes: map[string]Tree{ "foo": {Root: ".", FileInfoPath: "foo", Nodes: map[string]tree{
"other": {Path: filepath.FromSlash("foo/other")}, "other": {Path: filepath.FromSlash("foo/other")},
"work": {FileInfoPath: filepath.FromSlash("foo/work"), Nodes: map[string]Tree{ "work": {FileInfoPath: filepath.FromSlash("foo/work"), Nodes: map[string]tree{
"user2": {FileInfoPath: filepath.FromSlash("foo/work/user2"), Nodes: map[string]Tree{ "user2": {FileInfoPath: filepath.FromSlash("foo/work/user2"), Nodes: map[string]tree{
"data": {FileInfoPath: filepath.FromSlash("foo/work/user2/data"), Nodes: map[string]Tree{ "data": {FileInfoPath: filepath.FromSlash("foo/work/user2/data"), Nodes: map[string]tree{
"secret": { "secret": {
Path: filepath.FromSlash("foo/work/user2/data/secret"), Path: filepath.FromSlash("foo/work/user2/data/secret"),
}, },
@ -368,10 +368,10 @@ func TestTree(t *testing.T) {
}, },
unix: true, unix: true,
targets: []string{"mnt/driveA", "mnt/driveA/work/driveB"}, targets: []string{"mnt/driveA", "mnt/driveA/work/driveB"},
want: Tree{Nodes: map[string]Tree{ want: tree{Nodes: map[string]tree{
"mnt": {Root: ".", FileInfoPath: filepath.FromSlash("mnt"), Nodes: map[string]Tree{ "mnt": {Root: ".", FileInfoPath: filepath.FromSlash("mnt"), Nodes: map[string]tree{
"driveA": {FileInfoPath: filepath.FromSlash("mnt/driveA"), Nodes: map[string]Tree{ "driveA": {FileInfoPath: filepath.FromSlash("mnt/driveA"), Nodes: map[string]tree{
"work": {FileInfoPath: filepath.FromSlash("mnt/driveA/work"), Nodes: map[string]Tree{ "work": {FileInfoPath: filepath.FromSlash("mnt/driveA/work"), Nodes: map[string]tree{
"driveB": { "driveB": {
Path: filepath.FromSlash("mnt/driveA/work/driveB"), Path: filepath.FromSlash("mnt/driveA/work/driveB"),
}, },
@ -384,9 +384,9 @@ func TestTree(t *testing.T) {
}, },
{ {
targets: []string{"foo/work/user", "foo/work/user"}, targets: []string{"foo/work/user", "foo/work/user"},
want: Tree{Nodes: map[string]Tree{ want: tree{Nodes: map[string]tree{
"foo": {Root: ".", FileInfoPath: "foo", Nodes: map[string]Tree{ "foo": {Root: ".", FileInfoPath: "foo", Nodes: map[string]tree{
"work": {FileInfoPath: filepath.FromSlash("foo/work"), Nodes: map[string]Tree{ "work": {FileInfoPath: filepath.FromSlash("foo/work"), Nodes: map[string]tree{
"user": {Path: filepath.FromSlash("foo/work/user")}, "user": {Path: filepath.FromSlash("foo/work/user")},
}}, }},
}}, }},
@ -394,9 +394,9 @@ func TestTree(t *testing.T) {
}, },
{ {
targets: []string{"./foo/work/user", "foo/work/user"}, targets: []string{"./foo/work/user", "foo/work/user"},
want: Tree{Nodes: map[string]Tree{ want: tree{Nodes: map[string]tree{
"foo": {Root: ".", FileInfoPath: "foo", Nodes: map[string]Tree{ "foo": {Root: ".", FileInfoPath: "foo", Nodes: map[string]tree{
"work": {FileInfoPath: filepath.FromSlash("foo/work"), Nodes: map[string]Tree{ "work": {FileInfoPath: filepath.FromSlash("foo/work"), Nodes: map[string]tree{
"user": {Path: filepath.FromSlash("foo/work/user")}, "user": {Path: filepath.FromSlash("foo/work/user")},
}}, }},
}}, }},
@ -405,10 +405,10 @@ func TestTree(t *testing.T) {
{ {
win: true, win: true,
targets: []string{`c:\users\foobar\temp`}, targets: []string{`c:\users\foobar\temp`},
want: Tree{Nodes: map[string]Tree{ want: tree{Nodes: map[string]tree{
"c": {Root: `c:\`, FileInfoPath: `c:\`, Nodes: map[string]Tree{ "c": {Root: `c:\`, FileInfoPath: `c:\`, Nodes: map[string]tree{
"users": {FileInfoPath: `c:\users`, Nodes: map[string]Tree{ "users": {FileInfoPath: `c:\users`, Nodes: map[string]tree{
"foobar": {FileInfoPath: `c:\users\foobar`, Nodes: map[string]Tree{ "foobar": {FileInfoPath: `c:\users\foobar`, Nodes: map[string]tree{
"temp": {Path: `c:\users\foobar\temp`}, "temp": {Path: `c:\users\foobar\temp`},
}}, }},
}}, }},
@ -445,7 +445,7 @@ func TestTree(t *testing.T) {
back := rtest.Chdir(t, tempdir) back := rtest.Chdir(t, tempdir)
defer back() defer back()
tree, err := NewTree(fs.Local{}, test.targets) tree, err := newTree(fs.Local{}, test.targets)
if test.mustError { if test.mustError {
if err == nil { if err == nil {
t.Fatal("expected error, got nil") t.Fatal("expected error, got nil")

internal/filter/exclude.go

@ -0,0 +1,162 @@
package filter
import (
"bufio"
"bytes"
"fmt"
"os"
"strings"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/textfile"
"github.com/spf13/pflag"
)
// RejectByNameFunc is a function that takes a filename of a
// file that would be included in the backup. The function returns true if it
// should be excluded (rejected) from the backup.
type RejectByNameFunc func(path string) bool
// RejectByPattern returns a RejectByNameFunc which rejects files that match
// one of the patterns.
func RejectByPattern(patterns []string, warnf func(msg string, args ...interface{})) RejectByNameFunc {
parsedPatterns := ParsePatterns(patterns)
return func(item string) bool {
matched, err := List(parsedPatterns, item)
if err != nil {
warnf("error for exclude pattern: %v", err)
}
if matched {
debug.Log("path %q excluded by an exclude pattern", item)
return true
}
return false
}
}
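
For readers unfamiliar with the call side, a minimal sketch of how RejectByPattern might be used; the patterns, paths, and warn function are illustrative, and the import only resolves inside the restic module since the package is internal.

package main

import (
	"fmt"

	"github.com/restic/restic/internal/filter"
)

func main() {
	warnf := func(msg string, args ...interface{}) { fmt.Printf(msg+"\n", args...) }

	// Reject anything ending in .tmp and any path component named "secret".
	reject := filter.RejectByPattern([]string{"*.tmp", "secret"}, warnf)

	for _, path := range []string{"/work/a.tmp", "/work/a.txt", "/home/user/secret"} {
		fmt.Printf("%-22s rejected=%v\n", path, reject(path))
	}
}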
// RejectByInsensitivePattern is like RejectByPattern but case insensitive.
func RejectByInsensitivePattern(patterns []string, warnf func(msg string, args ...interface{})) RejectByNameFunc {
for index, path := range patterns {
patterns[index] = strings.ToLower(path)
}
rejFunc := RejectByPattern(patterns, warnf)
return func(item string) bool {
return rejFunc(strings.ToLower(item))
}
}
// readPatternsFromFiles reads all files and returns the list of
// patterns. For each line, leading and trailing white space is removed
// and comment lines are ignored. For each remaining pattern, environment
// variables are resolved. For adding a literal dollar sign ($), write $$ to
// the file.
func readPatternsFromFiles(files []string) ([]string, error) {
getenvOrDollar := func(s string) string {
if s == "$" {
return "$"
}
return os.Getenv(s)
}
var patterns []string
for _, filename := range files {
err := func() (err error) {
data, err := textfile.Read(filename)
if err != nil {
return err
}
scanner := bufio.NewScanner(bytes.NewReader(data))
for scanner.Scan() {
line := strings.TrimSpace(scanner.Text())
// ignore empty lines
if line == "" {
continue
}
// strip comments
if strings.HasPrefix(line, "#") {
continue
}
line = os.Expand(line, getenvOrDollar)
patterns = append(patterns, line)
}
return scanner.Err()
}()
if err != nil {
return nil, fmt.Errorf("failed to read patterns from file %q: %w", filename, err)
}
}
return patterns, nil
}
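
To make the parsing rules concrete, an illustrative exclude file (name and contents made up): blank lines and lines starting with # are skipped, environment variables are expanded, and $$ produces a literal dollar sign.

# temporary files anywhere in the tree
*.tmp

# per-user cache directory, resolved from the environment
$HOME/.cache

# written as $$, stored as the literal pattern "price_$_list"
price_$$_list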
type ExcludePatternOptions struct {
Excludes []string
InsensitiveExcludes []string
ExcludeFiles []string
InsensitiveExcludeFiles []string
}
func (opts *ExcludePatternOptions) Add(f *pflag.FlagSet) {
f.StringArrayVarP(&opts.Excludes, "exclude", "e", nil, "exclude a `pattern` (can be specified multiple times)")
f.StringArrayVar(&opts.InsensitiveExcludes, "iexclude", nil, "same as --exclude `pattern` but ignores the casing of filenames")
f.StringArrayVar(&opts.ExcludeFiles, "exclude-file", nil, "read exclude patterns from a `file` (can be specified multiple times)")
f.StringArrayVar(&opts.InsensitiveExcludeFiles, "iexclude-file", nil, "same as --exclude-file but ignores casing of `file`names in patterns")
}
func (opts *ExcludePatternOptions) Empty() bool {
return len(opts.Excludes) == 0 && len(opts.InsensitiveExcludes) == 0 && len(opts.ExcludeFiles) == 0 && len(opts.InsensitiveExcludeFiles) == 0
}
func (opts ExcludePatternOptions) CollectPatterns(warnf func(msg string, args ...interface{})) ([]RejectByNameFunc, error) {
var fs []RejectByNameFunc
// add patterns from file
if len(opts.ExcludeFiles) > 0 {
excludePatterns, err := readPatternsFromFiles(opts.ExcludeFiles)
if err != nil {
return nil, err
}
if err := ValidatePatterns(excludePatterns); err != nil {
return nil, errors.Fatalf("--exclude-file: %s", err)
}
opts.Excludes = append(opts.Excludes, excludePatterns...)
}
if len(opts.InsensitiveExcludeFiles) > 0 {
excludes, err := readPatternsFromFiles(opts.InsensitiveExcludeFiles)
if err != nil {
return nil, err
}
if err := ValidatePatterns(excludes); err != nil {
return nil, errors.Fatalf("--iexclude-file: %s", err)
}
opts.InsensitiveExcludes = append(opts.InsensitiveExcludes, excludes...)
}
if len(opts.InsensitiveExcludes) > 0 {
if err := ValidatePatterns(opts.InsensitiveExcludes); err != nil {
return nil, errors.Fatalf("--iexclude: %s", err)
}
fs = append(fs, RejectByInsensitivePattern(opts.InsensitiveExcludes, warnf))
}
if len(opts.Excludes) > 0 {
if err := ValidatePatterns(opts.Excludes); err != nil {
return nil, errors.Fatalf("--exclude: %s", err)
}
fs = append(fs, RejectByPattern(opts.Excludes, warnf))
}
return fs, nil
}
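
A sketch of the intended wiring (flag values and command name are made up): register the flags on a pflag FlagSet, parse, then collect the reject functions.

package main

import (
	"fmt"

	"github.com/restic/restic/internal/filter"
	"github.com/spf13/pflag"
)

func main() {
	var opts filter.ExcludePatternOptions
	fset := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	opts.Add(fset)

	// Simulated command line; a real command passes os.Args[1:].
	if err := fset.Parse([]string{"--exclude", "*.bak", "--iexclude", "*.TMP"}); err != nil {
		panic(err)
	}

	rejectFuncs, err := opts.CollectPatterns(func(msg string, args ...interface{}) {
		fmt.Printf("warning: "+msg+"\n", args...)
	})
	if err != nil {
		panic(err)
	}

	// old.tmp matches the case-insensitive *.TMP pattern.
	path := "/data/old.tmp"
	for _, reject := range rejectFuncs {
		if reject(path) {
			fmt.Println(path, "is excluded")
		}
	}
}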


@ -0,0 +1,59 @@
package filter
import (
"testing"
)
func TestRejectByPattern(t *testing.T) {
var tests = []struct {
filename string
reject bool
}{
{filename: "/home/user/foo.go", reject: true},
{filename: "/home/user/foo.c", reject: false},
{filename: "/home/user/foobar", reject: false},
{filename: "/home/user/foobar/x", reject: true},
{filename: "/home/user/README", reject: false},
{filename: "/home/user/README.md", reject: true},
}
patterns := []string{"*.go", "README.md", "/home/user/foobar/*"}
for _, tc := range tests {
t.Run("", func(t *testing.T) {
reject := RejectByPattern(patterns, nil)
res := reject(tc.filename)
if res != tc.reject {
t.Fatalf("wrong result for filename %v: want %v, got %v",
tc.filename, tc.reject, res)
}
})
}
}
func TestRejectByInsensitivePattern(t *testing.T) {
var tests = []struct {
filename string
reject bool
}{
{filename: "/home/user/foo.GO", reject: true},
{filename: "/home/user/foo.c", reject: false},
{filename: "/home/user/foobar", reject: false},
{filename: "/home/user/FOObar/x", reject: true},
{filename: "/home/user/README", reject: false},
{filename: "/home/user/readme.md", reject: true},
}
patterns := []string{"*.go", "README.md", "/home/user/foobar/*"}
for _, tc := range tests {
t.Run("", func(t *testing.T) {
reject := RejectByInsensitivePattern(patterns, nil)
res := reject(tc.filename)
if res != tc.reject {
t.Fatalf("wrong result for filename %v: want %v, got %v",
tc.filename, tc.reject, res)
}
})
}
}


@ -1,10 +1,9 @@
package main package filter
import ( import (
"strings" "strings"
"github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/filter"
"github.com/spf13/pflag" "github.com/spf13/pflag"
) )
@ -12,21 +11,21 @@ import (
// in the restore process and returns whether it should be included. // in the restore process and returns whether it should be included.
type IncludeByNameFunc func(item string) (matched bool, childMayMatch bool) type IncludeByNameFunc func(item string) (matched bool, childMayMatch bool)
type includePatternOptions struct { type IncludePatternOptions struct {
Includes []string Includes []string
InsensitiveIncludes []string InsensitiveIncludes []string
IncludeFiles []string IncludeFiles []string
InsensitiveIncludeFiles []string InsensitiveIncludeFiles []string
} }
func initIncludePatternOptions(f *pflag.FlagSet, opts *includePatternOptions) { func (opts *IncludePatternOptions) Add(f *pflag.FlagSet) {
f.StringArrayVarP(&opts.Includes, "include", "i", nil, "include a `pattern` (can be specified multiple times)") f.StringArrayVarP(&opts.Includes, "include", "i", nil, "include a `pattern` (can be specified multiple times)")
f.StringArrayVar(&opts.InsensitiveIncludes, "iinclude", nil, "same as --include `pattern` but ignores the casing of filenames") f.StringArrayVar(&opts.InsensitiveIncludes, "iinclude", nil, "same as --include `pattern` but ignores the casing of filenames")
f.StringArrayVar(&opts.IncludeFiles, "include-file", nil, "read include patterns from a `file` (can be specified multiple times)") f.StringArrayVar(&opts.IncludeFiles, "include-file", nil, "read include patterns from a `file` (can be specified multiple times)")
f.StringArrayVar(&opts.InsensitiveIncludeFiles, "iinclude-file", nil, "same as --include-file but ignores casing of `file`names in patterns") f.StringArrayVar(&opts.InsensitiveIncludeFiles, "iinclude-file", nil, "same as --include-file but ignores casing of `file`names in patterns")
} }
func (opts includePatternOptions) CollectPatterns() ([]IncludeByNameFunc, error) { func (opts IncludePatternOptions) CollectPatterns(warnf func(msg string, args ...interface{})) ([]IncludeByNameFunc, error) {
var fs []IncludeByNameFunc var fs []IncludeByNameFunc
if len(opts.IncludeFiles) > 0 { if len(opts.IncludeFiles) > 0 {
includePatterns, err := readPatternsFromFiles(opts.IncludeFiles) includePatterns, err := readPatternsFromFiles(opts.IncludeFiles)
@ -34,7 +33,7 @@ func (opts includePatternOptions) CollectPatterns() ([]IncludeByNameFunc, error)
return nil, err return nil, err
} }
if err := filter.ValidatePatterns(includePatterns); err != nil { if err := ValidatePatterns(includePatterns); err != nil {
return nil, errors.Fatalf("--include-file: %s", err) return nil, errors.Fatalf("--include-file: %s", err)
} }
@ -47,7 +46,7 @@ func (opts includePatternOptions) CollectPatterns() ([]IncludeByNameFunc, error)
return nil, err return nil, err
} }
if err := filter.ValidatePatterns(includePatterns); err != nil { if err := ValidatePatterns(includePatterns); err != nil {
return nil, errors.Fatalf("--iinclude-file: %s", err) return nil, errors.Fatalf("--iinclude-file: %s", err)
} }
@ -55,45 +54,45 @@ func (opts includePatternOptions) CollectPatterns() ([]IncludeByNameFunc, error)
} }
if len(opts.InsensitiveIncludes) > 0 { if len(opts.InsensitiveIncludes) > 0 {
if err := filter.ValidatePatterns(opts.InsensitiveIncludes); err != nil { if err := ValidatePatterns(opts.InsensitiveIncludes); err != nil {
return nil, errors.Fatalf("--iinclude: %s", err) return nil, errors.Fatalf("--iinclude: %s", err)
} }
fs = append(fs, includeByInsensitivePattern(opts.InsensitiveIncludes)) fs = append(fs, IncludeByInsensitivePattern(opts.InsensitiveIncludes, warnf))
} }
if len(opts.Includes) > 0 { if len(opts.Includes) > 0 {
if err := filter.ValidatePatterns(opts.Includes); err != nil { if err := ValidatePatterns(opts.Includes); err != nil {
return nil, errors.Fatalf("--include: %s", err) return nil, errors.Fatalf("--include: %s", err)
} }
fs = append(fs, includeByPattern(opts.Includes)) fs = append(fs, IncludeByPattern(opts.Includes, warnf))
} }
return fs, nil return fs, nil
} }
// includeByPattern returns an IncludeByNameFunc which includes files that match // IncludeByPattern returns an IncludeByNameFunc which includes files that match
// one of the patterns. // one of the patterns.
func includeByPattern(patterns []string) IncludeByNameFunc { func IncludeByPattern(patterns []string, warnf func(msg string, args ...interface{})) IncludeByNameFunc {
parsedPatterns := filter.ParsePatterns(patterns) parsedPatterns := ParsePatterns(patterns)
return func(item string) (matched bool, childMayMatch bool) { return func(item string) (matched bool, childMayMatch bool) {
matched, childMayMatch, err := filter.ListWithChild(parsedPatterns, item) matched, childMayMatch, err := ListWithChild(parsedPatterns, item)
if err != nil { if err != nil {
Warnf("error for include pattern: %v", err) warnf("error for include pattern: %v", err)
} }
return matched, childMayMatch return matched, childMayMatch
} }
} }
// includeByInsensitivePattern returns an IncludeByNameFunc which includes files that match // IncludeByInsensitivePattern returns an IncludeByNameFunc which includes files that match
// one of the patterns, ignoring the casing of the filenames. // one of the patterns, ignoring the casing of the filenames.
func includeByInsensitivePattern(patterns []string) IncludeByNameFunc { func IncludeByInsensitivePattern(patterns []string, warnf func(msg string, args ...interface{})) IncludeByNameFunc {
for index, path := range patterns { for index, path := range patterns {
patterns[index] = strings.ToLower(path) patterns[index] = strings.ToLower(path)
} }
includeFunc := includeByPattern(patterns) includeFunc := IncludeByPattern(patterns, warnf)
return func(item string) (matched bool, childMayMatch bool) { return func(item string) (matched bool, childMayMatch bool) {
return includeFunc(strings.ToLower(item)) return includeFunc(strings.ToLower(item))
} }
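
The pair of return values is what lets a restore walker prune directories: matched reports whether the item itself is included, while childMayMatch reports whether descending further could still hit a pattern. A hedged sketch (pattern and items are made up; nil is a safe warnf here because the pattern is valid, as in the tests above):

package main

import (
	"fmt"

	"github.com/restic/restic/internal/filter"
)

func main() {
	include := filter.IncludeByPattern([]string{"home/*/docs"}, nil)

	for _, item := range []string{"home", "home/alice", "home/alice/docs", "var/log"} {
		matched, childMayMatch := include(item)
		fmt.Printf("%-16s matched=%-5v childMayMatch=%v\n", item, matched, childMayMatch)
	}
}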


@ -1,4 +1,4 @@
package main package filter
import ( import (
"testing" "testing"
@ -21,7 +21,7 @@ func TestIncludeByPattern(t *testing.T) {
for _, tc := range tests { for _, tc := range tests {
t.Run(tc.filename, func(t *testing.T) { t.Run(tc.filename, func(t *testing.T) {
includeFunc := includeByPattern(patterns) includeFunc := IncludeByPattern(patterns, nil)
matched, _ := includeFunc(tc.filename) matched, _ := includeFunc(tc.filename)
if matched != tc.include { if matched != tc.include {
t.Fatalf("wrong result for filename %v: want %v, got %v", t.Fatalf("wrong result for filename %v: want %v, got %v",
@ -48,7 +48,7 @@ func TestIncludeByInsensitivePattern(t *testing.T) {
for _, tc := range tests { for _, tc := range tests {
t.Run(tc.filename, func(t *testing.T) { t.Run(tc.filename, func(t *testing.T) {
includeFunc := includeByInsensitivePattern(patterns) includeFunc := IncludeByInsensitivePattern(patterns, nil)
matched, _ := includeFunc(tc.filename) matched, _ := includeFunc(tc.filename)
if matched != tc.include { if matched != tc.include {
t.Fatalf("wrong result for filename %v: want %v, got %v", t.Fatalf("wrong result for filename %v: want %v, got %v",


@ -10,9 +10,9 @@ import (
"github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/errors"
) )
// DeviceID extracts the device ID from an os.FileInfo object by casting it // deviceID extracts the device ID from an os.FileInfo object by casting it
// to syscall.Stat_t // to syscall.Stat_t
func DeviceID(fi os.FileInfo) (deviceID uint64, err error) { func deviceID(fi os.FileInfo) (deviceID uint64, err error) {
if fi == nil { if fi == nil {
return 0, errors.New("unable to determine device: fi is nil") return 0, errors.New("unable to determine device: fi is nil")
} }


@ -9,8 +9,8 @@ import (
"github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/errors"
) )
// DeviceID extracts the device ID from an os.FileInfo object by casting it // deviceID extracts the device ID from an os.FileInfo object by casting it
// to syscall.Stat_t // to syscall.Stat_t
func DeviceID(fi os.FileInfo) (deviceID uint64, err error) { func deviceID(_ os.FileInfo) (deviceID uint64, err error) {
return 0, errors.New("Device IDs are not supported on Windows") return 0, errors.New("Device IDs are not supported on Windows")
} }


@ -46,6 +46,17 @@ func (fs Local) Lstat(name string) (os.FileInfo, error) {
return os.Lstat(fixpath(name)) return os.Lstat(fixpath(name))
} }
// DeviceID extracts the DeviceID from the given FileInfo. If the fs does
// not support a DeviceID, it returns an error instead
func (fs Local) DeviceID(fi os.FileInfo) (id uint64, err error) {
return deviceID(fi)
}
// ExtendedStat converts the given FileInfo into an ExtendedFileInfo.
func (fs Local) ExtendedStat(fi os.FileInfo) ExtendedFileInfo {
return ExtendedStat(fi)
}
// Join joins any number of path elements into a single path, adding a // Join joins any number of path elements into a single path, adding a
// Separator if necessary. Join calls Clean on the result; in particular, all // Separator if necessary. Join calls Clean on the result; in particular, all
// empty strings are ignored. On Windows, the result is a UNC path if and only // empty strings are ignored. On Windows, the result is a UNC path if and only
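
With these methods, archiver code can ask the filesystem abstraction for device information instead of calling platform helpers directly. A rough sketch (the path is illustrative; on Windows or with fs.Reader the DeviceID call reports an error instead):

package main

import (
	"fmt"

	"github.com/restic/restic/internal/fs"
)

func main() {
	var fsys fs.FS = fs.Local{}

	fi, err := fsys.Lstat("/home")
	if err != nil {
		panic(err)
	}

	// On Unix this casts the FileInfo to syscall.Stat_t under the hood.
	id, err := fsys.DeviceID(fi)
	if err != nil {
		fmt.Println("device IDs not supported:", err)
		return
	}
	fmt.Printf("/home is on device %d\n", id)
}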


@ -122,6 +122,16 @@ func (fs *Reader) Lstat(name string) (os.FileInfo, error) {
return nil, pathError("lstat", name, os.ErrNotExist) return nil, pathError("lstat", name, os.ErrNotExist)
} }
func (fs *Reader) DeviceID(_ os.FileInfo) (deviceID uint64, err error) {
return 0, errors.New("Device IDs are not supported")
}
func (fs *Reader) ExtendedStat(fi os.FileInfo) ExtendedFileInfo {
return ExtendedFileInfo{
FileInfo: fi,
}
}
// Join joins any number of path elements into a single path, adding a // Join joins any number of path elements into a single path, adding a
// Separator if necessary. Join calls Clean on the result; in particular, all // Separator if necessary. Join calls Clean on the result; in particular, all
// empty strings are ignored. On Windows, the result is a UNC path if and only // empty strings are ignored. On Windows, the result is a UNC path if and only


@ -10,6 +10,8 @@ type FS interface {
OpenFile(name string, flag int, perm os.FileMode) (File, error) OpenFile(name string, flag int, perm os.FileMode) (File, error)
Stat(name string) (os.FileInfo, error) Stat(name string) (os.FileInfo, error)
Lstat(name string) (os.FileInfo, error) Lstat(name string) (os.FileInfo, error)
DeviceID(fi os.FileInfo) (deviceID uint64, err error)
ExtendedStat(fi os.FileInfo) ExtendedFileInfo
Join(elem ...string) string Join(elem ...string) string
Separator() string Separator() string
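
The effect of routing DeviceID and ExtendedStat through the interface is that callers stay generic over the backing filesystem. A sketch under that assumption (the describe helper is made up):

package main

import (
	"fmt"

	"github.com/restic/restic/internal/fs"
)

// describe works identically for fs.Local and fs.Reader, because it only
// talks to the fs.FS interface.
func describe(fsys fs.FS, name string) {
	fi, err := fsys.Lstat(name)
	if err != nil {
		fmt.Println("lstat:", err)
		return
	}
	if id, err := fsys.DeviceID(fi); err == nil {
		fmt.Printf("%s: device %d\n", name, id)
	} else {
		fmt.Printf("%s: no device ID (%v)\n", name, err)
	}
	xfi := fsys.ExtendedStat(fi)
	fmt.Printf("%s: %d bytes\n", name, xfi.FileInfo.Size())
}

func main() {
	describe(fs.Local{}, "/etc/hosts")
}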