forked from TrueCloudLab/rclone
b4216648e4
Before this change, bisync could only detect changes based on modtime, and would refuse to run if either path lacked modtime support. This made bisync unavailable for many of rclone's backends. Additionally, bisync did not account for the Fs's precision when comparing modtimes, meaning that they could only be reliably compared within the same side -- not against the opposite side. Size and checksum (even when available) were ignored completely for deltas. After this change, bisync now fully supports comparing based on any combination of size, modtime, and checksum, lifting the prior restriction on backends without modtime support. The comparison logic considers the backend's precision, hash types, and other features as appropriate. The comparison features optionally use a new --compare flag (which takes any combination of size,modtime,checksum) and even supports some combinations not otherwise supported in `sync` (like comparing all three at the same time.) By default (without the --compare flag), bisync inherits the same comparison options as `sync` (that is: size and modtime by default, unless modified with flags such as --checksum or --size-only.) If the --compare flag is set, it will override these defaults. If --compare includes checksum and both remotes support checksums but have no hash types in common with each other, checksums will be considered only for comparisons within the same side (to determine what has changed since the prior sync), but not for comparisons against the opposite side. If one side supports checksums and the other does not, checksums will only be considered on the side that supports them. When comparing with checksum and/or size without modtime, bisync cannot determine whether a file is newer or older -- only whether it is changed or unchanged. (If it is changed on both sides, bisync still does the standard equality-check to avoid declaring a sync conflict unless it absolutely has to.) 
Also included are some new flags to customize the checksum comparison behavior on backends where hashes are slow or unavailable. --no-slow-hash and --slow-hash-sync-only allow selectively ignoring checksums on backends such as local where they are slow. --download-hash allows computing them by downloading when (and only when) they're otherwise not available. Of course, this option probably won't be practical with large files, but may be a good option for syncing small-but-important files with maximum accuracy (for example, a source code repo on a crypt remote.) An additional advantage over methods like cryptcheck is that the original file is not required for comparison (for example, --download-hash can be used to bisync two different crypt remotes with different passwords.) Additionally, all of the above are now considered during the final --check-sync for much-improved accuracy (before this change, it only compared filenames!) Many other details are explained in the included docs.
259 lines
9.3 KiB
Go
259 lines
9.3 KiB
Go
// Package bisync implements bisync
|
|
// Copyright (c) 2017-2020 Chris Nelson
|
|
package bisync
|
|
|
|
import (
|
|
"context"
|
|
"crypto/md5"
|
|
"encoding/hex"
|
|
"errors"
|
|
"fmt"
|
|
"io"
|
|
"os"
|
|
"path/filepath"
|
|
"strings"
|
|
"time"
|
|
|
|
"github.com/rclone/rclone/cmd"
|
|
"github.com/rclone/rclone/cmd/bisync/bilib"
|
|
"github.com/rclone/rclone/fs"
|
|
"github.com/rclone/rclone/fs/config"
|
|
"github.com/rclone/rclone/fs/config/flags"
|
|
"github.com/rclone/rclone/fs/filter"
|
|
"github.com/rclone/rclone/fs/hash"
|
|
|
|
"github.com/spf13/cobra"
|
|
)
|
|
|
|
// TestFunc allows mocking errors during tests.
// It is installed via Options.TestFn (a test-only option).
type TestFunc func()
|
|
|
|
// Options keep bisync options.
// Most fields mirror a command-line flag registered in init;
// see the flag help strings there for the authoritative descriptions.
type Options struct {
	Resync                bool          // perform the resync run (Path1 files may overwrite Path2 versions)
	CheckAccess           bool          // abort unless the check file is found on both Path1 and Path2
	CheckFilename         string        // filename used by CheckAccess (default: DefaultCheckFilename)
	CheckSync             CheckSyncMode // when to compare final listings: true|false|only
	CreateEmptySrcDirs    bool          // sync creation and deletion of empty directories
	RemoveEmptyDirs       bool          // remove ALL empty directories at the final cleanup step
	MaxDelete             int           // percentage from 0 to 100
	Force                 bool          // bypass the MaxDelete safety check and run the sync
	FiltersFile           string        // file to read filtering patterns from
	Workdir               string        // custom working directory (default: DefaultWorkdir)
	OrigBackupDir         string        // NOTE(review): no flag sets this here -- presumably the original --backup-dir, populated elsewhere; confirm
	BackupDir1            string        // --backup-dir for Path1 (must be non-overlapping, same remote)
	BackupDir2            string        // --backup-dir for Path2 (must be non-overlapping, same remote)
	DryRun                bool          // mirrors the global --dry-run setting (copied in applyContext)
	NoCleanup             bool          // retain working files (troubleshooting/testing)
	SaveQueues            bool          // save extra debugging files (test only flag)
	IgnoreListingChecksum bool          // do not use checksums for listings
	Resilient             bool          // allow future runs to retry after certain less-serious errors
	TestFn                TestFunc      // test-only option, for mocking errors
	Retries               int           // retry operations this many times if they fail
	Compare               CompareOpt    // parsed comparison options (type declared elsewhere in the package)
	CompareFlag           string        // raw value of the --compare flag, e.g. "size,modtime,checksum"
}
|
|
|
|
// Default values
const (
	DefaultMaxDelete     int    = 50            // default deletion-percentage safety limit
	DefaultCheckFilename string = "RCLONE_TEST" // default filename for --check-access
)
|
|
|
|
// DefaultWorkdir is default working directory:
// a "bisync" subdirectory of rclone's cache directory.
var DefaultWorkdir = filepath.Join(config.GetCacheDir(), "bisync")
|
|
|
|
// CheckSyncMode controls when to compare final listings
type CheckSyncMode int

// CheckSync modes
const (
	CheckSyncTrue  CheckSyncMode = iota // Compare final listings (default)
	CheckSyncFalse                      // Disable comparison of final listings
	CheckSyncOnly                       // Only compare listings from the last run, do not sync
)

// String returns the command-line spelling of the mode,
// or "unknown" for any out-of-range value.
func (x CheckSyncMode) String() string {
	names := []string{"true", "false", "only"}
	if int(x) >= 0 && int(x) < len(names) {
		return names[x]
	}
	return "unknown"
}

// Set a CheckSync mode from a string (case-insensitive).
func (x *CheckSyncMode) Set(s string) error {
	modes := map[string]CheckSyncMode{
		"true":  CheckSyncTrue,
		"false": CheckSyncFalse,
		"only":  CheckSyncOnly,
	}
	mode, ok := modes[strings.ToLower(s)]
	if !ok {
		return fmt.Errorf("unknown check-sync mode for bisync: %q", s)
	}
	*x = mode
	return nil
}

// Type of the CheckSync value, as reported to the flag package.
func (x *CheckSyncMode) Type() string {
	return "string"
}
|
|
|
|
// Opt keeps command line options.
// It is populated by the flag bindings registered in init.
var Opt Options
|
|
|
|
// init registers the bisync command with the root command and binds
// its command-line flags to the package-level Opt.
func init() {
	// Default retry count; overridable via --retries below.
	Opt.Retries = 3
	cmd.Root.AddCommand(commandDefinition)
	cmdFlags := commandDefinition.Flags()
	// when adding new flags, remember to also update the rc params:
	// cmd/bisync/rc.go cmd/bisync/help.go (not docs/content/rc.md)
	flags.BoolVarP(cmdFlags, &Opt.Resync, "resync", "1", Opt.Resync, "Performs the resync run. Path1 files may overwrite Path2 versions. Consider using --verbose or --dry-run first.", "")
	flags.BoolVarP(cmdFlags, &Opt.CheckAccess, "check-access", "", Opt.CheckAccess, makeHelp("Ensure expected {CHECKFILE} files are found on both Path1 and Path2 filesystems, else abort."), "")
	flags.StringVarP(cmdFlags, &Opt.CheckFilename, "check-filename", "", Opt.CheckFilename, makeHelp("Filename for --check-access (default: {CHECKFILE})"), "")
	flags.BoolVarP(cmdFlags, &Opt.Force, "force", "", Opt.Force, "Bypass --max-delete safety check and run the sync. Consider using with --verbose", "")
	// CheckSync implements the flag.Value interface (Set/String/Type), hence FVarP.
	flags.FVarP(cmdFlags, &Opt.CheckSync, "check-sync", "", "Controls comparison of final listings: true|false|only (default: true)", "")
	flags.BoolVarP(cmdFlags, &Opt.CreateEmptySrcDirs, "create-empty-src-dirs", "", Opt.CreateEmptySrcDirs, "Sync creation and deletion of empty directories. (Not compatible with --remove-empty-dirs)", "")
	flags.BoolVarP(cmdFlags, &Opt.RemoveEmptyDirs, "remove-empty-dirs", "", Opt.RemoveEmptyDirs, "Remove ALL empty directories at the final cleanup step.", "")
	flags.StringVarP(cmdFlags, &Opt.FiltersFile, "filters-file", "", Opt.FiltersFile, "Read filtering patterns from a file", "")
	flags.StringVarP(cmdFlags, &Opt.Workdir, "workdir", "", Opt.Workdir, makeHelp("Use custom working dir - useful for testing. (default: {WORKDIR})"), "")
	flags.StringVarP(cmdFlags, &Opt.BackupDir1, "backup-dir1", "", Opt.BackupDir1, "--backup-dir for Path1. Must be a non-overlapping path on the same remote.", "")
	flags.StringVarP(cmdFlags, &Opt.BackupDir2, "backup-dir2", "", Opt.BackupDir2, "--backup-dir for Path2. Must be a non-overlapping path on the same remote.", "")
	// tzLocal is a package-level variable (declared elsewhere), not part of Options.
	flags.BoolVarP(cmdFlags, &tzLocal, "localtime", "", tzLocal, "Use local time in listings (default: UTC)", "")
	flags.BoolVarP(cmdFlags, &Opt.NoCleanup, "no-cleanup", "", Opt.NoCleanup, "Retain working files (useful for troubleshooting and testing).", "")
	flags.BoolVarP(cmdFlags, &Opt.IgnoreListingChecksum, "ignore-listing-checksum", "", Opt.IgnoreListingChecksum, "Do not use checksums for listings (add --ignore-checksum to additionally skip post-copy checksum checks)", "")
	flags.BoolVarP(cmdFlags, &Opt.Resilient, "resilient", "", Opt.Resilient, "Allow future runs to retry after certain less-serious errors, instead of requiring --resync. Use at your own risk!", "")
	flags.IntVarP(cmdFlags, &Opt.Retries, "retries", "", Opt.Retries, "Retry operations this many times if they fail", "")
	flags.StringVarP(cmdFlags, &Opt.CompareFlag, "compare", "", Opt.CompareFlag, "Comma-separated list of bisync-specific compare options ex. 'size,modtime,checksum' (default: 'size,modtime')", "")
	flags.BoolVarP(cmdFlags, &Opt.Compare.NoSlowHash, "no-slow-hash", "", Opt.Compare.NoSlowHash, "Ignore listing checksums only on backends where they are slow", "")
	flags.BoolVarP(cmdFlags, &Opt.Compare.SlowHashSyncOnly, "slow-hash-sync-only", "", Opt.Compare.SlowHashSyncOnly, "Ignore slow checksums for listings and deltas, but still consider them during sync calls.", "")
	flags.BoolVarP(cmdFlags, &Opt.Compare.DownloadHash, "download-hash", "", Opt.Compare.DownloadHash, "Compute hash by downloading when otherwise unavailable. (warning: may be slow and use lots of data!)", "")
}
|
|
|
|
// bisync command definition
var commandDefinition = &cobra.Command{
	Use:   "bisync remote1:path1 remote2:path2",
	Short: shortHelp,
	Long:  longHelp,
	Annotations: map[string]string{
		"versionIntroduced": "v1.58",
		"groups":            "Filter,Copy,Important",
		"status":            "Beta",
	},
	RunE: func(command *cobra.Command, args []string) error {
		cmd.CheckArgs(2, 2, command, args)
		fs1, file1, fs2, file2 := cmd.NewFsSrcDstFiles(args)
		// A non-empty file part means the argument resolved to a file
		// rather than a directory, which bisync does not support.
		if file1 != "" || file2 != "" {
			return errors.New("paths must be existing directories")
		}

		ctx := context.Background()
		// Work on a copy of the package-level options so per-run
		// adjustments do not leak into global state.
		opt := Opt
		opt.applyContext(ctx)
		if tzLocal {
			TZ = time.Local
		}

		// If a Dropbox remote is involved and the two sides share no hash
		// type, hint (at debug level) that --refresh-times may help.
		commonHashes := fs1.Hashes().Overlap(fs2.Hashes())
		isDropbox1 := strings.HasPrefix(fs1.String(), "Dropbox")
		isDropbox2 := strings.HasPrefix(fs2.String(), "Dropbox")
		if commonHashes == hash.Set(0) && (isDropbox1 || isDropbox2) {
			ci := fs.GetConfig(ctx)
			if !ci.DryRun && !ci.RefreshTimes {
				fs.Debugf(nil, "Using flag --refresh-times is recommended")
			}
		}

		fs.Logf(nil, "bisync is IN BETA. Don't use in production!")
		cmd.Run(false, true, command, func() error {
			err := Bisync(ctx, fs1, fs2, &opt)
			// An aborted run exits with code 2 rather than returning
			// through the normal error path.
			if err == ErrBisyncAborted {
				os.Exit(2)
			}
			return err
		})
		return nil
	},
}
|
|
|
|
func (opt *Options) applyContext(ctx context.Context) {
|
|
maxDelete := DefaultMaxDelete
|
|
ci := fs.GetConfig(ctx)
|
|
if ci.MaxDelete >= 0 {
|
|
maxDelete = int(ci.MaxDelete)
|
|
}
|
|
if maxDelete < 0 {
|
|
maxDelete = 0
|
|
}
|
|
if maxDelete > 100 {
|
|
maxDelete = 100
|
|
}
|
|
opt.MaxDelete = maxDelete
|
|
// reset MaxDelete for fs/operations, bisync handles this parameter specially
|
|
ci.MaxDelete = -1
|
|
opt.DryRun = ci.DryRun
|
|
}
|
|
|
|
func (opt *Options) setDryRun(ctx context.Context) context.Context {
|
|
ctxNew, ci := fs.AddConfig(ctx)
|
|
ci.DryRun = opt.DryRun
|
|
return ctxNew
|
|
}
|
|
|
|
func (opt *Options) applyFilters(ctx context.Context) (context.Context, error) {
|
|
filtersFile := opt.FiltersFile
|
|
if filtersFile == "" {
|
|
return ctx, nil
|
|
}
|
|
|
|
f, err := os.Open(filtersFile)
|
|
if err != nil {
|
|
return ctx, fmt.Errorf("specified filters file does not exist: %s", filtersFile)
|
|
}
|
|
|
|
fs.Infof(nil, "Using filters file %s", filtersFile)
|
|
hasher := md5.New()
|
|
if _, err := io.Copy(hasher, f); err != nil {
|
|
_ = f.Close()
|
|
return ctx, err
|
|
}
|
|
gotHash := hex.EncodeToString(hasher.Sum(nil))
|
|
_ = f.Close()
|
|
|
|
hashFile := filtersFile + ".md5"
|
|
wantHash, err := os.ReadFile(hashFile)
|
|
if err != nil && !opt.Resync {
|
|
return ctx, fmt.Errorf("filters file md5 hash not found (must run --resync): %s", filtersFile)
|
|
}
|
|
|
|
if gotHash != string(wantHash) && !opt.Resync {
|
|
return ctx, fmt.Errorf("filters file has changed (must run --resync): %s", filtersFile)
|
|
}
|
|
|
|
if opt.Resync {
|
|
if opt.DryRun {
|
|
fs.Infof(nil, "Skipped storing filters file hash to %s as --dry-run is set", hashFile)
|
|
} else {
|
|
fs.Infof(nil, "Storing filters file hash to %s", hashFile)
|
|
if err := os.WriteFile(hashFile, []byte(gotHash), bilib.PermSecure); err != nil {
|
|
return ctx, err
|
|
}
|
|
}
|
|
}
|
|
|
|
// Prepend our filter file first in the list
|
|
filterOpt := filter.GetConfig(ctx).Opt
|
|
filterOpt.FilterFrom = append([]string{filtersFile}, filterOpt.FilterFrom...)
|
|
newFilter, err := filter.NewFilter(&filterOpt)
|
|
if err != nil {
|
|
return ctx, fmt.Errorf("invalid filters file: %s: %w", filtersFile, err)
|
|
}
|
|
|
|
return filter.ReplaceConfig(ctx, newFilter), nil
|
|
}
|