From c0968a0987e15027513ff6fe1728c9e61245a154 Mon Sep 17 00:00:00 2001 From: nielash Date: Sun, 1 Oct 2023 05:02:56 -0400 Subject: [PATCH] operations: add logger to log list of sync results -- fixes #7282 Logger instruments the Sync routine with a status report for each file pair, making it possible to output a list of the synced files, along with their attributes and sigil categorization (match/differ/missing/etc.) It is very customizable by passing in a custom LoggerFn, options, and io.Writers to be written to. Possible uses include: - allow sync to write path lists to a file, in the same format as rclone check - allow sync to output a --dest-after file using the same format flags as lsf - receive results as JSON when calling sync from an internal function - predict the post-sync state of the destination For usage examples, see bisync.WriteResults() or sync.SyncLoggerFn() --- fs/operations/check.go | 4 +- fs/operations/logger.go | 369 ++++++++++++++++++++++++++++++++++++ fs/operations/operations.go | 52 ++++- fs/sync/sync.go | 52 ++++- fs/sync/sync_test.go | 323 +++++++++++++++++++++++++++++++ go.mod | 1 + go.sum | 1 + 7 files changed, 791 insertions(+), 11 deletions(-) create mode 100644 fs/operations/logger.go diff --git a/fs/operations/check.go b/fs/operations/check.go index 239b3a72f..328be2128 100644 --- a/fs/operations/check.go +++ b/fs/operations/check.go @@ -67,10 +67,10 @@ func (c *checkMarch) report(o fs.DirEntry, out io.Writer, sigil rune) { func (c *checkMarch) reportFilename(filename string, out io.Writer, sigil rune) { if out != nil { - syncFprintf(out, "%s\n", filename) + SyncFprintf(out, "%s\n", filename) } if c.opt.Combined != nil { - syncFprintf(c.opt.Combined, "%c %s\n", sigil, filename) + SyncFprintf(c.opt.Combined, "%c %s\n", sigil, filename) } } diff --git a/fs/operations/logger.go b/fs/operations/logger.go new file mode 100644 index 000000000..9d0e213b4 --- /dev/null +++ b/fs/operations/logger.go @@ -0,0 +1,369 @@ +package operations + 
+import ( + "bytes" + "context" + "errors" + "fmt" + "io" + + "github.com/rclone/rclone/fs" + "github.com/rclone/rclone/fs/config/flags" + "github.com/rclone/rclone/fs/hash" + "github.com/spf13/pflag" +) + +// Sigil represents the rune (-+=*!?) used by Logger to categorize files by their match/differ/missing status. +type Sigil rune + +// String converts sigil to more human-readable string +func (sigil Sigil) String() string { + switch sigil { + case '-': + return "MissingOnSrc" + case '+': + return "MissingOnDst" + case '=': + return "Match" + case '*': + return "Differ" + case '!': + return "Error" + // case '.': + // return "Completed" + case '?': + return "Other" + } + return "unknown" +} + +// Writer directs traffic from sigil -> LoggerOpt.Writer +func (sigil Sigil) Writer(opt LoggerOpt) io.Writer { + switch sigil { + case '-': + return opt.MissingOnSrc + case '+': + return opt.MissingOnDst + case '=': + return opt.Match + case '*': + return opt.Differ + case '!': + return opt.Error + } + return nil +} + +// Sigil constants +const ( + MissingOnSrc Sigil = '-' + MissingOnDst Sigil = '+' + Match Sigil = '=' + Differ Sigil = '*' + TransferError Sigil = '!' + Other Sigil = '?' // reserved but not currently used +) + +// LoggerFn uses fs.DirEntry instead of fs.Object so it can include Dirs +// For LoggerFn example, see bisync.WriteResults() or sync.SyncLoggerFn() +// Usage example: s.logger(ctx, operations.Differ, src, dst, nil) +type LoggerFn func(ctx context.Context, sigil Sigil, src, dst fs.DirEntry, err error) +type loggerContextKey struct{} +type loggerOptContextKey struct{} + +var loggerKey = loggerContextKey{} +var loggerOptKey = loggerOptContextKey{} + +// LoggerOpt contains options for the Sync Logger functions +// TODO: refactor Check in here too? +type LoggerOpt struct { + // Fdst, Fsrc fs.Fs // fses to check + // Check checkFn // function to use for checking + // OneWay bool // one way only? 
+ LoggerFn LoggerFn // function to use for logging + Combined io.Writer // a file with file names with leading sigils + MissingOnSrc io.Writer // files only in the destination + MissingOnDst io.Writer // files only in the source + Match io.Writer // matching files + Differ io.Writer // differing files + Error io.Writer // files with errors of some kind + DestAfter io.Writer // files that exist on the destination post-sync + JSON *bytes.Buffer // used by bisync to read/write struct as JSON + DeleteModeOff bool //affects whether Logger expects MissingOnSrc to be deleted + + // lsf options for destAfter + ListFormat ListFormat + JSONOpt ListJSONOpt + LJ *listJSON + Format string + TimeFormat string + Separator string + DirSlash bool + // Recurse bool + HashType hash.Type + FilesOnly bool + DirsOnly bool + Csv bool + Absolute bool +} + +// AddLoggerFlags adds the logger flags to the cmdFlags command +func AddLoggerFlags(cmdFlags *pflag.FlagSet, opt *LoggerOpt, combined, missingOnSrc, missingOnDst, match, differ, errFile, destAfter *string) { + flags.StringVarP(cmdFlags, combined, "combined", "", *combined, "Make a combined report of changes to this file", "Sync") + flags.StringVarP(cmdFlags, missingOnSrc, "missing-on-src", "", *missingOnSrc, "Report all files missing from the source to this file", "Sync") + flags.StringVarP(cmdFlags, missingOnDst, "missing-on-dst", "", *missingOnDst, "Report all files missing from the destination to this file", "Sync") + flags.StringVarP(cmdFlags, match, "match", "", *match, "Report all matching files to this file", "Sync") + flags.StringVarP(cmdFlags, differ, "differ", "", *differ, "Report all non-matching files to this file", "Sync") + flags.StringVarP(cmdFlags, errFile, "error", "", *errFile, "Report all files with errors (hashing or reading) to this file", "Sync") + flags.StringVarP(cmdFlags, destAfter, "dest-after", "", *destAfter, "Report all files that exist on the dest post-sync", "Sync") + + // lsf flags for destAfter + 
flags.StringVarP(cmdFlags, &opt.Format, "format", "F", "p", "Output format - see lsf help for details", "Sync") + flags.StringVarP(cmdFlags, &opt.Separator, "separator", "s", ";", "Separator for the items in the format", "Sync") + flags.BoolVarP(cmdFlags, &opt.DirSlash, "dir-slash", "d", true, "Append a slash to directory names", "Sync") + flags.FVarP(cmdFlags, &opt.HashType, "hash", "", "Use this hash when `h` is used in the format MD5|SHA-1|DropboxHash", "Sync") + flags.BoolVarP(cmdFlags, &opt.FilesOnly, "files-only", "", true, "Only list files", "Sync") + flags.BoolVarP(cmdFlags, &opt.DirsOnly, "dirs-only", "", false, "Only list directories", "Sync") + flags.BoolVarP(cmdFlags, &opt.Csv, "csv", "", false, "Output in CSV format", "Sync") + flags.BoolVarP(cmdFlags, &opt.Absolute, "absolute", "", false, "Put a leading / in front of path names", "Sync") + // flags.BoolVarP(cmdFlags, &recurse, "recursive", "R", false, "Recurse into the listing", "") +} + +// WithLogger stores logger in ctx and returns a copy of ctx in which loggerKey = logger +func WithLogger(ctx context.Context, logger LoggerFn) context.Context { + return context.WithValue(ctx, loggerKey, logger) +} + +// WithLoggerOpt stores loggerOpt in ctx and returns a copy of ctx in which loggerOptKey = loggerOpt +func WithLoggerOpt(ctx context.Context, loggerOpt LoggerOpt) context.Context { + return context.WithValue(ctx, loggerOptKey, loggerOpt) +} + +// GetLogger attempts to retrieve LoggerFn from context, returns it if found, otherwise returns no-op function +func GetLogger(ctx context.Context) (LoggerFn, bool) { + logger, ok := ctx.Value(loggerKey).(LoggerFn) + if !ok { + logger = func(ctx context.Context, sigil Sigil, src, dst fs.DirEntry, err error) {} + } + return logger, ok +} + +// GetLoggerOpt attempts to retrieve LoggerOpt from context, returns it if found, otherwise returns NewLoggerOpt() +func GetLoggerOpt(ctx context.Context) LoggerOpt { + loggerOpt, ok := ctx.Value(loggerOptKey).(LoggerOpt) + if 
ok { + return loggerOpt + } + return NewLoggerOpt() +} + +// WithSyncLogger starts a new logger with the options passed in and saves it to ctx for retrieval later +func WithSyncLogger(ctx context.Context, opt LoggerOpt) context.Context { + ctx = WithLoggerOpt(ctx, opt) + return WithLogger(ctx, func(ctx context.Context, sigil Sigil, src, dst fs.DirEntry, err error) { + if opt.LoggerFn != nil { + opt.LoggerFn(ctx, sigil, src, dst, err) + } else { + SyncFprintf(opt.Combined, "%c %s\n", sigil, dst.Remote()) + } + }) +} + +// NewLoggerOpt returns a new LoggerOpt struct with defaults +func NewLoggerOpt() LoggerOpt { + opt := LoggerOpt{ + Combined: new(bytes.Buffer), + MissingOnSrc: new(bytes.Buffer), + MissingOnDst: new(bytes.Buffer), + Match: new(bytes.Buffer), + Differ: new(bytes.Buffer), + Error: new(bytes.Buffer), + DestAfter: new(bytes.Buffer), + JSON: new(bytes.Buffer), + } + return opt +} + +// Winner predicts which side (src or dst) should end up winning out on the dst. +type Winner struct { + Obj fs.DirEntry // the object that should exist on dst post-sync, if any + Side string // whether the winning object was from the src or dst + Err error // whether there's an error preventing us from predicting winner correctly (not whether there was a sync error more generally) +} + +// WinningSide can be called in a LoggerFn to predict what the dest will look like post-sync +// +// This attempts to account for every case in which dst (intentionally) does not match src after a sync. 
+// +// Known issues / cases we can't confidently predict yet: +// +// --max-duration / CutoffModeHard +// --compare-dest / --copy-dest (because equal() is called multiple times for the same file) +// server-side moves of an entire dir at once (because we never get the individual file objects in the dir) +// High-level retries, because there would be dupes (use --retries 1 to disable) +// Possibly some error scenarios +func WinningSide(ctx context.Context, sigil Sigil, src, dst fs.DirEntry, err error) Winner { + winner := Winner{nil, "none", nil} + opt := GetLoggerOpt(ctx) + ci := fs.GetConfig(ctx) + + if err == fs.ErrorIsDir { + winner.Err = err + if sigil == MissingOnSrc { + if (opt.DeleteModeOff || ci.DryRun) && dst != nil { + winner.Obj = dst + winner.Side = "dst" // whatever's on dst will remain so after DryRun + return winner + } + return winner // none, because dst should just get deleted + } + if sigil == MissingOnDst && ci.DryRun { + return winner // none, because it does not currently exist on dst, and will still not exist after DryRun + } else if ci.DryRun && dst != nil { + winner.Obj = dst + winner.Side = "dst" + } else if src != nil { + winner.Obj = src + winner.Side = "src" + } + return winner + } + + _, srcOk := src.(fs.Object) + _, dstOk := dst.(fs.Object) + if !srcOk && !dstOk { + return winner // none, because we don't have enough info to continue. + } + + switch sigil { + case MissingOnSrc: + if opt.DeleteModeOff || ci.DryRun { // i.e. 
it's a copy, not sync (or it's a DryRun) + winner.Obj = dst + winner.Side = "dst" // whatever's on dst will remain so after DryRun + return winner + } + return winner // none, because dst should just get deleted + case Match, Differ, MissingOnDst: + if sigil == MissingOnDst && ci.DryRun { + return winner // none, because it does not currently exist on dst, and will still not exist after DryRun + } + winner.Obj = src + winner.Side = "src" // presume dst will end up matching src unless changed below + if sigil == Match && (ci.SizeOnly || ci.CheckSum || ci.IgnoreSize || ci.UpdateOlder || ci.NoUpdateModTime) { + winner.Obj = dst + winner.Side = "dst" // ignore any differences with src because of user flags + } + if ci.IgnoreTimes { + winner.Obj = src + winner.Side = "src" // copy src to dst unconditionally + } + if (sigil == Match || sigil == Differ) && (ci.IgnoreExisting || ci.Immutable) { + winner.Obj = dst + winner.Side = "dst" // dst should remain unchanged if it already exists (and we know it does because it's Match or Differ) + } + if ci.DryRun { + winner.Obj = dst + winner.Side = "dst" // dst should remain unchanged after DryRun (note that we handled MissingOnDst earlier) + } + return winner + case TransferError: + winner.Obj = dst + winner.Side = "dst" // usually, dst should not change if there's an error + if dst == nil { + winner.Obj = src + winner.Side = "src" // but if for some reason we have a src and not a dst, go with it + } + if winner.Obj != nil { + if errors.Is(err, context.DeadlineExceeded) || errors.Is(err, errors.New("max transfer duration reached as set by --max-duration")) { + winner.Err = err // we can't confidently predict what survives if CutoffModeHard + } + return winner // we know at least one of the objects + } + } + // should only make it this far if it's TransferError and both src and dst are nil + winner.Side = "none" + winner.Err = fmt.Errorf("unknown case -- can't determine winner. 
%v", err) + fs.Debugf(winner.Obj, "%v", winner.Err) + return winner +} + +// SetListFormat sets opt.ListFormat for destAfter +// TODO: possibly refactor duplicate code from cmd/lsf, where this is mostly copied from +func (opt *LoggerOpt) SetListFormat(ctx context.Context, cmdFlags *pflag.FlagSet) { + // Work out if the separatorFlag was supplied or not + separatorFlag := cmdFlags.Lookup("separator") + separatorFlagSupplied := separatorFlag != nil && separatorFlag.Changed + // Default the separator to , if using CSV + if opt.Csv && !separatorFlagSupplied { + opt.Separator = "," + } + + var list ListFormat + list.SetSeparator(opt.Separator) + list.SetCSV(opt.Csv) + list.SetDirSlash(opt.DirSlash) + list.SetAbsolute(opt.Absolute) + var JSONOpt = ListJSONOpt{ + NoModTime: true, + NoMimeType: true, + DirsOnly: opt.DirsOnly, + FilesOnly: opt.FilesOnly, + // Recurse: opt.Recurse, + } + + for _, char := range opt.Format { + switch char { + case 'p': + list.AddPath() + case 't': + list.AddModTime(opt.TimeFormat) + JSONOpt.NoModTime = false + case 's': + list.AddSize() + case 'h': + list.AddHash(opt.HashType) + JSONOpt.ShowHash = true + JSONOpt.HashTypes = []string{opt.HashType.String()} + case 'i': + list.AddID() + case 'm': + list.AddMimeType() + JSONOpt.NoMimeType = false + case 'e': + list.AddEncrypted() + JSONOpt.ShowEncrypted = true + case 'o': + list.AddOrigID() + JSONOpt.ShowOrigIDs = true + case 'T': + list.AddTier() + case 'M': + list.AddMetadata() + JSONOpt.Metadata = true + default: + fs.Errorf(nil, "unknown format character %q", char) + } + } + opt.ListFormat = list + opt.JSONOpt = JSONOpt +} + +// NewListJSON makes a new *listJSON for destAfter +func (opt *LoggerOpt) NewListJSON(ctx context.Context, fdst fs.Fs, remote string) { + opt.LJ, _ = newListJSON(ctx, fdst, remote, &opt.JSONOpt) + fs.Debugf(nil, "%v", opt.LJ) +} + +// JSONEntry returns a *ListJSONItem for destAfter +func (opt *LoggerOpt) JSONEntry(ctx context.Context, entry fs.DirEntry) (*ListJSONItem, 
error) { + return opt.LJ.entry(ctx, entry) +} + +// PrintDestAfter writes a *ListJSONItem to opt.DestAfter +func (opt *LoggerOpt) PrintDestAfter(ctx context.Context, sigil Sigil, src, dst fs.DirEntry, err error) { + entry := WinningSide(ctx, sigil, src, dst, err) + if entry.Obj != nil { + JSONEntry, _ := opt.JSONEntry(ctx, entry.Obj) + _, _ = fmt.Fprintln(opt.DestAfter, opt.ListFormat.Format(JSONEntry)) + } +} diff --git a/fs/operations/operations.go b/fs/operations/operations.go index d426c673c..efa3d4bab 100644 --- a/fs/operations/operations.go +++ b/fs/operations/operations.go @@ -180,12 +180,15 @@ func logModTimeUpload(dst fs.Object) { func equal(ctx context.Context, src fs.ObjectInfo, dst fs.Object, opt equalOpt) bool { ci := fs.GetConfig(ctx) + logger, _ := GetLogger(ctx) if sizeDiffers(ctx, src, dst) { fs.Debugf(src, "Sizes differ (src %d vs dst %d)", src.Size(), dst.Size()) + logger(ctx, Differ, src, dst, nil) return false } if opt.sizeOnly { fs.Debugf(src, "Sizes identical") + logger(ctx, Match, src, dst, nil) return true } @@ -197,6 +200,7 @@ func equal(ctx context.Context, src fs.ObjectInfo, dst fs.Object, opt equalOpt) same, ht, _ := CheckHashes(ctx, src, dst) if !same { fs.Debugf(src, "%v differ", ht) + logger(ctx, Differ, src, dst, nil) return false } if ht == hash.None { @@ -210,6 +214,7 @@ func equal(ctx context.Context, src fs.ObjectInfo, dst fs.Object, opt equalOpt) } else { fs.Debugf(src, "Size and %v of src and dst objects identical", ht) } + logger(ctx, Match, src, dst, nil) return true } @@ -219,12 +224,14 @@ func equal(ctx context.Context, src fs.ObjectInfo, dst fs.Object, opt equalOpt) modifyWindow := fs.GetModifyWindow(ctx, src.Fs(), dst.Fs()) if modifyWindow == fs.ModTimeNotSupported { fs.Debugf(src, "Sizes identical") + logger(ctx, Match, src, dst, nil) return true } dstModTime := dst.ModTime(ctx) dt := dstModTime.Sub(srcModTime) if dt < modifyWindow && dt > -modifyWindow { fs.Debugf(src, "Size and modification time the same (differ by 
%s, within tolerance %s)", dt, modifyWindow) + logger(ctx, Match, src, dst, nil) return true } @@ -235,10 +242,12 @@ func equal(ctx context.Context, src fs.ObjectInfo, dst fs.Object, opt equalOpt) same, ht, _ := CheckHashes(ctx, src, dst) if !same { fs.Debugf(src, "%v differ", ht) + logger(ctx, Differ, src, dst, nil) return false } if ht == hash.None && !ci.RefreshTimes { // if couldn't check hash, return that they differ + logger(ctx, Differ, src, dst, nil) return false } @@ -249,6 +258,7 @@ func equal(ctx context.Context, src fs.ObjectInfo, dst fs.Object, opt equalOpt) // Error if objects are treated as immutable if ci.Immutable { fs.Errorf(dst, "Timestamp mismatch between immutable objects") + logger(ctx, Differ, src, dst, nil) return false } // Update the mtime of the dst object here @@ -256,6 +266,7 @@ func equal(ctx context.Context, src fs.ObjectInfo, dst fs.Object, opt equalOpt) if errors.Is(err, fs.ErrorCantSetModTime) { logModTimeUpload(dst) fs.Infof(dst, "src and dst identical but can't set mod time without re-uploading") + logger(ctx, Differ, src, dst, nil) return false } else if errors.Is(err, fs.ErrorCantSetModTimeWithoutDelete) { logModTimeUpload(dst) @@ -268,6 +279,7 @@ func equal(ctx context.Context, src fs.ObjectInfo, dst fs.Object, opt equalOpt) fs.Errorf(dst, "failed to delete before re-upload: %v", err) } } + logger(ctx, Differ, src, dst, nil) return false } else if err != nil { err = fs.CountError(err) @@ -277,6 +289,7 @@ func equal(ctx context.Context, src fs.ObjectInfo, dst fs.Object, opt equalOpt) } } } + logger(ctx, Match, src, dst, nil) return true } @@ -513,6 +526,8 @@ func DeleteFilesWithBackupDir(ctx context.Context, toBeDeleted fs.ObjectsChan, b err := DeleteFileWithBackupDir(ctx, dst, backupDir) if err != nil { errorCount.Add(1) + logger, _ := GetLogger(ctx) + logger(ctx, TransferError, nil, dst, err) if fserrors.IsFatalError(err) { fs.Errorf(dst, "Got fatal error on delete: %s", err) fatalErrorCount.Add(1) @@ -670,12 +685,12 @@ var 
SyncPrintf = func(format string, a ...interface{}) { fmt.Printf(format, a...) } -// Synchronized fmt.Fprintf +// SyncFprintf - Synchronized fmt.Fprintf // // Ignores errors from Fprintf. // // Prints to stdout if w is nil -func syncFprintf(w io.Writer, format string, a ...interface{}) { +func SyncFprintf(w io.Writer, format string, a ...interface{}) { if w == nil || w == os.Stdout { SyncPrintf(format, a...) } else { @@ -747,7 +762,7 @@ func CountStringField(count int64, humanReadable bool, rawWidth int) string { func List(ctx context.Context, f fs.Fs, w io.Writer) error { ci := fs.GetConfig(ctx) return ListFn(ctx, f, func(o fs.Object) { - syncFprintf(w, "%s %s\n", SizeStringField(o.Size(), ci.HumanReadable, 9), o.Remote()) + SyncFprintf(w, "%s %s\n", SizeStringField(o.Size(), ci.HumanReadable, 9), o.Remote()) }) } @@ -764,7 +779,7 @@ func ListLong(ctx context.Context, f fs.Fs, w io.Writer) error { tr.Done(ctx, nil) }() modTime := o.ModTime(ctx) - syncFprintf(w, "%s %s %s\n", SizeStringField(o.Size(), ci.HumanReadable, 9), modTime.Local().Format("2006-01-02 15:04:05.000000000"), o.Remote()) + SyncFprintf(w, "%s %s %s\n", SizeStringField(o.Size(), ci.HumanReadable, 9), modTime.Local().Format("2006-01-02 15:04:05.000000000"), o.Remote()) }) } @@ -864,7 +879,7 @@ func HashLister(ctx context.Context, ht hash.Type, outputBase64 bool, downloadFl fs.Errorf(o, "%v", fs.CountError(err)) return } - syncFprintf(w, "%*s %s\n", width, sum, o.Remote()) + SyncFprintf(w, "%*s %s\n", width, sum, o.Remote()) }() }) wg.Wait() @@ -889,7 +904,7 @@ func HashSumStream(ht hash.Type, outputBase64 bool, in io.ReadCloser, w io.Write return fmt.Errorf("hasher returned an error: %w", err) } width := hash.Width(ht, outputBase64) - syncFprintf(w, "%*s -\n", width, sum) + SyncFprintf(w, "%*s -\n", width, sum) return nil } @@ -925,7 +940,7 @@ func ListDir(ctx context.Context, f fs.Fs, w io.Writer) error { return walk.ListR(ctx, f, "", false, ConfigMaxDepth(ctx, false), walk.ListDirs, func(entries 
fs.DirEntries) error { entries.ForDir(func(dir fs.Directory) { if dir != nil { - syncFprintf(w, "%s %13s %s %s\n", SizeStringField(dir.Size(), ci.HumanReadable, 12), dir.ModTime(ctx).Local().Format("2006-01-02 15:04:05"), CountStringField(dir.Items(), ci.HumanReadable, 9), dir.Remote()) + SyncFprintf(w, "%s %13s %s %s\n", SizeStringField(dir.Size(), ci.HumanReadable, 12), dir.ModTime(ctx).Local().Format("2006-01-02 15:04:05"), CountStringField(dir.Items(), ci.HumanReadable, 9), dir.Remote()) } }) return nil @@ -1494,18 +1509,22 @@ func CompareOrCopyDest(ctx context.Context, fdst fs.Fs, dst, src fs.Object, Comp // transferred or not. func NeedTransfer(ctx context.Context, dst, src fs.Object) bool { ci := fs.GetConfig(ctx) + logger, _ := GetLogger(ctx) if dst == nil { fs.Debugf(src, "Need to transfer - File not found at Destination") + logger(ctx, MissingOnDst, src, nil, nil) return true } // If we should ignore existing files, don't transfer if ci.IgnoreExisting { fs.Debugf(src, "Destination exists, skipping") + logger(ctx, Match, src, dst, nil) return false } // If we should upload unconditionally if ci.IgnoreTimes { fs.Debugf(src, "Transferring unconditionally as --ignore-times is in use") + logger(ctx, Differ, src, dst, nil) return true } // If UpdateOlder is in effect, skip if dst is newer than src @@ -1524,6 +1543,7 @@ func NeedTransfer(ctx context.Context, dst, src fs.Object) bool { switch { case dt >= modifyWindow: fs.Debugf(src, "Destination is newer than source, skipping") + logger(ctx, Match, src, dst, nil) return false case dt <= -modifyWindow: // force --checksum on for the check and do update modtimes by default @@ -1707,10 +1727,16 @@ func MoveBackupDir(ctx context.Context, backupDir fs.Fs, dst fs.Object) (err err // moveOrCopyFile moves or copies a single file possibly to a new name func moveOrCopyFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName string, srcFileName string, cp bool) (err error) { ci := fs.GetConfig(ctx) + logger, 
usingLogger := GetLogger(ctx) dstFilePath := path.Join(fdst.Root(), dstFileName) srcFilePath := path.Join(fsrc.Root(), srcFileName) if fdst.Name() == fsrc.Name() && dstFilePath == srcFilePath { fs.Debugf(fdst, "don't need to copy/move %s, it is already at target location", dstFileName) + if usingLogger { + srcObj, _ := fsrc.NewObject(ctx, srcFileName) + dstObj, _ := fsrc.NewObject(ctx, dstFileName) + logger(ctx, Match, srcObj, dstObj, nil) + } return nil } @@ -1723,6 +1749,7 @@ func moveOrCopyFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName str // Find src object srcObj, err := fsrc.NewObject(ctx, srcFileName) if err != nil { + logger(ctx, TransferError, srcObj, nil, err) return err } @@ -1733,6 +1760,7 @@ func moveOrCopyFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName str if errors.Is(err, fs.ErrorObjectNotFound) { dstObj = nil } else if err != nil { + logger(ctx, TransferError, nil, dstObj, err) return err } } @@ -1744,11 +1772,13 @@ func moveOrCopyFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName str if !cp && fdst.Name() == fsrc.Name() && fdst.Features().CaseInsensitive && dstFileName != srcFileName && strings.EqualFold(dstFilePath, srcFilePath) { // Create random name to temporarily move file to tmpObjName := dstFileName + "-rclone-move-" + random.String(8) - _, err := fdst.NewObject(ctx, tmpObjName) + tmpObjFail, err := fdst.NewObject(ctx, tmpObjName) if err != fs.ErrorObjectNotFound { if err == nil { + logger(ctx, TransferError, nil, tmpObjFail, err) return errors.New("found an already existing file with a randomly generated name. 
Try the operation again") } + logger(ctx, TransferError, nil, tmpObjFail, err) return fmt.Errorf("error while attempting to move file to a temporary location: %w", err) } tr := accounting.Stats(ctx).NewTransfer(srcObj) @@ -1757,9 +1787,11 @@ func moveOrCopyFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName str }() tmpObj, err := Op(ctx, fdst, nil, tmpObjName, srcObj) if err != nil { + logger(ctx, TransferError, srcObj, tmpObj, err) return fmt.Errorf("error while moving file to temporary location: %w", err) } _, err = Op(ctx, fdst, nil, dstFileName, tmpObj) + logger(ctx, MissingOnDst, tmpObj, nil, err) return err } @@ -1797,9 +1829,11 @@ func moveOrCopyFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName str if dstObj != nil && backupDir != nil { err = MoveBackupDir(ctx, backupDir, dstObj) if err != nil { + logger(ctx, TransferError, dstObj, nil, err) return fmt.Errorf("moving to --backup-dir failed: %w", err) } // If successful zero out the dstObj as it is no longer there + logger(ctx, MissingOnDst, dstObj, nil, nil) dstObj = nil } @@ -1808,8 +1842,10 @@ func moveOrCopyFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName str if !cp { if ci.IgnoreExisting { fs.Debugf(srcObj, "Not removing source file as destination file exists and --ignore-existing is set") + logger(ctx, Match, srcObj, dstObj, nil) } else { err = DeleteFile(ctx, srcObj) + logger(ctx, Differ, srcObj, dstObj, nil) } } } diff --git a/fs/sync/sync.go b/fs/sync/sync.go index 18a8b67b7..9154171c5 100644 --- a/fs/sync/sync.go +++ b/fs/sync/sync.go @@ -82,6 +82,8 @@ type syncCopyMove struct { backupDir fs.Fs // place to store overwrites/deletes checkFirst bool // if set run all the checkers before starting transfers maxDurationEndTime time.Time // end time if --max-duration is set + logger operations.LoggerFn // LoggerFn used to report the results of a sync (or bisync) to an io.Writer + usingLogger bool // whether we are using logger } type trackRenamesStrategy byte @@ -135,6 
+137,16 @@ func newSyncCopyMove(ctx context.Context, fdst, fsrc fs.Fs, deleteMode fs.Delete trackRenamesCh: make(chan fs.Object, ci.Checkers), checkFirst: ci.CheckFirst, } + + s.logger, s.usingLogger = operations.GetLogger(ctx) + + if deleteMode == fs.DeleteModeOff { + loggerOpt := operations.GetLoggerOpt(ctx) + loggerOpt.DeleteModeOff = true + loggerOpt.LoggerFn = s.logger + ctx = operations.WithLoggerOpt(ctx, loggerOpt) + } + backlog := ci.MaxBacklog if s.checkFirst { fs.Infof(s.fdst, "Running all checks before starting transfers") @@ -345,6 +357,7 @@ func (s *syncCopyMove) pairChecker(in *pipe, out *pipe, fraction int, wg *sync.W NoNeedTransfer, err := operations.CompareOrCopyDest(s.ctx, s.fdst, pair.Dst, pair.Src, s.compareCopyDest, s.backupDir) if err != nil { s.processError(err) + s.logger(s.ctx, operations.TransferError, pair.Src, pair.Dst, err) } if NoNeedTransfer { needTransfer = false @@ -362,6 +375,7 @@ func (s *syncCopyMove) pairChecker(in *pipe, out *pipe, fraction int, wg *sync.W err := operations.MoveBackupDir(s.ctx, s.backupDir, pair.Dst) if err != nil { s.processError(err) + s.logger(s.ctx, operations.TransferError, pair.Src, pair.Dst, err) } else { // If successful zero out the dst as it is no longer there and copy the file pair.Dst = nil @@ -394,7 +408,9 @@ func (s *syncCopyMove) pairChecker(in *pipe, out *pipe, fraction int, wg *sync.W return } } else { - s.processError(operations.DeleteFile(s.ctx, src)) + deleteFileErr := operations.DeleteFile(s.ctx, src) + s.processError(deleteFileErr) + s.logger(s.ctx, operations.TransferError, pair.Src, pair.Dst, deleteFileErr) } } } @@ -446,6 +462,9 @@ func (s *syncCopyMove) pairCopyOrMove(ctx context.Context, in *pipe, fdst fs.Fs, _, err = operations.Copy(ctx, fdst, dst, src.Remote(), src) } s.processError(err) + if err != nil { + s.logger(ctx, operations.TransferError, src, dst, err) + } } } @@ -556,6 +575,16 @@ func (s *syncCopyMove) stopDeleters() { func (s *syncCopyMove) deleteFiles(checkSrcMap bool) 
error { if accounting.Stats(s.ctx).Errored() && !s.ci.IgnoreErrors { fs.Errorf(s.fdst, "%v", fs.ErrorNotDeleting) + // log all deletes as errors + for remote, o := range s.dstFiles { + if checkSrcMap { + _, exists := s.srcFiles[remote] + if exists { + continue + } + } + s.logger(s.ctx, operations.TransferError, nil, o, fs.ErrorNotDeleting) + } return fs.ErrorNotDeleting } @@ -967,10 +996,23 @@ func (s *syncCopyMove) run() error { // DstOnly have an object which is in the destination only func (s *syncCopyMove) DstOnly(dst fs.DirEntry) (recurse bool) { if s.deleteMode == fs.DeleteModeOff { + if s.usingLogger { + switch x := dst.(type) { + case fs.Object: + s.logger(s.ctx, operations.MissingOnSrc, nil, x, nil) + case fs.Directory: + // it's a directory that we'd normally skip, because we're not deleting anything on the dest + // however, to make sure every file is logged, we need to list it, so we need to return true here. + // we skip this when not using logger. + s.logger(s.ctx, operations.MissingOnSrc, nil, dst, fs.ErrorIsDir) + return true + } + } return false } switch x := dst.(type) { case fs.Object: + s.logger(s.ctx, operations.MissingOnSrc, nil, x, nil) switch s.deleteMode { case fs.DeleteModeAfter: // record object as needs deleting @@ -992,6 +1034,7 @@ func (s *syncCopyMove) DstOnly(dst fs.DirEntry) (recurse bool) { if s.fdst.Features().CanHaveEmptyDirectories { s.dstEmptyDirsMu.Lock() s.dstEmptyDirs[dst.Remote()] = dst + s.logger(s.ctx, operations.MissingOnSrc, nil, dst, fs.ErrorIsDir) s.dstEmptyDirsMu.Unlock() } return true @@ -1009,6 +1052,7 @@ func (s *syncCopyMove) SrcOnly(src fs.DirEntry) (recurse bool) { } switch x := src.(type) { case fs.Object: + s.logger(s.ctx, operations.MissingOnDst, x, nil, nil) // If it's a copy operation, // remove parent directory from srcEmptyDirs // since it's not really empty @@ -1028,6 +1072,7 @@ func (s *syncCopyMove) SrcOnly(src fs.DirEntry) (recurse bool) { NoNeedTransfer, err := operations.CompareOrCopyDest(s.ctx, 
s.fdst, nil, x, s.compareCopyDest, s.backupDir) if err != nil { s.processError(err) + s.logger(s.ctx, operations.TransferError, x, nil, err) } if !NoNeedTransfer { // No need to check since doesn't exist @@ -1044,6 +1089,7 @@ func (s *syncCopyMove) SrcOnly(src fs.DirEntry) (recurse bool) { s.srcEmptyDirsMu.Lock() s.srcParentDirCheck(src) s.srcEmptyDirs[src.Remote()] = src + s.logger(s.ctx, operations.MissingOnDst, src, nil, fs.ErrorIsDir) s.srcEmptyDirsMu.Unlock() return true default: @@ -1065,6 +1111,7 @@ func (s *syncCopyMove) Match(ctx context.Context, dst, src fs.DirEntry) (recurse } dstX, ok := dst.(fs.Object) if ok { + // No logger here because we'll handle it in equal() ok = s.toBeChecked.Put(s.inCtx, fs.ObjectPair{Src: srcX, Dst: dstX}) if !ok { return false @@ -1074,11 +1121,13 @@ func (s *syncCopyMove) Match(ctx context.Context, dst, src fs.DirEntry) (recurse err := errors.New("can't overwrite directory with file") fs.Errorf(dst, "%v", err) s.processError(err) + s.logger(ctx, operations.TransferError, srcX, dstX, err) } case fs.Directory: // Do the same thing to the entire contents of the directory _, ok := dst.(fs.Directory) if ok { + s.logger(s.ctx, operations.Match, src, dst, fs.ErrorIsDir) // Only record matched (src & dst) empty dirs when performing move if s.DoMove { // Record the src directory for deletion @@ -1094,6 +1143,7 @@ func (s *syncCopyMove) Match(ctx context.Context, dst, src fs.DirEntry) (recurse err := errors.New("can't overwrite file with directory") fs.Errorf(dst, "%v", err) s.processError(err) + s.logger(ctx, operations.TransferError, src.(fs.ObjectInfo), dst.(fs.ObjectInfo), err) default: panic("Bad object in DirEntries") } diff --git a/fs/sync/sync_test.go b/fs/sync/sync_test.go index f24a783ed..e757af77c 100644 --- a/fs/sync/sync_test.go +++ b/fs/sync/sync_test.go @@ -3,14 +3,20 @@ package sync import ( + "bytes" "context" "errors" "fmt" + "os" + "os/exec" "runtime" + "sort" "strings" "testing" "time" + mutex "sync" // renamed as 
"sync" already in use + _ "github.com/rclone/rclone/backend/all" // import all backends "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/accounting" @@ -45,7 +51,9 @@ func TestCopyWithDryRun(t *testing.T) { r.Mkdir(ctx, r.Fremote) ci.DryRun = true + ctx = predictDstFromLogger(ctx) err := CopyDir(ctx, r.Fremote, r.Flocal, false) + testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) // error expected here because dry-run require.NoError(t, err) r.CheckLocalItems(t, file1) @@ -59,8 +67,10 @@ func TestCopy(t *testing.T) { file1 := r.WriteFile("sub dir/hello world", "hello world", t1) r.Mkdir(ctx, r.Fremote) + ctx = predictDstFromLogger(ctx) err := CopyDir(ctx, r.Fremote, r.Flocal, false) require.NoError(t, err) + testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) r.CheckLocalItems(t, file1) r.CheckRemoteItems(t, file1) @@ -76,8 +86,10 @@ func TestCopyMissingDirectory(t *testing.T) { t.Fatal(err) } + ctx = predictDstFromLogger(ctx) err = CopyDir(ctx, r.Fremote, nonExistingFs, false) require.Error(t, err) + testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) } // Now with --no-traverse @@ -90,8 +102,10 @@ func TestCopyNoTraverse(t *testing.T) { file1 := r.WriteFile("sub dir/hello world", "hello world", t1) + ctx = predictDstFromLogger(ctx) err := CopyDir(ctx, r.Fremote, r.Flocal, false) require.NoError(t, err) + testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) r.CheckLocalItems(t, file1) r.CheckRemoteItems(t, file1) @@ -107,8 +121,10 @@ func TestCopyCheckFirst(t *testing.T) { file1 := r.WriteFile("sub dir/hello world", "hello world", t1) + ctx = predictDstFromLogger(ctx) err := CopyDir(ctx, r.Fremote, r.Flocal, false) require.NoError(t, err) + testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) r.CheckLocalItems(t, file1) r.CheckRemoteItems(t, file1) @@ -125,8 +141,10 @@ func TestSyncNoTraverse(t *testing.T) { file1 := r.WriteFile("sub dir/hello world", "hello world", 
t1) accounting.GlobalStats().ResetCounters() + ctx = predictDstFromLogger(ctx) err := Sync(ctx, r.Fremote, r.Flocal, false) require.NoError(t, err) + testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) r.CheckLocalItems(t, file1) r.CheckRemoteItems(t, file1) @@ -143,8 +161,10 @@ func TestCopyWithDepth(t *testing.T) { // Check the MaxDepth too ci.MaxDepth = 1 + ctx = predictDstFromLogger(ctx) err := CopyDir(ctx, r.Fremote, r.Flocal, false) require.NoError(t, err) + testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) r.CheckLocalItems(t, file1, file2) r.CheckRemoteItems(t, file2) @@ -169,8 +189,10 @@ func testCopyWithFilesFrom(t *testing.T, noTraverse bool) { ci.NoTraverse = noTraverse + ctx = predictDstFromLogger(ctx) err = CopyDir(ctx, r.Fremote, r.Flocal, false) require.NoError(t, err) + testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) r.CheckLocalItems(t, file1, file2) r.CheckRemoteItems(t, file1) @@ -187,8 +209,10 @@ func TestCopyEmptyDirectories(t *testing.T) { require.NoError(t, err) r.Mkdir(ctx, r.Fremote) + ctx = predictDstFromLogger(ctx) err = CopyDir(ctx, r.Fremote, r.Flocal, true) require.NoError(t, err) + testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) r.CheckRemoteListing( t, @@ -211,8 +235,10 @@ func TestMoveEmptyDirectories(t *testing.T) { require.NoError(t, err) r.Mkdir(ctx, r.Fremote) + ctx = predictDstFromLogger(ctx) err = MoveDir(ctx, r.Fremote, r.Flocal, false, true) require.NoError(t, err) + testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) r.CheckRemoteListing( t, @@ -235,8 +261,10 @@ func TestSyncEmptyDirectories(t *testing.T) { require.NoError(t, err) r.Mkdir(ctx, r.Fremote) + ctx = predictDstFromLogger(ctx) err = Sync(ctx, r.Fremote, r.Flocal, true) require.NoError(t, err) + testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) r.CheckRemoteListing( t, @@ -262,8 +290,10 @@ func TestServerSideCopy(t *testing.T) { defer 
finaliseCopy() t.Logf("Server side copy (if possible) %v -> %v", r.Fremote, FremoteCopy) + ctx = predictDstFromLogger(ctx) err = CopyDir(ctx, FremoteCopy, r.Fremote, false) require.NoError(t, err) + testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) fstest.CheckItems(t, FremoteCopy, file1) } @@ -280,8 +310,10 @@ func TestCopyAfterDelete(t *testing.T) { err := operations.Mkdir(ctx, r.Flocal, "") require.NoError(t, err) + ctx = predictDstFromLogger(ctx) err = CopyDir(ctx, r.Fremote, r.Flocal, false) require.NoError(t, err) + testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) r.CheckLocalItems(t) r.CheckRemoteItems(t, file1) @@ -294,8 +326,10 @@ func TestCopyRedownload(t *testing.T) { file1 := r.WriteObject(ctx, "sub dir/hello world", "hello world", t1) r.CheckRemoteItems(t, file1) + ctx = predictDstFromLogger(ctx) err := CopyDir(ctx, r.Flocal, r.Fremote, false) require.NoError(t, err) + testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) // Test with combined precision of local and remote as we copied it there and back r.CheckLocalListing(t, []fstest.Item{file1}, nil) @@ -314,8 +348,10 @@ func TestSyncBasedOnCheckSum(t *testing.T) { r.CheckLocalItems(t, file1) accounting.GlobalStats().ResetCounters() + ctx = predictDstFromLogger(ctx) err := Sync(ctx, r.Fremote, r.Flocal, false) require.NoError(t, err) + testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) // We should have transferred exactly one file. 
assert.Equal(t, toyFileTransfers(r), accounting.GlobalStats().GetTransfers()) @@ -326,8 +362,10 @@ func TestSyncBasedOnCheckSum(t *testing.T) { r.CheckLocalItems(t, file2) accounting.GlobalStats().ResetCounters() + ctx = predictDstFromLogger(ctx) err = Sync(ctx, r.Fremote, r.Flocal, false) require.NoError(t, err) + testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) // We should have transferred no files assert.Equal(t, int64(0), accounting.GlobalStats().GetTransfers()) @@ -348,8 +386,10 @@ func TestSyncSizeOnly(t *testing.T) { r.CheckLocalItems(t, file1) accounting.GlobalStats().ResetCounters() + ctx = predictDstFromLogger(ctx) err := Sync(ctx, r.Fremote, r.Flocal, false) require.NoError(t, err) + testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) // We should have transferred exactly one file. assert.Equal(t, toyFileTransfers(r), accounting.GlobalStats().GetTransfers()) @@ -360,8 +400,10 @@ func TestSyncSizeOnly(t *testing.T) { r.CheckLocalItems(t, file2) accounting.GlobalStats().ResetCounters() + ctx = predictDstFromLogger(ctx) err = Sync(ctx, r.Fremote, r.Flocal, false) require.NoError(t, err) + testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) // We should have transferred no files assert.Equal(t, int64(0), accounting.GlobalStats().GetTransfers()) @@ -382,8 +424,10 @@ func TestSyncIgnoreSize(t *testing.T) { r.CheckLocalItems(t, file1) accounting.GlobalStats().ResetCounters() + ctx = predictDstFromLogger(ctx) err := Sync(ctx, r.Fremote, r.Flocal, false) require.NoError(t, err) + testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) // We should have transferred exactly one file. 
assert.Equal(t, toyFileTransfers(r), accounting.GlobalStats().GetTransfers()) @@ -394,8 +438,10 @@ func TestSyncIgnoreSize(t *testing.T) { r.CheckLocalItems(t, file2) accounting.GlobalStats().ResetCounters() + ctx = predictDstFromLogger(ctx) err = Sync(ctx, r.Fremote, r.Flocal, false) require.NoError(t, err) + testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) // We should have transferred no files assert.Equal(t, int64(0), accounting.GlobalStats().GetTransfers()) @@ -411,8 +457,10 @@ func TestSyncIgnoreTimes(t *testing.T) { r.CheckRemoteItems(t, file1) accounting.GlobalStats().ResetCounters() + ctx = predictDstFromLogger(ctx) err := Sync(ctx, r.Fremote, r.Flocal, false) require.NoError(t, err) + testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) // We should have transferred exactly 0 files because the // files were identical. @@ -421,8 +469,10 @@ func TestSyncIgnoreTimes(t *testing.T) { ci.IgnoreTimes = true accounting.GlobalStats().ResetCounters() + ctx = predictDstFromLogger(ctx) err = Sync(ctx, r.Fremote, r.Flocal, false) require.NoError(t, err) + testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) // We should have transferred exactly one file even though the // files were identical. 
@@ -441,18 +491,22 @@ func TestSyncIgnoreExisting(t *testing.T) { ci.IgnoreExisting = true accounting.GlobalStats().ResetCounters() + ctx = predictDstFromLogger(ctx) err := Sync(ctx, r.Fremote, r.Flocal, false) require.NoError(t, err) r.CheckLocalItems(t, file1) r.CheckRemoteItems(t, file1) + testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) // Change everything r.WriteFile("existing", "newpotatoes", t2) accounting.GlobalStats().ResetCounters() + ctx = predictDstFromLogger(ctx) err = Sync(ctx, r.Fremote, r.Flocal, false) require.NoError(t, err) // Items should not change r.CheckRemoteItems(t, file1) + testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) } func TestSyncIgnoreErrors(t *testing.T) { @@ -490,8 +544,10 @@ func TestSyncIgnoreErrors(t *testing.T) { ) accounting.GlobalStats().ResetCounters() + ctx = predictDstFromLogger(ctx) _ = fs.CountError(errors.New("boom")) assert.NoError(t, Sync(ctx, r.Fremote, r.Flocal, false)) + testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) r.CheckLocalListing( t, @@ -530,8 +586,10 @@ func TestSyncAfterChangingModtimeOnly(t *testing.T) { ci.DryRun = true accounting.GlobalStats().ResetCounters() + ctx = predictDstFromLogger(ctx) err := Sync(ctx, r.Fremote, r.Flocal, false) require.NoError(t, err) + testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) r.CheckLocalItems(t, file1) r.CheckRemoteItems(t, file2) @@ -539,8 +597,10 @@ func TestSyncAfterChangingModtimeOnly(t *testing.T) { ci.DryRun = false accounting.GlobalStats().ResetCounters() + ctx = predictDstFromLogger(ctx) err = Sync(ctx, r.Fremote, r.Flocal, false) require.NoError(t, err) + testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) r.CheckLocalItems(t, file1) r.CheckRemoteItems(t, file1) @@ -565,8 +625,10 @@ func TestSyncAfterChangingModtimeOnlyWithNoUpdateModTime(t *testing.T) { r.CheckRemoteItems(t, file2) accounting.GlobalStats().ResetCounters() + ctx = 
predictDstFromLogger(ctx) err := Sync(ctx, r.Fremote, r.Flocal, false) require.NoError(t, err) + testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) r.CheckLocalItems(t, file1) r.CheckRemoteItems(t, file2) @@ -586,8 +648,10 @@ func TestSyncDoesntUpdateModtime(t *testing.T) { r.CheckRemoteItems(t, file2) accounting.GlobalStats().ResetCounters() + ctx = predictDstFromLogger(ctx) err := Sync(ctx, r.Fremote, r.Flocal, false) require.NoError(t, err) + testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) r.CheckLocalItems(t, file1) r.CheckRemoteItems(t, file1) @@ -606,8 +670,10 @@ func TestSyncAfterAddingAFile(t *testing.T) { r.CheckRemoteItems(t, file1) accounting.GlobalStats().ResetCounters() + ctx = predictDstFromLogger(ctx) err := Sync(ctx, r.Fremote, r.Flocal, false) require.NoError(t, err) + testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) r.CheckLocalItems(t, file1, file2) r.CheckRemoteItems(t, file1, file2) } @@ -621,8 +687,10 @@ func TestSyncAfterChangingFilesSizeOnly(t *testing.T) { r.CheckLocalItems(t, file2) accounting.GlobalStats().ResetCounters() + ctx = predictDstFromLogger(ctx) err := Sync(ctx, r.Fremote, r.Flocal, false) require.NoError(t, err) + testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) r.CheckLocalItems(t, file2) r.CheckRemoteItems(t, file2) } @@ -644,8 +712,10 @@ func TestSyncAfterChangingContentsOnly(t *testing.T) { r.CheckLocalItems(t, file2) accounting.GlobalStats().ResetCounters() + ctx = predictDstFromLogger(ctx) err := Sync(ctx, r.Fremote, r.Flocal, false) require.NoError(t, err) + testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) r.CheckLocalItems(t, file2) r.CheckRemoteItems(t, file2) } @@ -661,9 +731,11 @@ func TestSyncAfterRemovingAFileAndAddingAFileDryRun(t *testing.T) { ci.DryRun = true accounting.GlobalStats().ResetCounters() + ctx = predictDstFromLogger(ctx) err := Sync(ctx, r.Fremote, r.Flocal, false) ci.DryRun = false 
require.NoError(t, err) + testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) r.CheckLocalItems(t, file3, file1) r.CheckRemoteItems(t, file3, file2) @@ -679,8 +751,10 @@ func testSyncAfterRemovingAFileAndAddingAFile(ctx context.Context, t *testing.T) r.CheckLocalItems(t, file1, file3) accounting.GlobalStats().ResetCounters() + ctx = predictDstFromLogger(ctx) err := Sync(ctx, r.Fremote, r.Flocal, false) require.NoError(t, err) + testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) r.CheckLocalItems(t, file1, file3) r.CheckRemoteItems(t, file1, file3) } @@ -724,8 +798,10 @@ func testSyncAfterRemovingAFileAndAddingAFileSubDir(ctx context.Context, t *test ) accounting.GlobalStats().ResetCounters() + ctx = predictDstFromLogger(ctx) err := Sync(ctx, r.Fremote, r.Flocal, false) require.NoError(t, err) + testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) r.CheckLocalListing( t, @@ -788,10 +864,12 @@ func TestSyncAfterRemovingAFileAndAddingAFileSubDirWithErrors(t *testing.T) { }, ) + ctx = predictDstFromLogger(ctx) accounting.GlobalStats().ResetCounters() _ = fs.CountError(errors.New("boom")) err := Sync(ctx, r.Fremote, r.Flocal, false) assert.Equal(t, fs.ErrorNotDeleting, err) + testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) r.CheckLocalListing( t, @@ -861,8 +939,10 @@ func TestCopyDeleteBefore(t *testing.T) { r.CheckLocalItems(t, file2) accounting.GlobalStats().ResetCounters() + ctx = predictDstFromLogger(ctx) err := CopyDir(ctx, r.Fremote, r.Flocal, false) require.NoError(t, err) + testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) r.CheckRemoteItems(t, file1, file2) r.CheckLocalItems(t, file2) @@ -884,15 +964,19 @@ func TestSyncWithExclude(t *testing.T) { ctx = filter.ReplaceConfig(ctx, fi) accounting.GlobalStats().ResetCounters() + ctx = predictDstFromLogger(ctx) err = Sync(ctx, r.Fremote, r.Flocal, false) require.NoError(t, err) + testLoggerVsLsf(ctx, r.Fremote, 
operations.GetLoggerOpt(ctx).JSON, t) r.CheckRemoteItems(t, file2, file1) // Now sync the other way round and check enormous doesn't get // deleted as it is excluded from the sync accounting.GlobalStats().ResetCounters() + ctx = predictDstFromLogger(ctx) err = Sync(ctx, r.Flocal, r.Fremote, false) require.NoError(t, err) + testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) r.CheckLocalItems(t, file2, file1, file3) } @@ -913,15 +997,19 @@ func TestSyncWithExcludeAndDeleteExcluded(t *testing.T) { ctx = filter.ReplaceConfig(ctx, fi) accounting.GlobalStats().ResetCounters() + ctx = predictDstFromLogger(ctx) err = Sync(ctx, r.Fremote, r.Flocal, false) require.NoError(t, err) + testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) r.CheckRemoteItems(t, file2) // Check sync the other way round to make sure enormous gets // deleted even though it is excluded accounting.GlobalStats().ResetCounters() + ctx = predictDstFromLogger(ctx) err = Sync(ctx, r.Flocal, r.Fremote, false) require.NoError(t, err) + testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) r.CheckLocalItems(t, file2) } @@ -950,9 +1038,11 @@ func TestSyncWithUpdateOlder(t *testing.T) { ci.UpdateOlder = true ci.ModifyWindow = fs.ModTimeNotSupported + ctx = predictDstFromLogger(ctx) err := Sync(ctx, r.Fremote, r.Flocal, false) require.NoError(t, err) r.CheckRemoteItems(t, oneO, twoF, threeO, fourF, fiveF) + // testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) // no modtime if r.Fremote.Hashes().Count() == 0 { t.Logf("Skip test with --checksum as no hashes supported") @@ -994,8 +1084,10 @@ func testSyncWithMaxDuration(t *testing.T, cutoffMode fs.CutoffMode) { r.CheckRemoteItems(t) accounting.GlobalStats().ResetCounters() + // ctx = predictDstFromLogger(ctx) // not currently supported (but tests do pass for CutoffModeSoft) startTime := time.Now() err := Sync(ctx, r.Fremote, r.Flocal, false) + // testLoggerVsLsf(ctx, r.Fremote, 
operations.GetLoggerOpt(ctx).JSON, t) require.True(t, errors.Is(err, ErrorMaxDurationReached)) if cutoffMode == fs.CutoffModeHard { @@ -1042,7 +1134,9 @@ func TestSyncWithTrackRenames(t *testing.T) { f2 := r.WriteFile("yam", "Yam Content", t2) accounting.GlobalStats().ResetCounters() + ctx = predictDstFromLogger(ctx) require.NoError(t, Sync(ctx, r.Fremote, r.Flocal, false)) + testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) r.CheckRemoteItems(t, f1, f2) r.CheckLocalItems(t, f1, f2) @@ -1051,7 +1145,9 @@ func TestSyncWithTrackRenames(t *testing.T) { f2 = r.RenameFile(f2, "yaml") accounting.GlobalStats().ResetCounters() + ctx = predictDstFromLogger(ctx) require.NoError(t, Sync(ctx, r.Fremote, r.Flocal, false)) + testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) r.CheckRemoteItems(t, f1, f2) @@ -1110,7 +1206,9 @@ func TestSyncWithTrackRenamesStrategyModtime(t *testing.T) { f2 := r.WriteFile("yam", "Yam Content", t2) accounting.GlobalStats().ResetCounters() + ctx = predictDstFromLogger(ctx) require.NoError(t, Sync(ctx, r.Fremote, r.Flocal, false)) + testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) r.CheckRemoteItems(t, f1, f2) r.CheckLocalItems(t, f1, f2) @@ -1119,7 +1217,9 @@ func TestSyncWithTrackRenamesStrategyModtime(t *testing.T) { f2 = r.RenameFile(f2, "yaml") accounting.GlobalStats().ResetCounters() + ctx = predictDstFromLogger(ctx) require.NoError(t, Sync(ctx, r.Fremote, r.Flocal, false)) + testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) r.CheckRemoteItems(t, f1, f2) @@ -1145,7 +1245,9 @@ func TestSyncWithTrackRenamesStrategyLeaf(t *testing.T) { f2 := r.WriteFile("sub/yam", "Yam Content", t2) accounting.GlobalStats().ResetCounters() + ctx = predictDstFromLogger(ctx) require.NoError(t, Sync(ctx, r.Fremote, r.Flocal, false)) + testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) r.CheckRemoteItems(t, f1, f2) r.CheckLocalItems(t, f1, f2) @@ -1154,7 +1256,9 @@ func 
TestSyncWithTrackRenamesStrategyLeaf(t *testing.T) { f2 = r.RenameFile(f2, "yam") accounting.GlobalStats().ResetCounters() + ctx = predictDstFromLogger(ctx) require.NoError(t, Sync(ctx, r.Fremote, r.Flocal, false)) + testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) r.CheckRemoteItems(t, f1, f2) @@ -1200,8 +1304,10 @@ func testServerSideMove(ctx context.Context, t *testing.T, r *fstest.Run, withFi // Do server-side move accounting.GlobalStats().ResetCounters() + // ctx = predictDstFromLogger(ctx) // not currently supported -- doesn't list all contents of dir. err = MoveDir(ctx, FremoteMove, r.Fremote, testDeleteEmptyDirs, false) require.NoError(t, err) + // testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) if withFilter { r.CheckRemoteItems(t, file2) @@ -1227,8 +1333,10 @@ func testServerSideMove(ctx context.Context, t *testing.T, r *fstest.Run, withFi // Move it back to a new empty remote, dst does not exist this time accounting.GlobalStats().ResetCounters() + // ctx = predictDstFromLogger(ctx) err = MoveDir(ctx, FremoteMove2, FremoteMove, testDeleteEmptyDirs, false) require.NoError(t, err) + // testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) if withFilter { fstest.CheckItems(t, FremoteMove2, file1, file3u) @@ -1252,8 +1360,10 @@ func TestMoveWithDeleteEmptySrcDirs(t *testing.T) { r.Mkdir(ctx, r.Fremote) // run move with --delete-empty-src-dirs + ctx = predictDstFromLogger(ctx) err := MoveDir(ctx, r.Fremote, r.Flocal, true, false) require.NoError(t, err) + testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) r.CheckLocalListing( t, @@ -1270,8 +1380,10 @@ func TestMoveWithoutDeleteEmptySrcDirs(t *testing.T) { file2 := r.WriteFile("nested/sub dir/file", "nested", t1) r.Mkdir(ctx, r.Fremote) + ctx = predictDstFromLogger(ctx) err := MoveDir(ctx, r.Fremote, r.Flocal, false, false) require.NoError(t, err) + testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) 
r.CheckLocalListing( t, @@ -1295,8 +1407,10 @@ func TestMoveWithIgnoreExisting(t *testing.T) { ci.IgnoreExisting = true accounting.GlobalStats().ResetCounters() + // ctx = predictDstFromLogger(ctx) err := MoveDir(ctx, r.Fremote, r.Flocal, false, false) require.NoError(t, err) + // testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) r.CheckLocalListing( t, []fstest.Item{}, @@ -1314,8 +1428,10 @@ func TestMoveWithIgnoreExisting(t *testing.T) { // Recreate first file with modified content file1b := r.WriteFile("existing", "newpotatoes", t2) accounting.GlobalStats().ResetCounters() + ctx = predictDstFromLogger(ctx) err = MoveDir(ctx, r.Fremote, r.Flocal, false, false) require.NoError(t, err) + testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) // Source items should still exist in modified state r.CheckLocalListing( t, @@ -1379,8 +1495,10 @@ func TestServerSideMoveOverlap(t *testing.T) { r.CheckRemoteItems(t, file1) // Subdir move with no filters should return ErrorCantMoveOverlapping + ctx = predictDstFromLogger(ctx) err = MoveDir(ctx, FremoteMove, r.Fremote, false, false) assert.EqualError(t, err, fs.ErrorOverlapping.Error()) + testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) // Now try with a filter which should also fail with ErrorCantMoveOverlapping fi, err := filter.NewFilter(nil) @@ -1388,8 +1506,10 @@ func TestServerSideMoveOverlap(t *testing.T) { fi.Opt.MinSize = 40 ctx = filter.ReplaceConfig(ctx, fi) + ctx = predictDstFromLogger(ctx) err = MoveDir(ctx, FremoteMove, r.Fremote, false, false) assert.EqualError(t, err, fs.ErrorOverlapping.Error()) + testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) } // Test a sync with overlap @@ -1407,10 +1527,18 @@ func TestSyncOverlap(t *testing.T) { assert.Equal(t, fs.ErrorOverlapping.Error(), err.Error()) } + ctx = predictDstFromLogger(ctx) checkErr(Sync(ctx, FremoteSync, r.Fremote, false)) + testLoggerVsLsf(ctx, r.Fremote, 
operations.GetLoggerOpt(ctx).JSON, t) + ctx = predictDstFromLogger(ctx) checkErr(Sync(ctx, r.Fremote, FremoteSync, false)) + testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) + ctx = predictDstFromLogger(ctx) checkErr(Sync(ctx, r.Fremote, r.Fremote, false)) + testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) + ctx = predictDstFromLogger(ctx) checkErr(Sync(ctx, FremoteSync, FremoteSync, false)) + testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) } // Test a sync with filtered overlap @@ -1453,29 +1581,63 @@ func TestSyncOverlapWithFilter(t *testing.T) { } accounting.GlobalStats().ResetCounters() + filterCtx = predictDstFromLogger(filterCtx) checkNoErr(Sync(filterCtx, FremoteSync, r.Fremote, false)) checkErr(Sync(ctx, FremoteSync, r.Fremote, false)) checkNoErr(Sync(filterCtx, r.Fremote, FremoteSync, false)) + testLoggerVsLsf(filterCtx, r.Fremote, operations.GetLoggerOpt(filterCtx).JSON, t) + filterCtx = predictDstFromLogger(filterCtx) checkErr(Sync(ctx, r.Fremote, FremoteSync, false)) + testLoggerVsLsf(filterCtx, r.Fremote, operations.GetLoggerOpt(filterCtx).JSON, t) + filterCtx = predictDstFromLogger(filterCtx) checkErr(Sync(filterCtx, r.Fremote, r.Fremote, false)) + testLoggerVsLsf(filterCtx, r.Fremote, operations.GetLoggerOpt(filterCtx).JSON, t) + filterCtx = predictDstFromLogger(filterCtx) checkErr(Sync(ctx, r.Fremote, r.Fremote, false)) + testLoggerVsLsf(filterCtx, r.Fremote, operations.GetLoggerOpt(filterCtx).JSON, t) + filterCtx = predictDstFromLogger(filterCtx) checkErr(Sync(filterCtx, FremoteSync, FremoteSync, false)) + testLoggerVsLsf(filterCtx, r.Fremote, operations.GetLoggerOpt(filterCtx).JSON, t) + filterCtx = predictDstFromLogger(filterCtx) checkErr(Sync(ctx, FremoteSync, FremoteSync, false)) + testLoggerVsLsf(filterCtx, r.Fremote, operations.GetLoggerOpt(filterCtx).JSON, t) + filterCtx = predictDstFromLogger(filterCtx) checkNoErr(Sync(filterCtx, FremoteSync2, r.Fremote, false)) + 
testLoggerVsLsf(filterCtx, r.Fremote, operations.GetLoggerOpt(filterCtx).JSON, t) + filterCtx = predictDstFromLogger(filterCtx) checkErr(Sync(ctx, FremoteSync2, r.Fremote, false)) + testLoggerVsLsf(filterCtx, r.Fremote, operations.GetLoggerOpt(filterCtx).JSON, t) + filterCtx = predictDstFromLogger(filterCtx) checkNoErr(Sync(filterCtx, r.Fremote, FremoteSync2, false)) + testLoggerVsLsf(filterCtx, r.Fremote, operations.GetLoggerOpt(filterCtx).JSON, t) + filterCtx = predictDstFromLogger(filterCtx) checkErr(Sync(ctx, r.Fremote, FremoteSync2, false)) + testLoggerVsLsf(filterCtx, r.Fremote, operations.GetLoggerOpt(filterCtx).JSON, t) + filterCtx = predictDstFromLogger(filterCtx) checkErr(Sync(filterCtx, FremoteSync2, FremoteSync2, false)) + testLoggerVsLsf(filterCtx, r.Fremote, operations.GetLoggerOpt(filterCtx).JSON, t) + filterCtx = predictDstFromLogger(filterCtx) checkErr(Sync(ctx, FremoteSync2, FremoteSync2, false)) + testLoggerVsLsf(filterCtx, r.Fremote, operations.GetLoggerOpt(filterCtx).JSON, t) + filterCtx = predictDstFromLogger(filterCtx) checkNoErr(Sync(filterCtx, FremoteSync3, r.Fremote, false)) + testLoggerVsLsf(filterCtx, r.Fremote, operations.GetLoggerOpt(filterCtx).JSON, t) + filterCtx = predictDstFromLogger(filterCtx) checkErr(Sync(ctx, FremoteSync3, r.Fremote, false)) + testLoggerVsLsf(filterCtx, r.Fremote, operations.GetLoggerOpt(filterCtx).JSON, t) + filterCtx = predictDstFromLogger(filterCtx) // Destination is excluded so this test makes no sense // checkNoErr(Sync(filterCtx, r.Fremote, FremoteSync3, false)) checkErr(Sync(ctx, r.Fremote, FremoteSync3, false)) + testLoggerVsLsf(filterCtx, r.Fremote, operations.GetLoggerOpt(filterCtx).JSON, t) + filterCtx = predictDstFromLogger(filterCtx) checkErr(Sync(filterCtx, FremoteSync3, FremoteSync3, false)) + testLoggerVsLsf(filterCtx, r.Fremote, operations.GetLoggerOpt(filterCtx).JSON, t) + filterCtx = predictDstFromLogger(filterCtx) checkErr(Sync(ctx, FremoteSync3, FremoteSync3, false)) + 
testLoggerVsLsf(filterCtx, r.Fremote, operations.GetLoggerOpt(filterCtx).JSON, t) } // Test with CompareDest set @@ -1494,7 +1656,9 @@ func TestSyncCompareDest(t *testing.T) { r.CheckLocalItems(t, file1) accounting.GlobalStats().ResetCounters() + // ctx = predictDstFromLogger(ctx) // not currently supported due to duplicate equal() checks err = Sync(ctx, fdst, r.Flocal, false) + // testLoggerVsLsf(ctx, fdst, operations.GetLoggerOpt(ctx).JSON, t) require.NoError(t, err) file1dst := file1 @@ -1508,7 +1672,9 @@ func TestSyncCompareDest(t *testing.T) { r.CheckLocalItems(t, file1b) accounting.GlobalStats().ResetCounters() + // ctx = predictDstFromLogger(ctx) err = Sync(ctx, fdst, r.Flocal, false) + // testLoggerVsLsf(ctx, fdst, operations.GetLoggerOpt(ctx).JSON, t) require.NoError(t, err) file1bdst := file1b @@ -1524,7 +1690,9 @@ func TestSyncCompareDest(t *testing.T) { r.CheckLocalItems(t, file1c) accounting.GlobalStats().ResetCounters() + // ctx = predictDstFromLogger(ctx) err = Sync(ctx, fdst, r.Flocal, false) + // testLoggerVsLsf(ctx, fdst, operations.GetLoggerOpt(ctx).JSON, t) require.NoError(t, err) r.CheckRemoteItems(t, file2, file3) @@ -1536,14 +1704,18 @@ func TestSyncCompareDest(t *testing.T) { r.CheckLocalItems(t, file1c, file5) accounting.GlobalStats().ResetCounters() + // ctx = predictDstFromLogger(ctx) err = Sync(ctx, fdst, r.Flocal, false) + // testLoggerVsLsf(ctx, fdst, operations.GetLoggerOpt(ctx).JSON, t) require.NoError(t, err) r.CheckRemoteItems(t, file2, file3, file4) // check new dest, new compare accounting.GlobalStats().ResetCounters() + // ctx = predictDstFromLogger(ctx) err = Sync(ctx, fdst, r.Flocal, false) + // testLoggerVsLsf(ctx, fdst, operations.GetLoggerOpt(ctx).JSON, t) require.NoError(t, err) r.CheckRemoteItems(t, file2, file3, file4) @@ -1570,7 +1742,9 @@ func TestSyncCompareDest(t *testing.T) { r.CheckLocalItems(t, file1c, file5b) accounting.GlobalStats().ResetCounters() + // ctx = predictDstFromLogger(ctx) err = Sync(ctx, fdst, 
r.Flocal, false) + // testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) require.NoError(t, err) r.CheckRemoteItems(t, file2, file3, file4) @@ -1584,7 +1758,9 @@ func TestSyncCompareDest(t *testing.T) { r.CheckLocalItems(t, file1c, file5c) accounting.GlobalStats().ResetCounters() + // ctx = predictDstFromLogger(ctx) err = Sync(ctx, fdst, r.Flocal, false) + // testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) require.NoError(t, err) file5cdst := file5c @@ -1615,7 +1791,9 @@ func TestSyncMultipleCompareDest(t *testing.T) { accounting.GlobalStats().ResetCounters() fdst, err := fs.NewFs(ctx, r.FremoteName+"/dest") require.NoError(t, err) + // ctx = predictDstFromLogger(ctx) require.NoError(t, Sync(ctx, fdst, r.Flocal, false)) + // testLoggerVsLsf(ctx, fdst, operations.GetLoggerOpt(ctx).JSON, t) fdest3 := fsrc3 fdest3.Path = "dest/3" @@ -1644,7 +1822,9 @@ func TestSyncCopyDest(t *testing.T) { r.CheckLocalItems(t, file1) accounting.GlobalStats().ResetCounters() + ctx = predictDstFromLogger(ctx) err = Sync(ctx, fdst, r.Flocal, false) + testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) require.NoError(t, err) file1dst := file1 @@ -1658,7 +1838,9 @@ func TestSyncCopyDest(t *testing.T) { r.CheckLocalItems(t, file1b) accounting.GlobalStats().ResetCounters() + ctx = predictDstFromLogger(ctx) err = Sync(ctx, fdst, r.Flocal, false) + testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) require.NoError(t, err) file1bdst := file1b @@ -1677,7 +1859,9 @@ func TestSyncCopyDest(t *testing.T) { r.CheckLocalItems(t, file1c) accounting.GlobalStats().ResetCounters() + ctx = predictDstFromLogger(ctx) err = Sync(ctx, fdst, r.Flocal, false) + testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) require.NoError(t, err) file2dst := file2 @@ -1694,6 +1878,8 @@ func TestSyncCopyDest(t *testing.T) { r.CheckLocalItems(t, file1c, file5) accounting.GlobalStats().ResetCounters() + ctx = 
predictDstFromLogger(ctx) + testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) err = Sync(ctx, fdst, r.Flocal, false) require.NoError(t, err) @@ -1704,7 +1890,9 @@ func TestSyncCopyDest(t *testing.T) { // check new dest, new copy accounting.GlobalStats().ResetCounters() + ctx = predictDstFromLogger(ctx) err = Sync(ctx, fdst, r.Flocal, false) + testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) require.NoError(t, err) r.CheckRemoteItems(t, file2, file2dst, file3, file4, file4dst) @@ -1716,7 +1904,9 @@ func TestSyncCopyDest(t *testing.T) { r.CheckLocalItems(t, file1c, file5, file7) accounting.GlobalStats().ResetCounters() + ctx = predictDstFromLogger(ctx) err = Sync(ctx, fdst, r.Flocal, false) + testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) require.NoError(t, err) file7dst := file7 @@ -1928,7 +2118,9 @@ func TestSyncUTFNorm(t *testing.T) { r.CheckRemoteItems(t, file2) accounting.GlobalStats().ResetCounters() + // ctx = predictDstFromLogger(ctx) err := Sync(ctx, r.Fremote, r.Flocal, false) + // testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) // can't test this on macOS require.NoError(t, err) // We should have transferred exactly one file, but kept the @@ -1954,7 +2146,9 @@ func TestSyncImmutable(t *testing.T) { // Should succeed accounting.GlobalStats().ResetCounters() + ctx = predictDstFromLogger(ctx) err := Sync(ctx, r.Fremote, r.Flocal, false) + testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) require.NoError(t, err) r.CheckLocalItems(t, file1) r.CheckRemoteItems(t, file1) @@ -1966,7 +2160,9 @@ func TestSyncImmutable(t *testing.T) { // Should fail with ErrorImmutableModified and not modify local or remote files accounting.GlobalStats().ResetCounters() + ctx = predictDstFromLogger(ctx) err = Sync(ctx, r.Fremote, r.Flocal, false) + testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) assert.EqualError(t, err, fs.ErrorImmutableModified.Error()) 
r.CheckLocalItems(t, file2) r.CheckRemoteItems(t, file1) @@ -1993,7 +2189,9 @@ func TestSyncIgnoreCase(t *testing.T) { // Should not copy files that are differently-cased but otherwise identical accounting.GlobalStats().ResetCounters() + // ctx = predictDstFromLogger(ctx) err := Sync(ctx, r.Fremote, r.Flocal, false) + // testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) // can't test this on macOS require.NoError(t, err) r.CheckLocalItems(t, file1) r.CheckRemoteItems(t, file2) @@ -2025,7 +2223,9 @@ func TestMaxTransfer(t *testing.T) { accounting.GlobalStats().ResetCounters() + // ctx = predictDstFromLogger(ctx) // not currently supported err := Sync(ctx, r.Fremote, r.Flocal, false) + // testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) expectedErr := fserrors.FsError(accounting.ErrorMaxTransferLimitReachedFatal) if cutoff != fs.CutoffModeHard { expectedErr = accounting.ErrorMaxTransferLimitReachedGraceful @@ -2075,7 +2275,9 @@ func testSyncConcurrent(t *testing.T, subtest string) { r.CheckRemoteItems(t, itemsBefore...) 
stats.ResetErrors() + ctx = predictDstFromLogger(ctx) err := Sync(ctx, r.Fremote, r.Flocal, false) + testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) if errors.Is(err, fs.ErrorCantUploadEmptyFiles) { t.Skipf("Skip test because remote cannot upload empty files") } @@ -2091,3 +2293,124 @@ func TestSyncConcurrentDelete(t *testing.T) { func TestSyncConcurrentTruncate(t *testing.T) { testSyncConcurrent(t, "truncate") } + +// for testing logger: +func predictDstFromLogger(ctx context.Context) context.Context { + opt := operations.NewLoggerOpt() + var lock mutex.Mutex + + opt.LoggerFn = func(ctx context.Context, sigil operations.Sigil, src, dst fs.DirEntry, err error) { + lock.Lock() + defer lock.Unlock() + + // ignore dirs for our purposes here + if err == fs.ErrorIsDir { + return + } + winner := operations.WinningSide(ctx, sigil, src, dst, err) + if winner.Obj != nil { + file := winner.Obj + obj, ok := file.(fs.ObjectInfo) + checksum := "" + if ok { + if obj.Fs().Hashes().GetOne() == hash.MD5 { + // skip if no MD5 + checksum, _ = obj.Hash(ctx, obj.Fs().Hashes().GetOne()) + } + } + errMsg := "" + if winner.Err != nil { + errMsg = ";" + winner.Err.Error() + } + operations.SyncFprintf(opt.JSON, "%s;%s;%v;%s%s\n", file.ModTime(ctx).Local().Format("2006-01-02 15:04:05.000000000"), checksum, file.Size(), file.Remote(), errMsg) + } + } + return operations.WithSyncLogger(ctx, opt) +} + +func DstLsf(ctx context.Context, Fremote fs.Fs) *bytes.Buffer { + var opt = operations.ListJSONOpt{ + NoModTime: false, + NoMimeType: true, + DirsOnly: false, + FilesOnly: true, + Recurse: true, + ShowHash: true, + HashTypes: []string{"MD5"}, + } + + var list operations.ListFormat + + list.SetSeparator(";") + timeFormat := operations.FormatForLSFPrecision(Fremote.Precision()) + list.AddModTime(timeFormat) + list.AddHash(hash.MD5) + list.AddSize() + list.AddPath() + + out := new(bytes.Buffer) + + err := operations.ListJSON(ctx, Fremote, "", &opt, func(item 
*operations.ListJSONItem) error { + _, _ = fmt.Fprintln(out, list.Format(item)) + return nil + }) + if err != nil { + fs.Errorf(Fremote, "ListJSON error: %v", err) + } + + return out +} + +func LoggerMatchesLsf(logger, lsf *bytes.Buffer) error { + loggerSplit := bytes.Split(logger.Bytes(), []byte("\n")) + sort.SliceStable(loggerSplit, func(i int, j int) bool { return string(loggerSplit[i]) < string(loggerSplit[j]) }) + lsfSplit := bytes.Split(lsf.Bytes(), []byte("\n")) + sort.SliceStable(lsfSplit, func(i int, j int) bool { return string(lsfSplit[i]) < string(lsfSplit[j]) }) + + loggerJoined := bytes.Join(loggerSplit, []byte("\n")) + lsfJoined := bytes.Join(lsfSplit, []byte("\n")) + + if bytes.Equal(loggerJoined, lsfJoined) { + return nil + } + Diff(string(loggerJoined), string(lsfJoined)) + return fmt.Errorf("logger does not match lsf! \nlogger: \n%s \nlsf: \n%s", loggerJoined, lsfJoined) +} + +func Diff(rev1, rev2 string) { + fmt.Printf("Diff of %q and %q\n", "logger", "lsf") + cmd := exec.Command("bash", "-c", fmt.Sprintf(`diff <(echo "%s") <(echo "%s")`, rev1, rev2)) + out, _ := cmd.Output() + _, _ = os.Stdout.Write(out) +} + +func testLoggerVsLsf(ctx context.Context, Fremote fs.Fs, logger *bytes.Buffer, t *testing.T) { + var newlogger bytes.Buffer + canTestModtime := fs.GetModifyWindow(ctx, Fremote) != fs.ModTimeNotSupported + canTestHash := Fremote.Hashes().Contains(hash.MD5) + if !canTestHash || !canTestModtime { + loggerSplit := bytes.Split(logger.Bytes(), []byte("\n")) + for i, line := range loggerSplit { + elements := bytes.Split(line, []byte(";")) + if len(elements) >= 2 { + if !canTestModtime { + elements[0] = []byte("") + } + if !canTestHash { + elements[1] = []byte("") + } + } + loggerSplit[i] = bytes.Join(elements, []byte(";")) + } + newlogger.Write(bytes.Join(loggerSplit, []byte("\n"))) + } else { + newlogger.Write(logger.Bytes()) + } + + lsf := DstLsf(ctx, Fremote) + err := LoggerMatchesLsf(&newlogger, lsf) + r := fstest.NewRun(t) + if 
r.Flocal.Precision() == Fremote.Precision() && r.Flocal.Hashes().Contains(hash.MD5) && canTestHash { require.NoError(t, err) } } diff --git a/go.mod b/go.mod index 51f828948..8f1db25d0 100644 --- a/go.mod +++ b/go.mod @@ -158,6 +158,7 @@ require ( github.com/rasky/go-xdr v0.0.0-20170124162913-1a41d1a06c93 // indirect github.com/relvacode/iso8601 v1.3.0 // indirect + github.com/rogpeppe/go-internal v1.11.0 // indirect github.com/rs/xid v1.5.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 // indirect github.com/shabbyrobe/gocovmerge v0.0.0-20230507112040-c3350d9342df // indirect diff --git a/go.sum b/go.sum index 2e0dbbe73..ef1dd4fdc 100644 --- a/go.sum +++ b/go.sum @@ -472,6 +472,7 @@ github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6po github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= +github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc= github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 h1:GHRpF1pTW19a8tTFrMLUcfWwyC0pnifVo2ClaLq+hP8=