diff --git a/amazonclouddrive/amazonclouddrive.go b/amazonclouddrive/amazonclouddrive.go
index 5caf5d1fe..b9d8b5163 100644
--- a/amazonclouddrive/amazonclouddrive.go
+++ b/amazonclouddrive/amazonclouddrive.go
@@ -164,7 +164,7 @@ func (f *Fs) shouldRetry(resp *http.Response, err error) (bool, error) {
 		//
 		// HTTP code 403: "403 Forbidden", reponse body: {"message":"Authorization header requires 'Credential' parameter. Authorization header requires 'Signature' parameter. Authorization header requires 'SignedHeaders' parameter. Authorization header requires existence of either a 'X-Amz-Date' or a 'Date' header. Authorization=Bearer"}
 		if resp.StatusCode == 403 && strings.Contains(err.Error(), "Authorization header requires") {
-			fs.Logf(f, "403 \"Authorization header requires...\" error received - retry")
+			fs.Debugf(f, "403 \"Authorization header requires...\" error received - retry")
 			return true, err
 		}
 	}
@@ -917,12 +917,12 @@ func (o *Object) readMetaData() (err error) {
 func (o *Object) ModTime() time.Time {
 	err := o.readMetaData()
 	if err != nil {
-		fs.Logf(o, "Failed to read metadata: %v", err)
+		fs.Debugf(o, "Failed to read metadata: %v", err)
 		return time.Now()
 	}
 	modTime, err := time.Parse(timeFormat, *o.info.ModifiedDate)
 	if err != nil {
-		fs.Logf(o, "Failed to read mtime from object: %v", err)
+		fs.Debugf(o, "Failed to read mtime from object: %v", err)
 		return time.Now()
 	}
 	return modTime
diff --git a/b2/b2.go b/b2/b2.go
index 53d1e8694..9f928898e 100644
--- a/b2/b2.go
+++ b/b2/b2.go
@@ -490,7 +490,7 @@ func (f *Fs) list(dir string, level int, prefix string, limit int, hidden bool,
 			return nil
 		}
 		if !strings.HasPrefix(file.Name, f.root) {
-			fs.Logf(f, "Odd name received %q", file.Name)
+			fs.Debugf(f, "Odd name received %q", file.Name)
 			continue
 		}
 		remote := file.Name[len(f.root):]
diff --git a/cmd/cmd.go b/cmd/cmd.go
index 2dfa69948..782aedbfd 100644
--- a/cmd/cmd.go
+++ b/cmd/cmd.go
@@ -275,7 +275,7 @@ func Run(Retry bool, showStats bool, cmd *cobra.Command, f func() error) {
 		log.Fatalf("Failed to %s: %v", cmd.Name(), err)
 	}
 	if showStats && (!fs.Config.Quiet || fs.Stats.Errored() || *statsInterval > 0) {
-		fs.Logf(nil, "%s", fs.Stats)
+		fs.Infof(nil, "%s", fs.Stats)
 	}
 	if fs.Config.Verbose {
 		fs.Debugf(nil, "Go routines at exit %d\n", runtime.NumGoroutine())
@@ -345,7 +345,7 @@ func initConfig() {
 
 	// Setup CPU profiling if desired
 	if *cpuProfile != "" {
-		fs.Logf(nil, "Creating CPU profile %q\n", *cpuProfile)
+		fs.Infof(nil, "Creating CPU profile %q\n", *cpuProfile)
 		f, err := os.Create(*cpuProfile)
 		if err != nil {
 			fs.Stats.Error()
@@ -362,7 +362,7 @@ func initConfig() {
 	// Setup memory profiling if desired
 	if *memProfile != "" {
 		defer func() {
-			fs.Logf(nil, "Saving Memory profile %q\n", *memProfile)
+			fs.Infof(nil, "Saving Memory profile %q\n", *memProfile)
 			f, err := os.Create(*memProfile)
 			if err != nil {
 				fs.Stats.Error()
diff --git a/drive/drive.go b/drive/drive.go
index 32ea2b33b..d268680ee 100644
--- a/drive/drive.go
+++ b/drive/drive.go
@@ -929,12 +929,12 @@ func (o *Object) readMetaData() (err error) {
 func (o *Object) ModTime() time.Time {
 	err := o.readMetaData()
 	if err != nil {
-		fs.Logf(o, "Failed to read metadata: %v", err)
+		fs.Debugf(o, "Failed to read metadata: %v", err)
 		return time.Now()
 	}
 	modTime, err := time.Parse(timeFormatIn, o.modifiedDate)
 	if err != nil {
-		fs.Logf(o, "Failed to read mtime from object: %v", err)
+		fs.Debugf(o, "Failed to read mtime from object: %v", err)
 		return time.Now()
 	}
 	return modTime
@@ -1103,7 +1103,7 @@ func (o *Object) Remove() error {
 func (o *Object) MimeType() string {
 	err := o.readMetaData()
 	if err != nil {
-		fs.Logf(o, "Failed to read metadata: %v", err)
+		fs.Debugf(o, "Failed to read metadata: %v", err)
 		return ""
 	}
 	return o.mimeType
diff --git a/dropbox/dropbox.go b/dropbox/dropbox.go
index b2a04a771..b4e1a39b1 100644
--- a/dropbox/dropbox.go
+++ b/dropbox/dropbox.go
@@ -287,7 +287,7 @@ func (f *Fs) list(out fs.ListOpts, dir string) {
 				// This notifies of a deleted object
 			} else {
 				if len(entry.Path) <= 1 || entry.Path[0] != '/' {
-					fs.Logf(f, "dropbox API inconsistency: a path should always start with a slash and be at least 2 characters: %s", entry.Path)
+					fs.Debugf(f, "dropbox API inconsistency: a path should always start with a slash and be at least 2 characters: %s", entry.Path)
					continue
 				}
 
@@ -699,7 +699,7 @@ func (o *Object) readMetaData() (err error) {
 func (o *Object) ModTime() time.Time {
 	err := o.readMetaData()
 	if err != nil {
-		fs.Logf(o, "Failed to read metadata: %v", err)
+		fs.Debugf(o, "Failed to read metadata: %v", err)
 		return time.Now()
 	}
 	return o.modTime
diff --git a/fs/accounting.go b/fs/accounting.go
index d5f5fec3f..72b4db762 100644
--- a/fs/accounting.go
+++ b/fs/accounting.go
@@ -33,7 +33,7 @@ func startTokenBucket() {
 
 	if currLimit.bandwidth > 0 {
 		tokenBucket = tb.NewBucket(int64(currLimit.bandwidth), 100*time.Millisecond)
-		Logf(nil, "Starting bandwidth limiter at %vBytes/s", &currLimit.bandwidth)
+		Infof(nil, "Starting bandwidth limiter at %vBytes/s", &currLimit.bandwidth)
 
 		// Start the SIGUSR2 signal handler to toggle bandwidth.
 		// This function does nothing in windows systems.
@@ -60,7 +60,7 @@ func startTokenTicker() {
 				if tokenBucket != nil {
 					err := tokenBucket.Close()
 					if err != nil {
-						Logf(nil, "Error closing token bucket: %v", err)
+						Debugf(nil, "Error closing token bucket: %v", err)
 					}
 				}
 
@@ -203,7 +203,7 @@ Elapsed time: %10v
 
 // Log outputs the StatsInfo to the log
 func (s *StatsInfo) Log() {
-	Logf(nil, "%v\n", s)
+	Infof(nil, "%v\n", s)
 }
 
 // Bytes updates the stats for bytes bytes
diff --git a/fs/operations.go b/fs/operations.go
index 136702046..9be889f57 100644
--- a/fs/operations.go
+++ b/fs/operations.go
@@ -32,12 +32,12 @@ func CalculateModifyWindow(fs ...Fs) {
 				Config.ModifyWindow = precision
 			}
 			if precision == ModTimeNotSupported {
-				Debugf(f, "Modify window not supported")
+				Infof(f, "Modify window not supported")
 				return
 			}
 		}
 	}
-	Debugf(fs[0], "Modify window is %s", Config.ModifyWindow)
+	Infof(fs[0], "Modify window is %s", Config.ModifyWindow)
 }
 
 // HashEquals checks to see if src == dst, but ignores empty strings
@@ -181,7 +181,7 @@ func equal(src, dst Object, sizeOnly, checkSum bool) bool {
 			Stats.Error()
 			Errorf(dst, "Failed to set modification time: %v", err)
 		} else {
-			Debugf(src, "Updated modification time in destination")
+			Infof(src, "Updated modification time in destination")
 		}
 	}
 	return true
@@ -217,10 +217,10 @@ func removeFailedCopy(dst Object) bool {
 	if dst == nil {
 		return false
 	}
-	Debugf(dst, "Removing failed copy")
+	Infof(dst, "Removing failed copy")
 	removeErr := dst.Remove()
 	if removeErr != nil {
-		Debugf(dst, "Failed to remove failed copy: %s", removeErr)
+		Infof(dst, "Failed to remove failed copy: %s", removeErr)
 		return false
 	}
 	return true
@@ -341,7 +341,7 @@ func Copy(f Fs, dst Object, remote string, src Object) (err error) {
 		}
 	}
 
-	Debugf(src, actionTaken)
+	Infof(src, actionTaken)
 	return err
 }
 
@@ -365,7 +365,7 @@ func Move(fdst Fs, dst Object, remote string, src Object) (err error) {
 		_, err := doMove(src, remote)
 		switch err {
 		case nil:
-			Debugf(src, "Moved (server side)")
+			Infof(src, "Moved (server side)")
 			return nil
 		case ErrorCantMove:
 			Debugf(src, "Can't move, switching to copy")
@@ -424,7 +424,7 @@ func deleteFileWithBackupDir(dst Object, backupDir Fs) (err error) {
 		Stats.Error()
 		Errorf(dst, "Couldn't %s: %v", action, err)
 	} else {
-		Debugf(dst, actioned)
+		Infof(dst, actioned)
 	}
 	Stats.DoneChecking(dst.Remote())
 	return err
@@ -458,7 +458,7 @@ func deleteFilesWithBackupDir(toBeDeleted ObjectsChan, backupDir Fs) error {
 			}
 		}()
 	}
-	Logf(nil, "Waiting for deletions to finish")
+	Infof(nil, "Waiting for deletions to finish")
 	wg.Wait()
 	if errorCount > 0 {
 		return errors.Errorf("failed to delete %d files", errorCount)
@@ -587,7 +587,7 @@ func readFilesMap(fs Fs, includeAll bool, dir string) (files map[string]Object,
 		if _, ok := files[remote]; !ok {
 			files[remote] = o
 			if _, ok := normalised[normalisedRemote]; ok {
-				Logf(o, "Warning: File found with same name but different case on %v", o.Fs())
+				Logf(o, "File found with same name but different case on %v", o.Fs())
 			}
 		} else {
 			Logf(o, "Duplicate file detected")
@@ -609,7 +609,7 @@ func readFilesMaps(fdst Fs, fdstIncludeAll bool, fsrc Fs, fsrcIncludeAll bool, d
 
 	list := func(fs Fs, includeAll bool, pMap *map[string]Object, pErr *error) {
 		defer wg.Done()
-		Logf(fs, "Building file list")
+		Infof(fs, "Building file list")
 		files, listErr := readFilesMap(fs, includeAll, dir)
 		if listErr != nil {
 			Errorf(fs, "Error building file list: %v", listErr)
@@ -757,7 +757,7 @@ func Check(fdst, fsrc Fs) error {
 		}()
 	}
 
-	Logf(fdst, "Waiting for checks to finish")
+	Infof(fdst, "Waiting for checks to finish")
 	checkerWg.Wait()
 	Logf(fdst, "%d differences found", Stats.GetErrors())
 	if noHashes > 0 {
@@ -1009,7 +1009,7 @@ func dedupeRename(remote string, objs []Object) {
 				Errorf(o, "Failed to rename: %v", err)
 				continue
 			}
-			Logf(newObj, "renamed from: %v", o)
+			Infof(newObj, "renamed from: %v", o)
 		} else {
 			Logf(remote, "Not renaming to %q as --dry-run", newName)
 		}
@@ -1145,7 +1145,7 @@ var _ pflag.Value = (*DeduplicateMode)(nil)
 // delete all but one or rename them to be different. Only useful with
 // Google Drive which can have duplicate file names.
 func Deduplicate(f Fs, mode DeduplicateMode) error {
-	Logf(f, "Looking for duplicates using %v mode.", mode)
+	Infof(f, "Looking for duplicates using %v mode.", mode)
 	files := map[string][]Object{}
 	list := NewLister().Start(f, "")
 	for {
diff --git a/fs/sync.go b/fs/sync.go
index 72b24c495..8d2445ae0 100644
--- a/fs/sync.go
+++ b/fs/sync.go
@@ -392,7 +392,7 @@ func (s *syncCopyMove) startCheckers() {
 // This stops the background checkers
 func (s *syncCopyMove) stopCheckers() {
 	close(s.toBeChecked)
-	Logf(s.fdst, "Waiting for checks to finish")
+	Infof(s.fdst, "Waiting for checks to finish")
 	s.checkerWg.Wait()
 }
 
@@ -407,7 +407,7 @@ func (s *syncCopyMove) startTransfers() {
 // This stops the background transfers
 func (s *syncCopyMove) stopTransfers() {
 	close(s.toBeUploaded)
-	Logf(s.fdst, "Waiting for transfers to finish")
+	Infof(s.fdst, "Waiting for transfers to finish")
 	s.transfersWg.Wait()
 }
 
@@ -428,7 +428,7 @@ func (s *syncCopyMove) stopRenamers() {
 		return
 	}
 	close(s.toBeRenamed)
-	Logf(s.fdst, "Waiting for renames to finish")
+	Infof(s.fdst, "Waiting for renames to finish")
 	s.renamerWg.Wait()
 }
 
@@ -551,7 +551,7 @@ func (s *syncCopyMove) popRenameMap(hash string) (dst Object) {
 // makeRenameMap builds a map of the destination files by hash that
 // match sizes in the slice of objects in s.renameCheck
 func (s *syncCopyMove) makeRenameMap() {
-	Debugf(s.fdst, "Making map for --track-renames")
+	Infof(s.fdst, "Making map for --track-renames")
 
 	// first make a map of possible sizes we need to check
 	possibleSizes := map[int64]struct{}{}
@@ -584,7 +584,7 @@ func (s *syncCopyMove) makeRenameMap() {
 		}()
 	}
 	wg.Wait()
-	Debugf(s.fdst, "Finished making map for --track-renames")
+	Infof(s.fdst, "Finished making map for --track-renames")
 }
 
 // tryRename renames a src object when doing track renames if
@@ -614,7 +614,7 @@ func (s *syncCopyMove) tryRename(src Object) bool {
 	delete(s.dstFiles, dst.Remote())
 	s.dstFilesMu.Unlock()
 
-	Debugf(src, "Renamed from %q", dst.Remote())
+	Infof(src, "Renamed from %q", dst.Remote())
 	return true
 }
 
@@ -1110,9 +1110,9 @@ func MoveDir(fdst, fsrc Fs) error {
 		err := fdstDirMove(fsrc)
 		switch err {
 		case ErrorCantDirMove, ErrorDirExists:
-			Debugf(fdst, "Server side directory move failed - fallback to file moves: %v", err)
+			Infof(fdst, "Server side directory move failed - fallback to file moves: %v", err)
 		case nil:
-			Debugf(fdst, "Server side directory move succeeded")
+			Infof(fdst, "Server side directory move succeeded")
 			return nil
 		default:
 			Stats.Error()
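
The patch above applies a simple severity convention: Errorf for failures, Logf for user-facing notices, Infof for routine progress, and Debugf for developer-level detail. The sketch below is not rclone code and not part of the patch; it is a minimal, self-contained illustration of that kind of level-gated logger, with hypothetical names (Level, verbosity, logAt) and simplified signatures (rclone's fs helpers also take the object being logged about as their first argument, as seen in the diff).

package main

import "log"

// Level models the severity convention used in the patch:
// Error > Notice (Logf) > Info (Infof) > Debug (Debugf).
type Level int

const (
	LevelError Level = iota
	LevelNotice
	LevelInfo
	LevelDebug
)

// verbosity is the configured threshold; raising it reveals more detail.
var verbosity = LevelNotice

// logAt prints the message only when its level is at or below the
// configured verbosity threshold.
func logAt(level Level, prefix, format string, args ...interface{}) {
	if level > verbosity {
		return
	}
	log.Printf(prefix+format, args...)
}

func Errorf(format string, args ...interface{}) { logAt(LevelError, "ERROR: ", format, args...) }
func Logf(format string, args ...interface{})   { logAt(LevelNotice, "NOTICE: ", format, args...) }
func Infof(format string, args ...interface{})  { logAt(LevelInfo, "INFO: ", format, args...) }
func Debugf(format string, args ...interface{}) { logAt(LevelDebug, "DEBUG: ", format, args...) }

func main() {
	Logf("Duplicate file detected")                          // shown by default
	Infof("Waiting for checks to finish")                    // shown once verbosity >= LevelInfo
	Debugf("Failed to read metadata: %v", "example error")   // shown only at LevelDebug
	Errorf("Couldn't %s: %v", "delete", "permission denied") // always shown
}

With verbosity left at LevelNotice, only the Logf and Errorf lines print, which mirrors why the patch demotes chatty progress and retry messages from Logf to Infof or Debugf.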