Rename log functions and factor into own file

Nick Craig-Wood 2017-02-09 11:01:20 +00:00
parent 40c02989f1
commit 80f53176d9
43 changed files with 514 additions and 483 deletions
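Every hunk below applies the same mechanical substitution: fs.Debug becomes fs.Debugf, fs.Log becomes fs.Logf, and fs.ErrorLog becomes fs.Errorf. As a minimal sketch of what the factored-out helpers might look like — signatures inferred from the call sites in this diff, not the actual log.go — the first argument names the object or path the message is about and may be nil; Config.Verbose and Config.Quiet are the same settings the call sites below already consult:

package fs

import "log"

// output prefixes the message with the object it concerns, if any.
func output(o interface{}, format string, a ...interface{}) {
	if o != nil {
		format = "%v: " + format
		a = append([]interface{}{o}, a...)
	}
	log.Printf(format, a...)
}

// Debugf writes debug messages, shown only when verbose output is on.
func Debugf(o interface{}, format string, a ...interface{}) {
	if Config.Verbose {
		output(o, format, a...)
	}
}

// Logf writes normal log messages, suppressed in quiet mode.
func Logf(o interface{}, format string, a ...interface{}) {
	if !Config.Quiet {
		output(o, format, a...)
	}
}

// Errorf writes error messages unconditionally.
func Errorf(o interface{}, format string, a ...interface{}) {
	output(o, format, a...)
}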

View file

@ -34,7 +34,7 @@ quicktest:
# Do source code quality checks
check: rclone
ifdef GO_LATEST
go vet $(GO_FILES)
go tool vet -printfuncs Debugf,Infof,Logf,Errorf . 2>&1 | grep -E -v ^vendor/ ; test $$? -eq 1
errcheck $(GO_FILES)
find . -name \*.go | grep -v /vendor/ | xargs goimports -d | grep . ; test $$? -eq 1
go list ./... | grep -v /vendor/ | xargs -i golint {} | grep -E -v '(StorageUrl|CdnUrl)' ; test $$? -eq 1
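The new vet line is the point of the printf-style suffixes: -printfuncs tells go vet to treat the renamed helpers as Printf-like functions, so mismatched format verbs and argument counts are caught at check time — something the old un-suffixed names did not signal. An illustrative pair of calls it should now flag (f, n, elapsed and tries are hypothetical):

fs.Debugf(f, "read %d bytes from %q", n)     // flagged: no argument for %q
fs.Logf(f, "finished in %s", elapsed, tries) // flagged: one argument too many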

View file

@ -157,14 +157,14 @@ func (f *Fs) shouldRetry(resp *http.Response, err error) (bool, error) {
if resp != nil {
if resp.StatusCode == 401 {
f.tokenRenewer.Invalidate()
fs.Debug(f, "401 error received - invalidating token")
fs.Debugf(f, "401 error received - invalidating token")
return true, err
}
// Work around receiving this error sporadically on authentication
//
// HTTP code 403: "403 Forbidden", response body: {"message":"Authorization header requires 'Credential' parameter. Authorization header requires 'Signature' parameter. Authorization header requires 'SignedHeaders' parameter. Authorization header requires existence of either a 'X-Amz-Date' or a 'Date' header. Authorization=Bearer"}
if resp.StatusCode == 403 && strings.Contains(err.Error(), "Authorization header requires") {
fs.Log(f, "403 \"Authorization header requires...\" error received - retry")
fs.Logf(f, "403 \"Authorization header requires...\" error received - retry")
return true, err
}
}
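The pattern in this hunk — inspect the response, log what happened, and return a (retry, err) pair for the pacer — recurs throughout the backends touched by this commit. A condensed sketch of the two special cases above, assuming net/http and strings (not the full rclone implementation):

func shouldRetrySketch(resp *http.Response, err error) (bool, error) {
	if resp != nil {
		if resp.StatusCode == 401 {
			return true, err // token invalidated, renewed by the caller
		}
		if resp.StatusCode == 403 && err != nil &&
			strings.Contains(err.Error(), "Authorization header requires") {
			return true, err // known transient auth signing error
		}
	}
	return false, err
}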
@ -280,7 +280,7 @@ func (f *Fs) NewObject(remote string) (fs.Object, error) {
// FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error) {
//fs.Debug(f, "FindLeaf(%q, %q)", pathID, leaf)
//fs.Debugf(f, "FindLeaf(%q, %q)", pathID, leaf)
folder := acd.FolderFromId(pathID, f.c.Nodes)
var resp *http.Response
var subFolder *acd.Folder
@ -290,18 +290,18 @@ func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err er
})
if err != nil {
if err == acd.ErrorNodeNotFound {
//fs.Debug(f, "...Not found")
//fs.Debugf(f, "...Not found")
return "", false, nil
}
//fs.Debug(f, "...Error %v", err)
//fs.Debugf(f, "...Error %v", err)
return "", false, err
}
if subFolder.Status != nil && *subFolder.Status != statusAvailable {
fs.Debug(f, "Ignoring folder %q in state %q", leaf, *subFolder.Status)
fs.Debugf(f, "Ignoring folder %q in state %q", leaf, *subFolder.Status)
time.Sleep(1 * time.Second) // FIXME wait for problem to go away!
return "", false, nil
}
//fs.Debug(f, "...Found(%q, %v)", *subFolder.Id, leaf)
//fs.Debugf(f, "...Found(%q, %v)", *subFolder.Id, leaf)
return *subFolder.Id, true, nil
}
@ -397,7 +397,7 @@ func (f *Fs) listAll(dirID string, title string, directoriesOnly bool, filesOnly
// ListDir reads the directory specified by the job into out, returning any more jobs
func (f *Fs) ListDir(out fs.ListOpts, job dircache.ListDirJob) (jobs []dircache.ListDirJob, err error) {
fs.Debug(f, "Reading %q", job.Path)
fs.Debugf(f, "Reading %q", job.Path)
maxTries := fs.Config.LowLevelRetries
for tries := 1; tries <= maxTries; tries++ {
_, err = f.listAll(job.DirID, "", false, false, func(node *acd.Node) bool {
@ -433,7 +433,7 @@ func (f *Fs) ListDir(out fs.ListOpts, job dircache.ListDirJob) (jobs []dircache.
return false
})
if fs.IsRetryError(err) {
fs.Debug(f, "Directory listing error for %q: %v - low level retry %d/%d", job.Path, err, tries, maxTries)
fs.Debugf(f, "Directory listing error for %q: %v - low level retry %d/%d", job.Path, err, tries, maxTries)
continue
}
if err != nil {
@ -441,7 +441,7 @@ func (f *Fs) ListDir(out fs.ListOpts, job dircache.ListDirJob) (jobs []dircache.
}
break
}
fs.Debug(f, "Finished reading %q", job.Path)
fs.Debugf(f, "Finished reading %q", job.Path)
return jobs, err
}
@ -488,13 +488,13 @@ func (f *Fs) checkUpload(resp *http.Response, in io.Reader, src fs.ObjectInfo, i
buf := make([]byte, 1)
n, err := in.Read(buf)
if !(n == 0 && err == io.EOF) {
fs.Debug(src, "Upload error detected but didn't finish upload: %v (%q)", inErr, httpStatus)
fs.Debugf(src, "Upload error detected but didn't finish upload: %v (%q)", inErr, httpStatus)
return false, inInfo, inErr
}
// Don't wait for uploads - assume they will appear later
if *uploadWaitPerGB <= 0 {
fs.Debug(src, "Upload error detected but waiting disabled: %v (%q)", inErr, httpStatus)
fs.Debugf(src, "Upload error detected but waiting disabled: %v (%q)", inErr, httpStatus)
return false, inInfo, inErr
}
@ -505,27 +505,27 @@ func (f *Fs) checkUpload(resp *http.Response, in io.Reader, src fs.ObjectInfo, i
const sleepTime = 5 * time.Second // sleep between tries
retries := int((timeToWait + sleepTime - 1) / sleepTime) // number of retries, rounded up
fs.Debug(src, "Error detected after finished upload - waiting to see if object was uploaded correctly: %v (%q)", inErr, httpStatus)
fs.Debugf(src, "Error detected after finished upload - waiting to see if object was uploaded correctly: %v (%q)", inErr, httpStatus)
remote := src.Remote()
for i := 1; i <= retries; i++ {
o, err := f.NewObject(remote)
if err == fs.ErrorObjectNotFound {
fs.Debug(src, "Object not found - waiting (%d/%d)", i, retries)
fs.Debugf(src, "Object not found - waiting (%d/%d)", i, retries)
} else if err != nil {
fs.Debug(src, "Object returned error - waiting (%d/%d): %v", i, retries, err)
fs.Debugf(src, "Object returned error - waiting (%d/%d): %v", i, retries, err)
} else {
if src.Size() == o.Size() {
fs.Debug(src, "Object found with correct size %d after waiting (%d/%d) - %v - returning with no error", src.Size(), i, retries, sleepTime*time.Duration(i-1))
fs.Debugf(src, "Object found with correct size %d after waiting (%d/%d) - %v - returning with no error", src.Size(), i, retries, sleepTime*time.Duration(i-1))
info = &acd.File{
Node: o.(*Object).info,
}
return true, info, nil
}
fs.Debug(src, "Object found but wrong size %d vs %d - waiting (%d/%d)", src.Size(), o.Size(), i, retries)
fs.Debugf(src, "Object found but wrong size %d vs %d - waiting (%d/%d)", src.Size(), o.Size(), i, retries)
}
time.Sleep(sleepTime)
}
fs.Debug(src, "Giving up waiting for object - returning original error: %v (%q)", inErr, httpStatus)
fs.Debugf(src, "Giving up waiting for object - returning original error: %v (%q)", inErr, httpStatus)
return false, inInfo, inErr
}
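In short, checkUpload turns "the upload returned an error but the body was fully sent" into a polling problem: wait for a period derived from the file size and the uploadWaitPerGB setting, checking whether the object surfaced with the right length. A condensed sketch of the loop (the *acd.File result handling is elided):

// retries is the poll budget, rounded up from the total wait time.
const sleepTime = 5 * time.Second
retries := int((timeToWait + sleepTime - 1) / sleepTime)
for i := 1; i <= retries; i++ {
	o, err := f.NewObject(remote)
	if err == nil && o.Size() == src.Size() {
		return true, nil // the upload made it after all
	}
	time.Sleep(sleepTime)
}
return false, inErr // give up; surface the original error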
@ -558,7 +558,7 @@ func (f *Fs) Put(in io.Reader, src fs.ObjectInfo) (fs.Object, error) {
return nil, err
}
if size > warnFileSize {
fs.Debug(f, "Warning: file %q may fail because it is too big. Use --max-size=%dM to skip large files.", remote, warnFileSize>>20)
fs.Debugf(f, "Warning: file %q may fail because it is too big. Use --max-size=%dM to skip large files.", remote, warnFileSize>>20)
}
folder := acd.FolderFromId(directoryID, o.fs.c.Nodes)
var info *acd.File
@ -607,7 +607,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
// go test -v -run '^Test(Setup|Init|FsMkdir|FsPutFile1|FsPutFile2|FsUpdateFile1|FsMove)$'
srcObj, ok := src.(*Object)
if !ok {
fs.Debug(src, "Can't move - not same remote type")
fs.Debugf(src, "Can't move - not same remote type")
return nil, fs.ErrorCantMove
}
@ -648,7 +648,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
// finished if src not found and dst found
break
}
fs.Debug(src, "Wait for directory listing to update after move %d/%d", i, fs.Config.LowLevelRetries)
fs.Debugf(src, "Wait for directory listing to update after move %d/%d", i, fs.Config.LowLevelRetries)
time.Sleep(1 * time.Second)
}
return dstObj, nil
@ -672,19 +672,19 @@ func (f *Fs) DirMove(src fs.Fs) (err error) {
// go test -v -run '^Test(Setup|Init|FsMkdir|FsPutFile1|FsPutFile2|FsUpdateFile1|FsDirMove)$'
srcFs, ok := src.(*Fs)
if !ok {
fs.Debug(src, "DirMove error: not same remote type")
fs.Debugf(src, "DirMove error: not same remote type")
return fs.ErrorCantDirMove
}
// Check if destination exists
if f.dirCache.FoundRoot() {
fs.Debug(src, "DirMove error: destination exists")
fs.Debugf(src, "DirMove error: destination exists")
return fs.ErrorDirExists
}
// Refuse to move to or from the root
if f.root == "" || srcFs.root == "" {
fs.Debug(src, "DirMove error: Can't move root")
fs.Debugf(src, "DirMove error: Can't move root")
return errors.New("can't move root directory")
}
@ -697,12 +697,12 @@ func (f *Fs) DirMove(src fs.Fs) (err error) {
// Find the ID of the source and make a node from it
err = srcFs.dirCache.FindRoot(false)
if err != nil {
fs.Debug(src, "DirMove error: error finding src root: %v", err)
fs.Debugf(src, "DirMove error: error finding src root: %v", err)
return err
}
srcDirectoryID, err := srcFs.dirCache.RootParentID()
if err != nil {
fs.Debug(src, "DirMove error: error finding src RootParentID: %v", err)
fs.Debugf(src, "DirMove error: error finding src RootParentID: %v", err)
return err
}
srcLeaf, _ := dircache.SplitPath(srcFs.root)
@ -715,12 +715,12 @@ func (f *Fs) DirMove(src fs.Fs) (err error) {
return srcFs.shouldRetry(nil, err)
})
if err != nil {
fs.Debug(src, "DirMove error: error reading src metadata: %v", err)
fs.Debugf(src, "DirMove error: error reading src metadata: %v", err)
return err
}
err = json.Unmarshal([]byte(jsonStr), &srcInfo)
if err != nil {
fs.Debug(src, "DirMove error: error reading unpacking src metadata: %v", err)
fs.Debugf(src, "DirMove error: error reading unpacking src metadata: %v", err)
return err
}
@ -759,7 +759,7 @@ func (f *Fs) purgeCheck(dir string, check bool) error {
empty = false
return true
default:
fs.Debug("Found ASSET %s", *node.Id)
fs.Debugf("Found ASSET %s", *node.Id)
}
return false
})
@ -817,7 +817,7 @@ func (f *Fs) Hashes() fs.HashSet {
//func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
// srcObj, ok := src.(*Object)
// if !ok {
// fs.Debug(src, "Can't copy - not same remote type")
// fs.Debugf(src, "Can't copy - not same remote type")
// return nil, fs.ErrorCantCopy
// }
// srcFs := srcObj.fs
@ -917,12 +917,12 @@ func (o *Object) readMetaData() (err error) {
func (o *Object) ModTime() time.Time {
err := o.readMetaData()
if err != nil {
fs.Log(o, "Failed to read metadata: %v", err)
fs.Logf(o, "Failed to read metadata: %v", err)
return time.Now()
}
modTime, err := time.Parse(timeFormat, *o.info.ModifiedDate)
if err != nil {
fs.Log(o, "Failed to read mtime from object: %v", err)
fs.Logf(o, "Failed to read mtime from object: %v", err)
return time.Now()
}
return modTime
@ -943,7 +943,7 @@ func (o *Object) Storable() bool {
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
bigObject := o.Size() >= int64(tempLinkThreshold)
if bigObject {
fs.Debug(o, "Dowloading large object via tempLink")
fs.Debugf(o, "Dowloading large object via tempLink")
}
file := acd.File{Node: o.info}
var resp *http.Response
@ -1051,30 +1051,30 @@ func (f *Fs) removeParent(info *acd.Node, parentID string) error {
// moveNode moves the node given from the srcLeaf,srcDirectoryID to
// the dstLeaf,dstDirectoryID
func (f *Fs) moveNode(name, dstLeaf, dstDirectoryID string, srcInfo *acd.Node, srcLeaf, srcDirectoryID string, useDirErrorMsgs bool) (err error) {
// fs.Debug(name, "moveNode dst(%q,%s) <- src(%q,%s)", dstLeaf, dstDirectoryID, srcLeaf, srcDirectoryID)
// fs.Debugf(name, "moveNode dst(%q,%s) <- src(%q,%s)", dstLeaf, dstDirectoryID, srcLeaf, srcDirectoryID)
cantMove := fs.ErrorCantMove
if useDirErrorMsgs {
cantMove = fs.ErrorCantDirMove
}
if len(srcInfo.Parents) > 1 && srcLeaf != dstLeaf {
fs.Debug(name, "Move error: object is attached to multiple parents and should be renamed. This would change the name of the node in all parents.")
fs.Debugf(name, "Move error: object is attached to multiple parents and should be renamed. This would change the name of the node in all parents.")
return cantMove
}
if srcLeaf != dstLeaf {
// fs.Debug(name, "renaming")
// fs.Debugf(name, "renaming")
_, err = f.renameNode(srcInfo, dstLeaf)
if err != nil {
fs.Debug(name, "Move: quick path rename failed: %v", err)
fs.Debugf(name, "Move: quick path rename failed: %v", err)
goto OnConflict
}
}
if srcDirectoryID != dstDirectoryID {
// fs.Debug(name, "trying parent replace: %s -> %s", oldParentID, newParentID)
// fs.Debugf(name, "trying parent replace: %s -> %s", oldParentID, newParentID)
err = f.replaceParent(srcInfo, srcDirectoryID, dstDirectoryID)
if err != nil {
fs.Debug(name, "Move: quick path parent replace failed: %v", err)
fs.Debugf(name, "Move: quick path parent replace failed: %v", err)
return err
}
}
@ -1082,38 +1082,38 @@ func (f *Fs) moveNode(name, dstLeaf, dstDirectoryID string, srcInfo *acd.Node, s
return nil
OnConflict:
fs.Debug(name, "Could not directly rename file, presumably because there was a file with the same name already. Instead, the file will now be trashed where such operations do not cause errors. It will be restored to the correct parent after. If any of the subsequent calls fails, the rename/move will be in an invalid state.")
fs.Debugf(name, "Could not directly rename file, presumably because there was a file with the same name already. Instead, the file will now be trashed where such operations do not cause errors. It will be restored to the correct parent after. If any of the subsequent calls fails, the rename/move will be in an invalid state.")
// fs.Debug(name, "Trashing file")
// fs.Debugf(name, "Trashing file")
err = f.removeNode(srcInfo)
if err != nil {
fs.Debug(name, "Move: remove node failed: %v", err)
fs.Debugf(name, "Move: remove node failed: %v", err)
return err
}
// fs.Debug(name, "Renaming file")
// fs.Debugf(name, "Renaming file")
_, err = f.renameNode(srcInfo, dstLeaf)
if err != nil {
fs.Debug(name, "Move: rename node failed: %v", err)
fs.Debugf(name, "Move: rename node failed: %v", err)
return err
}
// note: replacing parent is forbidden by API, modifying them individually is
// okay though
// fs.Debug(name, "Adding target parent")
// fs.Debugf(name, "Adding target parent")
err = f.addParent(srcInfo, dstDirectoryID)
if err != nil {
fs.Debug(name, "Move: addParent failed: %v", err)
fs.Debugf(name, "Move: addParent failed: %v", err)
return err
}
// fs.Debug(name, "removing original parent")
// fs.Debugf(name, "removing original parent")
err = f.removeParent(srcInfo, srcDirectoryID)
if err != nil {
fs.Debug(name, "Move: removeParent failed: %v", err)
fs.Debugf(name, "Move: removeParent failed: %v", err)
return err
}
// fs.Debug(name, "Restoring")
// fs.Debugf(name, "Restoring")
_, err = f.restoreNode(srcInfo)
if err != nil {
fs.Debug(name, "Move: restoreNode node failed: %v", err)
fs.Debugf(name, "Move: restoreNode node failed: %v", err)
return err
}
return nil

View file

@ -173,12 +173,12 @@ func (f *Fs) shouldRetryNoReauth(resp *http.Response, err error) (bool, error) {
var err error
retryAfter, err = strconv.Atoi(retryAfterString)
if err != nil {
fs.ErrorLog(f, "Malformed %s header %q: %v", retryAfterHeader, retryAfterString, err)
fs.Errorf(f, "Malformed %s header %q: %v", retryAfterHeader, retryAfterString, err)
}
}
retryAfterDuration := time.Duration(retryAfter) * time.Second
if f.pacer.GetSleep() < retryAfterDuration {
fs.Debug(f, "Setting sleep to %v after error: %v", retryAfterDuration, err)
fs.Debugf(f, "Setting sleep to %v after error: %v", retryAfterDuration, err)
// We set 1/2 the value here because the pacer will double it immediately
f.pacer.SetSleep(retryAfterDuration / 2)
}
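Two details worth noting in this hunk: B2 sends Retry-After as whole seconds, and the pacer sleep is set to half the requested value because the pacer doubles its sleep when the retryable error is returned. Isolated, the logic looks like this (the header name is assumed to be the standard Retry-After behind the retryAfterHeader constant):

if s := resp.Header.Get("Retry-After"); s != "" {
	secs, err := strconv.Atoi(s)
	if err != nil {
		fs.Errorf(f, "Malformed Retry-After header %q: %v", s, err)
	}
	if d := time.Duration(secs) * time.Second; f.pacer.GetSleep() < d {
		f.pacer.SetSleep(d / 2) // the pacer doubles this on retry
	}
}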
@ -191,7 +191,7 @@ func (f *Fs) shouldRetryNoReauth(resp *http.Response, err error) (bool, error) {
// deserve to be retried. It returns the err as a convenience
func (f *Fs) shouldRetry(resp *http.Response, err error) (bool, error) {
if resp != nil && resp.StatusCode == 401 {
fs.Debug(f, "Unauthorized: %v", err)
fs.Debugf(f, "Unauthorized: %v", err)
// Reauth
authErr := f.authorizeAccount()
if authErr != nil {
@ -208,7 +208,7 @@ func errorHandler(resp *http.Response) error {
errResponse := new(api.Error)
err := rest.DecodeJSON(resp, &errResponse)
if err != nil {
fs.Debug(nil, "Couldn't decode error response: %v", err)
fs.Debugf(nil, "Couldn't decode error response: %v", err)
}
if errResponse.Code == "" {
errResponse.Code = "unknown"
@ -259,7 +259,7 @@ func NewFs(name, root string) (fs.Fs, error) {
if *b2TestMode != "" {
testMode := strings.TrimSpace(*b2TestMode)
f.srv.SetHeader(testModeHeader, testMode)
fs.Debug(f, "Setting test header \"%s: %s\"", testModeHeader, testMode)
fs.Debugf(f, "Setting test header \"%s: %s\"", testModeHeader, testMode)
}
// Fill up the buffer tokens
for i := 0; i < fs.Config.Transfers; i++ {
@ -373,7 +373,7 @@ func (f *Fs) getUploadBlock() []byte {
if buf == nil {
buf = make([]byte, chunkSize)
}
// fs.Debug(f, "Getting upload block %p", buf)
// fs.Debugf(f, "Getting upload block %p", buf)
return buf
}
@ -383,7 +383,7 @@ func (f *Fs) putUploadBlock(buf []byte) {
if len(buf) != int(chunkSize) {
panic("bad blocksize returned to pool")
}
// fs.Debug(f, "Returning upload block %p", buf)
// fs.Debugf(f, "Returning upload block %p", buf)
f.bufferTokens <- buf
}
@ -490,7 +490,7 @@ func (f *Fs) list(dir string, level int, prefix string, limit int, hidden bool,
return nil
}
if !strings.HasPrefix(file.Name, f.root) {
fs.Log(f, "Odd name received %q", file.Name)
fs.Logf(f, "Odd name received %q", file.Name)
continue
}
remote := file.Name[len(f.root):]
@ -700,7 +700,7 @@ func (f *Fs) Mkdir(dir string) error {
return nil
}
if getBucketErr != fs.ErrorDirNotFound {
fs.Debug(f, "Error checking bucket exists: %v", getBucketErr)
fs.Debugf(f, "Error checking bucket exists: %v", getBucketErr)
}
}
}
@ -807,13 +807,13 @@ func (f *Fs) purge(oldOnly bool) error {
fs.Stats.Checking(remote)
if oldOnly && last != remote {
if object.Action == "hide" {
fs.Debug(remote, "Deleting current version (id %q) as it is a hide marker", object.ID)
fs.Debugf(remote, "Deleting current version (id %q) as it is a hide marker", object.ID)
toBeDeleted <- object
} else {
fs.Debug(remote, "Not deleting current version (id %q) %q", object.ID, object.Action)
fs.Debugf(remote, "Not deleting current version (id %q) %q", object.ID, object.Action)
}
} else {
fs.Debug(remote, "Deleting (id %q)", object.ID)
fs.Debugf(remote, "Deleting (id %q)", object.ID)
toBeDeleted <- object
}
last = remote
@ -986,7 +986,7 @@ func (o *Object) parseTimeString(timeString string) (err error) {
}
unixMilliseconds, err := strconv.ParseInt(timeString, 10, 64)
if err != nil {
fs.Debug(o, "Failed to parse mod time string %q: %v", timeString, err)
fs.Debugf(o, "Failed to parse mod time string %q: %v", timeString, err)
return err
}
o.modTime = time.Unix(unixMilliseconds/1E3, (unixMilliseconds%1E3)*1E6).UTC()
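parseTimeString decodes B2's modification times, which are stored as decimal milliseconds since the Unix epoch. The conversion on the last line splits the count into whole seconds and remainder nanoseconds:

ms, err := strconv.ParseInt(timeString, 10, 64)
if err != nil {
	return err
}
// 1e3 ms per second, 1e6 ns per ms; .UTC() normalises the zone.
modTime := time.Unix(ms/1e3, (ms%1e3)*1e6).UTC()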
@ -1108,12 +1108,12 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
// Read sha1 from header if it isn't set
if o.sha1 == "" {
o.sha1 = resp.Header.Get(sha1Header)
fs.Debug(o, "Reading sha1 from header - %q", o.sha1)
fs.Debugf(o, "Reading sha1 from header - %q", o.sha1)
// if sha1 header is "none" (in big files), then need
// to read it from the metadata
if o.sha1 == "none" {
o.sha1 = resp.Header.Get(sha1InfoHeader)
fs.Debug(o, "Reading sha1 from info - %q", o.sha1)
fs.Debugf(o, "Reading sha1 from info - %q", o.sha1)
}
}
// Don't check length or hash on partial content
@ -1299,7 +1299,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo) (err error) {
retry, err := o.fs.shouldRetry(resp, err)
// On retryable error clear UploadURL
if retry {
fs.Debug(o, "Clearing upload URL because of error: %v", err)
fs.Debugf(o, "Clearing upload URL because of error: %v", err)
upload = nil
}
return retry, err

View file

@ -132,7 +132,7 @@ func (up *largeUpload) transferChunk(part int64, body []byte) error {
up.sha1s[part-1] = calculatedSHA1
size := int64(len(body))
err := up.f.pacer.Call(func() (bool, error) {
fs.Debug(up.o, "Sending chunk %d length %d", part, len(body))
fs.Debugf(up.o, "Sending chunk %d length %d", part, len(body))
// Get upload URL
upload, err := up.getUploadURL()
@ -181,16 +181,16 @@ func (up *largeUpload) transferChunk(part int64, body []byte) error {
retry, err := up.f.shouldRetry(resp, err)
// On retryable error clear PartUploadURL
if retry {
fs.Debug(up.o, "Clearing part upload URL because of error: %v", err)
fs.Debugf(up.o, "Clearing part upload URL because of error: %v", err)
upload = nil
}
up.returnUploadURL(upload)
return retry, err
})
if err != nil {
fs.Debug(up.o, "Error sending chunk %d: %v", part, err)
fs.Debugf(up.o, "Error sending chunk %d: %v", part, err)
} else {
fs.Debug(up.o, "Done sending chunk %d", part)
fs.Debugf(up.o, "Done sending chunk %d", part)
}
return err
}
@ -235,7 +235,7 @@ func (up *largeUpload) cancel() error {
// Upload uploads the chunks from the input
func (up *largeUpload) Upload() error {
fs.Debug(up.o, "Starting upload of large file in %d chunks (id %q)", up.parts, up.id)
fs.Debugf(up.o, "Starting upload of large file in %d chunks (id %q)", up.parts, up.id)
remaining := up.size
errs := make(chan error, 1)
var wg sync.WaitGroup
@ -289,14 +289,14 @@ outer:
}
}
if err != nil {
fs.Debug(up.o, "Cancelling large file upload due to error: %v", err)
fs.Debugf(up.o, "Cancelling large file upload due to error: %v", err)
cancelErr := up.cancel()
if cancelErr != nil {
fs.ErrorLog(up.o, "Failed to cancel large file upload: %v", cancelErr)
fs.Errorf(up.o, "Failed to cancel large file upload: %v", cancelErr)
}
return err
}
// Check any errors
fs.Debug(up.o, "Finishing large file upload")
fs.Debugf(up.o, "Finishing large file upload")
return up.finish()
}
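Upload fans the chunks out to goroutines and collects failures through a one-slot error channel, so the first error wins and later ones are discarded without blocking the senders. The idiom, reduced (readChunk is a hypothetical stand-in for the chunk buffering elided here):

errs := make(chan error, 1) // capacity 1: keep only the first error
var wg sync.WaitGroup
for part := int64(1); part <= up.parts; part++ {
	wg.Add(1)
	go func(part int64) {
		defer wg.Done()
		if err := up.transferChunk(part, readChunk(part)); err != nil {
			select {
			case errs <- err:
			default: // an earlier error is already queued
			}
		}
	}(part)
}
wg.Wait()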

View file

@ -247,22 +247,22 @@ func Run(Retry bool, showStats bool, cmd *cobra.Command, f func() error) {
err = f()
if !Retry || (err == nil && !fs.Stats.Errored()) {
if try > 1 {
fs.ErrorLog(nil, "Attempt %d/%d succeeded", try, *retries)
fs.Errorf(nil, "Attempt %d/%d succeeded", try, *retries)
}
break
}
if fs.IsFatalError(err) {
fs.ErrorLog(nil, "Fatal error received - not attempting retries")
fs.Errorf(nil, "Fatal error received - not attempting retries")
break
}
if fs.IsNoRetryError(err) {
fs.ErrorLog(nil, "Can't retry this error - not attempting retries")
fs.Errorf(nil, "Can't retry this error - not attempting retries")
break
}
if err != nil {
fs.ErrorLog(nil, "Attempt %d/%d failed with %d errors and: %v", try, *retries, fs.Stats.GetErrors(), err)
fs.Errorf(nil, "Attempt %d/%d failed with %d errors and: %v", try, *retries, fs.Stats.GetErrors(), err)
} else {
fs.ErrorLog(nil, "Attempt %d/%d failed with %d errors", try, *retries, fs.Stats.GetErrors())
fs.Errorf(nil, "Attempt %d/%d failed with %d errors", try, *retries, fs.Stats.GetErrors())
}
if try < *retries {
fs.Stats.ResetErrors()
@ -275,10 +275,10 @@ func Run(Retry bool, showStats bool, cmd *cobra.Command, f func() error) {
log.Fatalf("Failed to %s: %v", cmd.Name(), err)
}
if showStats && (!fs.Config.Quiet || fs.Stats.Errored() || *statsInterval > 0) {
fs.Log(nil, "%s", fs.Stats)
fs.Logf(nil, "%s", fs.Stats)
}
if fs.Config.Verbose {
fs.Debug(nil, "Go routines at exit %d\n", runtime.NumGoroutine())
fs.Debugf(nil, "Go routines at exit %d\n", runtime.NumGoroutine())
}
if fs.Stats.Errored() {
os.Exit(1)
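Run is the shared command wrapper: it retries the whole operation up to *retries times, bails out immediately on fatal and no-retry errors, and reports each failed attempt at error level. A skeleton of the loop (the non-retrying mode and the success-after-retry message are omitted):

for try := 1; try <= *retries; try++ {
	err = f()
	if err == nil && !fs.Stats.Errored() {
		break // success
	}
	if fs.IsFatalError(err) || fs.IsNoRetryError(err) {
		break // retrying cannot help
	}
	fs.Errorf(nil, "Attempt %d/%d failed with %d errors and: %v",
		try, *retries, fs.Stats.GetErrors(), err)
	if try < *retries {
		fs.Stats.ResetErrors()
	}
}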
@ -330,7 +330,7 @@ func initConfig() {
}
_, err = f.Seek(0, os.SEEK_END)
if err != nil {
fs.ErrorLog(nil, "Failed to seek log file to end: %v", err)
fs.Errorf(nil, "Failed to seek log file to end: %v", err)
}
log.SetOutput(f)
fs.DebugLogger.SetOutput(f)
@ -341,11 +341,11 @@ func initConfig() {
fs.LoadConfig()
// Write the args for debug purposes
fs.Debug("rclone", "Version %q starting with parameters %q", fs.Version, os.Args)
fs.Debugf("rclone", "Version %q starting with parameters %q", fs.Version, os.Args)
// Setup CPU profiling if desired
if *cpuProfile != "" {
fs.Log(nil, "Creating CPU profile %q\n", *cpuProfile)
fs.Logf(nil, "Creating CPU profile %q\n", *cpuProfile)
f, err := os.Create(*cpuProfile)
if err != nil {
fs.Stats.Error()
@ -362,7 +362,7 @@ func initConfig() {
// Setup memory profiling if desired
if *memProfile != "" {
defer func() {
fs.Log(nil, "Saving Memory profile %q\n", *memProfile)
fs.Logf(nil, "Saving Memory profile %q\n", *memProfile)
f, err := os.Create(*memProfile)
if err != nil {
fs.Stats.Error()
@ -382,7 +382,7 @@ func initConfig() {
}
if m, _ := regexp.MatchString("^(bits|bytes)$", *dataRateUnit); m == false {
fs.ErrorLog(nil, "Invalid unit passed to --stats-unit. Defaulting to bytes.")
fs.Errorf(nil, "Invalid unit passed to --stats-unit. Defaulting to bytes.")
fs.Config.DataRateUnit = "bytes"
} else {
fs.Config.DataRateUnit = *dataRateUnit

View file

@ -41,8 +41,8 @@ var commandDefintion = &cobra.Command{
runtime.GC()
runtime.ReadMemStats(&after)
usedMemory := after.Alloc - before.Alloc
fs.Log(nil, "%d objects took %d bytes, %.1f bytes/object", len(objs), usedMemory, float64(usedMemory)/float64(len(objs)))
fs.Log(nil, "System memory changed from %d to %d bytes a change of %d bytes", before.Sys, after.Sys, after.Sys-before.Sys)
fs.Logf(nil, "%d objects took %d bytes, %.1f bytes/object", len(objs), usedMemory, float64(usedMemory)/float64(len(objs)))
fs.Logf(nil, "System memory changed from %d to %d bytes a change of %d bytes", before.Sys, after.Sys, after.Sys-before.Sys)
return nil
})
},
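The measurement technique here is the standard ReadMemStats sandwich: force a garbage collection before each reading so the Alloc delta counts live objects rather than collectable garbage. In isolation (buildObjects is a hypothetical stand-in for the listing being measured):

var before, after runtime.MemStats
runtime.GC()
runtime.ReadMemStats(&before)
objs := buildObjects() // workload under test
runtime.GC()
runtime.ReadMemStats(&after)
used := after.Alloc - before.Alloc
fs.Logf(nil, "%d objects took %d bytes, %.1f bytes/object",
	len(objs), used, float64(used)/float64(len(objs)))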

View file

@ -70,13 +70,13 @@ func (d *Dir) readDir() error {
defer d.mu.Unlock()
when := time.Now()
if d.read.IsZero() {
fs.Debug(d.path, "Reading directory")
fs.Debugf(d.path, "Reading directory")
} else {
age := when.Sub(d.read)
if age < dirCacheTime {
return nil
}
fs.Debug(d.path, "Re-reading directory (%v old)", age)
fs.Debugf(d.path, "Re-reading directory (%v old)", age)
}
objs, dirs, err := fs.NewLister().SetLevel(1).Start(d.f, d.path).GetAll()
if err == fs.ErrorDirNotFound {
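readDir is cached with a simple age check: a zero read time means the directory has never been read, anything younger than dirCacheTime is served from cache, and only stale entries trigger a re-list. The gate, on its own:

if !d.read.IsZero() {
	if age := time.Since(d.read); age < dirCacheTime {
		return nil // cached listing still fresh
	}
}
// fall through: (re-)read the directory and reset d.read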
@ -152,7 +152,7 @@ var _ fusefs.Node = (*Dir)(nil)
// Attr updates the attributes of a directory
func (d *Dir) Attr(ctx context.Context, a *fuse.Attr) error {
fs.Debug(d.path, "Dir.Attr")
fs.Debugf(d.path, "Dir.Attr")
a.Gid = gid
a.Uid = uid
a.Mode = os.ModeDir | dirPerms
@ -200,15 +200,15 @@ var _ fusefs.NodeRequestLookuper = (*Dir)(nil)
// Lookup need not handle the names "." and "..".
func (d *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.LookupResponse) (node fusefs.Node, err error) {
path := path.Join(d.path, req.Name)
fs.Debug(path, "Dir.Lookup")
fs.Debugf(path, "Dir.Lookup")
item, err := d.lookupNode(req.Name)
if err != nil {
if err != fuse.ENOENT {
fs.ErrorLog(path, "Dir.Lookup error: %v", err)
fs.Errorf(path, "Dir.Lookup error: %v", err)
}
return nil, err
}
fs.Debug(path, "Dir.Lookup OK")
fs.Debugf(path, "Dir.Lookup OK")
return item.node, nil
}
@ -217,10 +217,10 @@ var _ fusefs.HandleReadDirAller = (*Dir)(nil)
// ReadDirAll reads the contents of the directory
func (d *Dir) ReadDirAll(ctx context.Context) (dirents []fuse.Dirent, err error) {
fs.Debug(d.path, "Dir.ReadDirAll")
fs.Debugf(d.path, "Dir.ReadDirAll")
err = d.readDir()
if err != nil {
fs.Debug(d.path, "Dir.ReadDirAll error: %v", err)
fs.Debugf(d.path, "Dir.ReadDirAll error: %v", err)
return nil, err
}
d.mu.RLock()
@ -242,12 +242,12 @@ func (d *Dir) ReadDirAll(ctx context.Context) (dirents []fuse.Dirent, err error)
}
default:
err = errors.Errorf("unknown type %T", item)
fs.ErrorLog(d.path, "Dir.ReadDirAll error: %v", err)
fs.Errorf(d.path, "Dir.ReadDirAll error: %v", err)
return nil, err
}
dirents = append(dirents, dirent)
}
fs.Debug(d.path, "Dir.ReadDirAll OK with %d entries", len(dirents))
fs.Debugf(d.path, "Dir.ReadDirAll OK with %d entries", len(dirents))
return dirents, nil
}
@ -256,16 +256,16 @@ var _ fusefs.NodeCreater = (*Dir)(nil)
// Create makes a new file
func (d *Dir) Create(ctx context.Context, req *fuse.CreateRequest, resp *fuse.CreateResponse) (fusefs.Node, fusefs.Handle, error) {
path := path.Join(d.path, req.Name)
fs.Debug(path, "Dir.Create")
fs.Debugf(path, "Dir.Create")
src := newCreateInfo(d.f, path)
// This gets added to the directory when the file is written
file := newFile(d, nil)
fh, err := newWriteFileHandle(d, file, src)
if err != nil {
fs.ErrorLog(path, "Dir.Create error: %v", err)
fs.Errorf(path, "Dir.Create error: %v", err)
return nil, nil, err
}
fs.Debug(path, "Dir.Create OK")
fs.Debugf(path, "Dir.Create OK")
return file, fh, nil
}
@ -274,10 +274,10 @@ var _ fusefs.NodeMkdirer = (*Dir)(nil)
// Mkdir creates a new directory
func (d *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fusefs.Node, error) {
path := path.Join(d.path, req.Name)
fs.Debug(path, "Dir.Mkdir")
fs.Debugf(path, "Dir.Mkdir")
err := d.f.Mkdir(path)
if err != nil {
fs.ErrorLog(path, "Dir.Mkdir failed to create directory: %v", err)
fs.Errorf(path, "Dir.Mkdir failed to create directory: %v", err)
return nil, err
}
fsDir := &fs.Dir{
@ -286,7 +286,7 @@ func (d *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fusefs.Node, e
}
dir := newDir(d.f, fsDir)
d.addObject(fsDir, dir)
fs.Debug(path, "Dir.Mkdir OK")
fs.Debugf(path, "Dir.Mkdir OK")
return dir, nil
}
@ -297,17 +297,17 @@ var _ fusefs.NodeRemover = (*Dir)(nil)
// may correspond to a file (unlink) or to a directory (rmdir).
func (d *Dir) Remove(ctx context.Context, req *fuse.RemoveRequest) error {
path := path.Join(d.path, req.Name)
fs.Debug(path, "Dir.Remove")
fs.Debugf(path, "Dir.Remove")
item, err := d.lookupNode(req.Name)
if err != nil {
fs.ErrorLog(path, "Dir.Remove error: %v", err)
fs.Errorf(path, "Dir.Remove error: %v", err)
return err
}
switch x := item.o.(type) {
case fs.Object:
err = x.Remove()
if err != nil {
fs.ErrorLog(path, "Dir.Remove file error: %v", err)
fs.Errorf(path, "Dir.Remove file error: %v", err)
return err
}
case *fs.Dir:
@ -315,27 +315,27 @@ func (d *Dir) Remove(ctx context.Context, req *fuse.RemoveRequest) error {
dir := item.node.(*Dir)
empty, err := dir.isEmpty()
if err != nil {
fs.ErrorLog(path, "Dir.Remove dir error: %v", err)
fs.Errorf(path, "Dir.Remove dir error: %v", err)
return err
}
if !empty {
// return fuse.ENOTEMPTY - doesn't exist though so use EEXIST
fs.ErrorLog(path, "Dir.Remove not empty")
fs.Errorf(path, "Dir.Remove not empty")
return fuse.EEXIST
}
// remove directory
err = d.f.Rmdir(path)
if err != nil {
fs.ErrorLog(path, "Dir.Remove failed to remove directory: %v", err)
fs.Errorf(path, "Dir.Remove failed to remove directory: %v", err)
return err
}
default:
fs.ErrorLog(path, "Dir.Remove unknown type %T", item)
fs.Errorf(path, "Dir.Remove unknown type %T", item)
return errors.Errorf("unknown type %T", item)
}
// Remove the item from the directory listing
d.delObject(req.Name)
fs.Debug(path, "Dir.Remove OK")
fs.Debugf(path, "Dir.Remove OK")
return nil
}
@ -348,14 +348,14 @@ func (d *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDir fusefs
destDir, ok := newDir.(*Dir)
if !ok {
err := errors.Errorf("Unknown Dir type %T", newDir)
fs.ErrorLog(oldPath, "Dir.Rename error: %v", err)
fs.Errorf(oldPath, "Dir.Rename error: %v", err)
return err
}
newPath := path.Join(destDir.path, req.NewName)
fs.Debug(oldPath, "Dir.Rename to %q", newPath)
fs.Debugf(oldPath, "Dir.Rename to %q", newPath)
oldItem, err := d.lookupNode(req.OldName)
if err != nil {
fs.ErrorLog(oldPath, "Dir.Rename error: %v", err)
fs.Errorf(oldPath, "Dir.Rename error: %v", err)
return err
}
var newObj fs.BasicInfo
@ -365,12 +365,12 @@ func (d *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDir fusefs
do, ok := d.f.(fs.Mover)
if !ok {
err := errors.Errorf("Fs %q can't Move files", d.f)
fs.ErrorLog(oldPath, "Dir.Rename error: %v", err)
fs.Errorf(oldPath, "Dir.Rename error: %v", err)
return err
}
newObject, err := do.Move(oldObject, newPath)
if err != nil {
fs.ErrorLog(oldPath, "Dir.Rename error: %v", err)
fs.Errorf(oldPath, "Dir.Rename error: %v", err)
return err
}
newObj = newObject
@ -378,22 +378,22 @@ func (d *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDir fusefs
oldDir := oldItem.node.(*Dir)
empty, err := oldDir.isEmpty()
if err != nil {
fs.ErrorLog(oldPath, "Dir.Rename dir error: %v", err)
fs.Errorf(oldPath, "Dir.Rename dir error: %v", err)
return err
}
if !empty {
// return fuse.ENOTEMPTY - doesn't exist though so use EEXIST
fs.ErrorLog(oldPath, "Dir.Rename can't rename non empty directory")
fs.Errorf(oldPath, "Dir.Rename can't rename non empty directory")
return fuse.EEXIST
}
err = d.f.Rmdir(oldPath)
if err != nil {
fs.ErrorLog(oldPath, "Dir.Rename failed to remove directory: %v", err)
fs.Errorf(oldPath, "Dir.Rename failed to remove directory: %v", err)
return err
}
err = d.f.Mkdir(newPath)
if err != nil {
fs.ErrorLog(newPath, "Dir.Rename failed to create directory: %v", err)
fs.Errorf(newPath, "Dir.Rename failed to create directory: %v", err)
return err
}
newObj = &fs.Dir{
@ -402,7 +402,7 @@ func (d *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDir fusefs
}
default:
err = errors.Errorf("unknown type %T", oldItem)
fs.ErrorLog(d.path, "Dir.ReadDirAll error: %v", err)
fs.Errorf(d.path, "Dir.ReadDirAll error: %v", err)
return err
}
@ -417,7 +417,7 @@ func (d *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDir fusefs
// fallback to Copy/Delete if no Move?
// if dir is empty then can move it
fs.ErrorLog(newPath, "Dir.Rename renamed from %q", oldPath)
fs.Errorf(newPath, "Dir.Rename renamed from %q", oldPath)
return nil
}

View file

@ -45,7 +45,7 @@ var _ fusefs.Node = (*File)(nil)
func (f *File) Attr(ctx context.Context, a *fuse.Attr) error {
f.mu.Lock()
defer f.mu.Unlock()
fs.Debug(f.o, "File.Attr")
fs.Debugf(f.o, "File.Attr")
a.Gid = gid
a.Uid = uid
a.Mode = filePerms
@ -111,7 +111,7 @@ func (f *File) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.OpenR
return nil, err
}
fs.Debug(o, "File.Open")
fs.Debugf(o, "File.Open")
switch {
case req.Flags.IsReadOnly():

View file

@ -23,7 +23,7 @@ var _ fusefs.FS = (*FS)(nil)
// Root returns the root node
func (f *FS) Root() (fusefs.Node, error) {
fs.Debug(f.f, "Root()")
fs.Debugf(f.f, "Root()")
fsDir := &fs.Dir{
Name: "",
When: time.Now(),
@ -75,7 +75,7 @@ func mountOptions(device string) (options []fuse.MountOption) {
// returns an error, and an error channel for the serve process to
// report an error when fusermount is called.
func mount(f fs.Fs, mountpoint string) (<-chan error, error) {
fs.Debug(f, "Mounting on %q", mountpoint)
fs.Debugf(f, "Mounting on %q", mountpoint)
c, err := fuse.Mount(mountpoint, mountOptions(f.Name()+":"+f.Root())...)
if err != nil {
return nil, err

View file

@ -143,7 +143,7 @@ mount won't do that, so will be less reliable than the rclone command.
func Mount(f fs.Fs, mountpoint string) error {
if debugFUSE {
fuse.Debug = func(msg interface{}) {
fs.Debug("fuse", "%v", msg)
fs.Debugf("fuse", "%v", msg)
}
}

View file

@ -50,23 +50,23 @@ func (fh *ReadFileHandle) seek(offset int64, reopen bool) error {
// Can we seek it directly?
oldReader := fh.r.GetReader()
if do, ok := oldReader.(io.Seeker); !reopen && ok {
fs.Debug(fh.o, "ReadFileHandle.seek from %d to %d (io.Seeker)", fh.offset, offset)
fs.Debugf(fh.o, "ReadFileHandle.seek from %d to %d (io.Seeker)", fh.offset, offset)
_, err := do.Seek(offset, 0)
if err != nil {
fs.Debug(fh.o, "ReadFileHandle.Read io.Seeker failed: %v", err)
fs.Debugf(fh.o, "ReadFileHandle.Read io.Seeker failed: %v", err)
return err
}
} else {
fs.Debug(fh.o, "ReadFileHandle.seek from %d to %d", fh.offset, offset)
fs.Debugf(fh.o, "ReadFileHandle.seek from %d to %d", fh.offset, offset)
// if not re-open with a seek
r, err := fh.o.Open(&fs.SeekOption{Offset: offset})
if err != nil {
fs.Debug(fh.o, "ReadFileHandle.Read seek failed: %v", err)
fs.Debugf(fh.o, "ReadFileHandle.Read seek failed: %v", err)
return err
}
err = oldReader.Close()
if err != nil {
fs.Debug(fh.o, "ReadFileHandle.Read seek close old failed: %v", err)
fs.Debugf(fh.o, "ReadFileHandle.Read seek close old failed: %v", err)
}
// fh.r = fs.NewAccount(r, fh.o) // account the transfer
fh.r.UpdateReader(r)
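seek has a fast path and a slow path: if the wrapped reader happens to implement io.Seeker (and a reopen wasn't forced), seek in place; otherwise re-open the object at the target offset and swap the reader. Side by side, as a sketch:

if s, ok := oldReader.(io.Seeker); ok && !reopen {
	if _, err := s.Seek(offset, 0); err != nil {
		return err
	}
} else {
	r, err := fh.o.Open(&fs.SeekOption{Offset: offset})
	if err != nil {
		return err
	}
	_ = oldReader.Close() // close failures are only logged above
	fh.r.UpdateReader(r)
}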
@ -79,9 +79,9 @@ func (fh *ReadFileHandle) seek(offset int64, reopen bool) error {
func (fh *ReadFileHandle) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) (err error) {
fh.mu.Lock()
defer fh.mu.Unlock()
fs.Debug(fh.o, "ReadFileHandle.Read size %d offset %d", req.Size, req.Offset)
fs.Debugf(fh.o, "ReadFileHandle.Read size %d offset %d", req.Size, req.Offset)
if fh.closed {
fs.ErrorLog(fh.o, "ReadFileHandle.Read error: %v", errClosedFileHandle)
fs.Errorf(fh.o, "ReadFileHandle.Read error: %v", errClosedFileHandle)
return errClosedFileHandle
}
doSeek := req.Offset != fh.offset
@ -96,7 +96,7 @@ func (fh *ReadFileHandle) Read(ctx context.Context, req *fuse.ReadRequest, resp
// file - if so just return EOF leaving the underlying
// file in an unchanged state.
if req.Offset >= fh.o.Size() {
fs.Debug(fh.o, "ReadFileHandle.Read attempt to read beyond end of file: %d > %d", req.Offset, fh.o.Size())
fs.Debugf(fh.o, "ReadFileHandle.Read attempt to read beyond end of file: %d > %d", req.Offset, fh.o.Size())
resp.Data = nil
return nil
}
@ -129,16 +129,16 @@ func (fh *ReadFileHandle) Read(ctx context.Context, req *fuse.ReadRequest, resp
break
}
retries++
fs.ErrorLog(fh.o, "ReadFileHandle.Read error: low level retry %d/%d: %v", retries, fs.Config.LowLevelRetries, err)
fs.Errorf(fh.o, "ReadFileHandle.Read error: low level retry %d/%d: %v", retries, fs.Config.LowLevelRetries, err)
doSeek = true
doReopen = true
}
if err != nil {
fs.ErrorLog(fh.o, "ReadFileHandle.Read error: %v", err)
fs.Errorf(fh.o, "ReadFileHandle.Read error: %v", err)
} else {
resp.Data = buf[:n]
fh.offset = newOffset
fs.Debug(fh.o, "ReadFileHandle.Read OK")
fs.Debugf(fh.o, "ReadFileHandle.Read OK")
}
return err
}
@ -165,7 +165,7 @@ var _ fusefs.HandleFlusher = (*ReadFileHandle)(nil)
func (fh *ReadFileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) error {
fh.mu.Lock()
defer fh.mu.Unlock()
fs.Debug(fh.o, "ReadFileHandle.Flush")
fs.Debugf(fh.o, "ReadFileHandle.Flush")
// Ignore the Flush as there is nothing we can sensibly do and
// it seems quite common for Flush to be called from
@ -174,17 +174,17 @@ func (fh *ReadFileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) err
// If Read hasn't been called then ignore the Flush - Release
// will pick it up
if !fh.readCalled {
fs.Debug(fh.o, "ReadFileHandle.Flush ignoring flush on unread handle")
fs.Debugf(fh.o, "ReadFileHandle.Flush ignoring flush on unread handle")
return nil
}
err := fh.close()
if err != nil {
fs.ErrorLog(fh.o, "ReadFileHandle.Flush error: %v", err)
fs.Errorf(fh.o, "ReadFileHandle.Flush error: %v", err)
return err
}
}
fs.Debug(fh.o, "ReadFileHandle.Flush OK")
fs.Debugf(fh.o, "ReadFileHandle.Flush OK")
return nil
}
@ -198,15 +198,15 @@ func (fh *ReadFileHandle) Release(ctx context.Context, req *fuse.ReleaseRequest)
fh.mu.Lock()
defer fh.mu.Unlock()
if fh.closed {
fs.Debug(fh.o, "ReadFileHandle.Release nothing to do")
fs.Debugf(fh.o, "ReadFileHandle.Release nothing to do")
return nil
}
fs.Debug(fh.o, "ReadFileHandle.Release closing")
fs.Debugf(fh.o, "ReadFileHandle.Release closing")
err := fh.close()
if err != nil {
fs.ErrorLog(fh.o, "ReadFileHandle.Release error: %v", err)
fs.Errorf(fh.o, "ReadFileHandle.Release error: %v", err)
} else {
fs.Debug(fh.o, "ReadFileHandle.Release OK")
fs.Debugf(fh.o, "ReadFileHandle.Release OK")
}
return err
}

View file

@ -54,11 +54,11 @@ var _ fusefs.HandleWriter = (*WriteFileHandle)(nil)
// Write data to the file handle
func (fh *WriteFileHandle) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) error {
fs.Debug(fh.remote, "WriteFileHandle.Write len=%d", len(req.Data))
fs.Debugf(fh.remote, "WriteFileHandle.Write len=%d", len(req.Data))
fh.mu.Lock()
defer fh.mu.Unlock()
if fh.closed {
fs.ErrorLog(fh.remote, "WriteFileHandle.Write error: %v", errClosedFileHandle)
fs.Errorf(fh.remote, "WriteFileHandle.Write error: %v", errClosedFileHandle)
return errClosedFileHandle
}
fh.writeCalled = true
@ -67,10 +67,10 @@ func (fh *WriteFileHandle) Write(ctx context.Context, req *fuse.WriteRequest, re
resp.Size = n
fh.file.written(int64(n))
if err != nil {
fs.ErrorLog(fh.remote, "WriteFileHandle.Write error: %v", err)
fs.Errorf(fh.remote, "WriteFileHandle.Write error: %v", err)
return err
}
fs.Debug(fh.remote, "WriteFileHandle.Write OK (%d bytes written)", n)
fs.Debugf(fh.remote, "WriteFileHandle.Write OK (%d bytes written)", n)
return nil
}
@ -119,19 +119,19 @@ var _ fusefs.HandleFlusher = (*WriteFileHandle)(nil)
func (fh *WriteFileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) error {
fh.mu.Lock()
defer fh.mu.Unlock()
fs.Debug(fh.remote, "WriteFileHandle.Flush")
fs.Debugf(fh.remote, "WriteFileHandle.Flush")
// If Write hasn't been called then ignore the Flush - Release
// will pick it up
if !fh.writeCalled {
fs.Debug(fh.remote, "WriteFileHandle.Flush ignoring flush on unwritten handle")
fs.Debugf(fh.remote, "WriteFileHandle.Flush ignoring flush on unwritten handle")
return nil
}
err := fh.close()
if err != nil {
fs.ErrorLog(fh.remote, "WriteFileHandle.Flush error: %v", err)
fs.Errorf(fh.remote, "WriteFileHandle.Flush error: %v", err)
} else {
fs.Debug(fh.remote, "WriteFileHandle.Flush OK")
fs.Debugf(fh.remote, "WriteFileHandle.Flush OK")
}
return err
}
@ -146,15 +146,15 @@ func (fh *WriteFileHandle) Release(ctx context.Context, req *fuse.ReleaseRequest
fh.mu.Lock()
defer fh.mu.Unlock()
if fh.closed {
fs.Debug(fh.remote, "WriteFileHandle.Release nothing to do")
fs.Debugf(fh.remote, "WriteFileHandle.Release nothing to do")
return nil
}
fs.Debug(fh.remote, "WriteFileHandle.Release closing")
fs.Debugf(fh.remote, "WriteFileHandle.Release closing")
err := fh.close()
if err != nil {
fs.ErrorLog(fh.remote, "WriteFileHandle.Release error: %v", err)
fs.Errorf(fh.remote, "WriteFileHandle.Release error: %v", err)
} else {
fs.Debug(fh.remote, "WriteFileHandle.Release OK")
fs.Debugf(fh.remote, "WriteFileHandle.Release OK")
}
return err
}

View file

@ -12,5 +12,5 @@ import (
// redirectStderr to the file passed in
func redirectStderr(f *os.File) {
fs.ErrorLog(nil, "Can't redirect stderr to file")
fs.Errorf(nil, "Can't redirect stderr to file")
}

View file

@ -269,7 +269,7 @@ func (f *Fs) DirMove(src fs.Fs) error {
}
srcFs, ok := src.(*Fs)
if !ok {
fs.Debug(srcFs, "Can't move directory - not same remote type")
fs.Debugf(srcFs, "Can't move directory - not same remote type")
return fs.ErrorCantDirMove
}
return do(srcFs.Fs)
@ -345,7 +345,7 @@ func (o *Object) Remote() string {
remote := o.Object.Remote()
decryptedName, err := o.f.cipher.DecryptFileName(remote)
if err != nil {
fs.Debug(remote, "Undecryptable file name: %v", err)
fs.Debugf(remote, "Undecryptable file name: %v", err)
return remote
}
return decryptedName
@ -355,7 +355,7 @@ func (o *Object) Remote() string {
func (o *Object) Size() int64 {
size, err := o.f.cipher.DecryptedSize(o.Object.Size())
if err != nil {
fs.Debug(o, "Bad size for decrypt: %v", err)
fs.Debugf(o, "Bad size for decrypt: %v", err)
}
return size
}
@ -375,7 +375,7 @@ func (o *Object) Open(options ...fs.OpenOption) (rc io.ReadCloser, err error) {
offset = x.Offset
default:
if option.Mandatory() {
fs.Log(o, "Unsupported mandatory option: %v", option)
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}
@ -408,7 +408,7 @@ func (f *Fs) newDir(dir *fs.Dir) *fs.Dir {
remote := dir.Name
decryptedRemote, err := f.cipher.DecryptDirName(remote)
if err != nil {
fs.Debug(remote, "Undecryptable dir name: %v", err)
fs.Debugf(remote, "Undecryptable dir name: %v", err)
} else {
new.Name = decryptedRemote
}
@ -493,11 +493,11 @@ func (lo *ListOpts) Add(obj fs.Object) (abort bool) {
remote := obj.Remote()
decryptedRemote, err := lo.f.cipher.DecryptFileName(remote)
if err != nil {
fs.Debug(remote, "Skipping undecryptable file name: %v", err)
fs.Debugf(remote, "Skipping undecryptable file name: %v", err)
return lo.ListOpts.IsFinished()
}
if *cryptShowMapping {
fs.Log(decryptedRemote, "Encrypts to %q", remote)
fs.Logf(decryptedRemote, "Encrypts to %q", remote)
}
return lo.ListOpts.Add(lo.f.newObject(obj))
}

@ -509,11 +509,11 @@ func (lo *ListOpts) AddDir(dir *fs.Dir) (abort bool) {
remote := dir.Name
decryptedRemote, err := lo.f.cipher.DecryptDirName(remote)
if err != nil {
fs.Debug(remote, "Skipping undecryptable dir name: %v", err)
fs.Debugf(remote, "Skipping undecryptable dir name: %v", err)
return lo.ListOpts.IsFinished()
}
if *cryptShowMapping {
fs.Log(decryptedRemote, "Encrypts to %q", remote)
fs.Logf(decryptedRemote, "Encrypts to %q", remote)
}
return lo.ListOpts.AddDir(lo.f.newDir(dir))
}
@ -523,7 +523,7 @@ func (lo *ListOpts) AddDir(dir *fs.Dir) (abort bool) {
func (lo *ListOpts) IncludeDirectory(remote string) bool {
decryptedRemote, err := lo.f.cipher.DecryptDirName(remote)
if err != nil {
fs.Debug(remote, "Not including undecryptable directory name: %v", err)
fs.Debugf(remote, "Not including undecryptable directory name: %v", err)
return false
}
return lo.ListOpts.IncludeDirectory(decryptedRemote)

View file

@ -38,7 +38,7 @@ func listDir(f ListDirer, out fs.ListOpts, dirID string, path string) {
jobs, err := f.ListDir(out, job)
if err != nil {
out.SetError(err)
fs.Debug(f, "Error reading %s: %s", path, err)
fs.Debugf(f, "Error reading %s: %s", path, err)
} else {
traversing.Add(len(jobs))
go func() {

View file

@ -440,7 +440,7 @@ func (f *Fs) findExportFormat(filepath string, item *drive.File) (extension, lin
// Warn about unknown export formats
for mimeType := range item.ExportLinks {
if _, ok := mimeTypeToExtension[mimeType]; !ok {
fs.Debug(filepath, "Unknown export type %q - ignoring", mimeType)
fs.Debugf(filepath, "Unknown export type %q - ignoring", mimeType)
}
}
@ -458,7 +458,7 @@ func (f *Fs) findExportFormat(filepath string, item *drive.File) (extension, lin
// ListDir reads the directory specified by the job into out, returning any more jobs
func (f *Fs) ListDir(out fs.ListOpts, job dircache.ListDirJob) (jobs []dircache.ListDirJob, err error) {
fs.Debug(f, "Reading %q", job.Path)
fs.Debugf(f, "Reading %q", job.Path)
_, err = f.listAll(job.DirID, "", false, false, func(item *drive.File) bool {
remote := job.Path + item.Title
switch {
@ -493,7 +493,7 @@ func (f *Fs) ListDir(out fs.ListOpts, job dircache.ListDirJob) (jobs []dircache.
// If item has export links then it is a google doc
extension, link := f.findExportFormat(remote, item)
if extension == "" {
fs.Debug(remote, "No export formats found")
fs.Debugf(remote, "No export formats found")
} else {
o, err := f.newObjectWithInfo(remote+"."+extension, item)
if err != nil {
@ -509,15 +509,15 @@ func (f *Fs) ListDir(out fs.ListOpts, job dircache.ListDirJob) (jobs []dircache.
return true
}
} else {
fs.Debug(f, "Skip google document: %q", remote)
fs.Debugf(f, "Skip google document: %q", remote)
}
}
default:
fs.Debug(remote, "Ignoring unknown object")
fs.Debugf(remote, "Ignoring unknown object")
}
return false
})
fs.Debug(f, "Finished reading %q", job.Path)
fs.Debugf(f, "Finished reading %q", job.Path)
return jobs, err
}
@ -679,7 +679,7 @@ func (f *Fs) Precision() time.Duration {
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debug(src, "Can't copy - not same remote type")
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
if srcObj.isDocument {
@ -744,7 +744,7 @@ func (f *Fs) Purge() error {
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debug(src, "Can't move - not same remote type")
fs.Debugf(src, "Can't move - not same remote type")
return nil, fs.ErrorCantMove
}
if srcObj.isDocument {
@ -788,7 +788,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
func (f *Fs) DirMove(src fs.Fs) error {
srcFs, ok := src.(*Fs)
if !ok {
fs.Debug(srcFs, "Can't move directory - not same remote type")
fs.Debugf(srcFs, "Can't move directory - not same remote type")
return fs.ErrorCantDirMove
}
@ -799,7 +799,7 @@ func (f *Fs) DirMove(src fs.Fs) error {
// Refuse to move to or from the root
if f.root == "" || srcFs.root == "" {
fs.Debug(src, "DirMove error: Can't move root")
fs.Debugf(src, "DirMove error: Can't move root")
return errors.New("can't move root directory")
}
@ -871,12 +871,12 @@ func (o *Object) Size() int64 {
// how big it is
_, res, err := o.httpResponse("HEAD", nil)
if err != nil {
fs.ErrorLog(o, "Error reading size: %v", err)
fs.Errorf(o, "Error reading size: %v", err)
return 0
}
_ = res.Body.Close()
o.bytes = res.ContentLength
// fs.Debug(o, "Read size of document: %v", o.bytes)
// fs.Debugf(o, "Read size of document: %v", o.bytes)
}
return o.bytes
}
@ -929,12 +929,12 @@ func (o *Object) readMetaData() (err error) {
func (o *Object) ModTime() time.Time {
err := o.readMetaData()
if err != nil {
fs.Log(o, "Failed to read metadata: %v", err)
fs.Logf(o, "Failed to read metadata: %v", err)
return time.Now()
}
modTime, err := time.Parse(timeFormatIn, o.modifiedDate)
if err != nil {
fs.Log(o, "Failed to read mtime from object: %v", err)
fs.Logf(o, "Failed to read mtime from object: %v", err)
return time.Now()
}
return modTime
@ -1012,7 +1012,7 @@ func (file *openFile) Read(p []byte) (n int, err error) {
func (file *openFile) Close() (err error) {
// If end of file, update bytes read
if file.eof {
// fs.Debug(file.o, "Updating size of doc after download to %v", file.bytes)
// fs.Debugf(file.o, "Updating size of doc after download to %v", file.bytes)
file.o.bytes = file.bytes
}
return file.in.Close()
@ -1103,7 +1103,7 @@ func (o *Object) Remove() error {
func (o *Object) MimeType() string {
err := o.readMetaData()
if err != nil {
fs.Log(o, "Failed to read metadata: %v", err)
fs.Logf(o, "Failed to read metadata: %v", err)
return ""
}
return o.mimeType

View file

@ -209,7 +209,7 @@ func (rx *resumableUpload) Upload() (*drive.File, error) {
// Transfer the chunk
err = rx.f.pacer.Call(func() (bool, error) {
fs.Debug(rx.remote, "Sending chunk %d length %d", start, reqSize)
fs.Debugf(rx.remote, "Sending chunk %d length %d", start, reqSize)
StatusCode, err = rx.transferChunk(start, buf)
again, err := shouldRetry(err)
if StatusCode == statusResumeIncomplete || StatusCode == http.StatusCreated || StatusCode == http.StatusOK {

View file

@ -279,7 +279,7 @@ func (f *Fs) list(out fs.ListOpts, dir string) {
out.SetError(err)
break
}
fs.Debug(f, "%d delta entries received", len(deltaPage.Entries))
fs.Debugf(f, "%d delta entries received", len(deltaPage.Entries))
for i := range deltaPage.Entries {
deltaEntry := &deltaPage.Entries[i]
entry := deltaEntry.Entry
@ -287,7 +287,7 @@ func (f *Fs) list(out fs.ListOpts, dir string) {
// This notifies of a deleted object
} else {
if len(entry.Path) <= 1 || entry.Path[0] != '/' {
fs.Log(f, "dropbox API inconsistency: a path should always start with a slash and be at least 2 characters: %s", entry.Path)
fs.Logf(f, "dropbox API inconsistency: a path should always start with a slash and be at least 2 characters: %s", entry.Path)
continue
}
@ -500,7 +500,7 @@ func (f *Fs) Precision() time.Duration {
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debug(src, "Can't copy - not same remote type")
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
@ -543,7 +543,7 @@ func (f *Fs) Purge() error {
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debug(src, "Can't move - not same remote type")
fs.Debugf(src, "Can't move - not same remote type")
return nil, fs.ErrorCantMove
}
@ -573,7 +573,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
func (f *Fs) DirMove(src fs.Fs) error {
srcFs, ok := src.(*Fs)
if !ok {
fs.Debug(srcFs, "Can't move directory - not same remote type")
fs.Debugf(srcFs, "Can't move directory - not same remote type")
return fs.ErrorCantDirMove
}
@ -699,7 +699,7 @@ func (o *Object) readMetaData() (err error) {
func (o *Object) ModTime() time.Time {
err := o.readMetaData()
if err != nil {
fs.Log(o, "Failed to read metadata: %v", err)
fs.Logf(o, "Failed to read metadata: %v", err)
return time.Now()
}
return o.modTime
@ -728,7 +728,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
offset = x.Offset
default:
if option.Mandatory() {
fs.Log(o, "Unsupported mandatory option: %v", option)
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}
@ -752,7 +752,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
func (o *Object) Update(in io.Reader, src fs.ObjectInfo) error {
remote := o.remotePath()
if ignoredFiles.MatchString(remote) {
fs.Log(o, "File name disallowed - not uploading")
fs.Logf(o, "File name disallowed - not uploading")
return nil
}
entry, err := o.fs.db.UploadByChunk(ioutil.NopCloser(in), int(uploadChunkSize), remote, true, "")
@ -773,7 +773,7 @@ func (o *Object) Remove() error {
func (o *Object) MimeType() string {
err := o.readMetaData()
if err != nil {
fs.Log(o, "Failed to read metadata: %v", err)
fs.Logf(o, "Failed to read metadata: %v", err)
return ""
}
return o.mimeType

View file

@ -52,7 +52,7 @@ func (tree *nameTreeNode) getTreeNode(path string) *nameTreeNode {
for _, component := range strings.Split(path, "/") {
if len(component) == 0 {
fs.Stats.Error()
fs.ErrorLog(tree, "getTreeNode: path component is empty (full path %q)", path)
fs.Errorf(tree, "getTreeNode: path component is empty (full path %q)", path)
return nil
}
@ -79,7 +79,7 @@ func (tree *nameTreeNode) PutCaseCorrectPath(caseCorrectPath string) {
for _, component := range strings.Split(caseCorrectPath, "/") {
if len(component) == 0 {
fs.Stats.Error()
fs.ErrorLog(tree, "PutCaseCorrectPath: path component is empty (full path %q)", caseCorrectPath)
fs.Errorf(tree, "PutCaseCorrectPath: path component is empty (full path %q)", caseCorrectPath)
return
}
lowercase := strings.ToLower(component)
@ -96,7 +96,7 @@ func (tree *nameTreeNode) PutCaseCorrectPath(caseCorrectPath string) {
func (tree *nameTreeNode) PutCaseCorrectDirectoryName(parentPath string, caseCorrectDirectoryName string) {
if len(caseCorrectDirectoryName) == 0 {
fs.Stats.Error()
fs.ErrorLog(tree, "PutCaseCorrectDirectoryName: empty caseCorrectDirectoryName is not allowed (parentPath: %q)", parentPath)
fs.Errorf(tree, "PutCaseCorrectDirectoryName: empty caseCorrectDirectoryName is not allowed (parentPath: %q)", parentPath)
return
}
@ -113,7 +113,7 @@ func (tree *nameTreeNode) PutCaseCorrectDirectoryName(parentPath string, caseCor
} else {
if len(directory.CaseCorrectName) > 0 {
fs.Stats.Error()
fs.ErrorLog(tree, "PutCaseCorrectDirectoryName: directory %q is already exists under parent path %q", caseCorrectDirectoryName, parentPath)
fs.Errorf(tree, "PutCaseCorrectDirectoryName: directory %q is already exists under parent path %q", caseCorrectDirectoryName, parentPath)
return
}
@ -129,7 +129,7 @@ func (tree *nameTreeNode) PutFile(parentPath string, caseCorrectFileName string,
if node.Files[caseCorrectFileName] != nil {
fs.Stats.Error()
fs.ErrorLog(tree, "PutFile: file %q is already exists at %q", caseCorrectFileName, parentPath)
fs.Errorf(tree, "PutFile: file %q is already exists at %q", caseCorrectFileName, parentPath)
return
}
@ -148,7 +148,7 @@ func (tree *nameTreeNode) GetPathWithCorrectCase(path string) *string {
for _, component := range strings.Split(path, "/") {
if component == "" {
fs.Stats.Error()
fs.ErrorLog(tree, "GetPathWithCorrectCase: path component is empty (full path %q)", path)
fs.Errorf(tree, "GetPathWithCorrectCase: path component is empty (full path %q)", path)
return nil
}
@ -188,7 +188,7 @@ func (tree *nameTreeNode) walkFilesRec(currentPath string, walkFunc nameTreeFile
caseCorrectName := directory.CaseCorrectName
if caseCorrectName == "" {
fs.Stats.Error()
fs.ErrorLog(tree, "WalkFiles: exact name of the directory %q is unknown (parent path: %q)", lowerCaseName, currentPath)
fs.Errorf(tree, "WalkFiles: exact name of the directory %q is unknown (parent path: %q)", lowerCaseName, currentPath)
continue
}

View file

@ -33,7 +33,7 @@ func startTokenBucket() {
if currLimit.bandwidth > 0 {
tokenBucket = tb.NewBucket(int64(currLimit.bandwidth), 100*time.Millisecond)
Log(nil, "Starting bandwidth limiter at %vBytes/s", &currLimit.bandwidth)
Logf(nil, "Starting bandwidth limiter at %vBytes/s", &currLimit.bandwidth)
// Start the SIGUSR2 signal handler to toggle bandwidth.
// This function does nothing in windows systems.
@ -60,17 +60,17 @@ func startTokenTicker() {
if tokenBucket != nil {
err := tokenBucket.Close()
if err != nil {
Log(nil, "Error closing token bucket: %v", err)
Logf(nil, "Error closing token bucket: %v", err)
}
}
// Set new bandwidth. If unlimited, set tokenbucket to nil.
if limitNow.bandwidth > 0 {
tokenBucket = tb.NewBucket(int64(limitNow.bandwidth), 100*time.Millisecond)
Log(nil, "Scheduled bandwidth change. Limit set to %vBytes/s", &limitNow.bandwidth)
Logf(nil, "Scheduled bandwidth change. Limit set to %vBytes/s", &limitNow.bandwidth)
} else {
tokenBucket = nil
Log(nil, "Scheduled bandwidth change. Bandwidth limits disabled")
Logf(nil, "Scheduled bandwidth change. Bandwidth limits disabled")
}
currLimit = limitNow
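The limiter delegates to tsenart/tb, constructing a bucket refilled every 100ms. A from-scratch sketch of what such a bucket does — deliberately not assuming any tb API beyond the calls the diff shows:

package main

import (
	"fmt"
	"time"
)

func main() {
	const rate = int64(1 << 20)         // bytes/second allowed
	const tick = 100 * time.Millisecond // refill interval, as in the diff

	tokens := make(chan int64, 1)
	tokens <- 0
	go func() {
		for range time.Tick(tick) {
			t := <-tokens
			if t < rate { // cap the bucket at a second's worth
				t += rate / int64(time.Second/tick)
			}
			tokens <- t
		}
	}()

	// take blocks until n tokens are available, throttling the caller.
	take := func(n int64) {
		for {
			t := <-tokens
			if t >= n {
				tokens <- t - n
				return
			}
			tokens <- t
			time.Sleep(tick)
		}
	}

	start := time.Now()
	for i := 0; i < 5; i++ {
		take(256 * 1024) // pretend to send a 256 KiB chunk
	}
	fmt.Println("sent 1.25 MiB in", time.Since(start)) // roughly 1.25s at 1 MiB/s
}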
@ -203,7 +203,7 @@ Elapsed time: %10v
// Log outputs the StatsInfo to the log
func (s *StatsInfo) Log() {
Log(nil, "%v\n", s)
Logf(nil, "%v\n", s)
}
// Bytes updates the stats for bytes bytes
@ -355,7 +355,7 @@ func NewAccountSizeNameWithBuffer(in io.ReadCloser, size int64, name string) *Ac
const buffers = memUsed / bufSize
newIn, err := newAsyncReader(in, buffers, bufSize)
if err != nil {
ErrorLog(name, "Failed to make buffer: %v", err)
Errorf(name, "Failed to make buffer: %v", err)
} else {
in = newIn
}
@ -591,7 +591,7 @@ func (a *accountStream) Read(p []byte) (n int, err error) {
func AccountByPart(obj Object) *Account {
acc := Stats.inProgress.get(obj.Remote())
if acc == nil {
Debug(obj, "Didn't find object to account part transfer")
Debugf(obj, "Didn't find object to account part transfer")
return nil
}
acc.disableWholeFileAccounting()

View file

@ -27,7 +27,7 @@ func startSignalHandler() {
s = "enabled"
}
tokenBucketMu.Unlock()
Log(nil, "Bandwidth limit %s by user", s)
Logf(nil, "Bandwidth limit %s by user", s)
}
}()
}

View file

@ -275,10 +275,10 @@ func makeConfigPath() string {
}
// Default to ./.rclone.conf (current working directory)
ErrorLog(nil, "Couldn't find home directory or read HOME or XDG_CONFIG_HOME environment variables.")
ErrorLog(nil, "Defaulting to storing config in current directory.")
ErrorLog(nil, "Use -config flag to workaround.")
ErrorLog(nil, "Error was: %v", err)
Errorf(nil, "Couldn't find home directory or read HOME or XDG_CONFIG_HOME environment variables.")
Errorf(nil, "Defaulting to storing config in current directory.")
Errorf(nil, "Use -config flag to workaround.")
Errorf(nil, "Error was: %v", err)
return hiddenConfigFileName
}
@ -357,7 +357,7 @@ func LoadConfig() {
var err error
configData, err = loadConfigFile()
if err == errorConfigFileNotFound {
Log(nil, "Config file %q not found - using defaults", ConfigPath)
Logf(nil, "Config file %q not found - using defaults", ConfigPath)
configData, _ = goconfig.LoadFromReader(&bytes.Buffer{})
} else if err != nil {
log.Fatalf("Failed to load config file %q: %v", ConfigPath, err)
@ -432,7 +432,7 @@ func loadConfigFile() (*goconfig.ConfigFile, error) {
fmt.Println("Using RCLONE_CONFIG_PASS returned:", err)
envpw = ""
} else {
Debug(nil, "Using RCLONE_CONFIG_PASS password.")
Debugf(nil, "Using RCLONE_CONFIG_PASS password.")
}
}
if len(configKey) == 0 {
@ -456,7 +456,7 @@ func loadConfigFile() (*goconfig.ConfigFile, error) {
}
// Retry
ErrorLog(nil, "Couldn't decrypt configuration, most likely wrong password.")
Errorf(nil, "Couldn't decrypt configuration, most likely wrong password.")
configKey = nil
envpw = ""
}
@ -560,7 +560,7 @@ func SaveConfig() {
}
err = os.Chmod(ConfigPath, 0600)
if err != nil {
ErrorLog(nil, "Failed to set permissions on config file: %v", err)
Errorf(nil, "Failed to set permissions on config file: %v", err)
}
return
}
@ -607,7 +607,7 @@ func SaveConfig() {
err = os.Chmod(ConfigPath, 0600)
if err != nil {
ErrorLog(nil, "Failed to set permissions on config file: %v", err)
Errorf(nil, "Failed to set permissions on config file: %v", err)
}
}
@ -809,7 +809,7 @@ func OkRemote(name string) bool {
configData.DeleteSection(name)
return true
default:
ErrorLog(nil, "Bad choice %c", i)
Errorf(nil, "Bad choice %c", i)
}
return false
}
@ -868,7 +868,7 @@ func ChooseOption(o *Option) string {
case 'n':
return ""
default:
ErrorLog(nil, "Bad choice %c", i)
Errorf(nil, "Bad choice %c", i)
}
return MustObscure(password)
}
@ -1128,7 +1128,7 @@ func ConfigFileGetBool(section, key string, defaultVal ...bool) bool {
if found {
newBool, err := strconv.ParseBool(newValue)
if err != nil {
ErrorLog(nil, "Couldn't parse %q into bool - ignoring: %v", envKey, err)
Errorf(nil, "Couldn't parse %q into bool - ignoring: %v", envKey, err)
} else {
defaultVal = []bool{newBool}
}
@ -1146,7 +1146,7 @@ func ConfigFileGetInt(section, key string, defaultVal ...int) int {
if found {
newInt, err := strconv.Atoi(newValue)
if err != nil {
ErrorLog(nil, "Couldn't parse %q into int - ignoring: %v", envKey, err)
Errorf(nil, "Couldn't parse %q into int - ignoring: %v", envKey, err)
} else {
defaultVal = []int{newInt}
}
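Both helpers follow the same pattern: an RCLONE_CONFIG_* environment variable, when present and parseable, overrides the config-file default. A standalone sketch — the variable name is illustrative:

package main

import (
	"fmt"
	"os"
	"strconv"
)

func main() {
	defaultVal := true
	// If the override variable is set and parses, it wins; otherwise
	// the default stands and the bad value is logged and ignored.
	if v, found := os.LookupEnv("RCLONE_EXAMPLE_BOOL"); found {
		b, err := strconv.ParseBool(v)
		if err != nil {
			fmt.Printf("Couldn't parse %q into bool - ignoring: %v\n", v, err)
		} else {
			defaultVal = b
		}
	}
	fmt.Println("effective value:", defaultVal)
}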

View file

@ -232,7 +232,7 @@ func NewFilter() (f *Filter, err error) {
return nil, err
}
f.ModTimeTo = time.Now().Add(-duration)
Debug(nil, "--min-age %v to %v", duration, f.ModTimeTo)
Debugf(nil, "--min-age %v to %v", duration, f.ModTimeTo)
}
if *maxAge != "" {
duration, err := ParseDuration(*maxAge)
@ -243,7 +243,7 @@ func NewFilter() (f *Filter, err error) {
if !f.ModTimeTo.IsZero() && f.ModTimeTo.Before(f.ModTimeFrom) {
return nil, errors.New("argument --min-age can't be larger than --max-age")
}
Debug(nil, "--max-age %v to %v", duration, f.ModTimeFrom)
Debugf(nil, "--max-age %v to %v", duration, f.ModTimeFrom)
}
if *dumpFilters {
fmt.Println("--- start filters ---")

View file

@ -2,11 +2,9 @@
package fs
import (
"fmt"
"io"
"log"
"math"
"os"
"path/filepath"
"regexp"
"sort"
@ -614,40 +612,6 @@ func NewFs(path string) (Fs, error) {
return fsInfo.NewFs(configName, fsPath)
}
// DebugLogger - logs to Stdout
var DebugLogger = log.New(os.Stdout, "", log.LstdFlags)
// makeLog produces a log string from the arguments passed in
func makeLog(o interface{}, text string, args ...interface{}) string {
out := fmt.Sprintf(text, args...)
if o == nil {
return out
}
return fmt.Sprintf("%v: %s", o, out)
}
// Debug writes debugging output for this Object or Fs
func Debug(o interface{}, text string, args ...interface{}) {
if Config.Verbose {
DebugLogger.Print(makeLog(o, text, args...))
}
}
// Log writes log output for this Object or Fs. This should be
// considered to be Info level logging.
func Log(o interface{}, text string, args ...interface{}) {
if !Config.Quiet {
log.Print(makeLog(o, text, args...))
}
}
// ErrorLog writes error log output for this Object or Fs. It
// unconditionally logs a message regardless of Config.Quiet or
// Config.Verbose.
func ErrorLog(o interface{}, text string, args ...interface{}) {
log.Print(makeLog(o, text, args...))
}
// CheckClose is a utility function used to check the return from
// Close in a defer statement.
func CheckClose(c io.Closer, err *error) {

View file

@ -170,13 +170,13 @@ func checkServerTime(req *http.Request, resp *http.Response) {
}
date, err := http.ParseTime(dateString)
if err != nil {
Debug(nil, "Couldn't parse Date: from server %s: %q: %v", host, dateString, err)
Debugf(nil, "Couldn't parse Date: from server %s: %q: %v", host, dateString, err)
return
}
dt := time.Since(date)
const window = 5 * 60 * time.Second
if dt > window || dt < -window {
Log(nil, "Time may be set wrong - time from %q is %v different from this computer", host, dt)
Logf(nil, "Time may be set wrong - time from %q is %v different from this computer", host, dt)
}
checkedHostMu.Lock()
checkedHost[host] = struct{}{}
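checkServerTime amounts to parsing the response's Date header and warning when the local clock differs by more than five minutes. A stdlib-only sketch — the URL is illustrative:

package main

import (
	"fmt"
	"net/http"
	"time"
)

func main() {
	resp, err := http.Get("https://example.com/")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer func() { _ = resp.Body.Close() }()

	date, err := http.ParseTime(resp.Header.Get("Date"))
	if err != nil {
		fmt.Println("Couldn't parse Date header:", err)
		return
	}
	dt := time.Since(date)
	const window = 5 * time.Minute
	if dt > window || dt < -window {
		fmt.Printf("Time may be set wrong - differs from server by %v\n", dt)
	}
}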
@ -219,30 +219,30 @@ func cleanAuth(buf []byte) []byte {
func (t *Transport) RoundTrip(req *http.Request) (resp *http.Response, err error) {
// Force user agent
req.Header.Set("User-Agent", UserAgent)
// Log request
if t.logHeader || t.logBody || t.logAuth {
buf, _ := httputil.DumpRequestOut(req, t.logBody)
if !t.logAuth {
buf = cleanAuth(buf)
}
Debug(nil, "%s", separatorReq)
Debug(nil, "%s (req %p)", "HTTP REQUEST", req)
Debug(nil, "%s", string(buf))
Debug(nil, "%s", separatorReq)
Debugf(nil, "%s", separatorReq)
Debugf(nil, "%s (req %p)", "HTTP REQUEST", req)
Debugf(nil, "%s", string(buf))
Debugf(nil, "%s", separatorReq)
}
// Do round trip
resp, err = t.Transport.RoundTrip(req)
// Log response
if t.logHeader || t.logBody || t.logAuth {
Debug(nil, "%s", separatorResp)
Debug(nil, "%s (req %p)", "HTTP RESPONSE", req)
Debugf(nil, "%s", separatorResp)
Debugf(nil, "%s (req %p)", "HTTP RESPONSE", req)
if err != nil {
Debug(nil, "Error: %v", err)
Debugf(nil, "Error: %v", err)
} else {
buf, _ := httputil.DumpResponse(resp, t.logBody)
Debug(nil, "%s", string(buf))
Debugf(nil, "%s", string(buf))
}
Debug(nil, "%s", separatorResp)
Debugf(nil, "%s", separatorResp)
}
if err == nil {
checkServerTime(req, resp)

fs/log.go (new file, 67 lines)
View file

@ -0,0 +1,67 @@
// Logging for rclone
package fs
import (
"fmt"
"log"
"os"
)
// LogLevel describes rclone's logs. These are a subset of the syslog log levels.
type LogLevel byte
// Log levels - a subset of the syslog logs
const (
LogLevelEmergency = iota
LogLevelAlert
LogLevelCritical
LogLevelError // Error - can't be suppressed
LogLevelWarning
LogLevelNotice // Normal logging, -q suppresses
LogLevelInfo // Transfers, needs -v
LogLevelDebug // Debug level, needs -vv
)
// Outside world interface
// DebugLogger - logs to Stdout
var DebugLogger = log.New(os.Stdout, "", log.LstdFlags)
// makeLog produces a log string from the arguments passed in
func makeLog(o interface{}, text string, args ...interface{}) string {
out := fmt.Sprintf(text, args...)
if o == nil {
return out
}
return fmt.Sprintf("%v: %s", o, out)
}
// Errorf writes error log output for this Object or Fs. It
// unconditionally logs a message regardless of Config.Quiet or
// Config.Verbose.
func Errorf(o interface{}, text string, args ...interface{}) {
log.Print(makeLog(o, text, args...))
}
// Logf writes log output for this Object or Fs. This should be
// considered to be Info level logging.
func Logf(o interface{}, text string, args ...interface{}) {
if !Config.Quiet {
log.Print(makeLog(o, text, args...))
}
}
// Infof writes info on transfers for this Object or Fs
func Infof(o interface{}, text string, args ...interface{}) {
if Config.Verbose {
DebugLogger.Print(makeLog(o, text, args...))
}
}
// Debugf writes debugging output for this Object or Fs
func Debugf(o interface{}, text string, args ...interface{}) {
if Config.Verbose {
DebugLogger.Print(makeLog(o, text, args...))
}
}
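A minimal sketch of the renamed helpers in use, assuming the import path of the day (github.com/ncw/rclone/fs) and that Config has been initialised as rclone's startup normally does; the messages are illustrative:

package main

import "github.com/ncw/rclone/fs"

func main() {
	fs.Config.Verbose = true // -v: enables Infof and Debugf
	fs.Config.Quiet = false  // -q would suppress Logf

	// The first argument is an optional Object or Fs whose String()
	// prefixes the message; nil logs the message bare.
	fs.Errorf(nil, "always logged: %v", "example error") // standard logger, unconditional
	fs.Logf(nil, "notice level - suppressed by -q")      // standard logger
	fs.Infof(nil, "transfer info - needs -v")            // stdout via DebugLogger
	fs.Debugf(nil, "debug output - also gated on -v")    // stdout via DebugLogger
}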

View file

@ -32,12 +32,12 @@ func CalculateModifyWindow(fs ...Fs) {
Config.ModifyWindow = precision
}
if precision == ModTimeNotSupported {
Debug(f, "Modify window not supported")
Debugf(f, "Modify window not supported")
return
}
}
}
Debug(fs[0], "Modify window is %s", Config.ModifyWindow)
Debugf(fs[0], "Modify window is %s", Config.ModifyWindow)
}
// HashEquals checks to see if src == dst, but ignores empty strings
@ -64,7 +64,7 @@ func HashEquals(src, dst string) bool {
// If an error is returned it will return equal as false
func CheckHashes(src, dst Object) (equal bool, hash HashType, err error) {
common := src.Fs().Hashes().Overlap(dst.Fs().Hashes())
// Debug(nil, "Shared hashes: %v", common)
// Debugf(nil, "Shared hashes: %v", common)
if common.Count() == 0 {
return true, HashNone, nil
}
@ -72,7 +72,7 @@ func CheckHashes(src, dst Object) (equal bool, hash HashType, err error) {
srcHash, err := src.Hash(hash)
if err != nil {
Stats.Error()
ErrorLog(src, "Failed to calculate src hash: %v", err)
Errorf(src, "Failed to calculate src hash: %v", err)
return false, hash, err
}
if srcHash == "" {
@ -81,7 +81,7 @@ func CheckHashes(src, dst Object) (equal bool, hash HashType, err error) {
dstHash, err := dst.Hash(hash)
if err != nil {
Stats.Error()
ErrorLog(dst, "Failed to calculate dst hash: %v", err)
Errorf(dst, "Failed to calculate dst hash: %v", err)
return false, hash, err
}
if dstHash == "" {
@ -115,12 +115,12 @@ func Equal(src, dst Object) bool {
func equal(src, dst Object, sizeOnly, checkSum bool) bool {
if !Config.IgnoreSize {
if src.Size() != dst.Size() {
Debug(src, "Sizes differ")
Debugf(src, "Sizes differ")
return false
}
}
if sizeOnly {
Debug(src, "Sizes identical")
Debugf(src, "Sizes identical")
return true
}
@ -131,20 +131,20 @@ func equal(src, dst Object, sizeOnly, checkSum bool) bool {
// Check the hash
same, hash, _ := CheckHashes(src, dst)
if !same {
Debug(src, "%v differ", hash)
Debugf(src, "%v differ", hash)
return false
}
if hash == HashNone {
Debug(src, "Size of src and dst objects identical")
Debugf(src, "Size of src and dst objects identical")
} else {
Debug(src, "Size and %v of src and dst objects identical", hash)
Debugf(src, "Size and %v of src and dst objects identical", hash)
}
return true
}
// Sizes the same so check the mtime
if Config.ModifyWindow == ModTimeNotSupported {
Debug(src, "Sizes identical")
Debugf(src, "Sizes identical")
return true
}
srcModTime := src.ModTime()
@ -152,16 +152,16 @@ func equal(src, dst Object, sizeOnly, checkSum bool) bool {
dt := dstModTime.Sub(srcModTime)
ModifyWindow := Config.ModifyWindow
if dt < ModifyWindow && dt > -ModifyWindow {
Debug(src, "Size and modification time the same (differ by %s, within tolerance %s)", dt, ModifyWindow)
Debugf(src, "Size and modification time the same (differ by %s, within tolerance %s)", dt, ModifyWindow)
return true
}
Debug(src, "Modification times differ by %s: %v, %v", dt, srcModTime, dstModTime)
Debugf(src, "Modification times differ by %s: %v, %v", dt, srcModTime, dstModTime)
// Check if the hashes are the same
same, hash, _ := CheckHashes(src, dst)
if !same {
Debug(src, "%v differ", hash)
Debugf(src, "%v differ", hash)
return false
}
if hash == HashNone {
@ -175,13 +175,13 @@ func equal(src, dst Object, sizeOnly, checkSum bool) bool {
// mtime of the dst object here
err := dst.SetModTime(srcModTime)
if err == ErrorCantSetModTime {
Debug(src, "src and dst identical but can't set mod time without re-uploading")
Debugf(src, "src and dst identical but can't set mod time without re-uploading")
return false
} else if err != nil {
Stats.Error()
ErrorLog(dst, "Failed to set modification time: %v", err)
Errorf(dst, "Failed to set modification time: %v", err)
} else {
Debug(src, "Updated modification time in destination")
Debugf(src, "Updated modification time in destination")
}
}
return true
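The mod time test above treats two times as equal when they differ by less than the remote's precision (Config.ModifyWindow). A standalone sketch:

package main

import (
	"fmt"
	"time"
)

// withinWindow reports whether two mod times are equal to within the
// remote's precision, mirroring the dt < window && dt > -window test.
func withinWindow(src, dst time.Time, window time.Duration) bool {
	dt := dst.Sub(src)
	return dt < window && dt > -window
}

func main() {
	src := time.Now()
	dst := src.Add(500 * time.Millisecond)
	fmt.Println(withinWindow(src, dst, time.Second))          // true
	fmt.Println(withinWindow(src, dst, 100*time.Millisecond)) // false
}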
@ -202,7 +202,7 @@ func MimeType(o ObjectInfo) (mimeType string) {
// Read the MimeType from the optional interface if available
if do, ok := o.(MimeTyper); ok {
mimeType = do.MimeType()
Debug(o, "Read MimeType as %q", mimeType)
Debugf(o, "Read MimeType as %q", mimeType)
if mimeType != "" {
return mimeType
}
@ -217,10 +217,10 @@ func removeFailedCopy(dst Object) bool {
if dst == nil {
return false
}
Debug(dst, "Removing failed copy")
Debugf(dst, "Removing failed copy")
removeErr := dst.Remove()
if removeErr != nil {
Debug(dst, "Failed to remove failed copy: %s", removeErr)
Debugf(dst, "Failed to remove failed copy: %s", removeErr)
return false
}
return true
@ -241,7 +241,7 @@ func (o *overrideRemoteObject) Remote() string {
// remote as the name of the new object.
func Copy(f Fs, dst Object, remote string, src Object) (err error) {
if Config.DryRun {
Log(src, "Not copying as --dry-run")
Logf(src, "Not copying as --dry-run")
return nil
}
maxTries := Config.LowLevelRetries
@ -290,7 +290,7 @@ func Copy(f Fs, dst Object, remote string, src Object) (err error) {
}
// Retry if err returned a retry error
if IsRetryError(err) || ShouldRetry(err) {
Debug(src, "Received error: %v - low level retry %d/%d", err, tries, maxTries)
Debugf(src, "Received error: %v - low level retry %d/%d", err, tries, maxTries)
continue
}
// otherwise finish
@ -298,7 +298,7 @@ func Copy(f Fs, dst Object, remote string, src Object) (err error) {
}
if err != nil {
Stats.Error()
ErrorLog(src, "Failed to copy: %v", err)
Errorf(src, "Failed to copy: %v", err)
return err
}
@ -306,7 +306,7 @@ func Copy(f Fs, dst Object, remote string, src Object) (err error) {
if !Config.IgnoreSize && src.Size() != dst.Size() {
Stats.Error()
err = errors.Errorf("corrupted on transfer: sizes differ %d vs %d", src.Size(), dst.Size())
ErrorLog(dst, "%v", err)
Errorf(dst, "%v", err)
removeFailedCopy(dst)
return err
}
@ -315,7 +315,7 @@ func Copy(f Fs, dst Object, remote string, src Object) (err error) {
// TODO(klauspost): This could be extended, so we always create a hash type matching
// the destination, and calculate it while sending.
common := src.Fs().Hashes().Overlap(dst.Fs().Hashes())
// Debug(src, "common hashes: %v", common)
// Debugf(src, "common hashes: %v", common)
if !Config.SizeOnly && common.Count() > 0 {
// Get common hash type
hashType := common.GetOne()
@ -324,24 +324,24 @@ func Copy(f Fs, dst Object, remote string, src Object) (err error) {
srcSum, err = src.Hash(hashType)
if err != nil {
Stats.Error()
ErrorLog(src, "Failed to read src hash: %v", err)
Errorf(src, "Failed to read src hash: %v", err)
} else if srcSum != "" {
var dstSum string
dstSum, err = dst.Hash(hashType)
if err != nil {
Stats.Error()
ErrorLog(dst, "Failed to read hash: %v", err)
Errorf(dst, "Failed to read hash: %v", err)
} else if !Config.IgnoreSize && !HashEquals(srcSum, dstSum) {
Stats.Error()
err = errors.Errorf("corrupted on transfer: %v hash differ %q vs %q", hashType, srcSum, dstSum)
ErrorLog(dst, "%v", err)
Errorf(dst, "%v", err)
removeFailedCopy(dst)
return err
}
}
}
Debug(src, actionTaken)
Debugf(src, actionTaken)
return err
}
@ -349,7 +349,7 @@ func Copy(f Fs, dst Object, remote string, src Object) (err error) {
// remote as the name of the new object.
func Move(fdst Fs, dst Object, remote string, src Object) (err error) {
if Config.DryRun {
Log(src, "Not moving as --dry-run")
Logf(src, "Not moving as --dry-run")
return nil
}
// See if we have Move available
@ -365,20 +365,20 @@ func Move(fdst Fs, dst Object, remote string, src Object) (err error) {
_, err := doMove(src, remote)
switch err {
case nil:
Debug(src, "Moved (server side)")
Debugf(src, "Moved (server side)")
return nil
case ErrorCantMove:
Debug(src, "Can't move, switching to copy")
Debugf(src, "Can't move, switching to copy")
default:
Stats.Error()
ErrorLog(dst, "Couldn't move: %v", err)
Errorf(dst, "Couldn't move: %v", err)
return err
}
}
// Move not found or didn't work so copy dst <- src
err = Copy(fdst, dst, remote, src)
if err != nil {
ErrorLog(src, "Not deleting source as copy failed: %v", err)
Errorf(src, "Not deleting source as copy failed: %v", err)
return err
}
// Delete src if no error on copy
@ -408,7 +408,7 @@ func deleteFileWithBackupDir(dst Object, backupDir Fs) (err error) {
action, actioned, actioning = "move into backup dir", "Moved into backup dir", "moving into backup dir"
}
if Config.DryRun {
Log(dst, "Not %s as --dry-run", actioning)
Logf(dst, "Not %s as --dry-run", actioning)
} else if backupDir != nil {
if !SameConfig(dst.Fs(), backupDir) {
err = errors.New("parameter to --backup-dir has to be on the same remote as destination")
@ -422,9 +422,9 @@ func deleteFileWithBackupDir(dst Object, backupDir Fs) (err error) {
}
if err != nil {
Stats.Error()
ErrorLog(dst, "Couldn't %s: %v", action, err)
Errorf(dst, "Couldn't %s: %v", action, err)
} else {
Debug(dst, actioned)
Debugf(dst, actioned)
}
Stats.DoneChecking(dst.Remote())
return err
@ -458,7 +458,7 @@ func deleteFilesWithBackupDir(toBeDeleted ObjectsChan, backupDir Fs) error {
}
}()
}
Log(nil, "Waiting for deletions to finish")
Logf(nil, "Waiting for deletions to finish")
wg.Wait()
if errorCount > 0 {
return errors.Errorf("failed to delete %d files", errorCount)
@ -501,7 +501,7 @@ func readFilesFn(fs Fs, includeAll bool, dir string, add func(Object) error) (er
list.SetError(err)
}
} else {
Debug(o, "Excluded from sync (and deletion)")
Debugf(o, "Excluded from sync (and deletion)")
}
}
return list.Error()
@ -550,13 +550,13 @@ func ListDirSorted(fs Fs, includeAll bool, dir string) (entries DirEntries, err
if includeAll || Config.Filter.IncludeObject(o) {
entries = append(entries, o)
} else {
Debug(o, "Excluded from sync (and deletion)")
Debugf(o, "Excluded from sync (and deletion)")
}
} else if dir != nil {
if includeAll || Config.Filter.IncludeDirectory(dir.Remote()) {
entries = append(entries, dir)
} else {
Debug(dir, "Excluded from sync (and deletion)")
Debugf(dir, "Excluded from sync (and deletion)")
}
} else {
// finished since err, o, dir == nil
@ -587,10 +587,10 @@ func readFilesMap(fs Fs, includeAll bool, dir string) (files map[string]Object,
if _, ok := files[remote]; !ok {
files[remote] = o
if _, ok := normalised[normalisedRemote]; ok {
Log(o, "Warning: File found with same name but different case on %v", o.Fs())
Logf(o, "Warning: File found with same name but different case on %v", o.Fs())
}
} else {
Log(o, "Duplicate file detected")
Logf(o, "Duplicate file detected")
}
normalised[normalisedRemote] = struct{}{}
return nil
@ -609,13 +609,13 @@ func readFilesMaps(fdst Fs, fdstIncludeAll bool, fsrc Fs, fsrcIncludeAll bool, d
list := func(fs Fs, includeAll bool, pMap *map[string]Object, pErr *error) {
defer wg.Done()
Log(fs, "Building file list")
Logf(fs, "Building file list")
files, listErr := readFilesMap(fs, includeAll, dir)
if listErr != nil {
ErrorLog(fs, "Error building file list: %v", listErr)
Errorf(fs, "Error building file list: %v", listErr)
*pErr = listErr
} else {
Debug(fs, "Done building file list")
Debugf(fs, "Done building file list")
*pMap = files
}
}
@ -673,7 +673,7 @@ func checkIdentical(dst, src Object) (differ bool, noHash bool) {
defer Stats.DoneChecking(src.Remote())
if src.Size() != dst.Size() {
Stats.Error()
ErrorLog(src, "Sizes differ")
Errorf(src, "Sizes differ")
return true, false
}
if !Config.SizeOnly {
@ -687,11 +687,11 @@ func checkIdentical(dst, src Object) (differ bool, noHash bool) {
}
if !same {
Stats.Error()
ErrorLog(src, "%v differ", hash)
Errorf(src, "%v differ", hash)
return true, false
}
}
Debug(src, "OK")
Debugf(src, "OK")
return false, false
}
@ -718,17 +718,17 @@ func Check(fdst, fsrc Fs) error {
}
}
Log(fdst, "%d files not in %v", len(dstFiles), fsrc)
Logf(fdst, "%d files not in %v", len(dstFiles), fsrc)
for _, dst := range dstFiles {
Stats.Error()
ErrorLog(dst, "File not in %v", fsrc)
Errorf(dst, "File not in %v", fsrc)
atomic.AddInt32(&differences, 1)
}
Log(fsrc, "%d files not in %s", len(srcFiles), fdst)
Logf(fsrc, "%d files not in %s", len(srcFiles), fdst)
for _, src := range srcFiles {
Stats.Error()
ErrorLog(src, "File not in %v", fdst)
Errorf(src, "File not in %v", fdst)
atomic.AddInt32(&differences, 1)
}
@ -757,11 +757,11 @@ func Check(fdst, fsrc Fs) error {
}()
}
Log(fdst, "Waiting for checks to finish")
Logf(fdst, "Waiting for checks to finish")
checkerWg.Wait()
Log(fdst, "%d differences found", Stats.GetErrors())
Logf(fdst, "%d differences found", Stats.GetErrors())
if noHashes > 0 {
Log(fdst, "%d hashes could not be checked", noHashes)
Logf(fdst, "%d hashes could not be checked", noHashes)
}
if differences > 0 {
return errors.Errorf("%d differences found", differences)
@ -865,7 +865,7 @@ func hashLister(ht HashType, f Fs, w io.Writer) error {
if err == ErrHashUnsupported {
sum = "UNSUPPORTED"
} else if err != nil {
Debug(o, "Failed to read %v: %v", ht, err)
Debugf(o, "Failed to read %v: %v", ht, err)
sum = "ERROR"
}
syncFprintf(w, "%*s %s\n", HashWidth[ht], sum, o.Remote())
@ -906,7 +906,7 @@ func ListDir(f Fs, w io.Writer) error {
// Mkdir makes a destination directory or container
func Mkdir(f Fs, dir string) error {
if Config.DryRun {
Log(f, "Not making directory as dry run is set")
Logf(f, "Not making directory as dry run is set")
return nil
}
err := f.Mkdir(dir)
@ -922,9 +922,9 @@ func Mkdir(f Fs, dir string) error {
func TryRmdir(f Fs, dir string) error {
if Config.DryRun {
if dir != "" {
Log(dir, "Not deleting as dry run is set")
Logf(dir, "Not deleting as dry run is set")
} else {
Log(f, "Not deleting as dry run is set")
Logf(f, "Not deleting as dry run is set")
}
return nil
}
@ -948,7 +948,7 @@ func Purge(f Fs) error {
if doPurge := f.Features().Purge; doPurge != nil {
doFallbackPurge = false
if Config.DryRun {
Log(f, "Not purging as --dry-run set")
Logf(f, "Not purging as --dry-run set")
} else {
err = doPurge()
if err == ErrorCantPurge {
@ -1006,12 +1006,12 @@ func dedupeRename(remote string, objs []Object) {
newObj, err := doMove(o, newName)
if err != nil {
Stats.Error()
ErrorLog(o, "Failed to rename: %v", err)
Errorf(o, "Failed to rename: %v", err)
continue
}
Log(newObj, "renamed from: %v", o)
Logf(newObj, "renamed from: %v", o)
} else {
Log(remote, "Not renaming to %q as --dry-run", newName)
Logf(remote, "Not renaming to %q as --dry-run", newName)
}
}
}
@ -1024,7 +1024,7 @@ func dedupeDeleteAllButOne(keep int, remote string, objs []Object) {
}
_ = DeleteFile(o)
}
Log(remote, "Deleted %d extra copies", len(objs)-1)
Logf(remote, "Deleted %d extra copies", len(objs)-1)
}
// dedupeDeleteIdentical deletes all but one of identical (by hash) copies
@ -1042,7 +1042,7 @@ func dedupeDeleteIdentical(remote string, objs []Object) []Object {
objs = nil
for md5sum, hashObjs := range byHash {
if len(hashObjs) > 1 {
Log(remote, "Deleting %d/%d identical duplicates (md5sum %q)", len(hashObjs)-1, len(hashObjs), md5sum)
Logf(remote, "Deleting %d/%d identical duplicates (md5sum %q)", len(hashObjs)-1, len(hashObjs), md5sum)
for _, o := range hashObjs[1:] {
_ = DeleteFile(o)
}
@ -1145,7 +1145,7 @@ var _ pflag.Value = (*DeduplicateMode)(nil)
// delete all but one or rename them to be different. Only useful with
// Google Drive which can have duplicate file names.
func Deduplicate(f Fs, mode DeduplicateMode) error {
Log(f, "Looking for duplicates using %v mode.", mode)
Logf(f, "Looking for duplicates using %v mode.", mode)
files := map[string][]Object{}
list := NewLister().Start(f, "")
for {
@ -1162,10 +1162,10 @@ func Deduplicate(f Fs, mode DeduplicateMode) error {
}
for remote, objs := range files {
if len(objs) > 1 {
Log(remote, "Found %d duplicates - deleting identical copies", len(objs))
Logf(remote, "Found %d duplicates - deleting identical copies", len(objs))
objs = dedupeDeleteIdentical(remote, objs)
if len(objs) <= 1 {
Log(remote, "All duplicates removed")
Logf(remote, "All duplicates removed")
continue
}
switch mode {
@ -1206,7 +1206,7 @@ func listToChan(list *Lister) ObjectsChan {
if err != nil {
if err != ErrorDirNotFound {
Stats.Error()
ErrorLog(nil, "Failed to list: %v", err)
Errorf(nil, "Failed to list: %v", err)
}
return
}
@ -1229,7 +1229,7 @@ func CleanUp(f Fs) error {
return errors.Errorf("%v doesn't support cleanup", f)
}
if Config.DryRun {
Log(f, "Not running cleanup as --dry-run set")
Logf(f, "Not running cleanup as --dry-run set")
return nil
}
return doCleanUp()
@ -1276,7 +1276,7 @@ func Cat(f Fs, w io.Writer, offset, count int64) error {
in, err := o.Open(options...)
if err != nil {
Stats.Error()
ErrorLog(o, "Failed to open: %v", err)
Errorf(o, "Failed to open: %v", err)
return
}
if count >= 0 {
@ -1291,7 +1291,7 @@ func Cat(f Fs, w io.Writer, offset, count int64) error {
err = in.Close()
if err != nil {
Stats.Error()
ErrorLog(o, "Failed to close: %v", err)
Errorf(o, "Failed to close: %v", err)
}
}()
// take the lock just before we output stuff, so at the last possible moment
@ -1300,7 +1300,7 @@ func Cat(f Fs, w io.Writer, offset, count int64) error {
_, err = io.Copy(w, in)
if err != nil {
Stats.Error()
ErrorLog(o, "Failed to send to output: %v", err)
Errorf(o, "Failed to send to output: %v", err)
}
})
}
@ -1315,7 +1315,7 @@ func Rmdirs(f Fs, dir string) error {
o, dir, err := list.Get()
if err != nil {
Stats.Error()
ErrorLog(f, "Failed to list: %v", err)
Errorf(f, "Failed to list: %v", err)
return err
} else if dir != nil {
// add a new directory as empty
@ -1357,7 +1357,7 @@ func Rmdirs(f Fs, dir string) error {
err := TryRmdir(f, dir)
if err != nil {
Stats.Error()
ErrorLog(dir, "Failed to rmdir: %v", err)
Errorf(dir, "Failed to rmdir: %v", err)
return err
}
}

View file

@ -473,7 +473,7 @@ func TestCheck(t *testing.T) {
defer r.Finalise()
check := func(i int, wantErrors int64) {
fs.Debug(r.fremote, "%d: Starting check test", i)
fs.Debugf(r.fremote, "%d: Starting check test", i)
oldErrors := fs.Stats.GetErrors()
err := fs.Check(r.flocal, r.fremote)
gotErrors := fs.Stats.GetErrors() - oldErrors
@ -486,7 +486,7 @@ func TestCheck(t *testing.T) {
if wantErrors != gotErrors {
t.Errorf("%d: Expecting %d errors but got %d", i, wantErrors, gotErrors)
}
fs.Debug(r.fremote, "%d: Ending check test", i)
fs.Debugf(r.fremote, "%d: Ending check test", i)
}
file1 := r.WriteBoth("rutabaga", "is tasty", t3)

View file

@ -72,17 +72,17 @@ func newSyncCopyMove(fdst, fsrc Fs, deleteMode DeleteMode, DoMove bool) (*syncCo
trackRenamesCh: make(chan Object, Config.Checkers),
}
if s.noTraverse && s.deleteMode != DeleteModeOff {
ErrorLog(nil, "Ignoring --no-traverse with sync")
Errorf(nil, "Ignoring --no-traverse with sync")
s.noTraverse = false
}
if s.trackRenames {
// Don't track renames for remotes without server-side move support.
if !CanServerSideMove(fdst) {
ErrorLog(fdst, "Ignoring --track-renames as the destination does not support server-side move or copy")
Errorf(fdst, "Ignoring --track-renames as the destination does not support server-side move or copy")
s.trackRenames = false
}
if s.commonHash == HashNone {
ErrorLog(fdst, "Ignoring --track-renames as the source and destination do not have a common hash")
Errorf(fdst, "Ignoring --track-renames as the source and destination do not have a common hash")
s.trackRenames = false
}
}
@ -92,7 +92,7 @@ func newSyncCopyMove(fdst, fsrc Fs, deleteMode DeleteMode, DoMove bool) (*syncCo
s.deleteMode = DeleteModeAfter
}
if s.noTraverse {
ErrorLog(nil, "Ignoring --no-traverse with --track-renames")
Errorf(nil, "Ignoring --no-traverse with --track-renames")
s.noTraverse = false
}
}
@ -195,17 +195,17 @@ func (s *syncCopyMove) readDstFiles() {
// transferred or not.
func NeedTransfer(dst, src Object) bool {
if dst == nil {
Debug(src, "Couldn't find file - need to transfer")
Debugf(src, "Couldn't find file - need to transfer")
return true
}
// If we should ignore existing files, don't transfer
if Config.IgnoreExisting {
Debug(src, "Destination exists, skipping")
Debugf(src, "Destination exists, skipping")
return false
}
// If we should upload unconditionally
if Config.IgnoreTimes {
Debug(src, "Transferring unconditionally as --ignore-times is in use")
Debugf(src, "Transferring unconditionally as --ignore-times is in use")
return true
}
// If UpdateOlder is in effect, skip if dst is newer than src
@ -223,21 +223,21 @@ func NeedTransfer(dst, src Object) bool {
}
switch {
case dt >= modifyWindow:
Debug(src, "Destination is newer than source, skipping")
Debugf(src, "Destination is newer than source, skipping")
return false
case dt <= -modifyWindow:
Debug(src, "Destination is older than source, transferring")
Debugf(src, "Destination is older than source, transferring")
default:
if src.Size() == dst.Size() {
Debug(src, "Destination mod time is within %v of source and sizes identical, skipping", modifyWindow)
Debugf(src, "Destination mod time is within %v of source and sizes identical, skipping", modifyWindow)
return false
}
Debug(src, "Destination mod time is within %v of source but sizes differ, transferring", modifyWindow)
Debugf(src, "Destination mod time is within %v of source but sizes differ, transferring", modifyWindow)
}
} else {
// Check to see if changed or not
if Equal(src, dst) {
Debug(src, "Unchanged skipping")
Debugf(src, "Unchanged skipping")
return false
}
}
@ -392,7 +392,7 @@ func (s *syncCopyMove) startCheckers() {
// This stops the background checkers
func (s *syncCopyMove) stopCheckers() {
close(s.toBeChecked)
Log(s.fdst, "Waiting for checks to finish")
Logf(s.fdst, "Waiting for checks to finish")
s.checkerWg.Wait()
}
@ -407,7 +407,7 @@ func (s *syncCopyMove) startTransfers() {
// This stops the background transfers
func (s *syncCopyMove) stopTransfers() {
close(s.toBeUploaded)
Log(s.fdst, "Waiting for transfers to finish")
Logf(s.fdst, "Waiting for transfers to finish")
s.transfersWg.Wait()
}
@ -428,7 +428,7 @@ func (s *syncCopyMove) stopRenamers() {
return
}
close(s.toBeRenamed)
Log(s.fdst, "Waiting for renames to finish")
Logf(s.fdst, "Waiting for renames to finish")
s.renamerWg.Wait()
}
@ -484,7 +484,7 @@ func (s *syncCopyMove) stopDeleters() {
// have been found have been removed from dstFiles already.
func (s *syncCopyMove) deleteFiles(checkSrcMap bool) error {
if Stats.Errored() {
ErrorLog(s.fdst, "%v", ErrorNotDeleting)
Errorf(s.fdst, "%v", ErrorNotDeleting)
return ErrorNotDeleting
}
@ -515,7 +515,7 @@ func (s *syncCopyMove) renameHash(obj Object) (hash string) {
var err error
hash, err = obj.Hash(s.commonHash)
if err != nil {
Debug(obj, "Hash failed: %v", err)
Debugf(obj, "Hash failed: %v", err)
return ""
}
if hash == "" {
@ -551,7 +551,7 @@ func (s *syncCopyMove) popRenameMap(hash string) (dst Object) {
// makeRenameMap builds a map of the destination files by hash that
// match sizes in the slice of objects in s.renameCheck
func (s *syncCopyMove) makeRenameMap() {
Debug(s.fdst, "Making map for --track-renames")
Debugf(s.fdst, "Making map for --track-renames")
// first make a map of possible sizes we need to check
possibleSizes := map[int64]struct{}{}
@ -584,7 +584,7 @@ func (s *syncCopyMove) makeRenameMap() {
}()
}
wg.Wait()
Debug(s.fdst, "Finished making map for --track-renames")
Debugf(s.fdst, "Finished making map for --track-renames")
}
// tryRename renames a src object when doing track renames if
@ -605,7 +605,7 @@ func (s *syncCopyMove) tryRename(src Object) bool {
err := MoveFile(s.fdst, s.fdst, src.Remote(), dst.Remote())
if err != nil {
Debug(src, "Failed to rename to %q: %v", dst.Remote(), err)
Debugf(src, "Failed to rename to %q: %v", dst.Remote(), err)
return false
}
@ -614,7 +614,7 @@ func (s *syncCopyMove) tryRename(src Object) bool {
delete(s.dstFiles, dst.Remote())
s.dstFilesMu.Unlock()
Debug(src, "Renamed from %q", dst.Remote())
Debugf(src, "Renamed from %q", dst.Remote())
return true
}
@ -627,7 +627,7 @@ func (s *syncCopyMove) tryRename(src Object) bool {
// dir is the start directory, "" for root
func (s *syncCopyMove) runRecursive() error {
if Same(s.fdst, s.fsrc) {
ErrorLog(s.fdst, "Nothing to do as source and destination are the same")
Errorf(s.fdst, "Nothing to do as source and destination are the same")
return nil
}
@ -694,7 +694,7 @@ func (s *syncCopyMove) runRecursive() error {
if err != nil {
dst = nil
if err != ErrorObjectNotFound {
Debug(src, "Error making NewObject: %v", err)
Debugf(src, "Error making NewObject: %v", err)
}
}
} else {
@ -739,7 +739,7 @@ func (s *syncCopyMove) runRecursive() error {
// Delete files during or after
if s.deleteMode == DeleteModeDuring || s.deleteMode == DeleteModeAfter {
if err != nil {
ErrorLog(s.fdst, "%v", ErrorNotDeleting)
Errorf(s.fdst, "%v", ErrorNotDeleting)
} else {
err = s.deleteFiles(false)
}
@ -775,7 +775,7 @@ func (s *syncCopyMove) runDirAtATime() error {
}
if Same(s.fdst, s.fsrc) {
ErrorLog(s.fdst, "Nothing to do as source and destination are the same")
Errorf(s.fdst, "Nothing to do as source and destination are the same")
return nil
}
@ -859,7 +859,7 @@ func (s *syncCopyMove) runDirAtATime() error {
// Delete files after
if s.deleteMode == DeleteModeAfter {
if s.currentError() != nil {
ErrorLog(s.fdst, "%v", ErrorNotDeleting)
Errorf(s.fdst, "%v", ErrorNotDeleting)
} else {
s.processError(s.deleteFiles(false))
}
@ -941,7 +941,7 @@ func (s *syncCopyMove) transfer(dst, src BasicInfo, job listDirJob, jobs *[]list
} else {
// FIXME src is file, dst is directory
err := errors.New("can't overwrite directory with file")
ErrorLog(srcX, "%v", err)
Errorf(srcX, "%v", err)
s.processError(err)
}
case *Dir:
@ -958,7 +958,7 @@ func (s *syncCopyMove) transfer(dst, src BasicInfo, job listDirJob, jobs *[]list
} else {
// FIXME src is dir, dst is file
err := errors.New("can't overwrite file with directory")
ErrorLog(dstX, "%v", err)
Errorf(dstX, "%v", err)
s.processError(err)
}
default:
@ -1027,7 +1027,7 @@ func (s *syncCopyMove) _runDirAtATime(job listDirJob) (jobs []listDirJob) {
iSrc-- // retry the src
}
}
// Debug(nil, "src = %v, dst = %v", src, dst)
// Debugf(nil, "src = %v, dst = %v", src, dst)
switch {
case src == nil:
s.dstOnly(dst, job, &jobs)
@ -1096,27 +1096,27 @@ func moveDir(fdst, fsrc Fs) error {
// MoveDir moves fsrc into fdst
func MoveDir(fdst, fsrc Fs) error {
if Same(fdst, fsrc) {
ErrorLog(fdst, "Nothing to do as source and destination are the same")
Errorf(fdst, "Nothing to do as source and destination are the same")
return nil
}
// First attempt to use DirMover if exists, same Fs and no filters are active
if fdstDirMove := fdst.Features().DirMove; fdstDirMove != nil && SameConfig(fsrc, fdst) && Config.Filter.InActive() {
if Config.DryRun {
Log(fdst, "Not doing server side directory move as --dry-run")
Logf(fdst, "Not doing server side directory move as --dry-run")
return nil
}
Debug(fdst, "Using server side directory move")
Debugf(fdst, "Using server side directory move")
err := fdstDirMove(fsrc)
switch err {
case ErrorCantDirMove, ErrorDirExists:
Debug(fdst, "Server side directory move failed - fallback to file moves: %v", err)
Debugf(fdst, "Server side directory move failed - fallback to file moves: %v", err)
case nil:
Debug(fdst, "Server side directory move succeeded")
Debugf(fdst, "Server side directory move succeeded")
return nil
default:
Stats.Error()
ErrorLog(fdst, "Server side directory move failed: %v", err)
Errorf(fdst, "Server side directory move failed: %v", err)
return err
}
}
@ -1124,7 +1124,7 @@ func MoveDir(fdst, fsrc Fs) error {
// The two remotes mustn't overlap if we didn't do server side move
if Overlapping(fdst, fsrc) {
err := ErrorCantMoveOverlapping
ErrorLog(fdst, "%v", err)
Errorf(fdst, "%v", err)
return err
}

View file

@ -321,7 +321,7 @@ func TestSyncAfterChangingModtimeOnlyWithNoUpdateModTime(t *testing.T) {
defer r.Finalise()
if r.fremote.Hashes().Count() == 0 {
t.Log("Can't check this if no hashes supported")
t.Logf("Can't check this if no hashes supported")
return
}

View file

@ -339,7 +339,7 @@ func (f *Fs) list(dir string, level int, fn listFn) error {
}
for _, object := range objects.Items {
if !strings.HasPrefix(object.Name, root) {
fs.Log(f, "Odd name received %q", object.Name)
fs.Logf(f, "Odd name received %q", object.Name)
continue
}
remote := object.Name[rootLength:]
@ -505,7 +505,7 @@ func (f *Fs) Precision() time.Duration {
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debug(src, "Can't copy - not same remote type")
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
@ -575,7 +575,7 @@ func (o *Object) setMetaData(info *storage.Object) {
// Read md5sum
md5sumData, err := base64.StdEncoding.DecodeString(info.Md5Hash)
if err != nil {
fs.Log(o, "Bad MD5 decode: %v", err)
fs.Logf(o, "Bad MD5 decode: %v", err)
} else {
o.md5sum = hex.EncodeToString(md5sumData)
}
@ -588,13 +588,13 @@ func (o *Object) setMetaData(info *storage.Object) {
o.modTime = modTime
return
}
fs.Debug(o, "Failed to read mtime from metadata: %s", err)
fs.Debugf(o, "Failed to read mtime from metadata: %s", err)
}
// Fallback to the Updated time
modTime, err := time.Parse(timeFormatIn, info.Updated)
if err != nil {
fs.Log(o, "Bad time decode: %v", err)
fs.Logf(o, "Bad time decode: %v", err)
} else {
o.modTime = modTime
}
@ -627,7 +627,7 @@ func (o *Object) readMetaData() (err error) {
func (o *Object) ModTime() time.Time {
err := o.readMetaData()
if err != nil {
// fs.Log(o, "Failed to read metadata: %v", err)
// fs.Logf(o, "Failed to read metadata: %v", err)
return time.Now()
}
return o.modTime

View file

@ -127,7 +127,7 @@ func (f *Fs) getCredentials() (err error) {
if err != nil {
return err
}
// fs.Debug(f, "Got credentials %+v", result)
// fs.Debugf(f, "Got credentials %+v", result)
if result.Token == "" || result.Endpoint == "" || result.Expires == "" {
return errors.New("couldn't read token, result and expired from credentials")
}
@ -137,7 +137,7 @@ func (f *Fs) getCredentials() (err error) {
return err
}
f.expires = expires
fs.Debug(f, "Got swift credentials (expiry %v in %v)", f.expires, f.expires.Sub(time.Now()))
fs.Debugf(f, "Got swift credentials (expiry %v in %v)", f.expires, f.expires.Sub(time.Now()))
return nil
}

View file

@ -309,7 +309,7 @@ func (f *Fs) cleanRemote(name string) string {
if !utf8.ValidString(name) {
f.wmu.Lock()
if _, ok := f.warned[name]; !ok {
fs.Debug(f, "Replacing invalid UTF-8 characters in %q", name)
fs.Debugf(f, "Replacing invalid UTF-8 characters in %q", name)
f.warned[name] = struct{}{}
}
f.wmu.Unlock()
@ -446,7 +446,7 @@ func (f *Fs) Purge() error {
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debug(src, "Can't move - not same remote type")
fs.Debugf(src, "Can't move - not same remote type")
return nil, fs.ErrorCantMove
}
@ -496,7 +496,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
func (f *Fs) DirMove(src fs.Fs) error {
srcFs, ok := src.(*Fs)
if !ok {
fs.Debug(srcFs, "Can't move directory - not same remote type")
fs.Debugf(srcFs, "Can't move directory - not same remote type")
return fs.ErrorCantDirMove
}
// Check if source exists
@ -601,24 +601,24 @@ func (o *Object) Storable() bool {
// Check for control characters in the remote name and treat the file as not storable
for _, c := range o.Remote() {
if c >= 0x00 && c < 0x20 || c == 0x7F {
fs.Debug(o.fs, "Can't store file with control characters: %q", o.Remote())
fs.Debugf(o.fs, "Can't store file with control characters: %q", o.Remote())
return false
}
}
mode := o.info.Mode()
// On windows a file with os.ModeSymlink represents a file with reparse points
if runtime.GOOS == "windows" && (mode&os.ModeSymlink) != 0 {
fs.Debug(o, "Clearing symlink bit to allow a file with reparse points to be copied")
fs.Debugf(o, "Clearing symlink bit to allow a file with reparse points to be copied")
mode &^= os.ModeSymlink
}
if mode&os.ModeSymlink != 0 {
fs.Debug(o, "Can't follow symlink without -L/--copy-links")
fs.Debugf(o, "Can't follow symlink without -L/--copy-links")
return false
} else if mode&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 {
fs.Debug(o, "Can't transfer non file/directory")
fs.Debugf(o, "Can't transfer non file/directory")
return false
} else if mode&os.ModeDir != 0 {
// fs.Debug(o, "Skipping directory")
// fs.Debugf(o, "Skipping directory")
return false
}
return true
@ -662,7 +662,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
offset = x.Offset
default:
if option.Mandatory() {
fs.Log(o, "Unsupported mandatory option: %v", option)
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}
@ -714,9 +714,9 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo) error {
err = closeErr
}
if err != nil {
fs.Debug(o, "Removing partially written file on error: %v", err)
fs.Debugf(o, "Removing partially written file on error: %v", err)
if removeErr := os.Remove(o.path); removeErr != nil {
fs.ErrorLog(o, "Failed to remove partially written file: %v", removeErr)
fs.Errorf(o, "Failed to remove partially written file: %v", removeErr)
}
return err
}
@ -859,7 +859,7 @@ func cleanWindowsName(f *Fs, name string) string {
if name2 != original && f != nil {
f.wmu.Lock()
if _, ok := f.warned[name]; !ok {
fs.Debug(f, "Replacing invalid characters in %q to %q", name, name2)
fs.Debugf(f, "Replacing invalid characters in %q to %q", name, name2)
f.warned[name] = struct{}{}
}
f.wmu.Unlock()

View file

@ -23,7 +23,7 @@ func readDevice(fi os.FileInfo) uint64 {
}
statT, ok := fi.Sys().(*syscall.Stat_t)
if !ok {
fs.Debug(fi.Name(), "Type assertion fi.Sys().(*syscall.Stat_t) failed from: %#v", fi.Sys())
fs.Debugf(fi.Name(), "Type assertion fi.Sys().(*syscall.Stat_t) failed from: %#v", fi.Sys())
return devUnset
}
return uint64(statT.Dev)

View file

@ -98,9 +98,9 @@ func putToken(name string, token *oauth2.Token) error {
if tokenString != old {
err = fs.ConfigSetValueAndSave(name, fs.ConfigToken, tokenString)
if err != nil {
fs.ErrorLog(nil, "Failed to save new token in config file: %v", err)
fs.Errorf(nil, "Failed to save new token in config file: %v", err)
} else {
fs.Debug(name, "Saved new token in config file")
fs.Debugf(name, "Saved new token in config file")
}
}
return nil
@ -370,7 +370,7 @@ type authServer struct {
// startWebServer runs an internal web server to receive config details
func (s *authServer) Start() {
fs.Debug(nil, "Starting auth server on %s", s.bindAddress)
fs.Debugf(nil, "Starting auth server on %s", s.bindAddress)
mux := http.NewServeMux()
server := &http.Server{
Addr: s.bindAddress,
@ -387,15 +387,15 @@ func (s *authServer) Start() {
})
mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) {
w.Header().Set("Content-Type", "text/html")
fs.Debug(nil, "Received request on auth server")
fs.Debugf(nil, "Received request on auth server")
code := req.FormValue("code")
if code != "" {
state := req.FormValue("state")
if state != s.state {
fs.Debug(nil, "State did not match: want %q got %q", s.state, state)
fs.Debugf(nil, "State did not match: want %q got %q", s.state, state)
fmt.Fprintf(w, "<h1>Failure</h1>\n<p>Auth state doesn't match</p>")
} else {
fs.Debug(nil, "Successfully got code")
fs.Debugf(nil, "Successfully got code")
if s.code != nil {
fmt.Fprintf(w, "<h1>Success</h1>\n<p>Go back to rclone to continue</p>")
s.code <- code
@ -405,7 +405,7 @@ func (s *authServer) Start() {
}
return
}
fs.Debug(nil, "No code found on request")
fs.Debugf(nil, "No code found on request")
w.WriteHeader(500)
fmt.Fprintf(w, "<h1>Failed!</h1>\nNo code found returned by remote server.")
@ -417,11 +417,11 @@ func (s *authServer) Start() {
log.Fatalf("Failed to start auth webserver: %v", err)
}
err = server.Serve(s.listener)
fs.Debug(nil, "Closed auth server with error: %v", err)
fs.Debugf(nil, "Closed auth server with error: %v", err)
}
func (s *authServer) Stop() {
fs.Debug(nil, "Closing auth server")
fs.Debugf(nil, "Closing auth server")
if s.code != nil {
close(s.code)
s.code = nil
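The auth server is the usual local OAuth redirect receiver: listen on localhost, verify the state parameter, and pass the code back over a channel. A compressed standalone sketch — the state value is illustrative, and the port matches rclone's usual bind address:

package main

import (
	"fmt"
	"net/http"
)

func main() {
	const wantState = "xyzzy" // illustrative; real code uses a random nonce
	codeCh := make(chan string)

	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) {
		// Reject redirects whose state doesn't match what we issued.
		if req.FormValue("state") != wantState {
			http.Error(w, "Auth state doesn't match", http.StatusBadRequest)
			return
		}
		fmt.Fprintln(w, "Success - go back to rclone")
		codeCh <- req.FormValue("code")
	})
	go func() {
		_ = http.ListenAndServe("127.0.0.1:53682", mux)
	}()

	// Block until the provider redirects the browser back with the code.
	fmt.Println("got code:", <-codeCh)
}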

View file

@ -39,16 +39,16 @@ func (r *Renew) renewOnExpiry() {
<-expiry
uploads := atomic.LoadInt32(&r.uploads)
if uploads != 0 {
fs.Debug(r.name, "Token expired - %d uploads in progress - refreshing", uploads)
fs.Debugf(r.name, "Token expired - %d uploads in progress - refreshing", uploads)
// Do a transaction
err := r.run()
if err == nil {
fs.Debug(r.name, "Token refresh successful")
fs.Debugf(r.name, "Token refresh successful")
} else {
fs.ErrorLog(r.name, "Token refresh failed: %v", err)
fs.Errorf(r.name, "Token refresh failed: %v", err)
}
} else {
fs.Debug(r.name, "Token expired but no uploads in progress - doing nothing")
fs.Debugf(r.name, "Token expired but no uploads in progress - doing nothing")
}
}
}

View file

@ -169,7 +169,7 @@ func errorHandler(resp *http.Response) error {
errResponse := new(api.Error)
err := rest.DecodeJSON(resp, &errResponse)
if err != nil {
fs.Debug(nil, "Couldn't decode error response: %v", err)
fs.Debugf(nil, "Couldn't decode error response: %v", err)
}
if errResponse.ErrorInfo.Code == "" {
errResponse.ErrorInfo.Code = resp.Status
@ -272,7 +272,7 @@ func (f *Fs) NewObject(remote string) (fs.Object, error) {
// FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error) {
// fs.Debug(f, "FindLeaf(%q, %q)", pathID, leaf)
// fs.Debugf(f, "FindLeaf(%q, %q)", pathID, leaf)
parent, ok := f.dirCache.GetInv(pathID)
if !ok {
return "", false, errors.New("couldn't find parent ID")
@ -299,7 +299,7 @@ func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err er
// CreateDir makes a directory with pathID as parent and name leaf
func (f *Fs) CreateDir(pathID, leaf string) (newID string, err error) {
// fs.Debug(f, "CreateDir(%q, %q)\n", pathID, leaf)
// fs.Debugf(f, "CreateDir(%q, %q)\n", pathID, leaf)
var resp *http.Response
var info *api.Item
opts := rest.Opts{
@ -386,7 +386,7 @@ OUTER:
// ListDir reads the directory specified by the job into out, returning any more jobs
func (f *Fs) ListDir(out fs.ListOpts, job dircache.ListDirJob) (jobs []dircache.ListDirJob, err error) {
fs.Debug(f, "Reading %q", job.Path)
fs.Debugf(f, "Reading %q", job.Path)
_, err = f.listAll(job.DirID, false, false, func(info *api.Item) bool {
remote := job.Path + info.Name
if info.Folder != nil {
@ -419,7 +419,7 @@ func (f *Fs) ListDir(out fs.ListOpts, job dircache.ListDirJob) (jobs []dircache.
}
return false
})
fs.Debug(f, "Finished reading %q", job.Path)
fs.Debugf(f, "Finished reading %q", job.Path)
return jobs, err
}
@ -592,7 +592,7 @@ func (f *Fs) waitForJob(location string, o *Object) error {
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debug(src, "Can't copy - not same remote type")
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
err := srcObj.readMetaData()
@ -700,7 +700,7 @@ func (o *Object) Hash(t fs.HashType) (string, error) {
func (o *Object) Size() int64 {
err := o.readMetaData()
if err != nil {
fs.Log(o, "Failed to read metadata: %v", err)
fs.Logf(o, "Failed to read metadata: %v", err)
return 0
}
return o.size
@ -764,7 +764,7 @@ func (o *Object) readMetaData() (err error) {
func (o *Object) ModTime() time.Time {
err := o.readMetaData()
if err != nil {
fs.Log(o, "Failed to read metadata: %v", err)
fs.Logf(o, "Failed to read metadata: %v", err)
return time.Now()
}
return o.modTime
@ -883,7 +883,7 @@ func (o *Object) uploadMultipart(in io.Reader, size int64) (err error) {
}
// Create upload session
fs.Debug(o, "Starting multipart upload")
fs.Debugf(o, "Starting multipart upload")
session, err := o.createUploadSession()
if err != nil {
return err
@ -893,10 +893,10 @@ func (o *Object) uploadMultipart(in io.Reader, size int64) (err error) {
// Cancel the session if something went wrong
defer func() {
if err != nil {
fs.Debug(o, "Cancelling multipart upload")
fs.Debugf(o, "Cancelling multipart upload")
cancelErr := o.cancelUploadSession(uploadURL)
if cancelErr != nil {
fs.Log(o, "Failed to cancel multipart upload: %v", err)
fs.Logf(o, "Failed to cancel multipart upload: %v", err)
}
}
}()
@ -915,7 +915,7 @@ func (o *Object) uploadMultipart(in io.Reader, size int64) (err error) {
if err != nil {
return err
}
fs.Debug(o, "Uploading segment %d/%d size %d", position, size, n)
fs.Debugf(o, "Uploading segment %d/%d size %d", position, size, n)
err = o.uploadFragment(uploadURL, position, size, buf)
if err != nil {
return err
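The deferred cancel relies on a named return value so the closure can see the function's final error. A standalone sketch of the pattern with illustrative helpers:

package main

import (
	"errors"
	"fmt"
)

// createSession and cancelSession are illustrative stand-ins.
func createSession() string  { return "session-1" }
func cancelSession(s string) { fmt.Println("cancelled", s) }

func upload(fail bool) (err error) {
	s := createSession()
	// The named return lets this deferred closure observe the function's
	// final error and cancel the session only when something went wrong.
	defer func() {
		if err != nil {
			cancelSession(s)
		}
	}()
	if fail {
		return errors.New("fragment upload failed")
	}
	return nil
}

func main() {
	fmt.Println(upload(false)) // <nil>
	fmt.Println(upload(true))  // cancelled session-1, then the error
}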

View file

@ -208,7 +208,7 @@ func (p *Pacer) beginCall() {
p.mu.Lock()
// Restart the timer
go func(t time.Duration) {
// fs.Debug(f, "New sleep for %v at %v", t, time.Now())
// fs.Debugf(f, "New sleep for %v at %v", t, time.Now())
time.Sleep(t)
p.pacer <- struct{}{}
}(p.sleepTime)
@ -236,7 +236,7 @@ func (p *Pacer) defaultPacer(retry bool) {
p.sleepTime = p.maxSleep
}
if p.sleepTime != oldSleepTime {
fs.Debug("pacer", "Rate limited, increasing sleep to %v", p.sleepTime)
fs.Debugf("pacer", "Rate limited, increasing sleep to %v", p.sleepTime)
}
} else {
p.sleepTime = (p.sleepTime<<p.decayConstant - p.sleepTime) >> p.decayConstant
@ -244,7 +244,7 @@ func (p *Pacer) defaultPacer(retry bool) {
p.sleepTime = p.minSleep
}
if p.sleepTime != oldSleepTime {
fs.Debug("pacer", "Reducing sleep to %v", p.sleepTime)
fs.Debugf("pacer", "Reducing sleep to %v", p.sleepTime)
}
}
}
@ -263,7 +263,7 @@ func (p *Pacer) acdPacer(retry bool) {
if consecutiveRetries == 0 {
if p.sleepTime != p.minSleep {
p.sleepTime = p.minSleep
fs.Debug("pacer", "Resetting sleep to minimum %v on success", p.sleepTime)
fs.Debugf("pacer", "Resetting sleep to minimum %v on success", p.sleepTime)
}
} else {
if consecutiveRetries > 9 {
@ -277,7 +277,7 @@ func (p *Pacer) acdPacer(retry bool) {
if p.sleepTime < p.minSleep {
p.sleepTime = p.minSleep
}
fs.Debug("pacer", "Rate limited, sleeping for %v (%d consecutive low level retries)", p.sleepTime, p.consecutiveRetries)
fs.Debugf("pacer", "Rate limited, sleeping for %v (%d consecutive low level retries)", p.sleepTime, p.consecutiveRetries)
}
}
@ -295,7 +295,7 @@ func (p *Pacer) drivePacer(retry bool) {
if consecutiveRetries == 0 {
if p.sleepTime != p.minSleep {
p.sleepTime = p.minSleep
fs.Debug("pacer", "Resetting sleep to minimum %v on success", p.sleepTime)
fs.Debugf("pacer", "Resetting sleep to minimum %v on success", p.sleepTime)
}
} else {
if consecutiveRetries > 5 {
@ -304,7 +304,7 @@ func (p *Pacer) drivePacer(retry bool) {
// consecutiveRetries starts at 1 so go from 1,2,3,4,5,5 => 1,2,4,8,16,16
// maxSleep is 2**(consecutiveRetries-1) seconds + random milliseconds
p.sleepTime = time.Second<<uint(consecutiveRetries-1) + time.Duration(rand.Int63n(int64(time.Second)))
fs.Debug("pacer", "Rate limited, sleeping for %v (%d consecutive low level retries)", p.sleepTime, p.consecutiveRetries)
fs.Debugf("pacer", "Rate limited, sleeping for %v (%d consecutive low level retries)", p.sleepTime, p.consecutiveRetries)
}
}
@ -336,7 +336,7 @@ func (p *Pacer) call(fn Paced, retries int) (err error) {
if !retry {
break
}
fs.Debug("pacer", "low level retry %d/%d (error %v)", i, retries, err)
fs.Debugf("pacer", "low level retry %d/%d (error %v)", i, retries, err)
}
if retry {
err = fs.RetryError(err)
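For reference, the drive pacer's backoff grows as 2^(n-1) seconds for n consecutive retries (capped at 5) plus up to a second of jitter. A standalone sketch of that arithmetic:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

func main() {
	// With n consecutive retries (capped at 5), sleep 2^(n-1) seconds
	// plus random jitter: 1,2,4,8,16,16,... seconds.
	for retries := 1; retries <= 7; retries++ {
		n := retries
		if n > 5 {
			n = 5
		}
		sleep := time.Second<<uint(n-1) + time.Duration(rand.Int63n(int64(time.Second)))
		fmt.Printf("retry %d: sleeping %v\n", retries, sleep)
	}
}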

View file

@ -349,7 +349,7 @@ func s3Connection(name string) (*s3.S3, *session.Session, error) {
ses := session.New()
c := s3.New(ses, awsConfig)
if region == "other-v2-signature" {
fs.Debug(name, "Using v2 auth")
fs.Debugf(name, "Using v2 auth")
signer := func(req *request.Request) {
// Ignore AnonymousCredentials object
if req.Config.Credentials == credentials.AnonymousCredentials {
@ -426,7 +426,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *s3.Object) (fs.Object, error
if info != nil {
// Set info but not meta
if info.LastModified == nil {
fs.Log(o, "Failed to read last modified")
fs.Logf(o, "Failed to read last modified")
o.lastModified = time.Now()
} else {
o.lastModified = *info.LastModified
@ -488,12 +488,12 @@ func (f *Fs) list(dir string, level int, fn listFn) error {
if level == 1 {
for _, commonPrefix := range resp.CommonPrefixes {
if commonPrefix.Prefix == nil {
fs.Log(f, "Nil common prefix received")
fs.Logf(f, "Nil common prefix received")
continue
}
remote := *commonPrefix.Prefix
if !strings.HasPrefix(remote, f.root) {
fs.Log(f, "Odd name received %q", remote)
fs.Logf(f, "Odd name received %q", remote)
continue
}
remote = remote[rootLength:]
@ -509,7 +509,7 @@ func (f *Fs) list(dir string, level int, fn listFn) error {
for _, object := range resp.Contents {
key := aws.StringValue(object.Key)
if !strings.HasPrefix(key, f.root) {
fs.Log(f, "Odd name received %q", key)
fs.Logf(f, "Odd name received %q", key)
continue
}
remote := key[rootLength:]
@ -697,7 +697,7 @@ func (f *Fs) Precision() time.Duration {
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debug(src, "Can't copy - not same remote type")
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
srcFs := srcObj.fs
@ -751,7 +751,7 @@ func (o *Object) Hash(t fs.HashType) (string, error) {
etag := strings.Trim(strings.ToLower(o.etag), `"`)
// Check the etag is a valid md5sum
if !matchMd5.MatchString(etag) {
// fs.Debug(o, "Invalid md5sum (probably multipart uploaded) - ignoring: %q", etag)
// fs.Debugf(o, "Invalid md5sum (probably multipart uploaded) - ignoring: %q", etag)
return "", nil
}
return etag, nil
@ -793,7 +793,7 @@ func (o *Object) readMetaData() (err error) {
o.bytes = size
o.meta = resp.Metadata
if resp.LastModified == nil {
fs.Log(o, "Failed to read last modified from HEAD: %v", err)
fs.Logf(o, "Failed to read last modified from HEAD: %v", err)
o.lastModified = time.Now()
} else {
o.lastModified = *resp.LastModified
@ -809,18 +809,18 @@ func (o *Object) readMetaData() (err error) {
func (o *Object) ModTime() time.Time {
err := o.readMetaData()
if err != nil {
fs.Log(o, "Failed to read metadata: %v", err)
fs.Logf(o, "Failed to read metadata: %v", err)
return time.Now()
}
// read mtime out of metadata if available
d, ok := o.meta[metaMtime]
if !ok || d == nil {
// fs.Debug(o, "No metadata")
// fs.Debugf(o, "No metadata")
return o.lastModified
}
modTime, err := swift.FloatStringToTime(*d)
if err != nil {
fs.Log(o, "Failed to read mtime from object: %v", err)
fs.Logf(o, "Failed to read mtime from object: %v", err)
return o.lastModified
}
return modTime
@ -835,7 +835,7 @@ func (o *Object) SetModTime(modTime time.Time) error {
o.meta[metaMtime] = aws.String(swift.TimeToFloatString(modTime))
if o.bytes >= maxSizeForCopy {
fs.Debug(o, "SetModTime is unsupported for objects bigger than %v bytes", fs.SizeSuffix(maxSizeForCopy))
fs.Debugf(o, "SetModTime is unsupported for objects bigger than %v bytes", fs.SizeSuffix(maxSizeForCopy))
return nil
}
@ -878,7 +878,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
req.Range = &value
default:
if option.Mandatory() {
fs.Log(o, "Unsupported mandatory option: %v", option)
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}
@ -957,7 +957,7 @@ func (o *Object) Remove() error {
func (o *Object) MimeType() string {
err := o.readMetaData()
if err != nil {
fs.Log(o, "Failed to read metadata: %v", err)
fs.Logf(o, "Failed to read metadata: %v", err)
return ""
}
return o.mimeType

View file

@ -164,7 +164,7 @@ func NewFs(name, root string) (fs.Fs, error) {
go func() {
// FIXME re-open the connection here...
err := f.sshClient.Conn.Wait()
fs.ErrorLog(f, "SSH connection closed: %v", err)
fs.Errorf(f, "SSH connection closed: %v", err)
}()
return f, nil
}
@ -241,7 +241,7 @@ func (f *Fs) list(out fs.ListOpts, dir string, level int, wg *sync.WaitGroup, to
infos, err := f.sftpClient.ReadDir(sftpDir)
if err != nil {
err = errors.Wrapf(err, "error listing %q", dir)
fs.ErrorLog(f, "Listing failed: %v", err)
fs.Errorf(f, "Listing failed: %v", err)
out.SetError(err)
return
}
@ -363,7 +363,7 @@ func (f *Fs) Rmdir(dir string) error {
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debug(src, "Can't move - not same remote type")
fs.Debugf(src, "Can't move - not same remote type")
return nil, fs.ErrorCantMove
}
err := f.mkParentDir(remote)
@ -395,7 +395,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
func (f *Fs) DirMove(src fs.Fs) error {
srcFs, ok := src.(*Fs)
if !ok {
fs.Debug(srcFs, "Can't move directory - not same remote type")
fs.Debugf(srcFs, "Can't move directory - not same remote type")
return fs.ErrorCantDirMove
}
@ -410,7 +410,7 @@ func (f *Fs) DirMove(src fs.Fs) error {
// Refuse to move to or from the root
if f.root == "" || srcFs.root == "" {
fs.Debug(src, "DirMove error: Can't move root")
fs.Debugf(src, "DirMove error: Can't move root")
return errors.New("can't move root directory")
}
@ -540,7 +540,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
offset = x.Offset
default:
if option.Mandatory() {
fs.Log(o, "Unsupported mandatory option: %v", option)
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}
@ -571,9 +571,9 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo) error {
remove := func() {
removeErr := o.fs.sftpClient.Remove(o.path())
if removeErr != nil {
fs.Debug(src, "Failed to remove: %v", removeErr)
fs.Debugf(src, "Failed to remove: %v", removeErr)
} else {
fs.Debug(src, "Removed after failed upload: %v", err)
fs.Debugf(src, "Removed after failed upload: %v", err)
}
}
_, err = file.ReadFrom(in)
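
Update registers a remove closure so a failed transfer deletes the half-written remote file instead of leaving it to pass for a finished upload. The same pattern against the local filesystem, as a sketch:

    package sketch

    import (
    	"io"
    	"os"
    )

    // writeOrRemove copies in to path and deletes the destination if
    // the copy fails, so a failed upload never leaves a plausible-
    // looking partial file behind. Local-disk sketch of the sftp
    // remove() pattern above.
    func writeOrRemove(path string, in io.Reader) error {
    	f, err := os.Create(path)
    	if err != nil {
    		return err
    	}
    	if _, err := io.Copy(f, in); err != nil {
    		f.Close()
    		os.Remove(path) // best effort, mirroring the remove() closure
    		return err
    	}
    	return f.Close()
    }
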


@ -29,7 +29,7 @@ func (l *stringLock) Lock(ID string) {
}
// Wait for the channel to be closed
l.mu.Unlock()
fs.Log(nil, "Waiting for stringLock on %q", ID)
fs.Logf(nil, "Waiting for stringLock on %q", ID)
<-ch
l.mu.Lock()
}
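
The Lock method above parks callers on a per-ID channel: the holder stores a channel in a map, and waiters unlock the mutex, block until the channel is closed, then relock and re-check. A reconstruction of the whole type under that reading; the field layout and the Unlock side are assumptions beyond the lines shown:

    package sketch

    import "sync"

    // stringLock serializes work per string ID. Lockers that find an
    // ID taken block on its channel until Unlock closes it.
    type stringLock struct {
    	mu    sync.Mutex
    	locks map[string]chan struct{}
    }

    func newStringLock() *stringLock {
    	return &stringLock{locks: make(map[string]chan struct{})}
    }

    func (l *stringLock) Lock(id string) {
    	l.mu.Lock()
    	for {
    		ch, ok := l.locks[id]
    		if !ok {
    			break // free: claim it below
    		}
    		// Wait for the channel to be closed, then re-check.
    		l.mu.Unlock()
    		<-ch
    		l.mu.Lock()
    	}
    	l.locks[id] = make(chan struct{})
    	l.mu.Unlock()
    }

    func (l *stringLock) Unlock(id string) {
    	l.mu.Lock()
    	close(l.locks[id])
    	delete(l.locks, id)
    	l.mu.Unlock()
    }
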


@ -301,7 +301,7 @@ func (f *Fs) listContainerRoot(container, root string, dir string, level int, fn
}
}
if !strings.HasPrefix(object.Name, root) {
fs.Log(f, "Odd name received %q", object.Name)
fs.Logf(f, "Odd name received %q", object.Name)
continue
}
remote := object.Name[rootLength:]
@ -484,7 +484,7 @@ func (f *Fs) Purge() error {
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debug(src, "Can't copy - not same remote type")
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
srcFs := srcObj.fs
@ -534,7 +534,7 @@ func (o *Object) Hash(t fs.HashType) (string, error) {
return "", err
}
if isDynamicLargeObject || isStaticLargeObject {
fs.Debug(o, "Returning empty Md5sum for swift large object")
fs.Debugf(o, "Returning empty Md5sum for swift large object")
return "", nil
}
return strings.ToLower(o.info.Hash), nil
@ -598,12 +598,12 @@ func (o *Object) readMetaData() (err error) {
func (o *Object) ModTime() time.Time {
err := o.readMetaData()
if err != nil {
fs.Debug(o, "Failed to read metadata: %s", err)
fs.Debugf(o, "Failed to read metadata: %s", err)
return o.info.LastModified
}
modTime, err := o.headers.ObjectMetadata().GetModTime()
if err != nil {
// fs.Log(o, "Failed to read mtime from object: %v", err)
// fs.Logf(o, "Failed to read mtime from object: %v", err)
return o.info.LastModified
}
return modTime
@ -664,11 +664,11 @@ func (o *Object) removeSegments(except string) error {
return nil
}
if except != "" && strings.HasPrefix(remote, except) {
// fs.Debug(o, "Ignoring current segment file %q in container %q", segmentsRoot+remote, o.fs.segmentsContainer)
// fs.Debugf(o, "Ignoring current segment file %q in container %q", segmentsRoot+remote, o.fs.segmentsContainer)
return nil
}
segmentPath := segmentsRoot + remote
fs.Debug(o, "Removing segment file %q in container %q", segmentPath, o.fs.segmentsContainer)
fs.Debugf(o, "Removing segment file %q in container %q", segmentPath, o.fs.segmentsContainer)
return o.fs.c.ObjectDelete(o.fs.segmentsContainer, segmentPath)
})
if err != nil {
@ -677,7 +677,7 @@ func (o *Object) removeSegments(except string) error {
// remove the segments container if empty, ignore errors
err = o.fs.c.ContainerDelete(o.fs.segmentsContainer)
if err == nil {
fs.Debug(o, "Removed empty container %q", o.fs.segmentsContainer)
fs.Debugf(o, "Removed empty container %q", o.fs.segmentsContainer)
}
return nil
}
@ -717,7 +717,7 @@ func (o *Object) updateChunks(in io.Reader, headers swift.Headers, size int64, c
headers["Content-Length"] = strconv.FormatInt(n, 10) // set Content-Length as we know it
segmentReader := io.LimitReader(in, n)
segmentPath := fmt.Sprintf("%s/%08d", segmentsPath, i)
fs.Debug(o, "Uploading segment file %q into %q", segmentPath, o.fs.segmentsContainer)
fs.Debugf(o, "Uploading segment file %q into %q", segmentPath, o.fs.segmentsContainer)
_, err := o.fs.c.ObjectPut(o.fs.segmentsContainer, segmentPath, segmentReader, true, "", "", headers)
if err != nil {
return "", err
@ -770,7 +770,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo) error {
if isDynamicLargeObject {
err = o.removeSegments(uniquePrefix)
if err != nil {
fs.Log(o, "Failed to remove old segments - carrying on with upload: %v", err)
fs.Logf(o, "Failed to remove old segments - carrying on with upload: %v", err)
}
}


@ -193,7 +193,7 @@ func (f *Fs) listDir(fn listFn) (err error) {
return err
}
default:
fs.Debug(f, "Unknown resource type %q", element.ResourceType)
fs.Debugf(f, "Unknown resource type %q", element.ResourceType)
}
}
}
@ -347,7 +347,7 @@ func (o *Object) setMetaData(info *yandex.ResourceInfoResponse) {
}
t, err := time.Parse(time.RFC3339Nano, modTimeString)
if err != nil {
fs.Log("Failed to parse modtime from %q: %v", modTimeString, err)
fs.Logf("Failed to parse modtime from %q: %v", modTimeString, err)
} else {
o.modTime = t
}
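
setMetaData parses the RFC3339Nano modification time and, on failure, logs and keeps the previous value rather than zeroing it. The same fallback shape as a hypothetical helper:

    package sketch

    import (
    	"log"
    	"time"
    )

    // parseModTime mirrors setMetaData above: parse an RFC3339Nano
    // timestamp, returning a fallback value when parsing fails
    // instead of a zero time.
    func parseModTime(s string, fallback time.Time) time.Time {
    	t, err := time.Parse(time.RFC3339Nano, s)
    	if err != nil {
    		log.Printf("Failed to parse modtime from %q: %v", s, err)
    		return fallback
    	}
    	return t
    }
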
@ -494,7 +494,7 @@ func (o *Object) Size() int64 {
func (o *Object) ModTime() time.Time {
err := o.readMetaData()
if err != nil {
fs.Log(o, "Failed to read metadata: %v", err)
fs.Logf(o, "Failed to read metadata: %v", err)
return time.Now()
}
return o.modTime
@ -614,7 +614,7 @@ func mkDirFullPath(client *yandex.Client, path string) error {
func (o *Object) MimeType() string {
err := o.readMetaData()
if err != nil {
fs.Log(o, "Failed to read metadata: %v", err)
fs.Logf(o, "Failed to read metadata: %v", err)
return ""
}
return o.mimeType