2018-01-12 16:30:54 +00:00
// Package operations does generic operations on filesystems and objects
package operations
2014-03-28 17:56:04 +00:00
import (
2017-02-13 10:48:26 +00:00
"bytes"
2018-04-06 18:13:27 +00:00
"context"
2019-10-26 19:27:33 +00:00
"encoding/base64"
2018-05-13 11:15:05 +00:00
"encoding/csv"
2019-10-26 19:27:33 +00:00
"encoding/hex"
2022-05-24 10:31:48 +00:00
"encoding/json"
2021-11-04 10:12:57 +00:00
"errors"
2014-03-28 17:56:04 +00:00
"fmt"
2014-08-01 16:58:39 +00:00
"io"
2021-07-28 16:05:21 +00:00
"mime"
2019-12-18 17:02:13 +00:00
"net/http"
2020-06-05 15:13:10 +00:00
"os"
2015-03-01 12:38:31 +00:00
"path"
2019-02-28 11:39:32 +00:00
"path/filepath"
operations: fix renaming a file on macOS
Before this change, a file would sometimes be silently deleted instead of
renamed on macOS, due to its unique handling of unicode normalization. Rclone
already had a SameObject check in place for case insensitivity before deleting
the source (for example if "hello.txt" was renamed to "HELLO.txt"), but had no
such check for unicode normalization. After this change, the delete is skipped
on macOS if the src and dst filenames normalize to the same NFC string.
Example of the previous behavior:
~ % rclone touch /Users/nielash/rename_test/ö
~ % rclone lsl /Users/nielash/rename_test/ö
0 2023-11-21 17:28:06.170486000 ö
~ % rclone moveto /Users/nielash/rename_test/ö /Users/nielash/rename_test/ö -vv
2023/11/21 17:28:51 DEBUG : rclone: Version "v1.64.0" starting with parameters ["rclone" "moveto" "/Users/nielash/rename_test/ö" "/Users/nielash/rename_test/ö" "-vv"]
2023/11/21 17:28:51 DEBUG : Creating backend with remote "/Users/nielash/rename_test/ö"
2023/11/21 17:28:51 DEBUG : Using config file from "/Users/nielash/.config/rclone/rclone.conf"
2023/11/21 17:28:51 DEBUG : fs cache: adding new entry for parent of "/Users/nielash/rename_test/ö", "/Users/nielash/rename_test"
2023/11/21 17:28:51 DEBUG : Creating backend with remote "/Users/nielash/rename_test/"
2023/11/21 17:28:51 DEBUG : fs cache: renaming cache item "/Users/nielash/rename_test/" to be canonical "/Users/nielash/rename_test"
2023/11/21 17:28:51 DEBUG : ö: Size and modification time the same (differ by 0s, within tolerance 1ns)
2023/11/21 17:28:51 DEBUG : ö: Unchanged skipping
2023/11/21 17:28:51 INFO : ö: Deleted
2023/11/21 17:28:51 INFO :
Transferred: 0 B / 0 B, -, 0 B/s, ETA -
Checks: 1 / 1, 100%
Deleted: 1 (files), 0 (dirs)
Elapsed time: 0.0s
2023/11/21 17:28:51 DEBUG : 5 go routines active
~ % rclone lsl /Users/nielash/rename_test/
~ %
2023-11-20 16:04:54 +00:00
"runtime"
2016-03-05 16:10:51 +00:00
"sort"
2018-01-06 14:39:31 +00:00
"strconv"
2016-01-23 20:16:47 +00:00
"strings"
2014-03-28 17:56:04 +00:00
"sync"
2015-10-02 18:48:48 +00:00
"sync/atomic"
2017-08-03 19:42:35 +00:00
"time"
2016-01-23 20:16:47 +00:00
2019-07-28 17:47:38 +00:00
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/cache"
2020-03-20 18:43:29 +00:00
"github.com/rclone/rclone/fs/config"
2021-02-09 10:12:23 +00:00
"github.com/rclone/rclone/fs/filter"
2019-07-28 17:47:38 +00:00
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fs/walk"
2020-06-05 15:13:10 +00:00
"github.com/rclone/rclone/lib/atexit"
2019-08-06 11:44:08 +00:00
"github.com/rclone/rclone/lib/random"
2019-07-28 17:47:38 +00:00
"github.com/rclone/rclone/lib/readers"
2019-01-15 16:43:55 +00:00
"golang.org/x/sync/errgroup"
operations: fix renaming a file on macOS
Before this change, a file would sometimes be silently deleted instead of
renamed on macOS, due to its unique handling of unicode normalization. Rclone
already had a SameObject check in place for case insensitivity before deleting
the source (for example if "hello.txt" was renamed to "HELLO.txt"), but had no
such check for unicode normalization. After this change, the delete is skipped
on macOS if the src and dst filenames normalize to the same NFC string.
Example of the previous behavior:
~ % rclone touch /Users/nielash/rename_test/ö
~ % rclone lsl /Users/nielash/rename_test/ö
0 2023-11-21 17:28:06.170486000 ö
~ % rclone moveto /Users/nielash/rename_test/ö /Users/nielash/rename_test/ö -vv
2023/11/21 17:28:51 DEBUG : rclone: Version "v1.64.0" starting with parameters ["rclone" "moveto" "/Users/nielash/rename_test/ö" "/Users/nielash/rename_test/ö" "-vv"]
2023/11/21 17:28:51 DEBUG : Creating backend with remote "/Users/nielash/rename_test/ö"
2023/11/21 17:28:51 DEBUG : Using config file from "/Users/nielash/.config/rclone/rclone.conf"
2023/11/21 17:28:51 DEBUG : fs cache: adding new entry for parent of "/Users/nielash/rename_test/ö", "/Users/nielash/rename_test"
2023/11/21 17:28:51 DEBUG : Creating backend with remote "/Users/nielash/rename_test/"
2023/11/21 17:28:51 DEBUG : fs cache: renaming cache item "/Users/nielash/rename_test/" to be canonical "/Users/nielash/rename_test"
2023/11/21 17:28:51 DEBUG : ö: Size and modification time the same (differ by 0s, within tolerance 1ns)
2023/11/21 17:28:51 DEBUG : ö: Unchanged skipping
2023/11/21 17:28:51 INFO : ö: Deleted
2023/11/21 17:28:51 INFO :
Transferred: 0 B / 0 B, -, 0 B/s, ETA -
Checks: 1 / 1, 100%
Deleted: 1 (files), 0 (dirs)
Elapsed time: 0.0s
2023/11/21 17:28:51 DEBUG : 5 go routines active
~ % rclone lsl /Users/nielash/rename_test/
~ %
2023-11-20 16:04:54 +00:00
"golang.org/x/text/unicode/norm"
2014-03-28 17:56:04 +00:00
)
2016-01-11 12:39:33 +00:00
// CheckHashes checks the two files to see if they have common
// known hash types and compares them
2014-03-28 17:56:04 +00:00
//
2022-08-05 15:35:41 +00:00
// Returns.
2015-08-20 19:48:58 +00:00
//
2016-01-24 18:06:57 +00:00
// equal - which is equality of the hashes
//
// hash - the HashType. This is HashNone if either of the hashes were
// unset or a compatible hash couldn't be found.
//
// err - may return an error which will already have been logged
2014-03-28 17:56:04 +00:00
//
2015-08-20 19:48:58 +00:00
// If an error is returned it will return equal as false
2019-06-17 08:34:30 +00:00
func CheckHashes ( ctx context . Context , src fs . ObjectInfo , dst fs . Object ) ( equal bool , ht hash . Type , err error ) {
2016-01-11 12:39:33 +00:00
common := src . Fs ( ) . Hashes ( ) . Overlap ( dst . Fs ( ) . Hashes ( ) )
2018-01-12 16:30:54 +00:00
// fs.Debugf(nil, "Shared hashes: %v", common)
2016-01-11 12:39:33 +00:00
if common . Count ( ) == 0 {
2018-01-18 20:27:52 +00:00
return true , hash . None , nil
2016-01-11 12:39:33 +00:00
}
2019-08-10 09:28:26 +00:00
equal , ht , _ , _ , err = checkHashes ( ctx , src , dst , common . GetOne ( ) )
return equal , ht , err
}
2021-11-09 09:45:36 +00:00
// errNoHash is a sentinel error used inside checkHashes to abort the
// parallel hash calculation early when one side has no hash available.
var errNoHash = errors.New("no hash available")
2019-08-10 09:28:26 +00:00
// checkHashes does the work of CheckHashes but takes a hash.Type and
// returns the effective hash type used.
func checkHashes ( ctx context . Context , src fs . ObjectInfo , dst fs . Object , ht hash . Type ) ( equal bool , htOut hash . Type , srcHash , dstHash string , err error ) {
// Calculate hashes in parallel
g , ctx := errgroup . WithContext ( ctx )
2021-11-09 09:45:36 +00:00
var srcErr , dstErr error
2019-08-10 09:28:26 +00:00
g . Go ( func ( ) ( err error ) {
2021-11-09 09:45:36 +00:00
srcHash , srcErr = src . Hash ( ctx , ht )
if srcErr != nil {
return srcErr
}
if srcHash == "" {
fs . Debugf ( src , "Src hash empty - aborting Dst hash check" )
return errNoHash
2019-08-10 09:28:26 +00:00
}
2021-11-09 09:45:36 +00:00
return nil
2019-08-10 09:28:26 +00:00
} )
g . Go ( func ( ) ( err error ) {
2021-11-09 09:45:36 +00:00
dstHash , dstErr = dst . Hash ( ctx , ht )
if dstErr != nil {
return dstErr
}
if dstHash == "" {
2022-07-04 09:18:04 +00:00
fs . Debugf ( dst , "Dst hash empty - aborting Src hash check" )
2021-11-09 09:45:36 +00:00
return errNoHash
2019-08-10 09:28:26 +00:00
}
2021-11-09 09:45:36 +00:00
return nil
2019-08-10 09:28:26 +00:00
} )
err = g . Wait ( )
2021-11-09 09:45:36 +00:00
if err == errNoHash {
2019-08-10 09:28:26 +00:00
return true , hash . None , srcHash , dstHash , nil
2015-08-20 19:48:58 +00:00
}
2021-11-09 09:45:36 +00:00
if srcErr != nil {
err = fs . CountError ( srcErr )
2022-07-04 09:18:04 +00:00
fs . Errorf ( src , "Failed to calculate src hash: %v" , err )
2021-11-09 09:45:36 +00:00
}
if dstErr != nil {
err = fs . CountError ( dstErr )
2022-07-04 09:18:04 +00:00
fs . Errorf ( dst , "Failed to calculate dst hash: %v" , err )
2021-11-09 09:45:36 +00:00
}
if err != nil {
return false , ht , srcHash , dstHash , err
2014-03-28 17:56:04 +00:00
}
2017-02-23 11:23:19 +00:00
if srcHash != dstHash {
2018-01-12 16:30:54 +00:00
fs . Debugf ( src , "%v = %s (%v)" , ht , srcHash , src . Fs ( ) )
fs . Debugf ( dst , "%v = %s (%v)" , ht , dstHash , dst . Fs ( ) )
2019-08-13 15:43:24 +00:00
} else {
fs . Debugf ( src , "%v = %s OK" , ht , srcHash )
2017-02-23 11:23:19 +00:00
}
2019-08-10 09:28:26 +00:00
return srcHash == dstHash , ht , srcHash , dstHash , nil
2014-03-28 17:56:04 +00:00
}
2015-09-22 17:47:16 +00:00
// Equal checks to see if the src and dst objects are equal by looking at
// size, mtime and hash
//
// If the src and dst size are different then it is considered to be
// not equal. If --size-only is in effect then this is the only check
// that is done. If --ignore-size is in effect then this check is
// skipped and the files are considered the same size.
//
// If the size is the same and the mtime is the same then it is
// considered to be equal. This check is skipped if using --checksum.
//
// If the size is the same and mtime is different, unreadable or
// --checksum is set and the hash is the same then the file is
// considered to be equal. In this case the mtime on the dst is
// updated if --checksum is not set.
//
// Otherwise the file is considered to be not equal including if there
// were errors reading info.
func Equal(ctx context.Context, src fs.ObjectInfo, dst fs.Object) bool {
	// Delegate to equal with the default options derived from the
	// config in ctx (--size-only, --checksum, --no-update-modtime).
	return equal(ctx, src, dst, defaultEqualOpt(ctx))
}
2018-01-31 16:15:30 +00:00
// sizeDiffers compare the size of src and dst taking into account the
// various ways of ignoring sizes
2020-11-05 11:33:32 +00:00
func sizeDiffers ( ctx context . Context , src , dst fs . ObjectInfo ) bool {
ci := fs . GetConfig ( ctx )
if ci . IgnoreSize || src . Size ( ) < 0 || dst . Size ( ) < 0 {
2018-01-31 16:15:30 +00:00
return false
}
return src . Size ( ) != dst . Size ( )
}
2019-01-10 11:07:10 +00:00
// checksumWarning ensures the "--checksum with no hashes in common"
// warning is only logged once per run.
var checksumWarning sync.Once
2019-06-08 13:08:23 +00:00
// equalOpt carries the options for the equal function()
type equalOpt struct {
	sizeOnly          bool // if set only check size
	checkSum          bool // if set check checksum+size instead of modtime+size
	updateModTime     bool // if set update the modtime if hashes identical and checking with modtime+size
	forceModTimeMatch bool // if set assume modtimes match
}
// default set of options for equal()
2020-11-05 11:33:32 +00:00
func defaultEqualOpt ( ctx context . Context ) equalOpt {
ci := fs . GetConfig ( ctx )
2019-06-08 13:08:23 +00:00
return equalOpt {
2020-11-05 11:33:32 +00:00
sizeOnly : ci . SizeOnly ,
checkSum : ci . CheckSum ,
updateModTime : ! ci . NoUpdateModTime ,
2019-06-08 13:08:23 +00:00
forceModTimeMatch : false ,
}
}
2020-09-29 16:03:25 +00:00
// modTimeUploadOnce ensures the "forced to upload" notice below is
// only logged once per run.
var modTimeUploadOnce sync.Once

// emit a log if we are about to upload a file to set its modification time
func logModTimeUpload(dst fs.Object) {
	modTimeUploadOnce.Do(func() {
		fs.Logf(dst.Fs(), "Forced to upload files to set modification times on this backend.")
	})
}
bisync: full support for comparing checksum, size, modtime - fixes #5679 fixes #5683 fixes #5684 fixes #5675
Before this change, bisync could only detect changes based on modtime, and
would refuse to run if either path lacked modtime support. This made bisync
unavailable for many of rclone's backends. Additionally, bisync did not account
for the Fs's precision when comparing modtimes, meaning that they could only be
reliably compared within the same side -- not against the opposite side. Size
and checksum (even when available) were ignored completely for deltas.
After this change, bisync now fully supports comparing based on any combination
of size, modtime, and checksum, lifting the prior restriction on backends
without modtime support. The comparison logic considers the backend's
precision, hash types, and other features as appropriate.
The comparison features optionally use a new --compare flag (which takes any
combination of size,modtime,checksum) and even supports some combinations not
otherwise supported in `sync` (like comparing all three at the same time.) By
default (without the --compare flag), bisync inherits the same comparison
options as `sync` (that is: size and modtime by default, unless modified with
flags such as --checksum or --size-only.) If the --compare flag is set, it will
override these defaults.
If --compare includes checksum and both remotes support checksums but have no
hash types in common with each other, checksums will be considered only for
comparisons within the same side (to determine what has changed since the prior
sync), but not for comparisons against the opposite side. If one side supports
checksums and the other does not, checksums will only be considered on the side
that supports them. When comparing with checksum and/or size without modtime,
bisync cannot determine whether a file is newer or older -- only whether it is
changed or unchanged. (If it is changed on both sides, bisync still does the
standard equality-check to avoid declaring a sync conflict unless it absolutely
has to.)
Also included are some new flags to customize the checksum comparison behavior
on backends where hashes are slow or unavailable. --no-slow-hash and
--slow-hash-sync-only allow selectively ignoring checksums on backends such as
local where they are slow. --download-hash allows computing them by downloading
when (and only when) they're otherwise not available. Of course, this option
probably won't be practical with large files, but may be a good option for
syncing small-but-important files with maximum accuracy (for example, a source
code repo on a crypt remote.) An additional advantage over methods like
cryptcheck is that the original file is not required for comparison (for
example, --download-hash can be used to bisync two different crypt remotes with
different passwords.)
Additionally, all of the above are now considered during the final --check-sync
for much-improved accuracy (before this change, it only compared filenames!)
Many other details are explained in the included docs.
2023-12-01 00:44:38 +00:00
// EqualFn allows replacing Equal() with a custom function during NeedTransfer()
type EqualFn func(ctx context.Context, src fs.ObjectInfo, dst fs.Object) bool

// equalFnContextKey is an unexported key type so context values set
// here cannot collide with keys from other packages.
type equalFnContextKey struct{}

// equalFnKey is the context key under which an EqualFn is stored.
var equalFnKey = equalFnContextKey{}

// WithEqualFn stores equalFn in ctx and returns a copy of ctx in which equalFnKey = equalFn
func WithEqualFn(ctx context.Context, equalFn EqualFn) context.Context {
	return context.WithValue(ctx, equalFnKey, equalFn)
}
2019-06-08 13:08:23 +00:00
// equal does the work of Equal, with the checks to perform controlled
// by opt. It logs each Match/Differ decision via the logger from ctx
// and returns true if src and dst are considered equal.
func equal(ctx context.Context, src fs.ObjectInfo, dst fs.Object, opt equalOpt) bool {
	ci := fs.GetConfig(ctx)
	logger, _ := GetLogger(ctx)
	// Size check first - cheapest test (skipped inside sizeDiffers if
	// --ignore-size or either size is unknown).
	if sizeDiffers(ctx, src, dst) {
		fs.Debugf(src, "Sizes differ (src %d vs dst %d)", src.Size(), dst.Size())
		logger(ctx, Differ, src, dst, nil)
		return false
	}
	if opt.sizeOnly {
		fs.Debugf(src, "Sizes identical")
		logger(ctx, Match, src, dst, nil)
		return true
	}

	// Assert: Size is equal or being ignored

	// If checking checksum and not modtime
	if opt.checkSum {
		// Check the hash
		same, ht, _ := CheckHashes(ctx, src, dst)
		if !same {
			fs.Debugf(src, "%v differ", ht)
			logger(ctx, Differ, src, dst, nil)
			return false
		}
		if ht == hash.None {
			// Couldn't compare hashes - warn once if there really is
			// no hash type in common, then fall back to size-only.
			common := src.Fs().Hashes().Overlap(dst.Fs().Hashes())
			if common.Count() == 0 {
				checksumWarning.Do(func() {
					fs.Logf(dst.Fs(), "--checksum is in use but the source and destination have no hashes in common; falling back to --size-only")
				})
			}
			fs.Debugf(src, "Size of src and dst objects identical")
		} else {
			fs.Debugf(src, "Size and %v of src and dst objects identical", ht)
		}
		logger(ctx, Match, src, dst, nil)
		return true
	}

	srcModTime := src.ModTime(ctx)
	if !opt.forceModTimeMatch {
		// Sizes the same so check the mtime
		modifyWindow := fs.GetModifyWindow(ctx, src.Fs(), dst.Fs())
		if modifyWindow == fs.ModTimeNotSupported {
			// Can't compare modtimes on these backends - size match is enough
			fs.Debugf(src, "Sizes identical")
			logger(ctx, Match, src, dst, nil)
			return true
		}
		dstModTime := dst.ModTime(ctx)
		dt := dstModTime.Sub(srcModTime)
		// Equal if within the backends' combined modify window
		if dt < modifyWindow && dt > -modifyWindow {
			fs.Debugf(src, "Size and modification time the same (differ by %s, within tolerance %s)", dt, modifyWindow)
			logger(ctx, Match, src, dst, nil)
			return true
		}

		fs.Debugf(src, "Modification times differ by %s: %v, %v", dt, srcModTime, dstModTime)
	}

	// Check if the hashes are the same
	same, ht, _ := CheckHashes(ctx, src, dst)
	if !same {
		fs.Debugf(src, "%v differ", ht)
		logger(ctx, Differ, src, dst, nil)
		return false
	}
	if ht == hash.None && !ci.RefreshTimes {
		// if couldn't check hash, return that they differ
		logger(ctx, Differ, src, dst, nil)
		return false
	}

	// mod time differs but hash is the same to reset mod time if required
	if opt.updateModTime {
		if !SkipDestructive(ctx, src, "update modification time") {
			// Size and hash the same but mtime different
			// Error if objects are treated as immutable
			if ci.Immutable {
				fs.Errorf(dst, "Timestamp mismatch between immutable objects")
				logger(ctx, Differ, src, dst, nil)
				return false
			}
			// Update the mtime of the dst object here
			err := dst.SetModTime(ctx, srcModTime)
			if errors.Is(err, fs.ErrorCantSetModTime) {
				// Backend can't set modtimes - caller must re-upload to fix it
				logModTimeUpload(dst)
				fs.Infof(dst, "src and dst identical but can't set mod time without re-uploading")
				logger(ctx, Differ, src, dst, nil)
				return false
			} else if errors.Is(err, fs.ErrorCantSetModTimeWithoutDelete) {
				logModTimeUpload(dst)
				fs.Infof(dst, "src and dst identical but can't set mod time without deleting and re-uploading")
				// Remove the file if BackupDir isn't set. If BackupDir is set we would rather have the old file
				// put in the BackupDir than deleted which is what will happen if we don't delete it.
				if ci.BackupDir == "" {
					err = dst.Remove(ctx)
					if err != nil {
						fs.Errorf(dst, "failed to delete before re-upload: %v", err)
					}
				}
				logger(ctx, Differ, src, dst, nil)
				return false
			} else if err != nil {
				// Setting the modtime failed for some other reason -
				// count the error but still treat the objects as equal.
				err = fs.CountError(err)
				fs.Errorf(dst, "Failed to set modification time: %v", err)
			} else {
				fs.Infof(src, "Updated modification time in destination")
			}
		}
	}
	logger(ctx, Match, src, dst, nil)
	return true
}
2020-02-13 14:24:00 +00:00
// CommonHash returns a single hash.Type and a HashOption with that
// type which is in common between the two fs.Fs.
2020-11-05 11:33:32 +00:00
func CommonHash ( ctx context . Context , fa , fb fs . Info ) ( hash . Type , * fs . HashesOption ) {
ci := fs . GetConfig ( ctx )
2020-02-13 14:24:00 +00:00
// work out which hash to use - limit to 1 hash in common
var common hash . Set
hashType := hash . None
2020-11-05 11:33:32 +00:00
if ! ci . IgnoreChecksum {
2020-02-13 14:24:00 +00:00
common = fb . Hashes ( ) . Overlap ( fa . Hashes ( ) )
if common . Count ( ) > 0 {
hashType = common . GetOne ( )
common = hash . Set ( hashType )
}
}
return hashType , & fs . HashesOption { Hashes : common }
}
2019-06-15 12:54:17 +00:00
// SameObject returns true if src and dst could be pointing to the
// same object.
func SameObject(src, dst fs.Object) bool {
	srcFs, dstFs := src.Fs(), dst.Fs()
	if !SameConfig(srcFs, dstFs) {
		// If same remote type then check ID of objects if available
		doSrcID, srcIDOK := src.(fs.IDer)
		doDstID, dstIDOK := dst.(fs.IDer)
		if srcIDOK && dstIDOK && SameRemoteType(srcFs, dstFs) {
			srcID, dstID := doSrcID.ID(), doDstID.ID()
			if srcID != "" && srcID == dstID {
				return true
			}
		}
		return false
	}
	srcPath := path.Join(srcFs.Root(), src.Remote())
	dstPath := path.Join(dstFs.Root(), dst.Remote())
	// On macOS the local filesystem normalizes unicode in file names,
	// so two paths that normalize to the same NFC string refer to the
	// same file. Without this check, renaming a file to a differently
	// normalized form of the same name (e.g. NFD "ö" -> NFC "ö") would
	// delete the source instead of renaming it.
	if srcFs.Features().IsLocal && dstFs.Features().IsLocal && runtime.GOOS == "darwin" {
		if norm.NFC.String(srcPath) == norm.NFC.String(dstPath) {
			return true
		}
	}
	// On case insensitive remotes "a.txt" and "A.txt" are the same object
	if dst.Fs().Features().CaseInsensitive {
		srcPath = strings.ToLower(srcPath)
		dstPath = strings.ToLower(dstPath)
	}
	return srcPath == dstPath
}
2016-10-22 16:53:10 +00:00
// Move src object to dst or fdst if nil. If dst is nil then it uses
// remote as the name of the new object.
//
// Note that you must check the destination does not exist before
// calling this and pass it as dst. If you pass dst=nil and the
// destination does exist then this may create duplicates or return
// errors.
//
// It returns the destination object if possible. Note that this may
// be nil.
//
// This is accounted as a check.
func Move(ctx context.Context, fdst fs.Fs, dst fs.Object, remote string, src fs.Object) (newDst fs.Object, err error) {
	return move(ctx, fdst, dst, remote, src, false)
}
// MoveTransfer moves src object to dst or fdst if nil. If dst is nil
// then it uses remote as the name of the new object.
//
// This is identical to Move but is accounted as a transfer.
func MoveTransfer(ctx context.Context, fdst fs.Fs, dst fs.Object, remote string, src fs.Object) (newDst fs.Object, err error) {
	return move(ctx, fdst, dst, remote, src, true)
}
// move - see Move for help
//
// It tries a server-side Move first when the backends allow it, and
// otherwise falls back to Copy followed by deleting the source.
// isTransfer selects whether the operation is accounted as a transfer
// (MoveTransfer) or as a check (Move).
func move(ctx context.Context, fdst fs.Fs, dst fs.Object, remote string, src fs.Object, isTransfer bool) (newDst fs.Object, err error) {
	ci := fs.GetConfig(ctx)
	var tr *accounting.Transfer
	if isTransfer {
		tr = accounting.Stats(ctx).NewTransfer(src, fdst)
	} else {
		tr = accounting.Stats(ctx).NewCheckingTransfer(src, "moving")
	}
	defer func() {
		// Count a successful move as a rename, then finish accounting
		if err == nil {
			accounting.Stats(ctx).Renames(1)
		}
		tr.Done(ctx, err)
	}()
	newDst = dst
	if SkipDestructive(ctx, src, "move") {
		// --dry-run (or interactive skip): account the would-be
		// transfer without doing anything
		in := tr.Account(ctx, nil)
		in.DryRun(src.Size())
		return newDst, nil
	}
	// See if we have Move available
	if doMove := fdst.Features().Move; doMove != nil && (SameConfig(src.Fs(), fdst) || (SameRemoteType(src.Fs(), fdst) && (fdst.Features().ServerSideAcrossConfigs || ci.ServerSideAcrossConfigs))) {
		// Delete destination if it exists and is not the same file as src (could be same file while seemingly different if the remote is case insensitive)
		if dst != nil && !SameObject(src, dst) {
			err = DeleteFile(ctx, dst)
			if err != nil {
				return newDst, err
			}
		}
		// Move dst <- src
		in := tr.Account(ctx, nil) // account the transfer
		in.ServerSideTransferStart()
		newDst, err = doMove(ctx, src, remote)
		switch err {
		case nil:
			if newDst != nil && src.String() != newDst.String() {
				fs.Infof(src, "Moved (server-side) to: %s", newDst.String())
			} else {
				fs.Infof(src, "Moved (server-side)")
			}
			in.ServerSideMoveEnd(newDst.Size()) // account the bytes for the server-side transfer
			_ = in.Close()
			return newDst, nil
		case fs.ErrorCantMove:
			// Fall through to the Copy + Delete path below
			fs.Debugf(src, "Can't move, switching to copy")
			_ = in.Close()
		default:
			err = fs.CountError(err)
			fs.Errorf(src, "Couldn't move: %v", err)
			_ = in.Close()
			return newDst, err
		}
	}
	// Move not found or didn't work so copy dst <- src
	newDst, err = Copy(ctx, fdst, dst, remote, src)
	if err != nil {
		fs.Errorf(src, "Not deleting source as copy failed: %v", err)
		return newDst, err
	}
	// Delete src if no error on copy
	return newDst, DeleteFile(ctx, src)
}
2020-10-13 21:43:40 +00:00
// CanServerSideMove returns true if fdst support server-side moves or
// server-side copies
2017-01-10 20:03:55 +00:00
//
// Some remotes simulate rename by server-side copy and delete, so include
// remotes that implements either Mover or Copier.
2018-01-12 16:30:54 +00:00
func CanServerSideMove ( fdst fs . Fs ) bool {
2017-01-13 17:21:47 +00:00
canMove := fdst . Features ( ) . Move != nil
canCopy := fdst . Features ( ) . Copy != nil
2017-01-10 20:03:55 +00:00
return canMove || canCopy
}
2019-03-10 16:50:28 +00:00
// SuffixName adds the current --suffix to the remote, obeying
// --suffix-keep-extension if set
2020-11-05 11:33:32 +00:00
func SuffixName ( ctx context . Context , remote string ) string {
ci := fs . GetConfig ( ctx )
if ci . Suffix == "" {
2019-03-10 16:50:28 +00:00
return remote
}
2020-11-05 11:33:32 +00:00
if ci . SuffixKeepExtension {
2023-03-26 15:55:03 +00:00
var (
base = remote
exts = ""
first = true
ext = path . Ext ( remote )
)
for ext != "" {
// Look second and subsequent extensions in mime types.
// If they aren't found then don't keep it as an extension.
if ! first && mime . TypeByExtension ( ext ) == "" {
break
}
base = base [ : len ( base ) - len ( ext ) ]
exts = ext + exts
first = false
ext = path . Ext ( base )
}
return base + ci . Suffix + exts
2019-03-10 16:50:28 +00:00
}
2020-11-05 11:33:32 +00:00
return remote + ci . Suffix
2019-03-10 16:50:28 +00:00
}
2018-01-12 16:30:54 +00:00
// DeleteFileWithBackupDir deletes a single file respecting --dry-run
// and accumulating stats and errors.
//
// If backupDir is set then it moves the file to there instead of
// deleting
func DeleteFileWithBackupDir(ctx context.Context, dst fs.Object, backupDir fs.Fs) (err error) {
	tr := accounting.Stats(ctx).NewCheckingTransfer(dst, "deleting")
	defer func() {
		tr.Done(ctx, err)
	}()
	// Register the delete with the stats first - if this returns an
	// error the delete is refused (NOTE(review): presumably enforcing
	// limits such as --max-delete; confirm in accounting package).
	err = accounting.Stats(ctx).DeleteFile(ctx, dst.Size())
	if err != nil {
		return err
	}
	action, actioned := "delete", "Deleted"
	if backupDir != nil {
		action, actioned = "move into backup dir", "Moved into backup dir"
	}
	skip := SkipDestructive(ctx, dst, action)
	if skip {
		// do nothing - SkipDestructive has logged the skip
	} else if backupDir != nil {
		err = MoveBackupDir(ctx, backupDir, dst)
	} else {
		err = dst.Remove(ctx)
	}
	if err != nil {
		fs.Errorf(dst, "Couldn't %s: %v", action, err)
		err = fs.CountError(err)
	} else if !skip {
		fs.Infof(dst, actioned)
	}
	return err
}
2017-01-10 21:47:03 +00:00
// DeleteFile deletes a single file respecting --dry-run and
// accumulating stats and errors.
//
// It is a convenience wrapper around DeleteFileWithBackupDir with no
// backup directory, so the file is always deleted (never moved).
func DeleteFile(ctx context.Context, dst fs.Object) (err error) {
	return DeleteFileWithBackupDir(ctx, dst, nil)
}
2018-01-12 16:30:54 +00:00
// DeleteFilesWithBackupDir removes all the files passed in the
2017-01-10 21:47:03 +00:00
// channel
//
// If backupDir is set the files will be placed into that directory
// instead of being deleted.
2019-06-17 08:34:30 +00:00
func DeleteFilesWithBackupDir ( ctx context . Context , toBeDeleted fs . ObjectsChan , backupDir fs . Fs ) error {
2014-03-28 17:56:04 +00:00
var wg sync . WaitGroup
2020-11-05 11:33:32 +00:00
ci := fs . GetConfig ( ctx )
2023-02-07 10:56:03 +00:00
wg . Add ( ci . Checkers )
2023-08-18 14:56:26 +00:00
var errorCount atomic . Int32
var fatalErrorCount atomic . Int32
2018-01-22 18:53:18 +00:00
2023-02-07 10:56:03 +00:00
for i := 0 ; i < ci . Checkers ; i ++ {
2014-03-28 17:56:04 +00:00
go func ( ) {
defer wg . Done ( )
2015-09-22 17:47:16 +00:00
for dst := range toBeDeleted {
2019-06-17 08:34:30 +00:00
err := DeleteFileWithBackupDir ( ctx , dst , backupDir )
2016-06-25 13:27:44 +00:00
if err != nil {
2023-08-18 14:56:26 +00:00
errorCount . Add ( 1 )
2023-10-01 09:02:56 +00:00
logger , _ := GetLogger ( ctx )
logger ( ctx , TransferError , nil , dst , err )
2018-01-22 18:53:18 +00:00
if fserrors . IsFatalError ( err ) {
2023-10-09 15:56:13 +00:00
fs . Errorf ( dst , "Got fatal error on delete: %s" , err )
2023-08-18 14:56:26 +00:00
fatalErrorCount . Add ( 1 )
2018-01-22 18:53:18 +00:00
return
}
2016-06-25 13:27:44 +00:00
}
2014-03-28 17:56:04 +00:00
}
} ( )
}
2020-02-09 19:30:41 +00:00
fs . Debugf ( nil , "Waiting for deletions to finish" )
2014-03-28 17:56:04 +00:00
wg . Wait ( )
2023-08-18 14:56:26 +00:00
if errorCount . Load ( ) > 0 {
err := fmt . Errorf ( "failed to delete %d files" , errorCount . Load ( ) )
if fatalErrorCount . Load ( ) > 0 {
2018-01-22 18:53:18 +00:00
return fserrors . FatalError ( err )
}
return err
2016-06-25 13:27:44 +00:00
}
return nil
2014-03-28 17:56:04 +00:00
}
2017-01-10 21:47:03 +00:00
// DeleteFiles removes all the files passed in the channel.
//
// It is a convenience wrapper around DeleteFilesWithBackupDir with no
// backup directory, so the files are always deleted (never moved).
func DeleteFiles(ctx context.Context, toBeDeleted fs.ObjectsChan) error {
	return DeleteFilesWithBackupDir(ctx, toBeDeleted, nil)
}
2019-02-11 01:36:47 +00:00
// SameRemoteType returns true if fdst and fsrc are the same type.
func SameRemoteType(fdst, fsrc fs.Info) bool {
	dstType := fmt.Sprintf("%T", fdst)
	srcType := fmt.Sprintf("%T", fsrc)
	return dstType == srcType
}
2017-01-11 14:59:53 +00:00
// SameConfig returns true if fdst and fsrc are using the same config
// file entry.
//
// This compares only the remote names, not the roots.
func SameConfig(fdst, fsrc fs.Info) bool {
	return fdst.Name() == fsrc.Name()
}
2020-11-16 03:04:29 +00:00
// SameConfigArr returns true if any of fsrcs uses the same config
// file entry as fdst.
func SameConfigArr(fdst fs.Info, fsrcs []fs.Fs) bool {
	dstName := fdst.Name()
	for _, fsrc := range fsrcs {
		if fsrc.Name() == dstName {
			return true
		}
	}
	return false
}
2015-09-22 17:47:16 +00:00
// Same returns true if fdst and fsrc point to the same underlying Fs
2018-01-12 16:30:54 +00:00
func Same ( fdst , fsrc fs . Info ) bool {
2019-02-14 12:06:26 +00:00
return SameConfig ( fdst , fsrc ) && strings . Trim ( fdst . Root ( ) , "/" ) == strings . Trim ( fsrc . Root ( ) , "/" )
2015-09-01 19:50:28 +00:00
}
2023-07-15 08:10:26 +00:00
// fixRoot returns the Root with a trailing / if not empty.
//
// It returns a case folded version for case insensitive file systems
func fixRoot ( f fs . Info ) ( s string , folded string ) {
s = strings . Trim ( filepath . ToSlash ( f . Root ( ) ) , "/" )
2019-06-23 03:52:09 +00:00
if s != "" {
s += "/"
}
2023-07-15 08:10:26 +00:00
folded = s
2019-06-23 03:52:09 +00:00
if f . Features ( ) . CaseInsensitive {
2023-07-15 08:10:26 +00:00
folded = strings . ToLower ( s )
2019-06-23 03:52:09 +00:00
}
2023-07-15 08:10:26 +00:00
return s , folded
2019-06-23 03:52:09 +00:00
}
2022-06-01 17:24:54 +00:00
// OverlappingFilterCheck returns true if fdst and fsrc point to the same
// underlying Fs and they overlap without fdst being excluded by any filter rule.
func OverlappingFilterCheck ( ctx context . Context , fdst fs . Fs , fsrc fs . Fs ) bool {
if ! SameConfig ( fdst , fsrc ) {
return false
}
2023-07-15 08:10:26 +00:00
fdstRoot , fdstRootFolded := fixRoot ( fdst )
fsrcRoot , fsrcRootFolded := fixRoot ( fsrc )
if fdstRootFolded == fsrcRootFolded {
return true
} else if strings . HasPrefix ( fdstRootFolded , fsrcRootFolded ) {
2022-06-01 17:24:54 +00:00
fdstRelative := fdstRoot [ len ( fsrcRoot ) : ]
2023-07-15 08:10:26 +00:00
return filterCheck ( ctx , fsrc , fdstRelative )
} else if strings . HasPrefix ( fsrcRootFolded , fdstRootFolded ) {
fsrcRelative := fsrcRoot [ len ( fdstRoot ) : ]
return filterCheck ( ctx , fdst , fsrcRelative )
2022-06-01 17:24:54 +00:00
}
2023-07-15 08:10:26 +00:00
return false
2022-06-01 17:24:54 +00:00
}
2023-07-15 08:10:26 +00:00
// filterCheck checks if dir is included in f
func filterCheck ( ctx context . Context , f fs . Fs , dir string ) bool {
2022-06-01 17:24:54 +00:00
fi := filter . GetConfig ( ctx )
2023-07-15 08:10:26 +00:00
includeDirectory := fi . IncludeDirectory ( ctx , f )
include , err := includeDirectory ( dir )
if err != nil {
fs . Errorf ( f , "Failed to discover whether directory is included: %v" , err )
return true
2022-06-01 17:24:54 +00:00
}
return include
}
2019-06-23 03:52:09 +00:00
// SameDir returns true if fdst and fsrc point to the same
// underlying Fs and they are the same directory.
func SameDir ( fdst , fsrc fs . Info ) bool {
if ! SameConfig ( fdst , fsrc ) {
return false
}
2023-07-15 08:10:26 +00:00
_ , fdstRootFolded := fixRoot ( fdst )
_ , fsrcRootFolded := fixRoot ( fsrc )
return fdstRootFolded == fsrcRootFolded
2019-06-23 03:52:09 +00:00
}
2020-06-12 16:01:23 +00:00
// Retry runs fn up to maxTries times if it returns a retriable error
2021-03-11 14:44:01 +00:00
func Retry ( ctx context . Context , o interface { } , maxTries int , fn func ( ) error ) ( err error ) {
2020-06-12 16:01:23 +00:00
for tries := 1 ; tries <= maxTries ; tries ++ {
// Call the function which might error
err = fn ( )
if err == nil {
break
}
// Retry if err returned a retry error
2021-03-11 14:44:01 +00:00
if fserrors . ContextError ( ctx , & err ) {
break
}
2020-06-12 16:01:23 +00:00
if fserrors . IsRetryError ( err ) || fserrors . ShouldRetry ( err ) {
fs . Debugf ( o , "Received error: %v - low level retry %d/%d" , err , tries , maxTries )
continue
}
break
}
return err
}
2015-09-22 17:47:16 +00:00
// ListFn lists the Fs to the supplied function
2014-03-28 17:56:04 +00:00
//
// Lists in parallel which may get them out of order
2019-06-17 08:34:30 +00:00
func ListFn ( ctx context . Context , f fs . Fs , fn func ( fs . Object ) ) error {
2020-11-05 11:33:32 +00:00
ci := fs . GetConfig ( ctx )
return walk . ListR ( ctx , f , "" , false , ci . MaxDepth , walk . ListObjects , func ( entries fs . DirEntries ) error {
2017-02-24 22:51:01 +00:00
entries . ForObject ( fn )
return nil
} )
2014-03-28 17:56:04 +00:00
}
2023-03-03 14:17:02 +00:00
// StdoutMutex is the mutex for synchronized output on stdout.
var StdoutMutex sync.Mutex

// SyncPrintf is a global var holding the Printf function so that it
// can be overridden.
//
// This writes to stdout holding the StdoutMutex. If you are going to
// override it and write to os.Stdout then you should hold the
// StdoutMutex too.
var SyncPrintf = func(format string, a ...interface{}) {
	StdoutMutex.Lock()
	defer StdoutMutex.Unlock()
	fmt.Printf(format, a...)
}
2023-10-01 09:02:56 +00:00
// SyncFprintf - Synchronized fmt.Fprintf
2015-09-22 06:31:12 +00:00
//
2022-08-05 15:35:41 +00:00
// Ignores errors from Fprintf.
2020-12-18 12:45:58 +00:00
//
2023-07-01 14:28:10 +00:00
// Prints to stdout if w is nil
2023-10-01 09:02:56 +00:00
func SyncFprintf ( w io . Writer , format string , a ... interface { } ) {
2021-07-07 15:34:16 +00:00
if w == nil || w == os . Stdout {
2020-12-18 12:45:58 +00:00
SyncPrintf ( format , a ... )
} else {
2023-07-01 14:28:10 +00:00
StdoutMutex . Lock ( )
defer StdoutMutex . Unlock ( )
2020-12-18 12:45:58 +00:00
_ , _ = fmt . Fprintf ( w , format , a ... )
}
2015-02-28 15:30:40 +00:00
}
2021-04-02 14:11:21 +00:00
// SizeString makes a string representation of size for output.
//
// Optional human-readable format including a binary suffix.
func SizeString(size int64, humanReadable bool) string {
	if !humanReadable {
		return strconv.FormatInt(size, 10)
	}
	if size < 0 {
		return "-" + fs.SizeSuffix(-size).String()
	}
	return fs.SizeSuffix(size).String()
}
// SizeStringField makes a string representation of size for output in a
// fixed width field.
//
// Argument rawWidth is used to format the field width of the raw value.
// With humanReadable the width is hard coded to 9, since SizeSuffix
// strings have precision 3 and the longest value is "999.999Ei". To
// always use a longer width the return value can be fed into another
// format string with a specific field width.
func SizeStringField(size int64, humanReadable bool, rawWidth int) string {
	width := rawWidth
	if humanReadable {
		width = 9
	}
	return fmt.Sprintf("%*s", width, SizeString(size, humanReadable))
}
// CountString makes a string representation of count for output.
//
// Optional human-readable format including a decimal suffix.
func CountString(count int64, humanReadable bool) string {
	if !humanReadable {
		return strconv.FormatInt(count, 10)
	}
	if count < 0 {
		return "-" + fs.CountSuffix(-count).String()
	}
	return fs.CountSuffix(count).String()
}
// CountStringField makes a string representation of count for output in
// a fixed width field.
//
// Similar to SizeStringField, but human readable with decimal prefix
// and field width 8, since there is no 'i' in the decimal prefix
// symbols (e.g. "999.999E").
func CountStringField(count int64, humanReadable bool, rawWidth int) string {
	width := rawWidth
	if humanReadable {
		width = 8
	}
	return fmt.Sprintf("%*s", width, CountString(count, humanReadable))
}
2015-09-15 14:46:06 +00:00
// List the Fs to the supplied writer
2014-07-12 11:09:20 +00:00
//
2022-08-05 15:35:41 +00:00
// Shows size and path - obeys includes and excludes.
2014-07-12 11:09:20 +00:00
//
// Lists in parallel which may get them out of order
2019-06-17 08:34:30 +00:00
func List ( ctx context . Context , f fs . Fs , w io . Writer ) error {
2021-04-02 14:11:21 +00:00
ci := fs . GetConfig ( ctx )
2019-06-17 08:34:30 +00:00
return ListFn ( ctx , f , func ( o fs . Object ) {
2023-10-01 09:02:56 +00:00
SyncFprintf ( w , "%s %s\n" , SizeStringField ( o . Size ( ) , ci . HumanReadable , 9 ) , o . Remote ( ) )
2014-07-12 11:09:20 +00:00
} )
}
2015-09-22 17:47:16 +00:00
// ListLong lists the Fs to the supplied writer
2014-07-12 11:09:20 +00:00
//
2022-08-05 15:35:41 +00:00
// Shows size, mod time and path - obeys includes and excludes.
2014-07-12 11:09:20 +00:00
//
// Lists in parallel which may get them out of order
2019-06-17 08:34:30 +00:00
func ListLong ( ctx context . Context , f fs . Fs , w io . Writer ) error {
2021-04-02 14:11:21 +00:00
ci := fs . GetConfig ( ctx )
2019-06-17 08:34:30 +00:00
return ListFn ( ctx , f , func ( o fs . Object ) {
2023-02-06 10:30:22 +00:00
tr := accounting . Stats ( ctx ) . NewCheckingTransfer ( o , "listing" )
2019-07-22 19:11:46 +00:00
defer func ( ) {
2020-11-05 16:59:59 +00:00
tr . Done ( ctx , nil )
2019-07-22 19:11:46 +00:00
} ( )
2019-06-17 08:34:30 +00:00
modTime := o . ModTime ( ctx )
2023-10-01 09:02:56 +00:00
SyncFprintf ( w , "%s %s %s\n" , SizeStringField ( o . Size ( ) , ci . HumanReadable , 9 ) , modTime . Local ( ) . Format ( "2006-01-02 15:04:05.000000000" ) , o . Remote ( ) )
2014-07-12 11:09:20 +00:00
} )
}
bisync: full support for comparing checksum, size, modtime - fixes #5679 fixes #5683 fixes #5684 fixes #5675
Before this change, bisync could only detect changes based on modtime, and
would refuse to run if either path lacked modtime support. This made bisync
unavailable for many of rclone's backends. Additionally, bisync did not account
for the Fs's precision when comparing modtimes, meaning that they could only be
reliably compared within the same side -- not against the opposite side. Size
and checksum (even when available) were ignored completely for deltas.
After this change, bisync now fully supports comparing based on any combination
of size, modtime, and checksum, lifting the prior restriction on backends
without modtime support. The comparison logic considers the backend's
precision, hash types, and other features as appropriate.
The comparison features optionally use a new --compare flag (which takes any
combination of size,modtime,checksum) and even supports some combinations not
otherwise supported in `sync` (like comparing all three at the same time.) By
default (without the --compare flag), bisync inherits the same comparison
options as `sync` (that is: size and modtime by default, unless modified with
flags such as --checksum or --size-only.) If the --compare flag is set, it will
override these defaults.
If --compare includes checksum and both remotes support checksums but have no
hash types in common with each other, checksums will be considered only for
comparisons within the same side (to determine what has changed since the prior
sync), but not for comparisons against the opposite side. If one side supports
checksums and the other does not, checksums will only be considered on the side
that supports them. When comparing with checksum and/or size without modtime,
bisync cannot determine whether a file is newer or older -- only whether it is
changed or unchanged. (If it is changed on both sides, bisync still does the
standard equality-check to avoid declaring a sync conflict unless it absolutely
has to.)
Also included are some new flags to customize the checksum comparison behavior
on backends where hashes are slow or unavailable. --no-slow-hash and
--slow-hash-sync-only allow selectively ignoring checksums on backends such as
local where they are slow. --download-hash allows computing them by downloading
when (and only when) they're otherwise not available. Of course, this option
probably won't be practical with large files, but may be a good option for
syncing small-but-important files with maximum accuracy (for example, a source
code repo on a crypt remote.) An additional advantage over methods like
cryptcheck is that the original file is not required for comparison (for
example, --download-hash can be used to bisync two different crypt remotes with
different passwords.)
Additionally, all of the above are now considered during the final --check-sync
for much-improved accuracy (before this change, it only compared filenames!)
Many other details are explained in the included docs.
2023-12-01 00:44:38 +00:00
// HashSum returns the human-readable hash for ht passed in. This may
2019-10-26 19:27:33 +00:00
// be UNSUPPORTED or ERROR. If it isn't returning a valid hash it will
// return an error.
bisync: full support for comparing checksum, size, modtime - fixes #5679 fixes #5683 fixes #5684 fixes #5675
Before this change, bisync could only detect changes based on modtime, and
would refuse to run if either path lacked modtime support. This made bisync
unavailable for many of rclone's backends. Additionally, bisync did not account
for the Fs's precision when comparing modtimes, meaning that they could only be
reliably compared within the same side -- not against the opposite side. Size
and checksum (even when available) were ignored completely for deltas.
After this change, bisync now fully supports comparing based on any combination
of size, modtime, and checksum, lifting the prior restriction on backends
without modtime support. The comparison logic considers the backend's
precision, hash types, and other features as appropriate.
The comparison features optionally use a new --compare flag (which takes any
combination of size,modtime,checksum) and even supports some combinations not
otherwise supported in `sync` (like comparing all three at the same time.) By
default (without the --compare flag), bisync inherits the same comparison
options as `sync` (that is: size and modtime by default, unless modified with
flags such as --checksum or --size-only.) If the --compare flag is set, it will
override these defaults.
If --compare includes checksum and both remotes support checksums but have no
hash types in common with each other, checksums will be considered only for
comparisons within the same side (to determine what has changed since the prior
sync), but not for comparisons against the opposite side. If one side supports
checksums and the other does not, checksums will only be considered on the side
that supports them. When comparing with checksum and/or size without modtime,
bisync cannot determine whether a file is newer or older -- only whether it is
changed or unchanged. (If it is changed on both sides, bisync still does the
standard equality-check to avoid declaring a sync conflict unless it absolutely
has to.)
Also included are some new flags to customize the checksum comparison behavior
on backends where hashes are slow or unavailable. --no-slow-hash and
--slow-hash-sync-only allow selectively ignoring checksums on backends such as
local where they are slow. --download-hash allows computing them by downloading
when (and only when) they're otherwise not available. Of course, this option
probably won't be practical with large files, but may be a good option for
syncing small-but-important files with maximum accuracy (for example, a source
code repo on a crypt remote.) An additional advantage over methods like
cryptcheck is that the original file is not required for comparison (for
example, --download-hash can be used to bisync two different crypt remotes with
different passwords.)
Additionally, all of the above are now considered during the final --check-sync
for much-improved accuracy (before this change, it only compared filenames!)
Many other details are explained in the included docs.
2023-12-01 00:44:38 +00:00
func HashSum ( ctx context . Context , ht hash . Type , base64Encoded bool , downloadFlag bool , o fs . Object ) ( string , error ) {
2020-12-18 12:45:58 +00:00
var sum string
2019-07-22 19:11:46 +00:00
var err error
2020-12-18 12:45:58 +00:00
// If downloadFlag is true, download and hash the file.
// If downloadFlag is false, call o.Hash asking the remote for the hash
if downloadFlag {
// Setup: Define accounting, open the file with NewReOpen to provide restarts, account for the transfer, and setup a multi-hasher with the appropriate type
// Execution: io.Copy file to hasher, get hash and encode in hex
2024-01-18 16:44:13 +00:00
tr := accounting . Stats ( ctx ) . NewTransfer ( o , nil )
2020-12-18 12:45:58 +00:00
defer func ( ) {
tr . Done ( ctx , err )
} ( )
// Open with NewReOpen to provide restarts
var options [ ] fs . OpenOption
for _ , option := range fs . GetConfig ( ctx ) . DownloadHeaders {
options = append ( options , option )
}
2023-10-08 10:39:26 +00:00
var in io . ReadCloser
in , err = Open ( ctx , o , options ... )
2020-12-18 12:45:58 +00:00
if err != nil {
2021-11-02 23:34:20 +00:00
return "ERROR" , fmt . Errorf ( "failed to open file %v: %w" , o , err )
2020-12-18 12:45:58 +00:00
}
// Account and buffer the transfer
in = tr . Account ( ctx , in ) . WithBuffer ( )
// Setup hasher
hasher , err := hash . NewMultiHasherTypes ( hash . NewHashSet ( ht ) )
if err != nil {
2021-11-02 23:34:20 +00:00
return "UNSUPPORTED" , fmt . Errorf ( "hash unsupported: %w" , err )
2020-12-18 12:45:58 +00:00
}
// Copy to hasher, downloading the file and passing directly to hash
_ , err = io . Copy ( hasher , in )
if err != nil {
2021-11-02 23:34:20 +00:00
return "ERROR" , fmt . Errorf ( "failed to copy file to hasher: %w" , err )
2020-12-18 12:45:58 +00:00
}
2021-11-02 23:34:20 +00:00
// Get hash as hex or base64 encoded string
sum , err = hasher . SumString ( ht , base64Encoded )
2020-12-18 12:45:58 +00:00
if err != nil {
2021-11-02 23:34:20 +00:00
return "ERROR" , fmt . Errorf ( "hasher returned an error: %w" , err )
2020-12-18 12:45:58 +00:00
}
} else {
2023-02-06 10:30:22 +00:00
tr := accounting . Stats ( ctx ) . NewCheckingTransfer ( o , "hashing" )
2020-12-18 12:45:58 +00:00
defer func ( ) {
tr . Done ( ctx , err )
} ( )
sum , err = o . Hash ( ctx , ht )
2021-11-02 23:34:20 +00:00
if base64Encoded {
hexBytes , _ := hex . DecodeString ( sum )
sum = base64 . URLEncoding . EncodeToString ( hexBytes )
}
2020-12-18 12:45:58 +00:00
if err == hash . ErrUnsupported {
2021-11-02 23:34:20 +00:00
return "" , fmt . Errorf ( "hash unsupported: %w" , err )
2021-10-07 12:37:31 +00:00
}
if err != nil {
2021-11-15 12:17:30 +00:00
return "" , fmt . Errorf ( "failed to get hash %v from backend: %w" , ht , err )
2020-12-18 12:45:58 +00:00
}
2018-01-06 17:53:37 +00:00
}
2020-12-18 12:45:58 +00:00
return sum , nil
2014-07-12 11:09:20 +00:00
}
2020-12-18 12:45:58 +00:00
// HashLister does an md5sum equivalent for the hash type passed in
// Updated to handle both standard hex encoding and base64
// Updated to perform multiple hashes concurrently
func HashLister ( ctx context . Context , ht hash . Type , outputBase64 bool , downloadFlag bool , f fs . Fs , w io . Writer ) error {
2021-11-02 23:34:20 +00:00
width := hash . Width ( ht , outputBase64 )
2023-02-07 10:56:03 +00:00
// Use --checkers concurrency unless downloading in which case use --transfers
concurrency := fs . GetConfig ( ctx ) . Checkers
if downloadFlag {
concurrency = fs . GetConfig ( ctx ) . Transfers
}
concurrencyControl := make ( chan struct { } , concurrency )
2020-12-18 12:45:58 +00:00
var wg sync . WaitGroup
err := ListFn ( ctx , f , func ( o fs . Object ) {
wg . Add ( 1 )
concurrencyControl <- struct { } { }
go func ( ) {
defer func ( ) {
<- concurrencyControl
wg . Done ( )
} ( )
bisync: full support for comparing checksum, size, modtime - fixes #5679 fixes #5683 fixes #5684 fixes #5675
Before this change, bisync could only detect changes based on modtime, and
would refuse to run if either path lacked modtime support. This made bisync
unavailable for many of rclone's backends. Additionally, bisync did not account
for the Fs's precision when comparing modtimes, meaning that they could only be
reliably compared within the same side -- not against the opposite side. Size
and checksum (even when available) were ignored completely for deltas.
After this change, bisync now fully supports comparing based on any combination
of size, modtime, and checksum, lifting the prior restriction on backends
without modtime support. The comparison logic considers the backend's
precision, hash types, and other features as appropriate.
The comparison features optionally use a new --compare flag (which takes any
combination of size,modtime,checksum) and even supports some combinations not
otherwise supported in `sync` (like comparing all three at the same time.) By
default (without the --compare flag), bisync inherits the same comparison
options as `sync` (that is: size and modtime by default, unless modified with
flags such as --checksum or --size-only.) If the --compare flag is set, it will
override these defaults.
If --compare includes checksum and both remotes support checksums but have no
hash types in common with each other, checksums will be considered only for
comparisons within the same side (to determine what has changed since the prior
sync), but not for comparisons against the opposite side. If one side supports
checksums and the other does not, checksums will only be considered on the side
that supports them. When comparing with checksum and/or size without modtime,
bisync cannot determine whether a file is newer or older -- only whether it is
changed or unchanged. (If it is changed on both sides, bisync still does the
standard equality-check to avoid declaring a sync conflict unless it absolutely
has to.)
Also included are some new flags to customize the checksum comparison behavior
on backends where hashes are slow or unavailable. --no-slow-hash and
--slow-hash-sync-only allow selectively ignoring checksums on backends such as
local where they are slow. --download-hash allows computing them by downloading
when (and only when) they're otherwise not available. Of course, this option
probably won't be practical with large files, but may be a good option for
syncing small-but-important files with maximum accuracy (for example, a source
code repo on a crypt remote.) An additional advantage over methods like
cryptcheck is that the original file is not required for comparison (for
example, --download-hash can be used to bisync two different crypt remotes with
different passwords.)
Additionally, all of the above are now considered during the final --check-sync
for much-improved accuracy (before this change, it only compared filenames!)
Many other details are explained in the included docs.
2023-12-01 00:44:38 +00:00
sum , err := HashSum ( ctx , ht , outputBase64 , downloadFlag , o )
2021-10-07 12:37:31 +00:00
if err != nil {
fs . Errorf ( o , "%v" , fs . CountError ( err ) )
return
}
2023-10-01 09:02:56 +00:00
SyncFprintf ( w , "%*s %s\n" , width , sum , o . Remote ( ) )
2020-12-18 12:45:58 +00:00
} ( )
2019-10-26 19:27:33 +00:00
} )
2020-12-18 12:45:58 +00:00
wg . Wait ( )
return err
2019-10-26 19:27:33 +00:00
}
2021-11-02 23:34:20 +00:00
// HashSumStream outputs a line compatible with md5sum to w based on the
// input stream in and the hash type ht passed in. If outputBase64 is
// set then the hash will be base64 instead of hexadecimal.
func HashSumStream ( ht hash . Type , outputBase64 bool , in io . ReadCloser , w io . Writer ) error {
hasher , err := hash . NewMultiHasherTypes ( hash . NewHashSet ( ht ) )
if err != nil {
return fmt . Errorf ( "hash unsupported: %w" , err )
}
written , err := io . Copy ( hasher , in )
fs . Debugf ( nil , "Creating %s hash of %d bytes read from input stream" , ht , written )
if err != nil {
return fmt . Errorf ( "failed to copy input to hasher: %w" , err )
}
sum , err := hasher . SumString ( ht , outputBase64 )
if err != nil {
return fmt . Errorf ( "hasher returned an error: %w" , err )
}
width := hash . Width ( ht , outputBase64 )
2023-10-01 09:02:56 +00:00
SyncFprintf ( w , "%*s -\n" , width , sum )
2021-11-02 23:34:20 +00:00
return nil
}
2015-10-02 18:48:48 +00:00
// Count counts the objects and their sizes in the Fs
2015-11-24 16:54:12 +00:00
//
// Obeys includes and excludes
2022-04-06 12:15:07 +00:00
func Count ( ctx context . Context , f fs . Fs ) ( objects int64 , size int64 , sizelessObjects int64 , err error ) {
2019-06-17 08:34:30 +00:00
err = ListFn ( ctx , f , func ( o fs . Object ) {
2015-10-02 18:48:48 +00:00
atomic . AddInt64 ( & objects , 1 )
2019-05-28 18:51:25 +00:00
objectSize := o . Size ( )
2022-04-06 12:15:07 +00:00
if objectSize < 0 {
atomic . AddInt64 ( & sizelessObjects , 1 )
} else if objectSize > 0 {
2019-05-28 18:51:25 +00:00
atomic . AddInt64 ( & size , objectSize )
}
2015-10-02 18:48:48 +00:00
} )
return
}
2017-02-24 22:51:01 +00:00
// ConfigMaxDepth returns the depth to use for a recursive or non recursive listing.
2020-11-05 11:33:32 +00:00
func ConfigMaxDepth ( ctx context . Context , recursive bool ) int {
ci := fs . GetConfig ( ctx )
depth := ci . MaxDepth
2017-02-24 22:51:01 +00:00
if ! recursive && depth < 0 {
depth = 1
}
return depth
}
2015-09-22 17:47:16 +00:00
// ListDir lists the directories/buckets/containers in the Fs to the supplied writer
2019-06-17 08:34:30 +00:00
func ListDir ( ctx context . Context , f fs . Fs , w io . Writer ) error {
2021-04-02 14:11:21 +00:00
ci := fs . GetConfig ( ctx )
2020-11-05 11:33:32 +00:00
return walk . ListR ( ctx , f , "" , false , ConfigMaxDepth ( ctx , false ) , walk . ListDirs , func ( entries fs . DirEntries ) error {
2018-01-12 16:30:54 +00:00
entries . ForDir ( func ( dir fs . Directory ) {
2017-02-24 22:51:01 +00:00
if dir != nil {
2023-10-01 09:02:56 +00:00
SyncFprintf ( w , "%s %13s %s %s\n" , SizeStringField ( dir . Size ( ) , ci . HumanReadable , 12 ) , dir . ModTime ( ctx ) . Local ( ) . Format ( "2006-01-02 15:04:05" ) , CountStringField ( dir . Items ( ) , ci . HumanReadable , 9 ) , dir . Remote ( ) )
2017-02-24 22:51:01 +00:00
}
} )
return nil
} )
2014-03-28 17:56:04 +00:00
}
2015-09-22 17:47:16 +00:00
// Mkdir makes a destination directory or container
2019-06-17 08:34:30 +00:00
func Mkdir ( ctx context . Context , f fs . Fs , dir string ) error {
2020-06-05 15:13:10 +00:00
if SkipDestructive ( ctx , fs . LogDirName ( f , dir ) , "make directory" ) {
2016-02-28 19:47:22 +00:00
return nil
}
2018-01-12 16:30:54 +00:00
fs . Debugf ( fs . LogDirName ( f , dir ) , "Making directory" )
2019-06-17 08:34:30 +00:00
err := f . Mkdir ( ctx , dir )
2014-03-28 17:56:04 +00:00
if err != nil {
2019-11-18 14:13:02 +00:00
err = fs . CountError ( err )
2014-03-28 17:56:04 +00:00
return err
}
return nil
}
2016-02-25 20:05:34 +00:00
// TryRmdir removes a container but not if not empty. It doesn't
// count errors but may return one.
2019-06-17 08:34:30 +00:00
func TryRmdir ( ctx context . Context , f fs . Fs , dir string ) error {
2020-10-28 12:54:31 +00:00
accounting . Stats ( ctx ) . DeletedDirs ( 1 )
2020-06-05 15:13:10 +00:00
if SkipDestructive ( ctx , fs . LogDirName ( f , dir ) , "remove directory" ) {
2016-02-25 20:05:34 +00:00
return nil
2014-03-28 17:56:04 +00:00
}
2021-07-23 19:10:27 +00:00
fs . Infof ( fs . LogDirName ( f , dir ) , "Removing directory" )
2019-06-17 08:34:30 +00:00
return f . Rmdir ( ctx , dir )
2016-02-25 20:05:34 +00:00
}
// Rmdir removes a container but not if not empty
2019-06-17 08:34:30 +00:00
func Rmdir ( ctx context . Context , f fs . Fs , dir string ) error {
err := TryRmdir ( ctx , f , dir )
2016-02-25 20:05:34 +00:00
if err != nil {
2019-11-18 14:13:02 +00:00
err = fs . CountError ( err )
2016-02-25 20:05:34 +00:00
return err
}
return err
2014-03-28 17:56:04 +00:00
}
2017-12-07 12:25:56 +00:00
// Purge removes a directory and all of its contents
2020-06-04 21:25:14 +00:00
func Purge ( ctx context . Context , f fs . Fs , dir string ) ( err error ) {
2015-11-08 14:16:00 +00:00
doFallbackPurge := true
2020-06-04 21:25:14 +00:00
if doPurge := f . Features ( ) . Purge ; doPurge != nil {
doFallbackPurge = false
2020-10-28 12:54:31 +00:00
accounting . Stats ( ctx ) . DeletedDirs ( 1 )
2020-06-04 21:25:14 +00:00
if SkipDestructive ( ctx , fs . LogDirName ( f , dir ) , "purge directory" ) {
return nil
}
err = doPurge ( ctx , dir )
2023-06-07 08:19:16 +00:00
if errors . Is ( err , fs . ErrorCantPurge ) {
2020-06-04 21:25:14 +00:00
doFallbackPurge = true
2014-03-28 17:56:04 +00:00
}
2015-11-08 14:16:00 +00:00
}
if doFallbackPurge {
2014-07-25 17:19:49 +00:00
// DeleteFiles and Rmdir observe --dry-run
2019-06-17 08:34:30 +00:00
err = DeleteFiles ( ctx , listToChan ( ctx , f , dir ) )
2016-06-25 13:27:44 +00:00
if err != nil {
return err
}
2019-06-17 08:34:30 +00:00
err = Rmdirs ( ctx , f , dir , false )
2014-07-25 17:19:49 +00:00
}
if err != nil {
2019-11-18 14:13:02 +00:00
err = fs . CountError ( err )
2014-07-25 17:19:49 +00:00
return err
2014-03-28 17:56:04 +00:00
}
return nil
}
2015-12-02 22:25:32 +00:00
// Delete removes all the contents of a container. Unlike Purge, it
// obeys includes and excludes.
2019-06-17 08:34:30 +00:00
func Delete ( ctx context . Context , f fs . Fs ) error {
2020-11-05 11:33:32 +00:00
ci := fs . GetConfig ( ctx )
2023-02-07 10:56:03 +00:00
delChan := make ( fs . ObjectsChan , ci . Checkers )
2016-06-25 13:27:44 +00:00
delErr := make ( chan error , 1 )
2015-12-02 22:25:32 +00:00
go func ( ) {
2019-06-17 08:34:30 +00:00
delErr <- DeleteFiles ( ctx , delChan )
2015-12-02 22:25:32 +00:00
} ( )
2019-06-17 08:34:30 +00:00
err := ListFn ( ctx , f , func ( o fs . Object ) {
2018-08-04 10:16:43 +00:00
delChan <- o
2015-12-02 22:25:32 +00:00
} )
2018-08-04 10:16:43 +00:00
close ( delChan )
2016-06-25 13:27:44 +00:00
delError := <- delErr
if err == nil {
err = delError
}
2015-12-02 22:25:32 +00:00
return err
}
2016-01-31 12:58:41 +00:00
2017-02-24 22:51:01 +00:00
// listToChan will transfer all objects in the listing to the output
2016-04-21 19:06:21 +00:00
//
// If an error occurs, the error will be logged, and it will close the
// channel.
//
// If the error was ErrorDirNotFound then it will be ignored
2019-06-17 08:34:30 +00:00
func listToChan ( ctx context . Context , f fs . Fs , dir string ) fs . ObjectsChan {
2020-11-05 11:33:32 +00:00
ci := fs . GetConfig ( ctx )
o := make ( fs . ObjectsChan , ci . Checkers )
2016-04-21 19:06:21 +00:00
go func ( ) {
defer close ( o )
2020-11-05 11:33:32 +00:00
err := walk . ListR ( ctx , f , dir , true , ci . MaxDepth , walk . ListObjects , func ( entries fs . DirEntries ) error {
2018-01-12 16:30:54 +00:00
entries . ForObject ( func ( obj fs . Object ) {
2017-02-24 22:51:01 +00:00
o <- obj
} )
return nil
} )
2019-01-21 16:53:05 +00:00
if err != nil && err != fs . ErrorDirNotFound {
2021-11-04 10:12:57 +00:00
err = fmt . Errorf ( "failed to list: %w" , err )
2019-11-18 14:13:02 +00:00
err = fs . CountError ( err )
2019-01-21 16:53:05 +00:00
fs . Errorf ( nil , "%v" , err )
}
2016-04-21 19:06:21 +00:00
} ( )
return o
}
2016-07-01 15:35:36 +00:00
// CleanUp removes the trash for the Fs
2019-06-17 08:34:30 +00:00
func CleanUp ( ctx context . Context , f fs . Fs ) error {
2017-01-13 17:21:47 +00:00
doCleanUp := f . Features ( ) . CleanUp
if doCleanUp == nil {
2021-11-04 10:12:57 +00:00
return fmt . Errorf ( "%v doesn't support cleanup" , f )
2016-07-01 15:35:36 +00:00
}
2020-06-05 15:13:10 +00:00
if SkipDestructive ( ctx , f , "clean up old files" ) {
2016-07-02 15:58:50 +00:00
return nil
}
2019-06-17 08:34:30 +00:00
return doCleanUp ( ctx )
2016-07-01 15:35:36 +00:00
}
2016-08-18 21:43:02 +00:00
2017-02-09 11:25:36 +00:00
// readCloser wraps a Reader and a Closer together into an
// io.ReadCloser. This lets a reader be wrapped (e.g. in an
// io.LimitedReader) while still being able to close the original
// underlying stream.
type readCloser struct {
	io.Reader
	io.Closer
}
2016-08-18 21:43:02 +00:00
// Cat any files to the io.Writer
2017-02-08 08:09:41 +00:00
//
// if offset == 0 it will be ignored
// if offset > 0 then the file will be seeked to that offset
// if offset < 0 then the file will be seeked that far from the end
//
// if count < 0 then it will be ignored
// if count >= 0 then only that many characters will be output
2023-04-24 11:01:53 +00:00
func Cat ( ctx context . Context , f fs . Fs , w io . Writer , offset , count int64 , sep [ ] byte ) error {
2016-08-18 21:43:02 +00:00
var mu sync . Mutex
2020-11-05 11:33:32 +00:00
ci := fs . GetConfig ( ctx )
2019-06-17 08:34:30 +00:00
return ListFn ( ctx , f , func ( o fs . Object ) {
2016-09-12 17:15:58 +00:00
var err error
2024-01-18 16:44:13 +00:00
tr := accounting . Stats ( ctx ) . NewTransfer ( o , nil )
2016-09-12 17:15:58 +00:00
defer func ( ) {
2020-11-05 16:59:59 +00:00
tr . Done ( ctx , err )
2016-09-12 17:15:58 +00:00
} ( )
2018-02-19 16:12:43 +00:00
opt := fs . RangeOption { Start : offset , End : - 1 }
2017-02-09 11:46:53 +00:00
size := o . Size ( )
2018-02-19 16:12:43 +00:00
if opt . Start < 0 {
opt . Start += size
}
if count >= 0 {
opt . End = opt . Start + count - 1
2017-02-08 08:09:41 +00:00
}
2018-01-12 16:30:54 +00:00
var options [ ] fs . OpenOption
2018-02-19 16:12:43 +00:00
if opt . Start > 0 || opt . End >= 0 {
options = append ( options , & opt )
2017-02-08 08:09:41 +00:00
}
2020-11-05 11:33:32 +00:00
for _ , option := range ci . DownloadHeaders {
2020-04-23 10:32:27 +00:00
options = append ( options , option )
}
2023-10-08 10:39:26 +00:00
var in io . ReadCloser
in , err = Open ( ctx , o , options ... )
2016-08-18 21:43:02 +00:00
if err != nil {
2019-11-18 14:13:02 +00:00
err = fs . CountError ( err )
2018-01-12 16:30:54 +00:00
fs . Errorf ( o , "Failed to open: %v" , err )
2016-08-18 21:43:02 +00:00
return
}
2017-02-08 08:09:41 +00:00
if count >= 0 {
2017-02-09 11:25:36 +00:00
in = & readCloser { Reader : & io . LimitedReader { R : in , N : count } , Closer : in }
2017-02-08 08:09:41 +00:00
}
2020-06-04 14:09:03 +00:00
in = tr . Account ( ctx , in ) . WithBuffer ( ) // account and buffer the transfer
2017-02-08 08:09:41 +00:00
// take the lock just before we output stuff, so at the last possible moment
mu . Lock ( )
defer mu . Unlock ( )
2017-02-09 11:25:36 +00:00
_ , err = io . Copy ( w , in )
2016-08-18 21:43:02 +00:00
if err != nil {
2019-11-18 14:13:02 +00:00
err = fs . CountError ( err )
2018-01-12 16:30:54 +00:00
fs . Errorf ( o , "Failed to send to output: %v" , err )
2016-08-18 21:43:02 +00:00
}
2023-04-24 11:01:53 +00:00
if len ( sep ) >= 0 {
_ , err = w . Write ( sep )
if err != nil {
err = fs . CountError ( err )
fs . Errorf ( o , "Failed to send separator to output: %v" , err )
}
}
2016-08-18 21:43:02 +00:00
} )
}
2016-11-27 11:49:31 +00:00
2017-08-03 19:42:35 +00:00
// Rcat reads data from the Reader until EOF and uploads it to a file on remote
//
// in is always closed (via the accounting wrapper / readCloser).
// modTime is set on the uploaded object and meta (may be nil) is
// attached as object metadata where supported.
func Rcat(ctx context.Context, fdst fs.Fs, dstFileName string, in io.ReadCloser, modTime time.Time, meta fs.Metadata) (dst fs.Object, err error) {
	ci := fs.GetConfig(ctx)
	// Size is unknown (-1) at this point
	tr := accounting.Stats(ctx).NewTransferRemoteSize(dstFileName, -1, nil, fdst)
	defer func() {
		tr.Done(ctx, err)
	}()
	in = tr.Account(ctx, in).WithBuffer()

	// Count the bytes read so we know the final size for the post-upload check
	readCounter := readers.NewCountingReader(in)
	var trackingIn io.Reader
	var hasher *hash.MultiHasher
	var options []fs.OpenOption
	if !ci.IgnoreChecksum {
		hashes := hash.NewHashSet(fdst.Hashes().GetOne()) // just pick one hash
		hashOption := &fs.HashesOption{Hashes: hashes}
		options = append(options, hashOption)
		hasher, err = hash.NewMultiHasherTypes(hashes)
		if err != nil {
			return nil, err
		}
		// Tee the stream through the hasher so we can verify after upload
		trackingIn = io.TeeReader(readCounter, hasher)
	} else {
		trackingIn = readCounter
	}
	for _, option := range ci.UploadHeaders {
		options = append(options, option)
	}
	if ci.MetadataSet != nil {
		options = append(options, fs.MetadataOption(ci.MetadataSet))
	}

	// compare verifies the uploaded object against what we streamed
	// (size, and checksum when we computed one). Note it assigns to the
	// enclosing err so the deferred tr.Done sees the failure.
	compare := func(dst fs.Object) error {
		var sums map[hash.Type]string
		opt := defaultEqualOpt(ctx)
		if hasher != nil {
			// force --checksum on if we have hashes
			opt.checkSum = true
			sums = hasher.Sums()
		}
		src := object.NewStaticObjectInfo(dstFileName, modTime, int64(readCounter.BytesRead()), false, sums, fdst).WithMetadata(meta)
		if !equal(ctx, src, dst, opt) {
			err = fmt.Errorf("corrupted on transfer")
			err = fs.CountError(err)
			fs.Errorf(dst, "%v", err)
			return err
		}
		return nil
	}
	// check if file small enough for direct upload
	buf := make([]byte, ci.StreamingUploadCutoff)
	if n, err := io.ReadFull(trackingIn, buf); err == io.EOF || err == io.ErrUnexpectedEOF {
		// The whole file fitted in the buffer — upload it from memory
		fs.Debugf(fdst, "File to upload is small (%d bytes), uploading instead of streaming", n)
		src := object.NewMemoryObject(dstFileName, modTime, buf[:n]).WithMetadata(meta)
		return Copy(ctx, fdst, nil, dstFileName, src)
	}
	// Make a new ReadCloser with the bits we've already read
	in = &readCloser{
		Reader: io.MultiReader(bytes.NewReader(buf), trackingIn),
		Closer: in,
	}

	fStreamTo := fdst
	canStream := fdst.Features().PutStream != nil
	if !canStream {
		// Spool via a temporary local filesystem when the destination
		// can't accept an upload of unknown length
		fs.Debugf(fdst, "Target remote doesn't support streaming uploads, creating temporary local FS to spool file")
		tmpLocalFs, err := fs.TemporaryLocalFs(ctx)
		if err != nil {
			return nil, fmt.Errorf("failed to create temporary local FS to spool file: %w", err)
		}
		defer func() {
			err := Purge(ctx, tmpLocalFs, "")
			if err != nil {
				fs.Infof(tmpLocalFs, "Failed to cleanup temporary FS: %v", err)
			}
		}()
		fStreamTo = tmpLocalFs
	}

	if SkipDestructive(ctx, dstFileName, "upload from pipe") {
		// prevents "broken pipe" errors
		_, err = io.Copy(io.Discard, in)
		return nil, err
	}

	objInfo := object.NewStaticObjectInfo(dstFileName, modTime, -1, false, nil, nil).WithMetadata(meta)
	if dst, err = fStreamTo.Features().PutStream(ctx, in, objInfo, options...); err != nil {
		return dst, err
	}
	if err = compare(dst); err != nil {
		return dst, err
	}
	if !canStream {
		// copy dst (which is the local object we have just streamed to) to the remote
		newCtx := ctx
		if ci.Metadata && len(meta) != 0 {
			// If we have metadata and we are setting it then use
			// the --metadataset mechanism to supply it to Copy
			var newCi *fs.ConfigInfo
			newCtx, newCi = fs.AddConfig(ctx)
			if len(newCi.MetadataSet) == 0 {
				newCi.MetadataSet = meta
			} else {
				var newMeta fs.Metadata
				newMeta.Merge(meta)
				newMeta.Merge(newCi.MetadataSet) // --metadata-set takes priority
				newCi.MetadataSet = newMeta
			}
		}
		return Copy(newCtx, fdst, nil, dstFileName, dst)
	}
	return dst, nil
}
2018-03-29 07:10:19 +00:00
// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
2020-05-31 21:18:01 +00:00
func PublicLink ( ctx context . Context , f fs . Fs , remote string , expire fs . Duration , unlink bool ) ( string , error ) {
2018-03-29 07:10:19 +00:00
doPublicLink := f . Features ( ) . PublicLink
if doPublicLink == nil {
2021-11-04 10:12:57 +00:00
return "" , fmt . Errorf ( "%v doesn't support public links" , f )
2018-03-29 07:10:19 +00:00
}
2020-05-31 21:18:01 +00:00
return doPublicLink ( ctx , remote , expire , unlink )
2018-03-29 07:10:19 +00:00
}
2016-11-27 11:49:31 +00:00
// Rmdirs removes any empty directories (or directories only
// containing empty directories) under f, including f.
//
// Rmdirs obeys the filters
//
// It works in two phases: first it walks the tree building a map of
// which directories are empty, then it deletes the empty ones level by
// level, deepest first, with deletions at the same level run in
// parallel (limited to --checkers).
func Rmdirs(ctx context.Context, f fs.Fs, dir string, leaveRoot bool) error {
	ci := fs.GetConfig(ctx)
	fi := filter.GetConfig(ctx)
	// dirEmpty maps directory path -> "currently believed empty".
	// The root starts as non-empty when leaveRoot is set so it survives.
	dirEmpty := make(map[string]bool)
	dirEmpty[dir] = !leaveRoot
	err := walk.Walk(ctx, f, dir, false, ci.MaxDepth, func(dirPath string, entries fs.DirEntries, err error) error {
		if err != nil {
			// Log and carry on — a listing failure shouldn't stop the walk
			err = fs.CountError(err)
			fs.Errorf(f, "Failed to list %q: %v", dirPath, err)
			return nil
		}
		for _, entry := range entries {
			switch x := entry.(type) {
			case fs.Directory:
				// add a new directory as empty
				dir := x.Remote()
				_, found := dirEmpty[dir]
				if !found {
					dirEmpty[dir] = true
				}
			case fs.Object:
				// mark the parents of the file as being non-empty
				dir := x.Remote()
				for dir != "" {
					dir = path.Dir(dir)
					if dir == "." || dir == "/" {
						dir = ""
					}
					empty, found := dirEmpty[dir]
					// End if we reach a directory which is non-empty
					if found && !empty {
						break
					}
					dirEmpty[dir] = false
				}
			}
		}
		return nil
	})
	if err != nil {
		return fmt.Errorf("failed to rmdirs: %w", err)
	}
	// Group directories to delete by level
	var toDelete [][]string
	for dir, empty := range dirEmpty {
		if empty {
			// If a filter matches the directory then that
			// directory is a candidate for deletion
			if fi.IncludeRemote(dir + "/") {
				// Level is the number of path components; "/" counting
				// makes "a" level 1, "a/b" level 2, etc.
				level := strings.Count(dir, "/") + 1
				// The root directory "" is at the top level
				if dir == "" {
					level = 0
				}
				if len(toDelete) < level+1 {
					toDelete = append(toDelete, make([][]string, level+1-len(toDelete))...)
				}
				toDelete[level] = append(toDelete[level], dir)
			}
		}
	}
	var (
		errMu    sync.Mutex // guards errCount and lastError
		errCount int
		lastError error
	)
	// Delete all directories at the same level in parallel, deepest
	// level first so children go before their parents.
	for level := len(toDelete) - 1; level >= 0; level-- {
		dirs := toDelete[level]
		if len(dirs) == 0 {
			continue
		}
		fs.Debugf(nil, "removing %d level %d directories", len(dirs), level)
		sort.Strings(dirs)
		g, gCtx := errgroup.WithContext(ctx)
		g.SetLimit(ci.Checkers)
		for _, dir := range dirs {
			// End early if error
			if gCtx.Err() != nil {
				break
			}
			dir := dir
			g.Go(func() error {
				err := TryRmdir(gCtx, f, dir)
				if err != nil {
					err = fs.CountError(err)
					fs.Errorf(dir, "Failed to rmdir: %v", err)
					errMu.Lock()
					lastError = err
					errCount += 1
					errMu.Unlock()
				}
				return nil // don't return errors, just count them
			})
		}
		err := g.Wait()
		if err != nil {
			return err
		}
	}
	if lastError != nil {
		return fmt.Errorf("failed to remove %d directories: last error: %w", errCount, lastError)
	}
	return nil
}
2016-10-23 16:34:17 +00:00
2019-07-08 01:02:53 +00:00
// GetCompareDest sets up --compare-dest
2020-11-16 03:04:29 +00:00
func GetCompareDest ( ctx context . Context ) ( CompareDest [ ] fs . Fs , err error ) {
2020-11-05 11:33:32 +00:00
ci := fs . GetConfig ( ctx )
2020-11-16 03:04:29 +00:00
CompareDest , err = cache . GetArr ( ctx , ci . CompareDest )
2019-07-08 01:02:53 +00:00
if err != nil {
2022-06-08 20:54:39 +00:00
return nil , fserrors . FatalError ( fmt . Errorf ( "failed to make fs for --compare-dest %q: %w" , ci . CompareDest , err ) )
2019-07-08 01:02:53 +00:00
}
return CompareDest , nil
}
// compareDest checks --compare-dest to see if src needs to
// be copied
//
// Returns True if src is in --compare-dest
func compareDest ( ctx context . Context , dst , src fs . Object , CompareDest fs . Fs ) ( NoNeedTransfer bool , err error ) {
var remote string
if dst == nil {
remote = src . Remote ( )
} else {
remote = dst . Remote ( )
}
CompareDestFile , err := CompareDest . NewObject ( ctx , remote )
switch err {
case fs . ErrorObjectNotFound :
return false , nil
case nil :
break
default :
return false , err
}
2021-06-14 20:19:12 +00:00
opt := defaultEqualOpt ( ctx )
opt . updateModTime = false
if equal ( ctx , src , CompareDestFile , opt ) {
2019-07-08 01:02:53 +00:00
fs . Debugf ( src , "Destination found in --compare-dest, skipping" )
return true , nil
}
return false , nil
}
// GetCopyDest sets up --copy-dest
2020-11-16 03:04:29 +00:00
func GetCopyDest ( ctx context . Context , fdst fs . Fs ) ( CopyDest [ ] fs . Fs , err error ) {
2020-11-05 11:33:32 +00:00
ci := fs . GetConfig ( ctx )
2020-11-16 03:04:29 +00:00
CopyDest , err = cache . GetArr ( ctx , ci . CopyDest )
2019-07-08 01:02:53 +00:00
if err != nil {
2022-06-08 20:54:39 +00:00
return nil , fserrors . FatalError ( fmt . Errorf ( "failed to make fs for --copy-dest %q: %w" , ci . CopyDest , err ) )
2019-07-08 01:02:53 +00:00
}
2020-11-16 03:04:29 +00:00
if ! SameConfigArr ( fdst , CopyDest ) {
2019-07-08 01:02:53 +00:00
return nil , fserrors . FatalError ( errors . New ( "parameter to --copy-dest has to be on the same remote as destination" ) )
}
2020-11-16 03:04:29 +00:00
for _ , cf := range CopyDest {
if cf . Features ( ) . Copy == nil {
return nil , fserrors . FatalError ( errors . New ( "can't use --copy-dest on a remote which doesn't support server side copy" ) )
}
2019-07-08 01:02:53 +00:00
}
2020-11-16 03:04:29 +00:00
2019-07-08 01:02:53 +00:00
return CopyDest , nil
}
// copyDest checks --copy-dest to see if src needs to
// be copied
//
// Returns True if src was copied from --copy-dest
func copyDest ( ctx context . Context , fdst fs . Fs , dst , src fs . Object , CopyDest , backupDir fs . Fs ) ( NoNeedTransfer bool , err error ) {
var remote string
if dst == nil {
remote = src . Remote ( )
} else {
remote = dst . Remote ( )
}
CopyDestFile , err := CopyDest . NewObject ( ctx , remote )
switch err {
case fs . ErrorObjectNotFound :
return false , nil
case nil :
break
default :
return false , err
}
2020-11-05 11:33:32 +00:00
opt := defaultEqualOpt ( ctx )
2019-06-08 13:08:23 +00:00
opt . updateModTime = false
if equal ( ctx , src , CopyDestFile , opt ) {
2019-07-08 01:02:53 +00:00
if dst == nil || ! Equal ( ctx , src , dst ) {
if dst != nil && backupDir != nil {
err = MoveBackupDir ( ctx , backupDir , dst )
if err != nil {
2021-11-04 10:12:57 +00:00
return false , fmt . Errorf ( "moving to --backup-dir failed: %w" , err )
2019-07-08 01:02:53 +00:00
}
// If successful zero out the dstObj as it is no longer there
dst = nil
}
_ , err := Copy ( ctx , fdst , dst , remote , CopyDestFile )
if err != nil {
fs . Errorf ( src , "Destination found in --copy-dest, error copying" )
return false , nil
}
2020-10-13 21:43:40 +00:00
fs . Debugf ( src , "Destination found in --copy-dest, using server-side copy" )
2019-07-08 01:02:53 +00:00
return true , nil
}
fs . Debugf ( src , "Unchanged skipping" )
return true , nil
}
fs . Debugf ( src , "Destination not found in --copy-dest" )
return false , nil
}
// CompareOrCopyDest checks --compare-dest and --copy-dest to see if src
// does not need to be copied
//
// Returns True if src does not need to be copied
2020-11-16 03:04:29 +00:00
func CompareOrCopyDest ( ctx context . Context , fdst fs . Fs , dst , src fs . Object , CompareOrCopyDest [ ] fs . Fs , backupDir fs . Fs ) ( NoNeedTransfer bool , err error ) {
2020-11-05 11:33:32 +00:00
ci := fs . GetConfig ( ctx )
2020-11-16 03:04:29 +00:00
if len ( ci . CompareDest ) > 0 {
for _ , compareF := range CompareOrCopyDest {
NoNeedTransfer , err := compareDest ( ctx , dst , src , compareF )
if NoNeedTransfer || err != nil {
return NoNeedTransfer , err
}
}
} else if len ( ci . CopyDest ) > 0 {
for _ , copyF := range CompareOrCopyDest {
NoNeedTransfer , err := copyDest ( ctx , fdst , dst , src , copyF , backupDir )
if NoNeedTransfer || err != nil {
return NoNeedTransfer , err
}
}
2019-07-08 01:02:53 +00:00
}
return false , nil
}
2018-01-12 16:30:54 +00:00
// NeedTransfer checks to see if src needs to be copied to dst using
// the current config.
//
// Returns a flag which indicates whether the file needs to be
// transferred or not.
func NeedTransfer(ctx context.Context, dst, src fs.Object) bool {
	ci := fs.GetConfig(ctx)
	logger, _ := GetLogger(ctx)
	if dst == nil {
		fs.Debugf(src, "Need to transfer - File not found at Destination")
		logger(ctx, MissingOnDst, src, nil, nil)
		return true
	}
	// If we should ignore existing files, don't transfer
	if ci.IgnoreExisting {
		fs.Debugf(src, "Destination exists, skipping")
		logger(ctx, Match, src, dst, nil)
		return false
	}
	// If we should upload unconditionally
	if ci.IgnoreTimes {
		fs.Debugf(src, "Transferring unconditionally as --ignore-times is in use")
		logger(ctx, Differ, src, dst, nil)
		return true
	}
	// If UpdateOlder is in effect, skip if dst is newer than src
	if ci.UpdateOlder {
		srcModTime := src.ModTime(ctx)
		dstModTime := dst.ModTime(ctx)
		dt := dstModTime.Sub(srcModTime)
		// If have a mutually agreed precision then use that
		modifyWindow := fs.GetModifyWindow(ctx, dst.Fs(), src.Fs())
		if modifyWindow == fs.ModTimeNotSupported {
			// Otherwise use 1 second as a safe default as
			// the resolution of the time a file was
			// uploaded.
			modifyWindow = time.Second
		}
		switch {
		case dt >= modifyWindow:
			// Destination is significantly newer — keep it
			fs.Debugf(src, "Destination is newer than source, skipping")
			logger(ctx, Match, src, dst, nil)
			return false
		case dt <= -modifyWindow:
			// Source is significantly newer.
			// force --checksum on for the check and do update modtimes by default
			opt := defaultEqualOpt(ctx)
			opt.forceModTimeMatch = true
			if equal(ctx, src, dst, opt) {
				fs.Debugf(src, "Unchanged skipping")
				return false
			}
		default:
			// Mod times within the window of each other.
			// Do a size only compare unless --checksum is set
			opt := defaultEqualOpt(ctx)
			opt.sizeOnly = !ci.CheckSum
			if equal(ctx, src, dst, opt) {
				fs.Debugf(src, "Destination mod time is within %v of source and files identical, skipping", modifyWindow)
				return false
			}
			fs.Debugf(src, "Destination mod time is within %v of source but files differ, transferring", modifyWindow)
		}
	} else {
		// Check to see if changed or not
		//
		// A caller-supplied equality function (set via equalFnKey, used
		// by bisync) takes precedence over the default Equal check.
		equalFn, ok := ctx.Value(equalFnKey).(EqualFn)
		if ok {
			return !equalFn(ctx, src, dst)
		}
		// The SameObject check prevents skipping (and later deleting)
		// when src and dst are really the same file — e.g. a rename that
		// only changes case or unicode normalization of the name.
		if Equal(ctx, src, dst) && !SameObject(src, dst) {
			fs.Debugf(src, "Unchanged skipping")
			return false
		}
	}
	return true
}
2018-08-30 15:45:41 +00:00
// RcatSize reads data from the Reader until EOF and uploads it to a file on remote.
// Pass in size >=0 if known, <0 if not known
//
// When the size is known it uses Put directly; otherwise it delegates
// to Rcat which buffers/spools as needed.
func RcatSize(ctx context.Context, fdst fs.Fs, dstFileName string, in io.ReadCloser, size int64, modTime time.Time, meta fs.Metadata) (dst fs.Object, err error) {
	var obj fs.Object
	if size >= 0 {
		// NB this deliberately shadows the named return err: the
		// deferred tr.Done below captures this inner variable so the
		// accounting sees the Put/Copy error from this branch.
		var err error
		// Size known use Put
		tr := accounting.Stats(ctx).NewTransferRemoteSize(dstFileName, size, nil, fdst)
		defer func() {
			tr.Done(ctx, err)
		}()
		body := io.NopCloser(in)   // we let the server close the body
		in := tr.Account(ctx, body) // account the transfer (no buffering)

		if SkipDestructive(ctx, dstFileName, "upload from pipe") {
			// prevents "broken pipe" errors
			_, err = io.Copy(io.Discard, in)
			return nil, err
		}

		info := object.NewStaticObjectInfo(dstFileName, modTime, size, true, nil, fdst).WithMetadata(meta)
		obj, err = fdst.Put(ctx, in, info)
		if err != nil {
			fs.Errorf(dstFileName, "Post request put error: %v", err)
			return nil, err
		}
	} else {
		// Size unknown use Rcat
		obj, err = Rcat(ctx, fdst, dstFileName, in, modTime, meta)
		if err != nil {
			fs.Errorf(dstFileName, "Post request rcat error: %v", err)
			return nil, err
		}
	}
	return obj, nil
}
// copyURLFunc is called from copyURLFn with the resolved file name, the
// response body, the (possibly -1) content length and the modification time.
type copyURLFunc func(ctx context.Context, dstFileName string, in io.ReadCloser, size int64, modTime time.Time) (err error)
// copyURLFn copies the data from the url to the function supplied
2021-07-28 16:05:21 +00:00
func copyURLFn ( ctx context . Context , dstFileName string , url string , autoFilename , dstFileNameFromHeader bool , fn copyURLFunc ) ( err error ) {
2020-11-13 15:24:43 +00:00
client := fshttp . NewClient ( ctx )
2019-03-08 20:33:22 +00:00
resp , err := client . Get ( url )
2018-11-02 17:29:57 +00:00
if err != nil {
2019-12-18 17:02:13 +00:00
return err
2018-11-02 17:29:57 +00:00
}
defer fs . CheckClose ( resp . Body , & err )
2019-08-05 18:20:50 +00:00
if resp . StatusCode < 200 || resp . StatusCode >= 300 {
2021-11-04 10:12:57 +00:00
return fmt . Errorf ( "CopyURL failed: %s" , resp . Status )
2019-12-18 17:02:13 +00:00
}
modTime , err := http . ParseTime ( resp . Header . Get ( "Last-Modified" ) )
if err != nil {
modTime = time . Now ( )
2019-08-05 18:20:50 +00:00
}
2021-07-28 16:05:21 +00:00
if autoFilename {
if dstFileNameFromHeader {
_ , params , err := mime . ParseMediaType ( resp . Header . Get ( "Content-Disposition" ) )
headerFilename := path . Base ( strings . Replace ( params [ "filename" ] , "\\" , "/" , - 1 ) )
if err != nil || headerFilename == "" {
2022-08-14 02:56:32 +00:00
return fmt . Errorf ( "CopyURL failed: filename not found in the Content-Disposition header" )
2021-07-28 16:05:21 +00:00
}
fs . Debugf ( headerFilename , "filename found in Content-Disposition header." )
return fn ( ctx , headerFilename , resp . Body , resp . ContentLength , modTime )
}
2019-09-03 16:25:19 +00:00
dstFileName = path . Base ( resp . Request . URL . Path )
if dstFileName == "." || dstFileName == "/" {
2021-11-04 10:12:57 +00:00
return fmt . Errorf ( "CopyURL failed: file name wasn't found in url" )
2019-09-03 16:25:19 +00:00
}
2021-03-18 09:04:59 +00:00
fs . Debugf ( dstFileName , "File name found in url" )
2019-09-03 16:25:19 +00:00
}
2019-12-18 17:02:13 +00:00
return fn ( ctx , dstFileName , resp . Body , resp . ContentLength , modTime )
}
2019-09-03 16:25:19 +00:00
2019-12-18 17:02:13 +00:00
// CopyURL copies the data from the url to (fdst, dstFileName)
2021-07-28 16:05:21 +00:00
func CopyURL ( ctx context . Context , fdst fs . Fs , dstFileName string , url string , autoFilename , dstFileNameFromHeader bool , noClobber bool ) ( dst fs . Object , err error ) {
2020-04-19 11:40:17 +00:00
2021-07-28 16:05:21 +00:00
err = copyURLFn ( ctx , dstFileName , url , autoFilename , dstFileNameFromHeader , func ( ctx context . Context , dstFileName string , in io . ReadCloser , size int64 , modTime time . Time ) ( err error ) {
2020-04-19 11:40:17 +00:00
if noClobber {
_ , err = fdst . NewObject ( ctx , dstFileName )
if err == nil {
return errors . New ( "CopyURL failed: file already exist" )
}
}
2022-11-08 17:42:18 +00:00
dst , err = RcatSize ( ctx , fdst , dstFileName , in , size , modTime , nil )
2019-12-18 17:02:13 +00:00
return err
} )
return dst , err
}
// CopyURLToWriter copies the data from the url to the io.Writer supplied
func CopyURLToWriter ( ctx context . Context , url string , out io . Writer ) ( err error ) {
2021-07-28 16:05:21 +00:00
return copyURLFn ( ctx , "" , url , false , false , func ( ctx context . Context , dstFileName string , in io . ReadCloser , size int64 , modTime time . Time ) ( err error ) {
2019-12-18 17:02:13 +00:00
_ , err = io . Copy ( out , in )
return err
} )
2018-11-02 17:29:57 +00:00
}
2019-06-23 03:50:09 +00:00
// BackupDir returns the correctly configured --backup-dir
2020-11-05 15:18:51 +00:00
func BackupDir ( ctx context . Context , fdst fs . Fs , fsrc fs . Fs , srcFileName string ) ( backupDir fs . Fs , err error ) {
2020-11-05 11:33:32 +00:00
ci := fs . GetConfig ( ctx )
if ci . BackupDir != "" {
backupDir , err = cache . Get ( ctx , ci . BackupDir )
2019-06-23 03:52:09 +00:00
if err != nil {
2022-06-08 20:54:39 +00:00
return nil , fserrors . FatalError ( fmt . Errorf ( "failed to make fs for --backup-dir %q: %w" , ci . BackupDir , err ) )
2019-06-23 03:52:09 +00:00
}
if ! SameConfig ( fdst , backupDir ) {
return nil , fserrors . FatalError ( errors . New ( "parameter to --backup-dir has to be on the same remote as destination" ) )
}
if srcFileName == "" {
2022-07-11 21:26:50 +00:00
if OverlappingFilterCheck ( ctx , backupDir , fdst ) {
2019-06-23 03:52:09 +00:00
return nil , fserrors . FatalError ( errors . New ( "destination and parameter to --backup-dir mustn't overlap" ) )
}
2022-07-11 21:26:50 +00:00
if OverlappingFilterCheck ( ctx , backupDir , fsrc ) {
2019-06-23 03:52:09 +00:00
return nil , fserrors . FatalError ( errors . New ( "source and parameter to --backup-dir mustn't overlap" ) )
}
} else {
2020-11-05 11:33:32 +00:00
if ci . Suffix == "" {
2019-06-23 03:52:09 +00:00
if SameDir ( fdst , backupDir ) {
return nil , fserrors . FatalError ( errors . New ( "destination and parameter to --backup-dir mustn't be the same" ) )
}
if SameDir ( fsrc , backupDir ) {
return nil , fserrors . FatalError ( errors . New ( "source and parameter to --backup-dir mustn't be the same" ) )
}
}
}
2020-11-05 11:33:32 +00:00
} else if ci . Suffix != "" {
2019-06-23 03:52:09 +00:00
// --backup-dir is not set but --suffix is - use the destination as the backupDir
backupDir = fdst
2020-10-04 15:38:29 +00:00
} else {
return nil , fserrors . FatalError ( errors . New ( "internal error: BackupDir called when --backup-dir and --suffix both empty" ) )
2019-06-23 03:50:09 +00:00
}
if ! CanServerSideMove ( backupDir ) {
2020-10-13 21:43:40 +00:00
return nil , fserrors . FatalError ( errors . New ( "can't use --backup-dir on a remote which doesn't support server-side move or copy" ) )
2019-06-23 03:50:09 +00:00
}
return backupDir , nil
}
// MoveBackupDir moves a file to the backup dir
func MoveBackupDir ( ctx context . Context , backupDir fs . Fs , dst fs . Object ) ( err error ) {
2020-11-05 11:33:32 +00:00
remoteWithSuffix := SuffixName ( ctx , dst . Remote ( ) )
2019-06-23 03:50:09 +00:00
overwritten , _ := backupDir . NewObject ( ctx , remoteWithSuffix )
_ , err = Move ( ctx , backupDir , overwritten , remoteWithSuffix , dst )
return err
}
2016-10-23 16:34:17 +00:00
// moveOrCopyFile moves or copies a single file possibly to a new name
2019-06-17 08:34:30 +00:00
func moveOrCopyFile ( ctx context . Context , fdst fs . Fs , fsrc fs . Fs , dstFileName string , srcFileName string , cp bool ) ( err error ) {
2020-11-05 11:33:32 +00:00
ci := fs . GetConfig ( ctx )
2023-10-01 09:02:56 +00:00
logger , usingLogger := GetLogger ( ctx )
2017-10-12 19:45:36 +00:00
dstFilePath := path . Join ( fdst . Root ( ) , dstFileName )
srcFilePath := path . Join ( fsrc . Root ( ) , srcFileName )
if fdst . Name ( ) == fsrc . Name ( ) && dstFilePath == srcFilePath {
2018-01-12 16:30:54 +00:00
fs . Debugf ( fdst , "don't need to copy/move %s, it is already at target location" , dstFileName )
2023-10-01 09:02:56 +00:00
if usingLogger {
srcObj , _ := fsrc . NewObject ( ctx , srcFileName )
dstObj , _ := fsrc . NewObject ( ctx , dstFileName )
logger ( ctx , Match , srcObj , dstObj , nil )
}
2017-05-27 15:30:26 +00:00
return nil
}
2016-10-23 16:34:17 +00:00
// Choose operations
2024-01-04 11:28:47 +00:00
Op := MoveTransfer
2016-10-23 16:34:17 +00:00
if cp {
Op = Copy
}
// Find src object
2019-06-17 08:34:30 +00:00
srcObj , err := fsrc . NewObject ( ctx , srcFileName )
2016-10-23 16:34:17 +00:00
if err != nil {
2023-10-01 09:02:56 +00:00
logger ( ctx , TransferError , srcObj , nil , err )
2016-10-23 16:34:17 +00:00
return err
}
// Find dst object if it exists
2019-10-17 16:41:11 +00:00
var dstObj fs . Object
2020-11-05 11:33:32 +00:00
if ! ci . NoCheckDest {
2019-10-17 16:41:11 +00:00
dstObj , err = fdst . NewObject ( ctx , dstFileName )
2023-06-07 08:19:16 +00:00
if errors . Is ( err , fs . ErrorObjectNotFound ) {
2019-10-17 16:41:11 +00:00
dstObj = nil
} else if err != nil {
2023-10-01 09:02:56 +00:00
logger ( ctx , TransferError , nil , dstObj , err )
2019-10-17 16:41:11 +00:00
return err
}
2016-10-23 16:34:17 +00:00
}
2019-06-10 10:01:13 +00:00
// Special case for changing case of a file on a case insensitive remote
// This will move the file to a temporary name then
// move it back to the intended destination. This is required
// to avoid issues with certain remotes and avoid file deletion.
2021-07-07 15:34:16 +00:00
if ! cp && fdst . Name ( ) == fsrc . Name ( ) && fdst . Features ( ) . CaseInsensitive && dstFileName != srcFileName && strings . EqualFold ( dstFilePath , srcFilePath ) {
2023-10-10 11:21:56 +00:00
if SkipDestructive ( ctx , srcFileName , "rename to " + dstFileName ) {
// avoid fatalpanic on --dry-run (trying to access non-existent tmpObj)
return nil
}
2019-06-10 10:01:13 +00:00
// Create random name to temporarily move file to
2019-08-06 11:44:08 +00:00
tmpObjName := dstFileName + "-rclone-move-" + random . String ( 8 )
2023-10-01 09:02:56 +00:00
tmpObjFail , err := fdst . NewObject ( ctx , tmpObjName )
2019-06-10 10:01:13 +00:00
if err != fs . ErrorObjectNotFound {
if err == nil {
2023-10-01 09:02:56 +00:00
logger ( ctx , TransferError , nil , tmpObjFail , err )
2019-06-10 10:01:13 +00:00
return errors . New ( "found an already existing file with a randomly generated name. Try the operation again" )
}
2023-10-01 09:02:56 +00:00
logger ( ctx , TransferError , nil , tmpObjFail , err )
2021-11-04 10:12:57 +00:00
return fmt . Errorf ( "error while attempting to move file to a temporary location: %w" , err )
2019-06-10 10:01:13 +00:00
}
2024-01-18 16:44:13 +00:00
tr := accounting . Stats ( ctx ) . NewTransfer ( srcObj , fdst )
2019-07-16 11:56:20 +00:00
defer func ( ) {
2020-11-05 16:59:59 +00:00
tr . Done ( ctx , err )
2019-07-16 11:56:20 +00:00
} ( )
2019-06-17 08:34:30 +00:00
tmpObj , err := Op ( ctx , fdst , nil , tmpObjName , srcObj )
2019-06-10 10:01:13 +00:00
if err != nil {
2023-10-01 09:02:56 +00:00
logger ( ctx , TransferError , srcObj , tmpObj , err )
2021-11-04 10:12:57 +00:00
return fmt . Errorf ( "error while moving file to temporary location: %w" , err )
2019-06-10 10:01:13 +00:00
}
2019-06-17 08:34:30 +00:00
_ , err = Op ( ctx , fdst , nil , dstFileName , tmpObj )
2023-10-01 09:02:56 +00:00
logger ( ctx , MissingOnDst , tmpObj , nil , err )
2019-06-10 10:01:13 +00:00
return err
}
2020-11-16 03:04:29 +00:00
var backupDir fs . Fs
var copyDestDir [ ] fs . Fs
2020-11-05 11:33:32 +00:00
if ci . BackupDir != "" || ci . Suffix != "" {
2020-11-05 15:18:51 +00:00
backupDir , err = BackupDir ( ctx , fdst , fsrc , srcFileName )
2019-07-08 01:02:53 +00:00
if err != nil {
2021-11-04 10:12:57 +00:00
return fmt . Errorf ( "creating Fs for --backup-dir failed: %w" , err )
2019-07-08 01:02:53 +00:00
}
}
2020-11-16 03:04:29 +00:00
if len ( ci . CompareDest ) > 0 {
2020-11-05 15:18:51 +00:00
copyDestDir , err = GetCompareDest ( ctx )
2019-07-08 01:02:53 +00:00
if err != nil {
return err
}
2020-11-16 03:04:29 +00:00
} else if len ( ci . CopyDest ) > 0 {
2020-11-05 15:18:51 +00:00
copyDestDir , err = GetCopyDest ( ctx , fdst )
2019-07-08 01:02:53 +00:00
if err != nil {
return err
}
}
2022-08-01 16:51:46 +00:00
needTransfer := NeedTransfer ( ctx , dstObj , srcObj )
if needTransfer {
NoNeedTransfer , err := CompareOrCopyDest ( ctx , fdst , dstObj , srcObj , copyDestDir , backupDir )
if err != nil {
return err
}
if NoNeedTransfer {
needTransfer = false
}
2019-07-08 01:02:53 +00:00
}
2022-08-01 16:51:46 +00:00
if needTransfer {
2019-05-23 12:17:16 +00:00
// If destination already exists, then we must move it into --backup-dir if required
2019-07-08 01:02:53 +00:00
if dstObj != nil && backupDir != nil {
2019-06-23 03:50:09 +00:00
err = MoveBackupDir ( ctx , backupDir , dstObj )
2019-05-23 12:17:16 +00:00
if err != nil {
2023-10-01 09:02:56 +00:00
logger ( ctx , TransferError , dstObj , nil , err )
2021-11-04 10:12:57 +00:00
return fmt . Errorf ( "moving to --backup-dir failed: %w" , err )
2019-05-23 12:17:16 +00:00
}
// If successful zero out the dstObj as it is no longer there
2023-10-01 09:02:56 +00:00
logger ( ctx , MissingOnDst , dstObj , nil , nil )
2019-05-23 12:17:16 +00:00
dstObj = nil
}
2019-06-17 08:34:30 +00:00
_ , err = Op ( ctx , fdst , dstObj , dstFileName , srcObj )
2017-06-07 12:02:21 +00:00
} else {
if ! cp {
2021-07-29 16:42:55 +00:00
if ci . IgnoreExisting {
fs . Debugf ( srcObj , "Not removing source file as destination file exists and --ignore-existing is set" )
2023-10-01 09:02:56 +00:00
logger ( ctx , Match , srcObj , dstObj , nil )
operations: fix renaming a file on macOS
Before this change, a file would sometimes be silently deleted instead of
renamed on macOS, due to its unique handling of unicode normalization. Rclone
already had a SameObject check in place for case insensitivity before deleting
the source (for example if "hello.txt" was renamed to "HELLO.txt"), but had no
such check for unicode normalization. After this change, the delete is skipped
on macOS if the src and dst filenames normalize to the same NFC string.
Example of the previous behavior:
~ % rclone touch /Users/nielash/rename_test/ö
~ % rclone lsl /Users/nielash/rename_test/ö
0 2023-11-21 17:28:06.170486000 ö
~ % rclone moveto /Users/nielash/rename_test/ö /Users/nielash/rename_test/ö -vv
2023/11/21 17:28:51 DEBUG : rclone: Version "v1.64.0" starting with parameters ["rclone" "moveto" "/Users/nielash/rename_test/ö" "/Users/nielash/rename_test/ö" "-vv"]
2023/11/21 17:28:51 DEBUG : Creating backend with remote "/Users/nielash/rename_test/ö"
2023/11/21 17:28:51 DEBUG : Using config file from "/Users/nielash/.config/rclone/rclone.conf"
2023/11/21 17:28:51 DEBUG : fs cache: adding new entry for parent of "/Users/nielash/rename_test/ö", "/Users/nielash/rename_test"
2023/11/21 17:28:51 DEBUG : Creating backend with remote "/Users/nielash/rename_test/"
2023/11/21 17:28:51 DEBUG : fs cache: renaming cache item "/Users/nielash/rename_test/" to be canonical "/Users/nielash/rename_test"
2023/11/21 17:28:51 DEBUG : ö: Size and modification time the same (differ by 0s, within tolerance 1ns)
2023/11/21 17:28:51 DEBUG : ö: Unchanged skipping
2023/11/21 17:28:51 INFO : ö: Deleted
2023/11/21 17:28:51 INFO :
Transferred: 0 B / 0 B, -, 0 B/s, ETA -
Checks: 1 / 1, 100%
Deleted: 1 (files), 0 (dirs)
Elapsed time: 0.0s
2023/11/21 17:28:51 DEBUG : 5 go routines active
~ % rclone lsl /Users/nielash/rename_test/
~ %
2023-11-20 16:04:54 +00:00
} else if ! SameObject ( srcObj , dstObj ) {
2021-07-29 16:42:55 +00:00
err = DeleteFile ( ctx , srcObj )
2023-10-01 09:02:56 +00:00
logger ( ctx , Differ , srcObj , dstObj , nil )
2021-07-29 16:42:55 +00:00
}
2017-06-07 12:02:21 +00:00
}
2016-10-23 16:34:17 +00:00
}
2017-06-07 12:02:21 +00:00
return err
2016-10-23 16:34:17 +00:00
}
// MoveFile moves a single file possibly to a new name
2024-01-04 11:28:47 +00:00
//
// This is treated as a transfer.
2019-06-17 08:34:30 +00:00
func MoveFile ( ctx context . Context , fdst fs . Fs , fsrc fs . Fs , dstFileName string , srcFileName string ) ( err error ) {
return moveOrCopyFile ( ctx , fdst , fsrc , dstFileName , srcFileName , false )
2016-10-23 16:34:17 +00:00
}
2018-09-11 01:59:48 +00:00
// SetTier changes tier of object in remote
2019-06-17 08:34:30 +00:00
func SetTier ( ctx context . Context , fsrc fs . Fs , tier string ) error {
return ListFn ( ctx , fsrc , func ( o fs . Object ) {
2018-09-11 01:59:48 +00:00
objImpl , ok := o . ( fs . SetTierer )
if ! ok {
fs . Errorf ( fsrc , "Remote object does not implement SetTier" )
return
}
err := objImpl . SetTier ( tier )
if err != nil {
fs . Errorf ( fsrc , "Failed to do SetTier, %v" , err )
}
} )
}
2023-09-05 17:57:23 +00:00
// SetTierFile changes tier of a single file in remote
func SetTierFile ( ctx context . Context , o fs . Object , tier string ) error {
do , ok := o . ( fs . SetTierer )
if ! ok {
return errors . New ( "remote object does not implement SetTier" )
}
err := do . SetTier ( tier )
if err != nil {
fs . Errorf ( o , "Failed to do SetTier, %v" , err )
return err
}
return nil
}
2022-01-27 18:46:53 +00:00
// TouchDir touches every file in directory with time t
func TouchDir ( ctx context . Context , f fs . Fs , remote string , t time . Time , recursive bool ) error {
return walk . ListR ( ctx , f , remote , false , ConfigMaxDepth ( ctx , recursive ) , walk . ListObjects , func ( entries fs . DirEntries ) error {
2021-05-22 19:06:24 +00:00
entries . ForObject ( func ( o fs . Object ) {
if ! SkipDestructive ( ctx , o , "touch" ) {
fs . Debugf ( f , "Touching %q" , o . Remote ( ) )
err := o . SetModTime ( ctx , t )
if err != nil {
2021-11-04 10:12:57 +00:00
err = fmt . Errorf ( "failed to touch: %w" , err )
2021-05-22 19:06:24 +00:00
err = fs . CountError ( err )
2021-10-21 15:32:28 +00:00
fs . Errorf ( o , "%v" , err )
2021-05-22 19:06:24 +00:00
}
}
} )
return nil
} )
}
2018-01-06 14:39:31 +00:00
// ListFormat defines files information print format
type ListFormat struct {
separator string
dirSlash bool
2018-06-03 09:42:34 +00:00
absolute bool
2019-02-14 08:45:03 +00:00
output [ ] func ( entry * ListJSONItem ) string
2018-05-13 11:15:05 +00:00
csv * csv . Writer
buf bytes . Buffer
2018-01-06 14:39:31 +00:00
}
// SetSeparator sets the string used to separate the output fields.
func (l *ListFormat) SetSeparator(sep string) {
	l.separator = sep
}
// SetDirSlash controls whether a trailing slash is printed after directories.
func (l *ListFormat) SetDirSlash(withSlash bool) {
	l.dirSlash = withSlash
}
2018-06-03 09:42:34 +00:00
// SetAbsolute prints a leading slash in front of path names
func ( l * ListFormat ) SetAbsolute ( absolute bool ) {
l . absolute = absolute
}
2018-05-13 11:15:05 +00:00
// SetCSV defines if the output should be csv
//
// Note that you should call SetSeparator before this if you want a
// custom separator
func ( l * ListFormat ) SetCSV ( useCSV bool ) {
if useCSV {
l . csv = csv . NewWriter ( & l . buf )
if l . separator != "" {
l . csv . Comma = [ ] rune ( l . separator ) [ 0 ]
}
} else {
l . csv = nil
}
}
2018-01-06 14:39:31 +00:00
// SetOutput sets functions used to create files information
2019-02-14 08:45:03 +00:00
func ( l * ListFormat ) SetOutput ( output [ ] func ( entry * ListJSONItem ) string ) {
2018-01-06 14:39:31 +00:00
l . output = output
}
// AddModTime adds file's Mod Time to output
2023-12-08 00:29:55 +00:00
func ( l * ListFormat ) AddModTime ( timeFormat string ) {
switch timeFormat {
case "" :
timeFormat = "2006-01-02 15:04:05"
case "Layout" :
timeFormat = time . Layout
case "ANSIC" :
timeFormat = time . ANSIC
case "UnixDate" :
timeFormat = time . UnixDate
case "RubyDate" :
timeFormat = time . RubyDate
case "RFC822" :
timeFormat = time . RFC822
case "RFC822Z" :
timeFormat = time . RFC822Z
case "RFC850" :
timeFormat = time . RFC850
case "RFC1123" :
timeFormat = time . RFC1123
case "RFC1123Z" :
timeFormat = time . RFC1123Z
case "RFC3339" :
timeFormat = time . RFC3339
case "RFC3339Nano" :
timeFormat = time . RFC3339Nano
case "Kitchen" :
timeFormat = time . Kitchen
case "Stamp" :
timeFormat = time . Stamp
case "StampMilli" :
timeFormat = time . StampMilli
case "StampMicro" :
timeFormat = time . StampMicro
case "StampNano" :
timeFormat = time . StampNano
case "DateTime" :
// timeFormat = time.DateTime // missing in go1.19
timeFormat = "2006-01-02 15:04:05"
case "DateOnly" :
// timeFormat = time.DateOnly // missing in go1.19
timeFormat = "2006-01-02"
case "TimeOnly" :
// timeFormat = time.TimeOnly // missing in go1.19
timeFormat = "15:04:05"
}
2019-02-14 08:45:03 +00:00
l . AppendOutput ( func ( entry * ListJSONItem ) string {
2023-12-08 00:29:55 +00:00
return entry . ModTime . When . Local ( ) . Format ( timeFormat )
2019-02-14 08:45:03 +00:00
} )
2018-01-06 14:39:31 +00:00
}
// AddSize adds file's size to output
func ( l * ListFormat ) AddSize ( ) {
2019-02-14 08:45:03 +00:00
l . AppendOutput ( func ( entry * ListJSONItem ) string {
return strconv . FormatInt ( entry . Size , 10 )
2018-01-06 17:53:37 +00:00
} )
2018-01-06 14:39:31 +00:00
}
2019-02-14 08:45:03 +00:00
// normalisePath makes sure the path has the correct slashes for the current mode
func ( l * ListFormat ) normalisePath ( entry * ListJSONItem , remote string ) string {
if l . absolute && ! strings . HasPrefix ( remote , "/" ) {
remote = "/" + remote
}
if entry . IsDir && l . dirSlash {
remote += "/"
}
return remote
}
2018-01-06 14:39:31 +00:00
// AddPath adds path to file to output
func ( l * ListFormat ) AddPath ( ) {
2019-02-14 08:45:03 +00:00
l . AppendOutput ( func ( entry * ListJSONItem ) string {
return l . normalisePath ( entry , entry . Path )
} )
}
// AddEncrypted adds the encrypted path to file to output
func ( l * ListFormat ) AddEncrypted ( ) {
l . AppendOutput ( func ( entry * ListJSONItem ) string {
return l . normalisePath ( entry , entry . Encrypted )
2018-01-06 14:39:31 +00:00
} )
}
2018-01-06 17:53:37 +00:00
// AddHash adds the hash of the type given to the output
2018-01-12 16:30:54 +00:00
func ( l * ListFormat ) AddHash ( ht hash . Type ) {
2019-02-14 08:45:03 +00:00
hashName := ht . String ( )
l . AppendOutput ( func ( entry * ListJSONItem ) string {
if entry . IsDir {
2018-01-06 17:53:37 +00:00
return ""
}
2019-02-14 08:45:03 +00:00
return entry . Hashes [ hashName ]
2018-01-06 17:53:37 +00:00
} )
}
2018-05-13 08:18:08 +00:00
// AddID adds file's ID to the output if known
func ( l * ListFormat ) AddID ( ) {
2019-02-14 08:45:03 +00:00
l . AppendOutput ( func ( entry * ListJSONItem ) string {
return entry . ID
} )
}
// AddOrigID adds file's Original ID to the output if known
func ( l * ListFormat ) AddOrigID ( ) {
l . AppendOutput ( func ( entry * ListJSONItem ) string {
return entry . OrigID
2018-05-13 08:18:08 +00:00
} )
}
2019-03-20 12:45:06 +00:00
// AddTier adds file's Tier to the output if known
func ( l * ListFormat ) AddTier ( ) {
l . AppendOutput ( func ( entry * ListJSONItem ) string {
return entry . Tier
} )
}
2018-05-13 09:37:25 +00:00
// AddMimeType adds file's MimeType to the output if known
func ( l * ListFormat ) AddMimeType ( ) {
2019-02-14 08:45:03 +00:00
l . AppendOutput ( func ( entry * ListJSONItem ) string {
return entry . MimeType
2018-05-13 09:37:25 +00:00
} )
}
2022-05-24 10:31:48 +00:00
// AddMetadata adds file's Metadata to the output if known
func ( l * ListFormat ) AddMetadata ( ) {
l . AppendOutput ( func ( entry * ListJSONItem ) string {
metadata := entry . Metadata
if metadata == nil {
metadata = make ( fs . Metadata )
}
out , err := json . Marshal ( metadata )
if err != nil {
return fmt . Sprintf ( "Failed to read metadata: %v" , err . Error ( ) )
}
return string ( out )
} )
}
2018-01-06 14:39:31 +00:00
// AppendOutput adds string generated by specific function to printed output
2019-02-14 08:45:03 +00:00
func ( l * ListFormat ) AppendOutput ( functionToAppend func ( item * ListJSONItem ) string ) {
2018-01-06 14:39:31 +00:00
l . output = append ( l . output , functionToAppend )
}
2018-05-13 09:55:18 +00:00
// Format prints information about the DirEntry in the format defined
2019-02-14 08:45:03 +00:00
func ( l * ListFormat ) Format ( entry * ListJSONItem ) ( result string ) {
2018-05-13 11:15:05 +00:00
var out [ ] string
2018-05-13 09:55:18 +00:00
for _ , fun := range l . output {
2019-02-14 08:45:03 +00:00
out = append ( out , fun ( entry ) )
2018-05-13 11:15:05 +00:00
}
if l . csv != nil {
l . buf . Reset ( )
_ = l . csv . Write ( out ) // can't fail writing to bytes.Buffer
l . csv . Flush ( )
result = strings . TrimRight ( l . buf . String ( ) , "\n" )
} else {
result = strings . Join ( out , l . separator )
2018-01-06 14:39:31 +00:00
}
2018-05-13 11:15:05 +00:00
return result
2018-01-06 14:39:31 +00:00
}
2019-01-15 16:43:55 +00:00
2023-12-08 00:29:55 +00:00
// FormatForLSFPrecision Returns a time format for the given precision
func FormatForLSFPrecision ( precision time . Duration ) string {
switch {
case precision <= time . Nanosecond :
return "2006-01-02 15:04:05.000000000"
case precision <= 10 * time . Nanosecond :
return "2006-01-02 15:04:05.00000000"
case precision <= 100 * time . Nanosecond :
return "2006-01-02 15:04:05.0000000"
case precision <= time . Microsecond :
return "2006-01-02 15:04:05.000000"
case precision <= 10 * time . Microsecond :
return "2006-01-02 15:04:05.00000"
case precision <= 100 * time . Microsecond :
return "2006-01-02 15:04:05.0000"
case precision <= time . Millisecond :
return "2006-01-02 15:04:05.000"
case precision <= 10 * time . Millisecond :
return "2006-01-02 15:04:05.00"
case precision <= 100 * time . Millisecond :
return "2006-01-02 15:04:05.0"
}
return "2006-01-02 15:04:05"
}
2019-01-15 16:43:55 +00:00
// DirMove renames srcRemote to dstRemote
//
// It does this by loading the directory tree into memory (using ListR
// if available) and doing renames in parallel.
2019-06-17 08:34:30 +00:00
func DirMove ( ctx context . Context , f fs . Fs , srcRemote , dstRemote string ) ( err error ) {
2020-11-05 11:33:32 +00:00
ci := fs . GetConfig ( ctx )
2020-11-05 07:15:42 +00:00
if SkipDestructive ( ctx , srcRemote , "dirMove" ) {
accounting . Stats ( ctx ) . Renames ( 1 )
return nil
}
2019-01-15 16:43:55 +00:00
// Use DirMove if possible
if doDirMove := f . Features ( ) . DirMove ; doDirMove != nil {
2020-03-30 17:12:32 +00:00
err = doDirMove ( ctx , f , srcRemote , dstRemote )
if err == nil {
accounting . Stats ( ctx ) . Renames ( 1 )
}
return err
2019-01-15 16:43:55 +00:00
}
// Load the directory tree into memory
2019-06-17 08:34:30 +00:00
tree , err := walk . NewDirTree ( ctx , f , srcRemote , true , - 1 )
2019-01-15 16:43:55 +00:00
if err != nil {
2021-11-04 10:12:57 +00:00
return fmt . Errorf ( "RenameDir tree walk: %w" , err )
2019-01-15 16:43:55 +00:00
}
// Get the directories in sorted order
dirs := tree . Dirs ( )
// Make the destination directories - must be done in order not in parallel
for _ , dir := range dirs {
dstPath := dstRemote + dir [ len ( srcRemote ) : ]
2019-06-17 08:34:30 +00:00
err := f . Mkdir ( ctx , dstPath )
2019-01-15 16:43:55 +00:00
if err != nil {
2021-11-04 10:12:57 +00:00
return fmt . Errorf ( "RenameDir mkdir: %w" , err )
2019-01-15 16:43:55 +00:00
}
}
// Rename the files in parallel
type rename struct {
o fs . Object
newPath string
}
2023-02-07 10:56:03 +00:00
renames := make ( chan rename , ci . Checkers )
2019-07-01 08:33:21 +00:00
g , gCtx := errgroup . WithContext ( context . Background ( ) )
2023-02-07 10:56:03 +00:00
for i := 0 ; i < ci . Checkers ; i ++ {
2019-01-15 16:43:55 +00:00
g . Go ( func ( ) error {
for job := range renames {
2019-07-01 08:33:21 +00:00
dstOverwritten , _ := f . NewObject ( gCtx , job . newPath )
_ , err := Move ( gCtx , f , dstOverwritten , job . newPath , job . o )
2019-01-15 16:43:55 +00:00
if err != nil {
return err
}
select {
2019-07-01 08:33:21 +00:00
case <- gCtx . Done ( ) :
return gCtx . Err ( )
2019-01-15 16:43:55 +00:00
default :
}
}
return nil
} )
}
for dir , entries := range tree {
dstPath := dstRemote + dir [ len ( srcRemote ) : ]
for _ , entry := range entries {
if o , ok := entry . ( fs . Object ) ; ok {
renames <- rename { o , path . Join ( dstPath , path . Base ( o . Remote ( ) ) ) }
}
}
}
close ( renames )
err = g . Wait ( )
if err != nil {
2021-11-04 10:12:57 +00:00
return fmt . Errorf ( "RenameDir renames: %w" , err )
2019-01-15 16:43:55 +00:00
}
// Remove the source directories in reverse order
for i := len ( dirs ) - 1 ; i >= 0 ; i -- {
2019-06-17 08:34:30 +00:00
err := f . Rmdir ( ctx , dirs [ i ] )
2019-01-15 16:43:55 +00:00
if err != nil {
2021-11-04 10:12:57 +00:00
return fmt . Errorf ( "RenameDir rmdir: %w" , err )
2019-01-15 16:43:55 +00:00
}
}
return nil
}
2019-06-08 08:19:07 +00:00
// FsInfo provides information about a remote
type FsInfo struct {
// Name of the remote (as passed into NewFs)
Name string
// Root of the remote (as passed into NewFs)
Root string
// String returns a description of the FS
String string
// Precision of the ModTimes in this Fs in Nanoseconds
Precision time . Duration
// Returns the supported hash types of the filesystem
Hashes [ ] string
// Features returns the optional features of this Fs
Features map [ string ] bool
2022-06-22 14:56:41 +00:00
// MetadataInfo returns info about the metadata for this backend
MetadataInfo * fs . MetadataInfo
2019-06-08 08:19:07 +00:00
}
// GetFsInfo gets the information (FsInfo) about a given Fs
func GetFsInfo ( f fs . Fs ) * FsInfo {
2022-06-22 14:56:41 +00:00
features := f . Features ( )
2019-06-08 08:19:07 +00:00
info := & FsInfo {
2022-06-22 14:56:41 +00:00
Name : f . Name ( ) ,
Root : f . Root ( ) ,
String : f . String ( ) ,
Precision : f . Precision ( ) ,
Hashes : make ( [ ] string , 0 , 4 ) ,
Features : features . Enabled ( ) ,
MetadataInfo : nil ,
2019-06-08 08:19:07 +00:00
}
for _ , hashType := range f . Hashes ( ) . Array ( ) {
info . Hashes = append ( info . Hashes , hashType . String ( ) )
}
2022-06-22 14:56:41 +00:00
fsInfo , _ , _ , _ , err := fs . ParseRemote ( fs . ConfigString ( f ) )
if err == nil && fsInfo != nil && fsInfo . MetadataInfo != nil {
info . MetadataInfo = fsInfo . MetadataInfo
}
2019-06-08 08:19:07 +00:00
return info
}
var (
	interactiveMu sync.Mutex // protects the following variables
	// skipped remembers per-action "skip all"/"do all" answers given
	// interactively so the user isn't asked again
	skipped = map[string]bool{}
)
2020-06-05 15:13:10 +00:00
// skipDestructiveChoose asks the user which action to take
//
// Call with interactiveMu held
func skipDestructiveChoose ( ctx context . Context , subject interface { } , action string ) ( skip bool ) {
2023-03-03 14:17:02 +00:00
// Lock the StdoutMutex - must not call fs.Log anything
// otherwise it will deadlock with --interactive --progress
StdoutMutex . Lock ( )
fmt . Printf ( "\nrclone: %s \"%v\"?\n" , action , subject )
i := config . CommandDefault ( [ ] string {
2020-06-05 15:13:10 +00:00
"yYes, this is OK" ,
"nNo, skip this" ,
fmt . Sprintf ( "sSkip all %s operations with no more questions" , action ) ,
fmt . Sprintf ( "!Do all %s operations with no more questions" , action ) ,
"qExit rclone now." ,
2023-03-03 14:17:02 +00:00
} , 0 )
StdoutMutex . Unlock ( )
switch i {
2020-06-05 15:13:10 +00:00
case 'y' :
skip = false
case 'n' :
skip = true
case 's' :
skip = true
skipped [ action ] = true
fs . Logf ( nil , "Skipping all %s operations from now on without asking" , action )
case '!' :
skip = false
skipped [ action ] = false
fs . Logf ( nil , "Doing all %s operations from now on without asking" , action )
case 'q' :
fs . Logf ( nil , "Quitting rclone now" )
atexit . Run ( )
os . Exit ( 0 )
default :
skip = true
fs . Errorf ( nil , "Bad choice %c" , i )
}
return skip
}
// SkipDestructive should be called whenever rclone is about to do an destructive operation.
//
// It will check the --dry-run flag and it will ask the user if the --interactive flag is set.
//
// subject should be the object or directory in use
//
// action should be a descriptive word or short phrase
//
// Together they should make sense in this sentence: "Rclone is about
// to action subject".
func SkipDestructive ( ctx context . Context , subject interface { } , action string ) ( skip bool ) {
var flag string
2020-11-05 11:33:32 +00:00
ci := fs . GetConfig ( ctx )
2020-03-20 18:43:29 +00:00
switch {
2020-11-05 11:33:32 +00:00
case ci . DryRun :
2020-03-20 18:43:29 +00:00
flag = "--dry-run"
skip = true
2020-11-05 11:33:32 +00:00
case ci . Interactive :
2020-03-20 18:43:29 +00:00
flag = "--interactive"
2020-06-05 15:13:10 +00:00
interactiveMu . Lock ( )
defer interactiveMu . Unlock ( )
var found bool
skip , found = skipped [ action ]
if ! found {
skip = skipDestructiveChoose ( ctx , subject , action )
}
2020-03-20 18:43:29 +00:00
default :
2020-06-05 15:13:10 +00:00
return false
2020-03-20 18:43:29 +00:00
}
if skip {
2020-12-30 13:07:47 +00:00
size := int64 ( - 1 )
if do , ok := subject . ( interface { Size ( ) int64 } ) ; ok {
size = do . Size ( )
}
if size >= 0 {
fs . Logf ( subject , "Skipped %s as %s is set (size %v)" , fs . LogValue ( "skipped" , action ) , flag , fs . LogValue ( "size" , fs . SizeSuffix ( size ) ) )
} else {
fs . Logf ( subject , "Skipped %s as %s is set" , fs . LogValue ( "skipped" , action ) , flag )
}
2020-03-20 18:43:29 +00:00
}
return skip
}