2018-01-12 16:30:54 +00:00
// Package operations does generic operations on filesystems and objects
package operations
2014-03-28 17:56:04 +00:00
import (
2017-02-13 10:48:26 +00:00
"bytes"
2018-04-06 18:13:27 +00:00
"context"
2019-10-26 19:27:33 +00:00
"encoding/base64"
2018-05-13 11:15:05 +00:00
"encoding/csv"
2019-10-26 19:27:33 +00:00
"encoding/hex"
2014-03-28 17:56:04 +00:00
"fmt"
2014-08-01 16:58:39 +00:00
"io"
2017-08-03 19:42:35 +00:00
"io/ioutil"
2019-12-18 17:02:13 +00:00
"net/http"
2020-06-05 15:13:10 +00:00
"os"
2015-03-01 12:38:31 +00:00
"path"
2019-02-28 11:39:32 +00:00
"path/filepath"
2016-03-05 16:10:51 +00:00
"sort"
2018-01-06 14:39:31 +00:00
"strconv"
2016-01-23 20:16:47 +00:00
"strings"
2014-03-28 17:56:04 +00:00
"sync"
2015-10-02 18:48:48 +00:00
"sync/atomic"
2017-08-03 19:42:35 +00:00
"time"
2016-01-23 20:16:47 +00:00
2016-06-12 14:06:02 +00:00
"github.com/pkg/errors"
2019-07-28 17:47:38 +00:00
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/cache"
2020-03-20 18:43:29 +00:00
"github.com/rclone/rclone/fs/config"
2021-02-09 10:12:23 +00:00
"github.com/rclone/rclone/fs/filter"
2019-07-28 17:47:38 +00:00
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fs/walk"
2020-06-05 15:13:10 +00:00
"github.com/rclone/rclone/lib/atexit"
2021-02-10 17:59:10 +00:00
"github.com/rclone/rclone/lib/pacer"
2019-08-06 11:44:08 +00:00
"github.com/rclone/rclone/lib/random"
2019-07-28 17:47:38 +00:00
"github.com/rclone/rclone/lib/readers"
2019-01-15 16:43:55 +00:00
"golang.org/x/sync/errgroup"
2014-03-28 17:56:04 +00:00
)
2016-01-11 12:39:33 +00:00
// CheckHashes checks the two files to see if they have common
// known hash types and compares them
2014-03-28 17:56:04 +00:00
//
2016-01-24 18:06:57 +00:00
// Returns
2015-08-20 19:48:58 +00:00
//
2016-01-24 18:06:57 +00:00
// equal - which is equality of the hashes
//
// hash - the HashType. This is HashNone if either of the hashes were
// unset or a compatible hash couldn't be found.
//
// err - may return an error which will already have been logged
2014-03-28 17:56:04 +00:00
//
2015-08-20 19:48:58 +00:00
// If an error is returned it will return equal as false
2019-06-17 08:34:30 +00:00
func CheckHashes ( ctx context . Context , src fs . ObjectInfo , dst fs . Object ) ( equal bool , ht hash . Type , err error ) {
2016-01-11 12:39:33 +00:00
common := src . Fs ( ) . Hashes ( ) . Overlap ( dst . Fs ( ) . Hashes ( ) )
2018-01-12 16:30:54 +00:00
// fs.Debugf(nil, "Shared hashes: %v", common)
2016-01-11 12:39:33 +00:00
if common . Count ( ) == 0 {
2018-01-18 20:27:52 +00:00
return true , hash . None , nil
2016-01-11 12:39:33 +00:00
}
2019-08-10 09:28:26 +00:00
equal , ht , _ , _ , err = checkHashes ( ctx , src , dst , common . GetOne ( ) )
return equal , ht , err
}
// checkHashes does the work of CheckHashes but takes a hash.Type and
// returns the effective hash type used.
func checkHashes ( ctx context . Context , src fs . ObjectInfo , dst fs . Object , ht hash . Type ) ( equal bool , htOut hash . Type , srcHash , dstHash string , err error ) {
// Calculate hashes in parallel
g , ctx := errgroup . WithContext ( ctx )
g . Go ( func ( ) ( err error ) {
srcHash , err = src . Hash ( ctx , ht )
if err != nil {
2019-11-18 14:13:02 +00:00
err = fs . CountError ( err )
2019-08-10 09:28:26 +00:00
fs . Errorf ( src , "Failed to calculate src hash: %v" , err )
}
return err
} )
g . Go ( func ( ) ( err error ) {
dstHash , err = dst . Hash ( ctx , ht )
if err != nil {
2019-11-18 14:13:02 +00:00
err = fs . CountError ( err )
2019-08-10 09:28:26 +00:00
fs . Errorf ( dst , "Failed to calculate dst hash: %v" , err )
}
return err
} )
err = g . Wait ( )
2014-03-28 17:56:04 +00:00
if err != nil {
2019-08-10 09:28:26 +00:00
return false , ht , srcHash , dstHash , err
2015-08-20 19:48:58 +00:00
}
2016-01-11 12:39:33 +00:00
if srcHash == "" {
2019-08-10 09:28:26 +00:00
return true , hash . None , srcHash , dstHash , nil
2015-08-20 19:48:58 +00:00
}
2016-01-11 12:39:33 +00:00
if dstHash == "" {
2019-08-10 09:28:26 +00:00
return true , hash . None , srcHash , dstHash , nil
2014-03-28 17:56:04 +00:00
}
2017-02-23 11:23:19 +00:00
if srcHash != dstHash {
2018-01-12 16:30:54 +00:00
fs . Debugf ( src , "%v = %s (%v)" , ht , srcHash , src . Fs ( ) )
fs . Debugf ( dst , "%v = %s (%v)" , ht , dstHash , dst . Fs ( ) )
2019-08-13 15:43:24 +00:00
} else {
fs . Debugf ( src , "%v = %s OK" , ht , srcHash )
2017-02-23 11:23:19 +00:00
}
2019-08-10 09:28:26 +00:00
return srcHash == dstHash , ht , srcHash , dstHash , nil
2014-03-28 17:56:04 +00:00
}
2015-09-22 17:47:16 +00:00
// Equal checks to see if the src and dst objects are equal by looking at
2016-01-11 12:39:33 +00:00
// size, mtime and hash
2014-03-28 17:56:04 +00:00
//
// If the src and dst size are different then it is considered to be
2015-06-06 07:38:45 +00:00
// not equal. If --size-only is in effect then this is the only check
2016-06-17 16:20:08 +00:00
// that is done. If --ignore-size is in effect then this check is
// skipped and the files are considered the same size.
2014-03-28 17:56:04 +00:00
//
// If the size is the same and the mtime is the same then it is
2015-06-06 07:38:45 +00:00
// considered to be equal. This check is skipped if using --checksum.
2014-03-28 17:56:04 +00:00
//
2015-06-06 07:38:45 +00:00
// If the size is the same and mtime is different, unreadable or
2016-01-11 12:39:33 +00:00
// --checksum is set and the hash is the same then the file is
2015-06-06 07:38:45 +00:00
// considered to be equal. In this case the mtime on the dst is
// updated if --checksum is not set.
2014-03-28 17:56:04 +00:00
//
// Otherwise the file is considered to be not equal including if there
// were errors reading info.
2019-06-17 08:34:30 +00:00
func Equal ( ctx context . Context , src fs . ObjectInfo , dst fs . Object ) bool {
2020-11-05 11:33:32 +00:00
return equal ( ctx , src , dst , defaultEqualOpt ( ctx ) )
2016-12-18 10:03:56 +00:00
}
2018-01-31 16:15:30 +00:00
// sizeDiffers compare the size of src and dst taking into account the
// various ways of ignoring sizes
2020-11-05 11:33:32 +00:00
func sizeDiffers ( ctx context . Context , src , dst fs . ObjectInfo ) bool {
ci := fs . GetConfig ( ctx )
if ci . IgnoreSize || src . Size ( ) < 0 || dst . Size ( ) < 0 {
2018-01-31 16:15:30 +00:00
return false
}
return src . Size ( ) != dst . Size ( )
}
// checksumWarning makes sure the "no hashes in common" warning in
// equal() is only logged once per run.
var checksumWarning sync.Once
// equalOpt carries the options controlling the checks made by equal()
type equalOpt struct {
	sizeOnly          bool // if set only check size
	checkSum          bool // if set check checksum+size instead of modtime+size
	updateModTime     bool // if set update the modtime if hashes identical and checking with modtime+size
	forceModTimeMatch bool // if set assume modtimes match
}
// default set of options for equal()
2020-11-05 11:33:32 +00:00
func defaultEqualOpt ( ctx context . Context ) equalOpt {
ci := fs . GetConfig ( ctx )
2019-06-08 13:08:23 +00:00
return equalOpt {
2020-11-05 11:33:32 +00:00
sizeOnly : ci . SizeOnly ,
checkSum : ci . CheckSum ,
updateModTime : ! ci . NoUpdateModTime ,
2019-06-08 13:08:23 +00:00
forceModTimeMatch : false ,
}
}
// modTimeUploadOnce guards the one-time log message emitted by
// logModTimeUpload.
var modTimeUploadOnce sync.Once

// logModTimeUpload emits a log (once per run) if we are about to
// upload a file just to set its modification time.
func logModTimeUpload(dst fs.Object) {
	modTimeUploadOnce.Do(func() {
		fs.Logf(dst.Fs(), "Forced to upload files to set modification times on this backend.")
	})
}
// equal does the work of Equal with behaviour controlled by opt.
//
// The checks run in this order:
//  1. size (unless ignored via config) - differing sizes are never equal
//  2. if opt.sizeOnly - matching sizes are enough
//  3. if opt.checkSum - checksum+size instead of modtime+size
//  4. otherwise modtime+size, falling back to a hash comparison when
//     the modtimes differ, and optionally fixing up the dst modtime.
func equal(ctx context.Context, src fs.ObjectInfo, dst fs.Object, opt equalOpt) bool {
	ci := fs.GetConfig(ctx)
	if sizeDiffers(ctx, src, dst) {
		fs.Debugf(src, "Sizes differ (src %d vs dst %d)", src.Size(), dst.Size())
		return false
	}
	if opt.sizeOnly {
		fs.Debugf(src, "Sizes identical")
		return true
	}

	// Assert: Size is equal or being ignored

	// If checking checksum and not modtime
	if opt.checkSum {
		// Check the hash
		same, ht, _ := CheckHashes(ctx, src, dst)
		if !same {
			fs.Debugf(src, "%v differ", ht)
			return false
		}
		if ht == hash.None {
			// --checksum couldn't actually check anything - warn once
			// that we are effectively falling back to --size-only
			common := src.Fs().Hashes().Overlap(dst.Fs().Hashes())
			if common.Count() == 0 {
				checksumWarning.Do(func() {
					fs.Logf(dst.Fs(), "--checksum is in use but the source and destination have no hashes in common; falling back to --size-only")
				})
			}
			fs.Debugf(src, "Size of src and dst objects identical")
		} else {
			fs.Debugf(src, "Size and %v of src and dst objects identical", ht)
		}
		return true
	}

	srcModTime := src.ModTime(ctx)
	if !opt.forceModTimeMatch {
		// Sizes the same so check the mtime
		modifyWindow := fs.GetModifyWindow(ctx, src.Fs(), dst.Fs())
		if modifyWindow == fs.ModTimeNotSupported {
			// Can't compare modtimes on this pair of remotes, so size
			// match is the best we can do
			fs.Debugf(src, "Sizes identical")
			return true
		}
		dstModTime := dst.ModTime(ctx)
		dt := dstModTime.Sub(srcModTime)
		if dt < modifyWindow && dt > -modifyWindow {
			fs.Debugf(src, "Size and modification time the same (differ by %s, within tolerance %s)", dt, modifyWindow)
			return true
		}

		fs.Debugf(src, "Modification times differ by %s: %v, %v", dt, srcModTime, dstModTime)
	}

	// Check if the hashes are the same
	same, ht, _ := CheckHashes(ctx, src, dst)
	if !same {
		fs.Debugf(src, "%v differ", ht)
		return false
	}
	if ht == hash.None && !ci.RefreshTimes {
		// if couldn't check hash, return that they differ
		return false
	}

	// mod time differs but hash is the same to reset mod time if required
	if opt.updateModTime {
		if !SkipDestructive(ctx, src, "update modification time") {
			// Size and hash the same but mtime different
			// Error if objects are treated as immutable
			if ci.Immutable {
				fs.Errorf(dst, "Timestamp mismatch between immutable objects")
				return false
			}
			// Update the mtime of the dst object here
			err := dst.SetModTime(ctx, srcModTime)
			if err == fs.ErrorCantSetModTime {
				// Returning false here forces a re-upload to fix the time
				logModTimeUpload(dst)
				fs.Infof(dst, "src and dst identical but can't set mod time without re-uploading")
				return false
			} else if err == fs.ErrorCantSetModTimeWithoutDelete {
				logModTimeUpload(dst)
				fs.Infof(dst, "src and dst identical but can't set mod time without deleting and re-uploading")
				// Remove the file if BackupDir isn't set.  If BackupDir is set we would rather have the old file
				// put in the BackupDir than deleted which is what will happen if we don't delete it.
				if ci.BackupDir == "" {
					err = dst.Remove(ctx)
					if err != nil {
						fs.Errorf(dst, "failed to delete before re-upload: %v", err)
					}
				}
				return false
			} else if err != nil {
				err = fs.CountError(err)
				fs.Errorf(dst, "Failed to set modification time: %v", err)
			} else {
				fs.Infof(src, "Updated modification time in destination")
			}
		}
	}
	return true
}
2014-07-15 18:27:05 +00:00
// Used to remove a failed copy
2015-03-14 17:54:41 +00:00
//
2019-04-30 12:06:24 +00:00
// Returns whether the file was successfully removed or not
2019-06-17 08:34:30 +00:00
func removeFailedCopy ( ctx context . Context , dst fs . Object ) bool {
2015-03-14 17:54:41 +00:00
if dst == nil {
return false
}
2018-01-12 16:30:54 +00:00
fs . Infof ( dst , "Removing failed copy" )
2019-06-17 08:34:30 +00:00
removeErr := dst . Remove ( ctx )
2015-03-14 17:54:41 +00:00
if removeErr != nil {
2018-01-12 16:30:54 +00:00
fs . Infof ( dst , "Failed to remove failed copy: %s" , removeErr )
2015-03-14 17:54:41 +00:00
return false
2014-07-15 18:27:05 +00:00
}
2015-03-14 17:54:41 +00:00
return true
2014-07-15 18:27:05 +00:00
}
// OverrideRemote is a wrapper to override the Remote for an
// ObjectInfo while delegating everything else to the wrapped value.
type OverrideRemote struct {
	fs.ObjectInfo        // the wrapped ObjectInfo - all other methods come from here
	remote        string // the remote name reported by Remote() instead of the wrapped one
}
2019-07-04 11:24:58 +00:00
// NewOverrideRemote returns an OverrideRemoteObject which will
// return the remote specified
func NewOverrideRemote ( oi fs . ObjectInfo , remote string ) * OverrideRemote {
return & OverrideRemote {
ObjectInfo : oi ,
remote : remote ,
}
}
// Remote returns the overridden remote name rather than the wrapped
// object's own name.
func (o *OverrideRemote) Remote() string {
	return o.remote
}
2017-03-04 10:10:55 +00:00
// MimeType returns the mime type of the underlying object or "" if it
// can't be worked out
2019-07-04 11:24:58 +00:00
func ( o * OverrideRemote ) MimeType ( ctx context . Context ) string {
if do , ok := o . ObjectInfo . ( fs . MimeTyper ) ; ok {
2019-06-17 08:34:30 +00:00
return do . MimeType ( ctx )
2017-03-04 10:10:55 +00:00
}
return ""
}
2019-07-04 11:24:58 +00:00
// ID returns the ID of the Object if known, or "" if not
func ( o * OverrideRemote ) ID ( ) string {
if do , ok := o . ObjectInfo . ( fs . IDer ) ; ok {
return do . ID ( )
}
return ""
}
// UnWrap returns the Object that this Object is wrapping or nil if it
// isn't wrapping anything
func (o *OverrideRemote) UnWrap() fs.Object {
	obj, ok := o.ObjectInfo.(fs.Object)
	if !ok {
		return nil
	}
	return obj
}
// GetTier returns storage tier or class of the Object
func (o *OverrideRemote) GetTier() string {
	do, ok := o.ObjectInfo.(fs.GetTierer)
	if !ok {
		return ""
	}
	return do.GetTier()
}
// Compile-time check that OverrideRemote satisfies all the optional
// ObjectInfo interfaces.
var _ fs.FullObjectInfo = (*OverrideRemote)(nil)
2017-03-04 10:10:55 +00:00
2020-02-13 14:24:00 +00:00
// CommonHash returns a single hash.Type and a HashOption with that
// type which is in common between the two fs.Fs.
2020-11-05 11:33:32 +00:00
func CommonHash ( ctx context . Context , fa , fb fs . Info ) ( hash . Type , * fs . HashesOption ) {
ci := fs . GetConfig ( ctx )
2020-02-13 14:24:00 +00:00
// work out which hash to use - limit to 1 hash in common
var common hash . Set
hashType := hash . None
2020-11-05 11:33:32 +00:00
if ! ci . IgnoreChecksum {
2020-02-13 14:24:00 +00:00
common = fb . Hashes ( ) . Overlap ( fa . Hashes ( ) )
if common . Count ( ) > 0 {
hashType = common . GetOne ( )
common = hash . Set ( hashType )
}
}
return hashType , & fs . HashesOption { Hashes : common }
}
2016-10-22 16:53:10 +00:00
// Copy src object to dst or f if nil. If dst is nil then it uses
// remote as the name of the new object.
2017-12-01 15:31:20 +00:00
//
// It returns the destination object if possible. Note that this may
// be nil.
2019-06-17 08:34:30 +00:00
func Copy ( ctx context . Context , f fs . Fs , dst fs . Object , remote string , src fs . Object ) ( newDst fs . Object , err error ) {
2020-11-05 11:33:32 +00:00
ci := fs . GetConfig ( ctx )
2019-07-18 10:13:54 +00:00
tr := accounting . Stats ( ctx ) . NewTransfer ( src )
2019-04-23 15:19:12 +00:00
defer func ( ) {
2020-11-05 16:59:59 +00:00
tr . Done ( ctx , err )
2019-04-23 15:19:12 +00:00
} ( )
2017-12-01 15:31:20 +00:00
newDst = dst
2020-06-05 15:13:10 +00:00
if SkipDestructive ( ctx , src , "copy" ) {
2020-11-05 07:15:42 +00:00
in := tr . Account ( ctx , nil )
in . DryRun ( src . Size ( ) )
2017-12-01 15:31:20 +00:00
return newDst , nil
2016-10-22 16:53:10 +00:00
}
2020-11-05 11:33:32 +00:00
maxTries := ci . LowLevelRetries
2015-02-02 17:29:08 +00:00
tries := 0
doUpdate := dst != nil
2020-11-05 11:33:32 +00:00
hashType , hashOption := CommonHash ( ctx , f , src . Fs ( ) )
2020-02-13 14:24:00 +00:00
2016-06-18 09:55:58 +00:00
var actionTaken string
for {
2020-10-13 21:43:40 +00:00
// Try server-side copy first - if has optional interface and
2016-06-18 09:55:58 +00:00
// is same underlying remote
2020-10-13 21:43:40 +00:00
actionTaken = "Copied (server-side copy)"
2020-11-05 11:33:32 +00:00
if ci . MaxTransfer >= 0 {
2020-11-02 17:13:19 +00:00
var bytesSoFar int64
2020-11-05 11:33:32 +00:00
if ci . CutoffMode == fs . CutoffModeCautious {
2020-11-02 17:13:19 +00:00
bytesSoFar = accounting . Stats ( ctx ) . GetBytesWithPending ( ) + src . Size ( )
} else {
bytesSoFar = accounting . Stats ( ctx ) . GetBytes ( )
}
2020-11-05 11:33:32 +00:00
if bytesSoFar >= int64 ( ci . MaxTransfer ) {
if ci . CutoffMode == fs . CutoffModeHard {
2020-11-02 17:13:19 +00:00
return nil , accounting . ErrorMaxTransferLimitReachedFatal
}
return nil , accounting . ErrorMaxTransferLimitReachedGraceful
}
2020-02-25 17:03:21 +00:00
}
2019-02-11 01:36:47 +00:00
if doCopy := f . Features ( ) . Copy ; doCopy != nil && ( SameConfig ( src . Fs ( ) , f ) || ( SameRemoteType ( src . Fs ( ) , f ) && f . Features ( ) . ServerSideAcrossConfigs ) ) {
2020-06-04 14:09:03 +00:00
in := tr . Account ( ctx , nil ) // account the transfer
2019-08-28 16:35:58 +00:00
in . ServerSideCopyStart ( )
2019-06-17 08:34:30 +00:00
newDst , err = doCopy ( ctx , src , remote )
2016-06-18 09:55:58 +00:00
if err == nil {
dst = newDst
2020-10-13 21:43:40 +00:00
in . ServerSideCopyEnd ( dst . Size ( ) ) // account the bytes for the server-side transfer
2019-08-28 16:35:58 +00:00
err = in . Close ( )
} else {
_ = in . Close ( )
2016-06-18 09:55:58 +00:00
}
2019-09-13 17:08:01 +00:00
if err == fs . ErrorCantCopy {
2020-11-05 16:59:59 +00:00
tr . Reset ( ctx ) // skip incomplete accounting - will be overwritten by the manual copy below
2019-09-13 17:08:01 +00:00
}
2016-06-18 09:55:58 +00:00
} else {
2018-01-12 16:30:54 +00:00
err = fs . ErrorCantCopy
2015-10-06 14:35:22 +00:00
}
2020-10-13 21:43:40 +00:00
// If can't server-side copy, do it manually
2018-01-12 16:30:54 +00:00
if err == fs . ErrorCantCopy {
2020-11-05 11:33:32 +00:00
if doMultiThreadCopy ( ctx , f , src ) {
2019-04-24 16:04:40 +00:00
// Number of streams proportional to size
2020-11-05 11:33:32 +00:00
streams := src . Size ( ) / int64 ( ci . MultiThreadCutoff )
2019-04-24 16:04:40 +00:00
// With maximum
2020-11-05 11:33:32 +00:00
if streams > int64 ( ci . MultiThreadStreams ) {
streams = int64 ( ci . MultiThreadStreams )
2019-04-24 16:04:40 +00:00
}
if streams < 2 {
streams = 2
}
2019-07-16 11:56:20 +00:00
dst , err = multiThreadCopy ( ctx , f , remote , src , int ( streams ) , tr )
2019-04-24 16:04:40 +00:00
if doUpdate {
actionTaken = "Multi-thread Copied (replaced existing)"
} else {
actionTaken = "Multi-thread Copied (new)"
}
2016-06-18 09:55:58 +00:00
} else {
2019-04-24 16:04:40 +00:00
var in0 io . ReadCloser
2020-02-10 09:01:28 +00:00
options := [ ] fs . OpenOption { hashOption }
2020-11-05 11:33:32 +00:00
for _ , option := range ci . DownloadHeaders {
2020-02-10 09:01:28 +00:00
options = append ( options , option )
}
2020-11-05 11:33:32 +00:00
in0 , err = NewReOpen ( ctx , src , ci . LowLevelRetries , options ... )
2019-04-24 16:04:40 +00:00
if err != nil {
err = errors . Wrap ( err , "failed to open source object" )
2019-01-07 08:26:53 +00:00
} else {
2019-04-24 16:04:40 +00:00
if src . Size ( ) == - 1 {
// -1 indicates unknown size. Use Rcat to handle both remotes supporting and not supporting PutStream.
if doUpdate {
actionTaken = "Copied (Rcat, replaced existing)"
} else {
actionTaken = "Copied (Rcat, new)"
}
2019-09-12 10:12:19 +00:00
// NB Rcat closes in0
2019-06-17 08:34:30 +00:00
dst , err = Rcat ( ctx , f , remote , in0 , src . ModTime ( ctx ) )
2019-01-07 08:26:53 +00:00
newDst = dst
2019-04-24 16:04:40 +00:00
} else {
2020-06-04 14:09:03 +00:00
in := tr . Account ( ctx , in0 ) . WithBuffer ( ) // account and buffer the transfer
2019-04-24 16:04:40 +00:00
var wrappedSrc fs . ObjectInfo = src
// We try to pass the original object if possible
if src . Remote ( ) != remote {
2019-07-04 11:24:58 +00:00
wrappedSrc = NewOverrideRemote ( src , remote )
2019-04-24 16:04:40 +00:00
}
2020-02-10 09:01:28 +00:00
options := [ ] fs . OpenOption { hashOption }
2020-11-05 11:33:32 +00:00
for _ , option := range ci . UploadHeaders {
2020-02-10 09:01:28 +00:00
options = append ( options , option )
}
2019-04-24 16:04:40 +00:00
if doUpdate {
actionTaken = "Copied (replaced existing)"
2020-02-10 09:01:28 +00:00
err = dst . Update ( ctx , in , wrappedSrc , options ... )
2019-04-24 16:04:40 +00:00
} else {
actionTaken = "Copied (new)"
2020-02-10 09:01:28 +00:00
dst , err = f . Put ( ctx , in , wrappedSrc , options ... )
2019-04-24 16:04:40 +00:00
}
closeErr := in . Close ( )
if err == nil {
newDst = dst
err = closeErr
}
2019-01-07 08:26:53 +00:00
}
2016-06-18 09:55:58 +00:00
}
}
2015-02-14 18:48:08 +00:00
}
2015-02-02 17:29:08 +00:00
tries ++
2016-06-18 09:55:58 +00:00
if tries >= maxTries {
break
2015-03-14 17:54:41 +00:00
}
2016-06-18 09:55:58 +00:00
// Retry if err returned a retry error
2021-03-11 14:44:01 +00:00
if fserrors . ContextError ( ctx , & err ) {
break
}
2021-02-10 17:59:10 +00:00
var retry bool
2018-01-12 16:30:54 +00:00
if fserrors . IsRetryError ( err ) || fserrors . ShouldRetry ( err ) {
2021-02-10 17:59:10 +00:00
retry = true
} else if t , ok := pacer . IsRetryAfter ( err ) ; ok {
fs . Debugf ( src , "Sleeping for %v (as indicated by the server) to obey Retry-After error: %v" , t , err )
time . Sleep ( t )
retry = true
}
if retry {
2018-01-12 16:30:54 +00:00
fs . Debugf ( src , "Received error: %v - low level retry %d/%d" , err , tries , maxTries )
2020-11-05 16:59:59 +00:00
tr . Reset ( ctx ) // skip incomplete accounting - will be overwritten by retry
2016-06-18 09:55:58 +00:00
continue
}
// otherwise finish
break
2014-03-28 17:56:04 +00:00
}
if err != nil {
2019-11-18 14:13:02 +00:00
err = fs . CountError ( err )
2018-01-12 16:30:54 +00:00
fs . Errorf ( src , "Failed to copy: %v" , err )
2017-12-01 15:31:20 +00:00
return newDst , err
2014-03-28 17:56:04 +00:00
}
2014-07-15 18:27:05 +00:00
2014-07-19 11:38:58 +00:00
// Verify sizes are the same after transfer
2020-11-05 11:33:32 +00:00
if sizeDiffers ( ctx , src , dst ) {
2016-06-12 14:06:02 +00:00
err = errors . Errorf ( "corrupted on transfer: sizes differ %d vs %d" , src . Size ( ) , dst . Size ( ) )
2018-01-12 16:30:54 +00:00
fs . Errorf ( dst , "%v" , err )
2019-11-18 14:13:02 +00:00
err = fs . CountError ( err )
2019-06-17 08:34:30 +00:00
removeFailedCopy ( ctx , dst )
2017-12-01 15:31:20 +00:00
return newDst , err
2014-07-19 11:38:58 +00:00
}
2016-01-11 12:39:33 +00:00
// Verify hashes are the same after transfer - ignoring blank hashes
2019-08-10 09:40:12 +00:00
if hashType != hash . None {
2019-08-10 09:28:26 +00:00
// checkHashes has logged and counted errors
equal , _ , srcSum , dstSum , _ := checkHashes ( ctx , src , dst , hashType )
if ! equal {
err = errors . Errorf ( "corrupted on transfer: %v hash differ %q vs %q" , hashType , srcSum , dstSum )
fs . Errorf ( dst , "%v" , err )
2019-11-18 14:13:02 +00:00
err = fs . CountError ( err )
2019-08-10 09:28:26 +00:00
removeFailedCopy ( ctx , dst )
return newDst , err
2014-07-15 18:27:05 +00:00
}
}
2020-11-09 12:36:10 +00:00
if newDst != nil && src . String ( ) != newDst . String ( ) {
2020-10-26 20:44:01 +00:00
fs . Infof ( src , "%s to: %s" , actionTaken , newDst . String ( ) )
} else {
fs . Infof ( src , actionTaken )
}
2017-12-01 15:31:20 +00:00
return newDst , err
2015-08-24 20:42:23 +00:00
}
2019-06-15 12:54:17 +00:00
// SameObject returns true if src and dst could be pointing to the
// same object.
func SameObject ( src , dst fs . Object ) bool {
2021-01-26 17:44:24 +00:00
srcFs , dstFs := src . Fs ( ) , dst . Fs ( )
if ! SameConfig ( srcFs , dstFs ) {
// If same remote type then check ID of objects if available
doSrcID , srcIDOK := src . ( fs . IDer )
doDstID , dstIDOK := dst . ( fs . IDer )
if srcIDOK && dstIDOK && SameRemoteType ( srcFs , dstFs ) {
srcID , dstID := doSrcID . ID ( ) , doDstID . ID ( )
if srcID != "" && srcID == dstID {
return true
}
}
2019-06-15 12:54:17 +00:00
return false
}
2021-01-26 17:44:24 +00:00
srcPath := path . Join ( srcFs . Root ( ) , src . Remote ( ) )
dstPath := path . Join ( dstFs . Root ( ) , dst . Remote ( ) )
2019-06-15 12:54:17 +00:00
if dst . Fs ( ) . Features ( ) . CaseInsensitive {
srcPath = strings . ToLower ( srcPath )
dstPath = strings . ToLower ( dstPath )
}
return srcPath == dstPath
}
// Move src object to dst or fdst if nil.  If dst is nil then it uses
// remote as the name of the new object.
//
// Note that you must check the destination does not exist before
// calling this and pass it as dst.  If you pass dst=nil and the
// destination does exist then this may create duplicates or return
// errors.
//
// It returns the destination object if possible.  Note that this may
// be nil.
func Move(ctx context.Context, fdst fs.Fs, dst fs.Object, remote string, src fs.Object) (newDst fs.Object, err error) {
	tr := accounting.Stats(ctx).NewCheckingTransfer(src)
	defer func() {
		// only count a rename when the whole move succeeded
		if err == nil {
			accounting.Stats(ctx).Renames(1)
		}
		tr.Done(ctx, err)
	}()
	newDst = dst
	if SkipDestructive(ctx, src, "move") {
		in := tr.Account(ctx, nil)
		in.DryRun(src.Size())
		return newDst, nil
	}
	// See if we have Move available
	if doMove := fdst.Features().Move; doMove != nil && (SameConfig(src.Fs(), fdst) || (SameRemoteType(src.Fs(), fdst) && fdst.Features().ServerSideAcrossConfigs)) {
		// Delete destination if it exists and is not the same file as src (could be same file while seemingly different if the remote is case insensitive)
		if dst != nil && !SameObject(src, dst) {
			err = DeleteFile(ctx, dst)
			if err != nil {
				return newDst, err
			}
		}
		// Move dst <- src
		newDst, err = doMove(ctx, src, remote)
		switch err {
		case nil:
			// server-side move worked - nothing more to do
			if newDst != nil && src.String() != newDst.String() {
				fs.Infof(src, "Moved (server-side) to: %s", newDst.String())
			} else {
				fs.Infof(src, "Moved (server-side)")
			}
			return newDst, nil
		case fs.ErrorCantMove:
			// fall through to the copy+delete below
			fs.Debugf(src, "Can't move, switching to copy")
		default:
			err = fs.CountError(err)
			fs.Errorf(src, "Couldn't move: %v", err)
			return newDst, err
		}
	}
	// Move not found or didn't work so copy dst <- src
	newDst, err = Copy(ctx, fdst, dst, remote, src)
	if err != nil {
		fs.Errorf(src, "Not deleting source as copy failed: %v", err)
		return newDst, err
	}
	// Delete src if no error on copy
	return newDst, DeleteFile(ctx, src)
}
2020-10-13 21:43:40 +00:00
// CanServerSideMove returns true if fdst support server-side moves or
// server-side copies
2017-01-10 20:03:55 +00:00
//
// Some remotes simulate rename by server-side copy and delete, so include
// remotes that implements either Mover or Copier.
2018-01-12 16:30:54 +00:00
func CanServerSideMove ( fdst fs . Fs ) bool {
2017-01-13 17:21:47 +00:00
canMove := fdst . Features ( ) . Move != nil
canCopy := fdst . Features ( ) . Copy != nil
2017-01-10 20:03:55 +00:00
return canMove || canCopy
}
2019-03-10 16:50:28 +00:00
// SuffixName adds the current --suffix to the remote, obeying
// --suffix-keep-extension if set
2020-11-05 11:33:32 +00:00
func SuffixName ( ctx context . Context , remote string ) string {
ci := fs . GetConfig ( ctx )
if ci . Suffix == "" {
2019-03-10 16:50:28 +00:00
return remote
}
2020-11-05 11:33:32 +00:00
if ci . SuffixKeepExtension {
2019-03-10 16:50:28 +00:00
ext := path . Ext ( remote )
base := remote [ : len ( remote ) - len ( ext ) ]
2020-11-05 11:33:32 +00:00
return base + ci . Suffix + ext
2019-03-10 16:50:28 +00:00
}
2020-11-05 11:33:32 +00:00
return remote + ci . Suffix
2019-03-10 16:50:28 +00:00
}
2018-01-12 16:30:54 +00:00
// DeleteFileWithBackupDir deletes a single file respecting --dry-run
2017-01-10 21:47:03 +00:00
// and accumulating stats and errors.
//
// If backupDir is set then it moves the file to there instead of
// deleting
2019-06-17 08:34:30 +00:00
func DeleteFileWithBackupDir ( ctx context . Context , dst fs . Object , backupDir fs . Fs ) ( err error ) {
2020-11-05 11:33:32 +00:00
ci := fs . GetConfig ( ctx )
2019-07-22 19:11:46 +00:00
tr := accounting . Stats ( ctx ) . NewCheckingTransfer ( dst )
defer func ( ) {
2020-11-05 16:59:59 +00:00
tr . Done ( ctx , err )
2019-07-22 19:11:46 +00:00
} ( )
2019-07-18 10:13:54 +00:00
numDeletes := accounting . Stats ( ctx ) . Deletes ( 1 )
2020-11-05 11:33:32 +00:00
if ci . MaxDelete != - 1 && numDeletes > ci . MaxDelete {
2018-01-22 18:53:18 +00:00
return fserrors . FatalError ( errors . New ( "--max-delete threshold reached" ) )
}
2020-03-20 18:43:29 +00:00
action , actioned := "delete" , "Deleted"
2017-01-10 21:47:03 +00:00
if backupDir != nil {
2020-03-20 18:43:29 +00:00
action , actioned = "move into backup dir" , "Moved into backup dir"
2017-01-10 21:47:03 +00:00
}
2020-06-05 15:13:10 +00:00
skip := SkipDestructive ( ctx , dst , action )
if skip {
2020-03-20 18:43:29 +00:00
// do nothing
2017-01-10 21:47:03 +00:00
} else if backupDir != nil {
2019-06-23 03:50:09 +00:00
err = MoveBackupDir ( ctx , backupDir , dst )
2017-01-10 21:47:03 +00:00
} else {
2019-06-17 08:34:30 +00:00
err = dst . Remove ( ctx )
2017-01-10 21:47:03 +00:00
}
if err != nil {
2018-01-12 16:30:54 +00:00
fs . Errorf ( dst , "Couldn't %s: %v" , action , err )
2019-11-18 14:13:02 +00:00
err = fs . CountError ( err )
2020-06-05 15:13:10 +00:00
} else if ! skip {
2018-01-12 16:30:54 +00:00
fs . Infof ( dst , actioned )
2016-03-05 16:10:51 +00:00
}
2016-06-25 13:27:44 +00:00
return err
2016-03-05 16:10:51 +00:00
}
// DeleteFile deletes a single file respecting --dry-run and
// accumulating stats and errors.
//
// The file is always deleted - use DeleteFileWithBackupDir to move it
// to --backup-dir instead.
func DeleteFile(ctx context.Context, dst fs.Object) (err error) {
	return DeleteFileWithBackupDir(ctx, dst, nil)
}
// DeleteFilesWithBackupDir removes all the files passed in the
// channel
//
// If backupDir is set the files will be placed into that directory
// instead of being deleted.
func DeleteFilesWithBackupDir(ctx context.Context, toBeDeleted fs.ObjectsChan, backupDir fs.Fs) error {
	var wg sync.WaitGroup
	ci := fs.GetConfig(ctx)
	// One worker goroutine per configured transfer drains the channel.
	wg.Add(ci.Transfers)
	// Shared counters, incremented atomically by the workers.
	var errorCount int32
	var fatalErrorCount int32
	for i := 0; i < ci.Transfers; i++ {
		go func() {
			defer wg.Done()
			for dst := range toBeDeleted {
				err := DeleteFileWithBackupDir(ctx, dst, backupDir)
				if err != nil {
					atomic.AddInt32(&errorCount, 1)
					if fserrors.IsFatalError(err) {
						// Stop this worker on a fatal error (e.g.
						// --max-delete threshold reached).
						// NOTE(review): remaining items may be left
						// unread on the channel once all workers
						// have bailed out — confirm senders can't
						// block forever in that case.
						fs.Errorf(nil, "Got fatal error on delete: %s", err)
						atomic.AddInt32(&fatalErrorCount, 1)
						return
					}
				}
			}
		}()
	}
	fs.Debugf(nil, "Waiting for deletions to finish")
	wg.Wait()
	// All workers have exited, so plain reads of the counters are safe.
	if errorCount > 0 {
		err := errors.Errorf("failed to delete %d files", errorCount)
		if fatalErrorCount > 0 {
			return fserrors.FatalError(err)
		}
		return err
	}
	return nil
}
// DeleteFiles removes all the files passed in the channel
func DeleteFiles(ctx context.Context, toBeDeleted fs.ObjectsChan) error {
	// Thin wrapper: delete with no backup dir configured.
	return DeleteFilesWithBackupDir(ctx, toBeDeleted, nil)
}
2019-02-11 01:36:47 +00:00
// SameRemoteType returns true if fdst and fsrc are the same type
func SameRemoteType ( fdst , fsrc fs . Info ) bool {
return fmt . Sprintf ( "%T" , fdst ) == fmt . Sprintf ( "%T" , fsrc )
}
2017-01-11 14:59:53 +00:00
// SameConfig returns true if fdst and fsrc are using the same config
// file entry
2018-01-12 16:30:54 +00:00
func SameConfig ( fdst , fsrc fs . Info ) bool {
2017-01-11 14:59:53 +00:00
return fdst . Name ( ) == fsrc . Name ( )
}
2020-11-16 03:04:29 +00:00
// SameConfigArr returns true if any of []fsrcs has same config file entry with fdst
func SameConfigArr ( fdst fs . Info , fsrcs [ ] fs . Fs ) bool {
for _ , fsrc := range fsrcs {
if fdst . Name ( ) == fsrc . Name ( ) {
return true
}
}
return false
}
2015-09-22 17:47:16 +00:00
// Same returns true if fdst and fsrc point to the same underlying Fs
2018-01-12 16:30:54 +00:00
func Same ( fdst , fsrc fs . Info ) bool {
2019-02-14 12:06:26 +00:00
return SameConfig ( fdst , fsrc ) && strings . Trim ( fdst . Root ( ) , "/" ) == strings . Trim ( fsrc . Root ( ) , "/" )
2015-09-01 19:50:28 +00:00
}
2019-06-23 03:52:09 +00:00
// fixRoot returns the Root with a trailing / if not empty. It is
// aware of case insensitive filesystems.
func fixRoot ( f fs . Info ) string {
s := strings . Trim ( filepath . ToSlash ( f . Root ( ) ) , "/" )
if s != "" {
s += "/"
}
if f . Features ( ) . CaseInsensitive {
s = strings . ToLower ( s )
}
return s
}
2016-07-11 10:36:46 +00:00
// Overlapping returns true if fdst and fsrc point to the same
2017-01-11 14:59:53 +00:00
// underlying Fs and they overlap.
2018-01-12 16:30:54 +00:00
func Overlapping ( fdst , fsrc fs . Info ) bool {
2017-01-11 14:59:53 +00:00
if ! SameConfig ( fdst , fsrc ) {
return false
}
2019-06-23 03:52:09 +00:00
fdstRoot := fixRoot ( fdst )
fsrcRoot := fixRoot ( fsrc )
2017-01-11 14:59:53 +00:00
return strings . HasPrefix ( fdstRoot , fsrcRoot ) || strings . HasPrefix ( fsrcRoot , fdstRoot )
2016-07-11 10:36:46 +00:00
}
2019-06-23 03:52:09 +00:00
// SameDir returns true if fdst and fsrc point to the same
// underlying Fs and they are the same directory.
func SameDir ( fdst , fsrc fs . Info ) bool {
if ! SameConfig ( fdst , fsrc ) {
return false
}
fdstRoot := fixRoot ( fdst )
fsrcRoot := fixRoot ( fsrc )
return fdstRoot == fsrcRoot
}
2020-06-12 16:01:23 +00:00
// Retry runs fn up to maxTries times if it returns a retriable error
2021-03-11 14:44:01 +00:00
func Retry ( ctx context . Context , o interface { } , maxTries int , fn func ( ) error ) ( err error ) {
2020-06-12 16:01:23 +00:00
for tries := 1 ; tries <= maxTries ; tries ++ {
// Call the function which might error
err = fn ( )
if err == nil {
break
}
// Retry if err returned a retry error
2021-03-11 14:44:01 +00:00
if fserrors . ContextError ( ctx , & err ) {
break
}
2020-06-12 16:01:23 +00:00
if fserrors . IsRetryError ( err ) || fserrors . ShouldRetry ( err ) {
fs . Debugf ( o , "Received error: %v - low level retry %d/%d" , err , tries , maxTries )
continue
}
break
}
return err
}
2015-09-22 17:47:16 +00:00
// ListFn lists the Fs to the supplied function
2014-03-28 17:56:04 +00:00
//
// Lists in parallel which may get them out of order
2019-06-17 08:34:30 +00:00
func ListFn ( ctx context . Context , f fs . Fs , fn func ( fs . Object ) ) error {
2020-11-05 11:33:32 +00:00
ci := fs . GetConfig ( ctx )
return walk . ListR ( ctx , f , "" , false , ci . MaxDepth , walk . ListObjects , func ( entries fs . DirEntries ) error {
2017-02-24 22:51:01 +00:00
entries . ForObject ( fn )
return nil
} )
2014-03-28 17:56:04 +00:00
}
// mutex for synchronized output (used by syncFprintf)
var outMutex sync.Mutex

// SyncPrintf is a global var holding the Printf function used in syncFprintf so that it can be overridden
// Note, despite name, does not provide sync and should not be called directly
// Call syncFprintf, which provides sync
var SyncPrintf = func(format string, a ...interface{}) {
	fmt.Printf(format, a...)
}
2015-02-28 15:30:40 +00:00
// Synchronized fmt.Fprintf
2015-09-22 06:31:12 +00:00
//
// Ignores errors from Fprintf
2020-12-18 12:45:58 +00:00
//
// Updated to print to terminal if no writer is defined
// This special behavior is used to allow easier replacement of the print to terminal code by progress
2015-09-22 06:31:12 +00:00
func syncFprintf ( w io . Writer , format string , a ... interface { } ) {
2015-02-28 15:30:40 +00:00
outMutex . Lock ( )
defer outMutex . Unlock ( )
2021-07-07 15:34:16 +00:00
if w == nil || w == os . Stdout {
2020-12-18 12:45:58 +00:00
SyncPrintf ( format , a ... )
} else {
_ , _ = fmt . Fprintf ( w , format , a ... )
}
2015-02-28 15:30:40 +00:00
}
2021-04-02 14:11:21 +00:00
// SizeString make string representation of size for output
//
// Optional human-readable format including a binary suffix
func SizeString ( size int64 , humanReadable bool ) string {
if humanReadable {
if size < 0 {
return "-" + fs . SizeSuffix ( - size ) . String ( )
}
return fs . SizeSuffix ( size ) . String ( )
}
return strconv . FormatInt ( size , 10 )
}
// SizeStringField make string representation of size for output in fixed width field
//
// Optional human-readable format including a binary suffix
// Argument rawWidth is used to format field with of raw value. When humanReadable
// option the width is hard coded to 9, since SizeSuffix strings have precision 3
// and longest value will be "999.999Ei". This way the width can be optimized
// depending to the humanReadable option. To always use a longer width the return
// value can always be fed into another format string with a specific field with.
func SizeStringField(size int64, humanReadable bool, rawWidth int) string {
	width := rawWidth
	if humanReadable {
		width = 9 // longest SizeSuffix is "999.999Ei"
	}
	return fmt.Sprintf("%*s", width, SizeString(size, humanReadable))
}
// CountString make string representation of count for output
//
// Optional human-readable format including a decimal suffix
func CountString(count int64, humanReadable bool) string {
	if !humanReadable {
		return strconv.FormatInt(count, 10)
	}
	if count >= 0 {
		return fs.CountSuffix(count).String()
	}
	// Negate, format, and restore the sign by hand since
	// CountSuffix formats magnitudes
	return "-" + fs.CountSuffix(-count).String()
}
// CountStringField make string representation of count for output in fixed width field
//
// Similar to SizeStringField, but human readable with decimal prefix and field width 8
// since there is no 'i' in the decimal prefix symbols (e.g. "999.999E")
func CountStringField(count int64, humanReadable bool, rawWidth int) string {
	width := rawWidth
	if humanReadable {
		width = 8 // longest CountSuffix is "999.999E"
	}
	return fmt.Sprintf("%*s", width, CountString(count, humanReadable))
}
2015-09-15 14:46:06 +00:00
// List the Fs to the supplied writer
2014-07-12 11:09:20 +00:00
//
2015-11-24 16:54:12 +00:00
// Shows size and path - obeys includes and excludes
2014-07-12 11:09:20 +00:00
//
// Lists in parallel which may get them out of order
2019-06-17 08:34:30 +00:00
func List ( ctx context . Context , f fs . Fs , w io . Writer ) error {
2021-04-02 14:11:21 +00:00
ci := fs . GetConfig ( ctx )
2019-06-17 08:34:30 +00:00
return ListFn ( ctx , f , func ( o fs . Object ) {
2021-04-02 14:11:21 +00:00
syncFprintf ( w , "%s %s\n" , SizeStringField ( o . Size ( ) , ci . HumanReadable , 9 ) , o . Remote ( ) )
2014-07-12 11:09:20 +00:00
} )
}
2015-09-22 17:47:16 +00:00
// ListLong lists the Fs to the supplied writer
2014-07-12 11:09:20 +00:00
//
2015-11-24 16:54:12 +00:00
// Shows size, mod time and path - obeys includes and excludes
2014-07-12 11:09:20 +00:00
//
// Lists in parallel which may get them out of order
2019-06-17 08:34:30 +00:00
func ListLong ( ctx context . Context , f fs . Fs , w io . Writer ) error {
2021-04-02 14:11:21 +00:00
ci := fs . GetConfig ( ctx )
2019-06-17 08:34:30 +00:00
return ListFn ( ctx , f , func ( o fs . Object ) {
2019-07-22 19:11:46 +00:00
tr := accounting . Stats ( ctx ) . NewCheckingTransfer ( o )
defer func ( ) {
2020-11-05 16:59:59 +00:00
tr . Done ( ctx , nil )
2019-07-22 19:11:46 +00:00
} ( )
2019-06-17 08:34:30 +00:00
modTime := o . ModTime ( ctx )
2021-04-02 14:11:21 +00:00
syncFprintf ( w , "%s %s %s\n" , SizeStringField ( o . Size ( ) , ci . HumanReadable , 9 ) , modTime . Local ( ) . Format ( "2006-01-02 15:04:05.000000000" ) , o . Remote ( ) )
2014-07-12 11:09:20 +00:00
} )
}
2018-01-06 17:53:37 +00:00
// hashSum returns the human readable hash for ht passed in. This may
2019-10-26 19:27:33 +00:00
// be UNSUPPORTED or ERROR. If it isn't returning a valid hash it will
// return an error.
2020-12-18 12:45:58 +00:00
func hashSum ( ctx context . Context , ht hash . Type , downloadFlag bool , o fs . Object ) ( string , error ) {
var sum string
2019-07-22 19:11:46 +00:00
var err error
2020-12-18 12:45:58 +00:00
// If downloadFlag is true, download and hash the file.
// If downloadFlag is false, call o.Hash asking the remote for the hash
if downloadFlag {
// Setup: Define accounting, open the file with NewReOpen to provide restarts, account for the transfer, and setup a multi-hasher with the appropriate type
// Execution: io.Copy file to hasher, get hash and encode in hex
tr := accounting . Stats ( ctx ) . NewTransfer ( o )
defer func ( ) {
tr . Done ( ctx , err )
} ( )
// Open with NewReOpen to provide restarts
var options [ ] fs . OpenOption
for _ , option := range fs . GetConfig ( ctx ) . DownloadHeaders {
options = append ( options , option )
}
in , err := NewReOpen ( ctx , o , fs . GetConfig ( ctx ) . LowLevelRetries , options ... )
if err != nil {
return "ERROR" , errors . Wrapf ( err , "Failed to open file %v" , o )
}
// Account and buffer the transfer
in = tr . Account ( ctx , in ) . WithBuffer ( )
// Setup hasher
hasher , err := hash . NewMultiHasherTypes ( hash . NewHashSet ( ht ) )
if err != nil {
return "UNSUPPORTED" , errors . Wrap ( err , "Hash unsupported" )
}
// Copy to hasher, downloading the file and passing directly to hash
_ , err = io . Copy ( hasher , in )
if err != nil {
return "ERROR" , errors . Wrap ( err , "Failed to copy file to hasher" )
}
// Get hash and encode as hex
byteSum , err := hasher . Sum ( ht )
if err != nil {
return "ERROR" , errors . Wrap ( err , "Hasher returned an error" )
}
sum = hex . EncodeToString ( byteSum )
} else {
tr := accounting . Stats ( ctx ) . NewCheckingTransfer ( o )
defer func ( ) {
tr . Done ( ctx , err )
} ( )
sum , err = o . Hash ( ctx , ht )
if err == hash . ErrUnsupported {
return "UNSUPPORTED" , errors . Wrap ( err , "Hash unsupported" )
} else if err != nil {
return "ERROR" , errors . Wrapf ( err , "Failed to get hash %v from backed: %v" , ht , err )
}
2018-01-06 17:53:37 +00:00
}
2020-12-18 12:45:58 +00:00
return sum , nil
2014-07-12 11:09:20 +00:00
}
// HashLister does an md5sum equivalent for the hash type passed in
// Updated to handle both standard hex encoding and base64
// Updated to perform multiple hashes concurrently
func HashLister(ctx context.Context, ht hash.Type, outputBase64 bool, downloadFlag bool, f fs.Fs, w io.Writer) error {
	// Semaphore limiting in-flight hash calculations to --transfers
	concurrencyControl := make(chan struct{}, fs.GetConfig(ctx).Transfers)
	var wg sync.WaitGroup
	err := ListFn(ctx, f, func(o fs.Object) {
		wg.Add(1)
		concurrencyControl <- struct{}{} // blocks while the semaphore is full
		go func() {
			defer func() {
				<-concurrencyControl
				wg.Done()
			}()
			// err here is local to the goroutine and deliberately does
			// not propagate into HashLister's return value - failures
			// are counted and logged per object instead
			sum, err := hashSum(ctx, ht, downloadFlag, o)
			if outputBase64 && err == nil {
				// Re-encode the hex digest as URL-safe base64
				hexBytes, _ := hex.DecodeString(sum)
				sum = base64.URLEncoding.EncodeToString(hexBytes)
				// hash.Width is in hex characters, so bytes = width/2
				width := base64.URLEncoding.EncodedLen(hash.Width(ht) / 2)
				syncFprintf(w, "%*s %s\n", width, sum, o.Remote())
			} else {
				syncFprintf(w, "%*s %s\n", hash.Width(ht), sum, o.Remote())
			}
			if err != nil {
				err = fs.CountError(err)
				fs.Errorf(o, "%v", err)
			}
		}()
	})
	// Wait for all outstanding hash goroutines before returning
	wg.Wait()
	return err
}
2015-10-02 18:48:48 +00:00
// Count counts the objects and their sizes in the Fs
2015-11-24 16:54:12 +00:00
//
// Obeys includes and excludes
2019-06-17 08:34:30 +00:00
func Count ( ctx context . Context , f fs . Fs ) ( objects int64 , size int64 , err error ) {
err = ListFn ( ctx , f , func ( o fs . Object ) {
2015-10-02 18:48:48 +00:00
atomic . AddInt64 ( & objects , 1 )
2019-05-28 18:51:25 +00:00
objectSize := o . Size ( )
if objectSize > 0 {
atomic . AddInt64 ( & size , objectSize )
}
2015-10-02 18:48:48 +00:00
} )
return
}
2017-02-24 22:51:01 +00:00
// ConfigMaxDepth returns the depth to use for a recursive or non recursive listing.
2020-11-05 11:33:32 +00:00
func ConfigMaxDepth ( ctx context . Context , recursive bool ) int {
ci := fs . GetConfig ( ctx )
depth := ci . MaxDepth
2017-02-24 22:51:01 +00:00
if ! recursive && depth < 0 {
depth = 1
}
return depth
}
2015-09-22 17:47:16 +00:00
// ListDir lists the directories/buckets/containers in the Fs to the supplied writer
2019-06-17 08:34:30 +00:00
func ListDir ( ctx context . Context , f fs . Fs , w io . Writer ) error {
2021-04-02 14:11:21 +00:00
ci := fs . GetConfig ( ctx )
2020-11-05 11:33:32 +00:00
return walk . ListR ( ctx , f , "" , false , ConfigMaxDepth ( ctx , false ) , walk . ListDirs , func ( entries fs . DirEntries ) error {
2018-01-12 16:30:54 +00:00
entries . ForDir ( func ( dir fs . Directory ) {
2017-02-24 22:51:01 +00:00
if dir != nil {
2021-04-02 14:11:21 +00:00
syncFprintf ( w , "%s %13s %s %s\n" , SizeStringField ( dir . Size ( ) , ci . HumanReadable , 12 ) , dir . ModTime ( ctx ) . Local ( ) . Format ( "2006-01-02 15:04:05" ) , CountStringField ( dir . Items ( ) , ci . HumanReadable , 9 ) , dir . Remote ( ) )
2017-02-24 22:51:01 +00:00
}
} )
return nil
} )
2014-03-28 17:56:04 +00:00
}
2015-09-22 17:47:16 +00:00
// Mkdir makes a destination directory or container
2019-06-17 08:34:30 +00:00
func Mkdir ( ctx context . Context , f fs . Fs , dir string ) error {
2020-06-05 15:13:10 +00:00
if SkipDestructive ( ctx , fs . LogDirName ( f , dir ) , "make directory" ) {
2016-02-28 19:47:22 +00:00
return nil
}
2018-01-12 16:30:54 +00:00
fs . Debugf ( fs . LogDirName ( f , dir ) , "Making directory" )
2019-06-17 08:34:30 +00:00
err := f . Mkdir ( ctx , dir )
2014-03-28 17:56:04 +00:00
if err != nil {
2019-11-18 14:13:02 +00:00
err = fs . CountError ( err )
2014-03-28 17:56:04 +00:00
return err
}
return nil
}
2016-02-25 20:05:34 +00:00
// TryRmdir removes a container but not if not empty. It doesn't
// count errors but may return one.
2019-06-17 08:34:30 +00:00
func TryRmdir ( ctx context . Context , f fs . Fs , dir string ) error {
2020-10-28 12:54:31 +00:00
accounting . Stats ( ctx ) . DeletedDirs ( 1 )
2020-06-05 15:13:10 +00:00
if SkipDestructive ( ctx , fs . LogDirName ( f , dir ) , "remove directory" ) {
2016-02-25 20:05:34 +00:00
return nil
2014-03-28 17:56:04 +00:00
}
2021-07-23 19:10:27 +00:00
fs . Infof ( fs . LogDirName ( f , dir ) , "Removing directory" )
2019-06-17 08:34:30 +00:00
return f . Rmdir ( ctx , dir )
2016-02-25 20:05:34 +00:00
}
// Rmdir removes a container but not if not empty
2019-06-17 08:34:30 +00:00
func Rmdir ( ctx context . Context , f fs . Fs , dir string ) error {
err := TryRmdir ( ctx , f , dir )
2016-02-25 20:05:34 +00:00
if err != nil {
2019-11-18 14:13:02 +00:00
err = fs . CountError ( err )
2016-02-25 20:05:34 +00:00
return err
}
return err
2014-03-28 17:56:04 +00:00
}
2017-12-07 12:25:56 +00:00
// Purge removes a directory and all of its contents
2020-06-04 21:25:14 +00:00
func Purge ( ctx context . Context , f fs . Fs , dir string ) ( err error ) {
2015-11-08 14:16:00 +00:00
doFallbackPurge := true
2020-06-04 21:25:14 +00:00
if doPurge := f . Features ( ) . Purge ; doPurge != nil {
doFallbackPurge = false
2020-10-28 12:54:31 +00:00
accounting . Stats ( ctx ) . DeletedDirs ( 1 )
2020-06-04 21:25:14 +00:00
if SkipDestructive ( ctx , fs . LogDirName ( f , dir ) , "purge directory" ) {
return nil
}
err = doPurge ( ctx , dir )
if err == fs . ErrorCantPurge {
doFallbackPurge = true
2014-03-28 17:56:04 +00:00
}
2015-11-08 14:16:00 +00:00
}
if doFallbackPurge {
2014-07-25 17:19:49 +00:00
// DeleteFiles and Rmdir observe --dry-run
2019-06-17 08:34:30 +00:00
err = DeleteFiles ( ctx , listToChan ( ctx , f , dir ) )
2016-06-25 13:27:44 +00:00
if err != nil {
return err
}
2019-06-17 08:34:30 +00:00
err = Rmdirs ( ctx , f , dir , false )
2014-07-25 17:19:49 +00:00
}
if err != nil {
2019-11-18 14:13:02 +00:00
err = fs . CountError ( err )
2014-07-25 17:19:49 +00:00
return err
2014-03-28 17:56:04 +00:00
}
return nil
}
2015-12-02 22:25:32 +00:00
// Delete removes all the contents of a container. Unlike Purge, it
// obeys includes and excludes.
2019-06-17 08:34:30 +00:00
func Delete ( ctx context . Context , f fs . Fs ) error {
2020-11-05 11:33:32 +00:00
ci := fs . GetConfig ( ctx )
delChan := make ( fs . ObjectsChan , ci . Transfers )
2016-06-25 13:27:44 +00:00
delErr := make ( chan error , 1 )
2015-12-02 22:25:32 +00:00
go func ( ) {
2019-06-17 08:34:30 +00:00
delErr <- DeleteFiles ( ctx , delChan )
2015-12-02 22:25:32 +00:00
} ( )
2019-06-17 08:34:30 +00:00
err := ListFn ( ctx , f , func ( o fs . Object ) {
2018-08-04 10:16:43 +00:00
delChan <- o
2015-12-02 22:25:32 +00:00
} )
2018-08-04 10:16:43 +00:00
close ( delChan )
2016-06-25 13:27:44 +00:00
delError := <- delErr
if err == nil {
err = delError
}
2015-12-02 22:25:32 +00:00
return err
}
2016-01-31 12:58:41 +00:00
2017-02-24 22:51:01 +00:00
// listToChan will transfer all objects in the listing to the output
2016-04-21 19:06:21 +00:00
//
// If an error occurs, the error will be logged, and it will close the
// channel.
//
// If the error was ErrorDirNotFound then it will be ignored
2019-06-17 08:34:30 +00:00
func listToChan ( ctx context . Context , f fs . Fs , dir string ) fs . ObjectsChan {
2020-11-05 11:33:32 +00:00
ci := fs . GetConfig ( ctx )
o := make ( fs . ObjectsChan , ci . Checkers )
2016-04-21 19:06:21 +00:00
go func ( ) {
defer close ( o )
2020-11-05 11:33:32 +00:00
err := walk . ListR ( ctx , f , dir , true , ci . MaxDepth , walk . ListObjects , func ( entries fs . DirEntries ) error {
2018-01-12 16:30:54 +00:00
entries . ForObject ( func ( obj fs . Object ) {
2017-02-24 22:51:01 +00:00
o <- obj
} )
return nil
} )
2019-01-21 16:53:05 +00:00
if err != nil && err != fs . ErrorDirNotFound {
err = errors . Wrap ( err , "failed to list" )
2019-11-18 14:13:02 +00:00
err = fs . CountError ( err )
2019-01-21 16:53:05 +00:00
fs . Errorf ( nil , "%v" , err )
}
2016-04-21 19:06:21 +00:00
} ( )
return o
}
2016-07-01 15:35:36 +00:00
// CleanUp removes the trash for the Fs
2019-06-17 08:34:30 +00:00
func CleanUp ( ctx context . Context , f fs . Fs ) error {
2017-01-13 17:21:47 +00:00
doCleanUp := f . Features ( ) . CleanUp
if doCleanUp == nil {
2016-07-01 15:35:36 +00:00
return errors . Errorf ( "%v doesn't support cleanup" , f )
}
2020-06-05 15:13:10 +00:00
if SkipDestructive ( ctx , f , "clean up old files" ) {
2016-07-02 15:58:50 +00:00
return nil
}
2019-06-17 08:34:30 +00:00
return doCleanUp ( ctx )
2016-07-01 15:35:36 +00:00
}
// readCloser wraps a Reader and a Closer together into an io.ReadCloser.
// Reads are served by the embedded Reader while Close is forwarded to
// the embedded Closer (so a wrapped stream still closes the original).
type readCloser struct {
	io.Reader
	io.Closer
}
// Cat any files to the io.Writer
//
// if offset == 0 it will be ignored
// if offset > 0 then the file will be seeked to that offset
// if offset < 0 then the file will be seeked that far from the end
//
// if count < 0 then it will be ignored
// if count >= 0 then only that many characters will be output
func Cat(ctx context.Context, f fs.Fs, w io.Writer, offset, count int64) error {
	// mu serialises writes to w from the parallel listing callbacks
	var mu sync.Mutex
	ci := fs.GetConfig(ctx)
	return ListFn(ctx, f, func(o fs.Object) {
		var err error
		tr := accounting.Stats(ctx).NewTransfer(o)
		defer func() {
			tr.Done(ctx, err)
		}()
		// Build a byte range from offset/count; End == -1 means "to the end"
		opt := fs.RangeOption{Start: offset, End: -1}
		size := o.Size()
		if opt.Start < 0 {
			// Negative offset counts back from the end of the file
			opt.Start += size
		}
		if count >= 0 {
			opt.End = opt.Start + count - 1
		}
		var options []fs.OpenOption
		// Only pass a range if one was actually requested
		if opt.Start > 0 || opt.End >= 0 {
			options = append(options, &opt)
		}
		for _, option := range ci.DownloadHeaders {
			options = append(options, option)
		}
		in, err := o.Open(ctx, options...)
		if err != nil {
			err = fs.CountError(err)
			fs.Errorf(o, "Failed to open: %v", err)
			return
		}
		// NOTE(review): in is never explicitly closed on the success
		// path - confirm the accounting wrapper/process exit makes
		// this acceptable
		if count >= 0 {
			// Limit the bytes read but still close the underlying stream
			in = &readCloser{Reader: &io.LimitedReader{R: in, N: count}, Closer: in}
		}
		in = tr.Account(ctx, in).WithBuffer() // account and buffer the transfer
		// take the lock just before we output stuff, so at the last possible moment
		mu.Lock()
		defer mu.Unlock()
		_, err = io.Copy(w, in)
		if err != nil {
			err = fs.CountError(err)
			fs.Errorf(o, "Failed to send to output: %v", err)
		}
	})
}
// Rcat reads data from the Reader until EOF and uploads it to a file on remote
func Rcat(ctx context.Context, fdst fs.Fs, dstFileName string, in io.ReadCloser, modTime time.Time) (dst fs.Object, err error) {
	ci := fs.GetConfig(ctx)
	// Size is unknown (-1) since we are streaming
	tr := accounting.Stats(ctx).NewTransferRemoteSize(dstFileName, -1)
	defer func() {
		tr.Done(ctx, err)
	}()
	in = tr.Account(ctx, in).WithBuffer()

	// Count the bytes as they stream past so the final size is known
	readCounter := readers.NewCountingReader(in)
	var trackingIn io.Reader
	var hasher *hash.MultiHasher
	var options []fs.OpenOption
	if !ci.IgnoreChecksum {
		// Hash the stream on the fly so the upload can be verified
		hashes := hash.NewHashSet(fdst.Hashes().GetOne()) // just pick one hash
		hashOption := &fs.HashesOption{Hashes: hashes}
		options = append(options, hashOption)
		hasher, err = hash.NewMultiHasherTypes(hashes)
		if err != nil {
			return nil, err
		}
		trackingIn = io.TeeReader(readCounter, hasher)
	} else {
		trackingIn = readCounter
	}
	for _, option := range ci.UploadHeaders {
		options = append(options, option)
	}

	// compare verifies the uploaded object against the counted size
	// and (if enabled) the computed hash
	compare := func(dst fs.Object) error {
		var sums map[hash.Type]string
		if hasher != nil {
			sums = hasher.Sums()
		}
		src := object.NewStaticObjectInfo(dstFileName, modTime, int64(readCounter.BytesRead()), false, sums, fdst)
		if !Equal(ctx, src, dst) {
			err = errors.Errorf("corrupted on transfer")
			err = fs.CountError(err)
			fs.Errorf(dst, "%v", err)
			return err
		}
		return nil
	}
	// check if file small enough for direct upload
	buf := make([]byte, ci.StreamingUploadCutoff)
	if n, err := io.ReadFull(trackingIn, buf); err == io.EOF || err == io.ErrUnexpectedEOF {
		fs.Debugf(fdst, "File to upload is small (%d bytes), uploading instead of streaming", n)
		src := object.NewMemoryObject(dstFileName, modTime, buf[:n])
		return Copy(ctx, fdst, nil, dstFileName, src)
	}
	// Make a new ReadCloser with the bits we've already read
	in = &readCloser{
		Reader: io.MultiReader(bytes.NewReader(buf), trackingIn),
		Closer: in,
	}

	fStreamTo := fdst
	canStream := fdst.Features().PutStream != nil
	if !canStream {
		// Spool via a temporary local backend when the target can't stream
		fs.Debugf(fdst, "Target remote doesn't support streaming uploads, creating temporary local FS to spool file")
		tmpLocalFs, err := fs.TemporaryLocalFs(ctx)
		if err != nil {
			return nil, errors.Wrap(err, "Failed to create temporary local FS to spool file")
		}
		defer func() {
			// Best-effort cleanup of the spool area
			err := Purge(ctx, tmpLocalFs, "")
			if err != nil {
				fs.Infof(tmpLocalFs, "Failed to cleanup temporary FS: %v", err)
			}
		}()
		fStreamTo = tmpLocalFs
	}

	if SkipDestructive(ctx, dstFileName, "upload from pipe") {
		// prevents "broken pipe" errors
		_, err = io.Copy(ioutil.Discard, in)
		return nil, err
	}

	objInfo := object.NewStaticObjectInfo(dstFileName, modTime, -1, false, nil, nil)
	if dst, err = fStreamTo.Features().PutStream(ctx, in, objInfo, options...); err != nil {
		return dst, err
	}
	if err = compare(dst); err != nil {
		return dst, err
	}
	if !canStream {
		// copy dst (which is the local object we have just streamed to) to the remote
		return Copy(ctx, fdst, nil, dstFileName, dst)
	}
	return dst, nil
}
2018-03-29 07:10:19 +00:00
// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
2020-05-31 21:18:01 +00:00
func PublicLink ( ctx context . Context , f fs . Fs , remote string , expire fs . Duration , unlink bool ) ( string , error ) {
2018-03-29 07:10:19 +00:00
doPublicLink := f . Features ( ) . PublicLink
if doPublicLink == nil {
return "" , errors . Errorf ( "%v doesn't support public links" , f )
}
2020-05-31 21:18:01 +00:00
return doPublicLink ( ctx , remote , expire , unlink )
2018-03-29 07:10:19 +00:00
}
// Rmdirs removes any empty directories (or directories only
// containing empty directories) under f, including f.
//
// Rmdirs obeys the filters
func Rmdirs(ctx context.Context, f fs.Fs, dir string, leaveRoot bool) error {
	ci := fs.GetConfig(ctx)
	fi := filter.GetConfig(ctx)
	// dirEmpty maps directory path -> "is empty so far"
	dirEmpty := make(map[string]bool)
	// The root is only a deletion candidate if leaveRoot is false
	dirEmpty[dir] = !leaveRoot
	err := walk.Walk(ctx, f, dir, false, ci.MaxDepth, func(dirPath string, entries fs.DirEntries, err error) error {
		if err != nil {
			// Count and log listing errors but keep walking
			err = fs.CountError(err)
			fs.Errorf(f, "Failed to list %q: %v", dirPath, err)
			return nil
		}
		for _, entry := range entries {
			switch x := entry.(type) {
			case fs.Directory:
				// add a new directory as empty
				dir := x.Remote()
				_, found := dirEmpty[dir]
				if !found {
					dirEmpty[dir] = true
				}
			case fs.Object:
				// mark the parents of the file as being non-empty
				dir := x.Remote()
				for dir != "" {
					dir = path.Dir(dir)
					if dir == "." || dir == "/" {
						dir = ""
					}
					empty, found := dirEmpty[dir]
					// End if we reach a directory which is non-empty
					if found && !empty {
						break
					}
					dirEmpty[dir] = false
				}
			}
		}
		return nil
	})
	if err != nil {
		return errors.Wrap(err, "failed to rmdirs")
	}
	// Now delete the empty directories, starting from the longest path
	// (sorted order guarantees children come after their parents, so
	// iterating backwards deletes leaves first)
	var toDelete []string
	for dir, empty := range dirEmpty {
		if empty {
			toDelete = append(toDelete, dir)
		}
	}
	sort.Strings(toDelete)
	for i := len(toDelete) - 1; i >= 0; i-- {
		dir := toDelete[i]
		// If a filter matches the directory then that
		// directory is a candidate for deletion
		if !fi.Include(dir+"/", 0, time.Now()) {
			continue
		}
		err = TryRmdir(ctx, f, dir)
		if err != nil {
			err = fs.CountError(err)
			fs.Errorf(dir, "Failed to rmdir: %v", err)
			return err
		}
	}
	return nil
}
2016-10-23 16:34:17 +00:00
2019-07-08 01:02:53 +00:00
// GetCompareDest sets up --compare-dest
2020-11-16 03:04:29 +00:00
func GetCompareDest ( ctx context . Context ) ( CompareDest [ ] fs . Fs , err error ) {
2020-11-05 11:33:32 +00:00
ci := fs . GetConfig ( ctx )
2020-11-16 03:04:29 +00:00
CompareDest , err = cache . GetArr ( ctx , ci . CompareDest )
2019-07-08 01:02:53 +00:00
if err != nil {
2020-11-05 11:33:32 +00:00
return nil , fserrors . FatalError ( errors . Errorf ( "Failed to make fs for --compare-dest %q: %v" , ci . CompareDest , err ) )
2019-07-08 01:02:53 +00:00
}
return CompareDest , nil
}
// compareDest checks --compare-dest to see if src needs to
// be copied
//
// Returns True if src is in --compare-dest
func compareDest ( ctx context . Context , dst , src fs . Object , CompareDest fs . Fs ) ( NoNeedTransfer bool , err error ) {
var remote string
if dst == nil {
remote = src . Remote ( )
} else {
remote = dst . Remote ( )
}
CompareDestFile , err := CompareDest . NewObject ( ctx , remote )
switch err {
case fs . ErrorObjectNotFound :
return false , nil
case nil :
break
default :
return false , err
}
2021-06-14 20:19:12 +00:00
opt := defaultEqualOpt ( ctx )
opt . updateModTime = false
if equal ( ctx , src , CompareDestFile , opt ) {
2019-07-08 01:02:53 +00:00
fs . Debugf ( src , "Destination found in --compare-dest, skipping" )
return true , nil
}
return false , nil
}
// GetCopyDest sets up --copy-dest
2020-11-16 03:04:29 +00:00
func GetCopyDest ( ctx context . Context , fdst fs . Fs ) ( CopyDest [ ] fs . Fs , err error ) {
2020-11-05 11:33:32 +00:00
ci := fs . GetConfig ( ctx )
2020-11-16 03:04:29 +00:00
CopyDest , err = cache . GetArr ( ctx , ci . CopyDest )
2019-07-08 01:02:53 +00:00
if err != nil {
2020-11-05 11:33:32 +00:00
return nil , fserrors . FatalError ( errors . Errorf ( "Failed to make fs for --copy-dest %q: %v" , ci . CopyDest , err ) )
2019-07-08 01:02:53 +00:00
}
2020-11-16 03:04:29 +00:00
if ! SameConfigArr ( fdst , CopyDest ) {
2019-07-08 01:02:53 +00:00
return nil , fserrors . FatalError ( errors . New ( "parameter to --copy-dest has to be on the same remote as destination" ) )
}
2020-11-16 03:04:29 +00:00
for _ , cf := range CopyDest {
if cf . Features ( ) . Copy == nil {
return nil , fserrors . FatalError ( errors . New ( "can't use --copy-dest on a remote which doesn't support server side copy" ) )
}
2019-07-08 01:02:53 +00:00
}
2020-11-16 03:04:29 +00:00
2019-07-08 01:02:53 +00:00
return CopyDest , nil
}
// copyDest checks --copy-dest to see if src needs to
// be copied
//
// Returns True if src was copied from --copy-dest
func copyDest(ctx context.Context, fdst fs.Fs, dst, src fs.Object, CopyDest, backupDir fs.Fs) (NoNeedTransfer bool, err error) {
	// Look the file up in the --copy-dest remote under the destination
	// name if we have one, otherwise under the source name.
	var remote string
	if dst == nil {
		remote = src.Remote()
	} else {
		remote = dst.Remote()
	}
	CopyDestFile, err := CopyDest.NewObject(ctx, remote)
	switch err {
	case fs.ErrorObjectNotFound:
		// not present in --copy-dest - a normal transfer is needed
		return false, nil
	case nil:
		break
	default:
		return false, err
	}
	// Compare src against the --copy-dest copy without updating modtimes
	opt := defaultEqualOpt(ctx)
	opt.updateModTime = false
	if equal(ctx, src, CopyDestFile, opt) {
		if dst == nil || !Equal(ctx, src, dst) {
			// Destination missing or different - replace it with a
			// server-side copy from --copy-dest, moving any existing
			// destination into --backup-dir first if requested.
			if dst != nil && backupDir != nil {
				err = MoveBackupDir(ctx, backupDir, dst)
				if err != nil {
					return false, errors.Wrap(err, "moving to --backup-dir failed")
				}
				// If successful zero out the dstObj as it is no longer there
				dst = nil
			}
			_, err := Copy(ctx, fdst, dst, remote, CopyDestFile)
			if err != nil {
				// The copy error is logged and swallowed so that a
				// normal transfer is attempted instead.
				fs.Errorf(src, "Destination found in --copy-dest, error copying")
				return false, nil
			}
			fs.Debugf(src, "Destination found in --copy-dest, using server-side copy")
			return true, nil
		}
		// Destination already matches src - nothing to do
		fs.Debugf(src, "Unchanged skipping")
		return true, nil
	}
	fs.Debugf(src, "Destination not found in --copy-dest")
	return false, nil
}
// CompareOrCopyDest checks --compare-dest and --copy-dest to see if src
// does not need to be copied
//
// Returns True if src does not need to be copied
2020-11-16 03:04:29 +00:00
func CompareOrCopyDest ( ctx context . Context , fdst fs . Fs , dst , src fs . Object , CompareOrCopyDest [ ] fs . Fs , backupDir fs . Fs ) ( NoNeedTransfer bool , err error ) {
2020-11-05 11:33:32 +00:00
ci := fs . GetConfig ( ctx )
2020-11-16 03:04:29 +00:00
if len ( ci . CompareDest ) > 0 {
for _ , compareF := range CompareOrCopyDest {
NoNeedTransfer , err := compareDest ( ctx , dst , src , compareF )
if NoNeedTransfer || err != nil {
return NoNeedTransfer , err
}
}
} else if len ( ci . CopyDest ) > 0 {
for _ , copyF := range CompareOrCopyDest {
NoNeedTransfer , err := copyDest ( ctx , fdst , dst , src , copyF , backupDir )
if NoNeedTransfer || err != nil {
return NoNeedTransfer , err
}
}
2019-07-08 01:02:53 +00:00
}
return false , nil
}
2018-01-12 16:30:54 +00:00
// NeedTransfer checks to see if src needs to be copied to dst using
// the current config.
//
// Returns a flag which indicates whether the file needs to be
// transferred or not.
func NeedTransfer(ctx context.Context, dst, src fs.Object) bool {
	ci := fs.GetConfig(ctx)
	if dst == nil {
		fs.Debugf(src, "Need to transfer - File not found at Destination")
		return true
	}
	// If we should ignore existing files, don't transfer
	if ci.IgnoreExisting {
		fs.Debugf(src, "Destination exists, skipping")
		return false
	}
	// If we should upload unconditionally
	if ci.IgnoreTimes {
		fs.Debugf(src, "Transferring unconditionally as --ignore-times is in use")
		return true
	}
	// If UpdateOlder is in effect, skip if dst is newer than src
	if ci.UpdateOlder {
		srcModTime := src.ModTime(ctx)
		dstModTime := dst.ModTime(ctx)
		// dt > 0 means dst is newer than src
		dt := dstModTime.Sub(srcModTime)
		// If have a mutually agreed precision then use that
		modifyWindow := fs.GetModifyWindow(ctx, dst.Fs(), src.Fs())
		if modifyWindow == fs.ModTimeNotSupported {
			// Otherwise use 1 second as a safe default as
			// the resolution of the time a file was
			// uploaded.
			modifyWindow = time.Second
		}
		switch {
		case dt >= modifyWindow:
			// dst significantly newer - keep it
			fs.Debugf(src, "Destination is newer than source, skipping")
			return false
		case dt <= -modifyWindow:
			// dst significantly older - check the contents anyway;
			// force --checksum on for the check and do update modtimes by default
			opt := defaultEqualOpt(ctx)
			opt.forceModTimeMatch = true
			if equal(ctx, src, dst, opt) {
				fs.Debugf(src, "Unchanged skipping")
				return false
			}
		default:
			// mod times within the window - compare contents;
			// Do a size only compare unless --checksum is set
			opt := defaultEqualOpt(ctx)
			opt.sizeOnly = !ci.CheckSum
			if equal(ctx, src, dst, opt) {
				fs.Debugf(src, "Destination mod time is within %v of source and files identical, skipping", modifyWindow)
				return false
			}
			fs.Debugf(src, "Destination mod time is within %v of source but files differ, transferring", modifyWindow)
		}
	} else {
		// Check to see if changed or not
		if Equal(ctx, src, dst) {
			fs.Debugf(src, "Unchanged skipping")
			return false
		}
	}
	return true
}
2018-08-30 15:45:41 +00:00
// RcatSize reads data from the Reader until EOF and uploads it to a file on remote.
// Pass in size >=0 if known, <0 if not known
func RcatSize(ctx context.Context, fdst fs.Fs, dstFileName string, in io.ReadCloser, size int64, modTime time.Time) (dst fs.Object, err error) {
	var obj fs.Object
	if size >= 0 {
		// NOTE: this err deliberately shadows the named return so the
		// deferred tr.Done below reports the result of the Put.
		var err error
		// Size known use Put
		tr := accounting.Stats(ctx).NewTransferRemoteSize(dstFileName, size)
		defer func() {
			tr.Done(ctx, err)
		}()
		body := ioutil.NopCloser(in) // we let the server close the body
		in := tr.Account(ctx, body)  // account the transfer (no buffering)

		if SkipDestructive(ctx, dstFileName, "upload from pipe") {
			// Drain the input even though we are skipping the upload -
			// prevents "broken pipe" errors
			_, err = io.Copy(ioutil.Discard, in)
			return nil, err
		}

		info := object.NewStaticObjectInfo(dstFileName, modTime, size, true, nil, fdst)
		obj, err = fdst.Put(ctx, in, info)
		if err != nil {
			fs.Errorf(dstFileName, "Post request put error: %v", err)
			return nil, err
		}
	} else {
		// Size unknown use Rcat
		obj, err = Rcat(ctx, fdst, dstFileName, in, modTime)
		if err != nil {
			fs.Errorf(dstFileName, "Post request rcat error: %v", err)
			return nil, err
		}
	}
	return obj, nil
}
2019-12-18 17:02:13 +00:00
// copyURLFunc is called from CopyURLFn
type copyURLFunc func ( ctx context . Context , dstFileName string , in io . ReadCloser , size int64 , modTime time . Time ) ( err error )
// copyURLFn copies the data from the url to the function supplied
func copyURLFn ( ctx context . Context , dstFileName string , url string , dstFileNameFromURL bool , fn copyURLFunc ) ( err error ) {
2020-11-13 15:24:43 +00:00
client := fshttp . NewClient ( ctx )
2019-03-08 20:33:22 +00:00
resp , err := client . Get ( url )
2018-11-02 17:29:57 +00:00
if err != nil {
2019-12-18 17:02:13 +00:00
return err
2018-11-02 17:29:57 +00:00
}
defer fs . CheckClose ( resp . Body , & err )
2019-08-05 18:20:50 +00:00
if resp . StatusCode < 200 || resp . StatusCode >= 300 {
2019-12-18 17:02:13 +00:00
return errors . Errorf ( "CopyURL failed: %s" , resp . Status )
}
modTime , err := http . ParseTime ( resp . Header . Get ( "Last-Modified" ) )
if err != nil {
modTime = time . Now ( )
2019-08-05 18:20:50 +00:00
}
2019-09-03 16:25:19 +00:00
if dstFileNameFromURL {
dstFileName = path . Base ( resp . Request . URL . Path )
if dstFileName == "." || dstFileName == "/" {
2019-12-18 17:02:13 +00:00
return errors . Errorf ( "CopyURL failed: file name wasn't found in url" )
2019-09-03 16:25:19 +00:00
}
2021-03-18 09:04:59 +00:00
fs . Debugf ( dstFileName , "File name found in url" )
2019-09-03 16:25:19 +00:00
}
2019-12-18 17:02:13 +00:00
return fn ( ctx , dstFileName , resp . Body , resp . ContentLength , modTime )
}
2019-09-03 16:25:19 +00:00
2019-12-18 17:02:13 +00:00
// CopyURL copies the data from the url to (fdst, dstFileName)
2020-04-19 11:40:17 +00:00
func CopyURL ( ctx context . Context , fdst fs . Fs , dstFileName string , url string , dstFileNameFromURL bool , noClobber bool ) ( dst fs . Object , err error ) {
2019-12-18 17:02:13 +00:00
err = copyURLFn ( ctx , dstFileName , url , dstFileNameFromURL , func ( ctx context . Context , dstFileName string , in io . ReadCloser , size int64 , modTime time . Time ) ( err error ) {
2020-04-19 11:40:17 +00:00
if noClobber {
_ , err = fdst . NewObject ( ctx , dstFileName )
if err == nil {
return errors . New ( "CopyURL failed: file already exist" )
}
}
2019-12-18 17:02:13 +00:00
dst , err = RcatSize ( ctx , fdst , dstFileName , in , size , modTime )
return err
} )
return dst , err
}
// CopyURLToWriter copies the data from the url to the io.Writer supplied
func CopyURLToWriter ( ctx context . Context , url string , out io . Writer ) ( err error ) {
return copyURLFn ( ctx , "" , url , false , func ( ctx context . Context , dstFileName string , in io . ReadCloser , size int64 , modTime time . Time ) ( err error ) {
_ , err = io . Copy ( out , in )
return err
} )
2018-11-02 17:29:57 +00:00
}
2019-06-23 03:50:09 +00:00
// BackupDir returns the correctly configured --backup-dir
func BackupDir(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, srcFileName string) (backupDir fs.Fs, err error) {
	ci := fs.GetConfig(ctx)
	if ci.BackupDir != "" {
		backupDir, err = cache.Get(ctx, ci.BackupDir)
		if err != nil {
			return nil, fserrors.FatalError(errors.Errorf("Failed to make fs for --backup-dir %q: %v", ci.BackupDir, err))
		}
		if !SameConfig(fdst, backupDir) {
			return nil, fserrors.FatalError(errors.New("parameter to --backup-dir has to be on the same remote as destination"))
		}
		if srcFileName == "" {
			// Directory sync - the backup dir must not overlap
			// source or destination
			if Overlapping(fdst, backupDir) {
				return nil, fserrors.FatalError(errors.New("destination and parameter to --backup-dir mustn't overlap"))
			}
			if Overlapping(fsrc, backupDir) {
				return nil, fserrors.FatalError(errors.New("source and parameter to --backup-dir mustn't overlap"))
			}
		} else {
			// Single file - only the weaker same-directory check is
			// needed, and only when --suffix is not set
			if ci.Suffix == "" {
				if SameDir(fdst, backupDir) {
					return nil, fserrors.FatalError(errors.New("destination and parameter to --backup-dir mustn't be the same"))
				}
				if SameDir(fsrc, backupDir) {
					return nil, fserrors.FatalError(errors.New("source and parameter to --backup-dir mustn't be the same"))
				}
			}
		}
	} else if ci.Suffix != "" {
		// --backup-dir is not set but --suffix is - use the destination as the backupDir
		backupDir = fdst
	} else {
		return nil, fserrors.FatalError(errors.New("internal error: BackupDir called when --backup-dir and --suffix both empty"))
	}
	// Backups are implemented with server-side move/copy
	if !CanServerSideMove(backupDir) {
		return nil, fserrors.FatalError(errors.New("can't use --backup-dir on a remote which doesn't support server-side move or copy"))
	}
	return backupDir, nil
}
// MoveBackupDir moves a file to the backup dir
func MoveBackupDir ( ctx context . Context , backupDir fs . Fs , dst fs . Object ) ( err error ) {
2020-11-05 11:33:32 +00:00
remoteWithSuffix := SuffixName ( ctx , dst . Remote ( ) )
2019-06-23 03:50:09 +00:00
overwritten , _ := backupDir . NewObject ( ctx , remoteWithSuffix )
_ , err = Move ( ctx , backupDir , overwritten , remoteWithSuffix , dst )
return err
}
2016-10-23 16:34:17 +00:00
// moveOrCopyFile moves or copies a single file possibly to a new name
func moveOrCopyFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName string, srcFileName string, cp bool) (err error) {
	ci := fs.GetConfig(ctx)
	dstFilePath := path.Join(fdst.Root(), dstFileName)
	srcFilePath := path.Join(fsrc.Root(), srcFileName)
	// Same remote and same path - nothing to do
	if fdst.Name() == fsrc.Name() && dstFilePath == srcFilePath {
		fs.Debugf(fdst, "don't need to copy/move %s, it is already at target location", dstFileName)
		return nil
	}
	// Choose operations
	Op := Move
	if cp {
		Op = Copy
	}
	// Find src object
	srcObj, err := fsrc.NewObject(ctx, srcFileName)
	if err != nil {
		return err
	}
	// Find dst object if it exists
	var dstObj fs.Object
	if !ci.NoCheckDest {
		dstObj, err = fdst.NewObject(ctx, dstFileName)
		if err == fs.ErrorObjectNotFound {
			dstObj = nil
		} else if err != nil {
			return err
		}
	}
	// Special case for changing case of a file on a case insensitive remote
	// This will move the file to a temporary name then
	// move it back to the intended destination. This is required
	// to avoid issues with certain remotes and avoid file deletion.
	if !cp && fdst.Name() == fsrc.Name() && fdst.Features().CaseInsensitive && dstFileName != srcFileName && strings.EqualFold(dstFilePath, srcFilePath) {
		// Create random name to temporarily move file to
		tmpObjName := dstFileName + "-rclone-move-" + random.String(8)
		// Make sure the temporary name is free
		_, err := fdst.NewObject(ctx, tmpObjName)
		if err != fs.ErrorObjectNotFound {
			if err == nil {
				return errors.New("found an already existing file with a randomly generated name. Try the operation again")
			}
			return errors.Wrap(err, "error while attempting to move file to a temporary location")
		}
		tr := accounting.Stats(ctx).NewTransfer(srcObj)
		defer func() {
			tr.Done(ctx, err)
		}()
		// Two-step rename: src -> tmp -> dst
		tmpObj, err := Op(ctx, fdst, nil, tmpObjName, srcObj)
		if err != nil {
			return errors.Wrap(err, "error while moving file to temporary location")
		}
		_, err = Op(ctx, fdst, nil, dstFileName, tmpObj)
		return err
	}
	// Set up --backup-dir / --compare-dest / --copy-dest if configured
	var backupDir fs.Fs
	var copyDestDir []fs.Fs
	if ci.BackupDir != "" || ci.Suffix != "" {
		backupDir, err = BackupDir(ctx, fdst, fsrc, srcFileName)
		if err != nil {
			return errors.Wrap(err, "creating Fs for --backup-dir failed")
		}
	}
	if len(ci.CompareDest) > 0 {
		copyDestDir, err = GetCompareDest(ctx)
		if err != nil {
			return err
		}
	} else if len(ci.CopyDest) > 0 {
		copyDestDir, err = GetCopyDest(ctx, fdst)
		if err != nil {
			return err
		}
	}
	NoNeedTransfer, err := CompareOrCopyDest(ctx, fdst, dstObj, srcObj, copyDestDir, backupDir)
	if err != nil {
		return err
	}
	if !NoNeedTransfer && NeedTransfer(ctx, dstObj, srcObj) {
		// If destination already exists, then we must move it into --backup-dir if required
		if dstObj != nil && backupDir != nil {
			err = MoveBackupDir(ctx, backupDir, dstObj)
			if err != nil {
				return errors.Wrap(err, "moving to --backup-dir failed")
			}
			// If successful zero out the dstObj as it is no longer there
			dstObj = nil
		}
		_, err = Op(ctx, fdst, dstObj, dstFileName, srcObj)
	} else {
		// Transfer skipped - account it as a check, and for a move
		// remove the source (unless --ignore-existing kept the dst)
		tr := accounting.Stats(ctx).NewCheckingTransfer(srcObj)
		if !cp {
			if ci.IgnoreExisting {
				fs.Debugf(srcObj, "Not removing source file as destination file exists and --ignore-existing is set")
			} else {
				err = DeleteFile(ctx, srcObj)
			}
		}
		tr.Done(ctx, err)
	}
	return err
}
// MoveFile moves a single file possibly to a new name
2019-06-17 08:34:30 +00:00
func MoveFile ( ctx context . Context , fdst fs . Fs , fsrc fs . Fs , dstFileName string , srcFileName string ) ( err error ) {
return moveOrCopyFile ( ctx , fdst , fsrc , dstFileName , srcFileName , false )
2016-10-23 16:34:17 +00:00
}
// CopyFile copies a single file possibly to a new name
// (comment fixed: this copies, it does not move)
func CopyFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName string, srcFileName string) (err error) {
	return moveOrCopyFile(ctx, fdst, fsrc, dstFileName, srcFileName, true)
}
2018-01-06 14:39:31 +00:00
2018-09-11 01:59:48 +00:00
// SetTier changes tier of object in remote
2019-06-17 08:34:30 +00:00
func SetTier ( ctx context . Context , fsrc fs . Fs , tier string ) error {
return ListFn ( ctx , fsrc , func ( o fs . Object ) {
2018-09-11 01:59:48 +00:00
objImpl , ok := o . ( fs . SetTierer )
if ! ok {
fs . Errorf ( fsrc , "Remote object does not implement SetTier" )
return
}
err := objImpl . SetTier ( tier )
if err != nil {
fs . Errorf ( fsrc , "Failed to do SetTier, %v" , err )
}
} )
}
2018-01-06 14:39:31 +00:00
// ListFormat defines files information print format
type ListFormat struct {
	separator string                             // string placed between fields (non-CSV mode)
	dirSlash  bool                               // if set, append "/" to directory names
	absolute  bool                               // if set, prefix paths with a leading "/"
	output    []func(entry *ListJSONItem) string // one formatting function per output column
	csv       *csv.Writer                        // non-nil means CSV output mode, writing into buf
	buf       bytes.Buffer                       // scratch buffer used by the csv writer
}
// SetSeparator sets the string used to separate fields in the output.
func (l *ListFormat) SetSeparator(sep string) {
	l.separator = sep
}
// SetDirSlash controls whether directory names are printed with a
// trailing slash.
func (l *ListFormat) SetDirSlash(trailingSlash bool) {
	l.dirSlash = trailingSlash
}
2018-06-03 09:42:34 +00:00
// SetAbsolute controls whether paths are printed with a leading slash.
func (l *ListFormat) SetAbsolute(leadingSlash bool) {
	l.absolute = leadingSlash
}
2018-05-13 11:15:05 +00:00
// SetCSV defines if the output should be csv
//
// Note that you should call SetSeparator before this if you want a
// custom separator
func (l *ListFormat) SetCSV(useCSV bool) {
	if !useCSV {
		l.csv = nil
		return
	}
	l.csv = csv.NewWriter(&l.buf)
	// A previously-set separator becomes the CSV comma
	if l.separator != "" {
		l.csv.Comma = []rune(l.separator)[0]
	}
}
2018-01-06 14:39:31 +00:00
// SetOutput sets functions used to create files information
2019-02-14 08:45:03 +00:00
func ( l * ListFormat ) SetOutput ( output [ ] func ( entry * ListJSONItem ) string ) {
2018-01-06 14:39:31 +00:00
l . output = output
}
// AddModTime adds file's Mod Time to output
func ( l * ListFormat ) AddModTime ( ) {
2019-02-14 08:45:03 +00:00
l . AppendOutput ( func ( entry * ListJSONItem ) string {
return entry . ModTime . When . Local ( ) . Format ( "2006-01-02 15:04:05" )
} )
2018-01-06 14:39:31 +00:00
}
// AddSize adds file's size to output
func ( l * ListFormat ) AddSize ( ) {
2019-02-14 08:45:03 +00:00
l . AppendOutput ( func ( entry * ListJSONItem ) string {
return strconv . FormatInt ( entry . Size , 10 )
2018-01-06 17:53:37 +00:00
} )
2018-01-06 14:39:31 +00:00
}
2019-02-14 08:45:03 +00:00
// normalisePath applies the absolute and dirSlash settings to remote,
// returning the path as it should be printed.
func (l *ListFormat) normalisePath(entry *ListJSONItem, remote string) string {
	out := remote
	if l.absolute && !strings.HasPrefix(out, "/") {
		out = "/" + out
	}
	if l.dirSlash && entry.IsDir {
		out += "/"
	}
	return out
}
2018-01-06 14:39:31 +00:00
// AddPath adds path to file to output
func ( l * ListFormat ) AddPath ( ) {
2019-02-14 08:45:03 +00:00
l . AppendOutput ( func ( entry * ListJSONItem ) string {
return l . normalisePath ( entry , entry . Path )
} )
}
// AddEncrypted adds the encrypted path to file to output
func ( l * ListFormat ) AddEncrypted ( ) {
l . AppendOutput ( func ( entry * ListJSONItem ) string {
return l . normalisePath ( entry , entry . Encrypted )
2018-01-06 14:39:31 +00:00
} )
}
2018-01-06 17:53:37 +00:00
// AddHash adds the hash of the type given to the output
2018-01-12 16:30:54 +00:00
func ( l * ListFormat ) AddHash ( ht hash . Type ) {
2019-02-14 08:45:03 +00:00
hashName := ht . String ( )
l . AppendOutput ( func ( entry * ListJSONItem ) string {
if entry . IsDir {
2018-01-06 17:53:37 +00:00
return ""
}
2019-02-14 08:45:03 +00:00
return entry . Hashes [ hashName ]
2018-01-06 17:53:37 +00:00
} )
}
2018-05-13 08:18:08 +00:00
// AddID adds file's ID to the output if known
func ( l * ListFormat ) AddID ( ) {
2019-02-14 08:45:03 +00:00
l . AppendOutput ( func ( entry * ListJSONItem ) string {
return entry . ID
} )
}
// AddOrigID adds file's Original ID to the output if known
func ( l * ListFormat ) AddOrigID ( ) {
l . AppendOutput ( func ( entry * ListJSONItem ) string {
return entry . OrigID
2018-05-13 08:18:08 +00:00
} )
}
2019-03-20 12:45:06 +00:00
// AddTier adds the file's storage tier to the output if known.
func (l *ListFormat) AddTier() {
	l.AppendOutput(func(item *ListJSONItem) string {
		return item.Tier
	})
}
2018-05-13 09:37:25 +00:00
// AddMimeType adds file's MimeType to the output if known
func ( l * ListFormat ) AddMimeType ( ) {
2019-02-14 08:45:03 +00:00
l . AppendOutput ( func ( entry * ListJSONItem ) string {
return entry . MimeType
2018-05-13 09:37:25 +00:00
} )
}
2018-01-06 14:39:31 +00:00
// AppendOutput adds string generated by specific function to printed output
2019-02-14 08:45:03 +00:00
func ( l * ListFormat ) AppendOutput ( functionToAppend func ( item * ListJSONItem ) string ) {
2018-01-06 14:39:31 +00:00
l . output = append ( l . output , functionToAppend )
}
2018-05-13 09:55:18 +00:00
// Format prints information about the DirEntry in the format defined
2019-02-14 08:45:03 +00:00
func ( l * ListFormat ) Format ( entry * ListJSONItem ) ( result string ) {
2018-05-13 11:15:05 +00:00
var out [ ] string
2018-05-13 09:55:18 +00:00
for _ , fun := range l . output {
2019-02-14 08:45:03 +00:00
out = append ( out , fun ( entry ) )
2018-05-13 11:15:05 +00:00
}
if l . csv != nil {
l . buf . Reset ( )
_ = l . csv . Write ( out ) // can't fail writing to bytes.Buffer
l . csv . Flush ( )
result = strings . TrimRight ( l . buf . String ( ) , "\n" )
} else {
result = strings . Join ( out , l . separator )
2018-01-06 14:39:31 +00:00
}
2018-05-13 11:15:05 +00:00
return result
2018-01-06 14:39:31 +00:00
}
2019-01-15 16:43:55 +00:00
// DirMove renames srcRemote to dstRemote
//
// It does this by loading the directory tree into memory (using ListR
// if available) and doing renames in parallel.
2019-06-17 08:34:30 +00:00
func DirMove ( ctx context . Context , f fs . Fs , srcRemote , dstRemote string ) ( err error ) {
2020-11-05 11:33:32 +00:00
ci := fs . GetConfig ( ctx )
2020-11-05 07:15:42 +00:00
if SkipDestructive ( ctx , srcRemote , "dirMove" ) {
accounting . Stats ( ctx ) . Renames ( 1 )
return nil
}
2019-01-15 16:43:55 +00:00
// Use DirMove if possible
if doDirMove := f . Features ( ) . DirMove ; doDirMove != nil {
2020-03-30 17:12:32 +00:00
err = doDirMove ( ctx , f , srcRemote , dstRemote )
if err == nil {
accounting . Stats ( ctx ) . Renames ( 1 )
}
return err
2019-01-15 16:43:55 +00:00
}
// Load the directory tree into memory
2019-06-17 08:34:30 +00:00
tree , err := walk . NewDirTree ( ctx , f , srcRemote , true , - 1 )
2019-01-15 16:43:55 +00:00
if err != nil {
return errors . Wrap ( err , "RenameDir tree walk" )
}
// Get the directories in sorted order
dirs := tree . Dirs ( )
// Make the destination directories - must be done in order not in parallel
for _ , dir := range dirs {
dstPath := dstRemote + dir [ len ( srcRemote ) : ]
2019-06-17 08:34:30 +00:00
err := f . Mkdir ( ctx , dstPath )
2019-01-15 16:43:55 +00:00
if err != nil {
return errors . Wrap ( err , "RenameDir mkdir" )
}
}
// Rename the files in parallel
type rename struct {
o fs . Object
newPath string
}
2020-11-05 11:33:32 +00:00
renames := make ( chan rename , ci . Transfers )
2019-07-01 08:33:21 +00:00
g , gCtx := errgroup . WithContext ( context . Background ( ) )
2020-11-05 11:33:32 +00:00
for i := 0 ; i < ci . Transfers ; i ++ {
2019-01-15 16:43:55 +00:00
g . Go ( func ( ) error {
for job := range renames {
2019-07-01 08:33:21 +00:00
dstOverwritten , _ := f . NewObject ( gCtx , job . newPath )
_ , err := Move ( gCtx , f , dstOverwritten , job . newPath , job . o )
2019-01-15 16:43:55 +00:00
if err != nil {
return err
}
select {
2019-07-01 08:33:21 +00:00
case <- gCtx . Done ( ) :
return gCtx . Err ( )
2019-01-15 16:43:55 +00:00
default :
}
}
return nil
} )
}
for dir , entries := range tree {
dstPath := dstRemote + dir [ len ( srcRemote ) : ]
for _ , entry := range entries {
if o , ok := entry . ( fs . Object ) ; ok {
renames <- rename { o , path . Join ( dstPath , path . Base ( o . Remote ( ) ) ) }
}
}
}
close ( renames )
err = g . Wait ( )
if err != nil {
return errors . Wrap ( err , "RenameDir renames" )
}
// Remove the source directories in reverse order
for i := len ( dirs ) - 1 ; i >= 0 ; i -- {
2019-06-17 08:34:30 +00:00
err := f . Rmdir ( ctx , dirs [ i ] )
2019-01-15 16:43:55 +00:00
if err != nil {
return errors . Wrap ( err , "RenameDir rmdir" )
}
}
return nil
}
2019-06-08 08:19:07 +00:00
// FsInfo provides information about a remote.
// It is filled in by GetFsInfo.
type FsInfo struct {
	// Name of the remote (as passed into NewFs)
	Name string
	// Root of the remote (as passed into NewFs)
	Root string
	// String returns a description of the FS
	String string
	// Precision of the ModTimes in this Fs in Nanoseconds
	Precision time.Duration
	// Returns the supported hash types of the filesystem
	Hashes []string
	// Features returns the optional features of this Fs
	Features map[string]bool
}
// GetFsInfo gets the information (FsInfo) about a given Fs
func GetFsInfo(f fs.Fs) *FsInfo {
	hashTypes := f.Hashes().Array()
	info := &FsInfo{
		Name:      f.Name(),
		Root:      f.Root(),
		String:    f.String(),
		Precision: f.Precision(),
		Hashes:    make([]string, 0, len(hashTypes)),
		Features:  f.Features().Enabled(),
	}
	// Convert the hash types to their string names
	for _, ht := range hashTypes {
		info.Hashes = append(info.Hashes, ht.String())
	}
	return info
}
2020-03-20 18:43:29 +00:00
2020-06-05 15:13:10 +00:00
var (
	// interactiveMu guards skipped (and the interactive prompt) in
	// SkipDestructive / skipDestructiveChoose.
	interactiveMu sync.Mutex
	// skipped remembers per-action "skip all" / "do all" answers the
	// user gave in --interactive mode.
	skipped = map[string]bool{}
)
2020-03-20 18:43:29 +00:00
2020-06-05 15:13:10 +00:00
// skipDestructiveChoose asks the user which action to take
//
// Call with interactiveMu held
func skipDestructiveChoose(ctx context.Context, subject interface{}, action string) (skip bool) {
	fmt.Printf("rclone: %s \"%v\"?\n", action, subject)
	choices := []string{
		"yYes, this is OK",
		"nNo, skip this",
		fmt.Sprintf("sSkip all %s operations with no more questions", action),
		fmt.Sprintf("!Do all %s operations with no more questions", action),
		"qExit rclone now.",
	}
	switch i := config.CommandDefault(choices, 0); i {
	case 'y':
		skip = false
	case 'n':
		skip = true
	case 's':
		// Remember to skip this action from now on
		skip = true
		skipped[action] = true
		fs.Logf(nil, "Skipping all %s operations from now on without asking", action)
	case '!':
		// Remember to allow this action from now on
		skip = false
		skipped[action] = false
		fs.Logf(nil, "Doing all %s operations from now on without asking", action)
	case 'q':
		fs.Logf(nil, "Quitting rclone now")
		atexit.Run()
		os.Exit(0)
	default:
		// Unknown key - err on the side of not doing the operation
		skip = true
		fs.Errorf(nil, "Bad choice %c", i)
	}
	return skip
}
// SkipDestructive should be called whenever rclone is about to do an destructive operation.
//
// It will check the --dry-run flag and it will ask the user if the --interactive flag is set.
//
// subject should be the object or directory in use
//
// action should be a descriptive word or short phrase
//
// Together they should make sense in this sentence: "Rclone is about
// to action subject".
func SkipDestructive(ctx context.Context, subject interface{}, action string) (skip bool) {
	var flag string
	ci := fs.GetConfig(ctx)
	switch {
	case ci.DryRun:
		flag = "--dry-run"
		skip = true
	case ci.Interactive:
		flag = "--interactive"
		// Serialise access to the skipped map and the terminal prompt
		interactiveMu.Lock()
		defer interactiveMu.Unlock()
		var found bool
		skip, found = skipped[action]
		if !found {
			// No remembered answer for this action - ask the user
			skip = skipDestructiveChoose(ctx, subject, action)
		}
	default:
		return false
	}
	if skip {
		// Include the subject's size in the log message if it has one
		size := int64(-1)
		if do, ok := subject.(interface{ Size() int64 }); ok {
			size = do.Size()
		}
		if size >= 0 {
			fs.Logf(subject, "Skipped %s as %s is set (size %v)", fs.LogValue("skipped", action), flag, fs.LogValue("size", fs.SizeSuffix(size)))
		} else {
			fs.Logf(subject, "Skipped %s as %s is set", fs.LogValue("skipped", action), flag)
		}
	}
	return skip
}