2018-01-12 16:30:54 +00:00
// Package operations does generic operations on filesystems and objects
package operations
2014-03-28 17:56:04 +00:00
import (
2017-02-13 10:48:26 +00:00
"bytes"
2014-03-28 17:56:04 +00:00
"fmt"
2014-08-01 16:58:39 +00:00
"io"
2017-08-03 19:42:35 +00:00
"io/ioutil"
2016-03-05 16:10:51 +00:00
"log"
2015-03-01 12:38:31 +00:00
"path"
2016-03-05 16:10:51 +00:00
"sort"
2018-01-06 14:39:31 +00:00
"strconv"
2016-01-23 20:16:47 +00:00
"strings"
2014-03-28 17:56:04 +00:00
"sync"
2015-10-02 18:48:48 +00:00
"sync/atomic"
2017-08-03 19:42:35 +00:00
"time"
2016-01-23 20:16:47 +00:00
2018-01-12 16:30:54 +00:00
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/accounting"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/fs/march"
"github.com/ncw/rclone/fs/object"
"github.com/ncw/rclone/fs/walk"
"github.com/ncw/rclone/lib/readers"
2016-06-12 14:06:02 +00:00
"github.com/pkg/errors"
2016-10-31 14:45:52 +00:00
"github.com/spf13/pflag"
2017-09-01 15:33:09 +00:00
"golang.org/x/net/context"
2014-03-28 17:56:04 +00:00
)
2016-01-11 12:39:33 +00:00
// CheckHashes checks the two files to see if they have common
// known hash types and compares them
2014-03-28 17:56:04 +00:00
//
2016-01-24 18:06:57 +00:00
// Returns
2015-08-20 19:48:58 +00:00
//
2016-01-24 18:06:57 +00:00
// equal - which is equality of the hashes
//
// hash - the HashType. This is HashNone if either of the hashes were
// unset or a compatible hash couldn't be found.
//
// err - may return an error which will already have been logged
2014-03-28 17:56:04 +00:00
//
2015-08-20 19:48:58 +00:00
// If an error is returned it will return equal as false
2018-01-12 16:30:54 +00:00
func CheckHashes ( src fs . ObjectInfo , dst fs . Object ) ( equal bool , ht hash . Type , err error ) {
2016-01-11 12:39:33 +00:00
common := src . Fs ( ) . Hashes ( ) . Overlap ( dst . Fs ( ) . Hashes ( ) )
2018-01-12 16:30:54 +00:00
// fs.Debugf(nil, "Shared hashes: %v", common)
2016-01-11 12:39:33 +00:00
if common . Count ( ) == 0 {
2018-01-18 20:27:52 +00:00
return true , hash . None , nil
2016-01-11 12:39:33 +00:00
}
2018-01-12 16:30:54 +00:00
ht = common . GetOne ( )
srcHash , err := src . Hash ( ht )
2014-03-28 17:56:04 +00:00
if err != nil {
2018-01-12 16:30:54 +00:00
fs . CountError ( err )
fs . Errorf ( src , "Failed to calculate src hash: %v" , err )
return false , ht , err
2015-08-20 19:48:58 +00:00
}
2016-01-11 12:39:33 +00:00
if srcHash == "" {
2018-01-18 20:27:52 +00:00
return true , hash . None , nil
2014-03-28 17:56:04 +00:00
}
2018-01-12 16:30:54 +00:00
dstHash , err := dst . Hash ( ht )
2014-03-28 17:56:04 +00:00
if err != nil {
2018-01-12 16:30:54 +00:00
fs . CountError ( err )
fs . Errorf ( dst , "Failed to calculate dst hash: %v" , err )
return false , ht , err
2015-08-20 19:48:58 +00:00
}
2016-01-11 12:39:33 +00:00
if dstHash == "" {
2018-01-18 20:27:52 +00:00
return true , hash . None , nil
2014-03-28 17:56:04 +00:00
}
2017-02-23 11:23:19 +00:00
if srcHash != dstHash {
2018-01-12 16:30:54 +00:00
fs . Debugf ( src , "%v = %s (%v)" , ht , srcHash , src . Fs ( ) )
fs . Debugf ( dst , "%v = %s (%v)" , ht , dstHash , dst . Fs ( ) )
2017-02-23 11:23:19 +00:00
}
2018-01-12 16:30:54 +00:00
return srcHash == dstHash , ht , nil
2014-03-28 17:56:04 +00:00
}
2015-09-22 17:47:16 +00:00
// Equal checks to see if the src and dst objects are equal by looking at
2016-01-11 12:39:33 +00:00
// size, mtime and hash
2014-03-28 17:56:04 +00:00
//
// If the src and dst size are different then it is considered to be
2015-06-06 07:38:45 +00:00
// not equal. If --size-only is in effect then this is the only check
2016-06-17 16:20:08 +00:00
// that is done. If --ignore-size is in effect then this check is
// skipped and the files are considered the same size.
2014-03-28 17:56:04 +00:00
//
// If the size is the same and the mtime is the same then it is
2015-06-06 07:38:45 +00:00
// considered to be equal. This check is skipped if using --checksum.
2014-03-28 17:56:04 +00:00
//
2015-06-06 07:38:45 +00:00
// If the size is the same and mtime is different, unreadable or
2016-01-11 12:39:33 +00:00
// --checksum is set and the hash is the same then the file is
2015-06-06 07:38:45 +00:00
// considered to be equal. In this case the mtime on the dst is
// updated if --checksum is not set.
2014-03-28 17:56:04 +00:00
//
// Otherwise the file is considered to be not equal including if there
// were errors reading info.
2018-01-12 16:30:54 +00:00
func Equal ( src fs . ObjectInfo , dst fs . Object ) bool {
return equal ( src , dst , fs . Config . SizeOnly , fs . Config . CheckSum )
2016-12-18 10:03:56 +00:00
}
2018-01-31 16:15:30 +00:00
// sizeDiffers compare the size of src and dst taking into account the
// various ways of ignoring sizes
func sizeDiffers(src, dst fs.ObjectInfo) bool {
	switch {
	case fs.Config.IgnoreSize:
		// --ignore-size: sizes are never considered different
		return false
	case src.Size() < 0, dst.Size() < 0:
		// unknown size on either side - can't compare
		return false
	}
	return src.Size() != dst.Size()
}
2018-01-12 16:30:54 +00:00
// equal is the implementation of Equal with the --size-only and
// --checksum flags passed in explicitly.
//
// As a side effect it may update the mod time of dst (or even delete
// dst on backends which can't set mod times) when sizes/hashes match
// but mod times differ - see the comments inline.
func equal(src fs.ObjectInfo, dst fs.Object, sizeOnly, checkSum bool) bool {
	if sizeDiffers(src, dst) {
		fs.Debugf(src, "Sizes differ (src %d vs dst %d)", src.Size(), dst.Size())
		return false
	}
	if sizeOnly {
		// --size-only: size match is all we need
		fs.Debugf(src, "Sizes identical")
		return true
	}

	// Assert: Size is equal or being ignored
	// If checking checksum and not modtime
	if checkSum {
		// Check the hash
		same, ht, _ := CheckHashes(src, dst)
		if !same {
			fs.Debugf(src, "%v differ", ht)
			return false
		}
		if ht == hash.None {
			// no common hash - size match is the best we can do
			fs.Debugf(src, "Size of src and dst objects identical")
		} else {
			fs.Debugf(src, "Size and %v of src and dst objects identical", ht)
		}
		return true
	}

	// Sizes the same so check the mtime
	if fs.Config.ModifyWindow == fs.ModTimeNotSupported {
		// backend can't do mod times - fall back to size only
		fs.Debugf(src, "Sizes identical")
		return true
	}
	srcModTime := src.ModTime()
	dstModTime := dst.ModTime()
	dt := dstModTime.Sub(srcModTime)
	ModifyWindow := fs.Config.ModifyWindow
	if dt < ModifyWindow && dt > -ModifyWindow {
		// mod times agree within the configured precision window
		fs.Debugf(src, "Size and modification time the same (differ by %s, within tolerance %s)", dt, ModifyWindow)
		return true
	}

	fs.Debugf(src, "Modification times differ by %s: %v, %v", dt, srcModTime, dstModTime)

	// Check if the hashes are the same
	same, ht, _ := CheckHashes(src, dst)
	if !same {
		fs.Debugf(src, "%v differ", ht)
		return false
	}
	if ht == hash.None {
		// if couldn't check hash, return that they differ
		return false
	}

	// mod time differs but hash is the same - reset the dst mod time
	// if required so future syncs see them as equal
	if !fs.Config.NoUpdateModTime {
		if fs.Config.DryRun {
			fs.Logf(src, "Not updating modification time as --dry-run")
		} else {
			// Size and hash the same but mtime different
			// Error if objects are treated as immutable
			if fs.Config.Immutable {
				fs.Errorf(dst, "Timestamp mismatch between immutable objects")
				return false
			}
			// Update the mtime of the dst object here
			err := dst.SetModTime(srcModTime)
			if err == fs.ErrorCantSetModTime {
				// returning false forces a re-upload which fixes the mod time
				fs.Debugf(dst, "src and dst identical but can't set mod time without re-uploading")
				return false
			} else if err == fs.ErrorCantSetModTimeWithoutDelete {
				// delete dst now; returning false triggers the re-upload
				fs.Debugf(dst, "src and dst identical but can't set mod time without deleting and re-uploading")
				err = dst.Remove()
				if err != nil {
					fs.Errorf(dst, "failed to delete before re-upload: %v", err)
				}
				return false
			} else if err != nil {
				fs.CountError(err)
				fs.Errorf(dst, "Failed to set modification time: %v", err)
			} else {
				fs.Infof(src, "Updated modification time in destination")
			}
		}
	}
	return true
}
2014-07-15 18:27:05 +00:00
// Used to remove a failed copy
2015-03-14 17:54:41 +00:00
//
// Returns whether the file was succesfully removed or not
2018-01-12 16:30:54 +00:00
func removeFailedCopy ( dst fs . Object ) bool {
2015-03-14 17:54:41 +00:00
if dst == nil {
return false
}
2018-01-12 16:30:54 +00:00
fs . Infof ( dst , "Removing failed copy" )
2015-03-14 17:54:41 +00:00
removeErr := dst . Remove ( )
if removeErr != nil {
2018-01-12 16:30:54 +00:00
fs . Infof ( dst , "Failed to remove failed copy: %s" , removeErr )
2015-03-14 17:54:41 +00:00
return false
2014-07-15 18:27:05 +00:00
}
2015-03-14 17:54:41 +00:00
return true
2014-07-15 18:27:05 +00:00
}
2016-10-23 16:34:17 +00:00
// Wrapper to override the remote for an object
type overrideRemoteObject struct {
2018-01-12 16:30:54 +00:00
fs . Object
2016-10-23 16:34:17 +00:00
remote string
}
// Remote returns the overriden remote name
func ( o * overrideRemoteObject ) Remote ( ) string {
return o . remote
}
2017-03-04 10:10:55 +00:00
// MimeType returns the mime type of the underlying object or "" if it
// can't be worked out
func ( o * overrideRemoteObject ) MimeType ( ) string {
2018-01-12 16:30:54 +00:00
if do , ok := o . Object . ( fs . MimeTyper ) ; ok {
2017-03-04 10:10:55 +00:00
return do . MimeType ( )
}
return ""
}
// Check interface is satisfied
2018-01-12 16:30:54 +00:00
var _ fs . MimeTyper = ( * overrideRemoteObject ) ( nil )
2017-03-04 10:10:55 +00:00
2016-10-22 16:53:10 +00:00
// Copy src object to dst or f if nil. If dst is nil then it uses
// remote as the name of the new object.
//
// It tries a server side copy first (when supported and the remotes
// share a config), falls back to a streamed copy, retries up to
// --low-level-retries times, and verifies size and hash afterwards.
//
// It returns the destination object if possible. Note that this may
// be nil.
func Copy(f fs.Fs, dst fs.Object, remote string, src fs.Object) (newDst fs.Object, err error) {
	newDst = dst
	if fs.Config.DryRun {
		fs.Logf(src, "Not copying as --dry-run")
		return newDst, nil
	}
	maxTries := fs.Config.LowLevelRetries
	tries := 0
	doUpdate := dst != nil // update existing object vs create a new one
	// work out which hash to use - limit to 1 hash in common
	var common hash.Set
	hashType := hash.None
	if !fs.Config.SizeOnly {
		common = src.Fs().Hashes().Overlap(f.Hashes())
		if common.Count() > 0 {
			hashType = common.GetOne()
			common = hash.Set(hashType)
		}
	}
	hashOption := &fs.HashesOption{Hashes: common}
	var actionTaken string
	for {
		// Try server side copy first - if has optional interface and
		// is same underlying remote
		actionTaken = "Copied (server side copy)"
		if doCopy := f.Features().Copy; doCopy != nil && SameConfig(src.Fs(), f) {
			newDst, err = doCopy(src, remote)
			if err == nil {
				dst = newDst
			}
		} else {
			// pretend the server side copy failed so we fall through
			// to the manual path below
			err = fs.ErrorCantCopy
		}
		// If can't server side copy, do it manually
		if err == fs.ErrorCantCopy {
			var in0 io.ReadCloser
			in0, err = src.Open(hashOption)
			if err != nil {
				err = errors.Wrap(err, "failed to open source object")
			} else {
				in := accounting.NewAccount(in0, src).WithBuffer() // account and buffer the transfer
				var wrappedSrc fs.ObjectInfo = src
				// We try to pass the original object if possible
				if src.Remote() != remote {
					wrappedSrc = &overrideRemoteObject{Object: src, remote: remote}
				}
				if doUpdate {
					actionTaken = "Copied (replaced existing)"
					err = dst.Update(in, wrappedSrc, hashOption)
				} else {
					actionTaken = "Copied (new)"
					dst, err = f.Put(in, wrappedSrc, hashOption)
				}
				closeErr := in.Close()
				if err == nil {
					newDst = dst
					// surface a close error if the transfer itself succeeded
					err = closeErr
				}
			}
		}
		tries++
		if tries >= maxTries {
			break
		}
		// Retry if err returned a retry error
		if fserrors.IsRetryError(err) || fserrors.ShouldRetry(err) {
			fs.Debugf(src, "Received error: %v - low level retry %d/%d", err, tries, maxTries)
			continue
		}
		// otherwise finish
		break
	}
	if err != nil {
		fs.CountError(err)
		fs.Errorf(src, "Failed to copy: %v", err)
		return newDst, err
	}

	// Verify sizes are the same after transfer
	if sizeDiffers(src, dst) {
		err = errors.Errorf("corrupted on transfer: sizes differ %d vs %d", src.Size(), dst.Size())
		fs.Errorf(dst, "%v", err)
		fs.CountError(err)
		removeFailedCopy(dst)
		return newDst, err
	}

	// Verify hashes are the same after transfer - ignoring blank hashes
	// TODO(klauspost): This could be extended, so we always create a hash type matching
	// the destination, and calculate it while sending.
	if hashType != hash.None {
		var srcSum string
		srcSum, err = src.Hash(hashType)
		if err != nil {
			// hash read failures are logged but not fatal here
			fs.CountError(err)
			fs.Errorf(src, "Failed to read src hash: %v", err)
		} else if srcSum != "" {
			var dstSum string
			dstSum, err = dst.Hash(hashType)
			if err != nil {
				fs.CountError(err)
				fs.Errorf(dst, "Failed to read hash: %v", err)
			} else if !fs.Config.IgnoreChecksum && !hash.Equals(srcSum, dstSum) {
				// mismatched hashes mean a corrupted transfer - remove
				// the bad destination object
				err = errors.Errorf("corrupted on transfer: %v hash differ %q vs %q", hashType, srcSum, dstSum)
				fs.Errorf(dst, "%v", err)
				fs.CountError(err)
				removeFailedCopy(dst)
				return newDst, err
			}
		}
	}
	fs.Infof(src, actionTaken)
	return newDst, err
}
2016-10-22 16:53:10 +00:00
// Move src object to dst or fdst if nil. If dst is nil then it uses
// remote as the name of the new object.
//
// It tries a server side move first, and otherwise falls back to
// Copy followed by deleting the source.
//
// It returns the destination object if possible. Note that this may
// be nil.
func Move(fdst fs.Fs, dst fs.Object, remote string, src fs.Object) (newDst fs.Object, err error) {
	newDst = dst
	if fs.Config.DryRun {
		fs.Logf(src, "Not moving as --dry-run")
		return newDst, nil
	}
	// See if we have Move available
	if doMove := fdst.Features().Move; doMove != nil && SameConfig(src.Fs(), fdst) {
		// Delete destination if it exists
		if dst != nil {
			err = DeleteFile(dst)
			if err != nil {
				return newDst, err
			}
		}
		// Move dst <- src
		newDst, err = doMove(src, remote)
		switch err {
		case nil:
			fs.Infof(src, "Moved (server side)")
			return newDst, nil
		case fs.ErrorCantMove:
			// backend declined this particular move - fall through to
			// the copy+delete path below
			fs.Debugf(src, "Can't move, switching to copy")
		default:
			// real error - report it and give up
			fs.CountError(err)
			fs.Errorf(src, "Couldn't move: %v", err)
			return newDst, err
		}
	}
	// Move not found or didn't work so copy dst <- src
	newDst, err = Copy(fdst, dst, remote, src)
	if err != nil {
		fs.Errorf(src, "Not deleting source as copy failed: %v", err)
		return newDst, err
	}
	// Delete src if no error on copy
	return newDst, DeleteFile(src)
}
2017-01-10 20:03:55 +00:00
// CanServerSideMove returns true if fdst support server side moves or
// server side copies
//
// Some remotes simulate rename by server-side copy and delete, so include
// remotes that implements either Mover or Copier.
2018-01-12 16:30:54 +00:00
func CanServerSideMove ( fdst fs . Fs ) bool {
2017-01-13 17:21:47 +00:00
canMove := fdst . Features ( ) . Move != nil
canCopy := fdst . Features ( ) . Copy != nil
2017-01-10 20:03:55 +00:00
return canMove || canCopy
}
2018-01-12 16:30:54 +00:00
// DeleteFileWithBackupDir deletes a single file respecting --dry-run
// and accumulating stats and errors.
//
// If backupDir is set then it moves the file to there instead of
// deleting
func DeleteFileWithBackupDir(dst fs.Object, backupDir fs.Fs) (err error) {
	accounting.Stats.Checking(dst.Remote())
	// verb forms used in the log messages below
	action, actioned, actioning := "delete", "Deleted", "deleting"
	if backupDir != nil {
		action, actioned, actioning = "move into backup dir", "Moved into backup dir", "moving into backup dir"
	}
	if fs.Config.DryRun {
		fs.Logf(dst, "Not %s as --dry-run", actioning)
	} else if backupDir != nil {
		// moving to the backup dir only works within the same remote
		if !SameConfig(dst.Fs(), backupDir) {
			err = errors.New("parameter to --backup-dir has to be on the same remote as destination")
		} else {
			remoteWithSuffix := dst.Remote() + fs.Config.Suffix
			// overwritten is nil if nothing exists at the backup name yet
			overwritten, _ := backupDir.NewObject(remoteWithSuffix)
			_, err = Move(backupDir, overwritten, remoteWithSuffix, dst)
		}
	} else {
		err = dst.Remove()
	}
	if err != nil {
		fs.CountError(err)
		fs.Errorf(dst, "Couldn't %s: %v", action, err)
	} else if !fs.Config.DryRun {
		fs.Infof(dst, actioned)
	}
	accounting.Stats.DoneChecking(dst.Remote())
	return err
}
2017-01-10 21:47:03 +00:00
// DeleteFile deletes a single file respecting --dry-run and accumulating stats and errors.
//
// If useBackupDir is set and --backup-dir is in effect then it moves
// the file to there instead of deleting
2018-01-12 16:30:54 +00:00
func DeleteFile ( dst fs . Object ) ( err error ) {
return DeleteFileWithBackupDir ( dst , nil )
2017-01-10 21:47:03 +00:00
}
2018-01-12 16:30:54 +00:00
// DeleteFilesWithBackupDir removes all the files passed in the
2017-01-10 21:47:03 +00:00
// channel
//
// If backupDir is set the files will be placed into that directory
// instead of being deleted.
2018-01-12 16:30:54 +00:00
func DeleteFilesWithBackupDir ( toBeDeleted fs . ObjectsChan , backupDir fs . Fs ) error {
2014-03-28 17:56:04 +00:00
var wg sync . WaitGroup
2018-01-12 16:30:54 +00:00
wg . Add ( fs . Config . Transfers )
2016-06-25 13:27:44 +00:00
var errorCount int32
2018-01-12 16:30:54 +00:00
for i := 0 ; i < fs . Config . Transfers ; i ++ {
2014-03-28 17:56:04 +00:00
go func ( ) {
defer wg . Done ( )
2015-09-22 17:47:16 +00:00
for dst := range toBeDeleted {
2018-01-12 16:30:54 +00:00
err := DeleteFileWithBackupDir ( dst , backupDir )
2016-06-25 13:27:44 +00:00
if err != nil {
atomic . AddInt32 ( & errorCount , 1 )
}
2014-03-28 17:56:04 +00:00
}
} ( )
}
2018-01-12 16:30:54 +00:00
fs . Infof ( nil , "Waiting for deletions to finish" )
2014-03-28 17:56:04 +00:00
wg . Wait ( )
2016-06-25 13:27:44 +00:00
if errorCount > 0 {
return errors . Errorf ( "failed to delete %d files" , errorCount )
}
return nil
2014-03-28 17:56:04 +00:00
}
2017-01-10 21:47:03 +00:00
// DeleteFiles removes all the files passed in the channel
2018-01-12 16:30:54 +00:00
func DeleteFiles ( toBeDeleted fs . ObjectsChan ) error {
return DeleteFilesWithBackupDir ( toBeDeleted , nil )
2017-01-10 21:47:03 +00:00
}
2016-06-25 13:28:26 +00:00
// Read a Objects into add() for the given Fs.
2016-04-23 20:46:52 +00:00
// dir is the start directory, "" for root
2016-01-12 13:33:03 +00:00
// If includeAll is specified all files will be added,
// otherwise only files passing the filter will be added.
2016-07-04 12:12:33 +00:00
//
// Each object is passed ito the function provided. If that returns
// an error then the listing will be aborted and that error returned.
2018-01-12 16:30:54 +00:00
func readFilesFn ( f fs . Fs , includeAll bool , dir string , add func ( fs . Object ) error ) ( err error ) {
return walk . Walk ( f , "" , includeAll , fs . Config . MaxDepth , func ( dirPath string , entries fs . DirEntries , err error ) error {
2016-04-21 19:06:21 +00:00
if err != nil {
2016-06-25 13:28:26 +00:00
return err
2016-04-21 19:06:21 +00:00
}
2017-02-24 22:51:01 +00:00
return entries . ForObjectError ( add )
} )
2017-01-24 11:00:05 +00:00
}
2017-01-11 14:59:53 +00:00
// SameConfig returns true if fdst and fsrc are using the same config
// file entry
2018-01-12 16:30:54 +00:00
func SameConfig ( fdst , fsrc fs . Info ) bool {
2017-01-11 14:59:53 +00:00
return fdst . Name ( ) == fsrc . Name ( )
}
2015-09-22 17:47:16 +00:00
// Same returns true if fdst and fsrc point to the same underlying Fs
2018-01-12 16:30:54 +00:00
func Same ( fdst , fsrc fs . Info ) bool {
2017-01-11 14:59:53 +00:00
return SameConfig ( fdst , fsrc ) && fdst . Root ( ) == fsrc . Root ( )
2015-09-01 19:50:28 +00:00
}
2016-07-11 10:36:46 +00:00
// Overlapping returns true if fdst and fsrc point to the same
2017-01-11 14:59:53 +00:00
// underlying Fs and they overlap.
2018-01-12 16:30:54 +00:00
func Overlapping ( fdst , fsrc fs . Info ) bool {
2017-01-11 14:59:53 +00:00
if ! SameConfig ( fdst , fsrc ) {
return false
}
// Return the Root with a trailing / if not empty
2018-01-12 16:30:54 +00:00
fixedRoot := func ( f fs . Info ) string {
2017-01-11 14:59:53 +00:00
s := strings . Trim ( f . Root ( ) , "/" )
if s != "" {
s += "/"
}
return s
}
fdstRoot := fixedRoot ( fdst )
fsrcRoot := fixedRoot ( fsrc )
return strings . HasPrefix ( fdstRoot , fsrcRoot ) || strings . HasPrefix ( fsrcRoot , fdstRoot )
2016-07-11 10:36:46 +00:00
}
2016-04-07 13:56:27 +00:00
// checkIdentical checks to see if dst and src are identical
//
// it returns true if differences were found
2016-10-12 09:59:55 +00:00
// it also returns whether it couldn't be hashed
2018-01-12 16:30:54 +00:00
func checkIdentical ( dst , src fs . Object ) ( differ bool , noHash bool ) {
same , ht , err := CheckHashes ( src , dst )
2017-02-13 10:48:26 +00:00
if err != nil {
// CheckHashes will log and count errors
2016-10-12 09:59:55 +00:00
return true , false
2016-04-07 13:56:27 +00:00
}
2018-01-18 20:27:52 +00:00
if ht == hash . None {
2017-02-13 10:48:26 +00:00
return false , true
}
if ! same {
2018-01-12 16:30:54 +00:00
err = errors . Errorf ( "%v differ" , ht )
fs . Errorf ( src , "%v" , err )
fs . CountError ( err )
2017-02-13 10:48:26 +00:00
return true , false
2016-04-07 13:56:27 +00:00
}
2016-10-12 09:59:55 +00:00
return false , false
2016-04-07 13:56:27 +00:00
}
2017-09-01 15:33:09 +00:00
// checkFn is the type of the checking function used in CheckFn()
//
// It compares a pair of objects, returning whether they differ and
// whether they couldn't be hashed.
type checkFn func(a, b fs.Object) (differ bool, noHash bool)
2017-09-01 15:33:09 +00:00
// checkMarch is used to march over two Fses in the same way as
// sync/copy
type checkMarch struct {
	fdst, fsrc      fs.Fs   // destination and source being compared
	check           checkFn // comparison function applied to matched pairs
	differences     int32   // count of differences found (updated atomically)
	noHashes        int32   // count of files which couldn't be hashed (updated atomically)
	srcFilesMissing int32   // files present in fdst but not fsrc (updated atomically)
	dstFilesMissing int32   // files present in fsrc but not fdst (updated atomically)
}
2015-03-14 17:11:24 +00:00
2017-09-01 15:33:09 +00:00
// DstOnly have an object which is in the destination only
2018-01-12 16:30:54 +00:00
func ( c * checkMarch ) DstOnly ( dst fs . DirEntry ) ( recurse bool ) {
2017-09-01 15:33:09 +00:00
switch dst . ( type ) {
2018-01-12 16:30:54 +00:00
case fs . Object :
2017-11-15 05:32:00 +00:00
err := errors . Errorf ( "File not in %v" , c . fsrc )
2018-01-12 16:30:54 +00:00
fs . Errorf ( dst , "%v" , err )
fs . CountError ( err )
2017-09-01 15:33:09 +00:00
atomic . AddInt32 ( & c . differences , 1 )
atomic . AddInt32 ( & c . srcFilesMissing , 1 )
2018-01-12 16:30:54 +00:00
case fs . Directory :
2017-09-01 15:33:09 +00:00
// Do the same thing to the entire contents of the directory
return true
default :
panic ( "Bad object in DirEntries" )
2014-03-28 17:56:04 +00:00
}
2017-09-01 15:33:09 +00:00
return false
}
2014-03-28 17:56:04 +00:00
2017-09-01 15:33:09 +00:00
// SrcOnly have an object which is in the source only
2018-01-12 16:30:54 +00:00
func ( c * checkMarch ) SrcOnly ( src fs . DirEntry ) ( recurse bool ) {
2017-09-01 15:33:09 +00:00
switch src . ( type ) {
2018-01-12 16:30:54 +00:00
case fs . Object :
2017-11-15 05:32:00 +00:00
err := errors . Errorf ( "File not in %v" , c . fdst )
2018-01-12 16:30:54 +00:00
fs . Errorf ( src , "%v" , err )
fs . CountError ( err )
2017-09-01 15:33:09 +00:00
atomic . AddInt32 ( & c . differences , 1 )
atomic . AddInt32 ( & c . dstFilesMissing , 1 )
2018-01-12 16:30:54 +00:00
case fs . Directory :
2017-09-01 15:33:09 +00:00
// Do the same thing to the entire contents of the directory
return true
default :
panic ( "Bad object in DirEntries" )
2014-03-28 17:56:04 +00:00
}
2017-09-01 15:33:09 +00:00
return false
}
2014-03-28 17:56:04 +00:00
2017-09-01 15:33:09 +00:00
// check to see if two objects are identical using the check function
2018-01-12 16:30:54 +00:00
func ( c * checkMarch ) checkIdentical ( dst , src fs . Object ) ( differ bool , noHash bool ) {
accounting . Stats . Checking ( src . Remote ( ) )
defer accounting . Stats . DoneChecking ( src . Remote ( ) )
2018-01-31 16:15:30 +00:00
if sizeDiffers ( src , dst ) {
2017-11-15 05:32:00 +00:00
err := errors . Errorf ( "Sizes differ" )
2018-01-12 16:30:54 +00:00
fs . Errorf ( src , "%v" , err )
fs . CountError ( err )
2017-09-01 15:33:09 +00:00
return true , false
2014-03-28 17:56:04 +00:00
}
2018-01-12 16:30:54 +00:00
if fs . Config . SizeOnly {
2017-09-01 15:33:09 +00:00
return false , false
}
return c . check ( dst , src )
}
2014-03-28 17:56:04 +00:00
2017-09-01 15:33:09 +00:00
// Match is called when src and dst are present, so sync src to dst
//
// File vs file pairs are compared with checkIdentical; mismatched
// kinds (file vs directory) are counted as differences.
func (c *checkMarch) Match(dst, src fs.DirEntry) (recurse bool) {
	switch srcX := src.(type) {
	case fs.Object:
		dstX, ok := dst.(fs.Object)
		if ok {
			// file on both sides - compare them
			differ, noHash := c.checkIdentical(dstX, srcX)
			if differ {
				atomic.AddInt32(&c.differences, 1)
			} else {
				fs.Debugf(dstX, "OK")
			}
			if noHash {
				atomic.AddInt32(&c.noHashes, 1)
			}
		} else {
			// file in src but directory in dst
			err := errors.Errorf("is file on %v but directory on %v", c.fsrc, c.fdst)
			fs.Errorf(src, "%v", err)
			fs.CountError(err)
			atomic.AddInt32(&c.differences, 1)
			atomic.AddInt32(&c.dstFilesMissing, 1)
		}
	case fs.Directory:
		// Do the same thing to the entire contents of the directory
		_, ok := dst.(fs.Directory)
		if ok {
			return true
		}
		// directory in src but file in dst
		err := errors.Errorf("is file on %v but directory on %v", c.fdst, c.fsrc)
		fs.Errorf(dst, "%v", err)
		fs.CountError(err)
		atomic.AddInt32(&c.differences, 1)
		atomic.AddInt32(&c.srcFilesMissing, 1)
	default:
		panic("Bad object in DirEntries")
	}
	return false
}
2017-02-13 10:48:26 +00:00
2017-09-01 15:33:09 +00:00
// CheckFn checks the files in fsrc and fdst according to Size and
// hash using checkFunction on each file to check the hashes.
//
// checkFunction sees if dst and src are identical
//
// it returns true if differences were found
// it also returns whether it couldn't be hashed
2018-01-12 16:30:54 +00:00
func CheckFn ( fdst , fsrc fs . Fs , check checkFn ) error {
2017-09-01 15:33:09 +00:00
c := & checkMarch {
fdst : fdst ,
fsrc : fsrc ,
check : check ,
2014-03-28 17:56:04 +00:00
}
2017-09-01 15:33:09 +00:00
// set up a march over fdst and fsrc
2018-01-12 16:30:54 +00:00
m := march . New ( context . Background ( ) , fdst , fsrc , "" , c )
fs . Infof ( fdst , "Waiting for checks to finish" )
m . Run ( )
2017-09-01 15:33:09 +00:00
if c . dstFilesMissing > 0 {
2018-01-12 16:30:54 +00:00
fs . Logf ( fdst , "%d files missing" , c . dstFilesMissing )
2017-09-01 15:33:09 +00:00
}
if c . srcFilesMissing > 0 {
2018-01-12 16:30:54 +00:00
fs . Logf ( fsrc , "%d files missing" , c . srcFilesMissing )
2017-09-01 15:33:09 +00:00
}
2018-01-12 16:30:54 +00:00
fs . Logf ( fdst , "%d differences found" , accounting . Stats . GetErrors ( ) )
2017-09-01 15:33:09 +00:00
if c . noHashes > 0 {
2018-01-12 16:30:54 +00:00
fs . Logf ( fdst , "%d hashes could not be checked" , c . noHashes )
2016-10-12 09:59:55 +00:00
}
2017-09-01 15:33:09 +00:00
if c . differences > 0 {
return errors . Errorf ( "%d differences found" , c . differences )
2014-03-28 17:56:04 +00:00
}
return nil
}
2017-02-12 16:30:18 +00:00
// Check the files in fsrc and fdst according to Size and hash
2018-01-12 16:30:54 +00:00
func Check ( fdst , fsrc fs . Fs ) error {
2017-02-12 16:30:18 +00:00
return CheckFn ( fdst , fsrc , checkIdentical )
}
2017-02-13 10:48:26 +00:00
// CheckEqualReaders checks to see if in1 and in2 have the same
// content when read.
//
// it returns true if differences were found
func CheckEqualReaders ( in1 , in2 io . Reader ) ( differ bool , err error ) {
const bufSize = 64 * 1024
buf1 := make ( [ ] byte , bufSize )
buf2 := make ( [ ] byte , bufSize )
for {
2018-01-12 16:30:54 +00:00
n1 , err1 := readers . ReadFill ( in1 , buf1 )
n2 , err2 := readers . ReadFill ( in2 , buf2 )
2017-02-13 10:48:26 +00:00
// check errors
if err1 != nil && err1 != io . EOF {
return true , err1
} else if err2 != nil && err2 != io . EOF {
return true , err2
}
// err1 && err2 are nil or io.EOF here
// process the data
if n1 != n2 || ! bytes . Equal ( buf1 [ : n1 ] , buf2 [ : n2 ] ) {
return true , nil
}
// if both streams finished the we have finished
if err1 == io . EOF && err2 == io . EOF {
break
}
}
return false , nil
}
// CheckIdentical checks to see if dst and src are identical by
// reading all their bytes if necessary.
//
// it returns true if differences were found
2018-01-12 16:30:54 +00:00
func CheckIdentical ( dst , src fs . Object ) ( differ bool , err error ) {
2017-02-13 10:48:26 +00:00
in1 , err := dst . Open ( )
if err != nil {
return true , errors . Wrapf ( err , "failed to open %q" , dst )
}
2018-01-12 16:30:54 +00:00
in1 = accounting . NewAccount ( in1 , dst ) . WithBuffer ( ) // account and buffer the transfer
defer fs . CheckClose ( in1 , & err )
2017-02-13 10:48:26 +00:00
in2 , err := src . Open ( )
if err != nil {
return true , errors . Wrapf ( err , "failed to open %q" , src )
}
2018-01-12 16:30:54 +00:00
in2 = accounting . NewAccount ( in2 , src ) . WithBuffer ( ) // account and buffer the transfer
defer fs . CheckClose ( in2 , & err )
2017-02-13 10:48:26 +00:00
return CheckEqualReaders ( in1 , in2 )
}
// CheckDownload checks the files in fsrc and fdst according to Size
// and the actual contents of the files.
2018-01-12 16:30:54 +00:00
func CheckDownload ( fdst , fsrc fs . Fs ) error {
check := func ( a , b fs . Object ) ( differ bool , noHash bool ) {
2017-02-13 10:48:26 +00:00
differ , err := CheckIdentical ( a , b )
if err != nil {
2018-01-12 16:30:54 +00:00
fs . CountError ( err )
fs . Errorf ( a , "Failed to download: %v" , err )
2017-02-13 10:48:26 +00:00
return true , true
}
return differ , false
}
return CheckFn ( fdst , fsrc , check )
}
2015-09-22 17:47:16 +00:00
// ListFn lists the Fs to the supplied function
2014-03-28 17:56:04 +00:00
//
// Lists in parallel which may get them out of order
2018-01-12 16:30:54 +00:00
func ListFn ( f fs . Fs , fn func ( fs . Object ) ) error {
return walk . Walk ( f , "" , false , fs . Config . MaxDepth , func ( dirPath string , entries fs . DirEntries , err error ) error {
2017-02-24 22:51:01 +00:00
if err != nil {
// FIXME count errors and carry on for listing
return err
}
entries . ForObject ( fn )
return nil
} )
2014-03-28 17:56:04 +00:00
}
2015-02-28 15:30:40 +00:00
// outMutex serialises output from the parallel listers so that lines
// from different objects don't interleave.
var outMutex sync.Mutex
// syncFprintf is a synchronized fmt.Fprintf - it holds outMutex for
// the duration of the write so concurrent callers don't interleave.
//
// Ignores errors from Fprintf
func syncFprintf(w io.Writer, format string, a ...interface{}) {
	outMutex.Lock()
	defer outMutex.Unlock()
	_, _ = fmt.Fprintf(w, format, a...)
}
2015-09-15 14:46:06 +00:00
// List the Fs to the supplied writer
2014-07-12 11:09:20 +00:00
//
2015-11-24 16:54:12 +00:00
// Shows size and path - obeys includes and excludes
2014-07-12 11:09:20 +00:00
//
// Lists in parallel which may get them out of order
2018-01-12 16:30:54 +00:00
func List ( f fs . Fs , w io . Writer ) error {
return ListFn ( f , func ( o fs . Object ) {
2015-02-28 15:30:40 +00:00
syncFprintf ( w , "%9d %s\n" , o . Size ( ) , o . Remote ( ) )
2014-07-12 11:09:20 +00:00
} )
}
2015-09-22 17:47:16 +00:00
// ListLong lists the Fs to the supplied writer
2014-07-12 11:09:20 +00:00
//
2015-11-24 16:54:12 +00:00
// Shows size, mod time and path - obeys includes and excludes
2014-07-12 11:09:20 +00:00
//
// Lists in parallel which may get them out of order
2018-01-12 16:30:54 +00:00
func ListLong ( f fs . Fs , w io . Writer ) error {
return ListFn ( f , func ( o fs . Object ) {
accounting . Stats . Checking ( o . Remote ( ) )
2014-07-12 11:09:20 +00:00
modTime := o . ModTime ( )
2018-01-12 16:30:54 +00:00
accounting . Stats . DoneChecking ( o . Remote ( ) )
2015-09-22 18:04:12 +00:00
syncFprintf ( w , "%9d %s %s\n" , o . Size ( ) , modTime . Local ( ) . Format ( "2006-01-02 15:04:05.000000000" ) , o . Remote ( ) )
2014-07-12 11:09:20 +00:00
} )
}
2015-09-22 17:47:16 +00:00
// Md5sum list the Fs to the supplied writer
//
// Produces the same output as the md5sum command - obeys includes and
// excludes
//
// Lists in parallel which may get them out of order
func Md5sum(f fs.Fs, w io.Writer) error {
	return hashLister(hash.MD5, f, w)
}
// Sha1sum list the Fs to the supplied writer
//
// Produces the same output as the sha1sum command - obeys includes
// and excludes
//
// Lists in parallel which may get them out of order
func Sha1sum(f fs.Fs, w io.Writer) error {
	return hashLister(hash.SHA1, f, w)
}
2017-05-26 14:09:31 +00:00
// DropboxHashSum list the Fs to the supplied writer using the
// Dropbox hash - obeys includes and excludes
//
// Lists in parallel which may get them out of order
func DropboxHashSum(f fs.Fs, w io.Writer) error {
	return hashLister(hash.Dropbox, f, w)
}
2018-01-06 17:53:37 +00:00
// hashSum returns the human readable hash for ht passed in. This may
// be UNSUPPORTED or ERROR.
2018-01-12 16:30:54 +00:00
func hashSum ( ht hash . Type , o fs . Object ) string {
accounting . Stats . Checking ( o . Remote ( ) )
2018-01-06 17:53:37 +00:00
sum , err := o . Hash ( ht )
2018-01-12 16:30:54 +00:00
accounting . Stats . DoneChecking ( o . Remote ( ) )
2018-01-18 20:27:52 +00:00
if err == hash . ErrUnsupported {
2018-01-06 17:53:37 +00:00
sum = "UNSUPPORTED"
} else if err != nil {
2018-01-12 16:30:54 +00:00
fs . Debugf ( o , "Failed to read %v: %v" , ht , err )
2018-01-06 17:53:37 +00:00
sum = "ERROR"
}
return sum
}
2018-01-12 16:30:54 +00:00
func hashLister ( ht hash . Type , f fs . Fs , w io . Writer ) error {
return ListFn ( f , func ( o fs . Object ) {
2018-01-06 17:53:37 +00:00
sum := hashSum ( ht , o )
2018-01-18 20:27:52 +00:00
syncFprintf ( w , "%*s %s\n" , hash . Width [ ht ] , sum , o . Remote ( ) )
2014-07-12 11:09:20 +00:00
} )
}
2015-10-02 18:48:48 +00:00
// Count counts the objects and their sizes in the Fs
2015-11-24 16:54:12 +00:00
//
// Obeys includes and excludes
2018-01-12 16:30:54 +00:00
func Count ( f fs . Fs ) ( objects int64 , size int64 , err error ) {
err = ListFn ( f , func ( o fs . Object ) {
2015-10-02 18:48:48 +00:00
atomic . AddInt64 ( & objects , 1 )
atomic . AddInt64 ( & size , o . Size ( ) )
} )
return
}
2017-02-24 22:51:01 +00:00
// ConfigMaxDepth returns the depth to use for a recursive or non recursive listing.
func ConfigMaxDepth ( recursive bool ) int {
2018-01-12 16:30:54 +00:00
depth := fs . Config . MaxDepth
2017-02-24 22:51:01 +00:00
if ! recursive && depth < 0 {
depth = 1
}
return depth
}
2015-09-22 17:47:16 +00:00
// ListDir lists the directories/buckets/containers in the Fs to the supplied writer
2018-01-12 16:30:54 +00:00
func ListDir ( f fs . Fs , w io . Writer ) error {
return walk . Walk ( f , "" , false , ConfigMaxDepth ( false ) , func ( dirPath string , entries fs . DirEntries , err error ) error {
2016-04-21 19:06:21 +00:00
if err != nil {
2017-02-24 22:51:01 +00:00
// FIXME count errors and carry on for listing
return err
2016-04-21 19:06:21 +00:00
}
2018-01-12 16:30:54 +00:00
entries . ForDir ( func ( dir fs . Directory ) {
2017-02-24 22:51:01 +00:00
if dir != nil {
2017-06-30 12:37:29 +00:00
syncFprintf ( w , "%12d %13s %9d %s\n" , dir . Size ( ) , dir . ModTime ( ) . Format ( "2006-01-02 15:04:05" ) , dir . Items ( ) , dir . Remote ( ) )
2017-02-24 22:51:01 +00:00
}
} )
return nil
} )
2014-03-28 17:56:04 +00:00
}
2015-09-22 17:47:16 +00:00
// Mkdir makes a destination directory or container
2018-01-12 16:30:54 +00:00
func Mkdir ( f fs . Fs , dir string ) error {
if fs . Config . DryRun {
fs . Logf ( fs . LogDirName ( f , dir ) , "Not making directory as dry run is set" )
2016-02-28 19:47:22 +00:00
return nil
}
2018-01-12 16:30:54 +00:00
fs . Debugf ( fs . LogDirName ( f , dir ) , "Making directory" )
2016-11-25 21:52:43 +00:00
err := f . Mkdir ( dir )
2014-03-28 17:56:04 +00:00
if err != nil {
2018-01-12 16:30:54 +00:00
fs . CountError ( err )
2014-03-28 17:56:04 +00:00
return err
}
return nil
}
2016-02-25 20:05:34 +00:00
// TryRmdir removes a container but not if not empty. It doesn't
// count errors but may return one.
2018-01-12 16:30:54 +00:00
func TryRmdir ( f fs . Fs , dir string ) error {
if fs . Config . DryRun {
fs . Logf ( fs . LogDirName ( f , dir ) , "Not deleting as dry run is set" )
2016-02-25 20:05:34 +00:00
return nil
2014-03-28 17:56:04 +00:00
}
2018-01-12 16:30:54 +00:00
fs . Debugf ( fs . LogDirName ( f , dir ) , "Removing directory" )
2016-11-25 21:52:43 +00:00
return f . Rmdir ( dir )
2016-02-25 20:05:34 +00:00
}
// Rmdir removes a container but not if not empty
2018-01-12 16:30:54 +00:00
func Rmdir ( f fs . Fs , dir string ) error {
2016-11-25 21:52:43 +00:00
err := TryRmdir ( f , dir )
2016-02-25 20:05:34 +00:00
if err != nil {
2018-01-12 16:30:54 +00:00
fs . CountError ( err )
2016-02-25 20:05:34 +00:00
return err
}
return err
2014-03-28 17:56:04 +00:00
}
2017-12-07 12:25:56 +00:00
// Purge removes a directory and all of its contents
//
// Uses the backend's Purge feature when available for the root,
// otherwise falls back to deleting all the files then removing the
// empty directories.
func Purge(f fs.Fs, dir string) error {
	doFallbackPurge := true
	var err error
	if dir == "" {
		// FIXME change the Purge interface so it takes a dir - see #1891
		if doPurge := f.Features().Purge; doPurge != nil {
			doFallbackPurge = false
			if fs.Config.DryRun {
				fs.Logf(f, "Not purging as --dry-run set")
			} else {
				err = doPurge()
				if err == fs.ErrorCantPurge {
					// backend declined - fall back to delete + rmdirs
					doFallbackPurge = true
				}
			}
		}
	}
	if doFallbackPurge {
		// DeleteFiles and Rmdir observe --dry-run
		err = DeleteFiles(listToChan(f, dir))
		if err != nil {
			return err
		}
		err = Rmdirs(f, "", false)
	}
	if err != nil {
		fs.CountError(err)
		return err
	}
	return nil
}
2015-12-02 22:25:32 +00:00
// Delete removes all the contents of a container. Unlike Purge, it
// obeys includes and excludes.
2018-01-12 16:30:54 +00:00
func Delete ( f fs . Fs ) error {
delete := make ( fs . ObjectsChan , fs . Config . Transfers )
2016-06-25 13:27:44 +00:00
delErr := make ( chan error , 1 )
2015-12-02 22:25:32 +00:00
go func ( ) {
2016-06-25 13:27:44 +00:00
delErr <- DeleteFiles ( delete )
2015-12-02 22:25:32 +00:00
} ( )
2018-01-12 16:30:54 +00:00
err := ListFn ( f , func ( o fs . Object ) {
2015-12-02 22:25:32 +00:00
delete <- o
} )
close ( delete )
2016-06-25 13:27:44 +00:00
delError := <- delErr
if err == nil {
err = delError
}
2015-12-02 22:25:32 +00:00
return err
}
2016-01-31 12:58:41 +00:00
2016-03-05 16:10:51 +00:00
// dedupeRename renames the objs slice to different names
2018-01-12 16:30:54 +00:00
func dedupeRename ( remote string , objs [ ] fs . Object ) {
2016-03-05 16:10:51 +00:00
f := objs [ 0 ] . Fs ( )
2017-01-13 17:21:47 +00:00
doMove := f . Features ( ) . Move
if doMove == nil {
2016-03-05 16:10:51 +00:00
log . Fatalf ( "Fs %v doesn't support Move" , f )
}
ext := path . Ext ( remote )
base := remote [ : len ( remote ) - len ( ext ) ]
for i , o := range objs {
newName := fmt . Sprintf ( "%s-%d%s" , base , i + 1 , ext )
2018-01-12 16:30:54 +00:00
if ! fs . Config . DryRun {
2017-01-13 17:21:47 +00:00
newObj , err := doMove ( o , newName )
2016-03-05 16:10:51 +00:00
if err != nil {
2018-01-12 16:30:54 +00:00
fs . CountError ( err )
fs . Errorf ( o , "Failed to rename: %v" , err )
2016-03-05 16:10:51 +00:00
continue
}
2018-01-12 16:30:54 +00:00
fs . Infof ( newObj , "renamed from: %v" , o )
2016-03-05 16:10:51 +00:00
} else {
2018-01-12 16:30:54 +00:00
fs . Logf ( remote , "Not renaming to %q as --dry-run" , newName )
2016-03-05 16:10:51 +00:00
}
}
}
// dedupeDeleteAllButOne deletes all but the one in keep
2018-01-12 16:30:54 +00:00
func dedupeDeleteAllButOne ( keep int , remote string , objs [ ] fs . Object ) {
2016-03-05 16:10:51 +00:00
for i , o := range objs {
if i == keep {
continue
}
2016-06-25 13:27:44 +00:00
_ = DeleteFile ( o )
2016-03-05 16:10:51 +00:00
}
2018-01-12 16:30:54 +00:00
fs . Logf ( remote , "Deleted %d extra copies" , len ( objs ) - 1 )
2016-03-05 16:10:51 +00:00
}
// dedupeDeleteIdentical deletes all but one of identical (by hash) copies
2018-01-12 16:30:54 +00:00
func dedupeDeleteIdentical ( remote string , objs [ ] fs . Object ) [ ] fs . Object {
2016-03-05 16:10:51 +00:00
// See how many of these duplicates are identical
2018-01-12 16:30:54 +00:00
byHash := make ( map [ string ] [ ] fs . Object , len ( objs ) )
2016-03-05 16:10:51 +00:00
for _ , o := range objs {
2018-01-18 20:27:52 +00:00
md5sum , err := o . Hash ( hash . MD5 )
2016-03-05 16:10:51 +00:00
if err == nil {
byHash [ md5sum ] = append ( byHash [ md5sum ] , o )
}
2016-01-31 12:58:41 +00:00
}
2016-03-05 16:10:51 +00:00
// Delete identical duplicates, refilling obj with the ones remaining
objs = nil
for md5sum , hashObjs := range byHash {
if len ( hashObjs ) > 1 {
2018-01-12 16:30:54 +00:00
fs . Logf ( remote , "Deleting %d/%d identical duplicates (md5sum %q)" , len ( hashObjs ) - 1 , len ( hashObjs ) , md5sum )
2016-03-05 16:10:51 +00:00
for _ , o := range hashObjs [ 1 : ] {
2016-06-25 13:27:44 +00:00
_ = DeleteFile ( o )
2016-03-05 16:10:51 +00:00
}
}
objs = append ( objs , hashObjs [ 0 ] )
}
return objs
}
// dedupeInteractive interactively dedupes the slice of objects
//
// Prints the duplicates then asks the user whether to skip, keep one
// (deleting the rest) or rename them all to unique names.
func dedupeInteractive(remote string, objs []fs.Object) {
	fmt.Printf("%s: %d duplicates remain\n", remote, len(objs))
	for i, o := range objs {
		md5sum, err := o.Hash(hash.MD5)
		if err != nil {
			// show the error in place of the hash
			md5sum = err.Error()
		}
		fmt.Printf(" %d: %12d bytes, %s, md5sum %32s\n", i+1, o.Size(), o.ModTime().Format("2006-01-02 15:04:05.000000000"), md5sum)
	}
	switch config.Command([]string{"sSkip and do nothing", "kKeep just one (choose which in next step)", "rRename all to be different (by changing file.jpg to file-1.jpg)"}) {
	case 's':
		// skip - do nothing
	case 'k':
		keep := config.ChooseNumber("Enter the number of the file to keep", 1, len(objs))
		// user numbers are 1-based
		dedupeDeleteAllButOne(keep-1, remote, objs)
	case 'r':
		dedupeRename(remote, objs)
	}
}
2018-01-12 16:30:54 +00:00
// objectsSortedByModTime sorts a slice of Objects into ascending
// modification time order (implements sort.Interface).
type objectsSortedByModTime []fs.Object

func (objs objectsSortedByModTime) Len() int      { return len(objs) }
func (objs objectsSortedByModTime) Swap(i, j int) { objs[i], objs[j] = objs[j], objs[i] }
func (objs objectsSortedByModTime) Less(i, j int) bool {
	return objs[i].ModTime().Before(objs[j].ModTime())
}
// DeduplicateMode is how the dedupe command chooses what to do
type DeduplicateMode int

// Deduplicate modes
const (
	DeduplicateInteractive DeduplicateMode = iota // interactively ask the user
	DeduplicateSkip                               // skip all conflicts
	DeduplicateFirst                              // choose the first object
	DeduplicateNewest                             // choose the newest object
	DeduplicateOldest                             // choose the oldest object
	DeduplicateRename                             // rename the objects
)
2016-08-03 16:35:29 +00:00
// String converts a DeduplicateMode into the string accepted by Set;
// unrecognised values render as "unknown".
func (x DeduplicateMode) String() string {
	switch x {
	case DeduplicateInteractive:
		return "interactive"
	case DeduplicateSkip:
		return "skip"
	case DeduplicateFirst:
		return "first"
	case DeduplicateNewest:
		return "newest"
	case DeduplicateOldest:
		return "oldest"
	case DeduplicateRename:
		return "rename"
	}
	return "unknown"
}
2016-08-03 16:35:29 +00:00
// Set a DeduplicateMode from a string - matching is case-insensitive.
// Part of the pflag.Value implementation.
func (x *DeduplicateMode) Set(s string) error {
	switch strings.ToLower(s) {
	case "interactive":
		*x = DeduplicateInteractive
	case "skip":
		*x = DeduplicateSkip
	case "first":
		*x = DeduplicateFirst
	case "newest":
		*x = DeduplicateNewest
	case "oldest":
		*x = DeduplicateOldest
	case "rename":
		*x = DeduplicateRename
	default:
		return errors.Errorf("Unknown mode for dedupe %q.", s)
	}
	return nil
}
// Type of the value - part of the pflag.Value implementation.
func (x *DeduplicateMode) Type() string {
	return "string"
}
// Check DeduplicateMode satisfies the pflag.Value interface at compile time
var _ pflag.Value = (*DeduplicateMode)(nil)
2017-08-02 20:34:22 +00:00
// dedupeFindDuplicateDirs scans f for duplicate directories
2018-01-12 16:30:54 +00:00
func dedupeFindDuplicateDirs ( f fs . Fs ) ( [ ] [ ] fs . Directory , error ) {
duplicateDirs := [ ] [ ] fs . Directory { }
err := walk . Walk ( f , "" , true , fs . Config . MaxDepth , func ( dirPath string , entries fs . DirEntries , err error ) error {
2017-08-02 20:34:22 +00:00
if err != nil {
return err
}
2018-01-12 16:30:54 +00:00
dirs := map [ string ] [ ] fs . Directory { }
entries . ForDir ( func ( d fs . Directory ) {
2017-08-02 20:34:22 +00:00
dirs [ d . Remote ( ) ] = append ( dirs [ d . Remote ( ) ] , d )
} )
for _ , ds := range dirs {
if len ( ds ) > 1 {
duplicateDirs = append ( duplicateDirs , ds )
}
}
return nil
} )
if err != nil {
return nil , errors . Wrap ( err , "find duplicate dirs" )
}
return duplicateDirs , nil
}
// dedupeMergeDuplicateDirs merges all the duplicate directories found
2018-01-12 16:30:54 +00:00
func dedupeMergeDuplicateDirs ( f fs . Fs , duplicateDirs [ ] [ ] fs . Directory ) error {
2017-08-02 20:34:22 +00:00
mergeDirs := f . Features ( ) . MergeDirs
if mergeDirs == nil {
return errors . Errorf ( "%v: can't merge directories" , f )
}
dirCacheFlush := f . Features ( ) . DirCacheFlush
if dirCacheFlush == nil {
return errors . Errorf ( "%v: can't flush dir cache" , f )
}
for _ , dirs := range duplicateDirs {
2018-01-12 16:30:54 +00:00
if ! fs . Config . DryRun {
fs . Infof ( dirs [ 0 ] , "Merging contents of duplicate directories" )
2017-08-02 20:34:22 +00:00
err := mergeDirs ( dirs )
if err != nil {
return errors . Wrap ( err , "merge duplicate dirs" )
}
} else {
2018-01-12 16:30:54 +00:00
fs . Infof ( dirs [ 0 ] , "NOT Merging contents of duplicate directories as --dry-run" )
2017-08-02 20:34:22 +00:00
}
}
dirCacheFlush ( )
return nil
}
2016-03-05 16:10:51 +00:00
// Deduplicate interactively finds duplicate files and offers to
// delete all but one or rename them to be different. Only useful with
// Google Drive which can have duplicate file names.
func Deduplicate(f fs.Fs, mode DeduplicateMode) error {
	fs.Infof(f, "Looking for duplicates using %v mode.", mode)

	// Find duplicate directories first and fix them - repeat
	// until all fixed
	for {
		duplicateDirs, err := dedupeFindDuplicateDirs(f)
		if err != nil {
			return err
		}
		if len(duplicateDirs) == 0 {
			break
		}
		err = dedupeMergeDuplicateDirs(f, duplicateDirs)
		if err != nil {
			return err
		}
		if fs.Config.DryRun {
			// a dry run merges nothing, so further passes would loop forever
			break
		}
	}

	// Now find duplicate files: map remote name -> all objects with that name
	files := map[string][]fs.Object{}
	err := walk.Walk(f, "", true, fs.Config.MaxDepth, func(dirPath string, entries fs.DirEntries, err error) error {
		if err != nil {
			return err
		}
		entries.ForObject(func(o fs.Object) {
			remote := o.Remote()
			files[remote] = append(files[remote], o)
		})
		return nil
	})
	if err != nil {
		return err
	}
	for remote, objs := range files {
		if len(objs) > 1 {
			fs.Logf(remote, "Found %d duplicates - deleting identical copies", len(objs))
			// remove byte-identical copies first, then resolve the rest by mode
			objs = dedupeDeleteIdentical(remote, objs)
			if len(objs) <= 1 {
				fs.Logf(remote, "All duplicates removed")
				continue
			}
			switch mode {
			case DeduplicateInteractive:
				dedupeInteractive(remote, objs)
			case DeduplicateFirst:
				dedupeDeleteAllButOne(0, remote, objs)
			case DeduplicateNewest:
				sort.Sort(objectsSortedByModTime(objs)) // sort oldest first
				dedupeDeleteAllButOne(len(objs)-1, remote, objs)
			case DeduplicateOldest:
				sort.Sort(objectsSortedByModTime(objs)) // sort oldest first
				dedupeDeleteAllButOne(0, remote, objs)
			case DeduplicateRename:
				dedupeRename(remote, objs)
			case DeduplicateSkip:
				// skip
			default:
				//skip
			}
		}
	}
	return nil
}
2016-04-21 19:06:21 +00:00
2017-02-24 22:51:01 +00:00
// listToChan will transfer all objects in the listing to the output
//
// If an error occurs, the error will be logged, and it will close the
// channel.
//
// If the error was ErrorDirNotFound then it will be ignored
func listToChan(f fs.Fs, dir string) fs.ObjectsChan {
	o := make(fs.ObjectsChan, fs.Config.Checkers)
	go func() {
		// close the channel when the walk finishes so receivers terminate
		defer close(o)
		// walk errors are handled inside the callback, so the return is ignored
		_ = walk.Walk(f, dir, true, fs.Config.MaxDepth, func(dirPath string, entries fs.DirEntries, err error) error {
			if err != nil {
				if err == fs.ErrorDirNotFound {
					return nil
				}
				err = errors.Errorf("Failed to list: %v", err)
				fs.CountError(err)
				fs.Errorf(nil, "%v", err)
				return nil
			}
			entries.ForObject(func(obj fs.Object) {
				o <- obj
			})
			return nil
		})
	}()
	return o
}
2016-07-01 15:35:36 +00:00
// CleanUp removes the trash for the Fs
2018-01-12 16:30:54 +00:00
func CleanUp ( f fs . Fs ) error {
2017-01-13 17:21:47 +00:00
doCleanUp := f . Features ( ) . CleanUp
if doCleanUp == nil {
2016-07-01 15:35:36 +00:00
return errors . Errorf ( "%v doesn't support cleanup" , f )
}
2018-01-12 16:30:54 +00:00
if fs . Config . DryRun {
fs . Logf ( f , "Not running cleanup as --dry-run set" )
2016-07-02 15:58:50 +00:00
return nil
}
2017-01-13 17:21:47 +00:00
return doCleanUp ( )
2016-07-01 15:35:36 +00:00
}
2016-08-18 21:43:02 +00:00
2017-02-09 11:25:36 +00:00
// readCloser wraps a Reader and a Closer together into a ReadCloser,
// letting a transformed stream be closed via the original source.
type readCloser struct {
	io.Reader
	io.Closer
}
2016-08-18 21:43:02 +00:00
// Cat any files to the io.Writer
//
// if offset == 0 it will be ignored
// if offset > 0 then the file will be seeked to that offset
// if offset < 0 then the file will be seeked that far from the end
//
// if count < 0 then it will be ignored
// if count >= 0 then only that many characters will be output
func Cat(f fs.Fs, w io.Writer, offset, count int64) error {
	// mu serialises writes to w as ListFn lists in parallel
	var mu sync.Mutex
	return ListFn(f, func(o fs.Object) {
		var err error
		accounting.Stats.Transferring(o.Remote())
		defer func() {
			accounting.Stats.DoneTransferring(o.Remote(), err == nil)
		}()
		size := o.Size()
		thisOffset := offset
		if thisOffset < 0 {
			// a negative offset counts back from the end of the file
			thisOffset += size
		}
		// size remaining is now reduced by thisOffset
		size -= thisOffset
		var options []fs.OpenOption
		if thisOffset > 0 {
			options = append(options, &fs.SeekOption{Offset: thisOffset})
		}
		in, err := o.Open(options...)
		if err != nil {
			fs.CountError(err)
			fs.Errorf(o, "Failed to open: %v", err)
			return
		}
		if count >= 0 {
			// limit the read but keep the Close of the underlying stream
			in = &readCloser{Reader: &io.LimitedReader{R: in, N: count}, Closer: in}
			// reduce remaining size to count
			if size > count {
				size = count
			}
		}
		in = accounting.NewAccountSizeName(in, size, o.Remote()).WithBuffer() // account and buffer the transfer
		defer func() {
			err = in.Close()
			if err != nil {
				fs.CountError(err)
				fs.Errorf(o, "Failed to close: %v", err)
			}
		}()
		// take the lock just before we output stuff, so at the last possible moment
		mu.Lock()
		defer mu.Unlock()
		_, err = io.Copy(w, in)
		if err != nil {
			fs.CountError(err)
			fs.Errorf(o, "Failed to send to output: %v", err)
		}
	})
}
2016-11-27 11:49:31 +00:00
2017-08-03 19:42:35 +00:00
// Rcat reads data from the Reader until EOF and uploads it to a file on remote
2018-01-12 16:30:54 +00:00
func Rcat ( fdst fs . Fs , dstFileName string , in io . ReadCloser , modTime time . Time ) ( dst fs . Object , err error ) {
accounting . Stats . Transferring ( dstFileName )
in = accounting . NewAccountSizeName ( in , - 1 , dstFileName ) . WithBuffer ( )
2017-08-03 19:42:35 +00:00
defer func ( ) {
2018-01-12 16:30:54 +00:00
accounting . Stats . DoneTransferring ( dstFileName , err == nil )
2017-11-11 18:43:00 +00:00
if otherErr := in . Close ( ) ; otherErr != nil {
2018-01-12 16:30:54 +00:00
fs . Debugf ( fdst , "Rcat: failed to close source: %v" , err )
2017-09-11 06:25:34 +00:00
}
2017-08-03 19:42:35 +00:00
} ( )
2018-01-12 16:30:54 +00:00
hashOption := & fs . HashesOption { Hashes : fdst . Hashes ( ) }
hash , err := hash . NewMultiHasherTypes ( fdst . Hashes ( ) )
2017-09-11 06:26:53 +00:00
if err != nil {
2017-09-16 20:49:08 +00:00
return nil , err
2017-09-11 06:26:53 +00:00
}
2018-01-12 16:30:54 +00:00
readCounter := readers . NewCountingReader ( in )
2017-09-11 06:26:53 +00:00
trackingIn := io . TeeReader ( readCounter , hash )
2017-09-11 06:25:34 +00:00
2018-01-12 16:30:54 +00:00
compare := func ( dst fs . Object ) error {
src := object . NewStaticObjectInfo ( dstFileName , modTime , int64 ( readCounter . BytesRead ( ) ) , false , hash . Sums ( ) , fdst )
2017-09-11 06:26:53 +00:00
if ! Equal ( src , dst ) {
err = errors . Errorf ( "corrupted on transfer" )
2018-01-12 16:30:54 +00:00
fs . CountError ( err )
fs . Errorf ( dst , "%v" , err )
2017-09-11 06:26:53 +00:00
return err
2017-09-11 06:25:34 +00:00
}
2017-09-11 06:26:53 +00:00
return nil
}
// check if file small enough for direct upload
2018-01-12 16:30:54 +00:00
buf := make ( [ ] byte , fs . Config . StreamingUploadCutoff )
2017-09-11 06:26:53 +00:00
if n , err := io . ReadFull ( trackingIn , buf ) ; err == io . EOF || err == io . ErrUnexpectedEOF {
2018-01-12 16:30:54 +00:00
fs . Debugf ( fdst , "File to upload is small (%d bytes), uploading instead of streaming" , n )
src := object . NewMemoryObject ( dstFileName , modTime , buf [ : n ] )
2017-12-01 15:16:11 +00:00
return Copy ( fdst , nil , dstFileName , src )
2017-09-11 06:25:34 +00:00
}
2017-11-11 18:43:00 +00:00
// Make a new ReadCloser with the bits we've already read
in = & readCloser {
Reader : io . MultiReader ( bytes . NewReader ( buf ) , trackingIn ) ,
Closer : in ,
}
2017-09-11 06:25:34 +00:00
2017-08-03 19:42:35 +00:00
fStreamTo := fdst
canStream := fdst . Features ( ) . PutStream != nil
if ! canStream {
2018-01-12 16:30:54 +00:00
fs . Debugf ( fdst , "Target remote doesn't support streaming uploads, creating temporary local FS to spool file" )
tmpLocalFs , err := fs . TemporaryLocalFs ( )
2017-08-03 19:42:35 +00:00
if err != nil {
2017-09-16 20:49:08 +00:00
return nil , errors . Wrap ( err , "Failed to create temporary local FS to spool file" )
2017-08-03 19:42:35 +00:00
}
defer func ( ) {
2017-12-07 12:25:56 +00:00
err := Purge ( tmpLocalFs , "" )
2017-08-03 19:42:35 +00:00
if err != nil {
2018-01-12 16:30:54 +00:00
fs . Infof ( tmpLocalFs , "Failed to cleanup temporary FS: %v" , err )
2017-08-03 19:42:35 +00:00
}
} ( )
fStreamTo = tmpLocalFs
}
2018-01-12 16:30:54 +00:00
if fs . Config . DryRun {
fs . Logf ( "stdin" , "Not uploading as --dry-run" )
2017-08-03 19:42:35 +00:00
// prevents "broken pipe" errors
_ , err = io . Copy ( ioutil . Discard , in )
2017-09-16 20:49:08 +00:00
return nil , err
2017-08-03 19:42:35 +00:00
}
2018-01-12 16:30:54 +00:00
objInfo := object . NewStaticObjectInfo ( dstFileName , modTime , - 1 , false , nil , nil )
2017-09-16 20:49:08 +00:00
if dst , err = fStreamTo . Features ( ) . PutStream ( in , objInfo , hashOption ) ; err != nil {
return dst , err
2017-08-03 19:42:35 +00:00
}
2017-09-16 20:49:08 +00:00
if err = compare ( dst ) ; err != nil {
return dst , err
2017-09-11 06:26:53 +00:00
}
if ! canStream {
2017-12-01 15:16:11 +00:00
// copy dst (which is the local object we have just streamed to) to the remote
2017-12-01 15:31:20 +00:00
return Copy ( fdst , nil , dstFileName , dst )
2017-09-11 06:26:53 +00:00
}
2017-09-16 20:49:08 +00:00
return dst , nil
2017-08-03 19:42:35 +00:00
}
2016-11-27 11:49:31 +00:00
// Rmdirs removes any empty directories (or directories only
// containing empty directories) under f, including f.
func Rmdirs(f fs.Fs, dir string, leaveRoot bool) error {
	// dirEmpty maps directory path -> whether it is (so far) empty;
	// the root starts "empty" unless leaveRoot protects it
	dirEmpty := make(map[string]bool)
	dirEmpty[""] = !leaveRoot
	err := walk.Walk(f, dir, true, fs.Config.MaxDepth, func(dirPath string, entries fs.DirEntries, err error) error {
		if err != nil {
			fs.CountError(err)
			fs.Errorf(f, "Failed to list %q: %v", dirPath, err)
			return nil
		}
		for _, entry := range entries {
			switch x := entry.(type) {
			case fs.Directory:
				// add a new directory as empty
				dir := x.Remote()
				_, found := dirEmpty[dir]
				if !found {
					dirEmpty[dir] = true
				}
			case fs.Object:
				// mark the parents of the file as being non-empty
				dir := x.Remote()
				for dir != "" {
					dir = path.Dir(dir)
					if dir == "." || dir == "/" {
						dir = ""
					}
					empty, found := dirEmpty[dir]
					// End if we reach a directory which is non-empty
					if found && !empty {
						break
					}
					dirEmpty[dir] = false
				}
			}
		}
		return nil
	})
	if err != nil {
		return errors.Wrap(err, "failed to rmdirs")
	}
	// Now delete the empty directories, starting from the longest path
	var toDelete []string
	for dir, empty := range dirEmpty {
		if empty {
			toDelete = append(toDelete, dir)
		}
	}
	// sorted order guarantees children sort after parents, so the
	// reverse iteration below removes deepest directories first
	sort.Strings(toDelete)
	for i := len(toDelete) - 1; i >= 0; i-- {
		dir := toDelete[i]
		err := TryRmdir(f, dir)
		if err != nil {
			fs.CountError(err)
			fs.Errorf(dir, "Failed to rmdir: %v", err)
			return err
		}
	}
	return nil
}
2016-10-23 16:34:17 +00:00
2018-01-12 16:30:54 +00:00
// NeedTransfer checks to see if src needs to be copied to dst using
// the current config.
//
// Returns a flag which indicates whether the file needs to be
// transferred or not.
func NeedTransfer(dst, src fs.Object) bool {
	// A missing destination always needs a transfer.
	if dst == nil {
		fs.Debugf(src, "Couldn't find file - need to transfer")
		return true
	}
	// --ignore-existing: never overwrite an existing destination.
	if fs.Config.IgnoreExisting {
		fs.Debugf(src, "Destination exists, skipping")
		return false
	}
	// --ignore-times: always transfer.
	if fs.Config.IgnoreTimes {
		fs.Debugf(src, "Transferring unconditionally as --ignore-times is in use")
		return true
	}
	// Without --update, fall back to the generic equality check.
	if !fs.Config.UpdateOlder {
		if Equal(src, dst) {
			fs.Debugf(src, "Unchanged skipping")
			return false
		}
		return true
	}
	// --update is in effect: skip if dst is newer than src.
	window := fs.Config.ModifyWindow
	if window == fs.ModTimeNotSupported {
		// No mutually agreed precision - use 1 second as a safe
		// default for the resolution of upload times.
		window = time.Second
	}
	dt := dst.ModTime().Sub(src.ModTime())
	if dt >= window {
		fs.Debugf(src, "Destination is newer than source, skipping")
		return false
	}
	if dt <= -window {
		fs.Debugf(src, "Destination is older than source, transferring")
		return true
	}
	// Mod times agree to within the window - fall back to sizes.
	if src.Size() == dst.Size() {
		fs.Debugf(src, "Destination mod time is within %v of source and sizes identical, skipping", window)
		return false
	}
	fs.Debugf(src, "Destination mod time is within %v of source but sizes differ, transferring", window)
	return true
}
2016-10-23 16:34:17 +00:00
// moveOrCopyFile moves or copies a single file possibly to a new name
2018-01-12 16:30:54 +00:00
func moveOrCopyFile ( fdst fs . Fs , fsrc fs . Fs , dstFileName string , srcFileName string , cp bool ) ( err error ) {
2017-10-12 19:45:36 +00:00
dstFilePath := path . Join ( fdst . Root ( ) , dstFileName )
srcFilePath := path . Join ( fsrc . Root ( ) , srcFileName )
if fdst . Name ( ) == fsrc . Name ( ) && dstFilePath == srcFilePath {
2018-01-12 16:30:54 +00:00
fs . Debugf ( fdst , "don't need to copy/move %s, it is already at target location" , dstFileName )
2017-05-27 15:30:26 +00:00
return nil
}
2016-10-23 16:34:17 +00:00
// Choose operations
Op := Move
if cp {
Op = Copy
}
// Find src object
srcObj , err := fsrc . NewObject ( srcFileName )
if err != nil {
return err
}
// Find dst object if it exists
dstObj , err := fdst . NewObject ( dstFileName )
2018-01-12 16:30:54 +00:00
if err == fs . ErrorObjectNotFound {
2016-10-23 16:34:17 +00:00
dstObj = nil
} else if err != nil {
return err
}
if NeedTransfer ( dstObj , srcObj ) {
2018-01-12 16:30:54 +00:00
accounting . Stats . Transferring ( srcFileName )
2017-12-01 15:31:20 +00:00
_ , err = Op ( fdst , dstObj , dstFileName , srcObj )
2018-01-12 16:30:54 +00:00
accounting . Stats . DoneTransferring ( srcFileName , err == nil )
2017-06-07 12:02:21 +00:00
} else {
2018-01-12 16:30:54 +00:00
accounting . Stats . Checking ( srcFileName )
2017-06-07 12:02:21 +00:00
if ! cp {
err = DeleteFile ( srcObj )
}
2018-01-12 16:30:54 +00:00
defer accounting . Stats . DoneChecking ( srcFileName )
2016-10-23 16:34:17 +00:00
}
2017-06-07 12:02:21 +00:00
return err
2016-10-23 16:34:17 +00:00
}
// MoveFile moves a single file possibly to a new name
2018-01-12 16:30:54 +00:00
func MoveFile ( fdst fs . Fs , fsrc fs . Fs , dstFileName string , srcFileName string ) ( err error ) {
2016-10-23 16:34:17 +00:00
return moveOrCopyFile ( fdst , fsrc , dstFileName , srcFileName , false )
}
// CopyFile copies a single file possibly to a new name
func CopyFile(fdst fs.Fs, fsrc fs.Fs, dstFileName string, srcFileName string) (err error) {
	return moveOrCopyFile(fdst, fsrc, dstFileName, srcFileName, true)
}
2018-01-06 14:39:31 +00:00
// ListFormat defines files information print format
type ListFormat struct {
	separator string          // inserted between the output of successive columns
	dirSlash  bool            // if set, append "/" to directory names
	output    []func() string // ordered functions each producing one piece of output
	entry     fs.DirEntry     // the entry currently being formatted
	hash      bool            // NOTE(review): not read or written anywhere in this chunk - presumably flags hash output; confirm against callers
}
// SetSeparator changes separator in struct
func (l *ListFormat) SetSeparator(sep string) {
	l.separator = sep
}
// SetDirSlash defines if slash should be printed
func (l *ListFormat) SetDirSlash(slash bool) {
	l.dirSlash = slash
}
// SetOutput sets functions used to create files information
func (l *ListFormat) SetOutput(fns []func() string) {
	l.output = fns
}
// AddModTime adds file's Mod Time to output
func (l *ListFormat) AddModTime() {
	l.AppendOutput(func() string {
		// Fixed human-readable layout, second resolution.
		return l.entry.ModTime().Format("2006-01-02 15:04:05")
	})
}
// AddSize adds file's size to output
func ( l * ListFormat ) AddSize ( ) {
2018-01-06 17:53:37 +00:00
l . AppendOutput ( func ( ) string {
return strconv . FormatInt ( l . entry . Size ( ) , 10 )
} )
2018-01-06 14:39:31 +00:00
}
// AddPath adds path to file to output
func ( l * ListFormat ) AddPath ( ) {
l . AppendOutput ( func ( ) string {
2018-01-12 16:30:54 +00:00
_ , isDir := l . entry . ( fs . Directory )
2018-01-06 14:39:31 +00:00
if isDir && l . dirSlash {
return l . entry . Remote ( ) + "/"
}
return l . entry . Remote ( )
} )
}
2018-01-06 17:53:37 +00:00
// AddHash adds the hash of the type given to the output
2018-01-12 16:30:54 +00:00
func ( l * ListFormat ) AddHash ( ht hash . Type ) {
2018-01-06 17:53:37 +00:00
l . AppendOutput ( func ( ) string {
2018-01-12 16:30:54 +00:00
o , ok := l . entry . ( fs . Object )
2018-01-06 17:53:37 +00:00
if ! ok {
return ""
}
return hashSum ( ht , o )
} )
}
2018-01-06 14:39:31 +00:00
// AppendOutput adds string generated by specific function to printed output
func (l *ListFormat) AppendOutput(fn func() string) {
	// Emit the separator between columns, but not before the first one.
	if len(l.output) != 0 {
		sep := func() string { return l.separator }
		l.output = append(l.output, sep)
	}
	l.output = append(l.output, fn)
}
// ListFormatted prints information about specific file in specific format
2018-01-12 16:30:54 +00:00
func ListFormatted ( entry * fs . DirEntry , list * ListFormat ) string {
2018-01-06 14:39:31 +00:00
list . entry = * entry
var out string
for _ , fun := range list . output {
out += fun ( )
}
return out
}