Redo error handling for sync/copy/move
* Factor sync/copy/move into its own file * Make fatal errors abort the sync * Make Copy return errors * Make Sync/Copy/Move return the last Copy error if there was one * Prioritise returning Fatal errors * NoRetry errors are returned if no other types of errors
This commit is contained in:
parent
28f4061892
commit
13591c7c00
4 changed files with 1129 additions and 1001 deletions
454
fs/operations.go
454
fs/operations.go
|
@ -198,11 +198,10 @@ func removeFailedCopy(dst Object) bool {
|
|||
// If dst is nil then the object must not exist already. If you do
|
||||
// call Copy() with dst nil on a pre-existing file then some filing
|
||||
// systems (eg Drive) may duplicate the file.
|
||||
func Copy(f Fs, dst, src Object) {
|
||||
func Copy(f Fs, dst, src Object) (err error) {
|
||||
maxTries := Config.LowLevelRetries
|
||||
tries := 0
|
||||
doUpdate := dst != nil
|
||||
var err error
|
||||
var actionTaken string
|
||||
for {
|
||||
// Try server side copy first - if has optional interface and
|
||||
|
@ -265,7 +264,7 @@ func Copy(f Fs, dst, src Object) {
|
|||
Stats.Error()
|
||||
ErrorLog(src, "Failed to copy: %v", err)
|
||||
removeFailedCopy(dst)
|
||||
return
|
||||
return err
|
||||
}
|
||||
|
||||
// Verify sizes are the same after transfer
|
||||
|
@ -274,7 +273,7 @@ func Copy(f Fs, dst, src Object) {
|
|||
err = errors.Errorf("corrupted on transfer: sizes differ %d vs %d", src.Size(), dst.Size())
|
||||
ErrorLog(dst, "%v", err)
|
||||
removeFailedCopy(dst)
|
||||
return
|
||||
return err
|
||||
}
|
||||
|
||||
// Verify hashes are the same after transfer - ignoring blank hashes
|
||||
|
@ -286,12 +285,14 @@ func Copy(f Fs, dst, src Object) {
|
|||
// Get common hash type
|
||||
hashType := common.GetOne()
|
||||
|
||||
srcSum, err := src.Hash(hashType)
|
||||
var srcSum string
|
||||
srcSum, err = src.Hash(hashType)
|
||||
if err != nil {
|
||||
Stats.Error()
|
||||
ErrorLog(src, "Failed to read src hash: %v", err)
|
||||
} else if srcSum != "" {
|
||||
dstSum, err := dst.Hash(hashType)
|
||||
var dstSum string
|
||||
dstSum, err = dst.Hash(hashType)
|
||||
if err != nil {
|
||||
Stats.Error()
|
||||
ErrorLog(dst, "Failed to read hash: %v", err)
|
||||
|
@ -300,134 +301,13 @@ func Copy(f Fs, dst, src Object) {
|
|||
err = errors.Errorf("corrupted on transfer: %v hash differ %q vs %q", hashType, srcSum, dstSum)
|
||||
ErrorLog(dst, "%v", err)
|
||||
removeFailedCopy(dst)
|
||||
return
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Debug(src, actionTaken)
|
||||
}
|
||||
|
||||
// Check to see if src needs to be copied to dst and if so puts it in out
|
||||
func checkOne(pair ObjectPair, out ObjectPairChan) {
|
||||
src, dst := pair.src, pair.dst
|
||||
if dst == nil {
|
||||
Debug(src, "Couldn't find file - need to transfer")
|
||||
out <- pair
|
||||
return
|
||||
}
|
||||
// Check to see if can store this
|
||||
if !src.Storable() {
|
||||
return
|
||||
}
|
||||
// If we should ignore existing files, don't transfer
|
||||
if Config.IgnoreExisting {
|
||||
Debug(src, "Destination exists, skipping")
|
||||
return
|
||||
}
|
||||
// If we should upload unconditionally
|
||||
if Config.IgnoreTimes {
|
||||
Debug(src, "Uploading unconditionally as --ignore-times is in use")
|
||||
out <- pair
|
||||
return
|
||||
}
|
||||
// If UpdateOlder is in effect, skip if dst is newer than src
|
||||
if Config.UpdateOlder {
|
||||
srcModTime := src.ModTime()
|
||||
dstModTime := dst.ModTime()
|
||||
dt := dstModTime.Sub(srcModTime)
|
||||
// If have a mutually agreed precision then use that
|
||||
modifyWindow := Config.ModifyWindow
|
||||
if modifyWindow == ModTimeNotSupported {
|
||||
// Otherwise use 1 second as a safe default as
|
||||
// the resolution of the time a file was
|
||||
// uploaded.
|
||||
modifyWindow = time.Second
|
||||
}
|
||||
switch {
|
||||
case dt >= modifyWindow:
|
||||
Debug(src, "Destination is newer than source, skipping")
|
||||
return
|
||||
case dt <= -modifyWindow:
|
||||
Debug(src, "Destination is older than source, transferring")
|
||||
default:
|
||||
if src.Size() == dst.Size() {
|
||||
Debug(src, "Destination mod time is within %v of source and sizes identical, skipping", modifyWindow)
|
||||
return
|
||||
}
|
||||
Debug(src, "Destination mod time is within %v of source but sizes differ, transferring", modifyWindow)
|
||||
}
|
||||
} else {
|
||||
// Check to see if changed or not
|
||||
if Equal(src, dst) {
|
||||
Debug(src, "Unchanged skipping")
|
||||
return
|
||||
}
|
||||
}
|
||||
out <- pair
|
||||
}
|
||||
|
||||
// PairChecker reads Objects~s on in send to out if they need transferring.
|
||||
//
|
||||
// FIXME potentially doing lots of hashes at once
|
||||
func PairChecker(in ObjectPairChan, out ObjectPairChan, wg *sync.WaitGroup) {
|
||||
defer wg.Done()
|
||||
for pair := range in {
|
||||
src := pair.src
|
||||
Stats.Checking(src.Remote())
|
||||
checkOne(pair, out)
|
||||
Stats.DoneChecking(src.Remote())
|
||||
}
|
||||
}
|
||||
|
||||
// PairCopier reads Objects on in and copies them.
|
||||
func PairCopier(in ObjectPairChan, fdst Fs, wg *sync.WaitGroup) {
|
||||
defer wg.Done()
|
||||
for pair := range in {
|
||||
src := pair.src
|
||||
Stats.Transferring(src.Remote())
|
||||
if Config.DryRun {
|
||||
Log(src, "Not copying as --dry-run")
|
||||
} else {
|
||||
Copy(fdst, pair.dst, src)
|
||||
}
|
||||
Stats.DoneTransferring(src.Remote())
|
||||
}
|
||||
}
|
||||
|
||||
// PairMover reads Objects on in and moves them if possible, or copies
|
||||
// them if not
|
||||
func PairMover(in ObjectPairChan, fdst Fs, wg *sync.WaitGroup) {
|
||||
defer wg.Done()
|
||||
// See if we have Move available
|
||||
fdstMover, haveMover := fdst.(Mover)
|
||||
for pair := range in {
|
||||
src := pair.src
|
||||
dst := pair.dst
|
||||
Stats.Transferring(src.Remote())
|
||||
if Config.DryRun {
|
||||
Log(src, "Not moving as --dry-run")
|
||||
} else if haveMover && src.Fs().Name() == fdst.Name() {
|
||||
// Delete destination if it exists
|
||||
if pair.dst != nil {
|
||||
err := dst.Remove()
|
||||
if err != nil {
|
||||
Stats.Error()
|
||||
ErrorLog(dst, "Couldn't delete: %v", err)
|
||||
}
|
||||
}
|
||||
_, err := fdstMover.Move(src, src.Remote())
|
||||
if err != nil {
|
||||
Stats.Error()
|
||||
ErrorLog(dst, "Couldn't move: %v", err)
|
||||
} else {
|
||||
Debug(src, "Moved")
|
||||
}
|
||||
} else {
|
||||
Copy(fdst, pair.dst, src)
|
||||
}
|
||||
Stats.DoneTransferring(src.Remote())
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// DeleteFile deletes a single file respecting --dry-run and accumulating stats and errors.
|
||||
|
@ -476,7 +356,10 @@ func DeleteFiles(toBeDeleted ObjectsChan) error {
|
|||
// dir is the start directory, "" for root
|
||||
// If includeAll is specified all files will be added,
|
||||
// otherwise only files passing the filter will be added.
|
||||
func readFilesFn(fs Fs, includeAll bool, dir string, add func(Object)) (err error) {
|
||||
//
|
||||
// Each object is passed ito the function provided. If that returns
|
||||
// an error then the listing will be aborted and that error returned.
|
||||
func readFilesFn(fs Fs, includeAll bool, dir string, add func(Object) error) (err error) {
|
||||
list := NewLister()
|
||||
if !includeAll {
|
||||
list.SetFilter(Config.Filter)
|
||||
|
@ -494,7 +377,10 @@ func readFilesFn(fs Fs, includeAll bool, dir string, add func(Object)) (err erro
|
|||
}
|
||||
// Make sure we don't delete excluded files if not required
|
||||
if includeAll || Config.Filter.IncludeObject(o) {
|
||||
add(o)
|
||||
err = add(o)
|
||||
if err != nil {
|
||||
list.SetError(err)
|
||||
}
|
||||
} else {
|
||||
Debug(o, "Excluded from sync (and deletion)")
|
||||
}
|
||||
|
@ -511,7 +397,7 @@ func readFilesFn(fs Fs, includeAll bool, dir string, add func(Object)) (err erro
|
|||
func readFilesMap(fs Fs, includeAll bool, dir string) (files map[string]Object, err error) {
|
||||
files = make(map[string]Object)
|
||||
normalised := make(map[string]struct{})
|
||||
err = readFilesFn(fs, includeAll, dir, func(o Object) {
|
||||
err = readFilesFn(fs, includeAll, dir, func(o Object) error {
|
||||
remote := o.Remote()
|
||||
normalisedRemote := strings.ToLower(norm.NFC.String(remote))
|
||||
if _, ok := files[remote]; !ok {
|
||||
|
@ -523,6 +409,7 @@ func readFilesMap(fs Fs, includeAll bool, dir string) (files map[string]Object,
|
|||
Log(o, "Duplicate file detected")
|
||||
}
|
||||
normalised[normalisedRemote] = struct{}{}
|
||||
return nil
|
||||
})
|
||||
return files, err
|
||||
}
|
||||
|
@ -565,309 +452,6 @@ func Same(fdst, fsrc Fs) bool {
|
|||
return fdst.Name() == fsrc.Name() && fdst.Root() == fsrc.Root()
|
||||
}
|
||||
|
||||
type syncCopyMove struct {
|
||||
// parameters
|
||||
fdst Fs
|
||||
fsrc Fs
|
||||
Delete bool
|
||||
DoMove bool
|
||||
dir string
|
||||
// internal state
|
||||
noTraverse bool // if set don't trafevers the dst
|
||||
deleteBefore bool // set if we must delete objects before copying
|
||||
dstFiles map[string]Object // dst files, only used if Delete
|
||||
srcFiles map[string]Object // src files, only used if deleteBefore
|
||||
srcFilesChan chan Object // passes src objects
|
||||
srcFilesResult chan error // error result of src listing
|
||||
dstFilesResult chan error // error result of dst listing
|
||||
checkerWg sync.WaitGroup // wait for checkers
|
||||
toBeChecked ObjectPairChan // checkers channel
|
||||
copierWg sync.WaitGroup // wait for copiers
|
||||
toBeUploaded ObjectPairChan // copiers channel
|
||||
}
|
||||
|
||||
func newSyncCopyMove(fdst, fsrc Fs, Delete bool, DoMove bool) *syncCopyMove {
|
||||
s := &syncCopyMove{
|
||||
fdst: fdst,
|
||||
fsrc: fsrc,
|
||||
Delete: Delete,
|
||||
DoMove: DoMove,
|
||||
dir: "",
|
||||
srcFilesChan: make(chan Object, Config.Checkers+Config.Transfers),
|
||||
srcFilesResult: make(chan error, 1),
|
||||
dstFilesResult: make(chan error, 1),
|
||||
noTraverse: Config.NoTraverse,
|
||||
toBeChecked: make(ObjectPairChan, Config.Transfers),
|
||||
toBeUploaded: make(ObjectPairChan, Config.Transfers),
|
||||
deleteBefore: Delete && Config.DeleteBefore,
|
||||
}
|
||||
if s.noTraverse && s.Delete {
|
||||
Debug(s.fdst, "Ignoring --no-traverse with sync")
|
||||
s.noTraverse = false
|
||||
}
|
||||
return s
|
||||
|
||||
}
|
||||
|
||||
// This reads the source files from s.srcFiles into srcFilesChan then
|
||||
// closes it
|
||||
//
|
||||
// It returns the final result of the read into s.srcFilesResult
|
||||
func (s *syncCopyMove) readSrcUsingMap() {
|
||||
for _, o := range s.srcFiles {
|
||||
s.srcFilesChan <- o
|
||||
}
|
||||
close(s.srcFilesChan)
|
||||
s.srcFilesResult <- nil
|
||||
}
|
||||
|
||||
// This reads the source files into srcFilesChan then closes it
|
||||
//
|
||||
// It returns the final result of the read into s.srcFilesResult
|
||||
func (s *syncCopyMove) readSrcUsingChan() {
|
||||
err := readFilesFn(s.fsrc, false, s.dir, func(o Object) {
|
||||
s.srcFilesChan <- o
|
||||
})
|
||||
close(s.srcFilesChan)
|
||||
s.srcFilesResult <- err
|
||||
}
|
||||
|
||||
// This reads the destination files in into dstFiles
|
||||
//
|
||||
// It returns the final result of the read into s.dstFilesResult
|
||||
func (s *syncCopyMove) readDstFiles() {
|
||||
var err error
|
||||
s.dstFiles, err = readFilesMap(s.fdst, Config.Filter.DeleteExcluded, s.dir)
|
||||
s.dstFilesResult <- err
|
||||
}
|
||||
|
||||
// This deletes the files in the dstFiles map. If checkSrcMap is set
|
||||
// then it checks to see if they exist first in srcFiles the source
|
||||
// file map, otherwise it unconditionally deletes them. If
|
||||
// checkSrcMap is clear then it assumes that the any source files that
|
||||
// have been found have been removed from dstFiles already.
|
||||
func (s *syncCopyMove) deleteFiles(checkSrcMap bool) error {
|
||||
if Stats.Errored() {
|
||||
ErrorLog(s.fdst, "%v", ErrorNotDeleting)
|
||||
return ErrorNotDeleting
|
||||
}
|
||||
|
||||
// Delete the spare files
|
||||
toDelete := make(ObjectsChan, Config.Transfers)
|
||||
go func() {
|
||||
for remote, o := range s.dstFiles {
|
||||
if checkSrcMap {
|
||||
_, exists := s.srcFiles[remote]
|
||||
if !exists {
|
||||
toDelete <- o
|
||||
}
|
||||
} else {
|
||||
toDelete <- o
|
||||
}
|
||||
}
|
||||
close(toDelete)
|
||||
}()
|
||||
return DeleteFiles(toDelete)
|
||||
}
|
||||
|
||||
// This starts the background checkers.
|
||||
func (s *syncCopyMove) startCheckers() {
|
||||
s.checkerWg.Add(Config.Checkers)
|
||||
for i := 0; i < Config.Checkers; i++ {
|
||||
go PairChecker(s.toBeChecked, s.toBeUploaded, &s.checkerWg)
|
||||
}
|
||||
}
|
||||
|
||||
// This stops the background checkers
|
||||
func (s *syncCopyMove) stopCheckers() {
|
||||
close(s.toBeChecked)
|
||||
Log(s.fdst, "Waiting for checks to finish")
|
||||
s.checkerWg.Wait()
|
||||
}
|
||||
|
||||
// This starts the background transfers
|
||||
func (s *syncCopyMove) startTransfers() {
|
||||
s.copierWg.Add(Config.Transfers)
|
||||
for i := 0; i < Config.Transfers; i++ {
|
||||
if s.DoMove {
|
||||
go PairMover(s.toBeUploaded, s.fdst, &s.copierWg)
|
||||
} else {
|
||||
go PairCopier(s.toBeUploaded, s.fdst, &s.copierWg)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// This stops the background transfers
|
||||
func (s *syncCopyMove) stopTransfers() {
|
||||
close(s.toBeUploaded)
|
||||
Log(s.fdst, "Waiting for transfers to finish")
|
||||
s.copierWg.Wait()
|
||||
}
|
||||
|
||||
// Syncs fsrc into fdst
|
||||
//
|
||||
// If Delete is true then it deletes any files in fdst that aren't in fsrc
|
||||
//
|
||||
// If DoMove is true then files will be moved instead of copied
|
||||
//
|
||||
// dir is the start directory, "" for root
|
||||
func (s *syncCopyMove) run() error {
|
||||
if Same(s.fdst, s.fsrc) {
|
||||
ErrorLog(s.fdst, "Nothing to do as source and destination are the same")
|
||||
return nil
|
||||
}
|
||||
|
||||
err := Mkdir(s.fdst)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Start reading dstFiles if required
|
||||
if !s.noTraverse {
|
||||
go s.readDstFiles()
|
||||
}
|
||||
|
||||
// If s.deleteBefore then we need to read the whole source map first
|
||||
if s.deleteBefore {
|
||||
// Read source files into the map
|
||||
s.srcFiles, err = readFilesMap(s.fsrc, false, s.dir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Pump the map into s.srcFilesChan
|
||||
go s.readSrcUsingMap()
|
||||
} else {
|
||||
go s.readSrcUsingChan()
|
||||
}
|
||||
|
||||
// Wait for dstfiles to finish reading if we were reading them
|
||||
// and report any errors
|
||||
if !s.noTraverse {
|
||||
err = <-s.dstFilesResult
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Delete files first if required
|
||||
// Have dstFiles and srcFiles complete at this point
|
||||
if s.deleteBefore {
|
||||
err = s.deleteFiles(true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Start background checking and transferring pipeline
|
||||
s.startCheckers()
|
||||
s.startTransfers()
|
||||
|
||||
// Do the transfers
|
||||
for src := range s.srcFilesChan {
|
||||
remote := src.Remote()
|
||||
var dst Object
|
||||
if s.noTraverse {
|
||||
var err error
|
||||
dst, err = s.fdst.NewObject(remote)
|
||||
if err != nil {
|
||||
dst = nil
|
||||
if err != ErrorObjectNotFound {
|
||||
Debug(src, "Error making NewObject: %v", err)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
dst = s.dstFiles[remote]
|
||||
// Remove file from s.dstFiles because it exists in srcFiles
|
||||
delete(s.dstFiles, remote)
|
||||
}
|
||||
if dst != nil {
|
||||
s.toBeChecked <- ObjectPair{src, dst}
|
||||
} else {
|
||||
// No need to check since doesn't exist
|
||||
s.toBeUploaded <- ObjectPair{src, nil}
|
||||
}
|
||||
}
|
||||
|
||||
// Stop background checking and transferring pipeline
|
||||
s.stopCheckers()
|
||||
s.stopTransfers()
|
||||
|
||||
// Retrieve the delayed error from the source listing goroutine
|
||||
err = <-s.srcFilesResult
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Delete files during or after
|
||||
if s.Delete && (Config.DeleteDuring || Config.DeleteAfter) {
|
||||
err = s.deleteFiles(false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Sync fsrc into fdst
|
||||
func Sync(fdst, fsrc Fs) error {
|
||||
return newSyncCopyMove(fdst, fsrc, true, false).run()
|
||||
}
|
||||
|
||||
// CopyDir copies fsrc into fdst
|
||||
func CopyDir(fdst, fsrc Fs) error {
|
||||
return newSyncCopyMove(fdst, fsrc, false, false).run()
|
||||
}
|
||||
|
||||
// moveDir moves fsrc into fdst
|
||||
func moveDir(fdst, fsrc Fs) error {
|
||||
return newSyncCopyMove(fdst, fsrc, false, true).run()
|
||||
}
|
||||
|
||||
// MoveDir moves fsrc into fdst
|
||||
func MoveDir(fdst, fsrc Fs) error {
|
||||
if Same(fdst, fsrc) {
|
||||
ErrorLog(fdst, "Nothing to do as source and destination are the same")
|
||||
return nil
|
||||
}
|
||||
|
||||
// First attempt to use DirMover if exists, same Fs and no filters are active
|
||||
if fdstDirMover, ok := fdst.(DirMover); ok && fsrc.Name() == fdst.Name() && Config.Filter.InActive() {
|
||||
err := fdstDirMover.DirMove(fsrc)
|
||||
Debug(fdst, "Using server side directory move")
|
||||
switch err {
|
||||
case ErrorCantDirMove, ErrorDirExists:
|
||||
Debug(fdst, "Server side directory move failed - fallback to copy/delete: %v", err)
|
||||
case nil:
|
||||
Debug(fdst, "Server side directory move succeeded")
|
||||
return nil
|
||||
default:
|
||||
Stats.Error()
|
||||
ErrorLog(fdst, "Server side directory move failed: %v", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Now move the files
|
||||
err := moveDir(fdst, fsrc)
|
||||
if err != nil || Stats.Errored() {
|
||||
ErrorLog(fdst, "Not deleting files as there were IO errors")
|
||||
return err
|
||||
}
|
||||
// If no filters then purge
|
||||
if Config.Filter.InActive() {
|
||||
return Purge(fsrc)
|
||||
}
|
||||
// Otherwise remove any remaining files obeying filters
|
||||
err = Delete(fsrc)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// and try to remove the directory if empty - ignoring error
|
||||
_ = TryRmdir(fsrc)
|
||||
return nil
|
||||
}
|
||||
|
||||
// checkIdentical checks to see if dst and src are identical
|
||||
//
|
||||
// it returns true if differences were found
|
||||
|
|
|
@ -214,7 +214,7 @@ func (r *Run) WriteObjectTo(f fs.Fs, remote, content string, modTime time.Time,
|
|||
break
|
||||
}
|
||||
// Retry if err returned a retry error
|
||||
if retry, ok := err.(fs.Retry); ok && retry.Retry() && tries < maxTries {
|
||||
if fs.IsRetryError(err) && tries < maxTries {
|
||||
r.Logf("Retry Put of %q to %v: %d/%d (%v)", remote, f, tries, maxTries, err)
|
||||
continue
|
||||
}
|
||||
|
@ -263,104 +263,6 @@ func TestMkdir(t *testing.T) {
|
|||
fstest.TestMkdir(t, r.fremote)
|
||||
}
|
||||
|
||||
// Check dry run is working
|
||||
func TestCopyWithDryRun(t *testing.T) {
|
||||
r := NewRun(t)
|
||||
defer r.Finalise()
|
||||
file1 := r.WriteFile("sub dir/hello world", "hello world", t1)
|
||||
|
||||
fs.Config.DryRun = true
|
||||
err := fs.CopyDir(r.fremote, r.flocal)
|
||||
fs.Config.DryRun = false
|
||||
require.NoError(t, err)
|
||||
|
||||
fstest.CheckItems(t, r.flocal, file1)
|
||||
fstest.CheckItems(t, r.fremote)
|
||||
}
|
||||
|
||||
// Now without dry run
|
||||
func TestCopy(t *testing.T) {
|
||||
r := NewRun(t)
|
||||
defer r.Finalise()
|
||||
file1 := r.WriteFile("sub dir/hello world", "hello world", t1)
|
||||
|
||||
err := fs.CopyDir(r.fremote, r.flocal)
|
||||
require.NoError(t, err)
|
||||
|
||||
fstest.CheckItems(t, r.flocal, file1)
|
||||
fstest.CheckItems(t, r.fremote, file1)
|
||||
}
|
||||
|
||||
// Now with --no-traverse
|
||||
func TestCopyNoTraverse(t *testing.T) {
|
||||
r := NewRun(t)
|
||||
defer r.Finalise()
|
||||
|
||||
fs.Config.NoTraverse = true
|
||||
defer func() { fs.Config.NoTraverse = false }()
|
||||
|
||||
file1 := r.WriteFile("sub dir/hello world", "hello world", t1)
|
||||
|
||||
err := fs.CopyDir(r.fremote, r.flocal)
|
||||
require.NoError(t, err)
|
||||
|
||||
fstest.CheckItems(t, r.flocal, file1)
|
||||
fstest.CheckItems(t, r.fremote, file1)
|
||||
}
|
||||
|
||||
// Now with --no-traverse
|
||||
func TestSyncNoTraverse(t *testing.T) {
|
||||
r := NewRun(t)
|
||||
defer r.Finalise()
|
||||
|
||||
fs.Config.NoTraverse = true
|
||||
defer func() { fs.Config.NoTraverse = false }()
|
||||
|
||||
file1 := r.WriteFile("sub dir/hello world", "hello world", t1)
|
||||
|
||||
err := fs.Sync(r.fremote, r.flocal)
|
||||
require.NoError(t, err)
|
||||
|
||||
fstest.CheckItems(t, r.flocal, file1)
|
||||
fstest.CheckItems(t, r.fremote, file1)
|
||||
}
|
||||
|
||||
// Test copy with depth
|
||||
func TestCopyWithDepth(t *testing.T) {
|
||||
r := NewRun(t)
|
||||
defer r.Finalise()
|
||||
file1 := r.WriteFile("sub dir/hello world", "hello world", t1)
|
||||
file2 := r.WriteFile("hello world2", "hello world2", t2)
|
||||
|
||||
// Check the MaxDepth too
|
||||
fs.Config.MaxDepth = 1
|
||||
defer func() { fs.Config.MaxDepth = -1 }()
|
||||
|
||||
err := fs.CopyDir(r.fremote, r.flocal)
|
||||
require.NoError(t, err)
|
||||
|
||||
fstest.CheckItems(t, r.flocal, file1, file2)
|
||||
fstest.CheckItems(t, r.fremote, file2)
|
||||
}
|
||||
|
||||
// Test a server side copy if possible, or the backup path if not
|
||||
func TestServerSideCopy(t *testing.T) {
|
||||
r := NewRun(t)
|
||||
defer r.Finalise()
|
||||
file1 := r.WriteObject("sub dir/hello world", "hello world", t1)
|
||||
fstest.CheckItems(t, r.fremote, file1)
|
||||
|
||||
fremoteCopy, finaliseCopy, err := fstest.RandomRemote(*RemoteName, *SubDir)
|
||||
require.NoError(t, err)
|
||||
defer finaliseCopy()
|
||||
t.Logf("Server side copy (if possible) %v -> %v", r.fremote, fremoteCopy)
|
||||
|
||||
err = fs.CopyDir(fremoteCopy, r.fremote)
|
||||
require.NoError(t, err)
|
||||
|
||||
fstest.CheckItems(t, fremoteCopy, file1)
|
||||
}
|
||||
|
||||
func TestLsd(t *testing.T) {
|
||||
r := NewRun(t)
|
||||
defer r.Finalise()
|
||||
|
@ -375,473 +277,6 @@ func TestLsd(t *testing.T) {
|
|||
assert.Contains(t, res, "sub dir\n")
|
||||
}
|
||||
|
||||
// Check that if the local file doesn't exist when we copy it up,
|
||||
// nothing happens to the remote file
|
||||
func TestCopyAfterDelete(t *testing.T) {
|
||||
r := NewRun(t)
|
||||
defer r.Finalise()
|
||||
file1 := r.WriteObject("sub dir/hello world", "hello world", t1)
|
||||
fstest.CheckItems(t, r.flocal)
|
||||
fstest.CheckItems(t, r.fremote, file1)
|
||||
|
||||
err := fs.Mkdir(r.flocal)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = fs.CopyDir(r.fremote, r.flocal)
|
||||
require.NoError(t, err)
|
||||
|
||||
fstest.CheckItems(t, r.flocal)
|
||||
fstest.CheckItems(t, r.fremote, file1)
|
||||
}
|
||||
|
||||
// Check the copy downloading a file
|
||||
func TestCopyRedownload(t *testing.T) {
|
||||
r := NewRun(t)
|
||||
defer r.Finalise()
|
||||
file1 := r.WriteObject("sub dir/hello world", "hello world", t1)
|
||||
fstest.CheckItems(t, r.fremote, file1)
|
||||
|
||||
err := fs.CopyDir(r.flocal, r.fremote)
|
||||
require.NoError(t, err)
|
||||
|
||||
fstest.CheckItems(t, r.flocal, file1)
|
||||
}
|
||||
|
||||
// Create a file and sync it. Change the last modified date and resync.
|
||||
// If we're only doing sync by size and checksum, we expect nothing to
|
||||
// to be transferred on the second sync.
|
||||
func TestSyncBasedOnCheckSum(t *testing.T) {
|
||||
r := NewRun(t)
|
||||
defer r.Finalise()
|
||||
fs.Config.CheckSum = true
|
||||
defer func() { fs.Config.CheckSum = false }()
|
||||
|
||||
file1 := r.WriteFile("check sum", "", t1)
|
||||
fstest.CheckItems(t, r.flocal, file1)
|
||||
|
||||
fs.Stats.ResetCounters()
|
||||
err := fs.Sync(r.fremote, r.flocal)
|
||||
require.NoError(t, err)
|
||||
|
||||
// We should have transferred exactly one file.
|
||||
assert.Equal(t, int64(1), fs.Stats.GetTransfers())
|
||||
fstest.CheckItems(t, r.fremote, file1)
|
||||
|
||||
// Change last modified date only
|
||||
file2 := r.WriteFile("check sum", "", t2)
|
||||
fstest.CheckItems(t, r.flocal, file2)
|
||||
|
||||
fs.Stats.ResetCounters()
|
||||
err = fs.Sync(r.fremote, r.flocal)
|
||||
require.NoError(t, err)
|
||||
|
||||
// We should have transferred no files
|
||||
assert.Equal(t, int64(0), fs.Stats.GetTransfers())
|
||||
fstest.CheckItems(t, r.flocal, file2)
|
||||
fstest.CheckItems(t, r.fremote, file1)
|
||||
}
|
||||
|
||||
// Create a file and sync it. Change the last modified date and the
|
||||
// file contents but not the size. If we're only doing sync by size
|
||||
// only, we expect nothing to to be transferred on the second sync.
|
||||
func TestSyncSizeOnly(t *testing.T) {
|
||||
r := NewRun(t)
|
||||
defer r.Finalise()
|
||||
fs.Config.SizeOnly = true
|
||||
defer func() { fs.Config.SizeOnly = false }()
|
||||
|
||||
file1 := r.WriteFile("sizeonly", "potato", t1)
|
||||
fstest.CheckItems(t, r.flocal, file1)
|
||||
|
||||
fs.Stats.ResetCounters()
|
||||
err := fs.Sync(r.fremote, r.flocal)
|
||||
require.NoError(t, err)
|
||||
|
||||
// We should have transferred exactly one file.
|
||||
assert.Equal(t, int64(1), fs.Stats.GetTransfers())
|
||||
fstest.CheckItems(t, r.fremote, file1)
|
||||
|
||||
// Update mtime, md5sum but not length of file
|
||||
file2 := r.WriteFile("sizeonly", "POTATO", t2)
|
||||
fstest.CheckItems(t, r.flocal, file2)
|
||||
|
||||
fs.Stats.ResetCounters()
|
||||
err = fs.Sync(r.fremote, r.flocal)
|
||||
require.NoError(t, err)
|
||||
|
||||
// We should have transferred no files
|
||||
assert.Equal(t, int64(0), fs.Stats.GetTransfers())
|
||||
fstest.CheckItems(t, r.flocal, file2)
|
||||
fstest.CheckItems(t, r.fremote, file1)
|
||||
}
|
||||
|
||||
// Create a file and sync it. Keep the last modified date but change
|
||||
// the size. With --ignore-size we expect nothing to to be
|
||||
// transferred on the second sync.
|
||||
func TestSyncIgnoreSize(t *testing.T) {
|
||||
r := NewRun(t)
|
||||
defer r.Finalise()
|
||||
fs.Config.IgnoreSize = true
|
||||
defer func() { fs.Config.IgnoreSize = false }()
|
||||
|
||||
file1 := r.WriteFile("ignore-size", "contents", t1)
|
||||
fstest.CheckItems(t, r.flocal, file1)
|
||||
|
||||
fs.Stats.ResetCounters()
|
||||
err := fs.Sync(r.fremote, r.flocal)
|
||||
require.NoError(t, err)
|
||||
|
||||
// We should have transferred exactly one file.
|
||||
assert.Equal(t, int64(1), fs.Stats.GetTransfers())
|
||||
fstest.CheckItems(t, r.fremote, file1)
|
||||
|
||||
// Update size but not date of file
|
||||
file2 := r.WriteFile("ignore-size", "longer contents but same date", t1)
|
||||
fstest.CheckItems(t, r.flocal, file2)
|
||||
|
||||
fs.Stats.ResetCounters()
|
||||
err = fs.Sync(r.fremote, r.flocal)
|
||||
require.NoError(t, err)
|
||||
|
||||
// We should have transferred no files
|
||||
assert.Equal(t, int64(0), fs.Stats.GetTransfers())
|
||||
fstest.CheckItems(t, r.flocal, file2)
|
||||
fstest.CheckItems(t, r.fremote, file1)
|
||||
}
|
||||
|
||||
func TestSyncIgnoreTimes(t *testing.T) {
|
||||
r := NewRun(t)
|
||||
defer r.Finalise()
|
||||
file1 := r.WriteBoth("existing", "potato", t1)
|
||||
fstest.CheckItems(t, r.fremote, file1)
|
||||
|
||||
fs.Stats.ResetCounters()
|
||||
err := fs.Sync(r.fremote, r.flocal)
|
||||
require.NoError(t, err)
|
||||
|
||||
// We should have transferred exactly 0 files because the
|
||||
// files were identical.
|
||||
assert.Equal(t, int64(0), fs.Stats.GetTransfers())
|
||||
|
||||
fs.Config.IgnoreTimes = true
|
||||
defer func() { fs.Config.IgnoreTimes = false }()
|
||||
|
||||
fs.Stats.ResetCounters()
|
||||
err = fs.Sync(r.fremote, r.flocal)
|
||||
require.NoError(t, err)
|
||||
|
||||
// We should have transferred exactly one file even though the
|
||||
// files were identical.
|
||||
assert.Equal(t, int64(1), fs.Stats.GetTransfers())
|
||||
|
||||
fstest.CheckItems(t, r.flocal, file1)
|
||||
fstest.CheckItems(t, r.fremote, file1)
|
||||
}
|
||||
|
||||
func TestSyncIgnoreExisting(t *testing.T) {
|
||||
r := NewRun(t)
|
||||
defer r.Finalise()
|
||||
file1 := r.WriteFile("existing", "potato", t1)
|
||||
|
||||
fs.Config.IgnoreExisting = true
|
||||
defer func() { fs.Config.IgnoreExisting = false }()
|
||||
|
||||
fs.Stats.ResetCounters()
|
||||
err := fs.Sync(r.fremote, r.flocal)
|
||||
require.NoError(t, err)
|
||||
fstest.CheckItems(t, r.flocal, file1)
|
||||
fstest.CheckItems(t, r.fremote, file1)
|
||||
|
||||
// Change everything
|
||||
r.WriteFile("existing", "newpotatoes", t2)
|
||||
fs.Stats.ResetCounters()
|
||||
err = fs.Sync(r.fremote, r.flocal)
|
||||
require.NoError(t, err)
|
||||
// Items should not change
|
||||
fstest.CheckItems(t, r.fremote, file1)
|
||||
}
|
||||
|
||||
func TestSyncAfterChangingModtimeOnly(t *testing.T) {
|
||||
r := NewRun(t)
|
||||
defer r.Finalise()
|
||||
file1 := r.WriteFile("empty space", "", t2)
|
||||
r.WriteObject("empty space", "", t1)
|
||||
|
||||
fs.Stats.ResetCounters()
|
||||
err := fs.Sync(r.fremote, r.flocal)
|
||||
require.NoError(t, err)
|
||||
|
||||
fstest.CheckItems(t, r.flocal, file1)
|
||||
fstest.CheckItems(t, r.fremote, file1)
|
||||
}
|
||||
|
||||
func TestSyncAfterAddingAFile(t *testing.T) {
|
||||
r := NewRun(t)
|
||||
defer r.Finalise()
|
||||
file1 := r.WriteBoth("empty space", "", t2)
|
||||
file2 := r.WriteFile("potato", "------------------------------------------------------------", t3)
|
||||
|
||||
fs.Stats.ResetCounters()
|
||||
err := fs.Sync(r.fremote, r.flocal)
|
||||
require.NoError(t, err)
|
||||
fstest.CheckItems(t, r.flocal, file1, file2)
|
||||
fstest.CheckItems(t, r.fremote, file1, file2)
|
||||
}
|
||||
|
||||
func TestSyncAfterChangingFilesSizeOnly(t *testing.T) {
|
||||
r := NewRun(t)
|
||||
defer r.Finalise()
|
||||
file1 := r.WriteObject("potato", "------------------------------------------------------------", t3)
|
||||
file2 := r.WriteFile("potato", "smaller but same date", t3)
|
||||
fstest.CheckItems(t, r.fremote, file1)
|
||||
fstest.CheckItems(t, r.flocal, file2)
|
||||
|
||||
fs.Stats.ResetCounters()
|
||||
err := fs.Sync(r.fremote, r.flocal)
|
||||
require.NoError(t, err)
|
||||
fstest.CheckItems(t, r.flocal, file2)
|
||||
fstest.CheckItems(t, r.fremote, file2)
|
||||
}
|
||||
|
||||
// Sync after changing a file's contents, changing modtime but length
|
||||
// remaining the same
|
||||
func TestSyncAfterChangingContentsOnly(t *testing.T) {
|
||||
r := NewRun(t)
|
||||
defer r.Finalise()
|
||||
var file1 fstest.Item
|
||||
if r.fremote.Precision() == fs.ModTimeNotSupported {
|
||||
t.Logf("ModTimeNotSupported so forcing file to be a different size")
|
||||
file1 = r.WriteObject("potato", "different size to make sure it syncs", t3)
|
||||
} else {
|
||||
file1 = r.WriteObject("potato", "smaller but same date", t3)
|
||||
}
|
||||
file2 := r.WriteFile("potato", "SMALLER BUT SAME DATE", t2)
|
||||
fstest.CheckItems(t, r.fremote, file1)
|
||||
fstest.CheckItems(t, r.flocal, file2)
|
||||
|
||||
fs.Stats.ResetCounters()
|
||||
err := fs.Sync(r.fremote, r.flocal)
|
||||
require.NoError(t, err)
|
||||
fstest.CheckItems(t, r.flocal, file2)
|
||||
fstest.CheckItems(t, r.fremote, file2)
|
||||
}
|
||||
|
||||
// Sync after removing a file and adding a file --dry-run
|
||||
func TestSyncAfterRemovingAFileAndAddingAFileDryRun(t *testing.T) {
|
||||
r := NewRun(t)
|
||||
defer r.Finalise()
|
||||
file1 := r.WriteFile("potato2", "------------------------------------------------------------", t1)
|
||||
file2 := r.WriteObject("potato", "SMALLER BUT SAME DATE", t2)
|
||||
file3 := r.WriteBoth("empty space", "", t2)
|
||||
|
||||
fs.Config.DryRun = true
|
||||
fs.Stats.ResetCounters()
|
||||
err := fs.Sync(r.fremote, r.flocal)
|
||||
fs.Config.DryRun = false
|
||||
require.NoError(t, err)
|
||||
|
||||
fstest.CheckItems(t, r.flocal, file3, file1)
|
||||
fstest.CheckItems(t, r.fremote, file3, file2)
|
||||
}
|
||||
|
||||
// Sync after removing a file and adding a file
func TestSyncAfterRemovingAFileAndAddingAFile(t *testing.T) {
	r := NewRun(t)
	defer r.Finalise()
	file1 := r.WriteFile("potato2", "------------------------------------------------------------", t1)
	file2 := r.WriteObject("potato", "SMALLER BUT SAME DATE", t2)
	file3 := r.WriteBoth("empty space", "", t2)
	fstest.CheckItems(t, r.fremote, file2, file3)
	fstest.CheckItems(t, r.flocal, file1, file3)

	fs.Stats.ResetCounters()
	err := fs.Sync(r.fremote, r.flocal)
	require.NoError(t, err)
	// potato2 is copied up and potato is deleted from the remote
	fstest.CheckItems(t, r.flocal, file1, file3)
	fstest.CheckItems(t, r.fremote, file1, file3)
}
|
||||
|
||||
// Sync after removing a file and adding a file with IO Errors
func TestSyncAfterRemovingAFileAndAddingAFileWithErrors(t *testing.T) {
	r := NewRun(t)
	defer r.Finalise()
	file1 := r.WriteFile("potato2", "------------------------------------------------------------", t1)
	file2 := r.WriteObject("potato", "SMALLER BUT SAME DATE", t2)
	file3 := r.WriteBoth("empty space", "", t2)
	fstest.CheckItems(t, r.fremote, file2, file3)
	fstest.CheckItems(t, r.flocal, file1, file3)

	fs.Stats.ResetCounters()
	// Inject an error so the sync refuses to delete anything
	fs.Stats.Error()
	err := fs.Sync(r.fremote, r.flocal)
	assert.Equal(t, fs.ErrorNotDeleting, err)
	fstest.CheckItems(t, r.flocal, file1, file3)
	// potato must NOT have been deleted because of the injected error
	fstest.CheckItems(t, r.fremote, file1, file2, file3)
}
|
||||
|
||||
// Sync test delete during
func TestSyncDeleteDuring(t *testing.T) {
	// This is the default so we've checked this already
	// check it is the default
	if !(!fs.Config.DeleteBefore && fs.Config.DeleteDuring && !fs.Config.DeleteAfter) {
		t.Fatalf("Didn't default to --delete-during")
	}
}
|
||||
|
||||
// Sync test delete before
func TestSyncDeleteBefore(t *testing.T) {
	fs.Config.DeleteBefore = true
	fs.Config.DeleteDuring = false
	fs.Config.DeleteAfter = false
	defer func() {
		// restore the --delete-during default
		fs.Config.DeleteBefore = false
		fs.Config.DeleteDuring = true
		fs.Config.DeleteAfter = false
	}()

	// Re-run the standard remove/add scenario under --delete-before
	TestSyncAfterRemovingAFileAndAddingAFile(t)
}
|
||||
|
||||
// Sync test delete after
func TestSyncDeleteAfter(t *testing.T) {
	fs.Config.DeleteBefore = false
	fs.Config.DeleteDuring = false
	fs.Config.DeleteAfter = true
	defer func() {
		// restore the --delete-during default
		fs.Config.DeleteBefore = false
		fs.Config.DeleteDuring = true
		fs.Config.DeleteAfter = false
	}()

	// Re-run the standard remove/add scenario under --delete-after
	TestSyncAfterRemovingAFileAndAddingAFile(t)
}
|
||||
|
||||
// Test with exclude
func TestSyncWithExclude(t *testing.T) {
	r := NewRun(t)
	defer r.Finalise()
	file1 := r.WriteBoth("potato2", "------------------------------------------------------------", t1)
	file2 := r.WriteBoth("empty space", "", t2)
	file3 := r.WriteFile("enormous", "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", t1) // 100 bytes

	fs.Config.Filter.MaxSize = 40
	defer func() {
		fs.Config.Filter.MaxSize = -1
	}()

	fs.Stats.ResetCounters()
	err := fs.Sync(r.fremote, r.flocal)
	require.NoError(t, err)
	// enormous (over MaxSize) must not have been transferred
	fstest.CheckItems(t, r.fremote, file2, file1)

	// Now sync the other way round and check enormous doesn't get
	// deleted as it is excluded from the sync
	fs.Stats.ResetCounters()
	err = fs.Sync(r.flocal, r.fremote)
	require.NoError(t, err)
	fstest.CheckItems(t, r.flocal, file2, file1, file3)
}
|
||||
|
||||
// Test with exclude and delete excluded
func TestSyncWithExcludeAndDeleteExcluded(t *testing.T) {
	r := NewRun(t)
	defer r.Finalise()
	file1 := r.WriteBoth("potato2", "------------------------------------------------------------", t1) // 60 bytes
	file2 := r.WriteBoth("empty space", "", t2)
	file3 := r.WriteBoth("enormous", "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", t1) // 100 bytes
	fstest.CheckItems(t, r.fremote, file1, file2, file3)
	fstest.CheckItems(t, r.flocal, file1, file2, file3)

	fs.Config.Filter.MaxSize = 40
	fs.Config.Filter.DeleteExcluded = true
	defer func() {
		fs.Config.Filter.MaxSize = -1
		fs.Config.Filter.DeleteExcluded = false
	}()

	fs.Stats.ResetCounters()
	err := fs.Sync(r.fremote, r.flocal)
	require.NoError(t, err)
	// With --delete-excluded the oversize files are removed from dst
	fstest.CheckItems(t, r.fremote, file2)

	// Check sync the other way round to make sure enormous gets
	// deleted even though it is excluded
	fs.Stats.ResetCounters()
	err = fs.Sync(r.flocal, r.fremote)
	require.NoError(t, err)
	fstest.CheckItems(t, r.flocal, file2)
}
|
||||
|
||||
// Test with UpdateOlder set
|
||||
func TestSyncWithUpdateOlder(t *testing.T) {
|
||||
if fs.Config.ModifyWindow == fs.ModTimeNotSupported {
|
||||
t.Skip("Can't run this test on fs which doesn't support mod time")
|
||||
}
|
||||
r := NewRun(t)
|
||||
defer r.Finalise()
|
||||
t2plus := t2.Add(time.Second / 2)
|
||||
t2minus := t2.Add(time.Second / 2)
|
||||
oneF := r.WriteFile("one", "one", t1)
|
||||
twoF := r.WriteFile("two", "two", t3)
|
||||
threeF := r.WriteFile("three", "three", t2)
|
||||
fourF := r.WriteFile("four", "four", t2)
|
||||
fiveF := r.WriteFile("five", "five", t2)
|
||||
fstest.CheckItems(t, r.flocal, oneF, twoF, threeF, fourF, fiveF)
|
||||
oneO := r.WriteObject("one", "ONE", t2)
|
||||
twoO := r.WriteObject("two", "TWO", t2)
|
||||
threeO := r.WriteObject("three", "THREE", t2plus)
|
||||
fourO := r.WriteObject("four", "FOURFOUR", t2minus)
|
||||
fstest.CheckItems(t, r.fremote, oneO, twoO, threeO, fourO)
|
||||
|
||||
fs.Config.UpdateOlder = true
|
||||
oldModifyWindow := fs.Config.ModifyWindow
|
||||
fs.Config.ModifyWindow = fs.ModTimeNotSupported
|
||||
defer func() {
|
||||
fs.Config.UpdateOlder = false
|
||||
fs.Config.ModifyWindow = oldModifyWindow
|
||||
}()
|
||||
|
||||
fs.Stats.ResetCounters()
|
||||
err := fs.Sync(r.fremote, r.flocal)
|
||||
require.NoError(t, err)
|
||||
fstest.CheckItems(t, r.fremote, oneO, twoF, threeO, fourF, fiveF)
|
||||
}
|
||||
|
||||
// Test a server side move if possible, or the backup path if not
func TestServerSideMove(t *testing.T) {
	r := NewRun(t)
	defer r.Finalise()
	file1 := r.WriteBoth("potato2", "------------------------------------------------------------", t1)
	file2 := r.WriteBoth("empty space", "", t2)

	fstest.CheckItems(t, r.fremote, file2, file1)

	fremoteMove, finaliseMove, err := fstest.RandomRemote(*RemoteName, *SubDir)
	require.NoError(t, err)
	defer finaliseMove()
	t.Logf("Server side move (if possible) %v -> %v", r.fremote, fremoteMove)

	// Write just one file in the new remote
	r.WriteObjectTo(fremoteMove, "empty space", "", t2, false)
	fstest.CheckItems(t, fremoteMove, file2)

	// Do server side move
	fs.Stats.ResetCounters()
	err = fs.MoveDir(fremoteMove, r.fremote)
	require.NoError(t, err)

	// Source must now be empty and destination hold both files
	fstest.CheckItems(t, r.fremote)
	fstest.CheckItems(t, fremoteMove, file2, file1)

	// Move it back again, dst does not exist this time
	fs.Stats.ResetCounters()
	err = fs.MoveDir(r.fremote, fremoteMove)
	require.NoError(t, err)

	fstest.CheckItems(t, r.fremote, file2, file1)
	fstest.CheckItems(t, fremoteMove)
}
|
||||
|
||||
func TestLs(t *testing.T) {
|
||||
r := NewRun(t)
|
||||
defer r.Finalise()
|
||||
|
|
530
fs/sync.go
Normal file
530
fs/sync.go
Normal file
|
@ -0,0 +1,530 @@
|
|||
// Implementation of sync/copy/move
|
||||
|
||||
package fs
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// syncCopyMove holds the state of a single sync, copy or move run.
type syncCopyMove struct {
	// parameters
	fdst   Fs     // destination filesystem
	fsrc   Fs     // source filesystem
	Delete bool   // delete files on dst that aren't on src
	DoMove bool   // move files rather than copying them
	dir    string // start directory, "" for root
	// internal state
	noTraverse     bool              // if set don't traverse the dst
	deleteBefore   bool              // set if we must delete objects before copying
	dstFiles       map[string]Object // dst files, only used if Delete
	srcFiles       map[string]Object // src files, only used if deleteBefore
	srcFilesChan   chan Object       // passes src objects
	srcFilesResult chan error        // error result of src listing
	dstFilesResult chan error        // error result of dst listing
	abort          chan struct{}     // signal to abort the copiers
	checkerWg      sync.WaitGroup    // wait for checkers
	toBeChecked    ObjectPairChan    // checkers channel
	copierWg       sync.WaitGroup    // wait for copiers
	toBeUploaded   ObjectPairChan    // copiers channel
	errorMu        sync.Mutex        // Mutex covering the errors variables
	err            error             // normal error from copy process
	noRetryErr     error             // error with NoRetry set
	fatalErr       error             // fatal error
}
|
||||
|
||||
// newSyncCopyMove makes a syncCopyMove ready to run.
//
// Delete controls whether extraneous destination files are removed and
// DoMove selects move semantics instead of copy.
func newSyncCopyMove(fdst, fsrc Fs, Delete bool, DoMove bool) *syncCopyMove {
	s := &syncCopyMove{
		fdst:           fdst,
		fsrc:           fsrc,
		Delete:         Delete,
		DoMove:         DoMove,
		dir:            "",
		srcFilesChan:   make(chan Object, Config.Checkers+Config.Transfers),
		srcFilesResult: make(chan error, 1),
		dstFilesResult: make(chan error, 1),
		noTraverse:     Config.NoTraverse,
		abort:          make(chan struct{}),
		toBeChecked:    make(ObjectPairChan, Config.Transfers),
		toBeUploaded:   make(ObjectPairChan, Config.Transfers),
		deleteBefore:   Delete && Config.DeleteBefore,
	}
	// --no-traverse is incompatible with deletion detection, which
	// needs a full destination listing
	if s.noTraverse && s.Delete {
		Debug(s.fdst, "Ignoring --no-traverse with sync")
		s.noTraverse = false
	}
	return s
}
|
||||
|
||||
// Check to see if have set the abort flag
|
||||
func (s *syncCopyMove) aborting() bool {
|
||||
select {
|
||||
case <-s.abort:
|
||||
return true
|
||||
default:
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// This reads the source files from s.srcFiles into srcFilesChan then
|
||||
// closes it
|
||||
//
|
||||
// It returns the final result of the read into s.srcFilesResult
|
||||
func (s *syncCopyMove) readSrcUsingMap() {
|
||||
outer:
|
||||
for _, o := range s.srcFiles {
|
||||
if s.aborting() {
|
||||
break outer
|
||||
}
|
||||
select {
|
||||
case s.srcFilesChan <- o:
|
||||
case <-s.abort:
|
||||
break outer
|
||||
}
|
||||
}
|
||||
close(s.srcFilesChan)
|
||||
s.srcFilesResult <- nil
|
||||
}
|
||||
|
||||
// readSrcUsingChan reads the source files into srcFilesChan then
// closes it, listing the source as it goes.
//
// It returns the final result of the read into s.srcFilesResult.
func (s *syncCopyMove) readSrcUsingChan() {
	err := readFilesFn(s.fsrc, false, s.dir, func(o Object) error {
		// Abort the listing promptly if the run has been cancelled
		if s.aborting() {
			return ErrorListAborted
		}
		select {
		case s.srcFilesChan <- o:
		case <-s.abort:
			return ErrorListAborted
		}
		return nil
	})
	close(s.srcFilesChan)
	s.srcFilesResult <- err
}
|
||||
|
||||
// This reads the destination files in into dstFiles
|
||||
//
|
||||
// It returns the final result of the read into s.dstFilesResult
|
||||
func (s *syncCopyMove) readDstFiles() {
|
||||
var err error
|
||||
s.dstFiles, err = readFilesMap(s.fdst, Config.Filter.DeleteExcluded, s.dir)
|
||||
s.dstFilesResult <- err
|
||||
}
|
||||
|
||||
// checkOne checks to see if src needs to be copied to dst and if so
// puts the pair in out.
//
// A pair is dropped (not forwarded) when the source isn't storable,
// when --ignore-existing applies, or when src and dst are judged
// unchanged.
func (s *syncCopyMove) checkOne(pair ObjectPair, out ObjectPairChan) {
	src, dst := pair.src, pair.dst
	if dst == nil {
		Debug(src, "Couldn't find file - need to transfer")
		out <- pair
		return
	}
	// Check to see if can store this
	if !src.Storable() {
		return
	}
	// If we should ignore existing files, don't transfer
	if Config.IgnoreExisting {
		Debug(src, "Destination exists, skipping")
		return
	}
	// If we should upload unconditionally
	if Config.IgnoreTimes {
		Debug(src, "Uploading unconditionally as --ignore-times is in use")
		out <- pair
		return
	}
	// If UpdateOlder is in effect, skip if dst is newer than src
	if Config.UpdateOlder {
		srcModTime := src.ModTime()
		dstModTime := dst.ModTime()
		dt := dstModTime.Sub(srcModTime)
		// If have a mutually agreed precision then use that
		modifyWindow := Config.ModifyWindow
		if modifyWindow == ModTimeNotSupported {
			// Otherwise use 1 second as a safe default as
			// the resolution of the time a file was
			// uploaded.
			modifyWindow = time.Second
		}
		switch {
		case dt >= modifyWindow:
			Debug(src, "Destination is newer than source, skipping")
			return
		case dt <= -modifyWindow:
			Debug(src, "Destination is older than source, transferring")
		default:
			// Mod times within the window - fall back to size
			if src.Size() == dst.Size() {
				Debug(src, "Destination mod time is within %v of source and sizes identical, skipping", modifyWindow)
				return
			}
			Debug(src, "Destination mod time is within %v of source but sizes differ, transferring", modifyWindow)
		}
	} else {
		// Check to see if changed or not
		if Equal(src, dst) {
			Debug(src, "Unchanged skipping")
			return
		}
	}
	out <- pair
}
|
||||
|
||||
// This checks the types of errors returned while copying files
|
||||
func (s *syncCopyMove) processError(err error) {
|
||||
if err == nil {
|
||||
return
|
||||
}
|
||||
s.errorMu.Lock()
|
||||
defer s.errorMu.Unlock()
|
||||
switch {
|
||||
case IsFatalError(err):
|
||||
close(s.abort)
|
||||
s.fatalErr = err
|
||||
case IsNoRetryError(err):
|
||||
s.noRetryErr = err
|
||||
default:
|
||||
s.err = err
|
||||
}
|
||||
}
|
||||
|
||||
// pairChecker reads Objects~s on in send to out if they need transferring.
//
// It exits when in is closed or the run is aborted.
//
// FIXME potentially doing lots of hashes at once
func (s *syncCopyMove) pairChecker(in ObjectPairChan, out ObjectPairChan, wg *sync.WaitGroup) {
	defer wg.Done()
	for {
		if s.aborting() {
			return
		}
		select {
		case pair, ok := <-in:
			if !ok {
				// channel closed - no more work
				return
			}
			src := pair.src
			Stats.Checking(src.Remote())
			s.checkOne(pair, out)
			Stats.DoneChecking(src.Remote())
		case <-s.abort:
			return
		}
	}
}
|
||||
|
||||
// pairCopier reads Objects on in and copies them, recording any
// errors via processError.
//
// It exits when in is closed or the run is aborted.
func (s *syncCopyMove) pairCopier(in ObjectPairChan, fdst Fs, wg *sync.WaitGroup) {
	defer wg.Done()
	for {
		if s.aborting() {
			return
		}
		select {
		case pair, ok := <-in:
			if !ok {
				// channel closed - no more work
				return
			}
			src := pair.src
			Stats.Transferring(src.Remote())
			if Config.DryRun {
				Log(src, "Not copying as --dry-run")
			} else {
				s.processError(Copy(fdst, pair.dst, src))
			}
			Stats.DoneTransferring(src.Remote())
		case <-s.abort:
			return
		}
	}
}
|
||||
|
||||
// pairMover reads Objects on in and moves them if possible, or copies
// them if not, recording any errors via processError.
//
// A server side move is used when fdst implements Mover and the source
// object lives on the same remote; otherwise it falls back to Copy.
//
// It exits when in is closed or the run is aborted.
func (s *syncCopyMove) pairMover(in ObjectPairChan, fdst Fs, wg *sync.WaitGroup) {
	defer wg.Done()
	// See if we have Move available
	fdstMover, haveMover := fdst.(Mover)
	for {
		if s.aborting() {
			return
		}
		select {
		case pair, ok := <-in:
			if !ok {
				return
			}
			src := pair.src
			dst := pair.dst
			Stats.Transferring(src.Remote())
			if Config.DryRun {
				Log(src, "Not moving as --dry-run")
			} else if haveMover && src.Fs().Name() == fdst.Name() {
				// Delete destination if it exists
				if pair.dst != nil {
					err := dst.Remove()
					if err != nil {
						Stats.Error()
						ErrorLog(dst, "Couldn't delete: %v", err)
						s.processError(err)
					}
					// NOTE(review): the move below is still attempted
					// even if the delete above failed - confirm this is
					// intentional.
				}
				_, err := fdstMover.Move(src, src.Remote())
				if err != nil {
					Stats.Error()
					ErrorLog(dst, "Couldn't move: %v", err)
					s.processError(err)
				} else {
					Debug(src, "Moved")
				}
			} else {
				// No server side move available - fall back to copying
				s.processError(Copy(fdst, pair.dst, src))
			}
			Stats.DoneTransferring(src.Remote())
		case <-s.abort:
			return
		}
	}
}
|
||||
|
||||
// This starts the background checkers.
|
||||
func (s *syncCopyMove) startCheckers() {
|
||||
s.checkerWg.Add(Config.Checkers)
|
||||
for i := 0; i < Config.Checkers; i++ {
|
||||
go s.pairChecker(s.toBeChecked, s.toBeUploaded, &s.checkerWg)
|
||||
}
|
||||
}
|
||||
|
||||
// stopCheckers signals the background checkers to finish by closing
// their input channel, then waits for them to exit.
func (s *syncCopyMove) stopCheckers() {
	close(s.toBeChecked)
	Log(s.fdst, "Waiting for checks to finish")
	s.checkerWg.Wait()
}
|
||||
|
||||
// This starts the background transfers
|
||||
func (s *syncCopyMove) startTransfers() {
|
||||
s.copierWg.Add(Config.Transfers)
|
||||
for i := 0; i < Config.Transfers; i++ {
|
||||
if s.DoMove {
|
||||
go s.pairMover(s.toBeUploaded, s.fdst, &s.copierWg)
|
||||
} else {
|
||||
go s.pairCopier(s.toBeUploaded, s.fdst, &s.copierWg)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// stopTransfers signals the background transfers to finish by closing
// their input channel, then waits for them to exit.
func (s *syncCopyMove) stopTransfers() {
	close(s.toBeUploaded)
	Log(s.fdst, "Waiting for transfers to finish")
	s.copierWg.Wait()
}
|
||||
|
||||
// deleteFiles deletes the files in the dstFiles map. If checkSrcMap is
// set then it checks to see if they exist first in srcFiles the source
// file map, otherwise it unconditionally deletes them. If checkSrcMap
// is clear then it assumes that the any source files that have been
// found have been removed from dstFiles already.
//
// Deletion is refused entirely (returning ErrorNotDeleting) if any
// errors have been recorded so far, to avoid deleting files after a
// possibly incomplete transfer.
func (s *syncCopyMove) deleteFiles(checkSrcMap bool) error {
	if Stats.Errored() {
		ErrorLog(s.fdst, "%v", ErrorNotDeleting)
		return ErrorNotDeleting
	}

	// Delete the spare files
	toDelete := make(ObjectsChan, Config.Transfers)
	go func() {
		for remote, o := range s.dstFiles {
			if checkSrcMap {
				_, exists := s.srcFiles[remote]
				if exists {
					// present on src too - keep it
					continue
				}
			}
			if s.aborting() {
				break
			}
			toDelete <- o
		}
		close(toDelete)
	}()
	return DeleteFiles(toDelete)
}
|
||||
|
||||
// run syncs s.fsrc into s.fdst.
//
// If s.Delete is true then it deletes any files in fdst that aren't
// in fsrc. If s.DoMove is true then files will be moved instead of
// copied. s.dir is the start directory, "" for root.
func (s *syncCopyMove) run() error {
	if Same(s.fdst, s.fsrc) {
		ErrorLog(s.fdst, "Nothing to do as source and destination are the same")
		return nil
	}

	err := Mkdir(s.fdst)
	if err != nil {
		return err
	}

	// Start reading dstFiles if required
	if !s.noTraverse {
		go s.readDstFiles()
	}

	// If s.deleteBefore then we need to read the whole source map first
	if s.deleteBefore {
		// Read source files into the map
		s.srcFiles, err = readFilesMap(s.fsrc, false, s.dir)
		if err != nil {
			return err
		}
		// Pump the map into s.srcFilesChan
		go s.readSrcUsingMap()
	} else {
		go s.readSrcUsingChan()
	}

	// Wait for dstfiles to finish reading if we were reading them
	// and report any errors
	if !s.noTraverse {
		err = <-s.dstFilesResult
		if err != nil {
			return err
		}
	}

	// Delete files first if required
	// Have dstFiles and srcFiles complete at this point
	if s.deleteBefore {
		err = s.deleteFiles(true)
		if err != nil {
			return err
		}
	}

	// Start background checking and transferring pipeline
	s.startCheckers()
	s.startTransfers()

	// Do the transfers
	for src := range s.srcFilesChan {
		remote := src.Remote()
		var dst Object
		if s.noTraverse {
			// Look up the destination object directly instead of
			// from a pre-read listing
			var err error
			dst, err = s.fdst.NewObject(remote)
			if err != nil {
				dst = nil
				if err != ErrorObjectNotFound {
					Debug(src, "Error making NewObject: %v", err)
				}
			}
		} else {
			dst = s.dstFiles[remote]
			// Remove file from s.dstFiles because it exists in srcFiles
			delete(s.dstFiles, remote)
		}
		if dst != nil {
			s.toBeChecked <- ObjectPair{src, dst}
		} else {
			// No need to check since doesn't exist
			s.toBeUploaded <- ObjectPair{src, nil}
		}
	}

	// Stop background checking and transferring pipeline
	s.stopCheckers()
	s.stopTransfers()

	// Retrieve the delayed error from the source listing goroutine
	err = <-s.srcFilesResult

	// Delete files during or after
	if s.Delete && (Config.DeleteDuring || Config.DeleteAfter) {
		if err != nil {
			// Don't delete anything if the listing errored
			ErrorLog(s.fdst, "%v", ErrorNotDeleting)
		} else {
			err = s.deleteFiles(false)
		}
	}

	// Return errors in the precedence
	// fatalErr
	// error from above
	// error from a copy
	// noRetryErr
	s.processError(err)
	if s.fatalErr != nil {
		return s.fatalErr
	}
	if s.err != nil {
		return s.err
	}
	return s.noRetryErr
}
|
||||
|
||||
// Sync fsrc into fdst, deleting files on fdst that aren't on fsrc.
// It returns the last error from a copy, prioritising fatal errors.
func Sync(fdst, fsrc Fs) error {
	return newSyncCopyMove(fdst, fsrc, true, false).run()
}
|
||||
|
||||
// CopyDir copies fsrc into fdst without deleting anything on fdst.
func CopyDir(fdst, fsrc Fs) error {
	return newSyncCopyMove(fdst, fsrc, false, false).run()
}
|
||||
|
||||
// moveDir moves fsrc into fdst, moving the individual files.
func moveDir(fdst, fsrc Fs) error {
	return newSyncCopyMove(fdst, fsrc, false, true).run()
}
|
||||
|
||||
// MoveDir moves fsrc into fdst.
//
// It tries a server side DirMove first (same remote, no filters) and
// otherwise falls back to moving the files individually then removing
// the now-empty source.
func MoveDir(fdst, fsrc Fs) error {
	if Same(fdst, fsrc) {
		ErrorLog(fdst, "Nothing to do as source and destination are the same")
		return nil
	}

	// First attempt to use DirMover if exists, same Fs and no filters are active
	if fdstDirMover, ok := fdst.(DirMover); ok && fsrc.Name() == fdst.Name() && Config.Filter.InActive() {
		err := fdstDirMover.DirMove(fsrc)
		// NOTE(review): this debug is emitted after the attempt, even
		// when it failed - consider logging before the call.
		Debug(fdst, "Using server side directory move")
		switch err {
		case ErrorCantDirMove, ErrorDirExists:
			Debug(fdst, "Server side directory move failed - fallback to copy/delete: %v", err)
		case nil:
			Debug(fdst, "Server side directory move succeeded")
			return nil
		default:
			Stats.Error()
			ErrorLog(fdst, "Server side directory move failed: %v", err)
			return err
		}
	}

	// Now move the files
	err := moveDir(fdst, fsrc)
	if err != nil || Stats.Errored() {
		// Keep the source intact if anything went wrong
		ErrorLog(fdst, "Not deleting files as there were IO errors")
		return err
	}
	// If no filters then purge
	if Config.Filter.InActive() {
		return Purge(fsrc)
	}
	// Otherwise remove any remaining files obeying filters
	err = Delete(fsrc)
	if err != nil {
		return err
	}
	// and try to remove the directory if empty - ignoring error
	_ = TryRmdir(fsrc)
	return nil
}
|
579
fs/sync_test.go
Normal file
579
fs/sync_test.go
Normal file
|
@ -0,0 +1,579 @@
|
|||
// Test sync/copy/move
|
||||
|
||||
package fs_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fstest"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// Check dry run is working
func TestCopyWithDryRun(t *testing.T) {
	r := NewRun(t)
	defer r.Finalise()
	file1 := r.WriteFile("sub dir/hello world", "hello world", t1)

	fs.Config.DryRun = true
	err := fs.CopyDir(r.fremote, r.flocal)
	fs.Config.DryRun = false
	require.NoError(t, err)

	// Nothing must have been copied to the remote
	fstest.CheckItems(t, r.flocal, file1)
	fstest.CheckItems(t, r.fremote)
}
|
||||
|
||||
// Now without dry run
func TestCopy(t *testing.T) {
	r := NewRun(t)
	defer r.Finalise()
	file1 := r.WriteFile("sub dir/hello world", "hello world", t1)

	err := fs.CopyDir(r.fremote, r.flocal)
	require.NoError(t, err)

	// The file must exist on both sides after the copy
	fstest.CheckItems(t, r.flocal, file1)
	fstest.CheckItems(t, r.fremote, file1)
}
|
||||
|
||||
// Now with --no-traverse
func TestCopyNoTraverse(t *testing.T) {
	r := NewRun(t)
	defer r.Finalise()

	fs.Config.NoTraverse = true
	defer func() { fs.Config.NoTraverse = false }()

	file1 := r.WriteFile("sub dir/hello world", "hello world", t1)

	err := fs.CopyDir(r.fremote, r.flocal)
	require.NoError(t, err)

	fstest.CheckItems(t, r.flocal, file1)
	fstest.CheckItems(t, r.fremote, file1)
}
|
||||
|
||||
// Now with --no-traverse
func TestSyncNoTraverse(t *testing.T) {
	r := NewRun(t)
	defer r.Finalise()

	// --no-traverse is ignored for sync (it needs the dst listing to
	// delete) but the sync must still work
	fs.Config.NoTraverse = true
	defer func() { fs.Config.NoTraverse = false }()

	file1 := r.WriteFile("sub dir/hello world", "hello world", t1)

	fs.Stats.ResetCounters()
	err := fs.Sync(r.fremote, r.flocal)
	require.NoError(t, err)

	fstest.CheckItems(t, r.flocal, file1)
	fstest.CheckItems(t, r.fremote, file1)
}
|
||||
|
||||
// Test copy with depth
func TestCopyWithDepth(t *testing.T) {
	r := NewRun(t)
	defer r.Finalise()
	file1 := r.WriteFile("sub dir/hello world", "hello world", t1)
	file2 := r.WriteFile("hello world2", "hello world2", t2)

	// Check the MaxDepth too
	fs.Config.MaxDepth = 1
	defer func() { fs.Config.MaxDepth = -1 }()

	err := fs.CopyDir(r.fremote, r.flocal)
	require.NoError(t, err)

	// Only the top-level file must have been copied
	fstest.CheckItems(t, r.flocal, file1, file2)
	fstest.CheckItems(t, r.fremote, file2)
}
|
||||
|
||||
// Test a server side copy if possible, or the backup path if not
func TestServerSideCopy(t *testing.T) {
	r := NewRun(t)
	defer r.Finalise()
	file1 := r.WriteObject("sub dir/hello world", "hello world", t1)
	fstest.CheckItems(t, r.fremote, file1)

	fremoteCopy, finaliseCopy, err := fstest.RandomRemote(*RemoteName, *SubDir)
	require.NoError(t, err)
	defer finaliseCopy()
	t.Logf("Server side copy (if possible) %v -> %v", r.fremote, fremoteCopy)

	err = fs.CopyDir(fremoteCopy, r.fremote)
	require.NoError(t, err)

	fstest.CheckItems(t, fremoteCopy, file1)
}
|
||||
|
||||
// Check that if the local file doesn't exist when we copy it up,
// nothing happens to the remote file
func TestCopyAfterDelete(t *testing.T) {
	r := NewRun(t)
	defer r.Finalise()
	file1 := r.WriteObject("sub dir/hello world", "hello world", t1)
	fstest.CheckItems(t, r.flocal)
	fstest.CheckItems(t, r.fremote, file1)

	// Make sure the (empty) local directory exists
	err := fs.Mkdir(r.flocal)
	require.NoError(t, err)

	err = fs.CopyDir(r.fremote, r.flocal)
	require.NoError(t, err)

	// Copy doesn't delete, so the remote file stays put
	fstest.CheckItems(t, r.flocal)
	fstest.CheckItems(t, r.fremote, file1)
}
|
||||
|
||||
// Check the copy downloading a file
func TestCopyRedownload(t *testing.T) {
	r := NewRun(t)
	defer r.Finalise()
	file1 := r.WriteObject("sub dir/hello world", "hello world", t1)
	fstest.CheckItems(t, r.fremote, file1)

	// Copy from the remote down to local
	err := fs.CopyDir(r.flocal, r.fremote)
	require.NoError(t, err)

	fstest.CheckItems(t, r.flocal, file1)
}
|
||||
|
||||
// Create a file and sync it. Change the last modified date and resync.
// If we're only doing sync by size and checksum, we expect nothing to
// to be transferred on the second sync.
func TestSyncBasedOnCheckSum(t *testing.T) {
	r := NewRun(t)
	defer r.Finalise()
	fs.Config.CheckSum = true
	defer func() { fs.Config.CheckSum = false }()

	file1 := r.WriteFile("check sum", "", t1)
	fstest.CheckItems(t, r.flocal, file1)

	fs.Stats.ResetCounters()
	err := fs.Sync(r.fremote, r.flocal)
	require.NoError(t, err)

	// We should have transferred exactly one file.
	assert.Equal(t, int64(1), fs.Stats.GetTransfers())
	fstest.CheckItems(t, r.fremote, file1)

	// Change last modified date only
	file2 := r.WriteFile("check sum", "", t2)
	fstest.CheckItems(t, r.flocal, file2)

	fs.Stats.ResetCounters()
	err = fs.Sync(r.fremote, r.flocal)
	require.NoError(t, err)

	// We should have transferred no files
	assert.Equal(t, int64(0), fs.Stats.GetTransfers())
	fstest.CheckItems(t, r.flocal, file2)
	// Remote still holds the original (same checksum, old mod time)
	fstest.CheckItems(t, r.fremote, file1)
}
|
||||
|
||||
// Create a file and sync it. Change the last modified date and the
// file contents but not the size. If we're only doing sync by size
// only, we expect nothing to to be transferred on the second sync.
func TestSyncSizeOnly(t *testing.T) {
	r := NewRun(t)
	defer r.Finalise()
	fs.Config.SizeOnly = true
	defer func() { fs.Config.SizeOnly = false }()

	file1 := r.WriteFile("sizeonly", "potato", t1)
	fstest.CheckItems(t, r.flocal, file1)

	fs.Stats.ResetCounters()
	err := fs.Sync(r.fremote, r.flocal)
	require.NoError(t, err)

	// We should have transferred exactly one file.
	assert.Equal(t, int64(1), fs.Stats.GetTransfers())
	fstest.CheckItems(t, r.fremote, file1)

	// Update mtime, md5sum but not length of file
	file2 := r.WriteFile("sizeonly", "POTATO", t2)
	fstest.CheckItems(t, r.flocal, file2)

	fs.Stats.ResetCounters()
	err = fs.Sync(r.fremote, r.flocal)
	require.NoError(t, err)

	// We should have transferred no files
	assert.Equal(t, int64(0), fs.Stats.GetTransfers())
	fstest.CheckItems(t, r.flocal, file2)
	// Remote still holds the original (same size, old content)
	fstest.CheckItems(t, r.fremote, file1)
}
|
||||
|
||||
// Create a file and sync it. Keep the last modified date but change
// the size. With --ignore-size we expect nothing to to be
// transferred on the second sync.
func TestSyncIgnoreSize(t *testing.T) {
	r := NewRun(t)
	defer r.Finalise()
	fs.Config.IgnoreSize = true
	defer func() { fs.Config.IgnoreSize = false }()

	file1 := r.WriteFile("ignore-size", "contents", t1)
	fstest.CheckItems(t, r.flocal, file1)

	fs.Stats.ResetCounters()
	err := fs.Sync(r.fremote, r.flocal)
	require.NoError(t, err)

	// We should have transferred exactly one file.
	assert.Equal(t, int64(1), fs.Stats.GetTransfers())
	fstest.CheckItems(t, r.fremote, file1)

	// Update size but not date of file
	file2 := r.WriteFile("ignore-size", "longer contents but same date", t1)
	fstest.CheckItems(t, r.flocal, file2)

	fs.Stats.ResetCounters()
	err = fs.Sync(r.fremote, r.flocal)
	require.NoError(t, err)

	// We should have transferred no files
	assert.Equal(t, int64(0), fs.Stats.GetTransfers())
	fstest.CheckItems(t, r.flocal, file2)
	// Remote still holds the original (size difference was ignored)
	fstest.CheckItems(t, r.fremote, file1)
}
|
||||
|
||||
func TestSyncIgnoreTimes(t *testing.T) {
|
||||
r := NewRun(t)
|
||||
defer r.Finalise()
|
||||
file1 := r.WriteBoth("existing", "potato", t1)
|
||||
fstest.CheckItems(t, r.fremote, file1)
|
||||
|
||||
fs.Stats.ResetCounters()
|
||||
err := fs.Sync(r.fremote, r.flocal)
|
||||
require.NoError(t, err)
|
||||
|
||||
// We should have transferred exactly 0 files because the
|
||||
// files were identical.
|
||||
assert.Equal(t, int64(0), fs.Stats.GetTransfers())
|
||||
|
||||
fs.Config.IgnoreTimes = true
|
||||
defer func() { fs.Config.IgnoreTimes = false }()
|
||||
|
||||
fs.Stats.ResetCounters()
|
||||
err = fs.Sync(r.fremote, r.flocal)
|
||||
require.NoError(t, err)
|
||||
|
||||
// We should have transferred exactly one file even though the
|
||||
// files were identical.
|
||||
assert.Equal(t, int64(1), fs.Stats.GetTransfers())
|
||||
|
||||
fstest.CheckItems(t, r.flocal, file1)
|
||||
fstest.CheckItems(t, r.fremote, file1)
|
||||
}
|
||||
|
||||
func TestSyncIgnoreExisting(t *testing.T) {
|
||||
r := NewRun(t)
|
||||
defer r.Finalise()
|
||||
file1 := r.WriteFile("existing", "potato", t1)
|
||||
|
||||
fs.Config.IgnoreExisting = true
|
||||
defer func() { fs.Config.IgnoreExisting = false }()
|
||||
|
||||
fs.Stats.ResetCounters()
|
||||
err := fs.Sync(r.fremote, r.flocal)
|
||||
require.NoError(t, err)
|
||||
fstest.CheckItems(t, r.flocal, file1)
|
||||
fstest.CheckItems(t, r.fremote, file1)
|
||||
|
||||
// Change everything
|
||||
r.WriteFile("existing", "newpotatoes", t2)
|
||||
fs.Stats.ResetCounters()
|
||||
err = fs.Sync(r.fremote, r.flocal)
|
||||
require.NoError(t, err)
|
||||
// Items should not change
|
||||
fstest.CheckItems(t, r.fremote, file1)
|
||||
}
|
||||
|
||||
func TestSyncAfterChangingModtimeOnly(t *testing.T) {
|
||||
r := NewRun(t)
|
||||
defer r.Finalise()
|
||||
file1 := r.WriteFile("empty space", "", t2)
|
||||
r.WriteObject("empty space", "", t1)
|
||||
|
||||
fs.Stats.ResetCounters()
|
||||
err := fs.Sync(r.fremote, r.flocal)
|
||||
require.NoError(t, err)
|
||||
|
||||
fstest.CheckItems(t, r.flocal, file1)
|
||||
fstest.CheckItems(t, r.fremote, file1)
|
||||
}
|
||||
|
||||
func TestSyncAfterAddingAFile(t *testing.T) {
|
||||
r := NewRun(t)
|
||||
defer r.Finalise()
|
||||
file1 := r.WriteBoth("empty space", "", t2)
|
||||
file2 := r.WriteFile("potato", "------------------------------------------------------------", t3)
|
||||
|
||||
fs.Stats.ResetCounters()
|
||||
err := fs.Sync(r.fremote, r.flocal)
|
||||
require.NoError(t, err)
|
||||
fstest.CheckItems(t, r.flocal, file1, file2)
|
||||
fstest.CheckItems(t, r.fremote, file1, file2)
|
||||
}
|
||||
|
||||
func TestSyncAfterChangingFilesSizeOnly(t *testing.T) {
|
||||
r := NewRun(t)
|
||||
defer r.Finalise()
|
||||
file1 := r.WriteObject("potato", "------------------------------------------------------------", t3)
|
||||
file2 := r.WriteFile("potato", "smaller but same date", t3)
|
||||
fstest.CheckItems(t, r.fremote, file1)
|
||||
fstest.CheckItems(t, r.flocal, file2)
|
||||
|
||||
fs.Stats.ResetCounters()
|
||||
err := fs.Sync(r.fremote, r.flocal)
|
||||
require.NoError(t, err)
|
||||
fstest.CheckItems(t, r.flocal, file2)
|
||||
fstest.CheckItems(t, r.fremote, file2)
|
||||
}
|
||||
|
||||
// Sync after changing a file's contents, changing modtime but length
|
||||
// remaining the same
|
||||
func TestSyncAfterChangingContentsOnly(t *testing.T) {
|
||||
r := NewRun(t)
|
||||
defer r.Finalise()
|
||||
var file1 fstest.Item
|
||||
if r.fremote.Precision() == fs.ModTimeNotSupported {
|
||||
t.Logf("ModTimeNotSupported so forcing file to be a different size")
|
||||
file1 = r.WriteObject("potato", "different size to make sure it syncs", t3)
|
||||
} else {
|
||||
file1 = r.WriteObject("potato", "smaller but same date", t3)
|
||||
}
|
||||
file2 := r.WriteFile("potato", "SMALLER BUT SAME DATE", t2)
|
||||
fstest.CheckItems(t, r.fremote, file1)
|
||||
fstest.CheckItems(t, r.flocal, file2)
|
||||
|
||||
fs.Stats.ResetCounters()
|
||||
err := fs.Sync(r.fremote, r.flocal)
|
||||
require.NoError(t, err)
|
||||
fstest.CheckItems(t, r.flocal, file2)
|
||||
fstest.CheckItems(t, r.fremote, file2)
|
||||
}
|
||||
|
||||
// Sync after removing a file and adding a file --dry-run
|
||||
func TestSyncAfterRemovingAFileAndAddingAFileDryRun(t *testing.T) {
|
||||
r := NewRun(t)
|
||||
defer r.Finalise()
|
||||
file1 := r.WriteFile("potato2", "------------------------------------------------------------", t1)
|
||||
file2 := r.WriteObject("potato", "SMALLER BUT SAME DATE", t2)
|
||||
file3 := r.WriteBoth("empty space", "", t2)
|
||||
|
||||
fs.Config.DryRun = true
|
||||
fs.Stats.ResetCounters()
|
||||
err := fs.Sync(r.fremote, r.flocal)
|
||||
fs.Config.DryRun = false
|
||||
require.NoError(t, err)
|
||||
|
||||
fstest.CheckItems(t, r.flocal, file3, file1)
|
||||
fstest.CheckItems(t, r.fremote, file3, file2)
|
||||
}
|
||||
|
||||
// Sync after removing a file and adding a file
|
||||
func TestSyncAfterRemovingAFileAndAddingAFile(t *testing.T) {
|
||||
r := NewRun(t)
|
||||
defer r.Finalise()
|
||||
file1 := r.WriteFile("potato2", "------------------------------------------------------------", t1)
|
||||
file2 := r.WriteObject("potato", "SMALLER BUT SAME DATE", t2)
|
||||
file3 := r.WriteBoth("empty space", "", t2)
|
||||
fstest.CheckItems(t, r.fremote, file2, file3)
|
||||
fstest.CheckItems(t, r.flocal, file1, file3)
|
||||
|
||||
fs.Stats.ResetCounters()
|
||||
err := fs.Sync(r.fremote, r.flocal)
|
||||
require.NoError(t, err)
|
||||
fstest.CheckItems(t, r.flocal, file1, file3)
|
||||
fstest.CheckItems(t, r.fremote, file1, file3)
|
||||
}
|
||||
|
||||
// Sync after removing a file and adding a file with IO Errors
|
||||
func TestSyncAfterRemovingAFileAndAddingAFileWithErrors(t *testing.T) {
|
||||
r := NewRun(t)
|
||||
defer r.Finalise()
|
||||
file1 := r.WriteFile("potato2", "------------------------------------------------------------", t1)
|
||||
file2 := r.WriteObject("potato", "SMALLER BUT SAME DATE", t2)
|
||||
file3 := r.WriteBoth("empty space", "", t2)
|
||||
fstest.CheckItems(t, r.fremote, file2, file3)
|
||||
fstest.CheckItems(t, r.flocal, file1, file3)
|
||||
|
||||
fs.Stats.ResetCounters()
|
||||
fs.Stats.Error()
|
||||
err := fs.Sync(r.fremote, r.flocal)
|
||||
assert.Equal(t, fs.ErrorNotDeleting, err)
|
||||
fstest.CheckItems(t, r.flocal, file1, file3)
|
||||
fstest.CheckItems(t, r.fremote, file1, file2, file3)
|
||||
}
|
||||
|
||||
// Sync test delete during
|
||||
func TestSyncDeleteDuring(t *testing.T) {
|
||||
// This is the default so we've checked this already
|
||||
// check it is the default
|
||||
if !(!fs.Config.DeleteBefore && fs.Config.DeleteDuring && !fs.Config.DeleteAfter) {
|
||||
t.Fatalf("Didn't default to --delete-during")
|
||||
}
|
||||
}
|
||||
|
||||
// Sync test delete before
|
||||
func TestSyncDeleteBefore(t *testing.T) {
|
||||
fs.Config.DeleteBefore = true
|
||||
fs.Config.DeleteDuring = false
|
||||
fs.Config.DeleteAfter = false
|
||||
defer func() {
|
||||
fs.Config.DeleteBefore = false
|
||||
fs.Config.DeleteDuring = true
|
||||
fs.Config.DeleteAfter = false
|
||||
}()
|
||||
|
||||
TestSyncAfterRemovingAFileAndAddingAFile(t)
|
||||
}
|
||||
|
||||
// Sync test delete after
|
||||
func TestSyncDeleteAfter(t *testing.T) {
|
||||
fs.Config.DeleteBefore = false
|
||||
fs.Config.DeleteDuring = false
|
||||
fs.Config.DeleteAfter = true
|
||||
defer func() {
|
||||
fs.Config.DeleteBefore = false
|
||||
fs.Config.DeleteDuring = true
|
||||
fs.Config.DeleteAfter = false
|
||||
}()
|
||||
|
||||
TestSyncAfterRemovingAFileAndAddingAFile(t)
|
||||
}
|
||||
|
||||
// Test with exclude
|
||||
func TestSyncWithExclude(t *testing.T) {
|
||||
r := NewRun(t)
|
||||
defer r.Finalise()
|
||||
file1 := r.WriteBoth("potato2", "------------------------------------------------------------", t1)
|
||||
file2 := r.WriteBoth("empty space", "", t2)
|
||||
file3 := r.WriteFile("enormous", "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", t1) // 100 bytes
|
||||
|
||||
fs.Config.Filter.MaxSize = 40
|
||||
defer func() {
|
||||
fs.Config.Filter.MaxSize = -1
|
||||
}()
|
||||
|
||||
fs.Stats.ResetCounters()
|
||||
err := fs.Sync(r.fremote, r.flocal)
|
||||
require.NoError(t, err)
|
||||
fstest.CheckItems(t, r.fremote, file2, file1)
|
||||
|
||||
// Now sync the other way round and check enormous doesn't get
|
||||
// deleted as it is excluded from the sync
|
||||
fs.Stats.ResetCounters()
|
||||
err = fs.Sync(r.flocal, r.fremote)
|
||||
require.NoError(t, err)
|
||||
fstest.CheckItems(t, r.flocal, file2, file1, file3)
|
||||
}
|
||||
|
||||
// Test with exclude and delete excluded
|
||||
func TestSyncWithExcludeAndDeleteExcluded(t *testing.T) {
|
||||
r := NewRun(t)
|
||||
defer r.Finalise()
|
||||
file1 := r.WriteBoth("potato2", "------------------------------------------------------------", t1) // 60 bytes
|
||||
file2 := r.WriteBoth("empty space", "", t2)
|
||||
file3 := r.WriteBoth("enormous", "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", t1) // 100 bytes
|
||||
fstest.CheckItems(t, r.fremote, file1, file2, file3)
|
||||
fstest.CheckItems(t, r.flocal, file1, file2, file3)
|
||||
|
||||
fs.Config.Filter.MaxSize = 40
|
||||
fs.Config.Filter.DeleteExcluded = true
|
||||
defer func() {
|
||||
fs.Config.Filter.MaxSize = -1
|
||||
fs.Config.Filter.DeleteExcluded = false
|
||||
}()
|
||||
|
||||
fs.Stats.ResetCounters()
|
||||
err := fs.Sync(r.fremote, r.flocal)
|
||||
require.NoError(t, err)
|
||||
fstest.CheckItems(t, r.fremote, file2)
|
||||
|
||||
// Check sync the other way round to make sure enormous gets
|
||||
// deleted even though it is excluded
|
||||
fs.Stats.ResetCounters()
|
||||
err = fs.Sync(r.flocal, r.fremote)
|
||||
require.NoError(t, err)
|
||||
fstest.CheckItems(t, r.flocal, file2)
|
||||
}
|
||||
|
||||
// Test with UpdateOlder set
|
||||
func TestSyncWithUpdateOlder(t *testing.T) {
|
||||
if fs.Config.ModifyWindow == fs.ModTimeNotSupported {
|
||||
t.Skip("Can't run this test on fs which doesn't support mod time")
|
||||
}
|
||||
r := NewRun(t)
|
||||
defer r.Finalise()
|
||||
t2plus := t2.Add(time.Second / 2)
|
||||
t2minus := t2.Add(time.Second / 2)
|
||||
oneF := r.WriteFile("one", "one", t1)
|
||||
twoF := r.WriteFile("two", "two", t3)
|
||||
threeF := r.WriteFile("three", "three", t2)
|
||||
fourF := r.WriteFile("four", "four", t2)
|
||||
fiveF := r.WriteFile("five", "five", t2)
|
||||
fstest.CheckItems(t, r.flocal, oneF, twoF, threeF, fourF, fiveF)
|
||||
oneO := r.WriteObject("one", "ONE", t2)
|
||||
twoO := r.WriteObject("two", "TWO", t2)
|
||||
threeO := r.WriteObject("three", "THREE", t2plus)
|
||||
fourO := r.WriteObject("four", "FOURFOUR", t2minus)
|
||||
fstest.CheckItems(t, r.fremote, oneO, twoO, threeO, fourO)
|
||||
|
||||
fs.Config.UpdateOlder = true
|
||||
oldModifyWindow := fs.Config.ModifyWindow
|
||||
fs.Config.ModifyWindow = fs.ModTimeNotSupported
|
||||
defer func() {
|
||||
fs.Config.UpdateOlder = false
|
||||
fs.Config.ModifyWindow = oldModifyWindow
|
||||
}()
|
||||
|
||||
fs.Stats.ResetCounters()
|
||||
err := fs.Sync(r.fremote, r.flocal)
|
||||
require.NoError(t, err)
|
||||
fstest.CheckItems(t, r.fremote, oneO, twoF, threeO, fourF, fiveF)
|
||||
}
|
||||
|
||||
// Test a server side move if possible, or the backup path if not
|
||||
func TestServerSideMove(t *testing.T) {
|
||||
r := NewRun(t)
|
||||
defer r.Finalise()
|
||||
file1 := r.WriteBoth("potato2", "------------------------------------------------------------", t1)
|
||||
file2 := r.WriteBoth("empty space", "", t2)
|
||||
|
||||
fstest.CheckItems(t, r.fremote, file2, file1)
|
||||
|
||||
fremoteMove, finaliseMove, err := fstest.RandomRemote(*RemoteName, *SubDir)
|
||||
require.NoError(t, err)
|
||||
defer finaliseMove()
|
||||
t.Logf("Server side move (if possible) %v -> %v", r.fremote, fremoteMove)
|
||||
|
||||
// Write just one file in the new remote
|
||||
r.WriteObjectTo(fremoteMove, "empty space", "", t2, false)
|
||||
fstest.CheckItems(t, fremoteMove, file2)
|
||||
|
||||
// Do server side move
|
||||
fs.Stats.ResetCounters()
|
||||
err = fs.MoveDir(fremoteMove, r.fremote)
|
||||
require.NoError(t, err)
|
||||
|
||||
fstest.CheckItems(t, r.fremote)
|
||||
fstest.CheckItems(t, fremoteMove, file2, file1)
|
||||
|
||||
// Move it back again, dst does not exist this time
|
||||
fs.Stats.ResetCounters()
|
||||
err = fs.MoveDir(r.fremote, fremoteMove)
|
||||
require.NoError(t, err)
|
||||
|
||||
fstest.CheckItems(t, r.fremote, file2, file1)
|
||||
fstest.CheckItems(t, fremoteMove)
|
||||
}
|
Loading…
Reference in a new issue