2018-04-07 17:48:11 +00:00
|
|
|
// Package fstests provides generic integration tests for the Fs and
|
|
|
|
// Object interfaces
|
2015-09-22 17:47:16 +00:00
|
|
|
package fstests
|
2015-02-14 18:48:08 +00:00
|
|
|
|
2014-07-24 21:50:11 +00:00
|
|
|
import (
|
|
|
|
"bytes"
|
2016-07-25 18:18:56 +00:00
|
|
|
"fmt"
|
2014-07-24 21:50:11 +00:00
|
|
|
"io"
|
2016-09-10 10:29:57 +00:00
|
|
|
"io/ioutil"
|
2014-07-31 20:24:52 +00:00
|
|
|
"os"
|
2016-04-23 20:46:52 +00:00
|
|
|
"path"
|
2017-07-08 15:26:41 +00:00
|
|
|
"path/filepath"
|
2016-05-07 13:50:35 +00:00
|
|
|
"sort"
|
2014-07-31 20:24:52 +00:00
|
|
|
"strings"
|
2014-07-24 21:50:11 +00:00
|
|
|
"testing"
|
|
|
|
"time"
|
|
|
|
|
|
|
|
"github.com/ncw/rclone/fs"
|
2018-01-12 16:30:54 +00:00
|
|
|
"github.com/ncw/rclone/fs/config"
|
|
|
|
"github.com/ncw/rclone/fs/fserrors"
|
|
|
|
"github.com/ncw/rclone/fs/hash"
|
|
|
|
"github.com/ncw/rclone/fs/object"
|
|
|
|
"github.com/ncw/rclone/fs/operations"
|
|
|
|
"github.com/ncw/rclone/fs/walk"
|
2014-07-24 21:50:11 +00:00
|
|
|
"github.com/ncw/rclone/fstest"
|
2018-10-09 07:42:45 +00:00
|
|
|
"github.com/ncw/rclone/lib/readers"
|
2017-02-25 11:09:57 +00:00
|
|
|
"github.com/pkg/errors"
|
2016-04-23 20:46:52 +00:00
|
|
|
"github.com/stretchr/testify/assert"
|
|
|
|
"github.com/stretchr/testify/require"
|
2014-07-24 21:50:11 +00:00
|
|
|
)
|
|
|
|
|
2018-02-25 09:58:06 +00:00
|
|
|
// InternalTester is an optional interface for Fs which allows the execution
// of internal tests against a backend.
//
// This interface should be implemented in 'backend'_internal_test.go and not in 'backend'.go
type InternalTester interface {
	// InternalTest is called by the test framework with the testing
	// context so the backend can run its own internal tests.
	InternalTest(*testing.T)
}
|
|
|
|
|
2018-09-07 10:45:28 +00:00
|
|
|
// ChunkedUploadConfig contains the values used by TestFsPutChunked
// to determine the limits of chunked uploading
type ChunkedUploadConfig struct {
	// MinChunkSize is the minimum allowed chunk size for the backend
	MinChunkSize fs.SizeSuffix
	// MaxChunkSize is the maximum allowed chunk size, 0 means no limit
	MaxChunkSize fs.SizeSuffix
	// CeilChunkSize rounds the given chunk size up to the next valid value
	// nil will disable rounding
	// e.g. the next power of 2
	CeilChunkSize func(fs.SizeSuffix) fs.SizeSuffix
	// NeedMultipleChunks is set if more than one chunk is required on upload,
	// in which case the tests only upload files larger than one chunk
	NeedMultipleChunks bool
}
|
|
|
|
|
|
|
|
// SetUploadChunkSizer is a test only interface to change the upload chunk size at runtime
type SetUploadChunkSizer interface {
	// SetUploadChunkSize changes the configured UploadChunkSize to the
	// given value and returns the previous setting, which the tests use
	// to restore the original value afterwards.
	// Will only be called while no transfer is in progress.
	SetUploadChunkSize(fs.SizeSuffix) (fs.SizeSuffix, error)
}
|
|
|
|
|
2018-10-13 21:43:15 +00:00
|
|
|
// SetUploadCutoffer is a test only interface to change the upload cutoff size at runtime
type SetUploadCutoffer interface {
	// SetUploadCutoff changes the configured UploadCutoff to the given
	// value and returns the previous setting, which the tests use to
	// restore the original value afterwards.
	// Will only be called while no transfer is in progress.
	SetUploadCutoff(fs.SizeSuffix) (fs.SizeSuffix, error)
}
|
|
|
|
|
2018-09-07 10:45:28 +00:00
|
|
|
// NextPowerOfTwo returns the current or next bigger power of two.
|
|
|
|
// All values less or equal 0 will return 0
|
|
|
|
func NextPowerOfTwo(i fs.SizeSuffix) fs.SizeSuffix {
|
|
|
|
return 1 << uint(64-leadingZeros64(uint64(i)-1))
|
|
|
|
}
|
|
|
|
|
|
|
|
// NextMultipleOf returns a function that can be used as a CeilChunkSize function.
|
|
|
|
// This function will return the next multiple of m that is equal or bigger than i.
|
|
|
|
// All values less or equal 0 will return 0.
|
|
|
|
func NextMultipleOf(m fs.SizeSuffix) func(fs.SizeSuffix) fs.SizeSuffix {
|
|
|
|
if m <= 0 {
|
|
|
|
panic(fmt.Sprintf("invalid multiplier %s", m))
|
|
|
|
}
|
|
|
|
return func(i fs.SizeSuffix) fs.SizeSuffix {
|
|
|
|
if i <= 0 {
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
|
|
|
|
return (((i - 1) / m) + 1) * m
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-05-07 13:50:35 +00:00
|
|
|
// dirsToNames returns a sorted list of names
|
2017-06-30 12:37:29 +00:00
|
|
|
func dirsToNames(dirs []fs.Directory) []string {
|
2016-05-07 13:50:35 +00:00
|
|
|
names := []string{}
|
|
|
|
for _, dir := range dirs {
|
2018-04-15 09:08:11 +00:00
|
|
|
names = append(names, fstest.WinPath(fstest.Normalize(dir.Remote())))
|
2016-05-07 13:50:35 +00:00
|
|
|
}
|
|
|
|
sort.Strings(names)
|
|
|
|
return names
|
|
|
|
}
|
|
|
|
|
|
|
|
// objsToNames returns a sorted list of object names
|
|
|
|
func objsToNames(objs []fs.Object) []string {
|
|
|
|
names := []string{}
|
|
|
|
for _, obj := range objs {
|
2018-04-15 09:08:11 +00:00
|
|
|
names = append(names, fstest.WinPath(fstest.Normalize(obj.Remote())))
|
2016-05-07 13:50:35 +00:00
|
|
|
}
|
|
|
|
sort.Strings(names)
|
|
|
|
return names
|
|
|
|
}
|
|
|
|
|
2018-04-07 17:48:11 +00:00
|
|
|
// findObject finds the object on the remote
|
|
|
|
func findObject(t *testing.T, f fs.Fs, Name string) fs.Object {
|
2015-12-30 10:46:04 +00:00
|
|
|
var obj fs.Object
|
2016-06-25 20:23:20 +00:00
|
|
|
var err error
|
2018-10-14 13:45:23 +00:00
|
|
|
sleepTime := 1 * time.Second
|
2017-02-22 10:14:40 +00:00
|
|
|
for i := 1; i <= *fstest.ListRetries; i++ {
|
2018-04-07 17:48:11 +00:00
|
|
|
obj, err = f.NewObject(Name)
|
2016-06-25 20:23:20 +00:00
|
|
|
if err == nil {
|
2015-12-30 10:46:04 +00:00
|
|
|
break
|
|
|
|
}
|
2018-10-14 13:45:23 +00:00
|
|
|
t.Logf("Sleeping for %v for findObject eventual consistency: %d/%d (%v)", sleepTime, i, *fstest.ListRetries, err)
|
|
|
|
time.Sleep(sleepTime)
|
|
|
|
sleepTime = (sleepTime * 3) / 2
|
2015-12-30 10:46:04 +00:00
|
|
|
}
|
2016-06-29 16:59:31 +00:00
|
|
|
require.NoError(t, err)
|
2014-07-24 21:50:11 +00:00
|
|
|
return obj
|
|
|
|
}
|
|
|
|
|
2018-04-07 17:48:11 +00:00
|
|
|
// testPut puts file to the remote
|
|
|
|
func testPut(t *testing.T, f fs.Fs, file *fstest.Item) string {
|
2017-06-13 10:22:16 +00:00
|
|
|
tries := 1
|
|
|
|
const maxTries = 10
|
2016-05-07 13:50:35 +00:00
|
|
|
again:
|
2016-09-10 10:29:57 +00:00
|
|
|
contents := fstest.RandomString(100)
|
|
|
|
buf := bytes.NewBufferString(contents)
|
2018-01-12 16:30:54 +00:00
|
|
|
hash := hash.NewMultiHasher()
|
2014-07-24 21:50:11 +00:00
|
|
|
in := io.TeeReader(buf, hash)
|
|
|
|
|
|
|
|
file.Size = int64(buf.Len())
|
2018-01-12 16:30:54 +00:00
|
|
|
obji := object.NewStaticObjectInfo(file.Path, file.ModTime, file.Size, true, nil, nil)
|
2018-04-07 17:48:11 +00:00
|
|
|
obj, err := f.Put(in, obji)
|
2014-07-24 21:50:11 +00:00
|
|
|
if err != nil {
|
2016-05-07 13:50:35 +00:00
|
|
|
// Retry if err returned a retry error
|
2018-01-12 16:30:54 +00:00
|
|
|
if fserrors.IsRetryError(err) && tries < maxTries {
|
2016-05-07 13:50:35 +00:00
|
|
|
t.Logf("Put error: %v - low level retry %d/%d", err, tries, maxTries)
|
2016-07-12 18:41:34 +00:00
|
|
|
time.Sleep(2 * time.Second)
|
2016-05-07 13:50:35 +00:00
|
|
|
|
|
|
|
tries++
|
|
|
|
goto again
|
|
|
|
}
|
2016-07-25 18:18:56 +00:00
|
|
|
require.NoError(t, err, fmt.Sprintf("Put error: %v", err))
|
2014-07-24 21:50:11 +00:00
|
|
|
}
|
2016-01-17 11:27:31 +00:00
|
|
|
file.Hashes = hash.Sums()
|
2018-04-07 17:48:11 +00:00
|
|
|
file.Check(t, obj, f.Precision())
|
2014-07-24 21:50:11 +00:00
|
|
|
// Re-read the object and check again
|
2018-04-07 17:48:11 +00:00
|
|
|
obj = findObject(t, f, file.Path)
|
|
|
|
file.Check(t, obj, f.Precision())
|
2016-09-10 10:29:57 +00:00
|
|
|
return contents
|
2014-07-24 21:50:11 +00:00
|
|
|
}
|
|
|
|
|
2018-10-09 07:42:45 +00:00
|
|
|
// testPutLarge puts file to the remote, checks it and removes it on success.
//
// The file contents are generated deterministically (PatternReader) so
// upload and download hashes can be compared. Put is retried on low
// level retry errors.
func testPutLarge(t *testing.T, f fs.Fs, file *fstest.Item) {
	tries := 1
	const maxTries = 10
again:
	r := readers.NewPatternReader(file.Size)
	uploadHash := hash.NewMultiHasher()
	// Hash the data as it is uploaded so we know what went over the wire.
	in := io.TeeReader(r, uploadHash)

	obji := object.NewStaticObjectInfo(file.Path, file.ModTime, file.Size, true, nil, nil)
	obj, err := f.Put(in, obji)
	if err != nil {
		// Retry if err returned a retry error
		if fserrors.IsRetryError(err) && tries < maxTries {
			t.Logf("Put error: %v - low level retry %d/%d", err, tries, maxTries)
			time.Sleep(2 * time.Second)

			tries++
			goto again
		}
		require.NoError(t, err, fmt.Sprintf("Put error: %v", err))
	}
	file.Hashes = uploadHash.Sums()
	file.Check(t, obj, f.Precision())

	// Re-read the object and check again
	obj = findObject(t, f, file.Path)
	file.Check(t, obj, f.Precision())

	// Download the object and check it is OK
	downloadHash := hash.NewMultiHasher()
	download, err := obj.Open()
	require.NoError(t, err)
	n, err := io.Copy(downloadHash, download)
	require.NoError(t, err)
	assert.Equal(t, file.Size, n)
	require.NoError(t, download.Close())
	// Downloaded data must hash to the same values as the upload.
	assert.Equal(t, file.Hashes, downloadHash.Sums())

	// Remove the object
	require.NoError(t, obj.Remove())
}
|
|
|
|
|
2018-04-07 17:48:11 +00:00
|
|
|
// errorReader is an io.Reader that always fails with the stored error.
type errorReader struct {
	err error // the error returned by every Read call
}

// Read implements io.Reader by failing immediately with er.err
// without producing any data.
func (er errorReader) Read(p []byte) (n int, err error) {
	// No bytes are ever read.
	return 0, er.err
}
|
|
|
|
|
2018-04-07 17:48:11 +00:00
|
|
|
// read the contents of an object as a string
|
|
|
|
func readObject(t *testing.T, obj fs.Object, limit int64, options ...fs.OpenOption) string {
|
|
|
|
what := fmt.Sprintf("readObject(%q) limit=%d, options=%+v", obj, limit, options)
|
|
|
|
in, err := obj.Open(options...)
|
|
|
|
require.NoError(t, err, what)
|
|
|
|
var r io.Reader = in
|
|
|
|
if limit >= 0 {
|
|
|
|
r = &io.LimitedReader{R: r, N: limit}
|
|
|
|
}
|
|
|
|
contents, err := ioutil.ReadAll(r)
|
|
|
|
require.NoError(t, err, what)
|
|
|
|
err = in.Close()
|
|
|
|
require.NoError(t, err, what)
|
|
|
|
return string(contents)
|
2014-07-24 21:50:11 +00:00
|
|
|
}
|
|
|
|
|
2018-04-07 17:48:11 +00:00
|
|
|
// ExtraConfigItem describes a config item for the tests.
// Name is the remote's config section, Key and Value the setting
// applied to it via config.FileSet before the tests run.
type ExtraConfigItem struct{ Name, Key, Value string }
|
2016-06-12 14:06:27 +00:00
|
|
|
|
2018-04-07 17:48:11 +00:00
|
|
|
// Opt is options for Run
type Opt struct {
	// RemoteName is the name of the remote to test; if empty a local
	// remote is created for the tests (can be overridden by the
	// -remote flag).
	RemoteName string
	// NilObject is a typed nil fs.Object for the backend
	// (NOTE(review): usage is outside this chunk - confirm against callers)
	NilObject fs.Object
	// ExtraConfig is applied to the config file before the tests start
	ExtraConfig []ExtraConfigItem
	SkipBadWindowsCharacters bool // skips unusable characters for windows if set
	SkipFsMatch bool // if set skip exact matching of Fs value
	TiersToTest []string // List of tiers which can be tested in setTier test
	// ChunkedUpload configures the limits for TestFsPutChunked
	ChunkedUpload ChunkedUploadConfig
}
|
|
|
|
|
|
|
|
// Run runs the basic integration tests for a remote using the remote
|
|
|
|
// name passed in and the nil object
|
|
|
|
func Run(t *testing.T, opt *Opt) {
|
|
|
|
var (
|
|
|
|
remote fs.Fs
|
|
|
|
remoteName = opt.RemoteName
|
|
|
|
subRemoteName string
|
|
|
|
subRemoteLeaf string
|
|
|
|
file1 = fstest.Item{
|
|
|
|
ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"),
|
|
|
|
Path: "file name.txt",
|
2016-01-20 20:06:05 +00:00
|
|
|
}
|
2018-04-07 17:48:11 +00:00
|
|
|
file1Contents string
|
|
|
|
file2 = fstest.Item{
|
|
|
|
ModTime: fstest.Time("2001-02-03T04:05:10.123123123Z"),
|
|
|
|
Path: `hello? sausage/êé/Hello, 世界/ " ' @ < > & ? + ≠/z.txt`,
|
|
|
|
WinPath: `hello_ sausage/êé/Hello, 世界/ _ ' @ _ _ & _ + ≠/z.txt`,
|
2017-02-22 10:14:40 +00:00
|
|
|
}
|
2018-04-07 17:48:11 +00:00
|
|
|
isLocalRemote bool
|
|
|
|
)
|
|
|
|
|
|
|
|
// Make the Fs we are testing with, initialising the global variables
|
|
|
|
// subRemoteName - name of the remote after the TestRemote:
|
|
|
|
// subRemoteLeaf - a subdirectory to use under that
|
|
|
|
// remote - the result of fs.NewFs(TestRemote:subRemoteName)
|
|
|
|
newFs := func(t *testing.T) {
|
|
|
|
var err error
|
|
|
|
subRemoteName, subRemoteLeaf, err = fstest.RandomRemoteName(remoteName)
|
|
|
|
require.NoError(t, err)
|
|
|
|
remote, err = fs.NewFs(subRemoteName)
|
|
|
|
if err == fs.ErrorNotFoundInConfigFile {
|
|
|
|
t.Logf("Didn't find %q in config file - skipping tests", remoteName)
|
|
|
|
return
|
2017-02-22 10:14:40 +00:00
|
|
|
}
|
2018-04-07 17:48:11 +00:00
|
|
|
require.NoError(t, err, fmt.Sprintf("unexpected error: %v", err))
|
2014-07-24 21:50:11 +00:00
|
|
|
}
|
2017-06-11 21:43:31 +00:00
|
|
|
|
2018-04-07 17:48:11 +00:00
|
|
|
// Skip the test if the remote isn't configured
|
|
|
|
skipIfNotOk := func(t *testing.T) {
|
|
|
|
if remote == nil {
|
|
|
|
t.Skipf("WARN: %q not configured", remoteName)
|
2016-08-24 21:21:34 +00:00
|
|
|
}
|
2015-02-14 18:48:08 +00:00
|
|
|
}
|
|
|
|
|
2018-04-07 17:48:11 +00:00
|
|
|
// Skip if remote is not ListR capable, otherwise set the useListR
|
|
|
|
// flag, returning a function to restore its value
|
|
|
|
skipIfNotListR := func(t *testing.T) func() {
|
|
|
|
skipIfNotOk(t)
|
|
|
|
if remote.Features().ListR == nil {
|
|
|
|
t.Skip("FS has no ListR interface")
|
|
|
|
}
|
|
|
|
previous := fs.Config.UseListR
|
|
|
|
fs.Config.UseListR = true
|
|
|
|
return func() {
|
|
|
|
fs.Config.UseListR = previous
|
|
|
|
}
|
2016-07-25 18:18:56 +00:00
|
|
|
}
|
2015-02-14 18:48:08 +00:00
|
|
|
|
2018-09-18 12:25:20 +00:00
|
|
|
// Skip if remote is not SetTier and GetTier capable
|
2018-09-11 01:57:43 +00:00
|
|
|
skipIfNotSetTier := func(t *testing.T) {
|
|
|
|
skipIfNotOk(t)
|
2018-09-18 12:25:20 +00:00
|
|
|
if remote.Features().SetTier == false ||
|
|
|
|
remote.Features().GetTier == false {
|
|
|
|
t.Skip("FS has no SetTier & GetTier interfaces")
|
2018-09-11 01:57:43 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-04-07 17:48:11 +00:00
|
|
|
// TestInit tests basic initialisation
|
|
|
|
t.Run("TestInit", func(t *testing.T) {
|
|
|
|
var err error
|
2015-02-14 18:48:08 +00:00
|
|
|
|
2018-04-07 17:48:11 +00:00
|
|
|
// Remove bad characters from Windows file name if set
|
|
|
|
if opt.SkipBadWindowsCharacters {
|
|
|
|
t.Logf("Removing bad windows characters from test file")
|
2018-04-15 09:08:11 +00:00
|
|
|
file2.Path = fstest.WinPath(file2.Path)
|
2018-04-07 17:48:11 +00:00
|
|
|
}
|
2015-02-14 18:48:08 +00:00
|
|
|
|
2018-04-07 17:48:11 +00:00
|
|
|
fstest.Initialise()
|
2015-08-31 20:05:51 +00:00
|
|
|
|
2018-04-07 17:48:11 +00:00
|
|
|
// Set extra config if supplied
|
|
|
|
for _, item := range opt.ExtraConfig {
|
|
|
|
config.FileSet(item.Name, item.Key, item.Value)
|
|
|
|
}
|
|
|
|
if *fstest.RemoteName != "" {
|
|
|
|
remoteName = *fstest.RemoteName
|
|
|
|
}
|
|
|
|
t.Logf("Using remote %q", remoteName)
|
|
|
|
if remoteName == "" {
|
|
|
|
remoteName, err = fstest.LocalRemote()
|
|
|
|
require.NoError(t, err)
|
|
|
|
isLocalRemote = true
|
|
|
|
}
|
2015-08-31 20:05:51 +00:00
|
|
|
|
2018-04-07 17:48:11 +00:00
|
|
|
newFs(t)
|
2015-08-31 20:05:51 +00:00
|
|
|
|
2018-04-07 17:48:11 +00:00
|
|
|
skipIfNotOk(t)
|
2015-08-31 20:05:51 +00:00
|
|
|
|
2018-04-07 17:48:11 +00:00
|
|
|
err = remote.Mkdir("")
|
|
|
|
require.NoError(t, err)
|
|
|
|
fstest.CheckListing(t, remote, []fstest.Item{})
|
|
|
|
})
|
|
|
|
|
|
|
|
// TestFsString tests the String method
|
|
|
|
t.Run("TestFsString", func(t *testing.T) {
|
|
|
|
skipIfNotOk(t)
|
|
|
|
str := remote.String()
|
|
|
|
require.NotEqual(t, "", str)
|
|
|
|
})
|
|
|
|
|
|
|
|
// TestFsName tests the Name method
|
|
|
|
t.Run("TestFsName", func(t *testing.T) {
|
|
|
|
skipIfNotOk(t)
|
|
|
|
got := remote.Name()
|
|
|
|
want := remoteName
|
|
|
|
if isLocalRemote {
|
|
|
|
want = "local:"
|
|
|
|
}
|
|
|
|
require.Equal(t, want, got+":")
|
|
|
|
})
|
|
|
|
|
|
|
|
// TestFsRoot tests the Root method
|
|
|
|
t.Run("TestFsRoot", func(t *testing.T) {
|
|
|
|
skipIfNotOk(t)
|
|
|
|
name := remote.Name() + ":"
|
|
|
|
root := remote.Root()
|
|
|
|
if isLocalRemote {
|
|
|
|
// only check last path element on local
|
|
|
|
require.Equal(t, filepath.Base(subRemoteName), filepath.Base(root))
|
|
|
|
} else {
|
|
|
|
require.Equal(t, subRemoteName, name+root)
|
|
|
|
}
|
|
|
|
})
|
2015-09-22 17:47:16 +00:00
|
|
|
|
2018-04-07 17:48:11 +00:00
|
|
|
// TestFsRmdirEmpty tests deleting an empty directory
|
|
|
|
t.Run("TestFsRmdirEmpty", func(t *testing.T) {
|
|
|
|
skipIfNotOk(t)
|
|
|
|
err := remote.Rmdir("")
|
|
|
|
require.NoError(t, err)
|
|
|
|
})
|
2015-08-31 20:05:51 +00:00
|
|
|
|
2018-04-07 17:48:11 +00:00
|
|
|
// TestFsRmdirNotFound tests deleting a non existent directory
|
|
|
|
t.Run("TestFsRmdirNotFound", func(t *testing.T) {
|
|
|
|
skipIfNotOk(t)
|
|
|
|
err := remote.Rmdir("")
|
|
|
|
assert.Error(t, err, "Expecting error on Rmdir non existent")
|
|
|
|
})
|
2015-08-31 20:05:51 +00:00
|
|
|
|
2018-04-11 20:39:17 +00:00
|
|
|
// TestFsMkdir tests making a directory
|
2018-04-07 17:48:11 +00:00
|
|
|
t.Run("TestFsMkdir", func(t *testing.T) {
|
|
|
|
skipIfNotOk(t)
|
2015-08-31 20:05:51 +00:00
|
|
|
|
2018-04-07 17:48:11 +00:00
|
|
|
// Use a new directory here. This is for the container based
|
|
|
|
// remotes which take time to create and destroy a container
|
|
|
|
// (eg azure blob)
|
|
|
|
newFs(t)
|
2015-08-31 20:05:51 +00:00
|
|
|
|
2018-04-07 17:48:11 +00:00
|
|
|
err := remote.Mkdir("")
|
|
|
|
require.NoError(t, err)
|
|
|
|
fstest.CheckListing(t, remote, []fstest.Item{})
|
2015-08-31 20:05:51 +00:00
|
|
|
|
2018-04-07 17:48:11 +00:00
|
|
|
err = remote.Mkdir("")
|
|
|
|
require.NoError(t, err)
|
|
|
|
})
|
2015-08-31 20:05:51 +00:00
|
|
|
|
2018-04-07 17:48:11 +00:00
|
|
|
// TestFsMkdirRmdirSubdir tests making and removing a sub directory
|
|
|
|
t.Run("TestFsMkdirRmdirSubdir", func(t *testing.T) {
|
|
|
|
skipIfNotOk(t)
|
|
|
|
dir := "dir/subdir"
|
|
|
|
err := operations.Mkdir(remote, dir)
|
|
|
|
require.NoError(t, err)
|
2018-06-03 18:45:34 +00:00
|
|
|
fstest.CheckListingWithPrecision(t, remote, []fstest.Item{}, []string{"dir", "dir/subdir"}, fs.GetModifyWindow(remote))
|
2014-07-24 21:50:11 +00:00
|
|
|
|
2018-04-07 17:48:11 +00:00
|
|
|
err = operations.Rmdir(remote, dir)
|
|
|
|
require.NoError(t, err)
|
2018-06-03 18:45:34 +00:00
|
|
|
fstest.CheckListingWithPrecision(t, remote, []fstest.Item{}, []string{"dir"}, fs.GetModifyWindow(remote))
|
2014-07-24 21:50:11 +00:00
|
|
|
|
2018-04-07 17:48:11 +00:00
|
|
|
err = operations.Rmdir(remote, "dir")
|
|
|
|
require.NoError(t, err)
|
2018-06-03 18:45:34 +00:00
|
|
|
fstest.CheckListingWithPrecision(t, remote, []fstest.Item{}, []string{}, fs.GetModifyWindow(remote))
|
2018-04-07 17:48:11 +00:00
|
|
|
})
|
|
|
|
|
|
|
|
// TestFsListEmpty tests listing an empty directory
|
|
|
|
t.Run("TestFsListEmpty", func(t *testing.T) {
|
|
|
|
skipIfNotOk(t)
|
|
|
|
fstest.CheckListing(t, remote, []fstest.Item{})
|
|
|
|
})
|
|
|
|
|
|
|
|
// TestFsListDirEmpty tests listing the directories from an empty directory
|
|
|
|
TestFsListDirEmpty := func(t *testing.T) {
|
|
|
|
skipIfNotOk(t)
|
|
|
|
objs, dirs, err := walk.GetAll(remote, "", true, 1)
|
|
|
|
require.NoError(t, err)
|
|
|
|
assert.Equal(t, []string{}, objsToNames(objs))
|
|
|
|
assert.Equal(t, []string{}, dirsToNames(dirs))
|
2017-05-25 21:05:49 +00:00
|
|
|
}
|
2018-04-07 17:48:11 +00:00
|
|
|
t.Run("TestFsListDirEmpty", TestFsListDirEmpty)
|
|
|
|
|
|
|
|
// TestFsListRDirEmpty tests listing the directories from an empty directory using ListR
|
|
|
|
t.Run("TestFsListRDirEmpty", func(t *testing.T) {
|
|
|
|
defer skipIfNotListR(t)()
|
|
|
|
TestFsListDirEmpty(t)
|
|
|
|
})
|
|
|
|
|
2018-04-20 22:06:51 +00:00
|
|
|
// TestFsListDirNotFound tests listing the directories from an empty directory
|
|
|
|
TestFsListDirNotFound := func(t *testing.T) {
|
|
|
|
skipIfNotOk(t)
|
|
|
|
objs, dirs, err := walk.GetAll(remote, "does not exist", true, 1)
|
|
|
|
if !remote.Features().CanHaveEmptyDirectories {
|
|
|
|
if err != fs.ErrorDirNotFound {
|
|
|
|
assert.NoError(t, err)
|
|
|
|
assert.Equal(t, 0, len(objs)+len(dirs))
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
assert.Equal(t, fs.ErrorDirNotFound, err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
t.Run("TestFsListDirNotFound", TestFsListDirNotFound)
|
|
|
|
|
|
|
|
// TestFsListRDirNotFound tests listing the directories from an empty directory using ListR
|
|
|
|
t.Run("TestFsListRDirNotFound", func(t *testing.T) {
|
|
|
|
defer skipIfNotListR(t)()
|
|
|
|
TestFsListDirNotFound(t)
|
|
|
|
})
|
|
|
|
|
2018-04-07 17:48:11 +00:00
|
|
|
// TestFsNewObjectNotFound tests not finding an object
|
|
|
|
t.Run("TestFsNewObjectNotFound", func(t *testing.T) {
|
|
|
|
skipIfNotOk(t)
|
|
|
|
// Object in an existing directory
|
|
|
|
o, err := remote.NewObject("potato")
|
|
|
|
assert.Nil(t, o)
|
|
|
|
assert.Equal(t, fs.ErrorObjectNotFound, err)
|
|
|
|
// Now try an object in a non existing directory
|
|
|
|
o, err = remote.NewObject("directory/not/found/potato")
|
|
|
|
assert.Nil(t, o)
|
|
|
|
assert.Equal(t, fs.ErrorObjectNotFound, err)
|
|
|
|
})
|
|
|
|
|
|
|
|
// TestFsPutFile1 tests putting a file
|
|
|
|
t.Run("TestFsPutFile1", func(t *testing.T) {
|
|
|
|
skipIfNotOk(t)
|
|
|
|
file1Contents = testPut(t, remote, &file1)
|
|
|
|
})
|
|
|
|
|
|
|
|
// TestFsPutError tests uploading a file where there is an error
|
|
|
|
//
|
|
|
|
// It makes sure that aborting a file half way through does not create
|
|
|
|
// a file on the remote.
|
2018-09-01 09:01:02 +00:00
|
|
|
//
|
|
|
|
// go test -v -run 'TestIntegration/Test(Setup|Init|FsMkdir|FsPutError)$'
|
2018-04-07 17:48:11 +00:00
|
|
|
t.Run("TestFsPutError", func(t *testing.T) {
|
|
|
|
skipIfNotOk(t)
|
|
|
|
|
2018-08-16 14:52:15 +00:00
|
|
|
const N = 5 * 1024
|
|
|
|
// Read N bytes then produce an error
|
|
|
|
contents := fstest.RandomString(N)
|
2018-04-07 17:48:11 +00:00
|
|
|
buf := bytes.NewBufferString(contents)
|
|
|
|
er := &errorReader{errors.New("potato")}
|
|
|
|
in := io.MultiReader(buf, er)
|
|
|
|
|
2018-08-16 14:52:15 +00:00
|
|
|
obji := object.NewStaticObjectInfo(file2.Path, file2.ModTime, 2*N, true, nil, nil)
|
2018-04-07 17:48:11 +00:00
|
|
|
_, err := remote.Put(in, obji)
|
|
|
|
// assert.Nil(t, obj) - FIXME some remotes return the object even on nil
|
|
|
|
assert.NotNil(t, err)
|
|
|
|
|
|
|
|
obj, err := remote.NewObject(file2.Path)
|
|
|
|
assert.Nil(t, obj)
|
|
|
|
assert.Equal(t, fs.ErrorObjectNotFound, err)
|
|
|
|
})
|
|
|
|
|
|
|
|
// TestFsPutFile2 tests putting a file into a subdirectory
|
|
|
|
t.Run("TestFsPutFile2", func(t *testing.T) {
|
|
|
|
skipIfNotOk(t)
|
|
|
|
/* file2Contents = */ testPut(t, remote, &file2)
|
|
|
|
})
|
|
|
|
|
|
|
|
// TestFsUpdateFile1 tests updating file1 with new contents
|
|
|
|
t.Run("TestFsUpdateFile1", func(t *testing.T) {
|
|
|
|
skipIfNotOk(t)
|
|
|
|
file1Contents = testPut(t, remote, &file1)
|
|
|
|
// Note that the next test will check there are no duplicated file names
|
|
|
|
})
|
|
|
|
|
2018-10-09 07:42:45 +00:00
|
|
|
t.Run("TestFsPutChunked", func(t *testing.T) {
|
|
|
|
skipIfNotOk(t)
|
|
|
|
|
|
|
|
setUploadChunkSizer, _ := remote.(SetUploadChunkSizer)
|
|
|
|
if setUploadChunkSizer == nil {
|
|
|
|
t.Skipf("%T does not implement SetUploadChunkSizer", remote)
|
|
|
|
}
|
|
|
|
|
2018-10-13 21:43:15 +00:00
|
|
|
setUploadCutoffer, _ := remote.(SetUploadCutoffer)
|
|
|
|
|
2018-10-09 07:42:45 +00:00
|
|
|
minChunkSize := opt.ChunkedUpload.MinChunkSize
|
|
|
|
if minChunkSize < 100 {
|
|
|
|
minChunkSize = 100
|
|
|
|
}
|
|
|
|
if opt.ChunkedUpload.CeilChunkSize != nil {
|
|
|
|
minChunkSize = opt.ChunkedUpload.CeilChunkSize(minChunkSize)
|
|
|
|
}
|
|
|
|
|
|
|
|
maxChunkSize := opt.ChunkedUpload.MaxChunkSize
|
|
|
|
if maxChunkSize < minChunkSize {
|
|
|
|
if minChunkSize <= fs.MebiByte {
|
|
|
|
maxChunkSize = 2 * fs.MebiByte
|
|
|
|
} else {
|
|
|
|
maxChunkSize = 2 * minChunkSize
|
|
|
|
}
|
|
|
|
} else if maxChunkSize >= 2*minChunkSize {
|
|
|
|
maxChunkSize = 2 * minChunkSize
|
|
|
|
}
|
|
|
|
if opt.ChunkedUpload.CeilChunkSize != nil {
|
|
|
|
maxChunkSize = opt.ChunkedUpload.CeilChunkSize(maxChunkSize)
|
|
|
|
}
|
|
|
|
|
|
|
|
next := func(f func(fs.SizeSuffix) fs.SizeSuffix) fs.SizeSuffix {
|
|
|
|
s := f(minChunkSize)
|
|
|
|
if s > maxChunkSize {
|
|
|
|
s = minChunkSize
|
|
|
|
}
|
|
|
|
return s
|
|
|
|
}
|
|
|
|
|
|
|
|
chunkSizes := fs.SizeSuffixList{
|
|
|
|
minChunkSize,
|
|
|
|
minChunkSize + (maxChunkSize-minChunkSize)/3,
|
|
|
|
next(NextPowerOfTwo),
|
|
|
|
next(NextMultipleOf(100000)),
|
|
|
|
next(NextMultipleOf(100001)),
|
|
|
|
maxChunkSize,
|
|
|
|
}
|
|
|
|
chunkSizes.Sort()
|
|
|
|
|
2018-10-13 21:43:15 +00:00
|
|
|
// Set the minimum chunk size, upload cutoff and reset it at the end
|
|
|
|
oldChunkSize, err := setUploadChunkSizer.SetUploadChunkSize(minChunkSize)
|
2018-10-09 07:42:45 +00:00
|
|
|
require.NoError(t, err)
|
2018-10-13 21:43:15 +00:00
|
|
|
var oldUploadCutoff fs.SizeSuffix
|
|
|
|
if setUploadCutoffer != nil {
|
|
|
|
oldUploadCutoff, err = setUploadCutoffer.SetUploadCutoff(minChunkSize)
|
|
|
|
require.NoError(t, err)
|
|
|
|
}
|
2018-10-09 07:42:45 +00:00
|
|
|
defer func() {
|
2018-10-13 21:43:15 +00:00
|
|
|
_, err := setUploadChunkSizer.SetUploadChunkSize(oldChunkSize)
|
2018-10-09 07:42:45 +00:00
|
|
|
assert.NoError(t, err)
|
2018-10-13 21:43:15 +00:00
|
|
|
if setUploadCutoffer != nil {
|
|
|
|
_, err := setUploadCutoffer.SetUploadCutoff(oldUploadCutoff)
|
|
|
|
assert.NoError(t, err)
|
|
|
|
}
|
2018-10-09 07:42:45 +00:00
|
|
|
}()
|
|
|
|
|
|
|
|
var lastCs fs.SizeSuffix
|
|
|
|
for _, cs := range chunkSizes {
|
|
|
|
if cs <= lastCs {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
if opt.ChunkedUpload.CeilChunkSize != nil {
|
|
|
|
cs = opt.ChunkedUpload.CeilChunkSize(cs)
|
|
|
|
}
|
|
|
|
lastCs = cs
|
|
|
|
|
|
|
|
t.Run(cs.String(), func(t *testing.T) {
|
|
|
|
_, err := setUploadChunkSizer.SetUploadChunkSize(cs)
|
|
|
|
require.NoError(t, err)
|
2018-10-13 21:43:15 +00:00
|
|
|
if setUploadCutoffer != nil {
|
|
|
|
_, err = setUploadCutoffer.SetUploadCutoff(cs)
|
|
|
|
require.NoError(t, err)
|
|
|
|
}
|
|
|
|
|
|
|
|
var testChunks []fs.SizeSuffix
|
|
|
|
if opt.ChunkedUpload.NeedMultipleChunks {
|
|
|
|
// If NeedMultipleChunks is set then test with > cs
|
|
|
|
testChunks = []fs.SizeSuffix{cs + 1, 2 * cs, 2*cs + 1}
|
|
|
|
} else {
|
|
|
|
testChunks = []fs.SizeSuffix{cs - 1, cs, 2*cs + 1}
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, fileSize := range testChunks {
|
2018-10-09 07:42:45 +00:00
|
|
|
t.Run(fmt.Sprintf("%d", fileSize), func(t *testing.T) {
|
|
|
|
testPutLarge(t, remote, &fstest.Item{
|
|
|
|
ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"),
|
|
|
|
Path: fmt.Sprintf("chunked-%s-%s.bin", cs.String(), fileSize.String()),
|
|
|
|
Size: int64(fileSize),
|
|
|
|
})
|
|
|
|
})
|
|
|
|
}
|
|
|
|
})
|
|
|
|
}
|
|
|
|
})
|
|
|
|
|
2018-04-07 17:48:11 +00:00
|
|
|
// TestFsListDirFile2 tests the files are correctly uploaded by doing
|
|
|
|
// Depth 1 directory listings
|
|
|
|
TestFsListDirFile2 := func(t *testing.T) {
|
|
|
|
skipIfNotOk(t)
|
|
|
|
list := func(dir string, expectedDirNames, expectedObjNames []string) {
|
|
|
|
var objNames, dirNames []string
|
|
|
|
for i := 1; i <= *fstest.ListRetries; i++ {
|
|
|
|
objs, dirs, err := walk.GetAll(remote, dir, true, 1)
|
|
|
|
if errors.Cause(err) == fs.ErrorDirNotFound {
|
2018-04-15 09:08:11 +00:00
|
|
|
objs, dirs, err = walk.GetAll(remote, fstest.WinPath(dir), true, 1)
|
2018-04-07 17:48:11 +00:00
|
|
|
}
|
|
|
|
require.NoError(t, err)
|
|
|
|
objNames = objsToNames(objs)
|
|
|
|
dirNames = dirsToNames(dirs)
|
|
|
|
if len(objNames) >= len(expectedObjNames) && len(dirNames) >= len(expectedDirNames) {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
t.Logf("Sleeping for 1 second for TestFsListDirFile2 eventual consistency: %d/%d", i, *fstest.ListRetries)
|
|
|
|
time.Sleep(1 * time.Second)
|
|
|
|
}
|
|
|
|
assert.Equal(t, expectedDirNames, dirNames)
|
|
|
|
assert.Equal(t, expectedObjNames, objNames)
|
2018-03-24 19:57:44 +00:00
|
|
|
}
|
2018-04-07 17:48:11 +00:00
|
|
|
dir := file2.Path
|
|
|
|
deepest := true
|
|
|
|
for dir != "" {
|
|
|
|
expectedObjNames := []string{}
|
|
|
|
expectedDirNames := []string{}
|
|
|
|
child := dir
|
|
|
|
dir = path.Dir(dir)
|
|
|
|
if dir == "." {
|
|
|
|
dir = ""
|
2018-04-15 09:08:11 +00:00
|
|
|
expectedObjNames = append(expectedObjNames, fstest.WinPath(file1.Path))
|
2018-04-07 17:48:11 +00:00
|
|
|
}
|
|
|
|
if deepest {
|
2018-04-15 09:08:11 +00:00
|
|
|
expectedObjNames = append(expectedObjNames, fstest.WinPath(file2.Path))
|
2018-04-07 17:48:11 +00:00
|
|
|
deepest = false
|
|
|
|
} else {
|
2018-04-15 09:08:11 +00:00
|
|
|
expectedDirNames = append(expectedDirNames, fstest.WinPath(child))
|
2018-04-07 17:48:11 +00:00
|
|
|
}
|
|
|
|
list(dir, expectedDirNames, expectedObjNames)
|
2018-03-08 20:03:34 +00:00
|
|
|
}
|
|
|
|
}
|
2018-04-07 17:48:11 +00:00
|
|
|
t.Run("TestFsListDirFile2", TestFsListDirFile2)
|
|
|
|
|
|
|
|
// TestFsListRDirFile2 tests the files are correctly uploaded by doing
|
|
|
|
// Depth 1 directory listings using ListR
|
|
|
|
t.Run("TestFsListRDirFile2", func(t *testing.T) {
|
|
|
|
defer skipIfNotListR(t)()
|
|
|
|
TestFsListDirFile2(t)
|
|
|
|
})
|
|
|
|
|
|
|
|
// TestFsListDirRoot tests that DirList works in the root
|
|
|
|
TestFsListDirRoot := func(t *testing.T) {
|
|
|
|
skipIfNotOk(t)
|
|
|
|
rootRemote, err := fs.NewFs(remoteName)
|
|
|
|
require.NoError(t, err)
|
|
|
|
_, dirs, err := walk.GetAll(rootRemote, "", true, 1)
|
2018-03-08 20:03:34 +00:00
|
|
|
require.NoError(t, err)
|
2018-04-07 17:48:11 +00:00
|
|
|
assert.Contains(t, dirsToNames(dirs), subRemoteLeaf, "Remote leaf not found")
|
2018-03-08 20:03:34 +00:00
|
|
|
}
|
2018-04-07 17:48:11 +00:00
|
|
|
t.Run("TestFsListDirRoot", TestFsListDirRoot)

// TestFsListRDirRoot tests that DirList works in the root using ListR
t.Run("TestFsListRDirRoot", func(t *testing.T) {
	defer skipIfNotListR(t)()
	TestFsListDirRoot(t)
})

// TestFsListSubdir tests List works for a subdirectory
TestFsListSubdir := func(t *testing.T) {
	skipIfNotOk(t)
	// Try the native path first and fall back to the Windows-safe
	// variant if the directory can't be found under the native name.
	fileName := file2.Path
	var (
		err  error
		objs []fs.Object
		dirs []fs.Directory
	)
	for attempt := 0; attempt < 2; attempt++ {
		dir, _ := path.Split(fileName)
		dir = dir[:len(dir)-1] // strip the trailing slash left by Split
		objs, dirs, err = walk.GetAll(remote, dir, true, -1)
		if err != fs.ErrorDirNotFound {
			break
		}
		fileName = file2.WinPath
	}
	require.NoError(t, err)
	require.Len(t, objs, 1)
	assert.Equal(t, fileName, objs[0].Remote())
	require.Len(t, dirs, 0)
}
t.Run("TestFsListSubdir", TestFsListSubdir)

// TestFsListRSubdir tests List works for a subdirectory using ListR
t.Run("TestFsListRSubdir", func(t *testing.T) {
	defer skipIfNotListR(t)()
	TestFsListSubdir(t)
})

// TestFsListLevel2 tests List works for 2 levels
TestFsListLevel2 := func(t *testing.T) {
	skipIfNotOk(t)
	objs, dirs, err := walk.GetAll(remote, "", true, 2)
	if err == fs.ErrorLevelNotSupported {
		return
	}
	require.NoError(t, err)
	assert.Equal(t, []string{file1.Path}, objsToNames(objs))
	// NOTE(review): these names use "hello_ sausage" while later listings
	// in this file use "hello? sausage" - confirm which is intended.
	assert.Equal(t, []string{`hello_ sausage`, `hello_ sausage/êé`}, dirsToNames(dirs))
}
t.Run("TestFsListLevel2", TestFsListLevel2)

// TestFsListRLevel2 tests List works for 2 levels using ListR
t.Run("TestFsListRLevel2", func(t *testing.T) {
	defer skipIfNotListR(t)()
	TestFsListLevel2(t)
})
|
|
|
|
|
|
|
|
// TestFsListFile1 tests file present
|
|
|
|
t.Run("TestFsListFile1", func(t *testing.T) {
|
|
|
|
skipIfNotOk(t)
|
|
|
|
fstest.CheckListing(t, remote, []fstest.Item{file1, file2})
|
|
|
|
})
|
|
|
|
|
|
|
|
// TestFsNewObject tests NewObject
|
|
|
|
t.Run("TestFsNewObject", func(t *testing.T) {
|
|
|
|
skipIfNotOk(t)
|
|
|
|
obj := findObject(t, remote, file1.Path)
|
|
|
|
file1.Check(t, obj, remote.Precision())
|
|
|
|
})
|
|
|
|
|
|
|
|
// TestFsListFile1and2 tests two files present
|
|
|
|
t.Run("TestFsListFile1and2", func(t *testing.T) {
|
|
|
|
skipIfNotOk(t)
|
|
|
|
fstest.CheckListing(t, remote, []fstest.Item{file1, file2})
|
|
|
|
})
|
|
|
|
|
|
|
|
// TestFsNewObjectDir tests NewObject on a directory which should produce an error
|
|
|
|
t.Run("TestFsNewObjectDir", func(t *testing.T) {
|
|
|
|
skipIfNotOk(t)
|
|
|
|
dir := path.Dir(file2.Path)
|
|
|
|
obj, err := remote.NewObject(dir)
|
|
|
|
assert.Nil(t, obj)
|
|
|
|
assert.NotNil(t, err)
|
|
|
|
})
|
|
|
|
|
|
|
|
// TestFsCopy tests Copy
|
|
|
|
t.Run("TestFsCopy", func(t *testing.T) {
|
|
|
|
skipIfNotOk(t)
|
|
|
|
|
|
|
|
// Check have Copy
|
|
|
|
doCopy := remote.Features().Copy
|
|
|
|
if doCopy == nil {
|
|
|
|
t.Skip("FS has no Copier interface")
|
|
|
|
}
|
2017-05-25 21:05:49 +00:00
|
|
|
|
2018-04-07 17:48:11 +00:00
|
|
|
// Test with file2 so have + and ' ' in file name
|
|
|
|
var file2Copy = file2
|
|
|
|
file2Copy.Path += "-copy"
|
2014-07-24 21:50:11 +00:00
|
|
|
|
2018-04-07 17:48:11 +00:00
|
|
|
// do the copy
|
|
|
|
src := findObject(t, remote, file2.Path)
|
|
|
|
dst, err := doCopy(src, file2Copy.Path)
|
|
|
|
if err == fs.ErrorCantCopy {
|
|
|
|
t.Skip("FS can't copy")
|
2015-11-08 15:29:58 +00:00
|
|
|
}
|
2018-04-07 17:48:11 +00:00
|
|
|
require.NoError(t, err, fmt.Sprintf("Error: %#v", err))
|
2014-07-24 21:50:11 +00:00
|
|
|
|
2018-04-07 17:48:11 +00:00
|
|
|
// check file exists in new listing
|
|
|
|
fstest.CheckListing(t, remote, []fstest.Item{file1, file2, file2Copy})
|
2014-07-24 21:50:11 +00:00
|
|
|
|
2018-04-07 17:48:11 +00:00
|
|
|
// Check dst lightly - list above has checked ModTime/Hashes
|
|
|
|
assert.Equal(t, file2Copy.Path, dst.Remote())
|
2014-07-24 21:50:11 +00:00
|
|
|
|
2018-04-07 17:48:11 +00:00
|
|
|
// Delete copy
|
|
|
|
err = dst.Remove()
|
|
|
|
require.NoError(t, err)
|
2016-09-21 21:13:24 +00:00
|
|
|
|
2018-04-07 17:48:11 +00:00
|
|
|
})
|
2014-07-24 21:50:11 +00:00
|
|
|
|
2018-04-07 17:48:11 +00:00
|
|
|
// TestFsMove tests Move
|
|
|
|
t.Run("TestFsMove", func(t *testing.T) {
|
|
|
|
skipIfNotOk(t)
|
2014-07-24 21:50:11 +00:00
|
|
|
|
2018-04-07 17:48:11 +00:00
|
|
|
// Check have Move
|
|
|
|
doMove := remote.Features().Move
|
|
|
|
if doMove == nil {
|
|
|
|
t.Skip("FS has no Mover interface")
|
|
|
|
}
|
2016-01-17 11:27:31 +00:00
|
|
|
|
2018-04-07 17:48:11 +00:00
|
|
|
// state of files now:
|
|
|
|
// 1: file name.txt
|
|
|
|
// 2: hello sausage?/../z.txt
|
|
|
|
|
|
|
|
var file1Move = file1
|
|
|
|
var file2Move = file2
|
|
|
|
|
|
|
|
// check happy path, i.e. no naming conflicts when rename and move are two
|
|
|
|
// separate operations
|
|
|
|
file2Move.Path = "other.txt"
|
|
|
|
file2Move.WinPath = ""
|
|
|
|
src := findObject(t, remote, file2.Path)
|
|
|
|
dst, err := doMove(src, file2Move.Path)
|
|
|
|
if err == fs.ErrorCantMove {
|
|
|
|
t.Skip("FS can't move")
|
|
|
|
}
|
|
|
|
require.NoError(t, err)
|
|
|
|
// check file exists in new listing
|
|
|
|
fstest.CheckListing(t, remote, []fstest.Item{file1, file2Move})
|
|
|
|
// Check dst lightly - list above has checked ModTime/Hashes
|
|
|
|
assert.Equal(t, file2Move.Path, dst.Remote())
|
|
|
|
// 1: file name.txt
|
|
|
|
// 2: other.txt
|
|
|
|
|
|
|
|
// Check conflict on "rename, then move"
|
|
|
|
file1Move.Path = "moveTest/other.txt"
|
|
|
|
src = findObject(t, remote, file1.Path)
|
|
|
|
_, err = doMove(src, file1Move.Path)
|
|
|
|
require.NoError(t, err)
|
|
|
|
fstest.CheckListing(t, remote, []fstest.Item{file1Move, file2Move})
|
|
|
|
// 1: moveTest/other.txt
|
|
|
|
// 2: other.txt
|
2017-05-19 11:26:07 +00:00
|
|
|
|
2018-04-07 17:48:11 +00:00
|
|
|
// Check conflict on "move, then rename"
|
|
|
|
src = findObject(t, remote, file1Move.Path)
|
|
|
|
_, err = doMove(src, file1.Path)
|
|
|
|
require.NoError(t, err)
|
|
|
|
fstest.CheckListing(t, remote, []fstest.Item{file1, file2Move})
|
|
|
|
// 1: file name.txt
|
|
|
|
// 2: other.txt
|
2018-01-21 16:56:11 +00:00
|
|
|
|
2018-04-07 17:48:11 +00:00
|
|
|
src = findObject(t, remote, file2Move.Path)
|
|
|
|
_, err = doMove(src, file2.Path)
|
|
|
|
require.NoError(t, err)
|
|
|
|
fstest.CheckListing(t, remote, []fstest.Item{file1, file2})
|
|
|
|
// 1: file name.txt
|
|
|
|
// 2: hello sausage?/../z.txt
|
2018-04-14 16:15:00 +00:00
|
|
|
|
|
|
|
// Tidy up moveTest directory
|
|
|
|
require.NoError(t, remote.Rmdir("moveTest"))
|
2018-04-07 17:48:11 +00:00
|
|
|
})
|
|
|
|
|
|
|
|
// Move src to this remote using server side move operations.
|
|
|
|
//
|
|
|
|
// Will only be called if src.Fs().Name() == f.Name()
|
|
|
|
//
|
|
|
|
// If it isn't possible then return fs.ErrorCantDirMove
|
|
|
|
//
|
|
|
|
// If destination exists then return fs.ErrorDirExists
|
|
|
|
|
|
|
|
// TestFsDirMove tests DirMove
|
|
|
|
//
|
2018-04-30 16:22:03 +00:00
|
|
|
// go test -v -run 'TestIntegration/Test(Setup|Init|FsMkdir|FsPutFile1|FsPutFile2|FsUpdateFile1|FsDirMove)$
|
2018-04-07 17:48:11 +00:00
|
|
|
t.Run("TestFsDirMove", func(t *testing.T) {
|
|
|
|
skipIfNotOk(t)
|
|
|
|
|
|
|
|
// Check have DirMove
|
|
|
|
doDirMove := remote.Features().DirMove
|
|
|
|
if doDirMove == nil {
|
|
|
|
t.Skip("FS has no DirMover interface")
|
|
|
|
}
|
2014-07-24 21:50:11 +00:00
|
|
|
|
2018-04-07 17:48:11 +00:00
|
|
|
// Check it can't move onto itself
|
|
|
|
err := doDirMove(remote, "", "")
|
|
|
|
require.Equal(t, fs.ErrorDirExists, err)
|
2014-07-24 21:50:11 +00:00
|
|
|
|
2018-04-07 17:48:11 +00:00
|
|
|
// new remote
|
|
|
|
newRemote, _, removeNewRemote, err := fstest.RandomRemote(remoteName, false)
|
|
|
|
require.NoError(t, err)
|
|
|
|
defer removeNewRemote()
|
2016-09-10 10:29:57 +00:00
|
|
|
|
2018-04-07 17:48:11 +00:00
|
|
|
const newName = "new_name/sub_new_name"
|
|
|
|
// try the move
|
|
|
|
err = newRemote.Features().DirMove(remote, "", newName)
|
|
|
|
require.NoError(t, err)
|
2016-09-10 10:29:57 +00:00
|
|
|
|
2018-04-07 17:48:11 +00:00
|
|
|
// check remotes
|
2018-04-14 16:15:00 +00:00
|
|
|
// remote should not exist here
|
|
|
|
_, err = remote.List("")
|
2018-04-20 22:06:51 +00:00
|
|
|
assert.Equal(t, fs.ErrorDirNotFound, errors.Cause(err))
|
2018-04-14 16:15:00 +00:00
|
|
|
//fstest.CheckListingWithPrecision(t, remote, []fstest.Item{}, []string{}, remote.Precision())
|
2018-04-07 17:48:11 +00:00
|
|
|
file1Copy := file1
|
|
|
|
file1Copy.Path = path.Join(newName, file1.Path)
|
|
|
|
file2Copy := file2
|
|
|
|
file2Copy.Path = path.Join(newName, file2.Path)
|
|
|
|
file2Copy.WinPath = path.Join(newName, file2.WinPath)
|
2018-04-14 16:15:00 +00:00
|
|
|
fstest.CheckListingWithPrecision(t, newRemote, []fstest.Item{file2Copy, file1Copy}, []string{
|
|
|
|
"new_name",
|
|
|
|
"new_name/sub_new_name",
|
|
|
|
"new_name/sub_new_name/hello? sausage",
|
|
|
|
"new_name/sub_new_name/hello? sausage/êé",
|
|
|
|
"new_name/sub_new_name/hello? sausage/êé/Hello, 世界",
|
|
|
|
"new_name/sub_new_name/hello? sausage/êé/Hello, 世界/ \" ' @ < > & ? + ≠",
|
|
|
|
}, newRemote.Precision())
|
2018-04-07 17:48:11 +00:00
|
|
|
|
|
|
|
// move it back
|
|
|
|
err = doDirMove(newRemote, newName, "")
|
|
|
|
require.NoError(t, err)
|
2016-09-10 10:29:57 +00:00
|
|
|
|
2018-04-07 17:48:11 +00:00
|
|
|
// check remotes
|
2018-04-14 16:15:00 +00:00
|
|
|
fstest.CheckListingWithPrecision(t, remote, []fstest.Item{file2, file1}, []string{
|
|
|
|
"hello? sausage",
|
|
|
|
"hello? sausage/êé",
|
|
|
|
"hello? sausage/êé/Hello, 世界",
|
|
|
|
"hello? sausage/êé/Hello, 世界/ \" ' @ < > & ? + ≠",
|
|
|
|
}, remote.Precision())
|
|
|
|
fstest.CheckListingWithPrecision(t, newRemote, []fstest.Item{}, []string{
|
|
|
|
"new_name",
|
|
|
|
}, newRemote.Precision())
|
2018-04-07 17:48:11 +00:00
|
|
|
})
|
|
|
|
|
|
|
|
// TestFsRmdirFull tests removing a non empty directory
|
|
|
|
t.Run("TestFsRmdirFull", func(t *testing.T) {
|
|
|
|
skipIfNotOk(t)
|
|
|
|
err := remote.Rmdir("")
|
|
|
|
require.Error(t, err, "Expecting error on RMdir on non empty remote")
|
|
|
|
})
|
|
|
|
|
|
|
|
// TestFsPrecision tests the Precision of the Fs
|
|
|
|
t.Run("TestFsPrecision", func(t *testing.T) {
|
|
|
|
skipIfNotOk(t)
|
|
|
|
precision := remote.Precision()
|
|
|
|
if precision == fs.ModTimeNotSupported {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if precision > time.Second || precision < 0 {
|
|
|
|
t.Fatalf("Precision out of range %v", precision)
|
|
|
|
}
|
|
|
|
// FIXME check expected precision
|
|
|
|
})
|
|
|
|
|
|
|
|
// TestFsChangeNotify tests that changes are properly
|
|
|
|
// propagated
|
|
|
|
//
|
|
|
|
// go test -v -remote TestDrive: -run '^Test(Setup|Init|FsChangeNotify)$' -verbose
|
|
|
|
t.Run("TestFsChangeNotify", func(t *testing.T) {
|
|
|
|
skipIfNotOk(t)
|
|
|
|
|
|
|
|
// Check have ChangeNotify
|
|
|
|
doChangeNotify := remote.Features().ChangeNotify
|
|
|
|
if doChangeNotify == nil {
|
|
|
|
t.Skip("FS has no ChangeNotify interface")
|
|
|
|
}
|
2014-07-24 21:50:11 +00:00
|
|
|
|
2018-04-07 17:48:11 +00:00
|
|
|
err := operations.Mkdir(remote, "dir")
|
|
|
|
require.NoError(t, err)
|
2014-07-24 21:50:11 +00:00
|
|
|
|
2018-08-25 19:28:57 +00:00
|
|
|
pollInterval := make(chan time.Duration)
|
2018-04-07 17:48:11 +00:00
|
|
|
dirChanges := []string{}
|
|
|
|
objChanges := []string{}
|
2018-08-25 19:28:57 +00:00
|
|
|
doChangeNotify(func(x string, e fs.EntryType) {
|
2018-04-07 17:48:11 +00:00
|
|
|
fs.Debugf(nil, "doChangeNotify(%q, %+v)", x, e)
|
|
|
|
if strings.HasPrefix(x, file1.Path[:5]) || strings.HasPrefix(x, file2.Path[:5]) {
|
|
|
|
fs.Debugf(nil, "Ignoring notify for file1 or file2: %q, %v", x, e)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if e == fs.EntryDirectory {
|
|
|
|
dirChanges = append(dirChanges, x)
|
|
|
|
} else if e == fs.EntryObject {
|
|
|
|
objChanges = append(objChanges, x)
|
|
|
|
}
|
2018-08-25 19:28:57 +00:00
|
|
|
}, pollInterval)
|
|
|
|
defer func() { close(pollInterval) }()
|
|
|
|
pollInterval <- time.Second
|
2014-07-31 20:24:52 +00:00
|
|
|
|
2018-04-07 17:48:11 +00:00
|
|
|
var dirs []string
|
|
|
|
for _, idx := range []int{1, 3, 2} {
|
|
|
|
dir := fmt.Sprintf("dir/subdir%d", idx)
|
|
|
|
err = operations.Mkdir(remote, dir)
|
|
|
|
require.NoError(t, err)
|
|
|
|
dirs = append(dirs, dir)
|
|
|
|
}
|
2014-07-31 20:24:52 +00:00
|
|
|
|
2018-04-07 17:48:11 +00:00
|
|
|
contents := fstest.RandomString(100)
|
|
|
|
buf := bytes.NewBufferString(contents)
|
2018-03-29 07:10:19 +00:00
|
|
|
|
2018-04-07 17:48:11 +00:00
|
|
|
var objs []fs.Object
|
|
|
|
for _, idx := range []int{2, 4, 3} {
|
|
|
|
obji := object.NewStaticObjectInfo(fmt.Sprintf("dir/file%d", idx), time.Now(), int64(buf.Len()), true, nil, nil)
|
|
|
|
o, err := remote.Put(buf, obji)
|
|
|
|
require.NoError(t, err)
|
|
|
|
objs = append(objs, o)
|
|
|
|
}
|
2018-03-29 07:10:19 +00:00
|
|
|
|
2018-04-07 17:48:11 +00:00
|
|
|
time.Sleep(3 * time.Second)
|
2018-03-29 07:10:19 +00:00
|
|
|
|
2018-04-07 17:48:11 +00:00
|
|
|
assert.Equal(t, []string{"dir/subdir1", "dir/subdir3", "dir/subdir2"}, dirChanges)
|
|
|
|
assert.Equal(t, []string{"dir/file2", "dir/file4", "dir/file3"}, objChanges)
|
2018-03-29 07:10:19 +00:00
|
|
|
|
2018-04-07 17:48:11 +00:00
|
|
|
// tidy up afterwards
|
|
|
|
for _, o := range objs {
|
|
|
|
assert.NoError(t, o.Remove())
|
|
|
|
}
|
|
|
|
dirs = append(dirs, "dir")
|
|
|
|
for _, dir := range dirs {
|
|
|
|
assert.NoError(t, remote.Rmdir(dir))
|
|
|
|
}
|
|
|
|
})
|
|
|
|
|
|
|
|
// TestObjectString tests the Object String method
|
|
|
|
t.Run("TestObjectString", func(t *testing.T) {
|
|
|
|
skipIfNotOk(t)
|
|
|
|
obj := findObject(t, remote, file1.Path)
|
|
|
|
assert.Equal(t, file1.Path, obj.String())
|
2018-09-03 16:59:10 +00:00
|
|
|
if opt.NilObject != nil {
|
|
|
|
assert.Equal(t, "<nil>", opt.NilObject.String())
|
|
|
|
}
|
2018-04-07 17:48:11 +00:00
|
|
|
})
|
|
|
|
|
|
|
|
// TestObjectFs tests the object can be found
|
|
|
|
t.Run("TestObjectFs", func(t *testing.T) {
|
|
|
|
skipIfNotOk(t)
|
|
|
|
obj := findObject(t, remote, file1.Path)
|
2018-09-03 16:59:10 +00:00
|
|
|
// If this is set we don't do the direct comparison of
|
|
|
|
// the Fs from the object as it may be different
|
|
|
|
if opt.SkipFsMatch {
|
|
|
|
return
|
|
|
|
}
|
2018-04-07 17:48:11 +00:00
|
|
|
testRemote := remote
|
|
|
|
if obj.Fs() != testRemote {
|
|
|
|
// Check to see if this wraps something else
|
|
|
|
if doUnWrap := testRemote.Features().UnWrap; doUnWrap != nil {
|
|
|
|
testRemote = doUnWrap()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
assert.Equal(t, obj.Fs(), testRemote)
|
|
|
|
})
|
|
|
|
|
|
|
|
// TestObjectRemote tests the Remote is correct
|
|
|
|
t.Run("TestObjectRemote", func(t *testing.T) {
|
|
|
|
skipIfNotOk(t)
|
|
|
|
obj := findObject(t, remote, file1.Path)
|
|
|
|
assert.Equal(t, file1.Path, obj.Remote())
|
|
|
|
})
|
|
|
|
|
|
|
|
// TestObjectHashes checks all the hashes the object supports
|
|
|
|
t.Run("TestObjectHashes", func(t *testing.T) {
|
|
|
|
skipIfNotOk(t)
|
|
|
|
obj := findObject(t, remote, file1.Path)
|
|
|
|
file1.CheckHashes(t, obj)
|
|
|
|
})
|
|
|
|
|
|
|
|
// TestObjectModTime tests the ModTime of the object is correct
|
|
|
|
TestObjectModTime := func(t *testing.T) {
|
|
|
|
skipIfNotOk(t)
|
|
|
|
obj := findObject(t, remote, file1.Path)
|
|
|
|
file1.CheckModTime(t, obj, obj.ModTime(), remote.Precision())
|
|
|
|
}
|
|
|
|
t.Run("TestObjectModTime", TestObjectModTime)
|
|
|
|
|
|
|
|
// TestObjectMimeType tests the MimeType of the object is correct
|
|
|
|
t.Run("TestObjectMimeType", func(t *testing.T) {
|
|
|
|
skipIfNotOk(t)
|
|
|
|
obj := findObject(t, remote, file1.Path)
|
|
|
|
do, ok := obj.(fs.MimeTyper)
|
|
|
|
if !ok {
|
|
|
|
t.Skip("MimeType method not supported")
|
|
|
|
}
|
|
|
|
mimeType := do.MimeType()
|
|
|
|
if strings.ContainsRune(mimeType, ';') {
|
|
|
|
assert.Equal(t, "text/plain; charset=utf-8", mimeType)
|
|
|
|
} else {
|
|
|
|
assert.Equal(t, "text/plain", mimeType)
|
|
|
|
}
|
|
|
|
})
|
|
|
|
|
|
|
|
// TestObjectSetModTime tests that SetModTime works
|
|
|
|
t.Run("TestObjectSetModTime", func(t *testing.T) {
|
|
|
|
skipIfNotOk(t)
|
|
|
|
newModTime := fstest.Time("2011-12-13T14:15:16.999999999Z")
|
|
|
|
obj := findObject(t, remote, file1.Path)
|
|
|
|
err := obj.SetModTime(newModTime)
|
|
|
|
if err == fs.ErrorCantSetModTime || err == fs.ErrorCantSetModTimeWithoutDelete {
|
|
|
|
t.Log(err)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
require.NoError(t, err)
|
|
|
|
file1.ModTime = newModTime
|
|
|
|
file1.CheckModTime(t, obj, obj.ModTime(), remote.Precision())
|
|
|
|
// And make a new object and read it from there too
|
|
|
|
TestObjectModTime(t)
|
|
|
|
})
|
|
|
|
|
|
|
|
// TestObjectSize tests that Size works
|
|
|
|
t.Run("TestObjectSize", func(t *testing.T) {
|
|
|
|
skipIfNotOk(t)
|
|
|
|
obj := findObject(t, remote, file1.Path)
|
|
|
|
assert.Equal(t, file1.Size, obj.Size())
|
|
|
|
})
|
|
|
|
|
|
|
|
// TestObjectOpen tests that Open works
|
|
|
|
t.Run("TestObjectOpen", func(t *testing.T) {
|
|
|
|
skipIfNotOk(t)
|
|
|
|
obj := findObject(t, remote, file1.Path)
|
|
|
|
assert.Equal(t, file1Contents, readObject(t, obj, -1), "contents of file1 differ")
|
|
|
|
})
|
|
|
|
|
|
|
|
// TestObjectOpenSeek tests that Open works with SeekOption
|
|
|
|
t.Run("TestObjectOpenSeek", func(t *testing.T) {
|
|
|
|
skipIfNotOk(t)
|
|
|
|
obj := findObject(t, remote, file1.Path)
|
|
|
|
assert.Equal(t, file1Contents[50:], readObject(t, obj, -1, &fs.SeekOption{Offset: 50}), "contents of file1 differ after seek")
|
|
|
|
})
|
|
|
|
|
|
|
|
// TestObjectOpenRange tests that Open works with RangeOption
|
2018-04-30 16:22:03 +00:00
|
|
|
//
|
|
|
|
// go test -v -run 'TestIntegration/Test(Setup|Init|FsMkdir|FsPutFile1|FsPutFile2|FsUpdateFile1|ObjectOpenRange)$'
|
2018-04-07 17:48:11 +00:00
|
|
|
t.Run("TestObjectOpenRange", func(t *testing.T) {
|
|
|
|
skipIfNotOk(t)
|
|
|
|
obj := findObject(t, remote, file1.Path)
|
|
|
|
for _, test := range []struct {
|
|
|
|
ro fs.RangeOption
|
|
|
|
wantStart, wantEnd int
|
|
|
|
}{
|
|
|
|
{fs.RangeOption{Start: 5, End: 15}, 5, 16},
|
|
|
|
{fs.RangeOption{Start: 80, End: -1}, 80, 100},
|
|
|
|
{fs.RangeOption{Start: 81, End: 100000}, 81, 100},
|
|
|
|
{fs.RangeOption{Start: -1, End: 20}, 80, 100}, // if start is omitted this means get the final bytes
|
|
|
|
// {fs.RangeOption{Start: -1, End: -1}, 0, 100}, - this seems to work but the RFC doesn't define it
|
|
|
|
} {
|
|
|
|
got := readObject(t, obj, -1, &test.ro)
|
|
|
|
foundAt := strings.Index(file1Contents, got)
|
|
|
|
help := fmt.Sprintf("%#v failed want [%d:%d] got [%d:%d]", test.ro, test.wantStart, test.wantEnd, foundAt, foundAt+len(got))
|
|
|
|
assert.Equal(t, file1Contents[test.wantStart:test.wantEnd], got, help)
|
|
|
|
}
|
|
|
|
})
|
|
|
|
|
|
|
|
// TestObjectPartialRead tests that reading only part of the object does the correct thing
|
|
|
|
t.Run("TestObjectPartialRead", func(t *testing.T) {
|
|
|
|
skipIfNotOk(t)
|
|
|
|
obj := findObject(t, remote, file1.Path)
|
|
|
|
assert.Equal(t, file1Contents[:50], readObject(t, obj, 50), "contents of file1 differ after limited read")
|
|
|
|
})
|
|
|
|
|
|
|
|
// TestObjectUpdate tests that Update works
|
|
|
|
t.Run("TestObjectUpdate", func(t *testing.T) {
|
|
|
|
skipIfNotOk(t)
|
|
|
|
contents := fstest.RandomString(200)
|
|
|
|
buf := bytes.NewBufferString(contents)
|
|
|
|
hash := hash.NewMultiHasher()
|
|
|
|
in := io.TeeReader(buf, hash)
|
|
|
|
|
|
|
|
file1.Size = int64(buf.Len())
|
|
|
|
obj := findObject(t, remote, file1.Path)
|
|
|
|
obji := object.NewStaticObjectInfo(file1.Path, file1.ModTime, int64(len(contents)), true, nil, obj.Fs())
|
|
|
|
err := obj.Update(in, obji)
|
|
|
|
require.NoError(t, err)
|
|
|
|
file1.Hashes = hash.Sums()
|
|
|
|
|
|
|
|
// check the object has been updated
|
|
|
|
file1.Check(t, obj, remote.Precision())
|
|
|
|
|
|
|
|
// Re-read the object and check again
|
|
|
|
obj = findObject(t, remote, file1.Path)
|
|
|
|
file1.Check(t, obj, remote.Precision())
|
|
|
|
|
|
|
|
// check contents correct
|
|
|
|
assert.Equal(t, contents, readObject(t, obj, -1), "contents of updated file1 differ")
|
|
|
|
file1Contents = contents
|
|
|
|
})
|
|
|
|
|
|
|
|
// TestObjectStorable tests that Storable works
|
|
|
|
t.Run("TestObjectStorable", func(t *testing.T) {
|
|
|
|
skipIfNotOk(t)
|
|
|
|
obj := findObject(t, remote, file1.Path)
|
|
|
|
require.NotNil(t, !obj.Storable(), "Expecting object to be storable")
|
|
|
|
})
|
|
|
|
|
|
|
|
// TestFsIsFile tests that an error is returned along with a valid fs
|
|
|
|
// which points to the parent directory.
|
|
|
|
t.Run("TestFsIsFile", func(t *testing.T) {
|
|
|
|
skipIfNotOk(t)
|
|
|
|
remoteName := subRemoteName + "/" + file2.Path
|
|
|
|
file2Copy := file2
|
|
|
|
file2Copy.Path = "z.txt"
|
|
|
|
file2Copy.WinPath = ""
|
|
|
|
fileRemote, err := fs.NewFs(remoteName)
|
2018-05-10 15:21:57 +00:00
|
|
|
require.NotNil(t, fileRemote)
|
2018-04-07 17:48:11 +00:00
|
|
|
assert.Equal(t, fs.ErrorIsFile, err)
|
|
|
|
fstest.CheckListing(t, fileRemote, []fstest.Item{file2Copy})
|
|
|
|
})
|
|
|
|
|
|
|
|
// TestFsIsFileNotFound tests that an error is not returned if no object is found
|
|
|
|
t.Run("TestFsIsFileNotFound", func(t *testing.T) {
|
|
|
|
skipIfNotOk(t)
|
|
|
|
remoteName := subRemoteName + "/not found.txt"
|
|
|
|
fileRemote, err := fs.NewFs(remoteName)
|
|
|
|
require.NoError(t, err)
|
|
|
|
fstest.CheckListing(t, fileRemote, []fstest.Item{})
|
|
|
|
})
|
2018-03-29 07:10:19 +00:00
|
|
|
|
2018-04-07 17:48:11 +00:00
|
|
|
// TestPublicLink tests creation of sharable, public links
|
2018-10-14 13:17:05 +00:00
|
|
|
// go test -v -run 'TestIntegration/Test(Setup|Init|FsMkdir|FsPutFile1|FsPutFile2|FsUpdateFile1|PublicLink)$'
|
2018-04-07 17:48:11 +00:00
|
|
|
t.Run("TestPublicLink", func(t *testing.T) {
|
|
|
|
skipIfNotOk(t)
|
2018-03-29 07:10:19 +00:00
|
|
|
|
2018-04-07 17:48:11 +00:00
|
|
|
doPublicLink := remote.Features().PublicLink
|
|
|
|
if doPublicLink == nil {
|
|
|
|
t.Skip("FS has no PublicLinker interface")
|
|
|
|
}
|
2018-03-29 07:10:19 +00:00
|
|
|
|
2018-04-07 17:48:11 +00:00
|
|
|
// if object not found
|
|
|
|
link, err := doPublicLink(file1.Path + "_does_not_exist")
|
|
|
|
require.Error(t, err, "Expected to get error when file doesn't exist")
|
|
|
|
require.Equal(t, "", link, "Expected link to be empty on error")
|
2018-03-29 07:10:19 +00:00
|
|
|
|
2018-04-07 17:48:11 +00:00
|
|
|
// sharing file for the first time
|
|
|
|
link1, err := doPublicLink(file1.Path)
|
|
|
|
require.NoError(t, err)
|
|
|
|
require.NotEqual(t, "", link1, "Link should not be empty")
|
2018-03-29 07:10:19 +00:00
|
|
|
|
2018-04-07 17:48:11 +00:00
|
|
|
link2, err := doPublicLink(file2.Path)
|
|
|
|
require.NoError(t, err)
|
|
|
|
require.NotEqual(t, "", link2, "Link should not be empty")
|
2018-03-29 07:10:19 +00:00
|
|
|
|
2018-04-07 17:48:11 +00:00
|
|
|
require.NotEqual(t, link1, link2, "Links to different files should differ")
|
2018-03-29 07:10:19 +00:00
|
|
|
|
2018-04-07 17:48:11 +00:00
|
|
|
// sharing file for the 2nd time
|
|
|
|
link1, err = doPublicLink(file1.Path)
|
|
|
|
require.NoError(t, err)
|
|
|
|
require.NotEqual(t, "", link1, "Link should not be empty")
|
2014-07-24 21:50:11 +00:00
|
|
|
|
2018-04-07 17:48:11 +00:00
|
|
|
// sharing directory for the first time
|
|
|
|
path := path.Dir(file2.Path)
|
|
|
|
link3, err := doPublicLink(path)
|
|
|
|
require.NoError(t, err)
|
|
|
|
require.NotEqual(t, "", link3, "Link should not be empty")
|
2017-08-03 19:42:35 +00:00
|
|
|
|
2018-04-07 17:48:11 +00:00
|
|
|
// sharing directory for the second time
|
|
|
|
link3, err = doPublicLink(path)
|
|
|
|
require.NoError(t, err)
|
|
|
|
require.NotEqual(t, "", link3, "Link should not be empty")
|
2017-08-03 19:42:35 +00:00
|
|
|
|
2018-04-07 17:48:11 +00:00
|
|
|
// sharing the "root" directory in a subremote
|
|
|
|
subRemote, _, removeSubRemote, err := fstest.RandomRemote(remoteName, false)
|
|
|
|
require.NoError(t, err)
|
|
|
|
defer removeSubRemote()
|
|
|
|
// ensure sub remote isn't empty
|
|
|
|
buf := bytes.NewBufferString("somecontent")
|
|
|
|
obji := object.NewStaticObjectInfo("somefile", time.Now(), int64(buf.Len()), true, nil, nil)
|
|
|
|
_, err = subRemote.Put(buf, obji)
|
|
|
|
require.NoError(t, err)
|
2017-08-03 19:42:35 +00:00
|
|
|
|
2018-04-07 17:48:11 +00:00
|
|
|
link4, err := subRemote.Features().PublicLink("")
|
|
|
|
require.NoError(t, err, "Sharing root in a sub-remote should work")
|
|
|
|
require.NotEqual(t, "", link4, "Link should not be empty")
|
|
|
|
})
|
2017-08-03 19:42:35 +00:00
|
|
|
|
2018-09-11 01:57:43 +00:00
|
|
|
// TestSetTier tests SetTier and GetTier functionality
|
|
|
|
t.Run("TestSetTier", func(t *testing.T) {
|
|
|
|
skipIfNotSetTier(t)
|
|
|
|
obj := findObject(t, remote, file1.Path)
|
|
|
|
setter, ok := obj.(fs.SetTierer)
|
|
|
|
assert.NotNil(t, ok)
|
|
|
|
getter, ok := obj.(fs.GetTierer)
|
|
|
|
assert.NotNil(t, ok)
|
2018-09-18 12:25:20 +00:00
|
|
|
// If interfaces are supported TiersToTest should contain
|
|
|
|
// at least one entry
|
|
|
|
supportedTiers := opt.TiersToTest
|
|
|
|
assert.NotEmpty(t, supportedTiers)
|
|
|
|
// test set tier changes on supported storage classes or tiers
|
2018-09-11 01:57:43 +00:00
|
|
|
for _, tier := range supportedTiers {
|
|
|
|
err := setter.SetTier(tier)
|
|
|
|
assert.Nil(t, err)
|
|
|
|
got := getter.GetTier()
|
|
|
|
assert.Equal(t, tier, got)
|
|
|
|
}
|
|
|
|
})
|
|
|
|
|
2018-04-07 17:48:11 +00:00
|
|
|
// TestObjectRemove tests Remove
|
|
|
|
t.Run("TestObjectRemove", func(t *testing.T) {
|
|
|
|
skipIfNotOk(t)
|
|
|
|
obj := findObject(t, remote, file1.Path)
|
|
|
|
err := obj.Remove()
|
|
|
|
require.NoError(t, err)
|
|
|
|
// check listing without modtime as TestPublicLink may change the modtime
|
|
|
|
fstest.CheckListingWithPrecision(t, remote, []fstest.Item{file2}, nil, fs.ModTimeNotSupported)
|
|
|
|
})
|
|
|
|
|
|
|
|
// TestFsPutStream tests uploading files when size is not known in advance
|
|
|
|
t.Run("TestFsPutStream", func(t *testing.T) {
|
|
|
|
skipIfNotOk(t)
|
|
|
|
if remote.Features().PutStream == nil {
|
|
|
|
t.Skip("FS has no PutStream interface")
|
2017-08-03 19:42:35 +00:00
|
|
|
}
|
|
|
|
|
2018-04-07 17:48:11 +00:00
|
|
|
file := fstest.Item{
|
|
|
|
ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"),
|
|
|
|
Path: "piped data.txt",
|
|
|
|
Size: -1, // use unknown size during upload
|
|
|
|
}
|
2018-01-12 16:30:54 +00:00
|
|
|
|
2018-04-07 17:48:11 +00:00
|
|
|
tries := 1
|
|
|
|
const maxTries = 10
|
|
|
|
again:
|
|
|
|
contentSize := 100
|
|
|
|
contents := fstest.RandomString(contentSize)
|
|
|
|
buf := bytes.NewBufferString(contents)
|
|
|
|
hash := hash.NewMultiHasher()
|
|
|
|
in := io.TeeReader(buf, hash)
|
|
|
|
|
|
|
|
file.Size = -1
|
|
|
|
obji := object.NewStaticObjectInfo(file.Path, file.ModTime, file.Size, true, nil, nil)
|
|
|
|
obj, err := remote.Features().PutStream(in, obji)
|
|
|
|
if err != nil {
|
|
|
|
// Retry if err returned a retry error
|
|
|
|
if fserrors.IsRetryError(err) && tries < maxTries {
|
|
|
|
t.Logf("Put error: %v - low level retry %d/%d", err, tries, maxTries)
|
|
|
|
time.Sleep(2 * time.Second)
|
|
|
|
|
|
|
|
tries++
|
|
|
|
goto again
|
|
|
|
}
|
|
|
|
require.NoError(t, err, fmt.Sprintf("PutStream Unknown Length error: %v", err))
|
|
|
|
}
|
|
|
|
file.Hashes = hash.Sums()
|
|
|
|
file.Size = int64(contentSize) // use correct size when checking
|
|
|
|
file.Check(t, obj, remote.Precision())
|
|
|
|
// Re-read the object and check again
|
|
|
|
obj = findObject(t, remote, file.Path)
|
|
|
|
file.Check(t, obj, remote.Precision())
|
|
|
|
})
|
|
|
|
|
2018-04-16 21:19:25 +00:00
|
|
|
// TestAbout tests the About optional interface
|
|
|
|
t.Run("TestObjectAbout", func(t *testing.T) {
|
|
|
|
skipIfNotOk(t)
|
|
|
|
|
|
|
|
// Check have About
|
|
|
|
doAbout := remote.Features().About
|
|
|
|
if doAbout == nil {
|
|
|
|
t.Skip("FS does not support About")
|
|
|
|
}
|
|
|
|
|
|
|
|
// Can't really check the output much!
|
|
|
|
usage, err := doAbout()
|
|
|
|
require.NoError(t, err)
|
|
|
|
require.NotNil(t, usage)
|
|
|
|
assert.NotEqual(t, int64(0), usage.Total)
|
|
|
|
})
|
|
|
|
|
2018-09-11 01:57:43 +00:00
|
|
|
// TestInternal calls InternalTest() on the Fs
|
|
|
|
t.Run("TestInternal", func(t *testing.T) {
|
|
|
|
skipIfNotOk(t)
|
|
|
|
if it, ok := remote.(InternalTester); ok {
|
|
|
|
it.InternalTest(t)
|
|
|
|
} else {
|
|
|
|
t.Skipf("%T does not implement InternalTester", remote)
|
|
|
|
}
|
|
|
|
})
|
|
|
|
|
2018-04-07 17:48:11 +00:00
|
|
|
// TestObjectPurge tests Purge
|
|
|
|
t.Run("TestObjectPurge", func(t *testing.T) {
|
|
|
|
skipIfNotOk(t)
|
|
|
|
|
|
|
|
err := operations.Purge(remote, "")
|
|
|
|
require.NoError(t, err)
|
|
|
|
fstest.CheckListing(t, remote, []fstest.Item{})
|
2014-07-24 21:50:11 +00:00
|
|
|
|
2018-04-07 17:48:11 +00:00
|
|
|
err = operations.Purge(remote, "")
|
|
|
|
assert.Error(t, err, "Expecting error after on second purge")
|
|
|
|
})
|
2018-02-25 09:58:06 +00:00
|
|
|
|
2018-04-07 17:48:11 +00:00
|
|
|
// TestFinalise tidies up after the previous tests
|
|
|
|
t.Run("TestFinalise", func(t *testing.T) {
|
|
|
|
skipIfNotOk(t)
|
|
|
|
if strings.HasPrefix(remoteName, "/") {
|
|
|
|
// Remove temp directory
|
|
|
|
err := os.Remove(remoteName)
|
|
|
|
require.NoError(t, err)
|
|
|
|
}
|
|
|
|
})
|
2014-07-24 21:50:11 +00:00
|
|
|
}
|