backend: add fstests.ChunkedUploadConfig

- azureblob
- b2
- drive
- dropbox
- onedrive
- s3
- swift
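
Every backend listed above gets the same treatment: a package-private checkUploadChunkSize/setUploadChunkSize pair, a call to the checker from NewFs, and a test-only exported SetUploadChunkSize wrapper that satisfies fstests.SetUploadChunkSizer, which is also why each test file moves from its external _test package into the backend package itself. A condensed sketch of the shared pattern, assembled from the diffs below (the bounds, here minChunkSize, vary per backend):

    // checkUploadChunkSize validates a proposed chunk size against the
    // backend's limits; it is shared by NewFs and the test hook.
    func checkUploadChunkSize(cs fs.SizeSuffix) error {
    	if cs < minChunkSize {
    		return errors.Errorf("%s is less than %s", cs, minChunkSize)
    	}
    	return nil
    }

    // setUploadChunkSize installs a validated chunk size and returns the
    // previous value so callers can restore it.
    func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
    	err = checkUploadChunkSize(cs)
    	if err == nil {
    		old, f.opt.ChunkSize = f.opt.ChunkSize, cs
    	}
    	return
    }

    // In the backend's _test.go file (compiled only for tests):
    func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
    	return f.setUploadChunkSize(cs)
    }

    var _ fstests.SetUploadChunkSizer = (*Fs)(nil)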
Fabian Möller 2018-09-07 13:02:27 +02:00 committed by Nick Craig-Wood
parent c00ec0cbe4
commit 98e2746e31
14 changed files with 245 additions and 50 deletions

backend/azureblob/azureblob.go

@@ -45,10 +45,10 @@ const (
 	maxTotalParts         = 50000 // in multipart upload
 	storageDefaultBaseURL = "blob.core.windows.net"
 	// maxUncommittedSize = 9 << 30 // can't upload bigger than this
-	defaultChunkSize    = 4 * 1024 * 1024
-	maxChunkSize        = 100 * 1024 * 1024
-	defaultUploadCutoff = 256 * 1024 * 1024
-	maxUploadCutoff     = 256 * 1024 * 1024
+	defaultChunkSize    = 4 * fs.MebiByte
+	maxChunkSize        = 100 * fs.MebiByte
+	defaultUploadCutoff = 256 * fs.MebiByte
+	maxUploadCutoff     = 256 * fs.MebiByte
 	defaultAccessTier   = azblob.AccessTierNone
 )
@@ -237,6 +237,25 @@ func (f *Fs) shouldRetry(err error) (bool, error) {
 	return fserrors.ShouldRetry(err), err
 }

+func checkUploadChunkSize(cs fs.SizeSuffix) error {
+	const minChunkSize = fs.Byte
+	if cs < minChunkSize {
+		return errors.Errorf("%s is less than %s", cs, minChunkSize)
+	}
+	if cs > maxChunkSize {
+		return errors.Errorf("%s is greater than %s", cs, maxChunkSize)
+	}
+	return nil
+}
+
+func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
+	err = checkUploadChunkSize(cs)
+	if err == nil {
+		old, f.opt.ChunkSize = f.opt.ChunkSize, cs
+	}
+	return
+}
+
 // NewFs contstructs an Fs from the path, container:path
 func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	// Parse config into Options struct
@@ -249,8 +268,9 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	if opt.UploadCutoff > maxUploadCutoff {
 		return nil, errors.Errorf("azure: upload cutoff (%v) must be less than or equal to %v", opt.UploadCutoff, maxUploadCutoff)
 	}
-	if opt.ChunkSize > maxChunkSize {
-		return nil, errors.Errorf("azure: chunk size can't be greater than %v - was %v", maxChunkSize, opt.ChunkSize)
+	err = checkUploadChunkSize(opt.ChunkSize)
+	if err != nil {
+		return nil, errors.Wrap(err, "azure: chunk size")
 	}
 	if opt.ListChunkSize > maxListChunkSize {
 		return nil, errors.Errorf("azure: blob list size can't be greater than %v - was %v", maxListChunkSize, opt.ListChunkSize)

backend/azureblob/azureblob_test.go

@@ -2,12 +2,12 @@
 // +build !freebsd,!netbsd,!openbsd,!plan9,!solaris,go1.8

-package azureblob_test
+package azureblob

 import (
 	"testing"

-	"github.com/ncw/rclone/backend/azureblob"
+	"github.com/ncw/rclone/fs"
 	"github.com/ncw/rclone/fstest/fstests"
 )
@@ -15,7 +15,16 @@ import (
 func TestIntegration(t *testing.T) {
 	fstests.Run(t, &fstests.Opt{
 		RemoteName:  "TestAzureBlob:",
-		NilObject:   (*azureblob.Object)(nil),
+		NilObject:   (*Object)(nil),
 		TiersToTest: []string{"Hot", "Cool"},
+		ChunkedUpload: fstests.ChunkedUploadConfig{
+			MaxChunkSize: maxChunkSize,
+		},
 	})
 }
+
+func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
+	return f.setUploadChunkSize(cs)
+}
+
+var _ fstests.SetUploadChunkSizer = (*Fs)(nil)
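
This exported wrapper and compile-time assertion recur in every test file below. The harness side of ChunkedUploadConfig is not part of this commit, so the following is only a hypothetical sketch of how a chunked-upload test might drive the hook (f, t, and minChunkSize assumed in scope):

    // Hypothetical usage: force a small chunk size so an upload spans
    // several chunks, restoring the original size when the test ends.
    old, err := f.SetUploadChunkSize(minChunkSize)
    if err != nil {
    	t.Fatal(err)
    }
    defer func() { _, _ = f.SetUploadChunkSize(old) }()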

backend/b2/b2.go

@@ -48,9 +48,9 @@ const (
 	decayConstant = 1   // bigger for slower decay, exponential
 	maxParts      = 10000
 	maxVersions   = 100 // maximum number of versions we search in --b2-versions mode
-	minChunkSize        = 5E6
-	defaultChunkSize    = 96 * 1024 * 1024
-	defaultUploadCutoff = 200E6
+	minChunkSize        = 5 * fs.MebiByte
+	defaultChunkSize    = 96 * fs.MebiByte
+	defaultUploadCutoff = 200 * fs.MebiByte
 )

 // Globals
@@ -282,6 +282,21 @@ func errorHandler(resp *http.Response) error {
 	return errResponse
 }

+func checkUploadChunkSize(cs fs.SizeSuffix) error {
+	if cs < minChunkSize {
+		return errors.Errorf("%s is less than %s", cs, minChunkSize)
+	}
+	return nil
+}
+
+func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
+	err = checkUploadChunkSize(cs)
+	if err == nil {
+		old, f.opt.ChunkSize = f.opt.ChunkSize, cs
+	}
+	return
+}
+
 // NewFs contstructs an Fs from the path, bucket:path
 func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	// Parse config into Options struct
@@ -293,8 +308,9 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	if opt.UploadCutoff < opt.ChunkSize {
 		return nil, errors.Errorf("b2: upload cutoff (%v) must be greater than or equal to chunk size (%v)", opt.UploadCutoff, opt.ChunkSize)
 	}
-	if opt.ChunkSize < minChunkSize {
-		return nil, errors.Errorf("b2: chunk size can't be less than %v - was %v", minChunkSize, opt.ChunkSize)
+	err = checkUploadChunkSize(opt.ChunkSize)
+	if err != nil {
+		return nil, errors.Wrap(err, "b2: chunk size")
 	}
 	bucket, directory, err := parsePath(root)
 	if err != nil {

backend/b2/b2_test.go

@ -1,10 +1,10 @@
// Test B2 filesystem interface // Test B2 filesystem interface
package b2_test package b2
import ( import (
"testing" "testing"
"github.com/ncw/rclone/backend/b2" "github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest/fstests" "github.com/ncw/rclone/fstest/fstests"
) )
@ -12,6 +12,15 @@ import (
func TestIntegration(t *testing.T) { func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{ fstests.Run(t, &fstests.Opt{
RemoteName: "TestB2:", RemoteName: "TestB2:",
NilObject: (*b2.Object)(nil), NilObject: (*Object)(nil),
ChunkedUpload: fstests.ChunkedUploadConfig{
MinChunkSize: minChunkSize,
},
}) })
} }
func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadChunkSize(cs)
}
var _ fstests.SetUploadChunkSizer = (*Fs)(nil)

backend/drive/drive.go

@@ -57,7 +57,8 @@ const (
 	defaultScope = "drive"
 	// chunkSize is the size of the chunks created during a resumable upload and should be a power of two.
 	// 1<<18 is the minimum size supported by the Google uploader, and there is no maximum.
-	defaultChunkSize = fs.SizeSuffix(8 * 1024 * 1024)
+	minChunkSize     = 256 * fs.KibiByte
+	defaultChunkSize = 8 * fs.MebiByte
 	partialFields    = "id,name,size,md5Checksum,trashed,modifiedTime,createdTime,mimeType,parents,webViewLink"
 )
@@ -787,6 +788,24 @@ func createOAuthClient(opt *Options, name string, m configmap.Mapper) (*http.Client, error) {
 	return oAuthClient, nil
 }

+func checkUploadChunkSize(cs fs.SizeSuffix) error {
+	if !isPowerOfTwo(int64(cs)) {
+		return errors.Errorf("%v isn't a power of two", cs)
+	}
+	if cs < minChunkSize {
+		return errors.Errorf("%s is less than %s", cs, minChunkSize)
+	}
+	return nil
+}
+
+func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
+	err = checkUploadChunkSize(cs)
+	if err == nil {
+		old, f.opt.ChunkSize = f.opt.ChunkSize, cs
+	}
+	return
+}
+
 // NewFs contstructs an Fs from the path, container:path
 func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) {
 	// Parse config into Options struct
@@ -795,11 +814,9 @@ func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) {
 	if err != nil {
 		return nil, err
 	}
-	if !isPowerOfTwo(int64(opt.ChunkSize)) {
-		return nil, errors.Errorf("drive: chunk size %v isn't a power of two", opt.ChunkSize)
-	}
-	if opt.ChunkSize < 256*1024 {
-		return nil, errors.Errorf("drive: chunk size can't be less than 256k - was %v", opt.ChunkSize)
+	err = checkUploadChunkSize(opt.ChunkSize)
+	if err != nil {
+		return nil, errors.Wrap(err, "drive: chunk size")
 	}

 	oAuthClient, err := createOAuthClient(opt, name, m)

backend/drive/drive_test.go

@ -1,10 +1,10 @@
// Test Drive filesystem interface // Test Drive filesystem interface
package drive_test package drive
import ( import (
"testing" "testing"
"github.com/ncw/rclone/backend/drive" "github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest/fstests" "github.com/ncw/rclone/fstest/fstests"
) )
@ -12,6 +12,16 @@ import (
func TestIntegration(t *testing.T) { func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{ fstests.Run(t, &fstests.Opt{
RemoteName: "TestDrive:", RemoteName: "TestDrive:",
NilObject: (*drive.Object)(nil), NilObject: (*Object)(nil),
ChunkedUpload: fstests.ChunkedUploadConfig{
MinChunkSize: minChunkSize,
CeilChunkSize: fstests.NextPowerOfTwo,
},
}) })
} }
func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadChunkSize(cs)
}
var _ fstests.SetUploadChunkSizer = (*Fs)(nil)
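
drive only accepts power-of-two chunk sizes, so its config hands the harness fstests.NextPowerOfTwo as a ceiling function; onedrive does the same further down with fstests.NextMultipleOf(chunkSizeMultiple). Their implementations live in the fstests change, not in this diff; plausible sketches, assuming they round a requested test size up to the nearest valid value:

    // NextPowerOfTwo rounds cs up to the next power of two (needs math/bits).
    func NextPowerOfTwo(cs fs.SizeSuffix) fs.SizeSuffix {
    	return fs.SizeSuffix(1 << uint(64-bits.LeadingZeros64(uint64(cs)-1)))
    }

    // NextMultipleOf returns a ceiling function that rounds up to a
    // multiple of m.
    func NextMultipleOf(m fs.SizeSuffix) func(fs.SizeSuffix) fs.SizeSuffix {
    	return func(cs fs.SizeSuffix) fs.SizeSuffix {
    		return (cs + m - 1) / m * m
    	}
    }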

backend/dropbox/dropbox.go

@@ -79,8 +79,8 @@ const (
 	// Choose 48MB which is 91% of Maximum speed. rclone by
 	// default does 4 transfers so this should use 4*48MB = 192MB
 	// by default.
-	defaultChunkSize = 48 * 1024 * 1024
-	maxChunkSize     = 150 * 1024 * 1024
+	defaultChunkSize = 48 * fs.MebiByte
+	maxChunkSize     = 150 * fs.MebiByte
 )

 var (
@@ -202,6 +202,25 @@ func shouldRetry(err error) (bool, error) {
 	return fserrors.ShouldRetry(err), err
 }

+func checkUploadChunkSize(cs fs.SizeSuffix) error {
+	const minChunkSize = fs.Byte
+	if cs < minChunkSize {
+		return errors.Errorf("%s is less than %s", cs, minChunkSize)
+	}
+	if cs > maxChunkSize {
+		return errors.Errorf("%s is greater than %s", cs, maxChunkSize)
+	}
+	return nil
+}
+
+func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
+	err = checkUploadChunkSize(cs)
+	if err == nil {
+		old, f.opt.ChunkSize = f.opt.ChunkSize, cs
+	}
+	return
+}
+
 // NewFs contstructs an Fs from the path, container:path
 func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	// Parse config into Options struct
@@ -210,8 +229,9 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	if err != nil {
 		return nil, err
 	}
-	if opt.ChunkSize > maxChunkSize {
-		return nil, errors.Errorf("chunk size too big, must be < %v", maxChunkSize)
+	err = checkUploadChunkSize(opt.ChunkSize)
+	if err != nil {
+		return nil, errors.Wrap(err, "dropbox: chunk size")
 	}

 	// Convert the old token if it exists. The old token was just

backend/dropbox/dropbox_test.go

@ -1,10 +1,10 @@
// Test Dropbox filesystem interface // Test Dropbox filesystem interface
package dropbox_test package dropbox
import ( import (
"testing" "testing"
"github.com/ncw/rclone/backend/dropbox" "github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest/fstests" "github.com/ncw/rclone/fstest/fstests"
) )
@ -12,6 +12,15 @@ import (
func TestIntegration(t *testing.T) { func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{ fstests.Run(t, &fstests.Opt{
RemoteName: "TestDropbox:", RemoteName: "TestDropbox:",
NilObject: (*dropbox.Object)(nil), NilObject: (*Object)(nil),
ChunkedUpload: fstests.ChunkedUploadConfig{
MaxChunkSize: maxChunkSize,
},
}) })
} }
func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadChunkSize(cs)
}
var _ fstests.SetUploadChunkSizer = (*Fs)(nil)

backend/onedrive/onedrive.go

@@ -43,6 +43,8 @@ const (
 	driveTypePersonal   = "personal"
 	driveTypeBusiness   = "business"
 	driveTypeSharepoint = "documentLibrary"
+	defaultChunkSize    = 10 * fs.MebiByte
+	chunkSizeMultiple   = 320 * fs.KibiByte
 )

 // Globals
@@ -217,7 +219,7 @@ func init() {
 Above this size files will be chunked - must be multiple of 320k. Note
 that the chunks will be buffered into memory.`,
-			Default:  fs.SizeSuffix(10 * 1024 * 1024),
+			Default:  defaultChunkSize,
 			Advanced: true,
 		}, {
 			Name: "drive_id",
@@ -368,6 +370,25 @@ func errorHandler(resp *http.Response) error {
 	return errResponse
 }

+func checkUploadChunkSize(cs fs.SizeSuffix) error {
+	const minChunkSize = fs.Byte
+	if cs%chunkSizeMultiple != 0 {
+		return errors.Errorf("%s is not a multiple of %s", cs, chunkSizeMultiple)
+	}
+	if cs < minChunkSize {
+		return errors.Errorf("%s is less than %s", cs, minChunkSize)
+	}
+	return nil
+}
+
+func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
+	err = checkUploadChunkSize(cs)
+	if err == nil {
+		old, f.opt.ChunkSize = f.opt.ChunkSize, cs
+	}
+	return
+}
+
 // NewFs constructs an Fs from the path, container:path
 func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	// Parse config into Options struct
@@ -376,8 +397,10 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	if err != nil {
 		return nil, err
 	}
-	if opt.ChunkSize%(320*1024) != 0 {
-		return nil, errors.Errorf("chunk size %d is not a multiple of 320k", opt.ChunkSize)
+
+	err = checkUploadChunkSize(opt.ChunkSize)
+	if err != nil {
+		return nil, errors.Wrap(err, "onedrive: chunk size")
 	}

 	if opt.DriveID == "" || opt.DriveType == "" {

backend/onedrive/onedrive_test.go

@ -1,10 +1,10 @@
// Test OneDrive filesystem interface // Test OneDrive filesystem interface
package onedrive_test package onedrive
import ( import (
"testing" "testing"
"github.com/ncw/rclone/backend/onedrive" "github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest/fstests" "github.com/ncw/rclone/fstest/fstests"
) )
@ -12,6 +12,15 @@ import (
func TestIntegration(t *testing.T) { func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{ fstests.Run(t, &fstests.Opt{
RemoteName: "TestOneDrive:", RemoteName: "TestOneDrive:",
NilObject: (*onedrive.Object)(nil), NilObject: (*Object)(nil),
ChunkedUpload: fstests.ChunkedUploadConfig{
CeilChunkSize: fstests.NextMultipleOf(chunkSizeMultiple),
},
}) })
} }
func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadChunkSize(cs)
}
var _ fstests.SetUploadChunkSizer = (*Fs)(nil)

backend/s3/s3.go

@@ -547,7 +547,7 @@ in memory per transfer.

 If you are transferring large files over high speed links and you have
 enough memory, then increasing this will speed up the transfers.`,
-			Default:  fs.SizeSuffix(s3manager.MinUploadPartSize),
+			Default:  minChunkSize,
 			Advanced: true,
 		}, {
 			Name: "disable_checksum",
@@ -595,7 +595,8 @@ const (
 	maxRetries     = 10                            // number of retries to make of operations
 	maxSizeForCopy = 5 * 1024 * 1024 * 1024        // The maximum size of object we can COPY
 	maxFileSize    = 5 * 1024 * 1024 * 1024 * 1024 // largest possible upload file size
-	minSleep       = 10 * time.Millisecond         // In case of error, start at 10ms sleep.
+	minChunkSize   = fs.SizeSuffix(s3manager.MinUploadPartSize)
+	minSleep       = 10 * time.Millisecond // In case of error, start at 10ms sleep.
 )

 // Options defines the configuration for this backend
@@ -806,6 +807,21 @@ func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
 	return c, ses, nil
 }

+func checkUploadChunkSize(cs fs.SizeSuffix) error {
+	if cs < minChunkSize {
+		return errors.Errorf("%s is less than %s", cs, minChunkSize)
+	}
+	return nil
+}
+
+func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
+	err = checkUploadChunkSize(cs)
+	if err == nil {
+		old, f.opt.ChunkSize = f.opt.ChunkSize, cs
+	}
+	return
+}
+
 // NewFs constructs an Fs from the path, bucket:path
 func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	// Parse config into Options struct
@@ -814,8 +830,9 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	if err != nil {
 		return nil, err
 	}
-	if opt.ChunkSize < fs.SizeSuffix(s3manager.MinUploadPartSize) {
-		return nil, errors.Errorf("s3 chunk size (%v) must be >= %v", opt.ChunkSize, fs.SizeSuffix(s3manager.MinUploadPartSize))
+	err = checkUploadChunkSize(opt.ChunkSize)
+	if err != nil {
+		return nil, errors.Wrap(err, "s3: chunk size")
 	}
 	bucket, directory, err := s3ParsePath(root)
 	if err != nil {

backend/s3/s3_test.go

@ -1,10 +1,10 @@
// Test S3 filesystem interface // Test S3 filesystem interface
package s3_test package s3
import ( import (
"testing" "testing"
"github.com/ncw/rclone/backend/s3" "github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest/fstests" "github.com/ncw/rclone/fstest/fstests"
) )
@ -12,6 +12,15 @@ import (
func TestIntegration(t *testing.T) { func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{ fstests.Run(t, &fstests.Opt{
RemoteName: "TestS3:", RemoteName: "TestS3:",
NilObject: (*s3.Object)(nil), NilObject: (*Object)(nil),
ChunkedUpload: fstests.ChunkedUploadConfig{
MinChunkSize: minChunkSize,
},
}) })
} }
func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadChunkSize(cs)
}
var _ fstests.SetUploadChunkSizer = (*Fs)(nil)

backend/swift/swift.go

@@ -29,6 +29,7 @@ import (
 const (
 	directoryMarkerContentType = "application/directory" // content type of directory marker objects
 	listChunks                 = 1000                    // chunk size to read directory listings
+	defaultChunkSize           = 5 * fs.GibiByte
 )

 // SharedOptions are shared between swift and hubic
@@ -38,7 +39,7 @@ var SharedOptions = []fs.Option{{
 Above this size files will be chunked into a _segments container. The
 default for this is 5GB which is its maximum value.`,
-	Default:  fs.SizeSuffix(5 * 1024 * 1024 * 1024),
+	Default:  defaultChunkSize,
 	Advanced: true,
 }}
@@ -302,6 +303,22 @@ func swiftConnection(opt *Options, name string) (*swift.Connection, error) {
 	return c, nil
 }

+func checkUploadChunkSize(cs fs.SizeSuffix) error {
+	const minChunkSize = fs.Byte
+	if cs < minChunkSize {
+		return errors.Errorf("%s is less than %s", cs, minChunkSize)
+	}
+	return nil
+}
+
+func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
+	err = checkUploadChunkSize(cs)
+	if err == nil {
+		old, f.opt.ChunkSize = f.opt.ChunkSize, cs
+	}
+	return
+}
+
 // NewFsWithConnection constructs an Fs from the path, container:path
 // and authenticated connection.
 //
@@ -352,6 +369,10 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	if err != nil {
 		return nil, err
 	}
+	err = checkUploadChunkSize(opt.ChunkSize)
+	if err != nil {
+		return nil, errors.Wrap(err, "swift: chunk size")
+	}
 	c, err := swiftConnection(opt, name)
 	if err != nil {

backend/swift/swift_test.go

@ -1,10 +1,10 @@
// Test Swift filesystem interface // Test Swift filesystem interface
package swift_test package swift
import ( import (
"testing" "testing"
"github.com/ncw/rclone/backend/swift" "github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest/fstests" "github.com/ncw/rclone/fstest/fstests"
) )
@ -12,6 +12,12 @@ import (
func TestIntegration(t *testing.T) { func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{ fstests.Run(t, &fstests.Opt{
RemoteName: "TestSwift:", RemoteName: "TestSwift:",
NilObject: (*swift.Object)(nil), NilObject: (*Object)(nil),
}) })
} }
func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadChunkSize(cs)
}
var _ fstests.SetUploadChunkSizer = (*Fs)(nil)