backend: add fstests.ChunkedUploadConfig

- azureblob
- b2
- drive
- dropbox
- onedrive
- s3
- swift
parent c00ec0cbe4
commit 98e2746e31
14 changed files with 245 additions and 50 deletions
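The commit wires each backend's chunk-size limits into the shared integration tests. The definition of fstests.ChunkedUploadConfig itself lives in fstest/fstests and is not shown in this diff; judging from the fields used below (MinChunkSize, MaxChunkSize, CeilChunkSize), it is roughly the following sketch. The field meanings are inferred from usage, not copied from the source:

// Sketch only - inferred from the usages in this commit,
// not copied from fstest/fstests.
type ChunkedUploadConfig struct {
    MinChunkSize fs.SizeSuffix // smallest chunk size the backend accepts (0 = no lower bound)
    MaxChunkSize fs.SizeSuffix // largest chunk size the backend accepts (0 = no upper bound)
    // CeilChunkSize rounds a proposed size up to the nearest valid one,
    // e.g. fstests.NextPowerOfTwo (drive) or
    // fstests.NextMultipleOf(chunkSizeMultiple) (onedrive).
    CeilChunkSize func(fs.SizeSuffix) fs.SizeSuffix
}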
backend/azureblob/azureblob.go

@@ -45,10 +45,10 @@ const (
     maxTotalParts         = 50000 // in multipart upload
     storageDefaultBaseURL = "blob.core.windows.net"
     // maxUncommittedSize = 9 << 30 // can't upload bigger than this
-    defaultChunkSize    = 4 * 1024 * 1024
-    maxChunkSize        = 100 * 1024 * 1024
-    defaultUploadCutoff = 256 * 1024 * 1024
-    maxUploadCutoff     = 256 * 1024 * 1024
+    defaultChunkSize    = 4 * fs.MebiByte
+    maxChunkSize        = 100 * fs.MebiByte
+    defaultUploadCutoff = 256 * fs.MebiByte
+    maxUploadCutoff     = 256 * fs.MebiByte
     defaultAccessTier   = azblob.AccessTierNone
 )
@@ -237,6 +237,25 @@ func (f *Fs) shouldRetry(err error) (bool, error) {
     return fserrors.ShouldRetry(err), err
 }

+func checkUploadChunkSize(cs fs.SizeSuffix) error {
+    const minChunkSize = fs.Byte
+    if cs < minChunkSize {
+        return errors.Errorf("%s is less than %s", cs, minChunkSize)
+    }
+    if cs > maxChunkSize {
+        return errors.Errorf("%s is greater than %s", cs, maxChunkSize)
+    }
+    return nil
+}
+
+func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
+    err = checkUploadChunkSize(cs)
+    if err == nil {
+        old, f.opt.ChunkSize = f.opt.ChunkSize, cs
+    }
+    return
+}
+
 // NewFs constructs an Fs from the path, container:path
 func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
     // Parse config into Options struct
@@ -249,8 +268,9 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
     if opt.UploadCutoff > maxUploadCutoff {
         return nil, errors.Errorf("azure: upload cutoff (%v) must be less than or equal to %v", opt.UploadCutoff, maxUploadCutoff)
     }
-    if opt.ChunkSize > maxChunkSize {
-        return nil, errors.Errorf("azure: chunk size can't be greater than %v - was %v", maxChunkSize, opt.ChunkSize)
+    err = checkUploadChunkSize(opt.ChunkSize)
+    if err != nil {
+        return nil, errors.Wrap(err, "azure: chunk size")
     }
     if opt.ListChunkSize > maxListChunkSize {
         return nil, errors.Errorf("azure: blob list size can't be greater than %v - was %v", maxListChunkSize, opt.ListChunkSize)
backend/azureblob/azureblob_test.go

@@ -2,12 +2,12 @@
 // +build !freebsd,!netbsd,!openbsd,!plan9,!solaris,go1.8

-package azureblob_test
+package azureblob

 import (
     "testing"

-    "github.com/ncw/rclone/backend/azureblob"
+    "github.com/ncw/rclone/fs"
     "github.com/ncw/rclone/fstest/fstests"
 )
@@ -15,7 +15,16 @@ import (
 func TestIntegration(t *testing.T) {
     fstests.Run(t, &fstests.Opt{
         RemoteName:  "TestAzureBlob:",
-        NilObject:   (*azureblob.Object)(nil),
+        NilObject:   (*Object)(nil),
         TiersToTest: []string{"Hot", "Cool"},
+        ChunkedUpload: fstests.ChunkedUploadConfig{
+            MaxChunkSize: maxChunkSize,
+        },
     })
 }
+
+func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
+    return f.setUploadChunkSize(cs)
+}
+
+var _ fstests.SetUploadChunkSizer = (*Fs)(nil)
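Each converted test file gains an exported SetUploadChunkSize wrapper around the new unexported setter, plus a compile-time assertion that *Fs implements fstests.SetUploadChunkSizer. The interface itself is not part of this diff; from the wrapper's signature it is presumably just:

// Presumed shape of the interface in fstest/fstests, matching the
// SetUploadChunkSize wrapper above; the doc comment is an assumption.
type SetUploadChunkSizer interface {
    // SetUploadChunkSize sets the upload chunk size and returns the
    // previous value so a test can restore it afterwards.
    SetUploadChunkSize(fs.SizeSuffix) (fs.SizeSuffix, error)
}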
backend/b2/b2.go

@@ -48,9 +48,9 @@ const (
     decayConstant = 1     // bigger for slower decay, exponential
     maxParts      = 10000
     maxVersions   = 100   // maximum number of versions we search in --b2-versions mode
-    minChunkSize        = 5E6
-    defaultChunkSize    = 96 * 1024 * 1024
-    defaultUploadCutoff = 200E6
+    minChunkSize        = 5 * fs.MebiByte
+    defaultChunkSize    = 96 * fs.MebiByte
+    defaultUploadCutoff = 200 * fs.MebiByte
 )

 // Globals
@@ -282,6 +282,21 @@ func errorHandler(resp *http.Response) error {
     return errResponse
 }

+func checkUploadChunkSize(cs fs.SizeSuffix) error {
+    if cs < minChunkSize {
+        return errors.Errorf("%s is less than %s", cs, minChunkSize)
+    }
+    return nil
+}
+
+func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
+    err = checkUploadChunkSize(cs)
+    if err == nil {
+        old, f.opt.ChunkSize = f.opt.ChunkSize, cs
+    }
+    return
+}
+
 // NewFs constructs an Fs from the path, bucket:path
 func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
     // Parse config into Options struct
@@ -293,8 +308,9 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
     if opt.UploadCutoff < opt.ChunkSize {
         return nil, errors.Errorf("b2: upload cutoff (%v) must be greater than or equal to chunk size (%v)", opt.UploadCutoff, opt.ChunkSize)
     }
-    if opt.ChunkSize < minChunkSize {
-        return nil, errors.Errorf("b2: chunk size can't be less than %v - was %v", minChunkSize, opt.ChunkSize)
+    err = checkUploadChunkSize(opt.ChunkSize)
+    if err != nil {
+        return nil, errors.Wrap(err, "b2: chunk size")
     }
     bucket, directory, err := parsePath(root)
     if err != nil {
backend/b2/b2_test.go

@@ -1,10 +1,10 @@
 // Test B2 filesystem interface
-package b2_test
+package b2

 import (
     "testing"

-    "github.com/ncw/rclone/backend/b2"
+    "github.com/ncw/rclone/fs"
     "github.com/ncw/rclone/fstest/fstests"
 )
@@ -12,6 +12,15 @@ import (
 func TestIntegration(t *testing.T) {
     fstests.Run(t, &fstests.Opt{
         RemoteName: "TestB2:",
-        NilObject:  (*b2.Object)(nil),
+        NilObject:  (*Object)(nil),
+        ChunkedUpload: fstests.ChunkedUploadConfig{
+            MinChunkSize: minChunkSize,
+        },
     })
 }
+
+func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
+    return f.setUploadChunkSize(cs)
+}
+
+var _ fstests.SetUploadChunkSizer = (*Fs)(nil)
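Returning the old value is what makes the setter usable from tests: override, exercise the upload, restore. A hypothetical helper (not in this commit) showing the intended pattern:

// Hypothetical helper - not part of this commit - showing how the
// old value returned by setUploadChunkSize allows a safe override.
func withChunkSize(f *Fs, cs fs.SizeSuffix, fn func() error) error {
    old, err := f.setUploadChunkSize(cs)
    if err != nil {
        return err
    }
    // Restore the previous chunk size when done, ignoring the error
    // since the old value has already passed validation.
    defer func() { _, _ = f.setUploadChunkSize(old) }()
    return fn()
}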
backend/drive/drive.go

@@ -57,7 +57,8 @@ const (
     defaultScope = "drive"
     // chunkSize is the size of the chunks created during a resumable upload and should be a power of two.
     // 1<<18 is the minimum size supported by the Google uploader, and there is no maximum.
-    defaultChunkSize = fs.SizeSuffix(8 * 1024 * 1024)
+    minChunkSize     = 256 * fs.KibiByte
+    defaultChunkSize = 8 * fs.MebiByte
     partialFields    = "id,name,size,md5Checksum,trashed,modifiedTime,createdTime,mimeType,parents,webViewLink"
 )
@@ -787,6 +788,24 @@ func createOAuthClient(opt *Options, name string, m configmap.Mapper) (*http.Client, error) {
     return oAuthClient, nil
 }

+func checkUploadChunkSize(cs fs.SizeSuffix) error {
+    if !isPowerOfTwo(int64(cs)) {
+        return errors.Errorf("%v isn't a power of two", cs)
+    }
+    if cs < minChunkSize {
+        return errors.Errorf("%s is less than %s", cs, minChunkSize)
+    }
+    return nil
+}
+
+func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
+    err = checkUploadChunkSize(cs)
+    if err == nil {
+        old, f.opt.ChunkSize = f.opt.ChunkSize, cs
+    }
+    return
+}
+
 // NewFs constructs an Fs from the path, container:path
 func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) {
     // Parse config into Options struct
@@ -795,11 +814,9 @@ func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) {
     if err != nil {
         return nil, err
     }
-    if !isPowerOfTwo(int64(opt.ChunkSize)) {
-        return nil, errors.Errorf("drive: chunk size %v isn't a power of two", opt.ChunkSize)
-    }
-    if opt.ChunkSize < 256*1024 {
-        return nil, errors.Errorf("drive: chunk size can't be less than 256k - was %v", opt.ChunkSize)
-    }
+    err = checkUploadChunkSize(opt.ChunkSize)
+    if err != nil {
+        return nil, errors.Wrap(err, "drive: chunk size")
+    }

     oAuthClient, err := createOAuthClient(opt, name, m)
backend/drive/drive_test.go

@@ -1,10 +1,10 @@
 // Test Drive filesystem interface
-package drive_test
+package drive

 import (
     "testing"

-    "github.com/ncw/rclone/backend/drive"
+    "github.com/ncw/rclone/fs"
     "github.com/ncw/rclone/fstest/fstests"
 )
@@ -12,6 +12,16 @@ import (
 func TestIntegration(t *testing.T) {
     fstests.Run(t, &fstests.Opt{
         RemoteName: "TestDrive:",
-        NilObject:  (*drive.Object)(nil),
+        NilObject:  (*Object)(nil),
+        ChunkedUpload: fstests.ChunkedUploadConfig{
+            MinChunkSize:  minChunkSize,
+            CeilChunkSize: fstests.NextPowerOfTwo,
+        },
     })
 }
+
+func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
+    return f.setUploadChunkSize(cs)
+}
+
+var _ fstests.SetUploadChunkSizer = (*Fs)(nil)
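Drive is the only backend here that requires power-of-two chunk sizes, so its config supplies fstests.NextPowerOfTwo as the rounding function. That helper's body is not in this diff; a minimal implementation consistent with its name and use would be:

// Sketch of what fstests.NextPowerOfTwo presumably does: round cs up
// to the next power of two (sizes that are already a power of two are
// returned unchanged).
func NextPowerOfTwo(cs fs.SizeSuffix) fs.SizeSuffix {
    next := fs.SizeSuffix(1)
    for next < cs {
        next <<= 1
    }
    return next
}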
backend/dropbox/dropbox.go

@@ -79,8 +79,8 @@ const (
     // Choose 48MB which is 91% of Maximum speed. rclone by
     // default does 4 transfers so this should use 4*48MB = 192MB
     // by default.
-    defaultChunkSize = 48 * 1024 * 1024
-    maxChunkSize     = 150 * 1024 * 1024
+    defaultChunkSize = 48 * fs.MebiByte
+    maxChunkSize     = 150 * fs.MebiByte
 )

 var (
@@ -202,6 +202,25 @@ func shouldRetry(err error) (bool, error) {
     return fserrors.ShouldRetry(err), err
 }

+func checkUploadChunkSize(cs fs.SizeSuffix) error {
+    const minChunkSize = fs.Byte
+    if cs < minChunkSize {
+        return errors.Errorf("%s is less than %s", cs, minChunkSize)
+    }
+    if cs > maxChunkSize {
+        return errors.Errorf("%s is greater than %s", cs, maxChunkSize)
+    }
+    return nil
+}
+
+func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
+    err = checkUploadChunkSize(cs)
+    if err == nil {
+        old, f.opt.ChunkSize = f.opt.ChunkSize, cs
+    }
+    return
+}
+
 // NewFs constructs an Fs from the path, container:path
 func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
     // Parse config into Options struct
@@ -210,8 +229,9 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
     if err != nil {
         return nil, err
     }
-    if opt.ChunkSize > maxChunkSize {
-        return nil, errors.Errorf("chunk size too big, must be < %v", maxChunkSize)
+    err = checkUploadChunkSize(opt.ChunkSize)
+    if err != nil {
+        return nil, errors.Wrap(err, "dropbox: chunk size")
     }

     // Convert the old token if it exists. The old token was just
backend/dropbox/dropbox_test.go

@@ -1,10 +1,10 @@
 // Test Dropbox filesystem interface
-package dropbox_test
+package dropbox

 import (
     "testing"

-    "github.com/ncw/rclone/backend/dropbox"
+    "github.com/ncw/rclone/fs"
     "github.com/ncw/rclone/fstest/fstests"
 )
@@ -12,6 +12,15 @@ import (
 func TestIntegration(t *testing.T) {
     fstests.Run(t, &fstests.Opt{
         RemoteName: "TestDropbox:",
-        NilObject:  (*dropbox.Object)(nil),
+        NilObject:  (*Object)(nil),
+        ChunkedUpload: fstests.ChunkedUploadConfig{
+            MaxChunkSize: maxChunkSize,
+        },
     })
 }
+
+func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
+    return f.setUploadChunkSize(cs)
+}
+
+var _ fstests.SetUploadChunkSizer = (*Fs)(nil)
backend/onedrive/onedrive.go

@@ -43,6 +43,8 @@ const (
     driveTypePersonal   = "personal"
     driveTypeBusiness   = "business"
     driveTypeSharepoint = "documentLibrary"
+    defaultChunkSize    = 10 * fs.MebiByte
+    chunkSizeMultiple   = 320 * fs.KibiByte
 )

 // Globals
@@ -217,7 +219,7 @@ func init() {

 Above this size files will be chunked - must be multiple of 320k. Note
 that the chunks will be buffered into memory.`,
-            Default:  fs.SizeSuffix(10 * 1024 * 1024),
+            Default:  defaultChunkSize,
             Advanced: true,
         }, {
             Name: "drive_id",
@@ -368,6 +370,25 @@ func errorHandler(resp *http.Response) error {
     return errResponse
 }

+func checkUploadChunkSize(cs fs.SizeSuffix) error {
+    const minChunkSize = fs.Byte
+    if cs%chunkSizeMultiple != 0 {
+        return errors.Errorf("%s is not a multiple of %s", cs, chunkSizeMultiple)
+    }
+    if cs < minChunkSize {
+        return errors.Errorf("%s is less than %s", cs, minChunkSize)
+    }
+    return nil
+}
+
+func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
+    err = checkUploadChunkSize(cs)
+    if err == nil {
+        old, f.opt.ChunkSize = f.opt.ChunkSize, cs
+    }
+    return
+}
+
 // NewFs constructs an Fs from the path, container:path
 func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
     // Parse config into Options struct
@@ -376,8 +397,10 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
     if err != nil {
         return nil, err
     }
-    if opt.ChunkSize%(320*1024) != 0 {
-        return nil, errors.Errorf("chunk size %d is not a multiple of 320k", opt.ChunkSize)
+
+    err = checkUploadChunkSize(opt.ChunkSize)
+    if err != nil {
+        return nil, errors.Wrap(err, "onedrive: chunk size")
     }

     if opt.DriveID == "" || opt.DriveType == "" {
backend/onedrive/onedrive_test.go

@@ -1,10 +1,10 @@
 // Test OneDrive filesystem interface
-package onedrive_test
+package onedrive

 import (
     "testing"

-    "github.com/ncw/rclone/backend/onedrive"
+    "github.com/ncw/rclone/fs"
     "github.com/ncw/rclone/fstest/fstests"
 )
@@ -12,6 +12,15 @@ import (
 func TestIntegration(t *testing.T) {
     fstests.Run(t, &fstests.Opt{
         RemoteName: "TestOneDrive:",
-        NilObject:  (*onedrive.Object)(nil),
+        NilObject:  (*Object)(nil),
+        ChunkedUpload: fstests.ChunkedUploadConfig{
+            CeilChunkSize: fstests.NextMultipleOf(chunkSizeMultiple),
+        },
     })
 }
+
+func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
+    return f.setUploadChunkSize(cs)
+}
+
+var _ fstests.SetUploadChunkSizer = (*Fs)(nil)
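OneDrive constrains chunk sizes to multiples of 320 KiB rather than to a simple range, so its config uses fstests.NextMultipleOf(chunkSizeMultiple), evidently a function that returns a rounding closure. A sketch consistent with that usage:

// Sketch of what fstests.NextMultipleOf presumably does: return a
// function that rounds a size up to the next multiple of m.
func NextMultipleOf(m fs.SizeSuffix) func(fs.SizeSuffix) fs.SizeSuffix {
    return func(cs fs.SizeSuffix) fs.SizeSuffix {
        return (cs + m - 1) / m * m // integer round-up
    }
}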
backend/s3/s3.go

@@ -547,7 +547,7 @@ in memory per transfer.

 If you are transferring large files over high speed links and you have
 enough memory, then increasing this will speed up the transfers.`,
-            Default:  fs.SizeSuffix(s3manager.MinUploadPartSize),
+            Default:  minChunkSize,
             Advanced: true,
         }, {
             Name: "disable_checksum",
@@ -595,7 +595,8 @@ const (
     maxRetries     = 10                            // number of retries to make of operations
     maxSizeForCopy = 5 * 1024 * 1024 * 1024        // The maximum size of object we can COPY
     maxFileSize    = 5 * 1024 * 1024 * 1024 * 1024 // largest possible upload file size
-    minSleep       = 10 * time.Millisecond         // In case of error, start at 10ms sleep.
+    minChunkSize   = fs.SizeSuffix(s3manager.MinUploadPartSize)
+    minSleep       = 10 * time.Millisecond // In case of error, start at 10ms sleep.
 )

 // Options defines the configuration for this backend
@@ -806,6 +807,21 @@ func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
     return c, ses, nil
 }

+func checkUploadChunkSize(cs fs.SizeSuffix) error {
+    if cs < minChunkSize {
+        return errors.Errorf("%s is less than %s", cs, minChunkSize)
+    }
+    return nil
+}
+
+func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
+    err = checkUploadChunkSize(cs)
+    if err == nil {
+        old, f.opt.ChunkSize = f.opt.ChunkSize, cs
+    }
+    return
+}
+
 // NewFs constructs an Fs from the path, bucket:path
 func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
     // Parse config into Options struct
@@ -814,8 +830,9 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
     if err != nil {
         return nil, err
     }
-    if opt.ChunkSize < fs.SizeSuffix(s3manager.MinUploadPartSize) {
-        return nil, errors.Errorf("s3 chunk size (%v) must be >= %v", opt.ChunkSize, fs.SizeSuffix(s3manager.MinUploadPartSize))
+    err = checkUploadChunkSize(opt.ChunkSize)
+    if err != nil {
+        return nil, errors.Wrap(err, "s3: chunk size")
     }
     bucket, directory, err := s3ParsePath(root)
     if err != nil {
backend/s3/s3_test.go

@@ -1,10 +1,10 @@
 // Test S3 filesystem interface
-package s3_test
+package s3

 import (
     "testing"

-    "github.com/ncw/rclone/backend/s3"
+    "github.com/ncw/rclone/fs"
     "github.com/ncw/rclone/fstest/fstests"
 )
@@ -12,6 +12,15 @@ import (
 func TestIntegration(t *testing.T) {
     fstests.Run(t, &fstests.Opt{
         RemoteName: "TestS3:",
-        NilObject:  (*s3.Object)(nil),
+        NilObject:  (*Object)(nil),
+        ChunkedUpload: fstests.ChunkedUploadConfig{
+            MinChunkSize: minChunkSize,
+        },
     })
 }
+
+func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
+    return f.setUploadChunkSize(cs)
+}
+
+var _ fstests.SetUploadChunkSizer = (*Fs)(nil)
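For S3 the lower bound comes straight from the AWS SDK: s3manager.MinUploadPartSize is 5 MiB, so aliasing it as minChunkSize keeps the backend's validation, the option default, and the test configuration in agreement. A small standalone check of that relationship:

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/service/s3/s3manager"
    "github.com/ncw/rclone/fs"
)

func main() {
    // s3manager.MinUploadPartSize is 5*1024*1024 bytes in aws-sdk-go,
    // so this should print "5M true".
    min := fs.SizeSuffix(s3manager.MinUploadPartSize)
    fmt.Println(min, min == 5*fs.MebiByte)
}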
backend/swift/swift.go

@@ -29,6 +29,7 @@ import (
 const (
     directoryMarkerContentType = "application/directory" // content type of directory marker objects
     listChunks                 = 1000                    // chunk size to read directory listings
+    defaultChunkSize           = 5 * fs.GibiByte
 )

 // SharedOptions are shared between swift and hubic
@@ -38,7 +39,7 @@ var SharedOptions = []fs.Option{{

 Above this size files will be chunked into a _segments container. The
 default for this is 5GB which is its maximum value.`,
-    Default:  fs.SizeSuffix(5 * 1024 * 1024 * 1024),
+    Default:  defaultChunkSize,
     Advanced: true,
 }}
@@ -302,6 +303,22 @@ func swiftConnection(opt *Options, name string) (*swift.Connection, error) {
     return c, nil
 }

+func checkUploadChunkSize(cs fs.SizeSuffix) error {
+    const minChunkSize = fs.Byte
+    if cs < minChunkSize {
+        return errors.Errorf("%s is less than %s", cs, minChunkSize)
+    }
+    return nil
+}
+
+func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
+    err = checkUploadChunkSize(cs)
+    if err == nil {
+        old, f.opt.ChunkSize = f.opt.ChunkSize, cs
+    }
+    return
+}
+
 // NewFsWithConnection constructs an Fs from the path, container:path
 // and authenticated connection.
 //
@@ -352,6 +369,10 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
     if err != nil {
         return nil, err
     }
+    err = checkUploadChunkSize(opt.ChunkSize)
+    if err != nil {
+        return nil, errors.Wrap(err, "swift: chunk size")
+    }

     c, err := swiftConnection(opt, name)
     if err != nil {
backend/swift/swift_test.go

@@ -1,10 +1,10 @@
 // Test Swift filesystem interface
-package swift_test
+package swift

 import (
     "testing"

-    "github.com/ncw/rclone/backend/swift"
+    "github.com/ncw/rclone/fs"
     "github.com/ncw/rclone/fstest/fstests"
 )
@@ -12,6 +12,12 @@ import (
 func TestIntegration(t *testing.T) {
     fstests.Run(t, &fstests.Opt{
         RemoteName: "TestSwift:",
-        NilObject:  (*swift.Object)(nil),
+        NilObject:  (*Object)(nil),
     })
 }
+
+func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
+    return f.setUploadChunkSize(cs)
+}
+
+var _ fstests.SetUploadChunkSizer = (*Fs)(nil)