diff --git a/backend/s3/s3.go b/backend/s3/s3.go index 4472d0445..52fa696a3 100644 --- a/backend/s3/s3.go +++ b/backend/s3/s3.go @@ -220,6 +220,7 @@ func init() { }}, }}, }) + flags.VarP(&s3ChunkSize, "s3-chunk-size", "", "Chunk size to use for uploading") } // Constants @@ -237,6 +238,7 @@ var ( // Flags s3ACL = flags.StringP("s3-acl", "", "", "Canned ACL used when creating buckets and/or storing objects in S3") s3StorageClass = flags.StringP("s3-storage-class", "", "", "Storage class to use when uploading S3 objects (STANDARD|REDUCED_REDUNDANCY|STANDARD_IA)") + s3ChunkSize = fs.SizeSuffix(s3manager.MinUploadPartSize) ) // Fs represents a remote s3 server @@ -427,6 +429,9 @@ func NewFs(name, root string) (fs.Fs, error) { if *s3StorageClass != "" { f.storageClass = *s3StorageClass } + if s3ChunkSize < fs.SizeSuffix(s3manager.MinUploadPartSize) { + return nil, errors.Errorf("s3 chunk size must be >= %v", fs.SizeSuffix(s3manager.MinUploadPartSize)) + } if f.root != "" { f.root += "/" // Check to see if the object exists @@ -1040,7 +1045,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio u.Concurrency = 2 u.LeavePartsOnError = false u.S3 = o.fs.c - u.PartSize = s3manager.MinUploadPartSize + u.PartSize = int64(s3ChunkSize) if size == -1 { // Make parts as small as possible while still being able to upload to the diff --git a/docs/content/s3.md b/docs/content/s3.md index 5a378419a..7e1dcf0d1 100644 --- a/docs/content/s3.md +++ b/docs/content/s3.md @@ -339,6 +339,16 @@ Available options include: - STANDARD_IA - for less frequently accessed data (e.g backups) - REDUCED_REDUNDANCY (only for noncritical, reproducible data, has lower redundancy) +#### --s3-chunk-size=SIZE #### + +Any files larger than this will be uploaded in chunks of this +size. The default is 5MB. The minimum is 5MB. + +Note that 2 chunks of this size are buffered in memory per transfer. 
+ +If you are transferring large files over high-speed links and you have +enough memory, then increasing this will speed up the transfers. + ### Anonymous access to public buckets ### If you want to use rclone to access a public bucket, configure with a