s3: add --s3-chunk-size option - fixes #2203
This commit is contained in:
parent
fbe1c7f1ea
commit
92c5aa3786
2 changed files with 16 additions and 1 deletions
|
@ -220,6 +220,7 @@ func init() {
|
||||||
}},
|
}},
|
||||||
}},
|
}},
|
||||||
})
|
})
|
||||||
|
flags.VarP(&s3ChunkSize, "s3-chunk-size", "", "Chunk size to use for uploading")
|
||||||
}
|
}
|
||||||
|
|
||||||
// Constants
|
// Constants
|
||||||
|
@ -237,6 +238,7 @@ var (
|
||||||
// Flags
|
// Flags
|
||||||
s3ACL = flags.StringP("s3-acl", "", "", "Canned ACL used when creating buckets and/or storing objects in S3")
|
s3ACL = flags.StringP("s3-acl", "", "", "Canned ACL used when creating buckets and/or storing objects in S3")
|
||||||
s3StorageClass = flags.StringP("s3-storage-class", "", "", "Storage class to use when uploading S3 objects (STANDARD|REDUCED_REDUNDANCY|STANDARD_IA)")
|
s3StorageClass = flags.StringP("s3-storage-class", "", "", "Storage class to use when uploading S3 objects (STANDARD|REDUCED_REDUNDANCY|STANDARD_IA)")
|
||||||
|
s3ChunkSize = fs.SizeSuffix(s3manager.MinUploadPartSize)
|
||||||
)
|
)
|
||||||
|
|
||||||
// Fs represents a remote s3 server
|
// Fs represents a remote s3 server
|
||||||
|
@ -427,6 +429,9 @@ func NewFs(name, root string) (fs.Fs, error) {
|
||||||
if *s3StorageClass != "" {
|
if *s3StorageClass != "" {
|
||||||
f.storageClass = *s3StorageClass
|
f.storageClass = *s3StorageClass
|
||||||
}
|
}
|
||||||
|
if s3ChunkSize < fs.SizeSuffix(s3manager.MinUploadPartSize) {
|
||||||
|
return nil, errors.Errorf("s3 chunk size must be >= %v", fs.SizeSuffix(s3manager.MinUploadPartSize))
|
||||||
|
}
|
||||||
if f.root != "" {
|
if f.root != "" {
|
||||||
f.root += "/"
|
f.root += "/"
|
||||||
// Check to see if the object exists
|
// Check to see if the object exists
|
||||||
|
@ -1040,7 +1045,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||||
u.Concurrency = 2
|
u.Concurrency = 2
|
||||||
u.LeavePartsOnError = false
|
u.LeavePartsOnError = false
|
||||||
u.S3 = o.fs.c
|
u.S3 = o.fs.c
|
||||||
u.PartSize = s3manager.MinUploadPartSize
|
u.PartSize = int64(s3ChunkSize)
|
||||||
|
|
||||||
if size == -1 {
|
if size == -1 {
|
||||||
// Make parts as small as possible while still being able to upload to the
|
// Make parts as small as possible while still being able to upload to the
|
||||||
|
|
|
@ -339,6 +339,16 @@ Available options include:
|
||||||
- STANDARD_IA - for less frequently accessed data (e.g. backups)
|
- STANDARD_IA - for less frequently accessed data (e.g. backups)
|
||||||
- REDUCED_REDUNDANCY (only for noncritical, reproducible data, has lower redundancy)
|
- REDUCED_REDUNDANCY (only for noncritical, reproducible data, has lower redundancy)
|
||||||
|
|
||||||
|
#### --s3-chunk-size=SIZE ####
|
||||||
|
|
||||||
|
Any files larger than this will be uploaded in chunks of this
|
||||||
|
size. The default is 5MB. The minimum is 5MB.
|
||||||
|
|
||||||
|
Note that 2 chunks of this size are buffered in memory per transfer.
|
||||||
|
|
||||||
|
If you are transferring large files over high speed links and you have
|
||||||
|
enough memory, then increasing this will speed up the transfers.
|
||||||
|
|
||||||
### Anonymous access to public buckets ###
|
### Anonymous access to public buckets ###
|
||||||
|
|
||||||
If you want to use rclone to access a public bucket, configure with a
|
If you want to use rclone to access a public bucket, configure with a
|
||||||
|
|
Loading…
Add table
Reference in a new issue