diff --git a/backend/qingstor/qingstor.go b/backend/qingstor/qingstor.go index 73f277362..501c727ee 100644 --- a/backend/qingstor/qingstor.go +++ b/backend/qingstor/qingstor.go @@ -93,7 +93,7 @@ as multipart uploads using this chunk size. Note that "--qingstor-upload-concurrency" chunks of this size are buffered in memory per transfer. -If you are transferring large files over high speed links and you have +If you are transferring large files over high-speed links and you have enough memory, then increasing this will speed up the transfers.`, Default: minChunkSize, Advanced: true, @@ -107,7 +107,7 @@ concurrently. NB if you set this to > 1 then the checksums of multipart uploads become corrupted (the uploads themselves are not corrupted though). -If you are uploading small numbers of large file over high speed link +If you are uploading small numbers of large files over high-speed links and these uploads do not fully utilize your bandwidth, then increasing this may help to speed up the transfers.`, Default: 1, diff --git a/backend/s3/s3.go b/backend/s3/s3.go index 183a721a4..b1c05d158 100644 --- a/backend/s3/s3.go +++ b/backend/s3/s3.go @@ -1023,7 +1023,7 @@ using this chunk size. Note that "--s3-upload-concurrency" chunks of this size are buffered in memory per transfer. -If you are transferring large files over high speed links and you have +If you are transferring large files over high-speed links and you have enough memory, then increasing this will speed up the transfers. Rclone will automatically increase the chunk size when uploading a @@ -1107,7 +1107,7 @@ If empty it will default to the environment variable "AWS_PROFILE" or This is the number of chunks of the same file that are uploaded concurrently. 
-If you are uploading small numbers of large file over high speed link +If you are uploading small numbers of large files over high-speed links and these uploads do not fully utilize your bandwidth, then increasing this may help to speed up the transfers.`, Default: 4, diff --git a/docs/content/qingstor.md b/docs/content/qingstor.md index f35dc15bc..e0e6e1eef 100644 --- a/docs/content/qingstor.md +++ b/docs/content/qingstor.md @@ -244,7 +244,7 @@ as multipart uploads using this chunk size. Note that "--qingstor-upload-concurrency" chunks of this size are buffered in memory per transfer. -If you are transferring large files over high speed links and you have +If you are transferring large files over high-speed links and you have enough memory, then increasing this will speed up the transfers. - Config: chunk_size @@ -262,7 +262,7 @@ concurrently. NB if you set this to > 1 then the checksums of multipart uploads become corrupted (the uploads themselves are not corrupted though). -If you are uploading small numbers of large file over high speed link +If you are uploading small numbers of large files over high-speed links and these uploads do not fully utilize your bandwidth, then increasing this may help to speed up the transfers. diff --git a/docs/content/s3.md b/docs/content/s3.md index ccfad86a6..cba5ada88 100644 --- a/docs/content/s3.md +++ b/docs/content/s3.md @@ -1213,7 +1213,7 @@ using this chunk size. Note that "--s3-upload-concurrency" chunks of this size are buffered in memory per transfer. -If you are transferring large files over high speed links and you have +If you are transferring large files over high-speed links and you have enough memory, then increasing this will speed up the transfers. Rclone will automatically increase the chunk size when uploading a @@ -1328,7 +1328,7 @@ Concurrency for multipart uploads. This is the number of chunks of the same file that are uploaded concurrently. 
-If you are uploading small numbers of large file over high speed link +If you are uploading small numbers of large files over high-speed links and these uploads do not fully utilize your bandwidth, then increasing this may help to speed up the transfers.