backends: change OpenChunkWriter interface to allow backend concurrency override
Before this change the concurrency used for an upload was rather inconsistent. - if size below `--backend-upload-cutoff` (default 200M) do single part upload. - if size below `--multi-thread-cutoff` (default 256M) or using streaming uploads (eg `rclone rcat`) do multipart upload using `--backend-upload-concurrency` to set the concurrency used by the uploader. - otherwise do multipart upload using `--multi-thread-streams` to set the concurrency. This change makes the default for the concurrency used be the `--backend-upload-concurrency`. If `--multi-thread-streams` is set and larger than the `--backend-upload-concurrency` then that will be used instead. This means that if the user sets `--backend-upload-concurrency` then it will be obeyed for all multipart/multi-thread transfers and the user can override them all with `--multi-thread-streams`. See: #7056
This commit is contained in:
parent
a7337b0a95
commit
2db0e23584
8 changed files with 124 additions and 82 deletions
|
@@ -53,10 +53,8 @@ type objectChunkWriter struct {
 func (o *Object) uploadMultipart(ctx context.Context, src fs.ObjectInfo, in io.Reader, options ...fs.OpenOption) error {
 	_, err := multipart.UploadMultipart(ctx, src, in, multipart.UploadMultipartOptions{
-		Open:              o.fs,
-		Concurrency:       o.fs.opt.UploadConcurrency,
-		LeavePartsOnError: o.fs.opt.LeavePartsOnError,
-		OpenOptions:       options,
+		Open:        o.fs,
+		OpenOptions: options,
 	})
 	return err
 }
@@ -69,7 +67,7 @@ func (f *Fs) OpenChunkWriter(
 	ctx context.Context,
 	remote string,
 	src fs.ObjectInfo,
-	options ...fs.OpenOption) (chunkSizeResult int64, writer fs.ChunkWriter, err error) {
+	options ...fs.OpenOption) (info fs.ChunkWriterInfo, writer fs.ChunkWriter, err error) {
 	// Temporary Object under construction
 	o := &Object{
 		fs: f,
@@ -77,7 +75,7 @@ func (f *Fs) OpenChunkWriter(
 	}
 	ui, err := o.prepareUpload(ctx, src, options)
 	if err != nil {
-		return -1, nil, fmt.Errorf("failed to prepare upload: %w", err)
+		return info, nil, fmt.Errorf("failed to prepare upload: %w", err)
 	}
 
 	uploadParts := f.opt.MaxUploadParts
@@ -105,7 +103,7 @@ func (f *Fs) OpenChunkWriter(
 
 	uploadID, existingParts, err := o.createMultipartUpload(ctx, ui.req)
 	if err != nil {
-		return -1, nil, fmt.Errorf("create multipart upload request failed: %w", err)
+		return info, nil, fmt.Errorf("create multipart upload request failed: %w", err)
 	}
 	bucketName, bucketPath := o.split()
 	chunkWriter := &objectChunkWriter{
@@ -119,8 +117,13 @@ func (f *Fs) OpenChunkWriter(
 		ui: ui,
 		o:  o,
 	}
+	info = fs.ChunkWriterInfo{
+		ChunkSize:         int64(chunkSize),
+		Concurrency:       o.fs.opt.UploadConcurrency,
+		LeavePartsOnError: o.fs.opt.LeavePartsOnError,
+	}
 	fs.Debugf(o, "open chunk writer: started multipart upload: %v", uploadID)
-	return int64(chunkSize), chunkWriter, err
+	return info, chunkWriter, err
 }
 
 // WriteChunk will write chunk number with reader bytes, where chunk number >= 0
Loading…
Add table
Add a link
Reference in a new issue