From 7f0b2042925b88c0f510df15026fdb1f7b5a8601 Mon Sep 17 00:00:00 2001
From: Nick Craig-Wood
Date: Mon, 15 Oct 2018 09:01:48 +0100
Subject: [PATCH] azureblob: work around SDK bug which causes errors for
 chunk-sized files (again)

Until https://github.com/Azure/azure-storage-blob-go/pull/75 is merged
the SDK can't upload a single blob of exactly the chunk size, so upload
files of this size with a multipart upload as a workaround.

The previous fix for this, 6a773289e7, turned out to cause problems
when uploading files with the maximum chunk size, so it needed to be
redone.

Fixes #2653
---
 backend/azureblob/azureblob.go | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)

diff --git a/backend/azureblob/azureblob.go b/backend/azureblob/azureblob.go
index fa05c3e5b..5e81d13ea 100644
--- a/backend/azureblob/azureblob.go
+++ b/backend/azureblob/azureblob.go
@@ -1285,16 +1285,25 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 	}
 
 	putBlobOptions := azblob.UploadStreamToBlockBlobOptions{
-		BufferSize:      int(o.fs.opt.ChunkSize) + 1, // +1 Needed until https://github.com/Azure/azure-storage-blob-go/pull/75 is merged
+		BufferSize:      int(o.fs.opt.ChunkSize),
 		MaxBuffers:      4,
 		Metadata:        o.meta,
 		BlobHTTPHeaders: httpHeaders,
 	}
+	// FIXME Until https://github.com/Azure/azure-storage-blob-go/pull/75
+	// is merged the SDK can't upload a single blob of exactly the chunk
+	// size, so upload with a multipart upload to work around it.
+	// See: https://github.com/ncw/rclone/issues/2653
+	multipartUpload := size >= int64(o.fs.opt.UploadCutoff)
+	if size == int64(o.fs.opt.ChunkSize) {
+		multipartUpload = true
+		fs.Debugf(o, "Setting multipart upload for file of chunk size (%d) to work around SDK bug", size)
+	}
 
 	ctx := context.Background()
 	// Don't retry, return a retry error instead
 	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
-		if size >= int64(o.fs.opt.UploadCutoff) {
+		if multipartUpload {
 			// If a large file upload in chunks
 			err = o.uploadMultipart(in, size, &blob, &httpHeaders)
 		} else {
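
Note: the decision the hunk above introduces can be sketched in isolation as
below. This is a simplified illustration, not rclone's implementation: the
function name shouldUseMultipart and the constant values are assumptions made
for the example; only the ">= upload cutoff" and "== chunk size" logic comes
from the patch itself.

package main

import "fmt"

// shouldUseMultipart mirrors the workaround in the patch: use the multipart
// (block) upload path when the blob is at or above the upload cutoff, and
// also when it is exactly one chunk long, because the SDK (before
// Azure/azure-storage-blob-go#75 is merged) cannot stream a single blob of
// exactly the chunk size.
func shouldUseMultipart(size, chunkSize, uploadCutoff int64) bool {
	if size >= uploadCutoff {
		return true
	}
	// Work around the SDK limitation: route chunk-sized blobs through
	// the multipart path as well.
	return size == chunkSize
}

func main() {
	// Illustrative values only; not asserted to be rclone's defaults.
	const chunkSize = int64(4 * 1024 * 1024)
	const uploadCutoff = int64(256 * 1024 * 1024)
	for _, size := range []int64{chunkSize - 1, chunkSize, chunkSize + 1, uploadCutoff} {
		fmt.Printf("size=%d multipart=%v\n", size, shouldUseMultipart(size, chunkSize, uploadCutoff))
	}
}

Run against a chunk-sized input this reports multipart=true, which is the case
the Debugf line in the patch logs.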