azureblob,b2,s3: fix chunksize calculations producing too many parts

Before this fix, the chunksize calculator was using the previous size
of the object, not the new size of the object to calculate the chunk
sizes.

This meant that uploading a replacement object which needed a new
chunk size would fail because it used too many parts.

This fix changes the calculator to take the new size explicitly.
This commit is contained in:
Nick Craig-Wood 2022-08-09 10:44:54 +01:00
parent cb8842941b
commit 0501773db1
5 changed files with 111 additions and 37 deletions

View file

@@ -1678,14 +1678,14 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
}
-	uploadParts := int64(maxUploadParts)
+	uploadParts := maxUploadParts
if uploadParts < 1 {
uploadParts = 1
} else if uploadParts > maxUploadParts {
uploadParts = maxUploadParts
}
// calculate size of parts/blocks
-	partSize := chunksize.Calculator(o, int(uploadParts), o.fs.opt.ChunkSize)
+	partSize := chunksize.Calculator(o, src.Size(), uploadParts, o.fs.opt.ChunkSize)
putBlobOptions := azblob.UploadStreamToBlockBlobOptions{
BufferSize: int(partSize),