From 0914ec316c6521da19180969a8e6610039e15949 Mon Sep 17 00:00:00 2001
From: Nick Craig-Wood
Date: Wed, 13 Dec 2017 10:11:20 +0000
Subject: [PATCH] b2: fix multipart upload retries #1733

Prior to this fix we were uploading 0 length bodies if a retry was
needed on a multipart upload chunk. This gave this error
`http: ContentLength=268435496 with Body length 0`.

Fix by remaking the hash appending reader in the Call loop. This is
inefficient in the face of retries, but these are uncommon.
---
 b2/upload.go | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/b2/upload.go b/b2/upload.go
index dbf53731e..18c17e3c9 100644
--- a/b2/upload.go
+++ b/b2/upload.go
@@ -182,9 +182,6 @@ func (up *largeUpload) clearUploadURL() {
 
 // Transfer a chunk
 func (up *largeUpload) transferChunk(part int64, body []byte) error {
-	in := newHashAppendingReader(bytes.NewReader(body), sha1.New())
-	size := int64(len(body)) + int64(in.AdditionalLength())
-
 	err := up.f.pacer.Call(func() (bool, error) {
 		fs.Debugf(up.o, "Sending chunk %d length %d", part, len(body))
 
@@ -194,6 +191,9 @@ func (up *largeUpload) transferChunk(part int64, body []byte) error {
 			return false, err
 		}
 
+		in := newHashAppendingReader(bytes.NewReader(body), sha1.New())
+		size := int64(len(body)) + int64(in.AdditionalLength())
+
 		// Authorization
 		//
 		// An upload authorization token, from b2_get_upload_part_url.
@@ -238,6 +238,7 @@ func (up *largeUpload) transferChunk(part int64, body []byte) error {
 			upload = nil
 		}
 		up.returnUploadURL(upload)
+		up.sha1s[part-1] = in.HexSum()
 		return retry, err
 	})
 	if err != nil {
@@ -245,7 +246,6 @@ func (up *largeUpload) transferChunk(part int64, body []byte) error {
 	} else {
 		fs.Debugf(up.o, "Done sending chunk %d", part)
 	}
-	up.sha1s[part-1] = in.HexSum()
 	return err
 }
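
Note (not part of the patch): the sketch below is a minimal, self-contained
illustration of the failure mode the commit message describes, using made-up
names rather than rclone's newHashAppendingReader or pacer.Call. A
bytes.Reader built once outside the retry loop is already drained when a
retry fires, so the second attempt sends a zero length body; rebuilding the
reader on every attempt, as the patch now does inside the Call closure,
resends the full chunk.

package main

import (
	"bytes"
	"errors"
	"fmt"
	"io"
)

// sendOnce drains the body, standing in for an HTTP round trip, and fails on
// the first attempt so the caller has to retry.
func sendOnce(attempt int, body io.Reader) error {
	n, _ := io.Copy(io.Discard, body)
	fmt.Printf("attempt %d: sent %d bytes\n", attempt, n)
	if attempt == 1 {
		return errors.New("simulated transient error")
	}
	return nil
}

func main() {
	payload := []byte("example chunk data")

	// Broken pattern: the reader is built once, outside the retry loop, so
	// the retry sends a 0 length body.
	in := bytes.NewReader(payload)
	for attempt := 1; attempt <= 2; attempt++ {
		_ = sendOnce(attempt, in)
	}

	// Fixed pattern (what the patch does inside the Call closure): rebuild
	// the reader on every attempt so a retry resends the whole chunk.
	for attempt := 1; attempt <= 2; attempt++ {
		in := bytes.NewReader(payload)
		_ = sendOnce(attempt, in)
	}
}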