operations: fix retries downloading too much data with certain backends
Before this fix, if more than one retry happened on a file that rclone had opened for read with a backend that uses fs.FixRangeOption, then rclone would read too much data and the transfer would fail.

Backends affected:

- azureblob, azurefiles, b2, box, dropbox, fichier, filefabric
- googlecloudstorage, hidrive, imagekit, jottacloud, koofr, netstorage
- onedrive, opendrive, oracleobjectstorage, pikpak, premiumizeme
- protondrive, qingstor, quatrix, s3, sharefile, sugarsync, swift
- uptobox, webdav, zoho

This was because rclone was emitting Range requests for the wrong data range on the second and subsequent retries. This was caused by fs.FixRangeOption modifying the options and the reopen code relying on them not being modified.

This fix makes a copy of the options before fs.FixRangeOption sees them in the reopen code. In future it might be best to change fs.FixRangeOption so it returns a new options slice.

Fixes #7759
parent a4fc5edc5e
commit 93955b755f
2 changed files with 4 additions and 0 deletions
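The underlying problem is ordinary Go slice aliasing: the reopen code passed the same []fs.OpenOption slice on every attempt, and fs.FixRangeOption rewrites entries of that slice in place, so attempts after the first no longer described the range the caller originally asked for. A minimal sketch of the pattern, using a hypothetical mutateOptions helper and plain ints in place of fs.FixRangeOption and fs.OpenOption (not rclone's real types):

package main

import "fmt"

// mutateOptions stands in for fs.FixRangeOption in this sketch: it rewrites
// the caller's slice elements in place instead of returning a new slice.
func mutateOptions(opts []int) {
	for i := range opts {
		opts[i] *= 2
	}
}

func main() {
	orig := []int{1, 2, 3}

	// Buggy pattern: every retry passes the same backing array, so the
	// second attempt sees values already rewritten by the first attempt.
	for try := 1; try <= 2; try++ {
		mutateOptions(orig)
		fmt.Println("aliased retry", try, orig) // [2 4 6], then [4 8 12]
	}

	orig = []int{1, 2, 3}

	// Fixed pattern (the copy idiom used by the commit): copy the slice
	// before handing it to the mutating helper, so each retry starts from
	// the caller's original values.
	for try := 1; try <= 2; try++ {
		opts := append(make([]int, 0, len(orig)), orig...)
		mutateOptions(opts)
		fmt.Println("copied retry", try, orig) // [1 2 3] both times
	}
}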
@@ -138,6 +138,8 @@ func (h *ReOpen) open() error {
 		// Adjust range start to where we have got to
 		h.rangeOption.Start = h.start + h.offset
 	}
+	// Make a copy of the options as fs.FixRangeOption modifies them :-(
+	opts = append(make([]fs.OpenOption, 0, len(opts)), opts...)
 	h.tries++
 	if h.tries > h.maxTries {
 		h.err = errTooManyTries
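The added line, opts = append(make([]fs.OpenOption, 0, len(opts)), opts...), allocates a fresh slice with the same capacity and copies the existing option values into it, so any rewriting that fs.FixRangeOption later does to the slice elements is confined to this per-attempt copy. A shallow copy is enough here because the modification happens at the slice-element level; the commit message notes that a cleaner long-term fix would be for fs.FixRangeOption to return a new options slice rather than mutating its argument.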
@@ -39,6 +39,8 @@ type reOpenTestObject struct {
 //
 // This will break after reading the number of bytes in breaks
 func (o *reOpenTestObject) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
+	// Lots of backends do this - make sure it works as it modifies options
+	fs.FixRangeOption(options, o.Size())
 	gotHash := false
 	gotRange := false
 	startPos := int64(0)