amazonclouddrive: Restart directory listings on error - fixes #475

Before this change rclone would retry only the page that failed while
paging through the directory listing.  However it turns out that on 429
errors at least, that page is dropped from the listing entirely, which
results in missing files in the list.  The workaround for this is to
restart the directory listing from the beginning on any retryable error.
Nick Craig-Wood 2016-05-14 17:15:42 +01:00
parent ac9c20b048
commit 536526cc92
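
The heart of the fix is a pattern: instead of retrying the single page
that failed inside the pager, buffer the whole listing and restart it
from the top whenever any page fails with a retryable error. A minimal
sketch of that pattern, assuming a paged listing that can fail
transiently (listOnce, errRetryable and maxTries are illustrative names,
not from the rclone source):

	package main

	import (
		"errors"
		"fmt"
	)

	// errRetryable stands in for a low level error such as an HTTP 429,
	// after which the server may silently drop entries from the listing.
	var errRetryable = errors.New("retryable listing error")

	// listOnce pages through a listing, buffering every name it sees.
	// On a retryable failure the partial buffer is discarded so the
	// caller can restart the whole walk from scratch.
	func listOnce(pages [][]string, failOn int) ([]string, error) {
		var out []string
		for i, page := range pages {
			if i == failOn {
				return nil, errRetryable
			}
			out = append(out, page...)
		}
		return out, nil
	}

	func main() {
		pages := [][]string{{"a", "b"}, {"c"}, {"d", "e"}}
		failOn := 1 // fail the second page on the first attempt only
		const maxTries = 3
		for tries := 1; tries <= maxTries; tries++ {
			names, err := listOnce(pages, failOn)
			if errors.Is(err, errRetryable) {
				fmt.Printf("listing error: %v - restarting %d/%d\n", err, tries, maxTries)
				failOn = -1 // later attempts succeed
				continue
			}
			fmt.Println("complete listing:", names)
			break
		}
	}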

@@ -308,11 +308,11 @@ func (f *Fs) listAll(dirID string, title string, directoriesOnly bool, filesOnly
 		Filters: query,
 	}
 	var nodes []*acd.Node
+	var out []*acd.Node
 	//var resp *http.Response
-OUTER:
 	for {
 		var resp *http.Response
-		err = f.pacer.Call(func() (bool, error) {
+		err = f.pacer.CallNoRetry(func() (bool, error) {
 			nodes, resp, err = f.c.Nodes.GetNodes(&opts)
 			return shouldRetry(resp, err)
 		})
@@ -328,48 +328,64 @@ OUTER:
 			if *node.Status != statusAvailable {
 				continue
 			}
-			if fn(node) {
-				found = true
-				break OUTER
-			}
+			// Store the nodes up in case we have to retry the listing
+			out = append(out, node)
 		}
 	}
 }
+	// Send the nodes now
+	for _, node := range out {
+		if fn(node) {
+			found = true
+			break
+		}
+	}
 	return
 }
 
 // ListDir reads the directory specified by the job into out, returning any more jobs
 func (f *Fs) ListDir(out fs.ListOpts, job dircache.ListDirJob) (jobs []dircache.ListDirJob, err error) {
 	fs.Debug(f, "Reading %q", job.Path)
-	_, err = f.listAll(job.DirID, "", false, false, func(node *acd.Node) bool {
-		remote := job.Path + *node.Name
-		switch *node.Kind {
-		case folderKind:
-			if out.IncludeDirectory(remote) {
-				dir := &fs.Dir{
-					Name:  remote,
-					Bytes: -1,
-					Count: -1,
-				}
-				dir.When, _ = time.Parse(timeFormat, *node.ModifiedDate) // FIXME
-				if out.AddDir(dir) {
-					return true
-				}
-				if job.Depth > 0 {
-					jobs = append(jobs, dircache.ListDirJob{DirID: *node.Id, Path: remote + "/", Depth: job.Depth - 1})
-				}
-			}
-		case fileKind:
-			if o := f.newFsObjectWithInfo(remote, node); o != nil {
-				if out.Add(o) {
-					return true
-				}
-			}
-		default:
-			// ignore ASSET etc
-		}
-		return false
-	})
+	maxTries := fs.Config.LowLevelRetries
+	for tries := 1; tries <= maxTries; tries++ {
+		_, err = f.listAll(job.DirID, "", false, false, func(node *acd.Node) bool {
+			remote := job.Path + *node.Name
+			switch *node.Kind {
+			case folderKind:
+				if out.IncludeDirectory(remote) {
+					dir := &fs.Dir{
+						Name:  remote,
+						Bytes: -1,
+						Count: -1,
+					}
+					dir.When, _ = time.Parse(timeFormat, *node.ModifiedDate) // FIXME
+					if out.AddDir(dir) {
+						return true
+					}
+					if job.Depth > 0 {
+						jobs = append(jobs, dircache.ListDirJob{DirID: *node.Id, Path: remote + "/", Depth: job.Depth - 1})
+					}
+				}
+			case fileKind:
+				if o := f.newFsObjectWithInfo(remote, node); o != nil {
+					if out.Add(o) {
+						return true
+					}
+				}
+			default:
+				// ignore ASSET etc
+			}
+			return false
+		})
+		if fs.IsRetryError(err) {
+			fs.Debug(f, "Directory listing error for %q: %v - low level retry %d/%d", job.Path, err, tries, maxTries)
+			continue
+		}
+		if err != nil {
+			return nil, err
+		}
+		break
+	}
 	fs.Debug(f, "Finished reading %q", job.Path)
 	return jobs, err
 }
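
The other half of the change is the switch from pacer.Call to
pacer.CallNoRetry: the pacer still rate-limits the request but makes
only a single attempt, flagging retryable failures so fs.IsRetryError
can route them to the restart loop in ListDir instead of a per-page
retry. A rough sketch of that contract as the caller sees it, assuming
CallNoRetry wraps retryable errors in a marker type (the retryError
type and helper names below are stand-ins, not rclone's implementation):

	package main

	import (
		"errors"
		"fmt"
	)

	// retryError marks an error that may be retried at a higher level.
	type retryError struct{ err error }

	func (e retryError) Error() string { return e.err.Error() }

	// isRetryError plays the role of fs.IsRetryError in this sketch.
	func isRetryError(err error) bool {
		var r retryError
		return errors.As(err, &r)
	}

	// callNoRetry makes one paced attempt; if the callback reports the
	// error as retryable it wraps it rather than retrying in place.
	func callNoRetry(f func() (bool, error)) error {
		again, err := f()
		if err != nil && again {
			return retryError{err}
		}
		return err
	}

	func main() {
		err := callNoRetry(func() (bool, error) {
			return true, fmt.Errorf("HTTP 429: rate limited")
		})
		// The outer listing loop keys off this to restart from scratch.
		fmt.Println(isRetryError(err)) // true
	}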