package downloaders

import (
	"context"
	"io"
	"sync"
	"time"

	"github.com/pkg/errors"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/accounting"
	"github.com/rclone/rclone/fs/asyncreader"
	"github.com/rclone/rclone/fs/chunkedreader"
	"github.com/rclone/rclone/lib/ranges"
	"github.com/rclone/rclone/vfs/vfscommon"
)

// FIXME implement max downloaders

const (
	// max time a downloader can be idle before closing itself
	maxDownloaderIdleTime = 5 * time.Second
	// max number of bytes a reader should skip over before closing it
	maxSkipBytes = 1024 * 1024
	// time between background kicks of waiters to pick up errors
	backgroundKickerInterval = 5 * time.Second
	// maximum number of consecutive errors before declaring the download dead
	maxErrorCount = 10
)

// Item is the interface that an item to download must obey
type Item interface {
	// FindMissing adjusts r returning a new ranges.Range which only
	// contains the range which needs to be downloaded. This could be
	// empty - check with IsEmpty. It also adjusts r to make sure it is
	// not larger than the file.
	FindMissing(r ranges.Range) (outr ranges.Range)

	// HasRange returns true if the current ranges entirely include range
	HasRange(r ranges.Range) bool

	// WriteAtNoOverwrite writes b to the file, but will not overwrite
	// already present ranges.
	//
	// This is used by the downloader to write bytes to the file.
	//
	// It returns n, the total number of bytes processed, and skipped,
	// the number of bytes which were processed but not actually written
	// to the file.
	WriteAtNoOverwrite(b []byte, off int64) (n int, skipped int, err error)
}
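
// For illustration only - a worked example of the Item semantics,
// assuming a file in which bytes [0, 100) are already present (the
// numbers and names below are assumptions, not part of this package):
//
//	item.FindMissing(ranges.Range{Pos: 50, Size: 100})
//	// -> ranges.Range{Pos: 100, Size: 50}: only [100, 150) is missing
//
//	item.HasRange(ranges.Range{Pos: 0, Size: 100})
//	// -> true: [0, 100) is entirely present
//
//	n, skipped, err := item.WriteAtNoOverwrite(b, 90) // len(b) == 20
//	// -> n == 20, skipped == 10: b[:10] overlapped the present range
//	// [90, 100) and was skipped, b[10:] was written at offset 100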

// Downloaders is a number of downloaders and a queue of waiters
// waiting for segments to be downloaded to a file.
type Downloaders struct {
	// Write once - no locking required
	ctx    context.Context
	cancel context.CancelFunc
	item   Item
	opt    *vfscommon.Options
	src    fs.Object // source object
	remote string
	wg     sync.WaitGroup

	// Read write
	mu         sync.Mutex
	dls        []*downloader
	waiters    []waiter
	errorCount int   // number of consecutive errors
	lastErr    error // last error received
}

// waiter is a range we are waiting for and a channel to signal when
// the range is found
type waiter struct {
	r       ranges.Range
	errChan chan<- error
}

// downloader represents a running download for part of a file.
type downloader struct {
	// Write once
	dls  *Downloaders   // parent structure
	quit chan struct{}  // close to quit the downloader
	wg   sync.WaitGroup // to keep track of downloader goroutine
	kick chan struct{}  // kick the downloader when needed

	// Read write
	mu        sync.Mutex
	start     int64 // start offset
	offset    int64 // current offset
	maxOffset int64 // maximum offset we are reading to
	tr        *accounting.Transfer
	in        *accounting.Account // input we are reading from
	skipped   int64               // number of bytes we have skipped sequentially
	_closed   bool                // set to true if downloader is closed
	stop      bool                // set to true if we have called _stop()
}

// New makes a Downloaders for item
func New(item Item, opt *vfscommon.Options, remote string, src fs.Object) (dls *Downloaders) {
	if src == nil {
		panic("internal error: New called with nil src object")
	}
	ctx, cancel := context.WithCancel(context.Background())
	dls = &Downloaders{
		ctx:    ctx,
		cancel: cancel,
		item:   item,
		opt:    opt,
		src:    src,
		remote: remote,
	}
	dls.wg.Add(1)
	go func() {
		defer dls.wg.Done()
		ticker := time.NewTicker(backgroundKickerInterval)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				err := dls.kickWaiters()
				if err != nil {
					fs.Errorf(dls.src, "vfs cache: failed to kick waiters: %v", err)
				}
			case <-ctx.Done():
				return
			}
		}
	}()

	return dls
}
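
// For illustration only - how a hypothetical caller might use this
// package. "item", "opt" and "src" stand in for a real cache item, the
// VFS options and the remote object; they are assumptions, not exports
// of this package:
//
//	dls := New(item, opt, "path/to/file", src)
//	defer func() { _ = dls.Close(nil) }()
//
//	// Block until the first 128 KiB is present in the cache file
//	err := dls.Download(ranges.Range{Pos: 0, Size: 128 * 1024})
//	if err != nil {
//		fs.Errorf(src, "vfs cache: download failed: %v", err)
//	}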

// Accumulate errors for this downloader
//
// It should be called with n, the number of bytes downloaded, and err,
// the error from the download.
//
// call with lock held
func (dls *Downloaders) _countErrors(n int64, err error) {
	if err == nil && n != 0 {
		if dls.errorCount != 0 {
			fs.Infof(dls.src, "vfs cache: downloader: resetting error count to 0")
			dls.errorCount = 0
			dls.lastErr = nil
		}
		return
	}
	if err != nil {
		dls.errorCount++
		dls.lastErr = err
		fs.Infof(dls.src, "vfs cache: downloader: error count now %d: %v", dls.errorCount, err)
	}
}

func (dls *Downloaders) countErrors(n int64, err error) {
	dls.mu.Lock()
	dls._countErrors(n, err)
	dls.mu.Unlock()
}

// Make a new downloader, starting it to download r
//
// call with lock held
func (dls *Downloaders) _newDownloader(r ranges.Range) (dl *downloader, err error) {
	// defer log.Trace(dls.src, "r=%v", r)("err=%v", &err)

	dl = &downloader{
		kick:      make(chan struct{}, 1),
		quit:      make(chan struct{}),
		dls:       dls,
		start:     r.Pos,
		offset:    r.Pos,
		maxOffset: r.End(),
	}

	err = dl.open(dl.offset)
	if err != nil {
		_ = dl.close(err)
		return nil, errors.Wrap(err, "failed to open downloader")
	}

	dls.dls = append(dls.dls, dl)

	dl.wg.Add(1)
	go func() {
		defer dl.wg.Done()
		n, err := dl.download()
		_ = dl.close(err)
		dl.dls.countErrors(n, err)
		if err != nil {
			fs.Errorf(dl.dls.src, "vfs cache: failed to download: %v", err)
		}
		err = dl.dls.kickWaiters()
		if err != nil {
			fs.Errorf(dl.dls.src, "vfs cache: failed to kick waiters: %v", err)
		}
	}()

	return dl, nil
}

// _removeClosed() removes any downloaders which are closed.
//
// Call with the mutex held
func (dls *Downloaders) _removeClosed() {
	newDownloaders := dls.dls[:0]
	for _, dl := range dls.dls {
		if !dl.closed() {
			newDownloaders = append(newDownloaders, dl)
		}
	}
	dls.dls = newDownloaders
}

// Close all running downloaders and return any unfulfilled waiters
// with inErr
func (dls *Downloaders) Close(inErr error) (err error) {
	dls.mu.Lock()
	defer dls.mu.Unlock()
	dls._removeClosed()
	for _, dl := range dls.dls {
		dls.mu.Unlock()
		closeErr := dl.stopAndClose(inErr)
		dls.mu.Lock()
		if closeErr != nil && err == nil {
			err = closeErr
		}
	}
	dls.cancel()
	dls.wg.Wait()
	dls.dls = nil
	dls._dispatchWaiters()
	dls._closeWaiters(inErr)
	return err
}

// Download the range passed in returning when it has been downloaded
// with an error from the downloading goroutine.
func (dls *Downloaders) Download(r ranges.Range) (err error) {
	// defer log.Trace(dls.src, "r=%+v", r)("err=%v", &err)

	dls.mu.Lock()

	errChan := make(chan error)
	waiter := waiter{
		r:       r,
		errChan: errChan,
	}

	err = dls._ensureDownloader(r)
	if err != nil {
		dls.mu.Unlock()
		return err
	}

	dls.waiters = append(dls.waiters, waiter)
	dls.mu.Unlock()
	return <-errChan
}
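
// Note on the handshake above: errChan is unbuffered and the sends in
// _dispatchWaiters and _closeWaiters happen with the mutex held. That
// is safe because the receiving goroutine blocks on <-errChan without
// holding the lock, so every send finds its receiver waiting.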

// close any waiters with the error passed in
//
// call with lock held
func (dls *Downloaders) _closeWaiters(err error) {
	for _, waiter := range dls.waiters {
		waiter.errChan <- err
	}
	dls.waiters = nil
}

// ensure a downloader is running for the range if required. If one
// isn't found then it starts it.
//
// call with lock held
func (dls *Downloaders) _ensureDownloader(r ranges.Range) (err error) {
	// FIXME this window could be a different config var?
	window := int64(fs.Config.BufferSize)

	// We may be reopening a downloader after a failure here or
	// doing a tentative prefetch so check to see that we haven't
	// read some stuff already.
	//
	// Clip r to stuff which needs downloading
	r = dls.item.FindMissing(r)

	// If the range is entirely present then we only need to start a
	// downloader if the window isn't full.
	if r.IsEmpty() {
		// Make a new range which includes the window
		rWindow := r
		if rWindow.Size < window {
			rWindow.Size = window
		}
		// Clip rWindow to stuff which needs downloading
		rWindow = dls.item.FindMissing(rWindow)
		// If rWindow is empty then just return without starting a
		// downloader as there is no data within the window which needs
		// downloading.
		if rWindow.IsEmpty() {
			return nil
		}
		// Start downloading at the start of the unread window
		r.Pos = rWindow.Pos
		// But don't write anything for the moment
		r.Size = 0
	}

	var dl *downloader
	// Look through downloaders to find one in range
	// If there isn't one then start a new one
	dls._removeClosed()
	for _, dl = range dls.dls {
		start, maxOffset := dl.getRange()

		// The downloader's offset to offset+window is the gap
		// in which we would like to re-use this
		// downloader. The downloader will never reach before
		// start, and maxOffset+window is too far away - we'd
		// rather start another downloader.
		// fs.Debugf(nil, "r=%v start=%d, maxOffset=%d, found=%v", r, start, maxOffset, r.Pos >= start && r.Pos < maxOffset+window)
		if r.Pos >= start && r.Pos < maxOffset+window {
			// Found downloader which will soon have our data
			dl.setRange(r)
			return nil
		}
	}
	// Downloader not found so start a new one
	dl, err = dls._newDownloader(r)
	if err != nil {
		dls._countErrors(0, err)
		return errors.Wrap(err, "failed to start downloader")
	}
	return err
}
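
// Worked example for the reuse test above (illustrative numbers, not
// defaults): with window = 16 MiB, a downloader covering start = 0 to
// maxOffset = 8 MiB is reused for a request at r.Pos = 20 MiB, since
// 0 <= 20 MiB < 8 MiB + 16 MiB - it will reach that offset soon
// enough. A request at r.Pos = 30 MiB fails the test and starts a
// fresh downloader rather than waiting for this one to crawl there.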

// EnsureDownloader makes sure a downloader is running for the range
// passed in. If one isn't found then it starts it.
//
// It does not wait for the range to be downloaded
func (dls *Downloaders) EnsureDownloader(r ranges.Range) (err error) {
	dls.mu.Lock()
	defer dls.mu.Unlock()
	return dls._ensureDownloader(r)
}

// _dispatchWaiters() sends any waiters which have completed back to
// their callers.
//
// Call with the mutex held
func (dls *Downloaders) _dispatchWaiters() {
	if len(dls.waiters) == 0 {
		return
	}

	newWaiters := dls.waiters[:0]
	for _, waiter := range dls.waiters {
		if dls.item.HasRange(waiter.r) {
			waiter.errChan <- nil
		} else {
			newWaiters = append(newWaiters, waiter)
		}
	}
	dls.waiters = newWaiters
}

// Send any waiters which have completed back to their callers and make sure
// there is a downloader appropriate for each waiter
func (dls *Downloaders) kickWaiters() (err error) {
	dls.mu.Lock()
	defer dls.mu.Unlock()

	dls._dispatchWaiters()

	if len(dls.waiters) == 0 {
		return nil
	}

	// Make sure each waiter has a downloader
	// This is an O(waiters*downloaders) algorithm
	// However the number of waiters and the number of downloaders
	// are both expected to be small.
	for _, waiter := range dls.waiters {
		err = dls._ensureDownloader(waiter.r)
		if err != nil {
			// Failures here will be retried by the background kicker
			fs.Errorf(dls.src, "vfs cache: restart download failed: %v", err)
		}
	}

	if dls.errorCount > maxErrorCount {
		fs.Errorf(dls.src, "vfs cache: too many errors %d/%d: last error: %v", dls.errorCount, maxErrorCount, dls.lastErr)
		dls._closeWaiters(dls.lastErr)
		return dls.lastErr
	}

	return nil
}
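
// Error handling, summarised: each downloader goroutine reports
// (bytes, err) through countErrors when it finishes. Any download which
// made progress resets the consecutive error count, while more than
// maxErrorCount consecutive failures make kickWaiters give up and fail
// every waiter with the last error instead of restarting downloaders
// forever.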

// Write writes len(p) bytes from p to the underlying data stream. It
// returns the number of bytes written from p (0 <= n <= len(p)) and
// any error encountered that caused the write to stop early. Write
// must return a non-nil error if it returns n < len(p). Write must
// not modify the slice data, even temporarily.
//
// Implementations must not retain p.
func (dl *downloader) Write(p []byte) (n int, err error) {
	// defer log.Trace(dl.dls.src, "p_len=%d", len(p))("n=%d, err=%v", &n, &err)

	// Kick the waiters on exit if any bytes were received
	defer func() {
		if n <= 0 {
			return
		}
		if waitErr := dl.dls.kickWaiters(); waitErr != nil {
			fs.Errorf(dl.dls.src, "vfs cache: download write: failed to kick waiters: %v", waitErr)
			if err == nil {
				err = waitErr
			}
		}
	}()

	dl.mu.Lock()
	defer dl.mu.Unlock()

	// Wait here if we have reached maxOffset until
	// - we are quitting
	// - we get kicked
	// - timeout happens
	if dl.offset >= dl.maxOffset {
		timeout := time.NewTimer(maxDownloaderIdleTime)
		dl.mu.Unlock()
		select {
		case <-dl.quit:
			dl.mu.Lock()
			timeout.Stop()
		case <-dl.kick:
			dl.mu.Lock()
			timeout.Stop()
		case <-timeout.C:
			// stop any future reading
			dl.mu.Lock()
			if !dl.stop {
				fs.Debugf(dl.dls.src, "vfs cache: stopping download thread as it timed out")
				dl._stop()
			}
		}
	}

	n, skipped, err := dl.dls.item.WriteAtNoOverwrite(p, dl.offset)
	if skipped == n {
		dl.skipped += int64(skipped)
	} else {
		dl.skipped = 0
	}
	dl.offset += int64(n)

	// Kill this downloader if it has skipped too many bytes
	if !dl.stop && dl.skipped > maxSkipBytes {
		fs.Debugf(dl.dls.src, "vfs cache: stopping download thread as it has skipped %d bytes", dl.skipped)
		dl._stop()
	}

	// If running without an async buffer then stop now, as
	// StopBuffering has no effect if the Account wasn't buffered,
	// so we need to stop manually now rather than wait for the
	// AsyncReader to stop.
	if dl.stop && !dl.in.HasBuffer() {
		err = asyncreader.ErrorStreamAbandoned
	}
	return n, err
}
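
// Skip accounting, worked example: suppose three successive Writes of
// 64 KiB each land entirely on data which is already present, so
// skipped == n every time. dl.skipped grows to 192 KiB, and once it
// exceeds maxSkipBytes (1 MiB) the downloader stops itself rather than
// keep fetching bytes it cannot use. Any Write which stores at least
// one new byte resets dl.skipped to 0.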

// open the file from offset
//
// should be called on a fresh downloader
func (dl *downloader) open(offset int64) (err error) {
	// defer log.Trace(dl.dls.src, "offset=%d", offset)("err=%v", &err)
	dl.tr = accounting.Stats(dl.dls.ctx).NewTransfer(dl.dls.src)

	size := dl.dls.src.Size()
	if size < 0 {
		// FIXME should just completely download these
		return errors.New("can't open unknown sized file")
	}

	// FIXME hashType needs to ignore when --no-checksum is set too? Which is a VFS flag.
	// var rangeOption *fs.RangeOption
	// if offset > 0 {
	// 	rangeOption = &fs.RangeOption{Start: offset, End: size - 1}
	// }
	// in0, err := operations.NewReOpen(dl.dls.ctx, dl.dls.src, fs.Config.LowLevelRetries, dl.dls.item.c.hashOption, rangeOption)

	in0 := chunkedreader.New(context.TODO(), dl.dls.src, int64(dl.dls.opt.ChunkSize), int64(dl.dls.opt.ChunkSizeLimit))
	_, err = in0.Seek(offset, io.SeekStart)
	if err != nil {
		return errors.Wrap(err, "vfs reader: failed to open source file")
	}
	dl.in = dl.tr.Account(in0).WithBuffer() // account and buffer the transfer

	dl.offset = offset

	// FIXME set mod time
	// FIXME check checksums

	return nil
}
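
// The reader chain assembled above is, schematically:
//
//	src (fs.Object)
//	  -> chunkedreader.New            // ranged reads from the remote in growing chunks
//	  -> tr.Account(...).WithBuffer() // transfer stats plus async read-ahead buffer
//	  -> downloader.Write             // via dl.in.WriteTo(dl) in download()
//
// so every byte fetched from the remote is accounted for and buffered
// before being written into the cache file.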

// close the downloader
func (dl *downloader) close(inErr error) (err error) {
	// defer log.Trace(dl.dls.src, "inErr=%v", err)("err=%v", &err)
	checkErr := func(e error) {
		if e == nil || errors.Cause(e) == asyncreader.ErrorStreamAbandoned {
			return
		}
		err = e
	}
	dl.mu.Lock()
	if dl.in != nil {
		checkErr(dl.in.Close())
		dl.in = nil
	}
	if dl.tr != nil {
		dl.tr.Done(inErr)
		dl.tr = nil
	}
	dl._closed = true
	dl.mu.Unlock()
	return err
}

// closed returns true if the downloader has been closed already
func (dl *downloader) closed() bool {
	dl.mu.Lock()
	defer dl.mu.Unlock()
	return dl._closed
}

// stop the downloader if running
//
// Call with the mutex held
func (dl *downloader) _stop() {
	// defer log.Trace(dl.dls.src, "")("")

	// exit if we have already called _stop
	if dl.stop {
		return
	}
	dl.stop = true

	// Signal quit now to unblock the downloader
	close(dl.quit)

	// stop the downloader by stopping the async reader from buffering
	// any more input. This causes all the stuff in the async
	// buffer (which can be many MB) to be written to the disk
	// before exiting.
	if dl.in != nil {
		dl.in.StopBuffering()
	}
}

// stop the downloader if running then close it with the error passed in
func (dl *downloader) stopAndClose(inErr error) (err error) {
	// Stop the downloader by closing its input
	dl.mu.Lock()
	dl._stop()
	dl.mu.Unlock()
	// wait for the downloader to finish...
	// do this without the mutex as the asyncreader
	// calls back into Write() which needs the lock
	dl.wg.Wait()
	return dl.close(inErr)
}

// download downloads to the local file starting at the current offset
// until maxOffset.
func (dl *downloader) download() (n int64, err error) {
	// defer log.Trace(dl.dls.src, "")("err=%v", &err)
	n, err = dl.in.WriteTo(dl)
	if err != nil && errors.Cause(err) != asyncreader.ErrorStreamAbandoned {
		return n, errors.Wrap(err, "vfs reader: failed to write to cache file")
	}
	return n, nil
}

// setRange makes sure the downloader is downloading the range passed in
func (dl *downloader) setRange(r ranges.Range) {
	dl.mu.Lock()
	maxOffset := r.End()
	if maxOffset > dl.maxOffset {
		dl.maxOffset = maxOffset
		// fs.Debugf(dl.dls.src, "kicking downloader with maxOffset %d", maxOffset)
		select {
		case dl.kick <- struct{}{}:
		default:
		}
	}
	dl.mu.Unlock()
}
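
// The select with a default case above is the standard Go idiom for a
// non-blocking "kick": dl.kick is buffered with capacity 1, so at most
// one wakeup is ever queued and setRange never blocks. In isolation:
//
//	kick := make(chan struct{}, 1)
//	select {
//	case kick <- struct{}{}:
//	default: // a kick is already pending - nothing to do
//	}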

// get the current range this downloader is working on
func (dl *downloader) getRange() (start, maxOffset int64) {
	dl.mu.Lock()
	defer dl.mu.Unlock()
	return dl.start, dl.maxOffset
}