// Package pacer makes pacing and retrying API calls easy
package pacer

import (
	"math/rand"
	"sync"
	"time"

	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/fserrors"
)

// Pacer state
type Pacer struct {
	mu                 sync.Mutex    // Protecting read/writes
	minSleep           time.Duration // minimum sleep time
	maxSleep           time.Duration // maximum sleep time
	decayConstant      uint          // decay constant
	attackConstant     uint          // attack constant
	pacer              chan struct{} // To pace the operations
	sleepTime          time.Duration // Time to sleep for each transaction
	retries            int           // Max number of retries
	maxConnections     int           // Maximum number of concurrent connections
	connTokens         chan struct{} // Connection tokens
	calculatePace      func(bool)    // switchable pacing algorithm - call with mu held
	consecutiveRetries int           // number of consecutive retries
}

// Type is for selecting different pacing algorithms
type Type int

const (
	// DefaultPacer is a truncated exponential attack and decay.
	//
	// On retries the sleep time is doubled; on non errors the sleep
	// time decays according to the decay constant as set with
	// SetDecayConstant.
	//
	// The sleep never goes below that set with SetMinSleep or
	// above that set with SetMaxSleep.
	DefaultPacer = Type(iota)

	// AmazonCloudDrivePacer is a specialised pacer for Amazon Drive
	//
	// It implements a truncated exponential backoff strategy with
	// randomization. Normally operations are paced at the
	// interval set with SetMinSleep. On errors the sleep timer
	// is set to 0..2**retries seconds.
	//
	// See https://developer.amazon.com/public/apis/experience/cloud-drive/content/restful-api-best-practices
	AmazonCloudDrivePacer

	// GoogleDrivePacer is a specialised pacer for Google Drive
	//
	// It implements a truncated exponential backoff strategy with
	// randomization. Normally operations are paced at the
	// interval set with SetMinSleep. On errors the sleep timer
	// is set to (2 ^ n) + random_number_milliseconds seconds
	//
	// See https://developers.google.com/drive/v2/web/handle-errors#exponential-backoff
	GoogleDrivePacer

	// S3Pacer is a specialised pacer for S3
	//
	// It is basically the defaultPacer, but allows the sleep time to go to 0
	// when things are going well.
	S3Pacer
)

// Paced is a function which is called by the Call and CallNoRetry
// methods. It should return a boolean, true if it would like to be
// retried, and an error. This error may be returned as-is or
// wrapped in a RetryError.
type Paced func() (bool, error)
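
// Illustrative sketch (not part of the original source): a typical caller wraps
// each API operation in a Paced closure and hands it to Call. doAPIRequest and
// shouldRetry are hypothetical helpers standing in for backend-specific code.
//
//	pacer := New().SetMinSleep(100 * time.Millisecond).SetPacer(GoogleDrivePacer)
//	err := pacer.Call(func() (bool, error) {
//		err := doAPIRequest()        // hypothetical API call
//		return shouldRetry(err), err // hypothetical retry classifier
//	})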

// New returns a Pacer with sensible defaults
func New() *Pacer {
	p := &Pacer{
		minSleep:       10 * time.Millisecond,
		maxSleep:       2 * time.Second,
		decayConstant:  2,
		attackConstant: 1,
		retries:        fs.Config.LowLevelRetries,
		pacer:          make(chan struct{}, 1),
	}
	p.sleepTime = p.minSleep
	p.SetPacer(DefaultPacer)
	p.SetMaxConnections(fs.Config.Checkers + fs.Config.Transfers)

	// Put the first pacing token in
	p.pacer <- struct{}{}

	return p
}

// SetSleep sets the current sleep time
func (p *Pacer) SetSleep(t time.Duration) *Pacer {
	p.mu.Lock()
	defer p.mu.Unlock()
	p.sleepTime = t
	return p
}

// GetSleep gets the current sleep time
func (p *Pacer) GetSleep() time.Duration {
	p.mu.Lock()
	defer p.mu.Unlock()
	return p.sleepTime
}

// SetMinSleep sets the minimum sleep time for the pacer
func (p *Pacer) SetMinSleep(t time.Duration) *Pacer {
	p.mu.Lock()
	defer p.mu.Unlock()
	p.minSleep = t
	p.sleepTime = p.minSleep
	return p
}

// SetMaxSleep sets the maximum sleep time for the pacer
func (p *Pacer) SetMaxSleep(t time.Duration) *Pacer {
	p.mu.Lock()
	defer p.mu.Unlock()
	p.maxSleep = t
	p.sleepTime = p.minSleep
	return p
}

// SetMaxConnections sets the maximum number of concurrent connections.
// Setting the value to 0 will allow an unlimited number of connections.
// Should not be changed once you have started calling the pacer.
// By default this will be set to fs.Config.Checkers + fs.Config.Transfers.
func (p *Pacer) SetMaxConnections(n int) *Pacer {
	p.mu.Lock()
	defer p.mu.Unlock()
	p.maxConnections = n
	if n <= 0 {
		p.connTokens = nil
	} else {
		p.connTokens = make(chan struct{}, n)
		for i := 0; i < n; i++ {
			p.connTokens <- struct{}{}
		}
	}
	return p
}
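
// Illustrative sketch (not part of the original source): connection tokens cap
// how many calls can sit between beginCall and endCall at once. The numbers
// below are arbitrary examples.
//
//	p := New().SetMaxConnections(4) // at most 4 calls in flight at any moment
//	p.SetMaxConnections(0)          // unlimited; the token check is skipped entirely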

// SetDecayConstant sets the decay constant for the pacer
//
// This is the speed the time falls back to the minimum after errors
// have occurred.
//
// Bigger for slower decay (exponential); 1 halves the sleep time each
// time, 0 goes straight to the minimum.
func (p *Pacer) SetDecayConstant(decay uint) *Pacer {
	p.mu.Lock()
	defer p.mu.Unlock()
	p.decayConstant = decay
	return p
}

// SetAttackConstant sets the attack constant for the pacer
//
// This is the speed the time grows from the minimum after errors have
// occurred.
//
// Bigger for slower attack; 1 doubles the sleep time each time, 0 goes
// straight to the maximum.
func (p *Pacer) SetAttackConstant(attack uint) *Pacer {
	p.mu.Lock()
	defer p.mu.Unlock()
	p.attackConstant = attack
	return p
}
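
// Worked example (not part of the original source), using the formulas in
// defaultPacer below:
//
//	attackConstant 1: sleep = (sleep << 1) / (2 - 1)  -> sleep doubles on each retry
//	attackConstant 2: sleep = (sleep << 2) / (4 - 1)  -> sleep grows by 4/3 on each retry
//	decayConstant  1: sleep = (sleep<<1 - sleep) >> 1 -> sleep halves on each success
//	decayConstant  2: sleep = (sleep<<2 - sleep) >> 2 -> sleep shrinks to 3/4 on each success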

// SetRetries sets the max number of tries for Call
func (p *Pacer) SetRetries(retries int) *Pacer {
	p.mu.Lock()
	defer p.mu.Unlock()
	p.retries = retries
	return p
}

// SetPacer sets the pacing algorithm
//
// It will choose the default algorithm if an incorrect value is
// passed in.
func (p *Pacer) SetPacer(t Type) *Pacer {
	p.mu.Lock()
	defer p.mu.Unlock()
	switch t {
	case AmazonCloudDrivePacer:
		p.calculatePace = p.acdPacer
	case GoogleDrivePacer:
		p.calculatePace = p.drivePacer
	case S3Pacer:
		p.calculatePace = p.s3Pacer
	default:
		p.calculatePace = p.defaultPacer
	}
	return p
}

// Start a call to the API
//
// This must be called as a pair with endCall
//
// This waits for the pacer token
func (p *Pacer) beginCall() {
	// pacer starts with a token in and whenever we take one out
	// XXX ms later we put another in. We could do this with a
	// Ticker more accurately, but then we'd have to work out how
	// not to run it when it wasn't needed
	<-p.pacer
	if p.maxConnections > 0 {
		<-p.connTokens
	}

	p.mu.Lock()
	// Restart the timer
	go func(t time.Duration) {
		// fs.Debugf(f, "New sleep for %v at %v", t, time.Now())
		time.Sleep(t)
		p.pacer <- struct{}{}
	}(p.sleepTime)
	p.mu.Unlock()
}
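
// Illustrative timeline (not part of the original source), assuming sleepTime = 100ms:
//
//	t=0ms    beginCall takes the token and starts a goroutine that sleeps 100ms
//	t=100ms  the goroutine puts the token back; the next beginCall may proceed
//
// Calls are therefore admitted at most once per sleepTime.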

// defaultPacer implements a truncated exponential attack and decay
// pacing algorithm
//
// See the description for DefaultPacer
//
// This should calculate a new sleepTime. It takes a boolean as to
// whether the operation should be retried or not.
//
// Call with p.mu held
func (p *Pacer) defaultPacer(retry bool) {
	oldSleepTime := p.sleepTime
	if retry {
		if p.attackConstant == 0 {
			p.sleepTime = p.maxSleep
		} else {
			p.sleepTime = (p.sleepTime << p.attackConstant) / ((1 << p.attackConstant) - 1)
		}
		if p.sleepTime > p.maxSleep {
			p.sleepTime = p.maxSleep
		}
		if p.sleepTime != oldSleepTime {
			fs.Debugf("pacer", "Rate limited, increasing sleep to %v", p.sleepTime)
		}
	} else {
		p.sleepTime = (p.sleepTime<<p.decayConstant - p.sleepTime) >> p.decayConstant
		if p.sleepTime < p.minSleep {
			p.sleepTime = p.minSleep
		}
		if p.sleepTime != oldSleepTime {
			fs.Debugf("pacer", "Reducing sleep to %v", p.sleepTime)
		}
	}
}
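
// Worked example (not part of the original source), using the defaults from New
// (minSleep 10ms, maxSleep 2s, attackConstant 1, decayConstant 2):
//
//	retries:   10ms -> 20ms -> 40ms -> ... -> 1.28s -> 2s (capped at maxSleep)
//	successes: 2s -> 1.5s -> 1.125s -> ... -> 10ms (floored at minSleep)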

// acdPacer implements a truncated exponential backoff
// strategy with randomization for Amazon Drive
//
// See the description for AmazonCloudDrivePacer
//
// This should calculate a new sleepTime. It takes a boolean as to
// whether the operation should be retried or not.
//
// Call with p.mu held
func (p *Pacer) acdPacer(retry bool) {
	consecutiveRetries := p.consecutiveRetries
	if consecutiveRetries == 0 {
		if p.sleepTime != p.minSleep {
			p.sleepTime = p.minSleep
			fs.Debugf("pacer", "Resetting sleep to minimum %v on success", p.sleepTime)
		}
	} else {
		if consecutiveRetries > 9 {
			consecutiveRetries = 9
		}
		// consecutiveRetries starts at 1 so
		// maxSleep is 2**(consecutiveRetries-1) seconds
		maxSleep := time.Second << uint(consecutiveRetries-1)
		// actual sleep is random from 0..maxSleep
		p.sleepTime = time.Duration(rand.Int63n(int64(maxSleep)))
		if p.sleepTime < p.minSleep {
			p.sleepTime = p.minSleep
		}
		fs.Debugf("pacer", "Rate limited, sleeping for %v (%d consecutive low level retries)", p.sleepTime, p.consecutiveRetries)
	}
}
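
// Worked example (not part of the original source): with n consecutive retries
// the sleep is drawn uniformly from 0..2**(n-1) seconds (n is clipped at 9 and
// the draw is raised to minSleep if it comes out smaller):
//
//	n=1: 0..1s    n=2: 0..2s    n=3: 0..4s    ...    n>=9: 0..256s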

// drivePacer implements a truncated exponential backoff strategy with
// randomization for Google Drive
//
// See the description for GoogleDrivePacer
//
// This should calculate a new sleepTime. It takes a boolean as to
// whether the operation should be retried or not.
//
// Call with p.mu held
func (p *Pacer) drivePacer(retry bool) {
	consecutiveRetries := p.consecutiveRetries
	if consecutiveRetries == 0 {
		if p.sleepTime != p.minSleep {
			p.sleepTime = p.minSleep
			fs.Debugf("pacer", "Resetting sleep to minimum %v on success", p.sleepTime)
		}
	} else {
		if consecutiveRetries > 5 {
			consecutiveRetries = 5
		}
		// consecutiveRetries starts at 1 so go from 1,2,3,4,5,5 => 1,2,4,8,16,16
		// maxSleep is 2**(consecutiveRetries-1) seconds + random milliseconds
		p.sleepTime = time.Second<<uint(consecutiveRetries-1) + time.Duration(rand.Int63n(int64(time.Second)))
		fs.Debugf("pacer", "Rate limited, sleeping for %v (%d consecutive low level retries)", p.sleepTime, p.consecutiveRetries)
	}
}
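
// Worked example (not part of the original source): with n consecutive retries
// the sleep is 2**(n-1) seconds plus a random 0..1s jitter, with n clipped at 5:
//
//	n=1: 1-2s    n=2: 2-3s    n=3: 4-5s    n=4: 8-9s    n>=5: 16-17s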

// s3Pacer implements a pacer compatible with our expectations of S3, where it tries to not
// delay at all between successful calls, but backs off in the default fashion in response
// to any errors.
// The assumption is that errors should be exceedingly rare (S3 seems to have largely solved
// the sort of scalability questions rclone is likely to run into), and in the happy case
// it can handle calls with no delays between them.
//
// Basically defaultPacer, but with some handling of sleepTime going to/from 0ms.
// Ignores minSleep on the way down (the sleep can decay to 0); minSleep is only
// used as the first step back up from 0.
//
// Call with p.mu held
func (p *Pacer) s3Pacer(retry bool) {
	oldSleepTime := p.sleepTime
	if retry {
		if p.attackConstant == 0 {
			p.sleepTime = p.maxSleep
		} else {
			if p.sleepTime == 0 {
				p.sleepTime = p.minSleep
			} else {
				p.sleepTime = (p.sleepTime << p.attackConstant) / ((1 << p.attackConstant) - 1)
			}
		}
		if p.sleepTime > p.maxSleep {
			p.sleepTime = p.maxSleep
		}
		if p.sleepTime != oldSleepTime {
			fs.Debugf("pacer", "Rate limited, increasing sleep to %v", p.sleepTime)
		}
	} else {
		p.sleepTime = (p.sleepTime<<p.decayConstant - p.sleepTime) >> p.decayConstant
		if p.sleepTime < p.minSleep {
			p.sleepTime = 0
		}
		if p.sleepTime != oldSleepTime {
			fs.Debugf("pacer", "Reducing sleep to %v", p.sleepTime)
		}
	}
}
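
// Worked example (not part of the original source), with minSleep 10ms,
// attackConstant 1 and decayConstant 2: while healthy the sleep decays
// 40ms -> 30ms -> 22.5ms -> ... and drops to 0 once it falls below 10ms;
// after an error it steps 0 -> 10ms -> 20ms -> 40ms -> ... up to maxSleep.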

// endCall implements the pacing algorithm
//
// This should calculate a new sleepTime. It takes a boolean as to
// whether the operation should be retried or not.
func (p *Pacer) endCall(retry bool) {
	if p.maxConnections > 0 {
		p.connTokens <- struct{}{}
	}
	p.mu.Lock()
	if retry {
		p.consecutiveRetries++
	} else {
		p.consecutiveRetries = 0
	}
	p.calculatePace(retry)
	p.mu.Unlock()
}

// call implements Call but with settable retries
func (p *Pacer) call(fn Paced, retries int) (err error) {
	var retry bool
	for i := 1; i <= retries; i++ {
		p.beginCall()
		retry, err = fn()
		p.endCall(retry)
		if !retry {
			break
		}
		fs.Debugf("pacer", "low level retry %d/%d (error %v)", i, retries, err)
	}
	if retry {
		err = fserrors.RetryError(err)
	}
	return err
}

// Call paces the remote operations to not exceed the limits and retry
// on rate limit exceeded
//
// This calls fn, expecting it to return a retry flag and an
// error. This error may be returned wrapped in a RetryError if the
// number of retries is exceeded.
func (p *Pacer) Call(fn Paced) (err error) {
	p.mu.Lock()
	retries := p.retries
	p.mu.Unlock()
	return p.call(fn, retries)
}

// CallNoRetry paces the remote operations to not exceed the limits
// and return a retry error on rate limit exceeded
//
// This calls fn and wraps the output in a RetryError if it would like
// it to be retried
func (p *Pacer) CallNoRetry(fn Paced) error {
	return p.call(fn, 1)
}
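
// Illustrative sketch (not part of the original source): CallNoRetry runs fn
// exactly once; if fn asked to be retried the error comes back wrapped with
// fserrors.RetryError so an outer loop can decide what to do. doAPIRequest and
// shouldRetry are hypothetical helpers.
//
//	err := pacer.CallNoRetry(func() (bool, error) {
//		err := doAPIRequest()
//		return shouldRetry(err), err
//	})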