bisync: use global --retries and --retries-sleep flags instead of overriding
parent 407a0f3733
commit f4c058e13e
2 changed files with 16 additions and 18 deletions
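A minimal sketch of the pattern this commit adopts, not code from the commit itself: bisync drops its own Retries/RetriesInterval options and reads the retry settings from the global config instead, so the global --retries and --retries-sleep flags apply. fs.GetConfig and the Retries/RetriesInterval field names are taken from the diff below; the surrounding program is purely illustrative.

    package main

    import (
        "context"
        "fmt"

        "github.com/rclone/rclone/fs"
    )

    func main() {
        // fs.GetConfig returns the global fs.ConfigInfo, which carries the
        // values of the global --retries and --retries-sleep flags.
        ci := fs.GetConfig(context.Background())
        fmt.Printf("retries=%d retries-sleep=%v\n", ci.Retries, ci.RetriesInterval)
    }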
@@ -51,8 +51,6 @@ type Options struct {
 	Resilient       bool
 	Recover         bool
 	TestFn          TestFunc // test-only option, for mocking errors
-	Retries         int
-	RetriesInterval time.Duration
 	Compare         CompareOpt
 	CompareFlag     string
 	DebugName       string
@@ -119,7 +117,6 @@ func (x *CheckSyncMode) Type() string {
 var Opt Options
 
 func init() {
-	Opt.Retries = 3
 	Opt.MaxLock = 0
 	cmd.Root.AddCommand(commandDefinition)
 	cmdFlags := commandDefinition.Flags()
@@ -144,8 +141,6 @@ func init() {
 	flags.BoolVarP(cmdFlags, &Opt.IgnoreListingChecksum, "ignore-listing-checksum", "", Opt.IgnoreListingChecksum, "Do not use checksums for listings (add --ignore-checksum to additionally skip post-copy checksum checks)", "")
 	flags.BoolVarP(cmdFlags, &Opt.Resilient, "resilient", "", Opt.Resilient, "Allow future runs to retry after certain less-serious errors, instead of requiring --resync. Use at your own risk!", "")
 	flags.BoolVarP(cmdFlags, &Opt.Recover, "recover", "", Opt.Recover, "Automatically recover from interruptions without requiring --resync.", "")
-	flags.IntVarP(cmdFlags, &Opt.Retries, "retries", "", Opt.Retries, "Retry operations this many times if they fail (requires --resilient).", "")
-	flags.DurationVarP(cmdFlags, &Opt.RetriesInterval, "retries-sleep", "", 0, "Interval between retrying operations if they fail, e.g. 500ms, 60s, 5m (0 to disable)", "")
 	flags.StringVarP(cmdFlags, &Opt.CompareFlag, "compare", "", Opt.CompareFlag, "Comma-separated list of bisync-specific compare options ex. 'size,modtime,checksum' (default: 'size,modtime')", "")
 	flags.BoolVarP(cmdFlags, &Opt.Compare.NoSlowHash, "no-slow-hash", "", Opt.Compare.NoSlowHash, "Ignore listing checksums only on backends where they are slow", "")
 	flags.BoolVarP(cmdFlags, &Opt.Compare.SlowHashSyncOnly, "slow-hash-sync-only", "", Opt.Compare.SlowHashSyncOnly, "Ignore slow checksums for listings and deltas, but still consider them during sync calls.", "")
@@ -51,13 +51,15 @@ func (rs *ResultsSlice) has(name string) bool {
 	return false
 }
 
-var logger = operations.NewLoggerOpt()
-var lock mutex.Mutex
-var once mutex.Once
-var ignoreListingChecksum bool
-var ignoreListingModtime bool
-var hashTypes map[string]hash.Type
-var queueCI *fs.ConfigInfo
+var (
+	logger                = operations.NewLoggerOpt()
+	lock                  mutex.Mutex
+	once                  mutex.Once
+	ignoreListingChecksum bool
+	ignoreListingModtime  bool
+	hashTypes             map[string]hash.Type
+	queueCI               *fs.ConfigInfo
+)
 
 // allows us to get the right hashtype during the LoggerFn without knowing whether it's Path1/Path2
 func getHashType(fname string) hash.Type {
@@ -262,9 +264,10 @@ func (b *bisyncRun) fastCopy(ctx context.Context, fsrc, fdst fs.Fs, files bilib.
 }
 
 func (b *bisyncRun) retryFastCopy(ctx context.Context, fsrc, fdst fs.Fs, files bilib.Names, queueName string, results []Results, err error) ([]Results, error) {
-	if err != nil && b.opt.Resilient && !b.InGracefulShutdown && b.opt.Retries > 1 {
-		for tries := 1; tries <= b.opt.Retries; tries++ {
-			fs.Logf(queueName, Color(terminal.YellowFg, "Received error: %v - retrying as --resilient is set. Retry %d/%d"), err, tries, b.opt.Retries)
+	ci := fs.GetConfig(ctx)
+	if err != nil && b.opt.Resilient && !b.InGracefulShutdown && ci.Retries > 1 {
+		for tries := 1; tries <= ci.Retries; tries++ {
+			fs.Logf(queueName, Color(terminal.YellowFg, "Received error: %v - retrying as --resilient is set. Retry %d/%d"), err, tries, ci.Retries)
 			accounting.GlobalStats().ResetErrors()
 			if retryAfter := accounting.GlobalStats().RetryAfter(); !retryAfter.IsZero() {
 				d := time.Until(retryAfter)
@@ -273,8 +276,8 @@ func (b *bisyncRun) retryFastCopy(ctx context.Context, fsrc, fdst fs.Fs, files b
 				time.Sleep(d)
 			}
 		}
-		if b.opt.RetriesInterval > 0 {
-			naptime(b.opt.RetriesInterval)
+		if ci.RetriesInterval > 0 {
+			naptime(ci.RetriesInterval)
 		}
 		results, err = b.fastCopy(ctx, fsrc, fdst, files, queueName)
 		if err == nil || b.InGracefulShutdown {
@@ -313,7 +316,7 @@ func (b *bisyncRun) syncEmptyDirs(ctx context.Context, dst fs.Fs, candidates bil
 
 	for _, s := range candidatesList {
 		var direrr error
-		if dirsList.has(s) { //make sure it's a dir, not a file
+		if dirsList.has(s) { // make sure it's a dir, not a file
 			r := Results{}
 			r.Name = s
 			r.Size = -1
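For illustration only, a self-contained sketch of the retry shape retryFastCopy now follows: the attempt count and the sleep between attempts are driven by the global config rather than by bisync-specific options. The helper retryWithGlobalConfig and the fake flaky operation are invented for this example; only the use of ci.Retries and ci.RetriesInterval mirrors the diff above.

    package main

    import (
        "context"
        "errors"
        "fmt"
        "time"

        "github.com/rclone/rclone/fs"
    )

    // retryWithGlobalConfig retries op up to ci.Retries times, sleeping
    // ci.RetriesInterval between attempts (hypothetical helper).
    func retryWithGlobalConfig(ctx context.Context, op func() error) error {
        ci := fs.GetConfig(ctx)
        err := op()
        for tries := 1; err != nil && tries <= ci.Retries; tries++ {
            fs.Logf(nil, "received error: %v - retry %d/%d", err, tries, ci.Retries)
            if ci.RetriesInterval > 0 {
                time.Sleep(time.Duration(ci.RetriesInterval))
            }
            err = op()
        }
        return err
    }

    func main() {
        attempts := 0
        err := retryWithGlobalConfig(context.Background(), func() error {
            attempts++
            if attempts < 2 {
                return errors.New("transient failure")
            }
            return nil
        })
        fmt.Println("attempts:", attempts, "err:", err)
    }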