forked from TrueCloudLab/restic
Merge pull request #2661 from creativeprojects/issue-1734
"Everything is ok" message after retrying
commit 1b233c4e2e
4 changed files with 114 additions and 11 deletions

changelog/unreleased/issue-1734 (new file, 16 lines added)
@@ -0,0 +1,16 @@
+Enhancement: "Everything is ok" message after retrying
+
+When a recoverable error is encountered, restic displays a warning message
+saying it's retrying, like this one:
+
+Save(<data/956b9ced99>) returned error, retrying after 357.131936ms: CreateBlockBlobFromReader: Put https://example.blob.core.windows.net/restic/data/95/956b9ced99...aac: write tcp 192.168.11.18:51620->1.2.3.4:443: write: connection reset by peer
+
+This message can be a little confusing because it seems like there was an
+error, but we're not sure if it was actually fixed by retrying.
+
+restic is now displaying a confirmation that the action was successful after retrying:
+
+Save(<data/956b9ced99>) operation successful after 1 retries
+
+https://github.com/restic/restic/issues/1734
+https://github.com/restic/restic/pull/2661
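
For context, here is a minimal, self-contained sketch, not part of this commit, of the behaviour the changelog entry describes. It uses backoff.RetryNotify from github.com/cenkalti/backoff/v4 (the library restic already uses for retries): the notify callback reports every failed attempt, and a manual counter prints a confirmation once the operation finally succeeds after at least one retry, which is what the new helper introduced below automates. The name flakyOperation and the exact messages are invented for illustration.

package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/cenkalti/backoff/v4"
)

func main() {
	attempts := 0
	// flakyOperation stands in for a backend call; it fails twice, then succeeds.
	flakyOperation := func() error {
		attempts++
		if attempts <= 2 {
			return errors.New("connection reset by peer")
		}
		return nil
	}

	// report mirrors the existing "returned error, retrying after ..." warning.
	report := func(err error, d time.Duration) {
		fmt.Printf("Save(<data/...>) returned error, retrying after %v: %v\n", d, err)
	}

	// Count failures so a confirmation can be printed once the operation
	// eventually succeeds, as the new retryNotifyErrorWithSuccess helper does.
	retries := 0
	wrapped := func() error {
		err := flakyOperation()
		if err != nil {
			retries++
		} else if retries > 0 {
			fmt.Printf("Save(<data/...>) operation successful after %d retries\n", retries)
		}
		return err
	}

	if err := backoff.RetryNotify(wrapped, &backoff.ZeroBackOff{}, report); err != nil {
		fmt.Println("giving up:", err)
	}
}

Running this prints two retry warnings followed by a single confirmation, the same sequence of messages shown in the changelog entry above.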

@@ -439,9 +439,13 @@ func OpenRepository(opts GlobalOptions) (*repository.Repository, error) {
 		return nil, err
 	}
 
-	be = backend.NewRetryBackend(be, 10, func(msg string, err error, d time.Duration) {
+	report := func(msg string, err error, d time.Duration) {
 		Warnf("%v returned error, retrying after %v: %v\n", msg, d, err)
-	})
+	}
+	success := func(msg string, retries int) {
+		Warnf("%v operation successful after %d retries\n", msg, retries)
+	}
+	be = backend.NewRetryBackend(be, 10, report, success)
 
 	// wrap backend if a test specified a hook
 	if opts.backendTestHook != nil {

@@ -17,6 +17,7 @@ type RetryBackend struct {
 	restic.Backend
 	MaxTries int
 	Report   func(string, error, time.Duration)
+	Success  func(string, int)
 }
 
 // statically ensure that RetryBackend implements restic.Backend.

@@ -24,14 +25,36 @@ var _ restic.Backend = &RetryBackend{}
 
 // NewRetryBackend wraps be with a backend that retries operations after a
 // backoff. report is called with a description and the error, if one occurred.
-func NewRetryBackend(be restic.Backend, maxTries int, report func(string, error, time.Duration)) *RetryBackend {
+// success is called with the number of retries before a successful operation
+// (it is not called if it succeeded on the first try)
+func NewRetryBackend(be restic.Backend, maxTries int, report func(string, error, time.Duration), success func(string, int)) *RetryBackend {
 	return &RetryBackend{
 		Backend:  be,
 		MaxTries: maxTries,
 		Report:   report,
+		Success:  success,
 	}
 }
 
+// retryNotifyErrorWithSuccess is an extension of backoff.RetryNotify with notification of success after an error.
+// success is NOT notified on the first run of operation (only after an error).
+func retryNotifyErrorWithSuccess(operation backoff.Operation, b backoff.BackOff, notify backoff.Notify, success func(retries int)) error {
+	if success == nil {
+		return backoff.RetryNotify(operation, b, notify)
+	}
+	retries := 0
+	operationWrapper := func() error {
+		err := operation()
+		if err != nil {
+			retries++
+		} else if retries > 0 {
+			success(retries)
+		}
+		return err
+	}
+	return backoff.RetryNotify(operationWrapper, b, notify)
+}
+
 func (be *RetryBackend) retry(ctx context.Context, msg string, f func() error) error {
 	// Don't do anything when called with an already cancelled context. There would be
 	// no retries in that case either, so be consistent and abort always.

@@ -43,13 +66,18 @@ func (be *RetryBackend) retry(ctx context.Context, msg string, f func() error) error {
 		return ctx.Err()
 	}
 
-	err := backoff.RetryNotify(f,
+	err := retryNotifyErrorWithSuccess(f,
 		backoff.WithContext(backoff.WithMaxRetries(backoff.NewExponentialBackOff(), uint64(be.MaxTries)), ctx),
 		func(err error, d time.Duration) {
 			if be.Report != nil {
 				be.Report(msg, err, d)
 			}
 		},
+		func(retries int) {
+			if be.Success != nil {
+				be.Success(msg, retries)
+			}
+		},
 	)
 
 	return err
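
Both callbacks stay optional by design: retryNotifyErrorWithSuccess falls back to plain backoff.RetryNotify when success is nil, and retry() only invokes be.Report and be.Success when they are non-nil, so callers that want no output (like the tests below) can pass nil for either argument. As a rough usage sketch, hypothetical and not part of the commit, written as if it lived in the same package as the helper (presumably internal/backend at this commit): any func() error can be wrapped so that a confirmation is printed only when it succeeds after at least one failed attempt. The function name retryWithSuccessNotice and the messages are invented.

package backend

import (
	"fmt"
	"time"

	"github.com/cenkalti/backoff/v4"
)

// retryWithSuccessNotice retries op with no delay between attempts, warns on
// every failure, and confirms success only if op needed at least one retry.
func retryWithSuccessNotice(op func() error) error {
	notify := func(err error, d time.Duration) {
		fmt.Printf("operation returned error, retrying after %v: %v\n", d, err)
	}
	success := func(retries int) {
		fmt.Printf("operation successful after %d retries\n", retries)
	}
	return retryNotifyErrorWithSuccess(op, &backoff.ZeroBackOff{}, notify, success)
}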

@@ -6,7 +6,9 @@ import (
 	"io"
 	"io/ioutil"
 	"testing"
+	"time"
 
+	"github.com/cenkalti/backoff/v4"
 	"github.com/restic/restic/internal/backend/mock"
 	"github.com/restic/restic/internal/errors"
 	"github.com/restic/restic/internal/restic"

@@ -33,7 +35,7 @@ func TestBackendSaveRetry(t *testing.T) {
 		},
 	}
 
-	retryBackend := NewRetryBackend(be, 10, nil)
+	retryBackend := NewRetryBackend(be, 10, nil, nil)
 
 	data := test.Random(23, 5*1024*1024+11241)
 	err := retryBackend.Save(context.TODO(), restic.Handle{}, restic.NewByteReader(data, be.Hasher()))

@@ -68,7 +70,7 @@ func TestBackendSaveRetryAtomic(t *testing.T) {
 		HasAtomicReplaceFn: func() bool { return true },
 	}
 
-	retryBackend := NewRetryBackend(be, 10, nil)
+	retryBackend := NewRetryBackend(be, 10, nil, nil)
 
 	data := test.Random(23, 5*1024*1024+11241)
 	err := retryBackend.Save(context.TODO(), restic.Handle{}, restic.NewByteReader(data, be.Hasher()))

@@ -101,7 +103,7 @@ func TestBackendListRetry(t *testing.T) {
 		},
 	}
 
-	retryBackend := NewRetryBackend(be, 10, nil)
+	retryBackend := NewRetryBackend(be, 10, nil, nil)
 
 	var listed []string
 	err := retryBackend.List(context.TODO(), restic.PackFile, func(fi restic.FileInfo) error {

@@ -130,7 +132,7 @@ func TestBackendListRetryErrorFn(t *testing.T) {
 		},
 	}
 
-	retryBackend := NewRetryBackend(be, 10, nil)
+	retryBackend := NewRetryBackend(be, 10, nil, nil)
 
 	var ErrTest = errors.New("test error")
 

@@ -186,7 +188,7 @@ func TestBackendListRetryErrorBackend(t *testing.T) {
 	}
 
 	const maxRetries = 2
-	retryBackend := NewRetryBackend(be, maxRetries, nil)
+	retryBackend := NewRetryBackend(be, maxRetries, nil, nil)
 
 	var listed []string
 	err := retryBackend.List(context.TODO(), restic.PackFile, func(fi restic.FileInfo) error {

@@ -255,7 +257,7 @@ func TestBackendLoadRetry(t *testing.T) {
 		return failingReader{data: data, limit: limit}, nil
 	}
 
-	retryBackend := NewRetryBackend(be, 10, nil)
+	retryBackend := NewRetryBackend(be, 10, nil, nil)
 
 	var buf []byte
 	err := retryBackend.Load(context.TODO(), restic.Handle{}, 0, 0, func(rd io.Reader) (err error) {

@@ -274,7 +276,7 @@ func assertIsCanceled(t *testing.T, err error) {
 func TestBackendCanceledContext(t *testing.T) {
 	// unimplemented mock backend functions return an error by default
 	// check that we received the expected context canceled error instead
-	retryBackend := NewRetryBackend(mock.NewBackend(), 2, nil)
+	retryBackend := NewRetryBackend(mock.NewBackend(), 2, nil, nil)
 	h := restic.Handle{Type: restic.PackFile, Name: restic.NewRandomID().String()}
 
 	// create an already canceled context

@@ -301,3 +303,56 @@ func TestBackendCanceledContext(t *testing.T) {
 
 	// don't test "Delete" as it is not used by normal code
 }
+
+func TestNotifyWithSuccessIsNotCalled(t *testing.T) {
+	operation := func() error {
+		return nil
+	}
+
+	notify := func(error, time.Duration) {
+		t.Fatal("Notify should not have been called")
+	}
+
+	success := func(retries int) {
+		t.Fatal("Success should not have been called")
+	}
+
+	err := retryNotifyErrorWithSuccess(operation, &backoff.ZeroBackOff{}, notify, success)
+	if err != nil {
+		t.Fatal("retry should not have returned an error")
+	}
+}
+
+func TestNotifyWithSuccessIsCalled(t *testing.T) {
+	operationCalled := 0
+	operation := func() error {
+		operationCalled++
+		if operationCalled <= 2 {
+			return errors.New("expected error in test")
+		}
+		return nil
+	}
+
+	notifyCalled := 0
+	notify := func(error, time.Duration) {
+		notifyCalled++
+	}
+
+	successCalled := 0
+	success := func(retries int) {
+		successCalled++
+	}
+
+	err := retryNotifyErrorWithSuccess(operation, &backoff.ZeroBackOff{}, notify, success)
+	if err != nil {
+		t.Fatal("retry should not have returned an error")
+	}
+
+	if notifyCalled != 2 {
+		t.Fatalf("Notify should have been called 2 times, but was called %d times instead", notifyCalled)
+	}
+
+	if successCalled != 1 {
+		t.Fatalf("Success should have been called only once, but was called %d times instead", successCalled)
+	}
+}