Address review comments

Michael Eischer 2023-06-08 20:24:21 +02:00
parent 0372c7ef04
commit 0b908bb1fb
4 changed files with 42 additions and 67 deletions

View file

@@ -1,13 +1,6 @@
-# The first line must start with Bugfix:, Enhancement: or Change:,
-# including the colon. Use present use. Remove lines starting with '#'
-# from this template.
 Enhancement: Improve the ETA displayed during backup
 
-# Describe the problem in the past tense, the new behavior in the present
-# tense. Mention the affected commands, backends, operating systems, etc.
-# Focus on user-facing behavior, not the implementation.
-
-Restic's backup command displayed an ETA that did not adapt when the rate
+Restic's `backup` command displayed an ETA that did not adapt when the rate
 of progress made during the backup changed during the course of the
 backup. Restic now uses recent progress when computing the ETA. It is
 important to realize that the estimate may still be wrong, because restic
@@ -15,3 +8,4 @@ cannot predict the future, but the hope is that the ETA will be more
 accurate in most cases.
 
 https://github.com/restic/restic/issues/3397
+https://github.com/restic/restic/pull/3563
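To make the changelog entry concrete: the old ETA was effectively derived from the average throughput since the start of the backup, while the new estimator uses the throughput observed recently. The snippet below is a minimal, self-contained sketch of that difference; the function names and the numbers in main are invented for illustration and are not restic's code.

package main

import (
    "fmt"
    "time"
)

// etaFromOverallRate estimates the remaining time from the average rate since
// the start of the backup; a long slow start keeps skewing the estimate.
func etaFromOverallRate(processed, total uint64, elapsed time.Duration) time.Duration {
    rate := float64(processed) / elapsed.Seconds() // bytes per second
    return time.Duration(float64(total-processed)/rate) * time.Second
}

// etaFromRecentRate estimates the remaining time from the throughput observed
// over a recent window only, so the estimate adapts when the rate changes.
func etaFromRecentRate(processed, total, recentBytes uint64, window time.Duration) time.Duration {
    rate := float64(recentBytes) / window.Seconds()
    return time.Duration(float64(total-processed)/rate) * time.Second
}

func main() {
    // 10 GiB backup: one hour at ~1 MiB/s, then one minute at ~50 MiB/s.
    const total = 10 << 30
    processed := uint64(3600*1<<20 + 60*50<<20)
    fmt.Println("ETA from overall rate:", etaFromOverallRate(processed, total, 61*time.Minute))
    fmt.Println("ETA from recent rate: ", etaFromRecentRate(processed, total, 60*50<<20, time.Minute))
}

With these made-up numbers the overall-rate estimate is still dominated by the slow first hour, while the recent-rate estimate reflects the current throughput.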

View file

@@ -76,7 +76,8 @@ func NewProgress(printer ProgressPrinter, interval time.Duration) *Progress {
     var secondsRemaining uint64
     if p.scanFinished {
         rate := p.estimator.rate(time.Now())
-        if rate <= 0 {
+        tooSlowCutoff := 1024.
+        if rate <= tooSlowCutoff {
             secondsRemaining = 0
         } else {
             todo := float64(p.total.Bytes - p.processed.Bytes)
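For context, the new tooSlowCutoff treats any estimated rate at or below 1024 bytes per second as too slow to produce a meaningful ETA, so the remaining time is reported as unknown (zero) instead of a huge number. A rough sketch of the surrounding computation, with a hypothetical helper name and simplified details that are not taken from restic's source:

// Sketch only: computeSecondsRemaining is a hypothetical helper, not restic's API.
func computeSecondsRemaining(totalBytes, processedBytes uint64, rate float64) uint64 {
    const tooSlowCutoff = 1024. // below ~1 KiB/s an ETA would be absurdly large
    if rate <= tooSlowCutoff {
        return 0 // shown as "no estimate" rather than a multi-year ETA
    }
    todo := float64(totalBytes - processedBytes)
    return uint64(todo / rate)
}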

View file

@@ -18,7 +18,7 @@ type rateEstimator struct {
     totalBytes uint64
 }
 
-// newRateEstimator returns an esimator initialized to a presumed start time.
+// newRateEstimator returns an estimator initialized to a presumed start time.
 func newRateEstimator(start time.Time) *rateEstimator {
     return &rateEstimator{buckets: list.New(), start: start}
 }
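The rateEstimator touched here tracks throughput in fixed-width time buckets kept in a container/list and derives the rate from the buckets that fall inside a recent window. The sketch below illustrates that general technique only; the bucket width, window length, field names, and edge-case handling are assumptions, not restic's exact implementation.

package sketch

import (
    "container/list"
    "time"
)

const (
    bucketWidth = time.Second     // assumed width of one bucket
    windowWidth = 2 * time.Minute // assumed length of the recent window
)

type bucket struct {
    totalBytes uint64
    end        time.Time // exclusive end of this bucket's time span
}

type estimator struct {
    buckets    *list.List // oldest bucket at the front
    start      time.Time
    totalBytes uint64 // sum over the buckets currently in the list
}

func newEstimator(start time.Time) *estimator {
    return &estimator{buckets: list.New(), start: start}
}

// recordBytes adds bytes to the bucket covering now, opening a new bucket
// when now lies past the end of the most recent one.
func (e *estimator) recordBytes(now time.Time, bytes uint64) {
    var b *bucket
    if last := e.buckets.Back(); last != nil {
        if lb := last.Value.(*bucket); now.Before(lb.end) {
            b = lb
        }
    }
    if b == nil {
        b = &bucket{end: now.Add(bucketWidth)}
        e.buckets.PushBack(b)
    }
    b.totalBytes += bytes
    e.totalBytes += bytes
}

// rate drops buckets that ended before the window and returns bytes per
// second over the remaining, recent history.
func (e *estimator) rate(now time.Time) float64 {
    cutoff := now.Add(-windowWidth)
    for e.buckets.Len() > 0 {
        first := e.buckets.Front().Value.(*bucket)
        if !first.end.Before(cutoff) {
            break
        }
        e.totalBytes -= first.totalBytes
        e.buckets.Remove(e.buckets.Front())
    }
    window := now.Sub(e.start)
    if window > windowWidth {
        window = windowWidth
    }
    if window <= 0 {
        return 0
    }
    return float64(e.totalBytes) / window.Seconds()
}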

View file

@@ -1,9 +1,12 @@
 package backup
 
 import (
+    "fmt"
     "math"
     "testing"
     "time"
+
+    rtest "github.com/restic/restic/internal/test"
 )
 
 const float64EqualityThreshold = 1e-6
@@ -19,17 +22,13 @@ func TestEstimatorDefault(t *testing.T) {
     var start time.Time
     e := newRateEstimator(start)
     r := e.rate(start)
-    if math.IsNaN(r) || r != 0 {
-        t.Fatalf("e.Rate == %v, want zero", r)
-    }
+    rtest.Assert(t, r == 0, "e.Rate == %v, want zero", r)
     r = e.rate(start.Add(time.Hour))
-    if math.IsNaN(r) || r != 0 {
-        t.Fatalf("e.Rate == %v, want zero", r)
-    }
+    rtest.Assert(t, r == 0, "e.Rate == %v, want zero", r)
 }
 
 func TestEstimatorSimple(t *testing.T) {
-    var when time.Time
+    var start time.Time
     type testcase struct {
         bytes uint64
         when  time.Duration
@@ -43,12 +42,13 @@ func TestEstimatorSimple(t *testing.T) {
         {60, time.Minute, 1},
     }
 
     for _, c := range cases {
-        e := newRateEstimator(when)
-        e.recordBytes(when.Add(time.Second), c.bytes)
-        rate := e.rate(when.Add(c.when))
-        if !almostEqual(rate, c.rate) {
-            t.Fatalf("e.Rate == %v, want %v (testcase %+v)", rate, c.rate, c)
-        }
+        name := fmt.Sprintf("%+v", c)
+        t.Run(name, func(t *testing.T) {
+            e := newRateEstimator(start)
+            e.recordBytes(start.Add(time.Second), c.bytes)
+            rate := e.rate(start.Add(c.when))
+            rtest.Assert(t, almostEqual(rate, c.rate), "e.Rate == %v, want %v", rate, c.rate)
+        })
     }
 }
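The rewritten loops above use the common table-driven subtest pattern: each case runs under t.Run with a descriptive name, so a failure is reported per case and the remaining cases still execute. A minimal, generic illustration of the pattern (the double function is invented for the example, it is not part of restic):

package example

import (
    "fmt"
    "testing"
)

func double(x int) int { return 2 * x }

func TestDouble(t *testing.T) {
    cases := []struct{ in, want int }{
        {1, 2},
        {3, 6},
    }
    for _, c := range cases {
        // Name each subtest after its inputs, as the restic test does with %+v.
        t.Run(fmt.Sprintf("%+v", c), func(t *testing.T) {
            if got := double(c.in); got != c.want {
                t.Fatalf("double(%d) == %d, want %d", c.in, got, c.want)
            }
        })
    }
}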
@@ -60,42 +60,27 @@ func TestBucketWidth(t *testing.T) {
     e := newRateEstimator(when)
     e.recordBytes(when, 1)
     e.recordBytes(when.Add(bucketWidth-time.Nanosecond), 1)
-    if e.buckets.Len() != 1 {
-        t.Fatalf("e.buckets.Len() is %d, want 1", e.buckets.Len())
-    }
+    rtest.Assert(t, e.buckets.Len() == 1, "e.buckets.Len() is %d, want 1", e.buckets.Len())
 
     b := e.buckets.Back().Value.(*rateBucket)
-    if b.totalBytes != 2 {
-        t.Fatalf("b.totalBytes is %d, want 2", b.totalBytes)
-    }
-    if b.end != when.Add(bucketWidth) {
-        t.Fatalf("b.end is %v, want %v", b.end, when.Add(bucketWidth))
-    }
+    rtest.Assert(t, b.totalBytes == 2, "b.totalBytes is %d, want 2", b.totalBytes)
+    rtest.Assert(t, b.end == when.Add(bucketWidth), "b.end is %v, want %v", b.end, when.Add(bucketWidth))
 
     // Recording a byte outside the bucket width causes another bucket.
     e.recordBytes(when.Add(bucketWidth), 1)
-    if e.buckets.Len() != 2 {
-        t.Fatalf("e.buckets.Len() is %d, want 2", e.buckets.Len())
-    }
+    rtest.Assert(t, e.buckets.Len() == 2, "e.buckets.Len() is %d, want 2", e.buckets.Len())
 
     b = e.buckets.Back().Value.(*rateBucket)
-    if b.totalBytes != 1 {
-        t.Fatalf("b.totalBytes is %d, want 1", b.totalBytes)
-    }
-    if b.end != when.Add(2*bucketWidth) {
-        t.Fatalf("b.end is %v, want %v", b.end, when.Add(bucketWidth))
-    }
+    rtest.Assert(t, b.totalBytes == 1, "b.totalBytes is %d, want 1", b.totalBytes)
+    rtest.Assert(t, b.end == when.Add(2*bucketWidth), "b.end is %v, want %v", b.end, when.Add(bucketWidth))
 
     // Recording a byte after a longer delay creates a sparse bucket list.
     e.recordBytes(when.Add(time.Hour+time.Millisecond), 7)
-    if e.buckets.Len() != 3 {
-        t.Fatalf("e.buckets.Len() is %d, want 3", e.buckets.Len())
-    }
+    rtest.Assert(t, e.buckets.Len() == 3, "e.buckets.Len() is %d, want 3", e.buckets.Len())
 
     b = e.buckets.Back().Value.(*rateBucket)
-    if b.totalBytes != 7 {
-        t.Fatalf("b.totalBytes is %d, want 7", b.totalBytes)
-    }
-    if b.end != when.Add(time.Hour+time.Millisecond+time.Second) {
-        t.Fatalf("b.end is %v, want %v", b.end, when.Add(time.Hour+time.Millisecond+time.Second))
-    }
+    rtest.Assert(t, b.totalBytes == 7, "b.totalBytes is %d, want 7", b.totalBytes)
+    rtest.Equals(t, when.Add(time.Hour+time.Millisecond+time.Second), b.end)
 }
 
 type chunk struct {
@@ -103,17 +88,12 @@ type chunk struct {
     bytes uint64 // byte count (every second)
 }
 
-func applyChunk(c chunk, t time.Time, e *rateEstimator) time.Time {
-    for i := uint64(0); i < c.repetitions; i++ {
-        e.recordBytes(t, c.bytes)
-        t = t.Add(time.Second)
-    }
-    return t
-}
-
 func applyChunks(chunks []chunk, t time.Time, e *rateEstimator) time.Time {
     for _, c := range chunks {
-        t = applyChunk(c, t, e)
+        for i := uint64(0); i < c.repetitions; i++ {
+            e.recordBytes(t, c.bytes)
+            t = t.Add(time.Second)
+        }
     }
     return t
 }
@@ -221,13 +201,13 @@ func TestEstimatorResponsiveness(t *testing.T) {
         },
     }
 
-    for i, c := range cases {
-        var w time.Time
-        e := newRateEstimator(w)
-        w = applyChunks(c.chunks, w, e)
-        r := e.rate(w)
-        if !almostEqual(r, c.rate) {
-            t.Fatalf("e.Rate == %f, want %f (testcase %d %+v)", r, c.rate, i, c)
-        }
+    for _, c := range cases {
+        t.Run(c.description, func(t *testing.T) {
+            var w time.Time
+            e := newRateEstimator(w)
+            w = applyChunks(c.chunks, w, e)
+            r := e.rate(w)
+            rtest.Assert(t, almostEqual(r, c.rate), "e.Rate == %f, want %f", r, c.rate)
+        })
     }
 }