forked from TrueCloudLab/restic
ui/backup: Replace channels with a mutex
The channel-based algorithm had grown quite complicated. This is easier to reason about and likely to be more performant with very many CompleteBlob calls.
This commit is contained in:
parent
201e5c7e74
commit
04216eb9aa
2 changed files with 181 additions and 128 deletions
|
@ -11,6 +11,8 @@ import (
|
|||
"github.com/restic/restic/internal/ui/signals"
|
||||
)
|
||||
|
||||
// A ProgressPrinter can print various progress messages.
|
||||
// It must be safe to call its methods from concurrent goroutines.
|
||||
type ProgressPrinter interface {
|
||||
Update(total, processed Counter, errors uint, currentFiles map[string]struct{}, start time.Time, secs uint64)
|
||||
Error(item string, err error) error
|
||||
|
@ -32,13 +34,7 @@ type Counter struct {
|
|||
Files, Dirs, Bytes uint64
|
||||
}
|
||||
|
||||
type fileWorkerMessage struct {
|
||||
filename string
|
||||
done bool
|
||||
}
|
||||
|
||||
type Summary struct {
|
||||
sync.Mutex
|
||||
Files, Dirs struct {
|
||||
New uint
|
||||
Changed uint
|
||||
|
@ -50,14 +46,18 @@ type Summary struct {
|
|||
|
||||
// Progress reports progress for the `backup` command.
|
||||
type Progress struct {
|
||||
mu sync.Mutex
|
||||
|
||||
interval time.Duration
|
||||
start time.Time
|
||||
|
||||
totalCh chan Counter
|
||||
processedCh chan Counter
|
||||
errCh chan struct{}
|
||||
workerCh chan fileWorkerMessage
|
||||
closed chan struct{}
|
||||
scanStarted, scanFinished bool
|
||||
|
||||
currentFiles map[string]struct{}
|
||||
processed, total Counter
|
||||
errors uint
|
||||
|
||||
closed chan struct{}
|
||||
|
||||
summary Summary
|
||||
printer ProgressPrinter
|
||||
|
@ -68,14 +68,8 @@ func NewProgress(printer ProgressPrinter, interval time.Duration) *Progress {
|
|||
interval: interval,
|
||||
start: time.Now(),
|
||||
|
||||
// use buffered channels for the information used to update the status
|
||||
// the shutdown of the `Run()` method is somewhat racy, but won't affect
|
||||
// the final backup statistics
|
||||
totalCh: make(chan Counter, 100),
|
||||
processedCh: make(chan Counter, 100),
|
||||
errCh: make(chan struct{}),
|
||||
workerCh: make(chan fileWorkerMessage, 100),
|
||||
closed: make(chan struct{}),
|
||||
currentFiles: make(map[string]struct{}),
|
||||
closed: make(chan struct{}),
|
||||
|
||||
printer: printer,
|
||||
}
|
||||
|
@ -84,104 +78,82 @@ func NewProgress(printer ProgressPrinter, interval time.Duration) *Progress {
|
|||
// Run regularly updates the status lines. It should be called in a separate
|
||||
// goroutine.
|
||||
func (p *Progress) Run(ctx context.Context) {
|
||||
var (
|
||||
lastUpdate time.Time
|
||||
total, processed Counter
|
||||
errors uint
|
||||
started bool
|
||||
currentFiles = make(map[string]struct{})
|
||||
secondsRemaining uint64
|
||||
)
|
||||
|
||||
t := time.NewTicker(time.Second)
|
||||
signalsCh := signals.GetProgressChannel()
|
||||
defer t.Stop()
|
||||
defer close(p.closed)
|
||||
// Reset status when finished
|
||||
defer p.printer.Reset()
|
||||
|
||||
var tick <-chan time.Time
|
||||
if p.interval != 0 {
|
||||
t := time.NewTicker(p.interval)
|
||||
defer t.Stop()
|
||||
tick = t.C
|
||||
}
|
||||
|
||||
signalsCh := signals.GetProgressChannel()
|
||||
|
||||
for {
|
||||
forceUpdate := false
|
||||
var now time.Time
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case t, ok := <-p.totalCh:
|
||||
if ok {
|
||||
total = t
|
||||
started = true
|
||||
} else {
|
||||
// scan has finished
|
||||
p.totalCh = nil
|
||||
}
|
||||
case s := <-p.processedCh:
|
||||
processed.Files += s.Files
|
||||
processed.Dirs += s.Dirs
|
||||
processed.Bytes += s.Bytes
|
||||
started = true
|
||||
case <-p.errCh:
|
||||
errors++
|
||||
started = true
|
||||
case m := <-p.workerCh:
|
||||
if m.done {
|
||||
delete(currentFiles, m.filename)
|
||||
} else {
|
||||
currentFiles[m.filename] = struct{}{}
|
||||
}
|
||||
case <-t.C:
|
||||
if !started {
|
||||
continue
|
||||
}
|
||||
|
||||
if p.totalCh == nil {
|
||||
secs := float64(time.Since(p.start) / time.Second)
|
||||
todo := float64(total.Bytes - processed.Bytes)
|
||||
secondsRemaining = uint64(secs / float64(processed.Bytes) * todo)
|
||||
}
|
||||
case now = <-tick:
|
||||
case <-signalsCh:
|
||||
forceUpdate = true
|
||||
now = time.Now()
|
||||
}
|
||||
|
||||
// limit update frequency
|
||||
if !forceUpdate && (p.interval == 0 || time.Since(lastUpdate) < p.interval) {
|
||||
p.mu.Lock()
|
||||
if p.scanStarted {
|
||||
p.mu.Unlock()
|
||||
continue
|
||||
}
|
||||
lastUpdate = time.Now()
|
||||
|
||||
p.printer.Update(total, processed, errors, currentFiles, p.start, secondsRemaining)
|
||||
var secondsRemaining uint64
|
||||
if p.scanFinished {
|
||||
secs := float64(now.Sub(p.start) / time.Second)
|
||||
todo := float64(p.total.Bytes - p.processed.Bytes)
|
||||
secondsRemaining = uint64(secs / float64(p.processed.Bytes) * todo)
|
||||
}
|
||||
|
||||
p.printer.Update(p.total, p.processed, p.errors, p.currentFiles, p.start, secondsRemaining)
|
||||
p.mu.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
// Error is the error callback function for the archiver, it prints the error and returns nil.
|
||||
func (p *Progress) Error(item string, err error) error {
|
||||
cbErr := p.printer.Error(item, err)
|
||||
p.mu.Lock()
|
||||
p.errors++
|
||||
p.scanStarted = true
|
||||
p.mu.Unlock()
|
||||
|
||||
select {
|
||||
case p.errCh <- struct{}{}:
|
||||
case <-p.closed:
|
||||
}
|
||||
return cbErr
|
||||
return p.printer.Error(item, err)
|
||||
}
|
||||
|
||||
// StartFile is called when a file is being processed by a worker.
|
||||
func (p *Progress) StartFile(filename string) {
|
||||
select {
|
||||
case p.workerCh <- fileWorkerMessage{filename: filename}:
|
||||
case <-p.closed:
|
||||
}
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
p.currentFiles[filename] = struct{}{}
|
||||
}
|
||||
|
||||
// addProcessed adds c to the running processed totals and marks the scan as
// started. The caller must hold p.mu (see CompleteBlob).
func (p *Progress) addProcessed(c Counter) {
	p.processed.Files += c.Files
	p.processed.Dirs += c.Dirs
	p.processed.Bytes += c.Bytes
	p.scanStarted = true
}
|
||||
|
||||
// CompleteBlob is called for all saved blobs for files.
|
||||
func (p *Progress) CompleteBlob(bytes uint64) {
|
||||
select {
|
||||
case p.processedCh <- Counter{Bytes: bytes}:
|
||||
case <-p.closed:
|
||||
}
|
||||
p.mu.Lock()
|
||||
p.addProcessed(Counter{Bytes: bytes})
|
||||
p.mu.Unlock()
|
||||
}
|
||||
|
||||
// CompleteItem is the status callback function for the archiver when a
|
||||
// file/dir has been saved successfully.
|
||||
func (p *Progress) CompleteItem(item string, previous, current *restic.Node, s archiver.ItemStats, d time.Duration) {
|
||||
p.summary.Lock()
|
||||
p.mu.Lock()
|
||||
p.summary.ItemStats.Add(s)
|
||||
|
||||
// for the last item "/", current is nil
|
||||
|
@ -189,86 +161,80 @@ func (p *Progress) CompleteItem(item string, previous, current *restic.Node, s a
|
|||
p.summary.ProcessedBytes += current.Size
|
||||
}
|
||||
|
||||
p.summary.Unlock()
|
||||
p.mu.Unlock()
|
||||
|
||||
if current == nil {
|
||||
// error occurred, tell the status display to remove the line
|
||||
select {
|
||||
case p.workerCh <- fileWorkerMessage{filename: item, done: true}:
|
||||
case <-p.closed:
|
||||
}
|
||||
p.mu.Lock()
|
||||
delete(p.currentFiles, item)
|
||||
p.mu.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
switch current.Type {
|
||||
case "dir":
|
||||
select {
|
||||
case p.processedCh <- Counter{Dirs: 1}:
|
||||
case <-p.closed:
|
||||
}
|
||||
p.mu.Lock()
|
||||
p.addProcessed(Counter{Dirs: 1})
|
||||
p.mu.Unlock()
|
||||
|
||||
if previous == nil {
|
||||
switch {
|
||||
case previous == nil:
|
||||
p.printer.CompleteItem("dir new", item, previous, current, s, d)
|
||||
p.summary.Lock()
|
||||
p.mu.Lock()
|
||||
p.summary.Dirs.New++
|
||||
p.summary.Unlock()
|
||||
return
|
||||
}
|
||||
p.mu.Unlock()
|
||||
|
||||
if previous.Equals(*current) {
|
||||
case previous.Equals(*current):
|
||||
p.printer.CompleteItem("dir unchanged", item, previous, current, s, d)
|
||||
p.summary.Lock()
|
||||
p.mu.Lock()
|
||||
p.summary.Dirs.Unchanged++
|
||||
p.summary.Unlock()
|
||||
} else {
|
||||
p.mu.Unlock()
|
||||
|
||||
default:
|
||||
p.printer.CompleteItem("dir modified", item, previous, current, s, d)
|
||||
p.summary.Lock()
|
||||
p.mu.Lock()
|
||||
p.summary.Dirs.Changed++
|
||||
p.summary.Unlock()
|
||||
p.mu.Unlock()
|
||||
}
|
||||
|
||||
case "file":
|
||||
select {
|
||||
case p.processedCh <- Counter{Files: 1}:
|
||||
case <-p.closed:
|
||||
}
|
||||
select {
|
||||
case p.workerCh <- fileWorkerMessage{filename: item, done: true}:
|
||||
case <-p.closed:
|
||||
}
|
||||
p.mu.Lock()
|
||||
p.addProcessed(Counter{Files: 1})
|
||||
delete(p.currentFiles, item)
|
||||
p.mu.Unlock()
|
||||
|
||||
if previous == nil {
|
||||
switch {
|
||||
case previous == nil:
|
||||
p.printer.CompleteItem("file new", item, previous, current, s, d)
|
||||
p.summary.Lock()
|
||||
p.mu.Lock()
|
||||
p.summary.Files.New++
|
||||
p.summary.Unlock()
|
||||
return
|
||||
}
|
||||
p.mu.Unlock()
|
||||
|
||||
if previous.Equals(*current) {
|
||||
case previous.Equals(*current):
|
||||
p.printer.CompleteItem("file unchanged", item, previous, current, s, d)
|
||||
p.summary.Lock()
|
||||
p.mu.Lock()
|
||||
p.summary.Files.Unchanged++
|
||||
p.summary.Unlock()
|
||||
} else {
|
||||
p.mu.Unlock()
|
||||
|
||||
default:
|
||||
p.printer.CompleteItem("file modified", item, previous, current, s, d)
|
||||
p.summary.Lock()
|
||||
p.mu.Lock()
|
||||
p.summary.Files.Changed++
|
||||
p.summary.Unlock()
|
||||
p.mu.Unlock()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ReportTotal sets the total stats up to now
|
||||
func (p *Progress) ReportTotal(item string, s archiver.ScanStats) {
|
||||
select {
|
||||
case p.totalCh <- Counter{Files: uint64(s.Files), Dirs: uint64(s.Dirs), Bytes: s.Bytes}:
|
||||
case <-p.closed:
|
||||
}
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
|
||||
p.total = Counter{Files: uint64(s.Files), Dirs: uint64(s.Dirs), Bytes: s.Bytes}
|
||||
|
||||
if item == "" {
|
||||
p.printer.ReportTotal(item, p.start, s)
|
||||
close(p.totalCh)
|
||||
p.scanStarted = true
|
||||
return
|
||||
}
|
||||
}
|
||||
|
|
87
internal/ui/backup/progress_test.go
Normal file
87
internal/ui/backup/progress_test.go
Normal file
|
@ -0,0 +1,87 @@
|
|||
package backup
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/restic/restic/internal/archiver"
|
||||
"github.com/restic/restic/internal/restic"
|
||||
)
|
||||
|
||||
// mockPrinter is a no-op ProgressPrinter that records which CompleteItem
// events and which snapshot ID it has seen, for inspection by tests.
type mockPrinter struct {
	sync.Mutex // guards the fields below against concurrent callbacks

	dirUnchanged, fileNew bool      // set when the corresponding event is seen
	id                    restic.ID // snapshot ID passed to Finish
}
|
||||
|
||||
// Update is a no-op; the test only cares about completion events.
func (p *mockPrinter) Update(total, processed Counter, errors uint, currentFiles map[string]struct{}, start time.Time, secs uint64) {
}

// Error passes the error through unchanged.
func (p *mockPrinter) Error(item string, err error) error { return err }

// ScannerError passes the error through unchanged.
func (p *mockPrinter) ScannerError(item string, err error) error { return err }
|
||||
|
||||
func (p *mockPrinter) CompleteItem(messageType string, item string, previous, current *restic.Node, s archiver.ItemStats, d time.Duration) {
|
||||
p.Lock()
|
||||
defer p.Unlock()
|
||||
|
||||
switch messageType {
|
||||
case "dir unchanged":
|
||||
p.dirUnchanged = true
|
||||
case "file new":
|
||||
p.fileNew = true
|
||||
}
|
||||
}
|
||||
|
||||
// ReportTotal is a no-op; the test does not inspect scan totals.
func (p *mockPrinter) ReportTotal(_ string, _ time.Time, _ archiver.ScanStats) {}
||||
// Finish records the snapshot ID so the test can verify it was passed through.
func (p *mockPrinter) Finish(id restic.ID, _ time.Time, summary *Summary, dryRun bool) {
	p.Lock()
	defer p.Unlock()

	_ = *summary // Should not be nil.
	p.id = id
}
|
||||
|
||||
// Reset is a no-op.
func (p *mockPrinter) Reset() {}

// Stdout and Stderr are unused by the code under test.
func (p *mockPrinter) Stdout() io.WriteCloser { return nil }
func (p *mockPrinter) Stderr() io.WriteCloser { return nil }

// P and V (plain/verbose message output) are no-ops.
func (p *mockPrinter) P(msg string, args ...interface{}) {}
func (p *mockPrinter) V(msg string, args ...interface{}) {}
|
||||
|
||||
// TestProgress drives a Progress through a start-file / blob / item sequence
// and checks that the expected printer callbacks were observed.
func TestProgress(t *testing.T) {
	t.Parallel()

	prnt := &mockPrinter{}
	prog := NewProgress(prnt, time.Millisecond)

	ctx, cancel := context.WithCancel(context.Background())
	go prog.Run(ctx)

	prog.StartFile("foo")
	prog.CompleteBlob(1024)

	// "dir unchanged"
	node := restic.Node{Type: "dir"}
	prog.CompleteItem("foo", &node, &node, archiver.ItemStats{}, 0)
	// "file new"
	node.Type = "file"
	prog.CompleteItem("foo", nil, &node, archiver.ItemStats{}, 0)

	// Give Run's ticker (1ms interval) a chance to fire before cancelling.
	time.Sleep(10 * time.Millisecond)
	cancel()
	id := restic.NewRandomID()
	prog.Finish(id, false)

	if !prnt.dirUnchanged {
		t.Error(`"dir unchanged" event not seen`)
	}
	if !prnt.fileNew {
		t.Error(`"file new" event not seen`)
	}
	if prnt.id != id {
		t.Errorf("id not stored (has %v)", prnt.id)
	}
}
|
Loading…
Reference in a new issue