Merge pull request #2630 from MichaelEischer/fix-staticcheck

Fix lots of small issues reported by staticcheck

Commit 0c48e515f0: 31 changed files with 74 additions and 106 deletions
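Most of the diff below is mechanical: the same handful of staticcheck findings fixed across many files. As a standalone, runnable illustration of the recurring before/after patterns (this program is not part of the commit, and the check IDs in the comments are approximate annotations, not taken from the diff):

package main

import (
	"bytes"
	"fmt"
	"sort"
	"time"
)

func main() {
	// S1032: sort.Sort(sort.StringSlice(x)) becomes sort.Strings(x)
	names := []string{"b", "a"}
	sort.Strings(names)

	// S1004: bytes.Compare(a, b) != 0 becomes !bytes.Equal(a, b)
	if !bytes.Equal([]byte("x"), []byte("y")) {
		fmt.Println("differs")
	}

	// S1030: string(buf.Bytes()) becomes buf.String()
	var buf bytes.Buffer
	buf.WriteString("hello")
	fmt.Println(buf.String())

	// S1012: time.Now().Sub(t) becomes time.Since(t)
	start := time.Now()
	fmt.Println(time.Since(start))

	// S1005: "for _ = range ch" becomes "for range ch"
	ch := make(chan int, 1)
	ch <- 1
	close(ch)
	for range ch {
		// drain the channel
	}

	fmt.Println(names)
}
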
@@ -196,7 +196,7 @@ func uniqueNodeNames(tree1, tree2 *restic.Tree) (tree1Nodes, tree2Nodes map[stri
 		uniqueNames = append(uniqueNames, name)
 	}
 
-	sort.Sort(sort.StringSlice(uniqueNames))
+	sort.Strings(uniqueNames)
 	return tree1Nodes, tree2Nodes, uniqueNames
 }
 
@@ -270,7 +270,7 @@ func (f *Finder) findInSnapshot(ctx context.Context, sn *restic.Snapshot) error
 			Printf("Unable to load tree %s\n ... which belongs to snapshot %s.\n", parentTreeID, sn.ID())
 
-			return false, walker.SkipNode
+			return false, walker.ErrSkipNode
 		}
 
 		if node == nil {
@@ -314,7 +314,7 @@ func (f *Finder) findInSnapshot(ctx context.Context, sn *restic.Snapshot) error
 		if !childMayMatch {
 			ignoreIfNoMatch = true
-			errIfNoMatch = walker.SkipNode
+			errIfNoMatch = walker.ErrSkipNode
 		} else {
 			ignoreIfNoMatch = false
 		}
@@ -354,7 +354,7 @@ func (f *Finder) findIDs(ctx context.Context, sn *restic.Snapshot) error {
 			Printf("Unable to load tree %s\n ... which belongs to snapshot %s.\n", parentTreeID, sn.ID())
 
-			return false, walker.SkipNode
+			return false, walker.ErrSkipNode
 		}
 
 		if node == nil {

@@ -222,7 +222,7 @@ func runLs(opts LsOptions, gopts GlobalOptions, args []string) error {
 		// otherwise, signal the walker to not walk recursively into any
 		// subdirs
 		if node.Type == "dir" {
-			return false, walker.SkipNode
+			return false, walker.ErrSkipNode
 		}
 		return false, nil
 	})

@@ -251,9 +251,8 @@ func PrintSnapshots(stdout io.Writer, list restic.Snapshots, reasons []restic.Ke
 // Prints nothing, if we did not group at all.
 func PrintSnapshotGroupHeader(stdout io.Writer, groupKeyJSON string) error {
 	var key restic.SnapshotGroupKey
-	var err error
 
-	err = json.Unmarshal([]byte(groupKeyJSON), &key)
+	err := json.Unmarshal([]byte(groupKeyJSON), &key)
 	if err != nil {
 		return err
 	}

@@ -12,7 +12,7 @@ func DeleteFiles(gopts GlobalOptions, repo restic.Repository, fileList restic.ID
 	deleteFiles(gopts, true, repo, fileList, fileType)
 }
 
-// DeleteFiles deletes the given fileList of fileType in parallel
+// DeleteFilesChecked deletes the given fileList of fileType in parallel
 // if an error occurs, it will cancel and return this error
 func DeleteFilesChecked(gopts GlobalOptions, repo restic.Repository, fileList restic.IDSet, fileType restic.FileType) error {
 	return deleteFiles(gopts, false, repo, fileList, fileType)
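The hunk above only corrects a copy-pasted doc comment, but the wrapper pair is worth a sketch: both exported functions delegate to one unexported helper whose boolean decides whether per-file errors abort the run. A minimal, self-contained approximation of that shape (names and bodies here are hypothetical, not restic's code):

package main

import (
	"errors"
	"fmt"
)

// removeOne stands in for the actual per-file delete operation.
func removeOne(name string) error {
	if name == "bad" {
		return errors.New("cannot delete " + name)
	}
	return nil
}

// deleteFiles mirrors the shared helper: ignoreError selects between
// best-effort deletion and fail-fast deletion.
func deleteFiles(ignoreError bool, files []string) error {
	for _, f := range files {
		if err := removeOne(f); err != nil {
			if ignoreError {
				continue
			}
			return err
		}
	}
	return nil
}

func main() {
	files := []string{"a", "bad", "c"}
	fmt.Println(deleteFiles(true, files))  // like DeleteFiles: always nil
	fmt.Println(deleteFiles(false, files)) // like DeleteFilesChecked: first error
}
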
@@ -190,7 +190,7 @@ func isDirExcludedByFile(dir, tagFilename, header string) bool {
 		Warnf("could not read signature from exclusion tagfile %q: %v\n", tf, err)
 		return false
 	}
-	if bytes.Compare(buf, []byte(header)) != 0 {
+	if !bytes.Equal(buf, []byte(header)) {
 		Warnf("invalid signature in exclusion tagfile %q\n", tf)
 		return false
 	}

@@ -55,7 +55,7 @@ func walkDir(dir string) <-chan *dirEntry {
 	}()
 
 	// first element is root
-	_ = <-ch
+	<-ch
 
 	return ch
 }

@@ -151,7 +151,7 @@ func testRunCheckOutput(gopts GlobalOptions) (string, error) {
 	}
 
 	err := runCheck(opts, gopts, nil)
-	return string(buf.Bytes()), err
+	return buf.String(), err
 }
 
 func testRunRebuildIndex(t testing.TB, gopts GlobalOptions) {
@@ -177,7 +177,7 @@ func testRunLs(t testing.TB, gopts GlobalOptions, snapshotID string) []string {
 	rtest.OK(t, runLs(opts, gopts, []string{snapshotID}))
 
-	return strings.Split(string(buf.Bytes()), "\n")
+	return strings.Split(buf.String(), "\n")
 }
 
 func testRunFind(t testing.TB, wantJSON bool, gopts GlobalOptions, pattern string) []byte {
@@ -253,7 +253,6 @@ func testRunForgetJSON(t testing.TB, gopts GlobalOptions, args ...string) {
 		"Expected 1 snapshot to be kept, got %v", len(forgets[0].Keep))
 	rtest.Assert(t, len(forgets[0].Remove) == 2,
 		"Expected 2 snapshots to be removed, got %v", len(forgets[0].Remove))
-	return
 }
 
 func testRunPrune(t testing.TB, gopts GlobalOptions) {
@@ -450,7 +449,7 @@ func TestBackupExclude(t *testing.T) {
 		f, err := os.Create(fp)
 		rtest.OK(t, err)
 
-		fmt.Fprintf(f, filename)
+		fmt.Fprint(f, filename)
 		rtest.OK(t, f.Close())
 	}
@@ -1105,14 +1104,14 @@ func TestRestoreNoMetadataOnIgnoredIntermediateDirs(t *testing.T) {
 	testRunRestoreIncludes(t, env.gopts, filepath.Join(env.base, "restore0"), snapshotID, []string{"*.ext"})
 
 	f1 := filepath.Join(env.base, "restore0", "testdata", "subdir1", "subdir2")
-	fi, err := os.Stat(f1)
+	_, err := os.Stat(f1)
 	rtest.OK(t, err)
 
 	// restore with filter "*", this should restore meta data on everything.
 	testRunRestoreIncludes(t, env.gopts, filepath.Join(env.base, "restore1"), snapshotID, []string{"*"})
 
 	f2 := filepath.Join(env.base, "restore1", "testdata", "subdir1", "subdir2")
-	fi, err = os.Stat(f2)
+	fi, err := os.Stat(f2)
 	rtest.OK(t, err)
 
 	rtest.Assert(t, fi.ModTime() == time.Unix(0, 0),
@@ -1417,11 +1416,7 @@ func linksEqual(source, dest map[uint64][]string) bool {
 		}
 	}
 
-	if len(dest) != 0 {
-		return false
-	}
-
-	return true
+	return len(dest) == 0
 }
 
 func linkEqual(source, dest []string) bool {

@@ -414,12 +414,12 @@ func (arch *Archiver) Save(ctx context.Context, snPath, target string, previous
 
 			_ = file.Close()
 			return fn, false, nil
-		} else {
-			debug.Log("%v hasn't changed, but contents are missing!", target)
-			// There are contents missing - inform user!
-			err := errors.Errorf("parts of %v not found in the repository index; storing the file again", target)
-			arch.error(abstarget, fi, err)
 		}
+
+		debug.Log("%v hasn't changed, but contents are missing!", target)
+		// There are contents missing - inform user!
+		err := errors.Errorf("parts of %v not found in the repository index; storing the file again", target)
+		arch.error(abstarget, fi, err)
 
 		fn.isFile = true
@@ -832,7 +832,7 @@ func TestArchiverSaveDir(t *testing.T) {
 			if stats.DataBlobs != 0 {
 				t.Errorf("wrong stats returned in DataBlobs, want 0, got %d", stats.DataBlobs)
 			}
-			if stats.TreeSize <= 0 {
+			if stats.TreeSize == 0 {
 				t.Errorf("wrong stats returned in TreeSize, want > 0, got %d", stats.TreeSize)
 			}
 			if stats.TreeBlobs <= 0 {
@@ -910,7 +910,7 @@ func TestArchiverSaveDirIncremental(t *testing.T) {
 			if stats.DataBlobs != 0 {
 				t.Errorf("wrong stats returned in DataBlobs, want 0, got %d", stats.DataBlobs)
 			}
-			if stats.TreeSize <= 0 {
+			if stats.TreeSize == 0 {
 				t.Errorf("wrong stats returned in TreeSize, want > 0, got %d", stats.TreeSize)
 			}
 			if stats.TreeBlobs <= 0 {
@@ -1458,10 +1458,7 @@ func TestArchiverSnapshotSelect(t *testing.T) {
 				"other": TestFile{Content: "another file"},
 			},
 			selFn: func(item string, fi os.FileInfo) bool {
-				if filepath.Ext(item) == ".txt" {
-					return false
-				}
-				return true
+				return filepath.Ext(item) != ".txt"
 			},
 		},
 		{
@@ -1485,10 +1482,7 @@ func TestArchiverSnapshotSelect(t *testing.T) {
 				"other": TestFile{Content: "another file"},
 			},
 			selFn: func(item string, fi os.FileInfo) bool {
-				if filepath.Base(item) == "subdir" {
-					return false
-				}
-				return true
+				return filepath.Base(item) != "subdir"
 			},
 		},
 		{
@@ -1990,13 +1984,6 @@ func snapshot(t testing.TB, repo restic.Repository, fs fs.FS, parent restic.ID,
 	return snapshotID, node
 }
 
-func chmod(t testing.TB, filename string, mode os.FileMode) {
-	err := os.Chmod(filename, mode)
-	if err != nil {
-		t.Fatal(err)
-	}
-}
-
 // StatFS allows overwriting what is returned by the Lstat function.
 type StatFS struct {
 	fs.FS

@@ -92,12 +92,9 @@ func TestBlobSaverError(t *testing.T) {
 
 	b := NewBlobSaver(ctx, tmb, saver, uint(runtime.NumCPU()))
 
-	var results []FutureBlob
-
 	for i := 0; i < test.blobs; i++ {
 		buf := &Buffer{Data: []byte(fmt.Sprintf("foo%d", i))}
-		fb := b.Save(ctx, restic.DataBlob, buf)
-		results = append(results, fb)
+		b.Save(ctx, restic.DataBlob, buf)
 	}
 
 	tmb.Kill(nil)

@@ -125,7 +125,6 @@ func TestTestCreateFiles(t *testing.T) {
 			if _, ok := item.(TestSymlink); ok {
 				continue
 			}
-			continue
 		}
 
 		targetPath := filepath.Join(tempdir, filepath.FromSlash(name))

@@ -2,8 +2,9 @@ package backend
 
 import (
 	"context"
-	"github.com/restic/restic/internal/errors"
 	"io"
+
+	"github.com/restic/restic/internal/errors"
 )
 
 // Semaphore limits access to a restricted resource.
@@ -13,7 +14,7 @@ type Semaphore struct {
 
 // NewSemaphore returns a new semaphore with capacity n.
 func NewSemaphore(n uint) (*Semaphore, error) {
-	if n <= 0 {
+	if n == 0 {
 		return nil, errors.New("must be a positive number")
 	}
 	return &Semaphore{
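The NewSemaphore change is purely about the unsigned type: for a uint, n <= 0 can only ever mean n == 0. For readers unfamiliar with the pattern, a counting semaphore of this kind is commonly a buffered channel; here is a minimal sketch under that assumption (the channel field and token method names are illustrative, not necessarily restic's exact implementation):

package main

import (
	"errors"
	"fmt"
)

// Semaphore limits concurrent access to a restricted resource.
type Semaphore struct {
	ch chan struct{}
}

// NewSemaphore returns a semaphore with capacity n. Since n is unsigned,
// n == 0 is the only invalid value, which is what the diff's check encodes.
func NewSemaphore(n uint) (*Semaphore, error) {
	if n == 0 {
		return nil, errors.New("must be a positive number")
	}
	return &Semaphore{ch: make(chan struct{}, n)}, nil
}

// GetToken blocks until one of the n slots is free.
func (s *Semaphore) GetToken() { s.ch <- struct{}{} }

// ReleaseToken frees a slot acquired by GetToken.
func (s *Semaphore) ReleaseToken() { <-s.ch }

func main() {
	sem, err := NewSemaphore(2)
	if err != nil {
		panic(err)
	}
	sem.GetToken()
	defer sem.ReleaseToken()
	fmt.Println("holding one of 2 tokens")
}
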
@@ -142,7 +142,7 @@ func (c *Checker) LoadIndex(ctx context.Context) (hints []error, errs []error) {
 
 	// run workers on ch
 	wg.Go(func() error {
-		return repository.RunWorkers(ctx, defaultParallelism, worker, final)
+		return repository.RunWorkers(defaultParallelism, worker, final)
 	})
 
 	// receive decoded indexes

@@ -231,10 +231,6 @@ func (k *EncryptionKey) Valid() bool {
 	return false
 }
 
-// ErrInvalidCiphertext is returned when trying to encrypt into the slice that
-// holds the plaintext.
-var ErrInvalidCiphertext = errors.New("invalid ciphertext, same slice used for plaintext")
-
 // validNonce checks that nonce is not all zero.
 func validNonce(nonce []byte) bool {
 	var sum byte

@@ -125,8 +125,7 @@ func (d *dir) Attr(ctx context.Context, a *fuse.Attr) error {
 func (d *dir) calcNumberOfLinks() uint32 {
 	// a directory d has 2 hardlinks + the number
 	// of directories contained by d
-	var count uint32
-	count = 2
+	count := uint32(2)
 	for _, node := range d.items {
 		if node.Type == "dir" {
 			count++

@@ -107,7 +107,7 @@ var IndexFull = func(idx *Index) bool {
 	for typ := range idx.byType {
 		blobs += idx.byType[typ].len()
 	}
-	age := time.Now().Sub(idx.created)
+	age := time.Since(idx.created)
 
 	switch {
 	case age >= indexMaxAge:
@@ -394,7 +394,7 @@ func (idx *Index) Finalize() {
 	idx.packIDToIndex = nil
 }
 
-// ID returns the IDs of the index, if available. If the index is not yet
+// IDs returns the IDs of the index, if available. If the index is not yet
 // finalized, an error is returned.
 func (idx *Index) IDs() (restic.IDs, error) {
 	idx.m.Lock()
@@ -472,7 +472,7 @@ func (idx *Index) merge(idx2 *Index) error {
 	defer idx2.m.Unlock()
 
 	if !idx2.final {
-		return errors.New("index to merge is not final!")
+		return errors.New("index to merge is not final")
 	}
 
 	packlen := len(idx.packs)

@@ -134,7 +134,7 @@ func (mi *MasterIndex) Insert(idx *Index) {
 	mi.idx = append(mi.idx, idx)
 }
 
-// Store remembers the id and pack in the index.
+// StorePack remembers the id and pack in the index.
 func (mi *MasterIndex) StorePack(id restic.ID, blobs []restic.Blob) {
 	mi.idxMutex.Lock()
 	defer mi.idxMutex.Unlock()

@@ -120,7 +120,7 @@ func TestMasterIndex(t *testing.T) {
 	rtest.Assert(t, !found, "Expected no blobs when fetching with a random id")
 	blobs = mIdx.Lookup(restic.NewRandomID(), restic.DataBlob)
 	rtest.Assert(t, blobs == nil, "Expected no blobs when fetching with a random id")
-	size, found = mIdx.LookupSize(restic.NewRandomID(), restic.DataBlob)
+	_, found = mIdx.LookupSize(restic.NewRandomID(), restic.DataBlob)
 	rtest.Assert(t, !found, "Expected no blobs when fetching with a random id")
 
 	// Test Count
@@ -172,7 +172,7 @@ func TestMasterMergeFinalIndexes(t *testing.T) {
 	rtest.Equals(t, 1, len(allIndexes))
 
 	blobCount := 0
-	for _ = range mIdx.Each(context.TODO()) {
+	for range mIdx.Each(context.TODO()) {
 		blobCount++
 	}
 	rtest.Equals(t, 2, blobCount)
@@ -207,7 +207,7 @@ func TestMasterMergeFinalIndexes(t *testing.T) {
 	rtest.Equals(t, []restic.PackedBlob{blob2}, blobs)
 
 	blobCount = 0
-	for _ = range mIdx.Each(context.TODO()) {
+	for range mIdx.Each(context.TODO()) {
 		blobCount++
 	}
 	rtest.Equals(t, 2, blobCount)

@@ -47,6 +47,8 @@ func New(be restic.Backend) *Repository {
 	return repo
 }
 
+// DisableAutoIndexUpdate deactives the automatic finalization and upload of new
+// indexes once these are full
 func (r *Repository) DisableAutoIndexUpdate() {
 	r.noAutoIndexUpdate = true
 }
@@ -479,7 +481,7 @@ func (r *Repository) LoadIndex(ctx context.Context) error {
 
 	// run workers on ch
 	wg.Go(func() error {
-		return RunWorkers(ctx, loadIndexParallelism, worker, final)
+		return RunWorkers(loadIndexParallelism, worker, final)
 	})
 
 	// receive decoded indexes

@@ -1,8 +1,6 @@
 package repository
 
 import (
-	"context"
-
 	"golang.org/x/sync/errgroup"
 )
 
@@ -10,8 +8,8 @@ import (
 // After all workers have terminated, finalFunc is run. If an error occurs in
 // one of the workers, it is returned. FinalFunc is always run, regardless of
 // any other previous errors.
-func RunWorkers(ctx context.Context, count int, workerFunc func() error, finalFunc func()) error {
-	wg, ctx := errgroup.WithContext(ctx)
+func RunWorkers(count int, workerFunc func() error, finalFunc func()) error {
	var wg errgroup.Group
 
 	// run workers
 	for i := 0; i < count; i++ {
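Dropping the context parameter works because RunWorkers never used the context derived by errgroup.WithContext, and the zero value of errgroup.Group is ready to use. A runnable sketch of the simplified function (same signature as in the diff; the body is reconstructed from its doc comment, so treat it as an approximation):

package main

import (
	"fmt"

	"golang.org/x/sync/errgroup"
)

// RunWorkers runs count copies of workerFunc concurrently. The first worker
// error (if any) is returned; finalFunc always runs after all workers finish.
func RunWorkers(count int, workerFunc func() error, finalFunc func()) error {
	var wg errgroup.Group

	for i := 0; i < count; i++ {
		wg.Go(workerFunc)
	}

	err := wg.Wait()
	finalFunc()
	return err
}

func main() {
	err := RunWorkers(4,
		func() error { fmt.Println("worker done"); return nil },
		func() { fmt.Println("final") },
	)
	fmt.Println("err:", err)
}
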
@@ -67,7 +67,7 @@ func PrefixLength(be Lister, t FileType) (int, error) {
 	}
 
 	// select prefixes of length l, test if the last one is the same as the current one
-	id := ID{}
+	var id ID
 outer:
 	for l := minPrefixLength; l < len(id); l++ {
 		var last string

@@ -42,9 +42,9 @@ func TestFind(t *testing.T) {
 	if err != nil {
 		t.Error(err)
 	}
-	expected_match := "20bdc1402a6fc9b633aaffffffffffffffffffffffffffffffffffffffffffff"
-	if f != expected_match {
-		t.Errorf("Wrong match returned want %s, got %s", expected_match, f)
+	expectedMatch := "20bdc1402a6fc9b633aaffffffffffffffffffffffffffffffffffffffffffff"
+	if f != expectedMatch {
+		t.Errorf("Wrong match returned want %s, got %s", expectedMatch, f)
 	}
 
 	f, err = Find(m, SnapshotFile, "NotAPrefix")
@@ -56,8 +56,8 @@ func TestFind(t *testing.T) {
 	}
 
 	// Try to match with a prefix longer than any ID.
-	extra_length_id := samples[0].String() + "f"
-	f, err = Find(m, SnapshotFile, extra_length_id)
+	extraLengthID := samples[0].String() + "f"
+	f, err = Find(m, SnapshotFile, extraLengthID)
 	if err != ErrNoIDPrefixFound {
 		t.Error("Expected no snapshots to be matched.")
 	}

@@ -12,11 +12,11 @@ type FileType string
 // These are the different data types a backend can store.
 const (
 	PackFile FileType = "data" // use data, as packs are stored under /data in repo
-	KeyFile            = "key"
-	LockFile           = "lock"
-	SnapshotFile       = "snapshot"
-	IndexFile          = "index"
-	ConfigFile         = "config"
+	KeyFile      FileType = "key"
+	LockFile     FileType = "lock"
+	SnapshotFile FileType = "snapshot"
+	IndexFile    FileType = "index"
+	ConfigFile   FileType = "config"
 )
 
 // Handle is used to store and access data in a backend.
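The FileType hunk fixes a subtle Go constant rule: in a const block without iota, only entries with an explicit type get that type, so KeyFile and friends were untyped string constants rather than FileType values (staticcheck flags this pattern as SA9004, if memory serves). A small standalone demonstration:

package main

import "fmt"

type FileType string

const (
	PackFile FileType = "data"
	KeyFile           = "key" // untyped constant: its default type is string, not FileType
)

const (
	LockFile     FileType = "lock"
	SnapshotFile FileType = "snapshot" // explicit type on every line, as in the fix
)

func main() {
	// Prints: main.FileType string main.FileType
	fmt.Printf("%T %T %T\n", PackFile, KeyFile, LockFile)
}
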
@@ -15,15 +15,13 @@ func TestHardLinks(t *testing.T) {
 	idx.Add(1, 2, "inode1-file1-on-device2")
 	idx.Add(2, 3, "inode2-file2-on-device3")
 
-	var sresult string
-	sresult = idx.GetFilename(1, 2)
+	sresult := idx.GetFilename(1, 2)
 	rtest.Equals(t, sresult, "inode1-file1-on-device2")
 
 	sresult = idx.GetFilename(2, 3)
 	rtest.Equals(t, sresult, "inode2-file2-on-device3")
 
-	var bresult bool
-	bresult = idx.Has(1, 2)
+	bresult := idx.Has(1, 2)
 	rtest.Equals(t, bresult, true)
 
 	bresult = idx.Has(1, 3)

@@ -25,9 +25,7 @@ func GroupSnapshots(snapshots Snapshots, options string) (map[string]Snapshots,
 	var GroupByTag bool
 	var GroupByHost bool
 	var GroupByPath bool
-	var GroupOptionList []string
 
-	GroupOptionList = strings.Split(options, ",")
+	GroupOptionList := strings.Split(options, ",")
 
 	for _, option := range GroupOptionList {
 		switch option {
@@ -51,7 +49,7 @@ func GroupSnapshots(snapshots Snapshots, options string) (map[string]Snapshots,
 
 		if GroupByTag {
 			tags = sn.Tags
-			sort.StringSlice(tags).Sort()
+			sort.Strings(tags)
 		}
 		if GroupByHost {
 			hostname = sn.Hostname
@@ -60,7 +58,7 @@ func GroupSnapshots(snapshots Snapshots, options string) (map[string]Snapshots,
 			paths = sn.Paths
 		}
 
-		sort.StringSlice(sn.Paths).Sort()
+		sort.Strings(sn.Paths)
 		var k []byte
 		var err error

@@ -19,8 +19,7 @@ import (
 type Node interface{}
 
 type Snapshot struct {
-	Nodes  map[string]Node
-	treeID restic.ID
+	Nodes map[string]Node
 }
 
 type File struct {

@@ -230,10 +230,10 @@ func (b *Backup) CompleteItem(item string, previous, current *restic.Node, s arc
 			done: true,
 		}
 		return
-	} else {
-		b.summary.ProcessedBytes += current.Size
 	}
 
+	b.summary.ProcessedBytes += current.Size
+
 	switch current.Type {
 	case "file":
 		b.processedCh <- counter{Files: 1}

@@ -204,7 +204,7 @@ func (t *Terminal) runWithoutStatus(ctx context.Context) {
 				fmt.Fprintf(os.Stderr, "flush failed: %v\n", err)
 			}
 
-		case _ = <-t.status:
+		case <-t.status:
 			// discard status lines
 		}
 	}

@@ -10,15 +10,15 @@ import (
 	"github.com/restic/restic/internal/restic"
 )
 
-// SkipNode is returned by WalkFunc when a dir node should not be walked.
-var SkipNode = errors.New("skip this node")
+// ErrSkipNode is returned by WalkFunc when a dir node should not be walked.
+var ErrSkipNode = errors.New("skip this node")
 
 // WalkFunc is the type of the function called for each node visited by Walk.
 // Path is the slash-separated path from the root node. If there was a problem
 // loading a node, err is set to a non-nil error. WalkFunc can chose to ignore
 // it by returning nil.
 //
-// When the special value SkipNode is returned and node is a dir node, it is
+// When the special value ErrSkipNode is returned and node is a dir node, it is
 // not walked. When the node is not a dir node, the remaining items in this
 // tree are skipped.
 //
@@ -26,7 +26,7 @@ var SkipNode = errors.New("skip this node")
 // For tree nodes, this means that the function is not called for the
 // referenced tree. If the node is not a tree, and all nodes in the current
 // tree have ignore set to true, the current tree will not be visited again.
-// When err is not nil and different from SkipNode, the value returned for
+// When err is not nil and different from ErrSkipNode, the value returned for
 // ignore is ignored.
 type WalkFunc func(parentTreeID restic.ID, path string, node *restic.Node, nodeErr error) (ignore bool, err error)
 
@@ -38,7 +38,7 @@ func Walk(ctx context.Context, repo restic.TreeLoader, root restic.ID, ignoreTre
 	_, err = walkFn(root, "/", nil, err)
 
 	if err != nil {
-		if err == SkipNode {
+		if err == ErrSkipNode {
 			err = nil
 		}
 		return err
@@ -76,7 +76,7 @@ func walk(ctx context.Context, repo restic.TreeLoader, prefix string, parentTree
 	if node.Type != "dir" {
 		ignore, err := walkFn(parentTreeID, p, node, nil)
 		if err != nil {
-			if err == SkipNode {
+			if err == ErrSkipNode {
 				// skip the remaining entries in this tree
 				return allNodesIgnored, nil
 			}
@@ -84,7 +84,7 @@ func walk(ctx context.Context, repo restic.TreeLoader, prefix string, parentTree
 			return false, err
 		}
 
-		if ignore == false {
+		if !ignore {
 			allNodesIgnored = false
 		}
 
@@ -102,7 +102,7 @@ func walk(ctx context.Context, repo restic.TreeLoader, prefix string, parentTree
 		subtree, err := repo.LoadTree(ctx, *node.Subtree)
 		ignore, err := walkFn(parentTreeID, p, node, err)
 		if err != nil {
-			if err == SkipNode {
+			if err == ErrSkipNode {
 				if ignore {
 					ignoreTrees.Insert(*node.Subtree)
 				}
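The rename from SkipNode to ErrSkipNode follows the Go convention that exported error values carry an Err prefix (staticcheck's style checks flag this; ST1012, if memory serves). The sentinel is an ordinary error value that the walker compares against. A simplified, self-contained sketch of how a walk callback and its caller use such a sentinel (toy types, not the real walker API):

package main

import (
	"errors"
	"fmt"
)

// ErrSkipNode signals that a directory node should not be descended into.
var ErrSkipNode = errors.New("skip this node")

// visit is a simplified stand-in for walker.WalkFunc.
func visit(path string, isDir bool) (ignore bool, err error) {
	if isDir && path == "/tmp" {
		return false, ErrSkipNode // prune this subtree
	}
	fmt.Println("visiting", path)
	return false, nil
}

func main() {
	nodes := []struct {
		path string
		dir  bool
	}{{"/", true}, {"/tmp", true}, {"/home", true}}

	for _, n := range nodes {
		if _, err := visit(n.path, n.dir); err != nil {
			if errors.Is(err, ErrSkipNode) {
				continue // the walker treats the sentinel as "prune", not as a failure
			}
			fmt.Println("error:", err)
		}
	}
}
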
@@ -138,7 +138,7 @@ func checkParentTreeOrder(want []string) checkFunc {
 	}
 }
 
-// checkSkipFor returns SkipNode if path is in skipFor, it checks that the
+// checkSkipFor returns ErrSkipNode if path is in skipFor, it checks that the
 // paths the walk func is called for are exactly the ones in wantPaths.
 func checkSkipFor(skipFor map[string]struct{}, wantPaths []string) checkFunc {
 	var pos int
@@ -161,7 +161,7 @@ func checkSkipFor(skipFor map[string]struct{}, wantPaths []string) checkFunc {
 		pos++
 
 		if _, ok := skipFor[path]; ok {
-			return false, SkipNode
+			return false, ErrSkipNode
 		}
 
 		return false, nil
@@ -177,7 +177,7 @@ func checkSkipFor(skipFor map[string]struct{}, wantPaths []string) checkFunc {
 	}
 }
 
-// checkIgnore returns SkipNode if path is in skipFor and sets ignore according
+// checkIgnore returns ErrSkipNode if path is in skipFor and sets ignore according
 // to ignoreFor. It checks that the paths the walk func is called for are exactly
 // the ones in wantPaths.
 func checkIgnore(skipFor map[string]struct{}, ignoreFor map[string]bool, wantPaths []string) checkFunc {
@@ -201,7 +201,7 @@ func checkIgnore(skipFor map[string]struct{}, ignoreFor map[string]bool, wantPat
 		pos++
 
 		if _, ok := skipFor[path]; ok {
-			return ignoreFor[path], SkipNode
+			return ignoreFor[path], ErrSkipNode
 		}
 
 		return ignoreFor[path], nil