clean up some errors from 'go vet ./...'

This commit is contained in:
George Armhold 2018-09-05 08:04:55 -04:00
parent 35e9885e8b
commit bfc1bc6ee6
6 changed files with 17 additions and 21 deletions

1
go.sum
View file

@@ -58,6 +58,7 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/restic/chunker v0.2.0 h1:GjvmvFuv2mx0iekZs+iAlrioo2UtgsGSSplvoXaVHDU= github.com/restic/chunker v0.2.0 h1:GjvmvFuv2mx0iekZs+iAlrioo2UtgsGSSplvoXaVHDU=
github.com/restic/chunker v0.2.0/go.mod h1:VdjruEj+7BU1ZZTW8Qqi1exxRx2Omf2JH0NsUEkQ29s= github.com/restic/chunker v0.2.0/go.mod h1:VdjruEj+7BU1ZZTW8Qqi1exxRx2Omf2JH0NsUEkQ29s=
github.com/russross/blackfriday v1.5.1 h1:B8ZN6pD4PVofmlDCDUdELeYrbsVIDM/bpjW3v3zgcRc=
github.com/russross/blackfriday v1.5.1/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday v1.5.1/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww= github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww=
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=

View file

@@ -137,7 +137,7 @@ func TestArchiverSaveFile(t *testing.T) {
tempdir, repo, cleanup := prepareTempdirRepoSrc(t, TestDir{"file": testfile}) tempdir, repo, cleanup := prepareTempdirRepoSrc(t, TestDir{"file": testfile})
defer cleanup() defer cleanup()
node, stats := saveFile(t, repo, filepath.Join(tempdir, "file"), fs.Track{fs.Local{}}) node, stats := saveFile(t, repo, filepath.Join(tempdir, "file"), fs.Track{FS: fs.Local{}})
TestEnsureFileContent(ctx, t, repo, "file", node, testfile) TestEnsureFileContent(ctx, t, repo, "file", node, testfile)
if stats.DataSize != uint64(len(testfile.Content)) { if stats.DataSize != uint64(len(testfile.Content)) {
@@ -218,7 +218,7 @@ func TestArchiverSave(t *testing.T) {
var tmb tomb.Tomb var tmb tomb.Tomb
arch := New(repo, fs.Track{fs.Local{}}, Options{}) arch := New(repo, fs.Track{FS: fs.Local{}}, Options{})
arch.Error = func(item string, fi os.FileInfo, err error) error { arch.Error = func(item string, fi os.FileInfo, err error) error {
t.Errorf("archiver error for %v: %v", item, err) t.Errorf("archiver error for %v: %v", item, err)
return err return err
@@ -358,7 +358,7 @@ func BenchmarkArchiverSaveFileSmall(b *testing.B) {
tempdir, repo, cleanup := prepareTempdirRepoSrc(b, d) tempdir, repo, cleanup := prepareTempdirRepoSrc(b, d)
b.StartTimer() b.StartTimer()
_, stats := saveFile(b, repo, filepath.Join(tempdir, "file"), fs.Track{fs.Local{}}) _, stats := saveFile(b, repo, filepath.Join(tempdir, "file"), fs.Track{FS: fs.Local{}})
b.StopTimer() b.StopTimer()
if stats.DataSize != fileSize { if stats.DataSize != fileSize {
@@ -391,7 +391,7 @@ func BenchmarkArchiverSaveFileLarge(b *testing.B) {
tempdir, repo, cleanup := prepareTempdirRepoSrc(b, d) tempdir, repo, cleanup := prepareTempdirRepoSrc(b, d)
b.StartTimer() b.StartTimer()
_, stats := saveFile(b, repo, filepath.Join(tempdir, "file"), fs.Track{fs.Local{}}) _, stats := saveFile(b, repo, filepath.Join(tempdir, "file"), fs.Track{FS: fs.Local{}})
b.StopTimer() b.StopTimer()
if stats.DataSize != fileSize { if stats.DataSize != fileSize {
@@ -471,7 +471,7 @@ func TestArchiverSaveFileIncremental(t *testing.T) {
for i := 0; i < 3; i++ { for i := 0; i < 3; i++ {
appendToFile(t, testfile, data) appendToFile(t, testfile, data)
node, _ := saveFile(t, repo, testfile, fs.Track{fs.Local{}}) node, _ := saveFile(t, repo, testfile, fs.Track{FS: fs.Local{}})
t.Logf("node blobs: %v", node.Content) t.Logf("node blobs: %v", node.Content)
@@ -752,7 +752,7 @@ func TestArchiverSaveDir(t *testing.T) {
tempdir, repo, cleanup := prepareTempdirRepoSrc(t, test.src) tempdir, repo, cleanup := prepareTempdirRepoSrc(t, test.src)
defer cleanup() defer cleanup()
arch := New(repo, fs.Track{fs.Local{}}, Options{}) arch := New(repo, fs.Track{FS: fs.Local{}}, Options{})
arch.runWorkers(ctx, &tmb) arch.runWorkers(ctx, &tmb)
chdir := tempdir chdir := tempdir
@@ -842,7 +842,7 @@ func TestArchiverSaveDirIncremental(t *testing.T) {
var tmb tomb.Tomb var tmb tomb.Tomb
ctx := tmb.Context(context.Background()) ctx := tmb.Context(context.Background())
arch := New(repo, fs.Track{fs.Local{}}, Options{}) arch := New(repo, fs.Track{FS: fs.Local{}}, Options{})
arch.runWorkers(ctx, &tmb) arch.runWorkers(ctx, &tmb)
fi, err := fs.Lstat(tempdir) fi, err := fs.Lstat(tempdir)
@@ -1002,7 +1002,7 @@ func TestArchiverSaveTree(t *testing.T) {
tempdir, repo, cleanup := prepareTempdirRepoSrc(t, test.src) tempdir, repo, cleanup := prepareTempdirRepoSrc(t, test.src)
defer cleanup() defer cleanup()
testFS := fs.Track{fs.Local{}} testFS := fs.Track{FS: fs.Local{}}
arch := New(repo, testFS, Options{}) arch := New(repo, testFS, Options{})
arch.runWorkers(ctx, &tmb) arch.runWorkers(ctx, &tmb)
@@ -1291,7 +1291,7 @@ func TestArchiverSnapshot(t *testing.T) {
tempdir, repo, cleanup := prepareTempdirRepoSrc(t, test.src) tempdir, repo, cleanup := prepareTempdirRepoSrc(t, test.src)
defer cleanup() defer cleanup()
arch := New(repo, fs.Track{fs.Local{}}, Options{}) arch := New(repo, fs.Track{FS: fs.Local{}}, Options{})
chdir := tempdir chdir := tempdir
if test.chdir != "" { if test.chdir != "" {
@@ -1455,7 +1455,7 @@ func TestArchiverSnapshotSelect(t *testing.T) {
tempdir, repo, cleanup := prepareTempdirRepoSrc(t, test.src) tempdir, repo, cleanup := prepareTempdirRepoSrc(t, test.src)
defer cleanup() defer cleanup()
arch := New(repo, fs.Track{fs.Local{}}, Options{}) arch := New(repo, fs.Track{FS: fs.Local{}}, Options{})
arch.Select = test.selFn arch.Select = test.selFn
back := fs.TestChdir(t, tempdir) back := fs.TestChdir(t, tempdir)
@@ -1559,7 +1559,7 @@ func TestArchiverParent(t *testing.T) {
defer cleanup() defer cleanup()
testFS := &MockFS{ testFS := &MockFS{
FS: fs.Track{fs.Local{}}, FS: fs.Track{FS: fs.Local{}},
bytesRead: make(map[string]int), bytesRead: make(map[string]int),
} }
@@ -1732,7 +1732,7 @@ func TestArchiverErrorReporting(t *testing.T) {
test.prepare(t) test.prepare(t)
} }
arch := New(repo, fs.Track{fs.Local{}}, Options{}) arch := New(repo, fs.Track{FS: fs.Local{}}, Options{})
arch.Error = test.errFn arch.Error = test.errFn
_, snapshotID, err := arch.Snapshot(ctx, []string{"."}, SnapshotOptions{Time: time.Now()}) _, snapshotID, err := arch.Snapshot(ctx, []string{"."}, SnapshotOptions{Time: time.Now()})
@@ -1867,7 +1867,7 @@ func TestArchiverAbortEarlyOnError(t *testing.T) {
defer back() defer back()
testFS := &TrackFS{ testFS := &TrackFS{
FS: fs.Track{fs.Local{}}, FS: fs.Track{FS: fs.Local{}},
opened: make(map[string]uint), opened: make(map[string]uint),
} }

View file

@@ -96,7 +96,7 @@ func TestScanner(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
sc := NewScanner(fs.Track{fs.Local{}}) sc := NewScanner(fs.Track{FS: fs.Local{}})
if test.selFn != nil { if test.selFn != nil {
sc.Select = test.selFn sc.Select = test.selFn
} }
@@ -237,7 +237,7 @@ func TestScannerError(t *testing.T) {
test.prepare(t) test.prepare(t)
} }
sc := NewScanner(fs.Track{fs.Local{}}) sc := NewScanner(fs.Track{FS: fs.Local{}})
if test.selFn != nil { if test.selFn != nil {
sc.Select = test.selFn sc.Select = test.selFn
} }
@@ -307,7 +307,7 @@ func TestScannerCancel(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
sc := NewScanner(fs.Track{fs.Local{}}) sc := NewScanner(fs.Track{FS: fs.Local{}})
var lastStats ScanStats var lastStats ScanStats
sc.Result = func(item string, s ScanStats) { sc.Result = func(item string, s ScanStats) {
lastStats = s lastStats = s

View file

@@ -232,7 +232,6 @@ func unrollTree(f fs.FS, t *Tree) error {
} }
return errors.Errorf("tree unrollTree: collision on path, node %#v, path %q", node, f.Join(t.Path, entry)) return errors.Errorf("tree unrollTree: collision on path, node %#v, path %q", node, f.Join(t.Path, entry))
continue
} }
t.Nodes[entry] = Tree{Path: f.Join(t.Path, entry)} t.Nodes[entry] = Tree{Path: f.Join(t.Path, entry)}
} }

View file

@@ -320,8 +320,6 @@ func (be *b2Backend) List(ctx context.Context, t restic.FileType, fn func(restic
} }
cur = c cur = c
} }
return ctx.Err()
} }
// Remove keys for a specified backend type. // Remove keys for a specified backend type.

View file

@@ -67,8 +67,6 @@ func (m *S3Layout) moveFiles(ctx context.Context, be *s3.Backend, l backend.Layo
return be.Rename(h, l) return be.Rename(h, l)
}) })
}) })
return nil
} }
// Apply runs the migration. // Apply runs the migration.