Merge pull request #2583 from greatroar/unused

Remove some unused or duplicated code

commit b67b7ebfe6
7 changed files with 23 additions and 98 deletions
@@ -216,10 +216,11 @@ func (arch *Archiver) SaveDir(ctx context.Context, snPath string, fi os.FileInfo
 		return FutureTree{}, err
 	}
 
-	names, err := readdirnames(arch.FS, dir)
+	names, err := readdirnames(arch.FS, dir, fs.O_NOFOLLOW)
 	if err != nil {
 		return FutureTree{}, err
 	}
+	sort.Strings(names)
 
 	nodes := make([]FutureNode, 0, len(names))
 
@@ -628,43 +629,9 @@ func (arch *Archiver) SaveTree(ctx context.Context, snPath string, atree *Tree,
 	return tree, nil
 }
 
-type fileInfoSlice []os.FileInfo
-
-func (fi fileInfoSlice) Len() int {
-	return len(fi)
-}
-
-func (fi fileInfoSlice) Swap(i, j int) {
-	fi[i], fi[j] = fi[j], fi[i]
-}
-
-func (fi fileInfoSlice) Less(i, j int) bool {
-	return fi[i].Name() < fi[j].Name()
-}
-
-func readdir(filesystem fs.FS, dir string) ([]os.FileInfo, error) {
-	f, err := filesystem.OpenFile(dir, fs.O_RDONLY|fs.O_NOFOLLOW, 0)
-	if err != nil {
-		return nil, errors.Wrap(err, "Open")
-	}
-
-	entries, err := f.Readdir(-1)
-	if err != nil {
-		_ = f.Close()
-		return nil, errors.Wrapf(err, "Readdir %v failed", dir)
-	}
-
-	err = f.Close()
-	if err != nil {
-		return nil, err
-	}
-
-	sort.Sort(fileInfoSlice(entries))
-	return entries, nil
-}
-
-func readdirnames(filesystem fs.FS, dir string) ([]string, error) {
-	f, err := filesystem.OpenFile(dir, fs.O_RDONLY|fs.O_NOFOLLOW, 0)
+// flags are passed to fs.OpenFile. O_RDONLY is implied.
+func readdirnames(filesystem fs.FS, dir string, flags int) ([]string, error) {
+	f, err := filesystem.OpenFile(dir, fs.O_RDONLY|flags, 0)
 	if err != nil {
 		return nil, errors.Wrap(err, "Open")
 	}
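
The new readdirnames consolidates three pieces of near-duplicate code: the fileInfoSlice sort helpers, readdir, and the old readdirnames. Only the opening lines of the helper are touched by this hunk; the remainder of its body is unchanged code the diff does not show. As a point of reference, a minimal sketch of how the consolidated helper plausibly reads after the change, reconstructed from the visible context lines (the Readdirnames call and the exact error message in the middle are assumptions, not taken from the diff):

// flags are passed to fs.OpenFile. O_RDONLY is implied.
func readdirnames(filesystem fs.FS, dir string, flags int) ([]string, error) {
	f, err := filesystem.OpenFile(dir, fs.O_RDONLY|flags, 0)
	if err != nil {
		return nil, errors.Wrap(err, "Open")
	}

	// Read all remaining entry names in a single call.
	entries, err := f.Readdirnames(-1)
	if err != nil {
		_ = f.Close()
		return nil, errors.Wrapf(err, "Readdirnames %v failed", dir)
	}

	err = f.Close()
	if err != nil {
		return nil, err
	}

	// No sorting here any more: callers sort the names themselves when they
	// need a deterministic order (see the sort.Strings additions elsewhere).
	return entries, nil
}
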
@@ -680,32 +647,32 @@ func readdirnames(filesystem fs.FS, dir string) ([]string, error) {
 		return nil, err
 	}
 
-	sort.Sort(sort.StringSlice(entries))
 	return entries, nil
 }
 
 // resolveRelativeTargets replaces targets that only contain relative
 // directories ("." or "../../") with the contents of the directory. Each
 // element of target is processed with fs.Clean().
-func resolveRelativeTargets(fs fs.FS, targets []string) ([]string, error) {
+func resolveRelativeTargets(filesys fs.FS, targets []string) ([]string, error) {
 	debug.Log("targets before resolving: %v", targets)
 	result := make([]string, 0, len(targets))
 	for _, target := range targets {
-		target = fs.Clean(target)
-		pc, _ := pathComponents(fs, target, false)
+		target = filesys.Clean(target)
+		pc, _ := pathComponents(filesys, target, false)
 		if len(pc) > 0 {
 			result = append(result, target)
 			continue
 		}
 
 		debug.Log("replacing %q with readdir(%q)", target, target)
-		entries, err := readdirnames(fs, target)
+		entries, err := readdirnames(filesys, target, fs.O_NOFOLLOW)
 		if err != nil {
 			return nil, err
 		}
+		sort.Strings(entries)
 
 		for _, name := range entries {
-			result = append(result, fs.Join(target, name))
+			result = append(result, filesys.Join(target, name))
 		}
 	}
 
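
With sorting moved out of the helper, each call site now decides two things for itself: whether to follow symlinks (via the flags argument) and whether the returned names must be sorted. A small illustrative wrapper, not part of the PR, that captures the recurring pattern used by SaveDir, the scanner, and resolveRelativeTargets; it assumes the archiver package context, where sort and restic's fs package are already imported:

// listSortedNoFollow is a hypothetical convenience wrapper showing the
// common call pattern: list the directory without following symlinks,
// then sort the names for a deterministic iteration order.
func listSortedNoFollow(filesystem fs.FS, dir string) ([]string, error) {
	entries, err := readdirnames(filesystem, dir, fs.O_NOFOLLOW)
	if err != nil {
		return nil, err
	}
	sort.Strings(entries)
	return entries, nil
}
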
@@ -754,7 +721,6 @@ func (arch *Archiver) runWorkers(ctx context.Context, t *tomb.Tomb) {
 	arch.blobSaver = NewBlobSaver(ctx, t, arch.Repo, arch.Options.SaveBlobConcurrency)
 
 	arch.fileSaver = NewFileSaver(ctx, t,
-		arch.FS,
 		arch.blobSaver.Save,
 		arch.Repo.Config().ChunkerPolynomial,
 		arch.Options.FileReadConcurrency, arch.Options.SaveBlobConcurrency)
@@ -53,7 +53,6 @@ type SaveBlobFn func(context.Context, restic.BlobType, *Buffer) FutureBlob
 
 // FileSaver concurrently saves incoming files to the repo.
 type FileSaver struct {
-	fs           fs.FS
 	saveFilePool *BufferPool
 	saveBlob     SaveBlobFn
 
@@ -69,7 +68,7 @@ type FileSaver struct {
 
 // NewFileSaver returns a new file saver. A worker pool with fileWorkers is
 // started, it is stopped when ctx is cancelled.
-func NewFileSaver(ctx context.Context, t *tomb.Tomb, fs fs.FS, save SaveBlobFn, pol chunker.Pol, fileWorkers, blobWorkers uint) *FileSaver {
+func NewFileSaver(ctx context.Context, t *tomb.Tomb, save SaveBlobFn, pol chunker.Pol, fileWorkers, blobWorkers uint) *FileSaver {
 	ch := make(chan saveFileJob)
 
 	debug.Log("new file saver with %v file workers and %v blob workers", fileWorkers, blobWorkers)
@@ -77,7 +76,6 @@ func NewFileSaver(ctx context.Context, t *tomb.Tomb, fs fs.FS, save SaveBlobFn,
 	poolSize := fileWorkers + blobWorkers
 
 	s := &FileSaver{
-		fs:           fs,
 		saveBlob:     save,
 		saveFilePool: NewBufferPool(ctx, int(poolSize), chunker.MaxSize),
 		pol:          pol,
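
Since the fs field was unused, FileSaver evidently never opened files itself; dropping the field also drops the fs.FS parameter from NewFileSaver, so every call site loses one argument. Taken directly from the hunks in this PR, the updated call sites look like this (ctx, t, tmb, saveBlob, pol, and workers all come from the surrounding code shown above and below):

// Production wiring in Archiver.runWorkers: the FS argument is gone.
arch.fileSaver = NewFileSaver(ctx, t,
	arch.blobSaver.Save,
	arch.Repo.Config().ChunkerPolynomial,
	arch.Options.FileReadConcurrency, arch.Options.SaveBlobConcurrency)

// Test wiring in startFileSaver: likewise one argument shorter.
s := NewFileSaver(ctx, &tmb, saveBlob, pol, workers, workers)
s.NodeFromFileInfo = restic.NodeFromFileInfo
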
@@ -30,7 +30,7 @@ func createTestFiles(t testing.TB, num int) (files []string, cleanup func()) {
 	return files, cleanup
 }
 
-func startFileSaver(ctx context.Context, t testing.TB, fs fs.FS) (*FileSaver, *tomb.Tomb) {
+func startFileSaver(ctx context.Context, t testing.TB) (*FileSaver, *tomb.Tomb) {
 	var tmb tomb.Tomb
 
 	saveBlob := func(ctx context.Context, tpe restic.BlobType, buf *Buffer) FutureBlob {
@@ -45,7 +45,7 @@ func startFileSaver(ctx context.Context, t testing.TB, fs fs.FS) (*FileSaver, *tomb.Tomb) {
 		t.Fatal(err)
 	}
 
-	s := NewFileSaver(ctx, &tmb, fs, saveBlob, pol, workers, workers)
+	s := NewFileSaver(ctx, &tmb, saveBlob, pol, workers, workers)
 	s.NodeFromFileInfo = restic.NodeFromFileInfo
 
 	return s, &tmb
@@ -62,7 +62,7 @@ func TestFileSaver(t *testing.T) {
 	completeFn := func(*restic.Node, ItemStats) {}
 
 	testFs := fs.Local{}
-	s, tmb := startFileSaver(ctx, t, testFs)
+	s, tmb := startFileSaver(ctx, t)
 
 	var results []FutureFile
 
@@ -4,6 +4,7 @@ import (
 	"context"
 	"os"
 	"path/filepath"
+	"sort"
 
 	"github.com/restic/restic/internal/fs"
 )
@@ -86,10 +87,11 @@ func (s *Scanner) scan(ctx context.Context, stats ScanStats, target string) (ScanStats, error) {
 		stats.Files++
 		stats.Bytes += uint64(fi.Size())
 	case fi.Mode().IsDir():
-		names, err := readdirnames(s.FS, target)
+		names, err := readdirnames(s.FS, target, fs.O_NOFOLLOW)
 		if err != nil {
 			return stats, s.Error(target, fi, err)
 		}
+		sort.Strings(names)
 
 		for _, name := range names {
 			stats, err = s.scan(ctx, stats, filepath.Join(target, name))
@@ -214,7 +214,7 @@ func unrollTree(f fs.FS, t *Tree) error {
 	// nodes, add the contents of Path to the nodes.
 	if t.Path != "" && len(t.Nodes) > 0 {
 		debug.Log("resolve path %v", t.Path)
-		entries, err := fs.ReadDirNames(f, t.Path)
+		entries, err := readdirnames(f, t.Path, 0)
 		if err != nil {
 			return err
 		}
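
Note that the flags value here is 0 rather than fs.O_NOFOLLOW: the deleted fs.ReadDirNames went through fs.Open, which follows symlinks, so passing 0 preserves that behaviour for user-specified target paths, while the traversal call sites above explicitly refuse to follow symlinked directories. The two call shapes, both taken from hunks in this PR:

// Walking directory contents during backup: do not follow symlinked dirs.
names, err := readdirnames(arch.FS, dir, fs.O_NOFOLLOW)

// Expanding a user-specified target in unrollTree: keep the old
// follow-symlinks behaviour of fs.ReadDirNames by passing no extra flags.
entries, err := readdirnames(f, t.Path, 0)
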
@@ -1,6 +1,7 @@
 package archiver
 
 import (
+	"fmt"
 	"path/filepath"
 	"runtime"
 	"testing"
@@ -10,6 +11,9 @@ import (
 	restictest "github.com/restic/restic/internal/test"
 )
 
+// debug.Log requires Tree.String.
+var _ fmt.Stringer = Tree{}
+
 func TestPathComponents(t *testing.T) {
 	var tests = []struct {
 		p string
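
The added var _ fmt.Stringer = Tree{} is Go's usual compile-time interface check: it costs nothing at runtime, but the build fails if Tree ever stops implementing fmt.Stringer, which debug.Log relies on for readable output. A self-contained sketch of the idiom (the Tree type below is a stand-in for illustration, not the archiver's Tree):

package main

import "fmt"

type Tree struct{ Name string }

// String makes Tree satisfy fmt.Stringer.
func (t Tree) String() string { return "Tree(" + t.Name + ")" }

// Compile-time assertion: if String is removed or its signature changes,
// this declaration stops compiling instead of silently degrading log output.
var _ fmt.Stringer = Tree{}

func main() {
	fmt.Println(Tree{Name: "root"}) // Println uses String: prints Tree(root)
}
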
@@ -1,45 +0,0 @@
-package fs
-
-import "os"
-
-// ReadDir reads the directory named by dirname within fs and returns a list of
-// directory entries.
-func ReadDir(fs FS, dirname string) ([]os.FileInfo, error) {
-	f, err := fs.Open(dirname)
-	if err != nil {
-		return nil, err
-	}
-
-	entries, err := f.Readdir(-1)
-	if err != nil {
-		return nil, err
-	}
-
-	err = f.Close()
-	if err != nil {
-		return nil, err
-	}
-
-	return entries, nil
-}
-
-// ReadDirNames reads the directory named by dirname within fs and returns a
-// list of entry names.
-func ReadDirNames(fs FS, dirname string) ([]string, error) {
-	f, err := fs.Open(dirname)
-	if err != nil {
-		return nil, err
-	}
-
-	entries, err := f.Readdirnames(-1)
-	if err != nil {
-		return nil, err
-	}
-
-	err = f.Close()
-	if err != nil {
-		return nil, err
-	}
-
-	return entries, nil
-}
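
The deleted fs.ReadDir and fs.ReadDirNames were thin wrappers around File.Readdir and File.Readdirnames with no sorting and no O_NOFOLLOW support; their only caller left in this diff (unrollTree, above) now goes through the archiver's readdirnames instead. Outside this codebase the same functionality is available directly from the standard library; a minimal sketch using os rather than restic's fs abstraction:

package main

import (
	"fmt"
	"log"
	"os"
	"sort"
)

// readDirNames lists the entry names of dir in sorted order using only the
// standard library; roughly what fs.ReadDirNames plus a caller-side sort did.
func readDirNames(dir string) ([]string, error) {
	f, err := os.Open(dir)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	names, err := f.Readdirnames(-1)
	if err != nil {
		return nil, err
	}
	sort.Strings(names)
	return names, nil
}

func main() {
	names, err := readDirNames(".")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(names)
}
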