forked from TrueCloudLab/rclone
Add --max-depth parameter
This will apply to ls/lsd/sync/copy etc Fixes #412 Fixes #213
This commit is contained in:
parent
348734584b
commit
ccdd1ea6c4
4 changed files with 54 additions and 3 deletions
|
@ -432,6 +432,24 @@ to reduce the value so rclone moves on to a high level retry (see the
|
||||||
|
|
||||||
Disable low level retries with `--low-level-retries 1`.
### --max-depth=N ###

This modifies the recursion depth for all the commands except purge.

So if you do `rclone --max-depth 1 ls remote:path` you will see only
the files in the top level directory. Using `--max-depth 2` means you
will see all the files in the first two directory levels and so on.

For historical reasons the `lsd` command defaults to using a
`--max-depth` of 1 - you can override this with the command line flag.

You can use this command to disable recursion (with `--max-depth 1`).

Note that if you use this with `sync` and `--delete-excluded` the
files not recursed through are considered excluded and will be deleted
on the destination. Test first with `--dry-run` if you are not sure
what will happen.
||||||
### --modify-window=TIME ###

When checking whether a file has been modified, this is the maximum
|
|
|
@ -85,6 +85,7 @@ var (
|
||||||
updateOlder = pflag.BoolP("update", "u", false, "Skip files that are newer on the destination.")
|
updateOlder = pflag.BoolP("update", "u", false, "Skip files that are newer on the destination.")
|
||||||
noGzip = pflag.BoolP("no-gzip-encoding", "", false, "Don't set Accept-Encoding: gzip.")
|
noGzip = pflag.BoolP("no-gzip-encoding", "", false, "Don't set Accept-Encoding: gzip.")
|
||||||
dedupeMode = pflag.StringP("dedupe-mode", "", "interactive", "Dedupe mode interactive|skip|first|newest|oldest|rename.")
|
dedupeMode = pflag.StringP("dedupe-mode", "", "interactive", "Dedupe mode interactive|skip|first|newest|oldest|rename.")
|
||||||
|
maxDepth = pflag.IntP("max-depth", "", -1, "If set limits the recursion depth to this.")
|
||||||
bwLimit SizeSuffix
|
bwLimit SizeSuffix
|
||||||
|
|
||||||
// Key to use for password en/decryption.
|
// Key to use for password en/decryption.
|
||||||
|
@ -207,6 +208,7 @@ type ConfigInfo struct {
|
||||||
UpdateOlder bool // Skip files that are newer on the destination
|
UpdateOlder bool // Skip files that are newer on the destination
|
||||||
NoGzip bool // Disable compression
|
NoGzip bool // Disable compression
|
||||||
DedupeMode DeduplicateMode
|
DedupeMode DeduplicateMode
|
||||||
|
MaxDepth int
|
||||||
}
|
}
|
||||||
|
|
||||||
// Transport returns an http.RoundTripper with the correct timeouts
|
// Transport returns an http.RoundTripper with the correct timeouts
|
||||||
|
@ -309,6 +311,7 @@ func LoadConfig() {
|
||||||
Config.LowLevelRetries = *lowLevelRetries
|
Config.LowLevelRetries = *lowLevelRetries
|
||||||
Config.UpdateOlder = *updateOlder
|
Config.UpdateOlder = *updateOlder
|
||||||
Config.NoGzip = *noGzip
|
Config.NoGzip = *noGzip
|
||||||
|
Config.MaxDepth = *maxDepth
|
||||||
|
|
||||||
ConfigPath = *configFile
|
ConfigPath = *configFile
|
||||||
|
|
||||||
|
|
|
@ -463,6 +463,7 @@ func readFilesMap(fs Fs, includeAll bool, dir string) (files map[string]Object,
|
||||||
list := NewLister()
|
list := NewLister()
|
||||||
if !includeAll {
|
if !includeAll {
|
||||||
list.SetFilter(Config.Filter)
|
list.SetFilter(Config.Filter)
|
||||||
|
list.SetLevel(Config.MaxDepth)
|
||||||
}
|
}
|
||||||
list.Start(fs, dir)
|
list.Start(fs, dir)
|
||||||
for {
|
for {
|
||||||
|
@ -804,7 +805,7 @@ func Check(fdst, fsrc Fs) error {
|
||||||
//
|
//
|
||||||
// Lists in parallel which may get them out of order
|
// Lists in parallel which may get them out of order
|
||||||
func ListFn(f Fs, fn func(Object)) error {
|
func ListFn(f Fs, fn func(Object)) error {
|
||||||
list := NewLister().SetFilter(Config.Filter).Start(f, "")
|
list := NewLister().SetFilter(Config.Filter).SetLevel(Config.MaxDepth).Start(f, "")
|
||||||
var wg sync.WaitGroup
|
var wg sync.WaitGroup
|
||||||
wg.Add(Config.Checkers)
|
wg.Add(Config.Checkers)
|
||||||
for i := 0; i < Config.Checkers; i++ {
|
for i := 0; i < Config.Checkers; i++ {
|
||||||
|
@ -913,7 +914,11 @@ func Count(f Fs) (objects int64, size int64, err error) {
|
||||||
|
|
||||||
// ListDir lists the directories/buckets/containers in the Fs to the supplied writer
|
// ListDir lists the directories/buckets/containers in the Fs to the supplied writer
|
||||||
func ListDir(f Fs, w io.Writer) error {
|
func ListDir(f Fs, w io.Writer) error {
|
||||||
list := NewLister().SetLevel(1).Start(f, "")
|
level := 1
|
||||||
|
if Config.MaxDepth > 0 {
|
||||||
|
level = Config.MaxDepth
|
||||||
|
}
|
||||||
|
list := NewLister().SetLevel(level).Start(f, "")
|
||||||
for {
|
for {
|
||||||
dir, err := list.GetDir()
|
dir, err := list.GetDir()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|
|
@ -280,6 +280,26 @@ func TestCopy(t *testing.T) {
|
||||||
fstest.CheckItems(t, r.fremote, file1)
|
fstest.CheckItems(t, r.fremote, file1)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Test copy with depth
|
||||||
|
func TestCopyWithDepth(t *testing.T) {
|
||||||
|
r := NewRun(t)
|
||||||
|
defer r.Finalise()
|
||||||
|
file1 := r.WriteFile("sub dir/hello world", "hello world", t1)
|
||||||
|
file2 := r.WriteFile("hello world2", "hello world2", t2)
|
||||||
|
|
||||||
|
// Check the MaxDepth too
|
||||||
|
fs.Config.MaxDepth = 1
|
||||||
|
defer func() { fs.Config.MaxDepth = -1 }()
|
||||||
|
|
||||||
|
err := fs.CopyDir(r.fremote, r.flocal)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Copy failed: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
fstest.CheckItems(t, r.flocal, file1, file2)
|
||||||
|
fstest.CheckItems(t, r.fremote, file2)
|
||||||
|
}
|
||||||
|
|
||||||
// Test a server side copy if possible, or the backup path if not
|
// Test a server side copy if possible, or the backup path if not
|
||||||
func TestServerSideCopy(t *testing.T) {
|
func TestServerSideCopy(t *testing.T) {
|
||||||
r := NewRun(t)
|
r := NewRun(t)
|
||||||
|
@ -949,8 +969,13 @@ func TestCount(t *testing.T) {
|
||||||
defer r.Finalise()
|
defer r.Finalise()
|
||||||
file1 := r.WriteBoth("potato2", "------------------------------------------------------------", t1)
|
file1 := r.WriteBoth("potato2", "------------------------------------------------------------", t1)
|
||||||
file2 := r.WriteBoth("empty space", "", t2)
|
file2 := r.WriteBoth("empty space", "", t2)
|
||||||
|
file3 := r.WriteBoth("sub dir/potato3", "hello", t2)
|
||||||
|
|
||||||
fstest.CheckItems(t, r.fremote, file1, file2)
|
fstest.CheckItems(t, r.fremote, file1, file2, file3)
|
||||||
|
|
||||||
|
// Check the MaxDepth too
|
||||||
|
fs.Config.MaxDepth = 1
|
||||||
|
defer func() { fs.Config.MaxDepth = -1 }()
|
||||||
|
|
||||||
objects, size, err := fs.Count(r.fremote)
|
objects, size, err := fs.Count(r.fremote)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|
Loading…
Reference in a new issue